net/mptcp/pm_netlink.c
1 // SPDX-License-Identifier: GPL-2.0
2 /* Multipath TCP
3  *
4  * Copyright (c) 2020, Red Hat, Inc.
5  */
6
7 #define pr_fmt(fmt) "MPTCP: " fmt
8
9 #include <linux/inet.h>
10 #include <linux/kernel.h>
11 #include <net/tcp.h>
12 #include <net/netns/generic.h>
13 #include <net/mptcp.h>
14 #include <net/genetlink.h>
15 #include <uapi/linux/mptcp.h>
16
17 #include "protocol.h"
18 #include "mib.h"
19
20 /* forward declaration */
21 static struct genl_family mptcp_genl_family;
22
23 static int pm_nl_pernet_id;
24
25 struct mptcp_pm_addr_entry {
26         struct list_head        list;
27         struct mptcp_addr_info  addr;
28         u8                      flags;
29         int                     ifindex;
30         struct socket           *lsk;
31 };
32
33 struct mptcp_pm_add_entry {
34         struct list_head        list;
35         struct mptcp_addr_info  addr;
36         struct timer_list       add_timer;
37         struct mptcp_sock       *sock;
38         u8                      retrans_times;
39 };
40
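/* Per-netns path-manager state.
 * 'lock' serializes writers; local_addr_list is additionally RCU-published
 * so that readers can walk it locklessly, and the *_max counters are read
 * without the lock via READ_ONCE().  id_bitmap tracks the endpoint ids
 * currently in use (1 .. MPTCP_PM_MAX_ADDR_ID).
 */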
41 struct pm_nl_pernet {
42         /* protects pernet updates */
43         spinlock_t              lock;
44         struct list_head        local_addr_list;
45         unsigned int            addrs;
46         unsigned int            stale_loss_cnt;
47         unsigned int            add_addr_signal_max;
48         unsigned int            add_addr_accept_max;
49         unsigned int            local_addr_max;
50         unsigned int            subflows_max;
51         unsigned int            next_id;
52         DECLARE_BITMAP(id_bitmap, MPTCP_PM_MAX_ADDR_ID + 1);
53 };
54
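/* Upper bound on the number of endpoints per netns (also the size of the
 * on-stack address vectors used below), and the maximum number of times an
 * announced address is retransmitted before giving up.
 */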
55 #define MPTCP_PM_ADDR_MAX       8
56 #define ADD_ADDR_RETRANS_MAX    3
57
58 static struct pm_nl_pernet *pm_nl_get_pernet(const struct net *net)
59 {
60         return net_generic(net, pm_nl_pernet_id);
61 }
62
63 static struct pm_nl_pernet *
64 pm_nl_get_pernet_from_msk(const struct mptcp_sock *msk)
65 {
66         return pm_nl_get_pernet(sock_net((struct sock *)msk));
67 }
68
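/* Compare two addresses, treating an IPv4 address and its IPv4-mapped IPv6
 * form as equal; ports are only compared when use_port is set.
 */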
69 static bool addresses_equal(const struct mptcp_addr_info *a,
70                             const struct mptcp_addr_info *b, bool use_port)
71 {
72         bool addr_equals = false;
73
74         if (a->family == b->family) {
75                 if (a->family == AF_INET)
76                         addr_equals = a->addr.s_addr == b->addr.s_addr;
77 #if IS_ENABLED(CONFIG_MPTCP_IPV6)
78                 else
79                         addr_equals = !ipv6_addr_cmp(&a->addr6, &b->addr6);
80         } else if (a->family == AF_INET) {
81                 if (ipv6_addr_v4mapped(&b->addr6))
82                         addr_equals = a->addr.s_addr == b->addr6.s6_addr32[3];
83         } else if (b->family == AF_INET) {
84                 if (ipv6_addr_v4mapped(&a->addr6))
85                         addr_equals = a->addr6.s6_addr32[3] == b->addr.s_addr;
86 #endif
87         }
88
89         if (!addr_equals)
90                 return false;
91         if (!use_port)
92                 return true;
93
94         return a->port == b->port;
95 }
96
97 static void local_address(const struct sock_common *skc,
98                           struct mptcp_addr_info *addr)
99 {
100         addr->family = skc->skc_family;
101         addr->port = htons(skc->skc_num);
102         if (addr->family == AF_INET)
103                 addr->addr.s_addr = skc->skc_rcv_saddr;
104 #if IS_ENABLED(CONFIG_MPTCP_IPV6)
105         else if (addr->family == AF_INET6)
106                 addr->addr6 = skc->skc_v6_rcv_saddr;
107 #endif
108 }
109
110 static void remote_address(const struct sock_common *skc,
111                            struct mptcp_addr_info *addr)
112 {
113         addr->family = skc->skc_family;
114         addr->port = skc->skc_dport;
115         if (addr->family == AF_INET)
116                 addr->addr.s_addr = skc->skc_daddr;
117 #if IS_ENABLED(CONFIG_MPTCP_IPV6)
118         else if (addr->family == AF_INET6)
119                 addr->addr6 = skc->skc_v6_daddr;
120 #endif
121 }
122
123 static bool lookup_subflow_by_saddr(const struct list_head *list,
124                                     const struct mptcp_addr_info *saddr)
125 {
126         struct mptcp_subflow_context *subflow;
127         struct mptcp_addr_info cur;
128         struct sock_common *skc;
129
130         list_for_each_entry(subflow, list, node) {
131                 skc = (struct sock_common *)mptcp_subflow_tcp_sock(subflow);
132
133                 local_address(skc, &cur);
134                 if (addresses_equal(&cur, saddr, saddr->port))
135                         return true;
136         }
137
138         return false;
139 }
140
141 static bool lookup_subflow_by_daddr(const struct list_head *list,
142                                     const struct mptcp_addr_info *daddr)
143 {
144         struct mptcp_subflow_context *subflow;
145         struct mptcp_addr_info cur;
146         struct sock_common *skc;
147
148         list_for_each_entry(subflow, list, node) {
149                 skc = (struct sock_common *)mptcp_subflow_tcp_sock(subflow);
150
151                 remote_address(skc, &cur);
152                 if (addresses_equal(&cur, daddr, daddr->port))
153                         return true;
154         }
155
156         return false;
157 }
158
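/* Pick, under RCU, the first endpoint flagged as MPTCP_PM_ADDR_FLAG_SUBFLOW
 * whose id is still available for this msk and whose family is usable with
 * the msk destination address.
 */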
159 static struct mptcp_pm_addr_entry *
160 select_local_address(const struct pm_nl_pernet *pernet,
161                      const struct mptcp_sock *msk)
162 {
163         const struct sock *sk = (const struct sock *)msk;
164         struct mptcp_pm_addr_entry *entry, *ret = NULL;
165
166         msk_owned_by_me(msk);
167
168         rcu_read_lock();
169         list_for_each_entry_rcu(entry, &pernet->local_addr_list, list) {
170                 if (!(entry->flags & MPTCP_PM_ADDR_FLAG_SUBFLOW))
171                         continue;
172
173                 if (!test_bit(entry->addr.id, msk->pm.id_avail_bitmap))
174                         continue;
175
176                 if (entry->addr.family != sk->sk_family) {
177 #if IS_ENABLED(CONFIG_MPTCP_IPV6)
178                         if ((entry->addr.family == AF_INET &&
179                              !ipv6_addr_v4mapped(&sk->sk_v6_daddr)) ||
180                             (sk->sk_family == AF_INET &&
181                              !ipv6_addr_v4mapped(&entry->addr.addr6)))
182 #endif
183                                 continue;
184                 }
185
186                 ret = entry;
187                 break;
188         }
189         rcu_read_unlock();
190         return ret;
191 }
192
193 static struct mptcp_pm_addr_entry *
194 select_signal_address(struct pm_nl_pernet *pernet, const struct mptcp_sock *msk)
195 {
196         struct mptcp_pm_addr_entry *entry, *ret = NULL;
197
198         rcu_read_lock();
199         /* do not keep any additional per socket state, just signal
200          * the address list in order.
201          * Note: removal from the local address list during the msk life-cycle
202          * can lead to additional addresses not being announced.
203          */
204         list_for_each_entry_rcu(entry, &pernet->local_addr_list, list) {
205                 if (!test_bit(entry->addr.id, msk->pm.id_avail_bitmap))
206                         continue;
207
208                 if (!(entry->flags & MPTCP_PM_ADDR_FLAG_SIGNAL))
209                         continue;
210
211                 ret = entry;
212                 break;
213         }
214         rcu_read_unlock();
215         return ret;
216 }
217
218 unsigned int mptcp_pm_get_add_addr_signal_max(const struct mptcp_sock *msk)
219 {
220         const struct pm_nl_pernet *pernet = pm_nl_get_pernet_from_msk(msk);
221
222         return READ_ONCE(pernet->add_addr_signal_max);
223 }
224 EXPORT_SYMBOL_GPL(mptcp_pm_get_add_addr_signal_max);
225
226 unsigned int mptcp_pm_get_add_addr_accept_max(const struct mptcp_sock *msk)
227 {
228         struct pm_nl_pernet *pernet = pm_nl_get_pernet_from_msk(msk);
229
230         return READ_ONCE(pernet->add_addr_accept_max);
231 }
232 EXPORT_SYMBOL_GPL(mptcp_pm_get_add_addr_accept_max);
233
234 unsigned int mptcp_pm_get_subflows_max(const struct mptcp_sock *msk)
235 {
236         struct pm_nl_pernet *pernet = pm_nl_get_pernet_from_msk(msk);
237
238         return READ_ONCE(pernet->subflows_max);
239 }
240 EXPORT_SYMBOL_GPL(mptcp_pm_get_subflows_max);
241
242 unsigned int mptcp_pm_get_local_addr_max(const struct mptcp_sock *msk)
243 {
244         struct pm_nl_pernet *pernet = pm_nl_get_pernet_from_msk(msk);
245
246         return READ_ONCE(pernet->local_addr_max);
247 }
248 EXPORT_SYMBOL_GPL(mptcp_pm_get_local_addr_max);
249
250 bool mptcp_pm_nl_check_work_pending(struct mptcp_sock *msk)
251 {
252         struct pm_nl_pernet *pernet = pm_nl_get_pernet_from_msk(msk);
253
254         if (msk->pm.subflows == mptcp_pm_get_subflows_max(msk) ||
255             (find_next_and_bit(pernet->id_bitmap, msk->pm.id_avail_bitmap,
256                                MPTCP_PM_MAX_ADDR_ID + 1, 0) == MPTCP_PM_MAX_ADDR_ID + 1)) {
257                 WRITE_ONCE(msk->pm.work_pending, false);
258                 return false;
259         }
260         return true;
261 }
262
263 struct mptcp_pm_add_entry *
264 mptcp_lookup_anno_list_by_saddr(const struct mptcp_sock *msk,
265                                 const struct mptcp_addr_info *addr)
266 {
267         struct mptcp_pm_add_entry *entry;
268
269         lockdep_assert_held(&msk->pm.lock);
270
271         list_for_each_entry(entry, &msk->pm.anno_list, list) {
272                 if (addresses_equal(&entry->addr, addr, true))
273                         return entry;
274         }
275
276         return NULL;
277 }
278
279 bool mptcp_pm_sport_in_anno_list(struct mptcp_sock *msk, const struct sock *sk)
280 {
281         struct mptcp_pm_add_entry *entry;
282         struct mptcp_addr_info saddr;
283         bool ret = false;
284
285         local_address((struct sock_common *)sk, &saddr);
286
287         spin_lock_bh(&msk->pm.lock);
288         list_for_each_entry(entry, &msk->pm.anno_list, list) {
289                 if (addresses_equal(&entry->addr, &saddr, true)) {
290                         ret = true;
291                         goto out;
292                 }
293         }
294
295 out:
296         spin_unlock_bh(&msk->pm.lock);
297         return ret;
298 }
299
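/* ADD_ADDR retransmission timer: if the previous announce for this msk is
 * still pending, back off briefly; otherwise re-announce the address, up to
 * ADD_ADDR_RETRANS_MAX attempts, notifying the PM once the retransmission
 * budget is exhausted.
 */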
300 static void mptcp_pm_add_timer(struct timer_list *timer)
301 {
302         struct mptcp_pm_add_entry *entry = from_timer(entry, timer, add_timer);
303         struct mptcp_sock *msk = entry->sock;
304         struct sock *sk = (struct sock *)msk;
305
306         pr_debug("msk=%p", msk);
307
308         if (!msk)
309                 return;
310
311         if (inet_sk_state_load(sk) == TCP_CLOSE)
312                 return;
313
314         if (!entry->addr.id)
315                 return;
316
317         if (mptcp_pm_should_add_signal_addr(msk)) {
318                 sk_reset_timer(sk, timer, jiffies + TCP_RTO_MAX / 8);
319                 goto out;
320         }
321
322         spin_lock_bh(&msk->pm.lock);
323
324         if (!mptcp_pm_should_add_signal_addr(msk)) {
325                 pr_debug("retransmit ADD_ADDR id=%d", entry->addr.id);
326                 mptcp_pm_announce_addr(msk, &entry->addr, false);
327                 mptcp_pm_add_addr_send_ack(msk);
328                 entry->retrans_times++;
329         }
330
331         if (entry->retrans_times < ADD_ADDR_RETRANS_MAX)
332                 sk_reset_timer(sk, timer,
333                                jiffies + mptcp_get_add_addr_timeout(sock_net(sk)));
334
335         spin_unlock_bh(&msk->pm.lock);
336
337         if (entry->retrans_times == ADD_ADDR_RETRANS_MAX)
338                 mptcp_pm_subflow_established(msk);
339
340 out:
341         __sock_put(sk);
342 }
343
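/* Stop the ADD_ADDR retransmission timer for the given address: the retrans
 * counter is first maxed out under the PM lock, so that a concurrently
 * running timer cannot re-arm itself, then the timer is stopped
 * synchronously.  The matching entry, if any, is returned to the caller.
 */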
344 struct mptcp_pm_add_entry *
345 mptcp_pm_del_add_timer(struct mptcp_sock *msk,
346                        const struct mptcp_addr_info *addr, bool check_id)
347 {
348         struct mptcp_pm_add_entry *entry;
349         struct sock *sk = (struct sock *)msk;
350
351         spin_lock_bh(&msk->pm.lock);
352         entry = mptcp_lookup_anno_list_by_saddr(msk, addr);
353         if (entry && (!check_id || entry->addr.id == addr->id))
354                 entry->retrans_times = ADD_ADDR_RETRANS_MAX;
355         spin_unlock_bh(&msk->pm.lock);
356
357         if (entry && (!check_id || entry->addr.id == addr->id))
358                 sk_stop_timer_sync(sk, &entry->add_timer);
359
360         return entry;
361 }
362
363 static bool mptcp_pm_alloc_anno_list(struct mptcp_sock *msk,
364                                      const struct mptcp_pm_addr_entry *entry)
365 {
366         struct mptcp_pm_add_entry *add_entry = NULL;
367         struct sock *sk = (struct sock *)msk;
368         struct net *net = sock_net(sk);
369
370         lockdep_assert_held(&msk->pm.lock);
371
372         if (mptcp_lookup_anno_list_by_saddr(msk, &entry->addr))
373                 return false;
374
375         add_entry = kmalloc(sizeof(*add_entry), GFP_ATOMIC);
376         if (!add_entry)
377                 return false;
378
379         list_add(&add_entry->list, &msk->pm.anno_list);
380
381         add_entry->addr = entry->addr;
382         add_entry->sock = msk;
383         add_entry->retrans_times = 0;
384
385         timer_setup(&add_entry->add_timer, mptcp_pm_add_timer, 0);
386         sk_reset_timer(sk, &add_entry->add_timer,
387                        jiffies + mptcp_get_add_addr_timeout(net));
388
389         return true;
390 }
391
392 void mptcp_pm_free_anno_list(struct mptcp_sock *msk)
393 {
394         struct mptcp_pm_add_entry *entry, *tmp;
395         struct sock *sk = (struct sock *)msk;
396         LIST_HEAD(free_list);
397
398         pr_debug("msk=%p", msk);
399
400         spin_lock_bh(&msk->pm.lock);
401         list_splice_init(&msk->pm.anno_list, &free_list);
402         spin_unlock_bh(&msk->pm.lock);
403
404         list_for_each_entry_safe(entry, tmp, &free_list, list) {
405                 sk_stop_timer_sync(sk, &entry->add_timer);
406                 kfree(entry);
407         }
408 }
409
410 static bool lookup_address_in_vec(const struct mptcp_addr_info *addrs, unsigned int nr,
411                                   const struct mptcp_addr_info *addr)
412 {
413         int i;
414
415         for (i = 0; i < nr; i++) {
416                 if (addresses_equal(&addrs[i], addr, addr->port))
417                         return true;
418         }
419
420         return false;
421 }
422
423 /* Fill all the remote addresses into the array addrs[],
424  * and return the array size.
425  */
426 static unsigned int fill_remote_addresses_vec(struct mptcp_sock *msk, bool fullmesh,
427                                               struct mptcp_addr_info *addrs)
428 {
429         bool deny_id0 = READ_ONCE(msk->pm.remote_deny_join_id0);
430         struct sock *sk = (struct sock *)msk, *ssk;
431         struct mptcp_subflow_context *subflow;
432         struct mptcp_addr_info remote = { 0 };
433         unsigned int subflows_max;
434         int i = 0;
435
436         subflows_max = mptcp_pm_get_subflows_max(msk);
437         remote_address((struct sock_common *)sk, &remote);
438
439         /* Non-fullmesh endpoint, fill in the single entry
440          * corresponding to the primary MPC subflow remote address
441          */
442         if (!fullmesh) {
443                 if (deny_id0)
444                         return 0;
445
446                 msk->pm.subflows++;
447                 addrs[i++] = remote;
448         } else {
449                 mptcp_for_each_subflow(msk, subflow) {
450                         ssk = mptcp_subflow_tcp_sock(subflow);
451                         remote_address((struct sock_common *)ssk, &addrs[i]);
452                         if (deny_id0 && addresses_equal(&addrs[i], &remote, false))
453                                 continue;
454
455                         if (!lookup_address_in_vec(addrs, i, &addrs[i]) &&
456                             msk->pm.subflows < subflows_max) {
457                                 msk->pm.subflows++;
458                                 i++;
459                         }
460                 }
461         }
462
463         return i;
464 }
465
466 static struct mptcp_pm_addr_entry *
467 __lookup_addr_by_id(struct pm_nl_pernet *pernet, unsigned int id)
468 {
469         struct mptcp_pm_addr_entry *entry;
470
471         list_for_each_entry(entry, &pernet->local_addr_list, list) {
472                 if (entry->addr.id == id)
473                         return entry;
474         }
475         return NULL;
476 }
477
478 static struct mptcp_pm_addr_entry *
479 __lookup_addr(struct pm_nl_pernet *pernet, const struct mptcp_addr_info *info,
480               bool lookup_by_id)
481 {
482         struct mptcp_pm_addr_entry *entry;
483
484         list_for_each_entry(entry, &pernet->local_addr_list, list) {
485                 if ((!lookup_by_id && addresses_equal(&entry->addr, info, true)) ||
486                     (lookup_by_id && entry->addr.id == info->id))
487                         return entry;
488         }
489         return NULL;
490 }
491
492 static int
493 lookup_id_by_addr(const struct pm_nl_pernet *pernet, const struct mptcp_addr_info *addr)
494 {
495         const struct mptcp_pm_addr_entry *entry;
496         int ret = -1;
497
498         rcu_read_lock();
499         list_for_each_entry(entry, &pernet->local_addr_list, list) {
500                 if (addresses_equal(&entry->addr, addr, entry->addr.port)) {
501                         ret = entry->addr.id;
502                         break;
503                 }
504         }
505         rcu_read_unlock();
506         return ret;
507 }
508
509 static void mptcp_pm_create_subflow_or_signal_addr(struct mptcp_sock *msk)
510 {
511         struct sock *sk = (struct sock *)msk;
512         struct mptcp_pm_addr_entry *local;
513         unsigned int add_addr_signal_max;
514         unsigned int local_addr_max;
515         struct pm_nl_pernet *pernet;
516         unsigned int subflows_max;
517
518         pernet = pm_nl_get_pernet(sock_net(sk));
519
520         add_addr_signal_max = mptcp_pm_get_add_addr_signal_max(msk);
521         local_addr_max = mptcp_pm_get_local_addr_max(msk);
522         subflows_max = mptcp_pm_get_subflows_max(msk);
523
524         /* do lazy endpoint usage accounting for the MPC subflows */
525         if (unlikely(!(msk->pm.status & BIT(MPTCP_PM_MPC_ENDPOINT_ACCOUNTED))) && msk->first) {
526                 struct mptcp_addr_info mpc_addr;
527                 int mpc_id;
528
529                 local_address((struct sock_common *)msk->first, &mpc_addr);
530                 mpc_id = lookup_id_by_addr(pernet, &mpc_addr);
531                 if (mpc_id >= 0)
532                         __clear_bit(mpc_id, msk->pm.id_avail_bitmap);
533
534                 msk->pm.status |= BIT(MPTCP_PM_MPC_ENDPOINT_ACCOUNTED);
535         }
536
537         pr_debug("local %d:%d signal %d:%d subflows %d:%d\n",
538                  msk->pm.local_addr_used, local_addr_max,
539                  msk->pm.add_addr_signaled, add_addr_signal_max,
540                  msk->pm.subflows, subflows_max);
541
542         /* check first for announce */
543         if (msk->pm.add_addr_signaled < add_addr_signal_max) {
544                 local = select_signal_address(pernet, msk);
545
546                 /* due to racing events on both ends we can reach here while
547                  * the previous add address is still being processed: invoking
548                  * mptcp_pm_announce_addr() now would fail and the
549                  * corresponding id would be marked as used.
550                  * Instead let the PM machinery reschedule us once the
551                  * current address announce has completed.
552                  */
553                 if (msk->pm.addr_signal & BIT(MPTCP_ADD_ADDR_SIGNAL))
554                         return;
555
556                 if (local) {
557                         if (mptcp_pm_alloc_anno_list(msk, local)) {
558                                 __clear_bit(local->addr.id, msk->pm.id_avail_bitmap);
559                                 msk->pm.add_addr_signaled++;
560                                 mptcp_pm_announce_addr(msk, &local->addr, false);
561                                 mptcp_pm_nl_addr_send_ack(msk);
562                         }
563                 }
564         }
565
566         /* check if we should create a new subflow */
567         while (msk->pm.local_addr_used < local_addr_max &&
568                msk->pm.subflows < subflows_max) {
569                 struct mptcp_addr_info addrs[MPTCP_PM_ADDR_MAX];
570                 bool fullmesh;
571                 int i, nr;
572
573                 local = select_local_address(pernet, msk);
574                 if (!local)
575                         break;
576
577                 fullmesh = !!(local->flags & MPTCP_PM_ADDR_FLAG_FULLMESH);
578
579                 msk->pm.local_addr_used++;
580                 nr = fill_remote_addresses_vec(msk, fullmesh, addrs);
581                 if (nr)
582                         __clear_bit(local->addr.id, msk->pm.id_avail_bitmap);
583                 spin_unlock_bh(&msk->pm.lock);
584                 for (i = 0; i < nr; i++)
585                         __mptcp_subflow_connect(sk, &local->addr, &addrs[i]);
586                 spin_lock_bh(&msk->pm.lock);
587         }
588         mptcp_pm_nl_check_work_pending(msk);
589 }
590
591 static void mptcp_pm_nl_fully_established(struct mptcp_sock *msk)
592 {
593         mptcp_pm_create_subflow_or_signal_addr(msk);
594 }
595
596 static void mptcp_pm_nl_subflow_established(struct mptcp_sock *msk)
597 {
598         mptcp_pm_create_subflow_or_signal_addr(msk);
599 }
600
601 /* Fill all the local addresses into the array addrs[],
602  * and return the array size.
603  */
604 static unsigned int fill_local_addresses_vec(struct mptcp_sock *msk,
605                                              struct mptcp_addr_info *addrs)
606 {
607         struct sock *sk = (struct sock *)msk;
608         struct mptcp_pm_addr_entry *entry;
609         struct mptcp_addr_info local;
610         struct pm_nl_pernet *pernet;
611         unsigned int subflows_max;
612         int i = 0;
613
614         pernet = pm_nl_get_pernet_from_msk(msk);
615         subflows_max = mptcp_pm_get_subflows_max(msk);
616
617         rcu_read_lock();
618         list_for_each_entry_rcu(entry, &pernet->local_addr_list, list) {
619                 if (!(entry->flags & MPTCP_PM_ADDR_FLAG_FULLMESH))
620                         continue;
621
622                 if (entry->addr.family != sk->sk_family) {
623 #if IS_ENABLED(CONFIG_MPTCP_IPV6)
624                         if ((entry->addr.family == AF_INET &&
625                              !ipv6_addr_v4mapped(&sk->sk_v6_daddr)) ||
626                             (sk->sk_family == AF_INET &&
627                              !ipv6_addr_v4mapped(&entry->addr.addr6)))
628 #endif
629                                 continue;
630                 }
631
632                 if (msk->pm.subflows < subflows_max) {
633                         msk->pm.subflows++;
634                         addrs[i++] = entry->addr;
635                 }
636         }
637         rcu_read_unlock();
638
639         /* If the array is empty, fill in the single unspecified ('any')
640          * local address and let routing pick the source address
641          */
642         if (!i) {
643                 memset(&local, 0, sizeof(local));
644                 local.family = msk->pm.remote.family;
645
646                 msk->pm.subflows++;
647                 addrs[i++] = local;
648         }
649
650         return i;
651 }
652
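/* Handle an ADD_ADDR from the peer: echo it back, then open new subflows
 * towards the announced address from every local endpoint flagged as
 * fullmesh, or from the unspecified address when no such endpoint exists.
 */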
653 static void mptcp_pm_nl_add_addr_received(struct mptcp_sock *msk)
654 {
655         struct mptcp_addr_info addrs[MPTCP_PM_ADDR_MAX];
656         struct sock *sk = (struct sock *)msk;
657         unsigned int add_addr_accept_max;
658         struct mptcp_addr_info remote;
659         unsigned int subflows_max;
660         int i, nr;
661
662         add_addr_accept_max = mptcp_pm_get_add_addr_accept_max(msk);
663         subflows_max = mptcp_pm_get_subflows_max(msk);
664
665         pr_debug("accepted %d:%d remote family %d",
666                  msk->pm.add_addr_accepted, add_addr_accept_max,
667                  msk->pm.remote.family);
668
669         remote = msk->pm.remote;
670         mptcp_pm_announce_addr(msk, &remote, true);
671         mptcp_pm_nl_addr_send_ack(msk);
672
673         if (lookup_subflow_by_daddr(&msk->conn_list, &remote))
674                 return;
675
676                 /* if the announced address carries no port, use the remote port of the initial subflow */
677         if (!remote.port)
678                 remote.port = sk->sk_dport;
679
680         /* connect to the specified remote address, using whatever
681          * local address the routing configuration will pick.
682          */
683         nr = fill_local_addresses_vec(msk, addrs);
684
685         msk->pm.add_addr_accepted++;
686         if (msk->pm.add_addr_accepted >= add_addr_accept_max ||
687             msk->pm.subflows >= subflows_max)
688                 WRITE_ONCE(msk->pm.accept_addr, false);
689
690         spin_unlock_bh(&msk->pm.lock);
691         for (i = 0; i < nr; i++)
692                 __mptcp_subflow_connect(sk, &addrs[i], &remote);
693         spin_lock_bh(&msk->pm.lock);
694 }
695
696 void mptcp_pm_nl_addr_send_ack(struct mptcp_sock *msk)
697 {
698         struct mptcp_subflow_context *subflow;
699
700         msk_owned_by_me(msk);
701         lockdep_assert_held(&msk->pm.lock);
702
703         if (!mptcp_pm_should_add_signal(msk) &&
704             !mptcp_pm_should_rm_signal(msk))
705                 return;
706
707         subflow = list_first_entry_or_null(&msk->conn_list, typeof(*subflow), node);
708         if (subflow) {
709                 struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
710
711                 spin_unlock_bh(&msk->pm.lock);
712                 pr_debug("send ack for %s",
713                          mptcp_pm_should_add_signal(msk) ? "add_addr" : "rm_addr");
714
715                 mptcp_subflow_send_ack(ssk);
716                 spin_lock_bh(&msk->pm.lock);
717         }
718 }
719
720 static int mptcp_pm_nl_mp_prio_send_ack(struct mptcp_sock *msk,
721                                         struct mptcp_addr_info *addr,
722                                         u8 bkup)
723 {
724         struct mptcp_subflow_context *subflow;
725
726         pr_debug("bkup=%d", bkup);
727
728         mptcp_for_each_subflow(msk, subflow) {
729                 struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
730                 struct sock *sk = (struct sock *)msk;
731                 struct mptcp_addr_info local;
732
733                 local_address((struct sock_common *)ssk, &local);
734                 if (!addresses_equal(&local, addr, addr->port))
735                         continue;
736
737                 if (subflow->backup != bkup)
738                         msk->last_snd = NULL;
739                 subflow->backup = bkup;
740                 subflow->send_mp_prio = 1;
741                 subflow->request_bkup = bkup;
742                 __MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_MPPRIOTX);
743
744                 spin_unlock_bh(&msk->pm.lock);
745                 pr_debug("send ack for mp_prio");
746                 mptcp_subflow_send_ack(ssk);
747                 spin_lock_bh(&msk->pm.lock);
748
749                 return 0;
750         }
751
752         return -EINVAL;
753 }
754
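/* Close the subflows matching the ids in rm_list: for RM_ADDR the ids refer
 * to the peer's address ids (remote_id), for RM_SUBFLOW to the local ones.
 * The corresponding ids are marked as available again, and the PM counters
 * are adjusted for each id that actually removed a subflow.
 */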
755 static void mptcp_pm_nl_rm_addr_or_subflow(struct mptcp_sock *msk,
756                                            const struct mptcp_rm_list *rm_list,
757                                            enum linux_mptcp_mib_field rm_type)
758 {
759         struct mptcp_subflow_context *subflow, *tmp;
760         struct sock *sk = (struct sock *)msk;
761         u8 i;
762
763         pr_debug("%s rm_list_nr %d",
764                  rm_type == MPTCP_MIB_RMADDR ? "address" : "subflow", rm_list->nr);
765
766         msk_owned_by_me(msk);
767
768         if (sk->sk_state == TCP_LISTEN)
769                 return;
770
771         if (!rm_list->nr)
772                 return;
773
774         if (list_empty(&msk->conn_list))
775                 return;
776
777         for (i = 0; i < rm_list->nr; i++) {
778                 bool removed = false;
779
780                 list_for_each_entry_safe(subflow, tmp, &msk->conn_list, node) {
781                         struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
782                         int how = RCV_SHUTDOWN | SEND_SHUTDOWN;
783                         u8 id = subflow->local_id;
784
785                         if (rm_type == MPTCP_MIB_RMADDR)
786                                 id = subflow->remote_id;
787
788                         if (rm_list->ids[i] != id)
789                                 continue;
790
791                         pr_debug(" -> %s rm_list_ids[%d]=%u local_id=%u remote_id=%u",
792                                  rm_type == MPTCP_MIB_RMADDR ? "address" : "subflow",
793                                  i, rm_list->ids[i], subflow->local_id, subflow->remote_id);
794                         spin_unlock_bh(&msk->pm.lock);
795                         mptcp_subflow_shutdown(sk, ssk, how);
796
797                         /* the following takes care of updating the subflows counter */
798                         mptcp_close_ssk(sk, ssk, subflow);
799                         spin_lock_bh(&msk->pm.lock);
800
801                         removed = true;
802                         __MPTCP_INC_STATS(sock_net(sk), rm_type);
803                 }
804                 __set_bit(rm_list->ids[i], msk->pm.id_avail_bitmap);
805                 if (!removed)
806                         continue;
807
808                 if (rm_type == MPTCP_MIB_RMADDR) {
809                         msk->pm.add_addr_accepted--;
810                         WRITE_ONCE(msk->pm.accept_addr, true);
811                 } else if (rm_type == MPTCP_MIB_RMSUBFLOW) {
812                         msk->pm.local_addr_used--;
813                 }
814         }
815 }
816
817 static void mptcp_pm_nl_rm_addr_received(struct mptcp_sock *msk)
818 {
819         mptcp_pm_nl_rm_addr_or_subflow(msk, &msk->pm.rm_list_rx, MPTCP_MIB_RMADDR);
820 }
821
822 void mptcp_pm_nl_rm_subflow_received(struct mptcp_sock *msk,
823                                      const struct mptcp_rm_list *rm_list)
824 {
825         mptcp_pm_nl_rm_addr_or_subflow(msk, rm_list, MPTCP_MIB_RMSUBFLOW);
826 }
827
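/* Dispatch the path-manager events accumulated in msk->pm.status; runs from
 * the msk worker, with the msk socket lock held and taking the PM spinlock.
 */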
828 void mptcp_pm_nl_work(struct mptcp_sock *msk)
829 {
830         struct mptcp_pm_data *pm = &msk->pm;
831
832         msk_owned_by_me(msk);
833
834         if (!(pm->status & MPTCP_PM_WORK_MASK))
835                 return;
836
837         spin_lock_bh(&msk->pm.lock);
838
839         pr_debug("msk=%p status=%x", msk, pm->status);
840         if (pm->status & BIT(MPTCP_PM_ADD_ADDR_RECEIVED)) {
841                 pm->status &= ~BIT(MPTCP_PM_ADD_ADDR_RECEIVED);
842                 mptcp_pm_nl_add_addr_received(msk);
843         }
844         if (pm->status & BIT(MPTCP_PM_ADD_ADDR_SEND_ACK)) {
845                 pm->status &= ~BIT(MPTCP_PM_ADD_ADDR_SEND_ACK);
846                 mptcp_pm_nl_addr_send_ack(msk);
847         }
848         if (pm->status & BIT(MPTCP_PM_RM_ADDR_RECEIVED)) {
849                 pm->status &= ~BIT(MPTCP_PM_RM_ADDR_RECEIVED);
850                 mptcp_pm_nl_rm_addr_received(msk);
851         }
852         if (pm->status & BIT(MPTCP_PM_ESTABLISHED)) {
853                 pm->status &= ~BIT(MPTCP_PM_ESTABLISHED);
854                 mptcp_pm_nl_fully_established(msk);
855         }
856         if (pm->status & BIT(MPTCP_PM_SUBFLOW_ESTABLISHED)) {
857                 pm->status &= ~BIT(MPTCP_PM_SUBFLOW_ESTABLISHED);
858                 mptcp_pm_nl_subflow_established(msk);
859         }
860
861         spin_unlock_bh(&msk->pm.lock);
862 }
863
864 static bool address_use_port(struct mptcp_pm_addr_entry *entry)
865 {
866         return (entry->flags &
867                 (MPTCP_PM_ADDR_FLAG_SIGNAL | MPTCP_PM_ADDR_FLAG_SUBFLOW)) ==
868                 MPTCP_PM_ADDR_FLAG_SIGNAL;
869 }
870
871 /* caller must ensure the RCU grace period has already elapsed */
872 static void __mptcp_pm_release_addr_entry(struct mptcp_pm_addr_entry *entry)
873 {
874         if (entry->lsk)
875                 sock_release(entry->lsk);
876         kfree(entry);
877 }
878
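/* Add a new endpoint to the pernet list, allocating a free id when the
 * caller did not provide one.  An existing implicit endpoint with the same
 * address may be replaced.  Returns the endpoint id, or -EINVAL when the
 * address is a duplicate or the per-netns limits are exceeded.
 */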
879 static int mptcp_pm_nl_append_new_local_addr(struct pm_nl_pernet *pernet,
880                                              struct mptcp_pm_addr_entry *entry)
881 {
882         struct mptcp_pm_addr_entry *cur, *del_entry = NULL;
883         unsigned int addr_max;
884         int ret = -EINVAL;
885
886         spin_lock_bh(&pernet->lock);
887         /* to keep the code simple, don't do IDR-like allocation for address ID,
888          * just bail when we exceed limits
889          */
890         if (pernet->next_id == MPTCP_PM_MAX_ADDR_ID)
891                 pernet->next_id = 1;
892         if (pernet->addrs >= MPTCP_PM_ADDR_MAX)
893                 goto out;
894         if (test_bit(entry->addr.id, pernet->id_bitmap))
895                 goto out;
896
897         /* do not insert duplicate addresses; take the port into account
898          * only when both endpoints are signal-only ones using a dedicated port
899          */
900         list_for_each_entry(cur, &pernet->local_addr_list, list) {
901                 if (addresses_equal(&cur->addr, &entry->addr,
902                                     address_use_port(entry) &&
903                                     address_use_port(cur))) {
904                         /* allow replacing the existing endpoint only if such
905                          * endpoint is an implicit one and the user-space
906                          * did not provide an endpoint id
907                          */
908                         if (!(cur->flags & MPTCP_PM_ADDR_FLAG_IMPLICIT))
909                                 goto out;
910                         if (entry->addr.id)
911                                 goto out;
912
913                         pernet->addrs--;
914                         entry->addr.id = cur->addr.id;
915                         list_del_rcu(&cur->list);
916                         del_entry = cur;
917                         break;
918                 }
919         }
920
921         if (!entry->addr.id) {
922 find_next:
923                 entry->addr.id = find_next_zero_bit(pernet->id_bitmap,
924                                                     MPTCP_PM_MAX_ADDR_ID + 1,
925                                                     pernet->next_id);
926                 if (!entry->addr.id && pernet->next_id != 1) {
927                         pernet->next_id = 1;
928                         goto find_next;
929                 }
930         }
931
932         if (!entry->addr.id)
933                 goto out;
934
935         __set_bit(entry->addr.id, pernet->id_bitmap);
936         if (entry->addr.id > pernet->next_id)
937                 pernet->next_id = entry->addr.id;
938
939         if (entry->flags & MPTCP_PM_ADDR_FLAG_SIGNAL) {
940                 addr_max = pernet->add_addr_signal_max;
941                 WRITE_ONCE(pernet->add_addr_signal_max, addr_max + 1);
942         }
943         if (entry->flags & MPTCP_PM_ADDR_FLAG_SUBFLOW) {
944                 addr_max = pernet->local_addr_max;
945                 WRITE_ONCE(pernet->local_addr_max, addr_max + 1);
946         }
947
948         pernet->addrs++;
949         list_add_tail_rcu(&entry->list, &pernet->local_addr_list);
950         ret = entry->addr.id;
951
952 out:
953         spin_unlock_bh(&pernet->lock);
954
955         /* just replaced an existing entry, free it */
956         if (del_entry) {
957                 synchronize_rcu();
958                 __mptcp_pm_release_addr_entry(del_entry);
959         }
960         return ret;
961 }
962
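/* Endpoints carrying an explicit port get their own in-kernel MPTCP listening
 * socket bound to that address/port, used to accept the subflows directed at
 * the announced port.
 */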
963 static int mptcp_pm_nl_create_listen_socket(struct sock *sk,
964                                             struct mptcp_pm_addr_entry *entry)
965 {
966         int addrlen = sizeof(struct sockaddr_in);
967         struct sockaddr_storage addr;
968         struct mptcp_sock *msk;
969         struct socket *ssock;
970         int backlog = 1024;
971         int err;
972
973         err = sock_create_kern(sock_net(sk), entry->addr.family,
974                                SOCK_STREAM, IPPROTO_MPTCP, &entry->lsk);
975         if (err)
976                 return err;
977
978         msk = mptcp_sk(entry->lsk->sk);
979         if (!msk) {
980                 err = -EINVAL;
981                 goto out;
982         }
983
984         ssock = __mptcp_nmpc_socket(msk);
985         if (!ssock) {
986                 err = -EINVAL;
987                 goto out;
988         }
989
990         mptcp_info2sockaddr(&entry->addr, &addr, entry->addr.family);
991 #if IS_ENABLED(CONFIG_MPTCP_IPV6)
992         if (entry->addr.family == AF_INET6)
993                 addrlen = sizeof(struct sockaddr_in6);
994 #endif
995         err = kernel_bind(ssock, (struct sockaddr *)&addr, addrlen);
996         if (err) {
997                 pr_warn("kernel_bind error, err=%d", err);
998                 goto out;
999         }
1000
1001         err = kernel_listen(ssock, backlog);
1002         if (err) {
1003                 pr_warn("kernel_listen error, err=%d", err);
1004                 goto out;
1005         }
1006
1007         return 0;
1008
1009 out:
1010         sock_release(entry->lsk);
1011         return err;
1012 }
1013
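/* Map a local address to its endpoint id.  Id 0 is reserved for the address
 * used by the initial subflow; addresses not yet known are appended to the
 * pernet list as implicit endpoints.
 */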
1014 int mptcp_pm_nl_get_local_id(struct mptcp_sock *msk, struct sock_common *skc)
1015 {
1016         struct mptcp_pm_addr_entry *entry;
1017         struct mptcp_addr_info skc_local;
1018         struct mptcp_addr_info msk_local;
1019         struct pm_nl_pernet *pernet;
1020         int ret = -1;
1021
1022         if (WARN_ON_ONCE(!msk))
1023                 return -1;
1024
1025         /* The 0 ID mapping is defined by the first subflow, copied into the msk
1026          * addr
1027          */
1028         local_address((struct sock_common *)msk, &msk_local);
1029         local_address((struct sock_common *)skc, &skc_local);
1030         if (addresses_equal(&msk_local, &skc_local, false))
1031                 return 0;
1032
1033         pernet = pm_nl_get_pernet_from_msk(msk);
1034
1035         rcu_read_lock();
1036         list_for_each_entry_rcu(entry, &pernet->local_addr_list, list) {
1037                 if (addresses_equal(&entry->addr, &skc_local, entry->addr.port)) {
1038                         ret = entry->addr.id;
1039                         break;
1040                 }
1041         }
1042         rcu_read_unlock();
1043         if (ret >= 0)
1044                 return ret;
1045
1046         /* address not found, add to local list */
1047         entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
1048         if (!entry)
1049                 return -ENOMEM;
1050
1051         entry->addr = skc_local;
1052         entry->addr.id = 0;
1053         entry->addr.port = 0;
1054         entry->ifindex = 0;
1055         entry->flags = MPTCP_PM_ADDR_FLAG_IMPLICIT;
1056         entry->lsk = NULL;
1057         ret = mptcp_pm_nl_append_new_local_addr(pernet, entry);
1058         if (ret < 0)
1059                 kfree(entry);
1060
1061         return ret;
1062 }
1063
1064 void mptcp_pm_nl_data_init(struct mptcp_sock *msk)
1065 {
1066         struct mptcp_pm_data *pm = &msk->pm;
1067         bool subflows;
1068
1069         subflows = !!mptcp_pm_get_subflows_max(msk);
1070         WRITE_ONCE(pm->work_pending, (!!mptcp_pm_get_local_addr_max(msk) && subflows) ||
1071                    !!mptcp_pm_get_add_addr_signal_max(msk));
1072         WRITE_ONCE(pm->accept_addr, !!mptcp_pm_get_add_addr_accept_max(msk) && subflows);
1073         WRITE_ONCE(pm->accept_subflow, subflows);
1074 }
1075
1076 #define MPTCP_PM_CMD_GRP_OFFSET       0
1077 #define MPTCP_PM_EV_GRP_OFFSET        1
1078
1079 static const struct genl_multicast_group mptcp_pm_mcgrps[] = {
1080         [MPTCP_PM_CMD_GRP_OFFSET]       = { .name = MPTCP_PM_CMD_GRP_NAME, },
1081         [MPTCP_PM_EV_GRP_OFFSET]        = { .name = MPTCP_PM_EV_GRP_NAME,
1082                                             .flags = GENL_UNS_ADMIN_PERM,
1083                                           },
1084 };
1085
1086 static const struct nla_policy
1087 mptcp_pm_addr_policy[MPTCP_PM_ADDR_ATTR_MAX + 1] = {
1088         [MPTCP_PM_ADDR_ATTR_FAMILY]     = { .type       = NLA_U16,      },
1089         [MPTCP_PM_ADDR_ATTR_ID]         = { .type       = NLA_U8,       },
1090         [MPTCP_PM_ADDR_ATTR_ADDR4]      = { .type       = NLA_U32,      },
1091         [MPTCP_PM_ADDR_ATTR_ADDR6]      =
1092                 NLA_POLICY_EXACT_LEN(sizeof(struct in6_addr)),
1093         [MPTCP_PM_ADDR_ATTR_PORT]       = { .type       = NLA_U16       },
1094         [MPTCP_PM_ADDR_ATTR_FLAGS]      = { .type       = NLA_U32       },
1095         [MPTCP_PM_ADDR_ATTR_IF_IDX]     = { .type       = NLA_S32       },
1096 };
1097
1098 static const struct nla_policy mptcp_pm_policy[MPTCP_PM_ATTR_MAX + 1] = {
1099         [MPTCP_PM_ATTR_ADDR]            =
1100                                         NLA_POLICY_NESTED(mptcp_pm_addr_policy),
1101         [MPTCP_PM_ATTR_RCV_ADD_ADDRS]   = { .type       = NLA_U32,      },
1102         [MPTCP_PM_ATTR_SUBFLOWS]        = { .type       = NLA_U32,      },
1103 };
1104
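/* Mark the given subflow as stale once its loss count exceeds the configured
 * stale_loss_cnt, but only if another active subflow with a lower loss count
 * is available; pending data is then retransmitted on the remaining subflows.
 */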
1105 void mptcp_pm_nl_subflow_chk_stale(const struct mptcp_sock *msk, struct sock *ssk)
1106 {
1107         struct mptcp_subflow_context *iter, *subflow = mptcp_subflow_ctx(ssk);
1108         struct sock *sk = (struct sock *)msk;
1109         unsigned int active_max_loss_cnt;
1110         struct net *net = sock_net(sk);
1111         unsigned int stale_loss_cnt;
1112         bool slow;
1113
1114         stale_loss_cnt = mptcp_stale_loss_cnt(net);
1115         if (subflow->stale || !stale_loss_cnt || subflow->stale_count <= stale_loss_cnt)
1116                 return;
1117
1118         /* look for another available subflow not in loss state */
1119         active_max_loss_cnt = max_t(int, stale_loss_cnt - 1, 1);
1120         mptcp_for_each_subflow(msk, iter) {
1121                 if (iter != subflow && mptcp_subflow_active(iter) &&
1122                     iter->stale_count < active_max_loss_cnt) {
1123                         /* we have some alternatives, try to mark this subflow as idle ...*/
1124                         slow = lock_sock_fast(ssk);
1125                         if (!tcp_rtx_and_write_queues_empty(ssk)) {
1126                                 subflow->stale = 1;
1127                                 __mptcp_retransmit_pending_data(sk);
1128                                 MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_SUBFLOWSTALE);
1129                         }
1130                         unlock_sock_fast(ssk, slow);
1131
1132                         /* always try to push the pending data regardless of re-injections:
1133                          * we can possibly use backup subflows now, and subflow selection
1134                          * is cheap under the msk socket lock
1135                          */
1136                         __mptcp_push_pending(sk, 0);
1137                         return;
1138                 }
1139         }
1140 }
1141
1142 static int mptcp_pm_family_to_addr(int family)
1143 {
1144 #if IS_ENABLED(CONFIG_MPTCP_IPV6)
1145         if (family == AF_INET6)
1146                 return MPTCP_PM_ADDR_ATTR_ADDR6;
1147 #endif
1148         return MPTCP_PM_ADDR_ATTR_ADDR4;
1149 }
1150
1151 static int mptcp_pm_parse_addr(struct nlattr *attr, struct genl_info *info,
1152                                bool require_family,
1153                                struct mptcp_pm_addr_entry *entry)
1154 {
1155         struct nlattr *tb[MPTCP_PM_ADDR_ATTR_MAX + 1];
1156         int err, addr_addr;
1157
1158         if (!attr) {
1159                 GENL_SET_ERR_MSG(info, "missing address info");
1160                 return -EINVAL;
1161         }
1162
1163         /* no validation needed - was already done via nested policy */
1164         err = nla_parse_nested_deprecated(tb, MPTCP_PM_ADDR_ATTR_MAX, attr,
1165                                           mptcp_pm_addr_policy, info->extack);
1166         if (err)
1167                 return err;
1168
1169         memset(entry, 0, sizeof(*entry));
1170         if (!tb[MPTCP_PM_ADDR_ATTR_FAMILY]) {
1171                 if (!require_family)
1172                         goto skip_family;
1173
1174                 NL_SET_ERR_MSG_ATTR(info->extack, attr,
1175                                     "missing family");
1176                 return -EINVAL;
1177         }
1178
1179         entry->addr.family = nla_get_u16(tb[MPTCP_PM_ADDR_ATTR_FAMILY]);
1180         if (entry->addr.family != AF_INET
1181 #if IS_ENABLED(CONFIG_MPTCP_IPV6)
1182             && entry->addr.family != AF_INET6
1183 #endif
1184             ) {
1185                 NL_SET_ERR_MSG_ATTR(info->extack, attr,
1186                                     "unknown address family");
1187                 return -EINVAL;
1188         }
1189         addr_addr = mptcp_pm_family_to_addr(entry->addr.family);
1190         if (!tb[addr_addr]) {
1191                 NL_SET_ERR_MSG_ATTR(info->extack, attr,
1192                                     "missing address data");
1193                 return -EINVAL;
1194         }
1195
1196 #if IS_ENABLED(CONFIG_MPTCP_IPV6)
1197         if (entry->addr.family == AF_INET6)
1198                 entry->addr.addr6 = nla_get_in6_addr(tb[addr_addr]);
1199         else
1200 #endif
1201                 entry->addr.addr.s_addr = nla_get_in_addr(tb[addr_addr]);
1202
1203 skip_family:
1204         if (tb[MPTCP_PM_ADDR_ATTR_IF_IDX]) {
1205                 u32 val = nla_get_s32(tb[MPTCP_PM_ADDR_ATTR_IF_IDX]);
1206
1207                 entry->ifindex = val;
1208         }
1209
1210         if (tb[MPTCP_PM_ADDR_ATTR_ID])
1211                 entry->addr.id = nla_get_u8(tb[MPTCP_PM_ADDR_ATTR_ID]);
1212
1213         if (tb[MPTCP_PM_ADDR_ATTR_FLAGS])
1214                 entry->flags = nla_get_u32(tb[MPTCP_PM_ADDR_ATTR_FLAGS]);
1215
1216         if (tb[MPTCP_PM_ADDR_ATTR_PORT])
1217                 entry->addr.port = htons(nla_get_u16(tb[MPTCP_PM_ADDR_ATTR_PORT]));
1218
1219         return 0;
1220 }
1221
1222 static struct pm_nl_pernet *genl_info_pm_nl(struct genl_info *info)
1223 {
1224         return pm_nl_get_pernet(genl_info_net(info));
1225 }
1226
1227 static int mptcp_nl_add_subflow_or_signal_addr(struct net *net)
1228 {
1229         struct mptcp_sock *msk;
1230         long s_slot = 0, s_num = 0;
1231
1232         while ((msk = mptcp_token_iter_next(net, &s_slot, &s_num)) != NULL) {
1233                 struct sock *sk = (struct sock *)msk;
1234
1235                 if (!READ_ONCE(msk->fully_established))
1236                         goto next;
1237
1238                 lock_sock(sk);
1239                 spin_lock_bh(&msk->pm.lock);
1240                 mptcp_pm_create_subflow_or_signal_addr(msk);
1241                 spin_unlock_bh(&msk->pm.lock);
1242                 release_sock(sk);
1243
1244 next:
1245                 sock_put(sk);
1246                 cond_resched();
1247         }
1248
1249         return 0;
1250 }
1251
1252 static int mptcp_nl_cmd_add_addr(struct sk_buff *skb, struct genl_info *info)
1253 {
1254         struct nlattr *attr = info->attrs[MPTCP_PM_ATTR_ADDR];
1255         struct pm_nl_pernet *pernet = genl_info_pm_nl(info);
1256         struct mptcp_pm_addr_entry addr, *entry;
1257         int ret;
1258
1259         ret = mptcp_pm_parse_addr(attr, info, true, &addr);
1260         if (ret < 0)
1261                 return ret;
1262
1263         if (addr.addr.port && !(addr.flags & MPTCP_PM_ADDR_FLAG_SIGNAL)) {
1264                 GENL_SET_ERR_MSG(info, "flags must have signal when using port");
1265                 return -EINVAL;
1266         }
1267
1268         if (addr.flags & MPTCP_PM_ADDR_FLAG_SIGNAL &&
1269             addr.flags & MPTCP_PM_ADDR_FLAG_FULLMESH) {
1270                 GENL_SET_ERR_MSG(info, "flags mustn't have both signal and fullmesh");
1271                 return -EINVAL;
1272         }
1273
1274         if (addr.flags & MPTCP_PM_ADDR_FLAG_IMPLICIT) {
1275                 GENL_SET_ERR_MSG(info, "can't create IMPLICIT endpoint");
1276                 return -EINVAL;
1277         }
1278
1279         entry = kmalloc(sizeof(*entry), GFP_KERNEL);
1280         if (!entry) {
1281                 GENL_SET_ERR_MSG(info, "can't allocate addr");
1282                 return -ENOMEM;
1283         }
1284
1285         *entry = addr;
1286         if (entry->addr.port) {
1287                 ret = mptcp_pm_nl_create_listen_socket(skb->sk, entry);
1288                 if (ret) {
1289                         GENL_SET_ERR_MSG(info, "create listen socket error");
1290                         kfree(entry);
1291                         return ret;
1292                 }
1293         }
1294         ret = mptcp_pm_nl_append_new_local_addr(pernet, entry);
1295         if (ret < 0) {
1296                 GENL_SET_ERR_MSG(info, "too many addresses or duplicate one");
1297                 if (entry->lsk)
1298                         sock_release(entry->lsk);
1299                 kfree(entry);
1300                 return ret;
1301         }
1302
1303         mptcp_nl_add_subflow_or_signal_addr(sock_net(skb->sk));
1304
1305         return 0;
1306 }
1307
1308 int mptcp_pm_get_flags_and_ifindex_by_id(struct net *net, unsigned int id,
1309                                          u8 *flags, int *ifindex)
1310 {
1311         struct mptcp_pm_addr_entry *entry;
1312
1313         *flags = 0;
1314         *ifindex = 0;
1315
1316         if (id) {
1317                 rcu_read_lock();
1318                 entry = __lookup_addr_by_id(pm_nl_get_pernet(net), id);
1319                 if (entry) {
1320                         *flags = entry->flags;
1321                         *ifindex = entry->ifindex;
1322                 }
1323                 rcu_read_unlock();
1324         }
1325
1326         return 0;
1327 }
1328
1329 static bool remove_anno_list_by_saddr(struct mptcp_sock *msk,
1330                                       const struct mptcp_addr_info *addr)
1331 {
1332         struct mptcp_pm_add_entry *entry;
1333
1334         entry = mptcp_pm_del_add_timer(msk, addr, false);
1335         if (entry) {
1336                 list_del(&entry->list);
1337                 kfree(entry);
1338                 return true;
1339         }
1340
1341         return false;
1342 }
1343
1344 static bool mptcp_pm_remove_anno_addr(struct mptcp_sock *msk,
1345                                       const struct mptcp_addr_info *addr,
1346                                       bool force)
1347 {
1348         struct mptcp_rm_list list = { .nr = 0 };
1349         bool ret;
1350
1351         list.ids[list.nr++] = addr->id;
1352
1353         ret = remove_anno_list_by_saddr(msk, addr);
1354         if (ret || force) {
1355                 spin_lock_bh(&msk->pm.lock);
1356                 mptcp_pm_remove_addr(msk, &list);
1357                 spin_unlock_bh(&msk->pm.lock);
1358         }
1359         return ret;
1360 }
1361
1362 static int mptcp_nl_remove_subflow_and_signal_addr(struct net *net,
1363                                                    const struct mptcp_pm_addr_entry *entry)
1364 {
1365         const struct mptcp_addr_info *addr = &entry->addr;
1366         struct mptcp_rm_list list = { .nr = 0 };
1367         long s_slot = 0, s_num = 0;
1368         struct mptcp_sock *msk;
1369
1370         pr_debug("remove_id=%d", addr->id);
1371
1372         list.ids[list.nr++] = addr->id;
1373
1374         while ((msk = mptcp_token_iter_next(net, &s_slot, &s_num)) != NULL) {
1375                 struct sock *sk = (struct sock *)msk;
1376                 bool remove_subflow;
1377
1378                 if (list_empty(&msk->conn_list)) {
1379                         mptcp_pm_remove_anno_addr(msk, addr, false);
1380                         goto next;
1381                 }
1382
1383                 lock_sock(sk);
1384                 remove_subflow = lookup_subflow_by_saddr(&msk->conn_list, addr);
1385                 mptcp_pm_remove_anno_addr(msk, addr, remove_subflow &&
1386                                           !(entry->flags & MPTCP_PM_ADDR_FLAG_IMPLICIT));
1387                 if (remove_subflow)
1388                         mptcp_pm_remove_subflow(msk, &list);
1389                 release_sock(sk);
1390
1391 next:
1392                 sock_put(sk);
1393                 cond_resched();
1394         }
1395
1396         return 0;
1397 }
1398
1399 static int mptcp_nl_remove_id_zero_address(struct net *net,
1400                                            struct mptcp_addr_info *addr)
1401 {
1402         struct mptcp_rm_list list = { .nr = 0 };
1403         long s_slot = 0, s_num = 0;
1404         struct mptcp_sock *msk;
1405
1406         list.ids[list.nr++] = 0;
1407
1408         while ((msk = mptcp_token_iter_next(net, &s_slot, &s_num)) != NULL) {
1409                 struct sock *sk = (struct sock *)msk;
1410                 struct mptcp_addr_info msk_local;
1411
1412                 if (list_empty(&msk->conn_list))
1413                         goto next;
1414
1415                 local_address((struct sock_common *)msk, &msk_local);
1416                 if (!addresses_equal(&msk_local, addr, addr->port))
1417                         goto next;
1418
1419                 lock_sock(sk);
1420                 spin_lock_bh(&msk->pm.lock);
1421                 mptcp_pm_remove_addr(msk, &list);
1422                 mptcp_pm_nl_rm_subflow_received(msk, &list);
1423                 spin_unlock_bh(&msk->pm.lock);
1424                 release_sock(sk);
1425
1426 next:
1427                 sock_put(sk);
1428                 cond_resched();
1429         }
1430
1431         return 0;
1432 }
1433
1434 static int mptcp_nl_cmd_del_addr(struct sk_buff *skb, struct genl_info *info)
1435 {
1436         struct nlattr *attr = info->attrs[MPTCP_PM_ATTR_ADDR];
1437         struct pm_nl_pernet *pernet = genl_info_pm_nl(info);
1438         struct mptcp_pm_addr_entry addr, *entry;
1439         unsigned int addr_max;
1440         int ret;
1441
1442         ret = mptcp_pm_parse_addr(attr, info, false, &addr);
1443         if (ret < 0)
1444                 return ret;
1445
1446         /* the zero id address is special: the first address used by the msk
1447          * always gets such an id, so different subflows can have different zero
1448          * id addresses. Additionally zero id is not accounted for in id_bitmap.
1449          * Let's use an 'mptcp_rm_list' instead of the common remove code.
1450          */
1451         if (addr.addr.id == 0)
1452                 return mptcp_nl_remove_id_zero_address(sock_net(skb->sk), &addr.addr);
1453
1454         spin_lock_bh(&pernet->lock);
1455         entry = __lookup_addr_by_id(pernet, addr.addr.id);
1456         if (!entry) {
1457                 GENL_SET_ERR_MSG(info, "address not found");
1458                 spin_unlock_bh(&pernet->lock);
1459                 return -EINVAL;
1460         }
1461         if (entry->flags & MPTCP_PM_ADDR_FLAG_SIGNAL) {
1462                 addr_max = pernet->add_addr_signal_max;
1463                 WRITE_ONCE(pernet->add_addr_signal_max, addr_max - 1);
1464         }
1465         if (entry->flags & MPTCP_PM_ADDR_FLAG_SUBFLOW) {
1466                 addr_max = pernet->local_addr_max;
1467                 WRITE_ONCE(pernet->local_addr_max, addr_max - 1);
1468         }
1469
1470         pernet->addrs--;
1471         list_del_rcu(&entry->list);
1472         __clear_bit(entry->addr.id, pernet->id_bitmap);
1473         spin_unlock_bh(&pernet->lock);
1474
1475         mptcp_nl_remove_subflow_and_signal_addr(sock_net(skb->sk), entry);
1476         synchronize_rcu();
1477         __mptcp_pm_release_addr_entry(entry);
1478
1479         return ret;
1480 }
1481
static void mptcp_pm_remove_addrs_and_subflows(struct mptcp_sock *msk,
                                               struct list_head *rm_list)
{
        struct mptcp_rm_list alist = { .nr = 0 }, slist = { .nr = 0 };
        struct mptcp_pm_addr_entry *entry;

        list_for_each_entry(entry, rm_list, list) {
                if (lookup_subflow_by_saddr(&msk->conn_list, &entry->addr) &&
                    slist.nr < MPTCP_RM_IDS_MAX)
                        slist.ids[slist.nr++] = entry->addr.id;

                if (remove_anno_list_by_saddr(msk, &entry->addr) &&
                    alist.nr < MPTCP_RM_IDS_MAX)
                        alist.ids[alist.nr++] = entry->addr.id;
        }

        if (alist.nr) {
                spin_lock_bh(&msk->pm.lock);
                mptcp_pm_remove_addr(msk, &alist);
                spin_unlock_bh(&msk->pm.lock);
        }
        if (slist.nr)
                mptcp_pm_remove_subflow(msk, &slist);
}

static void mptcp_nl_remove_addrs_list(struct net *net,
                                       struct list_head *rm_list)
{
        long s_slot = 0, s_num = 0;
        struct mptcp_sock *msk;

        if (list_empty(rm_list))
                return;

        while ((msk = mptcp_token_iter_next(net, &s_slot, &s_num)) != NULL) {
                struct sock *sk = (struct sock *)msk;

                lock_sock(sk);
                mptcp_pm_remove_addrs_and_subflows(msk, rm_list);
                release_sock(sk);

                sock_put(sk);
                cond_resched();
        }
}

/* caller must ensure the RCU grace period is already elapsed */
static void __flush_addrs(struct list_head *list)
{
        while (!list_empty(list)) {
                struct mptcp_pm_addr_entry *cur;

                cur = list_entry(list->next,
                                 struct mptcp_pm_addr_entry, list);
                list_del_rcu(&cur->list);
                __mptcp_pm_release_addr_entry(cur);
        }
}

static void __reset_counters(struct pm_nl_pernet *pernet)
{
        WRITE_ONCE(pernet->add_addr_signal_max, 0);
        WRITE_ONCE(pernet->add_addr_accept_max, 0);
        WRITE_ONCE(pernet->local_addr_max, 0);
        pernet->addrs = 0;
}

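/* MPTCP_PM_CMD_FLUSH_ADDRS handler: detach the whole endpoint list and reset
 * the pernet counters under the lock, notify the existing msks, then free
 * the detached entries once synchronize_rcu() has returned.
 */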
static int mptcp_nl_cmd_flush_addrs(struct sk_buff *skb, struct genl_info *info)
{
        struct pm_nl_pernet *pernet = genl_info_pm_nl(info);
        LIST_HEAD(free_list);

        spin_lock_bh(&pernet->lock);
        list_splice_init(&pernet->local_addr_list, &free_list);
        __reset_counters(pernet);
        pernet->next_id = 1;
        bitmap_zero(pernet->id_bitmap, MPTCP_PM_MAX_ADDR_ID + 1);
        spin_unlock_bh(&pernet->lock);
        mptcp_nl_remove_addrs_list(sock_net(skb->sk), &free_list);
        synchronize_rcu();
        __flush_addrs(&free_list);
        return 0;
}

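/* Dump one endpoint as a nested MPTCP_PM_ATTR_ADDR attribute; on failure the
 * nest is cancelled and -EMSGSIZE is returned.
 */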
static int mptcp_nl_fill_addr(struct sk_buff *skb,
                              struct mptcp_pm_addr_entry *entry)
{
        struct mptcp_addr_info *addr = &entry->addr;
        struct nlattr *attr;

        attr = nla_nest_start(skb, MPTCP_PM_ATTR_ADDR);
        if (!attr)
                return -EMSGSIZE;

        if (nla_put_u16(skb, MPTCP_PM_ADDR_ATTR_FAMILY, addr->family))
                goto nla_put_failure;
        if (nla_put_u16(skb, MPTCP_PM_ADDR_ATTR_PORT, ntohs(addr->port)))
                goto nla_put_failure;
        if (nla_put_u8(skb, MPTCP_PM_ADDR_ATTR_ID, addr->id))
                goto nla_put_failure;
        if (nla_put_u32(skb, MPTCP_PM_ADDR_ATTR_FLAGS, entry->flags))
                goto nla_put_failure;
        if (entry->ifindex &&
            nla_put_s32(skb, MPTCP_PM_ADDR_ATTR_IF_IDX, entry->ifindex))
                goto nla_put_failure;

        if (addr->family == AF_INET &&
            nla_put_in_addr(skb, MPTCP_PM_ADDR_ATTR_ADDR4,
                            addr->addr.s_addr))
                goto nla_put_failure;
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
        else if (addr->family == AF_INET6 &&
                 nla_put_in6_addr(skb, MPTCP_PM_ADDR_ATTR_ADDR6, &addr->addr6))
                goto nla_put_failure;
#endif
        nla_nest_end(skb, attr);
        return 0;

nla_put_failure:
        nla_nest_cancel(skb, attr);
        return -EMSGSIZE;
}

static int mptcp_nl_cmd_get_addr(struct sk_buff *skb, struct genl_info *info)
{
        struct nlattr *attr = info->attrs[MPTCP_PM_ATTR_ADDR];
        struct pm_nl_pernet *pernet = genl_info_pm_nl(info);
        struct mptcp_pm_addr_entry addr, *entry;
        struct sk_buff *msg;
        void *reply;
        int ret;

        ret = mptcp_pm_parse_addr(attr, info, false, &addr);
        if (ret < 0)
                return ret;

        msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
        if (!msg)
                return -ENOMEM;

        reply = genlmsg_put_reply(msg, info, &mptcp_genl_family, 0,
                                  info->genlhdr->cmd);
        if (!reply) {
                GENL_SET_ERR_MSG(info, "not enough space in Netlink message");
                ret = -EMSGSIZE;
                goto fail;
        }

        spin_lock_bh(&pernet->lock);
        entry = __lookup_addr_by_id(pernet, addr.addr.id);
        if (!entry) {
                GENL_SET_ERR_MSG(info, "address not found");
                ret = -EINVAL;
                goto unlock_fail;
        }

        ret = mptcp_nl_fill_addr(msg, entry);
        if (ret)
                goto unlock_fail;

        genlmsg_end(msg, reply);
        ret = genlmsg_reply(msg, info);
        spin_unlock_bh(&pernet->lock);
        return ret;

unlock_fail:
        spin_unlock_bh(&pernet->lock);

fail:
        nlmsg_free(msg);
        return ret;
}

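/* GET_ADDR dump callback: walk the id bitmap and emit one multipart message
 * per known endpoint, resuming from the last dumped id saved in cb->args[0].
 */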
static int mptcp_nl_cmd_dump_addrs(struct sk_buff *msg,
                                   struct netlink_callback *cb)
{
        struct net *net = sock_net(msg->sk);
        struct mptcp_pm_addr_entry *entry;
        struct pm_nl_pernet *pernet;
        int id = cb->args[0];
        void *hdr;
        int i;

        pernet = pm_nl_get_pernet(net);

        spin_lock_bh(&pernet->lock);
        for (i = id; i < MPTCP_PM_MAX_ADDR_ID + 1; i++) {
                if (test_bit(i, pernet->id_bitmap)) {
                        entry = __lookup_addr_by_id(pernet, i);
                        if (!entry)
                                break;

                        if (entry->addr.id <= id)
                                continue;

                        hdr = genlmsg_put(msg, NETLINK_CB(cb->skb).portid,
                                          cb->nlh->nlmsg_seq, &mptcp_genl_family,
                                          NLM_F_MULTI, MPTCP_PM_CMD_GET_ADDR);
                        if (!hdr)
                                break;

                        if (mptcp_nl_fill_addr(msg, entry) < 0) {
                                genlmsg_cancel(msg, hdr);
                                break;
                        }

                        id = entry->addr.id;
                        genlmsg_end(msg, hdr);
                }
        }
        spin_unlock_bh(&pernet->lock);

        cb->args[0] = id;
        return msg->len;
}

static int parse_limit(struct genl_info *info, int id, unsigned int *limit)
{
        struct nlattr *attr = info->attrs[id];

        if (!attr)
                return 0;

        *limit = nla_get_u32(attr);
        if (*limit > MPTCP_PM_ADDR_MAX) {
                GENL_SET_ERR_MSG(info, "limit greater than maximum");
                return -EINVAL;
        }
        return 0;
}

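/* MPTCP_PM_CMD_SET_LIMITS handler: update the ADD_ADDR acceptance and subflow
 * limits under the pernet lock; if either attribute fails the parse_limit()
 * check, neither value is changed.
 */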
static int
mptcp_nl_cmd_set_limits(struct sk_buff *skb, struct genl_info *info)
{
        struct pm_nl_pernet *pernet = genl_info_pm_nl(info);
        unsigned int rcv_addrs, subflows;
        int ret;

        spin_lock_bh(&pernet->lock);
        rcv_addrs = pernet->add_addr_accept_max;
        ret = parse_limit(info, MPTCP_PM_ATTR_RCV_ADD_ADDRS, &rcv_addrs);
        if (ret)
                goto unlock;

        subflows = pernet->subflows_max;
        ret = parse_limit(info, MPTCP_PM_ATTR_SUBFLOWS, &subflows);
        if (ret)
                goto unlock;

        WRITE_ONCE(pernet->add_addr_accept_max, rcv_addrs);
        WRITE_ONCE(pernet->subflows_max, subflows);

unlock:
        spin_unlock_bh(&pernet->lock);
        return ret;
}

static int
mptcp_nl_cmd_get_limits(struct sk_buff *skb, struct genl_info *info)
{
        struct pm_nl_pernet *pernet = genl_info_pm_nl(info);
        struct sk_buff *msg;
        void *reply;

        msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
        if (!msg)
                return -ENOMEM;

        reply = genlmsg_put_reply(msg, info, &mptcp_genl_family, 0,
                                  MPTCP_PM_CMD_GET_LIMITS);
        if (!reply)
                goto fail;

        if (nla_put_u32(msg, MPTCP_PM_ATTR_RCV_ADD_ADDRS,
                        READ_ONCE(pernet->add_addr_accept_max)))
                goto fail;

        if (nla_put_u32(msg, MPTCP_PM_ATTR_SUBFLOWS,
                        READ_ONCE(pernet->subflows_max)))
                goto fail;

        genlmsg_end(msg, reply);
        return genlmsg_reply(msg, info);

fail:
        GENL_SET_ERR_MSG(info, "not enough space in Netlink message");
        nlmsg_free(msg);
        return -EMSGSIZE;
}

static void mptcp_pm_nl_fullmesh(struct mptcp_sock *msk,
                                 struct mptcp_addr_info *addr)
{
        struct mptcp_rm_list list = { .nr = 0 };

        list.ids[list.nr++] = addr->id;

        mptcp_pm_nl_rm_subflow_received(msk, &list);
        mptcp_pm_create_subflow_or_signal_addr(msk);
}

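/* Propagate an endpoint flag change to every established msk in the netns:
 * a BACKUP change sends MP_PRIO on the matching subflows, a FULLMESH change
 * drops and re-creates the subflows built from the given address.
 */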
static int mptcp_nl_set_flags(struct net *net,
                              struct mptcp_addr_info *addr,
                              u8 bkup, u8 changed)
{
        long s_slot = 0, s_num = 0;
        struct mptcp_sock *msk;
        int ret = -EINVAL;

        while ((msk = mptcp_token_iter_next(net, &s_slot, &s_num)) != NULL) {
                struct sock *sk = (struct sock *)msk;

                if (list_empty(&msk->conn_list))
                        goto next;

                lock_sock(sk);
                spin_lock_bh(&msk->pm.lock);
                if (changed & MPTCP_PM_ADDR_FLAG_BACKUP)
                        ret = mptcp_pm_nl_mp_prio_send_ack(msk, addr, bkup);
                if (changed & MPTCP_PM_ADDR_FLAG_FULLMESH)
                        mptcp_pm_nl_fullmesh(msk, addr);
                spin_unlock_bh(&msk->pm.lock);
                release_sock(sk);

next:
                sock_put(sk);
                cond_resched();
        }

        return ret;
}

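/* MPTCP_PM_CMD_SET_FLAGS handler: update the BACKUP/FULLMESH flags of an
 * existing endpoint, looked up by address or, when only an id is provided,
 * by id, and push the change to the running connections.  Setting FULLMESH
 * on a SIGNAL endpoint is rejected.
 */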
static int mptcp_nl_cmd_set_flags(struct sk_buff *skb, struct genl_info *info)
{
        struct mptcp_pm_addr_entry addr = { .addr = { .family = AF_UNSPEC }, }, *entry;
        struct nlattr *attr = info->attrs[MPTCP_PM_ATTR_ADDR];
        struct pm_nl_pernet *pernet = genl_info_pm_nl(info);
        u8 changed, mask = MPTCP_PM_ADDR_FLAG_BACKUP |
                           MPTCP_PM_ADDR_FLAG_FULLMESH;
        struct net *net = sock_net(skb->sk);
        u8 bkup = 0, lookup_by_id = 0;
        int ret;

        ret = mptcp_pm_parse_addr(attr, info, false, &addr);
        if (ret < 0)
                return ret;

        if (addr.flags & MPTCP_PM_ADDR_FLAG_BACKUP)
                bkup = 1;
        if (addr.addr.family == AF_UNSPEC) {
                lookup_by_id = 1;
                if (!addr.addr.id)
                        return -EOPNOTSUPP;
        }

        spin_lock_bh(&pernet->lock);
        entry = __lookup_addr(pernet, &addr.addr, lookup_by_id);
        if (!entry) {
                spin_unlock_bh(&pernet->lock);
                return -EINVAL;
        }
        if ((addr.flags & MPTCP_PM_ADDR_FLAG_FULLMESH) &&
            (entry->flags & MPTCP_PM_ADDR_FLAG_SIGNAL)) {
                spin_unlock_bh(&pernet->lock);
                return -EINVAL;
        }

        changed = (addr.flags ^ entry->flags) & mask;
        entry->flags = (entry->flags & ~mask) | (addr.flags & mask);
        addr = *entry;
        spin_unlock_bh(&pernet->lock);

        mptcp_nl_set_flags(net, &addr.addr, bkup, changed);
        return 0;
}

static void mptcp_nl_mcast_send(struct net *net, struct sk_buff *nlskb, gfp_t gfp)
{
        genlmsg_multicast_netns(&mptcp_genl_family, net,
                                nlskb, 0, MPTCP_PM_EV_GRP_OFFSET, gfp);
}

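/* Fill the subflow related part of a PM event: address family, source and
 * destination addresses and ports of @ssk, plus the MPTCP local and remote
 * address ids.
 */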
static int mptcp_event_add_subflow(struct sk_buff *skb, const struct sock *ssk)
{
        const struct inet_sock *issk = inet_sk(ssk);
        const struct mptcp_subflow_context *sf;

        if (nla_put_u16(skb, MPTCP_ATTR_FAMILY, ssk->sk_family))
                return -EMSGSIZE;

        switch (ssk->sk_family) {
        case AF_INET:
                if (nla_put_in_addr(skb, MPTCP_ATTR_SADDR4, issk->inet_saddr))
                        return -EMSGSIZE;
                if (nla_put_in_addr(skb, MPTCP_ATTR_DADDR4, issk->inet_daddr))
                        return -EMSGSIZE;
                break;
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
        case AF_INET6: {
                const struct ipv6_pinfo *np = inet6_sk(ssk);

                if (nla_put_in6_addr(skb, MPTCP_ATTR_SADDR6, &np->saddr))
                        return -EMSGSIZE;
                if (nla_put_in6_addr(skb, MPTCP_ATTR_DADDR6, &ssk->sk_v6_daddr))
                        return -EMSGSIZE;
                break;
        }
#endif
        default:
                WARN_ON_ONCE(1);
                return -EMSGSIZE;
        }

        if (nla_put_be16(skb, MPTCP_ATTR_SPORT, issk->inet_sport))
                return -EMSGSIZE;
        if (nla_put_be16(skb, MPTCP_ATTR_DPORT, issk->inet_dport))
                return -EMSGSIZE;

        sf = mptcp_subflow_ctx(ssk);
        if (WARN_ON_ONCE(!sf))
                return -EINVAL;

        if (nla_put_u8(skb, MPTCP_ATTR_LOC_ID, sf->local_id))
                return -EMSGSIZE;

        if (nla_put_u8(skb, MPTCP_ATTR_REM_ID, sf->remote_id))
                return -EMSGSIZE;

        return 0;
}

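/* Common payload of the subflow events: the connection token, the subflow
 * addresses, the backup flag, the bound interface (if any) and, while the
 * MPTCP connection is established, any pending socket error.
 */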
static int mptcp_event_put_token_and_ssk(struct sk_buff *skb,
                                         const struct mptcp_sock *msk,
                                         const struct sock *ssk)
{
        const struct sock *sk = (const struct sock *)msk;
        const struct mptcp_subflow_context *sf;
        u8 sk_err;

        if (nla_put_u32(skb, MPTCP_ATTR_TOKEN, msk->token))
                return -EMSGSIZE;

        if (mptcp_event_add_subflow(skb, ssk))
                return -EMSGSIZE;

        sf = mptcp_subflow_ctx(ssk);
        if (WARN_ON_ONCE(!sf))
                return -EINVAL;

        if (nla_put_u8(skb, MPTCP_ATTR_BACKUP, sf->backup))
                return -EMSGSIZE;

        if (ssk->sk_bound_dev_if &&
            nla_put_s32(skb, MPTCP_ATTR_IF_IDX, ssk->sk_bound_dev_if))
                return -EMSGSIZE;

        sk_err = ssk->sk_err;
        if (sk_err && sk->sk_state == TCP_ESTABLISHED &&
            nla_put_u8(skb, MPTCP_ATTR_ERROR, sk_err))
                return -EMSGSIZE;

        return 0;
}

static int mptcp_event_sub_established(struct sk_buff *skb,
                                       const struct mptcp_sock *msk,
                                       const struct sock *ssk)
{
        return mptcp_event_put_token_and_ssk(skb, msk, ssk);
}

static int mptcp_event_sub_closed(struct sk_buff *skb,
                                  const struct mptcp_sock *msk,
                                  const struct sock *ssk)
{
        const struct mptcp_subflow_context *sf;

        if (mptcp_event_put_token_and_ssk(skb, msk, ssk))
                return -EMSGSIZE;

        sf = mptcp_subflow_ctx(ssk);
        if (!sf->reset_seen)
                return 0;

        if (nla_put_u32(skb, MPTCP_ATTR_RESET_REASON, sf->reset_reason))
                return -EMSGSIZE;

        if (nla_put_u32(skb, MPTCP_ATTR_RESET_FLAGS, sf->reset_transient))
                return -EMSGSIZE;

        return 0;
}

static int mptcp_event_created(struct sk_buff *skb,
                               const struct mptcp_sock *msk,
                               const struct sock *ssk)
{
        int err = nla_put_u32(skb, MPTCP_ATTR_TOKEN, msk->token);

        if (err)
                return err;

        return mptcp_event_add_subflow(skb, ssk);
}

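/* Multicast an MPTCP_EVENT_REMOVED notification carrying the connection
 * token and the id of the removed address.  Nothing is sent when no
 * listener has joined the PM events group.
 */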
void mptcp_event_addr_removed(const struct mptcp_sock *msk, uint8_t id)
{
        struct net *net = sock_net((const struct sock *)msk);
        struct nlmsghdr *nlh;
        struct sk_buff *skb;

        if (!genl_has_listeners(&mptcp_genl_family, net, MPTCP_PM_EV_GRP_OFFSET))
                return;

        skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
        if (!skb)
                return;

        nlh = genlmsg_put(skb, 0, 0, &mptcp_genl_family, 0, MPTCP_EVENT_REMOVED);
        if (!nlh)
                goto nla_put_failure;

        if (nla_put_u32(skb, MPTCP_ATTR_TOKEN, msk->token))
                goto nla_put_failure;

        if (nla_put_u8(skb, MPTCP_ATTR_REM_ID, id))
                goto nla_put_failure;

        genlmsg_end(skb, nlh);
        mptcp_nl_mcast_send(net, skb, GFP_ATOMIC);
        return;

nla_put_failure:
        kfree_skb(skb);
}

void mptcp_event_addr_announced(const struct mptcp_sock *msk,
                                const struct mptcp_addr_info *info)
{
        struct net *net = sock_net((const struct sock *)msk);
        struct nlmsghdr *nlh;
        struct sk_buff *skb;

        if (!genl_has_listeners(&mptcp_genl_family, net, MPTCP_PM_EV_GRP_OFFSET))
                return;

        skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
        if (!skb)
                return;

        nlh = genlmsg_put(skb, 0, 0, &mptcp_genl_family, 0,
                          MPTCP_EVENT_ANNOUNCED);
        if (!nlh)
                goto nla_put_failure;

        if (nla_put_u32(skb, MPTCP_ATTR_TOKEN, msk->token))
                goto nla_put_failure;

        if (nla_put_u8(skb, MPTCP_ATTR_REM_ID, info->id))
                goto nla_put_failure;

        if (nla_put_be16(skb, MPTCP_ATTR_DPORT, info->port))
                goto nla_put_failure;

        switch (info->family) {
        case AF_INET:
                if (nla_put_in_addr(skb, MPTCP_ATTR_DADDR4, info->addr.s_addr))
                        goto nla_put_failure;
                break;
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
        case AF_INET6:
                if (nla_put_in6_addr(skb, MPTCP_ATTR_DADDR6, &info->addr6))
                        goto nla_put_failure;
                break;
#endif
        default:
                WARN_ON_ONCE(1);
                goto nla_put_failure;
        }

        genlmsg_end(skb, nlh);
        mptcp_nl_mcast_send(net, skb, GFP_ATOMIC);
        return;

nla_put_failure:
        kfree_skb(skb);
}

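/* Generic PM event entry point: build and multicast the netlink notification
 * matching @type.  ANNOUNCED and REMOVED events must instead go through the
 * dedicated helpers above.
 */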
void mptcp_event(enum mptcp_event_type type, const struct mptcp_sock *msk,
                 const struct sock *ssk, gfp_t gfp)
{
        struct net *net = sock_net((const struct sock *)msk);
        struct nlmsghdr *nlh;
        struct sk_buff *skb;

        if (!genl_has_listeners(&mptcp_genl_family, net, MPTCP_PM_EV_GRP_OFFSET))
                return;

        skb = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp);
        if (!skb)
                return;

        nlh = genlmsg_put(skb, 0, 0, &mptcp_genl_family, 0, type);
        if (!nlh)
                goto nla_put_failure;

        switch (type) {
        case MPTCP_EVENT_UNSPEC:
                WARN_ON_ONCE(1);
                break;
        case MPTCP_EVENT_CREATED:
        case MPTCP_EVENT_ESTABLISHED:
                if (mptcp_event_created(skb, msk, ssk) < 0)
                        goto nla_put_failure;
                break;
        case MPTCP_EVENT_CLOSED:
                if (nla_put_u32(skb, MPTCP_ATTR_TOKEN, msk->token) < 0)
                        goto nla_put_failure;
                break;
        case MPTCP_EVENT_ANNOUNCED:
        case MPTCP_EVENT_REMOVED:
                /* call mptcp_event_addr_announced()/removed instead */
                WARN_ON_ONCE(1);
                break;
        case MPTCP_EVENT_SUB_ESTABLISHED:
        case MPTCP_EVENT_SUB_PRIORITY:
                if (mptcp_event_sub_established(skb, msk, ssk) < 0)
                        goto nla_put_failure;
                break;
        case MPTCP_EVENT_SUB_CLOSED:
                if (mptcp_event_sub_closed(skb, msk, ssk) < 0)
                        goto nla_put_failure;
                break;
        }

        genlmsg_end(skb, nlh);
        mptcp_nl_mcast_send(net, skb, gfp);
        return;

nla_put_failure:
        kfree_skb(skb);
}

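/* Netlink command table.  For reference only: these commands are typically
 * driven from userspace via a recent iproute2, e.g. (addresses, device and
 * ids below are just placeholders):
 *
 *   ip mptcp limits set subflow 2 add_addr_accepted 2
 *   ip mptcp endpoint add 10.0.0.2 dev eth0 id 1 subflow
 *   ip mptcp endpoint delete id 1
 *   ip mptcp endpoint flush
 */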
static const struct genl_small_ops mptcp_pm_ops[] = {
        {
                .cmd    = MPTCP_PM_CMD_ADD_ADDR,
                .doit   = mptcp_nl_cmd_add_addr,
                .flags  = GENL_ADMIN_PERM,
        },
        {
                .cmd    = MPTCP_PM_CMD_DEL_ADDR,
                .doit   = mptcp_nl_cmd_del_addr,
                .flags  = GENL_ADMIN_PERM,
        },
        {
                .cmd    = MPTCP_PM_CMD_FLUSH_ADDRS,
                .doit   = mptcp_nl_cmd_flush_addrs,
                .flags  = GENL_ADMIN_PERM,
        },
        {
                .cmd    = MPTCP_PM_CMD_GET_ADDR,
                .doit   = mptcp_nl_cmd_get_addr,
                .dumpit   = mptcp_nl_cmd_dump_addrs,
        },
        {
                .cmd    = MPTCP_PM_CMD_SET_LIMITS,
                .doit   = mptcp_nl_cmd_set_limits,
                .flags  = GENL_ADMIN_PERM,
        },
        {
                .cmd    = MPTCP_PM_CMD_GET_LIMITS,
                .doit   = mptcp_nl_cmd_get_limits,
        },
        {
                .cmd    = MPTCP_PM_CMD_SET_FLAGS,
                .doit   = mptcp_nl_cmd_set_flags,
                .flags  = GENL_ADMIN_PERM,
        },
};

static struct genl_family mptcp_genl_family __ro_after_init = {
        .name           = MPTCP_PM_NAME,
        .version        = MPTCP_PM_VER,
        .maxattr        = MPTCP_PM_ATTR_MAX,
        .policy         = mptcp_pm_policy,
        .netnsok        = true,
        .module         = THIS_MODULE,
        .small_ops      = mptcp_pm_ops,
        .n_small_ops    = ARRAY_SIZE(mptcp_pm_ops),
        .mcgrps         = mptcp_pm_mcgrps,
        .n_mcgrps       = ARRAY_SIZE(mptcp_pm_mcgrps),
};

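/* Per-netns setup: each namespace starts with an empty endpoint list and the
 * defaults set below; the remaining fields rely on the pernet area being
 * zeroed at allocation time, as noted in the function body.
 */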
static int __net_init pm_nl_init_net(struct net *net)
{
        struct pm_nl_pernet *pernet = pm_nl_get_pernet(net);

        INIT_LIST_HEAD_RCU(&pernet->local_addr_list);

        /* Cit. 2 subflows ought to be enough for anybody. */
        pernet->subflows_max = 2;
        pernet->next_id = 1;
        pernet->stale_loss_cnt = 4;
        spin_lock_init(&pernet->lock);

        /* No need to initialize other pernet fields, the struct is zeroed at
         * allocation time.
         */

        return 0;
}

static void __net_exit pm_nl_exit_net(struct list_head *net_list)
{
        struct net *net;

        list_for_each_entry(net, net_list, exit_list) {
                struct pm_nl_pernet *pernet = pm_nl_get_pernet(net);

                /* net is removed from namespace list, can't race with
                 * other modifiers, also netns core already waited for a
                 * RCU grace period.
                 */
                __flush_addrs(&pernet->local_addr_list);
        }
}

static struct pernet_operations mptcp_pm_pernet_ops = {
        .init = pm_nl_init_net,
        .exit_batch = pm_nl_exit_net,
        .id = &pm_nl_pernet_id,
        .size = sizeof(struct pm_nl_pernet),
};

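/* Boot time registration of the pernet subsystem and of the generic netlink
 * family; either failure is fatal, as the in-kernel path manager cannot
 * operate without them.
 */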
void __init mptcp_pm_nl_init(void)
{
        if (register_pernet_subsys(&mptcp_pm_pernet_ops) < 0)
                panic("Failed to register MPTCP PM pernet subsystem.\n");

        if (genl_register_family(&mptcp_genl_family))
                panic("Failed to register MPTCP PM netlink family\n");
}