// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *	ip6_flowlabel.c		IPv6 flowlabel manager.
 *
 *	Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 */

#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/net.h>
#include <linux/netdevice.h>
#include <linux/in6.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/pid_namespace.h>

#include <net/net_namespace.h>
#include <net/sock.h>

#include <net/ipv6.h>
#include <net/rawv6.h>
#include <net/transp_v6.h>

#include <linux/uaccess.h>
#define FL_MIN_LINGER	6	/* Minimal linger. Set to the 6 sec specified
				   in the old IPv6 RFC; a reasonable value. */
#define FL_MAX_LINGER	150	/* Maximal linger timeout */

/* FL hash table */

#define FL_MAX_PER_SOCK	32
#define FL_MAX_SIZE	4096
#define FL_HASH_MASK	255
#define FL_HASH(l)	(ntohl(l)&FL_HASH_MASK)
static atomic_t fl_size = ATOMIC_INIT(0);
static struct ip6_flowlabel __rcu *fl_ht[FL_HASH_MASK+1];

static void ip6_fl_gc(struct timer_list *unused);
static DEFINE_TIMER(ip6_fl_gc_timer, ip6_fl_gc);
/* FL hash table lock: it protects only the GC */

static DEFINE_SPINLOCK(ip6_fl_lock);

/* Per-socket flowlabel list lock */

static DEFINE_SPINLOCK(ip6_sk_fl_lock);
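/*
 * Locking overview (descriptive note): ip6_fl_lock serializes writers of
 * the global hash table and the GC scan; ip6_sk_fl_lock serializes writers
 * of each socket's ipv6_fl_list. Readers of both lists rely on RCU, via
 * the iteration macros below.
 */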
#define for_each_fl_rcu(hash, fl)				\
	for (fl = rcu_dereference_bh(fl_ht[(hash)]);		\
	     fl != NULL;					\
	     fl = rcu_dereference_bh(fl->next))
#define for_each_fl_continue_rcu(fl)				\
	for (fl = rcu_dereference_bh(fl->next);			\
	     fl != NULL;					\
	     fl = rcu_dereference_bh(fl->next))

#define for_each_sk_fl_rcu(np, sfl)				\
	for (sfl = rcu_dereference_bh(np->ipv6_fl_list);	\
	     sfl != NULL;					\
	     sfl = rcu_dereference_bh(sfl->next))
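/*
 * Note (descriptive): the walkers above use the _bh flavour of
 * rcu_dereference() to pair with rcu_read_lock_bh() on the read side;
 * the write side is serialized by the _bh spinlocks above, and the GC
 * runs from the timer in softirq context.
 */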
static inline struct ip6_flowlabel *__fl_lookup(struct net *net, __be32 label)
{
	struct ip6_flowlabel *fl;

	for_each_fl_rcu(FL_HASH(label), fl) {
		if (fl->label == label && net_eq(fl->fl_net, net))
			return fl;
	}
	return NULL;
}
static struct ip6_flowlabel *fl_lookup(struct net *net, __be32 label)
{
	struct ip6_flowlabel *fl;

	rcu_read_lock_bh();
	fl = __fl_lookup(net, label);
	if (fl && !atomic_inc_not_zero(&fl->users))
		fl = NULL;
	rcu_read_unlock_bh();
	return fl;
}
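/*
 * Note: atomic_inc_not_zero() above refuses to revive a label whose
 * refcount has already dropped to zero; such an entry belongs to the GC
 * and may be freed at any time, so the lookup treats it as absent.
 */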
static void fl_free_rcu(struct rcu_head *head)
{
	struct ip6_flowlabel *fl = container_of(head, struct ip6_flowlabel, rcu);

	/* Last reference is gone: drop the pid reference taken at creation
	 * for process-shared labels, then free the options and the label.
	 */
	if (fl->share == IPV6_FL_S_PROCESS)
		put_pid(fl->owner.pid);
	kfree(fl->opt);
	kfree(fl);
}
static void fl_free(struct ip6_flowlabel *fl)
{
	if (fl)
		call_rcu(&fl->rcu, fl_free_rcu);
}
static void fl_release(struct ip6_flowlabel *fl)
{
	spin_lock_bh(&ip6_fl_lock);

	fl->lastuse = jiffies;
	if (atomic_dec_and_test(&fl->users)) {
		unsigned long ttd = fl->lastuse + fl->linger;

		if (time_after(ttd, fl->expires))
			fl->expires = ttd;
		ttd = fl->expires;
		if (fl->opt && fl->share == IPV6_FL_S_EXCL) {
			struct ipv6_txoptions *opt = fl->opt;

			fl->opt = NULL;
			kfree(opt);
		}
		if (!timer_pending(&ip6_fl_gc_timer) ||
		    time_after(ip6_fl_gc_timer.expires, ttd))
			mod_timer(&ip6_fl_gc_timer, ttd);
	}
	spin_unlock_bh(&ip6_fl_lock);
}
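/*
 * Dropping the last reference does not free the label immediately: it
 * lingers (at least fl->linger, and until fl->expires) so the label
 * cannot be recycled for a different destination right away. The GC
 * timer is (re)armed to fire at the earliest such deadline.
 */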
static void ip6_fl_gc(struct timer_list *unused)
{
	int i;
	unsigned long now = jiffies;
	unsigned long sched = 0;

	spin_lock(&ip6_fl_lock);

	for (i = 0; i <= FL_HASH_MASK; i++) {
		struct ip6_flowlabel *fl;
		struct ip6_flowlabel __rcu **flp;

		flp = &fl_ht[i];
		while ((fl = rcu_dereference_protected(*flp,
						       lockdep_is_held(&ip6_fl_lock))) != NULL) {
			if (atomic_read(&fl->users) == 0) {
				unsigned long ttd = fl->lastuse + fl->linger;

				if (time_after(ttd, fl->expires))
					fl->expires = ttd;
				ttd = fl->expires;
				if (time_after_eq(now, ttd)) {
					*flp = fl->next;
					fl_free(fl);
					atomic_dec(&fl_size);
					continue;
				}
				if (!sched || time_before(ttd, sched))
					sched = ttd;
			}
			flp = &fl->next;
		}
	}
	if (!sched && atomic_read(&fl_size))
		sched = now + FL_MAX_LINGER;
	if (sched)
		mod_timer(&ip6_fl_gc_timer, sched);
	spin_unlock(&ip6_fl_lock);
}
static void __net_exit ip6_fl_purge(struct net *net)
{
	int i;

	spin_lock_bh(&ip6_fl_lock);
	for (i = 0; i <= FL_HASH_MASK; i++) {
		struct ip6_flowlabel *fl;
		struct ip6_flowlabel __rcu **flp;

		flp = &fl_ht[i];
		while ((fl = rcu_dereference_protected(*flp,
						       lockdep_is_held(&ip6_fl_lock))) != NULL) {
			if (net_eq(fl->fl_net, net) &&
			    atomic_read(&fl->users) == 0) {
				*flp = fl->next;
				fl_free(fl);
				atomic_dec(&fl_size);
				continue;
			}
			flp = &fl->next;
		}
	}
	spin_unlock_bh(&ip6_fl_lock);
}
static struct ip6_flowlabel *fl_intern(struct net *net,
				       struct ip6_flowlabel *fl, __be32 label)
{
	struct ip6_flowlabel *lfl;

	fl->label = label & IPV6_FLOWLABEL_MASK;

	spin_lock_bh(&ip6_fl_lock);
	if (label == 0) {
		for (;;) {
			fl->label = htonl(prandom_u32())&IPV6_FLOWLABEL_MASK;
			if (fl->label) {
				lfl = __fl_lookup(net, fl->label);
				if (!lfl)
					break;
			}
		}
	} else {
		/*
		 * we dropped the ip6_fl_lock, so this entry could reappear
		 * and we need to recheck with it.
		 *
		 * OTOH no need to search the active socket first, like it is
		 * done in ipv6_flowlabel_opt - sock is locked, so new entry
		 * with the same label can only appear on another sock
		 */
		lfl = __fl_lookup(net, fl->label);
		if (lfl) {
			atomic_inc(&lfl->users);
			spin_unlock_bh(&ip6_fl_lock);
			return lfl;
		}
	}

	fl->lastuse = jiffies;
	fl->next = fl_ht[FL_HASH(fl->label)];
	rcu_assign_pointer(fl_ht[FL_HASH(fl->label)], fl);
	atomic_inc(&fl_size);
	spin_unlock_bh(&ip6_fl_lock);
	return NULL;
}
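/*
 * fl_intern() return convention: NULL means fl was inserted into the hash
 * under its (possibly freshly generated) label; a non-NULL result is an
 * already-interned label with the same value, returned with its refcount
 * elevated, and the caller must resolve the conflict.
 */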
/* Socket flowlabel lists */

struct ip6_flowlabel *fl6_sock_lookup(struct sock *sk, __be32 label)
{
	struct ipv6_fl_socklist *sfl;
	struct ipv6_pinfo *np = inet6_sk(sk);

	label &= IPV6_FLOWLABEL_MASK;

	rcu_read_lock_bh();
	for_each_sk_fl_rcu(np, sfl) {
		struct ip6_flowlabel *fl = sfl->fl;

		if (fl->label == label && atomic_inc_not_zero(&fl->users)) {
			fl->lastuse = jiffies;
			rcu_read_unlock_bh();
			return fl;
		}
	}
	rcu_read_unlock_bh();
	return NULL;
}
EXPORT_SYMBOL_GPL(fl6_sock_lookup);
void fl6_free_socklist(struct sock *sk)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct ipv6_fl_socklist *sfl;

	if (!rcu_access_pointer(np->ipv6_fl_list))
		return;

	spin_lock_bh(&ip6_sk_fl_lock);
	while ((sfl = rcu_dereference_protected(np->ipv6_fl_list,
						lockdep_is_held(&ip6_sk_fl_lock))) != NULL) {
		np->ipv6_fl_list = sfl->next;
		spin_unlock_bh(&ip6_sk_fl_lock);

		fl_release(sfl->fl);
		kfree_rcu(sfl, rcu);

		spin_lock_bh(&ip6_sk_fl_lock);
	}
	spin_unlock_bh(&ip6_sk_fl_lock);
}
/* Service routines */

/*
 * This is the only difficult place: a flowlabel enforces equal headers
 * up to and including the routing header, but the user may still supply
 * options following the rthdr.
 */

struct ipv6_txoptions *fl6_merge_options(struct ipv6_txoptions *opt_space,
					 struct ip6_flowlabel *fl,
					 struct ipv6_txoptions *fopt)
{
	struct ipv6_txoptions *fl_opt = fl->opt;

	if (!fopt || fopt->opt_flen == 0)
		return fl_opt;

	if (fl_opt) {
		opt_space->hopopt = fl_opt->hopopt;
		opt_space->dst0opt = fl_opt->dst0opt;
		opt_space->srcrt = fl_opt->srcrt;
		opt_space->opt_nflen = fl_opt->opt_nflen;
	} else {
		if (fopt->opt_nflen == 0)
			return fopt;
		opt_space->hopopt = NULL;
		opt_space->dst0opt = NULL;
		opt_space->srcrt = NULL;
		opt_space->opt_nflen = 0;
	}
	opt_space->dst1opt = fopt->dst1opt;
	opt_space->opt_flen = fopt->opt_flen;
	opt_space->tot_len = fopt->tot_len;
	return opt_space;
}
EXPORT_SYMBOL_GPL(fl6_merge_options);
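/*
 * In other words: the label's stored options win for everything up to and
 * including the routing header (hopopt, dst0opt, srcrt and opt_nflen),
 * while the caller-supplied options contribute only what follows the
 * rthdr (dst1opt and the opt_flen byte count).
 */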
static unsigned long check_linger(unsigned long ttl)
{
	if (ttl < FL_MIN_LINGER)
		return FL_MIN_LINGER*HZ;
	if (ttl > FL_MAX_LINGER && !capable(CAP_NET_ADMIN))
		return 0;
	return ttl*HZ;
}
static int fl6_renew(struct ip6_flowlabel *fl, unsigned long linger, unsigned long expires)
{
	linger = check_linger(linger);
	if (!linger)
		return -EPERM;
	expires = check_linger(expires);
	if (!expires)
		return -EPERM;

	spin_lock_bh(&ip6_fl_lock);
	fl->lastuse = jiffies;
	if (time_before(fl->linger, linger))
		fl->linger = linger;
	if (time_before(expires, fl->linger))
		expires = fl->linger;
	if (time_before(fl->expires, fl->lastuse + expires))
		fl->expires = fl->lastuse + expires;
	spin_unlock_bh(&ip6_fl_lock);

	return 0;
}
static struct ip6_flowlabel *
fl_create(struct net *net, struct sock *sk, struct in6_flowlabel_req *freq,
	  char __user *optval, int optlen, int *err_p)
{
	struct ip6_flowlabel *fl = NULL;
	int olen;
	int addr_type;
	int err;

	olen = optlen - CMSG_ALIGN(sizeof(*freq));
	err = -EINVAL;
	if (olen > 64 * 1024)
		goto done;

	err = -ENOMEM;
	fl = kzalloc(sizeof(*fl), GFP_KERNEL);
	if (!fl)
		goto done;

	if (olen > 0) {
		struct msghdr msg;
		struct flowi6 flowi6;
		struct ipcm6_cookie ipc6;

		err = -ENOMEM;
		fl->opt = kmalloc(sizeof(*fl->opt) + olen, GFP_KERNEL);
		if (!fl->opt)
			goto done;

		memset(fl->opt, 0, sizeof(*fl->opt));
		fl->opt->tot_len = sizeof(*fl->opt) + olen;
		err = -EFAULT;
		if (copy_from_user(fl->opt+1, optval+CMSG_ALIGN(sizeof(*freq)), olen))
			goto done;

		msg.msg_controllen = olen;
		msg.msg_control = (void *)(fl->opt+1);
		memset(&flowi6, 0, sizeof(flowi6));

		ipc6.opt = fl->opt;
		err = ip6_datagram_send_ctl(net, sk, &msg, &flowi6, &ipc6);
		if (err)
			goto done;
		err = -EINVAL;
		if (fl->opt->opt_flen)
			goto done;
		if (fl->opt->opt_nflen == 0) {
			kfree(fl->opt);
			fl->opt = NULL;
		}
	}

	fl->fl_net = net;
	fl->expires = jiffies;
	err = fl6_renew(fl, freq->flr_linger, freq->flr_expires);
	if (err)
		goto done;
	fl->share = freq->flr_share;
	addr_type = ipv6_addr_type(&freq->flr_dst);
	if ((addr_type & IPV6_ADDR_MAPPED) ||
	    addr_type == IPV6_ADDR_ANY) {
		err = -EINVAL;
		goto done;
	}
	fl->dst = freq->flr_dst;
	atomic_set(&fl->users, 1);
	switch (fl->share) {
	case IPV6_FL_S_EXCL:
	case IPV6_FL_S_ANY:
		break;
	case IPV6_FL_S_PROCESS:
		fl->owner.pid = get_task_pid(current, PIDTYPE_PID);
		break;
	case IPV6_FL_S_USER:
		fl->owner.uid = current_euid();
		break;
	default:
		err = -EINVAL;
		goto done;
	}
	return fl;

done:
	fl_free(fl);
	*err_p = err;
	return NULL;
}
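/*
 * fl_create() parses any cmsg-formatted IPv6 options appended to the
 * in6_flowlabel_req, keeps only what must be identical for every packet
 * sent on the label (everything before and including the routing header;
 * opt_flen options are rejected), and returns a label carrying one
 * reference for the calling socket.
 */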
static int mem_check(struct sock *sk)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct ipv6_fl_socklist *sfl;
	int room = FL_MAX_SIZE - atomic_read(&fl_size);
	int count = 0;

	if (room > FL_MAX_SIZE - FL_MAX_PER_SOCK)
		return 0;

	rcu_read_lock_bh();
	for_each_sk_fl_rcu(np, sfl)
		count++;
	rcu_read_unlock_bh();

	if (room <= 0 ||
	    ((count >= FL_MAX_PER_SOCK ||
	      (count > 0 && room < FL_MAX_SIZE/2) || room < FL_MAX_SIZE/4) &&
	     !capable(CAP_NET_ADMIN)))
		return -ENOBUFS;

	return 0;
}
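/*
 * Limits enforced above: at most FL_MAX_SIZE labels system-wide and
 * FL_MAX_PER_SOCK per socket for unprivileged callers. A table more than
 * half full refuses sockets that already hold a label, more than three
 * quarters full refuses all non-CAP_NET_ADMIN callers, and a completely
 * full table refuses everyone.
 */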
static inline void fl_link(struct ipv6_pinfo *np, struct ipv6_fl_socklist *sfl,
			   struct ip6_flowlabel *fl)
{
	spin_lock_bh(&ip6_sk_fl_lock);
	sfl->fl = fl;
	sfl->next = np->ipv6_fl_list;
	rcu_assign_pointer(np->ipv6_fl_list, sfl);
	spin_unlock_bh(&ip6_sk_fl_lock);
}
int ipv6_flowlabel_opt_get(struct sock *sk, struct in6_flowlabel_req *freq,
			   int flags)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct ipv6_fl_socklist *sfl;

	if (flags & IPV6_FL_F_REMOTE) {
		freq->flr_label = np->rcv_flowinfo & IPV6_FLOWLABEL_MASK;
		return 0;
	}

	if (np->repflow) {
		freq->flr_label = np->flow_label;
		return 0;
	}

	rcu_read_lock_bh();

	for_each_sk_fl_rcu(np, sfl) {
		if (sfl->fl->label == (np->flow_label & IPV6_FLOWLABEL_MASK)) {
			spin_lock_bh(&ip6_fl_lock);
			freq->flr_label = sfl->fl->label;
			freq->flr_dst = sfl->fl->dst;
			freq->flr_share = sfl->fl->share;
			freq->flr_expires = (sfl->fl->expires - jiffies) / HZ;
			freq->flr_linger = sfl->fl->linger / HZ;
			freq->flr_flags = 0;
			spin_unlock_bh(&ip6_fl_lock);
			rcu_read_unlock_bh();
			return 0;
		}
	}
	rcu_read_unlock_bh();

	return -ENOENT;
}
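/*
 * Read side of the API: IPV6_FL_F_REMOTE reports the flow label most
 * recently received from the peer, np->repflow reports a reflected label,
 * and otherwise the socket's own list is searched for the label currently
 * set in np->flow_label.
 */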
int ipv6_flowlabel_opt(struct sock *sk, char __user *optval, int optlen)
{
	int uninitialized_var(err);
	struct net *net = sock_net(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct in6_flowlabel_req freq;
	struct ipv6_fl_socklist *sfl1 = NULL;
	struct ipv6_fl_socklist *sfl;
	struct ipv6_fl_socklist __rcu **sflp;
	struct ip6_flowlabel *fl, *fl1 = NULL;

	if (optlen < sizeof(freq))
		return -EINVAL;

	if (copy_from_user(&freq, optval, sizeof(freq)))
		return -EFAULT;
	switch (freq.flr_action) {
	case IPV6_FL_A_PUT:
		if (freq.flr_flags & IPV6_FL_F_REFLECT) {
			if (sk->sk_protocol != IPPROTO_TCP)
				return -ENOPROTOOPT;
			if (!np->repflow)
				return -ESRCH;
			np->flow_label = 0;
			np->repflow = 0;
			return 0;
		}
		spin_lock_bh(&ip6_sk_fl_lock);
		for (sflp = &np->ipv6_fl_list;
		     (sfl = rcu_dereference_protected(*sflp,
						      lockdep_is_held(&ip6_sk_fl_lock))) != NULL;
		     sflp = &sfl->next) {
			if (sfl->fl->label == freq.flr_label) {
				if (freq.flr_label == (np->flow_label&IPV6_FLOWLABEL_MASK))
					np->flow_label &= ~IPV6_FLOWLABEL_MASK;
				*sflp = sfl->next;
				spin_unlock_bh(&ip6_sk_fl_lock);
				fl_release(sfl->fl);
				kfree_rcu(sfl, rcu);
				return 0;
			}
		}
		spin_unlock_bh(&ip6_sk_fl_lock);
		return -ESRCH;
	case IPV6_FL_A_RENEW:
		rcu_read_lock_bh();
		for_each_sk_fl_rcu(np, sfl) {
			if (sfl->fl->label == freq.flr_label) {
				err = fl6_renew(sfl->fl, freq.flr_linger, freq.flr_expires);
				rcu_read_unlock_bh();
				return err;
			}
		}
		rcu_read_unlock_bh();

		if (freq.flr_share == IPV6_FL_S_NONE &&
		    ns_capable(net->user_ns, CAP_NET_ADMIN)) {
			fl = fl_lookup(net, freq.flr_label);
			if (fl) {
				err = fl6_renew(fl, freq.flr_linger, freq.flr_expires);
				fl_release(fl);
				return err;
			}
		}
		return -ESRCH;
	case IPV6_FL_A_GET:
		if (freq.flr_flags & IPV6_FL_F_REFLECT) {
			struct net *net = sock_net(sk);

			if (net->ipv6.sysctl.flowlabel_consistency) {
				net_info_ratelimited("Cannot set IPV6_FL_F_REFLECT if the flowlabel_consistency sysctl is enabled\n");
				return -EPERM;
			}

			if (sk->sk_protocol != IPPROTO_TCP)
				return -ENOPROTOOPT;

			np->repflow = 1;
			return 0;
		}

		if (freq.flr_label & ~IPV6_FLOWLABEL_MASK)
			return -EINVAL;
		if (net->ipv6.sysctl.flowlabel_state_ranges &&
		    (freq.flr_label & IPV6_FLOWLABEL_STATELESS_FLAG))
			return -ERANGE;

		fl = fl_create(net, sk, &freq, optval, optlen, &err);
		if (!fl)
			return err;
		sfl1 = kmalloc(sizeof(*sfl1), GFP_KERNEL);
		if (freq.flr_label) {
			err = -EEXIST;
			rcu_read_lock_bh();
			for_each_sk_fl_rcu(np, sfl) {
				if (sfl->fl->label == freq.flr_label) {
					if (freq.flr_flags&IPV6_FL_F_EXCL) {
						rcu_read_unlock_bh();
						goto done;
					}
					fl1 = sfl->fl;
					if (!atomic_inc_not_zero(&fl1->users))
						fl1 = NULL;
					break;
				}
			}
			rcu_read_unlock_bh();

			if (!fl1)
				fl1 = fl_lookup(net, freq.flr_label);
			if (fl1) {
recheck:
				err = -EEXIST;
				if (freq.flr_flags&IPV6_FL_F_EXCL)
					goto release;
				err = -EPERM;
				if (fl1->share == IPV6_FL_S_EXCL ||
				    fl1->share != fl->share ||
				    ((fl1->share == IPV6_FL_S_PROCESS) &&
				     (fl1->owner.pid != fl->owner.pid)) ||
				    ((fl1->share == IPV6_FL_S_USER) &&
				     !uid_eq(fl1->owner.uid, fl->owner.uid)))
					goto release;

				err = -ENOMEM;
				if (!sfl1)
					goto release;
				if (fl->linger > fl1->linger)
					fl1->linger = fl->linger;
				if ((long)(fl->expires - fl1->expires) > 0)
					fl1->expires = fl->expires;
				fl_link(np, sfl1, fl1);
				fl_free(fl);
				return 0;

release:
				fl_release(fl1);
				goto done;
			}
		}
		err = -ENOENT;
		if (!(freq.flr_flags&IPV6_FL_F_CREATE))
			goto done;

		err = -ENOMEM;
		if (!sfl1)
			goto done;

		err = mem_check(sk);
		if (err != 0)
			goto done;

		fl1 = fl_intern(net, fl, freq.flr_label);
		if (fl1)
			goto recheck;

		if (!freq.flr_label) {
			if (copy_to_user(&((struct in6_flowlabel_req __user *) optval)->flr_label,
					 &fl->label, sizeof(fl->label))) {
				/* Intentionally ignore fault. */
			}
		}

		fl_link(np, sfl1, fl);
		return 0;

	default:
		return -EINVAL;
	}

done:
	fl_free(fl);
	kfree(sfl1);
	return err;
}
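/*
 * Userspace usage sketch (illustrative, not part of this file): a label
 * is requested with IPV6_FL_A_GET before sending; flr_label of 0 asks the
 * kernel to pick a random label, which is copied back into the request as
 * done above. The label is then carried in sin6_flowinfo (together with
 * the IPV6_FLOWINFO_SEND socket option so it is honoured on output).
 *
 *	struct in6_flowlabel_req freq = {
 *		.flr_action = IPV6_FL_A_GET,
 *		.flr_flags  = IPV6_FL_F_CREATE,
 *		.flr_share  = IPV6_FL_S_EXCL,
 *	};
 *
 *	freq.flr_dst = dst.sin6_addr;
 *	if (setsockopt(fd, IPPROTO_IPV6, IPV6_FLOWLABEL_MGR,
 *		       &freq, sizeof(freq)) == 0)
 *		dst.sin6_flowinfo = freq.flr_label;
 */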
#ifdef CONFIG_PROC_FS

struct ip6fl_iter_state {
	struct seq_net_private p;
	struct pid_namespace *pid_ns;
	int bucket;
};

#define ip6fl_seq_private(seq)	((struct ip6fl_iter_state *)(seq)->private)
static struct ip6_flowlabel *ip6fl_get_first(struct seq_file *seq)
{
	struct ip6_flowlabel *fl = NULL;
	struct ip6fl_iter_state *state = ip6fl_seq_private(seq);
	struct net *net = seq_file_net(seq);

	for (state->bucket = 0; state->bucket <= FL_HASH_MASK; ++state->bucket) {
		for_each_fl_rcu(state->bucket, fl) {
			if (net_eq(fl->fl_net, net))
				goto out;
		}
	}
	fl = NULL;
out:
	return fl;
}
static struct ip6_flowlabel *ip6fl_get_next(struct seq_file *seq, struct ip6_flowlabel *fl)
{
	struct ip6fl_iter_state *state = ip6fl_seq_private(seq);
	struct net *net = seq_file_net(seq);

	for_each_fl_continue_rcu(fl) {
		if (net_eq(fl->fl_net, net))
			goto out;
	}

try_again:
	if (++state->bucket <= FL_HASH_MASK) {
		for_each_fl_rcu(state->bucket, fl) {
			if (net_eq(fl->fl_net, net))
				goto out;
		}
		goto try_again;
	}
	fl = NULL;

out:
	return fl;
}
static struct ip6_flowlabel *ip6fl_get_idx(struct seq_file *seq, loff_t pos)
{
	struct ip6_flowlabel *fl = ip6fl_get_first(seq);

	if (fl)
		while (pos && (fl = ip6fl_get_next(seq, fl)) != NULL)
			--pos;
	return pos ? NULL : fl;
}
static void *ip6fl_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(RCU)
{
	struct ip6fl_iter_state *state = ip6fl_seq_private(seq);

	state->pid_ns = proc_pid_ns(file_inode(seq->file));

	rcu_read_lock_bh();
	return *pos ? ip6fl_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
}
static void *ip6fl_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct ip6_flowlabel *fl;

	if (v == SEQ_START_TOKEN)
		fl = ip6fl_get_first(seq);
	else
		fl = ip6fl_get_next(seq, v);
	++*pos;
	return fl;
}
static void ip6fl_seq_stop(struct seq_file *seq, void *v)
	__releases(RCU)
{
	rcu_read_unlock_bh();
}
static int ip6fl_seq_show(struct seq_file *seq, void *v)
{
	struct ip6fl_iter_state *state = ip6fl_seq_private(seq);

	if (v == SEQ_START_TOKEN) {
		seq_puts(seq, "Label S Owner  Users  Linger Expires  Dst                              Opt\n");
	} else {
		struct ip6_flowlabel *fl = v;

		seq_printf(seq,
			   "%05X %-1d %-6d %-6d %-6ld %-8ld %pi6 %-4d\n",
			   (unsigned int)ntohl(fl->label),
			   fl->share,
			   ((fl->share == IPV6_FL_S_PROCESS) ?
			    pid_nr_ns(fl->owner.pid, state->pid_ns) :
			    ((fl->share == IPV6_FL_S_USER) ?
			     from_kuid_munged(seq_user_ns(seq), fl->owner.uid) :
			     0)),
			   atomic_read(&fl->users),
			   fl->linger/HZ,
			   (long)(fl->expires - jiffies)/HZ,
			   &fl->dst,
			   fl->opt ? fl->opt->opt_nflen : 0);
	}
	return 0;
}
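/*
 * The seq_file handlers above back /proc/net/ip6_flowlabel: one line per
 * label showing the label value, share mode, owner (pid or uid, translated
 * into the reader's namespace), reference count, linger and expiry in
 * seconds, destination address and option length.
 */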
static const struct seq_operations ip6fl_seq_ops = {
	.start	=	ip6fl_seq_start,
	.next	=	ip6fl_seq_next,
	.stop	=	ip6fl_seq_stop,
	.show	=	ip6fl_seq_show,
};
static int __net_init ip6_flowlabel_proc_init(struct net *net)
{
	if (!proc_create_net("ip6_flowlabel", 0444, net->proc_net,
			     &ip6fl_seq_ops, sizeof(struct ip6fl_iter_state)))
		return -ENOMEM;
	return 0;
}

static void __net_exit ip6_flowlabel_proc_fini(struct net *net)
{
	remove_proc_entry("ip6_flowlabel", net->proc_net);
}
#else
static inline int ip6_flowlabel_proc_init(struct net *net)
{
	return 0;
}
static inline void ip6_flowlabel_proc_fini(struct net *net)
{
}
#endif
static void __net_exit ip6_flowlabel_net_exit(struct net *net)
{
	ip6_fl_purge(net);
	ip6_flowlabel_proc_fini(net);
}

static struct pernet_operations ip6_flowlabel_net_ops = {
	.init = ip6_flowlabel_proc_init,
	.exit = ip6_flowlabel_net_exit,
};

int ip6_flowlabel_init(void)
{
	return register_pernet_subsys(&ip6_flowlabel_net_ops);
}

void ip6_flowlabel_cleanup(void)
{
	del_timer(&ip6_fl_gc_timer);
	unregister_pernet_subsys(&ip6_flowlabel_net_ops);
}