#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/workqueue.h>
#include <linux/rtnetlink.h>
#include <linux/cache.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/idr.h>
#include <linux/rculist.h>
#include <linux/nsproxy.h>
#include <linux/fs.h>
#include <linux/proc_ns.h>
#include <linux/file.h>
#include <linux/export.h>
#include <linux/user_namespace.h>
#include <linux/net_namespace.h>

#include <net/netlink.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>

/*
 *	Our network namespace constructor/destructor lists
 */

static LIST_HEAD(pernet_list);
static struct list_head *first_device = &pernet_list;
DEFINE_MUTEX(net_mutex);

LIST_HEAD(net_namespace_list);
EXPORT_SYMBOL_GPL(net_namespace_list);

struct net init_net = {
	.dev_base_head = LIST_HEAD_INIT(init_net.dev_base_head),
};
EXPORT_SYMBOL(init_net);

#define INITIAL_NET_GEN_PTRS	13 /* +1 for len +2 for rcu_head */

static unsigned int max_gen_ptrs = INITIAL_NET_GEN_PTRS;

static struct net_generic *net_alloc_generic(void)
{
	struct net_generic *ng;
	size_t generic_size = offsetof(struct net_generic, ptr[max_gen_ptrs]);

	ng = kzalloc(generic_size, GFP_KERNEL);
	if (ng)
		ng->len = max_gen_ptrs;

	return ng;
}

static int net_assign_generic(struct net *net, int id, void *data)
{
	struct net_generic *ng, *old_ng;

	BUG_ON(!mutex_is_locked(&net_mutex));
	BUG_ON(id == 0);

	old_ng = rcu_dereference_protected(net->gen,
					   lockdep_is_held(&net_mutex));
	ng = old_ng;
	if (old_ng->len >= id)
		goto assign;

	ng = net_alloc_generic();
	if (ng == NULL)
		return -ENOMEM;

	/*
	 * Some synchronisation notes:
	 *
	 * net_generic() explores the net->gen array inside an rcu
	 * read section. Besides, once set, the net->gen->ptr[x]
	 * pointer never changes (see rules in netns/generic.h).
	 *
	 * That said, we simply duplicate this array and schedule
	 * the old copy for kfree after a grace period.
	 */

	memcpy(&ng->ptr, &old_ng->ptr, old_ng->len * sizeof(void *));

	rcu_assign_pointer(net->gen, ng);
	kfree_rcu(old_ng, rcu);
assign:
	ng->ptr[id - 1] = data;
	return 0;
}

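/*
 * For reference, the reader side lives in net/netns/generic.h: net_generic()
 * dereferences net->gen under rcu_read_lock() and indexes ptr[id - 1].
 * A typical pernet user looks roughly like this (illustrative sketch, the
 * "foo" names are hypothetical and not part of this file):
 *
 *	static int foo_net_id;			// assigned at registration
 *
 *	static struct foo_net *foo_pernet(struct net *net)
 *	{
 *		return net_generic(net, foo_net_id);	// RCU-safe lookup
 *	}
 *
 * Because ptr[x] never changes once set, readers need no locking beyond
 * the rcu read section taken inside net_generic() itself.
 */
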
static int ops_init(const struct pernet_operations *ops, struct net *net)
{
	int err = -ENOMEM;
	void *data = NULL;

	if (ops->id && ops->size) {
		data = kzalloc(ops->size, GFP_KERNEL);
		if (!data)
			goto out;

		err = net_assign_generic(net, *ops->id, data);
		if (err)
			goto cleanup;
	}
	err = 0;
	if (ops->init)
		err = ops->init(net);
	if (!err)
		return 0;

cleanup:
	kfree(data);

out:
	return err;
}

static void ops_free(const struct pernet_operations *ops, struct net *net)
{
	if (ops->id && ops->size) {
		int id = *ops->id;
		kfree(net_generic(net, id));
	}
}

static void ops_exit_list(const struct pernet_operations *ops,
			  struct list_head *net_exit_list)
{
	struct net *net;
	if (ops->exit) {
		list_for_each_entry(net, net_exit_list, exit_list)
			ops->exit(net);
	}
	if (ops->exit_batch)
		ops->exit_batch(net_exit_list);
}

static void ops_free_list(const struct pernet_operations *ops,
			  struct list_head *net_exit_list)
{
	struct net *net;
	if (ops->size && ops->id) {
		list_for_each_entry(net, net_exit_list, exit_list)
			ops_free(ops, net);
	}
}

/* should be called with nsid_lock held */
static int alloc_netid(struct net *net, struct net *peer, int reqid)
{
	int min = 0, max = 0;

	if (reqid >= 0) {
		min = reqid;
		max = reqid + 1;
	}

	return idr_alloc(&net->netns_ids, peer, min, max, GFP_ATOMIC);
}

/* This function is used by idr_for_each(). If net is equal to peer, the
 * function returns the id so that idr_for_each() stops. Because we cannot
 * return the id 0 (idr_for_each() would not stop), we return the magic
 * value NET_ID_ZERO (-1) for it.
 */
#define NET_ID_ZERO -1
static int net_eq_idr(int id, void *net, void *peer)
{
	if (net_eq(net, peer))
		return id ? : NET_ID_ZERO;
	return 0;
}

/* Should be called with nsid_lock held. If a new id is assigned, the bool
 * alloc is set to true, thus the caller knows that the new id must be
 * notified via rtnl.
 */
static int __peernet2id_alloc(struct net *net, struct net *peer, bool *alloc)
{
	int id = idr_for_each(&net->netns_ids, net_eq_idr, peer);
	bool alloc_it = *alloc;

	*alloc = false;

	/* Magic value for id 0. */
	if (id == NET_ID_ZERO)
		return 0;
	if (id > 0)
		return id;

	if (alloc_it) {
		id = alloc_netid(net, peer, -1);
		*alloc = true;
		return id >= 0 ? id : NETNSA_NSID_NOT_ASSIGNED;
	}

	return NETNSA_NSID_NOT_ASSIGNED;
}

/* should be called with nsid_lock held */
static int __peernet2id(struct net *net, struct net *peer)
{
	bool no = false;

	return __peernet2id_alloc(net, peer, &no);
}

static void rtnl_net_notifyid(struct net *net, int cmd, int id);
/* This function returns the id of a peer netns. If no id is assigned, one
 * will be allocated and returned.
 */
int peernet2id_alloc(struct net *net, struct net *peer)
{
	bool alloc;
	int id;

	spin_lock_bh(&net->nsid_lock);
	alloc = atomic_read(&peer->count) == 0 ? false : true;
	id = __peernet2id_alloc(net, peer, &alloc);
	spin_unlock_bh(&net->nsid_lock);
	if (alloc && id >= 0)
		rtnl_net_notifyid(net, RTM_NEWNSID, id);
	return id;
}
EXPORT_SYMBOL(peernet2id_alloc);

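/*
 * Callers typically use this when advertising a device whose peer lives in
 * another namespace. A hedged sketch, modelled on rtnetlink users such as
 * veth (illustrative, not from this file):
 *
 *	int id = peernet2id_alloc(dev_net(dev), peer_net);
 *
 *	if (nla_put_s32(skb, IFLA_LINK_NETNSID, id))
 *		goto nla_put_failure;
 *
 * Note the id is scoped to @net: the same peer may have different ids when
 * viewed from different namespaces.
 */
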
/* This function returns, if assigned, the id of a peer netns. */
int peernet2id(struct net *net, struct net *peer)
{
	int id;

	spin_lock_bh(&net->nsid_lock);
	id = __peernet2id(net, peer);
	spin_unlock_bh(&net->nsid_lock);
	return id;
}

/* This function returns true if the peer netns has an id assigned in the
 * current netns.
 */
bool peernet_has_id(struct net *net, struct net *peer)
{
	return peernet2id(net, peer) >= 0;
}

struct net *get_net_ns_by_id(struct net *net, int id)
{
	struct net *peer;

	if (id < 0)
		return NULL;

	rcu_read_lock();
	spin_lock_bh(&net->nsid_lock);
	peer = idr_find(&net->netns_ids, id);
	if (peer)
		get_net(peer);
	spin_unlock_bh(&net->nsid_lock);
	rcu_read_unlock();

	return peer;
}

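/*
 * The reference taken above must be balanced by the caller. An illustrative
 * usage sketch (not from this file):
 *
 *	struct net *peer = get_net_ns_by_id(net, nsid);
 *
 *	if (!peer)
 *		return -EINVAL;	// id not assigned in this namespace
 *	...
 *	put_net(peer);		// drop the ref get_net_ns_by_id() took
 */
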
/*
 * setup_net runs the initializers for the network namespace object.
 */
static __net_init int setup_net(struct net *net, struct user_namespace *user_ns)
{
	/* Must be called with net_mutex held */
	const struct pernet_operations *ops, *saved_ops;
	int error = 0;
	LIST_HEAD(net_exit_list);

	atomic_set(&net->count, 1);
	atomic_set(&net->passive, 1);
	net->dev_base_seq = 1;
	net->user_ns = user_ns;
	idr_init(&net->netns_ids);
	spin_lock_init(&net->nsid_lock);

	list_for_each_entry(ops, &pernet_list, list) {
		error = ops_init(ops, net);
		if (error < 0)
			goto out_undo;
	}
out:
	return error;

out_undo:
	/* Walk through the list backwards calling the exit functions
	 * for the pernet modules whose init functions did not fail.
	 */
	list_add(&net->exit_list, &net_exit_list);
	saved_ops = ops;
	list_for_each_entry_continue_reverse(ops, &pernet_list, list)
		ops_exit_list(ops, &net_exit_list);

	ops = saved_ops;
	list_for_each_entry_continue_reverse(ops, &pernet_list, list)
		ops_free_list(ops, &net_exit_list);

	rcu_barrier();
	goto out;
}

#ifdef CONFIG_NET_NS
static struct kmem_cache *net_cachep;
static struct workqueue_struct *netns_wq;

static struct net *net_alloc(void)
{
	struct net *net = NULL;
	struct net_generic *ng;

	ng = net_alloc_generic();
	if (!ng)
		goto out;

	net = kmem_cache_zalloc(net_cachep, GFP_KERNEL);
	if (!net)
		goto out_free;

	rcu_assign_pointer(net->gen, ng);
out:
	return net;

out_free:
	kfree(ng);
	goto out;
}

static void net_free(struct net *net)
{
	kfree(rcu_access_pointer(net->gen));
	kmem_cache_free(net_cachep, net);
}

void net_drop_ns(void *p)
{
	struct net *ns = p;
	if (ns && atomic_dec_and_test(&ns->passive))
		net_free(ns);
}

struct net *copy_net_ns(unsigned long flags,
			struct user_namespace *user_ns, struct net *old_net)
{
	struct net *net;
	int rv;

	if (!(flags & CLONE_NEWNET))
		return get_net(old_net);

	net = net_alloc();
	if (!net)
		return ERR_PTR(-ENOMEM);

	get_user_ns(user_ns);

	mutex_lock(&net_mutex);
	rv = setup_net(net, user_ns);
	if (rv == 0) {
		rtnl_lock();
		list_add_tail_rcu(&net->list, &net_namespace_list);
		rtnl_unlock();
	}
	mutex_unlock(&net_mutex);
	if (rv < 0) {
		put_user_ns(user_ns);
		net_drop_ns(net);
		return ERR_PTR(rv);
	}
	return net;
}

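/*
 * copy_net_ns() is reached from copy_namespaces() when a task does
 * clone(CLONE_NEWNET) or unshare(CLONE_NEWNET). An illustrative userspace
 * sketch:
 *
 *	if (unshare(CLONE_NEWNET) < 0)	// needs CAP_SYS_ADMIN in the
 *		perror("unshare");	// owning user namespace
 *
 * Without CLONE_NEWNET in @flags the old namespace is simply reference
 * counted and shared.
 */
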
static DEFINE_SPINLOCK(cleanup_list_lock);
static LIST_HEAD(cleanup_list);  /* Must hold cleanup_list_lock to touch */

static void cleanup_net(struct work_struct *work)
{
	const struct pernet_operations *ops;
	struct net *net, *tmp;
	struct list_head net_kill_list;
	LIST_HEAD(net_exit_list);

	/* Atomically snapshot the list of namespaces to cleanup */
	spin_lock_irq(&cleanup_list_lock);
	list_replace_init(&cleanup_list, &net_kill_list);
	spin_unlock_irq(&cleanup_list_lock);

	mutex_lock(&net_mutex);

	/* Don't let anyone else find us. */
	rtnl_lock();
	list_for_each_entry(net, &net_kill_list, cleanup_list) {
		list_del_rcu(&net->list);
		list_add_tail(&net->exit_list, &net_exit_list);
		for_each_net(tmp) {
			int id;

			spin_lock_bh(&tmp->nsid_lock);
			id = __peernet2id(tmp, net);
			if (id >= 0)
				idr_remove(&tmp->netns_ids, id);
			spin_unlock_bh(&tmp->nsid_lock);
			if (id >= 0)
				rtnl_net_notifyid(tmp, RTM_DELNSID, id);
		}
		spin_lock_bh(&net->nsid_lock);
		idr_destroy(&net->netns_ids);
		spin_unlock_bh(&net->nsid_lock);
	}
	rtnl_unlock();

	/*
	 * Another CPU might be rcu-iterating the list, wait for it.
	 * This needs to be before calling the exit() notifiers, so
	 * the rcu_barrier() below isn't sufficient alone.
	 */
	synchronize_rcu();

	/* Run all of the network namespace exit methods */
	list_for_each_entry_reverse(ops, &pernet_list, list)
		ops_exit_list(ops, &net_exit_list);

	/* Free the net generic variables */
	list_for_each_entry_reverse(ops, &pernet_list, list)
		ops_free_list(ops, &net_exit_list);

	mutex_unlock(&net_mutex);

	/* Ensure there are no outstanding rcu callbacks using this
	 * network namespace.
	 */
	rcu_barrier();

	/* Finally it is safe to free my network namespace structure */
	list_for_each_entry_safe(net, tmp, &net_exit_list, exit_list) {
		list_del_init(&net->exit_list);
		put_user_ns(net->user_ns);
		net_drop_ns(net);
	}
}

static DECLARE_WORK(net_cleanup_work, cleanup_net);

void __put_net(struct net *net)
{
	/* Cleanup the network namespace in process context */
	unsigned long flags;

	spin_lock_irqsave(&cleanup_list_lock, flags);
	list_add(&net->cleanup_list, &cleanup_list);
	spin_unlock_irqrestore(&cleanup_list_lock, flags);

	queue_work(netns_wq, &net_cleanup_work);
}
EXPORT_SYMBOL_GPL(__put_net);

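/*
 * Lifetime sketch: get_net() takes a reference, put_net() drops it; the
 * final put lands here and defers teardown to cleanup_net() via the
 * workqueue, since the last put may happen in atomic context. Illustrative
 * pairing (not from this file):
 *
 *	struct net *net = get_net(sock_net(sk));	// hold the netns
 *	...
 *	put_net(net);	// may queue cleanup_net() if this was the last ref
 */
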
struct net *get_net_ns_by_fd(int fd)
{
	struct file *file;
	struct ns_common *ns;
	struct net *net;

	file = proc_ns_fget(fd);
	if (IS_ERR(file))
		return ERR_CAST(file);

	ns = get_proc_ns(file_inode(file));
	if (ns->ops == &netns_operations)
		net = get_net(container_of(ns, struct net, ns));
	else
		net = ERR_PTR(-EINVAL);

	fput(file);
	return net;
}

#else
struct net *get_net_ns_by_fd(int fd)
{
	return ERR_PTR(-EINVAL);
}
#endif
EXPORT_SYMBOL_GPL(get_net_ns_by_fd);

struct net *get_net_ns_by_pid(pid_t pid)
{
	struct task_struct *tsk;
	struct net *net;

	/* Lookup the network namespace */
	net = ERR_PTR(-ESRCH);
	rcu_read_lock();
	tsk = find_task_by_vpid(pid);
	if (tsk) {
		struct nsproxy *nsproxy;
		task_lock(tsk);
		nsproxy = tsk->nsproxy;
		if (nsproxy)
			net = get_net(nsproxy->net_ns);
		task_unlock(tsk);
	}
	rcu_read_unlock();
	return net;
}
EXPORT_SYMBOL_GPL(get_net_ns_by_pid);

static __net_init int net_ns_net_init(struct net *net)
{
#ifdef CONFIG_NET_NS
	net->ns.ops = &netns_operations;
#endif
	return ns_alloc_inum(&net->ns);
}

static __net_exit void net_ns_net_exit(struct net *net)
{
	ns_free_inum(&net->ns);
}

static struct pernet_operations __net_initdata net_ns_ops = {
	.init = net_ns_net_init,
	.exit = net_ns_net_exit,
};

static struct nla_policy rtnl_net_policy[NETNSA_MAX + 1] = {
	[NETNSA_NONE]	= { .type = NLA_UNSPEC },
	[NETNSA_NSID]	= { .type = NLA_S32 },
	[NETNSA_PID]	= { .type = NLA_U32 },
	[NETNSA_FD]	= { .type = NLA_U32 },
};

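/*
 * The RTM_*NSID messages carry a struct rtgenmsg followed by the attributes
 * above. As an illustrative example, "ip netns set vpn0 42" is expected to
 * send roughly (exact iproute2 behaviour is an assumption here; the
 * attribute layout follows rtnl_net_policy):
 *
 *	struct nlmsghdr  { .nlmsg_type = RTM_NEWNSID, ... }
 *	struct rtgenmsg  { .rtgen_family = AF_UNSPEC }
 *	struct nlattr    NETNSA_FD   -> fd of /var/run/netns/vpn0
 *	struct nlattr    NETNSA_NSID -> 42
 */
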
static int rtnl_net_newid(struct sk_buff *skb, struct nlmsghdr *nlh)
{
	struct net *net = sock_net(skb->sk);
	struct nlattr *tb[NETNSA_MAX + 1];
	struct net *peer;
	int nsid, err;

	err = nlmsg_parse(nlh, sizeof(struct rtgenmsg), tb, NETNSA_MAX,
			  rtnl_net_policy);
	if (err < 0)
		return err;
	if (!tb[NETNSA_NSID])
		return -EINVAL;
	nsid = nla_get_s32(tb[NETNSA_NSID]);

	if (tb[NETNSA_PID])
		peer = get_net_ns_by_pid(nla_get_u32(tb[NETNSA_PID]));
	else if (tb[NETNSA_FD])
		peer = get_net_ns_by_fd(nla_get_u32(tb[NETNSA_FD]));
	else
		return -EINVAL;
	if (IS_ERR(peer))
		return PTR_ERR(peer);

	spin_lock_bh(&net->nsid_lock);
	if (__peernet2id(net, peer) >= 0) {
		spin_unlock_bh(&net->nsid_lock);
		err = -EEXIST;
		goto out;
	}

	err = alloc_netid(net, peer, nsid);
	spin_unlock_bh(&net->nsid_lock);
	if (err >= 0) {
		rtnl_net_notifyid(net, RTM_NEWNSID, err);
		err = 0;
	}
out:
	put_net(peer);
	return err;
}

static int rtnl_net_get_size(void)
{
	return NLMSG_ALIGN(sizeof(struct rtgenmsg))
	       + nla_total_size(sizeof(s32)) /* NETNSA_NSID */
	       ;
}

static int rtnl_net_fill(struct sk_buff *skb, u32 portid, u32 seq, int flags,
			 int cmd, struct net *net, int nsid)
{
	struct nlmsghdr *nlh;
	struct rtgenmsg *rth;

	nlh = nlmsg_put(skb, portid, seq, cmd, sizeof(*rth), flags);
	if (!nlh)
		return -EMSGSIZE;

	rth = nlmsg_data(nlh);
	rth->rtgen_family = AF_UNSPEC;

	if (nla_put_s32(skb, NETNSA_NSID, nsid))
		goto nla_put_failure;

	nlmsg_end(skb, nlh);
	return 0;

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

static int rtnl_net_getid(struct sk_buff *skb, struct nlmsghdr *nlh)
{
	struct net *net = sock_net(skb->sk);
	struct nlattr *tb[NETNSA_MAX + 1];
	struct sk_buff *msg;
	struct net *peer;
	int err, id;

	err = nlmsg_parse(nlh, sizeof(struct rtgenmsg), tb, NETNSA_MAX,
			  rtnl_net_policy);
	if (err < 0)
		return err;
	if (tb[NETNSA_PID])
		peer = get_net_ns_by_pid(nla_get_u32(tb[NETNSA_PID]));
	else if (tb[NETNSA_FD])
		peer = get_net_ns_by_fd(nla_get_u32(tb[NETNSA_FD]));
	else
		return -EINVAL;

	if (IS_ERR(peer))
		return PTR_ERR(peer);

	msg = nlmsg_new(rtnl_net_get_size(), GFP_KERNEL);
	if (!msg) {
		err = -ENOMEM;
		goto out;
	}

	id = peernet2id(net, peer);
	err = rtnl_net_fill(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq, 0,
			    RTM_NEWNSID, net, id);
	if (err < 0)
		goto err_out;

	err = rtnl_unicast(msg, net, NETLINK_CB(skb).portid);
	goto out;

err_out:
	nlmsg_free(msg);
out:
	put_net(peer);
	return err;
}

struct rtnl_net_dump_cb {
	struct net *net;
	struct sk_buff *skb;
	struct netlink_callback *cb;
	int idx;
	int s_idx;
};

static int rtnl_net_dumpid_one(int id, void *peer, void *data)
{
	struct rtnl_net_dump_cb *net_cb = (struct rtnl_net_dump_cb *)data;
	int ret;

	if (net_cb->idx < net_cb->s_idx)
		goto cont;

	ret = rtnl_net_fill(net_cb->skb, NETLINK_CB(net_cb->cb->skb).portid,
			    net_cb->cb->nlh->nlmsg_seq, NLM_F_MULTI,
			    RTM_NEWNSID, net_cb->net, id);
	if (ret < 0)
		return ret;

cont:
	net_cb->idx++;
	return 0;
}

static int rtnl_net_dumpid(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	struct rtnl_net_dump_cb net_cb = {
		.net = net,
		.skb = skb,
		.cb = cb,
		.idx = 0,
		.s_idx = cb->args[0],
	};

	spin_lock_bh(&net->nsid_lock);
	idr_for_each(&net->netns_ids, rtnl_net_dumpid_one, &net_cb);
	spin_unlock_bh(&net->nsid_lock);

	cb->args[0] = net_cb.idx;
	return skb->len;
}

static void rtnl_net_notifyid(struct net *net, int cmd, int id)
{
	struct sk_buff *msg;
	int err = -ENOMEM;

	msg = nlmsg_new(rtnl_net_get_size(), GFP_KERNEL);
	if (!msg)
		goto out;

	err = rtnl_net_fill(msg, 0, 0, 0, cmd, net, id);
	if (err < 0)
		goto err_out;

	rtnl_notify(msg, net, 0, RTNLGRP_NSID, NULL, 0);
	return;

err_out:
	nlmsg_free(msg);
out:
	rtnl_set_sk_err(net, RTNLGRP_NSID, err);
}

static int __init net_ns_init(void)
{
	struct net_generic *ng;

#ifdef CONFIG_NET_NS
	net_cachep = kmem_cache_create("net_namespace", sizeof(struct net),
					SMP_CACHE_BYTES,
					SLAB_PANIC, NULL);

	/* Create workqueue for cleanup */
	netns_wq = create_singlethread_workqueue("netns");
	if (!netns_wq)
		panic("Could not create netns workq");
#endif

	ng = net_alloc_generic();
	if (!ng)
		panic("Could not allocate generic netns");

	rcu_assign_pointer(init_net.gen, ng);

	mutex_lock(&net_mutex);
	if (setup_net(&init_net, &init_user_ns))
		panic("Could not setup the initial network namespace");

	rtnl_lock();
	list_add_tail_rcu(&init_net.list, &net_namespace_list);
	rtnl_unlock();

	mutex_unlock(&net_mutex);

	register_pernet_subsys(&net_ns_ops);

	rtnl_register(PF_UNSPEC, RTM_NEWNSID, rtnl_net_newid, NULL, NULL);
	rtnl_register(PF_UNSPEC, RTM_GETNSID, rtnl_net_getid, rtnl_net_dumpid,
		      NULL);

	return 0;
}

pure_initcall(net_ns_init);

#ifdef CONFIG_NET_NS
static int __register_pernet_operations(struct list_head *list,
					struct pernet_operations *ops)
{
	struct net *net;
	int error;
	LIST_HEAD(net_exit_list);

	list_add_tail(&ops->list, list);
	if (ops->init || (ops->id && ops->size)) {
		for_each_net(net) {
			error = ops_init(ops, net);
			if (error)
				goto out_undo;
			list_add_tail(&net->exit_list, &net_exit_list);
		}
	}
	return 0;

out_undo:
	/* If I have an error cleanup all namespaces I initialized */
	list_del(&ops->list);
	ops_exit_list(ops, &net_exit_list);
	ops_free_list(ops, &net_exit_list);
	return error;
}

static void __unregister_pernet_operations(struct pernet_operations *ops)
{
	struct net *net;
	LIST_HEAD(net_exit_list);

	list_del(&ops->list);
	for_each_net(net)
		list_add_tail(&net->exit_list, &net_exit_list);
	ops_exit_list(ops, &net_exit_list);
	ops_free_list(ops, &net_exit_list);
}

#else

static int __register_pernet_operations(struct list_head *list,
					struct pernet_operations *ops)
{
	return ops_init(ops, &init_net);
}

static void __unregister_pernet_operations(struct pernet_operations *ops)
{
	LIST_HEAD(net_exit_list);
	list_add(&init_net.exit_list, &net_exit_list);
	ops_exit_list(ops, &net_exit_list);
	ops_free_list(ops, &net_exit_list);
}

#endif /* CONFIG_NET_NS */

static DEFINE_IDA(net_generic_ids);

static int register_pernet_operations(struct list_head *list,
				      struct pernet_operations *ops)
{
	int error;

	if (ops->id) {
again:
		error = ida_get_new_above(&net_generic_ids, 1, ops->id);
		if (error < 0) {
			if (error == -EAGAIN) {
				ida_pre_get(&net_generic_ids, GFP_KERNEL);
				goto again;
			}
			return error;
		}
		max_gen_ptrs = max_t(unsigned int, max_gen_ptrs, *ops->id);
	}
	error = __register_pernet_operations(list, ops);
	if (error) {
		rcu_barrier();
		if (ops->id)
			ida_remove(&net_generic_ids, *ops->id);
	}

	return error;
}

static void unregister_pernet_operations(struct pernet_operations *ops)
{
	__unregister_pernet_operations(ops);
	rcu_barrier();
	if (ops->id)
		ida_remove(&net_generic_ids, *ops->id);
}

/**
 *	register_pernet_subsys - register a network namespace subsystem
 *	@ops:  pernet operations structure for the subsystem
 *
 *	Register a subsystem which has init and exit functions
 *	that are called when network namespaces are created and
 *	destroyed respectively.
 *
 *	When registered, all network namespace init functions are
 *	called for every existing network namespace, allowing kernel
 *	modules to have a race-free view of the set of network namespaces.
 *
 *	When a new network namespace is created all of the init
 *	methods are called in the order in which they were registered.
 *
 *	When a network namespace is destroyed all of the exit methods
 *	are called in the reverse of the order in which they were
 *	registered.
 */
int register_pernet_subsys(struct pernet_operations *ops)
{
	int error;
	mutex_lock(&net_mutex);
	error = register_pernet_operations(first_device, ops);
	mutex_unlock(&net_mutex);
	return error;
}
EXPORT_SYMBOL_GPL(register_pernet_subsys);

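/*
 * A minimal sketch of a subsystem using this API (the "foo" names are
 * hypothetical, not part of this file):
 *
 *	static int foo_net_id;			// filled in at registration
 *
 *	static struct pernet_operations foo_net_ops = {
 *		.init = foo_net_init,		// run per existing + new netns
 *		.exit = foo_net_exit,		// run on netns teardown
 *		.id   = &foo_net_id,
 *		.size = sizeof(struct foo_net),	// per-netns state, kzalloc'd
 *	};
 *
 *	static int __init foo_init(void)
 *	{
 *		return register_pernet_subsys(&foo_net_ops);
 *	}
 *
 * With .id/.size set, ops_init() allocates the per-net struct and hooks it
 * into net->gen, so foo_net_init() can fetch it via net_generic().
 */
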
/**
 *	unregister_pernet_subsys - unregister a network namespace subsystem
 *	@ops: pernet operations structure to manipulate
 *
 *	Remove the pernet operations structure from the list to be
 *	used when network namespaces are created or destroyed.  In
 *	addition run the exit method for all existing network
 *	namespaces.
 */
void unregister_pernet_subsys(struct pernet_operations *ops)
{
	mutex_lock(&net_mutex);
	unregister_pernet_operations(ops);
	mutex_unlock(&net_mutex);
}
EXPORT_SYMBOL_GPL(unregister_pernet_subsys);

/**
 *	register_pernet_device - register a network namespace device
 *	@ops:  pernet operations structure for the subsystem
 *
 *	Register a device which has init and exit functions
 *	that are called when network namespaces are created and
 *	destroyed respectively.
 *
 *	When registered, all network namespace init functions are
 *	called for every existing network namespace, allowing kernel
 *	modules to have a race-free view of the set of network namespaces.
 *
 *	When a new network namespace is created all of the init
 *	methods are called in the order in which they were registered.
 *
 *	When a network namespace is destroyed all of the exit methods
 *	are called in the reverse of the order in which they were
 *	registered.
 */
int register_pernet_device(struct pernet_operations *ops)
{
	int error;
	mutex_lock(&net_mutex);
	error = register_pernet_operations(&pernet_list, ops);
	if (!error && (first_device == &pernet_list))
		first_device = &ops->list;
	mutex_unlock(&net_mutex);
	return error;
}
EXPORT_SYMBOL_GPL(register_pernet_device);

/**
 *	unregister_pernet_device - unregister a network namespace netdevice
 *	@ops: pernet operations structure to manipulate
 *
 *	Remove the pernet operations structure from the list to be
 *	used when network namespaces are created or destroyed.  In
 *	addition run the exit method for all existing network
 *	namespaces.
 */
void unregister_pernet_device(struct pernet_operations *ops)
{
	mutex_lock(&net_mutex);
	if (&ops->list == first_device)
		first_device = first_device->next;
	unregister_pernet_operations(ops);
	mutex_unlock(&net_mutex);
}
EXPORT_SYMBOL_GPL(unregister_pernet_device);

#ifdef CONFIG_NET_NS
static struct ns_common *netns_get(struct task_struct *task)
{
	struct net *net = NULL;
	struct nsproxy *nsproxy;

	task_lock(task);
	nsproxy = task->nsproxy;
	if (nsproxy)
		net = get_net(nsproxy->net_ns);
	task_unlock(task);

	return net ? &net->ns : NULL;
}

static inline struct net *to_net_ns(struct ns_common *ns)
{
	return container_of(ns, struct net, ns);
}

static void netns_put(struct ns_common *ns)
{
	put_net(to_net_ns(ns));
}

static int netns_install(struct nsproxy *nsproxy, struct ns_common *ns)
{
	struct net *net = to_net_ns(ns);

	if (!ns_capable(net->user_ns, CAP_SYS_ADMIN) ||
	    !ns_capable(current_user_ns(), CAP_SYS_ADMIN))
		return -EPERM;

	put_net(nsproxy->net_ns);
	nsproxy->net_ns = get_net(net);
	return 0;
}

const struct proc_ns_operations netns_operations = {
	.name		= "net",
	.type		= CLONE_NEWNET,
	.get		= netns_get,
	.put		= netns_put,
	.install	= netns_install,
};
#endif
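
/*
 * netns_install() is what ultimately runs when userspace calls setns(2) on
 * a netns fd. An illustrative userspace sketch (paths are assumptions):
 *
 *	int fd = open("/proc/1234/ns/net", O_RDONLY);	// or a bind-mounted
 *							// /var/run/netns/X
 *	if (setns(fd, CLONE_NEWNET) < 0)	// fails with EPERM without
 *		perror("setns");		// CAP_SYS_ADMIN in both userns
 *
 * get_net_ns_by_fd() above resolves the same kind of fd inside the kernel.
 */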