#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/workqueue.h>
#include <linux/rtnetlink.h>
#include <linux/cache.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/idr.h>
#include <linux/rculist.h>
#include <linux/nsproxy.h>
#include <linux/fs.h>
#include <linux/proc_ns.h>
#include <linux/file.h>
#include <linux/export.h>
#include <linux/user_namespace.h>
#include <linux/net_namespace.h>

#include <net/sock.h>
#include <net/netlink.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>

/*
 *	Our network namespace constructor/destructor lists
 */

static LIST_HEAD(pernet_list);
static struct list_head *first_device = &pernet_list;
DEFINE_MUTEX(net_mutex);

LIST_HEAD(net_namespace_list);
EXPORT_SYMBOL_GPL(net_namespace_list);

struct net init_net = {
	.dev_base_head = LIST_HEAD_INIT(init_net.dev_base_head),
};
EXPORT_SYMBOL(init_net);

static bool init_net_initialized;

#define MIN_PERNET_OPS_ID	\
	((sizeof(struct net_generic) + sizeof(void *) - 1) / sizeof(void *))

#define INITIAL_NET_GEN_PTRS	13 /* +1 for len +2 for rcu_head */

static unsigned int max_gen_ptrs = INITIAL_NET_GEN_PTRS;

static struct net_generic *net_alloc_generic(void)
{
	struct net_generic *ng;
	unsigned int generic_size = offsetof(struct net_generic, ptr[max_gen_ptrs]);

	ng = kzalloc(generic_size, GFP_KERNEL);
	if (ng)
		ng->s.len = max_gen_ptrs;

	return ng;
}

static int net_assign_generic(struct net *net, unsigned int id, void *data)
{
	struct net_generic *ng, *old_ng;

	BUG_ON(!mutex_is_locked(&net_mutex));
	BUG_ON(id < MIN_PERNET_OPS_ID);

	old_ng = rcu_dereference_protected(net->gen,
					   lockdep_is_held(&net_mutex));
	if (old_ng->s.len > id) {
		old_ng->ptr[id] = data;
		return 0;
	}

	ng = net_alloc_generic();
	if (!ng)
		return -ENOMEM;

	/*
	 * Some synchronisation notes:
	 *
	 * net_generic() explores the net->gen array inside an rcu
	 * read section.  Besides, once set, the net->gen->ptr[x]
	 * pointer never changes (see rules in netns/generic.h).
	 *
	 * That said, we simply duplicate this array and schedule
	 * the old copy for kfree after a grace period.
	 */

	memcpy(&ng->ptr[MIN_PERNET_OPS_ID], &old_ng->ptr[MIN_PERNET_OPS_ID],
	       (old_ng->s.len - MIN_PERNET_OPS_ID) * sizeof(void *));
	ng->ptr[id] = data;

	rcu_assign_pointer(net->gen, ng);
	kfree_rcu(old_ng, s.rcu);
	return 0;
}
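
/*
 * Illustrative sketch (not part of the original file): the reader side that
 * the synchronisation notes above rely on.  A pernet user fetches its
 * per-namespace data with net_generic() (netns/generic.h), which takes the
 * rcu read lock and dereferences net->gen itself; the array may be replaced
 * concurrently by net_assign_generic(), but a slot never changes once set.
 * "struct foo_net" and "foo_net_id" are hypothetical names.
 */
#if 0	/* example only, never compiled */
static struct foo_net *foo_pernet(struct net *net)
{
	return net_generic(net, foo_net_id);
}
#endif
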
static int ops_init(const struct pernet_operations *ops, struct net *net)
{
	int err = -ENOMEM;
	void *data = NULL;

	if (ops->id && ops->size) {
		data = kzalloc(ops->size, GFP_KERNEL);
		if (!data)
			goto out;

		err = net_assign_generic(net, *ops->id, data);
		if (err)
			goto cleanup;
	}
	err = 0;
	if (ops->init)
		err = ops->init(net);
	if (!err)
		return 0;

cleanup:
	kfree(data);

out:
	return err;
}

static void ops_free(const struct pernet_operations *ops, struct net *net)
{
	if (ops->id && ops->size)
		kfree(net_generic(net, *ops->id));
}

static void ops_exit_list(const struct pernet_operations *ops,
			  struct list_head *net_exit_list)
{
	struct net *net;

	if (ops->exit) {
		list_for_each_entry(net, net_exit_list, exit_list)
			ops->exit(net);
	}
	if (ops->exit_batch)
		ops->exit_batch(net_exit_list);
}

static void ops_free_list(const struct pernet_operations *ops,
			  struct list_head *net_exit_list)
{
	struct net *net;

	if (ops->size && ops->id) {
		list_for_each_entry(net, net_exit_list, exit_list)
			ops_free(ops, net);
	}
}

/* should be called with nsid_lock held */
static int alloc_netid(struct net *net, struct net *peer, int reqid)
{
	int min = 0, max = 0;

	if (reqid >= 0) {
		min = reqid;
		max = reqid + 1;
	}

	return idr_alloc(&net->netns_ids, peer, min, max, GFP_ATOMIC);
}

/* This function is used by idr_for_each(). If net is equal to peer, the
 * function returns the id so that idr_for_each() stops. Because we cannot
 * return the id 0 (idr_for_each() would not stop), we return the magic value
 * NET_ID_ZERO (-1) for it.
 */
#define NET_ID_ZERO -1
static int net_eq_idr(int id, void *net, void *peer)
{
	if (net_eq(net, peer))
		return id ? : NET_ID_ZERO;
	return 0;
}

/* Should be called with nsid_lock held. If a new id is assigned, the bool alloc
 * is set to true, thus the caller knows that the new id must be notified via
 * rtnetlink.
 */
static int __peernet2id_alloc(struct net *net, struct net *peer, bool *alloc)
{
	int id = idr_for_each(&net->netns_ids, net_eq_idr, peer);
	bool alloc_it = *alloc;

	*alloc = false;

	/* Magic value for id 0. */
	if (id == NET_ID_ZERO)
		return 0;
	if (id > 0)
		return id;

	if (alloc_it) {
		id = alloc_netid(net, peer, -1);
		*alloc = true;
		return id >= 0 ? id : NETNSA_NSID_NOT_ASSIGNED;
	}

	return NETNSA_NSID_NOT_ASSIGNED;
}

/* should be called with nsid_lock held */
static int __peernet2id(struct net *net, struct net *peer)
{
	bool no = false;

	return __peernet2id_alloc(net, peer, &no);
}

static void rtnl_net_notifyid(struct net *net, int cmd, int id);

/* This function returns the id of a peer netns. If no id is assigned, one will
 * be allocated and returned.
 */
int peernet2id_alloc(struct net *net, struct net *peer)
{
	bool alloc;
	int id;

	if (atomic_read(&net->count) == 0)
		return NETNSA_NSID_NOT_ASSIGNED;
	spin_lock_bh(&net->nsid_lock);
	alloc = atomic_read(&peer->count) == 0 ? false : true;
	id = __peernet2id_alloc(net, peer, &alloc);
	spin_unlock_bh(&net->nsid_lock);
	if (alloc && id >= 0)
		rtnl_net_notifyid(net, RTM_NEWNSID, id);
	return id;
}

/* This function returns, if assigned, the id of a peer netns. */
int peernet2id(struct net *net, struct net *peer)
{
	int id;

	spin_lock_bh(&net->nsid_lock);
	id = __peernet2id(net, peer);
	spin_unlock_bh(&net->nsid_lock);
	return id;
}
EXPORT_SYMBOL(peernet2id);

/* This function returns true if the peer netns has an id assigned in the
 * current netns.
 */
bool peernet_has_id(struct net *net, struct net *peer)
{
	return peernet2id(net, peer) >= 0;
}
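
/*
 * Illustrative sketch (not part of the original file): the typical calling
 * pattern for the peer-id helpers.  Netlink dump code that describes an
 * object living in another netns asks for (and, if needed, assigns) an id
 * that is meaningful to the destination namespace, then puts it in whatever
 * nsid attribute its message defines.  "fill_peer_nsid" and "FOO_ATTR_NSID"
 * are hypothetical names.
 */
#if 0	/* example only, never compiled */
static int fill_peer_nsid(struct sk_buff *skb, struct net *dst_net,
			  struct net *peer)
{
	int id = peernet2id_alloc(dst_net, peer);

	/* NETNSA_NSID_NOT_ASSIGNED is reported as-is if no id could be set */
	return nla_put_s32(skb, FOO_ATTR_NSID, id);
}
#endif
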
struct net *get_net_ns_by_id(struct net *net, int id)
{
	struct net *peer;

	if (id < 0)
		return NULL;

	rcu_read_lock();
	spin_lock_bh(&net->nsid_lock);
	peer = idr_find(&net->netns_ids, id);
	if (peer)
		get_net(peer);
	spin_unlock_bh(&net->nsid_lock);
	rcu_read_unlock();

	return peer;
}

/*
 * setup_net runs the initializers for the network namespace object.
 */
static __net_init int setup_net(struct net *net, struct user_namespace *user_ns)
{
	/* Must be called with net_mutex held */
	const struct pernet_operations *ops, *saved_ops;
	int error = 0;
	LIST_HEAD(net_exit_list);

	atomic_set(&net->count, 1);
	atomic_set(&net->passive, 1);
	net->dev_base_seq = 1;
	net->user_ns = user_ns;
	idr_init(&net->netns_ids);
	spin_lock_init(&net->nsid_lock);

	list_for_each_entry(ops, &pernet_list, list) {
		error = ops_init(ops, net);
		if (error < 0)
			goto out_undo;
	}
out:
	return error;

out_undo:
	/* Walk through the list backwards calling the exit functions
	 * for the pernet modules whose init functions did not fail.
	 */
	list_add(&net->exit_list, &net_exit_list);
	saved_ops = ops;
	list_for_each_entry_continue_reverse(ops, &pernet_list, list)
		ops_exit_list(ops, &net_exit_list);

	ops = saved_ops;
	list_for_each_entry_continue_reverse(ops, &pernet_list, list)
		ops_free_list(ops, &net_exit_list);

	rcu_barrier();
	goto out;
}

static struct ucounts *inc_net_namespaces(struct user_namespace *ns)
{
	return inc_ucount(ns, current_euid(), UCOUNT_NET_NAMESPACES);
}

static void dec_net_namespaces(struct ucounts *ucounts)
{
	dec_ucount(ucounts, UCOUNT_NET_NAMESPACES);
}

#ifdef CONFIG_NET_NS
static struct kmem_cache *net_cachep;
static struct workqueue_struct *netns_wq;

static struct net *net_alloc(void)
{
	struct net *net = NULL;
	struct net_generic *ng;

	ng = net_alloc_generic();
	if (!ng)
		goto out;

	net = kmem_cache_zalloc(net_cachep, GFP_KERNEL);
	if (!net)
		goto out_free;

	rcu_assign_pointer(net->gen, ng);
out:
	return net;

out_free:
	kfree(ng);
	goto out;
}

static void net_free(struct net *net)
{
	kfree(rcu_access_pointer(net->gen));
	kmem_cache_free(net_cachep, net);
}

void net_drop_ns(void *p)
{
	struct net *ns = p;

	if (ns && atomic_dec_and_test(&ns->passive))
		net_free(ns);
}

struct net *copy_net_ns(unsigned long flags,
			struct user_namespace *user_ns, struct net *old_net)
{
	struct ucounts *ucounts;
	struct net *net;
	int rv;

	if (!(flags & CLONE_NEWNET))
		return get_net(old_net);

	ucounts = inc_net_namespaces(user_ns);
	if (!ucounts)
		return ERR_PTR(-ENOSPC);

	net = net_alloc();
	if (!net) {
		dec_net_namespaces(ucounts);
		return ERR_PTR(-ENOMEM);
	}

	get_user_ns(user_ns);

	rv = mutex_lock_killable(&net_mutex);
	if (rv < 0) {
		net_free(net);
		dec_net_namespaces(ucounts);
		put_user_ns(user_ns);
		return ERR_PTR(rv);
	}

	net->ucounts = ucounts;
	rv = setup_net(net, user_ns);
	if (rv == 0) {
		rtnl_lock();
		list_add_tail_rcu(&net->list, &net_namespace_list);
		rtnl_unlock();
	}
	mutex_unlock(&net_mutex);
	if (rv < 0) {
		dec_net_namespaces(ucounts);
		put_user_ns(user_ns);
		net_drop_ns(net);
		return ERR_PTR(rv);
	}
	return net;
}

static DEFINE_SPINLOCK(cleanup_list_lock);
static LIST_HEAD(cleanup_list);  /* Must hold cleanup_list_lock to touch */

static void cleanup_net(struct work_struct *work)
{
	const struct pernet_operations *ops;
	struct net *net, *tmp;
	struct list_head net_kill_list;
	LIST_HEAD(net_exit_list);

	/* Atomically snapshot the list of namespaces to cleanup */
	spin_lock_irq(&cleanup_list_lock);
	list_replace_init(&cleanup_list, &net_kill_list);
	spin_unlock_irq(&cleanup_list_lock);

	mutex_lock(&net_mutex);

	/* Don't let anyone else find us. */
	rtnl_lock();
	list_for_each_entry(net, &net_kill_list, cleanup_list) {
		list_del_rcu(&net->list);
		list_add_tail(&net->exit_list, &net_exit_list);
		for_each_net(tmp) {
			int id;

			spin_lock_bh(&tmp->nsid_lock);
			id = __peernet2id(tmp, net);
			if (id >= 0)
				idr_remove(&tmp->netns_ids, id);
			spin_unlock_bh(&tmp->nsid_lock);
			if (id >= 0)
				rtnl_net_notifyid(tmp, RTM_DELNSID, id);
		}
		spin_lock_bh(&net->nsid_lock);
		idr_destroy(&net->netns_ids);
		spin_unlock_bh(&net->nsid_lock);
	}
	rtnl_unlock();

	/*
	 * Another CPU might be rcu-iterating the list, wait for it.
	 * This needs to be before calling the exit() notifiers, so
	 * the rcu_barrier() below isn't sufficient alone.
	 */
	synchronize_rcu();

	/* Run all of the network namespace exit methods */
	list_for_each_entry_reverse(ops, &pernet_list, list)
		ops_exit_list(ops, &net_exit_list);

	/* Free the net generic variables */
	list_for_each_entry_reverse(ops, &pernet_list, list)
		ops_free_list(ops, &net_exit_list);

	mutex_unlock(&net_mutex);

	/* Ensure there are no outstanding rcu callbacks using this
	 * network namespace.
	 */
	rcu_barrier();

	/* Finally it is safe to free my network namespace structure */
	list_for_each_entry_safe(net, tmp, &net_exit_list, exit_list) {
		list_del_init(&net->exit_list);
		dec_net_namespaces(net->ucounts);
		put_user_ns(net->user_ns);
		net_drop_ns(net);
	}
}

static DECLARE_WORK(net_cleanup_work, cleanup_net);

void __put_net(struct net *net)
{
	/* Cleanup the network namespace in process context */
	unsigned long flags;

	spin_lock_irqsave(&cleanup_list_lock, flags);
	list_add(&net->cleanup_list, &cleanup_list);
	spin_unlock_irqrestore(&cleanup_list_lock, flags);

	queue_work(netns_wq, &net_cleanup_work);
}
EXPORT_SYMBOL_GPL(__put_net);

struct net *get_net_ns_by_fd(int fd)
{
	struct file *file;
	struct ns_common *ns;
	struct net *net;

	file = proc_ns_fget(fd);
	if (IS_ERR(file))
		return ERR_CAST(file);

	ns = get_proc_ns(file_inode(file));
	if (ns->ops == &netns_operations)
		net = get_net(container_of(ns, struct net, ns));
	else
		net = ERR_PTR(-EINVAL);

	fput(file);
	return net;
}

#else /* !CONFIG_NET_NS */

struct net *get_net_ns_by_fd(int fd)
{
	return ERR_PTR(-EINVAL);
}
#endif
EXPORT_SYMBOL_GPL(get_net_ns_by_fd);

struct net *get_net_ns_by_pid(pid_t pid)
{
	struct task_struct *tsk;
	struct net *net;

	/* Lookup the network namespace */
	net = ERR_PTR(-ESRCH);
	rcu_read_lock();
	tsk = find_task_by_vpid(pid);
	if (tsk) {
		struct nsproxy *nsproxy;

		task_lock(tsk);
		nsproxy = tsk->nsproxy;
		if (nsproxy)
			net = get_net(nsproxy->net_ns);
		task_unlock(tsk);
	}
	rcu_read_unlock();
	return net;
}
EXPORT_SYMBOL_GPL(get_net_ns_by_pid);

static __net_init int net_ns_net_init(struct net *net)
{
#ifdef CONFIG_NET_NS
	net->ns.ops = &netns_operations;
#endif
	return ns_alloc_inum(&net->ns);
}

static __net_exit void net_ns_net_exit(struct net *net)
{
	ns_free_inum(&net->ns);
}

static struct pernet_operations __net_initdata net_ns_ops = {
	.init = net_ns_net_init,
	.exit = net_ns_net_exit,
};

static const struct nla_policy rtnl_net_policy[NETNSA_MAX + 1] = {
	[NETNSA_NONE]	= { .type = NLA_UNSPEC },
	[NETNSA_NSID]	= { .type = NLA_S32 },
	[NETNSA_PID]	= { .type = NLA_U32 },
	[NETNSA_FD]	= { .type = NLA_U32 },
};

static int rtnl_net_newid(struct sk_buff *skb, struct nlmsghdr *nlh)
{
	struct net *net = sock_net(skb->sk);
	struct nlattr *tb[NETNSA_MAX + 1];
	struct net *peer;
	int nsid, err;

	err = nlmsg_parse(nlh, sizeof(struct rtgenmsg), tb, NETNSA_MAX,
			  rtnl_net_policy);
	if (err < 0)
		return err;
	if (!tb[NETNSA_NSID])
		return -EINVAL;
	nsid = nla_get_s32(tb[NETNSA_NSID]);

	if (tb[NETNSA_PID])
		peer = get_net_ns_by_pid(nla_get_u32(tb[NETNSA_PID]));
	else if (tb[NETNSA_FD])
		peer = get_net_ns_by_fd(nla_get_u32(tb[NETNSA_FD]));
	else
		return -EINVAL;
	if (IS_ERR(peer))
		return PTR_ERR(peer);

	spin_lock_bh(&net->nsid_lock);
	if (__peernet2id(net, peer) >= 0) {
		spin_unlock_bh(&net->nsid_lock);
		err = -EEXIST;
		goto out;
	}

	err = alloc_netid(net, peer, nsid);
	spin_unlock_bh(&net->nsid_lock);
	if (err >= 0) {
		rtnl_net_notifyid(net, RTM_NEWNSID, err);
		err = 0;
	}
out:
	put_net(peer);
	return err;
}

static int rtnl_net_get_size(void)
{
	return NLMSG_ALIGN(sizeof(struct rtgenmsg))
	       + nla_total_size(sizeof(s32)) /* NETNSA_NSID */
	       ;
}

static int rtnl_net_fill(struct sk_buff *skb, u32 portid, u32 seq, int flags,
			 int cmd, struct net *net, int nsid)
{
	struct nlmsghdr *nlh;
	struct rtgenmsg *rth;

	nlh = nlmsg_put(skb, portid, seq, cmd, sizeof(*rth), flags);
	if (!nlh)
		return -EMSGSIZE;

	rth = nlmsg_data(nlh);
	rth->rtgen_family = AF_UNSPEC;

	if (nla_put_s32(skb, NETNSA_NSID, nsid))
		goto nla_put_failure;

	nlmsg_end(skb, nlh);
	return 0;

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

static int rtnl_net_getid(struct sk_buff *skb, struct nlmsghdr *nlh)
{
	struct net *net = sock_net(skb->sk);
	struct nlattr *tb[NETNSA_MAX + 1];
	struct sk_buff *msg;
	struct net *peer;
	int err, id;

	err = nlmsg_parse(nlh, sizeof(struct rtgenmsg), tb, NETNSA_MAX,
			  rtnl_net_policy);
	if (err < 0)
		return err;
	if (tb[NETNSA_PID])
		peer = get_net_ns_by_pid(nla_get_u32(tb[NETNSA_PID]));
	else if (tb[NETNSA_FD])
		peer = get_net_ns_by_fd(nla_get_u32(tb[NETNSA_FD]));
	else
		return -EINVAL;
	if (IS_ERR(peer))
		return PTR_ERR(peer);

	msg = nlmsg_new(rtnl_net_get_size(), GFP_KERNEL);
	if (!msg) {
		err = -ENOMEM;
		goto out;
	}

	id = peernet2id(net, peer);
	err = rtnl_net_fill(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq, 0,
			    RTM_NEWNSID, net, id);
	if (err < 0)
		goto err_out;

	err = rtnl_unicast(msg, net, NETLINK_CB(skb).portid);
	goto out;

err_out:
	nlmsg_free(msg);
out:
	put_net(peer);
	return err;
}

struct rtnl_net_dump_cb {
	struct net *net;
	struct sk_buff *skb;
	struct netlink_callback *cb;
	int idx;
	int s_idx;
};

static int rtnl_net_dumpid_one(int id, void *peer, void *data)
{
	struct rtnl_net_dump_cb *net_cb = (struct rtnl_net_dump_cb *)data;
	int ret;

	if (net_cb->idx < net_cb->s_idx)
		goto cont;

	ret = rtnl_net_fill(net_cb->skb, NETLINK_CB(net_cb->cb->skb).portid,
			    net_cb->cb->nlh->nlmsg_seq, NLM_F_MULTI,
			    RTM_NEWNSID, net_cb->net, id);
	if (ret < 0)
		return ret;

cont:
	net_cb->idx++;
	return 0;
}

static int rtnl_net_dumpid(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	struct rtnl_net_dump_cb net_cb = {
		.net = net,
		.skb = skb,
		.cb = cb,
		.idx = 0,
		.s_idx = cb->args[0],
	};

	spin_lock_bh(&net->nsid_lock);
	idr_for_each(&net->netns_ids, rtnl_net_dumpid_one, &net_cb);
	spin_unlock_bh(&net->nsid_lock);

	cb->args[0] = net_cb.idx;
	return skb->len;
}

static void rtnl_net_notifyid(struct net *net, int cmd, int id)
{
	struct sk_buff *msg;
	int err = -ENOMEM;

	msg = nlmsg_new(rtnl_net_get_size(), GFP_KERNEL);
	if (!msg)
		goto out;

	err = rtnl_net_fill(msg, 0, 0, 0, cmd, net, id);
	if (err < 0)
		goto err_out;

	rtnl_notify(msg, net, 0, RTNLGRP_NSID, NULL, 0);
	return;

err_out:
	nlmsg_free(msg);
out:
	rtnl_set_sk_err(net, RTNLGRP_NSID, err);
}

static int __init net_ns_init(void)
{
	struct net_generic *ng;

#ifdef CONFIG_NET_NS
	net_cachep = kmem_cache_create("net_namespace", sizeof(struct net),
					SMP_CACHE_BYTES,
					SLAB_PANIC, NULL);

	/* Create workqueue for cleanup */
	netns_wq = create_singlethread_workqueue("netns");
	if (!netns_wq)
		panic("Could not create netns workq");
#endif

	ng = net_alloc_generic();
	if (!ng)
		panic("Could not allocate generic netns");

	rcu_assign_pointer(init_net.gen, ng);

	mutex_lock(&net_mutex);
	if (setup_net(&init_net, &init_user_ns))
		panic("Could not setup the initial network namespace");

	init_net_initialized = true;

	rtnl_lock();
	list_add_tail_rcu(&init_net.list, &net_namespace_list);
	rtnl_unlock();

	mutex_unlock(&net_mutex);

	register_pernet_subsys(&net_ns_ops);

	rtnl_register(PF_UNSPEC, RTM_NEWNSID, rtnl_net_newid, NULL, NULL);
	rtnl_register(PF_UNSPEC, RTM_GETNSID, rtnl_net_getid, rtnl_net_dumpid,
		      NULL);

	return 0;
}

pure_initcall(net_ns_init);

#ifdef CONFIG_NET_NS
static int __register_pernet_operations(struct list_head *list,
					struct pernet_operations *ops)
{
	struct net *net;
	int error;
	LIST_HEAD(net_exit_list);

	list_add_tail(&ops->list, list);
	if (ops->init || (ops->id && ops->size)) {
		for_each_net(net) {
			error = ops_init(ops, net);
			if (error)
				goto out_undo;
			list_add_tail(&net->exit_list, &net_exit_list);
		}
	}
	return 0;

out_undo:
	/* If I have an error cleanup all namespaces I initialized */
	list_del(&ops->list);
	ops_exit_list(ops, &net_exit_list);
	ops_free_list(ops, &net_exit_list);
	return error;
}

static void __unregister_pernet_operations(struct pernet_operations *ops)
{
	struct net *net;
	LIST_HEAD(net_exit_list);

	list_del(&ops->list);
	for_each_net(net)
		list_add_tail(&net->exit_list, &net_exit_list);
	ops_exit_list(ops, &net_exit_list);
	ops_free_list(ops, &net_exit_list);
}

#else

static int __register_pernet_operations(struct list_head *list,
					struct pernet_operations *ops)
{
	if (!init_net_initialized) {
		list_add_tail(&ops->list, list);
		return 0;
	}

	return ops_init(ops, &init_net);
}

static void __unregister_pernet_operations(struct pernet_operations *ops)
{
	if (!init_net_initialized) {
		list_del(&ops->list);
	} else {
		LIST_HEAD(net_exit_list);

		list_add(&init_net.exit_list, &net_exit_list);
		ops_exit_list(ops, &net_exit_list);
		ops_free_list(ops, &net_exit_list);
	}
}

#endif /* CONFIG_NET_NS */

static DEFINE_IDA(net_generic_ids);

static int register_pernet_operations(struct list_head *list,
				      struct pernet_operations *ops)
{
	int error;

	if (ops->id) {
again:
		error = ida_get_new_above(&net_generic_ids, MIN_PERNET_OPS_ID,
					  ops->id);
		if (error < 0) {
			if (error == -EAGAIN) {
				ida_pre_get(&net_generic_ids, GFP_KERNEL);
				goto again;
			}
			return error;
		}
		max_gen_ptrs = max(max_gen_ptrs, *ops->id + 1);
	}
	error = __register_pernet_operations(list, ops);
	if (error) {
		rcu_barrier();
		if (ops->id)
			ida_remove(&net_generic_ids, *ops->id);
	}

	return error;
}

static void unregister_pernet_operations(struct pernet_operations *ops)
{
	__unregister_pernet_operations(ops);
	rcu_barrier();
	if (ops->id)
		ida_remove(&net_generic_ids, *ops->id);
}

/**
 * register_pernet_subsys - register a network namespace subsystem
 * @ops: pernet operations structure for the subsystem
 *
 * Register a subsystem which has init and exit functions
 * that are called when network namespaces are created and
 * destroyed respectively.
 *
 * When registered all network namespace init functions are
 * called for every existing network namespace, allowing kernel
 * modules to have a race-free view of the set of network namespaces.
 *
 * When a new network namespace is created all of the init
 * methods are called in the order in which they were registered.
 *
 * When a network namespace is destroyed all of the exit methods
 * are called in the reverse of the order with which they were
 * registered.
 */
int register_pernet_subsys(struct pernet_operations *ops)
{
	int error;

	mutex_lock(&net_mutex);
	error = register_pernet_operations(first_device, ops);
	mutex_unlock(&net_mutex);
	return error;
}
EXPORT_SYMBOL_GPL(register_pernet_subsys);
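
/*
 * Illustrative sketch (not part of the original file): a minimal pernet
 * subsystem as a module might register it.  Because .id and .size are set,
 * ops_init() allocates and zeroes the per-namespace struct, and callers can
 * fetch it later with net_generic().  All names ("foo_net", "foo_net_id",
 * foo_init/foo_exit) are hypothetical.
 */
#if 0	/* example only, never compiled */
struct foo_net {
	int counter;
};

static unsigned int foo_net_id;

static int __net_init foo_net_init(struct net *net)
{
	struct foo_net *fn = net_generic(net, foo_net_id);

	fn->counter = 0;	/* already zeroed by ops_init(); shown for clarity */
	return 0;
}

static void __net_exit foo_net_exit(struct net *net)
{
	/* undo whatever foo_net_init() set up; the struct itself is
	 * released by ops_free()
	 */
}

static struct pernet_operations foo_net_ops = {
	.init = foo_net_init,
	.exit = foo_net_exit,
	.id   = &foo_net_id,
	.size = sizeof(struct foo_net),
};

static int __init foo_init(void)
{
	return register_pernet_subsys(&foo_net_ops);
}

static void __exit foo_exit(void)
{
	unregister_pernet_subsys(&foo_net_ops);
}
#endif
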
/**
 * unregister_pernet_subsys - unregister a network namespace subsystem
 * @ops: pernet operations structure to manipulate
 *
 * Remove the pernet operations structure from the list to be
 * used when network namespaces are created or destroyed.  In
 * addition run the exit method for all existing network
 * namespaces.
 */
void unregister_pernet_subsys(struct pernet_operations *ops)
{
	mutex_lock(&net_mutex);
	unregister_pernet_operations(ops);
	mutex_unlock(&net_mutex);
}
EXPORT_SYMBOL_GPL(unregister_pernet_subsys);

/**
 * register_pernet_device - register a network namespace device
 * @ops: pernet operations structure for the subsystem
 *
 * Register a device which has init and exit functions
 * that are called when network namespaces are created and
 * destroyed respectively.
 *
 * When registered all network namespace init functions are
 * called for every existing network namespace, allowing kernel
 * modules to have a race-free view of the set of network namespaces.
 *
 * When a new network namespace is created all of the init
 * methods are called in the order in which they were registered.
 *
 * When a network namespace is destroyed all of the exit methods
 * are called in the reverse of the order with which they were
 * registered.
 */
int register_pernet_device(struct pernet_operations *ops)
{
	int error;

	mutex_lock(&net_mutex);
	error = register_pernet_operations(&pernet_list, ops);
	if (!error && (first_device == &pernet_list))
		first_device = &ops->list;
	mutex_unlock(&net_mutex);
	return error;
}
EXPORT_SYMBOL_GPL(register_pernet_device);

/**
 * unregister_pernet_device - unregister a network namespace netdevice
 * @ops: pernet operations structure to manipulate
 *
 * Remove the pernet operations structure from the list to be
 * used when network namespaces are created or destroyed.  In
 * addition run the exit method for all existing network
 * namespaces.
 */
void unregister_pernet_device(struct pernet_operations *ops)
{
	mutex_lock(&net_mutex);
	if (&ops->list == first_device)
		first_device = first_device->next;
	unregister_pernet_operations(ops);
	mutex_unlock(&net_mutex);
}
EXPORT_SYMBOL_GPL(unregister_pernet_device);

#ifdef CONFIG_NET_NS
static struct ns_common *netns_get(struct task_struct *task)
{
	struct net *net = NULL;
	struct nsproxy *nsproxy;

	task_lock(task);
	nsproxy = task->nsproxy;
	if (nsproxy)
		net = get_net(nsproxy->net_ns);
	task_unlock(task);

	return net ? &net->ns : NULL;
}

static inline struct net *to_net_ns(struct ns_common *ns)
{
	return container_of(ns, struct net, ns);
}

static void netns_put(struct ns_common *ns)
{
	put_net(to_net_ns(ns));
}

static int netns_install(struct nsproxy *nsproxy, struct ns_common *ns)
{
	struct net *net = to_net_ns(ns);

	if (!ns_capable(net->user_ns, CAP_SYS_ADMIN) ||
	    !ns_capable(current_user_ns(), CAP_SYS_ADMIN))
		return -EPERM;

	put_net(nsproxy->net_ns);
	nsproxy->net_ns = get_net(net);
	return 0;
}

static struct user_namespace *netns_owner(struct ns_common *ns)
{
	return to_net_ns(ns)->user_ns;
}

const struct proc_ns_operations netns_operations = {
	.name		= "net",
	.type		= CLONE_NEWNET,
	.get		= netns_get,
	.put		= netns_put,
	.install	= netns_install,
	.owner		= netns_owner,
};
#endif /* CONFIG_NET_NS */