/*
 * NETLINK	Kernel-user communication protocol.
 *
 * 		Authors:	Alan Cox <alan@lxorguk.ukuu.org.uk>
 * 				Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
 * 				Patrick McHardy <kaber@trash.net>
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Tue Jun 26 14:36:48 MEST 2001 Herbert "herp" Rosmanith
 * 				 added netlink_proto_exit
 * Tue Jan 22 18:32:44 BRST 2002 Arnaldo C. de Melo <acme@conectiva.com.br>
 * 				 use nlk_sk, as sk->protinfo is on a diet 8)
 * Fri Jul 22 19:51:12 MEST 2005 Harald Welte <laforge@gnumonks.org>
 * 				 - inc module use count of module that owns
 * 				   the kernel socket in case userspace opens
 * 				   socket of same protocol
 * 				 - remove all module support, since netlink is
 * 				   mandatory if CONFIG_NET=y these days
 */
#include <linux/module.h>

#include <linux/capability.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/stat.h>
#include <linux/socket.h>
#include <linux/fcntl.h>
#include <linux/termios.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/slab.h>
#include <asm/uaccess.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/notifier.h>
#include <linux/security.h>
#include <linux/jhash.h>
#include <linux/jiffies.h>
#include <linux/random.h>
#include <linux/bitops.h>
#include <linux/types.h>
#include <linux/audit.h>
#include <linux/mutex.h>
#include <linux/vmalloc.h>
#include <linux/if_arp.h>
#include <linux/rhashtable.h>
#include <asm/cacheflush.h>
#include <linux/hash.h>
#include <linux/genetlink.h>

#include <net/net_namespace.h>
#include <net/netlink.h>

#include "af_netlink.h"
struct listeners {
	struct rcu_head		rcu;
	unsigned long		masks[0];
};

/* state bits */
#define NETLINK_S_CONGESTED		0x0

/* flags */
#define NETLINK_F_KERNEL_SOCKET		0x1
#define NETLINK_F_RECV_PKTINFO		0x2
#define NETLINK_F_BROADCAST_SEND_ERROR	0x4
#define NETLINK_F_RECV_NO_ENOBUFS	0x8
#define NETLINK_F_LISTEN_ALL_NSID	0x10
#define NETLINK_F_CAP_ACK		0x20
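
/* Illustrative sketch (userspace side, not part of this file): most of the
 * NETLINK_F_* flags above are toggled by a plain integer setsockopt() at
 * SOL_NETLINK; e.g. NETLINK_BROADCAST_ERROR sets
 * NETLINK_F_BROADCAST_SEND_ERROR in netlink_setsockopt() further down.
 * Error handling is omitted for brevity:
 *
 *	#include <sys/socket.h>
 *	#include <linux/netlink.h>
 *
 *	int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);
 *	int one = 1;
 *	setsockopt(fd, SOL_NETLINK, NETLINK_BROADCAST_ERROR, &one, sizeof(one));
 *	setsockopt(fd, SOL_NETLINK, NETLINK_NO_ENOBUFS, &one, sizeof(one));
 */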
static inline int netlink_is_kernel(struct sock *sk)
	return nlk_sk(sk)->flags & NETLINK_F_KERNEL_SOCKET;

struct netlink_table *nl_table __read_mostly;
EXPORT_SYMBOL_GPL(nl_table);

static DECLARE_WAIT_QUEUE_HEAD(nl_table_wait);

static int netlink_dump(struct sock *sk);
static void netlink_skb_destructor(struct sk_buff *skb);
/* nl_table locking explained:
 * Lookup and traversal are protected with an RCU read-side lock. Insertion
 * and removal are protected with per bucket lock while using RCU list
 * modification primitives and may run in parallel to RCU protected lookups.
 * Destruction of the Netlink socket may only occur *after* nl_table_lock has
 * been acquired - either during or after the socket has been removed from
 * the list and after an RCU grace period.
 */
DEFINE_RWLOCK(nl_table_lock);
EXPORT_SYMBOL_GPL(nl_table_lock);
static atomic_t nl_table_users = ATOMIC_INIT(0);

#define nl_deref_protected(X) rcu_dereference_protected(X, lockdep_is_held(&nl_table_lock))

static ATOMIC_NOTIFIER_HEAD(netlink_chain);

static DEFINE_SPINLOCK(netlink_tap_lock);
static struct list_head netlink_tap_all __read_mostly;

static const struct rhashtable_params netlink_rhashtable_params;

static inline u32 netlink_group_mask(u32 group)
	return group ? 1 << (group - 1) : 0;
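
/* Worked example: multicast group numbers are 1-based while the listener
 * bitmaps are 0-based, so (illustrative):
 *
 *	netlink_group_mask(0) == 0x0	(no group)
 *	netlink_group_mask(1) == 0x1	(bit 0)
 *	netlink_group_mask(3) == 0x4	(bit 2)
 *
 * which matches the nl_groups bitmask layout that userspace passes in
 * struct sockaddr_nl.
 */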
static struct sk_buff *netlink_to_full_skb(const struct sk_buff *skb,
					   gfp_t gfp_mask)
	unsigned int len = skb_end_offset(skb);

	new = alloc_skb(len, gfp_mask);

	NETLINK_CB(new).portid = NETLINK_CB(skb).portid;
	NETLINK_CB(new).dst_group = NETLINK_CB(skb).dst_group;
	NETLINK_CB(new).creds = NETLINK_CB(skb).creds;

	memcpy(skb_put(new, len), skb->data, len);

int netlink_add_tap(struct netlink_tap *nt)
	if (unlikely(nt->dev->type != ARPHRD_NETLINK))

	spin_lock(&netlink_tap_lock);
	list_add_rcu(&nt->list, &netlink_tap_all);
	spin_unlock(&netlink_tap_lock);

	__module_get(nt->module);

EXPORT_SYMBOL_GPL(netlink_add_tap);
static int __netlink_remove_tap(struct netlink_tap *nt)
	struct netlink_tap *tmp;

	spin_lock(&netlink_tap_lock);

	list_for_each_entry(tmp, &netlink_tap_all, list) {
		list_del_rcu(&nt->list);

	pr_warn("__netlink_remove_tap: %p not found\n", nt);

	spin_unlock(&netlink_tap_lock);

	module_put(nt->module);

	return found ? 0 : -ENODEV;

int netlink_remove_tap(struct netlink_tap *nt)
	ret = __netlink_remove_tap(nt);

EXPORT_SYMBOL_GPL(netlink_remove_tap);
static bool netlink_filter_tap(const struct sk_buff *skb)
	struct sock *sk = skb->sk;

	/* We take the more conservative approach and
	 * whitelist socket protocols that may pass.
	 */
	switch (sk->sk_protocol) {
	case NETLINK_USERSOCK:
	case NETLINK_SOCK_DIAG:
	case NETLINK_FIB_LOOKUP:
	case NETLINK_NETFILTER:
	case NETLINK_GENERIC:
static int __netlink_deliver_tap_skb(struct sk_buff *skb,
				     struct net_device *dev)
	struct sk_buff *nskb;
	struct sock *sk = skb->sk;

	if (netlink_skb_is_mmaped(skb) || is_vmalloc_addr(skb->head))
		nskb = netlink_to_full_skb(skb, GFP_ATOMIC);
	else
		nskb = skb_clone(skb, GFP_ATOMIC);

		nskb->protocol = htons((u16) sk->sk_protocol);
		nskb->pkt_type = netlink_is_kernel(sk) ?
				 PACKET_KERNEL : PACKET_USER;
		skb_reset_network_header(nskb);
		ret = dev_queue_xmit(nskb);
		if (unlikely(ret > 0))
			ret = net_xmit_errno(ret);
static void __netlink_deliver_tap(struct sk_buff *skb)
	struct netlink_tap *tmp;

	if (!netlink_filter_tap(skb))

	list_for_each_entry_rcu(tmp, &netlink_tap_all, list) {
		ret = __netlink_deliver_tap_skb(skb, tmp->dev);

static void netlink_deliver_tap(struct sk_buff *skb)
	if (unlikely(!list_empty(&netlink_tap_all)))
		__netlink_deliver_tap(skb);

static void netlink_deliver_tap_kernel(struct sock *dst, struct sock *src,
				       struct sk_buff *skb)
	if (!(netlink_is_kernel(dst) && netlink_is_kernel(src)))
		netlink_deliver_tap(skb);
static void netlink_overrun(struct sock *sk)
	struct netlink_sock *nlk = nlk_sk(sk);

	if (!(nlk->flags & NETLINK_F_RECV_NO_ENOBUFS)) {
		if (!test_and_set_bit(NETLINK_S_CONGESTED,
				      &nlk_sk(sk)->state)) {
			sk->sk_err = ENOBUFS;
			sk->sk_error_report(sk);
	atomic_inc(&sk->sk_drops);

static void netlink_rcv_wake(struct sock *sk)
	struct netlink_sock *nlk = nlk_sk(sk);

	if (skb_queue_empty(&sk->sk_receive_queue))
		clear_bit(NETLINK_S_CONGESTED, &nlk->state);
	if (!test_bit(NETLINK_S_CONGESTED, &nlk->state))
		wake_up_interruptible(&nlk->wait);
#ifdef CONFIG_NETLINK_MMAP
static bool netlink_rx_is_mmaped(struct sock *sk)
	return nlk_sk(sk)->rx_ring.pg_vec != NULL;

static bool netlink_tx_is_mmaped(struct sock *sk)
	return nlk_sk(sk)->tx_ring.pg_vec != NULL;

static __pure struct page *pgvec_to_page(const void *addr)
	if (is_vmalloc_addr(addr))
		return vmalloc_to_page(addr);
	else
		return virt_to_page(addr);

static void free_pg_vec(void **pg_vec, unsigned int order, unsigned int len)
	for (i = 0; i < len; i++) {
		if (pg_vec[i] != NULL) {
			if (is_vmalloc_addr(pg_vec[i]))
				vfree(pg_vec[i]);
			else
				free_pages((unsigned long)pg_vec[i], order);
static void *alloc_one_pg_vec_page(unsigned long order)
	gfp_t gfp_flags = GFP_KERNEL | __GFP_COMP | __GFP_ZERO |
			  __GFP_NOWARN | __GFP_NORETRY;

	buffer = (void *)__get_free_pages(gfp_flags, order);

	buffer = vzalloc((1 << order) * PAGE_SIZE);

	gfp_flags &= ~__GFP_NORETRY;
	return (void *)__get_free_pages(gfp_flags, order);

static void **alloc_pg_vec(struct netlink_sock *nlk,
			   struct nl_mmap_req *req, unsigned int order)
	unsigned int block_nr = req->nm_block_nr;

	pg_vec = kcalloc(block_nr, sizeof(void *), GFP_KERNEL);

	for (i = 0; i < block_nr; i++) {
		pg_vec[i] = alloc_one_pg_vec_page(order);
		if (pg_vec[i] == NULL)

	free_pg_vec(pg_vec, order, block_nr);
static void
__netlink_set_ring(struct sock *sk, struct nl_mmap_req *req, bool tx_ring, void **pg_vec,
		   unsigned int order)
	struct netlink_sock *nlk = nlk_sk(sk);
	struct sk_buff_head *queue;
	struct netlink_ring *ring;

	queue = tx_ring ? &sk->sk_write_queue : &sk->sk_receive_queue;
	ring = tx_ring ? &nlk->tx_ring : &nlk->rx_ring;

	spin_lock_bh(&queue->lock);

	ring->frame_max = req->nm_frame_nr - 1;
	ring->frame_size = req->nm_frame_size;
	ring->pg_vec_pages = req->nm_block_size / PAGE_SIZE;

	swap(ring->pg_vec_len, req->nm_block_nr);
	swap(ring->pg_vec_order, order);
	swap(ring->pg_vec, pg_vec);

	__skb_queue_purge(queue);
	spin_unlock_bh(&queue->lock);

	WARN_ON(atomic_read(&nlk->mapped));

	free_pg_vec(pg_vec, order, req->nm_block_nr);
static int netlink_set_ring(struct sock *sk, struct nl_mmap_req *req,
			    bool tx_ring)
	struct netlink_sock *nlk = nlk_sk(sk);
	struct netlink_ring *ring;
	void **pg_vec = NULL;
	unsigned int order = 0;

	ring = tx_ring ? &nlk->tx_ring : &nlk->rx_ring;

	if (atomic_read(&nlk->mapped))
		return -EBUSY;
	if (atomic_read(&ring->pending))
		return -EBUSY;

	if (req->nm_block_nr) {
		if (ring->pg_vec != NULL)
			return -EBUSY;

		if ((int)req->nm_block_size <= 0)
			return -EINVAL;
		if (!PAGE_ALIGNED(req->nm_block_size))
			return -EINVAL;
		if (req->nm_frame_size < NL_MMAP_HDRLEN)
			return -EINVAL;
		if (!IS_ALIGNED(req->nm_frame_size, NL_MMAP_MSG_ALIGNMENT))
			return -EINVAL;

		ring->frames_per_block = req->nm_block_size /
					 req->nm_frame_size;
		if (ring->frames_per_block == 0)
			return -EINVAL;
		if (ring->frames_per_block * req->nm_block_nr !=
		    req->nm_frame_nr)
			return -EINVAL;

		order = get_order(req->nm_block_size);
		pg_vec = alloc_pg_vec(nlk, req, order);

		if (req->nm_frame_nr)
			return -EINVAL;

	mutex_lock(&nlk->pg_vec_lock);
	if (atomic_read(&nlk->mapped) == 0) {
		__netlink_set_ring(sk, req, tx_ring, pg_vec, order);
		mutex_unlock(&nlk->pg_vec_lock);
		return 0;
	}

	mutex_unlock(&nlk->pg_vec_lock);

	free_pg_vec(pg_vec, order, req->nm_block_nr);
static void netlink_mm_open(struct vm_area_struct *vma)
	struct file *file = vma->vm_file;
	struct socket *sock = file->private_data;
	struct sock *sk = sock->sk;

	if (sk)
		atomic_inc(&nlk_sk(sk)->mapped);

static void netlink_mm_close(struct vm_area_struct *vma)
	struct file *file = vma->vm_file;
	struct socket *sock = file->private_data;
	struct sock *sk = sock->sk;

	if (sk)
		atomic_dec(&nlk_sk(sk)->mapped);

static const struct vm_operations_struct netlink_mmap_ops = {
	.open	= netlink_mm_open,
	.close	= netlink_mm_close,
};
static int netlink_mmap(struct file *file, struct socket *sock,
			struct vm_area_struct *vma)
	struct sock *sk = sock->sk;
	struct netlink_sock *nlk = nlk_sk(sk);
	struct netlink_ring *ring;
	unsigned long start, size, expected;

	mutex_lock(&nlk->pg_vec_lock);

	for (ring = &nlk->rx_ring; ring <= &nlk->tx_ring; ring++) {
		if (ring->pg_vec == NULL)
			continue;
		expected += ring->pg_vec_len * ring->pg_vec_pages * PAGE_SIZE;
	}

	size = vma->vm_end - vma->vm_start;
	if (size != expected)
		goto out;

	start = vma->vm_start;
	for (ring = &nlk->rx_ring; ring <= &nlk->tx_ring; ring++) {
		if (ring->pg_vec == NULL)
			continue;

		for (i = 0; i < ring->pg_vec_len; i++) {
			void *kaddr = ring->pg_vec[i];

			for (pg_num = 0; pg_num < ring->pg_vec_pages; pg_num++) {
				page = pgvec_to_page(kaddr);
				err = vm_insert_page(vma, start, page);

	atomic_inc(&nlk->mapped);
	vma->vm_ops = &netlink_mmap_ops;
out:
	mutex_unlock(&nlk->pg_vec_lock);
static void netlink_frame_flush_dcache(const struct nl_mmap_hdr *hdr, unsigned int nm_len)
#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1
	struct page *p_start, *p_end;

	/* First page is flushed through netlink_{get,set}_status */
	p_start = pgvec_to_page((void *)hdr + PAGE_SIZE);
	p_end	= pgvec_to_page((void *)hdr + NL_MMAP_HDRLEN + nm_len - 1);
	while (p_start <= p_end) {
		flush_dcache_page(p_start);
		p_start++;
	}
#endif

static enum nl_mmap_status netlink_get_status(const struct nl_mmap_hdr *hdr)
	flush_dcache_page(pgvec_to_page(hdr));
	return hdr->nm_status;

static void netlink_set_status(struct nl_mmap_hdr *hdr,
			       enum nl_mmap_status status)
	hdr->nm_status = status;
	flush_dcache_page(pgvec_to_page(hdr));
static struct nl_mmap_hdr *
__netlink_lookup_frame(const struct netlink_ring *ring, unsigned int pos)
	unsigned int pg_vec_pos, frame_off;

	pg_vec_pos = pos / ring->frames_per_block;
	frame_off  = pos % ring->frames_per_block;

	return ring->pg_vec[pg_vec_pos] + (frame_off * ring->frame_size);

static struct nl_mmap_hdr *
netlink_lookup_frame(const struct netlink_ring *ring, unsigned int pos,
		     enum nl_mmap_status status)
	struct nl_mmap_hdr *hdr;

	hdr = __netlink_lookup_frame(ring, pos);
	if (netlink_get_status(hdr) != status)
		return NULL;

	return hdr;

static struct nl_mmap_hdr *
netlink_current_frame(const struct netlink_ring *ring,
		      enum nl_mmap_status status)
	return netlink_lookup_frame(ring, ring->head, status);

static void netlink_increment_head(struct netlink_ring *ring)
	ring->head = ring->head != ring->frame_max ? ring->head + 1 : 0;

static void netlink_forward_ring(struct netlink_ring *ring)
	unsigned int head = ring->head;
	const struct nl_mmap_hdr *hdr;

	do {
		hdr = __netlink_lookup_frame(ring, ring->head);
		if (hdr->nm_status == NL_MMAP_STATUS_UNUSED)
			break;
		if (hdr->nm_status != NL_MMAP_STATUS_SKIP)
			break;
		netlink_increment_head(ring);
	} while (ring->head != head);

static bool netlink_has_valid_frame(struct netlink_ring *ring)
	unsigned int head = ring->head, pos = head;
	const struct nl_mmap_hdr *hdr;

	do {
		hdr = __netlink_lookup_frame(ring, pos);
		if (hdr->nm_status == NL_MMAP_STATUS_VALID)
			return true;
		pos = pos != 0 ? pos - 1 : ring->frame_max;
	} while (pos != head);
static bool netlink_dump_space(struct netlink_sock *nlk)
	struct netlink_ring *ring = &nlk->rx_ring;
	struct nl_mmap_hdr *hdr;
	unsigned int n;

	hdr = netlink_current_frame(ring, NL_MMAP_STATUS_UNUSED);
	if (hdr == NULL)
		return false;

	n = ring->head + ring->frame_max / 2;
	if (n > ring->frame_max)
		n -= ring->frame_max;

	hdr = __netlink_lookup_frame(ring, n);

	return hdr->nm_status == NL_MMAP_STATUS_UNUSED;
static unsigned int netlink_poll(struct file *file, struct socket *sock,
				 poll_table *wait)
	struct sock *sk = sock->sk;
	struct netlink_sock *nlk = nlk_sk(sk);
	unsigned int mask;
	int err;

	if (nlk->rx_ring.pg_vec != NULL) {
		/* Memory mapped sockets don't call recvmsg(), so flow control
		 * for dumps is performed here. A dump is allowed to continue
		 * if at least half the ring is unused.
		 */
		while (nlk->cb_running && netlink_dump_space(nlk)) {
			err = netlink_dump(sk);
			if (err < 0) {
				sk->sk_err = -err;
				sk->sk_error_report(sk);
				break;
			}
		}
		netlink_rcv_wake(sk);
	}

	mask = datagram_poll(file, sock, wait);

	/* We could already have received frames in the normal receive
	 * queue, that will show up as NL_MMAP_STATUS_COPY in the ring,
	 * so if mask contains pollin/etc already, there's no point
	 * walking the ring.
	 */
	if ((mask & (POLLIN | POLLRDNORM)) != (POLLIN | POLLRDNORM)) {
		spin_lock_bh(&sk->sk_receive_queue.lock);
		if (nlk->rx_ring.pg_vec) {
			if (netlink_has_valid_frame(&nlk->rx_ring))
				mask |= POLLIN | POLLRDNORM;
		}
		spin_unlock_bh(&sk->sk_receive_queue.lock);
	}

	spin_lock_bh(&sk->sk_write_queue.lock);
	if (nlk->tx_ring.pg_vec) {
		if (netlink_current_frame(&nlk->tx_ring, NL_MMAP_STATUS_UNUSED))
			mask |= POLLOUT | POLLWRNORM;
	}
	spin_unlock_bh(&sk->sk_write_queue.lock);
static struct nl_mmap_hdr *netlink_mmap_hdr(struct sk_buff *skb)
	return (struct nl_mmap_hdr *)(skb->head - NL_MMAP_HDRLEN);

static void netlink_ring_setup_skb(struct sk_buff *skb, struct sock *sk,
				   struct netlink_ring *ring,
				   struct nl_mmap_hdr *hdr)
	unsigned int size;
	void *data;

	size = ring->frame_size - NL_MMAP_HDRLEN;
	data = (void *)hdr + NL_MMAP_HDRLEN;

	skb->head = data;
	skb->data = data;
	skb_reset_tail_pointer(skb);
	skb->end = skb->tail + size;
	skb->len = 0;

	skb->destructor = netlink_skb_destructor;
	NETLINK_CB(skb).flags |= NETLINK_SKB_MMAPED;
	NETLINK_CB(skb).sk = sk;
static int netlink_mmap_sendmsg(struct sock *sk, struct msghdr *msg,
				u32 dst_portid, u32 dst_group,
				struct scm_cookie *scm)
	struct netlink_sock *nlk = nlk_sk(sk);
	struct netlink_ring *ring;
	struct nl_mmap_hdr *hdr;
	int err = 0, len = 0;

	mutex_lock(&nlk->pg_vec_lock);

	ring   = &nlk->tx_ring;
	maxlen = ring->frame_size - NL_MMAP_HDRLEN;

	do {
		hdr = netlink_current_frame(ring, NL_MMAP_STATUS_VALID);
		if (hdr == NULL) {
			if (!(msg->msg_flags & MSG_DONTWAIT) &&
			    atomic_read(&nlk->tx_ring.pending))
				schedule();
			continue;
		}

		nm_len = ACCESS_ONCE(hdr->nm_len);
		if (nm_len > maxlen) {

		netlink_frame_flush_dcache(hdr, nm_len);

		skb = alloc_skb(nm_len, GFP_KERNEL);

		__skb_put(skb, nm_len);
		memcpy(skb->data, (void *)hdr + NL_MMAP_HDRLEN, nm_len);
		netlink_set_status(hdr, NL_MMAP_STATUS_UNUSED);

		netlink_increment_head(ring);

		NETLINK_CB(skb).portid	  = nlk->portid;
		NETLINK_CB(skb).dst_group = dst_group;
		NETLINK_CB(skb).creds	  = scm->creds;

		err = security_netlink_send(sk, skb);

		if (unlikely(dst_group)) {
			atomic_inc(&skb->users);
			netlink_broadcast(sk, skb, dst_portid, dst_group,
					  GFP_KERNEL);
		} else {
			err = netlink_unicast(sk, skb, dst_portid,
					      msg->msg_flags & MSG_DONTWAIT);
		}
	} while (hdr != NULL ||
		 (!(msg->msg_flags & MSG_DONTWAIT) &&
		  atomic_read(&nlk->tx_ring.pending)));

out:
	mutex_unlock(&nlk->pg_vec_lock);
static void netlink_queue_mmaped_skb(struct sock *sk, struct sk_buff *skb)
	struct nl_mmap_hdr *hdr;

	hdr = netlink_mmap_hdr(skb);
	hdr->nm_len	= skb->len;
	hdr->nm_group	= NETLINK_CB(skb).dst_group;
	hdr->nm_pid	= NETLINK_CB(skb).creds.pid;
	hdr->nm_uid	= from_kuid(sk_user_ns(sk), NETLINK_CB(skb).creds.uid);
	hdr->nm_gid	= from_kgid(sk_user_ns(sk), NETLINK_CB(skb).creds.gid);
	netlink_frame_flush_dcache(hdr, hdr->nm_len);
	netlink_set_status(hdr, NL_MMAP_STATUS_VALID);

	NETLINK_CB(skb).flags |= NETLINK_SKB_DELIVERED;

static void netlink_ring_set_copied(struct sock *sk, struct sk_buff *skb)
	struct netlink_sock *nlk = nlk_sk(sk);
	struct netlink_ring *ring = &nlk->rx_ring;
	struct nl_mmap_hdr *hdr;

	spin_lock_bh(&sk->sk_receive_queue.lock);
	hdr = netlink_current_frame(ring, NL_MMAP_STATUS_UNUSED);
	if (hdr == NULL) {
		spin_unlock_bh(&sk->sk_receive_queue.lock);
		kfree_skb(skb);
		netlink_overrun(sk);
		return;
	}

	netlink_increment_head(ring);
	__skb_queue_tail(&sk->sk_receive_queue, skb);
	spin_unlock_bh(&sk->sk_receive_queue.lock);

	hdr->nm_len	= skb->len;
	hdr->nm_group	= NETLINK_CB(skb).dst_group;
	hdr->nm_pid	= NETLINK_CB(skb).creds.pid;
	hdr->nm_uid	= from_kuid(sk_user_ns(sk), NETLINK_CB(skb).creds.uid);
	hdr->nm_gid	= from_kgid(sk_user_ns(sk), NETLINK_CB(skb).creds.gid);
	netlink_set_status(hdr, NL_MMAP_STATUS_COPY);
#else /* CONFIG_NETLINK_MMAP */
#define netlink_rx_is_mmaped(sk)	false
#define netlink_tx_is_mmaped(sk)	false
#define netlink_mmap			sock_no_mmap
#define netlink_poll			datagram_poll
#define netlink_mmap_sendmsg(sk, msg, dst_portid, dst_group, scm)	0
#endif /* CONFIG_NETLINK_MMAP */
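
/* Illustrative sketch (userspace side) of driving the rings above, under
 * the assumption that the kernel was built with CONFIG_NETLINK_MMAP; the
 * sizes below are arbitrary example values, not requirements:
 *
 *	#include <sys/mman.h>
 *	#include <sys/socket.h>
 *	#include <linux/netlink.h>
 *
 *	struct nl_mmap_req req = {
 *		.nm_block_size	= 4096,	// page aligned, see netlink_set_ring()
 *		.nm_block_nr	= 64,
 *		.nm_frame_size	= 2048,	// NL_MMAP_MSG_ALIGNMENT aligned
 *		.nm_frame_nr	= 64 * (4096 / 2048),
 *	};
 *	setsockopt(fd, SOL_NETLINK, NETLINK_RX_RING, &req, sizeof(req));
 *	setsockopt(fd, SOL_NETLINK, NETLINK_TX_RING, &req, sizeof(req));
 *	void *ring = mmap(NULL, 2 * 64 * 4096, PROT_READ | PROT_WRITE,
 *			  MAP_SHARED, fd, 0);
 *
 * netlink_mmap() above validates that the mmap() length matches the sum
 * of both configured rings.
 */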
static void netlink_skb_destructor(struct sk_buff *skb)
#ifdef CONFIG_NETLINK_MMAP
	struct nl_mmap_hdr *hdr;
	struct netlink_ring *ring;
	struct sock *sk;

	/* If a packet from the kernel to userspace was freed because of an
	 * error without being delivered to userspace, the kernel must reset
	 * the status. In the direction userspace to kernel, the status is
	 * always reset here after the packet was processed and freed.
	 */
	if (netlink_skb_is_mmaped(skb)) {
		hdr = netlink_mmap_hdr(skb);
		sk = NETLINK_CB(skb).sk;

		if (NETLINK_CB(skb).flags & NETLINK_SKB_TX) {
			netlink_set_status(hdr, NL_MMAP_STATUS_UNUSED);
			ring = &nlk_sk(sk)->tx_ring;
		} else {
			if (!(NETLINK_CB(skb).flags & NETLINK_SKB_DELIVERED)) {
				netlink_set_status(hdr, NL_MMAP_STATUS_VALID);
			}
			ring = &nlk_sk(sk)->rx_ring;
		}

		WARN_ON(atomic_read(&ring->pending) == 0);
		atomic_dec(&ring->pending);
	}
#endif
	if (is_vmalloc_addr(skb->head)) {
		if (!skb->cloned ||
		    !atomic_dec_return(&(skb_shinfo(skb)->dataref)))
			vfree(skb->head);
static void netlink_skb_set_owner_r(struct sk_buff *skb, struct sock *sk)
	WARN_ON(skb->sk != NULL);
	skb->sk = sk;
	skb->destructor = netlink_skb_destructor;
	atomic_add(skb->truesize, &sk->sk_rmem_alloc);
	sk_mem_charge(sk, skb->truesize);
static void netlink_sock_destruct(struct sock *sk)
	struct netlink_sock *nlk = nlk_sk(sk);

	if (nlk->cb_running) {
		nlk->cb.done(&nlk->cb);

		module_put(nlk->cb.module);
		kfree_skb(nlk->cb.skb);
	}

	skb_queue_purge(&sk->sk_receive_queue);
#ifdef CONFIG_NETLINK_MMAP
		struct nl_mmap_req req;

		memset(&req, 0, sizeof(req));
		if (nlk->rx_ring.pg_vec)
			__netlink_set_ring(sk, &req, false, NULL, 0);
		memset(&req, 0, sizeof(req));
		if (nlk->tx_ring.pg_vec)
			__netlink_set_ring(sk, &req, true, NULL, 0);
#endif /* CONFIG_NETLINK_MMAP */

	if (!sock_flag(sk, SOCK_DEAD)) {
		printk(KERN_ERR "Freeing alive netlink socket %p\n", sk);
		return;
	}

	WARN_ON(atomic_read(&sk->sk_rmem_alloc));
	WARN_ON(atomic_read(&sk->sk_wmem_alloc));
	WARN_ON(nlk_sk(sk)->groups);
/* This lock without WQ_FLAG_EXCLUSIVE is good on UP and it is _very_ bad on
 * SMP. Look, when several writers sleep and reader wakes them up, all but one
 * immediately hit write lock and grab all the cpus. Exclusive sleep solves
 * this, _but_ remember, it adds useless work on UP machines.
 */
void netlink_table_grab(void)
	__acquires(nl_table_lock)
	write_lock_irq(&nl_table_lock);

	if (atomic_read(&nl_table_users)) {
		DECLARE_WAITQUEUE(wait, current);

		add_wait_queue_exclusive(&nl_table_wait, &wait);
		for (;;) {
			set_current_state(TASK_UNINTERRUPTIBLE);
			if (atomic_read(&nl_table_users) == 0)
				break;
			write_unlock_irq(&nl_table_lock);
			schedule();
			write_lock_irq(&nl_table_lock);
		}

		__set_current_state(TASK_RUNNING);
		remove_wait_queue(&nl_table_wait, &wait);
	}

void netlink_table_ungrab(void)
	__releases(nl_table_lock)
	write_unlock_irq(&nl_table_lock);
	wake_up(&nl_table_wait);
static inline void
netlink_lock_table(void)
	/* read_lock() synchronizes us to netlink_table_grab */

	read_lock(&nl_table_lock);
	atomic_inc(&nl_table_users);
	read_unlock(&nl_table_lock);

static inline void
netlink_unlock_table(void)
	if (atomic_dec_and_test(&nl_table_users))
		wake_up(&nl_table_wait);
struct netlink_compare_arg
{
	possible_net_t pnet;
	u32 portid;
};

/* Doing sizeof directly may yield 4 extra bytes on 64-bit. */
#define netlink_compare_arg_len \
	(offsetof(struct netlink_compare_arg, portid) + sizeof(u32))
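
/* Worked example of why the offsetof form is used: on 64-bit with
 * CONFIG_NET_NS, possible_net_t holds a pointer, so the struct is padded
 * to 16 bytes, while the hash key is only the leading
 * offsetof(struct netlink_compare_arg, portid) + 4 == 12 bytes; hashing
 * sizeof(arg) bytes would mix uninitialized padding into the hash.
 */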
static inline int netlink_compare(struct rhashtable_compare_arg *arg,
				  const void *ptr)
	const struct netlink_compare_arg *x = arg->key;
	const struct netlink_sock *nlk = ptr;

	return nlk->rhash_portid != x->portid ||
	       !net_eq(sock_net(&nlk->sk), read_pnet(&x->pnet));

static void netlink_compare_arg_init(struct netlink_compare_arg *arg,
				     struct net *net, u32 portid)
	memset(arg, 0, sizeof(*arg));
	write_pnet(&arg->pnet, net);
	arg->portid = portid;

static struct sock *__netlink_lookup(struct netlink_table *table, u32 portid,
				     struct net *net)
	struct netlink_compare_arg arg;

	netlink_compare_arg_init(&arg, net, portid);
	return rhashtable_lookup_fast(&table->hash, &arg,
				      netlink_rhashtable_params);

static int __netlink_insert(struct netlink_table *table, struct sock *sk)
	struct netlink_compare_arg arg;

	netlink_compare_arg_init(&arg, sock_net(sk), nlk_sk(sk)->rhash_portid);
	return rhashtable_lookup_insert_key(&table->hash, &arg,
					    &nlk_sk(sk)->node,
					    netlink_rhashtable_params);

static struct sock *netlink_lookup(struct net *net, int protocol, u32 portid)
	struct netlink_table *table = &nl_table[protocol];

	sk = __netlink_lookup(table, portid, net);
static const struct proto_ops netlink_ops;

static void
netlink_update_listeners(struct sock *sk)
	struct netlink_table *tbl = &nl_table[sk->sk_protocol];
	struct listeners *listeners;

	listeners = nl_deref_protected(tbl->listeners);
	if (!listeners)
		return;

	for (i = 0; i < NLGRPLONGS(tbl->groups); i++) {
		mask = 0;
		sk_for_each_bound(sk, &tbl->mc_list) {
			if (i < NLGRPLONGS(nlk_sk(sk)->ngroups))
				mask |= nlk_sk(sk)->groups[i];
		}
		listeners->masks[i] = mask;
	}
	/* this function is only called with the netlink table "grabbed", which
	 * makes sure updates are visible before bind or setsockopt return. */
static int netlink_insert(struct sock *sk, u32 portid)
	struct netlink_table *table = &nl_table[sk->sk_protocol];

	if (nlk_sk(sk)->portid)

	if (BITS_PER_LONG > 32 &&
	    unlikely(atomic_read(&table->hash.nelems) >= UINT_MAX))

	nlk_sk(sk)->rhash_portid = portid;

	err = __netlink_insert(table, sk);
	/* In case the hashtable backend returns with -EBUSY
	 * from here, it must not escape to the caller.
	 */
	if (unlikely(err == -EBUSY))
		err = -EOVERFLOW;

	nlk_sk(sk)->portid = portid;
static void netlink_remove(struct sock *sk)
	struct netlink_table *table;

	table = &nl_table[sk->sk_protocol];
	if (!rhashtable_remove_fast(&table->hash, &nlk_sk(sk)->node,
				    netlink_rhashtable_params)) {
		WARN_ON(atomic_read(&sk->sk_refcnt) == 1);

	netlink_table_grab();
	if (nlk_sk(sk)->subscriptions) {
		__sk_del_bind_node(sk);
		netlink_update_listeners(sk);
	}
	if (sk->sk_protocol == NETLINK_GENERIC)
		atomic_inc(&genl_sk_destructing_cnt);
	netlink_table_ungrab();
static struct proto netlink_proto = {
	.name	  = "NETLINK",
	.owner	  = THIS_MODULE,
	.obj_size = sizeof(struct netlink_sock),
};

static int __netlink_create(struct net *net, struct socket *sock,
			    struct mutex *cb_mutex, int protocol,
			    int kern)
	struct netlink_sock *nlk;

	sock->ops = &netlink_ops;

	sk = sk_alloc(net, PF_NETLINK, GFP_KERNEL, &netlink_proto, kern);

	sock_init_data(sock, sk);

	if (cb_mutex) {
		nlk->cb_mutex = cb_mutex;
	} else {
		nlk->cb_mutex = &nlk->cb_def_mutex;
		mutex_init(nlk->cb_mutex);
	}
	init_waitqueue_head(&nlk->wait);
#ifdef CONFIG_NETLINK_MMAP
	mutex_init(&nlk->pg_vec_lock);
#endif

	sk->sk_destruct = netlink_sock_destruct;
	sk->sk_protocol = protocol;
static int netlink_create(struct net *net, struct socket *sock, int protocol,
			  int kern)
	struct module *module = NULL;
	struct mutex *cb_mutex;
	struct netlink_sock *nlk;
	int (*bind)(struct net *net, int group);
	void (*unbind)(struct net *net, int group);

	sock->state = SS_UNCONNECTED;

	if (sock->type != SOCK_RAW && sock->type != SOCK_DGRAM)
		return -ESOCKTNOSUPPORT;

	if (protocol < 0 || protocol >= MAX_LINKS)
		return -EPROTONOSUPPORT;

	netlink_lock_table();
#ifdef CONFIG_MODULES
	if (!nl_table[protocol].registered) {
		netlink_unlock_table();
		request_module("net-pf-%d-proto-%d", PF_NETLINK, protocol);
		netlink_lock_table();
	}
#endif
	if (nl_table[protocol].registered &&
	    try_module_get(nl_table[protocol].module))
		module = nl_table[protocol].module;
	else
		err = -EPROTONOSUPPORT;
	cb_mutex = nl_table[protocol].cb_mutex;
	bind = nl_table[protocol].bind;
	unbind = nl_table[protocol].unbind;
	netlink_unlock_table();

	err = __netlink_create(net, sock, cb_mutex, protocol, kern);

	sock_prot_inuse_add(net, &netlink_proto, 1);

	nlk = nlk_sk(sock->sk);
	nlk->module = module;
	nlk->netlink_bind = bind;
	nlk->netlink_unbind = unbind;
static void deferred_put_nlk_sk(struct rcu_head *head)
	struct netlink_sock *nlk = container_of(head, struct netlink_sock, rcu);

static int netlink_release(struct socket *sock)
	struct sock *sk = sock->sk;
	struct netlink_sock *nlk;

	/*
	 * OK. Socket is unlinked, any packets that arrive now
	 * will be purged.
	 */

	/* must not acquire netlink_table_lock in any way again before unbind
	 * and notifying genetlink is done as otherwise it might deadlock
	 */
	if (nlk->netlink_unbind) {
		for (i = 0; i < nlk->ngroups; i++)
			if (test_bit(i, nlk->groups))
				nlk->netlink_unbind(sock_net(sk), i + 1);
	}
	if (sk->sk_protocol == NETLINK_GENERIC &&
	    atomic_dec_return(&genl_sk_destructing_cnt) == 0)
		wake_up(&genl_sk_destructing_waitq);

	wake_up_interruptible_all(&nlk->wait);

	skb_queue_purge(&sk->sk_write_queue);

		struct netlink_notify n = {
			.net = sock_net(sk),
			.protocol = sk->sk_protocol,
			.portid = nlk->portid,
		};
		atomic_notifier_call_chain(&netlink_chain,
					   NETLINK_URELEASE, &n);
	module_put(nlk->module);

	if (netlink_is_kernel(sk)) {
		netlink_table_grab();
		BUG_ON(nl_table[sk->sk_protocol].registered == 0);
		if (--nl_table[sk->sk_protocol].registered == 0) {
			struct listeners *old;

			old = nl_deref_protected(nl_table[sk->sk_protocol].listeners);
			RCU_INIT_POINTER(nl_table[sk->sk_protocol].listeners, NULL);
			kfree_rcu(old, rcu);
			nl_table[sk->sk_protocol].module = NULL;
			nl_table[sk->sk_protocol].bind = NULL;
			nl_table[sk->sk_protocol].unbind = NULL;
			nl_table[sk->sk_protocol].flags = 0;
			nl_table[sk->sk_protocol].registered = 0;
		}
		netlink_table_ungrab();
	}

	sock_prot_inuse_add(sock_net(sk), &netlink_proto, -1);
	call_rcu(&nlk->rcu, deferred_put_nlk_sk);
static int netlink_autobind(struct socket *sock)
	struct sock *sk = sock->sk;
	struct net *net = sock_net(sk);
	struct netlink_table *table = &nl_table[sk->sk_protocol];
	s32 portid = task_tgid_vnr(current);

	ok = !__netlink_lookup(table, portid, net);

	/* Bind collision, search negative portid values. */
	if (rover == -4096)
		/* rover will be in range [S32_MIN, -4097] */
		rover = S32_MIN + prandom_u32_max(-4096 - S32_MIN);
	else if (rover >= -4096)
		rover = -4097;

	err = netlink_insert(sk, portid);
	if (err == -EADDRINUSE)
		goto retry;

	/* If 2 threads race to autobind, that is fine. */
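
/* Illustrative sketch (userspace side, not part of this file): autobind is
 * what happens when a socket sends without an explicit bind(), or binds
 * with nl_pid == 0; the kernel then picks the port id, initially the
 * thread group id:
 *
 *	struct sockaddr_nl sa = { .nl_family = AF_NETLINK };  // nl_pid = 0
 *	bind(fd, (struct sockaddr *)&sa, sizeof(sa));
 *	// getsockname() now reports the kernel-chosen nl_pid.
 */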
/**
 * __netlink_ns_capable - General netlink message capability test
 * @nsp: NETLINK_CB of the socket buffer holding a netlink command from userspace.
 * @user_ns: The user namespace of the capability to use
 * @cap: The capability to use
 *
 * Test to see if the opener of the socket we received the message
 * from had the capability @cap when the netlink socket was created
 * and the sender of the message has the capability @cap in the user
 * namespace @user_ns.
 */
bool __netlink_ns_capable(const struct netlink_skb_parms *nsp,
			  struct user_namespace *user_ns, int cap)
	return ((nsp->flags & NETLINK_SKB_DST) ||
		file_ns_capable(nsp->sk->sk_socket->file, user_ns, cap)) &&
	       ns_capable(user_ns, cap);
EXPORT_SYMBOL(__netlink_ns_capable);

/**
 * netlink_ns_capable - General netlink message capability test
 * @skb: socket buffer holding a netlink command from userspace
 * @user_ns: The user namespace of the capability to use
 * @cap: The capability to use
 *
 * Test to see if the opener of the socket we received the message
 * from had the capability @cap when the netlink socket was created
 * and the sender of the message has the capability @cap in the user
 * namespace @user_ns.
 */
bool netlink_ns_capable(const struct sk_buff *skb,
			struct user_namespace *user_ns, int cap)
	return __netlink_ns_capable(&NETLINK_CB(skb), user_ns, cap);
EXPORT_SYMBOL(netlink_ns_capable);
/**
 * netlink_capable - Netlink global message capability test
 * @skb: socket buffer holding a netlink command from userspace
 * @cap: The capability to use
 *
 * Test to see if the opener of the socket we received the message
 * from had the capability @cap when the netlink socket was created
 * and the sender of the message has the capability @cap in all user
 * namespaces.
 */
bool netlink_capable(const struct sk_buff *skb, int cap)
	return netlink_ns_capable(skb, &init_user_ns, cap);
EXPORT_SYMBOL(netlink_capable);

/**
 * netlink_net_capable - Netlink network namespace message capability test
 * @skb: socket buffer holding a netlink command from userspace
 * @cap: The capability to use
 *
 * Test to see if the opener of the socket we received the message
 * from had the capability @cap when the netlink socket was created
 * and the sender of the message has the capability @cap over the
 * network namespace of the socket we received the message from.
 */
bool netlink_net_capable(const struct sk_buff *skb, int cap)
	return netlink_ns_capable(skb, sock_net(skb->sk)->user_ns, cap);
EXPORT_SYMBOL(netlink_net_capable);
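
/* Illustrative sketch: a typical message handler guards privileged
 * commands with one of the tests above; demo_doit() is a hypothetical
 * handler, not part of this file:
 *
 *	static int demo_doit(struct sk_buff *skb, struct nlmsghdr *nlh)
 *	{
 *		if (!netlink_net_capable(skb, CAP_NET_ADMIN))
 *			return -EPERM;
 *		// ... act on the request ...
 *		return 0;
 *	}
 */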
static inline int netlink_allowed(const struct socket *sock, unsigned int flag)
	return (nl_table[sock->sk->sk_protocol].flags & flag) ||
		ns_capable(sock_net(sock->sk)->user_ns, CAP_NET_ADMIN);

static void
netlink_update_subscriptions(struct sock *sk, unsigned int subscriptions)
	struct netlink_sock *nlk = nlk_sk(sk);

	if (nlk->subscriptions && !subscriptions)
		__sk_del_bind_node(sk);
	else if (!nlk->subscriptions && subscriptions)
		sk_add_bind_node(sk, &nl_table[sk->sk_protocol].mc_list);
	nlk->subscriptions = subscriptions;

static int netlink_realloc_groups(struct sock *sk)
	struct netlink_sock *nlk = nlk_sk(sk);
	unsigned int groups;
	unsigned long *new_groups;

	netlink_table_grab();

	groups = nl_table[sk->sk_protocol].groups;
	if (!nl_table[sk->sk_protocol].registered) {

	if (nlk->ngroups >= groups)

	new_groups = krealloc(nlk->groups, NLGRPSZ(groups), GFP_ATOMIC);
	if (new_groups == NULL) {

	memset((char *)new_groups + NLGRPSZ(nlk->ngroups), 0,
	       NLGRPSZ(groups) - NLGRPSZ(nlk->ngroups));

	nlk->groups = new_groups;
	nlk->ngroups = groups;

	netlink_table_ungrab();
static void netlink_undo_bind(int group, long unsigned int groups,
			      struct sock *sk)
	struct netlink_sock *nlk = nlk_sk(sk);

	if (!nlk->netlink_unbind)
		return;

	for (undo = 0; undo < group; undo++)
		if (test_bit(undo, &groups))
			nlk->netlink_unbind(sock_net(sk), undo + 1);
static int netlink_bind(struct socket *sock, struct sockaddr *addr,
			int addr_len)
	struct sock *sk = sock->sk;
	struct net *net = sock_net(sk);
	struct netlink_sock *nlk = nlk_sk(sk);
	struct sockaddr_nl *nladdr = (struct sockaddr_nl *)addr;
	long unsigned int groups = nladdr->nl_groups;

	if (addr_len < sizeof(struct sockaddr_nl))
		return -EINVAL;

	if (nladdr->nl_family != AF_NETLINK)
		return -EINVAL;

	/* Only superuser is allowed to listen to multicasts */
	if (!netlink_allowed(sock, NL_CFG_F_NONROOT_RECV))
		return -EPERM;
	err = netlink_realloc_groups(sk);

	if (nladdr->nl_pid != nlk->portid)
		return -EINVAL;

	if (nlk->netlink_bind && groups) {
		for (group = 0; group < nlk->ngroups; group++) {
			if (!test_bit(group, &groups))
				continue;
			err = nlk->netlink_bind(net, group + 1);
			if (!err)
				continue;
			netlink_undo_bind(group, groups, sk);
			return err;
		}
	}

	err = nladdr->nl_pid ?
		netlink_insert(sk, nladdr->nl_pid) :
		netlink_autobind(sock);
	if (err) {
		netlink_undo_bind(nlk->ngroups, groups, sk);
		return err;
	}

	if (!groups && (nlk->groups == NULL || !(u32)nlk->groups[0]))
		return 0;

	netlink_table_grab();
	netlink_update_subscriptions(sk, nlk->subscriptions +
					 hweight32(groups) -
					 hweight32(nlk->groups[0]));
	nlk->groups[0] = (nlk->groups[0] & ~0xffffffffUL) | groups;
	netlink_update_listeners(sk);
	netlink_table_ungrab();
static int netlink_connect(struct socket *sock, struct sockaddr *addr,
			   int alen, int flags)
	struct sock *sk = sock->sk;
	struct netlink_sock *nlk = nlk_sk(sk);
	struct sockaddr_nl *nladdr = (struct sockaddr_nl *)addr;

	if (alen < sizeof(addr->sa_family))
		return -EINVAL;

	if (addr->sa_family == AF_UNSPEC) {
		sk->sk_state	= NETLINK_UNCONNECTED;
		nlk->dst_portid	= 0;
		return 0;
	}
	if (addr->sa_family != AF_NETLINK)
		return -EINVAL;

	if ((nladdr->nl_groups || nladdr->nl_pid) &&
	    !netlink_allowed(sock, NL_CFG_F_NONROOT_SEND))
		return -EPERM;

	err = netlink_autobind(sock);

	sk->sk_state	= NETLINK_CONNECTED;
	nlk->dst_portid = nladdr->nl_pid;
	nlk->dst_group	= ffs(nladdr->nl_groups);

	return 0;

static int netlink_getname(struct socket *sock, struct sockaddr *addr,
			   int *addr_len, int peer)
	struct sock *sk = sock->sk;
	struct netlink_sock *nlk = nlk_sk(sk);
	DECLARE_SOCKADDR(struct sockaddr_nl *, nladdr, addr);

	nladdr->nl_family = AF_NETLINK;
	*addr_len = sizeof(*nladdr);

	if (peer) {
		nladdr->nl_pid	  = nlk->dst_portid;
		nladdr->nl_groups = netlink_group_mask(nlk->dst_group);
	} else {
		nladdr->nl_pid	  = nlk->portid;
		nladdr->nl_groups = nlk->groups ? nlk->groups[0] : 0;
	}
static struct sock *netlink_getsockbyportid(struct sock *ssk, u32 portid)
	struct netlink_sock *nlk;

	sock = netlink_lookup(sock_net(ssk), ssk->sk_protocol, portid);
	if (!sock)
		return ERR_PTR(-ECONNREFUSED);

	/* Don't bother queuing skb if kernel socket has no input function */
	nlk = nlk_sk(sock);
	if (sock->sk_state == NETLINK_CONNECTED &&
	    nlk->dst_portid != nlk_sk(ssk)->portid) {
		sock_put(sock);
		return ERR_PTR(-ECONNREFUSED);
	}

struct sock *netlink_getsockbyfilp(struct file *filp)
	struct inode *inode = file_inode(filp);

	if (!S_ISSOCK(inode->i_mode))
		return ERR_PTR(-ENOTSOCK);

	sock = SOCKET_I(inode)->sk;
	if (sock->sk_family != AF_NETLINK)
		return ERR_PTR(-EINVAL);
static struct sk_buff *netlink_alloc_large_skb(unsigned int size,
					       int broadcast)
	struct sk_buff *skb;
	void *data;

	if (size <= NLMSG_GOODSIZE || broadcast)
		return alloc_skb(size, GFP_KERNEL);

	size = SKB_DATA_ALIGN(size) +
	       SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

	data = vmalloc(size);

	skb = __build_skb(data, size);

	skb->destructor = netlink_skb_destructor;
/*
 * Attach a skb to a netlink socket.
 * The caller must hold a reference to the destination socket. On error, the
 * reference is dropped. The skb is not sent to the destination, just all
 * error checks are performed and memory in the queue is reserved.
 * < 0: error. skb freed, reference to sock dropped.
 * 1: repeat lookup - reference dropped while waiting for socket memory.
 */
int netlink_attachskb(struct sock *sk, struct sk_buff *skb,
		      long *timeo, struct sock *ssk)
	struct netlink_sock *nlk;

	if ((atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf ||
	     test_bit(NETLINK_S_CONGESTED, &nlk->state)) &&
	    !netlink_skb_is_mmaped(skb)) {
		DECLARE_WAITQUEUE(wait, current);

		if (!ssk || netlink_is_kernel(ssk))
			netlink_overrun(sk);

		__set_current_state(TASK_INTERRUPTIBLE);
		add_wait_queue(&nlk->wait, &wait);

		if ((atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf ||
		     test_bit(NETLINK_S_CONGESTED, &nlk->state)) &&
		    !sock_flag(sk, SOCK_DEAD))
			*timeo = schedule_timeout(*timeo);

		__set_current_state(TASK_RUNNING);
		remove_wait_queue(&nlk->wait, &wait);

		if (signal_pending(current)) {
			return sock_intr_errno(*timeo);

	netlink_skb_set_owner_r(skb, sk);
static int __netlink_sendskb(struct sock *sk, struct sk_buff *skb)
	netlink_deliver_tap(skb);

#ifdef CONFIG_NETLINK_MMAP
	if (netlink_skb_is_mmaped(skb))
		netlink_queue_mmaped_skb(sk, skb);
	else if (netlink_rx_is_mmaped(sk))
		netlink_ring_set_copied(sk, skb);
	else
#endif /* CONFIG_NETLINK_MMAP */
		skb_queue_tail(&sk->sk_receive_queue, skb);
	sk->sk_data_ready(sk);

int netlink_sendskb(struct sock *sk, struct sk_buff *skb)
	int len = __netlink_sendskb(sk, skb);

void netlink_detachskb(struct sock *sk, struct sk_buff *skb)
static struct sk_buff *netlink_trim(struct sk_buff *skb, gfp_t allocation)
	WARN_ON(skb->sk != NULL);
	if (netlink_skb_is_mmaped(skb))
		return skb;

	delta = skb->end - skb->tail;
	if (is_vmalloc_addr(skb->head) || delta * 2 < skb->truesize)
		return skb;

	if (skb_shared(skb)) {
		struct sk_buff *nskb = skb_clone(skb, allocation);

	if (!pskb_expand_head(skb, 0, -delta, allocation))
		skb->truesize -= delta;
static int netlink_unicast_kernel(struct sock *sk, struct sk_buff *skb,
				  struct sock *ssk)
	struct netlink_sock *nlk = nlk_sk(sk);

	ret = -ECONNREFUSED;
	if (nlk->netlink_rcv != NULL) {
		netlink_skb_set_owner_r(skb, sk);
		NETLINK_CB(skb).sk = ssk;
		netlink_deliver_tap_kernel(sk, ssk, skb);
		nlk->netlink_rcv(skb);

int netlink_unicast(struct sock *ssk, struct sk_buff *skb,
		    u32 portid, int nonblock)
	skb = netlink_trim(skb, gfp_any());

	timeo = sock_sndtimeo(ssk, nonblock);

	sk = netlink_getsockbyportid(ssk, portid);

	if (netlink_is_kernel(sk))
		return netlink_unicast_kernel(sk, skb, ssk);

	if (sk_filter(sk, skb)) {

	err = netlink_attachskb(sk, skb, &timeo, ssk);

	return netlink_sendskb(sk, skb);
EXPORT_SYMBOL(netlink_unicast);
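
/* Illustrative sketch (kernel side): replying to a request with
 * netlink_unicast(); DEMO_MSG_TYPE is a hypothetical message type and
 * error handling is trimmed:
 *
 *	struct sk_buff *reply = nlmsg_new(payload_len, GFP_KERNEL);
 *	struct nlmsghdr *nlh = nlmsg_put(reply, 0, req_nlh->nlmsg_seq,
 *					 DEMO_MSG_TYPE, payload_len, 0);
 *	memcpy(nlmsg_data(nlh), payload, payload_len);
 *	netlink_unicast(kernel_sk, reply, NETLINK_CB(req_skb).portid,
 *			MSG_DONTWAIT);
 *
 * On success the skb is consumed; on failure netlink_unicast() frees it,
 * so the caller must not kfree_skb() afterwards.
 */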
struct sk_buff *__netlink_alloc_skb(struct sock *ssk, unsigned int size,
				    unsigned int ldiff, u32 dst_portid,
				    gfp_t gfp_mask)
#ifdef CONFIG_NETLINK_MMAP
	unsigned int maxlen, linear_size;
	struct sock *sk = NULL;
	struct sk_buff *skb;
	struct netlink_ring *ring;
	struct nl_mmap_hdr *hdr;

	sk = netlink_getsockbyportid(ssk, dst_portid);

	ring = &nlk_sk(sk)->rx_ring;
	/* fast-path without atomic ops for common case: non-mmaped receiver */
	if (ring->pg_vec == NULL)

	/* We need to account the full linear size needed as a ring
	 * slot cannot have non-linear parts.
	 */
	linear_size = size + ldiff;
	if (ring->frame_size - NL_MMAP_HDRLEN < linear_size)

	skb = alloc_skb_head(gfp_mask);

	spin_lock_bh(&sk->sk_receive_queue.lock);
	/* check again under lock */
	if (ring->pg_vec == NULL)

	/* check again under lock */
	maxlen = ring->frame_size - NL_MMAP_HDRLEN;
	if (maxlen < linear_size)

	netlink_forward_ring(ring);
	hdr = netlink_current_frame(ring, NL_MMAP_STATUS_UNUSED);

	netlink_ring_setup_skb(skb, sk, ring, hdr);
	netlink_set_status(hdr, NL_MMAP_STATUS_RESERVED);
	atomic_inc(&ring->pending);
	netlink_increment_head(ring);

	spin_unlock_bh(&sk->sk_receive_queue.lock);

	spin_unlock_bh(&sk->sk_receive_queue.lock);
	netlink_overrun(sk);

	spin_unlock_bh(&sk->sk_receive_queue.lock);
#endif /* CONFIG_NETLINK_MMAP */

	return alloc_skb(size, gfp_mask);
EXPORT_SYMBOL_GPL(__netlink_alloc_skb);
int netlink_has_listeners(struct sock *sk, unsigned int group)
	struct listeners *listeners;

	BUG_ON(!netlink_is_kernel(sk));

	listeners = rcu_dereference(nl_table[sk->sk_protocol].listeners);

	if (listeners && group - 1 < nl_table[sk->sk_protocol].groups)
		res = test_bit(group - 1, listeners->masks);

EXPORT_SYMBOL_GPL(netlink_has_listeners);

static int netlink_broadcast_deliver(struct sock *sk, struct sk_buff *skb)
	struct netlink_sock *nlk = nlk_sk(sk);

	if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf &&
	    !test_bit(NETLINK_S_CONGESTED, &nlk->state)) {
		netlink_skb_set_owner_r(skb, sk);
		__netlink_sendskb(sk, skb);
		return atomic_read(&sk->sk_rmem_alloc) > (sk->sk_rcvbuf >> 1);
	}
	return -1;
struct netlink_broadcast_data {
	struct sock *exclude_sk;
	struct net *net;
	u32 portid;
	u32 group;
	int failure;
	int delivery_failure;
	int congested;
	int delivered;
	gfp_t allocation;
	struct sk_buff *skb, *skb2;
	int (*tx_filter)(struct sock *dsk, struct sk_buff *skb, void *data);
	void *tx_data;
};

static void do_one_broadcast(struct sock *sk,
			     struct netlink_broadcast_data *p)
	struct netlink_sock *nlk = nlk_sk(sk);

	if (p->exclude_sk == sk)
		return;

	if (nlk->portid == p->portid || p->group - 1 >= nlk->ngroups ||
	    !test_bit(p->group - 1, nlk->groups))
		return;

	if (!net_eq(sock_net(sk), p->net)) {
		if (!(nlk->flags & NETLINK_F_LISTEN_ALL_NSID))
			return;

		if (!peernet_has_id(sock_net(sk), p->net))
			return;

		if (!file_ns_capable(sk->sk_socket->file, p->net->user_ns,
				     CAP_NET_BROADCAST))
			return;
	}

	if (p->failure) {
		netlink_overrun(sk);
		return;
	}

	if (p->skb2 == NULL) {
		if (skb_shared(p->skb)) {
			p->skb2 = skb_clone(p->skb, p->allocation);
		} else {
			p->skb2 = skb_get(p->skb);
			/*
			 * skb ownership may have been set when
			 * delivered to a previous socket.
			 */
			skb_orphan(p->skb2);
		}
	}
	if (p->skb2 == NULL) {
		netlink_overrun(sk);
		/* Clone failed. Notify ALL listeners. */
		p->failure = 1;
		if (nlk->flags & NETLINK_F_BROADCAST_SEND_ERROR)
			p->delivery_failure = 1;
	}
	if (p->tx_filter && p->tx_filter(sk, p->skb2, p->tx_data)) {

	if (sk_filter(sk, p->skb2)) {

	NETLINK_CB(p->skb2).nsid = peernet2id(sock_net(sk), p->net);
	NETLINK_CB(p->skb2).nsid_is_set = true;
	val = netlink_broadcast_deliver(sk, p->skb2);
	if (val < 0) {
		netlink_overrun(sk);
		if (nlk->flags & NETLINK_F_BROADCAST_SEND_ERROR)
			p->delivery_failure = 1;
	} else {
		p->congested |= val;
	}
int netlink_broadcast_filtered(struct sock *ssk, struct sk_buff *skb, u32 portid,
			       u32 group, gfp_t allocation,
			       int (*filter)(struct sock *dsk, struct sk_buff *skb, void *data),
			       void *filter_data)
	struct net *net = sock_net(ssk);
	struct netlink_broadcast_data info;

	skb = netlink_trim(skb, allocation);

	info.exclude_sk = ssk;
	info.portid = portid;
	info.delivery_failure = 0;
	info.allocation = allocation;
	info.tx_filter = filter;
	info.tx_data = filter_data;

	/* While we sleep in clone, do not allow to change socket list */

	netlink_lock_table();

	sk_for_each_bound(sk, &nl_table[ssk->sk_protocol].mc_list)
		do_one_broadcast(sk, &info);

	netlink_unlock_table();

	if (info.delivery_failure) {
		kfree_skb(info.skb2);
		return -ENOBUFS;
	}
	consume_skb(info.skb2);

	if (info.delivered) {
		if (info.congested && (allocation & __GFP_WAIT))
			yield();
		return 0;
	}
EXPORT_SYMBOL(netlink_broadcast_filtered);

int netlink_broadcast(struct sock *ssk, struct sk_buff *skb, u32 portid,
		      u32 group, gfp_t allocation)
	return netlink_broadcast_filtered(ssk, skb, portid, group, allocation,
					  NULL, NULL);
EXPORT_SYMBOL(netlink_broadcast);
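
/* Illustrative sketch (kernel side): notifying a multicast group, with
 * DEMO_GRP as a hypothetical group number; portid 0 means "don't skip
 * any listener":
 *
 *	if (netlink_has_listeners(kernel_sk, DEMO_GRP))
 *		netlink_broadcast(kernel_sk, skb, 0, DEMO_GRP, GFP_KERNEL);
 *
 * The return value is -ESRCH when nobody listens and -ENOBUFS when at
 * least one delivery failed for a listener with NETLINK_BROADCAST_ERROR
 * set.
 */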
struct netlink_set_err_data {
	struct sock *exclude_sk;
	u32 portid;
	u32 group;
	int code;
};

static int do_one_set_err(struct sock *sk, struct netlink_set_err_data *p)
	struct netlink_sock *nlk = nlk_sk(sk);

	if (sk == p->exclude_sk)

	if (!net_eq(sock_net(sk), sock_net(p->exclude_sk)))

	if (nlk->portid == p->portid || p->group - 1 >= nlk->ngroups ||
	    !test_bit(p->group - 1, nlk->groups))

	if (p->code == ENOBUFS && nlk->flags & NETLINK_F_RECV_NO_ENOBUFS) {

	sk->sk_err = p->code;
	sk->sk_error_report(sk);

/**
 * netlink_set_err - report error to broadcast listeners
 * @ssk: the kernel netlink socket, as returned by netlink_kernel_create()
 * @portid: the PORTID of a process that we want to skip (if any)
 * @group: the broadcast group that will notice the error
 * @code: error code, must be negative (as usual in kernelspace)
 *
 * This function returns the number of broadcast listeners that have set the
 * NETLINK_NO_ENOBUFS socket option.
 */
int netlink_set_err(struct sock *ssk, u32 portid, u32 group, int code)
	struct netlink_set_err_data info;

	info.exclude_sk = ssk;
	info.portid = portid;
	/* sk->sk_err wants a positive error value */
	info.code = -code;

	read_lock(&nl_table_lock);

	sk_for_each_bound(sk, &nl_table[ssk->sk_protocol].mc_list)
		ret += do_one_set_err(sk, &info);

	read_unlock(&nl_table_lock);
EXPORT_SYMBOL(netlink_set_err);
/* must be called with netlink table grabbed */
static void netlink_update_socket_mc(struct netlink_sock *nlk,
				     unsigned int group,
				     int is_new)
	int old, new = !!is_new, subscriptions;

	old = test_bit(group - 1, nlk->groups);
	subscriptions = nlk->subscriptions - old + new;
	if (new)
		__set_bit(group - 1, nlk->groups);
	else
		__clear_bit(group - 1, nlk->groups);
	netlink_update_subscriptions(&nlk->sk, subscriptions);
	netlink_update_listeners(&nlk->sk);
static int netlink_setsockopt(struct socket *sock, int level, int optname,
			      char __user *optval, unsigned int optlen)
	struct sock *sk = sock->sk;
	struct netlink_sock *nlk = nlk_sk(sk);
	unsigned int val = 0;

	if (level != SOL_NETLINK)
		return -ENOPROTOOPT;

	if (optname != NETLINK_RX_RING && optname != NETLINK_TX_RING &&
	    optlen >= sizeof(int) &&
	    get_user(val, (unsigned int __user *)optval))
		return -EFAULT;

	switch (optname) {
	case NETLINK_PKTINFO:
		if (val)
			nlk->flags |= NETLINK_F_RECV_PKTINFO;
		else
			nlk->flags &= ~NETLINK_F_RECV_PKTINFO;
		break;
	case NETLINK_ADD_MEMBERSHIP:
	case NETLINK_DROP_MEMBERSHIP: {
		if (!netlink_allowed(sock, NL_CFG_F_NONROOT_RECV))
			return -EPERM;
		err = netlink_realloc_groups(sk);

		if (!val || val - 1 >= nlk->ngroups)
			return -EINVAL;
		if (optname == NETLINK_ADD_MEMBERSHIP && nlk->netlink_bind) {
			err = nlk->netlink_bind(sock_net(sk), val);
		}
		netlink_table_grab();
		netlink_update_socket_mc(nlk, val,
					 optname == NETLINK_ADD_MEMBERSHIP);
		netlink_table_ungrab();
		if (optname == NETLINK_DROP_MEMBERSHIP && nlk->netlink_unbind)
			nlk->netlink_unbind(sock_net(sk), val);

		break;
	}
	case NETLINK_BROADCAST_ERROR:
		if (val)
			nlk->flags |= NETLINK_F_BROADCAST_SEND_ERROR;
		else
			nlk->flags &= ~NETLINK_F_BROADCAST_SEND_ERROR;
		break;
	case NETLINK_NO_ENOBUFS:
		if (val) {
			nlk->flags |= NETLINK_F_RECV_NO_ENOBUFS;
			clear_bit(NETLINK_S_CONGESTED, &nlk->state);
			wake_up_interruptible(&nlk->wait);
		} else {
			nlk->flags &= ~NETLINK_F_RECV_NO_ENOBUFS;
		}
		break;
#ifdef CONFIG_NETLINK_MMAP
	case NETLINK_RX_RING:
	case NETLINK_TX_RING: {
		struct nl_mmap_req req;

		/* Rings might consume more memory than queue limits, require
		 * CAP_NET_ADMIN.
		 */
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		if (optlen < sizeof(req))
			return -EINVAL;
		if (copy_from_user(&req, optval, sizeof(req)))
			return -EFAULT;
		err = netlink_set_ring(sk, &req,
				       optname == NETLINK_TX_RING);
		break;
	}
#endif /* CONFIG_NETLINK_MMAP */
	case NETLINK_LISTEN_ALL_NSID:
		if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_BROADCAST))
			return -EPERM;

		if (val)
			nlk->flags |= NETLINK_F_LISTEN_ALL_NSID;
		else
			nlk->flags &= ~NETLINK_F_LISTEN_ALL_NSID;
		break;
	case NETLINK_CAP_ACK:
		if (val)
			nlk->flags |= NETLINK_F_CAP_ACK;
		else
			nlk->flags &= ~NETLINK_F_CAP_ACK;
		break;
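
/* Illustrative sketch (userspace side) of the membership options handled
 * above; RTNLGRP_LINK is just an example group:
 *
 *	#include <linux/rtnetlink.h>
 *
 *	int grp = RTNLGRP_LINK;
 *	setsockopt(fd, SOL_NETLINK, NETLINK_ADD_MEMBERSHIP, &grp, sizeof(grp));
 *	// ... receive notifications ...
 *	setsockopt(fd, SOL_NETLINK, NETLINK_DROP_MEMBERSHIP, &grp, sizeof(grp));
 *
 * Unlike the 32-group nl_groups bitmask in struct sockaddr_nl, this
 * interface addresses groups by number and so reaches groups above 32.
 */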
static int netlink_getsockopt(struct socket *sock, int level, int optname,
			      char __user *optval, int __user *optlen)
	struct sock *sk = sock->sk;
	struct netlink_sock *nlk = nlk_sk(sk);

	if (level != SOL_NETLINK)
		return -ENOPROTOOPT;

	if (get_user(len, optlen))
		return -EFAULT;

	switch (optname) {
	case NETLINK_PKTINFO:
		if (len < sizeof(int))
			return -EINVAL;
		val = nlk->flags & NETLINK_F_RECV_PKTINFO ? 1 : 0;
		if (put_user(len, optlen) ||
		    put_user(val, optval))
			return -EFAULT;
		break;
	case NETLINK_BROADCAST_ERROR:
		if (len < sizeof(int))
			return -EINVAL;
		val = nlk->flags & NETLINK_F_BROADCAST_SEND_ERROR ? 1 : 0;
		if (put_user(len, optlen) ||
		    put_user(val, optval))
			return -EFAULT;
		break;
	case NETLINK_NO_ENOBUFS:
		if (len < sizeof(int))
			return -EINVAL;
		val = nlk->flags & NETLINK_F_RECV_NO_ENOBUFS ? 1 : 0;
		if (put_user(len, optlen) ||
		    put_user(val, optval))
			return -EFAULT;
		break;
	case NETLINK_LIST_MEMBERSHIPS: {
		int pos, idx, shift;

		netlink_table_grab();
		for (pos = 0; pos * 8 < nlk->ngroups; pos += sizeof(u32)) {
			if (len - pos < sizeof(u32))
				break;

			idx = pos / sizeof(unsigned long);
			shift = (pos % sizeof(unsigned long)) * 8;
			if (put_user((u32)(nlk->groups[idx] >> shift),
				     (u32 __user *)(optval + pos))) {
				err = -EFAULT;
				break;
			}
		}
		if (put_user(ALIGN(nlk->ngroups / 8, sizeof(u32)), optlen))
			err = -EFAULT;
		netlink_table_ungrab();
		break;
	}
	case NETLINK_CAP_ACK:
		if (len < sizeof(int))
			return -EINVAL;
		val = nlk->flags & NETLINK_F_CAP_ACK ? 1 : 0;
		if (put_user(len, optlen) ||
		    put_user(val, optval))
			return -EFAULT;
		break;
static void netlink_cmsg_recv_pktinfo(struct msghdr *msg, struct sk_buff *skb)
	struct nl_pktinfo info;

	info.group = NETLINK_CB(skb).dst_group;
	put_cmsg(msg, SOL_NETLINK, NETLINK_PKTINFO, sizeof(info), &info);

static void netlink_cmsg_listen_all_nsid(struct sock *sk, struct msghdr *msg,
					 struct sk_buff *skb)
	if (!NETLINK_CB(skb).nsid_is_set)
		return;

	put_cmsg(msg, SOL_NETLINK, NETLINK_LISTEN_ALL_NSID, sizeof(int),
		 &NETLINK_CB(skb).nsid);
static int netlink_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
	struct sock *sk = sock->sk;
	struct netlink_sock *nlk = nlk_sk(sk);
	DECLARE_SOCKADDR(struct sockaddr_nl *, addr, msg->msg_name);
	struct sk_buff *skb;
	struct scm_cookie scm;
	u32 netlink_skb_flags = 0;

	if (msg->msg_flags&MSG_OOB)
		return -EOPNOTSUPP;

	err = scm_send(sock, msg, &scm, true);

	if (msg->msg_namelen) {
		if (addr->nl_family != AF_NETLINK)
			goto out;
		dst_portid = addr->nl_pid;
		dst_group = ffs(addr->nl_groups);
		if ((dst_group || dst_portid) &&
		    !netlink_allowed(sock, NL_CFG_F_NONROOT_SEND))
			goto out;
		netlink_skb_flags |= NETLINK_SKB_DST;
	} else {
		dst_portid = nlk->dst_portid;
		dst_group = nlk->dst_group;
	}

	err = netlink_autobind(sock);

	/* It's a really convoluted way for userland to ask for mmaped
	 * sendmsg(), but that's what we've got...
	 */
	if (netlink_tx_is_mmaped(sk) &&
	    iter_is_iovec(&msg->msg_iter) &&
	    msg->msg_iter.nr_segs == 1 &&
	    msg->msg_iter.iov->iov_base == NULL) {
		err = netlink_mmap_sendmsg(sk, msg, dst_portid, dst_group,
					   &scm);

	if (len > sk->sk_sndbuf - 32)

	skb = netlink_alloc_large_skb(len, dst_group);

	NETLINK_CB(skb).portid	  = nlk->portid;
	NETLINK_CB(skb).dst_group = dst_group;
	NETLINK_CB(skb).creds	  = scm.creds;
	NETLINK_CB(skb).flags	  = netlink_skb_flags;

	if (memcpy_from_msg(skb_put(skb, len), msg, len)) {

	err = security_netlink_send(sk, skb);

	if (dst_group) {
		atomic_inc(&skb->users);
		netlink_broadcast(sk, skb, dst_portid, dst_group, GFP_KERNEL);
	}
	err = netlink_unicast(sk, skb, dst_portid, msg->msg_flags&MSG_DONTWAIT);
static int netlink_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
			   int flags)
	struct scm_cookie scm;
	struct sock *sk = sock->sk;
	struct netlink_sock *nlk = nlk_sk(sk);
	int noblock = flags&MSG_DONTWAIT;
	struct sk_buff *skb, *data_skb;

	skb = skb_recv_datagram(sk, flags, noblock, &err);

#ifdef CONFIG_COMPAT_NETLINK_MESSAGES
	if (unlikely(skb_shinfo(skb)->frag_list)) {
		/*
		 * If this skb has a frag_list, that means we will have to
		 * use the frag_list skb's data for compat tasks
		 * and the regular skb's data for normal (non-compat) tasks.
		 *
		 * If we need to send the compat skb, assign it to the
		 * 'data_skb' variable so that it will be used below for data
		 * copying. We keep 'skb' for everything else, including
		 * freeing both later.
		 */
		if (flags & MSG_CMSG_COMPAT)
			data_skb = skb_shinfo(skb)->frag_list;
	}
#endif

	/* Record the max length of recvmsg() calls for future allocations */
	nlk->max_recvmsg_len = max(nlk->max_recvmsg_len, len);
	nlk->max_recvmsg_len = min_t(size_t, nlk->max_recvmsg_len,
				     SKB_WITH_OVERHEAD(32768));

	copied = data_skb->len;
	if (len < copied) {
		msg->msg_flags |= MSG_TRUNC;
		copied = len;
	}

	skb_reset_transport_header(data_skb);
	err = skb_copy_datagram_msg(data_skb, 0, msg, copied);

	if (msg->msg_name) {
		DECLARE_SOCKADDR(struct sockaddr_nl *, addr, msg->msg_name);
		addr->nl_family = AF_NETLINK;
		addr->nl_pid	= NETLINK_CB(skb).portid;
		addr->nl_groups	= netlink_group_mask(NETLINK_CB(skb).dst_group);
		msg->msg_namelen = sizeof(*addr);
	}

	if (nlk->flags & NETLINK_F_RECV_PKTINFO)
		netlink_cmsg_recv_pktinfo(msg, skb);
	if (nlk->flags & NETLINK_F_LISTEN_ALL_NSID)
		netlink_cmsg_listen_all_nsid(sk, msg, skb);

	memset(&scm, 0, sizeof(scm));
	scm.creds = *NETLINK_CREDS(skb);
	if (flags & MSG_TRUNC)
		copied = data_skb->len;

	skb_free_datagram(sk, skb);

	if (nlk->cb_running &&
	    atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf / 2) {
		ret = netlink_dump(sk);
		if (ret) {
			sk->sk_err = -ret;
			sk->sk_error_report(sk);
		}
	}

	scm_recv(sock, msg, &scm, flags);

	netlink_rcv_wake(sk);
	return err ? : copied;
static void netlink_data_ready(struct sock *sk)

/*
 * We export these functions to other modules. They provide a
 * complete set of kernel non-blocking support for message
 * queueing.
 */

struct sock *
__netlink_kernel_create(struct net *net, int unit, struct module *module,
			struct netlink_kernel_cfg *cfg)
	struct socket *sock;
	struct netlink_sock *nlk;
	struct listeners *listeners = NULL;
	struct mutex *cb_mutex = cfg ? cfg->cb_mutex : NULL;
	unsigned int groups;

	if (unit < 0 || unit >= MAX_LINKS)

	if (sock_create_lite(PF_NETLINK, SOCK_DGRAM, unit, &sock))

	if (__netlink_create(net, sock, cb_mutex, unit, 1) < 0)
		goto out_sock_release_nosk;

	if (!cfg || cfg->groups < 32)
		groups = 32;
	else
		groups = cfg->groups;

	listeners = kzalloc(sizeof(*listeners) + NLGRPSZ(groups), GFP_KERNEL);
	if (!listeners)
		goto out_sock_release;

	sk->sk_data_ready = netlink_data_ready;
	if (cfg && cfg->input)
		nlk_sk(sk)->netlink_rcv = cfg->input;

	if (netlink_insert(sk, 0))
		goto out_sock_release;

	nlk->flags |= NETLINK_F_KERNEL_SOCKET;

	netlink_table_grab();
	if (!nl_table[unit].registered) {
		nl_table[unit].groups = groups;
		rcu_assign_pointer(nl_table[unit].listeners, listeners);
		nl_table[unit].cb_mutex = cb_mutex;
		nl_table[unit].module = module;
		nl_table[unit].bind = cfg->bind;
		nl_table[unit].unbind = cfg->unbind;
		nl_table[unit].flags = cfg->flags;
		nl_table[unit].compare = cfg->compare;
		nl_table[unit].registered = 1;
	} else {
		nl_table[unit].registered++;
	}
	netlink_table_ungrab();

	netlink_kernel_release(sk);

out_sock_release_nosk:

EXPORT_SYMBOL(__netlink_kernel_create);
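
/* Illustrative sketch (kernel side): registering a kernel socket through
 * the netlink_kernel_create() wrapper; NETLINK_DEMO stands in for a real
 * protocol number and demo_rcv() for a real input callback, both
 * hypothetical:
 *
 *	static void demo_rcv(struct sk_buff *skb)
 *	{
 *		// runs for every message sent to this kernel socket
 *	}
 *
 *	struct netlink_kernel_cfg cfg = {
 *		.groups	= 32,
 *		.input	= demo_rcv,
 *	};
 *	struct sock *nlsk = netlink_kernel_create(&init_net, NETLINK_DEMO,
 *						  &cfg);
 */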
2677 netlink_kernel_release(struct sock *sk)
2679 if (sk == NULL || sk->sk_socket == NULL)
2682 sock_release(sk->sk_socket);
2684 EXPORT_SYMBOL(netlink_kernel_release);
2686 int __netlink_change_ngroups(struct sock *sk, unsigned int groups)
2687 {
2688 struct listeners *new, *old;
2689 struct netlink_table *tbl = &nl_table[sk->sk_protocol];
2691 if (groups < 32)
2692 groups = 32;
2694 if (NLGRPSZ(tbl->groups) < NLGRPSZ(groups)) {
2695 new = kzalloc(sizeof(*new) + NLGRPSZ(groups), GFP_ATOMIC);
2696 if (!new)
2697 return -ENOMEM;
2698 old = nl_deref_protected(tbl->listeners);
2699 memcpy(new->masks, old->masks, NLGRPSZ(tbl->groups));
2700 rcu_assign_pointer(tbl->listeners, new);
2702 kfree_rcu(old, rcu);
2703 }
2704 tbl->groups = groups;
2706 return 0;
2707 }
2709 /**
2710 * netlink_change_ngroups - change number of multicast groups
2712 * This changes the number of multicast groups that are available
2713 * on a certain netlink family. Note that it is not possible to
2714 * change the number of groups to below 32. Also note that it does
2715 * not implicitly call netlink_clear_multicast_users() when the
2716 * number of groups is reduced.
2718 * @sk: The kernel netlink socket, as returned by netlink_kernel_create().
2719 * @groups: The new number of groups.
2720 */
2721 int netlink_change_ngroups(struct sock *sk, unsigned int groups)
2722 {
2723 int err;
2725 netlink_table_grab();
2726 err = __netlink_change_ngroups(sk, groups);
2727 netlink_table_ungrab();
2729 return err;
2730 }
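/*
 * Illustrative sketch (hypothetical caller): a family that allocates a
 * multicast group beyond the initial 32 must widen the listener bitmaps
 * first, much as generic netlink does when a new group is registered.
 */
#if 0	/* example only */
static int example_add_mc_group(struct sock *kernel_sk, unsigned int n_groups)
{
	/* values below 32 are raised to 32; shrinking never clears
	 * existing subscribers (see the comment above) */
	return netlink_change_ngroups(kernel_sk, n_groups);
}
#endif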
2732 void __netlink_clear_multicast_users(struct sock *ksk, unsigned int group)
2733 {
2734 struct sock *sk;
2735 struct netlink_table *tbl = &nl_table[ksk->sk_protocol];
2737 sk_for_each_bound(sk, &tbl->mc_list)
2738 netlink_update_socket_mc(nlk_sk(sk), group, 0);
2739 }
2741 struct nlmsghdr *
2742 __nlmsg_put(struct sk_buff *skb, u32 portid, u32 seq, int type, int len, int flags)
2743 {
2744 struct nlmsghdr *nlh;
2745 int size = nlmsg_msg_size(len);
2747 nlh = (struct nlmsghdr *)skb_put(skb, NLMSG_ALIGN(size));
2748 nlh->nlmsg_type = type;
2749 nlh->nlmsg_len = size;
2750 nlh->nlmsg_flags = flags;
2751 nlh->nlmsg_pid = portid;
2752 nlh->nlmsg_seq = seq;
2753 if (!__builtin_constant_p(size) || NLMSG_ALIGN(size) - size != 0)
2754 memset(nlmsg_data(nlh) + len, 0, NLMSG_ALIGN(size) - size);
2755 return nlh;
2756 }
2757 EXPORT_SYMBOL(__nlmsg_put);
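/*
 * Illustrative sketch (not part of this file): most callers reach
 * __nlmsg_put() through the checked nlmsg_put() wrapper in <net/netlink.h>,
 * which verifies tailroom first. The message type and u32 payload below
 * are hypothetical.
 */
#if 0	/* example only */
static int example_fill_msg(struct sk_buff *skb, u32 portid, u32 seq)
{
	struct nlmsghdr *nlh;
	u32 *payload;

	nlh = nlmsg_put(skb, portid, seq, NLMSG_MIN_TYPE, sizeof(*payload), 0);
	if (!nlh)
		return -EMSGSIZE;	/* not enough tailroom */

	payload = nlmsg_data(nlh);
	*payload = 42;			/* hypothetical payload */
	nlmsg_end(skb, nlh);
	return 0;
}
#endif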
2759 /*
2760 * It looks a bit ugly.
2761 * It would be better to create a kernel thread.
2762 */
2764 static int netlink_dump(struct sock *sk)
2765 {
2766 struct netlink_sock *nlk = nlk_sk(sk);
2767 struct netlink_callback *cb;
2768 struct sk_buff *skb = NULL;
2769 struct nlmsghdr *nlh;
2770 int len, err = -ENOBUFS;
2771 int alloc_size;
2773 mutex_lock(nlk->cb_mutex);
2774 if (!nlk->cb_running) {
2775 err = -EINVAL;
2776 goto errout_skb;
2777 }
2779 cb = &nlk->cb;
2780 alloc_size = max_t(int, cb->min_dump_alloc, NLMSG_GOODSIZE);
2782 if (!netlink_rx_is_mmaped(sk) &&
2783 atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf)
2784 goto errout_skb;
2786 /* NLMSG_GOODSIZE is small to avoid high order allocations being
2787 * required, but it makes sense to _attempt_ a 16K bytes allocation
2788 * to reduce number of system calls on dump operations, if user
2789 * ever provided a big enough buffer.
2790 */
2791 if (alloc_size < nlk->max_recvmsg_len) {
2792 skb = netlink_alloc_skb(sk,
2793 nlk->max_recvmsg_len,
2794 nlk->portid,
2795 GFP_KERNEL |
2796 __GFP_NOWARN |
2797 __GFP_NORETRY);
2798 /* available room should be exact amount to avoid MSG_TRUNC */
2799 if (skb)
2800 skb_reserve(skb, skb_tailroom(skb) -
2801 nlk->max_recvmsg_len);
2802 }
2803 if (!skb)
2804 skb = netlink_alloc_skb(sk, alloc_size, nlk->portid,
2805 GFP_KERNEL);
2806 if (!skb)
2807 goto errout_skb;
2808 netlink_skb_set_owner_r(skb, sk);
2810 len = cb->dump(skb, cb);
2812 if (len > 0) {
2813 mutex_unlock(nlk->cb_mutex);
2815 if (sk_filter(sk, skb))
2816 kfree_skb(skb);
2817 else
2818 __netlink_sendskb(sk, skb);
2819 return 0;
2820 }
2822 nlh = nlmsg_put_answer(skb, cb, NLMSG_DONE, sizeof(len), NLM_F_MULTI);
2823 if (!nlh)
2824 goto errout_skb;
2826 nl_dump_check_consistent(cb, nlh);
2828 memcpy(nlmsg_data(nlh), &len, sizeof(len));
2830 if (sk_filter(sk, skb))
2831 kfree_skb(skb);
2832 else
2833 __netlink_sendskb(sk, skb);
2835 if (cb->done)
2836 cb->done(cb);
2838 nlk->cb_running = false;
2839 mutex_unlock(nlk->cb_mutex);
2840 module_put(cb->module);
2841 consume_skb(cb->skb);
2842 return 0;
2844 errout_skb:
2845 mutex_unlock(nlk->cb_mutex);
2846 kfree_skb(skb);
2847 return err;
2848 }
2850 int __netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
2851 const struct nlmsghdr *nlh,
2852 struct netlink_dump_control *control)
2853 {
2854 struct netlink_callback *cb;
2855 struct sock *sk;
2856 struct netlink_sock *nlk;
2857 int ret;
2859 /* Memory mapped dump requests need to be copied to avoid looping
2860 * on the pending state in netlink_mmap_sendmsg() while the CB holds
2861 * a reference to the skb.
2862 */
2863 if (netlink_skb_is_mmaped(skb)) {
2864 skb = skb_copy(skb, GFP_KERNEL);
2865 if (skb == NULL)
2866 return -ENOBUFS;
2867 } else
2868 atomic_inc(&skb->users);
2870 sk = netlink_lookup(sock_net(ssk), ssk->sk_protocol, NETLINK_CB(skb).portid);
2871 if (sk == NULL) {
2872 ret = -ECONNREFUSED;
2873 goto error_free;
2874 }
2876 nlk = nlk_sk(sk);
2877 mutex_lock(nlk->cb_mutex);
2878 /* A dump is in progress... */
2879 if (nlk->cb_running) {
2880 ret = -EBUSY;
2881 goto error_unlock;
2882 }
2883 /* add reference of module which cb->dump belongs to */
2884 if (!try_module_get(control->module)) {
2885 ret = -EPROTONOSUPPORT;
2886 goto error_unlock;
2887 }
2889 cb = &nlk->cb;
2890 memset(cb, 0, sizeof(*cb));
2891 cb->dump = control->dump;
2892 cb->done = control->done;
2893 cb->nlh = nlh;
2894 cb->data = control->data;
2895 cb->module = control->module;
2896 cb->min_dump_alloc = control->min_dump_alloc;
2897 cb->skb = skb;
2899 nlk->cb_running = true;
2901 mutex_unlock(nlk->cb_mutex);
2903 ret = netlink_dump(sk);
2904 sock_put(sk);
2906 if (ret)
2907 return ret;
2909 /* We successfully started a dump, by returning -EINTR we
2910 * signal not to send ACK even if it was requested.
2911 */
2912 return -EINTR;
2914 error_unlock:
2915 sock_put(sk);
2916 mutex_unlock(nlk->cb_mutex);
2917 error_free:
2918 kfree_skb(skb);
2919 return ret;
2920 }
2921 EXPORT_SYMBOL(__netlink_dump_start);
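/*
 * Illustrative sketch (not part of this file): a request handler normally
 * starts a dump via the netlink_dump_start() wrapper, which sets
 * control->module = THIS_MODULE. The -EINTR success return is what tells
 * netlink_rcv_skb() not to send an ACK. All names are hypothetical;
 * example_nl_sk is the kernel socket from the earlier sketch.
 */
#if 0	/* example only */
static int example_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	/* append as many messages as fit; return > 0 to be called again,
	 * 0 once the dump is complete */
	return 0;
}

static int example_doit(struct sk_buff *skb, struct nlmsghdr *nlh)
{
	if (nlh->nlmsg_flags & NLM_F_DUMP) {
		struct netlink_dump_control c = {
			.dump = example_dump,
		};

		return netlink_dump_start(example_nl_sk, skb, nlh, &c);
	}
	return -EOPNOTSUPP;
}
#endif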
2923 void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err)
2924 {
2925 struct sk_buff *skb;
2926 struct nlmsghdr *rep;
2927 struct nlmsgerr *errmsg;
2928 size_t payload = sizeof(*errmsg);
2929 struct netlink_sock *nlk = nlk_sk(NETLINK_CB(in_skb).sk);
2931 /* Error messages get the original request appended, unless the user
2932 * requests to cap the error message.
2933 */
2934 if (!(nlk->flags & NETLINK_F_CAP_ACK) && err)
2935 payload += nlmsg_len(nlh);
2937 skb = netlink_alloc_skb(in_skb->sk, nlmsg_total_size(payload),
2938 NETLINK_CB(in_skb).portid, GFP_KERNEL);
2939 if (!skb) {
2940 struct sock *sk;
2942 sk = netlink_lookup(sock_net(in_skb->sk),
2943 in_skb->sk->sk_protocol,
2944 NETLINK_CB(in_skb).portid);
2945 if (sk) {
2946 sk->sk_err = ENOBUFS;
2947 sk->sk_error_report(sk);
2948 sock_put(sk);
2949 }
2950 return;
2951 }
2953 rep = __nlmsg_put(skb, NETLINK_CB(in_skb).portid, nlh->nlmsg_seq,
2954 NLMSG_ERROR, payload, 0);
2955 errmsg = nlmsg_data(rep);
2956 errmsg->error = err;
2957 memcpy(&errmsg->msg, nlh, payload > sizeof(*errmsg) ? nlh->nlmsg_len : sizeof(*nlh));
2958 netlink_unicast(in_skb->sk, skb, NETLINK_CB(in_skb).portid, MSG_DONTWAIT);
2959 }
2960 EXPORT_SYMBOL(netlink_ack);
2962 int netlink_rcv_skb(struct sk_buff *skb, int (*cb)(struct sk_buff *,
2963 struct nlmsghdr *))
2964 {
2965 struct nlmsghdr *nlh;
2966 int err;
2968 while (skb->len >= nlmsg_total_size(0)) {
2969 int msglen;
2971 nlh = nlmsg_hdr(skb);
2972 err = 0;
2974 if (nlh->nlmsg_len < NLMSG_HDRLEN || skb->len < nlh->nlmsg_len)
2975 return 0;
2977 /* Only requests are handled by the kernel */
2978 if (!(nlh->nlmsg_flags & NLM_F_REQUEST))
2979 goto ack;
2981 /* Skip control messages */
2982 if (nlh->nlmsg_type < NLMSG_MIN_TYPE)
2983 goto ack;
2985 err = cb(skb, nlh);
2986 if (err == -EINTR)
2987 goto skip;
2989 ack:
2990 if (nlh->nlmsg_flags & NLM_F_ACK || err)
2991 netlink_ack(skb, nlh, err);
2993 skip:
2994 msglen = NLMSG_ALIGN(nlh->nlmsg_len);
2995 if (msglen > skb->len)
2996 msglen = skb->len;
2997 skb_pull(skb, msglen);
2998 }
3000 return 0;
3001 }
3002 EXPORT_SYMBOL(netlink_rcv_skb);
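/*
 * Illustrative sketch (not part of this file): a fuller version of the
 * input callback from the first sketch. The usual pattern, used by
 * rtnetlink as well, is to feed every received skb to netlink_rcv_skb()
 * with a per-message dispatcher such as the hypothetical example_doit
 * above.
 */
#if 0	/* example only */
static void example_nl_input(struct sk_buff *skb)
{
	netlink_rcv_skb(skb, &example_doit);
}
#endif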
3004 /**
3005 * nlmsg_notify - send a notification netlink message
3006 * @sk: netlink socket to use
3007 * @skb: notification message
3008 * @portid: destination netlink portid for reports or 0
3009 * @group: destination multicast group or 0
3010 * @report: 1 to report back, 0 to disable
3011 * @flags: allocation flags
3012 */
3013 int nlmsg_notify(struct sock *sk, struct sk_buff *skb, u32 portid,
3014 unsigned int group, int report, gfp_t flags)
3015 {
3016 int err = 0;
3018 if (group) {
3019 int exclude_portid = 0;
3021 if (report) {
3022 atomic_inc(&skb->users);
3023 exclude_portid = portid;
3024 }
3026 /* errors reported via destination sk->sk_err, but propagate
3027 * delivery errors if NETLINK_BROADCAST_ERROR flag is set */
3028 err = nlmsg_multicast(sk, skb, exclude_portid, group, flags);
3029 }
3031 if (report) {
3032 int err2;
3034 err2 = nlmsg_unicast(sk, skb, portid);
3035 if (!err || err == -ESRCH)
3036 err = err2;
3037 }
3039 return err;
3040 }
3041 EXPORT_SYMBOL(nlmsg_notify);
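/*
 * Illustrative sketch (hypothetical caller): broadcasting an event skb to a
 * multicast group and, when the request carried NLM_F_ECHO, unicasting it
 * back to the requester as well. The group number is a placeholder.
 */
#if 0	/* example only */
static int example_notify(struct sock *kernel_sk, struct sk_buff *event_skb,
			  u32 requester_portid, bool echo)
{
	/* a non-zero report also unicasts the skb to requester_portid */
	return nlmsg_notify(kernel_sk, event_skb, requester_portid,
			    1 /* hypothetical group */, echo, GFP_KERNEL);
}
#endif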
3043 #ifdef CONFIG_PROC_FS
3044 struct nl_seq_iter {
3045 struct seq_net_private p;
3046 struct rhashtable_iter hti;
3047 int link;
3048 };
3050 static int netlink_walk_start(struct nl_seq_iter *iter)
3051 {
3052 int err;
3054 err = rhashtable_walk_init(&nl_table[iter->link].hash, &iter->hti);
3055 if (err) {
3056 iter->link = MAX_LINKS;
3057 return err;
3058 }
3060 err = rhashtable_walk_start(&iter->hti);
3061 return err == -EAGAIN ? 0 : err;
3062 }
3064 static void netlink_walk_stop(struct nl_seq_iter *iter)
3065 {
3066 rhashtable_walk_stop(&iter->hti);
3067 rhashtable_walk_exit(&iter->hti);
3068 }
3070 static void *__netlink_seq_next(struct seq_file *seq)
3071 {
3072 struct nl_seq_iter *iter = seq->private;
3073 struct netlink_sock *nlk;
3075 do {
3076 for (;;) {
3077 int err;
3079 nlk = rhashtable_walk_next(&iter->hti);
3081 if (IS_ERR(nlk)) {
3082 if (PTR_ERR(nlk) == -EAGAIN)
3083 continue;
3085 return nlk;
3086 }
3088 if (nlk)
3089 break;
3091 netlink_walk_stop(iter);
3092 if (++iter->link >= MAX_LINKS)
3093 return NULL;
3095 err = netlink_walk_start(iter);
3096 if (err)
3097 return ERR_PTR(err);
3098 }
3099 } while (sock_net(&nlk->sk) != seq_file_net(seq));
3101 return nlk;
3102 }
3104 static void *netlink_seq_start(struct seq_file *seq, loff_t *posp)
3105 {
3106 struct nl_seq_iter *iter = seq->private;
3107 void *obj = SEQ_START_TOKEN;
3108 loff_t pos;
3109 int err;
3111 iter->link = 0;
3113 err = netlink_walk_start(iter);
3114 if (err)
3115 return ERR_PTR(err);
3117 for (pos = *posp; pos && obj && !IS_ERR(obj); pos--)
3118 obj = __netlink_seq_next(seq);
3120 return obj;
3121 }
3123 static void *netlink_seq_next(struct seq_file *seq, void *v, loff_t *pos)
3124 {
3125 ++*pos;
3126 return __netlink_seq_next(seq);
3127 }
3129 static void netlink_seq_stop(struct seq_file *seq, void *v)
3130 {
3131 struct nl_seq_iter *iter = seq->private;
3133 if (iter->link >= MAX_LINKS)
3134 return;
3136 netlink_walk_stop(iter);
3137 }
3140 static int netlink_seq_show(struct seq_file *seq, void *v)
3141 {
3142 if (v == SEQ_START_TOKEN) {
3143 seq_puts(seq,
3144 "sk Eth Pid Groups "
3145 "Rmem Wmem Dump Locks Drops Inode\n");
3146 } else {
3147 struct sock *s = v;
3148 struct netlink_sock *nlk = nlk_sk(s);
3150 seq_printf(seq, "%pK %-3d %-6u %08x %-8d %-8d %d %-8d %-8d %-8lu\n",
3154 nlk->groups ? (u32)nlk->groups[0] : 0,
3155 sk_rmem_alloc_get(s),
3156 sk_wmem_alloc_get(s),
3158 atomic_read(&s->sk_refcnt),
3159 atomic_read(&s->sk_drops),
3167 static const struct seq_operations netlink_seq_ops = {
3168 .start = netlink_seq_start,
3169 .next = netlink_seq_next,
3170 .stop = netlink_seq_stop,
3171 .show = netlink_seq_show,
3172 };
3175 static int netlink_seq_open(struct inode *inode, struct file *file)
3176 {
3177 return seq_open_net(inode, file, &netlink_seq_ops,
3178 sizeof(struct nl_seq_iter));
3179 }
3181 static const struct file_operations netlink_seq_fops = {
3182 .owner = THIS_MODULE,
3183 .open = netlink_seq_open,
3184 .read = seq_read,
3185 .llseek = seq_lseek,
3186 .release = seq_release_net,
3187 };
3189 #endif
3191 int netlink_register_notifier(struct notifier_block *nb)
3192 {
3193 return atomic_notifier_chain_register(&netlink_chain, nb);
3194 }
3195 EXPORT_SYMBOL(netlink_register_notifier);
3197 int netlink_unregister_notifier(struct notifier_block *nb)
3198 {
3199 return atomic_notifier_chain_unregister(&netlink_chain, nb);
3200 }
3201 EXPORT_SYMBOL(netlink_unregister_notifier);
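/*
 * Illustrative sketch (not part of this file): subsystems such as nfnetlink
 * register a notifier on this chain to learn when a netlink socket goes
 * away; the chain fires with NETLINK_URELEASE and a struct netlink_notify.
 * Names are hypothetical.
 */
#if 0	/* example only */
static int example_nl_event(struct notifier_block *nb,
			    unsigned long event, void *ptr)
{
	struct netlink_notify *n = ptr;

	if (event == NETLINK_URELEASE && n->protocol == NETLINK_USERSOCK)
		pr_debug("netlink portid %u released\n", n->portid);
	return NOTIFY_DONE;
}

static struct notifier_block example_nl_notifier = {
	.notifier_call	= example_nl_event,
};
/* registered via netlink_register_notifier(&example_nl_notifier) */
#endif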
3203 static const struct proto_ops netlink_ops = {
3204 .family = PF_NETLINK,
3205 .owner = THIS_MODULE,
3206 .release = netlink_release,
3207 .bind = netlink_bind,
3208 .connect = netlink_connect,
3209 .socketpair = sock_no_socketpair,
3210 .accept = sock_no_accept,
3211 .getname = netlink_getname,
3212 .poll = netlink_poll,
3213 .ioctl = sock_no_ioctl,
3214 .listen = sock_no_listen,
3215 .shutdown = sock_no_shutdown,
3216 .setsockopt = netlink_setsockopt,
3217 .getsockopt = netlink_getsockopt,
3218 .sendmsg = netlink_sendmsg,
3219 .recvmsg = netlink_recvmsg,
3220 .mmap = netlink_mmap,
3221 .sendpage = sock_no_sendpage,
3222 };
3224 static const struct net_proto_family netlink_family_ops = {
3225 .family = PF_NETLINK,
3226 .create = netlink_create,
3227 .owner = THIS_MODULE, /* for consistency 8) */
3228 };
3230 static int __net_init netlink_net_init(struct net *net)
3231 {
3232 #ifdef CONFIG_PROC_FS
3233 if (!proc_create("netlink", 0, net->proc_net, &netlink_seq_fops))
3234 return -ENOMEM;
3235 #endif
3236 return 0;
3237 }
3239 static void __net_exit netlink_net_exit(struct net *net)
3240 {
3241 #ifdef CONFIG_PROC_FS
3242 remove_proc_entry("netlink", net->proc_net);
3243 #endif
3244 }
3246 static void __init netlink_add_usersock_entry(void)
3247 {
3248 struct listeners *listeners;
3249 int groups = 32;
3251 listeners = kzalloc(sizeof(*listeners) + NLGRPSZ(groups), GFP_KERNEL);
3252 if (!listeners)
3253 panic("netlink_add_usersock_entry: Cannot allocate listeners\n");
3255 netlink_table_grab();
3257 nl_table[NETLINK_USERSOCK].groups = groups;
3258 rcu_assign_pointer(nl_table[NETLINK_USERSOCK].listeners, listeners);
3259 nl_table[NETLINK_USERSOCK].module = THIS_MODULE;
3260 nl_table[NETLINK_USERSOCK].registered = 1;
3261 nl_table[NETLINK_USERSOCK].flags = NL_CFG_F_NONROOT_SEND;
3263 netlink_table_ungrab();
3264 }
3266 static struct pernet_operations __net_initdata netlink_net_ops = {
3267 .init = netlink_net_init,
3268 .exit = netlink_net_exit,
3269 };
3271 static inline u32 netlink_hash(const void *data, u32 len, u32 seed)
3272 {
3273 const struct netlink_sock *nlk = data;
3274 struct netlink_compare_arg arg;
3276 netlink_compare_arg_init(&arg, sock_net(&nlk->sk), nlk->rhash_portid);
3277 return jhash2((u32 *)&arg, netlink_compare_arg_len / sizeof(u32), seed);
3278 }
3280 static const struct rhashtable_params netlink_rhashtable_params = {
3281 .head_offset = offsetof(struct netlink_sock, node),
3282 .key_len = netlink_compare_arg_len,
3283 .obj_hashfn = netlink_hash,
3284 .obj_cmpfn = netlink_compare,
3285 .automatic_shrinking = true,
3286 };
3288 static int __init netlink_proto_init(void)
3289 {
3290 int i;
3291 int err = proto_register(&netlink_proto, 0);
3293 if (err != 0)
3294 goto out;
3296 BUILD_BUG_ON(sizeof(struct netlink_skb_parms) > FIELD_SIZEOF(struct sk_buff, cb));
3298 nl_table = kcalloc(MAX_LINKS, sizeof(*nl_table), GFP_KERNEL);
3299 if (!nl_table)
3300 goto panic;
3302 for (i = 0; i < MAX_LINKS; i++) {
3303 if (rhashtable_init(&nl_table[i].hash,
3304 &netlink_rhashtable_params) < 0) {
3305 while (--i > 0)
3306 rhashtable_destroy(&nl_table[i].hash);
3307 kfree(nl_table);
3308 goto panic;
3309 }
3310 }
3312 INIT_LIST_HEAD(&netlink_tap_all);
3314 netlink_add_usersock_entry();
3316 sock_register(&netlink_family_ops);
3317 register_pernet_subsys(&netlink_net_ops);
3318 /* The netlink device handler may be needed early. */
3319 rtnetlink_init();
3320 out:
3321 return err;
3322 panic:
3323 panic("netlink_init: Cannot allocate nl_table\n");
3324 }
3326 core_initcall(netlink_proto_init);