genetlink: pass only network namespace to genl_has_listeners()
[linux-2.6-block.git] / net / netlink / af_netlink.c
CommitLineData
1da177e4
LT
1/*
2 * NETLINK Kernel-user communication protocol.
3 *
113aa838 4 * Authors: Alan Cox <alan@lxorguk.ukuu.org.uk>
1da177e4 5 * Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
cd1df525 6 * Patrick McHardy <kaber@trash.net>
1da177e4
LT
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * as published by the Free Software Foundation; either version
11 * 2 of the License, or (at your option) any later version.
746fac4d 12 *
1da177e4
LT
13 * Tue Jun 26 14:36:48 MEST 2001 Herbert "herp" Rosmanith
14 * added netlink_proto_exit
15 * Tue Jan 22 18:32:44 BRST 2002 Arnaldo C. de Melo <acme@conectiva.com.br>
16 * use nlk_sk, as sk->protinfo is on a diet 8)
4fdb3bb7
HW
17 * Fri Jul 22 19:51:12 MEST 2005 Harald Welte <laforge@gnumonks.org>
18 * - inc module use count of module that owns
19 * the kernel socket in case userspace opens
20 * socket of same protocol
21 * - remove all module support, since netlink is
22 * mandatory if CONFIG_NET=y these days
1da177e4
LT
23 */
24
1da177e4
LT
25#include <linux/module.h>
26
4fc268d2 27#include <linux/capability.h>
1da177e4
LT
28#include <linux/kernel.h>
29#include <linux/init.h>
1da177e4
LT
30#include <linux/signal.h>
31#include <linux/sched.h>
32#include <linux/errno.h>
33#include <linux/string.h>
34#include <linux/stat.h>
35#include <linux/socket.h>
36#include <linux/un.h>
37#include <linux/fcntl.h>
38#include <linux/termios.h>
39#include <linux/sockios.h>
40#include <linux/net.h>
41#include <linux/fs.h>
42#include <linux/slab.h>
43#include <asm/uaccess.h>
44#include <linux/skbuff.h>
45#include <linux/netdevice.h>
46#include <linux/rtnetlink.h>
47#include <linux/proc_fs.h>
48#include <linux/seq_file.h>
1da177e4
LT
49#include <linux/notifier.h>
50#include <linux/security.h>
51#include <linux/jhash.h>
52#include <linux/jiffies.h>
53#include <linux/random.h>
54#include <linux/bitops.h>
55#include <linux/mm.h>
56#include <linux/types.h>
54e0f520 57#include <linux/audit.h>
af65bdfc 58#include <linux/mutex.h>
ccdfcc39 59#include <linux/vmalloc.h>
bcbde0d4 60#include <linux/if_arp.h>
e341694e 61#include <linux/rhashtable.h>
9652e931 62#include <asm/cacheflush.h>
e341694e 63#include <linux/hash.h>
54e0f520 64
457c4cbc 65#include <net/net_namespace.h>
1da177e4
LT
66#include <net/sock.h>
67#include <net/scm.h>
82ace47a 68#include <net/netlink.h>
1da177e4 69
0f29c768 70#include "af_netlink.h"
1da177e4 71
5c398dc8
ED
72struct listeners {
73 struct rcu_head rcu;
74 unsigned long masks[0];
6c04bb18
JB
75};
76
cd967e05
PM
77/* state bits */
78#define NETLINK_CONGESTED 0x0
79
80/* flags */
77247bbb 81#define NETLINK_KERNEL_SOCKET 0x1
9a4595bc 82#define NETLINK_RECV_PKTINFO 0x2
be0c22a4 83#define NETLINK_BROADCAST_SEND_ERROR 0x4
38938bfe 84#define NETLINK_RECV_NO_ENOBUFS 0x8
77247bbb 85
035c4c16 86static inline int netlink_is_kernel(struct sock *sk)
aed81560
DL
87{
88 return nlk_sk(sk)->flags & NETLINK_KERNEL_SOCKET;
89}
90
0f29c768
AV
91struct netlink_table *nl_table;
92EXPORT_SYMBOL_GPL(nl_table);
1da177e4
LT
93
94static DECLARE_WAIT_QUEUE_HEAD(nl_table_wait);
95
96static int netlink_dump(struct sock *sk);
9652e931 97static void netlink_skb_destructor(struct sk_buff *skb);
1da177e4 98
78fd1d0a
TG
99/* nl_table locking explained:
100 * Lookup and traversal are protected with nl_sk_hash_lock or nl_table_lock
101 * combined with an RCU read-side lock. Insertion and removal are protected
102 * with nl_sk_hash_lock while using RCU list modification primitives and may
103 * run in parallel to nl_table_lock protected lookups. Destruction of the
104 * Netlink socket may only occur *after* nl_table_lock has been acquired
105 * either during or after the socket has been removed from the list.
106 */
0f29c768
AV
107DEFINE_RWLOCK(nl_table_lock);
108EXPORT_SYMBOL_GPL(nl_table_lock);
1da177e4
LT
109static atomic_t nl_table_users = ATOMIC_INIT(0);
110
6d772ac5
ED
111#define nl_deref_protected(X) rcu_dereference_protected(X, lockdep_is_held(&nl_table_lock));
112
e341694e
TG
113/* Protects netlink socket hash table mutations */
114DEFINE_MUTEX(nl_sk_hash_lock);
6c8f7e70 115EXPORT_SYMBOL_GPL(nl_sk_hash_lock);
e341694e 116
97127566 117#ifdef CONFIG_PROVE_LOCKING
7b4ce235 118static int lockdep_nl_sk_hash_is_held(void *parent)
e341694e 119{
78fd1d0a
TG
120 if (debug_locks)
121 return lockdep_is_held(&nl_sk_hash_lock) || lockdep_is_held(&nl_table_lock);
78fd1d0a 122 return 1;
e341694e 123}
97127566 124#endif
e341694e 125
e041c683 126static ATOMIC_NOTIFIER_HEAD(netlink_chain);
1da177e4 127
bcbde0d4
DB
128static DEFINE_SPINLOCK(netlink_tap_lock);
129static struct list_head netlink_tap_all __read_mostly;
130
b57ef81f 131static inline u32 netlink_group_mask(u32 group)
d629b836
PM
132{
133 return group ? 1 << (group - 1) : 0;
134}
135
bcbde0d4
DB
136int netlink_add_tap(struct netlink_tap *nt)
137{
138 if (unlikely(nt->dev->type != ARPHRD_NETLINK))
139 return -EINVAL;
140
141 spin_lock(&netlink_tap_lock);
142 list_add_rcu(&nt->list, &netlink_tap_all);
143 spin_unlock(&netlink_tap_lock);
144
fcd4d35e 145 __module_get(nt->module);
bcbde0d4
DB
146
147 return 0;
148}
149EXPORT_SYMBOL_GPL(netlink_add_tap);
150
2173f8d9 151static int __netlink_remove_tap(struct netlink_tap *nt)
bcbde0d4
DB
152{
153 bool found = false;
154 struct netlink_tap *tmp;
155
156 spin_lock(&netlink_tap_lock);
157
158 list_for_each_entry(tmp, &netlink_tap_all, list) {
159 if (nt == tmp) {
160 list_del_rcu(&nt->list);
161 found = true;
162 goto out;
163 }
164 }
165
166 pr_warn("__netlink_remove_tap: %p not found\n", nt);
167out:
168 spin_unlock(&netlink_tap_lock);
169
170 if (found && nt->module)
171 module_put(nt->module);
172
173 return found ? 0 : -ENODEV;
174}
bcbde0d4
DB
175
176int netlink_remove_tap(struct netlink_tap *nt)
177{
178 int ret;
179
180 ret = __netlink_remove_tap(nt);
181 synchronize_net();
182
183 return ret;
184}
185EXPORT_SYMBOL_GPL(netlink_remove_tap);
186
5ffd5cdd
DB
187static bool netlink_filter_tap(const struct sk_buff *skb)
188{
189 struct sock *sk = skb->sk;
5ffd5cdd
DB
190
191 /* We take the more conservative approach and
192 * whitelist socket protocols that may pass.
193 */
194 switch (sk->sk_protocol) {
195 case NETLINK_ROUTE:
196 case NETLINK_USERSOCK:
197 case NETLINK_SOCK_DIAG:
198 case NETLINK_NFLOG:
199 case NETLINK_XFRM:
200 case NETLINK_FIB_LOOKUP:
201 case NETLINK_NETFILTER:
202 case NETLINK_GENERIC:
498044bb 203 return true;
5ffd5cdd
DB
204 }
205
498044bb 206 return false;
5ffd5cdd
DB
207}
208
bcbde0d4
DB
209static int __netlink_deliver_tap_skb(struct sk_buff *skb,
210 struct net_device *dev)
211{
212 struct sk_buff *nskb;
5ffd5cdd 213 struct sock *sk = skb->sk;
bcbde0d4
DB
214 int ret = -ENOMEM;
215
216 dev_hold(dev);
217 nskb = skb_clone(skb, GFP_ATOMIC);
218 if (nskb) {
219 nskb->dev = dev;
5ffd5cdd 220 nskb->protocol = htons((u16) sk->sk_protocol);
604d13c9
DB
221 nskb->pkt_type = netlink_is_kernel(sk) ?
222 PACKET_KERNEL : PACKET_USER;
4e48ed88 223 skb_reset_network_header(nskb);
bcbde0d4
DB
224 ret = dev_queue_xmit(nskb);
225 if (unlikely(ret > 0))
226 ret = net_xmit_errno(ret);
227 }
228
229 dev_put(dev);
230 return ret;
231}
232
233static void __netlink_deliver_tap(struct sk_buff *skb)
234{
235 int ret;
236 struct netlink_tap *tmp;
237
5ffd5cdd
DB
238 if (!netlink_filter_tap(skb))
239 return;
240
bcbde0d4
DB
241 list_for_each_entry_rcu(tmp, &netlink_tap_all, list) {
242 ret = __netlink_deliver_tap_skb(skb, tmp->dev);
243 if (unlikely(ret))
244 break;
245 }
246}
247
248static void netlink_deliver_tap(struct sk_buff *skb)
249{
250 rcu_read_lock();
251
252 if (unlikely(!list_empty(&netlink_tap_all)))
253 __netlink_deliver_tap(skb);
254
255 rcu_read_unlock();
256}
257
73bfd370
DB
258static void netlink_deliver_tap_kernel(struct sock *dst, struct sock *src,
259 struct sk_buff *skb)
260{
261 if (!(netlink_is_kernel(dst) && netlink_is_kernel(src)))
262 netlink_deliver_tap(skb);
263}
264
cd1df525
PM
265static void netlink_overrun(struct sock *sk)
266{
267 struct netlink_sock *nlk = nlk_sk(sk);
268
269 if (!(nlk->flags & NETLINK_RECV_NO_ENOBUFS)) {
270 if (!test_and_set_bit(NETLINK_CONGESTED, &nlk_sk(sk)->state)) {
271 sk->sk_err = ENOBUFS;
272 sk->sk_error_report(sk);
273 }
274 }
275 atomic_inc(&sk->sk_drops);
276}
277
278static void netlink_rcv_wake(struct sock *sk)
279{
280 struct netlink_sock *nlk = nlk_sk(sk);
281
282 if (skb_queue_empty(&sk->sk_receive_queue))
283 clear_bit(NETLINK_CONGESTED, &nlk->state);
284 if (!test_bit(NETLINK_CONGESTED, &nlk->state))
285 wake_up_interruptible(&nlk->wait);
286}
287
ccdfcc39 288#ifdef CONFIG_NETLINK_MMAP
9652e931
PM
289static bool netlink_skb_is_mmaped(const struct sk_buff *skb)
290{
291 return NETLINK_CB(skb).flags & NETLINK_SKB_MMAPED;
292}
293
f9c22888
PM
294static bool netlink_rx_is_mmaped(struct sock *sk)
295{
296 return nlk_sk(sk)->rx_ring.pg_vec != NULL;
297}
298
5fd96123
PM
299static bool netlink_tx_is_mmaped(struct sock *sk)
300{
301 return nlk_sk(sk)->tx_ring.pg_vec != NULL;
302}
303
ccdfcc39
PM
304static __pure struct page *pgvec_to_page(const void *addr)
305{
306 if (is_vmalloc_addr(addr))
307 return vmalloc_to_page(addr);
308 else
309 return virt_to_page(addr);
310}
311
312static void free_pg_vec(void **pg_vec, unsigned int order, unsigned int len)
313{
314 unsigned int i;
315
316 for (i = 0; i < len; i++) {
317 if (pg_vec[i] != NULL) {
318 if (is_vmalloc_addr(pg_vec[i]))
319 vfree(pg_vec[i]);
320 else
321 free_pages((unsigned long)pg_vec[i], order);
322 }
323 }
324 kfree(pg_vec);
325}
326
327static void *alloc_one_pg_vec_page(unsigned long order)
328{
329 void *buffer;
330 gfp_t gfp_flags = GFP_KERNEL | __GFP_COMP | __GFP_ZERO |
331 __GFP_NOWARN | __GFP_NORETRY;
332
333 buffer = (void *)__get_free_pages(gfp_flags, order);
334 if (buffer != NULL)
335 return buffer;
336
337 buffer = vzalloc((1 << order) * PAGE_SIZE);
338 if (buffer != NULL)
339 return buffer;
340
341 gfp_flags &= ~__GFP_NORETRY;
342 return (void *)__get_free_pages(gfp_flags, order);
343}
344
345static void **alloc_pg_vec(struct netlink_sock *nlk,
346 struct nl_mmap_req *req, unsigned int order)
347{
348 unsigned int block_nr = req->nm_block_nr;
349 unsigned int i;
8a849bb7 350 void **pg_vec;
ccdfcc39
PM
351
352 pg_vec = kcalloc(block_nr, sizeof(void *), GFP_KERNEL);
353 if (pg_vec == NULL)
354 return NULL;
355
356 for (i = 0; i < block_nr; i++) {
8a849bb7 357 pg_vec[i] = alloc_one_pg_vec_page(order);
ccdfcc39
PM
358 if (pg_vec[i] == NULL)
359 goto err1;
360 }
361
362 return pg_vec;
363err1:
364 free_pg_vec(pg_vec, order, block_nr);
365 return NULL;
366}
367
368static int netlink_set_ring(struct sock *sk, struct nl_mmap_req *req,
369 bool closing, bool tx_ring)
370{
371 struct netlink_sock *nlk = nlk_sk(sk);
372 struct netlink_ring *ring;
373 struct sk_buff_head *queue;
374 void **pg_vec = NULL;
375 unsigned int order = 0;
376 int err;
377
378 ring = tx_ring ? &nlk->tx_ring : &nlk->rx_ring;
379 queue = tx_ring ? &sk->sk_write_queue : &sk->sk_receive_queue;
380
381 if (!closing) {
382 if (atomic_read(&nlk->mapped))
383 return -EBUSY;
384 if (atomic_read(&ring->pending))
385 return -EBUSY;
386 }
387
388 if (req->nm_block_nr) {
389 if (ring->pg_vec != NULL)
390 return -EBUSY;
391
392 if ((int)req->nm_block_size <= 0)
393 return -EINVAL;
74e83b23 394 if (!PAGE_ALIGNED(req->nm_block_size))
ccdfcc39
PM
395 return -EINVAL;
396 if (req->nm_frame_size < NL_MMAP_HDRLEN)
397 return -EINVAL;
398 if (!IS_ALIGNED(req->nm_frame_size, NL_MMAP_MSG_ALIGNMENT))
399 return -EINVAL;
400
401 ring->frames_per_block = req->nm_block_size /
402 req->nm_frame_size;
403 if (ring->frames_per_block == 0)
404 return -EINVAL;
405 if (ring->frames_per_block * req->nm_block_nr !=
406 req->nm_frame_nr)
407 return -EINVAL;
408
409 order = get_order(req->nm_block_size);
410 pg_vec = alloc_pg_vec(nlk, req, order);
411 if (pg_vec == NULL)
412 return -ENOMEM;
413 } else {
414 if (req->nm_frame_nr)
415 return -EINVAL;
416 }
417
418 err = -EBUSY;
419 mutex_lock(&nlk->pg_vec_lock);
420 if (closing || atomic_read(&nlk->mapped) == 0) {
421 err = 0;
422 spin_lock_bh(&queue->lock);
423
424 ring->frame_max = req->nm_frame_nr - 1;
425 ring->head = 0;
426 ring->frame_size = req->nm_frame_size;
427 ring->pg_vec_pages = req->nm_block_size / PAGE_SIZE;
428
429 swap(ring->pg_vec_len, req->nm_block_nr);
430 swap(ring->pg_vec_order, order);
431 swap(ring->pg_vec, pg_vec);
432
433 __skb_queue_purge(queue);
434 spin_unlock_bh(&queue->lock);
435
436 WARN_ON(atomic_read(&nlk->mapped));
437 }
438 mutex_unlock(&nlk->pg_vec_lock);
439
440 if (pg_vec)
441 free_pg_vec(pg_vec, order, req->nm_block_nr);
442 return err;
443}
444
445static void netlink_mm_open(struct vm_area_struct *vma)
446{
447 struct file *file = vma->vm_file;
448 struct socket *sock = file->private_data;
449 struct sock *sk = sock->sk;
450
451 if (sk)
452 atomic_inc(&nlk_sk(sk)->mapped);
453}
454
455static void netlink_mm_close(struct vm_area_struct *vma)
456{
457 struct file *file = vma->vm_file;
458 struct socket *sock = file->private_data;
459 struct sock *sk = sock->sk;
460
461 if (sk)
462 atomic_dec(&nlk_sk(sk)->mapped);
463}
464
465static const struct vm_operations_struct netlink_mmap_ops = {
466 .open = netlink_mm_open,
467 .close = netlink_mm_close,
468};
469
470static int netlink_mmap(struct file *file, struct socket *sock,
471 struct vm_area_struct *vma)
472{
473 struct sock *sk = sock->sk;
474 struct netlink_sock *nlk = nlk_sk(sk);
475 struct netlink_ring *ring;
476 unsigned long start, size, expected;
477 unsigned int i;
478 int err = -EINVAL;
479
480 if (vma->vm_pgoff)
481 return -EINVAL;
482
483 mutex_lock(&nlk->pg_vec_lock);
484
485 expected = 0;
486 for (ring = &nlk->rx_ring; ring <= &nlk->tx_ring; ring++) {
487 if (ring->pg_vec == NULL)
488 continue;
489 expected += ring->pg_vec_len * ring->pg_vec_pages * PAGE_SIZE;
490 }
491
492 if (expected == 0)
493 goto out;
494
495 size = vma->vm_end - vma->vm_start;
496 if (size != expected)
497 goto out;
498
499 start = vma->vm_start;
500 for (ring = &nlk->rx_ring; ring <= &nlk->tx_ring; ring++) {
501 if (ring->pg_vec == NULL)
502 continue;
503
504 for (i = 0; i < ring->pg_vec_len; i++) {
505 struct page *page;
506 void *kaddr = ring->pg_vec[i];
507 unsigned int pg_num;
508
509 for (pg_num = 0; pg_num < ring->pg_vec_pages; pg_num++) {
510 page = pgvec_to_page(kaddr);
511 err = vm_insert_page(vma, start, page);
512 if (err < 0)
513 goto out;
514 start += PAGE_SIZE;
515 kaddr += PAGE_SIZE;
516 }
517 }
518 }
519
520 atomic_inc(&nlk->mapped);
521 vma->vm_ops = &netlink_mmap_ops;
522 err = 0;
523out:
524 mutex_unlock(&nlk->pg_vec_lock);
7cdbac71 525 return err;
ccdfcc39 526}
9652e931 527
4682a035 528static void netlink_frame_flush_dcache(const struct nl_mmap_hdr *hdr, unsigned int nm_len)
9652e931
PM
529{
530#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1
531 struct page *p_start, *p_end;
532
533 /* First page is flushed through netlink_{get,set}_status */
534 p_start = pgvec_to_page(hdr + PAGE_SIZE);
4682a035 535 p_end = pgvec_to_page((void *)hdr + NL_MMAP_HDRLEN + nm_len - 1);
9652e931
PM
536 while (p_start <= p_end) {
537 flush_dcache_page(p_start);
538 p_start++;
539 }
540#endif
541}
542
543static enum nl_mmap_status netlink_get_status(const struct nl_mmap_hdr *hdr)
544{
545 smp_rmb();
546 flush_dcache_page(pgvec_to_page(hdr));
547 return hdr->nm_status;
548}
549
550static void netlink_set_status(struct nl_mmap_hdr *hdr,
551 enum nl_mmap_status status)
552{
a18e6a18 553 smp_mb();
9652e931
PM
554 hdr->nm_status = status;
555 flush_dcache_page(pgvec_to_page(hdr));
9652e931
PM
556}
557
558static struct nl_mmap_hdr *
559__netlink_lookup_frame(const struct netlink_ring *ring, unsigned int pos)
560{
561 unsigned int pg_vec_pos, frame_off;
562
563 pg_vec_pos = pos / ring->frames_per_block;
564 frame_off = pos % ring->frames_per_block;
565
566 return ring->pg_vec[pg_vec_pos] + (frame_off * ring->frame_size);
567}
568
569static struct nl_mmap_hdr *
570netlink_lookup_frame(const struct netlink_ring *ring, unsigned int pos,
571 enum nl_mmap_status status)
572{
573 struct nl_mmap_hdr *hdr;
574
575 hdr = __netlink_lookup_frame(ring, pos);
576 if (netlink_get_status(hdr) != status)
577 return NULL;
578
579 return hdr;
580}
581
582static struct nl_mmap_hdr *
583netlink_current_frame(const struct netlink_ring *ring,
584 enum nl_mmap_status status)
585{
586 return netlink_lookup_frame(ring, ring->head, status);
587}
588
589static struct nl_mmap_hdr *
590netlink_previous_frame(const struct netlink_ring *ring,
591 enum nl_mmap_status status)
592{
593 unsigned int prev;
594
595 prev = ring->head ? ring->head - 1 : ring->frame_max;
596 return netlink_lookup_frame(ring, prev, status);
597}
598
599static void netlink_increment_head(struct netlink_ring *ring)
600{
601 ring->head = ring->head != ring->frame_max ? ring->head + 1 : 0;
602}
603
604static void netlink_forward_ring(struct netlink_ring *ring)
605{
606 unsigned int head = ring->head, pos = head;
607 const struct nl_mmap_hdr *hdr;
608
609 do {
610 hdr = __netlink_lookup_frame(ring, pos);
611 if (hdr->nm_status == NL_MMAP_STATUS_UNUSED)
612 break;
613 if (hdr->nm_status != NL_MMAP_STATUS_SKIP)
614 break;
615 netlink_increment_head(ring);
616 } while (ring->head != head);
617}
618
cd1df525
PM
619static bool netlink_dump_space(struct netlink_sock *nlk)
620{
621 struct netlink_ring *ring = &nlk->rx_ring;
622 struct nl_mmap_hdr *hdr;
623 unsigned int n;
624
625 hdr = netlink_current_frame(ring, NL_MMAP_STATUS_UNUSED);
626 if (hdr == NULL)
627 return false;
628
629 n = ring->head + ring->frame_max / 2;
630 if (n > ring->frame_max)
631 n -= ring->frame_max;
632
633 hdr = __netlink_lookup_frame(ring, n);
634
635 return hdr->nm_status == NL_MMAP_STATUS_UNUSED;
636}
637
9652e931
PM
638static unsigned int netlink_poll(struct file *file, struct socket *sock,
639 poll_table *wait)
640{
641 struct sock *sk = sock->sk;
642 struct netlink_sock *nlk = nlk_sk(sk);
643 unsigned int mask;
cd1df525 644 int err;
9652e931 645
cd1df525
PM
646 if (nlk->rx_ring.pg_vec != NULL) {
647 /* Memory mapped sockets don't call recvmsg(), so flow control
648 * for dumps is performed here. A dump is allowed to continue
649 * if at least half the ring is unused.
650 */
16b304f3 651 while (nlk->cb_running && netlink_dump_space(nlk)) {
cd1df525
PM
652 err = netlink_dump(sk);
653 if (err < 0) {
ac30ef83 654 sk->sk_err = -err;
cd1df525
PM
655 sk->sk_error_report(sk);
656 break;
657 }
658 }
659 netlink_rcv_wake(sk);
660 }
5fd96123 661
9652e931
PM
662 mask = datagram_poll(file, sock, wait);
663
664 spin_lock_bh(&sk->sk_receive_queue.lock);
665 if (nlk->rx_ring.pg_vec) {
666 netlink_forward_ring(&nlk->rx_ring);
667 if (!netlink_previous_frame(&nlk->rx_ring, NL_MMAP_STATUS_UNUSED))
668 mask |= POLLIN | POLLRDNORM;
669 }
670 spin_unlock_bh(&sk->sk_receive_queue.lock);
671
672 spin_lock_bh(&sk->sk_write_queue.lock);
673 if (nlk->tx_ring.pg_vec) {
674 if (netlink_current_frame(&nlk->tx_ring, NL_MMAP_STATUS_UNUSED))
675 mask |= POLLOUT | POLLWRNORM;
676 }
677 spin_unlock_bh(&sk->sk_write_queue.lock);
678
679 return mask;
680}
681
682static struct nl_mmap_hdr *netlink_mmap_hdr(struct sk_buff *skb)
683{
684 return (struct nl_mmap_hdr *)(skb->head - NL_MMAP_HDRLEN);
685}
686
687static void netlink_ring_setup_skb(struct sk_buff *skb, struct sock *sk,
688 struct netlink_ring *ring,
689 struct nl_mmap_hdr *hdr)
690{
691 unsigned int size;
692 void *data;
693
694 size = ring->frame_size - NL_MMAP_HDRLEN;
695 data = (void *)hdr + NL_MMAP_HDRLEN;
696
697 skb->head = data;
698 skb->data = data;
699 skb_reset_tail_pointer(skb);
700 skb->end = skb->tail + size;
701 skb->len = 0;
702
703 skb->destructor = netlink_skb_destructor;
704 NETLINK_CB(skb).flags |= NETLINK_SKB_MMAPED;
705 NETLINK_CB(skb).sk = sk;
706}
5fd96123
PM
707
708static int netlink_mmap_sendmsg(struct sock *sk, struct msghdr *msg,
709 u32 dst_portid, u32 dst_group,
710 struct sock_iocb *siocb)
711{
712 struct netlink_sock *nlk = nlk_sk(sk);
713 struct netlink_ring *ring;
714 struct nl_mmap_hdr *hdr;
715 struct sk_buff *skb;
716 unsigned int maxlen;
5fd96123
PM
717 int err = 0, len = 0;
718
5fd96123
PM
719 mutex_lock(&nlk->pg_vec_lock);
720
721 ring = &nlk->tx_ring;
722 maxlen = ring->frame_size - NL_MMAP_HDRLEN;
723
724 do {
4682a035
DM
725 unsigned int nm_len;
726
5fd96123
PM
727 hdr = netlink_current_frame(ring, NL_MMAP_STATUS_VALID);
728 if (hdr == NULL) {
729 if (!(msg->msg_flags & MSG_DONTWAIT) &&
730 atomic_read(&nlk->tx_ring.pending))
731 schedule();
732 continue;
733 }
4682a035
DM
734
735 nm_len = ACCESS_ONCE(hdr->nm_len);
736 if (nm_len > maxlen) {
5fd96123
PM
737 err = -EINVAL;
738 goto out;
739 }
740
4682a035 741 netlink_frame_flush_dcache(hdr, nm_len);
5fd96123 742
4682a035
DM
743 skb = alloc_skb(nm_len, GFP_KERNEL);
744 if (skb == NULL) {
745 err = -ENOBUFS;
746 goto out;
5fd96123 747 }
4682a035
DM
748 __skb_put(skb, nm_len);
749 memcpy(skb->data, (void *)hdr + NL_MMAP_HDRLEN, nm_len);
750 netlink_set_status(hdr, NL_MMAP_STATUS_UNUSED);
5fd96123
PM
751
752 netlink_increment_head(ring);
753
754 NETLINK_CB(skb).portid = nlk->portid;
755 NETLINK_CB(skb).dst_group = dst_group;
756 NETLINK_CB(skb).creds = siocb->scm->creds;
757
758 err = security_netlink_send(sk, skb);
759 if (err) {
760 kfree_skb(skb);
761 goto out;
762 }
763
764 if (unlikely(dst_group)) {
765 atomic_inc(&skb->users);
766 netlink_broadcast(sk, skb, dst_portid, dst_group,
767 GFP_KERNEL);
768 }
769 err = netlink_unicast(sk, skb, dst_portid,
770 msg->msg_flags & MSG_DONTWAIT);
771 if (err < 0)
772 goto out;
773 len += err;
774
775 } while (hdr != NULL ||
776 (!(msg->msg_flags & MSG_DONTWAIT) &&
777 atomic_read(&nlk->tx_ring.pending)));
778
779 if (len > 0)
780 err = len;
781out:
782 mutex_unlock(&nlk->pg_vec_lock);
783 return err;
784}
f9c22888
PM
785
786static void netlink_queue_mmaped_skb(struct sock *sk, struct sk_buff *skb)
787{
788 struct nl_mmap_hdr *hdr;
789
790 hdr = netlink_mmap_hdr(skb);
791 hdr->nm_len = skb->len;
792 hdr->nm_group = NETLINK_CB(skb).dst_group;
793 hdr->nm_pid = NETLINK_CB(skb).creds.pid;
1bf9310a
ND
794 hdr->nm_uid = from_kuid(sk_user_ns(sk), NETLINK_CB(skb).creds.uid);
795 hdr->nm_gid = from_kgid(sk_user_ns(sk), NETLINK_CB(skb).creds.gid);
4682a035 796 netlink_frame_flush_dcache(hdr, hdr->nm_len);
f9c22888
PM
797 netlink_set_status(hdr, NL_MMAP_STATUS_VALID);
798
799 NETLINK_CB(skb).flags |= NETLINK_SKB_DELIVERED;
800 kfree_skb(skb);
801}
802
803static void netlink_ring_set_copied(struct sock *sk, struct sk_buff *skb)
804{
805 struct netlink_sock *nlk = nlk_sk(sk);
806 struct netlink_ring *ring = &nlk->rx_ring;
807 struct nl_mmap_hdr *hdr;
808
809 spin_lock_bh(&sk->sk_receive_queue.lock);
810 hdr = netlink_current_frame(ring, NL_MMAP_STATUS_UNUSED);
811 if (hdr == NULL) {
812 spin_unlock_bh(&sk->sk_receive_queue.lock);
813 kfree_skb(skb);
cd1df525 814 netlink_overrun(sk);
f9c22888
PM
815 return;
816 }
817 netlink_increment_head(ring);
818 __skb_queue_tail(&sk->sk_receive_queue, skb);
819 spin_unlock_bh(&sk->sk_receive_queue.lock);
820
821 hdr->nm_len = skb->len;
822 hdr->nm_group = NETLINK_CB(skb).dst_group;
823 hdr->nm_pid = NETLINK_CB(skb).creds.pid;
1bf9310a
ND
824 hdr->nm_uid = from_kuid(sk_user_ns(sk), NETLINK_CB(skb).creds.uid);
825 hdr->nm_gid = from_kgid(sk_user_ns(sk), NETLINK_CB(skb).creds.gid);
f9c22888
PM
826 netlink_set_status(hdr, NL_MMAP_STATUS_COPY);
827}
828
ccdfcc39 829#else /* CONFIG_NETLINK_MMAP */
9652e931 830#define netlink_skb_is_mmaped(skb) false
f9c22888 831#define netlink_rx_is_mmaped(sk) false
5fd96123 832#define netlink_tx_is_mmaped(sk) false
ccdfcc39 833#define netlink_mmap sock_no_mmap
9652e931 834#define netlink_poll datagram_poll
5fd96123 835#define netlink_mmap_sendmsg(sk, msg, dst_portid, dst_group, siocb) 0
ccdfcc39
PM
836#endif /* CONFIG_NETLINK_MMAP */
837
cf0a018a
PM
838static void netlink_skb_destructor(struct sk_buff *skb)
839{
9652e931
PM
840#ifdef CONFIG_NETLINK_MMAP
841 struct nl_mmap_hdr *hdr;
842 struct netlink_ring *ring;
843 struct sock *sk;
844
845 /* If a packet from the kernel to userspace was freed because of an
846 * error without being delivered to userspace, the kernel must reset
847 * the status. In the direction userspace to kernel, the status is
848 * always reset here after the packet was processed and freed.
849 */
850 if (netlink_skb_is_mmaped(skb)) {
851 hdr = netlink_mmap_hdr(skb);
852 sk = NETLINK_CB(skb).sk;
853
5fd96123
PM
854 if (NETLINK_CB(skb).flags & NETLINK_SKB_TX) {
855 netlink_set_status(hdr, NL_MMAP_STATUS_UNUSED);
856 ring = &nlk_sk(sk)->tx_ring;
857 } else {
858 if (!(NETLINK_CB(skb).flags & NETLINK_SKB_DELIVERED)) {
859 hdr->nm_len = 0;
860 netlink_set_status(hdr, NL_MMAP_STATUS_VALID);
861 }
862 ring = &nlk_sk(sk)->rx_ring;
9652e931 863 }
9652e931
PM
864
865 WARN_ON(atomic_read(&ring->pending) == 0);
866 atomic_dec(&ring->pending);
867 sock_put(sk);
868
5e71d9d7 869 skb->head = NULL;
9652e931
PM
870 }
871#endif
c05cdb1b 872 if (is_vmalloc_addr(skb->head)) {
3a36515f
PN
873 if (!skb->cloned ||
874 !atomic_dec_return(&(skb_shinfo(skb)->dataref)))
875 vfree(skb->head);
876
c05cdb1b
PNA
877 skb->head = NULL;
878 }
9652e931
PM
879 if (skb->sk != NULL)
880 sock_rfree(skb);
cf0a018a
PM
881}
882
883static void netlink_skb_set_owner_r(struct sk_buff *skb, struct sock *sk)
884{
885 WARN_ON(skb->sk != NULL);
886 skb->sk = sk;
887 skb->destructor = netlink_skb_destructor;
888 atomic_add(skb->truesize, &sk->sk_rmem_alloc);
889 sk_mem_charge(sk, skb->truesize);
890}
891
1da177e4
LT
892static void netlink_sock_destruct(struct sock *sk)
893{
3f660d66
HX
894 struct netlink_sock *nlk = nlk_sk(sk);
895
16b304f3
PS
896 if (nlk->cb_running) {
897 if (nlk->cb.done)
898 nlk->cb.done(&nlk->cb);
6dc878a8 899
16b304f3
PS
900 module_put(nlk->cb.module);
901 kfree_skb(nlk->cb.skb);
3f660d66
HX
902 }
903
1da177e4 904 skb_queue_purge(&sk->sk_receive_queue);
ccdfcc39
PM
905#ifdef CONFIG_NETLINK_MMAP
906 if (1) {
907 struct nl_mmap_req req;
908
909 memset(&req, 0, sizeof(req));
910 if (nlk->rx_ring.pg_vec)
911 netlink_set_ring(sk, &req, true, false);
912 memset(&req, 0, sizeof(req));
913 if (nlk->tx_ring.pg_vec)
914 netlink_set_ring(sk, &req, true, true);
915 }
916#endif /* CONFIG_NETLINK_MMAP */
1da177e4
LT
917
918 if (!sock_flag(sk, SOCK_DEAD)) {
6ac552fd 919 printk(KERN_ERR "Freeing alive netlink socket %p\n", sk);
1da177e4
LT
920 return;
921 }
547b792c
IJ
922
923 WARN_ON(atomic_read(&sk->sk_rmem_alloc));
924 WARN_ON(atomic_read(&sk->sk_wmem_alloc));
925 WARN_ON(nlk_sk(sk)->groups);
1da177e4
LT
926}
927
6ac552fd
PM
928/* This lock without WQ_FLAG_EXCLUSIVE is good on UP and it is _very_ bad on
929 * SMP. Look, when several writers sleep and reader wakes them up, all but one
1da177e4
LT
930 * immediately hit write lock and grab all the cpus. Exclusive sleep solves
931 * this, _but_ remember, it adds useless work on UP machines.
932 */
933
d136f1bd 934void netlink_table_grab(void)
9a429c49 935 __acquires(nl_table_lock)
1da177e4 936{
d136f1bd
JB
937 might_sleep();
938
6abd219c 939 write_lock_irq(&nl_table_lock);
1da177e4
LT
940
941 if (atomic_read(&nl_table_users)) {
942 DECLARE_WAITQUEUE(wait, current);
943
944 add_wait_queue_exclusive(&nl_table_wait, &wait);
6ac552fd 945 for (;;) {
1da177e4
LT
946 set_current_state(TASK_UNINTERRUPTIBLE);
947 if (atomic_read(&nl_table_users) == 0)
948 break;
6abd219c 949 write_unlock_irq(&nl_table_lock);
1da177e4 950 schedule();
6abd219c 951 write_lock_irq(&nl_table_lock);
1da177e4
LT
952 }
953
954 __set_current_state(TASK_RUNNING);
955 remove_wait_queue(&nl_table_wait, &wait);
956 }
957}
958
d136f1bd 959void netlink_table_ungrab(void)
9a429c49 960 __releases(nl_table_lock)
1da177e4 961{
6abd219c 962 write_unlock_irq(&nl_table_lock);
1da177e4
LT
963 wake_up(&nl_table_wait);
964}
965
6ac552fd 966static inline void
1da177e4
LT
967netlink_lock_table(void)
968{
969 /* read_lock() synchronizes us to netlink_table_grab */
970
971 read_lock(&nl_table_lock);
972 atomic_inc(&nl_table_users);
973 read_unlock(&nl_table_lock);
974}
975
6ac552fd 976static inline void
1da177e4
LT
977netlink_unlock_table(void)
978{
979 if (atomic_dec_and_test(&nl_table_users))
980 wake_up(&nl_table_wait);
981}
982
e341694e 983struct netlink_compare_arg
1da177e4 984{
e341694e
TG
985 struct net *net;
986 u32 portid;
987};
1da177e4 988
e341694e 989static bool netlink_compare(void *ptr, void *arg)
1da177e4 990{
e341694e
TG
991 struct netlink_compare_arg *x = arg;
992 struct sock *sk = ptr;
1da177e4 993
e341694e
TG
994 return nlk_sk(sk)->portid == x->portid &&
995 net_eq(sock_net(sk), x->net);
1da177e4
LT
996}
997
e341694e
TG
998static struct sock *__netlink_lookup(struct netlink_table *table, u32 portid,
999 struct net *net)
1da177e4 1000{
e341694e
TG
1001 struct netlink_compare_arg arg = {
1002 .net = net,
1003 .portid = portid,
1004 };
1005 u32 hash;
1da177e4 1006
e341694e 1007 hash = rhashtable_hashfn(&table->hash, &portid, sizeof(portid));
1da177e4 1008
e341694e
TG
1009 return rhashtable_lookup_compare(&table->hash, hash,
1010 &netlink_compare, &arg);
1da177e4
LT
1011}
1012
e341694e 1013static struct sock *netlink_lookup(struct net *net, int protocol, u32 portid)
1da177e4 1014{
e341694e
TG
1015 struct netlink_table *table = &nl_table[protocol];
1016 struct sock *sk;
1da177e4 1017
78fd1d0a 1018 read_lock(&nl_table_lock);
e341694e
TG
1019 rcu_read_lock();
1020 sk = __netlink_lookup(table, portid, net);
1021 if (sk)
1022 sock_hold(sk);
1023 rcu_read_unlock();
78fd1d0a 1024 read_unlock(&nl_table_lock);
1da177e4 1025
e341694e 1026 return sk;
1da177e4
LT
1027}
1028
90ddc4f0 1029static const struct proto_ops netlink_ops;
1da177e4 1030
4277a083
PM
1031static void
1032netlink_update_listeners(struct sock *sk)
1033{
1034 struct netlink_table *tbl = &nl_table[sk->sk_protocol];
4277a083
PM
1035 unsigned long mask;
1036 unsigned int i;
6d772ac5
ED
1037 struct listeners *listeners;
1038
1039 listeners = nl_deref_protected(tbl->listeners);
1040 if (!listeners)
1041 return;
4277a083 1042
b4ff4f04 1043 for (i = 0; i < NLGRPLONGS(tbl->groups); i++) {
4277a083 1044 mask = 0;
b67bfe0d 1045 sk_for_each_bound(sk, &tbl->mc_list) {
b4ff4f04
JB
1046 if (i < NLGRPLONGS(nlk_sk(sk)->ngroups))
1047 mask |= nlk_sk(sk)->groups[i];
1048 }
6d772ac5 1049 listeners->masks[i] = mask;
4277a083
PM
1050 }
1051 /* this function is only called with the netlink table "grabbed", which
1052 * makes sure updates are visible before bind or setsockopt return. */
1053}
1054
15e47304 1055static int netlink_insert(struct sock *sk, struct net *net, u32 portid)
1da177e4 1056{
da12c90e 1057 struct netlink_table *table = &nl_table[sk->sk_protocol];
1da177e4 1058 int err = -EADDRINUSE;
1da177e4 1059
e341694e
TG
1060 mutex_lock(&nl_sk_hash_lock);
1061 if (__netlink_lookup(table, portid, net))
1da177e4
LT
1062 goto err;
1063
1064 err = -EBUSY;
15e47304 1065 if (nlk_sk(sk)->portid)
1da177e4
LT
1066 goto err;
1067
1068 err = -ENOMEM;
e341694e 1069 if (BITS_PER_LONG > 32 && unlikely(table->hash.nelems >= UINT_MAX))
1da177e4
LT
1070 goto err;
1071
15e47304 1072 nlk_sk(sk)->portid = portid;
e341694e 1073 sock_hold(sk);
6eba8224 1074 rhashtable_insert(&table->hash, &nlk_sk(sk)->node);
1da177e4 1075 err = 0;
1da177e4 1076err:
e341694e 1077 mutex_unlock(&nl_sk_hash_lock);
1da177e4
LT
1078 return err;
1079}
1080
1081static void netlink_remove(struct sock *sk)
1082{
e341694e
TG
1083 struct netlink_table *table;
1084
1085 mutex_lock(&nl_sk_hash_lock);
1086 table = &nl_table[sk->sk_protocol];
6eba8224 1087 if (rhashtable_remove(&table->hash, &nlk_sk(sk)->node)) {
e341694e
TG
1088 WARN_ON(atomic_read(&sk->sk_refcnt) == 1);
1089 __sock_put(sk);
1090 }
1091 mutex_unlock(&nl_sk_hash_lock);
1092
1da177e4 1093 netlink_table_grab();
f7fa9b10 1094 if (nlk_sk(sk)->subscriptions)
1da177e4
LT
1095 __sk_del_bind_node(sk);
1096 netlink_table_ungrab();
1097}
1098
1099static struct proto netlink_proto = {
1100 .name = "NETLINK",
1101 .owner = THIS_MODULE,
1102 .obj_size = sizeof(struct netlink_sock),
1103};
1104
1b8d7ae4
EB
1105static int __netlink_create(struct net *net, struct socket *sock,
1106 struct mutex *cb_mutex, int protocol)
1da177e4
LT
1107{
1108 struct sock *sk;
1109 struct netlink_sock *nlk;
ab33a171
PM
1110
1111 sock->ops = &netlink_ops;
1112
6257ff21 1113 sk = sk_alloc(net, PF_NETLINK, GFP_KERNEL, &netlink_proto);
ab33a171
PM
1114 if (!sk)
1115 return -ENOMEM;
1116
1117 sock_init_data(sock, sk);
1118
1119 nlk = nlk_sk(sk);
658cb354 1120 if (cb_mutex) {
ffa4d721 1121 nlk->cb_mutex = cb_mutex;
658cb354 1122 } else {
ffa4d721
PM
1123 nlk->cb_mutex = &nlk->cb_def_mutex;
1124 mutex_init(nlk->cb_mutex);
1125 }
ab33a171 1126 init_waitqueue_head(&nlk->wait);
ccdfcc39
PM
1127#ifdef CONFIG_NETLINK_MMAP
1128 mutex_init(&nlk->pg_vec_lock);
1129#endif
ab33a171
PM
1130
1131 sk->sk_destruct = netlink_sock_destruct;
1132 sk->sk_protocol = protocol;
1133 return 0;
1134}
1135
3f378b68
EP
1136static int netlink_create(struct net *net, struct socket *sock, int protocol,
1137 int kern)
ab33a171
PM
1138{
1139 struct module *module = NULL;
af65bdfc 1140 struct mutex *cb_mutex;
f7fa9b10 1141 struct netlink_sock *nlk;
4f520900
RGB
1142 int (*bind)(int group);
1143 void (*unbind)(int group);
ab33a171 1144 int err = 0;
1da177e4
LT
1145
1146 sock->state = SS_UNCONNECTED;
1147
1148 if (sock->type != SOCK_RAW && sock->type != SOCK_DGRAM)
1149 return -ESOCKTNOSUPPORT;
1150
6ac552fd 1151 if (protocol < 0 || protocol >= MAX_LINKS)
1da177e4
LT
1152 return -EPROTONOSUPPORT;
1153
77247bbb 1154 netlink_lock_table();
95a5afca 1155#ifdef CONFIG_MODULES
ab33a171 1156 if (!nl_table[protocol].registered) {
77247bbb 1157 netlink_unlock_table();
4fdb3bb7 1158 request_module("net-pf-%d-proto-%d", PF_NETLINK, protocol);
77247bbb 1159 netlink_lock_table();
4fdb3bb7 1160 }
ab33a171
PM
1161#endif
1162 if (nl_table[protocol].registered &&
1163 try_module_get(nl_table[protocol].module))
1164 module = nl_table[protocol].module;
974c37e9
AD
1165 else
1166 err = -EPROTONOSUPPORT;
af65bdfc 1167 cb_mutex = nl_table[protocol].cb_mutex;
03292745 1168 bind = nl_table[protocol].bind;
4f520900 1169 unbind = nl_table[protocol].unbind;
77247bbb 1170 netlink_unlock_table();
4fdb3bb7 1171
974c37e9
AD
1172 if (err < 0)
1173 goto out;
1174
6ac552fd
PM
1175 err = __netlink_create(net, sock, cb_mutex, protocol);
1176 if (err < 0)
f7fa9b10
PM
1177 goto out_module;
1178
6f756a8c 1179 local_bh_disable();
c1fd3b94 1180 sock_prot_inuse_add(net, &netlink_proto, 1);
6f756a8c
DM
1181 local_bh_enable();
1182
f7fa9b10 1183 nlk = nlk_sk(sock->sk);
f7fa9b10 1184 nlk->module = module;
03292745 1185 nlk->netlink_bind = bind;
4f520900 1186 nlk->netlink_unbind = unbind;
ab33a171
PM
1187out:
1188 return err;
1da177e4 1189
ab33a171
PM
1190out_module:
1191 module_put(module);
1192 goto out;
1da177e4
LT
1193}
1194
1195static int netlink_release(struct socket *sock)
1196{
1197 struct sock *sk = sock->sk;
1198 struct netlink_sock *nlk;
1199
1200 if (!sk)
1201 return 0;
1202
1203 netlink_remove(sk);
ac57b3a9 1204 sock_orphan(sk);
1da177e4
LT
1205 nlk = nlk_sk(sk);
1206
3f660d66
HX
1207 /*
1208 * OK. Socket is unlinked, any packets that arrive now
1209 * will be purged.
1210 */
1da177e4 1211
1da177e4
LT
1212 sock->sk = NULL;
1213 wake_up_interruptible_all(&nlk->wait);
1214
1215 skb_queue_purge(&sk->sk_write_queue);
1216
15e47304 1217 if (nlk->portid) {
1da177e4 1218 struct netlink_notify n = {
3b1e0a65 1219 .net = sock_net(sk),
1da177e4 1220 .protocol = sk->sk_protocol,
15e47304 1221 .portid = nlk->portid,
1da177e4 1222 };
e041c683
AS
1223 atomic_notifier_call_chain(&netlink_chain,
1224 NETLINK_URELEASE, &n);
746fac4d 1225 }
4fdb3bb7 1226
5e7c001c 1227 module_put(nlk->module);
4fdb3bb7 1228
4277a083 1229 netlink_table_grab();
aed81560 1230 if (netlink_is_kernel(sk)) {
869e58f8
DL
1231 BUG_ON(nl_table[sk->sk_protocol].registered == 0);
1232 if (--nl_table[sk->sk_protocol].registered == 0) {
6d772ac5
ED
1233 struct listeners *old;
1234
1235 old = nl_deref_protected(nl_table[sk->sk_protocol].listeners);
1236 RCU_INIT_POINTER(nl_table[sk->sk_protocol].listeners, NULL);
1237 kfree_rcu(old, rcu);
869e58f8 1238 nl_table[sk->sk_protocol].module = NULL;
9785e10a 1239 nl_table[sk->sk_protocol].bind = NULL;
4f520900 1240 nl_table[sk->sk_protocol].unbind = NULL;
9785e10a 1241 nl_table[sk->sk_protocol].flags = 0;
869e58f8
DL
1242 nl_table[sk->sk_protocol].registered = 0;
1243 }
658cb354 1244 } else if (nlk->subscriptions) {
4277a083 1245 netlink_update_listeners(sk);
658cb354 1246 }
4277a083 1247 netlink_table_ungrab();
77247bbb 1248
f7fa9b10
PM
1249 kfree(nlk->groups);
1250 nlk->groups = NULL;
1251
3755810c 1252 local_bh_disable();
c1fd3b94 1253 sock_prot_inuse_add(sock_net(sk), &netlink_proto, -1);
3755810c 1254 local_bh_enable();
1da177e4
LT
1255 sock_put(sk);
1256 return 0;
1257}
1258
1259static int netlink_autobind(struct socket *sock)
1260{
1261 struct sock *sk = sock->sk;
3b1e0a65 1262 struct net *net = sock_net(sk);
da12c90e 1263 struct netlink_table *table = &nl_table[sk->sk_protocol];
15e47304 1264 s32 portid = task_tgid_vnr(current);
1da177e4
LT
1265 int err;
1266 static s32 rover = -4097;
1267
1268retry:
1269 cond_resched();
78fd1d0a 1270 netlink_table_grab();
e341694e
TG
1271 rcu_read_lock();
1272 if (__netlink_lookup(table, portid, net)) {
1273 /* Bind collision, search negative portid values. */
1274 portid = rover--;
1275 if (rover > -4097)
1276 rover = -4097;
1277 rcu_read_unlock();
78fd1d0a 1278 netlink_table_ungrab();
e341694e 1279 goto retry;
1da177e4 1280 }
e341694e 1281 rcu_read_unlock();
78fd1d0a 1282 netlink_table_ungrab();
1da177e4 1283
15e47304 1284 err = netlink_insert(sk, net, portid);
1da177e4
LT
1285 if (err == -EADDRINUSE)
1286 goto retry;
d470e3b4
DM
1287
1288 /* If 2 threads race to autobind, that is fine. */
1289 if (err == -EBUSY)
1290 err = 0;
1291
1292 return err;
1da177e4
LT
1293}
1294
aa4cf945
EB
1295/**
1296 * __netlink_ns_capable - General netlink message capability test
1297 * @nsp: NETLINK_CB of the socket buffer holding a netlink command from userspace.
1298 * @user_ns: The user namespace of the capability to use
1299 * @cap: The capability to use
1300 *
1301 * Test to see if the opener of the socket we received the message
1302 * from had when the netlink socket was created and the sender of the
1303 * message has has the capability @cap in the user namespace @user_ns.
1304 */
1305bool __netlink_ns_capable(const struct netlink_skb_parms *nsp,
1306 struct user_namespace *user_ns, int cap)
1307{
2d7a85f4
EB
1308 return ((nsp->flags & NETLINK_SKB_DST) ||
1309 file_ns_capable(nsp->sk->sk_socket->file, user_ns, cap)) &&
1310 ns_capable(user_ns, cap);
aa4cf945
EB
1311}
1312EXPORT_SYMBOL(__netlink_ns_capable);
1313
1314/**
1315 * netlink_ns_capable - General netlink message capability test
1316 * @skb: socket buffer holding a netlink command from userspace
1317 * @user_ns: The user namespace of the capability to use
1318 * @cap: The capability to use
1319 *
1320 * Test to see if the opener of the socket we received the message
1321 * from had when the netlink socket was created and the sender of the
1322 * message has has the capability @cap in the user namespace @user_ns.
1323 */
1324bool netlink_ns_capable(const struct sk_buff *skb,
1325 struct user_namespace *user_ns, int cap)
1326{
1327 return __netlink_ns_capable(&NETLINK_CB(skb), user_ns, cap);
1328}
1329EXPORT_SYMBOL(netlink_ns_capable);
1330
1331/**
1332 * netlink_capable - Netlink global message capability test
1333 * @skb: socket buffer holding a netlink command from userspace
1334 * @cap: The capability to use
1335 *
1336 * Test to see if the opener of the socket we received the message
1337 * from had when the netlink socket was created and the sender of the
1338 * message has has the capability @cap in all user namespaces.
1339 */
1340bool netlink_capable(const struct sk_buff *skb, int cap)
1341{
1342 return netlink_ns_capable(skb, &init_user_ns, cap);
1343}
1344EXPORT_SYMBOL(netlink_capable);
1345
1346/**
1347 * netlink_net_capable - Netlink network namespace message capability test
1348 * @skb: socket buffer holding a netlink command from userspace
1349 * @cap: The capability to use
1350 *
1351 * Test to see if the opener of the socket we received the message
1352 * from had when the netlink socket was created and the sender of the
1353 * message has has the capability @cap over the network namespace of
1354 * the socket we received the message from.
1355 */
1356bool netlink_net_capable(const struct sk_buff *skb, int cap)
1357{
1358 return netlink_ns_capable(skb, sock_net(skb->sk)->user_ns, cap);
1359}
1360EXPORT_SYMBOL(netlink_net_capable);
1361
5187cd05 1362static inline int netlink_allowed(const struct socket *sock, unsigned int flag)
746fac4d 1363{
9785e10a 1364 return (nl_table[sock->sk->sk_protocol].flags & flag) ||
df008c91 1365 ns_capable(sock_net(sock->sk)->user_ns, CAP_NET_ADMIN);
746fac4d 1366}
1da177e4 1367
f7fa9b10
PM
1368static void
1369netlink_update_subscriptions(struct sock *sk, unsigned int subscriptions)
1370{
1371 struct netlink_sock *nlk = nlk_sk(sk);
1372
1373 if (nlk->subscriptions && !subscriptions)
1374 __sk_del_bind_node(sk);
1375 else if (!nlk->subscriptions && subscriptions)
1376 sk_add_bind_node(sk, &nl_table[sk->sk_protocol].mc_list);
1377 nlk->subscriptions = subscriptions;
1378}
1379
b4ff4f04 1380static int netlink_realloc_groups(struct sock *sk)
513c2500
PM
1381{
1382 struct netlink_sock *nlk = nlk_sk(sk);
1383 unsigned int groups;
b4ff4f04 1384 unsigned long *new_groups;
513c2500
PM
1385 int err = 0;
1386
b4ff4f04
JB
1387 netlink_table_grab();
1388
513c2500 1389 groups = nl_table[sk->sk_protocol].groups;
b4ff4f04 1390 if (!nl_table[sk->sk_protocol].registered) {
513c2500 1391 err = -ENOENT;
b4ff4f04
JB
1392 goto out_unlock;
1393 }
513c2500 1394
b4ff4f04
JB
1395 if (nlk->ngroups >= groups)
1396 goto out_unlock;
513c2500 1397
b4ff4f04
JB
1398 new_groups = krealloc(nlk->groups, NLGRPSZ(groups), GFP_ATOMIC);
1399 if (new_groups == NULL) {
1400 err = -ENOMEM;
1401 goto out_unlock;
1402 }
6ac552fd 1403 memset((char *)new_groups + NLGRPSZ(nlk->ngroups), 0,
b4ff4f04
JB
1404 NLGRPSZ(groups) - NLGRPSZ(nlk->ngroups));
1405
1406 nlk->groups = new_groups;
513c2500 1407 nlk->ngroups = groups;
b4ff4f04
JB
1408 out_unlock:
1409 netlink_table_ungrab();
1410 return err;
513c2500
PM
1411}
1412
02c81ab9
JB
1413static void netlink_undo_bind(int group, long unsigned int groups,
1414 struct netlink_sock *nlk)
4f520900
RGB
1415{
1416 int undo;
1417
1418 if (!nlk->netlink_unbind)
1419 return;
1420
1421 for (undo = 0; undo < group; undo++)
6251edd9 1422 if (test_bit(undo, &groups))
4f520900
RGB
1423 nlk->netlink_unbind(undo);
1424}
1425
6ac552fd
PM
1426static int netlink_bind(struct socket *sock, struct sockaddr *addr,
1427 int addr_len)
1da177e4
LT
1428{
1429 struct sock *sk = sock->sk;
3b1e0a65 1430 struct net *net = sock_net(sk);
1da177e4
LT
1431 struct netlink_sock *nlk = nlk_sk(sk);
1432 struct sockaddr_nl *nladdr = (struct sockaddr_nl *)addr;
1433 int err;
4f520900 1434 long unsigned int groups = nladdr->nl_groups;
746fac4d 1435
4e4b5376
HFS
1436 if (addr_len < sizeof(struct sockaddr_nl))
1437 return -EINVAL;
1438
1da177e4
LT
1439 if (nladdr->nl_family != AF_NETLINK)
1440 return -EINVAL;
1441
1442 /* Only superuser is allowed to listen multicasts */
4f520900 1443 if (groups) {
5187cd05 1444 if (!netlink_allowed(sock, NL_CFG_F_NONROOT_RECV))
513c2500 1445 return -EPERM;
b4ff4f04
JB
1446 err = netlink_realloc_groups(sk);
1447 if (err)
1448 return err;
513c2500 1449 }
1da177e4 1450
4f520900 1451 if (nlk->portid)
15e47304 1452 if (nladdr->nl_pid != nlk->portid)
1da177e4 1453 return -EINVAL;
4f520900
RGB
1454
1455 if (nlk->netlink_bind && groups) {
1456 int group;
1457
1458 for (group = 0; group < nlk->ngroups; group++) {
1459 if (!test_bit(group, &groups))
1460 continue;
1461 err = nlk->netlink_bind(group);
1462 if (!err)
1463 continue;
02c81ab9 1464 netlink_undo_bind(group, groups, nlk);
4f520900
RGB
1465 return err;
1466 }
1467 }
1468
1469 if (!nlk->portid) {
1da177e4 1470 err = nladdr->nl_pid ?
b4b51029 1471 netlink_insert(sk, net, nladdr->nl_pid) :
1da177e4 1472 netlink_autobind(sock);
4f520900 1473 if (err) {
02c81ab9 1474 netlink_undo_bind(nlk->ngroups, groups, nlk);
1da177e4 1475 return err;
4f520900 1476 }
1da177e4
LT
1477 }
1478
4f520900 1479 if (!groups && (nlk->groups == NULL || !(u32)nlk->groups[0]))
1da177e4
LT
1480 return 0;
1481
1482 netlink_table_grab();
f7fa9b10 1483 netlink_update_subscriptions(sk, nlk->subscriptions +
4f520900 1484 hweight32(groups) -
746fac4d 1485 hweight32(nlk->groups[0]));
4f520900 1486 nlk->groups[0] = (nlk->groups[0] & ~0xffffffffUL) | groups;
4277a083 1487 netlink_update_listeners(sk);
1da177e4
LT
1488 netlink_table_ungrab();
1489
1490 return 0;
1491}
1492
1493static int netlink_connect(struct socket *sock, struct sockaddr *addr,
1494 int alen, int flags)
1495{
1496 int err = 0;
1497 struct sock *sk = sock->sk;
1498 struct netlink_sock *nlk = nlk_sk(sk);
6ac552fd 1499 struct sockaddr_nl *nladdr = (struct sockaddr_nl *)addr;
1da177e4 1500
6503d961
CG
1501 if (alen < sizeof(addr->sa_family))
1502 return -EINVAL;
1503
1da177e4
LT
1504 if (addr->sa_family == AF_UNSPEC) {
1505 sk->sk_state = NETLINK_UNCONNECTED;
15e47304 1506 nlk->dst_portid = 0;
d629b836 1507 nlk->dst_group = 0;
1da177e4
LT
1508 return 0;
1509 }
1510 if (addr->sa_family != AF_NETLINK)
1511 return -EINVAL;
1512
46833a86 1513 if ((nladdr->nl_groups || nladdr->nl_pid) &&
5187cd05 1514 !netlink_allowed(sock, NL_CFG_F_NONROOT_SEND))
1da177e4
LT
1515 return -EPERM;
1516
15e47304 1517 if (!nlk->portid)
1da177e4
LT
1518 err = netlink_autobind(sock);
1519
1520 if (err == 0) {
1521 sk->sk_state = NETLINK_CONNECTED;
15e47304 1522 nlk->dst_portid = nladdr->nl_pid;
d629b836 1523 nlk->dst_group = ffs(nladdr->nl_groups);
1da177e4
LT
1524 }
1525
1526 return err;
1527}
1528
6ac552fd
PM
1529static int netlink_getname(struct socket *sock, struct sockaddr *addr,
1530 int *addr_len, int peer)
1da177e4
LT
1531{
1532 struct sock *sk = sock->sk;
1533 struct netlink_sock *nlk = nlk_sk(sk);
13cfa97b 1534 DECLARE_SOCKADDR(struct sockaddr_nl *, nladdr, addr);
746fac4d 1535
1da177e4
LT
1536 nladdr->nl_family = AF_NETLINK;
1537 nladdr->nl_pad = 0;
1538 *addr_len = sizeof(*nladdr);
1539
1540 if (peer) {
15e47304 1541 nladdr->nl_pid = nlk->dst_portid;
d629b836 1542 nladdr->nl_groups = netlink_group_mask(nlk->dst_group);
1da177e4 1543 } else {
15e47304 1544 nladdr->nl_pid = nlk->portid;
513c2500 1545 nladdr->nl_groups = nlk->groups ? nlk->groups[0] : 0;
1da177e4
LT
1546 }
1547 return 0;
1548}
1549
15e47304 1550static struct sock *netlink_getsockbyportid(struct sock *ssk, u32 portid)
1da177e4 1551{
1da177e4
LT
1552 struct sock *sock;
1553 struct netlink_sock *nlk;
1554
15e47304 1555 sock = netlink_lookup(sock_net(ssk), ssk->sk_protocol, portid);
1da177e4
LT
1556 if (!sock)
1557 return ERR_PTR(-ECONNREFUSED);
1558
1559 /* Don't bother queuing skb if kernel socket has no input function */
1560 nlk = nlk_sk(sock);
cd40b7d3 1561 if (sock->sk_state == NETLINK_CONNECTED &&
15e47304 1562 nlk->dst_portid != nlk_sk(ssk)->portid) {
1da177e4
LT
1563 sock_put(sock);
1564 return ERR_PTR(-ECONNREFUSED);
1565 }
1566 return sock;
1567}
1568
1569struct sock *netlink_getsockbyfilp(struct file *filp)
1570{
496ad9aa 1571 struct inode *inode = file_inode(filp);
1da177e4
LT
1572 struct sock *sock;
1573
1574 if (!S_ISSOCK(inode->i_mode))
1575 return ERR_PTR(-ENOTSOCK);
1576
1577 sock = SOCKET_I(inode)->sk;
1578 if (sock->sk_family != AF_NETLINK)
1579 return ERR_PTR(-EINVAL);
1580
1581 sock_hold(sock);
1582 return sock;
1583}
1584
3a36515f
PN
1585static struct sk_buff *netlink_alloc_large_skb(unsigned int size,
1586 int broadcast)
c05cdb1b
PNA
1587{
1588 struct sk_buff *skb;
1589 void *data;
1590
3a36515f 1591 if (size <= NLMSG_GOODSIZE || broadcast)
c05cdb1b
PNA
1592 return alloc_skb(size, GFP_KERNEL);
1593
3a36515f
PN
1594 size = SKB_DATA_ALIGN(size) +
1595 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
c05cdb1b
PNA
1596
1597 data = vmalloc(size);
1598 if (data == NULL)
3a36515f 1599 return NULL;
c05cdb1b 1600
3a36515f
PN
1601 skb = build_skb(data, size);
1602 if (skb == NULL)
1603 vfree(data);
1604 else {
1605 skb->head_frag = 0;
1606 skb->destructor = netlink_skb_destructor;
1607 }
c05cdb1b
PNA
1608
1609 return skb;
c05cdb1b
PNA
1610}
1611
1da177e4
LT
1612/*
1613 * Attach a skb to a netlink socket.
1614 * The caller must hold a reference to the destination socket. On error, the
1615 * reference is dropped. The skb is not send to the destination, just all
1616 * all error checks are performed and memory in the queue is reserved.
1617 * Return values:
1618 * < 0: error. skb freed, reference to sock dropped.
1619 * 0: continue
1620 * 1: repeat lookup - reference dropped while waiting for socket memory.
1621 */
9457afee 1622int netlink_attachskb(struct sock *sk, struct sk_buff *skb,
c3d8d1e3 1623 long *timeo, struct sock *ssk)
1da177e4
LT
1624{
1625 struct netlink_sock *nlk;
1626
1627 nlk = nlk_sk(sk);
1628
5fd96123
PM
1629 if ((atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf ||
1630 test_bit(NETLINK_CONGESTED, &nlk->state)) &&
1631 !netlink_skb_is_mmaped(skb)) {
1da177e4 1632 DECLARE_WAITQUEUE(wait, current);
c3d8d1e3 1633 if (!*timeo) {
aed81560 1634 if (!ssk || netlink_is_kernel(ssk))
1da177e4
LT
1635 netlink_overrun(sk);
1636 sock_put(sk);
1637 kfree_skb(skb);
1638 return -EAGAIN;
1639 }
1640
1641 __set_current_state(TASK_INTERRUPTIBLE);
1642 add_wait_queue(&nlk->wait, &wait);
1643
1644 if ((atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf ||
cd967e05 1645 test_bit(NETLINK_CONGESTED, &nlk->state)) &&
1da177e4 1646 !sock_flag(sk, SOCK_DEAD))
c3d8d1e3 1647 *timeo = schedule_timeout(*timeo);
1da177e4
LT
1648
1649 __set_current_state(TASK_RUNNING);
1650 remove_wait_queue(&nlk->wait, &wait);
1651 sock_put(sk);
1652
1653 if (signal_pending(current)) {
1654 kfree_skb(skb);
c3d8d1e3 1655 return sock_intr_errno(*timeo);
1da177e4
LT
1656 }
1657 return 1;
1658 }
cf0a018a 1659 netlink_skb_set_owner_r(skb, sk);
1da177e4
LT
1660 return 0;
1661}
1662
4a7e7c2a 1663static int __netlink_sendskb(struct sock *sk, struct sk_buff *skb)
1da177e4 1664{
1da177e4
LT
1665 int len = skb->len;
1666
bcbde0d4
DB
1667 netlink_deliver_tap(skb);
1668
f9c22888
PM
1669#ifdef CONFIG_NETLINK_MMAP
1670 if (netlink_skb_is_mmaped(skb))
1671 netlink_queue_mmaped_skb(sk, skb);
1672 else if (netlink_rx_is_mmaped(sk))
1673 netlink_ring_set_copied(sk, skb);
1674 else
1675#endif /* CONFIG_NETLINK_MMAP */
1676 skb_queue_tail(&sk->sk_receive_queue, skb);
676d2369 1677 sk->sk_data_ready(sk);
4a7e7c2a
ED
1678 return len;
1679}
1680
1681int netlink_sendskb(struct sock *sk, struct sk_buff *skb)
1682{
1683 int len = __netlink_sendskb(sk, skb);
1684
1da177e4
LT
1685 sock_put(sk);
1686 return len;
1687}
1688
1689void netlink_detachskb(struct sock *sk, struct sk_buff *skb)
1690{
1691 kfree_skb(skb);
1692 sock_put(sk);
1693}
1694
b57ef81f 1695static struct sk_buff *netlink_trim(struct sk_buff *skb, gfp_t allocation)
1da177e4
LT
1696{
1697 int delta;
1698
1298ca46 1699 WARN_ON(skb->sk != NULL);
5fd96123
PM
1700 if (netlink_skb_is_mmaped(skb))
1701 return skb;
1da177e4 1702
4305b541 1703 delta = skb->end - skb->tail;
c05cdb1b 1704 if (is_vmalloc_addr(skb->head) || delta * 2 < skb->truesize)
1da177e4
LT
1705 return skb;
1706
1707 if (skb_shared(skb)) {
1708 struct sk_buff *nskb = skb_clone(skb, allocation);
1709 if (!nskb)
1710 return skb;
8460c00f 1711 consume_skb(skb);
1da177e4
LT
1712 skb = nskb;
1713 }
1714
1715 if (!pskb_expand_head(skb, 0, -delta, allocation))
1716 skb->truesize -= delta;
1717
1718 return skb;
1719}
1720
3fbc2905
EB
1721static int netlink_unicast_kernel(struct sock *sk, struct sk_buff *skb,
1722 struct sock *ssk)
cd40b7d3
DL
1723{
1724 int ret;
1725 struct netlink_sock *nlk = nlk_sk(sk);
1726
1727 ret = -ECONNREFUSED;
1728 if (nlk->netlink_rcv != NULL) {
1729 ret = skb->len;
cf0a018a 1730 netlink_skb_set_owner_r(skb, sk);
e32123e5 1731 NETLINK_CB(skb).sk = ssk;
73bfd370 1732 netlink_deliver_tap_kernel(sk, ssk, skb);
cd40b7d3 1733 nlk->netlink_rcv(skb);
bfb253c9
ED
1734 consume_skb(skb);
1735 } else {
1736 kfree_skb(skb);
cd40b7d3 1737 }
cd40b7d3
DL
1738 sock_put(sk);
1739 return ret;
1740}
1741
1742int netlink_unicast(struct sock *ssk, struct sk_buff *skb,
15e47304 1743 u32 portid, int nonblock)
1da177e4
LT
1744{
1745 struct sock *sk;
1746 int err;
1747 long timeo;
1748
1749 skb = netlink_trim(skb, gfp_any());
1750
1751 timeo = sock_sndtimeo(ssk, nonblock);
1752retry:
15e47304 1753 sk = netlink_getsockbyportid(ssk, portid);
1da177e4
LT
1754 if (IS_ERR(sk)) {
1755 kfree_skb(skb);
1756 return PTR_ERR(sk);
1757 }
cd40b7d3 1758 if (netlink_is_kernel(sk))
3fbc2905 1759 return netlink_unicast_kernel(sk, skb, ssk);
cd40b7d3 1760
b1153f29 1761 if (sk_filter(sk, skb)) {
84874607 1762 err = skb->len;
b1153f29
SH
1763 kfree_skb(skb);
1764 sock_put(sk);
1765 return err;
1766 }
1767
9457afee 1768 err = netlink_attachskb(sk, skb, &timeo, ssk);
1da177e4
LT
1769 if (err == 1)
1770 goto retry;
1771 if (err)
1772 return err;
1773
7ee015e0 1774 return netlink_sendskb(sk, skb);
1da177e4 1775}
6ac552fd 1776EXPORT_SYMBOL(netlink_unicast);
1da177e4 1777
f9c22888
PM
1778struct sk_buff *netlink_alloc_skb(struct sock *ssk, unsigned int size,
1779 u32 dst_portid, gfp_t gfp_mask)
1780{
1781#ifdef CONFIG_NETLINK_MMAP
1782 struct sock *sk = NULL;
1783 struct sk_buff *skb;
1784 struct netlink_ring *ring;
1785 struct nl_mmap_hdr *hdr;
1786 unsigned int maxlen;
1787
1788 sk = netlink_getsockbyportid(ssk, dst_portid);
1789 if (IS_ERR(sk))
1790 goto out;
1791
1792 ring = &nlk_sk(sk)->rx_ring;
1793 /* fast-path without atomic ops for common case: non-mmaped receiver */
1794 if (ring->pg_vec == NULL)
1795 goto out_put;
1796
aae9f0e2
TG
1797 if (ring->frame_size - NL_MMAP_HDRLEN < size)
1798 goto out_put;
1799
f9c22888
PM
1800 skb = alloc_skb_head(gfp_mask);
1801 if (skb == NULL)
1802 goto err1;
1803
1804 spin_lock_bh(&sk->sk_receive_queue.lock);
1805 /* check again under lock */
1806 if (ring->pg_vec == NULL)
1807 goto out_free;
1808
aae9f0e2 1809 /* check again under lock */
f9c22888
PM
1810 maxlen = ring->frame_size - NL_MMAP_HDRLEN;
1811 if (maxlen < size)
1812 goto out_free;
1813
1814 netlink_forward_ring(ring);
1815 hdr = netlink_current_frame(ring, NL_MMAP_STATUS_UNUSED);
1816 if (hdr == NULL)
1817 goto err2;
1818 netlink_ring_setup_skb(skb, sk, ring, hdr);
1819 netlink_set_status(hdr, NL_MMAP_STATUS_RESERVED);
1820 atomic_inc(&ring->pending);
1821 netlink_increment_head(ring);
1822
1823 spin_unlock_bh(&sk->sk_receive_queue.lock);
1824 return skb;
1825
1826err2:
1827 kfree_skb(skb);
1828 spin_unlock_bh(&sk->sk_receive_queue.lock);
cd1df525 1829 netlink_overrun(sk);
f9c22888
PM
1830err1:
1831 sock_put(sk);
1832 return NULL;
1833
1834out_free:
1835 kfree_skb(skb);
1836 spin_unlock_bh(&sk->sk_receive_queue.lock);
1837out_put:
1838 sock_put(sk);
1839out:
1840#endif
1841 return alloc_skb(size, gfp_mask);
1842}
1843EXPORT_SYMBOL_GPL(netlink_alloc_skb);
1844
4277a083
PM
1845int netlink_has_listeners(struct sock *sk, unsigned int group)
1846{
1847 int res = 0;
5c398dc8 1848 struct listeners *listeners;
4277a083 1849
aed81560 1850 BUG_ON(!netlink_is_kernel(sk));
b4ff4f04
JB
1851
1852 rcu_read_lock();
1853 listeners = rcu_dereference(nl_table[sk->sk_protocol].listeners);
1854
6d772ac5 1855 if (listeners && group - 1 < nl_table[sk->sk_protocol].groups)
5c398dc8 1856 res = test_bit(group - 1, listeners->masks);
b4ff4f04
JB
1857
1858 rcu_read_unlock();
1859
4277a083
PM
1860 return res;
1861}
1862EXPORT_SYMBOL_GPL(netlink_has_listeners);
1863
b57ef81f 1864static int netlink_broadcast_deliver(struct sock *sk, struct sk_buff *skb)
1da177e4
LT
1865{
1866 struct netlink_sock *nlk = nlk_sk(sk);
1867
1868 if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf &&
cd967e05 1869 !test_bit(NETLINK_CONGESTED, &nlk->state)) {
cf0a018a 1870 netlink_skb_set_owner_r(skb, sk);
4a7e7c2a 1871 __netlink_sendskb(sk, skb);
2c645800 1872 return atomic_read(&sk->sk_rmem_alloc) > (sk->sk_rcvbuf >> 1);
1da177e4
LT
1873 }
1874 return -1;
1875}
1876
1877struct netlink_broadcast_data {
1878 struct sock *exclude_sk;
b4b51029 1879 struct net *net;
15e47304 1880 u32 portid;
1da177e4
LT
1881 u32 group;
1882 int failure;
ff491a73 1883 int delivery_failure;
1da177e4
LT
1884 int congested;
1885 int delivered;
7d877f3b 1886 gfp_t allocation;
1da177e4 1887 struct sk_buff *skb, *skb2;
910a7e90
EB
1888 int (*tx_filter)(struct sock *dsk, struct sk_buff *skb, void *data);
1889 void *tx_data;
1da177e4
LT
1890};
1891
46c9521f
RR
1892static void do_one_broadcast(struct sock *sk,
1893 struct netlink_broadcast_data *p)
1da177e4
LT
1894{
1895 struct netlink_sock *nlk = nlk_sk(sk);
1896 int val;
1897
1898 if (p->exclude_sk == sk)
46c9521f 1899 return;
1da177e4 1900
15e47304 1901 if (nlk->portid == p->portid || p->group - 1 >= nlk->ngroups ||
f7fa9b10 1902 !test_bit(p->group - 1, nlk->groups))
46c9521f 1903 return;
1da177e4 1904
878628fb 1905 if (!net_eq(sock_net(sk), p->net))
46c9521f 1906 return;
b4b51029 1907
1da177e4
LT
1908 if (p->failure) {
1909 netlink_overrun(sk);
46c9521f 1910 return;
1da177e4
LT
1911 }
1912
1913 sock_hold(sk);
1914 if (p->skb2 == NULL) {
68acc024 1915 if (skb_shared(p->skb)) {
1da177e4
LT
1916 p->skb2 = skb_clone(p->skb, p->allocation);
1917 } else {
68acc024
TC
1918 p->skb2 = skb_get(p->skb);
1919 /*
1920 * skb ownership may have been set when
1921 * delivered to a previous socket.
1922 */
1923 skb_orphan(p->skb2);
1da177e4
LT
1924 }
1925 }
1926 if (p->skb2 == NULL) {
1927 netlink_overrun(sk);
1928 /* Clone failed. Notify ALL listeners. */
1929 p->failure = 1;
be0c22a4
PNA
1930 if (nlk->flags & NETLINK_BROADCAST_SEND_ERROR)
1931 p->delivery_failure = 1;
910a7e90
EB
1932 } else if (p->tx_filter && p->tx_filter(sk, p->skb2, p->tx_data)) {
1933 kfree_skb(p->skb2);
1934 p->skb2 = NULL;
b1153f29
SH
1935 } else if (sk_filter(sk, p->skb2)) {
1936 kfree_skb(p->skb2);
1937 p->skb2 = NULL;
1da177e4
LT
1938 } else if ((val = netlink_broadcast_deliver(sk, p->skb2)) < 0) {
1939 netlink_overrun(sk);
be0c22a4
PNA
1940 if (nlk->flags & NETLINK_BROADCAST_SEND_ERROR)
1941 p->delivery_failure = 1;
1da177e4
LT
1942 } else {
1943 p->congested |= val;
1944 p->delivered = 1;
1945 p->skb2 = NULL;
1946 }
1947 sock_put(sk);
1da177e4
LT
1948}
1949
15e47304 1950int netlink_broadcast_filtered(struct sock *ssk, struct sk_buff *skb, u32 portid,
910a7e90
EB
1951 u32 group, gfp_t allocation,
1952 int (*filter)(struct sock *dsk, struct sk_buff *skb, void *data),
1953 void *filter_data)
1da177e4 1954{
3b1e0a65 1955 struct net *net = sock_net(ssk);
1da177e4 1956 struct netlink_broadcast_data info;
1da177e4
LT
1957 struct sock *sk;
1958
1959 skb = netlink_trim(skb, allocation);
1960
1961 info.exclude_sk = ssk;
b4b51029 1962 info.net = net;
15e47304 1963 info.portid = portid;
1da177e4
LT
1964 info.group = group;
1965 info.failure = 0;
ff491a73 1966 info.delivery_failure = 0;
1da177e4
LT
1967 info.congested = 0;
1968 info.delivered = 0;
1969 info.allocation = allocation;
1970 info.skb = skb;
1971 info.skb2 = NULL;
910a7e90
EB
1972 info.tx_filter = filter;
1973 info.tx_data = filter_data;
1da177e4
LT
1974
1975 /* While we sleep in clone, do not allow the socket list to change */
1976
1977 netlink_lock_table();
1978
b67bfe0d 1979 sk_for_each_bound(sk, &nl_table[ssk->sk_protocol].mc_list)
1da177e4
LT
1980 do_one_broadcast(sk, &info);
1981
70d4bf6d 1982 consume_skb(skb);
aa1c6a6f 1983
1da177e4
LT
1984 netlink_unlock_table();
1985
70d4bf6d
NH
1986 if (info.delivery_failure) {
1987 kfree_skb(info.skb2);
ff491a73 1988 return -ENOBUFS;
658cb354
ED
1989 }
1990 consume_skb(info.skb2);
ff491a73 1991
1da177e4
LT
1992 if (info.delivered) {
1993 if (info.congested && (allocation & __GFP_WAIT))
1994 yield();
1995 return 0;
1996 }
1da177e4
LT
1997 return -ESRCH;
1998}
910a7e90
EB
1999EXPORT_SYMBOL(netlink_broadcast_filtered);
2000
15e47304 2001int netlink_broadcast(struct sock *ssk, struct sk_buff *skb, u32 portid,
910a7e90
EB
2002 u32 group, gfp_t allocation)
2003{
15e47304 2004 return netlink_broadcast_filtered(ssk, skb, portid, group, allocation,
910a7e90
EB
2005 NULL, NULL);
2006}
6ac552fd 2007EXPORT_SYMBOL(netlink_broadcast);
1da177e4
LT
2008
2009struct netlink_set_err_data {
2010 struct sock *exclude_sk;
15e47304 2011 u32 portid;
1da177e4
LT
2012 u32 group;
2013 int code;
2014};
2015
b57ef81f 2016static int do_one_set_err(struct sock *sk, struct netlink_set_err_data *p)
1da177e4
LT
2017{
2018 struct netlink_sock *nlk = nlk_sk(sk);
1a50307b 2019 int ret = 0;
1da177e4
LT
2020
2021 if (sk == p->exclude_sk)
2022 goto out;
2023
09ad9bc7 2024 if (!net_eq(sock_net(sk), sock_net(p->exclude_sk)))
b4b51029
EB
2025 goto out;
2026
15e47304 2027 if (nlk->portid == p->portid || p->group - 1 >= nlk->ngroups ||
f7fa9b10 2028 !test_bit(p->group - 1, nlk->groups))
1da177e4
LT
2029 goto out;
2030
1a50307b
PNA
2031 if (p->code == ENOBUFS && nlk->flags & NETLINK_RECV_NO_ENOBUFS) {
2032 ret = 1;
2033 goto out;
2034 }
2035
1da177e4
LT
2036 sk->sk_err = p->code;
2037 sk->sk_error_report(sk);
2038out:
1a50307b 2039 return ret;
1da177e4
LT
2040}
2041
4843b93c
PNA
2042/**
2043 * netlink_set_err - report error to broadcast listeners
2044 * @ssk: the kernel netlink socket, as returned by netlink_kernel_create()
15e47304 2045 * @portid: the PORTID of a process that we want to skip (if any)
840e93f2 2046 * @group: the broadcast group that will notice the error
4843b93c 2047 * @code: error code, must be negative (as usual in kernelspace)
1a50307b
PNA
2048 *
2049 * This function returns the number of broadcast listeners that have set the
2050 * NETLINK_RECV_NO_ENOBUFS socket option.
4843b93c 2051 */
15e47304 2052int netlink_set_err(struct sock *ssk, u32 portid, u32 group, int code)
1da177e4
LT
2053{
2054 struct netlink_set_err_data info;
1da177e4 2055 struct sock *sk;
1a50307b 2056 int ret = 0;
1da177e4
LT
2057
2058 info.exclude_sk = ssk;
15e47304 2059 info.portid = portid;
1da177e4 2060 info.group = group;
4843b93c
PNA
2061 /* sk->sk_err wants a positive error value */
2062 info.code = -code;
1da177e4
LT
2063
2064 read_lock(&nl_table_lock);
2065
b67bfe0d 2066 sk_for_each_bound(sk, &nl_table[ssk->sk_protocol].mc_list)
1a50307b 2067 ret += do_one_set_err(sk, &info);
1da177e4
LT
2068
2069 read_unlock(&nl_table_lock);
1a50307b 2070 return ret;
1da177e4 2071}
dd5b6ce6 2072EXPORT_SYMBOL(netlink_set_err);
1da177e4 2073
84659eb5
JB
2074/* must be called with netlink table grabbed */
2075static void netlink_update_socket_mc(struct netlink_sock *nlk,
2076 unsigned int group,
2077 int is_new)
2078{
2079 int old, new = !!is_new, subscriptions;
2080
2081 old = test_bit(group - 1, nlk->groups);
2082 subscriptions = nlk->subscriptions - old + new;
2083 if (new)
2084 __set_bit(group - 1, nlk->groups);
2085 else
2086 __clear_bit(group - 1, nlk->groups);
2087 netlink_update_subscriptions(&nlk->sk, subscriptions);
2088 netlink_update_listeners(&nlk->sk);
2089}
2090
9a4595bc 2091static int netlink_setsockopt(struct socket *sock, int level, int optname,
b7058842 2092 char __user *optval, unsigned int optlen)
9a4595bc
PM
2093{
2094 struct sock *sk = sock->sk;
2095 struct netlink_sock *nlk = nlk_sk(sk);
eb496534
JB
2096 unsigned int val = 0;
2097 int err;
9a4595bc
PM
2098
2099 if (level != SOL_NETLINK)
2100 return -ENOPROTOOPT;
2101
ccdfcc39
PM
2102 if (optname != NETLINK_RX_RING && optname != NETLINK_TX_RING &&
2103 optlen >= sizeof(int) &&
eb496534 2104 get_user(val, (unsigned int __user *)optval))
9a4595bc
PM
2105 return -EFAULT;
2106
2107 switch (optname) {
2108 case NETLINK_PKTINFO:
2109 if (val)
2110 nlk->flags |= NETLINK_RECV_PKTINFO;
2111 else
2112 nlk->flags &= ~NETLINK_RECV_PKTINFO;
2113 err = 0;
2114 break;
2115 case NETLINK_ADD_MEMBERSHIP:
2116 case NETLINK_DROP_MEMBERSHIP: {
5187cd05 2117 if (!netlink_allowed(sock, NL_CFG_F_NONROOT_RECV))
9a4595bc 2118 return -EPERM;
b4ff4f04
JB
2119 err = netlink_realloc_groups(sk);
2120 if (err)
2121 return err;
9a4595bc
PM
2122 if (!val || val - 1 >= nlk->ngroups)
2123 return -EINVAL;
7774d5e0 2124 if (optname == NETLINK_ADD_MEMBERSHIP && nlk->netlink_bind) {
4f520900
RGB
2125 err = nlk->netlink_bind(val);
2126 if (err)
2127 return err;
2128 }
9a4595bc 2129 netlink_table_grab();
84659eb5
JB
2130 netlink_update_socket_mc(nlk, val,
2131 optname == NETLINK_ADD_MEMBERSHIP);
9a4595bc 2132 netlink_table_ungrab();
7774d5e0
RGB
2133 if (optname == NETLINK_DROP_MEMBERSHIP && nlk->netlink_unbind)
2134 nlk->netlink_unbind(val);
03292745 2135
9a4595bc
PM
2136 err = 0;
2137 break;
2138 }
be0c22a4
PNA
2139 case NETLINK_BROADCAST_ERROR:
2140 if (val)
2141 nlk->flags |= NETLINK_BROADCAST_SEND_ERROR;
2142 else
2143 nlk->flags &= ~NETLINK_BROADCAST_SEND_ERROR;
2144 err = 0;
2145 break;
38938bfe
PNA
2146 case NETLINK_NO_ENOBUFS:
2147 if (val) {
2148 nlk->flags |= NETLINK_RECV_NO_ENOBUFS;
cd967e05 2149 clear_bit(NETLINK_CONGESTED, &nlk->state);
38938bfe 2150 wake_up_interruptible(&nlk->wait);
658cb354 2151 } else {
38938bfe 2152 nlk->flags &= ~NETLINK_RECV_NO_ENOBUFS;
658cb354 2153 }
38938bfe
PNA
2154 err = 0;
2155 break;
ccdfcc39
PM
2156#ifdef CONFIG_NETLINK_MMAP
2157 case NETLINK_RX_RING:
2158 case NETLINK_TX_RING: {
2159 struct nl_mmap_req req;
2160
2161 /* Rings might consume more memory than queue limits, so require
2162 * CAP_NET_ADMIN.
2163 */
2164 if (!capable(CAP_NET_ADMIN))
2165 return -EPERM;
2166 if (optlen < sizeof(req))
2167 return -EINVAL;
2168 if (copy_from_user(&req, optval, sizeof(req)))
2169 return -EFAULT;
2170 err = netlink_set_ring(sk, &req, false,
2171 optname == NETLINK_TX_RING);
2172 break;
2173 }
2174#endif /* CONFIG_NETLINK_MMAP */
9a4595bc
PM
2175 default:
2176 err = -ENOPROTOOPT;
2177 }
2178 return err;
2179}
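The options handled above are what userspace drives with plain setsockopt() at the SOL_NETLINK level. A small user-space sketch (not part of this file) that joins a multicast group and opts out of ENOBUFS errors; NETLINK_ROUTE and RTNLGRP_LINK are only an example protocol/group pair, and SOL_NETLINK is assumed to come from the libc socket headers.

#include <unistd.h>
#include <sys/socket.h>
#include <linux/netlink.h>
#include <linux/rtnetlink.h>

static int join_link_notifications(void)
{
	unsigned int grp = RTNLGRP_LINK;
	unsigned int on = 1;
	int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);

	if (fd < 0)
		return -1;
	if (setsockopt(fd, SOL_NETLINK, NETLINK_ADD_MEMBERSHIP,
		       &grp, sizeof(grp)) < 0) {
		close(fd);
		return -1;
	}
	/* keep the socket readable after an overrun instead of ENOBUFS */
	setsockopt(fd, SOL_NETLINK, NETLINK_NO_ENOBUFS, &on, sizeof(on));
	return fd;
}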
2180
2181static int netlink_getsockopt(struct socket *sock, int level, int optname,
746fac4d 2182 char __user *optval, int __user *optlen)
9a4595bc
PM
2183{
2184 struct sock *sk = sock->sk;
2185 struct netlink_sock *nlk = nlk_sk(sk);
2186 int len, val, err;
2187
2188 if (level != SOL_NETLINK)
2189 return -ENOPROTOOPT;
2190
2191 if (get_user(len, optlen))
2192 return -EFAULT;
2193 if (len < 0)
2194 return -EINVAL;
2195
2196 switch (optname) {
2197 case NETLINK_PKTINFO:
2198 if (len < sizeof(int))
2199 return -EINVAL;
2200 len = sizeof(int);
2201 val = nlk->flags & NETLINK_RECV_PKTINFO ? 1 : 0;
a27b58fe
HC
2202 if (put_user(len, optlen) ||
2203 put_user(val, optval))
2204 return -EFAULT;
9a4595bc
PM
2205 err = 0;
2206 break;
be0c22a4
PNA
2207 case NETLINK_BROADCAST_ERROR:
2208 if (len < sizeof(int))
2209 return -EINVAL;
2210 len = sizeof(int);
2211 val = nlk->flags & NETLINK_BROADCAST_SEND_ERROR ? 1 : 0;
2212 if (put_user(len, optlen) ||
2213 put_user(val, optval))
2214 return -EFAULT;
2215 err = 0;
2216 break;
38938bfe
PNA
2217 case NETLINK_NO_ENOBUFS:
2218 if (len < sizeof(int))
2219 return -EINVAL;
2220 len = sizeof(int);
2221 val = nlk->flags & NETLINK_RECV_NO_ENOBUFS ? 1 : 0;
2222 if (put_user(len, optlen) ||
2223 put_user(val, optval))
2224 return -EFAULT;
2225 err = 0;
2226 break;
9a4595bc
PM
2227 default:
2228 err = -ENOPROTOOPT;
2229 }
2230 return err;
2231}
2232
2233static void netlink_cmsg_recv_pktinfo(struct msghdr *msg, struct sk_buff *skb)
2234{
2235 struct nl_pktinfo info;
2236
2237 info.group = NETLINK_CB(skb).dst_group;
2238 put_cmsg(msg, SOL_NETLINK, NETLINK_PKTINFO, sizeof(info), &info);
2239}
2240
1da177e4
LT
2241static int netlink_sendmsg(struct kiocb *kiocb, struct socket *sock,
2242 struct msghdr *msg, size_t len)
2243{
2244 struct sock_iocb *siocb = kiocb_to_siocb(kiocb);
2245 struct sock *sk = sock->sk;
2246 struct netlink_sock *nlk = nlk_sk(sk);
342dfc30 2247 DECLARE_SOCKADDR(struct sockaddr_nl *, addr, msg->msg_name);
15e47304 2248 u32 dst_portid;
d629b836 2249 u32 dst_group;
1da177e4
LT
2250 struct sk_buff *skb;
2251 int err;
2252 struct scm_cookie scm;
2d7a85f4 2253 u32 netlink_skb_flags = 0;
1da177e4
LT
2254
2255 if (msg->msg_flags&MSG_OOB)
2256 return -EOPNOTSUPP;
2257
16e57262 2258 if (NULL == siocb->scm)
1da177e4 2259 siocb->scm = &scm;
16e57262 2260
e0e3cea4 2261 err = scm_send(sock, msg, siocb->scm, true);
1da177e4
LT
2262 if (err < 0)
2263 return err;
2264
2265 if (msg->msg_namelen) {
b47030c7 2266 err = -EINVAL;
1da177e4 2267 if (addr->nl_family != AF_NETLINK)
b47030c7 2268 goto out;
15e47304 2269 dst_portid = addr->nl_pid;
d629b836 2270 dst_group = ffs(addr->nl_groups);
b47030c7 2271 err = -EPERM;
15e47304 2272 if ((dst_group || dst_portid) &&
5187cd05 2273 !netlink_allowed(sock, NL_CFG_F_NONROOT_SEND))
b47030c7 2274 goto out;
2d7a85f4 2275 netlink_skb_flags |= NETLINK_SKB_DST;
1da177e4 2276 } else {
15e47304 2277 dst_portid = nlk->dst_portid;
d629b836 2278 dst_group = nlk->dst_group;
1da177e4
LT
2279 }
2280
15e47304 2281 if (!nlk->portid) {
1da177e4
LT
2282 err = netlink_autobind(sock);
2283 if (err)
2284 goto out;
2285 }
2286
5fd96123 2287 if (netlink_tx_is_mmaped(sk) &&
c0371da6 2288 msg->msg_iter.iov->iov_base == NULL) {
5fd96123
PM
2289 err = netlink_mmap_sendmsg(sk, msg, dst_portid, dst_group,
2290 siocb);
2291 goto out;
2292 }
2293
1da177e4
LT
2294 err = -EMSGSIZE;
2295 if (len > sk->sk_sndbuf - 32)
2296 goto out;
2297 err = -ENOBUFS;
3a36515f 2298 skb = netlink_alloc_large_skb(len, dst_group);
6ac552fd 2299 if (skb == NULL)
1da177e4
LT
2300 goto out;
2301
15e47304 2302 NETLINK_CB(skb).portid = nlk->portid;
d629b836 2303 NETLINK_CB(skb).dst_group = dst_group;
dbe9a417 2304 NETLINK_CB(skb).creds = siocb->scm->creds;
2d7a85f4 2305 NETLINK_CB(skb).flags = netlink_skb_flags;
1da177e4 2306
1da177e4 2307 err = -EFAULT;
6ce8e9ce 2308 if (memcpy_from_msg(skb_put(skb, len), msg, len)) {
1da177e4
LT
2309 kfree_skb(skb);
2310 goto out;
2311 }
2312
2313 err = security_netlink_send(sk, skb);
2314 if (err) {
2315 kfree_skb(skb);
2316 goto out;
2317 }
2318
d629b836 2319 if (dst_group) {
1da177e4 2320 atomic_inc(&skb->users);
15e47304 2321 netlink_broadcast(sk, skb, dst_portid, dst_group, GFP_KERNEL);
1da177e4 2322 }
15e47304 2323 err = netlink_unicast(sk, skb, dst_portid, msg->msg_flags&MSG_DONTWAIT);
1da177e4
LT
2324
2325out:
b47030c7 2326 scm_destroy(siocb->scm);
1da177e4
LT
2327 return err;
2328}
2329
2330static int netlink_recvmsg(struct kiocb *kiocb, struct socket *sock,
2331 struct msghdr *msg, size_t len,
2332 int flags)
2333{
2334 struct sock_iocb *siocb = kiocb_to_siocb(kiocb);
2335 struct scm_cookie scm;
2336 struct sock *sk = sock->sk;
2337 struct netlink_sock *nlk = nlk_sk(sk);
2338 int noblock = flags&MSG_DONTWAIT;
2339 size_t copied;
68d6ac6d 2340 struct sk_buff *skb, *data_skb;
b44d211e 2341 int err, ret;
1da177e4
LT
2342
2343 if (flags&MSG_OOB)
2344 return -EOPNOTSUPP;
2345
2346 copied = 0;
2347
6ac552fd
PM
2348 skb = skb_recv_datagram(sk, flags, noblock, &err);
2349 if (skb == NULL)
1da177e4
LT
2350 goto out;
2351
68d6ac6d
JB
2352 data_skb = skb;
2353
1dacc76d
JB
2354#ifdef CONFIG_COMPAT_NETLINK_MESSAGES
2355 if (unlikely(skb_shinfo(skb)->frag_list)) {
1dacc76d 2356 /*
68d6ac6d
JB
2357 * If this skb has a frag_list, it means that we
2358 * will have to use the frag_list skb's data for compat tasks
2359 * and the regular skb's data for normal (non-compat) tasks.
1dacc76d 2360 *
68d6ac6d
JB
2361 * If we need to send the compat skb, assign it to the
2362 * 'data_skb' variable so that it will be used below for data
2363 * copying. We keep 'skb' for everything else, including
2364 * freeing both later.
1dacc76d 2365 */
68d6ac6d
JB
2366 if (flags & MSG_CMSG_COMPAT)
2367 data_skb = skb_shinfo(skb)->frag_list;
1dacc76d
JB
2368 }
2369#endif
2370
9063e21f
ED
2371 /* Record the max length of recvmsg() calls for future allocations */
2372 nlk->max_recvmsg_len = max(nlk->max_recvmsg_len, len);
2373 nlk->max_recvmsg_len = min_t(size_t, nlk->max_recvmsg_len,
2374 16384);
2375
68d6ac6d 2376 copied = data_skb->len;
1da177e4
LT
2377 if (len < copied) {
2378 msg->msg_flags |= MSG_TRUNC;
2379 copied = len;
2380 }
2381
68d6ac6d 2382 skb_reset_transport_header(data_skb);
51f3d02b 2383 err = skb_copy_datagram_msg(data_skb, 0, msg, copied);
1da177e4
LT
2384
2385 if (msg->msg_name) {
342dfc30 2386 DECLARE_SOCKADDR(struct sockaddr_nl *, addr, msg->msg_name);
1da177e4
LT
2387 addr->nl_family = AF_NETLINK;
2388 addr->nl_pad = 0;
15e47304 2389 addr->nl_pid = NETLINK_CB(skb).portid;
d629b836 2390 addr->nl_groups = netlink_group_mask(NETLINK_CB(skb).dst_group);
1da177e4
LT
2391 msg->msg_namelen = sizeof(*addr);
2392 }
2393
cc9a06cd
PM
2394 if (nlk->flags & NETLINK_RECV_PKTINFO)
2395 netlink_cmsg_recv_pktinfo(msg, skb);
2396
1da177e4
LT
2397 if (NULL == siocb->scm) {
2398 memset(&scm, 0, sizeof(scm));
2399 siocb->scm = &scm;
2400 }
2401 siocb->scm->creds = *NETLINK_CREDS(skb);
188ccb55 2402 if (flags & MSG_TRUNC)
68d6ac6d 2403 copied = data_skb->len;
daa3766e 2404
1da177e4
LT
2405 skb_free_datagram(sk, skb);
2406
16b304f3
PS
2407 if (nlk->cb_running &&
2408 atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf / 2) {
b44d211e
AV
2409 ret = netlink_dump(sk);
2410 if (ret) {
ac30ef83 2411 sk->sk_err = -ret;
b44d211e
AV
2412 sk->sk_error_report(sk);
2413 }
2414 }
1da177e4
LT
2415
2416 scm_recv(sock, msg, siocb->scm, flags);
1da177e4
LT
2417out:
2418 netlink_rcv_wake(sk);
2419 return err ? : copied;
2420}
2421
676d2369 2422static void netlink_data_ready(struct sock *sk)
1da177e4 2423{
cd40b7d3 2424 BUG();
1da177e4
LT
2425}
2426
2427/*
746fac4d 2428 * We export these functions to other modules. They provide a
1da177e4
LT
2429 * complete set of kernel non-blocking support for message
2430 * queueing.
2431 */
2432
2433struct sock *
9f00d977
PNA
2434__netlink_kernel_create(struct net *net, int unit, struct module *module,
2435 struct netlink_kernel_cfg *cfg)
1da177e4
LT
2436{
2437 struct socket *sock;
2438 struct sock *sk;
77247bbb 2439 struct netlink_sock *nlk;
5c398dc8 2440 struct listeners *listeners = NULL;
a31f2d17
PNA
2441 struct mutex *cb_mutex = cfg ? cfg->cb_mutex : NULL;
2442 unsigned int groups;
1da177e4 2443
fab2caf6 2444 BUG_ON(!nl_table);
1da177e4 2445
6ac552fd 2446 if (unit < 0 || unit >= MAX_LINKS)
1da177e4
LT
2447 return NULL;
2448
2449 if (sock_create_lite(PF_NETLINK, SOCK_DGRAM, unit, &sock))
2450 return NULL;
2451
23fe1866
PE
2452 /*
2453 * We have to just have a reference on the net from sk, but don't
2454 * get_net it. Besides, we cannot get and then put the net here.
2455 * So we create one inside init_net and then move it to net.
2456 */
2457
2458 if (__netlink_create(&init_net, sock, cb_mutex, unit) < 0)
2459 goto out_sock_release_nosk;
2460
2461 sk = sock->sk;
edf02087 2462 sk_change_net(sk, net);
4fdb3bb7 2463
a31f2d17 2464 if (!cfg || cfg->groups < 32)
4277a083 2465 groups = 32;
a31f2d17
PNA
2466 else
2467 groups = cfg->groups;
4277a083 2468
5c398dc8 2469 listeners = kzalloc(sizeof(*listeners) + NLGRPSZ(groups), GFP_KERNEL);
4277a083
PM
2470 if (!listeners)
2471 goto out_sock_release;
2472
1da177e4 2473 sk->sk_data_ready = netlink_data_ready;
a31f2d17
PNA
2474 if (cfg && cfg->input)
2475 nlk_sk(sk)->netlink_rcv = cfg->input;
1da177e4 2476
b4b51029 2477 if (netlink_insert(sk, net, 0))
77247bbb 2478 goto out_sock_release;
4fdb3bb7 2479
77247bbb
PM
2480 nlk = nlk_sk(sk);
2481 nlk->flags |= NETLINK_KERNEL_SOCKET;
4fdb3bb7 2482
4fdb3bb7 2483 netlink_table_grab();
b4b51029
EB
2484 if (!nl_table[unit].registered) {
2485 nl_table[unit].groups = groups;
5c398dc8 2486 rcu_assign_pointer(nl_table[unit].listeners, listeners);
b4b51029
EB
2487 nl_table[unit].cb_mutex = cb_mutex;
2488 nl_table[unit].module = module;
9785e10a
PNA
2489 if (cfg) {
2490 nl_table[unit].bind = cfg->bind;
6251edd9 2491 nl_table[unit].unbind = cfg->unbind;
9785e10a 2492 nl_table[unit].flags = cfg->flags;
da12c90e
G
2493 if (cfg->compare)
2494 nl_table[unit].compare = cfg->compare;
9785e10a 2495 }
b4b51029 2496 nl_table[unit].registered = 1;
f937f1f4
JJ
2497 } else {
2498 kfree(listeners);
869e58f8 2499 nl_table[unit].registered++;
b4b51029 2500 }
4fdb3bb7 2501 netlink_table_ungrab();
77247bbb
PM
2502 return sk;
2503
4fdb3bb7 2504out_sock_release:
4277a083 2505 kfree(listeners);
9dfbec1f 2506 netlink_kernel_release(sk);
23fe1866
PE
2507 return NULL;
2508
2509out_sock_release_nosk:
4fdb3bb7 2510 sock_release(sock);
77247bbb 2511 return NULL;
1da177e4 2512}
9f00d977 2513EXPORT_SYMBOL(__netlink_kernel_create);
b7c6ba6e
DL
2514
2515void
2516netlink_kernel_release(struct sock *sk)
2517{
edf02087 2518 sk_release_kernel(sk);
b7c6ba6e
DL
2519}
2520EXPORT_SYMBOL(netlink_kernel_release);
2521
d136f1bd 2522int __netlink_change_ngroups(struct sock *sk, unsigned int groups)
b4ff4f04 2523{
5c398dc8 2524 struct listeners *new, *old;
b4ff4f04 2525 struct netlink_table *tbl = &nl_table[sk->sk_protocol];
b4ff4f04
JB
2526
2527 if (groups < 32)
2528 groups = 32;
2529
b4ff4f04 2530 if (NLGRPSZ(tbl->groups) < NLGRPSZ(groups)) {
5c398dc8
ED
2531 new = kzalloc(sizeof(*new) + NLGRPSZ(groups), GFP_ATOMIC);
2532 if (!new)
d136f1bd 2533 return -ENOMEM;
6d772ac5 2534 old = nl_deref_protected(tbl->listeners);
5c398dc8
ED
2535 memcpy(new->masks, old->masks, NLGRPSZ(tbl->groups));
2536 rcu_assign_pointer(tbl->listeners, new);
2537
37b6b935 2538 kfree_rcu(old, rcu);
b4ff4f04
JB
2539 }
2540 tbl->groups = groups;
2541
d136f1bd
JB
2542 return 0;
2543}
2544
2545/**
2546 * netlink_change_ngroups - change number of multicast groups
2547 *
2548 * This changes the number of multicast groups that are available
2549 * on a certain netlink family. Note that it is not possible to
2550 * change the number of groups to below 32. Also note that it does
2551 * not implicitly call netlink_clear_multicast_users() when the
2552 * number of groups is reduced.
2553 *
2554 * @sk: The kernel netlink socket, as returned by netlink_kernel_create().
2555 * @groups: The new number of groups.
2556 */
2557int netlink_change_ngroups(struct sock *sk, unsigned int groups)
2558{
2559 int err;
2560
2561 netlink_table_grab();
2562 err = __netlink_change_ngroups(sk, groups);
b4ff4f04 2563 netlink_table_ungrab();
d136f1bd 2564
b4ff4f04
JB
2565 return err;
2566}
b4ff4f04 2567
b8273570
JB
2568void __netlink_clear_multicast_users(struct sock *ksk, unsigned int group)
2569{
2570 struct sock *sk;
b8273570
JB
2571 struct netlink_table *tbl = &nl_table[ksk->sk_protocol];
2572
b67bfe0d 2573 sk_for_each_bound(sk, &tbl->mc_list)
b8273570
JB
2574 netlink_update_socket_mc(nlk_sk(sk), group, 0);
2575}
2576
a46621a3 2577struct nlmsghdr *
15e47304 2578__nlmsg_put(struct sk_buff *skb, u32 portid, u32 seq, int type, int len, int flags)
a46621a3
DV
2579{
2580 struct nlmsghdr *nlh;
573ce260 2581 int size = nlmsg_msg_size(len);
a46621a3 2582
23b45672 2583 nlh = (struct nlmsghdr *)skb_put(skb, NLMSG_ALIGN(size));
a46621a3
DV
2584 nlh->nlmsg_type = type;
2585 nlh->nlmsg_len = size;
2586 nlh->nlmsg_flags = flags;
15e47304 2587 nlh->nlmsg_pid = portid;
a46621a3
DV
2588 nlh->nlmsg_seq = seq;
2589 if (!__builtin_constant_p(size) || NLMSG_ALIGN(size) - size != 0)
573ce260 2590 memset(nlmsg_data(nlh) + len, 0, NLMSG_ALIGN(size) - size);
a46621a3
DV
2591 return nlh;
2592}
2593EXPORT_SYMBOL(__nlmsg_put);
2594
1da177e4
LT
2595/*
2596 * It looks a bit ugly.
2597 * It would be better to create a kernel thread.
2598 */
2599
2600static int netlink_dump(struct sock *sk)
2601{
2602 struct netlink_sock *nlk = nlk_sk(sk);
2603 struct netlink_callback *cb;
c7ac8679 2604 struct sk_buff *skb = NULL;
1da177e4 2605 struct nlmsghdr *nlh;
bf8b79e4 2606 int len, err = -ENOBUFS;
c7ac8679 2607 int alloc_size;
1da177e4 2608
af65bdfc 2609 mutex_lock(nlk->cb_mutex);
16b304f3 2610 if (!nlk->cb_running) {
bf8b79e4
TG
2611 err = -EINVAL;
2612 goto errout_skb;
1da177e4
LT
2613 }
2614
16b304f3 2615 cb = &nlk->cb;
c7ac8679
GR
2616 alloc_size = max_t(int, cb->min_dump_alloc, NLMSG_GOODSIZE);
2617
f9c22888
PM
2618 if (!netlink_rx_is_mmaped(sk) &&
2619 atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf)
2620 goto errout_skb;
9063e21f
ED
2621
2622 /* NLMSG_GOODSIZE is small to avoid high order allocations being
2623 * required, but it makes sense to _attempt_ a 16K byte allocation
2624 * to reduce the number of system calls on dump operations, if the user
2625 * ever provided a big enough buffer.
2626 */
2627 if (alloc_size < nlk->max_recvmsg_len) {
2628 skb = netlink_alloc_skb(sk,
2629 nlk->max_recvmsg_len,
2630 nlk->portid,
2631 GFP_KERNEL |
2632 __GFP_NOWARN |
2633 __GFP_NORETRY);
2634 /* available room should be the exact amount to avoid MSG_TRUNC */
2635 if (skb)
2636 skb_reserve(skb, skb_tailroom(skb) -
2637 nlk->max_recvmsg_len);
2638 }
2639 if (!skb)
2640 skb = netlink_alloc_skb(sk, alloc_size, nlk->portid,
2641 GFP_KERNEL);
c7ac8679 2642 if (!skb)
c63d6ea3 2643 goto errout_skb;
f9c22888 2644 netlink_skb_set_owner_r(skb, sk);
c7ac8679 2645
1da177e4
LT
2646 len = cb->dump(skb, cb);
2647
2648 if (len > 0) {
af65bdfc 2649 mutex_unlock(nlk->cb_mutex);
b1153f29
SH
2650
2651 if (sk_filter(sk, skb))
2652 kfree_skb(skb);
4a7e7c2a
ED
2653 else
2654 __netlink_sendskb(sk, skb);
1da177e4
LT
2655 return 0;
2656 }
2657
bf8b79e4
TG
2658 nlh = nlmsg_put_answer(skb, cb, NLMSG_DONE, sizeof(len), NLM_F_MULTI);
2659 if (!nlh)
2660 goto errout_skb;
2661
670dc283
JB
2662 nl_dump_check_consistent(cb, nlh);
2663
bf8b79e4
TG
2664 memcpy(nlmsg_data(nlh), &len, sizeof(len));
2665
b1153f29
SH
2666 if (sk_filter(sk, skb))
2667 kfree_skb(skb);
4a7e7c2a
ED
2668 else
2669 __netlink_sendskb(sk, skb);
1da177e4 2670
a8f74b22
TG
2671 if (cb->done)
2672 cb->done(cb);
1da177e4 2673
16b304f3
PS
2674 nlk->cb_running = false;
2675 mutex_unlock(nlk->cb_mutex);
6dc878a8 2676 module_put(cb->module);
16b304f3 2677 consume_skb(cb->skb);
1da177e4 2678 return 0;
1797754e 2679
bf8b79e4 2680errout_skb:
af65bdfc 2681 mutex_unlock(nlk->cb_mutex);
bf8b79e4 2682 kfree_skb(skb);
bf8b79e4 2683 return err;
1da177e4
LT
2684}
2685
6dc878a8
G
2686int __netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
2687 const struct nlmsghdr *nlh,
2688 struct netlink_dump_control *control)
1da177e4
LT
2689{
2690 struct netlink_callback *cb;
2691 struct sock *sk;
2692 struct netlink_sock *nlk;
b44d211e 2693 int ret;
1da177e4 2694
f9c22888
PM
2695 /* Memory mapped dump requests need to be copied to avoid looping
2696 * on the pending state in netlink_mmap_sendmsg() while the CB holds
2697 * a reference to the skb.
2698 */
2699 if (netlink_skb_is_mmaped(skb)) {
2700 skb = skb_copy(skb, GFP_KERNEL);
16b304f3 2701 if (skb == NULL)
f9c22888 2702 return -ENOBUFS;
f9c22888
PM
2703 } else
2704 atomic_inc(&skb->users);
2705
15e47304 2706 sk = netlink_lookup(sock_net(ssk), ssk->sk_protocol, NETLINK_CB(skb).portid);
1da177e4 2707 if (sk == NULL) {
16b304f3
PS
2708 ret = -ECONNREFUSED;
2709 goto error_free;
1da177e4 2710 }
6dc878a8 2711
16b304f3 2712 nlk = nlk_sk(sk);
af65bdfc 2713 mutex_lock(nlk->cb_mutex);
6dc878a8 2714 /* A dump is in progress... */
16b304f3 2715 if (nlk->cb_running) {
6dc878a8 2716 ret = -EBUSY;
16b304f3 2717 goto error_unlock;
1da177e4 2718 }
6dc878a8 2719 /* add reference of module which cb->dump belongs to */
16b304f3 2720 if (!try_module_get(control->module)) {
6dc878a8 2721 ret = -EPROTONOSUPPORT;
16b304f3 2722 goto error_unlock;
6dc878a8
G
2723 }
2724
16b304f3
PS
2725 cb = &nlk->cb;
2726 memset(cb, 0, sizeof(*cb));
2727 cb->dump = control->dump;
2728 cb->done = control->done;
2729 cb->nlh = nlh;
2730 cb->data = control->data;
2731 cb->module = control->module;
2732 cb->min_dump_alloc = control->min_dump_alloc;
2733 cb->skb = skb;
2734
2735 nlk->cb_running = true;
2736
af65bdfc 2737 mutex_unlock(nlk->cb_mutex);
1da177e4 2738
b44d211e 2739 ret = netlink_dump(sk);
1da177e4 2740 sock_put(sk);
5c58298c 2741
b44d211e
AV
2742 if (ret)
2743 return ret;
2744
5c58298c
DL
2745 /* We successfully started a dump, by returning -EINTR we
2746 * signal not to send ACK even if it was requested.
2747 */
2748 return -EINTR;
16b304f3
PS
2749
2750error_unlock:
2751 sock_put(sk);
2752 mutex_unlock(nlk->cb_mutex);
2753error_free:
2754 kfree_skb(skb);
2755 return ret;
1da177e4 2756}
6dc878a8 2757EXPORT_SYMBOL(__netlink_dump_start);
1da177e4
LT
2758
2759void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err)
2760{
2761 struct sk_buff *skb;
2762 struct nlmsghdr *rep;
2763 struct nlmsgerr *errmsg;
339bf98f 2764 size_t payload = sizeof(*errmsg);
1da177e4 2765
339bf98f
TG
2766 /* error messages get the original request appended */
2767 if (err)
2768 payload += nlmsg_len(nlh);
1da177e4 2769
f9c22888
PM
2770 skb = netlink_alloc_skb(in_skb->sk, nlmsg_total_size(payload),
2771 NETLINK_CB(in_skb).portid, GFP_KERNEL);
1da177e4
LT
2772 if (!skb) {
2773 struct sock *sk;
2774
3b1e0a65 2775 sk = netlink_lookup(sock_net(in_skb->sk),
b4b51029 2776 in_skb->sk->sk_protocol,
15e47304 2777 NETLINK_CB(in_skb).portid);
1da177e4
LT
2778 if (sk) {
2779 sk->sk_err = ENOBUFS;
2780 sk->sk_error_report(sk);
2781 sock_put(sk);
2782 }
2783 return;
2784 }
2785
15e47304 2786 rep = __nlmsg_put(skb, NETLINK_CB(in_skb).portid, nlh->nlmsg_seq,
5dba93ae 2787 NLMSG_ERROR, payload, 0);
bf8b79e4 2788 errmsg = nlmsg_data(rep);
1da177e4 2789 errmsg->error = err;
bf8b79e4 2790 memcpy(&errmsg->msg, nlh, err ? nlh->nlmsg_len : sizeof(*nlh));
15e47304 2791 netlink_unicast(in_skb->sk, skb, NETLINK_CB(in_skb).portid, MSG_DONTWAIT);
1da177e4 2792}
6ac552fd 2793EXPORT_SYMBOL(netlink_ack);
1da177e4 2794
cd40b7d3 2795int netlink_rcv_skb(struct sk_buff *skb, int (*cb)(struct sk_buff *,
1d00a4eb 2796 struct nlmsghdr *))
82ace47a 2797{
82ace47a
TG
2798 struct nlmsghdr *nlh;
2799 int err;
2800
2801 while (skb->len >= nlmsg_total_size(0)) {
cd40b7d3
DL
2802 int msglen;
2803
b529ccf2 2804 nlh = nlmsg_hdr(skb);
d35b6856 2805 err = 0;
82ace47a 2806
ad8e4b75 2807 if (nlh->nlmsg_len < NLMSG_HDRLEN || skb->len < nlh->nlmsg_len)
82ace47a
TG
2808 return 0;
2809
d35b6856
TG
2810 /* Only requests are handled by the kernel */
2811 if (!(nlh->nlmsg_flags & NLM_F_REQUEST))
5c58298c 2812 goto ack;
45e7ae7f
TG
2813
2814 /* Skip control messages */
2815 if (nlh->nlmsg_type < NLMSG_MIN_TYPE)
5c58298c 2816 goto ack;
d35b6856 2817
1d00a4eb 2818 err = cb(skb, nlh);
5c58298c
DL
2819 if (err == -EINTR)
2820 goto skip;
2821
2822ack:
d35b6856 2823 if (nlh->nlmsg_flags & NLM_F_ACK || err)
82ace47a 2824 netlink_ack(skb, nlh, err);
82ace47a 2825
5c58298c 2826skip:
6ac552fd 2827 msglen = NLMSG_ALIGN(nlh->nlmsg_len);
cd40b7d3
DL
2828 if (msglen > skb->len)
2829 msglen = skb->len;
2830 skb_pull(skb, msglen);
82ace47a
TG
2831 }
2832
2833 return 0;
2834}
6ac552fd 2835EXPORT_SYMBOL(netlink_rcv_skb);
82ace47a 2836
d387f6ad
TG
2837/**
2838 * nlmsg_notify - send a notification netlink message
2839 * @sk: netlink socket to use
2840 * @skb: notification message
15e47304 2841 * @portid: destination netlink portid for reports or 0
d387f6ad
TG
2842 * @group: destination multicast group or 0
2843 * @report: 1 to report back, 0 to disable
2844 * @flags: allocation flags
2845 */
15e47304 2846int nlmsg_notify(struct sock *sk, struct sk_buff *skb, u32 portid,
d387f6ad
TG
2847 unsigned int group, int report, gfp_t flags)
2848{
2849 int err = 0;
2850
2851 if (group) {
15e47304 2852 int exclude_portid = 0;
d387f6ad
TG
2853
2854 if (report) {
2855 atomic_inc(&skb->users);
15e47304 2856 exclude_portid = portid;
d387f6ad
TG
2857 }
2858
1ce85fe4
PNA
2859 /* errors reported via destination sk->sk_err, but propagate
2860 * delivery errors if NETLINK_BROADCAST_ERROR flag is set */
15e47304 2861 err = nlmsg_multicast(sk, skb, exclude_portid, group, flags);
d387f6ad
TG
2862 }
2863
1ce85fe4
PNA
2864 if (report) {
2865 int err2;
2866
15e47304 2867 err2 = nlmsg_unicast(sk, skb, portid);
1ce85fe4
PNA
2868 if (!err || err == -ESRCH)
2869 err = err2;
2870 }
d387f6ad
TG
2871
2872 return err;
2873}
6ac552fd 2874EXPORT_SYMBOL(nlmsg_notify);
d387f6ad 2875
1da177e4
LT
2876#ifdef CONFIG_PROC_FS
2877struct nl_seq_iter {
e372c414 2878 struct seq_net_private p;
1da177e4
LT
2879 int link;
2880 int hash_idx;
2881};
2882
2883static struct sock *netlink_seq_socket_idx(struct seq_file *seq, loff_t pos)
2884{
2885 struct nl_seq_iter *iter = seq->private;
2886 int i, j;
e341694e 2887 struct netlink_sock *nlk;
1da177e4 2888 struct sock *s;
1da177e4
LT
2889 loff_t off = 0;
2890
6ac552fd 2891 for (i = 0; i < MAX_LINKS; i++) {
e341694e 2892 struct rhashtable *ht = &nl_table[i].hash;
67a24ac1 2893 const struct bucket_table *tbl = rht_dereference_rcu(ht->tbl, ht);
e341694e
TG
2894
2895 for (j = 0; j < tbl->size; j++) {
2896 rht_for_each_entry_rcu(nlk, tbl->buckets[j], node) {
2897 s = (struct sock *)nlk;
1da177e4 2898
1218854a 2899 if (sock_net(s) != seq_file_net(seq))
b4b51029 2900 continue;
1da177e4
LT
2901 if (off == pos) {
2902 iter->link = i;
2903 iter->hash_idx = j;
2904 return s;
2905 }
2906 ++off;
2907 }
2908 }
2909 }
2910 return NULL;
2911}
2912
2913static void *netlink_seq_start(struct seq_file *seq, loff_t *pos)
78fd1d0a 2914 __acquires(nl_table_lock) __acquires(RCU)
1da177e4 2915{
78fd1d0a 2916 read_lock(&nl_table_lock);
e341694e 2917 rcu_read_lock();
1da177e4
LT
2918 return *pos ? netlink_seq_socket_idx(seq, *pos - 1) : SEQ_START_TOKEN;
2919}
2920
2921static void *netlink_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2922{
78fd1d0a 2923 struct rhashtable *ht;
e341694e 2924 struct netlink_sock *nlk;
1da177e4 2925 struct nl_seq_iter *iter;
da12c90e 2926 struct net *net;
1da177e4
LT
2927 int i, j;
2928
2929 ++*pos;
2930
2931 if (v == SEQ_START_TOKEN)
2932 return netlink_seq_socket_idx(seq, 0);
746fac4d 2933
da12c90e 2934 net = seq_file_net(seq);
b4b51029 2935 iter = seq->private;
e341694e
TG
2936 nlk = v;
2937
78fd1d0a
TG
2938 i = iter->link;
2939 ht = &nl_table[i].hash;
2940 rht_for_each_entry(nlk, nlk->node.next, ht, node)
e341694e
TG
2941 if (net_eq(sock_net((struct sock *)nlk), net))
2942 return nlk;
1da177e4 2943
1da177e4
LT
2944 j = iter->hash_idx + 1;
2945
2946 do {
67a24ac1 2947 const struct bucket_table *tbl = rht_dereference_rcu(ht->tbl, ht);
da12c90e 2948
e341694e 2949 for (; j < tbl->size; j++) {
78fd1d0a 2950 rht_for_each_entry(nlk, tbl->buckets[j], ht, node) {
e341694e
TG
2951 if (net_eq(sock_net((struct sock *)nlk), net)) {
2952 iter->link = i;
2953 iter->hash_idx = j;
2954 return nlk;
2955 }
1da177e4
LT
2956 }
2957 }
2958
2959 j = 0;
2960 } while (++i < MAX_LINKS);
2961
2962 return NULL;
2963}
2964
2965static void netlink_seq_stop(struct seq_file *seq, void *v)
78fd1d0a 2966 __releases(RCU) __releases(nl_table_lock)
1da177e4 2967{
e341694e 2968 rcu_read_unlock();
78fd1d0a 2969 read_unlock(&nl_table_lock);
1da177e4
LT
2970}
2971
2972
2973static int netlink_seq_show(struct seq_file *seq, void *v)
2974{
658cb354 2975 if (v == SEQ_START_TOKEN) {
1da177e4
LT
2976 seq_puts(seq,
2977 "sk Eth Pid Groups "
cf0aa4e0 2978 "Rmem Wmem Dump Locks Drops Inode\n");
658cb354 2979 } else {
1da177e4
LT
2980 struct sock *s = v;
2981 struct netlink_sock *nlk = nlk_sk(s);
2982
16b304f3 2983 seq_printf(seq, "%pK %-3d %-6u %08x %-8d %-8d %d %-8d %-8d %-8lu\n",
1da177e4
LT
2984 s,
2985 s->sk_protocol,
15e47304 2986 nlk->portid,
513c2500 2987 nlk->groups ? (u32)nlk->groups[0] : 0,
31e6d363
ED
2988 sk_rmem_alloc_get(s),
2989 sk_wmem_alloc_get(s),
16b304f3 2990 nlk->cb_running,
38938bfe 2991 atomic_read(&s->sk_refcnt),
cf0aa4e0
MY
2992 atomic_read(&s->sk_drops),
2993 sock_i_ino(s)
1da177e4
LT
2994 );
2995
2996 }
2997 return 0;
2998}
2999
56b3d975 3000static const struct seq_operations netlink_seq_ops = {
1da177e4
LT
3001 .start = netlink_seq_start,
3002 .next = netlink_seq_next,
3003 .stop = netlink_seq_stop,
3004 .show = netlink_seq_show,
3005};
3006
3007
3008static int netlink_seq_open(struct inode *inode, struct file *file)
3009{
e372c414
DL
3010 return seq_open_net(inode, file, &netlink_seq_ops,
3011 sizeof(struct nl_seq_iter));
b4b51029
EB
3012}
3013
da7071d7 3014static const struct file_operations netlink_seq_fops = {
1da177e4
LT
3015 .owner = THIS_MODULE,
3016 .open = netlink_seq_open,
3017 .read = seq_read,
3018 .llseek = seq_lseek,
e372c414 3019 .release = seq_release_net,
1da177e4
LT
3020};
3021
3022#endif
3023
3024int netlink_register_notifier(struct notifier_block *nb)
3025{
e041c683 3026 return atomic_notifier_chain_register(&netlink_chain, nb);
1da177e4 3027}
6ac552fd 3028EXPORT_SYMBOL(netlink_register_notifier);
1da177e4
LT
3029
3030int netlink_unregister_notifier(struct notifier_block *nb)
3031{
e041c683 3032 return atomic_notifier_chain_unregister(&netlink_chain, nb);
1da177e4 3033}
6ac552fd 3034EXPORT_SYMBOL(netlink_unregister_notifier);
746fac4d 3035
90ddc4f0 3036static const struct proto_ops netlink_ops = {
1da177e4
LT
3037 .family = PF_NETLINK,
3038 .owner = THIS_MODULE,
3039 .release = netlink_release,
3040 .bind = netlink_bind,
3041 .connect = netlink_connect,
3042 .socketpair = sock_no_socketpair,
3043 .accept = sock_no_accept,
3044 .getname = netlink_getname,
9652e931 3045 .poll = netlink_poll,
1da177e4
LT
3046 .ioctl = sock_no_ioctl,
3047 .listen = sock_no_listen,
3048 .shutdown = sock_no_shutdown,
9a4595bc
PM
3049 .setsockopt = netlink_setsockopt,
3050 .getsockopt = netlink_getsockopt,
1da177e4
LT
3051 .sendmsg = netlink_sendmsg,
3052 .recvmsg = netlink_recvmsg,
ccdfcc39 3053 .mmap = netlink_mmap,
1da177e4
LT
3054 .sendpage = sock_no_sendpage,
3055};
3056
ec1b4cf7 3057static const struct net_proto_family netlink_family_ops = {
1da177e4
LT
3058 .family = PF_NETLINK,
3059 .create = netlink_create,
3060 .owner = THIS_MODULE, /* for consistency 8) */
3061};
3062
4665079c 3063static int __net_init netlink_net_init(struct net *net)
b4b51029
EB
3064{
3065#ifdef CONFIG_PROC_FS
d4beaa66 3066 if (!proc_create("netlink", 0, net->proc_net, &netlink_seq_fops))
b4b51029
EB
3067 return -ENOMEM;
3068#endif
3069 return 0;
3070}
3071
4665079c 3072static void __net_exit netlink_net_exit(struct net *net)
b4b51029
EB
3073{
3074#ifdef CONFIG_PROC_FS
ece31ffd 3075 remove_proc_entry("netlink", net->proc_net);
b4b51029
EB
3076#endif
3077}
3078
b963ea89
DM
3079static void __init netlink_add_usersock_entry(void)
3080{
5c398dc8 3081 struct listeners *listeners;
b963ea89
DM
3082 int groups = 32;
3083
5c398dc8 3084 listeners = kzalloc(sizeof(*listeners) + NLGRPSZ(groups), GFP_KERNEL);
b963ea89 3085 if (!listeners)
5c398dc8 3086 panic("netlink_add_usersock_entry: Cannot allocate listeners\n");
b963ea89
DM
3087
3088 netlink_table_grab();
3089
3090 nl_table[NETLINK_USERSOCK].groups = groups;
5c398dc8 3091 rcu_assign_pointer(nl_table[NETLINK_USERSOCK].listeners, listeners);
b963ea89
DM
3092 nl_table[NETLINK_USERSOCK].module = THIS_MODULE;
3093 nl_table[NETLINK_USERSOCK].registered = 1;
9785e10a 3094 nl_table[NETLINK_USERSOCK].flags = NL_CFG_F_NONROOT_SEND;
b963ea89
DM
3095
3096 netlink_table_ungrab();
3097}
3098
022cbae6 3099static struct pernet_operations __net_initdata netlink_net_ops = {
b4b51029
EB
3100 .init = netlink_net_init,
3101 .exit = netlink_net_exit,
3102};
3103
1da177e4
LT
3104static int __init netlink_proto_init(void)
3105{
1da177e4 3106 int i;
1da177e4 3107 int err = proto_register(&netlink_proto, 0);
e341694e
TG
3108 struct rhashtable_params ht_params = {
3109 .head_offset = offsetof(struct netlink_sock, node),
3110 .key_offset = offsetof(struct netlink_sock, portid),
3111 .key_len = sizeof(u32), /* portid */
7f19fc5e 3112 .hashfn = jhash,
e341694e
TG
3113 .max_shift = 16, /* 64K */
3114 .grow_decision = rht_grow_above_75,
3115 .shrink_decision = rht_shrink_below_30,
97127566 3116#ifdef CONFIG_PROVE_LOCKING
e341694e 3117 .mutex_is_held = lockdep_nl_sk_hash_is_held,
97127566 3118#endif
e341694e 3119 };
1da177e4
LT
3120
3121 if (err != 0)
3122 goto out;
3123
fab25745 3124 BUILD_BUG_ON(sizeof(struct netlink_skb_parms) > FIELD_SIZEOF(struct sk_buff, cb));
1da177e4 3125
0da974f4 3126 nl_table = kcalloc(MAX_LINKS, sizeof(*nl_table), GFP_KERNEL);
fab2caf6
AM
3127 if (!nl_table)
3128 goto panic;
1da177e4 3129
1da177e4 3130 for (i = 0; i < MAX_LINKS; i++) {
e341694e
TG
3131 if (rhashtable_init(&nl_table[i].hash, &ht_params) < 0) {
3132 while (--i > 0)
3133 rhashtable_destroy(&nl_table[i].hash);
1da177e4 3134 kfree(nl_table);
fab2caf6 3135 goto panic;
1da177e4 3136 }
1da177e4
LT
3137 }
3138
bcbde0d4
DB
3139 INIT_LIST_HEAD(&netlink_tap_all);
3140
b963ea89
DM
3141 netlink_add_usersock_entry();
3142
1da177e4 3143 sock_register(&netlink_family_ops);
b4b51029 3144 register_pernet_subsys(&netlink_net_ops);
746fac4d 3145 /* The netlink device handler may be needed early. */
1da177e4
LT
3146 rtnetlink_init();
3147out:
3148 return err;
fab2caf6
AM
3149panic:
3150 panic("netlink_init: Cannot allocate nl_table\n");
1da177e4
LT
3151}
3152
1da177e4 3153core_initcall(netlink_proto_init);