net/ipv4/inet_diag.c
/*
 * inet_diag.c	Module for monitoring INET transport protocols sockets.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/cache.h>
#include <linux/init.h>
#include <linux/time.h>

#include <net/icmp.h>
#include <net/tcp.h>
#include <net/ipv6.h>
#include <net/inet_common.h>
#include <net/inet_connection_sock.h>
#include <net/inet_hashtables.h>
#include <net/inet_timewait_sock.h>
#include <net/inet6_hashtables.h>
#include <net/netlink.h>

#include <linux/inet.h>
#include <linux/stddef.h>

#include <linux/inet_diag.h>
#include <linux/sock_diag.h>

static const struct inet_diag_handler **inet_diag_table;

struct inet_diag_entry {
	const __be32 *saddr;
	const __be32 *daddr;
	u16 sport;
	u16 dport;
	u16 family;
	u16 userlocks;
};

static DEFINE_MUTEX(inet_diag_table_mutex);

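/*
 * Look up the handler for an inet transport protocol, loading the
 * corresponding sock_diag module on demand. Returns with
 * inet_diag_table_mutex held; the caller drops it via
 * inet_diag_unlock_handler(), even when ERR_PTR(-ENOENT) is returned.
 */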
static const struct inet_diag_handler *inet_diag_lock_handler(int proto)
{
	if (!inet_diag_table[proto])
		request_module("net-pf-%d-proto-%d-type-%d-%d", PF_NETLINK,
			       NETLINK_SOCK_DIAG, AF_INET, proto);

	mutex_lock(&inet_diag_table_mutex);
	if (!inet_diag_table[proto])
		return ERR_PTR(-ENOENT);

	return inet_diag_table[proto];
}

static void inet_diag_unlock_handler(const struct inet_diag_handler *handler)
{
	mutex_unlock(&inet_diag_table_mutex);
}

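/*
 * Fill the address/port/interface/cookie part of an inet_diag_msg that is
 * common to full sockets, timewait sockets and request sockets.
 */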
static void inet_diag_msg_common_fill(struct inet_diag_msg *r, struct sock *sk)
{
	r->idiag_family = sk->sk_family;

	r->id.idiag_sport = htons(sk->sk_num);
	r->id.idiag_dport = sk->sk_dport;
	r->id.idiag_if = sk->sk_bound_dev_if;
	sock_diag_save_cookie(sk, r->id.idiag_cookie);

#if IS_ENABLED(CONFIG_IPV6)
	if (sk->sk_family == AF_INET6) {
		*(struct in6_addr *)r->id.idiag_src = sk->sk_v6_rcv_saddr;
		*(struct in6_addr *)r->id.idiag_dst = sk->sk_v6_daddr;
	} else
#endif
	{
	memset(&r->id.idiag_src, 0, sizeof(r->id.idiag_src));
	memset(&r->id.idiag_dst, 0, sizeof(r->id.idiag_dst));

	r->id.idiag_src[0] = sk->sk_rcv_saddr;
	r->id.idiag_dst[0] = sk->sk_daddr;
	}
}

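/*
 * Upper bound on the netlink attribute space needed for a single socket
 * reply; used to size the reply skb in inet_diag_dump_one_icsk().
 */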
static size_t inet_sk_attr_size(void)
{
	return	  nla_total_size(sizeof(struct tcp_info))
		+ nla_total_size(1) /* INET_DIAG_SHUTDOWN */
		+ nla_total_size(1) /* INET_DIAG_TOS */
		+ nla_total_size(1) /* INET_DIAG_TCLASS */
		+ nla_total_size(sizeof(struct inet_diag_meminfo))
		+ nla_total_size(sizeof(struct inet_diag_msg))
		+ nla_total_size(SK_MEMINFO_VARS * sizeof(u32))
		+ nla_total_size(TCP_CA_NAME_MAX)
		+ nla_total_size(sizeof(struct tcpvegas_info))
		+ 64;
}

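/*
 * Fill one netlink message describing @sk. When @icsk is NULL (the socket
 * is not connection oriented) the retransmit/keepalive timer state,
 * INET_DIAG_INFO and congestion control attributes are omitted.
 */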
int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk,
		      struct sk_buff *skb, const struct inet_diag_req_v2 *req,
		      struct user_namespace *user_ns,
		      u32 portid, u32 seq, u16 nlmsg_flags,
		      const struct nlmsghdr *unlh)
{
	const struct inet_sock *inet = inet_sk(sk);
	const struct inet_diag_handler *handler;
	int ext = req->idiag_ext;
	struct inet_diag_msg *r;
	struct nlmsghdr *nlh;
	struct nlattr *attr;
	void *info = NULL;

	handler = inet_diag_table[req->sdiag_protocol];
	BUG_ON(!handler);

	nlh = nlmsg_put(skb, portid, seq, unlh->nlmsg_type, sizeof(*r),
			nlmsg_flags);
	if (!nlh)
		return -EMSGSIZE;

	r = nlmsg_data(nlh);
	BUG_ON(!sk_fullsock(sk));

	inet_diag_msg_common_fill(r, sk);
	r->idiag_state = sk->sk_state;
	r->idiag_timer = 0;
	r->idiag_retrans = 0;

	if (nla_put_u8(skb, INET_DIAG_SHUTDOWN, sk->sk_shutdown))
		goto errout;

	/* IPv6 dual-stack sockets use inet->tos for IPv4 connections,
	 * hence this needs to be included regardless of socket family.
	 */
	if (ext & (1 << (INET_DIAG_TOS - 1)))
		if (nla_put_u8(skb, INET_DIAG_TOS, inet->tos) < 0)
			goto errout;

#if IS_ENABLED(CONFIG_IPV6)
	if (r->idiag_family == AF_INET6) {
		if (ext & (1 << (INET_DIAG_TCLASS - 1)))
			if (nla_put_u8(skb, INET_DIAG_TCLASS,
				       inet6_sk(sk)->tclass) < 0)
				goto errout;
	}
#endif

	r->idiag_uid = from_kuid_munged(user_ns, sock_i_uid(sk));
	r->idiag_inode = sock_i_ino(sk);

	if (ext & (1 << (INET_DIAG_MEMINFO - 1))) {
		struct inet_diag_meminfo minfo = {
			.idiag_rmem = sk_rmem_alloc_get(sk),
			.idiag_wmem = sk->sk_wmem_queued,
			.idiag_fmem = sk->sk_forward_alloc,
			.idiag_tmem = sk_wmem_alloc_get(sk),
		};

		if (nla_put(skb, INET_DIAG_MEMINFO, sizeof(minfo), &minfo) < 0)
			goto errout;
	}

	if (ext & (1 << (INET_DIAG_SKMEMINFO - 1)))
		if (sock_diag_put_meminfo(sk, skb, INET_DIAG_SKMEMINFO))
			goto errout;

	if (!icsk) {
		handler->idiag_get_info(sk, r, NULL);
		goto out;
	}

#define EXPIRES_IN_MS(tmo) DIV_ROUND_UP((tmo - jiffies) * 1000, HZ)

	if (icsk->icsk_pending == ICSK_TIME_RETRANS ||
	    icsk->icsk_pending == ICSK_TIME_EARLY_RETRANS ||
	    icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
		r->idiag_timer = 1;
		r->idiag_retrans = icsk->icsk_retransmits;
		r->idiag_expires = EXPIRES_IN_MS(icsk->icsk_timeout);
	} else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
		r->idiag_timer = 4;
		r->idiag_retrans = icsk->icsk_probes_out;
		r->idiag_expires = EXPIRES_IN_MS(icsk->icsk_timeout);
	} else if (timer_pending(&sk->sk_timer)) {
		r->idiag_timer = 2;
		r->idiag_retrans = icsk->icsk_probes_out;
		r->idiag_expires = EXPIRES_IN_MS(sk->sk_timer.expires);
	} else {
		r->idiag_timer = 0;
		r->idiag_expires = 0;
	}
#undef EXPIRES_IN_MS

	if (ext & (1 << (INET_DIAG_INFO - 1))) {
		attr = nla_reserve(skb, INET_DIAG_INFO,
				   sizeof(struct tcp_info));
		if (!attr)
			goto errout;

		info = nla_data(attr);
	}

	if ((ext & (1 << (INET_DIAG_CONG - 1))) && icsk->icsk_ca_ops)
		if (nla_put_string(skb, INET_DIAG_CONG,
				   icsk->icsk_ca_ops->name) < 0)
			goto errout;

	handler->idiag_get_info(sk, r, info);

	if (sk->sk_state < TCP_TIME_WAIT &&
	    icsk->icsk_ca_ops && icsk->icsk_ca_ops->get_info)
		icsk->icsk_ca_ops->get_info(sk, ext, skb);

out:
	nlmsg_end(skb, nlh);
	return 0;

errout:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}
EXPORT_SYMBOL_GPL(inet_sk_diag_fill);

static int inet_csk_diag_fill(struct sock *sk,
			      struct sk_buff *skb,
			      const struct inet_diag_req_v2 *req,
			      struct user_namespace *user_ns,
			      u32 portid, u32 seq, u16 nlmsg_flags,
			      const struct nlmsghdr *unlh)
{
	return inet_sk_diag_fill(sk, inet_csk(sk), skb, req,
				 user_ns, portid, seq, nlmsg_flags, unlh);
}

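/*
 * Report a TIME_WAIT/FIN_WAIT2 minisocket: no uid, inode or queue data is
 * available, only the remaining timewait lifetime is reported through
 * idiag_expires (timer type 3).
 */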
static int inet_twsk_diag_fill(struct sock *sk,
			       struct sk_buff *skb,
			       u32 portid, u32 seq, u16 nlmsg_flags,
			       const struct nlmsghdr *unlh)
{
	struct inet_timewait_sock *tw = inet_twsk(sk);
	struct inet_diag_msg *r;
	struct nlmsghdr *nlh;
	s32 tmo;

	nlh = nlmsg_put(skb, portid, seq, unlh->nlmsg_type, sizeof(*r),
			nlmsg_flags);
	if (!nlh)
		return -EMSGSIZE;

	r = nlmsg_data(nlh);
	BUG_ON(tw->tw_state != TCP_TIME_WAIT);

	tmo = tw->tw_ttd - inet_tw_time_stamp();
	if (tmo < 0)
		tmo = 0;

	inet_diag_msg_common_fill(r, sk);
	r->idiag_retrans = 0;

	r->idiag_state = tw->tw_substate;
	r->idiag_timer = 3;
	r->idiag_expires = jiffies_to_msecs(tmo);
	r->idiag_rqueue = 0;
	r->idiag_wqueue = 0;
	r->idiag_uid = 0;
	r->idiag_inode = 0;

	nlmsg_end(skb, nlh);
	return 0;
}

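/*
 * Report a request socket (TCP_NEW_SYN_RECV) as SYN_RECV: the only timer
 * of interest is the SYN-ACK retransmit timer, reported as timer type 1.
 */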
static int inet_req_diag_fill(struct sock *sk, struct sk_buff *skb,
			      u32 portid, u32 seq, u16 nlmsg_flags,
			      const struct nlmsghdr *unlh)
{
	struct inet_diag_msg *r;
	struct nlmsghdr *nlh;
	long tmo;

	nlh = nlmsg_put(skb, portid, seq, unlh->nlmsg_type, sizeof(*r),
			nlmsg_flags);
	if (!nlh)
		return -EMSGSIZE;

	r = nlmsg_data(nlh);
	inet_diag_msg_common_fill(r, sk);
	r->idiag_state = TCP_SYN_RECV;
	r->idiag_timer = 1;
	r->idiag_retrans = inet_reqsk(sk)->num_retrans;

	BUILD_BUG_ON(offsetof(struct inet_request_sock, ir_cookie) !=
		     offsetof(struct sock, sk_cookie));

	tmo = inet_reqsk(sk)->rsk_timer.expires - jiffies;
	r->idiag_expires = (tmo >= 0) ? jiffies_to_msecs(tmo) : 0;
	r->idiag_rqueue = 0;
	r->idiag_wqueue = 0;
	r->idiag_uid = 0;
	r->idiag_inode = 0;

	nlmsg_end(skb, nlh);
	return 0;
}

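/*
 * Dispatch to the right fill routine based on the socket state: timewait
 * and request minisockets carry less information than full sockets.
 */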
static int sk_diag_fill(struct sock *sk, struct sk_buff *skb,
			const struct inet_diag_req_v2 *r,
			struct user_namespace *user_ns,
			u32 portid, u32 seq, u16 nlmsg_flags,
			const struct nlmsghdr *unlh)
{
	if (sk->sk_state == TCP_TIME_WAIT)
		return inet_twsk_diag_fill(sk, skb, portid, seq,
					   nlmsg_flags, unlh);

	if (sk->sk_state == TCP_NEW_SYN_RECV)
		return inet_req_diag_fill(sk, skb, portid, seq,
					  nlmsg_flags, unlh);

	return inet_csk_diag_fill(sk, skb, r, user_ns, portid, seq,
				  nlmsg_flags, unlh);
}

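/*
 * Answer an exact-match request: look the socket up by its 4-tuple in
 * @hashinfo, verify the caller-supplied cookie, and unicast a single
 * reply back to the requesting netlink socket.
 */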
int inet_diag_dump_one_icsk(struct inet_hashinfo *hashinfo,
			    struct sk_buff *in_skb,
			    const struct nlmsghdr *nlh,
			    const struct inet_diag_req_v2 *req)
{
	struct net *net = sock_net(in_skb->sk);
	struct sk_buff *rep;
	struct sock *sk;
	int err;

	err = -EINVAL;
	if (req->sdiag_family == AF_INET)
		sk = inet_lookup(net, hashinfo, req->id.idiag_dst[0],
				 req->id.idiag_dport, req->id.idiag_src[0],
				 req->id.idiag_sport, req->id.idiag_if);
#if IS_ENABLED(CONFIG_IPV6)
	else if (req->sdiag_family == AF_INET6)
		sk = inet6_lookup(net, hashinfo,
				  (struct in6_addr *)req->id.idiag_dst,
				  req->id.idiag_dport,
				  (struct in6_addr *)req->id.idiag_src,
				  req->id.idiag_sport,
				  req->id.idiag_if);
#endif
	else
		goto out_nosk;

	err = -ENOENT;
	if (!sk)
		goto out_nosk;

	err = sock_diag_check_cookie(sk, req->id.idiag_cookie);
	if (err)
		goto out;

	rep = nlmsg_new(inet_sk_attr_size(), GFP_KERNEL);
	if (!rep) {
		err = -ENOMEM;
		goto out;
	}

	err = sk_diag_fill(sk, rep, req,
			   sk_user_ns(NETLINK_CB(in_skb).sk),
			   NETLINK_CB(in_skb).portid,
			   nlh->nlmsg_seq, 0, nlh);
	if (err < 0) {
		WARN_ON(err == -EMSGSIZE);
		nlmsg_free(rep);
		goto out;
	}
	err = netlink_unicast(net->diag_nlsk, rep, NETLINK_CB(in_skb).portid,
			      MSG_DONTWAIT);
	if (err > 0)
		err = 0;

out:
	if (sk)
		sock_gen_put(sk);

out_nosk:
	return err;
}
EXPORT_SYMBOL_GPL(inet_diag_dump_one_icsk);

static int inet_diag_get_exact(struct sk_buff *in_skb,
			       const struct nlmsghdr *nlh,
			       const struct inet_diag_req_v2 *req)
{
	const struct inet_diag_handler *handler;
	int err;

	handler = inet_diag_lock_handler(req->sdiag_protocol);
	if (IS_ERR(handler))
		err = PTR_ERR(handler);
	else
		err = handler->dump_one(in_skb, nlh, req);
	inet_diag_unlock_handler(handler);

	return err;
}

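/*
 * Compare the first @bits bits of two big-endian addresses; returns 1 on a
 * prefix match. Used by the bytecode host conditions below.
 */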
static int bitstring_match(const __be32 *a1, const __be32 *a2, int bits)
{
	int words = bits >> 5;

	bits &= 0x1f;

	if (words) {
		if (memcmp(a1, a2, words << 2))
			return 0;
	}
	if (bits) {
		__be32 w1, w2;
		__be32 mask;

		w1 = a1[words];
		w2 = a2[words];

		mask = htonl((0xffffffff) << (32 - bits));

		if ((w1 ^ w2) & mask)
			return 0;
	}

	return 1;
}

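/*
 * Interpret the INET_DIAG_REQ_BYTECODE filter against one socket entry.
 * Each inet_diag_bc_op encodes relative "yes"/"no" jumps; the program
 * accepts the socket when execution runs exactly off the end (len == 0).
 * The bytecode has already been sanity checked by inet_diag_bc_audit().
 */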
static int inet_diag_bc_run(const struct nlattr *_bc,
			    const struct inet_diag_entry *entry)
{
	const void *bc = nla_data(_bc);
	int len = nla_len(_bc);

	while (len > 0) {
		int yes = 1;
		const struct inet_diag_bc_op *op = bc;

		switch (op->code) {
		case INET_DIAG_BC_NOP:
			break;
		case INET_DIAG_BC_JMP:
			yes = 0;
			break;
		case INET_DIAG_BC_S_GE:
			yes = entry->sport >= op[1].no;
			break;
		case INET_DIAG_BC_S_LE:
			yes = entry->sport <= op[1].no;
			break;
		case INET_DIAG_BC_D_GE:
			yes = entry->dport >= op[1].no;
			break;
		case INET_DIAG_BC_D_LE:
			yes = entry->dport <= op[1].no;
			break;
		case INET_DIAG_BC_AUTO:
			yes = !(entry->userlocks & SOCK_BINDPORT_LOCK);
			break;
		case INET_DIAG_BC_S_COND:
		case INET_DIAG_BC_D_COND: {
			const struct inet_diag_hostcond *cond;
			const __be32 *addr;

			cond = (const struct inet_diag_hostcond *)(op + 1);
			if (cond->port != -1 &&
			    cond->port != (op->code == INET_DIAG_BC_S_COND ?
					   entry->sport : entry->dport)) {
				yes = 0;
				break;
			}

			if (op->code == INET_DIAG_BC_S_COND)
				addr = entry->saddr;
			else
				addr = entry->daddr;

			if (cond->family != AF_UNSPEC &&
			    cond->family != entry->family) {
				if (entry->family == AF_INET6 &&
				    cond->family == AF_INET) {
					if (addr[0] == 0 && addr[1] == 0 &&
					    addr[2] == htonl(0xffff) &&
					    bitstring_match(addr + 3,
							    cond->addr,
							    cond->prefix_len))
						break;
				}
				yes = 0;
				break;
			}

			if (cond->prefix_len == 0)
				break;
			if (bitstring_match(addr, cond->addr,
					    cond->prefix_len))
				break;
			yes = 0;
			break;
		}
		}

		if (yes) {
			len -= op->yes;
			bc += op->yes;
		} else {
			len -= op->no;
			bc += op->no;
		}
	}
	return len == 0;
}

/* This helper is available for all sockets (ESTABLISH, TIMEWAIT, SYN_RECV)
 */
static void entry_fill_addrs(struct inet_diag_entry *entry,
			     const struct sock *sk)
{
#if IS_ENABLED(CONFIG_IPV6)
	if (sk->sk_family == AF_INET6) {
		entry->saddr = sk->sk_v6_rcv_saddr.s6_addr32;
		entry->daddr = sk->sk_v6_daddr.s6_addr32;
	} else
#endif
	{
		entry->saddr = &sk->sk_rcv_saddr;
		entry->daddr = &sk->sk_daddr;
	}
}

int inet_diag_bc_sk(const struct nlattr *bc, struct sock *sk)
{
	struct inet_sock *inet = inet_sk(sk);
	struct inet_diag_entry entry;

	if (!bc)
		return 1;

	entry.family = sk->sk_family;
	entry_fill_addrs(&entry, sk);
	entry.sport = inet->inet_num;
	entry.dport = ntohs(inet->inet_dport);
	entry.userlocks = sk_fullsock(sk) ? sk->sk_userlocks : 0;

	return inet_diag_bc_run(bc, &entry);
}
EXPORT_SYMBOL_GPL(inet_diag_bc_sk);

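/*
 * Check that the target of a conditional jump is the start of a valid
 * instruction: walk the "yes" chain from the beginning of the program and
 * verify that the jump lands exactly on an op boundary.
 */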
static int valid_cc(const void *bc, int len, int cc)
{
	while (len >= 0) {
		const struct inet_diag_bc_op *op = bc;

		if (cc > len)
			return 0;
		if (cc == len)
			return 1;
		if (op->yes < 4 || op->yes & 3)
			return 0;
		len -= op->yes;
		bc += op->yes;
	}
	return 0;
}

/* Validate an inet_diag_hostcond. */
static bool valid_hostcond(const struct inet_diag_bc_op *op, int len,
			   int *min_len)
{
	struct inet_diag_hostcond *cond;
	int addr_len;

	/* Check hostcond space. */
	*min_len += sizeof(struct inet_diag_hostcond);
	if (len < *min_len)
		return false;
	cond = (struct inet_diag_hostcond *)(op + 1);

	/* Check address family and address length. */
	switch (cond->family) {
	case AF_UNSPEC:
		addr_len = 0;
		break;
	case AF_INET:
		addr_len = sizeof(struct in_addr);
		break;
	case AF_INET6:
		addr_len = sizeof(struct in6_addr);
		break;
	default:
		return false;
	}
	*min_len += addr_len;
	if (len < *min_len)
		return false;

	/* Check prefix length (in bits) vs address length (in bytes). */
	if (cond->prefix_len > 8 * addr_len)
		return false;

	return true;
}

/* Validate a port comparison operator. */
static bool valid_port_comparison(const struct inet_diag_bc_op *op,
				  int len, int *min_len)
{
	/* Port comparisons put the port in a follow-on inet_diag_bc_op. */
	*min_len += sizeof(struct inet_diag_bc_op);
	if (len < *min_len)
		return false;
	return true;
}

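/*
 * Audit user-supplied filter bytecode before it is ever run against a
 * socket: jump offsets must be multiples of four, may not escape the
 * attribute, and conditional jumps must land on instruction boundaries
 * (checked via valid_cc()).
 */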
static int inet_diag_bc_audit(const void *bytecode, int bytecode_len)
{
	const void *bc = bytecode;
	int len = bytecode_len;

	while (len > 0) {
		int min_len = sizeof(struct inet_diag_bc_op);
		const struct inet_diag_bc_op *op = bc;

		switch (op->code) {
		case INET_DIAG_BC_S_COND:
		case INET_DIAG_BC_D_COND:
			if (!valid_hostcond(bc, len, &min_len))
				return -EINVAL;
			break;
		case INET_DIAG_BC_S_GE:
		case INET_DIAG_BC_S_LE:
		case INET_DIAG_BC_D_GE:
		case INET_DIAG_BC_D_LE:
			if (!valid_port_comparison(bc, len, &min_len))
				return -EINVAL;
			break;
		case INET_DIAG_BC_AUTO:
		case INET_DIAG_BC_JMP:
		case INET_DIAG_BC_NOP:
			break;
		default:
			return -EINVAL;
		}

		if (op->code != INET_DIAG_BC_NOP) {
			if (op->no < min_len || op->no > len + 4 || op->no & 3)
				return -EINVAL;
			if (op->no < len &&
			    !valid_cc(bytecode, bytecode_len, len - op->no))
				return -EINVAL;
		}

		if (op->yes < min_len || op->yes > len + 4 || op->yes & 3)
			return -EINVAL;
		bc += op->yes;
		len -= op->yes;
	}
	return len == 0 ? 0 : -EINVAL;
}

static int inet_csk_diag_dump(struct sock *sk,
			      struct sk_buff *skb,
			      struct netlink_callback *cb,
			      const struct inet_diag_req_v2 *r,
			      const struct nlattr *bc)
{
	if (!inet_diag_bc_sk(bc, sk))
		return 0;

	return inet_csk_diag_fill(sk, skb, r,
				  sk_user_ns(NETLINK_CB(cb->skb).sk),
				  NETLINK_CB(cb->skb).portid,
				  cb->nlh->nlmsg_seq, NLM_F_MULTI, cb->nlh);
}

static void twsk_build_assert(void)
{
	BUILD_BUG_ON(offsetof(struct inet_timewait_sock, tw_family) !=
		     offsetof(struct sock, sk_family));

	BUILD_BUG_ON(offsetof(struct inet_timewait_sock, tw_num) !=
		     offsetof(struct inet_sock, inet_num));

	BUILD_BUG_ON(offsetof(struct inet_timewait_sock, tw_dport) !=
		     offsetof(struct inet_sock, inet_dport));

	BUILD_BUG_ON(offsetof(struct inet_timewait_sock, tw_rcv_saddr) !=
		     offsetof(struct inet_sock, inet_rcv_saddr));

	BUILD_BUG_ON(offsetof(struct inet_timewait_sock, tw_daddr) !=
		     offsetof(struct inet_sock, inet_daddr));

#if IS_ENABLED(CONFIG_IPV6)
	BUILD_BUG_ON(offsetof(struct inet_timewait_sock, tw_v6_rcv_saddr) !=
		     offsetof(struct sock, sk_v6_rcv_saddr));

	BUILD_BUG_ON(offsetof(struct inet_timewait_sock, tw_v6_daddr) !=
		     offsetof(struct sock, sk_v6_daddr));
#endif
}

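/*
 * Walk the SYN queue of a listening socket and emit one message per
 * pending request socket. cb->args[3] and cb->args[4] remember the hash
 * bucket and request index so an interrupted dump can be resumed.
 */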
static int inet_diag_dump_reqs(struct sk_buff *skb, struct sock *sk,
			       struct netlink_callback *cb,
			       const struct inet_diag_req_v2 *r,
			       const struct nlattr *bc)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct inet_sock *inet = inet_sk(sk);
	struct inet_diag_entry entry;
	int j, s_j, reqnum, s_reqnum;
	struct listen_sock *lopt;
	int err = 0;

	s_j = cb->args[3];
	s_reqnum = cb->args[4];

	if (s_j > 0)
		s_j--;

	entry.family = sk->sk_family;

	spin_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock);

	lopt = icsk->icsk_accept_queue.listen_opt;
	if (!lopt || !listen_sock_qlen(lopt))
		goto out;

	if (bc) {
		entry.sport = inet->inet_num;
		entry.userlocks = sk->sk_userlocks;
	}

	for (j = s_j; j < lopt->nr_table_entries; j++) {
		struct request_sock *req, *head = lopt->syn_table[j];

		reqnum = 0;
		for (req = head; req; reqnum++, req = req->dl_next) {
			struct inet_request_sock *ireq = inet_rsk(req);

			if (reqnum < s_reqnum)
				continue;
			if (r->id.idiag_dport != ireq->ir_rmt_port &&
			    r->id.idiag_dport)
				continue;

			if (bc) {
				/* Note: entry.sport and entry.userlocks are already set */
				entry_fill_addrs(&entry, req_to_sk(req));
				entry.dport = ntohs(ireq->ir_rmt_port);

				if (!inet_diag_bc_run(bc, &entry))
					continue;
			}

			err = inet_req_diag_fill(req_to_sk(req), skb,
						 NETLINK_CB(cb->skb).portid,
						 cb->nlh->nlmsg_seq,
						 NLM_F_MULTI, cb->nlh);
			if (err < 0) {
				cb->args[3] = j + 1;
				cb->args[4] = reqnum;
				goto out;
			}
		}

		s_reqnum = 0;
	}

out:
	spin_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);

	return err;
}

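/*
 * Iterate over the whole hash table for a NLM_F_DUMP request: first the
 * listening hash (LISTEN and SYN_RECV states), then the established hash
 * (which also holds timewait sockets). cb->args[0..4] carry the resume
 * state between successive dump callbacks.
 */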
void inet_diag_dump_icsk(struct inet_hashinfo *hashinfo, struct sk_buff *skb,
			 struct netlink_callback *cb,
			 const struct inet_diag_req_v2 *r, struct nlattr *bc)
{
	struct net *net = sock_net(skb->sk);
	int i, num, s_i, s_num;

	s_i = cb->args[1];
	s_num = num = cb->args[2];

	if (cb->args[0] == 0) {
		if (!(r->idiag_states & (TCPF_LISTEN | TCPF_SYN_RECV)))
			goto skip_listen_ht;

		for (i = s_i; i < INET_LHTABLE_SIZE; i++) {
			struct inet_listen_hashbucket *ilb;
			struct hlist_nulls_node *node;
			struct sock *sk;

			num = 0;
			ilb = &hashinfo->listening_hash[i];
			spin_lock_bh(&ilb->lock);
			sk_nulls_for_each(sk, node, &ilb->head) {
				struct inet_sock *inet = inet_sk(sk);

				if (!net_eq(sock_net(sk), net))
					continue;

				if (num < s_num) {
					num++;
					continue;
				}

				if (r->sdiag_family != AF_UNSPEC &&
				    sk->sk_family != r->sdiag_family)
					goto next_listen;

				if (r->id.idiag_sport != inet->inet_sport &&
				    r->id.idiag_sport)
					goto next_listen;

				if (!(r->idiag_states & TCPF_LISTEN) ||
				    r->id.idiag_dport ||
				    cb->args[3] > 0)
					goto syn_recv;

				if (inet_csk_diag_dump(sk, skb, cb, r, bc) < 0) {
					spin_unlock_bh(&ilb->lock);
					goto done;
				}

syn_recv:
				if (!(r->idiag_states & TCPF_SYN_RECV))
					goto next_listen;

				if (inet_diag_dump_reqs(skb, sk, cb, r, bc) < 0) {
					spin_unlock_bh(&ilb->lock);
					goto done;
				}

next_listen:
				cb->args[3] = 0;
				cb->args[4] = 0;
				++num;
			}
			spin_unlock_bh(&ilb->lock);

			s_num = 0;
			cb->args[3] = 0;
			cb->args[4] = 0;
		}
skip_listen_ht:
		cb->args[0] = 1;
		s_i = num = s_num = 0;
	}

	if (!(r->idiag_states & ~(TCPF_LISTEN | TCPF_SYN_RECV)))
		goto out;

	for (i = s_i; i <= hashinfo->ehash_mask; i++) {
		struct inet_ehash_bucket *head = &hashinfo->ehash[i];
		spinlock_t *lock = inet_ehash_lockp(hashinfo, i);
		struct hlist_nulls_node *node;
		struct sock *sk;

		num = 0;

		if (hlist_nulls_empty(&head->chain))
			continue;

		if (i > s_i)
			s_num = 0;

		spin_lock_bh(lock);
		sk_nulls_for_each(sk, node, &head->chain) {
			int state, res;

			if (!net_eq(sock_net(sk), net))
				continue;
			if (num < s_num)
				goto next_normal;
			state = (sk->sk_state == TCP_TIME_WAIT) ?
				inet_twsk(sk)->tw_substate : sk->sk_state;
			if (!(r->idiag_states & (1 << state)))
				goto next_normal;
			if (r->sdiag_family != AF_UNSPEC &&
			    sk->sk_family != r->sdiag_family)
				goto next_normal;
			if (r->id.idiag_sport != htons(sk->sk_num) &&
			    r->id.idiag_sport)
				goto next_normal;
			if (r->id.idiag_dport != sk->sk_dport &&
			    r->id.idiag_dport)
				goto next_normal;
			twsk_build_assert();

			if (!inet_diag_bc_sk(bc, sk))
				goto next_normal;

			res = sk_diag_fill(sk, skb, r,
					   sk_user_ns(NETLINK_CB(cb->skb).sk),
					   NETLINK_CB(cb->skb).portid,
					   cb->nlh->nlmsg_seq, NLM_F_MULTI,
					   cb->nlh);
			if (res < 0) {
				spin_unlock_bh(lock);
				goto done;
			}
next_normal:
			++num;
		}

		spin_unlock_bh(lock);
	}

done:
	cb->args[1] = i;
	cb->args[2] = num;
out:
	;
}
EXPORT_SYMBOL_GPL(inet_diag_dump_icsk);

static int __inet_diag_dump(struct sk_buff *skb, struct netlink_callback *cb,
			    const struct inet_diag_req_v2 *r,
			    struct nlattr *bc)
{
	const struct inet_diag_handler *handler;
	int err = 0;

	handler = inet_diag_lock_handler(r->sdiag_protocol);
	if (!IS_ERR(handler))
		handler->dump(skb, cb, r, bc);
	else
		err = PTR_ERR(handler);
	inet_diag_unlock_handler(handler);

	return err ? : skb->len;
}

static int inet_diag_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	int hdrlen = sizeof(struct inet_diag_req_v2);
	struct nlattr *bc = NULL;

	if (nlmsg_attrlen(cb->nlh, hdrlen))
		bc = nlmsg_find_attr(cb->nlh, hdrlen, INET_DIAG_REQ_BYTECODE);

	return __inet_diag_dump(skb, cb, nlmsg_data(cb->nlh), bc);
}

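/*
 * Legacy INET_DIAG (pre-sock_diag) compatibility: map the old netlink
 * message types onto transport protocols and rebuild an inet_diag_req_v2
 * from the v1 request header.
 */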
static int inet_diag_type2proto(int type)
{
	switch (type) {
	case TCPDIAG_GETSOCK:
		return IPPROTO_TCP;
	case DCCPDIAG_GETSOCK:
		return IPPROTO_DCCP;
	default:
		return 0;
	}
}

static int inet_diag_dump_compat(struct sk_buff *skb,
				 struct netlink_callback *cb)
{
	struct inet_diag_req *rc = nlmsg_data(cb->nlh);
	int hdrlen = sizeof(struct inet_diag_req);
	struct inet_diag_req_v2 req;
	struct nlattr *bc = NULL;

	req.sdiag_family = AF_UNSPEC; /* compatibility */
	req.sdiag_protocol = inet_diag_type2proto(cb->nlh->nlmsg_type);
	req.idiag_ext = rc->idiag_ext;
	req.idiag_states = rc->idiag_states;
	req.id = rc->id;

	if (nlmsg_attrlen(cb->nlh, hdrlen))
		bc = nlmsg_find_attr(cb->nlh, hdrlen, INET_DIAG_REQ_BYTECODE);

	return __inet_diag_dump(skb, cb, &req, bc);
}

static int inet_diag_get_exact_compat(struct sk_buff *in_skb,
				      const struct nlmsghdr *nlh)
{
	struct inet_diag_req *rc = nlmsg_data(nlh);
	struct inet_diag_req_v2 req;

	req.sdiag_family = rc->idiag_family;
	req.sdiag_protocol = inet_diag_type2proto(nlh->nlmsg_type);
	req.idiag_ext = rc->idiag_ext;
	req.idiag_states = rc->idiag_states;
	req.id = rc->id;

	return inet_diag_get_exact(in_skb, nlh, &req);
}

static int inet_diag_rcv_msg_compat(struct sk_buff *skb, struct nlmsghdr *nlh)
{
	int hdrlen = sizeof(struct inet_diag_req);
	struct net *net = sock_net(skb->sk);

	if (nlh->nlmsg_type >= INET_DIAG_GETSOCK_MAX ||
	    nlmsg_len(nlh) < hdrlen)
		return -EINVAL;

	if (nlh->nlmsg_flags & NLM_F_DUMP) {
		if (nlmsg_attrlen(nlh, hdrlen)) {
			struct nlattr *attr;

			attr = nlmsg_find_attr(nlh, hdrlen,
					       INET_DIAG_REQ_BYTECODE);
			if (!attr ||
			    nla_len(attr) < sizeof(struct inet_diag_bc_op) ||
			    inet_diag_bc_audit(nla_data(attr), nla_len(attr)))
				return -EINVAL;
		}
		{
			struct netlink_dump_control c = {
				.dump = inet_diag_dump_compat,
			};
			return netlink_dump_start(net->diag_nlsk, skb, nlh, &c);
		}
	}

	return inet_diag_get_exact_compat(skb, nlh);
}

static int inet_diag_handler_dump(struct sk_buff *skb, struct nlmsghdr *h)
{
	int hdrlen = sizeof(struct inet_diag_req_v2);
	struct net *net = sock_net(skb->sk);

	if (nlmsg_len(h) < hdrlen)
		return -EINVAL;

	if (h->nlmsg_flags & NLM_F_DUMP) {
		if (nlmsg_attrlen(h, hdrlen)) {
			struct nlattr *attr;

			attr = nlmsg_find_attr(h, hdrlen,
					       INET_DIAG_REQ_BYTECODE);
			if (!attr ||
			    nla_len(attr) < sizeof(struct inet_diag_bc_op) ||
			    inet_diag_bc_audit(nla_data(attr), nla_len(attr)))
				return -EINVAL;
		}
		{
			struct netlink_dump_control c = {
				.dump = inet_diag_dump,
			};
			return netlink_dump_start(net->diag_nlsk, skb, h, &c);
		}
	}

	return inet_diag_get_exact(skb, h, nlmsg_data(h));
}

static const struct sock_diag_handler inet_diag_handler = {
	.family = AF_INET,
	.dump = inet_diag_handler_dump,
};

static const struct sock_diag_handler inet6_diag_handler = {
	.family = AF_INET6,
	.dump = inet_diag_handler_dump,
};

int inet_diag_register(const struct inet_diag_handler *h)
{
	const __u16 type = h->idiag_type;
	int err = -EINVAL;

	if (type >= IPPROTO_MAX)
		goto out;

	mutex_lock(&inet_diag_table_mutex);
	err = -EEXIST;
	if (!inet_diag_table[type]) {
		inet_diag_table[type] = h;
		err = 0;
	}
	mutex_unlock(&inet_diag_table_mutex);
out:
	return err;
}
EXPORT_SYMBOL_GPL(inet_diag_register);

void inet_diag_unregister(const struct inet_diag_handler *h)
{
	const __u16 type = h->idiag_type;

	if (type >= IPPROTO_MAX)
		return;

	mutex_lock(&inet_diag_table_mutex);
	inet_diag_table[type] = NULL;
	mutex_unlock(&inet_diag_table_mutex);
}
EXPORT_SYMBOL_GPL(inet_diag_unregister);

static int __init inet_diag_init(void)
{
	const int inet_diag_table_size = (IPPROTO_MAX *
					  sizeof(struct inet_diag_handler *));
	int err = -ENOMEM;

	inet_diag_table = kzalloc(inet_diag_table_size, GFP_KERNEL);
	if (!inet_diag_table)
		goto out;

	err = sock_diag_register(&inet_diag_handler);
	if (err)
		goto out_free_nl;

	err = sock_diag_register(&inet6_diag_handler);
	if (err)
		goto out_free_inet;

	sock_diag_register_inet_compat(inet_diag_rcv_msg_compat);
out:
	return err;

out_free_inet:
	sock_diag_unregister(&inet_diag_handler);
out_free_nl:
	kfree(inet_diag_table);
	goto out;
}

static void __exit inet_diag_exit(void)
{
	sock_diag_unregister(&inet6_diag_handler);
	sock_diag_unregister(&inet_diag_handler);
	sock_diag_unregister_inet_compat(inet_diag_rcv_msg_compat);
	kfree(inet_diag_table);
}

module_init(inet_diag_init);
module_exit(inet_diag_exit);
MODULE_LICENSE("GPL");
MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_NETLINK, NETLINK_SOCK_DIAG, 2 /* AF_INET */);
MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_NETLINK, NETLINK_SOCK_DIAG, 10 /* AF_INET6 */);