[PATCH] netlabel gfp annotations
[linux-block.git] / net / ipv4 / netfilter / ip_tables.c
CommitLineData
1da177e4
LT
1/*
2 * Packet matching code.
3 *
4 * Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
2e4e6a17 5 * Copyright (C) 2000-2005 Netfilter Core Team <coreteam@netfilter.org>
1da177e4
LT
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 *
11 * 19 Jan 2002 Harald Welte <laforge@gnumonks.org>
12 * - increase module usage count as soon as we have rules inside
13 * a table
2e4e6a17
HW
 14 * 08 Oct 2005 Harald Welte <laforge@netfilter.org>
15 * - Generalize into "x_tables" layer and "{ip,ip6,arp}_tables"
1da177e4 16 */
1da177e4 17#include <linux/cache.h>
4fc268d2 18#include <linux/capability.h>
1da177e4
LT
19#include <linux/skbuff.h>
20#include <linux/kmod.h>
21#include <linux/vmalloc.h>
22#include <linux/netdevice.h>
23#include <linux/module.h>
1da177e4
LT
24#include <linux/icmp.h>
25#include <net/ip.h>
2722971c 26#include <net/compat.h>
1da177e4 27#include <asm/uaccess.h>
57b47a53 28#include <linux/mutex.h>
1da177e4
LT
29#include <linux/proc_fs.h>
30#include <linux/err.h>
c8923c6b 31#include <linux/cpumask.h>
1da177e4 32
2e4e6a17 33#include <linux/netfilter/x_tables.h>
1da177e4
LT
34#include <linux/netfilter_ipv4/ip_tables.h>
35
36MODULE_LICENSE("GPL");
37MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>");
38MODULE_DESCRIPTION("IPv4 packet filter");
39
40/*#define DEBUG_IP_FIREWALL*/
41/*#define DEBUG_ALLOW_ALL*/ /* Useful for remote debugging */
42/*#define DEBUG_IP_FIREWALL_USER*/
43
44#ifdef DEBUG_IP_FIREWALL
45#define dprintf(format, args...) printk(format , ## args)
46#else
47#define dprintf(format, args...)
48#endif
49
50#ifdef DEBUG_IP_FIREWALL_USER
51#define duprintf(format, args...) printk(format , ## args)
52#else
53#define duprintf(format, args...)
54#endif
55
56#ifdef CONFIG_NETFILTER_DEBUG
57#define IP_NF_ASSERT(x) \
58do { \
59 if (!(x)) \
60 printk("IP_NF_ASSERT: %s:%s:%u\n", \
61 __FUNCTION__, __FILE__, __LINE__); \
62} while(0)
63#else
64#define IP_NF_ASSERT(x)
65#endif
1da177e4
LT
66
67#if 0
68/* All the better to debug you with... */
69#define static
70#define inline
71#endif
72
73/*
74 We keep a set of rules for each CPU, so we can avoid write-locking
75 them in the softirq when updating the counters and therefore
76 only need to read-lock in the softirq; doing a write_lock_bh() in user
77 context stops packets coming through and allows user context to read
78 the counters or update the rules.
79
1da177e4
LT
80 Hence the start of any table is given by get_table() below. */
81
1da177e4
LT
/* Returns whether matches rule or not. */
/* Test one packet's IP header against the IP part of a single rule.
 *
 * @ip:      IP header of the packet under test
 * @indev:   input interface name ("" when none)
 * @outdev:  output interface name ("" when none)
 * @ipinfo:  the rule's address/interface/protocol criteria
 * @isfrag:  non-zero for a non-first fragment (IP offset != 0)
 *
 * Returns 1 on match, 0 otherwise.  Every individual test can be
 * inverted through ipinfo->invflags; FWINV XORs the raw comparison
 * result with the corresponding inversion bit.
 */
static inline int
ip_packet_match(const struct iphdr *ip,
		const char *indev,
		const char *outdev,
		const struct ipt_ip *ipinfo,
		int isfrag)
{
	size_t i;
	unsigned long ret;

/* Evaluates to "bool", optionally inverted when invflg is set in the rule. */
#define FWINV(bool,invflg) ((bool) ^ !!(ipinfo->invflags & invflg))

	/* Source/destination address under the rule's netmask. */
	if (FWINV((ip->saddr&ipinfo->smsk.s_addr) != ipinfo->src.s_addr,
		  IPT_INV_SRCIP)
	    || FWINV((ip->daddr&ipinfo->dmsk.s_addr) != ipinfo->dst.s_addr,
		     IPT_INV_DSTIP)) {
		dprintf("Source or dest mismatch.\n");

		dprintf("SRC: %u.%u.%u.%u. Mask: %u.%u.%u.%u. Target: %u.%u.%u.%u.%s\n",
			NIPQUAD(ip->saddr),
			NIPQUAD(ipinfo->smsk.s_addr),
			NIPQUAD(ipinfo->src.s_addr),
			ipinfo->invflags & IPT_INV_SRCIP ? " (INV)" : "");
		dprintf("DST: %u.%u.%u.%u Mask: %u.%u.%u.%u Target: %u.%u.%u.%u.%s\n",
			NIPQUAD(ip->daddr),
			NIPQUAD(ipinfo->dmsk.s_addr),
			NIPQUAD(ipinfo->dst.s_addr),
			ipinfo->invflags & IPT_INV_DSTIP ? " (INV)" : "");
		return 0;
	}

	/* Look for ifname matches; this should unroll nicely.
	 * Interface names are compared one unsigned long at a time
	 * under the per-rule wildcard mask (assumes the name buffers
	 * are suitably aligned -- see nulldevname in ipt_do_table). */
	for (i = 0, ret = 0; i < IFNAMSIZ/sizeof(unsigned long); i++) {
		ret |= (((const unsigned long *)indev)[i]
			^ ((const unsigned long *)ipinfo->iniface)[i])
			& ((const unsigned long *)ipinfo->iniface_mask)[i];
	}

	if (FWINV(ret != 0, IPT_INV_VIA_IN)) {
		dprintf("VIA in mismatch (%s vs %s).%s\n",
			indev, ipinfo->iniface,
			ipinfo->invflags&IPT_INV_VIA_IN ?" (INV)":"");
		return 0;
	}

	for (i = 0, ret = 0; i < IFNAMSIZ/sizeof(unsigned long); i++) {
		ret |= (((const unsigned long *)outdev)[i]
			^ ((const unsigned long *)ipinfo->outiface)[i])
			& ((const unsigned long *)ipinfo->outiface_mask)[i];
	}

	if (FWINV(ret != 0, IPT_INV_VIA_OUT)) {
		dprintf("VIA out mismatch (%s vs %s).%s\n",
			outdev, ipinfo->outiface,
			ipinfo->invflags&IPT_INV_VIA_OUT ?" (INV)":"");
		return 0;
	}

	/* Check specific protocol */
	if (ipinfo->proto
	    && FWINV(ip->protocol != ipinfo->proto, IPT_INV_PROTO)) {
		dprintf("Packet protocol %hi does not match %hi.%s\n",
			ip->protocol, ipinfo->proto,
			ipinfo->invflags&IPT_INV_PROTO ? " (INV)":"");
		return 0;
	}

	/* If we have a fragment rule but the packet is not a fragment
	 * then we return zero */
	if (FWINV((ipinfo->flags&IPT_F_FRAG) && !isfrag, IPT_INV_FRAG)) {
		dprintf("Fragment rule but not fragment.%s\n",
			ipinfo->invflags & IPT_INV_FRAG ? " (INV)" : "");
		return 0;
	}

	return 1;
}
160
161static inline int
162ip_checkentry(const struct ipt_ip *ip)
163{
164 if (ip->flags & ~IPT_F_MASK) {
165 duprintf("Unknown flag bits set: %08X\n",
166 ip->flags & ~IPT_F_MASK);
167 return 0;
168 }
169 if (ip->invflags & ~IPT_INV_MASK) {
170 duprintf("Unknown invflag bits set: %08X\n",
171 ip->invflags & ~IPT_INV_MASK);
172 return 0;
173 }
174 return 1;
175}
176
/* Target handler for ERROR entries (chain-name markers / malformed
 * rules).  Logs the error name, rate-limited, and drops the packet.
 * @targinfo holds the NUL-terminated error string from the rule. */
static unsigned int
ipt_error(struct sk_buff **pskb,
	  const struct net_device *in,
	  const struct net_device *out,
	  unsigned int hooknum,
	  const struct xt_target *target,
	  const void *targinfo)
{
	if (net_ratelimit())
		printk("ip_tables: error: `%s'\n", (char *)targinfo);

	return NF_DROP;
}
190
191static inline
192int do_match(struct ipt_entry_match *m,
193 const struct sk_buff *skb,
194 const struct net_device *in,
195 const struct net_device *out,
196 int offset,
197 int *hotdrop)
198{
199 /* Stop iteration if it doesn't match */
1c524830
PM
200 if (!m->u.kernel.match->match(skb, in, out, m->u.kernel.match, m->data,
201 offset, skb->nh.iph->ihl*4, hotdrop))
1da177e4
LT
202 return 1;
203 else
204 return 0;
205}
206
/* Translate a byte offset within a table blob into an entry pointer. */
static inline struct ipt_entry *
get_entry(void *base, unsigned int offset)
{
	void *p = base + offset;

	return (struct ipt_entry *)p;
}
212
/* Returns one of the generic firewall policies, like NF_ACCEPT. */
/* Main packet-filtering entry point, called from the netfilter hooks.
 * Walks the per-CPU copy of @table's ruleset for @hook under the table
 * read lock, running matches and targets until a verdict is reached.
 * Built-in chain returns are implemented with a back-pointer "stack"
 * threaded through entry->comefrom. */
unsigned int
ipt_do_table(struct sk_buff **pskb,
	     unsigned int hook,
	     const struct net_device *in,
	     const struct net_device *out,
	     struct ipt_table *table)
{
	static const char nulldevname[IFNAMSIZ] __attribute__((aligned(sizeof(long))));
	u_int16_t offset;
	struct iphdr *ip;
	u_int16_t datalen;
	int hotdrop = 0;
	/* Initializing verdict to NF_DROP keeps gcc happy. */
	unsigned int verdict = NF_DROP;
	const char *indev, *outdev;
	void *table_base;
	struct ipt_entry *e, *back;
	struct xt_table_info *private;

	/* Initialization */
	ip = (*pskb)->nh.iph;
	datalen = (*pskb)->len - ip->ihl * 4;
	indev = in ? in->name : nulldevname;
	outdev = out ? out->name : nulldevname;
	/* We handle fragments by dealing with the first fragment as
	 * if it was a normal packet.  All other fragments are treated
	 * normally, except that they will NEVER match rules that ask
	 * things we don't know, ie. tcp syn flag or ports).  If the
	 * rule is also a fragment-specific rule, non-fragments won't
	 * match it. */
	offset = ntohs(ip->frag_off) & IP_OFFSET;

	read_lock_bh(&table->lock);
	IP_NF_ASSERT(table->valid_hooks & (1 << hook));
	private = table->private;
	/* Each CPU traverses its own copy of the ruleset. */
	table_base = (void *)private->entries[smp_processor_id()];
	e = get_entry(table_base, private->hook_entry[hook]);

	/* For return from builtin chain */
	back = get_entry(table_base, private->underflow[hook]);

	do {
		IP_NF_ASSERT(e);
		IP_NF_ASSERT(back);
		if (ip_packet_match(ip, indev, outdev, &e->ip, offset)) {
			struct ipt_entry_target *t;

			/* Any match extension saying "no" skips this rule. */
			if (IPT_MATCH_ITERATE(e, do_match,
					      *pskb, in, out,
					      offset, &hotdrop) != 0)
				goto no_match;

			ADD_COUNTER(e->counters, ntohs(ip->tot_len), 1);

			t = ipt_get_target(e);
			IP_NF_ASSERT(t->u.kernel.target);
			/* Standard target? */
			if (!t->u.kernel.target->target) {
				int v;

				v = ((struct ipt_standard_target *)t)->verdict;
				if (v < 0) {
					/* Pop from stack? */
					if (v != IPT_RETURN) {
						/* Absolute verdict encoded as -(v)-1. */
						verdict = (unsigned)(-v) - 1;
						break;
					}
					e = back;
					back = get_entry(table_base,
							 back->comefrom);
					continue;
				}
				if (table_base + v != (void *)e + e->next_offset
				    && !(e->ip.flags & IPT_F_GOTO)) {
					/* Save old back ptr in next entry */
					struct ipt_entry *next
						= (void *)e + e->next_offset;
					next->comefrom
						= (void *)back - table_base;
					/* set back pointer to next entry */
					back = next;
				}

				e = get_entry(table_base, v);
			} else {
				/* Targets which reenter must return
				   abs. verdicts */
#ifdef CONFIG_NETFILTER_DEBUG
				((struct ipt_entry *)table_base)->comefrom
					= 0xeeeeeeec;
#endif
				verdict = t->u.kernel.target->target(pskb,
								     in, out,
								     hook,
								     t->u.kernel.target,
								     t->data);

#ifdef CONFIG_NETFILTER_DEBUG
				if (((struct ipt_entry *)table_base)->comefrom
				    != 0xeeeeeeec
				    && verdict == IPT_CONTINUE) {
					printk("Target %s reentered!\n",
					       t->u.kernel.target->name);
					verdict = NF_DROP;
				}
				((struct ipt_entry *)table_base)->comefrom
					= 0x57acc001;
#endif
				/* Target might have changed stuff. */
				ip = (*pskb)->nh.iph;
				datalen = (*pskb)->len - ip->ihl * 4;

				if (verdict == IPT_CONTINUE)
					e = (void *)e + e->next_offset;
				else
					/* Verdict */
					break;
			}
		} else {

		no_match:
			e = (void *)e + e->next_offset;
		}
	} while (!hotdrop);

	read_unlock_bh(&table->lock);

#ifdef DEBUG_ALLOW_ALL
	return NF_ACCEPT;
#else
	if (hotdrop)
		return NF_DROP;
	else return verdict;
#endif
}
349
1da177e4
LT
350/* All zeroes == unconditional rule. */
351static inline int
352unconditional(const struct ipt_ip *ip)
353{
354 unsigned int i;
355
356 for (i = 0; i < sizeof(*ip)/sizeof(__u32); i++)
357 if (((__u32 *)ip)[i])
358 return 0;
359
360 return 1;
361}
362
/* Figures out from what hook each rule can be called: returns 0 if
   there are loops.  Puts hook bitmask in comefrom. */
/* Depth-first walk of the rule graph from every valid hook entry point.
 * Bit NF_IP_NUMHOOKS in comefrom marks "currently on the walk stack";
 * seeing it again means a loop.  counters.pcnt is borrowed as the
 * back-pointer for the iterative backtrack and reset to 0 on the way
 * out. */
static int
mark_source_chains(struct xt_table_info *newinfo,
		   unsigned int valid_hooks, void *entry0)
{
	unsigned int hook;

	/* No recursion; use packet counter to save back ptrs (reset
	   to 0 as we leave), and comefrom to save source hook bitmask */
	for (hook = 0; hook < NF_IP_NUMHOOKS; hook++) {
		unsigned int pos = newinfo->hook_entry[hook];
		struct ipt_entry *e
			= (struct ipt_entry *)(entry0 + pos);

		if (!(valid_hooks & (1 << hook)))
			continue;

		/* Set initial back pointer. */
		e->counters.pcnt = pos;

		for (;;) {
			struct ipt_standard_target *t
				= (void *)ipt_get_target(e);

			/* Already on the walk stack -> cycle detected. */
			if (e->comefrom & (1 << NF_IP_NUMHOOKS)) {
				printk("iptables: loop hook %u pos %u %08X.\n",
				       hook, pos, e->comefrom);
				return 0;
			}
			e->comefrom
				|= ((1 << hook) | (1 << NF_IP_NUMHOOKS));

			/* Unconditional return/END. */
			if (e->target_offset == sizeof(struct ipt_entry)
			    && (strcmp(t->target.u.user.name,
				       IPT_STANDARD_TARGET) == 0)
			    && t->verdict < 0
			    && unconditional(&e->ip)) {
				unsigned int oldpos, size;

				/* Return: backtrack through the last
				   big jump. */
				do {
					e->comefrom ^= (1<<NF_IP_NUMHOOKS);
#ifdef DEBUG_IP_FIREWALL_USER
					if (e->comefrom
					    & (1 << NF_IP_NUMHOOKS)) {
						duprintf("Back unset "
							 "on hook %u "
							 "rule %u\n",
							 hook, pos);
					}
#endif
					oldpos = pos;
					pos = e->counters.pcnt;
					e->counters.pcnt = 0;

					/* We're at the start. */
					if (pos == oldpos)
						goto next;

					e = (struct ipt_entry *)
						(entry0 + pos);
				} while (oldpos == pos + e->next_offset);

				/* Move along one */
				size = e->next_offset;
				e = (struct ipt_entry *)
					(entry0 + pos + size);
				e->counters.pcnt = pos;
				pos += size;
			} else {
				int newpos = t->verdict;

				if (strcmp(t->target.u.user.name,
					   IPT_STANDARD_TARGET) == 0
				    && newpos >= 0) {
					/* This a jump; chase it. */
					duprintf("Jump rule %u -> %u\n",
						 pos, newpos);
				} else {
					/* ... this is a fallthru */
					newpos = pos + e->next_offset;
				}
				e = (struct ipt_entry *)
					(entry0 + newpos);
				e->counters.pcnt = pos;
				pos = newpos;
			}
		}
		next:
		duprintf("Finished chain %u\n", hook);
	}
	return 1;
}
459
/* Iterator callback: release one match extension (destroy hook plus
 * module reference).  With a non-NULL @i only the first *i matches are
 * cleaned; returns 1 to stop iteration once the count runs out,
 * otherwise 0. */
static inline int
cleanup_match(struct ipt_entry_match *m, unsigned int *i)
{
	if (i && (*i)-- == 0)
		return 1;

	if (m->u.kernel.match->destroy)
		m->u.kernel.match->destroy(m->u.kernel.match, m->data);
	module_put(m->u.kernel.match->me);
	return 0;
}
471
/* Sanity-check a standard target's verdict: a non-negative verdict is
 * a jump offset and must lie inside the table blob (@max_offset is the
 * blob size); a negative verdict must be one of the built-in codes
 * (>= -NF_MAX_VERDICT - 1).  Returns 1 when OK, 0 on failure. */
static inline int
standard_check(const struct ipt_entry_target *t,
	       unsigned int max_offset)
{
	struct ipt_standard_target *targ = (void *)t;

	/* Check standard info. */
	if (targ->verdict >= 0
	    && targ->verdict > max_offset - sizeof(struct ipt_entry)) {
		duprintf("ipt_standard_check: bad verdict (%i)\n",
			 targ->verdict);
		return 0;
	}
	if (targ->verdict < -NF_MAX_VERDICT - 1) {
		duprintf("ipt_standard_check: bad negative verdict (%i)\n",
			 targ->verdict);
		return 0;
	}
	return 1;
}
492
/* Find (auto-loading "ipt_<name>" if needed) and validate one match
 * extension of a rule, then bind it into m->u.kernel.match and bump *i.
 * On any failure after the lookup the module reference is dropped.
 * Returns 0 or a negative errno. */
static inline int
check_match(struct ipt_entry_match *m,
	    const char *name,
	    const struct ipt_ip *ip,
	    unsigned int hookmask,
	    unsigned int *i)
{
	struct ipt_match *match;
	int ret;

	match = try_then_request_module(xt_find_match(AF_INET, m->u.user.name,
						      m->u.user.revision),
					"ipt_%s", m->u.user.name);
	if (IS_ERR(match) || !match) {
		duprintf("check_match: `%s' not found\n", m->u.user.name);
		return match ? PTR_ERR(match) : -ENOENT;
	}
	m->u.kernel.match = match;

	/* Generic x_tables validation (size, hooks, protocol). */
	ret = xt_check_match(match, AF_INET, m->u.match_size - sizeof(*m),
			     name, hookmask, ip->proto,
			     ip->invflags & IPT_INV_PROTO);
	if (ret)
		goto err;

	/* Extension-specific validation, if the match provides one. */
	if (m->u.kernel.match->checkentry
	    && !m->u.kernel.match->checkentry(name, ip, match, m->data,
					      hookmask)) {
		duprintf("ip_tables: check failed for `%s'.\n",
			 m->u.kernel.match->name);
		ret = -EINVAL;
		goto err;
	}

	(*i)++;
	return 0;
err:
	module_put(m->u.kernel.match->me);
	return ret;
}
533
534static struct ipt_target ipt_standard_target;
535
/* Validate one complete rule: its IP part, all match extensions
 * (check_match) and its target (auto-loading "ipt_<name>" modules as
 * needed).  On success binds the target and increments *i; on failure
 * unwinds whatever matches were already checked.  Returns 0 or a
 * negative errno. */
static inline int
check_entry(struct ipt_entry *e, const char *name, unsigned int size,
	    unsigned int *i)
{
	struct ipt_entry_target *t;
	struct ipt_target *target;
	int ret;
	unsigned int j;

	if (!ip_checkentry(&e->ip)) {
		duprintf("ip_tables: ip check failed %p %s.\n", e, name);
		return -EINVAL;
	}

	/* j counts validated matches so a failure can unwind exactly them. */
	j = 0;
	ret = IPT_MATCH_ITERATE(e, check_match, name, &e->ip, e->comefrom, &j);
	if (ret != 0)
		goto cleanup_matches;

	t = ipt_get_target(e);
	target = try_then_request_module(xt_find_target(AF_INET,
						     t->u.user.name,
						     t->u.user.revision),
					 "ipt_%s", t->u.user.name);
	if (IS_ERR(target) || !target) {
		duprintf("check_entry: `%s' not found\n", t->u.user.name);
		ret = target ? PTR_ERR(target) : -ENOENT;
		goto cleanup_matches;
	}
	t->u.kernel.target = target;

	/* Generic x_tables validation (size, hooks, protocol). */
	ret = xt_check_target(target, AF_INET, t->u.target_size - sizeof(*t),
			      name, e->comefrom, e->ip.proto,
			      e->ip.invflags & IPT_INV_PROTO);
	if (ret)
		goto err;

	if (t->u.kernel.target == &ipt_standard_target) {
		if (!standard_check(t, size)) {
			ret = -EINVAL;
			goto err;
		}
	} else if (t->u.kernel.target->checkentry
		   && !t->u.kernel.target->checkentry(name, e, target, t->data,
						      e->comefrom)) {
		duprintf("ip_tables: check failed for `%s'.\n",
			 t->u.kernel.target->name);
		ret = -EINVAL;
		goto err;
	}

	(*i)++;
	return 0;
 err:
	module_put(t->u.kernel.target->me);
 cleanup_matches:
	IPT_MATCH_ITERATE(e, cleanup_match, &j);
	return ret;
}
595
/* First-pass iterator over a user-supplied table blob: check one
 * entry's alignment and minimum size, record which hook entry points /
 * underflows coincide with its offset, and reset its counters and
 * comefrom fields.  Increments *i (total entries seen).  Returns 0 or
 * -EINVAL. */
static inline int
check_entry_size_and_hooks(struct ipt_entry *e,
			   struct xt_table_info *newinfo,
			   unsigned char *base,
			   unsigned char *limit,
			   const unsigned int *hook_entries,
			   const unsigned int *underflows,
			   unsigned int *i)
{
	unsigned int h;

	if ((unsigned long)e % __alignof__(struct ipt_entry) != 0
	    || (unsigned char *)e + sizeof(struct ipt_entry) >= limit) {
		duprintf("Bad offset %p\n", e);
		return -EINVAL;
	}

	/* An entry must at least hold its header plus a target header. */
	if (e->next_offset
	    < sizeof(struct ipt_entry) + sizeof(struct ipt_entry_target)) {
		duprintf("checking: element %p size %u\n",
			 e, e->next_offset);
		return -EINVAL;
	}

	/* Check hooks & underflows */
	for (h = 0; h < NF_IP_NUMHOOKS; h++) {
		if ((unsigned char *)e - base == hook_entries[h])
			newinfo->hook_entry[h] = hook_entries[h];
		if ((unsigned char *)e - base == underflows[h])
			newinfo->underflow[h] = underflows[h];
	}

	/* FIXME: underflows must be unconditional, standard verdicts
	   < 0 (not IPT_RETURN). --RR */

	/* Clear counters and comefrom */
	e->counters = ((struct xt_counters) { 0, 0 });
	e->comefrom = 0;

	(*i)++;
	return 0;
}
638
/* Iterator callback: undo check_entry() for one rule -- destroy all of
 * its matches and its target and drop the module references.  With a
 * non-NULL @i only the first *i entries are cleaned (returns 1 to stop
 * iteration once exhausted). */
static inline int
cleanup_entry(struct ipt_entry *e, unsigned int *i)
{
	struct ipt_entry_target *t;

	if (i && (*i)-- == 0)
		return 1;

	/* Cleanup all matches */
	IPT_MATCH_ITERATE(e, cleanup_match, NULL);
	t = ipt_get_target(e);
	if (t->u.kernel.target->destroy)
		t->u.kernel.target->destroy(t->u.kernel.target, t->data);
	module_put(t->u.kernel.target->me);
	return 0;
}
655
/* Checks and translates the user-supplied table segment (held in
   newinfo) */
/* Full validation pipeline for a new table image held in @entry0:
 * per-entry size/offset checks, hook-coverage checks, loop detection
 * (mark_source_chains) and per-rule match/target validation.  On
 * success the verified blob is replicated into every possible CPU's
 * copy inside @newinfo.  Returns 0 or a negative errno. */
static int
translate_table(const char *name,
		unsigned int valid_hooks,
		struct xt_table_info *newinfo,
		void *entry0,
		unsigned int size,
		unsigned int number,
		const unsigned int *hook_entries,
		const unsigned int *underflows)
{
	unsigned int i;
	int ret;

	newinfo->size = size;
	newinfo->number = number;

	/* Init all hooks to impossible value. */
	for (i = 0; i < NF_IP_NUMHOOKS; i++) {
		newinfo->hook_entry[i] = 0xFFFFFFFF;
		newinfo->underflow[i] = 0xFFFFFFFF;
	}

	duprintf("translate_table: size %u\n", newinfo->size);
	i = 0;
	/* Walk through entries, checking offsets. */
	ret = IPT_ENTRY_ITERATE(entry0, newinfo->size,
				check_entry_size_and_hooks,
				newinfo,
				entry0,
				entry0 + size,
				hook_entries, underflows, &i);
	if (ret != 0)
		return ret;

	/* Userspace's claimed entry count must match what we walked. */
	if (i != number) {
		duprintf("translate_table: %u not %u entries\n",
			 i, number);
		return -EINVAL;
	}

	/* Check hooks all assigned */
	for (i = 0; i < NF_IP_NUMHOOKS; i++) {
		/* Only hooks which are valid */
		if (!(valid_hooks & (1 << i)))
			continue;
		if (newinfo->hook_entry[i] == 0xFFFFFFFF) {
			duprintf("Invalid hook entry %u %u\n",
				 i, hook_entries[i]);
			return -EINVAL;
		}
		if (newinfo->underflow[i] == 0xFFFFFFFF) {
			duprintf("Invalid underflow %u %u\n",
				 i, underflows[i]);
			return -EINVAL;
		}
	}

	if (!mark_source_chains(newinfo, valid_hooks, entry0))
		return -ELOOP;

	/* Finally, each sanity check must pass */
	i = 0;
	ret = IPT_ENTRY_ITERATE(entry0, newinfo->size,
				check_entry, name, size, &i);

	if (ret != 0) {
		/* Unwind only the i entries that passed check_entry. */
		IPT_ENTRY_ITERATE(entry0, newinfo->size,
				  cleanup_entry, &i);
		return ret;
	}

	/* And one copy for every other CPU */
	for_each_possible_cpu(i) {
		if (newinfo->entries[i] && newinfo->entries[i] != entry0)
			memcpy(newinfo->entries[i], entry0, newinfo->size);
	}

	return ret;
}
737
1da177e4
LT
/* Gets counters. */
/* Iterator callback: add one rule's byte/packet counters into the
 * accumulation array slot *i, then advance the index. */
static inline int
add_entry_to_counter(const struct ipt_entry *e,
		     struct xt_counters total[],
		     unsigned int *i)
{
	ADD_COUNTER(total[*i], e->counters.bcnt, e->counters.pcnt);

	(*i)++;
	return 0;
}
749
31836064
ED
/* Iterator callback: overwrite (rather than add to) the accumulation
 * slot *i with one rule's counters -- used for the first CPU so the
 * array need not be zeroed beforehand. */
static inline int
set_entry_to_counter(const struct ipt_entry *e,
		     struct ipt_counters total[],
		     unsigned int *i)
{
	SET_COUNTER(total[*i], e->counters.bcnt, e->counters.pcnt);

	(*i)++;
	return 0;
}
760
1da177e4 761static void
2e4e6a17
HW
762get_counters(const struct xt_table_info *t,
763 struct xt_counters counters[])
1da177e4
LT
764{
765 unsigned int cpu;
766 unsigned int i;
31836064
ED
767 unsigned int curcpu;
768
769 /* Instead of clearing (by a previous call to memset())
770 * the counters and using adds, we set the counters
771 * with data used by 'current' CPU
772 * We dont care about preemption here.
773 */
774 curcpu = raw_smp_processor_id();
775
776 i = 0;
777 IPT_ENTRY_ITERATE(t->entries[curcpu],
778 t->size,
779 set_entry_to_counter,
780 counters,
781 &i);
1da177e4 782
6f912042 783 for_each_possible_cpu(cpu) {
31836064
ED
784 if (cpu == curcpu)
785 continue;
1da177e4 786 i = 0;
31836064 787 IPT_ENTRY_ITERATE(t->entries[cpu],
1da177e4
LT
788 t->size,
789 add_entry_to_counter,
790 counters,
791 &i);
792 }
793}
794
/* Allocate a counters array (one struct xt_counters per rule) and fill
 * it with an atomic snapshot of @table's counters, taken under the
 * table write lock.  Caller must vfree() the result.  Returns
 * ERR_PTR(-ENOMEM) on allocation failure. */
static inline struct xt_counters * alloc_counters(struct ipt_table *table)
{
	unsigned int countersize;
	struct xt_counters *counters;
	struct xt_table_info *private = table->private;

	/* We need atomic snapshot of counters: rest doesn't change
	   (other than comefrom, which userspace doesn't care
	   about). */
	countersize = sizeof(struct xt_counters) * private->number;
	counters = vmalloc_node(countersize, numa_node_id());

	if (counters == NULL)
		return ERR_PTR(-ENOMEM);

	/* First, sum counters... */
	write_lock_bh(&table->lock);
	get_counters(private, counters);
	write_unlock_bh(&table->lock);

	return counters;
}
817
/* Copy @table's whole ruleset (@total_size bytes) to @userptr, then
 * patch in a consistent counter snapshot and replace the kernel-side
 * match/target pointers with their user-visible names.  Returns 0 or
 * a negative errno. */
static int
copy_entries_to_user(unsigned int total_size,
		     struct ipt_table *table,
		     void __user *userptr)
{
	unsigned int off, num;
	struct ipt_entry *e;
	struct xt_counters *counters;
	struct xt_table_info *private = table->private;
	int ret = 0;
	void *loc_cpu_entry;

	counters = alloc_counters(table);
	if (IS_ERR(counters))
		return PTR_ERR(counters);

	/* choose the copy that is on our node/cpu, ...
	 * This choice is lazy (because current thread is
	 * allowed to migrate to another cpu)
	 */
	loc_cpu_entry = private->entries[raw_smp_processor_id()];
	/* ... then copy entire thing ... */
	if (copy_to_user(userptr, loc_cpu_entry, total_size) != 0) {
		ret = -EFAULT;
		goto free_counters;
	}

	/* FIXME: use iterator macros --RR */
	/* ... then go back and fix counters and names */
	for (off = 0, num = 0; off < total_size; off += e->next_offset, num++){
		unsigned int i;
		struct ipt_entry_match *m;
		struct ipt_entry_target *t;

		e = (struct ipt_entry *)(loc_cpu_entry + off);
		/* Overwrite the raw counters with the locked snapshot. */
		if (copy_to_user(userptr + off
				 + offsetof(struct ipt_entry, counters),
				 &counters[num],
				 sizeof(counters[num])) != 0) {
			ret = -EFAULT;
			goto free_counters;
		}

		/* Walk the matches between the entry header and the target. */
		for (i = sizeof(struct ipt_entry);
		     i < e->target_offset;
		     i += m->u.match_size) {
			m = (void *)e + i;

			if (copy_to_user(userptr + off + i
					 + offsetof(struct ipt_entry_match,
						    u.user.name),
					 m->u.kernel.match->name,
					 strlen(m->u.kernel.match->name)+1)
			    != 0) {
				ret = -EFAULT;
				goto free_counters;
			}
		}

		t = ipt_get_target(e);
		if (copy_to_user(userptr + off + e->target_offset
				 + offsetof(struct ipt_entry_target,
					    u.user.name),
				 t->u.kernel.target->name,
				 strlen(t->u.kernel.target->name)+1) != 0) {
			ret = -EFAULT;
			goto free_counters;
		}
	}

 free_counters:
	vfree(counters);
	return ret;
}
892
2722971c
DM
#ifdef CONFIG_COMPAT
/* Node of a singly-linked list recording, per native-layout entry
 * offset, how many bytes smaller the 32-bit (compat) representation of
 * that entry is.  compat_calc_jump() sums these to re-base jump
 * targets between layouts. */
struct compat_delta {
	struct compat_delta *next;
	u_int16_t offset;
	short delta;
};

/* Head of the delta list; presumably serialized by xt_compat_lock()
 * in the callers -- TODO confirm. */
static struct compat_delta *compat_offsets = NULL;
901
/* Record that the entry at @offset shrinks by @delta bytes in compat
 * layout.  Returns 0 or -ENOMEM.  When the list is non-empty the new
 * node is inserted *after* the head rather than prepended; ordering is
 * irrelevant to compat_calc_jump(), which sums all matching deltas. */
static int compat_add_offset(u_int16_t offset, short delta)
{
	struct compat_delta *tmp;

	tmp = kmalloc(sizeof(struct compat_delta), GFP_KERNEL);
	if (!tmp)
		return -ENOMEM;
	tmp->offset = offset;
	tmp->delta = delta;
	if (compat_offsets) {
		tmp->next = compat_offsets->next;
		compat_offsets->next = tmp;
	} else {
		compat_offsets = tmp;
		tmp->next = NULL;
	}
	return 0;
}
920
921static void compat_flush_offsets(void)
922{
923 struct compat_delta *tmp, *next;
924
925 if (compat_offsets) {
926 for(tmp = compat_offsets; tmp; tmp = next) {
927 next = tmp->next;
928 kfree(tmp);
929 }
930 compat_offsets = NULL;
931 }
932}
933
934static short compat_calc_jump(u_int16_t offset)
935{
936 struct compat_delta *tmp;
937 short delta;
938
939 for(tmp = compat_offsets, delta = 0; tmp; tmp = tmp->next)
940 if (tmp->offset < offset)
941 delta += tmp->delta;
942 return delta;
943}
944
/* Convert a standard target's verdict from compat (32-bit) to native
 * layout: positive verdicts are jump offsets and must grow by the
 * accumulated size deltas; negative (built-in) verdicts pass through. */
static void compat_standard_from_user(void *dst, void *src)
{
	int v = *(compat_int_t *)src;

	if (v > 0)
		v += compat_calc_jump(v);
	memcpy(dst, &v, sizeof(v));
}
46c5ea3c 953
/* Convert a standard target's verdict from native to compat layout
 * (shrinking positive jump offsets by the accumulated deltas) and copy
 * it to userspace.  Returns 0 or -EFAULT. */
static int compat_standard_to_user(void __user *dst, void *src)
{
	compat_int_t cv = *(int *)src;

	if (cv > 0)
		cv -= compat_calc_jump(cv);
	return copy_to_user(dst, &cv, sizeof(cv)) ? -EFAULT : 0;
}
962
/* Iterator callback: accumulate into *size how many bytes this match
 * shrinks by in compat layout.  Always returns 0 (keep iterating). */
static inline int
compat_calc_match(struct ipt_entry_match *m, int * size)
{
	*size += xt_compat_match_offset(m->u.kernel.match);
	return 0;
}
969
/* Compute how much one entry shrinks in compat layout (matches plus
 * target), record that in the delta list, reduce newinfo->size, and
 * shift down any hook entry points / underflows located after this
 * entry.  Returns 0 or a negative errno from compat_add_offset(). */
static int compat_calc_entry(struct ipt_entry *e, struct xt_table_info *info,
		void *base, struct xt_table_info *newinfo)
{
	struct ipt_entry_target *t;
	u_int16_t entry_offset;
	int off, i, ret;

	off = 0;
	entry_offset = (void *)e - base;
	IPT_MATCH_ITERATE(e, compat_calc_match, &off);
	t = ipt_get_target(e);
	off += xt_compat_target_offset(t->u.kernel.target);
	newinfo->size -= off;
	ret = compat_add_offset(entry_offset, off);
	if (ret)
		return ret;

	/* Offsets pointing past this entry must shrink by the same amount. */
	for (i = 0; i< NF_IP_NUMHOOKS; i++) {
		if (info->hook_entry[i] && (e < (struct ipt_entry *)
				(base + info->hook_entry[i])))
			newinfo->hook_entry[i] -= off;
		if (info->underflow[i] && (e < (struct ipt_entry *)
				(base + info->underflow[i])))
			newinfo->underflow[i] -= off;
	}
	return 0;
}
997
/* Fill @newinfo with the compat-layout metadata of @info: same entry
 * count, but total size and hook offsets reduced by the per-entry
 * deltas.  Walks the current CPU's rule copy (every CPU holds an
 * identical layout -- see translate_table()).  Returns 0 or a negative
 * errno. */
static int compat_table_info(struct xt_table_info *info,
			     struct xt_table_info *newinfo)
{
	void *loc_cpu_entry;
	int i;

	if (!newinfo || !info)
		return -EINVAL;

	memset(newinfo, 0, sizeof(struct xt_table_info));
	newinfo->size = info->size;
	newinfo->number = info->number;
	for (i = 0; i < NF_IP_NUMHOOKS; i++) {
		newinfo->hook_entry[i] = info->hook_entry[i];
		newinfo->underflow[i] = info->underflow[i];
	}
	loc_cpu_entry = info->entries[raw_smp_processor_id()];
	return IPT_ENTRY_ITERATE(loc_cpu_entry, info->size,
			compat_calc_entry, info, loc_cpu_entry, newinfo);
}
1018#endif
1019
1020static int get_info(void __user *user, int *len, int compat)
1021{
1022 char name[IPT_TABLE_MAXNAMELEN];
1023 struct ipt_table *t;
1024 int ret;
1025
1026 if (*len != sizeof(struct ipt_getinfo)) {
1027 duprintf("length %u != %u\n", *len,
1028 (unsigned int)sizeof(struct ipt_getinfo));
1029 return -EINVAL;
1030 }
1031
1032 if (copy_from_user(name, user, sizeof(name)) != 0)
1033 return -EFAULT;
1034
1035 name[IPT_TABLE_MAXNAMELEN-1] = '\0';
1036#ifdef CONFIG_COMPAT
1037 if (compat)
1038 xt_compat_lock(AF_INET);
1039#endif
1040 t = try_then_request_module(xt_find_table_lock(AF_INET, name),
1041 "iptable_%s", name);
1042 if (t && !IS_ERR(t)) {
1043 struct ipt_getinfo info;
1044 struct xt_table_info *private = t->private;
1045
1046#ifdef CONFIG_COMPAT
1047 if (compat) {
1048 struct xt_table_info tmp;
1049 ret = compat_table_info(private, &tmp);
1050 compat_flush_offsets();
1051 private = &tmp;
1052 }
1053#endif
1054 info.valid_hooks = t->valid_hooks;
1055 memcpy(info.hook_entry, private->hook_entry,
1056 sizeof(info.hook_entry));
1057 memcpy(info.underflow, private->underflow,
1058 sizeof(info.underflow));
1059 info.num_entries = private->number;
1060 info.size = private->size;
1061 strcpy(info.name, name);
1062
1063 if (copy_to_user(user, &info, *len) != 0)
1064 ret = -EFAULT;
1065 else
1066 ret = 0;
1067
1068 xt_table_unlock(t);
1069 module_put(t->me);
1070 } else
1071 ret = t ? PTR_ERR(t) : -ENOENT;
1072#ifdef CONFIG_COMPAT
1073 if (compat)
1074 xt_compat_unlock(AF_INET);
1075#endif
1076 return ret;
1077}
1078
/* IPT_SO_GET_ENTRIES: copy a named table's full ruleset to userspace.
 * *len must be exactly sizeof(struct ipt_get_entries) plus the size
 * that get_info() previously reported.  Returns 0 or a negative
 * errno. */
static int
get_entries(struct ipt_get_entries __user *uptr, int *len)
{
	int ret;
	struct ipt_get_entries get;
	struct ipt_table *t;

	if (*len < sizeof(get)) {
		duprintf("get_entries: %u < %d\n", *len,
			 (unsigned int)sizeof(get));
		return -EINVAL;
	}
	if (copy_from_user(&get, uptr, sizeof(get)) != 0)
		return -EFAULT;
	if (*len != sizeof(struct ipt_get_entries) + get.size) {
		duprintf("get_entries: %u != %u\n", *len,
			 (unsigned int)(sizeof(struct ipt_get_entries) +
					get.size));
		return -EINVAL;
	}

	t = xt_find_table_lock(AF_INET, get.name);
	if (t && !IS_ERR(t)) {
		struct xt_table_info *private = t->private;
		duprintf("t->private->number = %u\n",
			 private->number);
		/* The user's size must match the live table exactly. */
		if (get.size == private->size)
			ret = copy_entries_to_user(private->size,
						   t, uptr->entrytable);
		else {
			duprintf("get_entries: I've got %u not %u!\n",
				 private->size,
				 get.size);
			ret = -EINVAL;
		}
		module_put(t->me);
		xt_table_unlock(t);
	} else
		ret = t ? PTR_ERR(t) : -ENOENT;

	return ret;
}
1121
/* Common backend for native and compat SO_SET_REPLACE: swap the
 * already-translated @newinfo into the named table, return the old
 * ruleset's counters to userspace and free the old blob.
 *
 * NOTE(review): num_counters comes from userspace and the vmalloc()
 * size multiplication below is not overflow-checked here -- verify
 * that all callers bound it.
 *
 * Returns 0 or a negative errno; on failure @newinfo is untouched and
 * remains owned by the caller. */
static int
__do_replace(const char *name, unsigned int valid_hooks,
		struct xt_table_info *newinfo, unsigned int num_counters,
		void __user *counters_ptr)
{
	int ret;
	struct ipt_table *t;
	struct xt_table_info *oldinfo;
	struct xt_counters *counters;
	void *loc_cpu_old_entry;

	ret = 0;
	counters = vmalloc(num_counters * sizeof(struct xt_counters));
	if (!counters) {
		ret = -ENOMEM;
		goto out;
	}

	t = try_then_request_module(xt_find_table_lock(AF_INET, name),
				    "iptable_%s", name);
	if (!t || IS_ERR(t)) {
		ret = t ? PTR_ERR(t) : -ENOENT;
		goto free_newinfo_counters_untrans;
	}

	/* You lied! */
	if (valid_hooks != t->valid_hooks) {
		duprintf("Valid hook crap: %08X vs %08X\n",
			 valid_hooks, t->valid_hooks);
		ret = -EINVAL;
		goto put_module;
	}

	oldinfo = xt_replace_table(t, num_counters, newinfo, &ret);
	if (!oldinfo)
		goto put_module;

	/* Update module usage count based on number of rules */
	duprintf("do_replace: oldnum=%u, initnum=%u, newnum=%u\n",
		oldinfo->number, oldinfo->initial_entries, newinfo->number);
	if ((oldinfo->number > oldinfo->initial_entries) ||
	    (newinfo->number <= oldinfo->initial_entries))
		module_put(t->me);
	if ((oldinfo->number > oldinfo->initial_entries) &&
	    (newinfo->number <= oldinfo->initial_entries))
		module_put(t->me);

	/* Get the old counters. */
	get_counters(oldinfo, counters);
	/* Decrease module usage counts and free resource */
	loc_cpu_old_entry = oldinfo->entries[raw_smp_processor_id()];
	IPT_ENTRY_ITERATE(loc_cpu_old_entry, oldinfo->size, cleanup_entry,NULL);
	xt_free_table_info(oldinfo);
	if (copy_to_user(counters_ptr, counters,
			 sizeof(struct xt_counters) * num_counters) != 0)
		ret = -EFAULT;
	vfree(counters);
	xt_table_unlock(t);
	return ret;

 put_module:
	module_put(t->me);
	xt_table_unlock(t);
 free_newinfo_counters_untrans:
	vfree(counters);
 out:
	return ret;
}
1190
1191static int
1192do_replace(void __user *user, unsigned int len)
1193{
1194 int ret;
1195 struct ipt_replace tmp;
1196 struct xt_table_info *newinfo;
1197 void *loc_cpu_entry;
1198
1199 if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
1200 return -EFAULT;
1201
1202 /* Hack: Causes ipchains to give correct error msg --RR */
1203 if (len != sizeof(tmp) + tmp.size)
1204 return -ENOPROTOOPT;
1205
1206 /* overflow check */
1207 if (tmp.size >= (INT_MAX - sizeof(struct xt_table_info)) / NR_CPUS -
1208 SMP_CACHE_BYTES)
1209 return -ENOMEM;
1210 if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
1211 return -ENOMEM;
1212
1213 newinfo = xt_alloc_table_info(tmp.size);
1214 if (!newinfo)
1215 return -ENOMEM;
1216
1217 /* choose the copy that is our node/cpu */
1218 loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
1219 if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
1220 tmp.size) != 0) {
1221 ret = -EFAULT;
1222 goto free_newinfo;
1223 }
1224
1225 ret = translate_table(tmp.name, tmp.valid_hooks,
1226 newinfo, loc_cpu_entry, tmp.size, tmp.num_entries,
1227 tmp.hook_entry, tmp.underflow);
1228 if (ret != 0)
1229 goto free_newinfo;
1230
1231 duprintf("ip_tables: Translated table\n");
1232
1233 ret = __do_replace(tmp.name, tmp.valid_hooks,
1234 newinfo, tmp.num_counters,
1235 tmp.counters);
1236 if (ret)
1237 goto free_newinfo_untrans;
1238 return 0;
1239
1240 free_newinfo_untrans:
1241 IPT_ENTRY_ITERATE(loc_cpu_entry, newinfo->size, cleanup_entry,NULL);
1242 free_newinfo:
1243 xt_free_table_info(newinfo);
1244 return ret;
1245}
1246
1247/* We're lazy, and add to the first CPU; overflow works its fey magic
1248 * and everything is OK. */
1249static inline int
1250add_counter_to_entry(struct ipt_entry *e,
1251 const struct xt_counters addme[],
1252 unsigned int *i)
1253{
1254#if 0
1255 duprintf("add_counter: Entry %u %lu/%lu + %lu/%lu\n",
1256 *i,
1257 (long unsigned int)e->counters.pcnt,
1258 (long unsigned int)e->counters.bcnt,
1259 (long unsigned int)addme[*i].pcnt,
1260 (long unsigned int)addme[*i].bcnt);
1261#endif
1262
1263 ADD_COUNTER(e->counters, addme[*i].bcnt, addme[*i].pcnt);
1264
1265 (*i)++;
1266 return 0;
1267}
1268
1269static int
1270do_add_counters(void __user *user, unsigned int len, int compat)
1271{
1272 unsigned int i;
1273 struct xt_counters_info tmp;
1274 struct xt_counters *paddc;
1275 unsigned int num_counters;
1276 char *name;
1277 int size;
1278 void *ptmp;
1279 struct ipt_table *t;
1280 struct xt_table_info *private;
1281 int ret = 0;
1282 void *loc_cpu_entry;
1283#ifdef CONFIG_COMPAT
1284 struct compat_xt_counters_info compat_tmp;
1285
1286 if (compat) {
1287 ptmp = &compat_tmp;
1288 size = sizeof(struct compat_xt_counters_info);
1289 } else
1290#endif
1291 {
1292 ptmp = &tmp;
1293 size = sizeof(struct xt_counters_info);
1294 }
1295
1296 if (copy_from_user(ptmp, user, size) != 0)
1297 return -EFAULT;
1298
1299#ifdef CONFIG_COMPAT
1300 if (compat) {
1301 num_counters = compat_tmp.num_counters;
1302 name = compat_tmp.name;
1303 } else
1304#endif
1305 {
1306 num_counters = tmp.num_counters;
1307 name = tmp.name;
1308 }
1309
1310 if (len != size + num_counters * sizeof(struct xt_counters))
1311 return -EINVAL;
1312
1313 paddc = vmalloc_node(len - size, numa_node_id());
1314 if (!paddc)
1315 return -ENOMEM;
1316
1317 if (copy_from_user(paddc, user + size, len - size) != 0) {
1318 ret = -EFAULT;
1319 goto free;
1320 }
1321
1322 t = xt_find_table_lock(AF_INET, name);
1323 if (!t || IS_ERR(t)) {
1324 ret = t ? PTR_ERR(t) : -ENOENT;
1325 goto free;
1326 }
1327
1328 write_lock_bh(&t->lock);
1329 private = t->private;
1330 if (private->number != num_counters) {
1331 ret = -EINVAL;
1332 goto unlock_up_free;
1333 }
1334
1335 i = 0;
1336 /* Choose the copy that is on our node */
1337 loc_cpu_entry = private->entries[raw_smp_processor_id()];
1338 IPT_ENTRY_ITERATE(loc_cpu_entry,
1339 private->size,
1340 add_counter_to_entry,
1341 paddc,
1342 &i);
1343 unlock_up_free:
1344 write_unlock_bh(&t->lock);
1345 xt_table_unlock(t);
1346 module_put(t->me);
1347 free:
1348 vfree(paddc);
1349
1350 return ret;
1351}
1352
1353#ifdef CONFIG_COMPAT
1354struct compat_ipt_replace {
1355 char name[IPT_TABLE_MAXNAMELEN];
1356 u32 valid_hooks;
1357 u32 num_entries;
1358 u32 size;
1359 u32 hook_entry[NF_IP_NUMHOOKS];
1360 u32 underflow[NF_IP_NUMHOOKS];
1361 u32 num_counters;
1362 compat_uptr_t counters; /* struct ipt_counters * */
1363 struct compat_ipt_entry entries[0];
1364};
1365
1366static inline int compat_copy_match_to_user(struct ipt_entry_match *m,
9fa492cd 1367 void * __user *dstptr, compat_uint_t *size)
2722971c 1368{
9fa492cd 1369 return xt_compat_match_to_user(m, dstptr, size);
2722971c
DM
1370}
1371
1372static int compat_copy_entry_to_user(struct ipt_entry *e,
9fa492cd 1373 void * __user *dstptr, compat_uint_t *size)
2722971c
DM
1374{
1375 struct ipt_entry_target __user *t;
1376 struct compat_ipt_entry __user *ce;
1377 u_int16_t target_offset, next_offset;
1378 compat_uint_t origsize;
1379 int ret;
1380
1381 ret = -EFAULT;
1382 origsize = *size;
1383 ce = (struct compat_ipt_entry __user *)*dstptr;
7800007c 1384 if (copy_to_user(ce, e, sizeof(struct ipt_entry)))
2722971c
DM
1385 goto out;
1386
1387 *dstptr += sizeof(struct compat_ipt_entry);
1388 ret = IPT_MATCH_ITERATE(e, compat_copy_match_to_user, dstptr, size);
1389 target_offset = e->target_offset - (origsize - *size);
1390 if (ret)
1391 goto out;
1392 t = ipt_get_target(e);
9fa492cd 1393 ret = xt_compat_target_to_user(t, dstptr, size);
2722971c
DM
1394 if (ret)
1395 goto out;
1396 ret = -EFAULT;
1397 next_offset = e->next_offset - (origsize - *size);
7800007c 1398 if (put_user(target_offset, &ce->target_offset))
2722971c 1399 goto out;
7800007c 1400 if (put_user(next_offset, &ce->next_offset))
2722971c
DM
1401 goto out;
1402 return 0;
1403out:
1404 return ret;
1405}
1406
1407static inline int
1408compat_check_calc_match(struct ipt_entry_match *m,
1409 const char *name,
1410 const struct ipt_ip *ip,
1411 unsigned int hookmask,
1412 int *size, int *i)
1413{
1414 struct ipt_match *match;
1415
1416 match = try_then_request_module(xt_find_match(AF_INET, m->u.user.name,
1417 m->u.user.revision),
1418 "ipt_%s", m->u.user.name);
1419 if (IS_ERR(match) || !match) {
1420 duprintf("compat_check_calc_match: `%s' not found\n",
1421 m->u.user.name);
1422 return match ? PTR_ERR(match) : -ENOENT;
1423 }
1424 m->u.kernel.match = match;
9fa492cd 1425 *size += xt_compat_match_offset(match);
2722971c
DM
1426
1427 (*i)++;
1428 return 0;
1429}
1430
1431static inline int
1432check_compat_entry_size_and_hooks(struct ipt_entry *e,
1433 struct xt_table_info *newinfo,
1434 unsigned int *size,
1435 unsigned char *base,
1436 unsigned char *limit,
1437 unsigned int *hook_entries,
1438 unsigned int *underflows,
1439 unsigned int *i,
1440 const char *name)
1441{
1442 struct ipt_entry_target *t;
1443 struct ipt_target *target;
1444 u_int16_t entry_offset;
1445 int ret, off, h, j;
1446
1447 duprintf("check_compat_entry_size_and_hooks %p\n", e);
1448 if ((unsigned long)e % __alignof__(struct compat_ipt_entry) != 0
1449 || (unsigned char *)e + sizeof(struct compat_ipt_entry) >= limit) {
1450 duprintf("Bad offset %p, limit = %p\n", e, limit);
1451 return -EINVAL;
1452 }
1453
1454 if (e->next_offset < sizeof(struct compat_ipt_entry) +
1455 sizeof(struct compat_xt_entry_target)) {
1456 duprintf("checking: element %p size %u\n",
1457 e, e->next_offset);
1458 return -EINVAL;
1459 }
1460
1461 if (!ip_checkentry(&e->ip)) {
1462 duprintf("ip_tables: ip check failed %p %s.\n", e, name);
1463 return -EINVAL;
1464 }
1465
1466 off = 0;
1467 entry_offset = (void *)e - (void *)base;
1468 j = 0;
1469 ret = IPT_MATCH_ITERATE(e, compat_check_calc_match, name, &e->ip,
1470 e->comefrom, &off, &j);
1471 if (ret != 0)
bec71b16 1472 goto cleanup_matches;
2722971c
DM
1473
1474 t = ipt_get_target(e);
1475 target = try_then_request_module(xt_find_target(AF_INET,
1476 t->u.user.name,
1477 t->u.user.revision),
1478 "ipt_%s", t->u.user.name);
1479 if (IS_ERR(target) || !target) {
1480 duprintf("check_entry: `%s' not found\n", t->u.user.name);
1481 ret = target ? PTR_ERR(target) : -ENOENT;
bec71b16 1482 goto cleanup_matches;
2722971c
DM
1483 }
1484 t->u.kernel.target = target;
1485
9fa492cd 1486 off += xt_compat_target_offset(target);
2722971c
DM
1487 *size += off;
1488 ret = compat_add_offset(entry_offset, off);
1489 if (ret)
1490 goto out;
1491
1492 /* Check hooks & underflows */
1493 for (h = 0; h < NF_IP_NUMHOOKS; h++) {
1494 if ((unsigned char *)e - base == hook_entries[h])
1495 newinfo->hook_entry[h] = hook_entries[h];
1496 if ((unsigned char *)e - base == underflows[h])
1497 newinfo->underflow[h] = underflows[h];
1498 }
1499
1500 /* Clear counters and comefrom */
1501 e->counters = ((struct ipt_counters) { 0, 0 });
1502 e->comefrom = 0;
1503
1504 (*i)++;
1505 return 0;
bec71b16 1506
2722971c 1507out:
bec71b16
PM
1508 module_put(t->u.kernel.target->me);
1509cleanup_matches:
2722971c
DM
1510 IPT_MATCH_ITERATE(e, cleanup_match, &j);
1511 return ret;
1512}
1513
1514static inline int compat_copy_match_from_user(struct ipt_entry_match *m,
1515 void **dstptr, compat_uint_t *size, const char *name,
bec71b16 1516 const struct ipt_ip *ip, unsigned int hookmask, int *i)
2722971c
DM
1517{
1518 struct ipt_entry_match *dm;
1519 struct ipt_match *match;
1520 int ret;
1521
1522 dm = (struct ipt_entry_match *)*dstptr;
1523 match = m->u.kernel.match;
9fa492cd 1524 xt_compat_match_from_user(m, dstptr, size);
2722971c
DM
1525
1526 ret = xt_check_match(match, AF_INET, dm->u.match_size - sizeof(*dm),
1527 name, hookmask, ip->proto,
1528 ip->invflags & IPT_INV_PROTO);
1529 if (ret)
bec71b16 1530 goto err;
2722971c
DM
1531
1532 if (m->u.kernel.match->checkentry
1533 && !m->u.kernel.match->checkentry(name, ip, match, dm->data,
2722971c
DM
1534 hookmask)) {
1535 duprintf("ip_tables: check failed for `%s'.\n",
1536 m->u.kernel.match->name);
bec71b16
PM
1537 ret = -EINVAL;
1538 goto err;
2722971c 1539 }
bec71b16 1540 (*i)++;
2722971c 1541 return 0;
bec71b16
PM
1542
1543err:
1544 module_put(m->u.kernel.match->me);
1545 return ret;
2722971c
DM
1546}
1547
1548static int compat_copy_entry_from_user(struct ipt_entry *e, void **dstptr,
1549 unsigned int *size, const char *name,
1550 struct xt_table_info *newinfo, unsigned char *base)
1551{
1552 struct ipt_entry_target *t;
1553 struct ipt_target *target;
1554 struct ipt_entry *de;
1555 unsigned int origsize;
bec71b16 1556 int ret, h, j;
2722971c
DM
1557
1558 ret = 0;
1559 origsize = *size;
1560 de = (struct ipt_entry *)*dstptr;
1561 memcpy(de, e, sizeof(struct ipt_entry));
1562
bec71b16 1563 j = 0;
2722971c
DM
1564 *dstptr += sizeof(struct compat_ipt_entry);
1565 ret = IPT_MATCH_ITERATE(e, compat_copy_match_from_user, dstptr, size,
bec71b16 1566 name, &de->ip, de->comefrom, &j);
2722971c 1567 if (ret)
bec71b16 1568 goto cleanup_matches;
2722971c
DM
1569 de->target_offset = e->target_offset - (origsize - *size);
1570 t = ipt_get_target(e);
1571 target = t->u.kernel.target;
9fa492cd 1572 xt_compat_target_from_user(t, dstptr, size);
2722971c
DM
1573
1574 de->next_offset = e->next_offset - (origsize - *size);
1575 for (h = 0; h < NF_IP_NUMHOOKS; h++) {
1576 if ((unsigned char *)de - base < newinfo->hook_entry[h])
1577 newinfo->hook_entry[h] -= origsize - *size;
1578 if ((unsigned char *)de - base < newinfo->underflow[h])
1579 newinfo->underflow[h] -= origsize - *size;
1580 }
1581
1582 t = ipt_get_target(de);
1583 target = t->u.kernel.target;
1584 ret = xt_check_target(target, AF_INET, t->u.target_size - sizeof(*t),
1585 name, e->comefrom, e->ip.proto,
1586 e->ip.invflags & IPT_INV_PROTO);
1587 if (ret)
bec71b16 1588 goto err;
2722971c
DM
1589
1590 ret = -EINVAL;
1591 if (t->u.kernel.target == &ipt_standard_target) {
1592 if (!standard_check(t, *size))
bec71b16 1593 goto err;
2722971c
DM
1594 } else if (t->u.kernel.target->checkentry
1595 && !t->u.kernel.target->checkentry(name, de, target,
efa74165 1596 t->data, de->comefrom)) {
2722971c
DM
1597 duprintf("ip_tables: compat: check failed for `%s'.\n",
1598 t->u.kernel.target->name);
bec71b16 1599 goto err;
2722971c
DM
1600 }
1601 ret = 0;
bec71b16
PM
1602 return ret;
1603
1604err:
1605 module_put(t->u.kernel.target->me);
1606cleanup_matches:
1607 IPT_MATCH_ITERATE(e, cleanup_match, &j);
2722971c
DM
1608 return ret;
1609}
1610
1da177e4 1611static int
2722971c
DM
1612translate_compat_table(const char *name,
1613 unsigned int valid_hooks,
1614 struct xt_table_info **pinfo,
1615 void **pentry0,
1616 unsigned int total_size,
1617 unsigned int number,
1618 unsigned int *hook_entries,
1619 unsigned int *underflows)
1da177e4 1620{
2722971c
DM
1621 unsigned int i;
1622 struct xt_table_info *newinfo, *info;
1623 void *pos, *entry0, *entry1;
1624 unsigned int size;
1da177e4 1625 int ret;
1da177e4 1626
2722971c
DM
1627 info = *pinfo;
1628 entry0 = *pentry0;
1629 size = total_size;
1630 info->number = number;
1631
1632 /* Init all hooks to impossible value. */
1633 for (i = 0; i < NF_IP_NUMHOOKS; i++) {
1634 info->hook_entry[i] = 0xFFFFFFFF;
1635 info->underflow[i] = 0xFFFFFFFF;
1636 }
1637
1638 duprintf("translate_compat_table: size %u\n", info->size);
1639 i = 0;
1640 xt_compat_lock(AF_INET);
1641 /* Walk through entries, checking offsets. */
1642 ret = IPT_ENTRY_ITERATE(entry0, total_size,
1643 check_compat_entry_size_and_hooks,
1644 info, &size, entry0,
1645 entry0 + total_size,
1646 hook_entries, underflows, &i, name);
1647 if (ret != 0)
1648 goto out_unlock;
1649
1650 ret = -EINVAL;
1651 if (i != number) {
1652 duprintf("translate_compat_table: %u not %u entries\n",
1653 i, number);
1654 goto out_unlock;
1655 }
1656
1657 /* Check hooks all assigned */
1658 for (i = 0; i < NF_IP_NUMHOOKS; i++) {
1659 /* Only hooks which are valid */
1660 if (!(valid_hooks & (1 << i)))
1661 continue;
1662 if (info->hook_entry[i] == 0xFFFFFFFF) {
1663 duprintf("Invalid hook entry %u %u\n",
1664 i, hook_entries[i]);
1665 goto out_unlock;
1da177e4 1666 }
2722971c
DM
1667 if (info->underflow[i] == 0xFFFFFFFF) {
1668 duprintf("Invalid underflow %u %u\n",
1669 i, underflows[i]);
1670 goto out_unlock;
1671 }
1672 }
1673
1674 ret = -ENOMEM;
1675 newinfo = xt_alloc_table_info(size);
1676 if (!newinfo)
1677 goto out_unlock;
1678
1679 newinfo->number = number;
1680 for (i = 0; i < NF_IP_NUMHOOKS; i++) {
1681 newinfo->hook_entry[i] = info->hook_entry[i];
1682 newinfo->underflow[i] = info->underflow[i];
1683 }
1684 entry1 = newinfo->entries[raw_smp_processor_id()];
1685 pos = entry1;
1686 size = total_size;
1687 ret = IPT_ENTRY_ITERATE(entry0, total_size,
1688 compat_copy_entry_from_user, &pos, &size,
1689 name, newinfo, entry1);
1690 compat_flush_offsets();
1691 xt_compat_unlock(AF_INET);
1692 if (ret)
1693 goto free_newinfo;
1694
1695 ret = -ELOOP;
1696 if (!mark_source_chains(newinfo, valid_hooks, entry1))
1697 goto free_newinfo;
1698
1699 /* And one copy for every other CPU */
fb1bb34d 1700 for_each_possible_cpu(i)
2722971c
DM
1701 if (newinfo->entries[i] && newinfo->entries[i] != entry1)
1702 memcpy(newinfo->entries[i], entry1, newinfo->size);
1703
1704 *pinfo = newinfo;
1705 *pentry0 = entry1;
1706 xt_free_table_info(info);
1707 return 0;
1da177e4 1708
2722971c
DM
1709free_newinfo:
1710 xt_free_table_info(newinfo);
1711out:
1da177e4 1712 return ret;
2722971c
DM
1713out_unlock:
1714 xt_compat_unlock(AF_INET);
1715 goto out;
1da177e4
LT
1716}
1717
1718static int
2722971c 1719compat_do_replace(void __user *user, unsigned int len)
1da177e4
LT
1720{
1721 int ret;
2722971c
DM
1722 struct compat_ipt_replace tmp;
1723 struct xt_table_info *newinfo;
1724 void *loc_cpu_entry;
1da177e4
LT
1725
1726 if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
1727 return -EFAULT;
1728
1729 /* Hack: Causes ipchains to give correct error msg --RR */
1730 if (len != sizeof(tmp) + tmp.size)
1731 return -ENOPROTOOPT;
1732
ee4bb818
KK
1733 /* overflow check */
1734 if (tmp.size >= (INT_MAX - sizeof(struct xt_table_info)) / NR_CPUS -
1735 SMP_CACHE_BYTES)
1736 return -ENOMEM;
1737 if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
1738 return -ENOMEM;
1739
2e4e6a17 1740 newinfo = xt_alloc_table_info(tmp.size);
1da177e4
LT
1741 if (!newinfo)
1742 return -ENOMEM;
1743
31836064
ED
1744 /* choose the copy that is our node/cpu */
1745 loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
1746 if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
1da177e4
LT
1747 tmp.size) != 0) {
1748 ret = -EFAULT;
1749 goto free_newinfo;
1750 }
1751
2722971c
DM
1752 ret = translate_compat_table(tmp.name, tmp.valid_hooks,
1753 &newinfo, &loc_cpu_entry, tmp.size,
1754 tmp.num_entries, tmp.hook_entry, tmp.underflow);
1755 if (ret != 0)
1da177e4 1756 goto free_newinfo;
1da177e4 1757
2722971c 1758 duprintf("compat_do_replace: Translated table\n");
1da177e4 1759
2722971c
DM
1760 ret = __do_replace(tmp.name, tmp.valid_hooks,
1761 newinfo, tmp.num_counters,
1762 compat_ptr(tmp.counters));
1763 if (ret)
1764 goto free_newinfo_untrans;
1765 return 0;
1da177e4 1766
2722971c
DM
1767 free_newinfo_untrans:
1768 IPT_ENTRY_ITERATE(loc_cpu_entry, newinfo->size, cleanup_entry,NULL);
1769 free_newinfo:
1770 xt_free_table_info(newinfo);
1771 return ret;
1772}
1da177e4 1773
2722971c
DM
1774static int
1775compat_do_ipt_set_ctl(struct sock *sk, int cmd, void __user *user,
1776 unsigned int len)
1777{
1778 int ret;
1da177e4 1779
2722971c
DM
1780 if (!capable(CAP_NET_ADMIN))
1781 return -EPERM;
1da177e4 1782
2722971c
DM
1783 switch (cmd) {
1784 case IPT_SO_SET_REPLACE:
1785 ret = compat_do_replace(user, len);
1786 break;
1da177e4 1787
2722971c
DM
1788 case IPT_SO_SET_ADD_COUNTERS:
1789 ret = do_add_counters(user, len, 1);
1790 break;
1791
1792 default:
1793 duprintf("do_ipt_set_ctl: unknown request %i\n", cmd);
1794 ret = -EINVAL;
1795 }
1da177e4 1796
1da177e4
LT
1797 return ret;
1798}
1799
2722971c 1800struct compat_ipt_get_entries
1da177e4 1801{
2722971c
DM
1802 char name[IPT_TABLE_MAXNAMELEN];
1803 compat_uint_t size;
1804 struct compat_ipt_entry entrytable[0];
1805};
1da177e4 1806
2722971c
DM
1807static int compat_copy_entries_to_user(unsigned int total_size,
1808 struct ipt_table *table, void __user *userptr)
1809{
1810 unsigned int off, num;
1811 struct compat_ipt_entry e;
1812 struct xt_counters *counters;
1813 struct xt_table_info *private = table->private;
1814 void __user *pos;
1815 unsigned int size;
1816 int ret = 0;
1817 void *loc_cpu_entry;
1da177e4 1818
2722971c
DM
1819 counters = alloc_counters(table);
1820 if (IS_ERR(counters))
1821 return PTR_ERR(counters);
1822
1823 /* choose the copy that is on our node/cpu, ...
1824 * This choice is lazy (because current thread is
1825 * allowed to migrate to another cpu)
1826 */
1827 loc_cpu_entry = private->entries[raw_smp_processor_id()];
1828 pos = userptr;
1829 size = total_size;
1830 ret = IPT_ENTRY_ITERATE(loc_cpu_entry, total_size,
1831 compat_copy_entry_to_user, &pos, &size);
1832 if (ret)
1833 goto free_counters;
1834
1835 /* ... then go back and fix counters and names */
1836 for (off = 0, num = 0; off < size; off += e.next_offset, num++) {
1837 unsigned int i;
1838 struct ipt_entry_match m;
1839 struct ipt_entry_target t;
1840
1841 ret = -EFAULT;
1842 if (copy_from_user(&e, userptr + off,
1843 sizeof(struct compat_ipt_entry)))
1844 goto free_counters;
1845 if (copy_to_user(userptr + off +
1846 offsetof(struct compat_ipt_entry, counters),
1847 &counters[num], sizeof(counters[num])))
1848 goto free_counters;
1849
1850 for (i = sizeof(struct compat_ipt_entry);
1851 i < e.target_offset; i += m.u.match_size) {
1852 if (copy_from_user(&m, userptr + off + i,
1853 sizeof(struct ipt_entry_match)))
1854 goto free_counters;
1855 if (copy_to_user(userptr + off + i +
1856 offsetof(struct ipt_entry_match, u.user.name),
1857 m.u.kernel.match->name,
1858 strlen(m.u.kernel.match->name) + 1))
1859 goto free_counters;
1860 }
1861
1862 if (copy_from_user(&t, userptr + off + e.target_offset,
1863 sizeof(struct ipt_entry_target)))
1864 goto free_counters;
1865 if (copy_to_user(userptr + off + e.target_offset +
1866 offsetof(struct ipt_entry_target, u.user.name),
1867 t.u.kernel.target->name,
1868 strlen(t.u.kernel.target->name) + 1))
1869 goto free_counters;
1870 }
1871 ret = 0;
1872free_counters:
1873 vfree(counters);
1874 return ret;
1da177e4
LT
1875}
1876
1877static int
2722971c 1878compat_get_entries(struct compat_ipt_get_entries __user *uptr, int *len)
1da177e4 1879{
2722971c
DM
1880 int ret;
1881 struct compat_ipt_get_entries get;
1da177e4 1882 struct ipt_table *t;
1da177e4 1883
1da177e4 1884
2722971c
DM
1885 if (*len < sizeof(get)) {
1886 duprintf("compat_get_entries: %u < %u\n",
1887 *len, (unsigned int)sizeof(get));
1da177e4 1888 return -EINVAL;
2722971c 1889 }
1da177e4 1890
2722971c
DM
1891 if (copy_from_user(&get, uptr, sizeof(get)) != 0)
1892 return -EFAULT;
1da177e4 1893
2722971c
DM
1894 if (*len != sizeof(struct compat_ipt_get_entries) + get.size) {
1895 duprintf("compat_get_entries: %u != %u\n", *len,
1896 (unsigned int)(sizeof(struct compat_ipt_get_entries) +
1897 get.size));
1898 return -EINVAL;
1da177e4
LT
1899 }
1900
2722971c
DM
1901 xt_compat_lock(AF_INET);
1902 t = xt_find_table_lock(AF_INET, get.name);
1903 if (t && !IS_ERR(t)) {
1904 struct xt_table_info *private = t->private;
1905 struct xt_table_info info;
1906 duprintf("t->private->number = %u\n",
1907 private->number);
1908 ret = compat_table_info(private, &info);
1909 if (!ret && get.size == info.size) {
1910 ret = compat_copy_entries_to_user(private->size,
1911 t, uptr->entrytable);
1912 } else if (!ret) {
1913 duprintf("compat_get_entries: I've got %u not %u!\n",
1914 private->size,
1915 get.size);
1916 ret = -EINVAL;
1917 }
1918 compat_flush_offsets();
1919 module_put(t->me);
1920 xt_table_unlock(t);
1921 } else
1da177e4 1922 ret = t ? PTR_ERR(t) : -ENOENT;
1da177e4 1923
2722971c
DM
1924 xt_compat_unlock(AF_INET);
1925 return ret;
1926}
1da177e4 1927
79030ed0
PM
1928static int do_ipt_get_ctl(struct sock *, int, void __user *, int *);
1929
2722971c
DM
1930static int
1931compat_do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
1932{
1933 int ret;
1da177e4 1934
2722971c
DM
1935 switch (cmd) {
1936 case IPT_SO_GET_INFO:
1937 ret = get_info(user, len, 1);
1938 break;
1939 case IPT_SO_GET_ENTRIES:
1940 ret = compat_get_entries(user, len);
1941 break;
1942 default:
79030ed0 1943 ret = do_ipt_get_ctl(sk, cmd, user, len);
2722971c 1944 }
1da177e4
LT
1945 return ret;
1946}
2722971c 1947#endif
1da177e4
LT
1948
1949static int
1950do_ipt_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
1951{
1952 int ret;
1953
1954 if (!capable(CAP_NET_ADMIN))
1955 return -EPERM;
1956
1957 switch (cmd) {
1958 case IPT_SO_SET_REPLACE:
1959 ret = do_replace(user, len);
1960 break;
1961
1962 case IPT_SO_SET_ADD_COUNTERS:
2722971c 1963 ret = do_add_counters(user, len, 0);
1da177e4
LT
1964 break;
1965
1966 default:
1967 duprintf("do_ipt_set_ctl: unknown request %i\n", cmd);
1968 ret = -EINVAL;
1969 }
1970
1971 return ret;
1972}
1973
1974static int
1975do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
1976{
1977 int ret;
1978
1979 if (!capable(CAP_NET_ADMIN))
1980 return -EPERM;
1981
1982 switch (cmd) {
2722971c
DM
1983 case IPT_SO_GET_INFO:
1984 ret = get_info(user, len, 0);
1985 break;
1da177e4 1986
2722971c
DM
1987 case IPT_SO_GET_ENTRIES:
1988 ret = get_entries(user, len);
1da177e4 1989 break;
1da177e4
LT
1990
1991 case IPT_SO_GET_REVISION_MATCH:
1992 case IPT_SO_GET_REVISION_TARGET: {
1993 struct ipt_get_revision rev;
2e4e6a17 1994 int target;
1da177e4
LT
1995
1996 if (*len != sizeof(rev)) {
1997 ret = -EINVAL;
1998 break;
1999 }
2000 if (copy_from_user(&rev, user, sizeof(rev)) != 0) {
2001 ret = -EFAULT;
2002 break;
2003 }
2004
2005 if (cmd == IPT_SO_GET_REVISION_TARGET)
2e4e6a17 2006 target = 1;
1da177e4 2007 else
2e4e6a17 2008 target = 0;
1da177e4 2009
2e4e6a17
HW
2010 try_then_request_module(xt_find_revision(AF_INET, rev.name,
2011 rev.revision,
2012 target, &ret),
1da177e4
LT
2013 "ipt_%s", rev.name);
2014 break;
2015 }
2016
2017 default:
2018 duprintf("do_ipt_get_ctl: unknown request %i\n", cmd);
2019 ret = -EINVAL;
2020 }
2021
2022 return ret;
2023}
2024
2e4e6a17 2025int ipt_register_table(struct xt_table *table, const struct ipt_replace *repl)
1da177e4
LT
2026{
2027 int ret;
2e4e6a17
HW
2028 struct xt_table_info *newinfo;
2029 static struct xt_table_info bootstrap
1da177e4 2030 = { 0, 0, 0, { 0 }, { 0 }, { } };
31836064 2031 void *loc_cpu_entry;
1da177e4 2032
2e4e6a17 2033 newinfo = xt_alloc_table_info(repl->size);
1da177e4
LT
2034 if (!newinfo)
2035 return -ENOMEM;
2036
31836064
ED
2037 /* choose the copy on our node/cpu
2038 * but dont care of preemption
2039 */
2040 loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
2041 memcpy(loc_cpu_entry, repl->entries, repl->size);
1da177e4
LT
2042
2043 ret = translate_table(table->name, table->valid_hooks,
31836064 2044 newinfo, loc_cpu_entry, repl->size,
1da177e4
LT
2045 repl->num_entries,
2046 repl->hook_entry,
2047 repl->underflow);
2048 if (ret != 0) {
2e4e6a17 2049 xt_free_table_info(newinfo);
1da177e4
LT
2050 return ret;
2051 }
2052
da298d3a
PM
2053 ret = xt_register_table(table, &bootstrap, newinfo);
2054 if (ret != 0) {
2e4e6a17 2055 xt_free_table_info(newinfo);
1da177e4
LT
2056 return ret;
2057 }
2058
2e4e6a17 2059 return 0;
1da177e4
LT
2060}
2061
2062void ipt_unregister_table(struct ipt_table *table)
2063{
2e4e6a17 2064 struct xt_table_info *private;
31836064
ED
2065 void *loc_cpu_entry;
2066
2e4e6a17 2067 private = xt_unregister_table(table);
1da177e4
LT
2068
2069 /* Decrease module usage counts and free resources */
2e4e6a17
HW
2070 loc_cpu_entry = private->entries[raw_smp_processor_id()];
2071 IPT_ENTRY_ITERATE(loc_cpu_entry, private->size, cleanup_entry, NULL);
2072 xt_free_table_info(private);
1da177e4
LT
2073}
2074
/* Returns 1 if the type and code is matched by the range, 0 otherwise.
 * A test_type of 0xFF is a wildcard matching any type/code; otherwise
 * the type must be equal and the code within [min_code, max_code].
 * `invert' flips the result. */
static inline int
icmp_type_code_match(u_int8_t test_type, u_int8_t min_code, u_int8_t max_code,
		     u_int8_t type, u_int8_t code,
		     int invert)
{
	int hit;

	if (test_type == 0xFF)
		hit = 1;
	else
		hit = (type == test_type &&
		       code >= min_code && code <= max_code);

	return hit ^ invert;
}
2084
2085static int
2086icmp_match(const struct sk_buff *skb,
2087 const struct net_device *in,
2088 const struct net_device *out,
c4986734 2089 const struct xt_match *match,
1da177e4
LT
2090 const void *matchinfo,
2091 int offset,
2e4e6a17 2092 unsigned int protoff,
1da177e4
LT
2093 int *hotdrop)
2094{
2095 struct icmphdr _icmph, *ic;
2096 const struct ipt_icmp *icmpinfo = matchinfo;
2097
2098 /* Must not be a fragment. */
2099 if (offset)
2100 return 0;
2101
2e4e6a17 2102 ic = skb_header_pointer(skb, protoff, sizeof(_icmph), &_icmph);
1da177e4
LT
2103 if (ic == NULL) {
2104 /* We've been asked to examine this packet, and we
2105 * can't. Hence, no choice but to drop.
2106 */
2107 duprintf("Dropping evil ICMP tinygram.\n");
2108 *hotdrop = 1;
2109 return 0;
2110 }
2111
2112 return icmp_type_code_match(icmpinfo->type,
2113 icmpinfo->code[0],
2114 icmpinfo->code[1],
2115 ic->type, ic->code,
2116 !!(icmpinfo->invflags&IPT_ICMP_INV));
2117}
2118
2119/* Called when user tries to insert an entry of this type. */
2120static int
2121icmp_checkentry(const char *tablename,
2e4e6a17 2122 const void *info,
c4986734 2123 const struct xt_match *match,
1da177e4 2124 void *matchinfo,
1da177e4
LT
2125 unsigned int hook_mask)
2126{
2127 const struct ipt_icmp *icmpinfo = matchinfo;
2128
1d5cd909
PM
2129 /* Must specify no unknown invflags */
2130 return !(icmpinfo->invflags & ~IPT_ICMP_INV);
1da177e4
LT
2131}
2132
2133/* The built-in targets: standard (NULL) and error. */
2134static struct ipt_target ipt_standard_target = {
2135 .name = IPT_STANDARD_TARGET,
1d5cd909 2136 .targetsize = sizeof(int),
a45049c5 2137 .family = AF_INET,
2722971c 2138#ifdef CONFIG_COMPAT
9fa492cd
PM
2139 .compatsize = sizeof(compat_int_t),
2140 .compat_from_user = compat_standard_from_user,
2141 .compat_to_user = compat_standard_to_user,
2722971c 2142#endif
1da177e4
LT
2143};
2144
2145static struct ipt_target ipt_error_target = {
2146 .name = IPT_ERROR_TARGET,
2147 .target = ipt_error,
1d5cd909 2148 .targetsize = IPT_FUNCTION_MAXNAMELEN,
a45049c5 2149 .family = AF_INET,
1da177e4
LT
2150};
2151
2152static struct nf_sockopt_ops ipt_sockopts = {
2153 .pf = PF_INET,
2154 .set_optmin = IPT_BASE_CTL,
2155 .set_optmax = IPT_SO_SET_MAX+1,
2156 .set = do_ipt_set_ctl,
2722971c
DM
2157#ifdef CONFIG_COMPAT
2158 .compat_set = compat_do_ipt_set_ctl,
2159#endif
1da177e4
LT
2160 .get_optmin = IPT_BASE_CTL,
2161 .get_optmax = IPT_SO_GET_MAX+1,
2162 .get = do_ipt_get_ctl,
2722971c
DM
2163#ifdef CONFIG_COMPAT
2164 .compat_get = compat_do_ipt_get_ctl,
2165#endif
1da177e4
LT
2166};
2167
1da177e4
LT
2168static struct ipt_match icmp_matchstruct = {
2169 .name = "icmp",
1d5cd909
PM
2170 .match = icmp_match,
2171 .matchsize = sizeof(struct ipt_icmp),
2172 .proto = IPPROTO_ICMP,
a45049c5 2173 .family = AF_INET,
1d5cd909 2174 .checkentry = icmp_checkentry,
1da177e4
LT
2175};
2176
65b4b4e8 2177static int __init ip_tables_init(void)
1da177e4
LT
2178{
2179 int ret;
2180
0eff66e6
PM
2181 ret = xt_proto_init(AF_INET);
2182 if (ret < 0)
2183 goto err1;
2e4e6a17 2184
1da177e4 2185 /* Noone else will be downing sem now, so we won't sleep */
0eff66e6
PM
2186 ret = xt_register_target(&ipt_standard_target);
2187 if (ret < 0)
2188 goto err2;
2189 ret = xt_register_target(&ipt_error_target);
2190 if (ret < 0)
2191 goto err3;
2192 ret = xt_register_match(&icmp_matchstruct);
2193 if (ret < 0)
2194 goto err4;
1da177e4
LT
2195
2196 /* Register setsockopt */
2197 ret = nf_register_sockopt(&ipt_sockopts);
0eff66e6
PM
2198 if (ret < 0)
2199 goto err5;
1da177e4 2200
2e4e6a17 2201 printk("ip_tables: (C) 2000-2006 Netfilter Core Team\n");
1da177e4 2202 return 0;
0eff66e6
PM
2203
2204err5:
2205 xt_unregister_match(&icmp_matchstruct);
2206err4:
2207 xt_unregister_target(&ipt_error_target);
2208err3:
2209 xt_unregister_target(&ipt_standard_target);
2210err2:
2211 xt_proto_fini(AF_INET);
2212err1:
2213 return ret;
1da177e4
LT
2214}
2215
65b4b4e8 2216static void __exit ip_tables_fini(void)
1da177e4
LT
2217{
2218 nf_unregister_sockopt(&ipt_sockopts);
2e4e6a17 2219
a45049c5
PNA
2220 xt_unregister_match(&icmp_matchstruct);
2221 xt_unregister_target(&ipt_error_target);
2222 xt_unregister_target(&ipt_standard_target);
2e4e6a17
HW
2223
2224 xt_proto_fini(AF_INET);
1da177e4
LT
2225}
2226
/* Public interface used by the per-table modules (iptable_filter,
 * iptable_nat, iptable_mangle, ...). */
EXPORT_SYMBOL(ipt_register_table);
EXPORT_SYMBOL(ipt_unregister_table);
EXPORT_SYMBOL(ipt_do_table);

module_init(ip_tables_init);
module_exit(ip_tables_fini);