/*
 * (C) 1999-2001 Paul `Rusty' Russell
 * (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org>
 * (C) 2011 Patrick McHardy <kaber@trash.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/types.h>
#include <linux/timer.h>
#include <linux/skbuff.h>
#include <linux/gfp.h>
#include <net/xfrm.h>
#include <linux/jhash.h>
#include <linux/rtnetlink.h>
#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_nat.h>
#include <net/netfilter/nf_nat_l3proto.h>
#include <net/netfilter/nf_nat_core.h>
#include <net/netfilter/nf_nat_helper.h>
#include <net/netfilter/nf_conntrack_helper.h>
#include <net/netfilter/nf_conntrack_seqadj.h>
#include <net/netfilter/nf_conntrack_zones.h>
#include <linux/netfilter/nf_nat.h>

#include "nf_internals.h"
static spinlock_t nf_nat_locks[CONNTRACK_LOCKS];

static DEFINE_MUTEX(nf_nat_proto_mutex);
static const struct nf_nat_l3proto __rcu *nf_nat_l3protos[NFPROTO_NUMPROTO]
						__read_mostly;
static unsigned int nat_net_id __read_mostly;

static struct hlist_head *nf_nat_bysource __read_mostly;
static unsigned int nf_nat_htable_size __read_mostly;
static unsigned int nf_nat_hash_rnd __read_mostly;
struct nf_nat_lookup_hook_priv {
	struct nf_hook_entries __rcu *entries;

	struct rcu_head rcu_head;
};

struct nf_nat_hooks_net {
	struct nf_hook_ops *nat_hook_ops;
	unsigned int users;
};

struct nat_net {
	struct nf_nat_hooks_net nat_proto_net[NFPROTO_NUMPROTO];
};
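/* Look up the registered l3proto ops (IPv4 or IPv6) for @family.  Uses
 * rcu_dereference(), so callers must be inside an RCU read-side section.
 */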
inline const struct nf_nat_l3proto *
__nf_nat_l3proto_find(u8 family)
{
	return rcu_dereference(nf_nat_l3protos[family]);
}
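/* The CONFIG_XFRM block below lets IPsec policy lookups see the flow as
 * it will look after translation: the direction of the packet picks the
 * NAT status bit that is relevant for it (DNAT for the original
 * direction, SNAT for replies), and the l3proto callback fills in @fl.
 */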
#ifdef CONFIG_XFRM
static void __nf_nat_decode_session(struct sk_buff *skb, struct flowi *fl)
{
	const struct nf_nat_l3proto *l3proto;
	const struct nf_conn *ct;
	enum ip_conntrack_info ctinfo;
	enum ip_conntrack_dir dir;
	unsigned long statusbit;
	u8 family;

	ct = nf_ct_get(skb, &ctinfo);
	if (ct == NULL)
		return;

	family = nf_ct_l3num(ct);
	l3proto = __nf_nat_l3proto_find(family);
	if (l3proto == NULL)
		return;

	dir = CTINFO2DIR(ctinfo);
	if (dir == IP_CT_DIR_ORIGINAL)
		statusbit = IPS_DST_NAT;
	else
		statusbit = IPS_SRC_NAT;

	l3proto->decode_session(skb, ct, dir, statusbit, fl);
}
int nf_xfrm_me_harder(struct net *net, struct sk_buff *skb, unsigned int family)
{
	struct flowi fl;
	unsigned int hh_len;
	struct dst_entry *dst;
	struct sock *sk = skb->sk;
	int err;

	err = xfrm_decode_session(skb, &fl, family);
	if (err < 0)
		return err;

	dst = skb_dst(skb);
	if (dst->xfrm)
		dst = ((struct xfrm_dst *)dst)->route;
	dst_hold(dst);

	if (sk && !net_eq(net, sock_net(sk)))
		sk = NULL;

	dst = xfrm_lookup(net, dst, &fl, sk, 0);
	if (IS_ERR(dst))
		return PTR_ERR(dst);

	skb_dst_drop(skb);
	skb_dst_set(skb, dst);

	/* Change in oif may mean change in hh_len. */
	hh_len = skb_dst(skb)->dev->hard_header_len;
	if (skb_headroom(skb) < hh_len &&
	    pskb_expand_head(skb, hh_len - skb_headroom(skb), 0, GFP_ATOMIC))
		return -ENOMEM;
	return 0;
}
EXPORT_SYMBOL(nf_xfrm_me_harder);
#endif /* CONFIG_XFRM */
/* We keep an extra hash for each conntrack, for fast searching. */
static unsigned int
hash_by_src(const struct net *n, const struct nf_conntrack_tuple *tuple)
{
	unsigned int hash;

	get_random_once(&nf_nat_hash_rnd, sizeof(nf_nat_hash_rnd));

	/* Original src, to ensure we map it consistently if poss. */
	hash = jhash2((u32 *)&tuple->src, sizeof(tuple->src) / sizeof(u32),
		      tuple->dst.protonum ^ nf_nat_hash_rnd ^ net_hash_mix(n));

	return reciprocal_scale(hash, nf_nat_htable_size);
}
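/* Note: reciprocal_scale() above maps the 32-bit hash onto
 * [0, nf_nat_htable_size) with a multiply-and-shift rather than a
 * modulo, which is cheaper on the packet path.
 */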
/* Is this tuple already taken? (not by us) */
int
nf_nat_used_tuple(const struct nf_conntrack_tuple *tuple,
		  const struct nf_conn *ignored_conntrack)
{
	/* Conntrack tracking doesn't keep track of outgoing tuples; only
	 * incoming ones.  NAT means they don't have a fixed mapping,
	 * so we invert the tuple and look for the incoming reply.
	 *
	 * We could keep a separate hash if this proves too slow.
	 */
	struct nf_conntrack_tuple reply;

	nf_ct_invert_tuplepr(&reply, tuple);
	return nf_conntrack_tuple_taken(&reply, ignored_conntrack);
}
EXPORT_SYMBOL(nf_nat_used_tuple);
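/* Does the source address of @t lie within @range's address bounds,
 * inclusive?  IPv4 is compared in host byte order, IPv6 bytewise.
 */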
static bool nf_nat_inet_in_range(const struct nf_conntrack_tuple *t,
				 const struct nf_nat_range2 *range)
{
	if (t->src.l3num == NFPROTO_IPV4)
		return ntohl(t->src.u3.ip) >= ntohl(range->min_addr.ip) &&
		       ntohl(t->src.u3.ip) <= ntohl(range->max_addr.ip);

	return ipv6_addr_cmp(&t->src.u3.in6, &range->min_addr.in6) >= 0 &&
	       ipv6_addr_cmp(&t->src.u3.in6, &range->max_addr.in6) <= 0;
}
/* Is the manipulable part of the tuple between min and max incl? */
static bool l4proto_in_range(const struct nf_conntrack_tuple *tuple,
			     enum nf_nat_manip_type maniptype,
			     const union nf_conntrack_man_proto *min,
			     const union nf_conntrack_man_proto *max)
{
	__be16 port;

	switch (tuple->dst.protonum) {
	case IPPROTO_ICMP: /* fallthrough */
	case IPPROTO_ICMPV6:
		return ntohs(tuple->src.u.icmp.id) >= ntohs(min->icmp.id) &&
		       ntohs(tuple->src.u.icmp.id) <= ntohs(max->icmp.id);
	case IPPROTO_GRE: /* all fall through */
	case IPPROTO_TCP:
	case IPPROTO_UDP:
	case IPPROTO_UDPLITE:
	case IPPROTO_DCCP:
	case IPPROTO_SCTP:
		if (maniptype == NF_NAT_MANIP_SRC)
			port = tuple->src.u.all;
		else
			port = tuple->dst.u.all;

		return ntohs(port) >= ntohs(min->all) &&
		       ntohs(port) <= ntohs(max->all);
	default:
		return true;
	}
}
/* If we source map this tuple so reply looks like reply_tuple, will
 * that meet the constraints of range?
 */
static int in_range(const struct nf_conntrack_tuple *tuple,
		    const struct nf_nat_range2 *range)
{
	/* If we are supposed to map IPs, then we must be in the
	 * range specified, otherwise let this drag us onto a new src IP.
	 */
	if (range->flags & NF_NAT_RANGE_MAP_IPS &&
	    !nf_nat_inet_in_range(tuple, range))
		return 0;

	if (!(range->flags & NF_NAT_RANGE_PROTO_SPECIFIED))
		return 1;

	return l4proto_in_range(tuple, NF_NAT_MANIP_SRC,
				&range->min_proto, &range->max_proto);
}
static inline int
same_src(const struct nf_conn *ct,
	 const struct nf_conntrack_tuple *tuple)
{
	const struct nf_conntrack_tuple *t;

	t = &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple;
	return (t->dst.protonum == tuple->dst.protonum &&
		nf_inet_addr_cmp(&t->src.u3, &tuple->src.u3) &&
		t->src.u.all == tuple->src.u.all);
}
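/* Walk the bysource hash under RCU looking for a conntrack in the same
 * zone and netns whose original tuple has the same source; if that
 * conntrack's existing mapping also satisfies @range, reuse it so one
 * client keeps one external identity across connections.
 */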
/* Only called for SRC manip */
static int
find_appropriate_src(struct net *net,
		     const struct nf_conntrack_zone *zone,
		     const struct nf_conntrack_tuple *tuple,
		     struct nf_conntrack_tuple *result,
		     const struct nf_nat_range2 *range)
{
	unsigned int h = hash_by_src(net, tuple);
	const struct nf_conn *ct;

	hlist_for_each_entry_rcu(ct, &nf_nat_bysource[h], nat_bysource) {
		if (same_src(ct, tuple) &&
		    net_eq(net, nf_ct_net(ct)) &&
		    nf_ct_zone_equal(ct, zone, IP_CT_DIR_ORIGINAL)) {
			/* Copy source part from reply tuple. */
			nf_ct_invert_tuplepr(result,
					     &ct->tuplehash[IP_CT_DIR_REPLY].tuple);
			result->dst = tuple->dst;

			if (in_range(result, range))
				return 1;
		}
	}
	return 0;
}
/* For [FUTURE] fragmentation handling, we want the least-used
 * src-ip/dst-ip/proto triple.  Fairness doesn't come into it.  Thus
 * if the range specifies 1.2.3.4 ports 10000-10005 and 1.2.3.5 ports
 * 1-65535, we don't do pro-rata allocation based on ports; we choose
 * the ip with the lowest src-ip/dst-ip/proto usage.
 */
static void
find_best_ips_proto(const struct nf_conntrack_zone *zone,
		    struct nf_conntrack_tuple *tuple,
		    const struct nf_nat_range2 *range,
		    const struct nf_conn *ct,
		    enum nf_nat_manip_type maniptype)
{
	union nf_inet_addr *var_ipp;
	unsigned int i, max;
	/* Host order */
	u32 minip, maxip, j, dist;
	bool full_range;

	/* No IP mapping?  Do nothing. */
	if (!(range->flags & NF_NAT_RANGE_MAP_IPS))
		return;

	if (maniptype == NF_NAT_MANIP_SRC)
		var_ipp = &tuple->src.u3;
	else
		var_ipp = &tuple->dst.u3;

	/* Fast path: only one choice. */
	if (nf_inet_addr_cmp(&range->min_addr, &range->max_addr)) {
		*var_ipp = range->min_addr;
		return;
	}

	if (nf_ct_l3num(ct) == NFPROTO_IPV4)
		max = sizeof(var_ipp->ip) / sizeof(u32) - 1;
	else
		max = sizeof(var_ipp->ip6) / sizeof(u32) - 1;

	/* Hashing source and destination IPs gives a fairly even
	 * spread in practice (if there are a small number of IPs
	 * involved, there usually aren't that many connections
	 * anyway).  The consistency means that servers see the same
	 * client coming from the same IP (some Internet Banking sites
	 * like this), even across reboots.
	 */
	j = jhash2((u32 *)&tuple->src.u3, sizeof(tuple->src.u3) / sizeof(u32),
		   range->flags & NF_NAT_RANGE_PERSISTENT ?
			0 : (__force u32)tuple->dst.u3.all[max] ^ zone->id);

	full_range = false;
	for (i = 0; i <= max; i++) {
		/* If first bytes of the address are at the maximum, use the
		 * distance. Otherwise use the full range.
		 */
		if (!full_range) {
			minip = ntohl((__force __be32)range->min_addr.all[i]);
			maxip = ntohl((__force __be32)range->max_addr.all[i]);
			dist  = maxip - minip + 1;
		} else {
			minip = 0;
			dist  = ~0;
		}

		var_ipp->all[i] = (__force __u32)
			htonl(minip + reciprocal_scale(j, dist));
		if (var_ipp->all[i] != range->max_addr.all[i])
			full_range = true;

		if (!(range->flags & NF_NAT_RANGE_PERSISTENT))
			j ^= (__force u32)tuple->dst.u3.all[i];
	}
}
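/* Example of the selection above: for an SNAT range of
 * 10.0.0.1-10.0.0.14, the jhash of the connection's addresses is scaled
 * into the 14-address window, so a given client is consistently mapped
 * to the same address in the range (and, with NF_NAT_RANGE_PERSISTENT,
 * independently of the destination).
 */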
/* Alter the per-proto part of the tuple (depending on maniptype), to
 * give a unique tuple in the given range if possible.
 *
 * Per-protocol part of tuple is initialized to the incoming packet.
 */
static void nf_nat_l4proto_unique_tuple(struct nf_conntrack_tuple *tuple,
					const struct nf_nat_range2 *range,
					enum nf_nat_manip_type maniptype,
					const struct nf_conn *ct)
{
	unsigned int range_size, min, max, i, attempts;
	__be16 *keyptr;
	u16 off;
	static const unsigned int max_attempts = 128;

	switch (tuple->dst.protonum) {
	case IPPROTO_ICMP: /* fallthrough */
	case IPPROTO_ICMPV6:
		/* id is same for either direction... */
		keyptr = &tuple->src.u.icmp.id;
		if (!(range->flags & NF_NAT_RANGE_PROTO_SPECIFIED)) {
			min = 0;
			range_size = 65536;
		} else {
			min = ntohs(range->min_proto.icmp.id);
			range_size = ntohs(range->max_proto.icmp.id) -
				     ntohs(range->min_proto.icmp.id) + 1;
		}
		goto find_free_id;
#if IS_ENABLED(CONFIG_NF_CT_PROTO_GRE)
	case IPPROTO_GRE:
		/* If there is no master conntrack we are not PPTP,
		 * do not change tuples.
		 */
		if (!ct->master)
			return;

		if (maniptype == NF_NAT_MANIP_SRC)
			keyptr = &tuple->src.u.gre.key;
		else
			keyptr = &tuple->dst.u.gre.key;

		if (!(range->flags & NF_NAT_RANGE_PROTO_SPECIFIED)) {
			min = 1;
			range_size = 65535;
		} else {
			min = ntohs(range->min_proto.gre.key);
			range_size = ntohs(range->max_proto.gre.key) - min + 1;
		}
		goto find_free_id;
#endif
	case IPPROTO_UDP:	/* fallthrough */
	case IPPROTO_UDPLITE:	/* fallthrough */
	case IPPROTO_TCP:	/* fallthrough */
	case IPPROTO_SCTP:	/* fallthrough */
	case IPPROTO_DCCP:	/* fallthrough */
		if (maniptype == NF_NAT_MANIP_SRC)
			keyptr = &tuple->src.u.all;
		else
			keyptr = &tuple->dst.u.all;

		break;
	default:
		return;
	}

	/* If no range specified... */
	if (!(range->flags & NF_NAT_RANGE_PROTO_SPECIFIED)) {
		/* If it's dst rewrite, can't change port */
		if (maniptype == NF_NAT_MANIP_DST)
			return;

		if (ntohs(*keyptr) < 1024) {
			/* Loose convention: >> 512 is credential passing */
			if (ntohs(*keyptr) < 512) {
				min = 1;
				range_size = 511 - min + 1;
			} else {
				min = 600;
				range_size = 1023 - min + 1;
			}
		} else {
			min = 1024;
			range_size = 65535 - 1024 + 1;
		}
	} else {
		min = ntohs(range->min_proto.all);
		max = ntohs(range->max_proto.all);
		if (unlikely(max < min))
			swap(max, min);
		range_size = max - min + 1;
	}

find_free_id:
	if (range->flags & NF_NAT_RANGE_PROTO_OFFSET)
		off = (ntohs(*keyptr) - ntohs(range->base_proto.all));
	else
		off = prandom_u32();

	attempts = range_size;
	if (attempts > max_attempts)
		attempts = max_attempts;

	/* We are in softirq; doing a search of the entire range risks
	 * soft lockup when all tuples are already used.
	 *
	 * If we can't find any free port from first offset, pick a new
	 * one and try again, with ever smaller search window.
	 */
another_round:
	for (i = 0; i < attempts; i++, off++) {
		*keyptr = htons(min + off % range_size);
		if (!nf_nat_used_tuple(tuple, ct))
			return;
	}

	if (attempts >= range_size || attempts < 16)
		return;
	attempts /= 2;
	off = prandom_u32();
	goto another_round;
}
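/* Example of the shrinking search above: for a 10000-port range we probe
 * windows of at most 128, then 64, 32, 16 and finally 8 consecutive
 * ports, each round starting from a fresh random offset, before giving
 * up and letting the conntrack confirmation catch the clash.
 */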
/* Manipulate the tuple into the range given. For NF_INET_POST_ROUTING,
 * we change the source to map into the range. For NF_INET_PRE_ROUTING
 * and NF_INET_LOCAL_OUT, we change the destination to map into the
 * range. It might not be possible to get a unique tuple, but we try.
 * At worst (or if we race), we will end up with a final duplicate in
 * __ip_conntrack_confirm and drop the packet. */
static void
get_unique_tuple(struct nf_conntrack_tuple *tuple,
		 const struct nf_conntrack_tuple *orig_tuple,
		 const struct nf_nat_range2 *range,
		 struct nf_conn *ct,
		 enum nf_nat_manip_type maniptype)
{
	const struct nf_conntrack_zone *zone;
	struct net *net = nf_ct_net(ct);

	zone = nf_ct_zone(ct);

	/* 1) If this srcip/proto/src-proto-part is currently mapped,
	 * and that same mapping gives a unique tuple within the given
	 * range, use that.
	 *
	 * This is only required for source (ie. NAT/masq) mappings.
	 * So far, we don't do local source mappings, so multiple
	 * manips not an issue.
	 */
	if (maniptype == NF_NAT_MANIP_SRC &&
	    !(range->flags & NF_NAT_RANGE_PROTO_RANDOM_ALL)) {
		/* try the original tuple first */
		if (in_range(orig_tuple, range)) {
			if (!nf_nat_used_tuple(orig_tuple, ct)) {
				*tuple = *orig_tuple;
				return;
			}
		} else if (find_appropriate_src(net, zone,
						orig_tuple, tuple, range)) {
			pr_debug("get_unique_tuple: Found current src map\n");
			if (!nf_nat_used_tuple(tuple, ct))
				return;
		}
	}

	/* 2) Select the least-used IP/proto combination in the given range */
	*tuple = *orig_tuple;
	find_best_ips_proto(zone, tuple, range, ct, maniptype);

	/* 3) The per-protocol part of the manip is made to map into
	 * the range to make a unique tuple.
	 */

	/* Only bother mapping if it's not already in range and unique */
	if (!(range->flags & NF_NAT_RANGE_PROTO_RANDOM_ALL)) {
		if (range->flags & NF_NAT_RANGE_PROTO_SPECIFIED) {
			if (!(range->flags & NF_NAT_RANGE_PROTO_OFFSET) &&
			    l4proto_in_range(tuple, maniptype,
					     &range->min_proto,
					     &range->max_proto) &&
			    (range->min_proto.all == range->max_proto.all ||
			     !nf_nat_used_tuple(tuple, ct)))
				return;
		} else if (!nf_nat_used_tuple(tuple, ct)) {
			return;
		}
	}

	/* Last chance: get protocol to try to obtain unique tuple. */
	nf_nat_l4proto_unique_tuple(tuple, range, maniptype, ct);
}
struct nf_conn_nat *nf_ct_nat_ext_add(struct nf_conn *ct)
{
	struct nf_conn_nat *nat = nfct_nat(ct);
	if (nat)
		return nat;

	if (!nf_ct_is_confirmed(ct))
		nat = nf_ct_ext_add(ct, NF_CT_EXT_NAT, GFP_ATOMIC);

	return nat;
}
EXPORT_SYMBOL_GPL(nf_ct_nat_ext_add);
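/* Set up SRC or DST NAT for a not-yet-confirmed conntrack: choose a
 * unique tuple within @range, alter the conntrack's reply tuple to
 * match, and, for SNAT, insert the conntrack into the bysource hash.
 */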
unsigned int
nf_nat_setup_info(struct nf_conn *ct,
		  const struct nf_nat_range2 *range,
		  enum nf_nat_manip_type maniptype)
{
	struct net *net = nf_ct_net(ct);
	struct nf_conntrack_tuple curr_tuple, new_tuple;

	/* Can't setup nat info for confirmed ct. */
	if (nf_ct_is_confirmed(ct))
		return NF_ACCEPT;

	WARN_ON(maniptype != NF_NAT_MANIP_SRC &&
		maniptype != NF_NAT_MANIP_DST);

	if (WARN_ON(nf_nat_initialized(ct, maniptype)))
		return NF_DROP;

	/* What we've got will look like inverse of reply. Normally
	 * this is what is in the conntrack, except for prior
	 * manipulations (future optimization: if num_manips == 0,
	 * orig_tp = ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple)
	 */
	nf_ct_invert_tuplepr(&curr_tuple,
			     &ct->tuplehash[IP_CT_DIR_REPLY].tuple);

	get_unique_tuple(&new_tuple, &curr_tuple, range, ct, maniptype);

	if (!nf_ct_tuple_equal(&new_tuple, &curr_tuple)) {
		struct nf_conntrack_tuple reply;

		/* Alter conntrack table so will recognize replies. */
		nf_ct_invert_tuplepr(&reply, &new_tuple);
		nf_conntrack_alter_reply(ct, &reply);

		/* Non-atomic: we own this at the moment. */
		if (maniptype == NF_NAT_MANIP_SRC)
			ct->status |= IPS_SRC_NAT;
		else
			ct->status |= IPS_DST_NAT;

		if (nfct_help(ct) && !nfct_seqadj(ct))
			if (!nfct_seqadj_ext_add(ct))
				return NF_DROP;
	}

	if (maniptype == NF_NAT_MANIP_SRC) {
		unsigned int srchash;
		spinlock_t *lock;

		srchash = hash_by_src(net,
				      &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
		lock = &nf_nat_locks[srchash % CONNTRACK_LOCKS];
		spin_lock_bh(lock);
		hlist_add_head_rcu(&ct->nat_bysource,
				   &nf_nat_bysource[srchash]);
		spin_unlock_bh(lock);
	}

	/* It's done. */
	if (maniptype == NF_NAT_MANIP_DST)
		ct->status |= IPS_DST_NAT_DONE;
	else
		ct->status |= IPS_SRC_NAT_DONE;

	return NF_ACCEPT;
}
EXPORT_SYMBOL(nf_nat_setup_info);
static unsigned int
__nf_nat_alloc_null_binding(struct nf_conn *ct, enum nf_nat_manip_type manip)
{
	/* Force range to this IP; let proto decide mapping for
	 * per-proto parts (hence not IP_NAT_RANGE_PROTO_SPECIFIED).
	 * Use reply in case it's already been mangled (eg local packet).
	 */
	union nf_inet_addr ip =
		(manip == NF_NAT_MANIP_SRC ?
		ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.u3 :
		ct->tuplehash[IP_CT_DIR_REPLY].tuple.src.u3);
	struct nf_nat_range2 range = {
		.flags		= NF_NAT_RANGE_MAP_IPS,
		.min_addr	= ip,
		.max_addr	= ip,
	};
	return nf_nat_setup_info(ct, &range, manip);
}

unsigned int
nf_nat_alloc_null_binding(struct nf_conn *ct, unsigned int hooknum)
{
	return __nf_nat_alloc_null_binding(ct, HOOK2MANIP(hooknum));
}
EXPORT_SYMBOL_GPL(nf_nat_alloc_null_binding);
static unsigned int nf_nat_manip_pkt(struct sk_buff *skb, struct nf_conn *ct,
				     enum nf_nat_manip_type mtype,
				     enum ip_conntrack_dir dir)
{
	const struct nf_nat_l3proto *l3proto;
	struct nf_conntrack_tuple target;

	/* We are aiming to look like inverse of other direction. */
	nf_ct_invert_tuplepr(&target, &ct->tuplehash[!dir].tuple);

	l3proto = __nf_nat_l3proto_find(target.src.l3num);
	if (!l3proto->manip_pkt(skb, 0, &target, mtype))
		return NF_DROP;

	return NF_ACCEPT;
}
/* Do packet manipulations according to nf_nat_setup_info. */
unsigned int nf_nat_packet(struct nf_conn *ct,
			   enum ip_conntrack_info ctinfo,
			   unsigned int hooknum,
			   struct sk_buff *skb)
{
	enum nf_nat_manip_type mtype = HOOK2MANIP(hooknum);
	enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
	unsigned int verdict = NF_ACCEPT;
	unsigned long statusbit;

	if (mtype == NF_NAT_MANIP_SRC)
		statusbit = IPS_SRC_NAT;
	else
		statusbit = IPS_DST_NAT;

	/* Invert if this is reply dir. */
	if (dir == IP_CT_DIR_REPLY)
		statusbit ^= IPS_NAT_MASK;

	/* Non-atomic: these bits don't change. */
	if (ct->status & statusbit)
		verdict = nf_nat_manip_pkt(skb, ct, mtype, dir);

	return verdict;
}
EXPORT_SYMBOL_GPL(nf_nat_packet);
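/* Common hook body shared by the IPv4 and IPv6 NAT hooks.  For new (and
 * related) connections, the per-family NAT lookup functions chained on
 * this hook get a chance to set up a binding; if none does, a null
 * binding is allocated.  Every packet then goes through nf_nat_packet().
 */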
unsigned int
nf_nat_inet_fn(void *priv, struct sk_buff *skb,
	       const struct nf_hook_state *state)
{
	struct nf_conn *ct;
	enum ip_conntrack_info ctinfo;
	struct nf_conn_nat *nat;
	/* maniptype == SRC for postrouting. */
	enum nf_nat_manip_type maniptype = HOOK2MANIP(state->hook);

	ct = nf_ct_get(skb, &ctinfo);
	/* Can't track?  It's not due to stress, or conntrack would
	 * have dropped it.  Hence it's the user's responsibility to
	 * packet filter it out, or implement conntrack/NAT for that
	 * protocol. 8) --RR
	 */
	if (!ct)
		return NF_ACCEPT;

	nat = nfct_nat(ct);

	switch (ctinfo) {
	case IP_CT_RELATED:
	case IP_CT_RELATED_REPLY:
		/* Only ICMPs can be IP_CT_IS_REPLY.  Fallthrough */
	case IP_CT_NEW:
		/* Seen it before?  This can happen for loopback, retrans,
		 * or local packets.
		 */
		if (!nf_nat_initialized(ct, maniptype)) {
			struct nf_nat_lookup_hook_priv *lpriv = priv;
			struct nf_hook_entries *e = rcu_dereference(lpriv->entries);
			unsigned int ret;
			int i;

			if (!e)
				goto null_bind;

			for (i = 0; i < e->num_hook_entries; i++) {
				ret = e->hooks[i].hook(e->hooks[i].priv, skb,
						       state);
				if (ret != NF_ACCEPT)
					return ret;
				if (nf_nat_initialized(ct, maniptype))
					goto do_nat;
			}
null_bind:
			ret = nf_nat_alloc_null_binding(ct, state->hook);
			if (ret != NF_ACCEPT)
				return ret;
		} else {
			pr_debug("Already setup manip %s for ct %p (status bits 0x%lx)\n",
				 maniptype == NF_NAT_MANIP_SRC ? "SRC" : "DST",
				 ct, ct->status);
			if (nf_nat_oif_changed(state->hook, ctinfo, nat,
					       state->out))
				goto oif_changed;
		}
		break;
	default:
		/* ESTABLISHED */
		WARN_ON(ctinfo != IP_CT_ESTABLISHED &&
			ctinfo != IP_CT_ESTABLISHED_REPLY);
		if (nf_nat_oif_changed(state->hook, ctinfo, nat, state->out))
			goto oif_changed;
	}

do_nat:
	return nf_nat_packet(ct, ctinfo, state->hook, skb);

oif_changed:
	nf_ct_kill_acct(ct, ctinfo, skb);
	return NF_DROP;
}
EXPORT_SYMBOL_GPL(nf_nat_inet_fn);
struct nf_nat_proto_clean {
	u8	l3proto;
	u8	l4proto;
};

/* kill conntracks with affected NAT section */
static int nf_nat_proto_remove(struct nf_conn *i, void *data)
{
	const struct nf_nat_proto_clean *clean = data;

	if ((clean->l3proto && nf_ct_l3num(i) != clean->l3proto) ||
	    (clean->l4proto && nf_ct_protonum(i) != clean->l4proto))
		return 0;

	return i->status & IPS_NAT_MASK ? 1 : 0;
}
static void __nf_nat_cleanup_conntrack(struct nf_conn *ct)
{
	unsigned int h;

	h = hash_by_src(nf_ct_net(ct), &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
	spin_lock_bh(&nf_nat_locks[h % CONNTRACK_LOCKS]);
	hlist_del_rcu(&ct->nat_bysource);
	spin_unlock_bh(&nf_nat_locks[h % CONNTRACK_LOCKS]);
}
static int nf_nat_proto_clean(struct nf_conn *ct, void *data)
{
	if (nf_nat_proto_remove(ct, data))
		return 1;

	/* This module is being removed and conntrack has nat null binding.
	 * Remove it from bysource hash, as the table will be freed soon.
	 *
	 * Else, when the conntrack is destroyed, nf_nat_cleanup_conntrack()
	 * will delete entry from already-freed table.
	 */
	if (test_and_clear_bit(IPS_SRC_NAT_DONE_BIT, &ct->status))
		__nf_nat_cleanup_conntrack(ct);

	/* don't delete conntrack.  Although that would make things a lot
	 * simpler, we'd end up flushing all conntracks on nat rmmod.
	 */
	return 0;
}
static void nf_nat_l3proto_clean(u8 l3proto)
{
	struct nf_nat_proto_clean clean = {
		.l3proto = l3proto,
	};

	nf_ct_iterate_destroy(nf_nat_proto_remove, &clean);
}
int nf_nat_l3proto_register(const struct nf_nat_l3proto *l3proto)
{
	RCU_INIT_POINTER(nf_nat_l3protos[l3proto->l3proto], l3proto);
	return 0;
}
EXPORT_SYMBOL_GPL(nf_nat_l3proto_register);

void nf_nat_l3proto_unregister(const struct nf_nat_l3proto *l3proto)
{
	mutex_lock(&nf_nat_proto_mutex);
	RCU_INIT_POINTER(nf_nat_l3protos[l3proto->l3proto], NULL);
	mutex_unlock(&nf_nat_proto_mutex);
	synchronize_rcu();

	nf_nat_l3proto_clean(l3proto->l3proto);
}
EXPORT_SYMBOL_GPL(nf_nat_l3proto_unregister);
/* No one using conntrack by the time this is called. */
static void nf_nat_cleanup_conntrack(struct nf_conn *ct)
{
	if (ct->status & IPS_SRC_NAT_DONE)
		__nf_nat_cleanup_conntrack(ct);
}

static struct nf_ct_ext_type nat_extend __read_mostly = {
	.len		= sizeof(struct nf_conn_nat),
	.align		= __alignof__(struct nf_conn_nat),
	.destroy	= nf_nat_cleanup_conntrack,
	.id		= NF_CT_EXT_NAT,
};
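/* ctnetlink interface: parse CTA_NAT_* netlink attributes into an
 * nf_nat_range2, so conntracks created from userspace can carry NAT
 * bindings.
 */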
#if IS_ENABLED(CONFIG_NF_CT_NETLINK)

#include <linux/netfilter/nfnetlink.h>
#include <linux/netfilter/nfnetlink_conntrack.h>

static const struct nla_policy protonat_nla_policy[CTA_PROTONAT_MAX+1] = {
	[CTA_PROTONAT_PORT_MIN]	= { .type = NLA_U16 },
	[CTA_PROTONAT_PORT_MAX]	= { .type = NLA_U16 },
};
static int nf_nat_l4proto_nlattr_to_range(struct nlattr *tb[],
					  struct nf_nat_range2 *range)
{
	if (tb[CTA_PROTONAT_PORT_MIN]) {
		range->min_proto.all = nla_get_be16(tb[CTA_PROTONAT_PORT_MIN]);
		range->max_proto.all = range->min_proto.all;
		range->flags |= NF_NAT_RANGE_PROTO_SPECIFIED;
	}
	if (tb[CTA_PROTONAT_PORT_MAX]) {
		range->max_proto.all = nla_get_be16(tb[CTA_PROTONAT_PORT_MAX]);
		range->flags |= NF_NAT_RANGE_PROTO_SPECIFIED;
	}
	return 0;
}
static int nfnetlink_parse_nat_proto(struct nlattr *attr,
				     const struct nf_conn *ct,
				     struct nf_nat_range2 *range)
{
	struct nlattr *tb[CTA_PROTONAT_MAX+1];
	int err;

	err = nla_parse_nested(tb, CTA_PROTONAT_MAX, attr,
			       protonat_nla_policy, NULL);
	if (err < 0)
		return err;

	return nf_nat_l4proto_nlattr_to_range(tb, range);
}
static const struct nla_policy nat_nla_policy[CTA_NAT_MAX+1] = {
	[CTA_NAT_V4_MINIP]	= { .type = NLA_U32 },
	[CTA_NAT_V4_MAXIP]	= { .type = NLA_U32 },
	[CTA_NAT_V6_MINIP]	= { .len = sizeof(struct in6_addr) },
	[CTA_NAT_V6_MAXIP]	= { .len = sizeof(struct in6_addr) },
	[CTA_NAT_PROTO]		= { .type = NLA_NESTED },
};
static int
nfnetlink_parse_nat(const struct nlattr *nat,
		    const struct nf_conn *ct, struct nf_nat_range2 *range,
		    const struct nf_nat_l3proto *l3proto)
{
	struct nlattr *tb[CTA_NAT_MAX+1];
	int err;

	memset(range, 0, sizeof(*range));

	err = nla_parse_nested(tb, CTA_NAT_MAX, nat, nat_nla_policy, NULL);
	if (err < 0)
		return err;

	err = l3proto->nlattr_to_range(tb, range);
	if (err < 0)
		return err;

	if (!tb[CTA_NAT_PROTO])
		return 0;

	return nfnetlink_parse_nat_proto(tb[CTA_NAT_PROTO], ct, range);
}
/* This function is called under rcu_read_lock() */
static int
nfnetlink_parse_nat_setup(struct nf_conn *ct,
			  enum nf_nat_manip_type manip,
			  const struct nlattr *attr)
{
	struct nf_nat_range2 range;
	const struct nf_nat_l3proto *l3proto;
	int err;

	/* Should not happen, restricted to creating new conntracks
	 * via ctnetlink.
	 */
	if (WARN_ON_ONCE(nf_nat_initialized(ct, manip)))
		return -EEXIST;

	/* Make sure that L3 NAT is there by when we call nf_nat_setup_info to
	 * attach the null binding, otherwise this may oops.
	 */
	l3proto = __nf_nat_l3proto_find(nf_ct_l3num(ct));
	if (l3proto == NULL)
		return -EAGAIN;

	/* No NAT information has been passed, allocate the null-binding */
	if (attr == NULL)
		return __nf_nat_alloc_null_binding(ct, manip) == NF_DROP ? -ENOMEM : 0;

	err = nfnetlink_parse_nat(attr, ct, &range, l3proto);
	if (err < 0)
		return err;

	return nf_nat_setup_info(ct, &range, manip) == NF_DROP ? -ENOMEM : 0;
}
#else
static int
nfnetlink_parse_nat_setup(struct nf_conn *ct,
			  enum nf_nat_manip_type manip,
			  const struct nlattr *attr)
{
	return -EOPNOTSUPP;
}
#endif
static struct nf_ct_helper_expectfn follow_master_nat = {
	.name		= "nat-follow-master",
	.expectfn	= nf_nat_follow_master,
};
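/* Registration of NAT backends (e.g. the iptables and nftables NAT
 * chains).  The first backend registered for a family installs the
 * actual netfilter hooks (a kmemdup'd copy of @orig_nat_ops); later
 * backends only add their lookup function to the RCU hook list hanging
 * off the matching hook's private data, so both can coexist.
 */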
int nf_nat_register_fn(struct net *net, const struct nf_hook_ops *ops,
		       const struct nf_hook_ops *orig_nat_ops, unsigned int ops_count)
{
	struct nat_net *nat_net = net_generic(net, nat_net_id);
	struct nf_nat_hooks_net *nat_proto_net;
	struct nf_nat_lookup_hook_priv *priv;
	unsigned int hooknum = ops->hooknum;
	struct nf_hook_ops *nat_ops;
	int i, ret;

	if (WARN_ON_ONCE(ops->pf >= ARRAY_SIZE(nat_net->nat_proto_net)))
		return -EINVAL;

	nat_proto_net = &nat_net->nat_proto_net[ops->pf];

	for (i = 0; i < ops_count; i++) {
		if (WARN_ON(orig_nat_ops[i].pf != ops->pf))
			return -EINVAL;
		if (orig_nat_ops[i].hooknum == hooknum) {
			hooknum = i;
			break;
		}
	}

	if (WARN_ON_ONCE(i == ops_count))
		return -EINVAL;

	mutex_lock(&nf_nat_proto_mutex);
	if (!nat_proto_net->nat_hook_ops) {
		WARN_ON(nat_proto_net->users != 0);

		nat_ops = kmemdup(orig_nat_ops, sizeof(*orig_nat_ops) * ops_count, GFP_KERNEL);
		if (!nat_ops) {
			mutex_unlock(&nf_nat_proto_mutex);
			return -ENOMEM;
		}

		for (i = 0; i < ops_count; i++) {
			priv = kzalloc(sizeof(*priv), GFP_KERNEL);
			if (priv) {
				nat_ops[i].priv = priv;
				continue;
			}
			mutex_unlock(&nf_nat_proto_mutex);
			while (i)
				kfree(nat_ops[--i].priv);
			kfree(nat_ops);
			return -ENOMEM;
		}

		ret = nf_register_net_hooks(net, nat_ops, ops_count);
		if (ret < 0) {
			mutex_unlock(&nf_nat_proto_mutex);
			for (i = 0; i < ops_count; i++)
				kfree(nat_ops[i].priv);
			kfree(nat_ops);
			return ret;
		}

		nat_proto_net->nat_hook_ops = nat_ops;
	}

	nat_ops = nat_proto_net->nat_hook_ops;
	priv = nat_ops[hooknum].priv;
	if (WARN_ON_ONCE(!priv)) {
		mutex_unlock(&nf_nat_proto_mutex);
		return -EOPNOTSUPP;
	}

	ret = nf_hook_entries_insert_raw(&priv->entries, ops);
	if (ret == 0)
		nat_proto_net->users++;

	mutex_unlock(&nf_nat_proto_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(nf_nat_register_fn);
void nf_nat_unregister_fn(struct net *net, const struct nf_hook_ops *ops,
			  unsigned int ops_count)
{
	struct nat_net *nat_net = net_generic(net, nat_net_id);
	struct nf_nat_hooks_net *nat_proto_net;
	struct nf_nat_lookup_hook_priv *priv;
	struct nf_hook_ops *nat_ops;
	int hooknum = ops->hooknum;
	int i;

	if (ops->pf >= ARRAY_SIZE(nat_net->nat_proto_net))
		return;

	nat_proto_net = &nat_net->nat_proto_net[ops->pf];

	mutex_lock(&nf_nat_proto_mutex);
	if (WARN_ON(nat_proto_net->users == 0))
		goto unlock;

	nat_proto_net->users--;

	nat_ops = nat_proto_net->nat_hook_ops;
	for (i = 0; i < ops_count; i++) {
		if (nat_ops[i].hooknum == hooknum) {
			hooknum = i;
			break;
		}
	}
	if (WARN_ON_ONCE(i == ops_count))
		goto unlock;
	priv = nat_ops[hooknum].priv;
	nf_hook_entries_delete_raw(&priv->entries, ops);

	if (nat_proto_net->users == 0) {
		nf_unregister_net_hooks(net, nat_ops, ops_count);

		for (i = 0; i < ops_count; i++) {
			priv = nat_ops[i].priv;
			kfree_rcu(priv, rcu_head);
		}

		nat_proto_net->nat_hook_ops = NULL;
		kfree(nat_ops);
	}
unlock:
	mutex_unlock(&nf_nat_proto_mutex);
}
EXPORT_SYMBOL_GPL(nf_nat_unregister_fn);
static struct pernet_operations nat_net_ops = {
	.id = &nat_net_id,
	.size = sizeof(struct nat_net),
};

static struct nf_nat_hook nat_hook = {
	.parse_nat_setup	= nfnetlink_parse_nat_setup,
#ifdef CONFIG_XFRM
	.decode_session		= __nf_nat_decode_session,
#endif
	.manip_pkt		= nf_nat_manip_pkt,
};
static int __init nf_nat_init(void)
{
	int ret, i;

	/* Leave them the same for the moment. */
	nf_nat_htable_size = nf_conntrack_htable_size;
	if (nf_nat_htable_size < CONNTRACK_LOCKS)
		nf_nat_htable_size = CONNTRACK_LOCKS;

	nf_nat_bysource = nf_ct_alloc_hashtable(&nf_nat_htable_size, 0);
	if (!nf_nat_bysource)
		return -ENOMEM;

	ret = nf_ct_extend_register(&nat_extend);
	if (ret < 0) {
		kvfree(nf_nat_bysource);
		pr_err("Unable to register extension\n");
		return ret;
	}

	for (i = 0; i < CONNTRACK_LOCKS; i++)
		spin_lock_init(&nf_nat_locks[i]);

	ret = register_pernet_subsys(&nat_net_ops);
	if (ret < 0) {
		nf_ct_extend_unregister(&nat_extend);
		kvfree(nf_nat_bysource);
		return ret;
	}

	nf_ct_helper_expectfn_register(&follow_master_nat);

	WARN_ON(nf_nat_hook != NULL);
	RCU_INIT_POINTER(nf_nat_hook, &nat_hook);

	return 0;
}
static void __exit nf_nat_cleanup(void)
{
	struct nf_nat_proto_clean clean = {};

	nf_ct_iterate_destroy(nf_nat_proto_clean, &clean);

	nf_ct_extend_unregister(&nat_extend);
	nf_ct_helper_expectfn_unregister(&follow_master_nat);
	RCU_INIT_POINTER(nf_nat_hook, NULL);

	synchronize_net();
	kvfree(nf_nat_bysource);
	unregister_pernet_subsys(&nat_net_ops);
}

MODULE_LICENSE("GPL");

module_init(nf_nat_init);
module_exit(nf_nat_cleanup);