/* Connection state tracking for netfilter. This is separated from,
   but required by, the NAT layer; it can also be used by an iptables
   extension. */

/* (C) 1999-2001 Paul `Rusty' Russell
 * (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org>
 * (C) 2003,2004 USAGI/WIDE Project <http://www.linux-ipv6.org>
 * (C) 2005-2012 Patrick McHardy <kaber@trash.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/types.h>
#include <linux/netfilter.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/skbuff.h>
#include <linux/proc_fs.h>
#include <linux/vmalloc.h>
#include <linux/stddef.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/jhash.h>
#include <linux/siphash.h>
#include <linux/err.h>
#include <linux/percpu.h>
#include <linux/moduleparam.h>
#include <linux/notifier.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/socket.h>
#include <linux/mm.h>
#include <linux/nsproxy.h>
#include <linux/rculist_nulls.h>

#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_l4proto.h>
#include <net/netfilter/nf_conntrack_expect.h>
#include <net/netfilter/nf_conntrack_helper.h>
#include <net/netfilter/nf_conntrack_seqadj.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_conntrack_extend.h>
#include <net/netfilter/nf_conntrack_acct.h>
#include <net/netfilter/nf_conntrack_ecache.h>
#include <net/netfilter/nf_conntrack_zones.h>
#include <net/netfilter/nf_conntrack_timestamp.h>
#include <net/netfilter/nf_conntrack_timeout.h>
#include <net/netfilter/nf_conntrack_labels.h>
#include <net/netfilter/nf_conntrack_synproxy.h>
#include <net/netfilter/nf_nat.h>
#include <net/netfilter/nf_nat_helper.h>
#include <net/netns/hash.h>
#include <net/ip.h>

#include "nf_internals.h"

__cacheline_aligned_in_smp spinlock_t nf_conntrack_locks[CONNTRACK_LOCKS];
EXPORT_SYMBOL_GPL(nf_conntrack_locks);

__cacheline_aligned_in_smp DEFINE_SPINLOCK(nf_conntrack_expect_lock);
EXPORT_SYMBOL_GPL(nf_conntrack_expect_lock);

struct hlist_nulls_head *nf_conntrack_hash __read_mostly;
EXPORT_SYMBOL_GPL(nf_conntrack_hash);

struct conntrack_gc_work {
	struct delayed_work	dwork;
	u32			last_bucket;
	bool			exiting;
	bool			early_drop;
	long			next_gc_run;
};

static __read_mostly struct kmem_cache *nf_conntrack_cachep;
static __read_mostly spinlock_t nf_conntrack_locks_all_lock;
static __read_mostly DEFINE_SPINLOCK(nf_conntrack_locks_all_lock);
static __read_mostly bool nf_conntrack_locks_all;

/* every gc cycle scans at most 1/GC_MAX_BUCKETS_DIV part of table */
#define GC_MAX_BUCKETS_DIV	128u
/* upper bound of full table scan */
#define GC_MAX_SCAN_JIFFIES	(16u * HZ)
/* desired ratio of entries found to be expired */
#define GC_EVICT_RATIO	50u

static struct conntrack_gc_work conntrack_gc_work;
void nf_conntrack_lock(spinlock_t *lock) __acquires(lock)
{
	/* 1) Acquire the lock */
	spin_lock(lock);

	/* 2) read nf_conntrack_locks_all, with ACQUIRE semantics
	 * It pairs with the smp_store_release() in nf_conntrack_all_unlock()
	 */
	if (likely(smp_load_acquire(&nf_conntrack_locks_all) == false))
		return;

	/* fast path failed, unlock */
	spin_unlock(lock);

	/* Slow path 1) get global lock */
	spin_lock(&nf_conntrack_locks_all_lock);

	/* Slow path 2) get the lock we want */
	spin_lock(lock);

	/* Slow path 3) release the global lock */
	spin_unlock(&nf_conntrack_locks_all_lock);
}
EXPORT_SYMBOL_GPL(nf_conntrack_lock);
116
117static void nf_conntrack_double_unlock(unsigned int h1, unsigned int h2)
118{
119 h1 %= CONNTRACK_LOCKS;
120 h2 %= CONNTRACK_LOCKS;
121 spin_unlock(&nf_conntrack_locks[h1]);
122 if (h1 != h2)
123 spin_unlock(&nf_conntrack_locks[h2]);
124}
125
126/* return true if we need to recompute hashes (in case hash table was resized) */
127static bool nf_conntrack_double_lock(struct net *net, unsigned int h1,
128 unsigned int h2, unsigned int sequence)
129{
130 h1 %= CONNTRACK_LOCKS;
131 h2 %= CONNTRACK_LOCKS;
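	/* Always take the two bucket locks in ascending index order so
	 * that two CPUs locking the same pair cannot deadlock (ABBA).
	 */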
132 if (h1 <= h2) {
b16c2919 133 nf_conntrack_lock(&nf_conntrack_locks[h1]);
134 if (h1 != h2)
135 spin_lock_nested(&nf_conntrack_locks[h2],
136 SINGLE_DEPTH_NESTING);
137 } else {
b16c2919 138 nf_conntrack_lock(&nf_conntrack_locks[h2]);
139 spin_lock_nested(&nf_conntrack_locks[h1],
140 SINGLE_DEPTH_NESTING);
141 }
a3efd812 142 if (read_seqcount_retry(&nf_conntrack_generation, sequence)) {
143 nf_conntrack_double_unlock(h1, h2);
144 return true;
145 }
146 return false;
147}
148
149static void nf_conntrack_all_lock(void)
150{
151 int i;
152
b16c2919 153 spin_lock(&nf_conntrack_locks_all_lock);
b16c2919 154
3ef0c7a7 155 nf_conntrack_locks_all = true;
b316ff78 156
b16c2919 157 for (i = 0; i < CONNTRACK_LOCKS; i++) {
158 spin_lock(&nf_conntrack_locks[i]);
159
160 /* This spin_unlock provides the "release" to ensure that
161 * nf_conntrack_locks_all==true is visible to everyone that
162 * acquired spin_lock(&nf_conntrack_locks[]).
163 */
164 spin_unlock(&nf_conntrack_locks[i]);
b16c2919 165 }
166}
167
168static void nf_conntrack_all_unlock(void)
169{
3ef0c7a7 170 /* All prior stores must be complete before we clear
171 * 'nf_conntrack_locks_all'. Otherwise nf_conntrack_lock()
172 * might observe the false value but not the entire
173 * critical section.
174 * It pairs with the smp_load_acquire() in nf_conntrack_lock()
175 */
176 smp_store_release(&nf_conntrack_locks_all, false);
b16c2919 177 spin_unlock(&nf_conntrack_locks_all_lock);
178}
179
e2b7606c 180unsigned int nf_conntrack_htable_size __read_mostly;
181EXPORT_SYMBOL_GPL(nf_conntrack_htable_size);
182
e478075c 183unsigned int nf_conntrack_max __read_mostly;
538c5672 184EXPORT_SYMBOL_GPL(nf_conntrack_max);
92e47ba8 185seqcount_t nf_conntrack_generation __read_mostly;
141658fb 186static unsigned int nf_conntrack_hash_rnd __read_mostly;
9fb9cbb1 187
188static u32 hash_conntrack_raw(const struct nf_conntrack_tuple *tuple,
189 const struct net *net)
9fb9cbb1 190{
0794935e 191 unsigned int n;
1b8c8a9f 192 u32 seed;
0794935e 193
194 get_random_once(&nf_conntrack_hash_rnd, sizeof(nf_conntrack_hash_rnd));
195
196 /* The direction must be ignored, so we hash everything up to the
197 * destination ports (which is a multiple of 4) and treat the last
198 * three bytes manually.
199 */
1b8c8a9f 200 seed = nf_conntrack_hash_rnd ^ net_hash_mix(net);
0794935e 201 n = (sizeof(tuple->src) + sizeof(tuple->dst.u3)) / sizeof(u32);
1b8c8a9f 202 return jhash2((u32 *)tuple, n, seed ^
203 (((__force __u16)tuple->dst.u.all << 16) |
204 tuple->dst.protonum));
205}
206
56d52d48 207static u32 scale_hash(u32 hash)
99f07e91 208{
56d52d48 209 return reciprocal_scale(hash, nf_conntrack_htable_size);
99f07e91 210}
0794935e 211
212static u32 __hash_conntrack(const struct net *net,
213 const struct nf_conntrack_tuple *tuple,
214 unsigned int size)
99f07e91 215{
1b8c8a9f 216 return reciprocal_scale(hash_conntrack_raw(tuple, net), size);
217}
218
219static u32 hash_conntrack(const struct net *net,
220 const struct nf_conntrack_tuple *tuple)
9fb9cbb1 221{
56d52d48 222 return scale_hash(hash_conntrack_raw(tuple, net));
223}
224
static bool nf_ct_get_tuple_ports(const struct sk_buff *skb,
				  unsigned int dataoff,
				  struct nf_conntrack_tuple *tuple)
{
	struct {
		__be16 sport;
		__be16 dport;
	} _inet_hdr, *inet_hdr;

	/* Actually only need first 4 bytes to get ports. */
	inet_hdr = skb_header_pointer(skb, dataoff, sizeof(_inet_hdr), &_inet_hdr);
	if (!inet_hdr)
		return false;

	tuple->src.u.udp.port = inet_hdr->sport;
	tuple->dst.u.udp.port = inet_hdr->dport;
	return true;
}
242
60e3be94 243static bool
244nf_ct_get_tuple(const struct sk_buff *skb,
245 unsigned int nhoff,
246 unsigned int dataoff,
247 u_int16_t l3num,
248 u_int8_t protonum,
a31f1adc 249 struct net *net,
303e0c55 250 struct nf_conntrack_tuple *tuple)
9fb9cbb1 251{
252 unsigned int size;
253 const __be32 *ap;
254 __be32 _addrs[8];
255
443a70d5 256 memset(tuple, 0, sizeof(*tuple));
257
258 tuple->src.l3num = l3num;
259 switch (l3num) {
260 case NFPROTO_IPV4:
261 nhoff += offsetof(struct iphdr, saddr);
262 size = 2 * sizeof(__be32);
263 break;
264 case NFPROTO_IPV6:
265 nhoff += offsetof(struct ipv6hdr, saddr);
266 size = sizeof(_addrs);
267 break;
268 default:
269 return true;
270 }
271
272 ap = skb_header_pointer(skb, nhoff, size, _addrs);
273 if (!ap)
5f2b4c90 274 return false;
9fb9cbb1 275
276 switch (l3num) {
277 case NFPROTO_IPV4:
278 tuple->src.u3.ip = ap[0];
279 tuple->dst.u3.ip = ap[1];
280 break;
281 case NFPROTO_IPV6:
282 memcpy(tuple->src.u3.ip6, ap, sizeof(tuple->src.u3.ip6));
283 memcpy(tuple->dst.u3.ip6, ap + 4, sizeof(tuple->dst.u3.ip6));
284 break;
285 }
286
287 tuple->dst.protonum = protonum;
288 tuple->dst.dir = IP_CT_DIR_ORIGINAL;
289
e2e48b47 290 switch (protonum) {
81e01647 291#if IS_ENABLED(CONFIG_IPV6)
292 case IPPROTO_ICMPV6:
293 return icmpv6_pkt_to_tuple(skb, dataoff, net, tuple);
81e01647 294#endif
295 case IPPROTO_ICMP:
296 return icmp_pkt_to_tuple(skb, dataoff, net, tuple);
297#ifdef CONFIG_NF_CT_PROTO_GRE
298 case IPPROTO_GRE:
299 return gre_pkt_to_tuple(skb, dataoff, net, tuple);
300#endif
301 case IPPROTO_TCP:
302 case IPPROTO_UDP: /* fallthrough */
303 return nf_ct_get_tuple_ports(skb, dataoff, tuple);
304#ifdef CONFIG_NF_CT_PROTO_UDPLITE
305 case IPPROTO_UDPLITE:
306 return nf_ct_get_tuple_ports(skb, dataoff, tuple);
307#endif
308#ifdef CONFIG_NF_CT_PROTO_SCTP
309 case IPPROTO_SCTP:
310 return nf_ct_get_tuple_ports(skb, dataoff, tuple);
311#endif
312#ifdef CONFIG_NF_CT_PROTO_DCCP
313 case IPPROTO_DCCP:
314 return nf_ct_get_tuple_ports(skb, dataoff, tuple);
315#endif
316 default:
317 break;
e2e48b47 318 }
97e08cae 319
97e08cae 320 return true;
321}
322
323static int ipv4_get_l4proto(const struct sk_buff *skb, unsigned int nhoff,
324 u_int8_t *protonum)
325{
326 int dataoff = -1;
327 const struct iphdr *iph;
328 struct iphdr _iph;
329
330 iph = skb_header_pointer(skb, nhoff, sizeof(_iph), &_iph);
331 if (!iph)
332 return -1;
333
334 /* Conntrack defragments packets, we might still see fragments
335 * inside ICMP packets though.
336 */
337 if (iph->frag_off & htons(IP_OFFSET))
338 return -1;
339
340 dataoff = nhoff + (iph->ihl << 2);
341 *protonum = iph->protocol;
342
343 /* Check bogus IP headers */
344 if (dataoff > skb->len) {
345 pr_debug("bogus IPv4 packet: nhoff %u, ihl %u, skblen %u\n",
346 nhoff, iph->ihl << 2, skb->len);
347 return -1;
348 }
349 return dataoff;
350}
351
a0ae2562 352#if IS_ENABLED(CONFIG_IPV6)
353static int ipv6_get_l4proto(const struct sk_buff *skb, unsigned int nhoff,
354 u8 *protonum)
355{
356 int protoff = -1;
357 unsigned int extoff = nhoff + sizeof(struct ipv6hdr);
358 __be16 frag_off;
359 u8 nexthdr;
360
361 if (skb_copy_bits(skb, nhoff + offsetof(struct ipv6hdr, nexthdr),
362 &nexthdr, sizeof(nexthdr)) != 0) {
363 pr_debug("can't get nexthdr\n");
364 return -1;
365 }
366 protoff = ipv6_skip_exthdr(skb, extoff, &nexthdr, &frag_off);
367 /*
 * (protoff == skb->len) means the packet has no data, just
 * IPv6 and possibly extension headers, but it is tracked anyway
370 */
371 if (protoff < 0 || (frag_off & htons(~0x7)) != 0) {
372 pr_debug("can't find proto in pkt\n");
373 return -1;
374 }
375
376 *protonum = nexthdr;
377 return protoff;
378}
a0ae2562 379#endif
380
381static int get_l4proto(const struct sk_buff *skb,
382 unsigned int nhoff, u8 pf, u8 *l4num)
383{
384 switch (pf) {
385 case NFPROTO_IPV4:
386 return ipv4_get_l4proto(skb, nhoff, l4num);
a0ae2562 387#if IS_ENABLED(CONFIG_IPV6)
388 case NFPROTO_IPV6:
389 return ipv6_get_l4proto(skb, nhoff, l4num);
a0ae2562 390#endif
391 default:
392 *l4num = 0;
393 break;
394 }
395 return -1;
396}
397
5f2b4c90 398bool nf_ct_get_tuplepr(const struct sk_buff *skb, unsigned int nhoff,
399 u_int16_t l3num,
400 struct net *net, struct nf_conntrack_tuple *tuple)
e2a3123f 401{
402 u8 protonum;
403 int protoff;
e2a3123f 404
6816d931 405 protoff = get_l4proto(skb, nhoff, l3num, &protonum);
303e0c55 406 if (protoff <= 0)
5f2b4c90 407 return false;
e2a3123f 408
303e0c55 409 return nf_ct_get_tuple(skb, nhoff, protoff, l3num, protonum, net, tuple);
410}
411EXPORT_SYMBOL_GPL(nf_ct_get_tuplepr);
412
5f2b4c90 413bool
9fb9cbb1 414nf_ct_invert_tuple(struct nf_conntrack_tuple *inverse,
303e0c55 415 const struct nf_conntrack_tuple *orig)
9fb9cbb1 416{
443a70d5 417 memset(inverse, 0, sizeof(*inverse));
418
419 inverse->src.l3num = orig->src.l3num;
420
421 switch (orig->src.l3num) {
422 case NFPROTO_IPV4:
423 inverse->src.u3.ip = orig->dst.u3.ip;
424 inverse->dst.u3.ip = orig->src.u3.ip;
425 break;
426 case NFPROTO_IPV6:
427 inverse->src.u3.in6 = orig->dst.u3.in6;
428 inverse->dst.u3.in6 = orig->src.u3.in6;
429 break;
430 default:
431 break;
432 }
433
434 inverse->dst.dir = !orig->dst.dir;
435
436 inverse->dst.protonum = orig->dst.protonum;
8b3892ea 437
438 switch (orig->dst.protonum) {
439 case IPPROTO_ICMP:
440 return nf_conntrack_invert_icmp_tuple(inverse, orig);
81e01647 441#if IS_ENABLED(CONFIG_IPV6)
442 case IPPROTO_ICMPV6:
443 return nf_conntrack_invert_icmpv6_tuple(inverse, orig);
81e01647 444#endif
197c4300 445 }
446
447 inverse->src.u.all = orig->dst.u.all;
448 inverse->dst.u.all = orig->src.u.all;
449 return true;
9fb9cbb1 450}
13b18339 451EXPORT_SYMBOL_GPL(nf_ct_invert_tuple);
9fb9cbb1 452
/* Generate an almost-unique pseudo-id for a given conntrack.
 *
 * This intentionally does not re-use any of the seeds used for hash
 * table location; we assume the id gets exposed to userspace.
 *
 * The following nf_conn items do not change throughout the lifetime
 * of the nf_conn after it has been committed to the main hash table:
460 *
461 * 1. nf_conn address
462 * 2. nf_conn->ext address
463 * 3. nf_conn->master address (normally NULL)
464 * 4. tuple
465 * 5. the associated net namespace
466 */
467u32 nf_ct_get_id(const struct nf_conn *ct)
468{
469 static __read_mostly siphash_key_t ct_id_seed;
470 unsigned long a, b, c, d;
471
472 net_get_random_once(&ct_id_seed, sizeof(ct_id_seed));
473
474 a = (unsigned long)ct;
475 b = (unsigned long)ct->master ^ net_hash_mix(nf_ct_net(ct));
476 c = (unsigned long)ct->ext;
477 d = (unsigned long)siphash(&ct->tuplehash, sizeof(ct->tuplehash),
478 &ct_id_seed);
479#ifdef CONFIG_64BIT
480 return siphash_4u64((u64)a, (u64)b, (u64)c, (u64)d, &ct_id_seed);
481#else
482 return siphash_4u32((u32)a, (u32)b, (u32)c, (u32)d, &ct_id_seed);
483#endif
484}
485EXPORT_SYMBOL_GPL(nf_ct_get_id);
486
487static void
488clean_from_lists(struct nf_conn *ct)
489{
0d53778e 490 pr_debug("clean_from_lists(%p)\n", ct);
491 hlist_nulls_del_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode);
492 hlist_nulls_del_rcu(&ct->tuplehash[IP_CT_DIR_REPLY].hnnode);
493
494 /* Destroy all pending expectations */
c1d10adb 495 nf_ct_remove_expectations(ct);
496}
497
498/* must be called with local_bh_disable */
499static void nf_ct_add_to_dying_list(struct nf_conn *ct)
500{
501 struct ct_pcpu *pcpu;
502
503 /* add this conntrack to the (per cpu) dying list */
504 ct->cpu = smp_processor_id();
505 pcpu = per_cpu_ptr(nf_ct_net(ct)->ct.pcpu_lists, ct->cpu);
506
507 spin_lock(&pcpu->lock);
508 hlist_nulls_add_head(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode,
509 &pcpu->dying);
510 spin_unlock(&pcpu->lock);
511}
512
513/* must be called with local_bh_disable */
514static void nf_ct_add_to_unconfirmed_list(struct nf_conn *ct)
515{
516 struct ct_pcpu *pcpu;
517
518 /* add this conntrack to the (per cpu) unconfirmed list */
519 ct->cpu = smp_processor_id();
520 pcpu = per_cpu_ptr(nf_ct_net(ct)->ct.pcpu_lists, ct->cpu);
521
522 spin_lock(&pcpu->lock);
523 hlist_nulls_add_head(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode,
524 &pcpu->unconfirmed);
525 spin_unlock(&pcpu->lock);
526}
527
528/* must be called with local_bh_disable */
529static void nf_ct_del_from_dying_or_unconfirmed_list(struct nf_conn *ct)
530{
531 struct ct_pcpu *pcpu;
532
533 /* We overload first tuple to link into unconfirmed or dying list.*/
534 pcpu = per_cpu_ptr(nf_ct_net(ct)->ct.pcpu_lists, ct->cpu);
535
536 spin_lock(&pcpu->lock);
537 BUG_ON(hlist_nulls_unhashed(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode));
538 hlist_nulls_del_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode);
539 spin_unlock(&pcpu->lock);
540}
541
542#define NFCT_ALIGN(len) (((len) + NFCT_INFOMASK) & ~NFCT_INFOMASK)
543
0838aa7f 544/* Released via destroy_conntrack() */
545struct nf_conn *nf_ct_tmpl_alloc(struct net *net,
546 const struct nf_conntrack_zone *zone,
547 gfp_t flags)
0838aa7f 548{
30322309 549 struct nf_conn *tmpl, *p;
0838aa7f 550
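	/* skb->_nfct keeps the ctinfo bits in the low bits of the conntrack
	 * pointer (NFCT_INFOMASK), so a template must be aligned to
	 * NFCT_INFOMASK + 1.  If kmalloc()'s minimum alignment cannot
	 * guarantee that, over-allocate and align by hand, remembering
	 * the padding in tmpl_padto so nf_ct_tmpl_free() can undo it.
	 */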
551 if (ARCH_KMALLOC_MINALIGN <= NFCT_INFOMASK) {
552 tmpl = kzalloc(sizeof(*tmpl) + NFCT_INFOMASK, flags);
553 if (!tmpl)
554 return NULL;
555
556 p = tmpl;
557 tmpl = (struct nf_conn *)NFCT_ALIGN((unsigned long)p);
558 if (tmpl != p) {
559 tmpl = (struct nf_conn *)NFCT_ALIGN((unsigned long)p);
560 tmpl->proto.tmpl_padto = (char *)tmpl - (char *)p;
561 }
562 } else {
563 tmpl = kzalloc(sizeof(*tmpl), flags);
564 if (!tmpl)
565 return NULL;
566 }
567
568 tmpl->status = IPS_TEMPLATE;
569 write_pnet(&tmpl->ct_net, net);
6c8dee98 570 nf_ct_zone_add(tmpl, zone);
571 atomic_set(&tmpl->ct_general.use, 0);
572
573 return tmpl;
574}
575EXPORT_SYMBOL_GPL(nf_ct_tmpl_alloc);
576
9cf94eab 577void nf_ct_tmpl_free(struct nf_conn *tmpl)
578{
579 nf_ct_ext_destroy(tmpl);
580 nf_ct_ext_free(tmpl);
581
582 if (ARCH_KMALLOC_MINALIGN <= NFCT_INFOMASK)
583 kfree((char *)tmpl - tmpl->proto.tmpl_padto);
584 else
585 kfree(tmpl);
0838aa7f 586}
9cf94eab 587EXPORT_SYMBOL_GPL(nf_ct_tmpl_free);
0838aa7f 588
589static void destroy_gre_conntrack(struct nf_conn *ct)
590{
81e01647 591#ifdef CONFIG_NF_CT_PROTO_GRE
592 struct nf_conn *master = ct->master;
593
594 if (master)
595 nf_ct_gre_keymap_destroy(master);
81e01647 596#endif
597}
598
599static void
600destroy_conntrack(struct nf_conntrack *nfct)
601{
602 struct nf_conn *ct = (struct nf_conn *)nfct;
9fb9cbb1 603
0d53778e 604 pr_debug("destroy_conntrack(%p)\n", ct);
44d6e2f2 605 WARN_ON(atomic_read(&nfct->use) != 0);
9fb9cbb1 606
607 if (unlikely(nf_ct_is_template(ct))) {
608 nf_ct_tmpl_free(ct);
609 return;
610 }
611
612 if (unlikely(nf_ct_protonum(ct) == IPPROTO_GRE))
613 destroy_gre_conntrack(ct);
9fb9cbb1 614
ca7433df 615 local_bh_disable();
616 /* Expectations will have been removed in clean_from_lists,
617 * except TFTP can create an expectation on the first packet,
618 * before connection is in the list, so we need to clean here,
619 * too.
620 */
c1d10adb 621 nf_ct_remove_expectations(ct);
9fb9cbb1 622
b7779d06 623 nf_ct_del_from_dying_or_unconfirmed_list(ct);
9fb9cbb1 624
ca7433df 625 local_bh_enable();
626
627 if (ct->master)
628 nf_ct_put(ct->master);
629
0d53778e 630 pr_debug("destroy_conntrack: returning ct=%p to slab\n", ct);
9fb9cbb1
YK
631 nf_conntrack_free(ct);
632}
633
02982c27 634static void nf_ct_delete_from_lists(struct nf_conn *ct)
9fb9cbb1 635{
0d55af87 636 struct net *net = nf_ct_net(ct);
93bb0ceb 637 unsigned int hash, reply_hash;
93bb0ceb 638 unsigned int sequence;
9fb9cbb1 639
9858a3ae 640 nf_ct_helper_destroy(ct);
641
642 local_bh_disable();
643 do {
a3efd812 644 sequence = read_seqcount_begin(&nf_conntrack_generation);
deedb590 645 hash = hash_conntrack(net,
93bb0ceb 646 &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
deedb590 647 reply_hash = hash_conntrack(net,
648 &ct->tuplehash[IP_CT_DIR_REPLY].tuple);
649 } while (nf_conntrack_double_lock(net, hash, reply_hash, sequence));
650
9fb9cbb1 651 clean_from_lists(ct);
652 nf_conntrack_double_unlock(hash, reply_hash);
653
b7779d06 654 nf_ct_add_to_dying_list(ct);
93bb0ceb 655
93bb0ceb 656 local_bh_enable();
dd7669a9 657}
dd7669a9 658
02982c27 659bool nf_ct_delete(struct nf_conn *ct, u32 portid, int report)
dd7669a9 660{
661 struct nf_conn_tstamp *tstamp;
662
663 if (test_and_set_bit(IPS_DYING_BIT, &ct->status))
664 return false;
665
666 tstamp = nf_conn_tstamp_find(ct);
667 if (tstamp && tstamp->stop == 0)
d2de875c 668 tstamp->stop = ktime_get_real_ns();
dd7669a9 669
670 if (nf_conntrack_event_report(IPCT_DESTROY, ct,
671 portid, report) < 0) {
672 /* destroy event was not delivered. nf_ct_put will
673 * be done by event cache worker on redelivery.
674 */
dd7669a9 675 nf_ct_delete_from_lists(ct);
9500507c 676 nf_conntrack_ecache_delayed_work(nf_ct_net(ct));
02982c27 677 return false;
dd7669a9 678 }
679
680 nf_conntrack_ecache_work(nf_ct_net(ct));
dd7669a9 681 nf_ct_delete_from_lists(ct);
9fb9cbb1 682 nf_ct_put(ct);
683 return true;
684}
685EXPORT_SYMBOL_GPL(nf_ct_delete);
686
687static inline bool
688nf_ct_key_equal(struct nf_conntrack_tuple_hash *h,
308ac914 689 const struct nf_conntrack_tuple *tuple,
690 const struct nf_conntrack_zone *zone,
691 const struct net *net)
692{
693 struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(h);
694
695 /* A conntrack can be recreated with the equal tuple,
696 * so we need to check that the conntrack is confirmed
697 */
698 return nf_ct_tuple_equal(tuple, &h->tuple) &&
deedb590 699 nf_ct_zone_equal(ct, zone, NF_CT_DIRECTION(h)) &&
700 nf_ct_is_confirmed(ct) &&
701 net_eq(net, nf_ct_net(ct));
702}
703
704static inline bool
705nf_ct_match(const struct nf_conn *ct1, const struct nf_conn *ct2)
706{
707 return nf_ct_tuple_equal(&ct1->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
708 &ct2->tuplehash[IP_CT_DIR_ORIGINAL].tuple) &&
709 nf_ct_tuple_equal(&ct1->tuplehash[IP_CT_DIR_REPLY].tuple,
710 &ct2->tuplehash[IP_CT_DIR_REPLY].tuple) &&
711 nf_ct_zone_equal(ct1, nf_ct_zone(ct2), IP_CT_DIR_ORIGINAL) &&
712 nf_ct_zone_equal(ct1, nf_ct_zone(ct2), IP_CT_DIR_REPLY) &&
713 net_eq(nf_ct_net(ct1), nf_ct_net(ct2));
714}
715
716/* caller must hold rcu readlock and none of the nf_conntrack_locks */
717static void nf_ct_gc_expired(struct nf_conn *ct)
718{
719 if (!atomic_inc_not_zero(&ct->ct_general.use))
720 return;
721
722 if (nf_ct_should_gc(ct))
723 nf_ct_kill(ct);
724
725 nf_ct_put(ct);
726}
727
728/*
729 * Warning :
730 * - Caller must take a reference on returned object
731 * and recheck nf_ct_tuple_equal(tuple, &h->tuple)
ea781f19 732 */
99f07e91 733static struct nf_conntrack_tuple_hash *
308ac914 734____nf_conntrack_find(struct net *net, const struct nf_conntrack_zone *zone,
99f07e91 735 const struct nf_conntrack_tuple *tuple, u32 hash)
736{
737 struct nf_conntrack_tuple_hash *h;
5e3c61f9 738 struct hlist_nulls_head *ct_hash;
ea781f19 739 struct hlist_nulls_node *n;
92e47ba8 740 unsigned int bucket, hsize;
9fb9cbb1 741
ea781f19 742begin:
743 nf_conntrack_get_ht(&ct_hash, &hsize);
744 bucket = reciprocal_scale(hash, hsize);
745
746 hlist_nulls_for_each_entry_rcu(h, n, &ct_hash[bucket], hnnode) {
747 struct nf_conn *ct;
748
749 ct = nf_ct_tuplehash_to_ctrack(h);
750 if (nf_ct_is_expired(ct)) {
751 nf_ct_gc_expired(ct);
752 continue;
753 }
754
755 if (nf_ct_is_dying(ct))
756 continue;
757
8e8118f8 758 if (nf_ct_key_equal(h, tuple, zone, net))
9fb9cbb1 759 return h;
9fb9cbb1 760 }
761 /*
762 * if the nulls value we got at the end of this lookup is
763 * not the expected one, we must restart lookup.
764 * We probably met an item that was moved to another chain.
765 */
99f07e91 766 if (get_nulls_value(n) != bucket) {
2cf12348 767 NF_CT_STAT_INC_ATOMIC(net, search_restart);
ea781f19 768 goto begin;
af740b2c 769 }
770
771 return NULL;
772}
99f07e91 773
9fb9cbb1 774/* Find a connection corresponding to a tuple. */
99f07e91 775static struct nf_conntrack_tuple_hash *
308ac914 776__nf_conntrack_find_get(struct net *net, const struct nf_conntrack_zone *zone,
99f07e91 777 const struct nf_conntrack_tuple *tuple, u32 hash)
778{
779 struct nf_conntrack_tuple_hash *h;
76507f69 780 struct nf_conn *ct;
9fb9cbb1 781
76507f69 782 rcu_read_lock();
ea781f19 783begin:
99f07e91 784 h = ____nf_conntrack_find(net, zone, tuple, hash);
785 if (h) {
786 ct = nf_ct_tuplehash_to_ctrack(h);
787 if (unlikely(nf_ct_is_dying(ct) ||
788 !atomic_inc_not_zero(&ct->ct_general.use)))
76507f69 789 h = NULL;
ea781f19 790 else {
e0c7d472 791 if (unlikely(!nf_ct_key_equal(h, tuple, zone, net))) {
792 nf_ct_put(ct);
793 goto begin;
794 }
795 }
796 }
797 rcu_read_unlock();
798
799 return h;
800}
801
802struct nf_conntrack_tuple_hash *
308ac914 803nf_conntrack_find_get(struct net *net, const struct nf_conntrack_zone *zone,
804 const struct nf_conntrack_tuple *tuple)
805{
806 return __nf_conntrack_find_get(net, zone, tuple,
1b8c8a9f 807 hash_conntrack_raw(tuple, net));
99f07e91 808}
13b18339 809EXPORT_SYMBOL_GPL(nf_conntrack_find_get);
9fb9cbb1 810
811static void __nf_conntrack_hash_insert(struct nf_conn *ct,
812 unsigned int hash,
b476b72a 813 unsigned int reply_hash)
c1d10adb 814{
ea781f19 815 hlist_nulls_add_head_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode,
56d52d48 816 &nf_conntrack_hash[hash]);
ea781f19 817 hlist_nulls_add_head_rcu(&ct->tuplehash[IP_CT_DIR_REPLY].hnnode,
56d52d48 818 &nf_conntrack_hash[reply_hash]);
819}
820
821int
822nf_conntrack_hash_check_insert(struct nf_conn *ct)
c1d10adb 823{
308ac914 824 const struct nf_conntrack_zone *zone;
d696c7bd 825 struct net *net = nf_ct_net(ct);
b476b72a 826 unsigned int hash, reply_hash;
827 struct nf_conntrack_tuple_hash *h;
828 struct hlist_nulls_node *n;
93bb0ceb 829 unsigned int sequence;
c1d10adb 830
5d0aa2cc 831 zone = nf_ct_zone(ct);
7d367e06 832
833 local_bh_disable();
834 do {
a3efd812 835 sequence = read_seqcount_begin(&nf_conntrack_generation);
deedb590 836 hash = hash_conntrack(net,
93bb0ceb 837 &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
deedb590 838 reply_hash = hash_conntrack(net,
839 &ct->tuplehash[IP_CT_DIR_REPLY].tuple);
840 } while (nf_conntrack_double_lock(net, hash, reply_hash, sequence));
841
842 /* See if there's one in the list already, including reverse */
56d52d48 843 hlist_nulls_for_each_entry(h, n, &nf_conntrack_hash[hash], hnnode)
86804348 844 if (nf_ct_key_equal(h, &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
e0c7d472 845 zone, net))
7d367e06 846 goto out;
86804348 847
56d52d48 848 hlist_nulls_for_each_entry(h, n, &nf_conntrack_hash[reply_hash], hnnode)
86804348 849 if (nf_ct_key_equal(h, &ct->tuplehash[IP_CT_DIR_REPLY].tuple,
e0c7d472 850 zone, net))
7d367e06 851 goto out;
c1d10adb 852
853 smp_wmb();
854 /* The caller holds a reference to this object */
855 atomic_set(&ct->ct_general.use, 2);
b476b72a 856 __nf_conntrack_hash_insert(ct, hash, reply_hash);
93bb0ceb 857 nf_conntrack_double_unlock(hash, reply_hash);
7d367e06 858 NF_CT_STAT_INC(net, insert);
93bb0ceb 859 local_bh_enable();
860 return 0;
861
862out:
93bb0ceb 863 nf_conntrack_double_unlock(hash, reply_hash);
7d367e06 864 NF_CT_STAT_INC(net, insert_failed);
93bb0ceb 865 local_bh_enable();
7d367e06 866 return -EEXIST;
c1d10adb 867}
7d367e06 868EXPORT_SYMBOL_GPL(nf_conntrack_hash_check_insert);
c1d10adb 869
870static inline void nf_ct_acct_update(struct nf_conn *ct,
871 enum ip_conntrack_info ctinfo,
872 unsigned int len)
873{
874 struct nf_conn_acct *acct;
875
876 acct = nf_conn_acct_find(ct);
877 if (acct) {
878 struct nf_conn_counter *counter = acct->counter;
879
880 atomic64_inc(&counter[CTINFO2DIR(ctinfo)].packets);
881 atomic64_add(len, &counter[CTINFO2DIR(ctinfo)].bytes);
882 }
883}
884
885static void nf_ct_acct_merge(struct nf_conn *ct, enum ip_conntrack_info ctinfo,
886 const struct nf_conn *loser_ct)
887{
888 struct nf_conn_acct *acct;
889
890 acct = nf_conn_acct_find(loser_ct);
891 if (acct) {
892 struct nf_conn_counter *counter = acct->counter;
893 unsigned int bytes;
894
895 /* u32 should be fine since we must have seen one packet. */
896 bytes = atomic64_read(&counter[CTINFO2DIR(ctinfo)].bytes);
897 nf_ct_acct_update(ct, ctinfo, bytes);
898 }
899}
900
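/* Two packets of the same flow can race to create and confirm a conntrack
 * entry; the loser may be dropped in favour of the entry that won the race
 * when the l4 tracker sets ->allow_clash (e.g. UDP) and NAT has not already
 * rewritten the losing entry into a different reply tuple.
 */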
901/* Resolve race on insertion if this protocol allows this. */
902static int nf_ct_resolve_clash(struct net *net, struct sk_buff *skb,
903 enum ip_conntrack_info ctinfo,
904 struct nf_conntrack_tuple_hash *h)
905{
906 /* This is the conntrack entry already in hashes that won race. */
907 struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(h);
b3480fe0 908 const struct nf_conntrack_l4proto *l4proto;
909 enum ip_conntrack_info oldinfo;
910 struct nf_conn *loser_ct = nf_ct_get(skb, &oldinfo);
71d8c47f 911
4a60dc74 912 l4proto = nf_ct_l4proto_find(nf_ct_protonum(ct));
71d8c47f
PNA
913 if (l4proto->allow_clash &&
914 !nf_ct_is_dying(ct) &&
915 atomic_inc_not_zero(&ct->ct_general.use)) {
916 if (((ct->status & IPS_NAT_DONE_MASK) == 0) ||
917 nf_ct_match(ct, loser_ct)) {
918 nf_ct_acct_merge(ct, ctinfo, loser_ct);
919 nf_conntrack_put(&loser_ct->ct_general);
920 nf_ct_set(skb, ct, oldinfo);
921 return NF_ACCEPT;
922 }
923 nf_ct_put(ct);
924 }
925 NF_CT_STAT_INC(net, drop);
926 return NF_DROP;
927}
928
929/* Confirm a connection given skb; places it in hash table */
930int
3db05fea 931__nf_conntrack_confirm(struct sk_buff *skb)
9fb9cbb1 932{
308ac914 933 const struct nf_conntrack_zone *zone;
b476b72a 934 unsigned int hash, reply_hash;
df0933dc 935 struct nf_conntrack_tuple_hash *h;
9fb9cbb1 936 struct nf_conn *ct;
df0933dc 937 struct nf_conn_help *help;
a992ca2a 938 struct nf_conn_tstamp *tstamp;
ea781f19 939 struct hlist_nulls_node *n;
9fb9cbb1 940 enum ip_conntrack_info ctinfo;
400dad39 941 struct net *net;
93bb0ceb 942 unsigned int sequence;
71d8c47f 943 int ret = NF_DROP;
9fb9cbb1 944
3db05fea 945 ct = nf_ct_get(skb, &ctinfo);
400dad39 946 net = nf_ct_net(ct);
9fb9cbb1
YK
947
948 /* ipt_REJECT uses nf_conntrack_attach to attach related
   ICMP/TCP RST packets in the other direction.  The actual packet
   which created the connection will be IP_CT_NEW, or for an
951 expected connection, IP_CT_RELATED. */
952 if (CTINFO2DIR(ctinfo) != IP_CT_DIR_ORIGINAL)
953 return NF_ACCEPT;
954
5d0aa2cc 955 zone = nf_ct_zone(ct);
93bb0ceb
JDB
956 local_bh_disable();
957
958 do {
a3efd812 959 sequence = read_seqcount_begin(&nf_conntrack_generation);
93bb0ceb
JDB
960 /* reuse the hash saved before */
961 hash = *(unsigned long *)&ct->tuplehash[IP_CT_DIR_REPLY].hnnode.pprev;
56d52d48 962 hash = scale_hash(hash);
deedb590 963 reply_hash = hash_conntrack(net,
93bb0ceb
JDB
964 &ct->tuplehash[IP_CT_DIR_REPLY].tuple);
965
966 } while (nf_conntrack_double_lock(net, hash, reply_hash, sequence));
9fb9cbb1
YK
967
968 /* We're not in hash table, and we refuse to set up related
93bb0ceb
JDB
969 * connections for unconfirmed conns. But packet copies and
970 * REJECT will give spurious warnings here.
971 */
9fb9cbb1 972
13f5251f
CMW
973 /* Another skb with the same unconfirmed conntrack may
974 * win the race. This may happen for bridge(br_flood)
975 * or broadcast/multicast packets do skb_clone with
976 * unconfirmed conntrack.
93bb0ceb 977 */
13f5251f
CMW
978 if (unlikely(nf_ct_is_confirmed(ct))) {
979 WARN_ON_ONCE(1);
980 nf_conntrack_double_unlock(hash, reply_hash);
981 local_bh_enable();
982 return NF_DROP;
983 }
984
0d53778e 985 pr_debug("Confirming conntrack %p\n", ct);
8ca3f5e9
PNA
986 /* We have to check the DYING flag after unlink to prevent
987 * a race against nf_ct_get_next_corpse() possibly called from
988 * user context, else we insert an already 'dead' hash, blocking
989 * further use of that particular connection -JM.
990 */
991 nf_ct_del_from_dying_or_unconfirmed_list(ct);
992
71d8c47f
PNA
993 if (unlikely(nf_ct_is_dying(ct))) {
994 nf_ct_add_to_dying_list(ct);
995 goto dying;
996 }
fc350777 997
9fb9cbb1
YK
998 /* See if there's one in the list already, including reverse:
999 NAT could have grabbed it without realizing, since we're
   not in the hash.  If there is, we lost the race. */
56d52d48 1001 hlist_nulls_for_each_entry(h, n, &nf_conntrack_hash[hash], hnnode)
86804348 1002 if (nf_ct_key_equal(h, &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
e0c7d472 1003 zone, net))
df0933dc 1004 goto out;
86804348 1005
56d52d48 1006 hlist_nulls_for_each_entry(h, n, &nf_conntrack_hash[reply_hash], hnnode)
86804348 1007 if (nf_ct_key_equal(h, &ct->tuplehash[IP_CT_DIR_REPLY].tuple,
e0c7d472 1008 zone, net))
df0933dc 1009 goto out;
9fb9cbb1 1010
df0933dc
PM
1011 /* Timer relative to confirmation time, not original
1012 setting time, otherwise we'd get timer wrap in
1013 weird delay cases. */
f330a7fd 1014 ct->timeout += nfct_time_stamp;
df0933dc 1015 atomic_inc(&ct->ct_general.use);
45eec341 1016 ct->status |= IPS_CONFIRMED;
5c8ec910 1017
a992ca2a
PNA
1018 /* set conntrack timestamp, if enabled. */
1019 tstamp = nf_conn_tstamp_find(ct);
916f6efa
FW
1020 if (tstamp)
1021 tstamp->start = ktime_get_real_ns();
a992ca2a 1022
5c8ec910
PM
1023 /* Since the lookup is lockless, hash insertion must be done after
1024 * starting the timer and setting the CONFIRMED bit. The RCU barriers
1025 * guarantee that no other CPU can find the conntrack before the above
1026 * stores are visible.
1027 */
b476b72a 1028 __nf_conntrack_hash_insert(ct, hash, reply_hash);
93bb0ceb 1029 nf_conntrack_double_unlock(hash, reply_hash);
93bb0ceb 1030 local_bh_enable();
5c8ec910 1031
df0933dc
PM
1032 help = nfct_help(ct);
1033 if (help && help->helper)
a71996fc 1034 nf_conntrack_event_cache(IPCT_HELPER, ct);
17e6e4ea 1035
df0933dc 1036 nf_conntrack_event_cache(master_ct(ct) ?
a71996fc 1037 IPCT_RELATED : IPCT_NEW, ct);
df0933dc 1038 return NF_ACCEPT;
9fb9cbb1 1039
df0933dc 1040out:
8ca3f5e9 1041 nf_ct_add_to_dying_list(ct);
71d8c47f
PNA
1042 ret = nf_ct_resolve_clash(net, skb, ctinfo, h);
1043dying:
93bb0ceb 1044 nf_conntrack_double_unlock(hash, reply_hash);
0d55af87 1045 NF_CT_STAT_INC(net, insert_failed);
93bb0ceb 1046 local_bh_enable();
71d8c47f 1047 return ret;
9fb9cbb1 1048}
13b18339 1049EXPORT_SYMBOL_GPL(__nf_conntrack_confirm);
9fb9cbb1
YK
1050
/* Returns true if a connection corresponds to the tuple (required
1052 for NAT). */
1053int
1054nf_conntrack_tuple_taken(const struct nf_conntrack_tuple *tuple,
1055 const struct nf_conn *ignored_conntrack)
1056{
400dad39 1057 struct net *net = nf_ct_net(ignored_conntrack);
308ac914 1058 const struct nf_conntrack_zone *zone;
9fb9cbb1 1059 struct nf_conntrack_tuple_hash *h;
5e3c61f9 1060 struct hlist_nulls_head *ct_hash;
92e47ba8 1061 unsigned int hash, hsize;
ea781f19 1062 struct hlist_nulls_node *n;
5d0aa2cc 1063 struct nf_conn *ct;
308ac914
DB
1064
1065 zone = nf_ct_zone(ignored_conntrack);
9fb9cbb1 1066
2cf12348 1067 rcu_read_lock();
95a8d19f 1068 begin:
92e47ba8
LZ
1069 nf_conntrack_get_ht(&ct_hash, &hsize);
1070 hash = __hash_conntrack(net, tuple, hsize);
5e3c61f9
FW
1071
1072 hlist_nulls_for_each_entry_rcu(h, n, &ct_hash[hash], hnnode) {
5d0aa2cc 1073 ct = nf_ct_tuplehash_to_ctrack(h);
f330a7fd
FW
1074
1075 if (ct == ignored_conntrack)
1076 continue;
1077
1078 if (nf_ct_is_expired(ct)) {
1079 nf_ct_gc_expired(ct);
1080 continue;
1081 }
1082
1083 if (nf_ct_key_equal(h, tuple, zone, net)) {
4e35c1cb
MP
1084 /* Tuple is taken already, so caller will need to find
1085 * a new source port to use.
1086 *
1087 * Only exception:
1088 * If the *original tuples* are identical, then both
1089 * conntracks refer to the same flow.
1090 * This is a rare situation, it can occur e.g. when
1091 * more than one UDP packet is sent from same socket
1092 * in different threads.
1093 *
1094 * Let nf_ct_resolve_clash() deal with this later.
1095 */
1096 if (nf_ct_tuple_equal(&ignored_conntrack->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
1097 &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple))
1098 continue;
1099
2cf12348
FW
1100 NF_CT_STAT_INC_ATOMIC(net, found);
1101 rcu_read_unlock();
ba419aff
PM
1102 return 1;
1103 }
ba419aff 1104 }
95a8d19f
FW
1105
1106 if (get_nulls_value(n) != hash) {
1107 NF_CT_STAT_INC_ATOMIC(net, search_restart);
1108 goto begin;
1109 }
1110
2cf12348 1111 rcu_read_unlock();
9fb9cbb1 1112
ba419aff 1113 return 0;
9fb9cbb1 1114}
13b18339 1115EXPORT_SYMBOL_GPL(nf_conntrack_tuple_taken);
9fb9cbb1 1116
7ae7730f
PM
1117#define NF_CT_EVICTION_RANGE 8
1118
9fb9cbb1
YK
1119/* There's a small race here where we may free a just-assured
1120 connection. Too bad: we're in trouble anyway. */
242922a0
FW
1121static unsigned int early_drop_list(struct net *net,
1122 struct hlist_nulls_head *head)
9fb9cbb1 1123{
9fb9cbb1 1124 struct nf_conntrack_tuple_hash *h;
ea781f19 1125 struct hlist_nulls_node *n;
242922a0
FW
1126 unsigned int drops = 0;
1127 struct nf_conn *tmp;
3e86638e 1128
242922a0
FW
1129 hlist_nulls_for_each_entry_rcu(h, n, head, hnnode) {
1130 tmp = nf_ct_tuplehash_to_ctrack(h);
9fb9cbb1 1131
90964016
PNA
1132 if (test_bit(IPS_OFFLOAD_BIT, &tmp->status))
1133 continue;
1134
f330a7fd
FW
1135 if (nf_ct_is_expired(tmp)) {
1136 nf_ct_gc_expired(tmp);
1137 continue;
1138 }
1139
242922a0
FW
1140 if (test_bit(IPS_ASSURED_BIT, &tmp->status) ||
1141 !net_eq(nf_ct_net(tmp), net) ||
1142 nf_ct_is_dying(tmp))
1143 continue;
76507f69 1144
242922a0
FW
1145 if (!atomic_inc_not_zero(&tmp->ct_general.use))
1146 continue;
76507f69 1147
242922a0 1148 /* kill only if still in same netns -- might have moved due to
5f0d5a3a 1149 * SLAB_TYPESAFE_BY_RCU rules.
242922a0
FW
1150 *
1151 * We steal the timer reference. If that fails timer has
1152 * already fired or someone else deleted it. Just drop ref
1153 * and move to next entry.
1154 */
1155 if (net_eq(nf_ct_net(tmp), net) &&
1156 nf_ct_is_confirmed(tmp) &&
242922a0
FW
1157 nf_ct_delete(tmp, 0, 0))
1158 drops++;
1159
1160 nf_ct_put(tmp);
9fb9cbb1 1161 }
3e86638e 1162
242922a0
FW
1163 return drops;
1164}
9fb9cbb1 1165
f393808d 1166static noinline int early_drop(struct net *net, unsigned int hash)
242922a0 1167{
f393808d 1168 unsigned int i, bucket;
9fb9cbb1 1169
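	/* Scan up to NF_CT_EVICTION_RANGE buckets, starting at the bucket
	 * this tuple hashes to, and evict entries that are expired or not
	 * yet assured; called when the table has hit nf_conntrack_max.
	 */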
242922a0
FW
1170 for (i = 0; i < NF_CT_EVICTION_RANGE; i++) {
1171 struct hlist_nulls_head *ct_hash;
f393808d 1172 unsigned int hsize, drops;
242922a0 1173
3101e0fc 1174 rcu_read_lock();
92e47ba8 1175 nf_conntrack_get_ht(&ct_hash, &hsize);
f393808d
VK
1176 if (!i)
1177 bucket = reciprocal_scale(hash, hsize);
1178 else
1179 bucket = (bucket + 1) % hsize;
242922a0 1180
f393808d 1181 drops = early_drop_list(net, &ct_hash[bucket]);
3101e0fc
LZ
1182 rcu_read_unlock();
1183
242922a0
FW
1184 if (drops) {
1185 NF_CT_STAT_ADD_ATOMIC(net, early_drop, drops);
1186 return true;
74138511 1187 }
9fb9cbb1 1188 }
3e86638e 1189
242922a0 1190 return false;
9fb9cbb1
YK
1191}
1192
c6dd940b
FW
1193static bool gc_worker_skip_ct(const struct nf_conn *ct)
1194{
1195 return !nf_ct_is_confirmed(ct) || nf_ct_is_dying(ct);
1196}
1197
1198static bool gc_worker_can_early_drop(const struct nf_conn *ct)
1199{
1200 const struct nf_conntrack_l4proto *l4proto;
1201
1202 if (!test_bit(IPS_ASSURED_BIT, &ct->status))
1203 return true;
1204
4a60dc74 1205 l4proto = nf_ct_l4proto_find(nf_ct_protonum(ct));
c6dd940b
FW
1206 if (l4proto->can_early_drop && l4proto->can_early_drop(ct))
1207 return true;
1208
1209 return false;
1210}
1211
90964016
PNA
1212#define DAY (86400 * HZ)
1213
/* Set an arbitrary timeout large enough not to ever expire, this saves
1215 * us a check for the IPS_OFFLOAD_BIT from the packet path via
1216 * nf_ct_is_expired().
1217 */
1218static void nf_ct_offload_timeout(struct nf_conn *ct)
1219{
1220 if (nf_ct_expires(ct) < DAY / 2)
1221 ct->timeout = nfct_time_stamp + DAY;
1222}
1223
b87a2f91
FW
1224static void gc_worker(struct work_struct *work)
1225{
e5072053 1226 unsigned int min_interval = max(HZ / GC_MAX_BUCKETS_DIV, 1u);
b87a2f91 1227 unsigned int i, goal, buckets = 0, expired_count = 0;
c6dd940b 1228 unsigned int nf_conntrack_max95 = 0;
b87a2f91 1229 struct conntrack_gc_work *gc_work;
e0df8cae
FW
1230 unsigned int ratio, scanned = 0;
1231 unsigned long next_run;
b87a2f91
FW
1232
1233 gc_work = container_of(work, struct conntrack_gc_work, dwork.work);
1234
e0df8cae 1235 goal = nf_conntrack_htable_size / GC_MAX_BUCKETS_DIV;
b87a2f91 1236 i = gc_work->last_bucket;
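	/* If an allocation recently failed because the table was full,
	 * also early-drop entries that are not assured (or that the l4
	 * tracker allows to be dropped) once a netns is above 95% of
	 * nf_conntrack_max.
	 */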
c6dd940b
FW
1237 if (gc_work->early_drop)
1238 nf_conntrack_max95 = nf_conntrack_max / 100u * 95u;
b87a2f91
FW
1239
1240 do {
1241 struct nf_conntrack_tuple_hash *h;
1242 struct hlist_nulls_head *ct_hash;
1243 struct hlist_nulls_node *n;
1244 unsigned int hashsz;
1245 struct nf_conn *tmp;
1246
1247 i++;
1248 rcu_read_lock();
1249
1250 nf_conntrack_get_ht(&ct_hash, &hashsz);
1251 if (i >= hashsz)
1252 i = 0;
1253
1254 hlist_nulls_for_each_entry_rcu(h, n, &ct_hash[i], hnnode) {
c6dd940b
FW
1255 struct net *net;
1256
b87a2f91
FW
1257 tmp = nf_ct_tuplehash_to_ctrack(h);
1258
c023c0e4 1259 scanned++;
90964016
PNA
1260 if (test_bit(IPS_OFFLOAD_BIT, &tmp->status)) {
1261 nf_ct_offload_timeout(tmp);
1262 continue;
1263 }
1264
b87a2f91
FW
1265 if (nf_ct_is_expired(tmp)) {
1266 nf_ct_gc_expired(tmp);
1267 expired_count++;
1268 continue;
1269 }
c6dd940b
FW
1270
1271 if (nf_conntrack_max95 == 0 || gc_worker_skip_ct(tmp))
1272 continue;
1273
1274 net = nf_ct_net(tmp);
1275 if (atomic_read(&net->ct.count) < nf_conntrack_max95)
1276 continue;
1277
1278 /* need to take reference to avoid possible races */
1279 if (!atomic_inc_not_zero(&tmp->ct_general.use))
1280 continue;
1281
1282 if (gc_worker_skip_ct(tmp)) {
1283 nf_ct_put(tmp);
1284 continue;
1285 }
1286
1287 if (gc_worker_can_early_drop(tmp))
1288 nf_ct_kill(tmp);
1289
1290 nf_ct_put(tmp);
b87a2f91
FW
1291 }
1292
1293 /* could check get_nulls_value() here and restart if ct
1294 * was moved to another chain. But given gc is best-effort
1295 * we will just continue with next hash slot.
1296 */
1297 rcu_read_unlock();
ffa53c58 1298 cond_resched();
524b698d 1299 } while (++buckets < goal);
b87a2f91
FW
1300
1301 if (gc_work->exiting)
1302 return;
1303
e0df8cae
FW
1304 /*
1305 * Eviction will normally happen from the packet path, and not
1306 * from this gc worker.
1307 *
1308 * This worker is only here to reap expired entries when system went
1309 * idle after a busy period.
1310 *
1311 * The heuristics below are supposed to balance conflicting goals:
1312 *
1313 * 1. Minimize time until we notice a stale entry
1314 * 2. Maximize scan intervals to not waste cycles
1315 *
e5072053 1316 * Normally, expire ratio will be close to 0.
e0df8cae 1317 *
e5072053
FW
1318 * As soon as a sizeable fraction of the entries have expired
1319 * increase scan frequency.
e0df8cae 1320 */
c023c0e4 1321 ratio = scanned ? expired_count * 100 / scanned : 0;
e5072053
FW
1322 if (ratio > GC_EVICT_RATIO) {
1323 gc_work->next_gc_run = min_interval;
e0df8cae 1324 } else {
e5072053 1325 unsigned int max = GC_MAX_SCAN_JIFFIES / GC_MAX_BUCKETS_DIV;
e0df8cae 1326
e5072053
FW
1327 BUILD_BUG_ON((GC_MAX_SCAN_JIFFIES / GC_MAX_BUCKETS_DIV) == 0);
1328
1329 gc_work->next_gc_run += min_interval;
1330 if (gc_work->next_gc_run > max)
1331 gc_work->next_gc_run = max;
e0df8cae 1332 }
c023c0e4 1333
e5072053 1334 next_run = gc_work->next_gc_run;
b87a2f91 1335 gc_work->last_bucket = i;
c6dd940b 1336 gc_work->early_drop = false;
0984d427 1337 queue_delayed_work(system_power_efficient_wq, &gc_work->dwork, next_run);
b87a2f91
FW
1338}
1339
1340static void conntrack_gc_work_init(struct conntrack_gc_work *gc_work)
1341{
a232cd0e 1342 INIT_DEFERRABLE_WORK(&gc_work->dwork, gc_worker);
e5072053 1343 gc_work->next_gc_run = HZ;
b87a2f91
FW
1344 gc_work->exiting = false;
1345}
1346
99f07e91 1347static struct nf_conn *
308ac914
DB
1348__nf_conntrack_alloc(struct net *net,
1349 const struct nf_conntrack_zone *zone,
99f07e91
CG
1350 const struct nf_conntrack_tuple *orig,
1351 const struct nf_conntrack_tuple *repl,
1352 gfp_t gfp, u32 hash)
9fb9cbb1 1353{
cd7fcbf1 1354 struct nf_conn *ct;
9fb9cbb1 1355
5251e2d2 1356 /* We don't want any race condition at early drop stage */
49ac8713 1357 atomic_inc(&net->ct.count);
5251e2d2 1358
76eb9460 1359 if (nf_conntrack_max &&
49ac8713 1360 unlikely(atomic_read(&net->ct.count) > nf_conntrack_max)) {
93bb0ceb 1361 if (!early_drop(net, hash)) {
c6dd940b
FW
1362 if (!conntrack_gc_work.early_drop)
1363 conntrack_gc_work.early_drop = true;
49ac8713 1364 atomic_dec(&net->ct.count);
e87cc472 1365 net_warn_ratelimited("nf_conntrack: table full, dropping packet\n");
9fb9cbb1
YK
1366 return ERR_PTR(-ENOMEM);
1367 }
1368 }
1369
941297f4
ED
1370 /*
1371 * Do not use kmem_cache_zalloc(), as this cache uses
5f0d5a3a 1372 * SLAB_TYPESAFE_BY_RCU.
941297f4 1373 */
0c5366b3 1374 ct = kmem_cache_alloc(nf_conntrack_cachep, gfp);
5e8018fc
DB
1375 if (ct == NULL)
1376 goto out;
1377
440f0d58 1378 spin_lock_init(&ct->lock);
c88130bc 1379 ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple = *orig;
941297f4 1380 ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode.pprev = NULL;
c88130bc 1381 ct->tuplehash[IP_CT_DIR_REPLY].tuple = *repl;
99f07e91
CG
1382 /* save hash for reusing when confirming */
1383 *(unsigned long *)(&ct->tuplehash[IP_CT_DIR_REPLY].hnnode.pprev) = hash;
c41884ce 1384 ct->status = 0;
8176c833 1385 ct->timeout = 0;
c2d9ba9b 1386 write_pnet(&ct->ct_net, net);
c41884ce
FW
1387 memset(&ct->__nfct_init_offset[0], 0,
1388 offsetof(struct nf_conn, proto) -
1389 offsetof(struct nf_conn, __nfct_init_offset[0]));
5e8018fc 1390
6c8dee98 1391 nf_ct_zone_add(ct, zone);
5e8018fc 1392
e53376be
PNA
1393 /* Because we use RCU lookups, we set ct_general.use to zero before
1394 * this is inserted in any list.
941297f4 1395 */
e53376be 1396 atomic_set(&ct->ct_general.use, 0);
c88130bc 1397 return ct;
5e8018fc
DB
1398out:
1399 atomic_dec(&net->ct.count);
5d0aa2cc 1400 return ERR_PTR(-ENOMEM);
9fb9cbb1 1401}
99f07e91 1402
308ac914
DB
1403struct nf_conn *nf_conntrack_alloc(struct net *net,
1404 const struct nf_conntrack_zone *zone,
99f07e91
CG
1405 const struct nf_conntrack_tuple *orig,
1406 const struct nf_conntrack_tuple *repl,
1407 gfp_t gfp)
1408{
1409 return __nf_conntrack_alloc(net, zone, orig, repl, gfp, 0);
1410}
13b18339 1411EXPORT_SYMBOL_GPL(nf_conntrack_alloc);
9fb9cbb1 1412
c88130bc 1413void nf_conntrack_free(struct nf_conn *ct)
76507f69 1414{
1d45209d
ED
1415 struct net *net = nf_ct_net(ct);
1416
e53376be 1417 /* A freed object has refcnt == 0, that's
5f0d5a3a 1418 * the golden rule for SLAB_TYPESAFE_BY_RCU
e53376be 1419 */
44d6e2f2 1420 WARN_ON(atomic_read(&ct->ct_general.use) != 0);
e53376be 1421
ceeff754 1422 nf_ct_ext_destroy(ct);
ea781f19 1423 nf_ct_ext_free(ct);
0c5366b3 1424 kmem_cache_free(nf_conntrack_cachep, ct);
4e857c58 1425 smp_mb__before_atomic();
0c3c6c00 1426 atomic_dec(&net->ct.count);
76507f69 1427}
13b18339 1428EXPORT_SYMBOL_GPL(nf_conntrack_free);
9fb9cbb1 1429
c539f017 1430
9fb9cbb1
YK
1431/* Allocate a new conntrack: we return -ENOMEM if classification
1432 failed due to stress. Otherwise it really is unclassifiable. */
fc09e4a7 1433static noinline struct nf_conntrack_tuple_hash *
b2a15a60 1434init_conntrack(struct net *net, struct nf_conn *tmpl,
5a1fb391 1435 const struct nf_conntrack_tuple *tuple,
9fb9cbb1 1436 struct sk_buff *skb,
60b5f8f7 1437 unsigned int dataoff, u32 hash)
9fb9cbb1 1438{
c88130bc 1439 struct nf_conn *ct;
3c158f7f 1440 struct nf_conn_help *help;
9fb9cbb1 1441 struct nf_conntrack_tuple repl_tuple;
b2a15a60 1442 struct nf_conntrack_ecache *ecache;
ca7433df 1443 struct nf_conntrack_expect *exp = NULL;
308ac914 1444 const struct nf_conntrack_zone *zone;
60b5f8f7 1445 struct nf_conn_timeout *timeout_ext;
5e8018fc 1446 struct nf_conntrack_zone tmp;
9fb9cbb1 1447
303e0c55 1448 if (!nf_ct_invert_tuple(&repl_tuple, tuple)) {
0d53778e 1449 pr_debug("Can't invert tuple.\n");
9fb9cbb1
YK
1450 return NULL;
1451 }
1452
5e8018fc 1453 zone = nf_ct_zone_tmpl(tmpl, skb, &tmp);
99f07e91
CG
1454 ct = __nf_conntrack_alloc(net, zone, tuple, &repl_tuple, GFP_ATOMIC,
1455 hash);
0a9ee813 1456 if (IS_ERR(ct))
c88130bc 1457 return (struct nf_conntrack_tuple_hash *)ct;
9fb9cbb1 1458
4440a2ab
GF
1459 if (!nf_ct_add_synproxy(ct, tmpl)) {
1460 nf_conntrack_free(ct);
1461 return ERR_PTR(-ENOMEM);
48b1de4c
PM
1462 }
1463
60b5f8f7 1464 timeout_ext = tmpl ? nf_ct_timeout_find(tmpl) : NULL;
60b5f8f7 1465
60b5f8f7 1466 if (timeout_ext)
ae2d708e
PNA
1467 nf_ct_timeout_ext_add(ct, rcu_dereference(timeout_ext->timeout),
1468 GFP_ATOMIC);
60b5f8f7 1469
58401572 1470 nf_ct_acct_ext_add(ct, GFP_ATOMIC);
a992ca2a 1471 nf_ct_tstamp_ext_add(ct, GFP_ATOMIC);
c539f017 1472 nf_ct_labels_ext_add(ct);
b2a15a60
PM
1473
1474 ecache = tmpl ? nf_ct_ecache_find(tmpl) : NULL;
1475 nf_ct_ecache_ext_add(ct, ecache ? ecache->ctmask : 0,
1476 ecache ? ecache->expmask : 0,
1477 GFP_ATOMIC);
58401572 1478
ca7433df
JDB
1479 local_bh_disable();
1480 if (net->ct.expect_count) {
1481 spin_lock(&nf_conntrack_expect_lock);
1482 exp = nf_ct_find_expectation(net, zone, tuple);
1483 if (exp) {
ccd63c20 1484 pr_debug("expectation arrives ct=%p exp=%p\n",
ca7433df
JDB
1485 ct, exp);
1486 /* Welcome, Mr. Bond. We've been expecting you... */
1487 __set_bit(IPS_EXPECTED_BIT, &ct->status);
1488 /* exp->master safe, refcnt bumped in nf_ct_find_expectation */
1489 ct->master = exp->master;
1490 if (exp->helper) {
440534d3 1491 help = nf_ct_helper_ext_add(ct, GFP_ATOMIC);
ca7433df
JDB
1492 if (help)
1493 rcu_assign_pointer(help->helper, exp->helper);
1494 }
ceceae1b 1495
9fb9cbb1 1496#ifdef CONFIG_NF_CONNTRACK_MARK
ca7433df 1497 ct->mark = exp->master->mark;
7c9728c3
JM
1498#endif
1499#ifdef CONFIG_NF_CONNTRACK_SECMARK
ca7433df 1500 ct->secmark = exp->master->secmark;
9fb9cbb1 1501#endif
ca7433df
JDB
1502 NF_CT_STAT_INC(net, expect_new);
1503 }
1504 spin_unlock(&nf_conntrack_expect_lock);
1505 }
8e8118f8 1506 if (!exp)
b2a15a60 1507 __nf_ct_try_assign_helper(ct, tmpl, GFP_ATOMIC);
9fb9cbb1 1508
e53376be
PNA
1509 /* Now it is inserted into the unconfirmed list, bump refcount */
1510 nf_conntrack_get(&ct->ct_general);
b7779d06 1511 nf_ct_add_to_unconfirmed_list(ct);
9fb9cbb1 1512
ca7433df 1513 local_bh_enable();
9fb9cbb1
YK
1514
1515 if (exp) {
1516 if (exp->expectfn)
c88130bc 1517 exp->expectfn(ct, exp);
6823645d 1518 nf_ct_expect_put(exp);
9fb9cbb1
YK
1519 }
1520
c88130bc 1521 return &ct->tuplehash[IP_CT_DIR_ORIGINAL];
9fb9cbb1
YK
1522}
1523
fc09e4a7
FW
1524/* On success, returns 0, sets skb->_nfct | ctinfo */
1525static int
93e66024 1526resolve_normal_ct(struct nf_conn *tmpl,
a702a65f 1527 struct sk_buff *skb,
9fb9cbb1 1528 unsigned int dataoff,
9fb9cbb1 1529 u_int8_t protonum,
93e66024 1530 const struct nf_hook_state *state)
9fb9cbb1 1531{
308ac914 1532 const struct nf_conntrack_zone *zone;
9fb9cbb1
YK
1533 struct nf_conntrack_tuple tuple;
1534 struct nf_conntrack_tuple_hash *h;
fc09e4a7 1535 enum ip_conntrack_info ctinfo;
5e8018fc 1536 struct nf_conntrack_zone tmp;
9fb9cbb1 1537 struct nf_conn *ct;
99f07e91 1538 u32 hash;
9fb9cbb1 1539
bbe735e4 1540 if (!nf_ct_get_tuple(skb, skb_network_offset(skb),
93e66024 1541 dataoff, state->pf, protonum, state->net,
303e0c55 1542 &tuple)) {
ccd63c20 1543 pr_debug("Can't get tuple\n");
fc09e4a7 1544 return 0;
9fb9cbb1
YK
1545 }
1546
1547 /* look for tuple match */
5e8018fc 1548 zone = nf_ct_zone_tmpl(tmpl, skb, &tmp);
93e66024
FW
1549 hash = hash_conntrack_raw(&tuple, state->net);
1550 h = __nf_conntrack_find_get(state->net, zone, &tuple, hash);
9fb9cbb1 1551 if (!h) {
303e0c55 1552 h = init_conntrack(state->net, tmpl, &tuple,
60b5f8f7 1553 skb, dataoff, hash);
9fb9cbb1 1554 if (!h)
fc09e4a7 1555 return 0;
9fb9cbb1 1556 if (IS_ERR(h))
fc09e4a7 1557 return PTR_ERR(h);
9fb9cbb1
YK
1558 }
1559 ct = nf_ct_tuplehash_to_ctrack(h);
1560
1561 /* It exists; we have (non-exclusive) reference. */
1562 if (NF_CT_DIRECTION(h) == IP_CT_DIR_REPLY) {
fc09e4a7 1563 ctinfo = IP_CT_ESTABLISHED_REPLY;
9fb9cbb1
YK
1564 } else {
1565 /* Once we've had two way comms, always ESTABLISHED. */
1566 if (test_bit(IPS_SEEN_REPLY_BIT, &ct->status)) {
ccd63c20 1567 pr_debug("normal packet for %p\n", ct);
fc09e4a7 1568 ctinfo = IP_CT_ESTABLISHED;
9fb9cbb1 1569 } else if (test_bit(IPS_EXPECTED_BIT, &ct->status)) {
ccd63c20 1570 pr_debug("related packet for %p\n", ct);
fc09e4a7 1571 ctinfo = IP_CT_RELATED;
9fb9cbb1 1572 } else {
ccd63c20 1573 pr_debug("new packet for %p\n", ct);
fc09e4a7 1574 ctinfo = IP_CT_NEW;
9fb9cbb1 1575 }
9fb9cbb1 1576 }
fc09e4a7
FW
1577 nf_ct_set(skb, ct, ctinfo);
1578 return 0;
9fb9cbb1
YK
1579}
1580
6fe78fa4
FW
1581/*
1582 * icmp packets need special treatment to handle error messages that are
1583 * related to a connection.
1584 *
1585 * Callers need to check if skb has a conntrack assigned when this
1586 * helper returns; in such case skb belongs to an already known connection.
1587 */
1588static unsigned int __cold
1589nf_conntrack_handle_icmp(struct nf_conn *tmpl,
1590 struct sk_buff *skb,
1591 unsigned int dataoff,
1592 u8 protonum,
1593 const struct nf_hook_state *state)
1594{
1595 int ret;
1596
1597 if (state->pf == NFPROTO_IPV4 && protonum == IPPROTO_ICMP)
1598 ret = nf_conntrack_icmpv4_error(tmpl, skb, dataoff, state);
1599#if IS_ENABLED(CONFIG_IPV6)
1600 else if (state->pf == NFPROTO_IPV6 && protonum == IPPROTO_ICMPV6)
1601 ret = nf_conntrack_icmpv6_error(tmpl, skb, dataoff, state);
1602#endif
1603 else
1604 return NF_ACCEPT;
1605
1606 if (ret <= 0) {
1607 NF_CT_STAT_INC_ATOMIC(state->net, error);
1608 NF_CT_STAT_INC_ATOMIC(state->net, invalid);
1609 }
1610
1611 return ret;
1612}
1613
44fb87f6
FW
1614static int generic_packet(struct nf_conn *ct, struct sk_buff *skb,
1615 enum ip_conntrack_info ctinfo)
1616{
1617 const unsigned int *timeout = nf_ct_timeout_lookup(ct);
1618
1619 if (!timeout)
1620 timeout = &nf_generic_pernet(nf_ct_net(ct))->timeout;
1621
1622 nf_ct_refresh_acct(ct, ctinfo, skb, *timeout);
1623 return NF_ACCEPT;
1624}
1625
a47c5404
FW
1626/* Returns verdict for packet, or -1 for invalid. */
1627static int nf_conntrack_handle_packet(struct nf_conn *ct,
1628 struct sk_buff *skb,
1629 unsigned int dataoff,
1630 enum ip_conntrack_info ctinfo,
1631 const struct nf_hook_state *state)
1632{
1633 switch (nf_ct_protonum(ct)) {
1634 case IPPROTO_TCP:
1635 return nf_conntrack_tcp_packet(ct, skb, dataoff,
1636 ctinfo, state);
1637 case IPPROTO_UDP:
1638 return nf_conntrack_udp_packet(ct, skb, dataoff,
1639 ctinfo, state);
1640 case IPPROTO_ICMP:
1641 return nf_conntrack_icmp_packet(ct, skb, ctinfo, state);
81e01647 1642#if IS_ENABLED(CONFIG_IPV6)
a47c5404
FW
1643 case IPPROTO_ICMPV6:
1644 return nf_conntrack_icmpv6_packet(ct, skb, ctinfo, state);
81e01647 1645#endif
a47c5404
FW
1646#ifdef CONFIG_NF_CT_PROTO_UDPLITE
1647 case IPPROTO_UDPLITE:
1648 return nf_conntrack_udplite_packet(ct, skb, dataoff,
1649 ctinfo, state);
1650#endif
1651#ifdef CONFIG_NF_CT_PROTO_SCTP
1652 case IPPROTO_SCTP:
1653 return nf_conntrack_sctp_packet(ct, skb, dataoff,
1654 ctinfo, state);
1655#endif
1656#ifdef CONFIG_NF_CT_PROTO_DCCP
1657 case IPPROTO_DCCP:
1658 return nf_conntrack_dccp_packet(ct, skb, dataoff,
1659 ctinfo, state);
44fb87f6
FW
1660#endif
1661#ifdef CONFIG_NF_CT_PROTO_GRE
1662 case IPPROTO_GRE:
1663 return nf_conntrack_gre_packet(ct, skb, dataoff,
1664 ctinfo, state);
a47c5404
FW
1665#endif
1666 }
1667
44fb87f6 1668 return generic_packet(ct, skb, ctinfo);
a47c5404
FW
1669}
1670
9fb9cbb1 1671unsigned int
93e66024 1672nf_conntrack_in(struct sk_buff *skb, const struct nf_hook_state *state)
9fb9cbb1 1673{
9fb9cbb1 1674 enum ip_conntrack_info ctinfo;
93e66024 1675 struct nf_conn *ct, *tmpl;
9fb9cbb1 1676 u_int8_t protonum;
6816d931 1677 int dataoff, ret;
9fb9cbb1 1678
97a6ad13 1679 tmpl = nf_ct_get(skb, &ctinfo);
cc41c84b 1680 if (tmpl || ctinfo == IP_CT_UNTRACKED) {
b2a15a60 1681 /* Previously seen (loopback or untracked)? Ignore. */
cc41c84b
FW
1682 if ((tmpl && !nf_ct_is_template(tmpl)) ||
1683 ctinfo == IP_CT_UNTRACKED) {
93e66024 1684 NF_CT_STAT_INC_ATOMIC(state->net, ignore);
b2a15a60
PM
1685 return NF_ACCEPT;
1686 }
a9e419dc 1687 skb->_nfct = 0;
9fb9cbb1
YK
1688 }
1689
e2361cb9 1690 /* rcu_read_lock()ed by nf_hook_thresh */
93e66024 1691 dataoff = get_l4proto(skb, skb_network_offset(skb), state->pf, &protonum);
6816d931 1692 if (dataoff <= 0) {
25985edc 1693 pr_debug("not prepared to track yet or error occurred\n");
93e66024
FW
1694 NF_CT_STAT_INC_ATOMIC(state->net, error);
1695 NF_CT_STAT_INC_ATOMIC(state->net, invalid);
6816d931 1696 ret = NF_ACCEPT;
b2a15a60 1697 goto out;
9fb9cbb1
YK
1698 }
1699
6fe78fa4
FW
1700 if (protonum == IPPROTO_ICMP || protonum == IPPROTO_ICMPV6) {
1701 ret = nf_conntrack_handle_icmp(tmpl, skb, dataoff,
1702 protonum, state);
74c51a14 1703 if (ret <= 0) {
b2a15a60
PM
1704 ret = -ret;
1705 goto out;
74c51a14 1706 }
88ed01d1 1707 /* ICMP[v6] protocol trackers may assign one conntrack. */
a9e419dc 1708 if (skb->_nfct)
88ed01d1 1709 goto out;
9fb9cbb1 1710 }
08733a0c 1711repeat:
93e66024 1712 ret = resolve_normal_ct(tmpl, skb, dataoff,
303e0c55 1713 protonum, state);
fc09e4a7 1714 if (ret < 0) {
9fb9cbb1 1715 /* Too stressed to deal. */
93e66024 1716 NF_CT_STAT_INC_ATOMIC(state->net, drop);
b2a15a60
PM
1717 ret = NF_DROP;
1718 goto out;
9fb9cbb1
YK
1719 }
1720
fc09e4a7
FW
1721 ct = nf_ct_get(skb, &ctinfo);
1722 if (!ct) {
1723 /* Not valid part of a connection */
93e66024 1724 NF_CT_STAT_INC_ATOMIC(state->net, invalid);
fc09e4a7
FW
1725 ret = NF_ACCEPT;
1726 goto out;
1727 }
9fb9cbb1 1728
44fb87f6 1729 ret = nf_conntrack_handle_packet(ct, skb, dataoff, ctinfo, state);
ec8d5409 1730 if (ret <= 0) {
9fb9cbb1
YK
1731 /* Invalid: inverse of the return code tells
1732 * the netfilter core what to do */
0d53778e 1733 pr_debug("nf_conntrack_in: Can't track with proto module\n");
97a6ad13 1734 nf_conntrack_put(&ct->ct_general);
a9e419dc 1735 skb->_nfct = 0;
93e66024 1736 NF_CT_STAT_INC_ATOMIC(state->net, invalid);
7d1e0459 1737 if (ret == -NF_DROP)
93e66024 1738 NF_CT_STAT_INC_ATOMIC(state->net, drop);
56a62e22
AB
1739 /* Special case: TCP tracker reports an attempt to reopen a
1740 * closed/aborted connection. We have to go back and create a
1741 * fresh conntrack.
1742 */
1743 if (ret == -NF_REPEAT)
1744 goto repeat;
b2a15a60
PM
1745 ret = -ret;
1746 goto out;
9fb9cbb1
YK
1747 }
1748
fc09e4a7
FW
1749 if (ctinfo == IP_CT_ESTABLISHED_REPLY &&
1750 !test_and_set_bit(IPS_SEEN_REPLY_BIT, &ct->status))
858b3133 1751 nf_conntrack_event_cache(IPCT_REPLY, ct);
b2a15a60 1752out:
56a62e22
AB
1753 if (tmpl)
1754 nf_ct_put(tmpl);
9fb9cbb1
YK
1755
1756 return ret;
1757}
13b18339 1758EXPORT_SYMBOL_GPL(nf_conntrack_in);
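/* Illustrative sketch, not from this file: how a netfilter hook entry point
 * typically hands packets to nf_conntrack_in().  The hook function name and
 * its registration are hypothetical; only the call itself comes from the
 * export above.
 */
#if 0	/* example only */
static unsigned int example_conntrack_in(void *priv, struct sk_buff *skb,
					 const struct nf_hook_state *state)
{
	/* attaches/updates skb->_nfct and returns an NF_* verdict */
	return nf_conntrack_in(skb, state);
}
#endif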
9fb9cbb1 1759
5b1158e9
JK
1760/* Alter reply tuple (maybe alter helper). This is for NAT, and is
1761 implicitly racy: see __nf_conntrack_confirm */
1762void nf_conntrack_alter_reply(struct nf_conn *ct,
1763 const struct nf_conntrack_tuple *newreply)
1764{
1765 struct nf_conn_help *help = nfct_help(ct);
1766
5b1158e9 1767 /* Should be unconfirmed, so not in hash table yet */
44d6e2f2 1768 WARN_ON(nf_ct_is_confirmed(ct));
5b1158e9 1769
0d53778e 1770 pr_debug("Altering reply tuple of %p to ", ct);
3c9fba65 1771 nf_ct_dump_tuple(newreply);
5b1158e9
JK
1772
1773 ct->tuplehash[IP_CT_DIR_REPLY].tuple = *newreply;
ef1a5a50 1774 if (ct->master || (help && !hlist_empty(&help->expectations)))
c52fbb41 1775 return;
ceceae1b 1776
c52fbb41 1777 rcu_read_lock();
b2a15a60 1778 __nf_ct_try_assign_helper(ct, NULL, GFP_ATOMIC);
c52fbb41 1779 rcu_read_unlock();
5b1158e9 1780}
13b18339 1781EXPORT_SYMBOL_GPL(nf_conntrack_alter_reply);
5b1158e9 1782
9fb9cbb1
YK
1783/* Refresh conntrack for this many jiffies and do accounting if do_acct is true */
1784void __nf_ct_refresh_acct(struct nf_conn *ct,
1785 enum ip_conntrack_info ctinfo,
1786 const struct sk_buff *skb,
cc169213
FW
1787 u32 extra_jiffies,
1788 bool do_acct)
9fb9cbb1 1789{
997ae831 1790 /* Only update if this is not a fixed timeout */
47d95045
PM
1791 if (test_bit(IPS_FIXED_TIMEOUT_BIT, &ct->status))
1792 goto acct;
997ae831 1793
9fb9cbb1 1794 /* If not in hash table, timer will not be active yet */
f330a7fd
FW
1795 if (nf_ct_is_confirmed(ct))
1796 extra_jiffies += nfct_time_stamp;
9fb9cbb1 1797
cc169213
FW
1798 if (ct->timeout != extra_jiffies)
1799 ct->timeout = extra_jiffies;
47d95045 1800acct:
ba76738c
PNA
1801 if (do_acct)
1802 nf_ct_acct_update(ct, ctinfo, skb->len);
9fb9cbb1 1803}
13b18339 1804EXPORT_SYMBOL_GPL(__nf_ct_refresh_acct);
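/* Illustrative sketch, not from this file: the header-side convenience
 * wrapper is assumed to look roughly like this, which is how callers such as
 * generic_packet() above end up in __nf_ct_refresh_acct() with
 * do_acct == true.
 */
#if 0	/* example only, assumed header helper */
static inline void nf_ct_refresh_acct(struct nf_conn *ct,
				      enum ip_conntrack_info ctinfo,
				      const struct sk_buff *skb,
				      u32 extra_jiffies)
{
	__nf_ct_refresh_acct(ct, ctinfo, skb, extra_jiffies, true);
}
#endif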
9fb9cbb1 1805
ad66713f
FW
1806bool nf_ct_kill_acct(struct nf_conn *ct,
1807 enum ip_conntrack_info ctinfo,
1808 const struct sk_buff *skb)
51091764 1809{
ad66713f 1810 nf_ct_acct_update(ct, ctinfo, skb->len);
58401572 1811
f330a7fd 1812 return nf_ct_delete(ct, 0, 0);
51091764 1813}
ad66713f 1814EXPORT_SYMBOL_GPL(nf_ct_kill_acct);
51091764 1815
c0cd1156 1816#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
c1d10adb
PNA
1817
1818#include <linux/netfilter/nfnetlink.h>
1819#include <linux/netfilter/nfnetlink_conntrack.h>
57b47a53
IM
1820#include <linux/mutex.h>
1821
c1d10adb
PNA
1822/* Generic function for tcp/udp/sctp/dccp and the like. This needs to be
1823 * in nf_conntrack_core, since we don't want the protocols to autoload
1824 * or depend on ctnetlink */
fdf70832 1825int nf_ct_port_tuple_to_nlattr(struct sk_buff *skb,
c1d10adb
PNA
1826 const struct nf_conntrack_tuple *tuple)
1827{
bae65be8
DM
1828 if (nla_put_be16(skb, CTA_PROTO_SRC_PORT, tuple->src.u.tcp.port) ||
1829 nla_put_be16(skb, CTA_PROTO_DST_PORT, tuple->dst.u.tcp.port))
1830 goto nla_put_failure;
c1d10adb
PNA
1831 return 0;
1832
df6fb868 1833nla_put_failure:
c1d10adb
PNA
1834 return -1;
1835}
fdf70832 1836EXPORT_SYMBOL_GPL(nf_ct_port_tuple_to_nlattr);
c1d10adb 1837
f73e924c
PM
1838const struct nla_policy nf_ct_port_nla_policy[CTA_PROTO_MAX+1] = {
1839 [CTA_PROTO_SRC_PORT] = { .type = NLA_U16 },
1840 [CTA_PROTO_DST_PORT] = { .type = NLA_U16 },
c1d10adb 1841};
f73e924c 1842EXPORT_SYMBOL_GPL(nf_ct_port_nla_policy);
c1d10adb 1843
fdf70832 1844int nf_ct_port_nlattr_to_tuple(struct nlattr *tb[],
c1d10adb
PNA
1845 struct nf_conntrack_tuple *t)
1846{
df6fb868 1847 if (!tb[CTA_PROTO_SRC_PORT] || !tb[CTA_PROTO_DST_PORT])
c1d10adb
PNA
1848 return -EINVAL;
1849
77236b6e
PM
1850 t->src.u.tcp.port = nla_get_be16(tb[CTA_PROTO_SRC_PORT]);
1851 t->dst.u.tcp.port = nla_get_be16(tb[CTA_PROTO_DST_PORT]);
c1d10adb
PNA
1852
1853 return 0;
1854}
fdf70832 1855EXPORT_SYMBOL_GPL(nf_ct_port_nlattr_to_tuple);
5c0de29d 1856
5caaed15 1857unsigned int nf_ct_port_nlattr_tuple_size(void)
5c0de29d 1858{
5caaed15
FW
1859 static unsigned int size __read_mostly;
1860
1861 if (!size)
1862 size = nla_policy_len(nf_ct_port_nla_policy, CTA_PROTO_MAX + 1);
1863
1864 return size;
5c0de29d
HE
1865}
1866EXPORT_SYMBOL_GPL(nf_ct_port_nlattr_tuple_size);
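/* Illustrative sketch, not from this file: a port-based protocol tracker is
 * assumed to plug the generic helpers above into its nf_conntrack_l4proto
 * ops (field names are those of the CONFIG_NF_CT_NETLINK section of that
 * structure); the ops instance below is hypothetical.
 */
#if 0	/* example only */
static const struct nf_conntrack_l4proto example_port_l4proto = {
	.l4proto		= IPPROTO_UDP,
	.tuple_to_nlattr	= nf_ct_port_tuple_to_nlattr,
	.nlattr_tuple_size	= nf_ct_port_nlattr_tuple_size,
	.nlattr_to_tuple	= nf_ct_port_nlattr_to_tuple,
	.nla_policy		= nf_ct_port_nla_policy,
};
#endif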
c1d10adb
PNA
1867#endif
1868
9fb9cbb1 1869/* Used by ipt_REJECT and ip6t_REJECT. */
312a0c16 1870static void nf_conntrack_attach(struct sk_buff *nskb, const struct sk_buff *skb)
9fb9cbb1
YK
1871{
1872 struct nf_conn *ct;
1873 enum ip_conntrack_info ctinfo;
1874
1875 /* This ICMP is in the reverse direction to the packet which caused it */
1876 ct = nf_ct_get(skb, &ctinfo);
1877 if (CTINFO2DIR(ctinfo) == IP_CT_DIR_ORIGINAL)
fb048833 1878 ctinfo = IP_CT_RELATED_REPLY;
9fb9cbb1
YK
1879 else
1880 ctinfo = IP_CT_RELATED;
1881
1882 /* Attach to new skbuff, and increment count */
c74454fa 1883 nf_ct_set(nskb, ct, ctinfo);
cb9c6836 1884 nf_conntrack_get(skb_nfct(nskb));
9fb9cbb1
YK
1885}
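/* Note: the REJECT targets do not call this directly; the function is
 * published below via RCU_INIT_POINTER(ip_ct_attach, nf_conntrack_attach)
 * in nf_conntrack_init_end() and reached through the nf_ct_attach() wrapper
 * in the netfilter core.
 */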
1886
368982cd
PNA
1887static int nf_conntrack_update(struct net *net, struct sk_buff *skb)
1888{
368982cd
PNA
1889 struct nf_conntrack_tuple_hash *h;
1890 struct nf_conntrack_tuple tuple;
1891 enum ip_conntrack_info ctinfo;
1892 struct nf_nat_hook *nat_hook;
6816d931 1893 unsigned int status;
368982cd 1894 struct nf_conn *ct;
6816d931 1895 int dataoff;
368982cd
PNA
1896 u16 l3num;
1897 u8 l4num;
1898
1899 ct = nf_ct_get(skb, &ctinfo);
1900 if (!ct || nf_ct_is_confirmed(ct))
1901 return 0;
1902
1903 l3num = nf_ct_l3num(ct);
368982cd 1904
6816d931
FW
1905 dataoff = get_l4proto(skb, skb_network_offset(skb), l3num, &l4num);
1906 if (dataoff <= 0)
368982cd
PNA
1907 return -1;
1908
368982cd 1909 if (!nf_ct_get_tuple(skb, skb_network_offset(skb), dataoff, l3num,
303e0c55 1910 l4num, net, &tuple))
368982cd
PNA
1911 return -1;
1912
1913 if (ct->status & IPS_SRC_NAT) {
1914 memcpy(tuple.src.u3.all,
1915 ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u3.all,
1916 sizeof(tuple.src.u3.all));
1917 tuple.src.u.all =
1918 ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u.all;
1919 }
1920
1921 if (ct->status & IPS_DST_NAT) {
1922 memcpy(tuple.dst.u3.all,
1923 ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.u3.all,
1924 sizeof(tuple.dst.u3.all));
1925 tuple.dst.u.all =
1926 ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.u.all;
1927 }
1928
1929 h = nf_conntrack_find_get(net, nf_ct_zone(ct), &tuple);
1930 if (!h)
1931 return 0;
1932
1933 /* Store status bits of the conntrack that is clashing to re-do NAT
1934 * mangling according to what has already been done to this packet.
1935 */
1936 status = ct->status;
1937
1938 nf_ct_put(ct);
1939 ct = nf_ct_tuplehash_to_ctrack(h);
1940 nf_ct_set(skb, ct, ctinfo);
1941
1942 nat_hook = rcu_dereference(nf_nat_hook);
1943 if (!nat_hook)
1944 return 0;
1945
1946 if (status & IPS_SRC_NAT &&
1947 nat_hook->manip_pkt(skb, ct, NF_NAT_MANIP_SRC,
1948 IP_CT_DIR_ORIGINAL) == NF_DROP)
1949 return -1;
1950
1951 if (status & IPS_DST_NAT &&
1952 nat_hook->manip_pkt(skb, ct, NF_NAT_MANIP_DST,
1953 IP_CT_DIR_ORIGINAL) == NF_DROP)
1954 return -1;
1955
1956 return 0;
1957}
1958
b60a6040
THJ
1959static bool nf_conntrack_get_tuple_skb(struct nf_conntrack_tuple *dst_tuple,
1960 const struct sk_buff *skb)
1961{
1962 const struct nf_conntrack_tuple *src_tuple;
1963 const struct nf_conntrack_tuple_hash *hash;
1964 struct nf_conntrack_tuple srctuple;
1965 enum ip_conntrack_info ctinfo;
1966 struct nf_conn *ct;
1967
1968 ct = nf_ct_get(skb, &ctinfo);
1969 if (ct) {
1970 src_tuple = nf_ct_tuple(ct, CTINFO2DIR(ctinfo));
1971 memcpy(dst_tuple, src_tuple, sizeof(*dst_tuple));
1972 return true;
1973 }
1974
1975 if (!nf_ct_get_tuplepr(skb, skb_network_offset(skb),
1976 NFPROTO_IPV4, dev_net(skb->dev),
1977 &srctuple))
1978 return false;
1979
1980 hash = nf_conntrack_find_get(dev_net(skb->dev),
1981 &nf_ct_zone_dflt,
1982 &srctuple);
1983 if (!hash)
1984 return false;
1985
1986 ct = nf_ct_tuplehash_to_ctrack(hash);
1987 src_tuple = nf_ct_tuple(ct, !hash->tuple.dst.dir);
1988 memcpy(dst_tuple, src_tuple, sizeof(*dst_tuple));
1989 nf_ct_put(ct);
1990
1991 return true;
1992}
1993
9fb9cbb1 1994/* Bring out ya dead! */
df0933dc 1995static struct nf_conn *
2843fb69 1996get_next_corpse(int (*iter)(struct nf_conn *i, void *data),
9fb9cbb1
YK
1997 void *data, unsigned int *bucket)
1998{
df0933dc
PM
1999 struct nf_conntrack_tuple_hash *h;
2000 struct nf_conn *ct;
ea781f19 2001 struct hlist_nulls_node *n;
93bb0ceb 2002 spinlock_t *lockp;
9fb9cbb1 2003
56d52d48 2004 for (; *bucket < nf_conntrack_htable_size; (*bucket)++) {
93bb0ceb
JDB
2005 lockp = &nf_conntrack_locks[*bucket % CONNTRACK_LOCKS];
2006 local_bh_disable();
b16c2919 2007 nf_conntrack_lock(lockp);
56d52d48
FW
2008 if (*bucket < nf_conntrack_htable_size) {
2009 hlist_nulls_for_each_entry(h, n, &nf_conntrack_hash[*bucket], hnnode) {
93bb0ceb
JDB
2010 if (NF_CT_DIRECTION(h) != IP_CT_DIR_ORIGINAL)
2011 continue;
2012 ct = nf_ct_tuplehash_to_ctrack(h);
2843fb69 2013 if (iter(ct, data))
93bb0ceb
JDB
2014 goto found;
2015 }
df0933dc 2016 }
93bb0ceb
JDB
2017 spin_unlock(lockp);
2018 local_bh_enable();
d93c6258 2019 cond_resched();
601e68e1 2020 }
b7779d06 2021
b0feacaa
FW
2022 return NULL;
2023found:
2024 atomic_inc(&ct->ct_general.use);
2025 spin_unlock(lockp);
2026 local_bh_enable();
2027 return ct;
2028}
2029
2843fb69
FW
2030static void nf_ct_iterate_cleanup(int (*iter)(struct nf_conn *i, void *data),
2031 void *data, u32 portid, int report)
2032{
0d02d564 2033 unsigned int bucket = 0, sequence;
2843fb69 2034 struct nf_conn *ct;
2843fb69
FW
2035
2036 might_sleep();
2037
0d02d564
FW
2038 for (;;) {
2039 sequence = read_seqcount_begin(&nf_conntrack_generation);
2843fb69 2040
0d02d564
FW
2041 while ((ct = get_next_corpse(iter, data, &bucket)) != NULL) {
2042 /* Time to push up daisies... */
2043
2044 nf_ct_delete(ct, portid, report);
2045 nf_ct_put(ct);
2046 cond_resched();
2047 }
2048
2049 if (!read_seqcount_retry(&nf_conntrack_generation, sequence))
2050 break;
2051 bucket = 0;
2843fb69
FW
2052 }
2053}
2054
2055struct iter_data {
2056 int (*iter)(struct nf_conn *i, void *data);
2057 void *data;
2058 struct net *net;
2059};
2060
2061static int iter_net_only(struct nf_conn *i, void *data)
2062{
2063 struct iter_data *d = data;
2064
2065 if (!net_eq(d->net, nf_ct_net(i)))
2066 return 0;
2067
2068 return d->iter(i, d->data);
2069}
2070
b0feacaa
FW
2071static void
2072__nf_ct_unconfirmed_destroy(struct net *net)
2073{
2074 int cpu;
2075
b7779d06 2076 for_each_possible_cpu(cpu) {
b0feacaa
FW
2077 struct nf_conntrack_tuple_hash *h;
2078 struct hlist_nulls_node *n;
2079 struct ct_pcpu *pcpu;
2080
2081 pcpu = per_cpu_ptr(net->ct.pcpu_lists, cpu);
b7779d06
JDB
2082
2083 spin_lock_bh(&pcpu->lock);
2084 hlist_nulls_for_each_entry(h, n, &pcpu->unconfirmed, hnnode) {
b0feacaa
FW
2085 struct nf_conn *ct;
2086
b7779d06 2087 ct = nf_ct_tuplehash_to_ctrack(h);
b0feacaa
FW
2088
2089 /* we cannot call iter() on the unconfirmed list; the
2090 * owning CPU can reallocate ct->ext at any time.
2091 */
2092 set_bit(IPS_DYING_BIT, &ct->status);
b7779d06
JDB
2093 }
2094 spin_unlock_bh(&pcpu->lock);
d93c6258 2095 cond_resched();
b7779d06 2096 }
9fb9cbb1
YK
2097}
2098
84657984
FW
2099void nf_ct_unconfirmed_destroy(struct net *net)
2100{
2101 might_sleep();
2102
2103 if (atomic_read(&net->ct.count) > 0) {
2104 __nf_ct_unconfirmed_destroy(net);
e2a75007 2105 nf_queue_nf_hook_drop(net);
84657984
FW
2106 synchronize_net();
2107 }
2108}
2109EXPORT_SYMBOL_GPL(nf_ct_unconfirmed_destroy);
2110
9fd6452d
FW
2111void nf_ct_iterate_cleanup_net(struct net *net,
2112 int (*iter)(struct nf_conn *i, void *data),
2113 void *data, u32 portid, int report)
9fb9cbb1 2114{
2843fb69 2115 struct iter_data d;
9fb9cbb1 2116
d93c6258
FW
2117 might_sleep();
2118
88b68bc5
FW
2119 if (atomic_read(&net->ct.count) == 0)
2120 return;
2121
2843fb69
FW
2122 d.iter = iter;
2123 d.data = data;
2124 d.net = net;
2125
2843fb69
FW
2126 nf_ct_iterate_cleanup(iter_net_only, &d, portid, report);
2127}
2128EXPORT_SYMBOL_GPL(nf_ct_iterate_cleanup_net);
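/* Illustrative sketch, not from this file: a hypothetical caller that
 * flushes every conntrack entry of one l4 protocol in a namespace.  The
 * callback runs with the bucket lock held and returns nonzero to have the
 * entry deleted.
 */
#if 0	/* example only */
static int example_kill_by_l4proto(struct nf_conn *ct, void *data)
{
	const u8 *protonum = data;

	return nf_ct_protonum(ct) == *protonum;
}

static void example_flush_l4proto(struct net *net, u8 protonum)
{
	nf_ct_iterate_cleanup_net(net, example_kill_by_l4proto,
				  &protonum, 0, 0);
}
#endif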
9fb9cbb1 2129
2843fb69
FW
2130/**
2131 * nf_ct_iterate_destroy - destroy unconfirmed conntracks and iterate table
2132 * @iter: callback to invoke for each conntrack
2133 * @data: data to pass to @iter
2134 *
2135 * Like nf_ct_iterate_cleanup, but first marks conntracks on the
2136 * unconfirmed list as dying (so they will not be inserted into
2137 * the main table).
7866cc57
FW
2138 *
2139 * Can only be called in the module exit path.
2843fb69
FW
2140 */
2141void
2142nf_ct_iterate_destroy(int (*iter)(struct nf_conn *i, void *data), void *data)
2143{
2144 struct net *net;
2145
f0b07bb1 2146 down_read(&net_rwsem);
2843fb69
FW
2147 for_each_net(net) {
2148 if (atomic_read(&net->ct.count) == 0)
2149 continue;
2150 __nf_ct_unconfirmed_destroy(net);
e2a75007 2151 nf_queue_nf_hook_drop(net);
9fb9cbb1 2152 }
f0b07bb1 2153 up_read(&net_rwsem);
2843fb69 2154
7866cc57
FW
2155 /* Need to wait for netns cleanup worker to finish, if it's
2156 * running -- it might have deleted a net namespace from
2157 * the global list, so our __nf_ct_unconfirmed_destroy() might
2158 * not have affected all namespaces.
2159 */
2160 net_ns_barrier();
2161
2843fb69
FW
2162 /* a conntrack could have been unlinked from the unconfirmed list
2163 * before we grabbed the pcpu lock in __nf_ct_unconfirmed_destroy().
2164 * This makes sure it is inserted into the conntrack table.
2165 */
2166 synchronize_net();
2167
2168 nf_ct_iterate_cleanup(iter, data, 0, 0);
9fb9cbb1 2169}
2843fb69 2170EXPORT_SYMBOL_GPL(nf_ct_iterate_destroy);
9fb9cbb1 2171
274d383b
PNA
2172static int kill_all(struct nf_conn *i, void *data)
2173{
2843fb69 2174 return net_eq(nf_ct_net(i), data);
274d383b
PNA
2175}
2176
f94161c1 2177void nf_conntrack_cleanup_start(void)
9fb9cbb1 2178{
b87a2f91 2179 conntrack_gc_work.exiting = true;
f94161c1
G
2180 RCU_INIT_POINTER(ip_ct_attach, NULL);
2181}
2182
2183void nf_conntrack_cleanup_end(void)
2184{
1f4b2439 2185 RCU_INIT_POINTER(nf_ct_hook, NULL);
b87a2f91 2186 cancel_delayed_work_sync(&conntrack_gc_work.dwork);
285189c7 2187 kvfree(nf_conntrack_hash);
56d52d48 2188
04d87001 2189 nf_conntrack_proto_fini();
41d73ec0 2190 nf_conntrack_seqadj_fini();
5f69b8f5 2191 nf_conntrack_labels_fini();
5e615b22 2192 nf_conntrack_helper_fini();
8684094c 2193 nf_conntrack_timeout_fini();
3fe0f943 2194 nf_conntrack_ecache_fini();
73f4001a 2195 nf_conntrack_tstamp_fini();
b7ff3a1f 2196 nf_conntrack_acct_fini();
83b4dbe1 2197 nf_conntrack_expect_fini();
77571149
FW
2198
2199 kmem_cache_destroy(nf_conntrack_cachep);
08f6547d 2200}
9fb9cbb1 2201
f94161c1
G
2202/*
2203 * Mishearing the voices in his head, our hero wonders how he's
2204 * supposed to kill the mall.
2205 */
2206void nf_conntrack_cleanup_net(struct net *net)
08f6547d 2207{
dece40e8
VD
2208 LIST_HEAD(single);
2209
2210 list_add(&net->exit_list, &single);
2211 nf_conntrack_cleanup_net_list(&single);
2212}
2213
2214void nf_conntrack_cleanup_net_list(struct list_head *net_exit_list)
2215{
2216 int busy;
2217 struct net *net;
2218
f94161c1
G
2219 /*
2220 * This makes sure all current packets have passed through
2221 * the netfilter framework. Roll on, two-stage module
2222 * delete...
2223 */
2224 synchronize_net();
dece40e8
VD
2225i_see_dead_people:
2226 busy = 0;
2227 list_for_each_entry(net, net_exit_list, exit_list) {
2843fb69 2228 nf_ct_iterate_cleanup(kill_all, net, 0, 0);
dece40e8
VD
2229 if (atomic_read(&net->ct.count) != 0)
2230 busy = 1;
2231 }
2232 if (busy) {
9fb9cbb1
YK
2233 schedule();
2234 goto i_see_dead_people;
2235 }
2236
dece40e8 2237 list_for_each_entry(net, net_exit_list, exit_list) {
dece40e8 2238 nf_conntrack_proto_pernet_fini(net);
dece40e8 2239 nf_conntrack_ecache_pernet_fini(net);
dece40e8 2240 nf_conntrack_expect_pernet_fini(net);
dece40e8 2241 free_percpu(net->ct.stat);
b7779d06 2242 free_percpu(net->ct.pcpu_lists);
dece40e8 2243 }
08f6547d
AD
2244}
2245
d862a662 2246void *nf_ct_alloc_hashtable(unsigned int *sizep, int nulls)
9fb9cbb1 2247{
ea781f19
ED
2248 struct hlist_nulls_head *hash;
2249 unsigned int nr_slots, i;
9fb9cbb1 2250
9cc1c73a
FW
2251 if (*sizep > (UINT_MAX / sizeof(struct hlist_nulls_head)))
2252 return NULL;
2253
ea781f19
ED
2254 BUILD_BUG_ON(sizeof(struct hlist_nulls_head) != sizeof(struct hlist_head));
2255 nr_slots = *sizep = roundup(*sizep, PAGE_SIZE / sizeof(struct hlist_nulls_head));
9cc1c73a 2256
285189c7
LR
2257 hash = kvmalloc_array(nr_slots, sizeof(struct hlist_nulls_head),
2258 GFP_KERNEL | __GFP_ZERO);
9fb9cbb1 2259
ea781f19
ED
2260 if (hash && nulls)
2261 for (i = 0; i < nr_slots; i++)
2262 INIT_HLIST_NULLS_HEAD(&hash[i], i);
9fb9cbb1
YK
2263
2264 return hash;
2265}
ac565e5f 2266EXPORT_SYMBOL_GPL(nf_ct_alloc_hashtable);
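/* Worked example for the rounding above (illustrative): with 4 KiB pages and
 * 8-byte hlist_nulls_head entries, PAGE_SIZE / sizeof(struct hlist_nulls_head)
 * is 512, so a request for 1000 buckets comes back as *sizep = nr_slots = 1024;
 * the table therefore always occupies whole pages.
 */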
9fb9cbb1 2267
3183ab89 2268int nf_conntrack_hash_resize(unsigned int hashsize)
9fb9cbb1 2269{
3183ab89
FW
2270 int i, bucket;
2271 unsigned int old_size;
ea781f19 2272 struct hlist_nulls_head *hash, *old_hash;
9fb9cbb1 2273 struct nf_conntrack_tuple_hash *h;
5d0aa2cc 2274 struct nf_conn *ct;
9fb9cbb1 2275
9fb9cbb1
YK
2276 if (!hashsize)
2277 return -EINVAL;
2278
d862a662 2279 hash = nf_ct_alloc_hashtable(&hashsize, 1);
9fb9cbb1
YK
2280 if (!hash)
2281 return -ENOMEM;
2282
3183ab89
FW
2283 old_size = nf_conntrack_htable_size;
2284 if (old_size == hashsize) {
285189c7 2285 kvfree(hash);
3183ab89
FW
2286 return 0;
2287 }
2288
93bb0ceb
JDB
2289 local_bh_disable();
2290 nf_conntrack_all_lock();
a3efd812 2291 write_seqcount_begin(&nf_conntrack_generation);
93bb0ceb 2292
76507f69
PM
2293 /* Lookups in the old hash might happen in parallel, which means we
2294 * might get false negatives during connection lookup. New connections
2295 * created because of a false negative won't make it into the hash
93bb0ceb 2296 * though, since that requires taking the locks.
76507f69 2297 */
93bb0ceb 2298
56d52d48
FW
2299 for (i = 0; i < nf_conntrack_htable_size; i++) {
2300 while (!hlist_nulls_empty(&nf_conntrack_hash[i])) {
2301 h = hlist_nulls_entry(nf_conntrack_hash[i].first,
2302 struct nf_conntrack_tuple_hash, hnnode);
5d0aa2cc 2303 ct = nf_ct_tuplehash_to_ctrack(h);
ea781f19 2304 hlist_nulls_del_rcu(&h->hnnode);
1b8c8a9f
FW
2305 bucket = __hash_conntrack(nf_ct_net(ct),
2306 &h->tuple, hashsize);
ea781f19 2307 hlist_nulls_add_head_rcu(&h->hnnode, &hash[bucket]);
9fb9cbb1
YK
2308 }
2309 }
56d52d48
FW
2310 old_size = nf_conntrack_htable_size;
2311 old_hash = nf_conntrack_hash;
9fb9cbb1 2312
56d52d48
FW
2313 nf_conntrack_hash = hash;
2314 nf_conntrack_htable_size = hashsize;
93bb0ceb 2315
a3efd812 2316 write_seqcount_end(&nf_conntrack_generation);
93bb0ceb
JDB
2317 nf_conntrack_all_unlock();
2318 local_bh_enable();
9fb9cbb1 2319
5e3c61f9 2320 synchronize_net();
285189c7 2321 kvfree(old_hash);
9fb9cbb1
YK
2322 return 0;
2323}
3183ab89 2324
e4dca7b7 2325int nf_conntrack_set_hashsize(const char *val, const struct kernel_param *kp)
3183ab89
FW
2326{
2327 unsigned int hashsize;
2328 int rc;
2329
2330 if (current->nsproxy->net_ns != &init_net)
2331 return -EOPNOTSUPP;
2332
2333 /* On boot, we can set this without any fancy locking. */
2045cdfa 2334 if (!nf_conntrack_hash)
3183ab89
FW
2335 return param_set_uint(val, kp);
2336
2337 rc = kstrtouint(val, 0, &hashsize);
2338 if (rc)
2339 return rc;
2340
2341 return nf_conntrack_hash_resize(hashsize);
2342}
fae718dd 2343EXPORT_SYMBOL_GPL(nf_conntrack_set_hashsize);
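/* Illustrative sketch, not part of this excerpt: nf_conntrack_set_hashsize()
 * is assumed to be wired up as the handler of the "hashsize" module
 * parameter, roughly as below, which is what lets an administrator trigger
 * nf_conntrack_hash_resize() by writing to
 * /sys/module/nf_conntrack/parameters/hashsize.
 */
#if 0	/* example only */
module_param_call(hashsize, nf_conntrack_set_hashsize, param_get_uint,
		  &nf_conntrack_htable_size, 0600);
#endif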
9fb9cbb1 2344
ab71632c 2345static __always_inline unsigned int total_extension_size(void)
b3a5db10
FW
2346{
2347 /* remember to add new extensions below */
2348 BUILD_BUG_ON(NF_CT_EXT_NUM > 9);
2349
2350 return sizeof(struct nf_ct_ext) +
2351 sizeof(struct nf_conn_help)
2352#if IS_ENABLED(CONFIG_NF_NAT)
2353 + sizeof(struct nf_conn_nat)
2354#endif
2355 + sizeof(struct nf_conn_seqadj)
2356 + sizeof(struct nf_conn_acct)
2357#ifdef CONFIG_NF_CONNTRACK_EVENTS
2358 + sizeof(struct nf_conntrack_ecache)
2359#endif
2360#ifdef CONFIG_NF_CONNTRACK_TIMESTAMP
2361 + sizeof(struct nf_conn_tstamp)
2362#endif
2363#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
2364 + sizeof(struct nf_conn_timeout)
2365#endif
2366#ifdef CONFIG_NF_CONNTRACK_LABELS
2367 + sizeof(struct nf_conn_labels)
2368#endif
2369#if IS_ENABLED(CONFIG_NETFILTER_SYNPROXY)
2370 + sizeof(struct nf_conn_synproxy)
2371#endif
2372 ;
2373}
2374
f94161c1 2375int nf_conntrack_init_start(void)
9fb9cbb1 2376{
ca79b0c2 2377 unsigned long nr_pages = totalram_pages();
f205c5e0 2378 int max_factor = 8;
0c5366b3 2379 int ret = -ENOMEM;
cc41c84b 2380 int i;
93bb0ceb 2381
b3a5db10
FW
2382 /* struct nf_ct_ext uses u8 to store offsets/size */
2383 BUILD_BUG_ON(total_extension_size() > 255u);
2384
a3efd812
FW
2385 seqcount_init(&nf_conntrack_generation);
2386
d5d20912 2387 for (i = 0; i < CONNTRACK_LOCKS; i++)
93bb0ceb 2388 spin_lock_init(&nf_conntrack_locks[i]);
9fb9cbb1 2389
9fb9cbb1 2390 if (!nf_conntrack_htable_size) {
88eab472
ML
2391 /* Idea from tcp.c: use 1/16384 of memory.
2392 * On i386: 32MB machine has 512 buckets.
2393 * >= 1GB machines have 16384 buckets.
2394 * >= 4GB machines have 65536 buckets.
2395 */
9fb9cbb1 2396 nf_conntrack_htable_size
3d6357de 2397 = (((nr_pages << PAGE_SHIFT) / 16384)
f205c5e0 2398 / sizeof(struct hlist_head));
3d6357de 2399 if (nr_pages > (4 * (1024 * 1024 * 1024 / PAGE_SIZE)))
88eab472 2400 nf_conntrack_htable_size = 65536;
3d6357de 2401 else if (nr_pages > (1024 * 1024 * 1024 / PAGE_SIZE))
f205c5e0
PM
2402 nf_conntrack_htable_size = 16384;
2403 if (nf_conntrack_htable_size < 32)
2404 nf_conntrack_htable_size = 32;
2405
2406 /* Use a max. factor of four by default to get the same max as
2407 * with the old struct list_heads. When a table size is given
2408 * we use the old value of 8 to avoid reducing the max.
2409 * entries. */
2410 max_factor = 4;
9fb9cbb1 2411 }
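	/* Worked example (illustrative): on a 2 GiB machine with 4 KiB pages,
	 * nr_pages is roughly 524288, which exceeds the 1 GiB threshold above,
	 * so the table gets 16384 buckets and nf_conntrack_max below becomes
	 * 4 * 16384 = 65536 entries.
	 */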
56d52d48
FW
2412
2413 nf_conntrack_hash = nf_ct_alloc_hashtable(&nf_conntrack_htable_size, 1);
2414 if (!nf_conntrack_hash)
2415 return -ENOMEM;
2416
f205c5e0 2417 nf_conntrack_max = max_factor * nf_conntrack_htable_size;
8e5105a0 2418
0c5366b3 2419 nf_conntrack_cachep = kmem_cache_create("nf_conntrack",
a9e419dc
FW
2420 sizeof(struct nf_conn),
2421 NFCT_INFOMASK + 1,
5f0d5a3a 2422 SLAB_TYPESAFE_BY_RCU | SLAB_HWCACHE_ALIGN, NULL);
0c5366b3
FW
2423 if (!nf_conntrack_cachep)
2424 goto err_cachep;
2425
83b4dbe1
G
2426 ret = nf_conntrack_expect_init();
2427 if (ret < 0)
2428 goto err_expect;
2429
b7ff3a1f
G
2430 ret = nf_conntrack_acct_init();
2431 if (ret < 0)
2432 goto err_acct;
2433
73f4001a
G
2434 ret = nf_conntrack_tstamp_init();
2435 if (ret < 0)
2436 goto err_tstamp;
2437
3fe0f943
G
2438 ret = nf_conntrack_ecache_init();
2439 if (ret < 0)
2440 goto err_ecache;
2441
8684094c
G
2442 ret = nf_conntrack_timeout_init();
2443 if (ret < 0)
2444 goto err_timeout;
2445
5e615b22
G
2446 ret = nf_conntrack_helper_init();
2447 if (ret < 0)
2448 goto err_helper;
2449
5f69b8f5
G
2450 ret = nf_conntrack_labels_init();
2451 if (ret < 0)
2452 goto err_labels;
2453
41d73ec0
PM
2454 ret = nf_conntrack_seqadj_init();
2455 if (ret < 0)
2456 goto err_seqadj;
2457
04d87001
G
2458 ret = nf_conntrack_proto_init();
2459 if (ret < 0)
2460 goto err_proto;
2461
b87a2f91 2462 conntrack_gc_work_init(&conntrack_gc_work);
0984d427 2463 queue_delayed_work(system_power_efficient_wq, &conntrack_gc_work.dwork, HZ);
b87a2f91 2464
08f6547d
AD
2465 return 0;
2466
04d87001 2467err_proto:
41d73ec0
PM
2468 nf_conntrack_seqadj_fini();
2469err_seqadj:
04d87001 2470 nf_conntrack_labels_fini();
5f69b8f5
G
2471err_labels:
2472 nf_conntrack_helper_fini();
5e615b22
G
2473err_helper:
2474 nf_conntrack_timeout_fini();
8684094c
G
2475err_timeout:
2476 nf_conntrack_ecache_fini();
3fe0f943
G
2477err_ecache:
2478 nf_conntrack_tstamp_fini();
73f4001a
G
2479err_tstamp:
2480 nf_conntrack_acct_fini();
b7ff3a1f
G
2481err_acct:
2482 nf_conntrack_expect_fini();
83b4dbe1 2483err_expect:
0c5366b3
FW
2484 kmem_cache_destroy(nf_conntrack_cachep);
2485err_cachep:
285189c7 2486 kvfree(nf_conntrack_hash);
08f6547d
AD
2487 return ret;
2488}
2489
1f4b2439 2490static struct nf_ct_hook nf_conntrack_hook = {
368982cd 2491 .update = nf_conntrack_update,
1f4b2439 2492 .destroy = destroy_conntrack,
b60a6040 2493 .get_tuple_skb = nf_conntrack_get_tuple_skb,
1f4b2439
PNA
2494};
2495
f94161c1
G
2496void nf_conntrack_init_end(void)
2497{
2498 /* For use by REJECT target */
2499 RCU_INIT_POINTER(ip_ct_attach, nf_conntrack_attach);
1f4b2439 2500 RCU_INIT_POINTER(nf_ct_hook, &nf_conntrack_hook);
f94161c1
G
2501}
2502
8cc20198
ED
2503/*
2504 * We need to use special "null" values, not used in the hash table
2505 */
2506#define UNCONFIRMED_NULLS_VAL ((1<<30)+0)
2507#define DYING_NULLS_VAL ((1<<30)+1)
252b3e8c 2508#define TEMPLATE_NULLS_VAL ((1<<30)+2)
8cc20198 2509
f94161c1 2510int nf_conntrack_init_net(struct net *net)
08f6547d 2511{
b7779d06
JDB
2512 int ret = -ENOMEM;
2513 int cpu;
ceceae1b 2514
cc41c84b 2515 BUILD_BUG_ON(IP_CT_UNTRACKED == IP_CT_NUMBER);
2e7b162c 2516 BUILD_BUG_ON_NOT_POWER_OF_2(CONNTRACK_LOCKS);
08f6547d 2517 atomic_set(&net->ct.count, 0);
b7779d06
JDB
2518
2519 net->ct.pcpu_lists = alloc_percpu(struct ct_pcpu);
2520 if (!net->ct.pcpu_lists)
08f6547d 2521 goto err_stat;
b7779d06
JDB
2522
2523 for_each_possible_cpu(cpu) {
2524 struct ct_pcpu *pcpu = per_cpu_ptr(net->ct.pcpu_lists, cpu);
2525
2526 spin_lock_init(&pcpu->lock);
2527 INIT_HLIST_NULLS_HEAD(&pcpu->unconfirmed, UNCONFIRMED_NULLS_VAL);
2528 INIT_HLIST_NULLS_HEAD(&pcpu->dying, DYING_NULLS_VAL);
08f6547d 2529 }
5b3501fa 2530
b7779d06
JDB
2531 net->ct.stat = alloc_percpu(struct ip_conntrack_stat);
2532 if (!net->ct.stat)
2533 goto err_pcpu_lists;
2534
83b4dbe1 2535 ret = nf_conntrack_expect_pernet_init(net);
08f6547d
AD
2536 if (ret < 0)
2537 goto err_expect;
fc3893fd
FW
2538
2539 nf_conntrack_acct_pernet_init(net);
2540 nf_conntrack_tstamp_pernet_init(net);
2541 nf_conntrack_ecache_pernet_init(net);
2542 nf_conntrack_helper_pernet_init(net);
4a60dc74 2543 nf_conntrack_proto_pernet_init(net);
fc3893fd 2544
08f6547d 2545 return 0;
c539f017 2546
08f6547d 2547err_expect:
0d55af87 2548 free_percpu(net->ct.stat);
b7779d06
JDB
2549err_pcpu_lists:
2550 free_percpu(net->ct.pcpu_lists);
0d55af87 2551err_stat:
08f6547d
AD
2552 return ret;
2553}