net/netfilter/nf_conntrack_core.c
/* Connection state tracking for netfilter.  This is separated from,
   but required by, the NAT layer; it can also be used by an iptables
   extension. */

/* (C) 1999-2001 Paul `Rusty' Russell
 * (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org>
 * (C) 2003,2004 USAGI/WIDE Project <http://www.linux-ipv6.org>
 * (C) 2005-2012 Patrick McHardy <kaber@trash.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/types.h>
#include <linux/netfilter.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/skbuff.h>
#include <linux/proc_fs.h>
#include <linux/vmalloc.h>
#include <linux/stddef.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/jhash.h>
#include <linux/err.h>
#include <linux/percpu.h>
#include <linux/moduleparam.h>
#include <linux/notifier.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/socket.h>
#include <linux/mm.h>
#include <linux/nsproxy.h>
#include <linux/rculist_nulls.h>

#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_l3proto.h>
#include <net/netfilter/nf_conntrack_l4proto.h>
#include <net/netfilter/nf_conntrack_expect.h>
#include <net/netfilter/nf_conntrack_helper.h>
#include <net/netfilter/nf_conntrack_seqadj.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_conntrack_extend.h>
#include <net/netfilter/nf_conntrack_acct.h>
#include <net/netfilter/nf_conntrack_ecache.h>
#include <net/netfilter/nf_conntrack_zones.h>
#include <net/netfilter/nf_conntrack_timestamp.h>
#include <net/netfilter/nf_conntrack_timeout.h>
#include <net/netfilter/nf_conntrack_labels.h>
#include <net/netfilter/nf_conntrack_synproxy.h>
#include <net/netfilter/nf_nat.h>
#include <net/netfilter/nf_nat_core.h>
#include <net/netfilter/nf_nat_helper.h>

#define NF_CONNTRACK_VERSION    "0.5.0"

int (*nfnetlink_parse_nat_setup_hook)(struct nf_conn *ct,
                                      enum nf_nat_manip_type manip,
                                      const struct nlattr *attr) __read_mostly;
EXPORT_SYMBOL_GPL(nfnetlink_parse_nat_setup_hook);

__cacheline_aligned_in_smp spinlock_t nf_conntrack_locks[CONNTRACK_LOCKS];
EXPORT_SYMBOL_GPL(nf_conntrack_locks);

__cacheline_aligned_in_smp DEFINE_SPINLOCK(nf_conntrack_expect_lock);
EXPORT_SYMBOL_GPL(nf_conntrack_expect_lock);

static __read_mostly spinlock_t nf_conntrack_locks_all_lock;
static __read_mostly bool nf_conntrack_locks_all;

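/* Acquire one of the nf_conntrack_locks[] spinlocks.  If a concurrent
 * nf_conntrack_all_lock() phase is in progress, drop the lock and wait
 * until that exclusive phase is over before retrying.
 */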
void nf_conntrack_lock(spinlock_t *lock) __acquires(lock)
{
        spin_lock(lock);
        while (unlikely(nf_conntrack_locks_all)) {
                spin_unlock(lock);
                spin_unlock_wait(&nf_conntrack_locks_all_lock);
                spin_lock(lock);
        }
}
EXPORT_SYMBOL_GPL(nf_conntrack_lock);

static void nf_conntrack_double_unlock(unsigned int h1, unsigned int h2)
{
        h1 %= CONNTRACK_LOCKS;
        h2 %= CONNTRACK_LOCKS;
        spin_unlock(&nf_conntrack_locks[h1]);
        if (h1 != h2)
                spin_unlock(&nf_conntrack_locks[h2]);
}

/* return true if we need to recompute hashes (in case hash table was resized) */
static bool nf_conntrack_double_lock(struct net *net, unsigned int h1,
                                     unsigned int h2, unsigned int sequence)
{
        h1 %= CONNTRACK_LOCKS;
        h2 %= CONNTRACK_LOCKS;
        if (h1 <= h2) {
                nf_conntrack_lock(&nf_conntrack_locks[h1]);
                if (h1 != h2)
                        spin_lock_nested(&nf_conntrack_locks[h2],
                                         SINGLE_DEPTH_NESTING);
        } else {
                nf_conntrack_lock(&nf_conntrack_locks[h2]);
                spin_lock_nested(&nf_conntrack_locks[h1],
                                 SINGLE_DEPTH_NESTING);
        }
        if (read_seqcount_retry(&net->ct.generation, sequence)) {
                nf_conntrack_double_unlock(h1, h2);
                return true;
        }
        return false;
}

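/* Take the global lock and wait for all per-bucket lock holders to finish,
 * giving the caller exclusive access to every bucket until
 * nf_conntrack_all_unlock() is called.
 */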
static void nf_conntrack_all_lock(void)
{
        int i;

        spin_lock(&nf_conntrack_locks_all_lock);
        nf_conntrack_locks_all = true;

        for (i = 0; i < CONNTRACK_LOCKS; i++) {
                spin_unlock_wait(&nf_conntrack_locks[i]);
        }
}

static void nf_conntrack_all_unlock(void)
{
        nf_conntrack_locks_all = false;
        spin_unlock(&nf_conntrack_locks_all_lock);
}

unsigned int nf_conntrack_htable_size __read_mostly;
EXPORT_SYMBOL_GPL(nf_conntrack_htable_size);

unsigned int nf_conntrack_max __read_mostly;
EXPORT_SYMBOL_GPL(nf_conntrack_max);

DEFINE_PER_CPU(struct nf_conn, nf_conntrack_untracked);
EXPORT_PER_CPU_SYMBOL(nf_conntrack_untracked);

unsigned int nf_conntrack_hash_rnd __read_mostly;
EXPORT_SYMBOL_GPL(nf_conntrack_hash_rnd);

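/* Return the raw, unscaled 32-bit hash of a tuple, seeded with
 * nf_conntrack_hash_rnd; callers map it to a bucket via hash_bucket().
 */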
static u32 hash_conntrack_raw(const struct nf_conntrack_tuple *tuple)
{
        unsigned int n;

        /* The direction must be ignored, so we hash everything up to the
         * destination ports (which is a multiple of 4) and treat the last
         * three bytes manually.
         */
        n = (sizeof(tuple->src) + sizeof(tuple->dst.u3)) / sizeof(u32);
        return jhash2((u32 *)tuple, n, nf_conntrack_hash_rnd ^
                      (((__force __u16)tuple->dst.u.all << 16) |
                      tuple->dst.protonum));
}

static u32 __hash_bucket(u32 hash, unsigned int size)
{
        return reciprocal_scale(hash, size);
}

static u32 hash_bucket(u32 hash, const struct net *net)
{
        return __hash_bucket(hash, net->ct.htable_size);
}

static u_int32_t __hash_conntrack(const struct nf_conntrack_tuple *tuple,
                                  unsigned int size)
{
        return __hash_bucket(hash_conntrack_raw(tuple), size);
}

static inline u_int32_t hash_conntrack(const struct net *net,
                                       const struct nf_conntrack_tuple *tuple)
{
        return __hash_conntrack(tuple, net->ct.htable_size);
}

bool
nf_ct_get_tuple(const struct sk_buff *skb,
                unsigned int nhoff,
                unsigned int dataoff,
                u_int16_t l3num,
                u_int8_t protonum,
                struct net *net,
                struct nf_conntrack_tuple *tuple,
                const struct nf_conntrack_l3proto *l3proto,
                const struct nf_conntrack_l4proto *l4proto)
{
        memset(tuple, 0, sizeof(*tuple));

        tuple->src.l3num = l3num;
        if (l3proto->pkt_to_tuple(skb, nhoff, tuple) == 0)
                return false;

        tuple->dst.protonum = protonum;
        tuple->dst.dir = IP_CT_DIR_ORIGINAL;

        return l4proto->pkt_to_tuple(skb, dataoff, net, tuple);
}
EXPORT_SYMBOL_GPL(nf_ct_get_tuple);

bool nf_ct_get_tuplepr(const struct sk_buff *skb, unsigned int nhoff,
                       u_int16_t l3num,
                       struct net *net, struct nf_conntrack_tuple *tuple)
{
        struct nf_conntrack_l3proto *l3proto;
        struct nf_conntrack_l4proto *l4proto;
        unsigned int protoff;
        u_int8_t protonum;
        int ret;

        rcu_read_lock();

        l3proto = __nf_ct_l3proto_find(l3num);
        ret = l3proto->get_l4proto(skb, nhoff, &protoff, &protonum);
        if (ret != NF_ACCEPT) {
                rcu_read_unlock();
                return false;
        }

        l4proto = __nf_ct_l4proto_find(l3num, protonum);

        ret = nf_ct_get_tuple(skb, nhoff, protoff, l3num, protonum, net, tuple,
                              l3proto, l4proto);

        rcu_read_unlock();
        return ret;
}
EXPORT_SYMBOL_GPL(nf_ct_get_tuplepr);

bool
nf_ct_invert_tuple(struct nf_conntrack_tuple *inverse,
                   const struct nf_conntrack_tuple *orig,
                   const struct nf_conntrack_l3proto *l3proto,
                   const struct nf_conntrack_l4proto *l4proto)
{
        memset(inverse, 0, sizeof(*inverse));

        inverse->src.l3num = orig->src.l3num;
        if (l3proto->invert_tuple(inverse, orig) == 0)
                return false;

        inverse->dst.dir = !orig->dst.dir;

        inverse->dst.protonum = orig->dst.protonum;
        return l4proto->invert_tuple(inverse, orig);
}
EXPORT_SYMBOL_GPL(nf_ct_invert_tuple);

static void
clean_from_lists(struct nf_conn *ct)
{
        pr_debug("clean_from_lists(%p)\n", ct);
        hlist_nulls_del_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode);
        hlist_nulls_del_rcu(&ct->tuplehash[IP_CT_DIR_REPLY].hnnode);

        /* Destroy all pending expectations */
        nf_ct_remove_expectations(ct);
}

/* must be called with local_bh_disable */
static void nf_ct_add_to_dying_list(struct nf_conn *ct)
{
        struct ct_pcpu *pcpu;

        /* add this conntrack to the (per cpu) dying list */
        ct->cpu = smp_processor_id();
        pcpu = per_cpu_ptr(nf_ct_net(ct)->ct.pcpu_lists, ct->cpu);

        spin_lock(&pcpu->lock);
        hlist_nulls_add_head(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode,
                             &pcpu->dying);
        spin_unlock(&pcpu->lock);
}

/* must be called with local_bh_disable */
static void nf_ct_add_to_unconfirmed_list(struct nf_conn *ct)
{
        struct ct_pcpu *pcpu;

        /* add this conntrack to the (per cpu) unconfirmed list */
        ct->cpu = smp_processor_id();
        pcpu = per_cpu_ptr(nf_ct_net(ct)->ct.pcpu_lists, ct->cpu);

        spin_lock(&pcpu->lock);
        hlist_nulls_add_head(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode,
                             &pcpu->unconfirmed);
        spin_unlock(&pcpu->lock);
}

/* must be called with local_bh_disable */
static void nf_ct_del_from_dying_or_unconfirmed_list(struct nf_conn *ct)
{
        struct ct_pcpu *pcpu;

        /* We overload the first tuple to link into the unconfirmed or dying list. */
        pcpu = per_cpu_ptr(nf_ct_net(ct)->ct.pcpu_lists, ct->cpu);

        spin_lock(&pcpu->lock);
        BUG_ON(hlist_nulls_unhashed(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode));
        hlist_nulls_del_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode);
        spin_unlock(&pcpu->lock);
}

/* Released via destroy_conntrack() */
struct nf_conn *nf_ct_tmpl_alloc(struct net *net,
                                 const struct nf_conntrack_zone *zone,
                                 gfp_t flags)
{
        struct nf_conn *tmpl;

        tmpl = kzalloc(sizeof(*tmpl), flags);
        if (tmpl == NULL)
                return NULL;

        tmpl->status = IPS_TEMPLATE;
        write_pnet(&tmpl->ct_net, net);

        if (nf_ct_zone_add(tmpl, flags, zone) < 0)
                goto out_free;

        atomic_set(&tmpl->ct_general.use, 0);

        return tmpl;
out_free:
        kfree(tmpl);
        return NULL;
}
EXPORT_SYMBOL_GPL(nf_ct_tmpl_alloc);

void nf_ct_tmpl_free(struct nf_conn *tmpl)
{
        nf_ct_ext_destroy(tmpl);
        nf_ct_ext_free(tmpl);
        kfree(tmpl);
}
EXPORT_SYMBOL_GPL(nf_ct_tmpl_free);

static void
destroy_conntrack(struct nf_conntrack *nfct)
{
        struct nf_conn *ct = (struct nf_conn *)nfct;
        struct net *net = nf_ct_net(ct);
        struct nf_conntrack_l4proto *l4proto;

        pr_debug("destroy_conntrack(%p)\n", ct);
        NF_CT_ASSERT(atomic_read(&nfct->use) == 0);
        NF_CT_ASSERT(!timer_pending(&ct->timeout));

        if (unlikely(nf_ct_is_template(ct))) {
                nf_ct_tmpl_free(ct);
                return;
        }
        rcu_read_lock();
        l4proto = __nf_ct_l4proto_find(nf_ct_l3num(ct), nf_ct_protonum(ct));
        if (l4proto && l4proto->destroy)
                l4proto->destroy(ct);

        rcu_read_unlock();

        local_bh_disable();
        /* Expectations will have been removed in clean_from_lists,
         * except TFTP can create an expectation on the first packet,
         * before connection is in the list, so we need to clean here,
         * too.
         */
        nf_ct_remove_expectations(ct);

        nf_ct_del_from_dying_or_unconfirmed_list(ct);

        NF_CT_STAT_INC(net, delete);
        local_bh_enable();

        if (ct->master)
                nf_ct_put(ct->master);

        pr_debug("destroy_conntrack: returning ct=%p to slab\n", ct);
        nf_conntrack_free(ct);
}

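/* Destroy the helper, unlink the conntrack from the hash table under both
 * bucket locks and move it to the per-CPU dying list.
 */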
static void nf_ct_delete_from_lists(struct nf_conn *ct)
{
        struct net *net = nf_ct_net(ct);
        unsigned int hash, reply_hash;
        unsigned int sequence;

        nf_ct_helper_destroy(ct);

        local_bh_disable();
        do {
                sequence = read_seqcount_begin(&net->ct.generation);
                hash = hash_conntrack(net,
                                      &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
                reply_hash = hash_conntrack(net,
                                           &ct->tuplehash[IP_CT_DIR_REPLY].tuple);
        } while (nf_conntrack_double_lock(net, hash, reply_hash, sequence));

        clean_from_lists(ct);
        nf_conntrack_double_unlock(hash, reply_hash);

        nf_ct_add_to_dying_list(ct);

        NF_CT_STAT_INC(net, delete_list);
        local_bh_enable();
}

bool nf_ct_delete(struct nf_conn *ct, u32 portid, int report)
{
        struct nf_conn_tstamp *tstamp;

        tstamp = nf_conn_tstamp_find(ct);
        if (tstamp && tstamp->stop == 0)
                tstamp->stop = ktime_get_real_ns();

        if (nf_ct_is_dying(ct))
                goto delete;

        if (nf_conntrack_event_report(IPCT_DESTROY, ct,
                                    portid, report) < 0) {
                /* destroy event was not delivered */
                nf_ct_delete_from_lists(ct);
                nf_conntrack_ecache_delayed_work(nf_ct_net(ct));
                return false;
        }

        nf_conntrack_ecache_work(nf_ct_net(ct));
        set_bit(IPS_DYING_BIT, &ct->status);
 delete:
        nf_ct_delete_from_lists(ct);
        nf_ct_put(ct);
        return true;
}
EXPORT_SYMBOL_GPL(nf_ct_delete);

static void death_by_timeout(unsigned long ul_conntrack)
{
        nf_ct_delete((struct nf_conn *)ul_conntrack, 0, 0);
}

static inline bool
nf_ct_key_equal(struct nf_conntrack_tuple_hash *h,
                const struct nf_conntrack_tuple *tuple,
                const struct nf_conntrack_zone *zone)
{
        struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(h);

        /* A conntrack can be recreated with an equal tuple,
         * so we need to check that the conntrack is confirmed.
         */
        return nf_ct_tuple_equal(tuple, &h->tuple) &&
               nf_ct_zone_equal(ct, zone, NF_CT_DIRECTION(h)) &&
               nf_ct_is_confirmed(ct);
}

/*
 * Warning :
 * - Caller must take a reference on returned object
 *   and recheck nf_ct_tuple_equal(tuple, &h->tuple)
 */
static struct nf_conntrack_tuple_hash *
____nf_conntrack_find(struct net *net, const struct nf_conntrack_zone *zone,
                      const struct nf_conntrack_tuple *tuple, u32 hash)
{
        struct nf_conntrack_tuple_hash *h;
        struct hlist_nulls_node *n;
        unsigned int bucket = hash_bucket(hash, net);

        /* Disable BHs the entire time since we normally need to disable them
         * at least once for the stats anyway.
         */
        local_bh_disable();
begin:
        hlist_nulls_for_each_entry_rcu(h, n, &net->ct.hash[bucket], hnnode) {
                if (nf_ct_key_equal(h, tuple, zone)) {
                        NF_CT_STAT_INC(net, found);
                        local_bh_enable();
                        return h;
                }
                NF_CT_STAT_INC(net, searched);
        }
        /*
         * if the nulls value we got at the end of this lookup is
         * not the expected one, we must restart lookup.
         * We probably met an item that was moved to another chain.
         */
        if (get_nulls_value(n) != bucket) {
                NF_CT_STAT_INC(net, search_restart);
                goto begin;
        }
        local_bh_enable();

        return NULL;
}

/* Find a connection corresponding to a tuple. */
static struct nf_conntrack_tuple_hash *
__nf_conntrack_find_get(struct net *net, const struct nf_conntrack_zone *zone,
                        const struct nf_conntrack_tuple *tuple, u32 hash)
{
        struct nf_conntrack_tuple_hash *h;
        struct nf_conn *ct;

        rcu_read_lock();
begin:
        h = ____nf_conntrack_find(net, zone, tuple, hash);
        if (h) {
                ct = nf_ct_tuplehash_to_ctrack(h);
                if (unlikely(nf_ct_is_dying(ct) ||
                             !atomic_inc_not_zero(&ct->ct_general.use)))
                        h = NULL;
                else {
                        if (unlikely(!nf_ct_key_equal(h, tuple, zone))) {
                                nf_ct_put(ct);
                                goto begin;
                        }
                }
        }
        rcu_read_unlock();

        return h;
}

struct nf_conntrack_tuple_hash *
nf_conntrack_find_get(struct net *net, const struct nf_conntrack_zone *zone,
                      const struct nf_conntrack_tuple *tuple)
{
        return __nf_conntrack_find_get(net, zone, tuple,
                                       hash_conntrack_raw(tuple));
}
EXPORT_SYMBOL_GPL(nf_conntrack_find_get);

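/* Add both directions of the conntrack to their hash buckets; the caller
 * must hold the corresponding bucket locks.
 */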
static void __nf_conntrack_hash_insert(struct nf_conn *ct,
                                       unsigned int hash,
                                       unsigned int reply_hash)
{
        struct net *net = nf_ct_net(ct);

        hlist_nulls_add_head_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode,
                           &net->ct.hash[hash]);
        hlist_nulls_add_head_rcu(&ct->tuplehash[IP_CT_DIR_REPLY].hnnode,
                           &net->ct.hash[reply_hash]);
}

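/* Insert a fully initialized conntrack into the hash table, failing with
 * -EEXIST if an equal tuple already exists in either direction.
 */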
int
nf_conntrack_hash_check_insert(struct nf_conn *ct)
{
        const struct nf_conntrack_zone *zone;
        struct net *net = nf_ct_net(ct);
        unsigned int hash, reply_hash;
        struct nf_conntrack_tuple_hash *h;
        struct hlist_nulls_node *n;
        unsigned int sequence;

        zone = nf_ct_zone(ct);

        local_bh_disable();
        do {
                sequence = read_seqcount_begin(&net->ct.generation);
                hash = hash_conntrack(net,
                                      &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
                reply_hash = hash_conntrack(net,
                                           &ct->tuplehash[IP_CT_DIR_REPLY].tuple);
        } while (nf_conntrack_double_lock(net, hash, reply_hash, sequence));

        /* See if there's one in the list already, including reverse */
        hlist_nulls_for_each_entry(h, n, &net->ct.hash[hash], hnnode)
                if (nf_ct_tuple_equal(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
                                      &h->tuple) &&
                    nf_ct_zone_equal(nf_ct_tuplehash_to_ctrack(h), zone,
                                     NF_CT_DIRECTION(h)))
                        goto out;
        hlist_nulls_for_each_entry(h, n, &net->ct.hash[reply_hash], hnnode)
                if (nf_ct_tuple_equal(&ct->tuplehash[IP_CT_DIR_REPLY].tuple,
                                      &h->tuple) &&
                    nf_ct_zone_equal(nf_ct_tuplehash_to_ctrack(h), zone,
                                     NF_CT_DIRECTION(h)))
                        goto out;

        add_timer(&ct->timeout);
        smp_wmb();
        /* The caller holds a reference to this object */
        atomic_set(&ct->ct_general.use, 2);
        __nf_conntrack_hash_insert(ct, hash, reply_hash);
        nf_conntrack_double_unlock(hash, reply_hash);
        NF_CT_STAT_INC(net, insert);
        local_bh_enable();
        return 0;

out:
        nf_conntrack_double_unlock(hash, reply_hash);
        NF_CT_STAT_INC(net, insert_failed);
        local_bh_enable();
        return -EEXIST;
}
EXPORT_SYMBOL_GPL(nf_conntrack_hash_check_insert);

/* Confirm a connection given skb; places it in hash table */
int
__nf_conntrack_confirm(struct sk_buff *skb)
{
        const struct nf_conntrack_zone *zone;
        unsigned int hash, reply_hash;
        struct nf_conntrack_tuple_hash *h;
        struct nf_conn *ct;
        struct nf_conn_help *help;
        struct nf_conn_tstamp *tstamp;
        struct hlist_nulls_node *n;
        enum ip_conntrack_info ctinfo;
        struct net *net;
        unsigned int sequence;

        ct = nf_ct_get(skb, &ctinfo);
        net = nf_ct_net(ct);

        /* ipt_REJECT uses nf_conntrack_attach to attach related
           ICMP/TCP RST packets in the other direction.  The actual packet
           which created the connection will be IP_CT_NEW, or IP_CT_RELATED
           for an expected connection. */
        if (CTINFO2DIR(ctinfo) != IP_CT_DIR_ORIGINAL)
                return NF_ACCEPT;

        zone = nf_ct_zone(ct);
        local_bh_disable();

        do {
                sequence = read_seqcount_begin(&net->ct.generation);
                /* reuse the hash saved before */
                hash = *(unsigned long *)&ct->tuplehash[IP_CT_DIR_REPLY].hnnode.pprev;
                hash = hash_bucket(hash, net);
                reply_hash = hash_conntrack(net,
                                           &ct->tuplehash[IP_CT_DIR_REPLY].tuple);

        } while (nf_conntrack_double_lock(net, hash, reply_hash, sequence));

        /* We're not in hash table, and we refuse to set up related
         * connections for unconfirmed conns.  But packet copies and
         * REJECT will give spurious warnings here.
         */
        /* NF_CT_ASSERT(atomic_read(&ct->ct_general.use) == 1); */

        /* No external references means no one else could have
         * confirmed us.
         */
        NF_CT_ASSERT(!nf_ct_is_confirmed(ct));
        pr_debug("Confirming conntrack %p\n", ct);
        /* We have to check the DYING flag after unlink to prevent
         * a race against nf_ct_get_next_corpse() possibly called from
         * user context, else we insert an already 'dead' hash, blocking
         * further use of that particular connection -JM.
         */
        nf_ct_del_from_dying_or_unconfirmed_list(ct);

        if (unlikely(nf_ct_is_dying(ct)))
                goto out;

        /* See if there's one in the list already, including reverse:
           NAT could have grabbed it without realizing, since we're
           not in the hash.  If there is, we lost race. */
        hlist_nulls_for_each_entry(h, n, &net->ct.hash[hash], hnnode)
                if (nf_ct_tuple_equal(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
                                      &h->tuple) &&
                    nf_ct_zone_equal(nf_ct_tuplehash_to_ctrack(h), zone,
                                     NF_CT_DIRECTION(h)))
                        goto out;
        hlist_nulls_for_each_entry(h, n, &net->ct.hash[reply_hash], hnnode)
                if (nf_ct_tuple_equal(&ct->tuplehash[IP_CT_DIR_REPLY].tuple,
                                      &h->tuple) &&
                    nf_ct_zone_equal(nf_ct_tuplehash_to_ctrack(h), zone,
                                     NF_CT_DIRECTION(h)))
                        goto out;

        /* Timer relative to confirmation time, not original
           setting time, otherwise we'd get timer wrap in
           weird delay cases. */
        ct->timeout.expires += jiffies;
        add_timer(&ct->timeout);
        atomic_inc(&ct->ct_general.use);
        ct->status |= IPS_CONFIRMED;

        /* set conntrack timestamp, if enabled. */
        tstamp = nf_conn_tstamp_find(ct);
        if (tstamp) {
                if (skb->tstamp.tv64 == 0)
                        __net_timestamp(skb);

                tstamp->start = ktime_to_ns(skb->tstamp);
        }
        /* Since the lookup is lockless, hash insertion must be done after
         * starting the timer and setting the CONFIRMED bit. The RCU barriers
         * guarantee that no other CPU can find the conntrack before the above
         * stores are visible.
         */
        __nf_conntrack_hash_insert(ct, hash, reply_hash);
        nf_conntrack_double_unlock(hash, reply_hash);
        NF_CT_STAT_INC(net, insert);
        local_bh_enable();

        help = nfct_help(ct);
        if (help && help->helper)
                nf_conntrack_event_cache(IPCT_HELPER, ct);

        nf_conntrack_event_cache(master_ct(ct) ?
                                 IPCT_RELATED : IPCT_NEW, ct);
        return NF_ACCEPT;

out:
        nf_ct_add_to_dying_list(ct);
        nf_conntrack_double_unlock(hash, reply_hash);
        NF_CT_STAT_INC(net, insert_failed);
        local_bh_enable();
        return NF_DROP;
}
EXPORT_SYMBOL_GPL(__nf_conntrack_confirm);

/* Returns true if a connection corresponds to the tuple (required
   for NAT). */
int
nf_conntrack_tuple_taken(const struct nf_conntrack_tuple *tuple,
                         const struct nf_conn *ignored_conntrack)
{
        struct net *net = nf_ct_net(ignored_conntrack);
        const struct nf_conntrack_zone *zone;
        struct nf_conntrack_tuple_hash *h;
        struct hlist_nulls_node *n;
        struct nf_conn *ct;
        unsigned int hash;

        zone = nf_ct_zone(ignored_conntrack);
        hash = hash_conntrack(net, tuple);

        /* Disable BHs the entire time since we need to disable them at
         * least once for the stats anyway.
         */
        rcu_read_lock_bh();
        hlist_nulls_for_each_entry_rcu(h, n, &net->ct.hash[hash], hnnode) {
                ct = nf_ct_tuplehash_to_ctrack(h);
                if (ct != ignored_conntrack &&
                    nf_ct_tuple_equal(tuple, &h->tuple) &&
                    nf_ct_zone_equal(ct, zone, NF_CT_DIRECTION(h))) {
                        NF_CT_STAT_INC(net, found);
                        rcu_read_unlock_bh();
                        return 1;
                }
                NF_CT_STAT_INC(net, searched);
        }
        rcu_read_unlock_bh();

        return 0;
}
EXPORT_SYMBOL_GPL(nf_conntrack_tuple_taken);

#define NF_CT_EVICTION_RANGE    8

/* There's a small race here where we may free a just-assured
   connection.  Too bad: we're in trouble anyway. */
static noinline int early_drop(struct net *net, unsigned int _hash)
{
        /* Use oldest entry, which is roughly LRU */
        struct nf_conntrack_tuple_hash *h;
        struct nf_conn *ct = NULL, *tmp;
        struct hlist_nulls_node *n;
        unsigned int i = 0, cnt = 0;
        int dropped = 0;
        unsigned int hash, sequence;
        spinlock_t *lockp;

        local_bh_disable();
restart:
        sequence = read_seqcount_begin(&net->ct.generation);
        hash = hash_bucket(_hash, net);
        for (; i < net->ct.htable_size; i++) {
                lockp = &nf_conntrack_locks[hash % CONNTRACK_LOCKS];
                nf_conntrack_lock(lockp);
                if (read_seqcount_retry(&net->ct.generation, sequence)) {
                        spin_unlock(lockp);
                        goto restart;
                }
                hlist_nulls_for_each_entry_rcu(h, n, &net->ct.hash[hash],
                                         hnnode) {
                        tmp = nf_ct_tuplehash_to_ctrack(h);
                        if (!test_bit(IPS_ASSURED_BIT, &tmp->status) &&
                            !nf_ct_is_dying(tmp) &&
                            atomic_inc_not_zero(&tmp->ct_general.use)) {
                                ct = tmp;
                                break;
                        }
                        cnt++;
                }

                hash = (hash + 1) % net->ct.htable_size;
                spin_unlock(lockp);

                if (ct || cnt >= NF_CT_EVICTION_RANGE)
                        break;

        }
        local_bh_enable();

        if (!ct)
                return dropped;

        if (del_timer(&ct->timeout)) {
                if (nf_ct_delete(ct, 0, 0)) {
                        dropped = 1;
                        NF_CT_STAT_INC_ATOMIC(net, early_drop);
                }
        }
        nf_ct_put(ct);
        return dropped;
}

void init_nf_conntrack_hash_rnd(void)
{
        unsigned int rand;

        /*
         * Why not initialize nf_conntrack_hash_rnd in an init() function?
         * Because there isn't enough entropy while the system is initializing,
         * so we initialize it as late as possible.
         */
        do {
                get_random_bytes(&rand, sizeof(rand));
        } while (!rand);
        cmpxchg(&nf_conntrack_hash_rnd, 0, rand);
}

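/* Allocate a conntrack from the SLAB_DESTROY_BY_RCU cache.  If the table is
 * above nf_conntrack_max, try to evict an entry via early_drop() first.
 * The refcount is left at zero until the object is put on a list.
 */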
static struct nf_conn *
__nf_conntrack_alloc(struct net *net,
                     const struct nf_conntrack_zone *zone,
                     const struct nf_conntrack_tuple *orig,
                     const struct nf_conntrack_tuple *repl,
                     gfp_t gfp, u32 hash)
{
        struct nf_conn *ct;

        if (unlikely(!nf_conntrack_hash_rnd)) {
                init_nf_conntrack_hash_rnd();
                /* recompute the hash as nf_conntrack_hash_rnd is initialized */
                hash = hash_conntrack_raw(orig);
        }

        /* We don't want any race condition at early drop stage */
        atomic_inc(&net->ct.count);

        if (nf_conntrack_max &&
            unlikely(atomic_read(&net->ct.count) > nf_conntrack_max)) {
                if (!early_drop(net, hash)) {
                        atomic_dec(&net->ct.count);
                        net_warn_ratelimited("nf_conntrack: table full, dropping packet\n");
                        return ERR_PTR(-ENOMEM);
                }
        }

        /*
         * Do not use kmem_cache_zalloc(), as this cache uses
         * SLAB_DESTROY_BY_RCU.
         */
        ct = kmem_cache_alloc(net->ct.nf_conntrack_cachep, gfp);
        if (ct == NULL)
                goto out;

        spin_lock_init(&ct->lock);
        ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple = *orig;
        ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode.pprev = NULL;
        ct->tuplehash[IP_CT_DIR_REPLY].tuple = *repl;
        /* save hash for reusing when confirming */
        *(unsigned long *)(&ct->tuplehash[IP_CT_DIR_REPLY].hnnode.pprev) = hash;
        ct->status = 0;
        /* Don't set timer yet: wait for confirmation */
        setup_timer(&ct->timeout, death_by_timeout, (unsigned long)ct);
        write_pnet(&ct->ct_net, net);
        memset(&ct->__nfct_init_offset[0], 0,
               offsetof(struct nf_conn, proto) -
               offsetof(struct nf_conn, __nfct_init_offset[0]));

        if (zone && nf_ct_zone_add(ct, GFP_ATOMIC, zone) < 0)
                goto out_free;

        /* Because we use RCU lookups, we set ct_general.use to zero before
         * this is inserted in any list.
         */
        atomic_set(&ct->ct_general.use, 0);
        return ct;
out_free:
        kmem_cache_free(net->ct.nf_conntrack_cachep, ct);
out:
        atomic_dec(&net->ct.count);
        return ERR_PTR(-ENOMEM);
}

struct nf_conn *nf_conntrack_alloc(struct net *net,
                                   const struct nf_conntrack_zone *zone,
                                   const struct nf_conntrack_tuple *orig,
                                   const struct nf_conntrack_tuple *repl,
                                   gfp_t gfp)
{
        return __nf_conntrack_alloc(net, zone, orig, repl, gfp, 0);
}
EXPORT_SYMBOL_GPL(nf_conntrack_alloc);

void nf_conntrack_free(struct nf_conn *ct)
{
        struct net *net = nf_ct_net(ct);

        /* A freed object has refcnt == 0, that's
         * the golden rule for SLAB_DESTROY_BY_RCU
         */
        NF_CT_ASSERT(atomic_read(&ct->ct_general.use) == 0);

        nf_ct_ext_destroy(ct);
        nf_ct_ext_free(ct);
        kmem_cache_free(net->ct.nf_conntrack_cachep, ct);
        smp_mb__before_atomic();
        atomic_dec(&net->ct.count);
}
EXPORT_SYMBOL_GPL(nf_conntrack_free);


/* Allocate a new conntrack: we return -ENOMEM if classification
   failed due to stress.  Otherwise it really is unclassifiable. */
static struct nf_conntrack_tuple_hash *
init_conntrack(struct net *net, struct nf_conn *tmpl,
               const struct nf_conntrack_tuple *tuple,
               struct nf_conntrack_l3proto *l3proto,
               struct nf_conntrack_l4proto *l4proto,
               struct sk_buff *skb,
               unsigned int dataoff, u32 hash)
{
        struct nf_conn *ct;
        struct nf_conn_help *help;
        struct nf_conntrack_tuple repl_tuple;
        struct nf_conntrack_ecache *ecache;
        struct nf_conntrack_expect *exp = NULL;
        const struct nf_conntrack_zone *zone;
        struct nf_conn_timeout *timeout_ext;
        struct nf_conntrack_zone tmp;
        unsigned int *timeouts;

        if (!nf_ct_invert_tuple(&repl_tuple, tuple, l3proto, l4proto)) {
                pr_debug("Can't invert tuple.\n");
                return NULL;
        }

        zone = nf_ct_zone_tmpl(tmpl, skb, &tmp);
        ct = __nf_conntrack_alloc(net, zone, tuple, &repl_tuple, GFP_ATOMIC,
                                  hash);
        if (IS_ERR(ct))
                return (struct nf_conntrack_tuple_hash *)ct;

        if (tmpl && nfct_synproxy(tmpl)) {
                nfct_seqadj_ext_add(ct);
                nfct_synproxy_ext_add(ct);
        }

        timeout_ext = tmpl ? nf_ct_timeout_find(tmpl) : NULL;
        if (timeout_ext) {
                timeouts = nf_ct_timeout_data(timeout_ext);
                if (unlikely(!timeouts))
                        timeouts = l4proto->get_timeouts(net);
        } else {
                timeouts = l4proto->get_timeouts(net);
        }

        if (!l4proto->new(ct, skb, dataoff, timeouts)) {
                nf_conntrack_free(ct);
                pr_debug("init conntrack: can't track with proto module\n");
                return NULL;
        }

        if (timeout_ext)
                nf_ct_timeout_ext_add(ct, rcu_dereference(timeout_ext->timeout),
                                      GFP_ATOMIC);

        nf_ct_acct_ext_add(ct, GFP_ATOMIC);
        nf_ct_tstamp_ext_add(ct, GFP_ATOMIC);
        nf_ct_labels_ext_add(ct);

        ecache = tmpl ? nf_ct_ecache_find(tmpl) : NULL;
        nf_ct_ecache_ext_add(ct, ecache ? ecache->ctmask : 0,
                                 ecache ? ecache->expmask : 0,
                             GFP_ATOMIC);

        local_bh_disable();
        if (net->ct.expect_count) {
                spin_lock(&nf_conntrack_expect_lock);
                exp = nf_ct_find_expectation(net, zone, tuple);
                if (exp) {
                        pr_debug("conntrack: expectation arrives ct=%p exp=%p\n",
                                 ct, exp);
                        /* Welcome, Mr. Bond.  We've been expecting you... */
                        __set_bit(IPS_EXPECTED_BIT, &ct->status);
                        /* exp->master safe, refcnt bumped in nf_ct_find_expectation */
                        ct->master = exp->master;
                        if (exp->helper) {
                                help = nf_ct_helper_ext_add(ct, exp->helper,
                                                            GFP_ATOMIC);
                                if (help)
                                        rcu_assign_pointer(help->helper, exp->helper);
                        }

#ifdef CONFIG_NF_CONNTRACK_MARK
                        ct->mark = exp->master->mark;
#endif
#ifdef CONFIG_NF_CONNTRACK_SECMARK
                        ct->secmark = exp->master->secmark;
#endif
                        NF_CT_STAT_INC(net, expect_new);
                }
                spin_unlock(&nf_conntrack_expect_lock);
        }
        if (!exp) {
                __nf_ct_try_assign_helper(ct, tmpl, GFP_ATOMIC);
                NF_CT_STAT_INC(net, new);
        }

        /* Now it is inserted into the unconfirmed list, bump refcount */
        nf_conntrack_get(&ct->ct_general);
        nf_ct_add_to_unconfirmed_list(ct);

        local_bh_enable();

        if (exp) {
                if (exp->expectfn)
                        exp->expectfn(ct, exp);
                nf_ct_expect_put(exp);
        }

        return &ct->tuplehash[IP_CT_DIR_ORIGINAL];
}

/* On success, returns conntrack ptr, sets skb->nfct and ctinfo */
static inline struct nf_conn *
resolve_normal_ct(struct net *net, struct nf_conn *tmpl,
                  struct sk_buff *skb,
                  unsigned int dataoff,
                  u_int16_t l3num,
                  u_int8_t protonum,
                  struct nf_conntrack_l3proto *l3proto,
                  struct nf_conntrack_l4proto *l4proto,
                  int *set_reply,
                  enum ip_conntrack_info *ctinfo)
{
        const struct nf_conntrack_zone *zone;
        struct nf_conntrack_tuple tuple;
        struct nf_conntrack_tuple_hash *h;
        struct nf_conntrack_zone tmp;
        struct nf_conn *ct;
        u32 hash;

        if (!nf_ct_get_tuple(skb, skb_network_offset(skb),
                             dataoff, l3num, protonum, net, &tuple, l3proto,
                             l4proto)) {
                pr_debug("resolve_normal_ct: Can't get tuple\n");
                return NULL;
        }

        /* look for tuple match */
        zone = nf_ct_zone_tmpl(tmpl, skb, &tmp);
        hash = hash_conntrack_raw(&tuple);
        h = __nf_conntrack_find_get(net, zone, &tuple, hash);
        if (!h) {
                h = init_conntrack(net, tmpl, &tuple, l3proto, l4proto,
                                   skb, dataoff, hash);
                if (!h)
                        return NULL;
                if (IS_ERR(h))
                        return (void *)h;
        }
        ct = nf_ct_tuplehash_to_ctrack(h);

        /* It exists; we have (non-exclusive) reference. */
        if (NF_CT_DIRECTION(h) == IP_CT_DIR_REPLY) {
                *ctinfo = IP_CT_ESTABLISHED_REPLY;
                /* Please set reply bit if this packet OK */
                *set_reply = 1;
        } else {
                /* Once we've had two way comms, always ESTABLISHED. */
                if (test_bit(IPS_SEEN_REPLY_BIT, &ct->status)) {
                        pr_debug("nf_conntrack_in: normal packet for %p\n", ct);
                        *ctinfo = IP_CT_ESTABLISHED;
                } else if (test_bit(IPS_EXPECTED_BIT, &ct->status)) {
                        pr_debug("nf_conntrack_in: related packet for %p\n",
                                 ct);
                        *ctinfo = IP_CT_RELATED;
                } else {
                        pr_debug("nf_conntrack_in: new packet for %p\n", ct);
                        *ctinfo = IP_CT_NEW;
                }
                *set_reply = 0;
        }
        skb->nfct = &ct->ct_general;
        skb->nfctinfo = *ctinfo;
        return ct;
}

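/* Main conntrack hook: find the l3/l4 protocol trackers for the packet,
 * look up (or create) its conntrack and run the per-protocol state machine.
 */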
unsigned int
nf_conntrack_in(struct net *net, u_int8_t pf, unsigned int hooknum,
                struct sk_buff *skb)
{
        struct nf_conn *ct, *tmpl = NULL;
        enum ip_conntrack_info ctinfo;
        struct nf_conntrack_l3proto *l3proto;
        struct nf_conntrack_l4proto *l4proto;
        unsigned int *timeouts;
        unsigned int dataoff;
        u_int8_t protonum;
        int set_reply = 0;
        int ret;

        if (skb->nfct) {
                /* Previously seen (loopback or untracked)?  Ignore. */
                tmpl = (struct nf_conn *)skb->nfct;
                if (!nf_ct_is_template(tmpl)) {
                        NF_CT_STAT_INC_ATOMIC(net, ignore);
                        return NF_ACCEPT;
                }
                skb->nfct = NULL;
        }

        /* rcu_read_lock()ed by nf_hook_slow */
        l3proto = __nf_ct_l3proto_find(pf);
        ret = l3proto->get_l4proto(skb, skb_network_offset(skb),
                                   &dataoff, &protonum);
        if (ret <= 0) {
                pr_debug("not prepared to track yet or error occurred\n");
                NF_CT_STAT_INC_ATOMIC(net, error);
                NF_CT_STAT_INC_ATOMIC(net, invalid);
                ret = -ret;
                goto out;
        }

        l4proto = __nf_ct_l4proto_find(pf, protonum);

        /* It may be a special packet: error, unclean...
         * The inverse of the return code tells the netfilter
         * core what to do with the packet. */
        if (l4proto->error != NULL) {
                ret = l4proto->error(net, tmpl, skb, dataoff, &ctinfo,
                                     pf, hooknum);
                if (ret <= 0) {
                        NF_CT_STAT_INC_ATOMIC(net, error);
                        NF_CT_STAT_INC_ATOMIC(net, invalid);
                        ret = -ret;
                        goto out;
                }
                /* ICMP[v6] protocol trackers may assign one conntrack. */
                if (skb->nfct)
                        goto out;
        }

        ct = resolve_normal_ct(net, tmpl, skb, dataoff, pf, protonum,
                               l3proto, l4proto, &set_reply, &ctinfo);
        if (!ct) {
                /* Not valid part of a connection */
                NF_CT_STAT_INC_ATOMIC(net, invalid);
                ret = NF_ACCEPT;
                goto out;
        }

        if (IS_ERR(ct)) {
                /* Too stressed to deal. */
                NF_CT_STAT_INC_ATOMIC(net, drop);
                ret = NF_DROP;
                goto out;
        }

        NF_CT_ASSERT(skb->nfct);

        /* Decide what timeout policy we want to apply to this flow. */
        timeouts = nf_ct_timeout_lookup(net, ct, l4proto);

        ret = l4proto->packet(ct, skb, dataoff, ctinfo, pf, hooknum, timeouts);
        if (ret <= 0) {
                /* Invalid: inverse of the return code tells
                 * the netfilter core what to do */
                pr_debug("nf_conntrack_in: Can't track with proto module\n");
                nf_conntrack_put(skb->nfct);
                skb->nfct = NULL;
                NF_CT_STAT_INC_ATOMIC(net, invalid);
                if (ret == -NF_DROP)
                        NF_CT_STAT_INC_ATOMIC(net, drop);
                ret = -ret;
                goto out;
        }

        if (set_reply && !test_and_set_bit(IPS_SEEN_REPLY_BIT, &ct->status))
                nf_conntrack_event_cache(IPCT_REPLY, ct);
out:
        if (tmpl) {
                /* Special case: we have to repeat this hook, assign the
                 * template again to this packet. We assume that this packet
                 * has no conntrack assigned. This is used by nf_ct_tcp. */
                if (ret == NF_REPEAT)
                        skb->nfct = (struct nf_conntrack *)tmpl;
                else
                        nf_ct_put(tmpl);
        }

        return ret;
}
EXPORT_SYMBOL_GPL(nf_conntrack_in);

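/* Invert a tuple using the registered l3/l4 protocol handlers, under RCU. */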
bool nf_ct_invert_tuplepr(struct nf_conntrack_tuple *inverse,
                          const struct nf_conntrack_tuple *orig)
{
        bool ret;

        rcu_read_lock();
        ret = nf_ct_invert_tuple(inverse, orig,
                                 __nf_ct_l3proto_find(orig->src.l3num),
                                 __nf_ct_l4proto_find(orig->src.l3num,
                                                      orig->dst.protonum));
        rcu_read_unlock();
        return ret;
}
EXPORT_SYMBOL_GPL(nf_ct_invert_tuplepr);

/* Alter reply tuple (maybe alter helper).  This is for NAT, and is
   implicitly racy: see __nf_conntrack_confirm */
void nf_conntrack_alter_reply(struct nf_conn *ct,
                              const struct nf_conntrack_tuple *newreply)
{
        struct nf_conn_help *help = nfct_help(ct);

        /* Should be unconfirmed, so not in hash table yet */
        NF_CT_ASSERT(!nf_ct_is_confirmed(ct));

        pr_debug("Altering reply tuple of %p to ", ct);
        nf_ct_dump_tuple(newreply);

        ct->tuplehash[IP_CT_DIR_REPLY].tuple = *newreply;
        if (ct->master || (help && !hlist_empty(&help->expectations)))
                return;

        rcu_read_lock();
        __nf_ct_try_assign_helper(ct, NULL, GFP_ATOMIC);
        rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(nf_conntrack_alter_reply);

/* Refresh conntrack for this many jiffies and do accounting if do_acct is 1 */
void __nf_ct_refresh_acct(struct nf_conn *ct,
                          enum ip_conntrack_info ctinfo,
                          const struct sk_buff *skb,
                          unsigned long extra_jiffies,
                          int do_acct)
{
        NF_CT_ASSERT(ct->timeout.data == (unsigned long)ct);
        NF_CT_ASSERT(skb);

        /* Only update if this is not a fixed timeout */
        if (test_bit(IPS_FIXED_TIMEOUT_BIT, &ct->status))
                goto acct;

        /* If not in hash table, timer will not be active yet */
        if (!nf_ct_is_confirmed(ct)) {
                ct->timeout.expires = extra_jiffies;
        } else {
                unsigned long newtime = jiffies + extra_jiffies;

                /* Only update the timeout if the new timeout is at least
                   HZ jiffies from the old timeout. Need del_timer for race
                   avoidance (may already be dying). */
                if (newtime - ct->timeout.expires >= HZ)
                        mod_timer_pending(&ct->timeout, newtime);
        }

acct:
        if (do_acct) {
                struct nf_conn_acct *acct;

                acct = nf_conn_acct_find(ct);
                if (acct) {
                        struct nf_conn_counter *counter = acct->counter;

                        atomic64_inc(&counter[CTINFO2DIR(ctinfo)].packets);
                        atomic64_add(skb->len, &counter[CTINFO2DIR(ctinfo)].bytes);
                }
        }
}
EXPORT_SYMBOL_GPL(__nf_ct_refresh_acct);

bool __nf_ct_kill_acct(struct nf_conn *ct,
                       enum ip_conntrack_info ctinfo,
                       const struct sk_buff *skb,
                       int do_acct)
{
        if (do_acct) {
                struct nf_conn_acct *acct;

                acct = nf_conn_acct_find(ct);
                if (acct) {
                        struct nf_conn_counter *counter = acct->counter;

                        atomic64_inc(&counter[CTINFO2DIR(ctinfo)].packets);
                        atomic64_add(skb->len - skb_network_offset(skb),
                                     &counter[CTINFO2DIR(ctinfo)].bytes);
                }
        }

        if (del_timer(&ct->timeout)) {
                ct->timeout.function((unsigned long)ct);
                return true;
        }
        return false;
}
EXPORT_SYMBOL_GPL(__nf_ct_kill_acct);

#ifdef CONFIG_NF_CONNTRACK_ZONES
static struct nf_ct_ext_type nf_ct_zone_extend __read_mostly = {
        .len    = sizeof(struct nf_conntrack_zone),
        .align  = __alignof__(struct nf_conntrack_zone),
        .id     = NF_CT_EXT_ZONE,
};
#endif

#if IS_ENABLED(CONFIG_NF_CT_NETLINK)

#include <linux/netfilter/nfnetlink.h>
#include <linux/netfilter/nfnetlink_conntrack.h>
#include <linux/mutex.h>

/* Generic function for tcp/udp/sctp/dccp and the like. This needs to be
 * in nf_conntrack_core, since we don't want the protocols to autoload
 * or depend on ctnetlink. */
int nf_ct_port_tuple_to_nlattr(struct sk_buff *skb,
                               const struct nf_conntrack_tuple *tuple)
{
        if (nla_put_be16(skb, CTA_PROTO_SRC_PORT, tuple->src.u.tcp.port) ||
            nla_put_be16(skb, CTA_PROTO_DST_PORT, tuple->dst.u.tcp.port))
                goto nla_put_failure;
        return 0;

nla_put_failure:
        return -1;
}
EXPORT_SYMBOL_GPL(nf_ct_port_tuple_to_nlattr);

const struct nla_policy nf_ct_port_nla_policy[CTA_PROTO_MAX+1] = {
        [CTA_PROTO_SRC_PORT]  = { .type = NLA_U16 },
        [CTA_PROTO_DST_PORT]  = { .type = NLA_U16 },
};
EXPORT_SYMBOL_GPL(nf_ct_port_nla_policy);

int nf_ct_port_nlattr_to_tuple(struct nlattr *tb[],
                               struct nf_conntrack_tuple *t)
{
        if (!tb[CTA_PROTO_SRC_PORT] || !tb[CTA_PROTO_DST_PORT])
                return -EINVAL;

        t->src.u.tcp.port = nla_get_be16(tb[CTA_PROTO_SRC_PORT]);
        t->dst.u.tcp.port = nla_get_be16(tb[CTA_PROTO_DST_PORT]);

        return 0;
}
EXPORT_SYMBOL_GPL(nf_ct_port_nlattr_to_tuple);

int nf_ct_port_nlattr_tuple_size(void)
{
        return nla_policy_len(nf_ct_port_nla_policy, CTA_PROTO_MAX + 1);
}
EXPORT_SYMBOL_GPL(nf_ct_port_nlattr_tuple_size);
#endif

/* Used by ipt_REJECT and ip6t_REJECT. */
static void nf_conntrack_attach(struct sk_buff *nskb, const struct sk_buff *skb)
{
        struct nf_conn *ct;
        enum ip_conntrack_info ctinfo;

        /* This ICMP is in reverse direction to the packet which caused it */
        ct = nf_ct_get(skb, &ctinfo);
        if (CTINFO2DIR(ctinfo) == IP_CT_DIR_ORIGINAL)
                ctinfo = IP_CT_RELATED_REPLY;
        else
                ctinfo = IP_CT_RELATED;

        /* Attach to new skbuff, and increment count */
        nskb->nfct = &ct->ct_general;
        nskb->nfctinfo = ctinfo;
        nf_conntrack_get(nskb->nfct);
}

/* Bring out ya dead! */
static struct nf_conn *
get_next_corpse(struct net *net, int (*iter)(struct nf_conn *i, void *data),
                void *data, unsigned int *bucket)
{
        struct nf_conntrack_tuple_hash *h;
        struct nf_conn *ct;
        struct hlist_nulls_node *n;
        int cpu;
        spinlock_t *lockp;

        for (; *bucket < net->ct.htable_size; (*bucket)++) {
                lockp = &nf_conntrack_locks[*bucket % CONNTRACK_LOCKS];
                local_bh_disable();
                nf_conntrack_lock(lockp);
                if (*bucket < net->ct.htable_size) {
                        hlist_nulls_for_each_entry(h, n, &net->ct.hash[*bucket], hnnode) {
                                if (NF_CT_DIRECTION(h) != IP_CT_DIR_ORIGINAL)
                                        continue;
                                ct = nf_ct_tuplehash_to_ctrack(h);
                                if (iter(ct, data))
                                        goto found;
                        }
                }
                spin_unlock(lockp);
                local_bh_enable();
                cond_resched();
        }

        for_each_possible_cpu(cpu) {
                struct ct_pcpu *pcpu = per_cpu_ptr(net->ct.pcpu_lists, cpu);

                spin_lock_bh(&pcpu->lock);
                hlist_nulls_for_each_entry(h, n, &pcpu->unconfirmed, hnnode) {
                        ct = nf_ct_tuplehash_to_ctrack(h);
                        if (iter(ct, data))
                                set_bit(IPS_DYING_BIT, &ct->status);
                }
                spin_unlock_bh(&pcpu->lock);
                cond_resched();
        }
        return NULL;
found:
        atomic_inc(&ct->ct_general.use);
        spin_unlock(lockp);
        local_bh_enable();
        return ct;
}

void nf_ct_iterate_cleanup(struct net *net,
                           int (*iter)(struct nf_conn *i, void *data),
                           void *data, u32 portid, int report)
{
        struct nf_conn *ct;
        unsigned int bucket = 0;

        might_sleep();

        while ((ct = get_next_corpse(net, iter, data, &bucket)) != NULL) {
1446                 /* Time to push up daisies... */
1447                 if (del_timer(&ct->timeout))
1448                         nf_ct_delete(ct, portid, report);
1449
1450                 /* ... else the timer will get him soon. */
1451
1452                 nf_ct_put(ct);
1453                 cond_resched();
1454         }
1455 }
1456 EXPORT_SYMBOL_GPL(nf_ct_iterate_cleanup);
1457
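/* iter() callback that matches every entry; used to flush a namespace. */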
1458 static int kill_all(struct nf_conn *i, void *data)
1459 {
1460         return 1;
1461 }
1462
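/*
 * Free a table obtained from nf_ct_alloc_hashtable(): vfree() if it came
 * from vzalloc(), otherwise return the pages allocated by
 * __get_free_pages().
 */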
1463 void nf_ct_free_hashtable(void *hash, unsigned int size)
1464 {
1465         if (is_vmalloc_addr(hash))
1466                 vfree(hash);
1467         else
1468                 free_pages((unsigned long)hash,
1469                            get_order(sizeof(struct hlist_head) * size));
1470 }
1471 EXPORT_SYMBOL_GPL(nf_ct_free_hashtable);
1472
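/*
 * Count references to the per-cpu untracked conntracks beyond the base
 * reference each one holds; cleanup has to wait until this drops to zero.
 */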
1473 static int untrack_refs(void)
1474 {
1475         int cnt = 0, cpu;
1476
1477         for_each_possible_cpu(cpu) {
1478                 struct nf_conn *ct = &per_cpu(nf_conntrack_untracked, cpu);
1479
1480                 cnt += atomic_read(&ct->ct_general.use) - 1;
1481         }
1482         return cnt;
1483 }
1484
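/* First stage of module teardown: unpublish the attach hook used by the
 * REJECT targets so no further skbs get conntrack references attached. */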
1485 void nf_conntrack_cleanup_start(void)
1486 {
1487         RCU_INIT_POINTER(ip_ct_attach, NULL);
1488 }
1489
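/*
 * Final stage of module teardown: unpublish the destroy hook, wait for
 * all remaining references to the untracked conntracks to go away, then
 * unwind the global subsystems registered in nf_conntrack_init_start().
 */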
1490 void nf_conntrack_cleanup_end(void)
1491 {
1492         RCU_INIT_POINTER(nf_ct_destroy, NULL);
1493         while (untrack_refs() > 0)
1494                 schedule();
1495
1496 #ifdef CONFIG_NF_CONNTRACK_ZONES
1497         nf_ct_extend_unregister(&nf_ct_zone_extend);
1498 #endif
1499         nf_conntrack_proto_fini();
1500         nf_conntrack_seqadj_fini();
1501         nf_conntrack_labels_fini();
1502         nf_conntrack_helper_fini();
1503         nf_conntrack_timeout_fini();
1504         nf_conntrack_ecache_fini();
1505         nf_conntrack_tstamp_fini();
1506         nf_conntrack_acct_fini();
1507         nf_conntrack_expect_fini();
1508 }
1509
1510 /*
1511  * Mishearing the voices in his head, our hero wonders how he's
1512  * supposed to kill the mall.
1513  */
1514 void nf_conntrack_cleanup_net(struct net *net)
1515 {
1516         LIST_HEAD(single);
1517
1518         list_add(&net->exit_list, &single);
1519         nf_conntrack_cleanup_net_list(&single);
1520 }
1521
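/*
 * Tear down a batch of namespaces: flush every remaining conntrack entry
 * (retrying until net->ct.count reaches zero) and then release the
 * per-netns resources set up in nf_conntrack_init_net().
 */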
1522 void nf_conntrack_cleanup_net_list(struct list_head *net_exit_list)
1523 {
1524         int busy;
1525         struct net *net;
1526
1527         /*
1528          * This makes sure all current packets have passed through
1529          * the netfilter framework.  Roll on, two-stage module
1530          * delete...
1531          */
1532         synchronize_net();
1533 i_see_dead_people:
1534         busy = 0;
1535         list_for_each_entry(net, net_exit_list, exit_list) {
1536                 nf_ct_iterate_cleanup(net, kill_all, NULL, 0, 0);
1537                 if (atomic_read(&net->ct.count) != 0)
1538                         busy = 1;
1539         }
1540         if (busy) {
1541                 schedule();
1542                 goto i_see_dead_people;
1543         }
1544
1545         list_for_each_entry(net, net_exit_list, exit_list) {
1546                 nf_ct_free_hashtable(net->ct.hash, net->ct.htable_size);
1547                 nf_conntrack_proto_pernet_fini(net);
1548                 nf_conntrack_helper_pernet_fini(net);
1549                 nf_conntrack_ecache_pernet_fini(net);
1550                 nf_conntrack_tstamp_pernet_fini(net);
1551                 nf_conntrack_acct_pernet_fini(net);
1552                 nf_conntrack_expect_pernet_fini(net);
1553                 kmem_cache_destroy(net->ct.nf_conntrack_cachep);
1554                 kfree(net->ct.slabname);
1555                 free_percpu(net->ct.stat);
1556                 free_percpu(net->ct.pcpu_lists);
1557         }
1558 }
1559
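/*
 * Allocate a hash table whose slot count is rounded up to fill whole
 * pages (updating *sizep accordingly).  Page allocation is tried first,
 * falling back to vzalloc(), and the buckets are initialized as
 * hlist_nulls heads when @nulls is set.
 */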
1560 void *nf_ct_alloc_hashtable(unsigned int *sizep, int nulls)
1561 {
1562         struct hlist_nulls_head *hash;
1563         unsigned int nr_slots, i;
1564         size_t sz;
1565
1566         BUILD_BUG_ON(sizeof(struct hlist_nulls_head) != sizeof(struct hlist_head));
1567         nr_slots = *sizep = roundup(*sizep, PAGE_SIZE / sizeof(struct hlist_nulls_head));
1568         sz = nr_slots * sizeof(struct hlist_nulls_head);
1569         hash = (void *)__get_free_pages(GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO,
1570                                         get_order(sz));
1571         if (!hash)
1572                 hash = vzalloc(sz);
1573
1574         if (hash && nulls)
1575                 for (i = 0; i < nr_slots; i++)
1576                         INIT_HLIST_NULLS_HEAD(&hash[i], i);
1577
1578         return hash;
1579 }
1580 EXPORT_SYMBOL_GPL(nf_ct_alloc_hashtable);
1581
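/*
 * Handler for writes to the "hashsize" module parameter (wired up via
 * module_param_call() below, typically exposed as
 * /sys/module/nf_conntrack/parameters/hashsize), so the hash table of the
 * initial namespace can be resized at runtime: allocate a new table,
 * rehash every entry into it under the global conntrack lock, then free
 * the old table.
 */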
1582 int nf_conntrack_set_hashsize(const char *val, struct kernel_param *kp)
1583 {
1584         int i, bucket, rc;
1585         unsigned int hashsize, old_size;
1586         struct hlist_nulls_head *hash, *old_hash;
1587         struct nf_conntrack_tuple_hash *h;
1588         struct nf_conn *ct;
1589
1590         if (current->nsproxy->net_ns != &init_net)
1591                 return -EOPNOTSUPP;
1592
1593         /* On boot, we can set this without any fancy locking. */
1594         if (!nf_conntrack_htable_size)
1595                 return param_set_uint(val, kp);
1596
1597         rc = kstrtouint(val, 0, &hashsize);
1598         if (rc)
1599                 return rc;
1600         if (!hashsize)
1601                 return -EINVAL;
1602
1603         hash = nf_ct_alloc_hashtable(&hashsize, 1);
1604         if (!hash)
1605                 return -ENOMEM;
1606
1607         local_bh_disable();
1608         nf_conntrack_all_lock();
1609         write_seqcount_begin(&init_net.ct.generation);
1610
1611         /* Lookups in the old hash might happen in parallel, which means we
1612          * might get false negatives during connection lookup. New connections
1613          * created because of a false negative won't make it into the hash
1614          * though, since that requires taking the locks.
1615          */
1616
1617         for (i = 0; i < init_net.ct.htable_size; i++) {
1618                 while (!hlist_nulls_empty(&init_net.ct.hash[i])) {
1619                         h = hlist_nulls_entry(init_net.ct.hash[i].first,
1620                                         struct nf_conntrack_tuple_hash, hnnode);
1621                         ct = nf_ct_tuplehash_to_ctrack(h);
1622                         hlist_nulls_del_rcu(&h->hnnode);
1623                         bucket = __hash_conntrack(&h->tuple, hashsize);
1624                         hlist_nulls_add_head_rcu(&h->hnnode, &hash[bucket]);
1625                 }
1626         }
1627         old_size = init_net.ct.htable_size;
1628         old_hash = init_net.ct.hash;
1629
1630         init_net.ct.htable_size = nf_conntrack_htable_size = hashsize;
1631         init_net.ct.hash = hash;
1632
1633         write_seqcount_end(&init_net.ct.generation);
1634         nf_conntrack_all_unlock();
1635         local_bh_enable();
1636
1637         nf_ct_free_hashtable(old_hash, old_size);
1638         return 0;
1639 }
1640 EXPORT_SYMBOL_GPL(nf_conntrack_set_hashsize);
1641
1642 module_param_call(hashsize, nf_conntrack_set_hashsize, param_get_uint,
1643                   &nf_conntrack_htable_size, 0600);
1644
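/* OR the given status bits into every per-cpu untracked conntrack. */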
1645 void nf_ct_untracked_status_or(unsigned long bits)
1646 {
1647         int cpu;
1648
1649         for_each_possible_cpu(cpu)
1650                 per_cpu(nf_conntrack_untracked, cpu).status |= bits;
1651 }
1652 EXPORT_SYMBOL_GPL(nf_ct_untracked_status_or);
1653
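/*
 * Module-wide initialization: size the hash table from available memory
 * unless a size was given, register the extension subsystems, and set up
 * the per-cpu untracked conntracks.  Per-namespace state is handled
 * separately in nf_conntrack_init_net().
 */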
1654 int nf_conntrack_init_start(void)
1655 {
1656         int max_factor = 8;
1657         int i, ret, cpu;
1658
1659         for (i = 0; i < CONNTRACK_LOCKS; i++)
1660                 spin_lock_init(&nf_conntrack_locks[i]);
1661
1662         if (!nf_conntrack_htable_size) {
1663                 /* Idea from tcp.c: use 1/16384 of memory.
1664                  * On i386: 32MB machine has 512 buckets.
1665                  * >= 1GB machines have 16384 buckets.
1666                  * >= 4GB machines have 65536 buckets.
1667                  */
1668                 nf_conntrack_htable_size
1669                         = (((totalram_pages << PAGE_SHIFT) / 16384)
1670                            / sizeof(struct hlist_head));
1671                 if (totalram_pages > (4 * (1024 * 1024 * 1024 / PAGE_SIZE)))
1672                         nf_conntrack_htable_size = 65536;
1673                 else if (totalram_pages > (1024 * 1024 * 1024 / PAGE_SIZE))
1674                         nf_conntrack_htable_size = 16384;
1675                 if (nf_conntrack_htable_size < 32)
1676                         nf_conntrack_htable_size = 32;
1677
1678                 /* Use a maximum factor of four by default to get the same
1679                  * maximum as with the old struct list_heads. When a table
1680                  * size is given we use the old value of 8 to avoid reducing
1681                  * the maximum number of entries. */
1682                 max_factor = 4;
1683         }
1684         nf_conntrack_max = max_factor * nf_conntrack_htable_size;
1685
1686         printk(KERN_INFO "nf_conntrack version %s (%u buckets, %d max)\n",
1687                NF_CONNTRACK_VERSION, nf_conntrack_htable_size,
1688                nf_conntrack_max);
1689
1690         ret = nf_conntrack_expect_init();
1691         if (ret < 0)
1692                 goto err_expect;
1693
1694         ret = nf_conntrack_acct_init();
1695         if (ret < 0)
1696                 goto err_acct;
1697
1698         ret = nf_conntrack_tstamp_init();
1699         if (ret < 0)
1700                 goto err_tstamp;
1701
1702         ret = nf_conntrack_ecache_init();
1703         if (ret < 0)
1704                 goto err_ecache;
1705
1706         ret = nf_conntrack_timeout_init();
1707         if (ret < 0)
1708                 goto err_timeout;
1709
1710         ret = nf_conntrack_helper_init();
1711         if (ret < 0)
1712                 goto err_helper;
1713
1714         ret = nf_conntrack_labels_init();
1715         if (ret < 0)
1716                 goto err_labels;
1717
1718         ret = nf_conntrack_seqadj_init();
1719         if (ret < 0)
1720                 goto err_seqadj;
1721
1722 #ifdef CONFIG_NF_CONNTRACK_ZONES
1723         ret = nf_ct_extend_register(&nf_ct_zone_extend);
1724         if (ret < 0)
1725                 goto err_extend;
1726 #endif
1727         ret = nf_conntrack_proto_init();
1728         if (ret < 0)
1729                 goto err_proto;
1730
1731         /* Set up fake conntrack: to never be deleted, not in any hashes */
1732         for_each_possible_cpu(cpu) {
1733                 struct nf_conn *ct = &per_cpu(nf_conntrack_untracked, cpu);
1734                 write_pnet(&ct->ct_net, &init_net);
1735                 atomic_set(&ct->ct_general.use, 1);
1736         }
1737         /*  - and make it look like a confirmed connection */
1738         nf_ct_untracked_status_or(IPS_CONFIRMED | IPS_UNTRACKED);
1739         return 0;
1740
1741 err_proto:
1742 #ifdef CONFIG_NF_CONNTRACK_ZONES
1743         nf_ct_extend_unregister(&nf_ct_zone_extend);
1744 err_extend:
1745 #endif
1746         nf_conntrack_seqadj_fini();
1747 err_seqadj:
1748         nf_conntrack_labels_fini();
1749 err_labels:
1750         nf_conntrack_helper_fini();
1751 err_helper:
1752         nf_conntrack_timeout_fini();
1753 err_timeout:
1754         nf_conntrack_ecache_fini();
1755 err_ecache:
1756         nf_conntrack_tstamp_fini();
1757 err_tstamp:
1758         nf_conntrack_acct_fini();
1759 err_acct:
1760         nf_conntrack_expect_fini();
1761 err_expect:
1762         return ret;
1763 }
1764
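/* Publish the attach/destroy hooks once initialization has succeeded. */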
1765 void nf_conntrack_init_end(void)
1766 {
1767         /* For use by REJECT target */
1768         RCU_INIT_POINTER(ip_ct_attach, nf_conntrack_attach);
1769         RCU_INIT_POINTER(nf_ct_destroy, destroy_conntrack);
1770 }
1771
1772 /*
1773  * We need to use special "null" values, not used in the hash table
1774  */
1775 #define UNCONFIRMED_NULLS_VAL   ((1<<30)+0)
1776 #define DYING_NULLS_VAL         ((1<<30)+1)
1777 #define TEMPLATE_NULLS_VAL      ((1<<30)+2)
1778
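/*
 * Per-namespace initialization: per-cpu unconfirmed/dying lists and
 * statistics, a namespace-local nf_conn slab cache, the hash table, and
 * the per-netns state of each extension subsystem, unwound in reverse
 * order on failure.
 */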
1779 int nf_conntrack_init_net(struct net *net)
1780 {
1781         int ret = -ENOMEM;
1782         int cpu;
1783
1784         atomic_set(&net->ct.count, 0);
1785         seqcount_init(&net->ct.generation);
1786
1787         net->ct.pcpu_lists = alloc_percpu(struct ct_pcpu);
1788         if (!net->ct.pcpu_lists)
1789                 goto err_stat;
1790
1791         for_each_possible_cpu(cpu) {
1792                 struct ct_pcpu *pcpu = per_cpu_ptr(net->ct.pcpu_lists, cpu);
1793
1794                 spin_lock_init(&pcpu->lock);
1795                 INIT_HLIST_NULLS_HEAD(&pcpu->unconfirmed, UNCONFIRMED_NULLS_VAL);
1796                 INIT_HLIST_NULLS_HEAD(&pcpu->dying, DYING_NULLS_VAL);
1797         }
1798
1799         net->ct.stat = alloc_percpu(struct ip_conntrack_stat);
1800         if (!net->ct.stat)
1801                 goto err_pcpu_lists;
1802
1803         net->ct.slabname = kasprintf(GFP_KERNEL, "nf_conntrack_%p", net);
1804         if (!net->ct.slabname)
1805                 goto err_slabname;
1806
1807         net->ct.nf_conntrack_cachep = kmem_cache_create(net->ct.slabname,
1808                                                         sizeof(struct nf_conn), 0,
1809                                                         SLAB_DESTROY_BY_RCU, NULL);
1810         if (!net->ct.nf_conntrack_cachep) {
1811                 printk(KERN_ERR "Unable to create nf_conn slab cache\n");
1812                 goto err_cache;
1813         }
1814
1815         net->ct.htable_size = nf_conntrack_htable_size;
1816         net->ct.hash = nf_ct_alloc_hashtable(&net->ct.htable_size, 1);
1817         if (!net->ct.hash) {
1818                 printk(KERN_ERR "Unable to create nf_conntrack_hash\n");
1819                 goto err_hash;
1820         }
1821         ret = nf_conntrack_expect_pernet_init(net);
1822         if (ret < 0)
1823                 goto err_expect;
1824         ret = nf_conntrack_acct_pernet_init(net);
1825         if (ret < 0)
1826                 goto err_acct;
1827         ret = nf_conntrack_tstamp_pernet_init(net);
1828         if (ret < 0)
1829                 goto err_tstamp;
1830         ret = nf_conntrack_ecache_pernet_init(net);
1831         if (ret < 0)
1832                 goto err_ecache;
1833         ret = nf_conntrack_helper_pernet_init(net);
1834         if (ret < 0)
1835                 goto err_helper;
1836         ret = nf_conntrack_proto_pernet_init(net);
1837         if (ret < 0)
1838                 goto err_proto;
1839         return 0;
1840
1841 err_proto:
1842         nf_conntrack_helper_pernet_fini(net);
1843 err_helper:
1844         nf_conntrack_ecache_pernet_fini(net);
1845 err_ecache:
1846         nf_conntrack_tstamp_pernet_fini(net);
1847 err_tstamp:
1848         nf_conntrack_acct_pernet_fini(net);
1849 err_acct:
1850         nf_conntrack_expect_pernet_fini(net);
1851 err_expect:
1852         nf_ct_free_hashtable(net->ct.hash, net->ct.htable_size);
1853 err_hash:
1854         kmem_cache_destroy(net->ct.nf_conntrack_cachep);
1855 err_cache:
1856         kfree(net->ct.slabname);
1857 err_slabname:
1858         free_percpu(net->ct.stat);
1859 err_pcpu_lists:
1860         free_percpu(net->ct.pcpu_lists);
1861 err_stat:
1862         return ret;
1863 }