/* Expectation handling for nf_conntrack. */

/* (C) 1999-2001 Paul `Rusty' Russell
 * (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org>
 * (C) 2003,2004 USAGI/WIDE Project <http://www.linux-ipv6.org>
 * (c) 2005-2012 Patrick McHardy <kaber@trash.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/types.h>
#include <linux/netfilter.h>
#include <linux/skbuff.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/stddef.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/percpu.h>
#include <linux/kernel.h>
#include <linux/jhash.h>
#include <linux/moduleparam.h>
#include <linux/export.h>
#include <net/net_namespace.h>

#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_conntrack_expect.h>
#include <net/netfilter/nf_conntrack_helper.h>
#include <net/netfilter/nf_conntrack_tuple.h>
#include <net/netfilter/nf_conntrack_zones.h>

unsigned int nf_ct_expect_hsize __read_mostly;
EXPORT_SYMBOL_GPL(nf_ct_expect_hsize);

unsigned int nf_ct_expect_max __read_mostly;

static struct kmem_cache *nf_ct_expect_cachep __read_mostly;

/* nf_conntrack_expect helper functions */
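/* Unlink @exp from the global expectation hash and its master's list,
 * report an IPEXP_DESTROY event and drop the hash table's reference.
 * The caller must hold nf_conntrack_expect_lock, and the timeout timer
 * must no longer be pending.
 */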
void nf_ct_unlink_expect_report(struct nf_conntrack_expect *exp,
				u32 portid, int report)
{
	struct nf_conn_help *master_help = nfct_help(exp->master);
	struct net *net = nf_ct_exp_net(exp);

	NF_CT_ASSERT(master_help);
	NF_CT_ASSERT(!timer_pending(&exp->timeout));

	hlist_del_rcu(&exp->hnode);
	net->ct.expect_count--;

	hlist_del(&exp->lnode);
	master_help->expecting[exp->class]--;

	nf_ct_expect_event_report(IPEXP_DESTROY, exp, portid, report);
	nf_ct_expect_put(exp);

	NF_CT_STAT_INC(net, expect_delete);
}
EXPORT_SYMBOL_GPL(nf_ct_unlink_expect_report);

static void nf_ct_expectation_timed_out(unsigned long ul_expect)
{
	struct nf_conntrack_expect *exp = (void *)ul_expect;

	spin_lock_bh(&nf_conntrack_expect_lock);
	nf_ct_unlink_expect(exp);
	spin_unlock_bh(&nf_conntrack_expect_lock);
	nf_ct_expect_put(exp);
}

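/* Hash an expectation by its destination tuple (address, l3/l4 protocol
 * and port); the result indexes the nf_ct_expect_hsize-bucket table.
 */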
static unsigned int nf_ct_expect_dst_hash(const struct nf_conntrack_tuple *tuple)
{
	unsigned int hash;

	if (unlikely(!nf_conntrack_hash_rnd))
		init_nf_conntrack_hash_rnd();

	hash = jhash2(tuple->dst.u3.all, ARRAY_SIZE(tuple->dst.u3.all),
		      (((tuple->dst.protonum ^ tuple->src.l3num) << 16) |
		       (__force __u16)tuple->dst.u.all) ^ nf_conntrack_hash_rnd);

	return reciprocal_scale(hash, nf_ct_expect_hsize);
}

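/* Core lookup: return the first expectation whose mask covers @tuple in
 * @zone, or NULL. Must be called with rcu_read_lock() held.
 */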
struct nf_conntrack_expect *
__nf_ct_expect_find(struct net *net, u16 zone,
		    const struct nf_conntrack_tuple *tuple)
{
	struct nf_conntrack_expect *i;
	unsigned int h;

	if (!net->ct.expect_count)
		return NULL;

	h = nf_ct_expect_dst_hash(tuple);
	hlist_for_each_entry_rcu(i, &net->ct.expect_hash[h], hnode) {
		if (nf_ct_tuple_mask_cmp(tuple, &i->tuple, &i->mask) &&
		    nf_ct_zone(i->master) == zone)
			return i;
	}
	return NULL;
}
EXPORT_SYMBOL_GPL(__nf_ct_expect_find);

/* Just find an expectation corresponding to a tuple. */
struct nf_conntrack_expect *
nf_ct_expect_find_get(struct net *net, u16 zone,
		      const struct nf_conntrack_tuple *tuple)
{
	struct nf_conntrack_expect *i;

	rcu_read_lock();
	i = __nf_ct_expect_find(net, zone, tuple);
	if (i && !atomic_inc_not_zero(&i->use))
		i = NULL;
	rcu_read_unlock();

	return i;
}
EXPORT_SYMBOL_GPL(nf_ct_expect_find_get);

/* If an expectation for this connection is found, it is deleted from
 * the global list and then returned. */
struct nf_conntrack_expect *
nf_ct_find_expectation(struct net *net, u16 zone,
		       const struct nf_conntrack_tuple *tuple)
{
	struct nf_conntrack_expect *i, *exp = NULL;
	unsigned int h;

	if (!net->ct.expect_count)
		return NULL;

	h = nf_ct_expect_dst_hash(tuple);
	hlist_for_each_entry(i, &net->ct.expect_hash[h], hnode) {
		if (!(i->flags & NF_CT_EXPECT_INACTIVE) &&
		    nf_ct_tuple_mask_cmp(tuple, &i->tuple, &i->mask) &&
		    nf_ct_zone(i->master) == zone) {
			exp = i;
			break;
		}
	}
	if (!exp)
		return NULL;

	/* If master is not in the hash table yet (ie. the packet hasn't
	 * left this machine yet), how can the other end know about the
	 * expected connection? Hence these are not the droids you are
	 * looking for (if master ct never got confirmed, we'd hold a
	 * reference to it and weird things would happen to future
	 * packets).
	 */
	if (!nf_ct_is_confirmed(exp->master))
		return NULL;

	/* Avoid a race with other CPUs that, for the exp->master ct,
	 * are about to invoke ->destroy(), or nf_ct_delete() via
	 * timeout or early_drop().
	 *
	 * If atomic_inc_not_zero() fails, we know the ct is being
	 * destroyed. If it succeeds, the ct cannot disappear
	 * underneath us.
	 */
	if (unlikely(nf_ct_is_dying(exp->master) ||
		     !atomic_inc_not_zero(&exp->master->ct_general.use)))
		return NULL;

	if (exp->flags & NF_CT_EXPECT_PERMANENT) {
		atomic_inc(&exp->use);
		return exp;
	} else if (del_timer(&exp->timeout)) {
		nf_ct_unlink_expect(exp);
		return exp;
	}
	/* Undo exp->master refcnt increase, if del_timer() failed */
	nf_ct_put(exp->master);

	return NULL;
}

/* Delete all expectations for this conntrack. */
void nf_ct_remove_expectations(struct nf_conn *ct)
{
	struct nf_conn_help *help = nfct_help(ct);
	struct nf_conntrack_expect *exp;
	struct hlist_node *next;

	/* Optimization: most connections never expect any others. */
	if (!help)
		return;

	spin_lock_bh(&nf_conntrack_expect_lock);
	hlist_for_each_entry_safe(exp, next, &help->expectations, lnode) {
		if (del_timer(&exp->timeout)) {
			nf_ct_unlink_expect(exp);
			nf_ct_expect_put(exp);
		}
	}
	spin_unlock_bh(&nf_conntrack_expect_lock);
}
EXPORT_SYMBOL_GPL(nf_ct_remove_expectations);

/* Would two expected things clash? */
static inline int expect_clash(const struct nf_conntrack_expect *a,
			       const struct nf_conntrack_expect *b)
{
	/* Part covered by intersection of masks must be unequal,
	 * otherwise they clash.
	 */
	struct nf_conntrack_tuple_mask intersect_mask;
	int count;

	intersect_mask.src.u.all = a->mask.src.u.all & b->mask.src.u.all;

	for (count = 0; count < NF_CT_TUPLE_L3SIZE; count++) {
		intersect_mask.src.u3.all[count] =
			a->mask.src.u3.all[count] & b->mask.src.u3.all[count];
	}

	return nf_ct_tuple_mask_cmp(&a->tuple, &b->tuple, &intersect_mask);
}

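/* Two expectations are identical: same master, class, tuple, mask and
 * conntrack zone.
 */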
static inline int expect_matches(const struct nf_conntrack_expect *a,
				 const struct nf_conntrack_expect *b)
{
	return a->master == b->master && a->class == b->class &&
	       nf_ct_tuple_equal(&a->tuple, &b->tuple) &&
	       nf_ct_tuple_mask_equal(&a->mask, &b->mask) &&
	       nf_ct_zone(a->master) == nf_ct_zone(b->master);
}

/* Generally a bad idea to call this: could have matched already. */
void nf_ct_unexpect_related(struct nf_conntrack_expect *exp)
{
	spin_lock_bh(&nf_conntrack_expect_lock);
	if (del_timer(&exp->timeout)) {
		nf_ct_unlink_expect(exp);
		nf_ct_expect_put(exp);
	}
	spin_unlock_bh(&nf_conntrack_expect_lock);
}
EXPORT_SYMBOL_GPL(nf_ct_unexpect_related);

/* We don't increase the master conntrack refcount for non-fulfilled
 * conntracks. During the conntrack destruction, the expectations are
 * always killed before the conntrack itself. */
struct nf_conntrack_expect *nf_ct_expect_alloc(struct nf_conn *me)
{
	struct nf_conntrack_expect *new;

	new = kmem_cache_alloc(nf_ct_expect_cachep, GFP_ATOMIC);
	if (!new)
		return NULL;

	new->master = me;
	atomic_set(&new->use, 1);
	return new;
}
EXPORT_SYMBOL_GPL(nf_ct_expect_alloc);
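
/* Fill in @exp's tuple and mask for the given family and protocol; a
 * NULL @saddr or @src leaves that field as a wildcard (zero mask).
 * A connection-tracking helper typically builds and registers an
 * expectation roughly like this (sketch only; error handling omitted,
 * @port standing for the helper's parsed payload port):
 *
 *	exp = nf_ct_expect_alloc(ct);
 *	nf_ct_expect_init(exp, NF_CT_EXPECT_CLASS_DEFAULT, nf_ct_l3num(ct),
 *			  &ct->tuplehash[IP_CT_DIR_REPLY].tuple.src.u3,
 *			  &ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.u3,
 *			  IPPROTO_TCP, NULL, &port);
 *	nf_ct_expect_related(exp);
 *	nf_ct_expect_put(exp);
 */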
void nf_ct_expect_init(struct nf_conntrack_expect *exp, unsigned int class,
		       u_int8_t family,
		       const union nf_inet_addr *saddr,
		       const union nf_inet_addr *daddr,
		       u_int8_t proto, const __be16 *src, const __be16 *dst)
{
	int len;

	if (family == AF_INET)
		len = 4;
	else
		len = 16;

	exp->flags = 0;
	exp->class = class;
	exp->expectfn = NULL;
	exp->helper = NULL;
	exp->tuple.src.l3num = family;
	exp->tuple.dst.protonum = proto;

	if (saddr) {
		memcpy(&exp->tuple.src.u3, saddr, len);
		if (sizeof(exp->tuple.src.u3) > len)
			/* address needs to be cleared for nf_ct_tuple_equal */
			memset((void *)&exp->tuple.src.u3 + len, 0x00,
			       sizeof(exp->tuple.src.u3) - len);
		memset(&exp->mask.src.u3, 0xFF, len);
		if (sizeof(exp->mask.src.u3) > len)
			memset((void *)&exp->mask.src.u3 + len, 0x00,
			       sizeof(exp->mask.src.u3) - len);
	} else {
		memset(&exp->tuple.src.u3, 0x00, sizeof(exp->tuple.src.u3));
		memset(&exp->mask.src.u3, 0x00, sizeof(exp->mask.src.u3));
	}

	if (src) {
		exp->tuple.src.u.all = *src;
		exp->mask.src.u.all = htons(0xFFFF);
	} else {
		exp->tuple.src.u.all = 0;
		exp->mask.src.u.all = 0;
	}

	memcpy(&exp->tuple.dst.u3, daddr, len);
	if (sizeof(exp->tuple.dst.u3) > len)
		/* address needs to be cleared for nf_ct_tuple_equal */
		memset((void *)&exp->tuple.dst.u3 + len, 0x00,
		       sizeof(exp->tuple.dst.u3) - len);

	exp->tuple.dst.u.all = *dst;

#ifdef CONFIG_NF_NAT_NEEDED
	memset(&exp->saved_addr, 0, sizeof(exp->saved_addr));
	memset(&exp->saved_proto, 0, sizeof(exp->saved_proto));
#endif
}
EXPORT_SYMBOL_GPL(nf_ct_expect_init);

static void nf_ct_expect_free_rcu(struct rcu_head *head)
{
	struct nf_conntrack_expect *exp;

	exp = container_of(head, struct nf_conntrack_expect, rcu);
	kmem_cache_free(nf_ct_expect_cachep, exp);
}

void nf_ct_expect_put(struct nf_conntrack_expect *exp)
{
	if (atomic_dec_and_test(&exp->use))
		call_rcu(&exp->rcu, nf_ct_expect_free_rcu);
}
EXPORT_SYMBOL_GPL(nf_ct_expect_put);

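/* Link @exp into the global hash and its master's list, take the two
 * references (hash table and timer) and arm the timeout. Called with
 * nf_conntrack_expect_lock held.
 */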
static int nf_ct_expect_insert(struct nf_conntrack_expect *exp)
{
	struct nf_conn_help *master_help = nfct_help(exp->master);
	struct nf_conntrack_helper *helper;
	struct net *net = nf_ct_exp_net(exp);
	unsigned int h = nf_ct_expect_dst_hash(&exp->tuple);

	/* two references: one for hash insert, one for the timer */
	atomic_add(2, &exp->use);

	hlist_add_head(&exp->lnode, &master_help->expectations);
	master_help->expecting[exp->class]++;

	hlist_add_head_rcu(&exp->hnode, &net->ct.expect_hash[h]);
	net->ct.expect_count++;

	setup_timer(&exp->timeout, nf_ct_expectation_timed_out,
		    (unsigned long)exp);
	helper = rcu_dereference_protected(master_help->helper,
					   lockdep_is_held(&nf_conntrack_expect_lock));
	if (helper) {
		exp->timeout.expires = jiffies +
			helper->expect_policy[exp->class].timeout * HZ;
	}
	add_timer(&exp->timeout);

	NF_CT_STAT_INC(net, expect_create);
	return 0;
}

/* Race with expectations being used means we could have none to find; OK. */
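/* Expectations are added at the head of the per-master list, so the
 * last entry of the matching class is the oldest one.
 */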
static void evict_oldest_expect(struct nf_conn *master,
				struct nf_conntrack_expect *new)
{
	struct nf_conn_help *master_help = nfct_help(master);
	struct nf_conntrack_expect *exp, *last = NULL;

	hlist_for_each_entry(exp, &master_help->expectations, lnode) {
		if (exp->class == new->class)
			last = exp;
	}

	if (last && del_timer(&last->timeout)) {
		nf_ct_unlink_expect(last);
		nf_ct_expect_put(last);
	}
}

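/* Check whether @expect may be registered: the master must still have
 * helper state, an identical expired expectation is replaced, a
 * clashing one is rejected, and the per-helper and global limits are
 * enforced. Returns a positive value if the insertion may proceed;
 * the caller treats anything <= 0 as failure. Called with
 * nf_conntrack_expect_lock held.
 */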
static inline int __nf_ct_expect_check(struct nf_conntrack_expect *expect)
{
	const struct nf_conntrack_expect_policy *p;
	struct nf_conntrack_expect *i;
	struct nf_conn *master = expect->master;
	struct nf_conn_help *master_help = nfct_help(master);
	struct nf_conntrack_helper *helper;
	struct net *net = nf_ct_exp_net(expect);
	struct hlist_node *next;
	unsigned int h;
	int ret = 1;

	if (!master_help) {
		ret = -ESHUTDOWN;
		goto out;
	}
	h = nf_ct_expect_dst_hash(&expect->tuple);
	hlist_for_each_entry_safe(i, next, &net->ct.expect_hash[h], hnode) {
		if (expect_matches(i, expect)) {
			if (del_timer(&i->timeout)) {
				nf_ct_unlink_expect(i);
				nf_ct_expect_put(i);
				break;
			}
		} else if (expect_clash(i, expect)) {
			ret = -EBUSY;
			goto out;
		}
	}
	/* Will be over limit? */
	helper = rcu_dereference_protected(master_help->helper,
					   lockdep_is_held(&nf_conntrack_expect_lock));
	if (helper) {
		p = &helper->expect_policy[expect->class];
		if (p->max_expected &&
		    master_help->expecting[expect->class] >= p->max_expected) {
			evict_oldest_expect(master, expect);
			if (master_help->expecting[expect->class]
			    >= p->max_expected) {
				ret = -EMFILE;
				goto out;
			}
		}
	}

	if (net->ct.expect_count >= nf_ct_expect_max) {
		net_warn_ratelimited("nf_conntrack: expectation table full\n");
		ret = -EMFILE;
	}
out:
	return ret;
}

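/* Register @expect, reporting an IPEXP_NEW event to @portid on
 * success. Returns 0 on success or a negative errno.
 */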
int nf_ct_expect_related_report(struct nf_conntrack_expect *expect,
				u32 portid, int report)
{
	int ret;

	spin_lock_bh(&nf_conntrack_expect_lock);
	ret = __nf_ct_expect_check(expect);
	if (ret <= 0)
		goto out;

	ret = nf_ct_expect_insert(expect);
	if (ret < 0)
		goto out;
	spin_unlock_bh(&nf_conntrack_expect_lock);
	nf_ct_expect_event_report(IPEXP_NEW, expect, portid, report);
	return ret;
out:
	spin_unlock_bh(&nf_conntrack_expect_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(nf_ct_expect_related_report);

#ifdef CONFIG_NF_CONNTRACK_PROCFS
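/* /proc/net/nf_conntrack_expect: walk the expectation hash under RCU,
 * one bucket at a time.
 */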
struct ct_expect_iter_state {
	struct seq_net_private p;
	unsigned int bucket;
};

static struct hlist_node *ct_expect_get_first(struct seq_file *seq)
{
	struct net *net = seq_file_net(seq);
	struct ct_expect_iter_state *st = seq->private;
	struct hlist_node *n;

	for (st->bucket = 0; st->bucket < nf_ct_expect_hsize; st->bucket++) {
		n = rcu_dereference(hlist_first_rcu(&net->ct.expect_hash[st->bucket]));
		if (n)
			return n;
	}
	return NULL;
}

static struct hlist_node *ct_expect_get_next(struct seq_file *seq,
					     struct hlist_node *head)
{
	struct net *net = seq_file_net(seq);
	struct ct_expect_iter_state *st = seq->private;

	head = rcu_dereference(hlist_next_rcu(head));
	while (head == NULL) {
		if (++st->bucket >= nf_ct_expect_hsize)
			return NULL;
		head = rcu_dereference(hlist_first_rcu(&net->ct.expect_hash[st->bucket]));
	}
	return head;
}

static struct hlist_node *ct_expect_get_idx(struct seq_file *seq, loff_t pos)
{
	struct hlist_node *head = ct_expect_get_first(seq);

	if (head)
		while (pos && (head = ct_expect_get_next(seq, head)))
			pos--;
	return pos ? NULL : head;
}

static void *exp_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(RCU)
{
	rcu_read_lock();
	return ct_expect_get_idx(seq, *pos);
}

static void *exp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	(*pos)++;
	return ct_expect_get_next(seq, v);
}

static void exp_seq_stop(struct seq_file *seq, void *v)
	__releases(RCU)
{
	rcu_read_unlock();
}

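/* Print one expectation: remaining timeout in seconds, l3/l4 protocol
 * numbers, the expected tuple, flags, and the helper/policy names.
 */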
static int exp_seq_show(struct seq_file *s, void *v)
{
	struct nf_conntrack_expect *expect;
	struct nf_conntrack_helper *helper;
	struct hlist_node *n = v;
	char *delim = "";

	expect = hlist_entry(n, struct nf_conntrack_expect, hnode);

	if (expect->timeout.function)
		seq_printf(s, "%ld ", timer_pending(&expect->timeout)
			   ? (long)(expect->timeout.expires - jiffies)/HZ : 0);
	else
		seq_printf(s, "- ");
	seq_printf(s, "l3proto = %u proto=%u ",
		   expect->tuple.src.l3num,
		   expect->tuple.dst.protonum);
	print_tuple(s, &expect->tuple,
		    __nf_ct_l3proto_find(expect->tuple.src.l3num),
		    __nf_ct_l4proto_find(expect->tuple.src.l3num,
					 expect->tuple.dst.protonum));

	if (expect->flags & NF_CT_EXPECT_PERMANENT) {
		seq_printf(s, "PERMANENT");
		delim = ",";
	}
	if (expect->flags & NF_CT_EXPECT_INACTIVE) {
		seq_printf(s, "%sINACTIVE", delim);
		delim = ",";
	}
	if (expect->flags & NF_CT_EXPECT_USERSPACE)
		seq_printf(s, "%sUSERSPACE", delim);

	helper = rcu_dereference(nfct_help(expect->master)->helper);
	if (helper) {
		seq_printf(s, "%s%s", expect->flags ? " " : "", helper->name);
		if (helper->expect_policy[expect->class].name)
			seq_printf(s, "/%s",
				   helper->expect_policy[expect->class].name);
	}

	seq_putc(s, '\n');

	return 0;
}

static const struct seq_operations exp_seq_ops = {
	.start = exp_seq_start,
	.next = exp_seq_next,
	.stop = exp_seq_stop,
	.show = exp_seq_show
};

static int exp_open(struct inode *inode, struct file *file)
{
	return seq_open_net(inode, file, &exp_seq_ops,
			    sizeof(struct ct_expect_iter_state));
}

static const struct file_operations exp_file_ops = {
	.owner = THIS_MODULE,
	.open = exp_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release_net,
};
#endif /* CONFIG_NF_CONNTRACK_PROCFS */

static int exp_proc_init(struct net *net)
{
#ifdef CONFIG_NF_CONNTRACK_PROCFS
	struct proc_dir_entry *proc;

	proc = proc_create("nf_conntrack_expect", 0440, net->proc_net,
			   &exp_file_ops);
	if (!proc)
		return -ENOMEM;
#endif /* CONFIG_NF_CONNTRACK_PROCFS */
	return 0;
}

static void exp_proc_remove(struct net *net)
{
#ifdef CONFIG_NF_CONNTRACK_PROCFS
	remove_proc_entry("nf_conntrack_expect", net->proc_net);
#endif /* CONFIG_NF_CONNTRACK_PROCFS */
}

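/* The hash size is tunable only at module load time (perm 0400: the
 * parameter is readable by root but not writable at runtime).
 */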
module_param_named(expect_hashsize, nf_ct_expect_hsize, uint, 0400);

int nf_conntrack_expect_pernet_init(struct net *net)
{
	int err = -ENOMEM;

	net->ct.expect_count = 0;
	net->ct.expect_hash = nf_ct_alloc_hashtable(&nf_ct_expect_hsize, 0);
	if (net->ct.expect_hash == NULL)
		goto err1;

	err = exp_proc_init(net);
	if (err < 0)
		goto err2;

	return 0;
err2:
	nf_ct_free_hashtable(net->ct.expect_hash, nf_ct_expect_hsize);
err1:
	return err;
}

void nf_conntrack_expect_pernet_fini(struct net *net)
{
	exp_proc_remove(net);
	nf_ct_free_hashtable(net->ct.expect_hash, nf_ct_expect_hsize);
}
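/* Module-wide (not per-netns) setup: pick defaults for the hash size
 * and the expectation limit, then create the slab cache.
 */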
int nf_conntrack_expect_init(void)
{
	if (!nf_ct_expect_hsize) {
		nf_ct_expect_hsize = nf_conntrack_htable_size / 256;
		if (!nf_ct_expect_hsize)
			nf_ct_expect_hsize = 1;
	}
	nf_ct_expect_max = nf_ct_expect_hsize * 4;
	nf_ct_expect_cachep = kmem_cache_create("nf_conntrack_expect",
						sizeof(struct nf_conntrack_expect),
						0, 0, NULL);
	if (!nf_ct_expect_cachep)
		return -ENOMEM;
	return 0;
}

void nf_conntrack_expect_fini(void)
{
	rcu_barrier(); /* Wait for call_rcu() before destroy */
	kmem_cache_destroy(nf_ct_expect_cachep);
}
659}