// SPDX-License-Identifier: GPL-2.0-only
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/netfilter.h>
#include <linux/rhashtable.h>
#include <linux/netdevice.h>
#include <net/ip.h>
#include <net/ip6_route.h>
#include <net/netfilter/nf_tables.h>
#include <net/netfilter/nf_flow_table.h>
#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_conntrack_tuple.h>

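/*
 * Each offloaded flow is wrapped together with the conntrack entry it
 * shadows; the rcu_head lets the entry be freed via kfree_rcu() after
 * readers are done with it.
 */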
struct flow_offload_entry {
	struct flow_offload	flow;
	struct nf_conn		*ct;
	struct rcu_head		rcu_head;
};

static DEFINE_MUTEX(flowtable_lock);
static LIST_HEAD(flowtables);

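/*
 * Fill in one direction of the flow tuple from the conntrack tuple and
 * the route: addresses, ports, MTU, input/output interface indexes and
 * the cached dst used by the offloaded path.
 */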
static void
flow_offload_fill_dir(struct flow_offload *flow, struct nf_conn *ct,
		      struct nf_flow_route *route,
		      enum flow_offload_tuple_dir dir)
{
	struct flow_offload_tuple *ft = &flow->tuplehash[dir].tuple;
	struct nf_conntrack_tuple *ctt = &ct->tuplehash[dir].tuple;
	struct dst_entry *other_dst = route->tuple[!dir].dst;
	struct dst_entry *dst = route->tuple[dir].dst;

	ft->dir = dir;

	switch (ctt->src.l3num) {
	case NFPROTO_IPV4:
		ft->src_v4 = ctt->src.u3.in;
		ft->dst_v4 = ctt->dst.u3.in;
		ft->mtu = ip_dst_mtu_maybe_forward(dst, true);
		break;
	case NFPROTO_IPV6:
		ft->src_v6 = ctt->src.u3.in6;
		ft->dst_v6 = ctt->dst.u3.in6;
		ft->mtu = ip6_dst_mtu_forward(dst);
		break;
	}

	ft->l3proto = ctt->src.l3num;
	ft->l4proto = ctt->dst.protonum;
	ft->src_port = ctt->src.u.tcp.port;
	ft->dst_port = ctt->dst.u.tcp.port;

	ft->iifidx = other_dst->dev->ifindex;
	ft->oifidx = dst->dev->ifindex;
	ft->dst_cache = dst;
}

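/*
 * Allocate a flow for a conntrack entry, taking a reference on the
 * conntrack entry and on both cached routes, and copying the SNAT/DNAT
 * status bits into the flow flags. A sketch of the expected caller
 * sequence (an assumption based on this API, e.g. the nft flow offload
 * expression): flow_offload_alloc() then flow_offload_add(), with
 * flow_offload_teardown() and flow_offload_free() on the way out.
 */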
struct flow_offload *
flow_offload_alloc(struct nf_conn *ct, struct nf_flow_route *route)
{
	struct flow_offload_entry *entry;
	struct flow_offload *flow;

	if (unlikely(nf_ct_is_dying(ct) ||
	    !atomic_inc_not_zero(&ct->ct_general.use)))
		return NULL;

	entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
	if (!entry)
		goto err_ct_refcnt;

	flow = &entry->flow;

	if (!dst_hold_safe(route->tuple[FLOW_OFFLOAD_DIR_ORIGINAL].dst))
		goto err_dst_cache_original;

	if (!dst_hold_safe(route->tuple[FLOW_OFFLOAD_DIR_REPLY].dst))
		goto err_dst_cache_reply;

	entry->ct = ct;

	flow_offload_fill_dir(flow, ct, route, FLOW_OFFLOAD_DIR_ORIGINAL);
	flow_offload_fill_dir(flow, ct, route, FLOW_OFFLOAD_DIR_REPLY);

	if (ct->status & IPS_SRC_NAT)
		flow->flags |= FLOW_OFFLOAD_SNAT;
	if (ct->status & IPS_DST_NAT)
		flow->flags |= FLOW_OFFLOAD_DNAT;

	return flow;

err_dst_cache_reply:
	dst_release(route->tuple[FLOW_OFFLOAD_DIR_ORIGINAL].dst);
err_dst_cache_original:
	kfree(entry);
err_ct_refcnt:
	nf_ct_put(ct);

	return NULL;
}
EXPORT_SYMBOL_GPL(flow_offload_alloc);

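/*
 * Reset the TCP state to ESTABLISHED and disable window tracking
 * (td_maxwin == 0) so conntrack can pick the connection back up
 * mid-stream once it is no longer offloaded.
 */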
static void flow_offload_fixup_tcp(struct ip_ct_tcp *tcp)
{
	tcp->state = TCP_CONNTRACK_ESTABLISHED;
	tcp->seen[0].td_maxwin = 0;
	tcp->seen[1].td_maxwin = 0;
}

#define NF_FLOWTABLE_TCP_PICKUP_TIMEOUT	(120 * HZ)
#define NF_FLOWTABLE_UDP_PICKUP_TIMEOUT	(30 * HZ)

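/*
 * The conntrack timeout is not refreshed while a flow is offloaded, so
 * give the conntrack entry a fresh pickup timeout when the flow is
 * handed back to the slow path.
 */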
static void flow_offload_fixup_ct_state(struct nf_conn *ct)
{
	const struct nf_conntrack_l4proto *l4proto;
	unsigned int timeout;
	int l4num;

	l4num = nf_ct_protonum(ct);
	if (l4num == IPPROTO_TCP)
		flow_offload_fixup_tcp(&ct->proto.tcp);

	l4proto = nf_ct_l4proto_find(l4num);
	if (!l4proto)
		return;

	if (l4num == IPPROTO_TCP)
		timeout = NF_FLOWTABLE_TCP_PICKUP_TIMEOUT;
	else if (l4num == IPPROTO_UDP)
		timeout = NF_FLOWTABLE_UDP_PICKUP_TIMEOUT;
	else
		return;

	ct->timeout = nfct_time_stamp + timeout;
}

void flow_offload_free(struct flow_offload *flow)
{
	struct flow_offload_entry *e;

	dst_release(flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.dst_cache);
	dst_release(flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.dst_cache);
	e = container_of(flow, struct flow_offload_entry, flow);
	if (flow->flags & FLOW_OFFLOAD_DYING)
		nf_ct_delete(e->ct, 0, 0);
	nf_ct_put(e->ct);
	kfree_rcu(e, rcu_head);
}
EXPORT_SYMBOL_GPL(flow_offload_free);

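/*
 * Both directions of a flow live in a single rhashtable. The hash key
 * covers the leading fields of struct flow_offload_tuple up to 'dir',
 * hence the offsetof(..., dir) length used below.
 */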
static u32 flow_offload_hash(const void *data, u32 len, u32 seed)
{
	const struct flow_offload_tuple *tuple = data;

	return jhash(tuple, offsetof(struct flow_offload_tuple, dir), seed);
}

static u32 flow_offload_hash_obj(const void *data, u32 len, u32 seed)
{
	const struct flow_offload_tuple_rhash *tuplehash = data;

	return jhash(&tuplehash->tuple, offsetof(struct flow_offload_tuple, dir), seed);
}

static int flow_offload_hash_cmp(struct rhashtable_compare_arg *arg,
				 const void *ptr)
{
	const struct flow_offload_tuple *tuple = arg->key;
	const struct flow_offload_tuple_rhash *x = ptr;

	if (memcmp(&x->tuple, tuple, offsetof(struct flow_offload_tuple, dir)))
		return 1;

	return 0;
}

static const struct rhashtable_params nf_flow_offload_rhash_params = {
	.head_offset		= offsetof(struct flow_offload_tuple_rhash, node),
	.hashfn			= flow_offload_hash,
	.obj_hashfn		= flow_offload_hash_obj,
	.obj_cmpfn		= flow_offload_hash_cmp,
	.automatic_shrinking	= true,
};

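/*
 * Insert both directions of the flow into the flowtable; if inserting
 * the reply direction fails, the original direction is backed out so no
 * half-inserted flow remains.
 */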
int flow_offload_add(struct nf_flowtable *flow_table, struct flow_offload *flow)
{
	int err;

	err = rhashtable_insert_fast(&flow_table->rhashtable,
				     &flow->tuplehash[0].node,
				     nf_flow_offload_rhash_params);
	if (err < 0)
		return err;

	err = rhashtable_insert_fast(&flow_table->rhashtable,
				     &flow->tuplehash[1].node,
				     nf_flow_offload_rhash_params);
	if (err < 0) {
		rhashtable_remove_fast(&flow_table->rhashtable,
				       &flow->tuplehash[0].node,
				       nf_flow_offload_rhash_params);
		return err;
	}

	flow->timeout = (u32)jiffies;
	return 0;
}
EXPORT_SYMBOL_GPL(flow_offload_add);

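/*
 * Unlink both directions from the flowtable, clear IPS_OFFLOAD so
 * conntrack resumes normal handling of the connection, and free the
 * flow.
 */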
static void flow_offload_del(struct nf_flowtable *flow_table,
			     struct flow_offload *flow)
{
	struct flow_offload_entry *e;

	rhashtable_remove_fast(&flow_table->rhashtable,
			       &flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].node,
			       nf_flow_offload_rhash_params);
	rhashtable_remove_fast(&flow_table->rhashtable,
			       &flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].node,
			       nf_flow_offload_rhash_params);

	e = container_of(flow, struct flow_offload_entry, flow);
	clear_bit(IPS_OFFLOAD_BIT, &e->ct->status);

	flow_offload_free(flow);
}

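/*
 * Flag the flow for teardown and fix up conntrack state and timeout so
 * the connection survives the transition back to the slow path; the
 * garbage collector removes the flow from the table.
 */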
void flow_offload_teardown(struct flow_offload *flow)
{
	struct flow_offload_entry *e;

	flow->flags |= FLOW_OFFLOAD_TEARDOWN;

	e = container_of(flow, struct flow_offload_entry, flow);
	flow_offload_fixup_ct_state(e->ct);
}
EXPORT_SYMBOL_GPL(flow_offload_teardown);

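/*
 * Look up one direction of a flow by tuple. Flows that are dying or
 * being torn down, or whose conntrack entry is dying, are treated as
 * lookup misses.
 */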
struct flow_offload_tuple_rhash *
flow_offload_lookup(struct nf_flowtable *flow_table,
		    struct flow_offload_tuple *tuple)
{
	struct flow_offload_tuple_rhash *tuplehash;
	struct flow_offload *flow;
	struct flow_offload_entry *e;
	int dir;

	tuplehash = rhashtable_lookup(&flow_table->rhashtable, tuple,
				      nf_flow_offload_rhash_params);
	if (!tuplehash)
		return NULL;

	dir = tuplehash->tuple.dir;
	flow = container_of(tuplehash, struct flow_offload, tuplehash[dir]);
	if (flow->flags & (FLOW_OFFLOAD_DYING | FLOW_OFFLOAD_TEARDOWN))
		return NULL;

	e = container_of(flow, struct flow_offload_entry, flow);
	if (unlikely(nf_ct_is_dying(e->ct)))
		return NULL;

	return tuplehash;
}
EXPORT_SYMBOL_GPL(flow_offload_lookup);

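/*
 * Call @iter once per flow in the table. Reply-direction entries are
 * skipped so each flow is visited exactly once; -EAGAIN from the walker
 * signals a concurrent resize and the walk just continues.
 */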
static int
nf_flow_table_iterate(struct nf_flowtable *flow_table,
		      void (*iter)(struct flow_offload *flow, void *data),
		      void *data)
{
	struct flow_offload_tuple_rhash *tuplehash;
	struct rhashtable_iter hti;
	struct flow_offload *flow;
	int err = 0;

	rhashtable_walk_enter(&flow_table->rhashtable, &hti);
	rhashtable_walk_start(&hti);

	while ((tuplehash = rhashtable_walk_next(&hti))) {
		if (IS_ERR(tuplehash)) {
			if (PTR_ERR(tuplehash) != -EAGAIN) {
				err = PTR_ERR(tuplehash);
				break;
			}
			continue;
		}
		if (tuplehash->tuple.dir)
			continue;

		flow = container_of(tuplehash, struct flow_offload, tuplehash[0]);

		iter(flow, data);
	}
	rhashtable_walk_stop(&hti);
	rhashtable_walk_exit(&hti);

	return err;
}

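/* Signed arithmetic makes the comparison safe across jiffies wraparound. */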
static inline bool nf_flow_has_expired(const struct flow_offload *flow)
{
	return (__s32)(flow->timeout - (u32)jiffies) <= 0;
}

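/*
 * One garbage collection step: remove flows that have expired, are
 * flagged DYING or TEARDOWN, or whose conntrack entry is dying.
 */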
static void nf_flow_offload_gc_step(struct flow_offload *flow, void *data)
{
	struct nf_flowtable *flow_table = data;
	struct flow_offload_entry *e;

	e = container_of(flow, struct flow_offload_entry, flow);
	if (nf_flow_has_expired(flow) || nf_ct_is_dying(e->ct) ||
	    (flow->flags & (FLOW_OFFLOAD_DYING | FLOW_OFFLOAD_TEARDOWN)))
		flow_offload_del(flow_table, flow);
}

static void nf_flow_offload_work_gc(struct work_struct *work)
{
	struct nf_flowtable *flow_table;

	flow_table = container_of(work, struct nf_flowtable, gc_work.work);
	nf_flow_table_iterate(flow_table, nf_flow_offload_gc_step, flow_table);
	queue_delayed_work(system_power_efficient_wq, &flow_table->gc_work, HZ);
}

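/*
 * Port NAT helpers: rewrite one transport port and update the L4
 * checksum. A zero UDP checksum means "no checksum" and is only touched
 * when the skb is CHECKSUM_PARTIAL.
 */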
static int nf_flow_nat_port_tcp(struct sk_buff *skb, unsigned int thoff,
				__be16 port, __be16 new_port)
{
	struct tcphdr *tcph;

	if (!pskb_may_pull(skb, thoff + sizeof(*tcph)) ||
	    skb_try_make_writable(skb, thoff + sizeof(*tcph)))
		return -1;

	tcph = (void *)(skb_network_header(skb) + thoff);
	inet_proto_csum_replace2(&tcph->check, skb, port, new_port, true);

	return 0;
}

static int nf_flow_nat_port_udp(struct sk_buff *skb, unsigned int thoff,
				__be16 port, __be16 new_port)
{
	struct udphdr *udph;

	if (!pskb_may_pull(skb, thoff + sizeof(*udph)) ||
	    skb_try_make_writable(skb, thoff + sizeof(*udph)))
		return -1;

	udph = (void *)(skb_network_header(skb) + thoff);
	if (udph->check || skb->ip_summed == CHECKSUM_PARTIAL) {
		inet_proto_csum_replace2(&udph->check, skb, port,
					 new_port, true);
		if (!udph->check)
			udph->check = CSUM_MANGLED_0;
	}

	return 0;
}

static int nf_flow_nat_port(struct sk_buff *skb, unsigned int thoff,
			    u8 protocol, __be16 port, __be16 new_port)
{
	switch (protocol) {
	case IPPROTO_TCP:
		if (nf_flow_nat_port_tcp(skb, thoff, port, new_port) < 0)
			return NF_DROP;
		break;
	case IPPROTO_UDP:
		if (nf_flow_nat_port_udp(skb, thoff, port, new_port) < 0)
			return NF_DROP;
		break;
	}

	return 0;
}

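/*
 * Source port NAT: rewrite the source port in the original direction
 * (to the reply tuple's destination port) and the destination port in
 * the reply direction.
 */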
int nf_flow_snat_port(const struct flow_offload *flow,
		      struct sk_buff *skb, unsigned int thoff,
		      u8 protocol, enum flow_offload_tuple_dir dir)
{
	struct flow_ports *hdr;
	__be16 port, new_port;

	if (!pskb_may_pull(skb, thoff + sizeof(*hdr)) ||
	    skb_try_make_writable(skb, thoff + sizeof(*hdr)))
		return -1;

	hdr = (void *)(skb_network_header(skb) + thoff);

	switch (dir) {
	case FLOW_OFFLOAD_DIR_ORIGINAL:
		port = hdr->source;
		new_port = flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.dst_port;
		hdr->source = new_port;
		break;
	case FLOW_OFFLOAD_DIR_REPLY:
		port = hdr->dest;
		new_port = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.src_port;
		hdr->dest = new_port;
		break;
	default:
		return -1;
	}

	return nf_flow_nat_port(skb, thoff, protocol, port, new_port);
}
EXPORT_SYMBOL_GPL(nf_flow_snat_port);

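/*
 * Destination port NAT: the mirror image of nf_flow_snat_port(),
 * rewriting the destination port in the original direction and the
 * source port in the reply direction.
 */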
int nf_flow_dnat_port(const struct flow_offload *flow,
		      struct sk_buff *skb, unsigned int thoff,
		      u8 protocol, enum flow_offload_tuple_dir dir)
{
	struct flow_ports *hdr;
	__be16 port, new_port;

	if (!pskb_may_pull(skb, thoff + sizeof(*hdr)) ||
	    skb_try_make_writable(skb, thoff + sizeof(*hdr)))
		return -1;

	hdr = (void *)(skb_network_header(skb) + thoff);

	switch (dir) {
	case FLOW_OFFLOAD_DIR_ORIGINAL:
		port = hdr->dest;
		new_port = flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.src_port;
		hdr->dest = new_port;
		break;
	case FLOW_OFFLOAD_DIR_REPLY:
		port = hdr->source;
		new_port = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.dst_port;
		hdr->source = new_port;
		break;
	default:
		return -1;
	}

	return nf_flow_nat_port(skb, thoff, protocol, port, new_port);
}
EXPORT_SYMBOL_GPL(nf_flow_dnat_port);

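/*
 * Set up the rhashtable, start the periodic garbage collector (every HZ
 * on the power-efficient workqueue) and register the table on the
 * global flowtables list.
 */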
int nf_flow_table_init(struct nf_flowtable *flowtable)
{
	int err;

	INIT_DEFERRABLE_WORK(&flowtable->gc_work, nf_flow_offload_work_gc);

	err = rhashtable_init(&flowtable->rhashtable,
			      &nf_flow_offload_rhash_params);
	if (err < 0)
		return err;

	queue_delayed_work(system_power_efficient_wq,
			   &flowtable->gc_work, HZ);

	mutex_lock(&flowtable_lock);
	list_add(&flowtable->list, &flowtables);
	mutex_unlock(&flowtable_lock);

	return 0;
}
EXPORT_SYMBOL_GPL(nf_flow_table_init);

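/*
 * Iterator callback for flushing: with no device, tear down every flow;
 * with a device, mark as dying only flows in that device's netns that
 * use it as input interface in either direction.
 */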
static void nf_flow_table_do_cleanup(struct flow_offload *flow, void *data)
{
	struct net_device *dev = data;
	struct flow_offload_entry *e;

	e = container_of(flow, struct flow_offload_entry, flow);

	if (!dev) {
		flow_offload_teardown(flow);
		return;
	}
	if (net_eq(nf_ct_net(e->ct), dev_net(dev)) &&
	    (flow->tuplehash[0].tuple.iifidx == dev->ifindex ||
	     flow->tuplehash[1].tuple.iifidx == dev->ifindex))
		flow_offload_dead(flow);
}

static void nf_flow_table_iterate_cleanup(struct nf_flowtable *flowtable,
					  struct net_device *dev)
{
	nf_flow_table_iterate(flowtable, nf_flow_table_do_cleanup, dev);
	flush_delayed_work(&flowtable->gc_work);
}

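/*
 * Flush the flows that use @dev from every registered flowtable;
 * intended for netdevice event handlers (an assumption based on the
 * API, not spelled out in this file).
 */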
void nf_flow_table_cleanup(struct net_device *dev)
{
	struct nf_flowtable *flowtable;

	mutex_lock(&flowtable_lock);
	list_for_each_entry(flowtable, &flowtables, list)
		nf_flow_table_iterate_cleanup(flowtable, dev);
	mutex_unlock(&flowtable_lock);
}
EXPORT_SYMBOL_GPL(nf_flow_table_cleanup);

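/*
 * Release a flowtable: unregister it, stop the garbage collector, tear
 * down the remaining flows and run a final gc step to free them before
 * destroying the rhashtable.
 */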
void nf_flow_table_free(struct nf_flowtable *flow_table)
{
	mutex_lock(&flowtable_lock);
	list_del(&flow_table->list);
	mutex_unlock(&flowtable_lock);
	cancel_delayed_work_sync(&flow_table->gc_work);
	nf_flow_table_iterate(flow_table, nf_flow_table_do_cleanup, NULL);
	nf_flow_table_iterate(flow_table, nf_flow_offload_gc_step, flow_table);
	rhashtable_destroy(&flow_table->rhashtable);
}
EXPORT_SYMBOL_GPL(nf_flow_table_free);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Pablo Neira Ayuso <pablo@netfilter.org>");