// SPDX-License-Identifier: GPL-2.0-only
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/netfilter.h>
#include <linux/rhashtable.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/netdevice.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/ip6_route.h>
#include <net/neighbour.h>
#include <net/netfilter/nf_flow_table.h>
/* For layer 4 checksum field offset. */
#include <linux/tcp.h>
#include <linux/udp.h>

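/* Tear down the flow on TCP FIN or RST so the packet falls back to the
 * classic forwarding path and conntrack can track the connection shutdown.
 */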
static int nf_flow_state_check(struct flow_offload *flow, int proto,
			       struct sk_buff *skb, unsigned int thoff)
{
	struct tcphdr *tcph;

	if (proto != IPPROTO_TCP)
		return 0;

	if (!pskb_may_pull(skb, thoff + sizeof(*tcph)))
		return -1;

	tcph = (void *)(skb_network_header(skb) + thoff);
	if (unlikely(tcph->fin || tcph->rst)) {
		flow_offload_teardown(flow);
		return -1;
	}

	return 0;
}

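/* The address rewrite itself is done by the callers; these helpers only
 * propagate the pseudo-header change into the TCP/UDP checksum field.
 */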
static int nf_flow_nat_ip_tcp(struct sk_buff *skb, unsigned int thoff,
			      __be32 addr, __be32 new_addr)
{
	struct tcphdr *tcph;

	if (!pskb_may_pull(skb, thoff + sizeof(*tcph)) ||
	    skb_try_make_writable(skb, thoff + sizeof(*tcph)))
		return -1;

	tcph = (void *)(skb_network_header(skb) + thoff);
	inet_proto_csum_replace4(&tcph->check, skb, addr, new_addr, true);

	return 0;
}

static int nf_flow_nat_ip_udp(struct sk_buff *skb, unsigned int thoff,
			      __be32 addr, __be32 new_addr)
{
	struct udphdr *udph;

	if (!pskb_may_pull(skb, thoff + sizeof(*udph)) ||
	    skb_try_make_writable(skb, thoff + sizeof(*udph)))
		return -1;

	udph = (void *)(skb_network_header(skb) + thoff);
	if (udph->check || skb->ip_summed == CHECKSUM_PARTIAL) {
		inet_proto_csum_replace4(&udph->check, skb, addr,
					 new_addr, true);
		if (!udph->check)
			udph->check = CSUM_MANGLED_0;
	}

	return 0;
}

static int nf_flow_nat_ip_l4proto(struct sk_buff *skb, struct iphdr *iph,
				  unsigned int thoff, __be32 addr,
				  __be32 new_addr)
{
	switch (iph->protocol) {
	case IPPROTO_TCP:
		if (nf_flow_nat_ip_tcp(skb, thoff, addr, new_addr) < 0)
			return -1;
		break;
	case IPPROTO_UDP:
		if (nf_flow_nat_ip_udp(skb, thoff, addr, new_addr) < 0)
			return -1;
		break;
	}

	return 0;
}

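/* Rewrite the IP address selected by @dir, fix the IP header checksum,
 * then propagate the change into the L4 checksum.
 */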
static int nf_flow_snat_ip(const struct flow_offload *flow, struct sk_buff *skb,
			   struct iphdr *iph, unsigned int thoff,
			   enum flow_offload_tuple_dir dir)
{
	__be32 addr, new_addr;

	switch (dir) {
	case FLOW_OFFLOAD_DIR_ORIGINAL:
		addr = iph->saddr;
		new_addr = flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.dst_v4.s_addr;
		iph->saddr = new_addr;
		break;
	case FLOW_OFFLOAD_DIR_REPLY:
		addr = iph->daddr;
		new_addr = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.src_v4.s_addr;
		iph->daddr = new_addr;
		break;
	default:
		return -1;
	}
	csum_replace4(&iph->check, addr, new_addr);

	return nf_flow_nat_ip_l4proto(skb, iph, thoff, addr, new_addr);
}

static int nf_flow_dnat_ip(const struct flow_offload *flow, struct sk_buff *skb,
			   struct iphdr *iph, unsigned int thoff,
			   enum flow_offload_tuple_dir dir)
{
	__be32 addr, new_addr;

	switch (dir) {
	case FLOW_OFFLOAD_DIR_ORIGINAL:
		addr = iph->daddr;
		new_addr = flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.src_v4.s_addr;
		iph->daddr = new_addr;
		break;
	case FLOW_OFFLOAD_DIR_REPLY:
		addr = iph->saddr;
		new_addr = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.dst_v4.s_addr;
		iph->saddr = new_addr;
		break;
	default:
		return -1;
	}
	csum_replace4(&iph->check, addr, new_addr);

	return nf_flow_nat_ip_l4proto(skb, iph, thoff, addr, new_addr);
}

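/* Apply port and address NAT in both directions as flagged on the flow. */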
static int nf_flow_nat_ip(const struct flow_offload *flow, struct sk_buff *skb,
			  unsigned int thoff, enum flow_offload_tuple_dir dir)
{
	struct iphdr *iph = ip_hdr(skb);

	if (test_bit(NF_FLOW_SNAT, &flow->flags) &&
	    (nf_flow_snat_port(flow, skb, thoff, iph->protocol, dir) < 0 ||
	     nf_flow_snat_ip(flow, skb, iph, thoff, dir) < 0))
		return -1;
	if (test_bit(NF_FLOW_DNAT, &flow->flags) &&
	    (nf_flow_dnat_port(flow, skb, thoff, iph->protocol, dir) < 0 ||
	     nf_flow_dnat_ip(flow, skb, iph, thoff, dir) < 0))
		return -1;

	return 0;
}

static bool ip_has_options(unsigned int thoff)
{
	return thoff != sizeof(struct iphdr);
}

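/* Extract the lookup tuple from the packet. Fragments, packets with IP
 * options and non-TCP/UDP traffic stay on the classic forwarding path.
 */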
static int nf_flow_tuple_ip(struct sk_buff *skb, const struct net_device *dev,
			    struct flow_offload_tuple *tuple)
{
	struct flow_ports *ports;
	unsigned int thoff;
	struct iphdr *iph;

	if (!pskb_may_pull(skb, sizeof(*iph)))
		return -1;

	iph = ip_hdr(skb);
	thoff = iph->ihl * 4;

	if (ip_is_fragment(iph) ||
	    unlikely(ip_has_options(thoff)))
		return -1;

	if (iph->protocol != IPPROTO_TCP &&
	    iph->protocol != IPPROTO_UDP)
		return -1;

	if (iph->ttl <= 1)
		return -1;

	thoff = iph->ihl * 4;
	if (!pskb_may_pull(skb, thoff + sizeof(*ports)))
		return -1;

	/* pskb_may_pull() may reallocate the header, re-fetch the pointer. */
	iph = ip_hdr(skb);
	ports = (struct flow_ports *)(skb_network_header(skb) + thoff);

	tuple->src_v4.s_addr = iph->saddr;
	tuple->dst_v4.s_addr = iph->daddr;
	tuple->src_port = ports->source;
	tuple->dst_port = ports->dest;
	tuple->l3proto = AF_INET;
	tuple->l4proto = iph->protocol;
	tuple->iifidx = dev->ifindex;

	return 0;
}

/* Based on ip_exceeds_mtu(). */
static bool nf_flow_exceeds_mtu(const struct sk_buff *skb, unsigned int mtu)
{
	if (skb->len <= mtu)
		return false;

	if (skb_is_gso(skb) && skb_gso_validate_network_len(skb, mtu))
		return false;

	return true;
}

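/* An xfrm bundle can become stale at any time, so revalidate it per
 * packet; other cached routes are taken as-is.
 */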
static int nf_flow_offload_dst_check(struct dst_entry *dst)
{
	if (unlikely(dst_xfrm(dst)))
		return dst_check(dst, 0) ? 0 : -1;

	return 0;
}

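/* Hand the packet to the xfrm layer for IPsec transformation; the skb is
 * stolen from the netfilter hook either way.
 */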
static unsigned int nf_flow_xmit_xfrm(struct sk_buff *skb,
				      const struct nf_hook_state *state,
				      struct dst_entry *dst)
{
	skb_orphan(skb);
	skb_dst_set_noref(skb, dst);
	dst_output(state->net, state->sk, skb);
	return NF_STOLEN;
}

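/* Report whether this flow is flagged for hardware (re-)offload; the
 * caller then requeues the offload work.
 */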
static bool nf_flow_offload_refresh(struct nf_flowtable *flow_table,
				    struct flow_offload *flow)
{
	return nf_flowtable_hw_offload(flow_table) &&
	       test_and_clear_bit(NF_FLOW_HW_REFRESH, &flow->flags);
}

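/* IPv4 fast path: look up the flow from the packet tuple, apply NAT,
 * decrement the TTL and transmit straight to the cached next hop,
 * bypassing classic forwarding. NF_ACCEPT falls back to the slow path.
 */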
unsigned int
nf_flow_offload_ip_hook(void *priv, struct sk_buff *skb,
			const struct nf_hook_state *state)
{
	struct flow_offload_tuple_rhash *tuplehash;
	struct nf_flowtable *flow_table = priv;
	struct flow_offload_tuple tuple = {};
	enum flow_offload_tuple_dir dir;
	struct flow_offload *flow;
	struct net_device *outdev;
	struct rtable *rt;
	unsigned int thoff;
	struct iphdr *iph;
	__be32 nexthop;

	if (skb->protocol != htons(ETH_P_IP))
		return NF_ACCEPT;

	if (nf_flow_tuple_ip(skb, state->in, &tuple) < 0)
		return NF_ACCEPT;

	tuplehash = flow_offload_lookup(flow_table, &tuple);
	if (tuplehash == NULL)
		return NF_ACCEPT;

	dir = tuplehash->tuple.dir;
	flow = container_of(tuplehash, struct flow_offload, tuplehash[dir]);
	rt = (struct rtable *)flow->tuplehash[dir].tuple.dst_cache;
	outdev = rt->dst.dev;

	if (unlikely(nf_flow_exceeds_mtu(skb, flow->tuplehash[dir].tuple.mtu)))
		return NF_ACCEPT;

	if (skb_try_make_writable(skb, sizeof(*iph)))
		return NF_DROP;

	thoff = ip_hdr(skb)->ihl * 4;
	if (nf_flow_state_check(flow, ip_hdr(skb)->protocol, skb, thoff))
		return NF_ACCEPT;

	if (unlikely(nf_flow_offload_refresh(flow_table, flow)))
		nf_flow_offload_add(flow_table, flow);

	if (nf_flow_offload_dst_check(&rt->dst)) {
		flow_offload_teardown(flow);
		return NF_ACCEPT;
	}

	if (nf_flow_nat_ip(flow, skb, thoff, dir) < 0)
		return NF_DROP;

	flow->timeout = nf_flowtable_time_stamp + NF_FLOW_TIMEOUT;
	iph = ip_hdr(skb);
	ip_decrease_ttl(iph);
	skb->tstamp = 0;

	if (unlikely(dst_xfrm(&rt->dst))) {
		memset(skb->cb, 0, sizeof(struct inet_skb_parm));
		IPCB(skb)->iif = skb->dev->ifindex;
		IPCB(skb)->flags = IPSKB_FORWARDED;
		return nf_flow_xmit_xfrm(skb, state, &rt->dst);
	}

	skb->dev = outdev;
	nexthop = rt_nexthop(rt, flow->tuplehash[!dir].tuple.src_v4.s_addr);
	skb_dst_set_noref(skb, &rt->dst);
	neigh_xmit(NEIGH_ARP_TABLE, outdev, &nexthop, skb);

	return NF_STOLEN;
}
EXPORT_SYMBOL_GPL(nf_flow_offload_ip_hook);

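/* IPv6 counterparts: same structure as above, but addresses are rewritten
 * with 128-bit checksum fixups and the hop limit is decremented directly.
 */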
static int nf_flow_nat_ipv6_tcp(struct sk_buff *skb, unsigned int thoff,
				struct in6_addr *addr,
				struct in6_addr *new_addr)
{
	struct tcphdr *tcph;

	if (!pskb_may_pull(skb, thoff + sizeof(*tcph)) ||
	    skb_try_make_writable(skb, thoff + sizeof(*tcph)))
		return -1;

	tcph = (void *)(skb_network_header(skb) + thoff);
	inet_proto_csum_replace16(&tcph->check, skb, addr->s6_addr32,
				  new_addr->s6_addr32, true);

	return 0;
}

static int nf_flow_nat_ipv6_udp(struct sk_buff *skb, unsigned int thoff,
				struct in6_addr *addr,
				struct in6_addr *new_addr)
{
	struct udphdr *udph;

	if (!pskb_may_pull(skb, thoff + sizeof(*udph)) ||
	    skb_try_make_writable(skb, thoff + sizeof(*udph)))
		return -1;

	udph = (void *)(skb_network_header(skb) + thoff);
	if (udph->check || skb->ip_summed == CHECKSUM_PARTIAL) {
		inet_proto_csum_replace16(&udph->check, skb, addr->s6_addr32,
					  new_addr->s6_addr32, true);
		if (!udph->check)
			udph->check = CSUM_MANGLED_0;
	}

	return 0;
}

static int nf_flow_nat_ipv6_l4proto(struct sk_buff *skb, struct ipv6hdr *ip6h,
				    unsigned int thoff, struct in6_addr *addr,
				    struct in6_addr *new_addr)
{
	switch (ip6h->nexthdr) {
	case IPPROTO_TCP:
		if (nf_flow_nat_ipv6_tcp(skb, thoff, addr, new_addr) < 0)
			return -1;
		break;
	case IPPROTO_UDP:
		if (nf_flow_nat_ipv6_udp(skb, thoff, addr, new_addr) < 0)
			return -1;
		break;
	}

	return 0;
}

static int nf_flow_snat_ipv6(const struct flow_offload *flow,
			     struct sk_buff *skb, struct ipv6hdr *ip6h,
			     unsigned int thoff,
			     enum flow_offload_tuple_dir dir)
{
	struct in6_addr addr, new_addr;

	switch (dir) {
	case FLOW_OFFLOAD_DIR_ORIGINAL:
		addr = ip6h->saddr;
		new_addr = flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.dst_v6;
		ip6h->saddr = new_addr;
		break;
	case FLOW_OFFLOAD_DIR_REPLY:
		addr = ip6h->daddr;
		new_addr = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.src_v6;
		ip6h->daddr = new_addr;
		break;
	default:
		return -1;
	}

	return nf_flow_nat_ipv6_l4proto(skb, ip6h, thoff, &addr, &new_addr);
}

static int nf_flow_dnat_ipv6(const struct flow_offload *flow,
			     struct sk_buff *skb, struct ipv6hdr *ip6h,
			     unsigned int thoff,
			     enum flow_offload_tuple_dir dir)
{
	struct in6_addr addr, new_addr;

	switch (dir) {
	case FLOW_OFFLOAD_DIR_ORIGINAL:
		addr = ip6h->daddr;
		new_addr = flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.src_v6;
		ip6h->daddr = new_addr;
		break;
	case FLOW_OFFLOAD_DIR_REPLY:
		addr = ip6h->saddr;
		new_addr = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.dst_v6;
		ip6h->saddr = new_addr;
		break;
	default:
		return -1;
	}

	return nf_flow_nat_ipv6_l4proto(skb, ip6h, thoff, &addr, &new_addr);
}

static int nf_flow_nat_ipv6(const struct flow_offload *flow,
			    struct sk_buff *skb,
			    enum flow_offload_tuple_dir dir)
{
	struct ipv6hdr *ip6h = ipv6_hdr(skb);
	unsigned int thoff = sizeof(*ip6h);

	if (test_bit(NF_FLOW_SNAT, &flow->flags) &&
	    (nf_flow_snat_port(flow, skb, thoff, ip6h->nexthdr, dir) < 0 ||
	     nf_flow_snat_ipv6(flow, skb, ip6h, thoff, dir) < 0))
		return -1;
	if (test_bit(NF_FLOW_DNAT, &flow->flags) &&
	    (nf_flow_dnat_port(flow, skb, thoff, ip6h->nexthdr, dir) < 0 ||
	     nf_flow_dnat_ipv6(flow, skb, ip6h, thoff, dir) < 0))
		return -1;

	return 0;
}

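/* Extract the lookup tuple from an IPv6 packet; extension headers and
 * non-TCP/UDP traffic are left to the classic path.
 */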
static int nf_flow_tuple_ipv6(struct sk_buff *skb, const struct net_device *dev,
			      struct flow_offload_tuple *tuple)
{
	struct flow_ports *ports;
	struct ipv6hdr *ip6h;
	unsigned int thoff;

	if (!pskb_may_pull(skb, sizeof(*ip6h)))
		return -1;

	ip6h = ipv6_hdr(skb);

	if (ip6h->nexthdr != IPPROTO_TCP &&
	    ip6h->nexthdr != IPPROTO_UDP)
		return -1;

	if (ip6h->hop_limit <= 1)
		return -1;

	thoff = sizeof(*ip6h);
	if (!pskb_may_pull(skb, thoff + sizeof(*ports)))
		return -1;

	/* pskb_may_pull() may reallocate the header, re-fetch the pointer. */
	ip6h = ipv6_hdr(skb);
	ports = (struct flow_ports *)(skb_network_header(skb) + thoff);

	tuple->src_v6 = ip6h->saddr;
	tuple->dst_v6 = ip6h->daddr;
	tuple->src_port = ports->source;
	tuple->dst_port = ports->dest;
	tuple->l3proto = AF_INET6;
	tuple->l4proto = ip6h->nexthdr;
	tuple->iifidx = dev->ifindex;

	return 0;
}

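/* IPv6 fast path: mirrors nf_flow_offload_ip_hook() above. */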
unsigned int
nf_flow_offload_ipv6_hook(void *priv, struct sk_buff *skb,
			  const struct nf_hook_state *state)
{
	struct flow_offload_tuple_rhash *tuplehash;
	struct nf_flowtable *flow_table = priv;
	struct flow_offload_tuple tuple = {};
	enum flow_offload_tuple_dir dir;
	const struct in6_addr *nexthop;
	struct flow_offload *flow;
	struct net_device *outdev;
	struct ipv6hdr *ip6h;
	struct rt6_info *rt;

	if (skb->protocol != htons(ETH_P_IPV6))
		return NF_ACCEPT;

	if (nf_flow_tuple_ipv6(skb, state->in, &tuple) < 0)
		return NF_ACCEPT;

	tuplehash = flow_offload_lookup(flow_table, &tuple);
	if (tuplehash == NULL)
		return NF_ACCEPT;

	dir = tuplehash->tuple.dir;
	flow = container_of(tuplehash, struct flow_offload, tuplehash[dir]);
	rt = (struct rt6_info *)flow->tuplehash[dir].tuple.dst_cache;
	outdev = rt->dst.dev;

	if (unlikely(nf_flow_exceeds_mtu(skb, flow->tuplehash[dir].tuple.mtu)))
		return NF_ACCEPT;

	if (nf_flow_state_check(flow, ipv6_hdr(skb)->nexthdr, skb,
				sizeof(*ip6h)))
		return NF_ACCEPT;

	if (unlikely(nf_flow_offload_refresh(flow_table, flow)))
		nf_flow_offload_add(flow_table, flow);

	if (nf_flow_offload_dst_check(&rt->dst)) {
		flow_offload_teardown(flow);
		return NF_ACCEPT;
	}

	if (skb_try_make_writable(skb, sizeof(*ip6h)))
		return NF_DROP;

	if (nf_flow_nat_ipv6(flow, skb, dir) < 0)
		return NF_DROP;

	flow->timeout = nf_flowtable_time_stamp + NF_FLOW_TIMEOUT;
	ip6h = ipv6_hdr(skb);
	ip6h->hop_limit--;
	skb->tstamp = 0;

	if (unlikely(dst_xfrm(&rt->dst))) {
		memset(skb->cb, 0, sizeof(struct inet6_skb_parm));
		IP6CB(skb)->iif = skb->dev->ifindex;
		IP6CB(skb)->flags = IP6SKB_FORWARDED;
		return nf_flow_xmit_xfrm(skb, state, &rt->dst);
	}

	skb->dev = outdev;
	nexthop = rt6_nexthop(rt, &flow->tuplehash[!dir].tuple.src_v6);
	skb_dst_set_noref(skb, &rt->dst);
	neigh_xmit(NEIGH_ND_TABLE, outdev, nexthop, skb);

	return NF_STOLEN;
}
EXPORT_SYMBOL_GPL(nf_flow_offload_ipv6_hook);