/*
 * IPv6 specific functions of netfilter core
 *
 * Rusty Russell (C) 2000 -- This code is GPL.
 * Patrick McHardy (C) 2006-2012
 */
020b4c12 7#include <linux/kernel.h>
bb94aa16 8#include <linux/init.h>
020b4c12 9#include <linux/ipv6.h>
2cc7d573
HW
10#include <linux/netfilter.h>
11#include <linux/netfilter_ipv6.h>
bc3b2d7f 12#include <linux/export.h>
2a7851bf 13#include <net/addrconf.h>
020b4c12
HW
14#include <net/dst.h>
15#include <net/ipv6.h>
16#include <net/ip6_route.h>
3e3850e9 17#include <net/xfrm.h>
c01cd429 18#include <net/netfilter/nf_queue.h>
764dd163
PNA
19#include <net/netfilter/nf_conntrack_bridge.h>
20#include <net/netfilter/ipv6/nf_defrag_ipv6.h>
21#include "../bridge/br_private.h"
020b4c12 22
/*
 * Redo the routing decision for a locally generated packet whose headers
 * may have been mangled by netfilter: look up a fresh route for the
 * current addresses/mark, replace the skb's dst, re-run XFRM policy, and
 * expand the skb head if the (possibly new) output device needs more
 * link-layer headroom.
 *
 * Returns 0 on success, or a negative errno: the route lookup error,
 * the xfrm_lookup() error, or -ENOMEM if headroom expansion fails.
 */
int ip6_route_me_harder(struct net *net, struct sk_buff *skb)
{
	const struct ipv6hdr *iph = ipv6_hdr(skb);
	struct sock *sk = sk_to_full_sk(skb->sk);
	unsigned int hh_len;
	struct dst_entry *dst;
	/* Multicast/link-local destinations are only meaningful relative to
	 * a device, so the lookup below must be bound to an interface.
	 */
	int strict = (ipv6_addr_type(&iph->daddr) &
		      (IPV6_ADDR_MULTICAST | IPV6_ADDR_LINKLOCAL));
	struct flowi6 fl6 = {
		/* Prefer the socket's bound device; otherwise pin the lookup
		 * to the current output device for strict destinations.
		 */
		.flowi6_oif = sk && sk->sk_bound_dev_if ? sk->sk_bound_dev_if :
			strict ? skb_dst(skb)->dev->ifindex : 0,
		.flowi6_mark = skb->mark,
		.flowi6_uid = sock_net_uid(net, sk),
		.daddr = iph->daddr,
		.saddr = iph->saddr,
	};
	int err;

	dst = ip6_route_output(net, sk, &fl6);
	err = dst->error;
	if (err) {
		IP6_INC_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTNOROUTES);
		net_dbg_ratelimited("ip6_route_me_harder: No more route\n");
		dst_release(dst);
		return err;
	}

	/* Drop old route. */
	skb_dst_drop(skb);

	skb_dst_set(skb, dst);

#ifdef CONFIG_XFRM
	/* Re-run IPsec policy unless this packet was already transformed. */
	if (!(IP6CB(skb)->flags & IP6SKB_XFRM_TRANSFORMED) &&
	    xfrm_decode_session(skb, flowi6_to_flowi(&fl6), AF_INET6) == 0) {
		/* Detach dst from the skb first: xfrm_lookup() takes over
		 * the reference (presumably releasing it on failure), so the
		 * skb must not hold its own ref across the call -- see
		 * xfrm_lookup() for the exact refcount contract.
		 */
		skb_dst_set(skb, NULL);
		dst = xfrm_lookup(net, dst, flowi6_to_flowi(&fl6), sk, 0);
		if (IS_ERR(dst))
			return PTR_ERR(dst);
		skb_dst_set(skb, dst);
	}
#endif

	/* Change in oif may mean change in hh_len. */
	hh_len = skb_dst(skb)->dev->hard_header_len;
	if (skb_headroom(skb) < hh_len &&
	    pskb_expand_head(skb, HH_DATA_ALIGN(hh_len - skb_headroom(skb)),
			     0, GFP_ATOMIC))
		return -ENOMEM;

	return 0;
}
EXPORT_SYMBOL(ip6_route_me_harder);
76
ce388f45 77static int nf_ip6_reroute(struct sk_buff *skb,
02f014d8 78 const struct nf_queue_entry *entry)
2cc7d573 79{
02f014d8 80 struct ip6_rt_info *rt_info = nf_queue_entry_reroute(entry);
2cc7d573 81
1d1de89b 82 if (entry->state.hook == NF_INET_LOCAL_OUT) {
b71d1d42 83 const struct ipv6hdr *iph = ipv6_hdr(skb);
2cc7d573 84 if (!ipv6_addr_equal(&iph->daddr, &rt_info->daddr) ||
9f40ac71
EL
85 !ipv6_addr_equal(&iph->saddr, &rt_info->saddr) ||
86 skb->mark != rt_info->mark)
ce388f45 87 return ip6_route_me_harder(entry->state.net, skb);
2cc7d573
HW
88 }
89 return 0;
90}
91
ac02bcf9
FW
92int __nf_ip6_route(struct net *net, struct dst_entry **dst,
93 struct flowi *fl, bool strict)
1841a4c7 94{
0fae2e77
FW
95 static const struct ipv6_pinfo fake_pinfo;
96 static const struct inet_sock fake_sk = {
97 /* makes ip6_route_output set RT6_LOOKUP_F_IFACE: */
98 .sk.sk_bound_dev_if = 1,
99 .pinet6 = (struct ipv6_pinfo *) &fake_pinfo,
100 };
101 const void *sk = strict ? &fake_sk : NULL;
2dad81ad
FW
102 struct dst_entry *result;
103 int err;
104
105 result = ip6_route_output(net, sk, &fl->u.ip6);
106 err = result->error;
107 if (err)
108 dst_release(result);
109 else
110 *dst = result;
111 return err;
1841a4c7 112}
ac02bcf9 113EXPORT_SYMBOL_GPL(__nf_ip6_route);
1841a4c7 114
764dd163
PNA
/*
 * Refragment an IPv6 skb for bridge conntrack: split @skb so no fragment
 * exceeds the size recorded at defrag time (frag_max_size in the bridge
 * input CB) and hand each fragment to @output.  Mirrors ip6_fragment()
 * without the routing parts.
 *
 * Consumes @skb on every path.  Returns 0 or the first @output error;
 * packets that cannot be split sanely are dropped ("blackhole") and 0 is
 * returned.
 */
int br_ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
		    struct nf_ct_bridge_frag_data *data,
		    int (*output)(struct net *, struct sock *sk,
				  const struct nf_ct_bridge_frag_data *data,
				  struct sk_buff *))
{
	int frag_max_size = BR_INPUT_SKB_CB(skb)->frag_max_size;
	struct ip6_frag_state state;
	u8 *prevhdr, nexthdr = 0;
	unsigned int mtu, hlen;
	int hroom, err = 0;
	__be32 frag_id;

	/* Find where the fragment header must be inserted; on success the
	 * return value is the length of the unfragmentable part.
	 */
	err = ip6_find_1stfragopt(skb, &prevhdr);
	if (err < 0)
		goto blackhole;
	hlen = err;
	nexthdr = *prevhdr;

	/* Sanity-check the recorded fragment size before trusting it. */
	mtu = skb->dev->mtu;
	if (frag_max_size > mtu ||
	    frag_max_size < IPV6_MIN_MTU)
		goto blackhole;

	mtu = frag_max_size;
	/* Need room for the unfragmentable part, a fragment header and at
	 * least 8 bytes of payload.
	 */
	if (mtu < hlen + sizeof(struct frag_hdr) + 8)
		goto blackhole;
	mtu -= hlen + sizeof(struct frag_hdr);

	frag_id = ipv6_select_ident(net, &ipv6_hdr(skb)->daddr,
				    &ipv6_hdr(skb)->saddr);

	/* Finalize any deferred checksum before the payload is split. */
	if (skb->ip_summed == CHECKSUM_PARTIAL &&
	    (err = skb_checksum_help(skb)))
		goto blackhole;

	hroom = LL_RESERVED_SPACE(skb->dev);
	if (skb_has_frag_list(skb)) {
		/* Fast path: reuse the existing frag list as the fragment
		 * chain, provided every piece already fits and nothing is
		 * shared or cloned.
		 */
		unsigned int first_len = skb_pagelen(skb);
		struct ip6_fraglist_iter iter;
		struct sk_buff *frag2;

		if (first_len - hlen > mtu ||
		    skb_headroom(skb) < (hroom + sizeof(struct frag_hdr)))
			goto blackhole;

		if (skb_cloned(skb))
			goto slow_path;

		skb_walk_frags(skb, frag2) {
			if (frag2->len > mtu ||
			    skb_headroom(frag2) < (hlen + hroom + sizeof(struct frag_hdr)))
				goto blackhole;

			/* Partially cloned skb? */
			if (skb_shared(frag2))
				goto slow_path;
		}

		err = ip6_fraglist_init(skb, hlen, prevhdr, nexthdr, frag_id,
					&iter);
		if (err < 0)
			goto blackhole;

		for (;;) {
			/* Prepare header of the next frame,
			 * before previous one went down.
			 */
			if (iter.frag)
				ip6_fraglist_prepare(skb, &iter);

			err = output(net, sk, data, skb);
			if (err || !iter.frag)
				break;

			skb = ip6_fraglist_next(&iter);
		}

		kfree(iter.tmp_hdr);
		if (!err)
			return 0;

		/* output() failed mid-chain: free the unsent fragments. */
		kfree_skb_list(iter.frag);
		return err;
	}
slow_path:
	/* This is a linearized skbuff, the original geometry is lost for us.
	 * This may also be a clone skbuff, we could preserve the geometry for
	 * the copies but probably not worth the effort.
	 */
	ip6_frag_init(skb, hlen, mtu, skb->dev->needed_tailroom,
		      LL_RESERVED_SPACE(skb->dev), prevhdr, nexthdr, frag_id,
		      &state);

	while (state.left > 0) {
		struct sk_buff *skb2;

		/* Carve the next fragment off the original skb. */
		skb2 = ip6_frag_next(skb, &state);
		if (IS_ERR(skb2)) {
			err = PTR_ERR(skb2);
			goto blackhole;
		}

		err = output(net, sk, data, skb2);
		if (err)
			goto blackhole;
	}
	consume_skb(skb);
	return err;

blackhole:
	kfree_skb(skb);
	return 0;
}
EXPORT_SYMBOL_GPL(br_ip6_fragment);
230
/*
 * Indirection table letting other code (e.g. bridge netfilter) call IPv6
 * routines without a hard link-time dependency on the ipv6 module.  The
 * IS_MODULE(CONFIG_IPV6) guards expose entries only when ipv6 is modular;
 * with ipv6 built in, callers presumably use those symbols directly --
 * confirm against the nf_ipv6_ops wrappers in the netfilter core.
 */
static const struct nf_ipv6_ops ipv6ops = {
#if IS_MODULE(CONFIG_IPV6)
	.chk_addr = ipv6_chk_addr,
	.route_me_harder = ip6_route_me_harder,
	.dev_get_saddr = ipv6_dev_get_saddr,
	.route = __nf_ip6_route,
	.cookie_init_sequence = __cookie_v6_init_sequence,
	.cookie_v6_check = __cookie_v6_check,
#endif
	.route_input = ip6_route_input,
	.fragment = ip6_fragment,
	.reroute = nf_ip6_reroute,
#if IS_MODULE(CONFIG_IPV6) && IS_ENABLED(CONFIG_NF_DEFRAG_IPV6)
	.br_defrag = nf_ct_frag6_gather,
#endif
#if IS_MODULE(CONFIG_IPV6)
	.br_fragment = br_ip6_fragment,
#endif
};
250
2cc7d573
HW
251int __init ipv6_netfilter_init(void)
252{
2a7851bf 253 RCU_INIT_POINTER(nf_ipv6_ops, &ipv6ops);
b3a61254 254 return 0;
2cc7d573
HW
255}
256
5bf887f2
DM
257/* This can be called from inet6_init() on errors, so it cannot
258 * be marked __exit. -DaveM
259 */
260void ipv6_netfilter_fini(void)
2cc7d573 261{
2a7851bf 262 RCU_INIT_POINTER(nf_ipv6_ops, NULL);
2cc7d573 263}