5 * Kazunori MIYAZAWA @USAGI
6 * YOSHIFUJI Hideaki @USAGI
7 * Split up af-specific portion
11 #include <linux/err.h>
12 #include <linux/kernel.h>
13 #include <linux/inetdevice.h>
14 #include <linux/if_tunnel.h>
/* Forward declaration: the afinfo structure is initialized near the bottom
 * of this file, but xfrm4_garbage_collect() dereferences it earlier. */
19 static struct xfrm_policy_afinfo xfrm4_policy_afinfo;
/*
 * __xfrm4_dst_lookup - core IPv4 route lookup for an xfrm flow.
 *
 * Zeroes @fl4, fills in the destination (and, when supplied, the source)
 * address plus the TOS, then resolves the route via __ip_route_output_key().
 *
 * NOTE(review): lines are elided in this view — the 'int tos' parameter in
 * the signature, the NULL check guarding the saddr assignment, and the
 * error/return handling of 'rt' are not visible here; confirm against the
 * full file.
 */
21 static struct dst_entry *__xfrm4_dst_lookup(struct net *net, struct flowi4 *fl4,
23 const xfrm_address_t *saddr,
24 const xfrm_address_t *daddr)
/* Build the flow key from scratch so no stale fields leak into the lookup. */
28 memset(fl4, 0, sizeof(*fl4));
29 fl4->daddr = daddr->a4;
30 fl4->flowi4_tos = tos;
/* Only reached when the caller provided a source address (guard elided). */
32 fl4->saddr = saddr->a4;
/* Routing lookup; the rt error check and dst_entry return are elided. */
34 rt = __ip_route_output_key(net, fl4);
/* afinfo ->dst_lookup hook: thin wrapper that supplies a scratch flowi4
 * (its declaration is elided here) and delegates to __xfrm4_dst_lookup(). */
41 static struct dst_entry *xfrm4_dst_lookup(struct net *net, int tos,
42 const xfrm_address_t *saddr,
43 const xfrm_address_t *daddr)
47 return __xfrm4_dst_lookup(net, &fl4, tos, saddr, daddr);
/* afinfo ->get_saddr hook: discover which local source address the stack
 * would pick to reach @daddr.  Performs a route lookup with tos 0 and no
 * source hint, then copies the kernel-chosen source out of the flowi4.
 * NOTE(review): the IS_ERR() check on 'dst', the dst_release(), and the
 * return value are in elided lines — confirm against the full file. */
50 static int xfrm4_get_saddr(struct net *net,
51 xfrm_address_t *saddr, xfrm_address_t *daddr)
53 struct dst_entry *dst;
56 dst = __xfrm4_dst_lookup(net, &fl4, 0, NULL, daddr);
/* fl4.saddr was filled in by the routing lookup above. */
60 saddr->a4 = fl4.saddr;
/* afinfo ->get_tos hook: return the routing-relevant TOS bits of the
 * IPv4 flow, masking off the low-order ECN bits. */
65 static int xfrm4_get_tos(const struct flowi *fl)
67 return IPTOS_RT_MASK & fl->u.ip4.flowi4_tos; /* Strip ECN bits */
/* afinfo ->init_path hook.  Body is elided in this view — presumably a
 * no-op returning 0 for IPv4; TODO confirm against the full file. */
70 static int xfrm4_init_path(struct xfrm_dst *path, struct dst_entry *dst,
/*
 * afinfo ->fill_dst hook: populate the xfrm dst clone from the flow key
 * and from the inner (routed) rtable so the clone behaves like a normal
 * IPv4 route entry.
 */
76 static int xfrm4_fill_dst(struct xfrm_dst *xdst, struct net_device *dev,
77 const struct flowi *fl)
79 struct rtable *rt = (struct rtable *)xdst->route;
80 const struct flowi4 *fl4 = &fl->u.ip4;
/* Mirror the flow's interface/mark identifiers into the cloned route. */
82 xdst->u.rt.rt_route_iif = fl4->flowi4_iif;
83 xdst->u.rt.rt_iif = fl4->flowi4_iif;
84 xdst->u.rt.rt_oif = fl4->flowi4_oif;
85 xdst->u.rt.rt_mark = fl4->flowi4_mark;
87 xdst->u.dst.dev = dev;
/* Copy cached routing attributes from the inner route to the clone,
 * keeping only the cast-related flags (mask continues on an elided line).
 * The original author flagged this block as needing an audit. */
92 xdst->u.rt.rt_flags = rt->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST |
94 xdst->u.rt.rt_type = rt->rt_type;
95 xdst->u.rt.rt_src = rt->rt_src;
96 xdst->u.rt.rt_dst = rt->rt_dst;
97 xdst->u.rt.rt_gateway = rt->rt_gateway;
98 xdst->u.rt.rt_pmtu = rt->rt_pmtu;
/*
 * Build an IPv4 flow key (flowi4) from a packet so it can be matched
 * against xfrm policies.  @reverse swaps src/dst (and sport/dport) so the
 * same packet can be matched against the return-direction policy.
 *
 * NOTE(review): after pskb_may_pull() the skb data may have been
 * reallocated, yet 'xprth' (computed before the pulls) is not reloaded
 * before being dereferenced — looks like a potential stale-pointer use;
 * confirm against the full file / upstream history.
 */
104 _decode_session4(struct sk_buff *skb, struct flowi *fl, int reverse)
106 const struct iphdr *iph = ip_hdr(skb);
/* Start of the transport header: IP header length is in 32-bit words. */
107 u8 *xprth = skb_network_header(skb) + iph->ihl * 4;
108 struct flowi4 *fl4 = &fl->u.ip4;
110 memset(fl4, 0, sizeof(struct flowi4));
111 fl4->flowi4_mark = skb->mark;
/* Upper-layer selectors are only meaningful on the first fragment. */
113 if (!ip_is_fragment(iph)) {
114 switch (iph->protocol) {
/* Port-carrying protocols (sibling case labels are elided here). */
116 case IPPROTO_UDPLITE:
/* Header already within linear data, or pullable into it. */
120 if (xprth + 4 < skb->data ||
121 pskb_may_pull(skb, xprth + 4 - skb->data)) {
122 __be16 *ports = (__be16 *)xprth;
/* reverse!=0 swaps which 16-bit word is source vs. destination. */
124 fl4->fl4_sport = ports[!!reverse];
125 fl4->fl4_dport = ports[!reverse];
/* ICMP: selector is type/code, the first two bytes of the header. */
130 if (pskb_may_pull(skb, xprth + 2 - skb->data)) {
133 fl4->fl4_icmp_type = icmp[0];
134 fl4->fl4_icmp_code = icmp[1];
/* ESP (presumably — case label elided): SPI is the first 32-bit word. */
139 if (pskb_may_pull(skb, xprth + 4 - skb->data)) {
140 __be32 *ehdr = (__be32 *)xprth;
142 fl4->fl4_ipsec_spi = ehdr[0];
/* AH (presumably — case label elided): SPI is the second 32-bit word. */
147 if (pskb_may_pull(skb, xprth + 8 - skb->data)) {
148 __be32 *ah_hdr = (__be32 *)xprth;
150 fl4->fl4_ipsec_spi = ah_hdr[1];
/* IPComp (presumably): 16-bit CPI widened to the 32-bit SPI slot. */
155 if (pskb_may_pull(skb, xprth + 4 - skb->data)) {
156 __be16 *ipcomp_hdr = (__be16 *)xprth;
158 fl4->fl4_ipsec_spi = htonl(ntohs(ipcomp_hdr[1]));
/* GRE: key location depends on which optional fields precede it. */
163 if (pskb_may_pull(skb, xprth + 12 - skb->data)) {
164 __be16 *greflags = (__be16 *)xprth;
165 __be32 *gre_hdr = (__be32 *)xprth;
167 if (greflags[0] & GRE_KEY) {
/* If a checksum field is present the key shifts one word later
 * (the adjustment is on an elided line). */
168 if (greflags[0] & GRE_CSUM)
170 fl4->fl4_gre_key = gre_hdr[1];
/* Default (elided label): no upper-layer selector available. */
176 fl4->fl4_ipsec_spi = 0;
/* Network-layer part of the key; reverse swaps the address pair. */
180 fl4->flowi4_proto = iph->protocol;
181 fl4->daddr = reverse ? iph->saddr : iph->daddr;
182 fl4->saddr = reverse ? iph->daddr : iph->saddr;
183 fl4->flowi4_tos = iph->tos;
/* dst_ops ->gc hook: recover the owning netns from the embedded dst_ops,
 * run the xfrm policy garbage collector, then report memory pressure
 * (non-zero) once the entry count exceeds twice the gc threshold. */
186 static inline int xfrm4_garbage_collect(struct dst_ops *ops)
188 struct net *net = container_of(ops, struct net, xfrm.xfrm4_dst_ops);
190 xfrm4_policy_afinfo.garbage_collect(net);
191 return (dst_entries_get_slow(ops) > ops->gc_thresh * 2);
/* dst_ops ->update_pmtu hook: an xfrm dst is a wrapper, so forward the
 * PMTU update to the underlying (path) route it was built on. */
194 static void xfrm4_update_pmtu(struct dst_entry *dst, struct sock *sk,
195 struct sk_buff *skb, u32 mtu)
197 struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
198 struct dst_entry *path = xdst->route;
200 path->ops->update_pmtu(path, sk, skb, mtu);
/* dst_ops ->redirect hook: like PMTU updates, ICMP redirects are
 * delegated to the underlying (path) route. */
203 static void xfrm4_redirect(struct dst_entry *dst, struct sock *sk,
206 struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
207 struct dst_entry *path = xdst->route;
209 path->ops->redirect(path, sk, skb);
/* dst_ops ->destroy hook: release the generically-managed metrics, then
 * tear down the xfrm-specific parts of the dst. */
212 static void xfrm4_dst_destroy(struct dst_entry *dst)
214 struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
216 dst_destroy_metrics_generic(dst);
218 xfrm_dst_destroy(xdst);
/* dst_ops ->ifdown hook: detach the dst from @dev when the device goes
 * away (an early-return guard appears to be elided above the call). */
221 static void xfrm4_dst_ifdown(struct dst_entry *dst, struct net_device *dev,
227 xfrm_dst_ifdown(dst, dev);
/* dst_ops vtable for IPv4 xfrm dsts: wires the callbacks defined above
 * into the generic dst layer for ETH_P_IP traffic. */
230 static struct dst_ops xfrm4_dst_ops = {
232 .protocol = cpu_to_be16(ETH_P_IP),
233 .gc = xfrm4_garbage_collect,
234 .update_pmtu = xfrm4_update_pmtu,
235 .redirect = xfrm4_redirect,
236 .cow_metrics = dst_cow_metrics_generic,
237 .destroy = xfrm4_dst_destroy,
238 .ifdown = xfrm4_dst_ifdown,
239 .local_out = __ip_local_out,
/* Address-family info registered with the xfrm core: the set of IPv4
 * operations (lookup, session decode, dst construction) defined above. */
243 static struct xfrm_policy_afinfo xfrm4_policy_afinfo = {
245 .dst_ops = &xfrm4_dst_ops,
246 .dst_lookup = xfrm4_dst_lookup,
247 .get_saddr = xfrm4_get_saddr,
248 .decode_session = _decode_session4,
249 .get_tos = xfrm4_get_tos,
250 .init_path = xfrm4_init_path,
251 .fill_dst = xfrm4_fill_dst,
252 .blackhole_route = ipv4_blackhole_route,
/* sysctl: exposes net.ipv4.xfrm4_gc_thresh, backed directly by the
 * init_net dst_ops gc threshold.  (Likely inside a CONFIG_SYSCTL block —
 * the #ifdef is not visible in this view.) */
256 static struct ctl_table xfrm4_policy_table[] = {
258 .procname = "xfrm4_gc_thresh",
259 .data = &init_net.xfrm.xfrm4_dst_ops.gc_thresh,
260 .maxlen = sizeof(int),
262 .proc_handler = proc_dointvec,
/* Handle kept so xfrm4_policy_fini() can unregister the table. */
267 static struct ctl_table_header *sysctl_hdr;
/* Register the IPv4 afinfo with the xfrm policy core at boot. */
270 static void __init xfrm4_policy_init(void)
272 xfrm_policy_register_afinfo(&xfrm4_policy_afinfo);
/* Teardown: drop the sysctl table (guard on sysctl_hdr being non-NULL is
 * presumably on an elided line) and unregister the afinfo. */
275 static void __exit xfrm4_policy_fini(void)
279 unregister_net_sysctl_table(sysctl_hdr);
281 xfrm_policy_unregister_afinfo(&xfrm4_policy_afinfo);
/* Boot-time entry point (continues past the end of this view).  Sizes the
 * xfrm gc threshold from the routing table capacity, initializes the dst
 * entry counter, and registers the sysctl table. */
284 void __init xfrm4_init(int rt_max_size)
287 * Select a default value for the gc_thresh based on the main route
288 * table hash size. It seems to me the worst case scenario is when
289 * we have ipsec operating in transport mode, in which we create a
290 * dst_entry per socket. The xfrm gc algorithm starts trying to remove
291 * entries at gc_thresh, and prevents new allocations as 2*gc_thresh
292 * so lets set an initial xfrm gc_thresh value at the rt_max_size/2.
293 * That will let us store an ipsec connection per route table entry,
294 * and start cleaning when were 1/2 full
296 xfrm4_dst_ops.gc_thresh = rt_max_size/2;
297 dst_entries_init(&xfrm4_dst_ops);
/* Registration continues beyond this view (table argument elided). */
302 sysctl_hdr = register_net_sysctl(&init_net, "net/ipv4",