/*
 * Checksum updating actions
 *
 * Copyright (c) 2010 Gregoire Baron <baronchon@n7mm.org>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 */
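
/*
 * Illustrative usage sketch (not part of this file): with iproute2 this
 * action is typically attached after a header rewrite so the kernel fixes
 * up the checksums it mangled. The device, handles, and match below are
 * assumptions for the example; the flag keywords follow tc-csum(8):
 *
 *   tc qdisc add dev eth0 handle 1: root prio
 *   tc filter add dev eth0 parent 1: protocol ip prio 10 \
 *       u32 match ip dst 192.0.2.1/32 \
 *       action pedit munge ip dst set 198.51.100.1 pipe \
 *       action csum iph and udp
 */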

#include <linux/types.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/spinlock.h>

#include <linux/netlink.h>
#include <net/netlink.h>
#include <linux/rtnetlink.h>

#include <linux/skbuff.h>

#include <net/ip.h>
#include <net/ipv6.h>
#include <net/icmp.h>
#include <linux/icmpv6.h>
#include <linux/igmp.h>
#include <net/tcp.h>
#include <net/udp.h>
#include <net/ip6_checksum.h>

#include <net/act_api.h>

#include <linux/tc_act/tc_csum.h>
#include <net/tc_act/tc_csum.h>

#define CSUM_TAB_MASK 15

static const struct nla_policy csum_policy[TCA_CSUM_MAX + 1] = {
	[TCA_CSUM_PARMS] = { .len = sizeof(struct tc_csum), },
};

static int csum_net_id;
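
/*
 * Netlink ->init() handler: parse TCA_CSUM_PARMS and either create a new
 * action instance in this netns' hash table or update an existing one.
 */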
static int tcf_csum_init(struct net *net, struct nlattr *nla,
			 struct nlattr *est, struct tc_action *a, int ovr,
			 int bind)
{
	struct tc_action_net *tn = net_generic(net, csum_net_id);
	struct nlattr *tb[TCA_CSUM_MAX + 1];
	struct tc_csum *parm;
	struct tcf_csum *p;
	int ret = 0, err;

	if (nla == NULL)
		return -EINVAL;

	err = nla_parse_nested(tb, TCA_CSUM_MAX, nla, csum_policy);
	if (err < 0)
		return err;

	if (tb[TCA_CSUM_PARMS] == NULL)
		return -EINVAL;
	parm = nla_data(tb[TCA_CSUM_PARMS]);

	if (!tcf_hash_check(tn, parm->index, a, bind)) {
		ret = tcf_hash_create(tn, parm->index, est, a,
				      sizeof(*p), bind, false);
		if (ret)
			return ret;
		ret = ACT_P_CREATED;
	} else {
		if (bind)	/* don't override defaults */
			return 0;
		tcf_hash_release(a, bind);
		if (!ovr)
			return -EEXIST;
	}

	p = a->priv;
	spin_lock_bh(&p->tcf_lock);
	p->tcf_action = parm->action;
	p->update_flags = parm->update_flags;
	spin_unlock_bh(&p->tcf_lock);

	if (ret == ACT_P_CREATED)
		tcf_hash_insert(tn, a);

	return ret;
}

/**
 * tcf_csum_skb_nextlayer - Get next layer pointer
 * @skb: sk_buff to use
 * @ihl: previous summed headers length
 * @ipl: complete packet length
 * @jhl: next header length
 *
 * Check that the expected next layer is available in the specified sk_buff.
 * Return a pointer to the next layer if it is, NULL otherwise.
 */
static void *tcf_csum_skb_nextlayer(struct sk_buff *skb,
				    unsigned int ihl, unsigned int ipl,
				    unsigned int jhl)
{
	int ntkoff = skb_network_offset(skb);
	int hl = ihl + jhl;

	if (!pskb_may_pull(skb, ipl + ntkoff) || (ipl < hl) ||
	    skb_try_make_writable(skb, hl + ntkoff))
		return NULL;
	else
		return (void *)(skb_network_header(skb) + ihl);
}
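
/*
 * The tcf_csum_ipv{4,6}_* helpers below all follow the same pattern:
 * zero the checksum field, sum the transport header plus payload with
 * csum_partial(), then fold the result (adding the pseudo-header where
 * the protocol requires one). They return 1 on success and 0 when the
 * packet is too short or cannot be made writable.
 */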

static int tcf_csum_ipv4_icmp(struct sk_buff *skb,
			      unsigned int ihl, unsigned int ipl)
{
	struct icmphdr *icmph;

	icmph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*icmph));
	if (icmph == NULL)
		return 0;

	icmph->checksum = 0;
	skb->csum = csum_partial(icmph, ipl - ihl, 0);
	icmph->checksum = csum_fold(skb->csum);

	skb->ip_summed = CHECKSUM_NONE;

	return 1;
}

static int tcf_csum_ipv4_igmp(struct sk_buff *skb,
			      unsigned int ihl, unsigned int ipl)
{
	struct igmphdr *igmph;

	igmph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*igmph));
	if (igmph == NULL)
		return 0;

	igmph->csum = 0;
	skb->csum = csum_partial(igmph, ipl - ihl, 0);
	igmph->csum = csum_fold(skb->csum);

	skb->ip_summed = CHECKSUM_NONE;

	return 1;
}

static int tcf_csum_ipv6_icmp(struct sk_buff *skb,
			      unsigned int ihl, unsigned int ipl)
{
	struct icmp6hdr *icmp6h;
	const struct ipv6hdr *ip6h;

	icmp6h = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*icmp6h));
	if (icmp6h == NULL)
		return 0;

	ip6h = ipv6_hdr(skb);
	icmp6h->icmp6_cksum = 0;
	skb->csum = csum_partial(icmp6h, ipl - ihl, 0);
	icmp6h->icmp6_cksum = csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
					      ipl - ihl, IPPROTO_ICMPV6,
					      skb->csum);

	skb->ip_summed = CHECKSUM_NONE;

	return 1;
}

static int tcf_csum_ipv4_tcp(struct sk_buff *skb,
			     unsigned int ihl, unsigned int ipl)
{
	struct tcphdr *tcph;
	const struct iphdr *iph;

	tcph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*tcph));
	if (tcph == NULL)
		return 0;

	iph = ip_hdr(skb);
	tcph->check = 0;
	skb->csum = csum_partial(tcph, ipl - ihl, 0);
	tcph->check = tcp_v4_check(ipl - ihl,
				   iph->saddr, iph->daddr, skb->csum);

	skb->ip_summed = CHECKSUM_NONE;

	return 1;
}

static int tcf_csum_ipv6_tcp(struct sk_buff *skb,
			     unsigned int ihl, unsigned int ipl)
{
	struct tcphdr *tcph;
	const struct ipv6hdr *ip6h;

	tcph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*tcph));
	if (tcph == NULL)
		return 0;

	ip6h = ipv6_hdr(skb);
	tcph->check = 0;
	skb->csum = csum_partial(tcph, ipl - ihl, 0);
	tcph->check = csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
				      ipl - ihl, IPPROTO_TCP,
				      skb->csum);

	skb->ip_summed = CHECKSUM_NONE;

	return 1;
}
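
/*
 * For UDP over IPv4 a checksum of zero means "not computed" (RFC 768),
 * so a recomputed sum that folds to zero is transmitted as the all-ones
 * CSUM_MANGLED_0 instead. For UDP-Lite, udph->len carries the checksum
 * coverage (RFC 3828) rather than the datagram length, which is why the
 * two helpers below take the real length from the IP header.
 */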

static int tcf_csum_ipv4_udp(struct sk_buff *skb,
			     unsigned int ihl, unsigned int ipl, int udplite)
{
	struct udphdr *udph;
	const struct iphdr *iph;
	u16 ul;

	/*
	 * Support both UDP and UDPLITE checksum algorithms. Don't use
	 * udph->len to get the real length without a protocol check:
	 * UDPLITE uses udph->len for the checksum coverage instead.
	 * Use iph->tot_len, or just ipl.
	 */

	udph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*udph));
	if (udph == NULL)
		return 0;

	iph = ip_hdr(skb);
	ul = ntohs(udph->len);

	if (udplite || udph->check) {

		udph->check = 0;

		if (udplite) {
			if (ul == 0)
				skb->csum = csum_partial(udph, ipl - ihl, 0);
			else if ((ul >= sizeof(*udph)) && (ul <= ipl - ihl))
				skb->csum = csum_partial(udph, ul, 0);
			else
				goto ignore_obscure_skb;
		} else {
			if (ul != ipl - ihl)
				goto ignore_obscure_skb;

			skb->csum = csum_partial(udph, ul, 0);
		}

		udph->check = csum_tcpudp_magic(iph->saddr, iph->daddr,
						ul, iph->protocol,
						skb->csum);

		if (!udph->check)
			udph->check = CSUM_MANGLED_0;
	}

	skb->ip_summed = CHECKSUM_NONE;

ignore_obscure_skb:
	return 1;
}

static int tcf_csum_ipv6_udp(struct sk_buff *skb,
			     unsigned int ihl, unsigned int ipl, int udplite)
{
	struct udphdr *udph;
	const struct ipv6hdr *ip6h;
	u16 ul;

	/*
	 * Support both UDP and UDPLITE checksum algorithms. Don't use
	 * udph->len to get the real length without a protocol check:
	 * UDPLITE uses udph->len for the checksum coverage instead.
	 * Use ip6h->payload_len + sizeof(*ip6h) ... , or just ipl.
	 */

	udph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*udph));
	if (udph == NULL)
		return 0;

	ip6h = ipv6_hdr(skb);
	ul = ntohs(udph->len);

	udph->check = 0;

	if (udplite) {
		if (ul == 0)
			skb->csum = csum_partial(udph, ipl - ihl, 0);
		else if ((ul >= sizeof(*udph)) && (ul <= ipl - ihl))
			skb->csum = csum_partial(udph, ul, 0);
		else
			goto ignore_obscure_skb;
	} else {
		if (ul != ipl - ihl)
			goto ignore_obscure_skb;

		skb->csum = csum_partial(udph, ul, 0);
	}

	udph->check = csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr, ul,
				      udplite ? IPPROTO_UDPLITE : IPPROTO_UDP,
				      skb->csum);

	if (udph->check == 0)
		udph->check = CSUM_MANGLED_0;

	skb->ip_summed = CHECKSUM_NONE;

ignore_obscure_skb:
	return 1;
}
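
/*
 * Dispatch on the IPv4 protocol field and refresh the selected transport
 * checksum(s). Non-first fragments carry no transport header, so the
 * switch matches no case for them and only the IPv4 header checksum is
 * refreshed (when TCA_CSUM_UPDATE_FLAG_IPV4HDR is set).
 */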
static int tcf_csum_ipv4(struct sk_buff *skb, u32 update_flags)
{
	const struct iphdr *iph;
	int ntkoff;

	ntkoff = skb_network_offset(skb);

	if (!pskb_may_pull(skb, sizeof(*iph) + ntkoff))
		goto fail;

	iph = ip_hdr(skb);

	switch (iph->frag_off & htons(IP_OFFSET) ? 0 : iph->protocol) {
	case IPPROTO_ICMP:
		if (update_flags & TCA_CSUM_UPDATE_FLAG_ICMP)
			if (!tcf_csum_ipv4_icmp(skb, iph->ihl * 4,
						ntohs(iph->tot_len)))
				goto fail;
		break;
	case IPPROTO_IGMP:
		if (update_flags & TCA_CSUM_UPDATE_FLAG_IGMP)
			if (!tcf_csum_ipv4_igmp(skb, iph->ihl * 4,
						ntohs(iph->tot_len)))
				goto fail;
		break;
	case IPPROTO_TCP:
		if (update_flags & TCA_CSUM_UPDATE_FLAG_TCP)
			if (!tcf_csum_ipv4_tcp(skb, iph->ihl * 4,
					       ntohs(iph->tot_len)))
				goto fail;
		break;
	case IPPROTO_UDP:
		if (update_flags & TCA_CSUM_UPDATE_FLAG_UDP)
			if (!tcf_csum_ipv4_udp(skb, iph->ihl * 4,
					       ntohs(iph->tot_len), 0))
				goto fail;
		break;
	case IPPROTO_UDPLITE:
		if (update_flags & TCA_CSUM_UPDATE_FLAG_UDPLITE)
			if (!tcf_csum_ipv4_udp(skb, iph->ihl * 4,
					       ntohs(iph->tot_len), 1))
				goto fail;
		break;
	}

	if (update_flags & TCA_CSUM_UPDATE_FLAG_IPV4HDR) {
		if (skb_try_make_writable(skb, sizeof(*iph) + ntkoff))
			goto fail;

		ip_send_check(ip_hdr(skb));
	}

	return 1;

fail:
	return 0;
}
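
/*
 * Scan a hop-by-hop options header for a Jumbo Payload option (RFC 2675)
 * and, if a well-formed one is found, replace *pl with the jumbogram
 * payload length. Returns 0 only on a malformed jumbo option; unknown
 * options are skipped.
 */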
static int tcf_csum_ipv6_hopopts(struct ipv6_opt_hdr *ip6xh,
				 unsigned int ixhl, unsigned int *pl)
{
	int off, len, optlen;
	unsigned char *xh = (void *)ip6xh;

	off = sizeof(*ip6xh);
	len = ixhl - off;

	while (len > 1) {
		switch (xh[off]) {
		case IPV6_TLV_PAD1:
			optlen = 1;
			break;
		case IPV6_TLV_JUMBO:
			optlen = xh[off + 1] + 2;
			if (optlen != 6 || len < 6 || (off & 3) != 2)
				/* wrong jumbo option length/alignment */
				return 0;
			*pl = ntohl(*(__be32 *)(xh + off + 2));
			goto done;
		default:
			optlen = xh[off + 1] + 2;
			if (optlen > len)
				/* ignore obscure options */
				goto done;
			break;
		}
		off += optlen;
		len -= optlen;
	}

done:
	return 1;
}
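
/*
 * Walk the IPv6 extension-header chain until a transport header is
 * reached, then refresh its checksum if the matching update flag is set.
 * Fragments and unknown next-headers are left untouched.
 */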
static int tcf_csum_ipv6(struct sk_buff *skb, u32 update_flags)
{
	struct ipv6hdr *ip6h;
	struct ipv6_opt_hdr *ip6xh;
	unsigned int hl, ixhl;
	unsigned int pl;
	int ntkoff;
	u8 nexthdr;

	ntkoff = skb_network_offset(skb);

	hl = sizeof(*ip6h);

	if (!pskb_may_pull(skb, hl + ntkoff))
		goto fail;

	ip6h = ipv6_hdr(skb);

	pl = ntohs(ip6h->payload_len);
	nexthdr = ip6h->nexthdr;

	do {
		switch (nexthdr) {
		case NEXTHDR_FRAGMENT:
			goto ignore_skb;
		case NEXTHDR_ROUTING:
		case NEXTHDR_HOP:
		case NEXTHDR_DEST:
			if (!pskb_may_pull(skb, hl + sizeof(*ip6xh) + ntkoff))
				goto fail;
			ip6xh = (void *)(skb_network_header(skb) + hl);
			ixhl = ipv6_optlen(ip6xh);
			if (!pskb_may_pull(skb, hl + ixhl + ntkoff))
				goto fail;
			ip6xh = (void *)(skb_network_header(skb) + hl);
			if ((nexthdr == NEXTHDR_HOP) &&
			    !(tcf_csum_ipv6_hopopts(ip6xh, ixhl, &pl)))
				goto fail;
			nexthdr = ip6xh->nexthdr;
			hl += ixhl;
			break;
		case IPPROTO_ICMPV6:
			if (update_flags & TCA_CSUM_UPDATE_FLAG_ICMP)
				if (!tcf_csum_ipv6_icmp(skb,
							hl, pl + sizeof(*ip6h)))
					goto fail;
			goto done;
		case IPPROTO_TCP:
			if (update_flags & TCA_CSUM_UPDATE_FLAG_TCP)
				if (!tcf_csum_ipv6_tcp(skb,
						       hl, pl + sizeof(*ip6h)))
					goto fail;
			goto done;
		case IPPROTO_UDP:
			if (update_flags & TCA_CSUM_UPDATE_FLAG_UDP)
				if (!tcf_csum_ipv6_udp(skb, hl,
						       pl + sizeof(*ip6h), 0))
					goto fail;
			goto done;
		case IPPROTO_UDPLITE:
			if (update_flags & TCA_CSUM_UPDATE_FLAG_UDPLITE)
				if (!tcf_csum_ipv6_udp(skb, hl,
						       pl + sizeof(*ip6h), 1))
					goto fail;
			goto done;
		default:
			goto ignore_skb;
		}
	} while (pskb_may_pull(skb, hl + 1 + ntkoff));

done:
ignore_skb:
	return 1;

fail:
	return 0;
}
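
/*
 * The per-packet ->act() handler: snapshot the configuration under the
 * action lock, then recompute the requested checksums based on the skb's
 * protocol.
 */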
static int tcf_csum(struct sk_buff *skb,
		    const struct tc_action *a, struct tcf_result *res)
{
	struct tcf_csum *p = a->priv;
	int action;
	u32 update_flags;

	spin_lock(&p->tcf_lock);
	p->tcf_tm.lastuse = jiffies;
	bstats_update(&p->tcf_bstats, skb);
	action = p->tcf_action;
	update_flags = p->update_flags;
	spin_unlock(&p->tcf_lock);

	if (unlikely(action == TC_ACT_SHOT))
		goto drop;

	switch (tc_skb_protocol(skb)) {
	case cpu_to_be16(ETH_P_IP):
		if (!tcf_csum_ipv4(skb, update_flags))
			goto drop;
		break;
	case cpu_to_be16(ETH_P_IPV6):
		if (!tcf_csum_ipv6(skb, update_flags))
			goto drop;
		break;
	}

	return action;

drop:
	spin_lock(&p->tcf_lock);
	p->tcf_qstats.drops++;
	spin_unlock(&p->tcf_lock);
	return TC_ACT_SHOT;
}
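
/* Serialize the action's parameters and timestamps back to netlink. */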
static int tcf_csum_dump(struct sk_buff *skb,
			 struct tc_action *a, int bind, int ref)
{
	unsigned char *b = skb_tail_pointer(skb);
	struct tcf_csum *p = a->priv;
	struct tc_csum opt = {
		.update_flags = p->update_flags,
		.index   = p->tcf_index,
		.action  = p->tcf_action,
		.refcnt  = p->tcf_refcnt - ref,
		.bindcnt = p->tcf_bindcnt - bind,
	};
	struct tcf_t t;

	if (nla_put(skb, TCA_CSUM_PARMS, sizeof(opt), &opt))
		goto nla_put_failure;
	t.install = jiffies_to_clock_t(jiffies - p->tcf_tm.install);
	t.lastuse = jiffies_to_clock_t(jiffies - p->tcf_tm.lastuse);
	t.expires = jiffies_to_clock_t(p->tcf_tm.expires);
	if (nla_put(skb, TCA_CSUM_TM, sizeof(t), &t))
		goto nla_put_failure;

	return skb->len;

nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}

static int tcf_csum_walker(struct net *net, struct sk_buff *skb,
			   struct netlink_callback *cb, int type,
			   struct tc_action *a)
{
	struct tc_action_net *tn = net_generic(net, csum_net_id);

	return tcf_generic_walker(tn, skb, cb, type, a);
}

static int tcf_csum_search(struct net *net, struct tc_action *a, u32 index)
{
	struct tc_action_net *tn = net_generic(net, csum_net_id);

	return tcf_hash_search(tn, a, index);
}

static struct tc_action_ops act_csum_ops = {
	.kind		= "csum",
	.type		= TCA_ACT_CSUM,
	.owner		= THIS_MODULE,
	.act		= tcf_csum,
	.dump		= tcf_csum_dump,
	.init		= tcf_csum_init,
	.walk		= tcf_csum_walker,
	.lookup		= tcf_csum_search,
};
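
/* Per-netns setup and teardown of this action's hash table. */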
static __net_init int csum_init_net(struct net *net)
{
	struct tc_action_net *tn = net_generic(net, csum_net_id);

	return tc_action_net_init(tn, &act_csum_ops, CSUM_TAB_MASK);
}

static void __net_exit csum_exit_net(struct net *net)
{
	struct tc_action_net *tn = net_generic(net, csum_net_id);

	tc_action_net_exit(tn);
}

static struct pernet_operations csum_net_ops = {
	.init = csum_init_net,
	.exit = csum_exit_net,
	.id   = &csum_net_id,
	.size = sizeof(struct tc_action_net),
};

MODULE_DESCRIPTION("Checksum updating actions");
MODULE_LICENSE("GPL");

static int __init csum_init_module(void)
{
	return tcf_register_action(&act_csum_ops, &csum_net_ops);
}

static void __exit csum_cleanup_module(void)
{
	tcf_unregister_action(&act_csum_ops, &csum_net_ops);
}

module_init(csum_init_module);
module_exit(csum_cleanup_module);