/*
 * Checksum updating actions
 *
 * Copyright (c) 2010 Gregoire Baron <baronchon@n7mm.org>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 */

#include <linux/types.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/spinlock.h>

#include <linux/netlink.h>
#include <net/netlink.h>
#include <linux/rtnetlink.h>

#include <linux/skbuff.h>

#include <net/ip.h>
#include <net/ipv6.h>
#include <net/icmp.h>
#include <linux/icmpv6.h>
#include <linux/igmp.h>
#include <net/tcp.h>
#include <net/udp.h>
#include <net/ip6_checksum.h>
#include <net/sctp/checksum.h>

#include <net/act_api.h>

#include <linux/tc_act/tc_csum.h>
#include <net/tc_act/tc_csum.h>

static const struct nla_policy csum_policy[TCA_CSUM_MAX + 1] = {
        [TCA_CSUM_PARMS] = { .len = sizeof(struct tc_csum), },
};

static unsigned int csum_net_id;
static struct tc_action_ops act_csum_ops;

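/*
 * Configuration arrives in the TCA_CSUM_PARMS attribute; each
 * TCA_CSUM_UPDATE_FLAG_* bit in tc_csum.update_flags selects one of the
 * checksum helpers below.  As a rough illustration only (assuming the
 * iproute2 "csum" action syntax), refreshing the IPv4 header and UDP
 * checksums after a header rewrite might look like:
 *
 *	tc filter add dev eth0 parent 1: protocol ip u32 match u32 0 0 \
 *		action csum iph and udp
 */
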
static int tcf_csum_init(struct net *net, struct nlattr *nla,
                         struct nlattr *est, struct tc_action **a, int ovr,
                         int bind)
{
        struct tc_action_net *tn = net_generic(net, csum_net_id);
        struct tcf_csum_params *params_old, *params_new;
        struct nlattr *tb[TCA_CSUM_MAX + 1];
        struct tc_csum *parm;
        struct tcf_csum *p;
        int ret = 0, err;

        if (nla == NULL)
                return -EINVAL;

        err = nla_parse_nested(tb, TCA_CSUM_MAX, nla, csum_policy, NULL);
        if (err < 0)
                return err;
        if (tb[TCA_CSUM_PARMS] == NULL)
                return -EINVAL;
        parm = nla_data(tb[TCA_CSUM_PARMS]);

        if (!tcf_idr_check(tn, parm->index, a, bind)) {
                ret = tcf_idr_create(tn, parm->index, est, a,
                                     &act_csum_ops, bind, true);
                if (ret)
                        return ret;
                ret = ACT_P_CREATED;
        } else {
                if (bind) /* don't override defaults */
                        return 0;
                tcf_idr_release(*a, bind);
                if (!ovr)
                        return -EEXIST;
        }

        p = to_tcf_csum(*a);
        ASSERT_RTNL();

        params_new = kzalloc(sizeof(*params_new), GFP_KERNEL);
        if (unlikely(!params_new)) {
                if (ret == ACT_P_CREATED)
                        tcf_idr_release(*a, bind);
                return -ENOMEM;
        }
        params_old = rtnl_dereference(p->params);

        params_new->action = parm->action;
        params_new->update_flags = parm->update_flags;
        rcu_assign_pointer(p->params, params_new);
        if (params_old)
                kfree_rcu(params_old, rcu);

        if (ret == ACT_P_CREATED)
                tcf_idr_insert(tn, *a);

        return ret;
}

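/*
 * Note on the parameter layout: action and update_flags live in a
 * separately allocated tcf_csum_params so that tcf_csum_init() can
 * publish a new configuration with a single rcu_assign_pointer() and
 * the datapath in tcf_csum() can read it under rcu_read_lock() without
 * taking the per-action spinlock.
 */
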
/**
 * tcf_csum_skb_nextlayer - Get next layer pointer
 * @skb: sk_buff to use
 * @ihl: previous summed headers length
 * @ipl: complete packet length
 * @jhl: next header length
 *
 * Check the expected next layer availability in the specified sk_buff.
 * Return the next layer pointer if the check passes, NULL otherwise.
 */
static void *tcf_csum_skb_nextlayer(struct sk_buff *skb,
                                    unsigned int ihl, unsigned int ipl,
                                    unsigned int jhl)
{
        int ntkoff = skb_network_offset(skb);
        int hl = ihl + jhl;

        if (!pskb_may_pull(skb, ipl + ntkoff) || (ipl < hl) ||
            skb_try_make_writable(skb, hl + ntkoff))
                return NULL;
        else
                return (void *)(skb_network_header(skb) + ihl);
}

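/*
 * Example of the check above: for a plain IPv4 + TCP packet, ihl is 20
 * (the already-summed IPv4 header), jhl is sizeof(struct tcphdr) == 20,
 * so hl == 40.  The caller's ipl (taken from iph->tot_len) must cover at
 * least those 40 bytes, the full ipl bytes must be available in the
 * linear area, and the first hl bytes must be writable before any
 * checksum field is touched.
 */
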
static int tcf_csum_ipv4_icmp(struct sk_buff *skb, unsigned int ihl,
                              unsigned int ipl)
{
        struct icmphdr *icmph;

        icmph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*icmph));
        if (icmph == NULL)
                return 0;

        icmph->checksum = 0;
        skb->csum = csum_partial(icmph, ipl - ihl, 0);
        icmph->checksum = csum_fold(skb->csum);

        skb->ip_summed = CHECKSUM_NONE;

        return 1;
}

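/*
 * csum_partial() accumulates the 16-bit words of the message into a
 * 32-bit running sum; csum_fold() reduces that to the final 16-bit
 * one's-complement value.  For instance, a 32-bit sum of 0x00012346
 * folds to 0x2346 + 0x0001 = 0x2347, and the value written to the
 * header is its complement, 0xdcb8.
 */
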
static int tcf_csum_ipv4_igmp(struct sk_buff *skb,
                              unsigned int ihl, unsigned int ipl)
{
        struct igmphdr *igmph;

        igmph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*igmph));
        if (igmph == NULL)
                return 0;

        igmph->csum = 0;
        skb->csum = csum_partial(igmph, ipl - ihl, 0);
        igmph->csum = csum_fold(skb->csum);

        skb->ip_summed = CHECKSUM_NONE;

        return 1;
}

static int tcf_csum_ipv6_icmp(struct sk_buff *skb, unsigned int ihl,
                              unsigned int ipl)
{
        struct icmp6hdr *icmp6h;
        const struct ipv6hdr *ip6h;

        icmp6h = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*icmp6h));
        if (icmp6h == NULL)
                return 0;

        ip6h = ipv6_hdr(skb);
        icmp6h->icmp6_cksum = 0;
        skb->csum = csum_partial(icmp6h, ipl - ihl, 0);
        icmp6h->icmp6_cksum = csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
                                              ipl - ihl, IPPROTO_ICMPV6,
                                              skb->csum);

        skb->ip_summed = CHECKSUM_NONE;

        return 1;
}

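/*
 * Unlike the IPv4 case above, the ICMPv6 checksum must also cover an
 * IPv6 pseudo-header (source address, destination address, upper-layer
 * length and next header); csum_ipv6_magic() folds that pseudo-header
 * into the partial sum computed over the ICMPv6 message itself.
 */
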
static int tcf_csum_ipv4_tcp(struct sk_buff *skb, unsigned int ihl,
                             unsigned int ipl)
{
        struct tcphdr *tcph;
        const struct iphdr *iph;

        if (skb_is_gso(skb) && skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
                return 1;

        tcph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*tcph));
        if (tcph == NULL)
                return 0;

        iph = ip_hdr(skb);
        tcph->check = 0;
        skb->csum = csum_partial(tcph, ipl - ihl, 0);
        tcph->check = tcp_v4_check(ipl - ihl,
                                   iph->saddr, iph->daddr, skb->csum);

        skb->ip_summed = CHECKSUM_NONE;

        return 1;
}

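/*
 * GSO packets are deliberately skipped above: a TCPv4 GSO skb is still
 * one large superframe at this point, and its per-segment checksums are
 * filled in later, when the stack (or the NIC) performs the actual
 * segmentation.
 */
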
static int tcf_csum_ipv6_tcp(struct sk_buff *skb, unsigned int ihl,
                             unsigned int ipl)
{
        struct tcphdr *tcph;
        const struct ipv6hdr *ip6h;

        if (skb_is_gso(skb) && skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
                return 1;

        tcph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*tcph));
        if (tcph == NULL)
                return 0;

        ip6h = ipv6_hdr(skb);
        tcph->check = 0;
        skb->csum = csum_partial(tcph, ipl - ihl, 0);
        tcph->check = csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
                                      ipl - ihl, IPPROTO_TCP,
                                      skb->csum);

        skb->ip_summed = CHECKSUM_NONE;

        return 1;
}

static int tcf_csum_ipv4_udp(struct sk_buff *skb, unsigned int ihl,
                             unsigned int ipl, int udplite)
{
        struct udphdr *udph;
        const struct iphdr *iph;
        u16 ul;

        if (skb_is_gso(skb) && skb_shinfo(skb)->gso_type & SKB_GSO_UDP)
                return 1;

        /*
         * Support both UDP and UDPLITE checksum algorithms.  Don't use
         * udph->len to get the payload length without a protocol check:
         * UDPLITE uses udph->len for the checksum coverage instead.
         * Use iph->tot_len, or just ipl.
         */

        udph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*udph));
        if (udph == NULL)
                return 0;

        iph = ip_hdr(skb);
        ul = ntohs(udph->len);

        if (udplite || udph->check) {

                udph->check = 0;

                if (udplite) {
                        if (ul == 0)
                                skb->csum = csum_partial(udph, ipl - ihl, 0);
                        else if ((ul >= sizeof(*udph)) && (ul <= ipl - ihl))
                                skb->csum = csum_partial(udph, ul, 0);
                        else
                                goto ignore_obscure_skb;
                } else {
                        if (ul != ipl - ihl)
                                goto ignore_obscure_skb;

                        skb->csum = csum_partial(udph, ul, 0);
                }

                udph->check = csum_tcpudp_magic(iph->saddr, iph->daddr,
                                                ul, iph->protocol,
                                                skb->csum);

                if (!udph->check)
                        udph->check = CSUM_MANGLED_0;
        }

        skb->ip_summed = CHECKSUM_NONE;

ignore_obscure_skb:
        return 1;
}

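/*
 * The udph->check handling above follows RFC 768 semantics for UDP over
 * IPv4: a checksum field of zero means "no checksum", so packets sent
 * without one are left untouched, and a freshly computed sum of zero is
 * transmitted as CSUM_MANGLED_0 (0xffff) instead.  UDP-Lite has no such
 * escape value, hence the unconditional recomputation when udplite is set.
 */
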
static int tcf_csum_ipv6_udp(struct sk_buff *skb, unsigned int ihl,
                             unsigned int ipl, int udplite)
{
        struct udphdr *udph;
        const struct ipv6hdr *ip6h;
        u16 ul;

        if (skb_is_gso(skb) && skb_shinfo(skb)->gso_type & SKB_GSO_UDP)
                return 1;

        /*
         * Support both UDP and UDPLITE checksum algorithms.  Don't use
         * udph->len to get the payload length without a protocol check:
         * UDPLITE uses udph->len for the checksum coverage instead.
         * Use ip6h->payload_len + sizeof(*ip6h) ... , or just ipl.
         */

        udph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*udph));
        if (udph == NULL)
                return 0;

        ip6h = ipv6_hdr(skb);
        ul = ntohs(udph->len);

        udph->check = 0;

        if (udplite) {
                if (ul == 0)
                        skb->csum = csum_partial(udph, ipl - ihl, 0);
                else if ((ul >= sizeof(*udph)) && (ul <= ipl - ihl))
                        skb->csum = csum_partial(udph, ul, 0);
                else
                        goto ignore_obscure_skb;
        } else {
                if (ul != ipl - ihl)
                        goto ignore_obscure_skb;

                skb->csum = csum_partial(udph, ul, 0);
        }

        udph->check = csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr, ul,
                                      udplite ? IPPROTO_UDPLITE : IPPROTO_UDP,
                                      skb->csum);

        if (!udph->check)
                udph->check = CSUM_MANGLED_0;

        skb->ip_summed = CHECKSUM_NONE;

ignore_obscure_skb:
        return 1;
}

static int tcf_csum_sctp(struct sk_buff *skb, unsigned int ihl,
                         unsigned int ipl)
{
        struct sctphdr *sctph;

        if (skb_is_gso(skb) && skb_shinfo(skb)->gso_type & SKB_GSO_SCTP)
                return 1;

        sctph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*sctph));
        if (!sctph)
                return 0;

        sctph->checksum = sctp_compute_cksum(skb,
                                             skb_network_offset(skb) + ihl);
        skb->ip_summed = CHECKSUM_NONE;
        skb->csum_not_inet = 0;

        return 1;
}

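/*
 * SCTP does not use the Internet checksum at all: sctp_compute_cksum()
 * returns a CRC32c over the whole SCTP packet, so there is nothing to
 * fold here and skb->csum_not_inet is cleared to record that no further
 * CRC offload work is required for this skb.
 */
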
static int tcf_csum_ipv4(struct sk_buff *skb, u32 update_flags)
{
        const struct iphdr *iph;
        int ntkoff;

        ntkoff = skb_network_offset(skb);

        if (!pskb_may_pull(skb, sizeof(*iph) + ntkoff))
                goto fail;

        iph = ip_hdr(skb);

        switch (iph->frag_off & htons(IP_OFFSET) ? 0 : iph->protocol) {
        case IPPROTO_ICMP:
                if (update_flags & TCA_CSUM_UPDATE_FLAG_ICMP)
                        if (!tcf_csum_ipv4_icmp(skb, iph->ihl * 4,
                                                ntohs(iph->tot_len)))
                                goto fail;
                break;
        case IPPROTO_IGMP:
                if (update_flags & TCA_CSUM_UPDATE_FLAG_IGMP)
                        if (!tcf_csum_ipv4_igmp(skb, iph->ihl * 4,
                                                ntohs(iph->tot_len)))
                                goto fail;
                break;
        case IPPROTO_TCP:
                if (update_flags & TCA_CSUM_UPDATE_FLAG_TCP)
                        if (!tcf_csum_ipv4_tcp(skb, iph->ihl * 4,
                                               ntohs(iph->tot_len)))
                                goto fail;
                break;
        case IPPROTO_UDP:
                if (update_flags & TCA_CSUM_UPDATE_FLAG_UDP)
                        if (!tcf_csum_ipv4_udp(skb, iph->ihl * 4,
                                               ntohs(iph->tot_len), 0))
                                goto fail;
                break;
        case IPPROTO_UDPLITE:
                if (update_flags & TCA_CSUM_UPDATE_FLAG_UDPLITE)
                        if (!tcf_csum_ipv4_udp(skb, iph->ihl * 4,
                                               ntohs(iph->tot_len), 1))
                                goto fail;
                break;
        case IPPROTO_SCTP:
                if ((update_flags & TCA_CSUM_UPDATE_FLAG_SCTP) &&
                    !tcf_csum_sctp(skb, iph->ihl * 4, ntohs(iph->tot_len)))
                        goto fail;
                break;
        }

        if (update_flags & TCA_CSUM_UPDATE_FLAG_IPV4HDR) {
                if (skb_try_make_writable(skb, sizeof(*iph) + ntkoff))
                        goto fail;

                ip_send_check(ip_hdr(skb));
        }

        return 1;

fail:
        return 0;
}

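/*
 * The switch key above collapses to 0 for non-first fragments
 * (iph->frag_off & htons(IP_OFFSET) is non-zero), because only the first
 * fragment carries the transport header.  For the other fragments, only
 * the IPv4 header checksum can still be refreshed via ip_send_check().
 */
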
static int tcf_csum_ipv6_hopopts(struct ipv6_opt_hdr *ip6xh, unsigned int ixhl,
                                 unsigned int *pl)
{
        int off, len, optlen;
        unsigned char *xh = (void *)ip6xh;

        off = sizeof(*ip6xh);
        len = ixhl - off;

        while (len > 1) {
                switch (xh[off]) {
                case IPV6_TLV_PAD1:
                        optlen = 1;
                        break;
                case IPV6_TLV_JUMBO:
                        optlen = xh[off + 1] + 2;
                        if (optlen != 6 || len < 6 || (off & 3) != 2)
                                /* wrong jumbo option length/alignment */
                                return 0;
                        *pl = ntohl(*(__be32 *)(xh + off + 2));
                        goto done;
                default:
                        optlen = xh[off + 1] + 2;
                        if (optlen > len)
                                /* ignore obscure options */
                                goto done;
                        break;
                }
                off += optlen;
                len -= optlen;
        }

done:
        return 1;
}

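/*
 * tcf_csum_ipv6_hopopts() exists for RFC 2675 jumbograms: when the IPv6
 * payload_len field is zero, the real length is carried in a Jumbo
 * Payload TLV inside the Hop-by-Hop header.  That option must carry a
 * 4-byte length (optlen == 6 including type/length) and sit at an
 * offset of the form 4n + 2, hence the (off & 3) != 2 test above.
 */
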
static int tcf_csum_ipv6(struct sk_buff *skb, u32 update_flags)
{
        struct ipv6hdr *ip6h;
        struct ipv6_opt_hdr *ip6xh;
        unsigned int hl, ixhl;
        unsigned int pl;
        int ntkoff;
        u8 nexthdr;

        ntkoff = skb_network_offset(skb);

        hl = sizeof(*ip6h);

        if (!pskb_may_pull(skb, hl + ntkoff))
                goto fail;

        ip6h = ipv6_hdr(skb);

        pl = ntohs(ip6h->payload_len);
        nexthdr = ip6h->nexthdr;

        do {
                switch (nexthdr) {
                case NEXTHDR_FRAGMENT:
                        goto ignore_skb;
                case NEXTHDR_ROUTING:
                case NEXTHDR_HOP:
                case NEXTHDR_DEST:
                        if (!pskb_may_pull(skb, hl + sizeof(*ip6xh) + ntkoff))
                                goto fail;
                        ip6xh = (void *)(skb_network_header(skb) + hl);
                        ixhl = ipv6_optlen(ip6xh);
                        if (!pskb_may_pull(skb, hl + ixhl + ntkoff))
                                goto fail;
                        ip6xh = (void *)(skb_network_header(skb) + hl);
                        if ((nexthdr == NEXTHDR_HOP) &&
                            !(tcf_csum_ipv6_hopopts(ip6xh, ixhl, &pl)))
                                goto fail;
                        nexthdr = ip6xh->nexthdr;
                        hl += ixhl;
                        break;
                case IPPROTO_ICMPV6:
                        if (update_flags & TCA_CSUM_UPDATE_FLAG_ICMP)
                                if (!tcf_csum_ipv6_icmp(skb,
                                                        hl, pl + sizeof(*ip6h)))
                                        goto fail;
                        goto done;
                case IPPROTO_TCP:
                        if (update_flags & TCA_CSUM_UPDATE_FLAG_TCP)
                                if (!tcf_csum_ipv6_tcp(skb,
                                                       hl, pl + sizeof(*ip6h)))
                                        goto fail;
                        goto done;
                case IPPROTO_UDP:
                        if (update_flags & TCA_CSUM_UPDATE_FLAG_UDP)
                                if (!tcf_csum_ipv6_udp(skb, hl,
                                                       pl + sizeof(*ip6h), 0))
                                        goto fail;
                        goto done;
                case IPPROTO_UDPLITE:
                        if (update_flags & TCA_CSUM_UPDATE_FLAG_UDPLITE)
                                if (!tcf_csum_ipv6_udp(skb, hl,
                                                       pl + sizeof(*ip6h), 1))
                                        goto fail;
                        goto done;
                case IPPROTO_SCTP:
                        if ((update_flags & TCA_CSUM_UPDATE_FLAG_SCTP) &&
                            !tcf_csum_sctp(skb, hl, pl + sizeof(*ip6h)))
                                goto fail;
                        goto done;
                default:
                        goto ignore_skb;
                }
        } while (pskb_may_pull(skb, hl + 1 + ntkoff));

done:
ignore_skb:
        return 1;

fail:
        return 0;
}

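/*
 * The do/while walk above skips Hop-by-Hop, Routing and Destination
 * Options extension headers (advancing hl by ipv6_optlen()), bails out
 * on Fragment headers since a fragment cannot be checksummed reliably,
 * and stops at the first recognised transport protocol.
 */
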
static int tcf_csum(struct sk_buff *skb, const struct tc_action *a,
                    struct tcf_result *res)
{
        struct tcf_csum *p = to_tcf_csum(a);
        struct tcf_csum_params *params;
        u32 update_flags;
        int action;

        rcu_read_lock();
        params = rcu_dereference(p->params);

        tcf_lastuse_update(&p->tcf_tm);
        bstats_cpu_update(this_cpu_ptr(p->common.cpu_bstats), skb);

        action = params->action;
        if (unlikely(action == TC_ACT_SHOT))
                goto drop_stats;

        update_flags = params->update_flags;
        switch (tc_skb_protocol(skb)) {
        case cpu_to_be16(ETH_P_IP):
                if (!tcf_csum_ipv4(skb, update_flags))
                        goto drop;
                break;
        case cpu_to_be16(ETH_P_IPV6):
                if (!tcf_csum_ipv6(skb, update_flags))
                        goto drop;
                break;
        }

unlock:
        rcu_read_unlock();
        return action;

drop:
        action = TC_ACT_SHOT;

drop_stats:
        qstats_drop_inc(this_cpu_ptr(p->common.cpu_qstats));
        goto unlock;
}

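/*
 * tcf_csum() is the per-packet entry point: it runs under
 * rcu_read_lock(), reads the current parameters published by
 * tcf_csum_init(), updates the per-CPU byte/packet statistics, and
 * counts a drop whenever a malformed packet forces the TC_ACT_SHOT
 * result.
 */
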
static int tcf_csum_dump(struct sk_buff *skb, struct tc_action *a, int bind,
                         int ref)
{
        unsigned char *b = skb_tail_pointer(skb);
        struct tcf_csum *p = to_tcf_csum(a);
        struct tcf_csum_params *params;
        struct tc_csum opt = {
                .index   = p->tcf_index,
                .refcnt  = p->tcf_refcnt - ref,
                .bindcnt = p->tcf_bindcnt - bind,
        };
        struct tcf_t t;

        params = rtnl_dereference(p->params);
        opt.action = params->action;
        opt.update_flags = params->update_flags;

        if (nla_put(skb, TCA_CSUM_PARMS, sizeof(opt), &opt))
                goto nla_put_failure;

        tcf_tm_dump(&t, &p->tcf_tm);
        if (nla_put_64bit(skb, TCA_CSUM_TM, sizeof(t), &t, TCA_CSUM_PAD))
                goto nla_put_failure;

        return skb->len;

nla_put_failure:
        nlmsg_trim(skb, b);
        return -1;
}

static void tcf_csum_cleanup(struct tc_action *a)
{
        struct tcf_csum *p = to_tcf_csum(a);
        struct tcf_csum_params *params;

        params = rcu_dereference_protected(p->params, 1);
        /* params may still be NULL if init failed before publishing them */
        if (params)
                kfree_rcu(params, rcu);
}

static int tcf_csum_walker(struct net *net, struct sk_buff *skb,
                           struct netlink_callback *cb, int type,
                           const struct tc_action_ops *ops)
{
        struct tc_action_net *tn = net_generic(net, csum_net_id);

        return tcf_generic_walker(tn, skb, cb, type, ops);
}

static int tcf_csum_search(struct net *net, struct tc_action **a, u32 index)
{
        struct tc_action_net *tn = net_generic(net, csum_net_id);

        return tcf_idr_search(tn, a, index);
}

static struct tc_action_ops act_csum_ops = {
        .kind           = "csum",
        .type           = TCA_ACT_CSUM,
        .owner          = THIS_MODULE,
        .act            = tcf_csum,
        .dump           = tcf_csum_dump,
        .init           = tcf_csum_init,
        .cleanup        = tcf_csum_cleanup,
        .walk           = tcf_csum_walker,
        .lookup         = tcf_csum_search,
        .size           = sizeof(struct tcf_csum),
};

static __net_init int csum_init_net(struct net *net)
{
        struct tc_action_net *tn = net_generic(net, csum_net_id);

        return tc_action_net_init(tn, &act_csum_ops);
}

static void __net_exit csum_exit_net(struct list_head *net_list)
{
        tc_action_net_exit(net_list, csum_net_id);
}

static struct pernet_operations csum_net_ops = {
        .init = csum_init_net,
        .exit_batch = csum_exit_net,
        .id   = &csum_net_id,
        .size = sizeof(struct tc_action_net),
};

MODULE_DESCRIPTION("Checksum updating actions");
MODULE_LICENSE("GPL");

static int __init csum_init_module(void)
{
        return tcf_register_action(&act_csum_ops, &csum_net_ops);
}

static void __exit csum_cleanup_module(void)
{
        tcf_unregister_action(&act_csum_ops, &csum_net_ops);
}

module_init(csum_init_module);
module_exit(csum_cleanup_module);