/*
 * Checksum updating actions
 *
 * Copyright (c) 2010 Gregoire Baron <baronchon@n7mm.org>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 */

#include <linux/types.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/spinlock.h>

#include <linux/netlink.h>
#include <net/netlink.h>
#include <linux/rtnetlink.h>

#include <linux/skbuff.h>

#include <net/ip.h>
#include <net/ipv6.h>
#include <net/icmp.h>
#include <linux/icmpv6.h>
#include <linux/igmp.h>
#include <net/tcp.h>
#include <net/udp.h>
#include <net/ip6_checksum.h>
#include <net/sctp/checksum.h>

#include <net/act_api.h>

#include <linux/tc_act/tc_csum.h>
#include <net/tc_act/tc_csum.h>

static const struct nla_policy csum_policy[TCA_CSUM_MAX + 1] = {
	[TCA_CSUM_PARMS] = { .len = sizeof(struct tc_csum), },
};

static unsigned int csum_net_id;
static struct tc_action_ops act_csum_ops;

static int tcf_csum_init(struct net *net, struct nlattr *nla,
			 struct nlattr *est, struct tc_action **a, int ovr,
			 int bind, bool rtnl_held,
			 struct netlink_ext_ack *extack)
{
	struct tc_action_net *tn = net_generic(net, csum_net_id);
	struct tcf_csum_params *params_new;
	struct nlattr *tb[TCA_CSUM_MAX + 1];
	struct tc_csum *parm;
	struct tcf_csum *p;
	int ret = 0, err;

	if (nla == NULL)
		return -EINVAL;

	err = nla_parse_nested(tb, TCA_CSUM_MAX, nla, csum_policy, NULL);
	if (err < 0)
		return err;

	if (tb[TCA_CSUM_PARMS] == NULL)
		return -EINVAL;
	parm = nla_data(tb[TCA_CSUM_PARMS]);

	err = tcf_idr_check_alloc(tn, &parm->index, a, bind);
	if (!err) {
		ret = tcf_idr_create(tn, parm->index, est, a,
				     &act_csum_ops, bind, true);
		if (ret) {
			tcf_idr_cleanup(tn, parm->index);
			return ret;
		}
		ret = ACT_P_CREATED;
	} else if (err > 0) {
		if (bind)	/* don't override defaults */
			return 0;
		if (!ovr) {
			tcf_idr_release(*a, bind);
			return -EEXIST;
		}
	} else {
		return err;
	}

	p = to_tcf_csum(*a);

	params_new = kzalloc(sizeof(*params_new), GFP_KERNEL);
	if (unlikely(!params_new)) {
		tcf_idr_release(*a, bind);
		return -ENOMEM;
	}
	params_new->update_flags = parm->update_flags;

	spin_lock_bh(&p->tcf_lock);
	p->tcf_action = parm->action;
	rcu_swap_protected(p->params, params_new,
			   lockdep_is_held(&p->tcf_lock));
	spin_unlock_bh(&p->tcf_lock);

	if (params_new)
		kfree_rcu(params_new, rcu);

	if (ret == ACT_P_CREATED)
		tcf_idr_insert(tn, *a);

	return ret;
}

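/*
 * Example (userspace side, not part of this file): with a standard
 * iproute2 build, an instance of this action is typically created via
 * something like
 *
 *   tc filter add dev eth0 parent 1: protocol ip u32 \
 *       match ip dport 5000 0xffff \
 *       action csum ip4h udp
 *
 * which reaches tcf_csum_init() with update_flags covering the IPv4
 * header and UDP checksums. The exact tc syntax above is illustrative
 * only; this module does not depend on it.
 */
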
/**
 * tcf_csum_skb_nextlayer - Get next layer pointer
 * @skb: sk_buff to use
 * @ihl: previous summed headers length
 * @ipl: complete packet length
 * @jhl: next header length
 *
 * Check the expected next layer availability in the specified sk_buff.
 * Return the next layer pointer if pass, NULL otherwise.
 */
static void *tcf_csum_skb_nextlayer(struct sk_buff *skb,
				    unsigned int ihl, unsigned int ipl,
				    unsigned int jhl)
{
	int ntkoff = skb_network_offset(skb);
	int hl = ihl + jhl;

	if (!pskb_may_pull(skb, ipl + ntkoff) || (ipl < hl) ||
	    skb_try_make_writable(skb, hl + ntkoff))
		return NULL;
	else
		return (void *)(skb_network_header(skb) + ihl);
}

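/*
 * For example, the IPv4/TCP path below ends up calling
 *   tcf_csum_skb_nextlayer(skb, iph->ihl * 4, ntohs(iph->tot_len),
 *                          sizeof(struct tcphdr))
 * and gets back a pointer to the TCP header, or NULL if the packet is
 * shorter than advertised or cannot be made writable.
 */
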
static int tcf_csum_ipv4_icmp(struct sk_buff *skb, unsigned int ihl,
			      unsigned int ipl)
{
	struct icmphdr *icmph;

	icmph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*icmph));
	if (icmph == NULL)
		return 0;

	icmph->checksum = 0;
	skb->csum = csum_partial(icmph, ipl - ihl, 0);
	icmph->checksum = csum_fold(skb->csum);

	skb->ip_summed = CHECKSUM_NONE;

	return 1;
}

static int tcf_csum_ipv4_igmp(struct sk_buff *skb,
			      unsigned int ihl, unsigned int ipl)
{
	struct igmphdr *igmph;

	igmph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*igmph));
	if (igmph == NULL)
		return 0;

	igmph->csum = 0;
	skb->csum = csum_partial(igmph, ipl - ihl, 0);
	igmph->csum = csum_fold(skb->csum);

	skb->ip_summed = CHECKSUM_NONE;

	return 1;
}

static int tcf_csum_ipv6_icmp(struct sk_buff *skb, unsigned int ihl,
			      unsigned int ipl)
{
	struct icmp6hdr *icmp6h;
	const struct ipv6hdr *ip6h;

	icmp6h = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*icmp6h));
	if (icmp6h == NULL)
		return 0;

	ip6h = ipv6_hdr(skb);
	icmp6h->icmp6_cksum = 0;
	skb->csum = csum_partial(icmp6h, ipl - ihl, 0);
	icmp6h->icmp6_cksum = csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
					      ipl - ihl, IPPROTO_ICMPV6,
					      skb->csum);

	skb->ip_summed = CHECKSUM_NONE;

	return 1;
}

static int tcf_csum_ipv4_tcp(struct sk_buff *skb, unsigned int ihl,
			     unsigned int ipl)
{
	struct tcphdr *tcph;
	const struct iphdr *iph;

	if (skb_is_gso(skb) && skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
		return 1;

	tcph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*tcph));
	if (tcph == NULL)
		return 0;

	iph = ip_hdr(skb);
	tcph->check = 0;
	skb->csum = csum_partial(tcph, ipl - ihl, 0);
	tcph->check = tcp_v4_check(ipl - ihl,
				   iph->saddr, iph->daddr, skb->csum);

	skb->ip_summed = CHECKSUM_NONE;

	return 1;
}

static int tcf_csum_ipv6_tcp(struct sk_buff *skb, unsigned int ihl,
			     unsigned int ipl)
{
	struct tcphdr *tcph;
	const struct ipv6hdr *ip6h;

	if (skb_is_gso(skb) && skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
		return 1;

	tcph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*tcph));
	if (tcph == NULL)
		return 0;

	ip6h = ipv6_hdr(skb);
	tcph->check = 0;
	skb->csum = csum_partial(tcph, ipl - ihl, 0);
	tcph->check = csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
				      ipl - ihl, IPPROTO_TCP,
				      skb->csum);

	skb->ip_summed = CHECKSUM_NONE;

	return 1;
}

static int tcf_csum_ipv4_udp(struct sk_buff *skb, unsigned int ihl,
			     unsigned int ipl, int udplite)
{
	struct udphdr *udph;
	const struct iphdr *iph;
	u16 ul;

	if (skb_is_gso(skb) && skb_shinfo(skb)->gso_type & SKB_GSO_UDP)
		return 1;

	/*
	 * Support both UDP and UDPLITE checksum algorithms. Don't use
	 * udph->len to get the real length without a protocol check:
	 * UDPLITE uses udph->len for another purpose (checksum coverage).
	 * Use iph->tot_len, or just ipl.
	 */
	udph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*udph));
	if (udph == NULL)
		return 0;

	iph = ip_hdr(skb);
	ul = ntohs(udph->len);

	if (udplite || udph->check) {
		udph->check = 0;

		if (udplite) {
			if (ul == 0)
				skb->csum = csum_partial(udph, ipl - ihl, 0);
			else if ((ul >= sizeof(*udph)) && (ul <= ipl - ihl))
				skb->csum = csum_partial(udph, ul, 0);
			else
				goto ignore_obscure_skb;
		} else {
			if (ul != ipl - ihl)
				goto ignore_obscure_skb;

			skb->csum = csum_partial(udph, ul, 0);
		}

		udph->check = csum_tcpudp_magic(iph->saddr, iph->daddr,
						ul, iph->protocol,
						skb->csum);

		if (!udph->check)
			udph->check = CSUM_MANGLED_0;
	}

	skb->ip_summed = CHECKSUM_NONE;

ignore_obscure_skb:
	return 1;
}

static int tcf_csum_ipv6_udp(struct sk_buff *skb, unsigned int ihl,
			     unsigned int ipl, int udplite)
{
	struct udphdr *udph;
	const struct ipv6hdr *ip6h;
	u16 ul;

	if (skb_is_gso(skb) && skb_shinfo(skb)->gso_type & SKB_GSO_UDP)
		return 1;

	/*
	 * Support both UDP and UDPLITE checksum algorithms. Don't use
	 * udph->len to get the real length without a protocol check:
	 * UDPLITE uses udph->len for another purpose (checksum coverage).
	 * Use ip6h->payload_len + sizeof(*ip6h) ... , or just ipl.
	 */
	udph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*udph));
	if (udph == NULL)
		return 0;

	ip6h = ipv6_hdr(skb);
	ul = ntohs(udph->len);

	udph->check = 0;

	if (udplite) {
		if (ul == 0)
			skb->csum = csum_partial(udph, ipl - ihl, 0);
		else if ((ul >= sizeof(*udph)) && (ul <= ipl - ihl))
			skb->csum = csum_partial(udph, ul, 0);
		else
			goto ignore_obscure_skb;
	} else {
		if (ul != ipl - ihl)
			goto ignore_obscure_skb;

		skb->csum = csum_partial(udph, ul, 0);
	}

	udph->check = csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr, ul,
				      udplite ? IPPROTO_UDPLITE : IPPROTO_UDP,
				      skb->csum);

	if (!udph->check)
		udph->check = CSUM_MANGLED_0;

	skb->ip_summed = CHECKSUM_NONE;

ignore_obscure_skb:
	return 1;
}

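/*
 * Note on the two UDP helpers above: for UDP-Lite, udph->len is the
 * checksum coverage (0 meaning "cover the whole datagram"), which is why
 * it cannot be trusted as a datagram length. For plain UDP over IPv4, a
 * zero checksum field means "no checksum", so such packets are left
 * alone, and a checksum that computes to zero is transmitted as
 * CSUM_MANGLED_0 (0xffff) to avoid that ambiguity.
 */
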
static int tcf_csum_sctp(struct sk_buff *skb, unsigned int ihl,
			 unsigned int ipl)
{
	struct sctphdr *sctph;

	if (skb_is_gso(skb) && skb_is_gso_sctp(skb))
		return 1;

	sctph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*sctph));
	if (!sctph)
		return 0;

	sctph->checksum = sctp_compute_cksum(skb,
					     skb_network_offset(skb) + ihl);
	skb->ip_summed = CHECKSUM_NONE;
	skb->csum_not_inet = 0;

	return 1;
}

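/*
 * Unlike the other protocols handled here, SCTP uses a CRC32c checksum
 * rather than the 16-bit ones-complement Internet checksum, hence the
 * call to sctp_compute_cksum() and the clearing of csum_not_inet.
 */
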
static int tcf_csum_ipv4(struct sk_buff *skb, u32 update_flags)
{
	const struct iphdr *iph;
	int ntkoff;

	ntkoff = skb_network_offset(skb);

	if (!pskb_may_pull(skb, sizeof(*iph) + ntkoff))
		goto fail;

	iph = ip_hdr(skb);

	/* Non-first fragments carry no transport header, so only the
	 * IPv4 header checksum can be fixed for them (the 0 below
	 * matches no case).
	 */
	switch (iph->frag_off & htons(IP_OFFSET) ? 0 : iph->protocol) {
	case IPPROTO_ICMP:
		if (update_flags & TCA_CSUM_UPDATE_FLAG_ICMP)
			if (!tcf_csum_ipv4_icmp(skb, iph->ihl * 4,
						ntohs(iph->tot_len)))
				goto fail;
		break;
	case IPPROTO_IGMP:
		if (update_flags & TCA_CSUM_UPDATE_FLAG_IGMP)
			if (!tcf_csum_ipv4_igmp(skb, iph->ihl * 4,
						ntohs(iph->tot_len)))
				goto fail;
		break;
	case IPPROTO_TCP:
		if (update_flags & TCA_CSUM_UPDATE_FLAG_TCP)
			if (!tcf_csum_ipv4_tcp(skb, iph->ihl * 4,
					       ntohs(iph->tot_len)))
				goto fail;
		break;
	case IPPROTO_UDP:
		if (update_flags & TCA_CSUM_UPDATE_FLAG_UDP)
			if (!tcf_csum_ipv4_udp(skb, iph->ihl * 4,
					       ntohs(iph->tot_len), 0))
				goto fail;
		break;
	case IPPROTO_UDPLITE:
		if (update_flags & TCA_CSUM_UPDATE_FLAG_UDPLITE)
			if (!tcf_csum_ipv4_udp(skb, iph->ihl * 4,
					       ntohs(iph->tot_len), 1))
				goto fail;
		break;
	case IPPROTO_SCTP:
		if ((update_flags & TCA_CSUM_UPDATE_FLAG_SCTP) &&
		    !tcf_csum_sctp(skb, iph->ihl * 4, ntohs(iph->tot_len)))
			goto fail;
		break;
	}

	if (update_flags & TCA_CSUM_UPDATE_FLAG_IPV4HDR) {
		if (skb_try_make_writable(skb, sizeof(*iph) + ntkoff))
			goto fail;

		ip_send_check(ip_hdr(skb));
	}

	return 1;

fail:
	return 0;
}

static int tcf_csum_ipv6_hopopts(struct ipv6_opt_hdr *ip6xh, unsigned int ixhl,
				 unsigned int *pl)
{
	int off, len, optlen;
	unsigned char *xh = (void *)ip6xh;

	off = sizeof(*ip6xh);
	len = ixhl - off;

	while (len > 1) {
		switch (xh[off]) {
		case IPV6_TLV_PAD1:
			optlen = 1;
			break;
		case IPV6_TLV_JUMBO:
			optlen = xh[off + 1] + 2;
			if (optlen != 6 || len < 6 || (off & 3) != 2)
				/* wrong jumbo option length/alignment */
				return 0;
			*pl = ntohl(*(__be32 *)(xh + off + 2));
			goto done;
		default:
			optlen = xh[off + 1] + 2;
			if (optlen > len)
				/* ignore obscure options */
				goto done;
			break;
		}
		off += optlen;
		len -= optlen;
	}

done:
	return 1;
}

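/*
 * The jumbo handling above matters because an IPv6 jumbogram (RFC 2675)
 * carries payload_len == 0 in the fixed header; the real length lives in
 * the hop-by-hop jumbo option, so *pl is updated from there before any
 * transport checksum is computed.
 */
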
static int tcf_csum_ipv6(struct sk_buff *skb, u32 update_flags)
{
	struct ipv6hdr *ip6h;
	struct ipv6_opt_hdr *ip6xh;
	unsigned int hl, ixhl;
	unsigned int pl;
	int ntkoff;
	u8 nexthdr;

	ntkoff = skb_network_offset(skb);

	hl = sizeof(*ip6h);

	if (!pskb_may_pull(skb, hl + ntkoff))
		goto fail;

	ip6h = ipv6_hdr(skb);

	pl = ntohs(ip6h->payload_len);
	nexthdr = ip6h->nexthdr;

	do {
		switch (nexthdr) {
		case NEXTHDR_FRAGMENT:
			goto ignore_skb;
		case NEXTHDR_ROUTING:
		case NEXTHDR_HOP:
		case NEXTHDR_DEST:
			if (!pskb_may_pull(skb, hl + sizeof(*ip6xh) + ntkoff))
				goto fail;
			ip6xh = (void *)(skb_network_header(skb) + hl);
			ixhl = ipv6_optlen(ip6xh);
			if (!pskb_may_pull(skb, hl + ixhl + ntkoff))
				goto fail;
			ip6xh = (void *)(skb_network_header(skb) + hl);
			if ((nexthdr == NEXTHDR_HOP) &&
			    !(tcf_csum_ipv6_hopopts(ip6xh, ixhl, &pl)))
				goto fail;
			nexthdr = ip6xh->nexthdr;
			hl += ixhl;
			break;
		case IPPROTO_ICMPV6:
			if (update_flags & TCA_CSUM_UPDATE_FLAG_ICMP)
				if (!tcf_csum_ipv6_icmp(skb,
							hl, pl + sizeof(*ip6h)))
					goto fail;
			goto done;
		case IPPROTO_TCP:
			if (update_flags & TCA_CSUM_UPDATE_FLAG_TCP)
				if (!tcf_csum_ipv6_tcp(skb,
						       hl, pl + sizeof(*ip6h)))
					goto fail;
			goto done;
		case IPPROTO_UDP:
			if (update_flags & TCA_CSUM_UPDATE_FLAG_UDP)
				if (!tcf_csum_ipv6_udp(skb, hl,
						       pl + sizeof(*ip6h), 0))
					goto fail;
			goto done;
		case IPPROTO_UDPLITE:
			if (update_flags & TCA_CSUM_UPDATE_FLAG_UDPLITE)
				if (!tcf_csum_ipv6_udp(skb, hl,
						       pl + sizeof(*ip6h), 1))
					goto fail;
			goto done;
		case IPPROTO_SCTP:
			if ((update_flags & TCA_CSUM_UPDATE_FLAG_SCTP) &&
			    !tcf_csum_sctp(skb, hl, pl + sizeof(*ip6h)))
				goto fail;
			goto done;
		default:
			goto ignore_skb;
		}
	} while (pskb_may_pull(skb, hl + 1 + ntkoff));

done:
ignore_skb:
	return 1;

fail:
	return 0;
}

static int tcf_csum_act(struct sk_buff *skb, const struct tc_action *a,
			struct tcf_result *res)
{
	struct tcf_csum *p = to_tcf_csum(a);
	struct tcf_csum_params *params;
	u32 update_flags;
	int action;

	params = rcu_dereference_bh(p->params);

	tcf_lastuse_update(&p->tcf_tm);
	bstats_cpu_update(this_cpu_ptr(p->common.cpu_bstats), skb);

	action = READ_ONCE(p->tcf_action);
	if (unlikely(action == TC_ACT_SHOT))
		goto drop;

	update_flags = params->update_flags;
	switch (tc_skb_protocol(skb)) {
	case cpu_to_be16(ETH_P_IP):
		if (!tcf_csum_ipv4(skb, update_flags))
			goto drop;
		break;
	case cpu_to_be16(ETH_P_IPV6):
		if (!tcf_csum_ipv6(skb, update_flags))
			goto drop;
		break;
	}

	return action;

drop:
	qstats_drop_inc(this_cpu_ptr(p->common.cpu_qstats));
	return TC_ACT_SHOT;
}

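/*
 * Locking note: the datapath above reads p->params with
 * rcu_dereference_bh() and never takes tcf_lock; the control path
 * (tcf_csum_init) publishes a new params struct under tcf_lock via
 * rcu_swap_protected() and frees the old one with kfree_rcu(), so
 * readers always see a consistent update_flags value.
 */
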
static int tcf_csum_dump(struct sk_buff *skb, struct tc_action *a, int bind,
			 int ref)
{
	unsigned char *b = skb_tail_pointer(skb);
	struct tcf_csum *p = to_tcf_csum(a);
	struct tcf_csum_params *params;
	struct tc_csum opt = {
		.index   = p->tcf_index,
		.refcnt  = refcount_read(&p->tcf_refcnt) - ref,
		.bindcnt = atomic_read(&p->tcf_bindcnt) - bind,
	};
	struct tcf_t t;

	spin_lock_bh(&p->tcf_lock);
	params = rcu_dereference_protected(p->params,
					   lockdep_is_held(&p->tcf_lock));
	opt.action = p->tcf_action;
	opt.update_flags = params->update_flags;

	if (nla_put(skb, TCA_CSUM_PARMS, sizeof(opt), &opt))
		goto nla_put_failure;

	tcf_tm_dump(&t, &p->tcf_tm);
	if (nla_put_64bit(skb, TCA_CSUM_TM, sizeof(t), &t, TCA_CSUM_PAD))
		goto nla_put_failure;
	spin_unlock_bh(&p->tcf_lock);

	return skb->len;

nla_put_failure:
	spin_unlock_bh(&p->tcf_lock);
	nlmsg_trim(skb, b);
	return -1;
}

static void tcf_csum_cleanup(struct tc_action *a)
{
	struct tcf_csum *p = to_tcf_csum(a);
	struct tcf_csum_params *params;

	params = rcu_dereference_protected(p->params, 1);
	if (params)
		kfree_rcu(params, rcu);
}

static int tcf_csum_walker(struct net *net, struct sk_buff *skb,
			   struct netlink_callback *cb, int type,
			   const struct tc_action_ops *ops,
			   struct netlink_ext_ack *extack)
{
	struct tc_action_net *tn = net_generic(net, csum_net_id);

	return tcf_generic_walker(tn, skb, cb, type, ops, extack);
}

static int tcf_csum_search(struct net *net, struct tc_action **a, u32 index)
{
	struct tc_action_net *tn = net_generic(net, csum_net_id);

	return tcf_idr_search(tn, a, index);
}

static size_t tcf_csum_get_fill_size(const struct tc_action *act)
{
	return nla_total_size(sizeof(struct tc_csum));
}

static struct tc_action_ops act_csum_ops = {
	.kind		= "csum",
	.type		= TCA_ACT_CSUM,
	.owner		= THIS_MODULE,
	.act		= tcf_csum_act,
	.dump		= tcf_csum_dump,
	.init		= tcf_csum_init,
	.cleanup	= tcf_csum_cleanup,
	.walk		= tcf_csum_walker,
	.lookup		= tcf_csum_search,
	.get_fill_size	= tcf_csum_get_fill_size,
	.size		= sizeof(struct tcf_csum),
};

static __net_init int csum_init_net(struct net *net)
{
	struct tc_action_net *tn = net_generic(net, csum_net_id);

	return tc_action_net_init(tn, &act_csum_ops);
}

static void __net_exit csum_exit_net(struct list_head *net_list)
{
	tc_action_net_exit(net_list, csum_net_id);
}

static struct pernet_operations csum_net_ops = {
	.init = csum_init_net,
	.exit_batch = csum_exit_net,
	.id   = &csum_net_id,
	.size = sizeof(struct tc_action_net),
};

MODULE_DESCRIPTION("Checksum updating actions");
MODULE_LICENSE("GPL");

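/*
 * Build note (an assumption about the surrounding tree, not enforced
 * here): this file is normally built as act_csum.ko when
 * CONFIG_NET_ACT_CSUM is enabled in the net/sched Kconfig.
 */
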
static int __init csum_init_module(void)
{
	return tcf_register_action(&act_csum_ops, &csum_net_ops);
}

static void __exit csum_cleanup_module(void)
{
	tcf_unregister_action(&act_csum_ops, &csum_net_ops);
}

module_init(csum_init_module);
module_exit(csum_cleanup_module);