/* 6LoWPAN fragment reassembly
 *
 * Authors:
 * Alexander Aring <aar@pengutronix.de>
 *
 * Based on: net/ipv6/reassembly.c
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#define pr_fmt(fmt) "6LoWPAN: " fmt
#include <linux/net.h>
#include <linux/list.h>
#include <linux/netdevice.h>
#include <linux/random.h>
#include <linux/jhash.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/export.h>

#include <net/ieee802154_netdev.h>
#include <net/6lowpan.h>
#include <net/ipv6.h>
#include <net/inet_frag.h>

#include "6lowpan_i.h"
static const char lowpan_frags_cache_name[] = "lowpan-frags";

static struct inet_frags lowpan_frags;

static int lowpan_frag_reasm(struct lowpan_frag_queue *fq,
			     struct sk_buff *prev, struct net_device *ldev);
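/* Fragments of one datagram are correlated by the tuple
 * (datagram_tag, datagram_size, source, destination) as described in
 * RFC 4944, section 5.3; lowpan_hash_frag() mixes exactly that tuple
 * into the inet_frag bucket hash.
 */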
static unsigned int lowpan_hash_frag(u16 tag, u16 d_size,
				     const struct ieee802154_addr *saddr,
				     const struct ieee802154_addr *daddr)
{
	net_get_random_once(&lowpan_frags.rnd, sizeof(lowpan_frags.rnd));
	return jhash_3words(ieee802154_addr_hash(saddr),
			    ieee802154_addr_hash(daddr),
			    (__force u32)(tag + (d_size << 16)),
			    lowpan_frags.rnd);
}
static unsigned int lowpan_hashfn(const struct inet_frag_queue *q)
{
	const struct lowpan_frag_queue *fq;

	fq = container_of(q, struct lowpan_frag_queue, q);
	return lowpan_hash_frag(fq->tag, fq->d_size, &fq->saddr, &fq->daddr);
}
static bool lowpan_frag_match(const struct inet_frag_queue *q, const void *a)
{
	const struct lowpan_frag_queue *fq;
	const struct lowpan_create_arg *arg = a;

	fq = container_of(q, struct lowpan_frag_queue, q);
	return fq->tag == arg->tag && fq->d_size == arg->d_size &&
	       ieee802154_addr_equal(&fq->saddr, arg->src) &&
	       ieee802154_addr_equal(&fq->daddr, arg->dst);
}
static void lowpan_frag_init(struct inet_frag_queue *q, const void *a)
{
	const struct lowpan_create_arg *arg = a;
	struct lowpan_frag_queue *fq;

	fq = container_of(q, struct lowpan_frag_queue, q);

	fq->tag = arg->tag;
	fq->d_size = arg->d_size;
	fq->saddr = *arg->src;
	fq->daddr = *arg->dst;
}
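/* Expiry timer for a reassembly queue: if not all fragments arrived in
 * time, drop the half-finished datagram by killing the queue. This runs
 * in timer (softirq) context, which is why a plain spin_lock() suffices.
 */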
static void lowpan_frag_expire(struct timer_list *t)
{
	struct inet_frag_queue *frag = from_timer(frag, t, timer);
	struct frag_queue *fq;
	struct net *net;

	fq = container_of(frag, struct frag_queue, q);
	net = container_of(fq->q.net, struct net, ieee802154_lowpan.frags);

	spin_lock(&fq->q.lock);

	if (fq->q.flags & INET_FRAG_COMPLETE)
		goto out;

	inet_frag_kill(&fq->q, &lowpan_frags);
out:
	spin_unlock(&fq->q.lock);
	inet_frag_put(&fq->q, &lowpan_frags);
}
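/* Find the queue this fragment belongs to; inet_frag_find() allocates
 * a fresh queue (initialized via lowpan_frag_init) when no matching
 * entry exists yet.
 */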
static inline struct lowpan_frag_queue *
fq_find(struct net *net, const struct lowpan_802154_cb *cb,
	const struct ieee802154_addr *src,
	const struct ieee802154_addr *dst)
{
	struct inet_frag_queue *q;
	struct lowpan_create_arg arg;
	unsigned int hash;
	struct netns_ieee802154_lowpan *ieee802154_lowpan =
		net_ieee802154_lowpan(net);

	arg.tag = cb->d_tag;
	arg.d_size = cb->d_size;
	arg.src = src;
	arg.dst = dst;

	hash = lowpan_hash_frag(cb->d_tag, cb->d_size, src, dst);

	q = inet_frag_find(&ieee802154_lowpan->frags,
			   &lowpan_frags, &arg, hash);
	if (IS_ERR_OR_NULL(q)) {
		inet_frag_maybe_warn_overflow(q, pr_fmt());
		return NULL;
	}
	return container_of(q, struct lowpan_frag_queue, q);
}
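/* Queue one fragment. Note that RFC 4944 expresses datagram_offset in
 * units of eight octets, hence the "<< 3" below to convert d_offset to
 * a byte offset. Returns the lowpan_frag_reasm() result once the last
 * missing piece arrives, -1 otherwise (fragment queued or dropped).
 */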
static int lowpan_frag_queue(struct lowpan_frag_queue *fq,
			     struct sk_buff *skb, u8 frag_type)
{
	struct sk_buff *prev, *next;
	struct net_device *ldev;
	int end, offset;

	if (fq->q.flags & INET_FRAG_COMPLETE)
		goto err;

	offset = lowpan_802154_cb(skb)->d_offset << 3;
	end = lowpan_802154_cb(skb)->d_size;

	/* Is this the final fragment? */
	if (offset + skb->len == end) {
		/* If we already have some bits beyond end
		 * or have different end, the segment is corrupted.
		 */
		if (end < fq->q.len ||
		    ((fq->q.flags & INET_FRAG_LAST_IN) && end != fq->q.len))
			goto err;
		fq->q.flags |= INET_FRAG_LAST_IN;
		fq->q.len = end;
	} else {
		if (end > fq->q.len) {
			/* Some bits beyond end -> corruption. */
			if (fq->q.flags & INET_FRAG_LAST_IN)
				goto err;
			fq->q.len = end;
		}
	}

	/* Find out which fragments are in front and at the back of us
	 * in the chain of fragments so far. We must know where to put
	 * this fragment, right?
	 */
	prev = fq->q.fragments_tail;
	if (!prev ||
	    lowpan_802154_cb(prev)->d_offset <
	    lowpan_802154_cb(skb)->d_offset) {
		next = NULL;
		goto found;
	}
	prev = NULL;
	for (next = fq->q.fragments; next != NULL; next = next->next) {
		if (lowpan_802154_cb(next)->d_offset >=
		    lowpan_802154_cb(skb)->d_offset)
			break;	/* bingo! */
		prev = next;
	}
found:
	/* Insert this fragment in the chain of fragments. */
	skb->next = next;
	if (!next)
		fq->q.fragments_tail = skb;
	if (prev)
		prev->next = skb;
	else
		fq->q.fragments = skb;

	ldev = skb->dev;
	if (ldev)
		skb->dev = NULL;

	fq->q.stamp = skb->tstamp;
	if (frag_type == LOWPAN_DISPATCH_FRAG1)
		fq->q.flags |= INET_FRAG_FIRST_IN;

	fq->q.meat += skb->len;
	add_frag_mem_limit(fq->q.net, skb->truesize);

	if (fq->q.flags == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) &&
	    fq->q.meat == fq->q.len) {
		int res;
		unsigned long orefdst = skb->_skb_refdst;

		skb->_skb_refdst = 0UL;
		res = lowpan_frag_reasm(fq, prev, ldev);
		skb->_skb_refdst = orefdst;
		return res;
	}

	return -1;
err:
	kfree_skb(skb);
	return -1;
}
/* Check if this packet is complete.
 *
 * It is called with locked fq, and caller must check that
 * queue is eligible for reassembly i.e. it is not COMPLETE,
 * the last and the first frames arrived and all the bits are here.
 *
 * Returns 1 when reassembly succeeded, -1 on failure (out of memory).
 */
static int lowpan_frag_reasm(struct lowpan_frag_queue *fq, struct sk_buff *prev,
			     struct net_device *ldev)
{
	struct sk_buff *fp, *head = fq->q.fragments;
	int sum_truesize;

	inet_frag_kill(&fq->q, &lowpan_frags);

	/* Make the one we just received the head. */
	if (prev) {
		head = prev->next;
		fp = skb_clone(head, GFP_ATOMIC);
		if (!fp)
			goto out_oom;

		fp->next = head->next;
		if (!fp->next)
			fq->q.fragments_tail = fp;
		prev->next = fp;

		skb_morph(head, fq->q.fragments);
		head->next = fq->q.fragments->next;

		consume_skb(fq->q.fragments);
		fq->q.fragments = head;
	}

	/* Head of list must not be cloned. */
	if (skb_unclone(head, GFP_ATOMIC))
		goto out_oom;

	/* If the first fragment is fragmented itself, we split
	 * it to two chunks: the first with data and paged part
	 * and the second, holding only fragments.
	 */
	if (skb_has_frag_list(head)) {
		struct sk_buff *clone;
		int i, plen = 0;

		clone = alloc_skb(0, GFP_ATOMIC);
		if (!clone)
			goto out_oom;
		clone->next = head->next;
		head->next = clone;
		skb_shinfo(clone)->frag_list = skb_shinfo(head)->frag_list;
		skb_frag_list_init(head);
		for (i = 0; i < skb_shinfo(head)->nr_frags; i++)
			plen += skb_frag_size(&skb_shinfo(head)->frags[i]);
		clone->len = head->data_len - plen;
		clone->data_len = clone->len;
		head->data_len -= clone->len;
		head->len -= clone->len;
		add_frag_mem_limit(fq->q.net, clone->truesize);
	}

	WARN_ON(head == NULL);

	sum_truesize = head->truesize;
	for (fp = head->next; fp;) {
		bool headstolen;
		int delta;
		struct sk_buff *next = fp->next;

		sum_truesize += fp->truesize;
		if (skb_try_coalesce(head, fp, &headstolen, &delta)) {
			kfree_skb_partial(fp, headstolen);
		} else {
			if (!skb_shinfo(head)->frag_list)
				skb_shinfo(head)->frag_list = fp;
			head->data_len += fp->len;
			head->len += fp->len;
			head->truesize += fp->truesize;
		}
		fp = next;
	}
	sub_frag_mem_limit(fq->q.net, sum_truesize);

	head->next = NULL;
	head->dev = ldev;
	head->tstamp = fq->q.stamp;

	fq->q.fragments = NULL;
	fq->q.fragments_tail = NULL;

	return 1;
out_oom:
	net_dbg_ratelimited("lowpan_frag_reasm: no memory for reassembly\n");
	return -1;
}
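/* A FRAG1 payload may start with an IPHC-compressed IPv6 header. The
 * small rx handler chain below decompresses such a header in place
 * before the fragment is queued, so that offsets and datagram_size
 * are accounted against the uncompressed frame.
 */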
static int lowpan_frag_rx_handlers_result(struct sk_buff *skb,
					  lowpan_rx_result res)
{
	switch (res) {
	case RX_QUEUED:
		return NET_RX_SUCCESS;
	case RX_CONTINUE:
		/* nobody cared about this packet */
		net_warn_ratelimited("%s: received unknown dispatch\n",
				     __func__);

		/* fall-through */
	default:
		/* all others failure */
		return NET_RX_DROP;
	}
}
static lowpan_rx_result lowpan_frag_rx_h_iphc(struct sk_buff *skb)
{
	int ret;

	if (!lowpan_is_iphc(*skb_network_header(skb)))
		return RX_CONTINUE;

	ret = lowpan_iphc_decompress(skb);
	if (ret < 0)
		return RX_DROP;

	return RX_QUEUED;
}
static int lowpan_invoke_frag_rx_handlers(struct sk_buff *skb)
{
	lowpan_rx_result res;

#define CALL_RXH(rxh)			\
	do {				\
		res = rxh(skb);		\
		if (res != RX_CONTINUE)	\
			goto rxh_next;	\
	} while (0)

	/* likely at first */
	CALL_RXH(lowpan_frag_rx_h_iphc);
	CALL_RXH(lowpan_rx_h_ipv6);

rxh_next:
	return lowpan_frag_rx_handlers_result(skb, res);
#undef CALL_RXH
}
#define LOWPAN_FRAG_DGRAM_SIZE_HIGH_MASK	0x07
#define LOWPAN_FRAG_DGRAM_SIZE_HIGH_SHIFT	8
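/* Worked example with made-up bytes (not from a real trace): a FRAG1
 * header 0xc4 0xd2 0x00 0x2a decodes as high = 0xc4, low = 0xd2, so
 * d_size = ((0xc4 & 0x07) << 8) | 0xd2 = 0x4d2 = 1234 octets and
 * d_tag = 0x002a. FRAGN carries one extra byte, the datagram offset
 * in 8-octet units.
 */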
static int lowpan_get_cb(struct sk_buff *skb, u8 frag_type,
			 struct lowpan_802154_cb *cb)
{
	bool fail;
	u8 high = 0, low = 0;
	__be16 d_tag = 0;

	fail = lowpan_fetch_skb(skb, &high, 1);
	fail |= lowpan_fetch_skb(skb, &low, 1);
	/* remove the dispatch value and use first three bits as high value
	 * for the datagram size
	 */
	cb->d_size = (high & LOWPAN_FRAG_DGRAM_SIZE_HIGH_MASK) <<
		LOWPAN_FRAG_DGRAM_SIZE_HIGH_SHIFT | low;
	fail |= lowpan_fetch_skb(skb, &d_tag, 2);
	cb->d_tag = ntohs(d_tag);

	if (frag_type == LOWPAN_DISPATCH_FRAGN) {
		fail |= lowpan_fetch_skb(skb, &cb->d_offset, 1);
	} else {
		skb_reset_network_header(skb);
		cb->d_offset = 0;
		/* check if datagram_size has ipv6hdr on FRAG1 */
		fail |= cb->d_size < sizeof(struct ipv6hdr);
		/* check if we can dereference the dispatch value */
		fail |= !skb->len;
	}

	if (unlikely(fail))
		return -EIO;

	return 0;
}
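/* Entry point for LOWPAN_DISPATCH_FRAG1/FRAGN frames. Returns 1 once
 * the datagram was fully reassembled (the skb handed in has been
 * morphed into the complete frame), -1 otherwise (fragment queued, or
 * dropped on error).
 */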
int lowpan_frag_rcv(struct sk_buff *skb, u8 frag_type)
{
	struct lowpan_frag_queue *fq;
	struct net *net = dev_net(skb->dev);
	struct lowpan_802154_cb *cb = lowpan_802154_cb(skb);
	struct ieee802154_hdr hdr;
	int err;

	if (ieee802154_hdr_peek_addrs(skb, &hdr) < 0)
		goto err;

	err = lowpan_get_cb(skb, frag_type, cb);
	if (err < 0)
		goto err;

	if (frag_type == LOWPAN_DISPATCH_FRAG1) {
		err = lowpan_invoke_frag_rx_handlers(skb);
		if (err == NET_RX_DROP)
			goto err;
	}

	if (cb->d_size > IPV6_MIN_MTU) {
		net_warn_ratelimited("lowpan_frag_rcv: datagram size exceeds MTU\n");
		goto err;
	}

	fq = fq_find(net, cb, &hdr.source, &hdr.dest);
	if (fq != NULL) {
		int ret;

		spin_lock(&fq->q.lock);
		ret = lowpan_frag_queue(fq, skb, frag_type);
		spin_unlock(&fq->q.lock);

		inet_frag_put(&fq->q, &lowpan_frags);
		return ret;
	}

err:
	kfree_skb(skb);
	return -1;
}
#ifdef CONFIG_SYSCTL
static int zero;

static struct ctl_table lowpan_frags_ns_ctl_table[] = {
	{
		.procname	= "6lowpanfrag_high_thresh",
		.data		= &init_net.ieee802154_lowpan.frags.high_thresh,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= &init_net.ieee802154_lowpan.frags.low_thresh
	},
	{
		.procname	= "6lowpanfrag_low_thresh",
		.data		= &init_net.ieee802154_lowpan.frags.low_thresh,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= &zero,
		.extra2		= &init_net.ieee802154_lowpan.frags.high_thresh
	},
	{
		.procname	= "6lowpanfrag_time",
		.data		= &init_net.ieee802154_lowpan.frags.timeout,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{ }
};
/* secret interval has been deprecated */
static int lowpan_frags_secret_interval_unused;
static struct ctl_table lowpan_frags_ctl_table[] = {
	{
		.procname	= "6lowpanfrag_secret_interval",
		.data		= &lowpan_frags_secret_interval_unused,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{ }
};
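/* Non-init network namespaces get a kmemdup()'d copy of the template
 * table with the .data/.extra pointers rewired to their own counters,
 * so writes in one namespace never touch another.
 */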
static int __net_init lowpan_frags_ns_sysctl_register(struct net *net)
{
	struct ctl_table *table;
	struct ctl_table_header *hdr;
	struct netns_ieee802154_lowpan *ieee802154_lowpan =
		net_ieee802154_lowpan(net);

	table = lowpan_frags_ns_ctl_table;
	if (!net_eq(net, &init_net)) {
		table = kmemdup(table, sizeof(lowpan_frags_ns_ctl_table),
				GFP_KERNEL);
		if (table == NULL)
			goto err_alloc;

		table[0].data = &ieee802154_lowpan->frags.high_thresh;
		table[0].extra1 = &ieee802154_lowpan->frags.low_thresh;
		table[0].extra2 = &init_net.ieee802154_lowpan.frags.high_thresh;
		table[1].data = &ieee802154_lowpan->frags.low_thresh;
		table[1].extra2 = &ieee802154_lowpan->frags.high_thresh;
		table[2].data = &ieee802154_lowpan->frags.timeout;

		/* Don't export sysctls to unprivileged users */
		if (net->user_ns != &init_user_ns)
			table[0].procname = NULL;
	}

	hdr = register_net_sysctl(net, "net/ieee802154/6lowpan", table);
	if (hdr == NULL)
		goto err_reg;

	ieee802154_lowpan->sysctl.frags_hdr = hdr;
	return 0;

err_reg:
	if (!net_eq(net, &init_net))
		kfree(table);
err_alloc:
	return -ENOMEM;
}
static void __net_exit lowpan_frags_ns_sysctl_unregister(struct net *net)
{
	struct ctl_table *table;
	struct netns_ieee802154_lowpan *ieee802154_lowpan =
		net_ieee802154_lowpan(net);

	table = ieee802154_lowpan->sysctl.frags_hdr->ctl_table_arg;
	unregister_net_sysctl_table(ieee802154_lowpan->sysctl.frags_hdr);
	if (!net_eq(net, &init_net))
		kfree(table);
}

static struct ctl_table_header *lowpan_ctl_header;

static int __init lowpan_frags_sysctl_register(void)
{
	lowpan_ctl_header = register_net_sysctl(&init_net,
						"net/ieee802154/6lowpan",
						lowpan_frags_ctl_table);
	return lowpan_ctl_header == NULL ? -ENOMEM : 0;
}

static void lowpan_frags_sysctl_unregister(void)
{
	unregister_net_sysctl_table(lowpan_ctl_header);
}
#else
static inline int lowpan_frags_ns_sysctl_register(struct net *net)
{
	return 0;
}

static inline void lowpan_frags_ns_sysctl_unregister(struct net *net)
{
}

static inline int __init lowpan_frags_sysctl_register(void)
{
	return 0;
}

static inline void lowpan_frags_sysctl_unregister(void)
{
}
#endif
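/* Per-namespace defaults reuse the IPv6 reassembly constants
 * (IPV6_FRAG_HIGH_THRESH, IPV6_FRAG_LOW_THRESH, IPV6_FRAG_TIMEOUT),
 * since what is being reassembled here is an IPv6 datagram.
 */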
static int __net_init lowpan_frags_init_net(struct net *net)
{
	struct netns_ieee802154_lowpan *ieee802154_lowpan =
		net_ieee802154_lowpan(net);
	int res;

	ieee802154_lowpan->frags.high_thresh = IPV6_FRAG_HIGH_THRESH;
	ieee802154_lowpan->frags.low_thresh = IPV6_FRAG_LOW_THRESH;
	ieee802154_lowpan->frags.timeout = IPV6_FRAG_TIMEOUT;

	res = inet_frags_init_net(&ieee802154_lowpan->frags);
	if (res < 0)
		return res;

	res = lowpan_frags_ns_sysctl_register(net);
	if (res < 0)
		inet_frags_exit_net(&ieee802154_lowpan->frags, &lowpan_frags);

	return res;
}
static void __net_exit lowpan_frags_exit_net(struct net *net)
{
	struct netns_ieee802154_lowpan *ieee802154_lowpan =
		net_ieee802154_lowpan(net);

	lowpan_frags_ns_sysctl_unregister(net);
	inet_frags_exit_net(&ieee802154_lowpan->frags, &lowpan_frags);
}

static struct pernet_operations lowpan_frags_ops = {
	.init = lowpan_frags_init_net,
	.exit = lowpan_frags_exit_net,
};
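/* Wire the callbacks above into the shared inet_frag machinery and
 * register the per-netns hooks; called from the 6LoWPAN module init
 * path.
 */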
int __init lowpan_net_frag_init(void)
{
	int ret;

	ret = lowpan_frags_sysctl_register();
	if (ret)
		return ret;

	ret = register_pernet_subsys(&lowpan_frags_ops);
	if (ret)
		goto err_pernet;

	lowpan_frags.hashfn = lowpan_hashfn;
	lowpan_frags.constructor = lowpan_frag_init;
	lowpan_frags.destructor = NULL;
	lowpan_frags.qsize = sizeof(struct frag_queue);
	lowpan_frags.match = lowpan_frag_match;
	lowpan_frags.frag_expire = lowpan_frag_expire;
	lowpan_frags.frags_cache_name = lowpan_frags_cache_name;
	ret = inet_frags_init(&lowpan_frags);
	if (ret)
		goto err_pernet;

	return ret;
err_pernet:
	lowpan_frags_sysctl_unregister();
	return ret;
}
void lowpan_net_frag_exit(void)
{
	inet_frags_fini(&lowpan_frags);
	lowpan_frags_sysctl_unregister();
	unregister_pernet_subsys(&lowpan_frags_ops);
}