b4bc7a50eccfdda2e4a8e1fdfe7288882fe40a12
[linux-2.6-block.git] / net / ieee802154 / reassembly.c
1 /*      6LoWPAN fragment reassembly
2  *
3  *
4  *      Authors:
5  *      Alexander Aring         <aar@pengutronix.de>
6  *
7  *      Based on: net/ipv6/reassembly.c
8  *
9  *      This program is free software; you can redistribute it and/or
10  *      modify it under the terms of the GNU General Public License
11  *      as published by the Free Software Foundation; either version
12  *      2 of the License, or (at your option) any later version.
13  */
14
15 #define pr_fmt(fmt) "6LoWPAN: " fmt
16
17 #include <linux/net.h>
18 #include <linux/list.h>
19 #include <linux/netdevice.h>
20 #include <linux/random.h>
21 #include <linux/jhash.h>
22 #include <linux/skbuff.h>
23 #include <linux/slab.h>
24 #include <linux/export.h>
25
26 #include <net/ieee802154_netdev.h>
27 #include <net/6lowpan.h>
28 #include <net/ipv6.h>
29 #include <net/inet_frag.h>
30
31 #include "reassembly.h"
32
/* Per-fragment metadata parsed from the 6LoWPAN FRAG1/FRAGN header and
 * stashed in skb->cb while the fragment sits in the reassembly queue
 * (see lowpan_get_frag_info() and lowpan_cb()).
 */
struct lowpan_frag_info {
	__be16 d_tag;	/* datagram_tag identifying the datagram */
	u16 d_size;	/* datagram_size: total uncompressed datagram length */
	u8 d_offset;	/* datagram_offset in 8-octet units; 0 for FRAG1 */
};
38
39 static struct lowpan_frag_info *lowpan_cb(struct sk_buff *skb)
40 {
41         return (struct lowpan_frag_info *)skb->cb;
42 }
43
/* Global descriptor for the 6LoWPAN reassembly hash table; its callbacks
 * and parameters are filled in by lowpan_net_frag_init().
 */
static struct inet_frags lowpan_frags;

static int lowpan_frag_reasm(struct lowpan_frag_queue *fq,
			     struct sk_buff *prev, struct net_device *dev);
48
49 static unsigned int lowpan_hash_frag(__be16 tag, u16 d_size,
50                                      const struct ieee802154_addr *saddr,
51                                      const struct ieee802154_addr *daddr)
52 {
53         net_get_random_once(&lowpan_frags.rnd, sizeof(lowpan_frags.rnd));
54         return jhash_3words(ieee802154_addr_hash(saddr),
55                             ieee802154_addr_hash(daddr),
56                             (__force u32)(tag + (d_size << 16)),
57                             lowpan_frags.rnd);
58 }
59
60 static unsigned int lowpan_hashfn(const struct inet_frag_queue *q)
61 {
62         const struct lowpan_frag_queue *fq;
63
64         fq = container_of(q, struct lowpan_frag_queue, q);
65         return lowpan_hash_frag(fq->tag, fq->d_size, &fq->saddr, &fq->daddr);
66 }
67
68 static bool lowpan_frag_match(const struct inet_frag_queue *q, const void *a)
69 {
70         const struct lowpan_frag_queue *fq;
71         const struct lowpan_create_arg *arg = a;
72
73         fq = container_of(q, struct lowpan_frag_queue, q);
74         return  fq->tag == arg->tag && fq->d_size == arg->d_size &&
75                 ieee802154_addr_equal(&fq->saddr, arg->src) &&
76                 ieee802154_addr_equal(&fq->daddr, arg->dst);
77 }
78
79 static void lowpan_frag_init(struct inet_frag_queue *q, const void *a)
80 {
81         const struct lowpan_create_arg *arg = a;
82         struct lowpan_frag_queue *fq;
83
84         fq = container_of(q, struct lowpan_frag_queue, q);
85
86         fq->tag = arg->tag;
87         fq->d_size = arg->d_size;
88         fq->saddr = *arg->src;
89         fq->daddr = *arg->dst;
90 }
91
92 static void lowpan_frag_expire(unsigned long data)
93 {
94         struct frag_queue *fq;
95         struct net *net;
96
97         fq = container_of((struct inet_frag_queue *)data, struct frag_queue, q);
98         net = container_of(fq->q.net, struct net, ieee802154_lowpan.frags);
99
100         spin_lock(&fq->q.lock);
101
102         if (fq->q.last_in & INET_FRAG_COMPLETE)
103                 goto out;
104
105         inet_frag_kill(&fq->q, &lowpan_frags);
106 out:
107         spin_unlock(&fq->q.lock);
108         inet_frag_put(&fq->q, &lowpan_frags);
109 }
110
/* Look up (or create) the reassembly queue matching this fragment's
 * (tag, size, source, destination) key.
 *
 * Returns a referenced lowpan_frag_queue on success, or NULL when
 * inet_frag_find() failed (allocation failure or hash overflow, which
 * is rate-limit warned).  The caller must inet_frag_put() the result.
 */
static inline struct lowpan_frag_queue *
fq_find(struct net *net, const struct lowpan_frag_info *frag_info,
	const struct ieee802154_addr *src,
	const struct ieee802154_addr *dst)
{
	struct inet_frag_queue *q;
	struct lowpan_create_arg arg;
	unsigned int hash;
	struct netns_ieee802154_lowpan *ieee802154_lowpan =
		net_ieee802154_lowpan(net);

	arg.tag = frag_info->d_tag;
	arg.d_size = frag_info->d_size;
	arg.src = src;
	arg.dst = dst;

	/* NOTE(review): there is deliberately no read_unlock() here — this
	 * relies on inet_frag_find() dropping lowpan_frags.lock internally,
	 * as the inet_frag API of this era does; confirm against
	 * net/ipv4/inet_fragment.c before changing the locking.
	 */
	read_lock(&lowpan_frags.lock);
	hash = lowpan_hash_frag(frag_info->d_tag, frag_info->d_size, src, dst);

	q = inet_frag_find(&ieee802154_lowpan->frags,
			   &lowpan_frags, &arg, hash);
	if (IS_ERR_OR_NULL(q)) {
		inet_frag_maybe_warn_overflow(q, pr_fmt());
		return NULL;
	}
	return container_of(q, struct lowpan_frag_queue, q);
}
138
/* Insert one fragment into its reassembly queue, keeping the fragment
 * list sorted by offset, and kick off reassembly once both the first
 * and last fragments have arrived and the payload is complete.
 *
 * Called with fq->q.lock held.  Consumes @skb on error.
 * Returns lowpan_frag_reasm()'s result (1 on successful reassembly),
 * or -1 when more fragments are needed or the fragment was bad.
 */
static int lowpan_frag_queue(struct lowpan_frag_queue *fq,
			     struct sk_buff *skb, const u8 frag_type)
{
	struct sk_buff *prev, *next;
	struct net_device *dev;
	int end, offset;

	if (fq->q.last_in & INET_FRAG_COMPLETE)
		goto err;

	/* d_offset is carried in 8-octet units (RFC 4944). */
	offset = lowpan_cb(skb)->d_offset << 3;
	end = lowpan_cb(skb)->d_size;

	/* Is this the final fragment? */
	if (offset + skb->len == end) {
		/* If we already have some bits beyond end
		 * or have different end, the segment is corrupted.
		 */
		if (end < fq->q.len ||
		    ((fq->q.last_in & INET_FRAG_LAST_IN) && end != fq->q.len))
			goto err;
		fq->q.last_in |= INET_FRAG_LAST_IN;
		fq->q.len = end;
	} else {
		if (end > fq->q.len) {
			/* Some bits beyond end -> corruption. */
			if (fq->q.last_in & INET_FRAG_LAST_IN)
				goto err;
			fq->q.len = end;
		}
	}

	/* Find out which fragments are in front and at the back of us
	 * in the chain of fragments so far.  We must know where to put
	 * this fragment, right?
	 */
	prev = fq->q.fragments_tail;
	if (!prev || lowpan_cb(prev)->d_offset < lowpan_cb(skb)->d_offset) {
		next = NULL;
		goto found;
	}
	prev = NULL;
	for (next = fq->q.fragments; next != NULL; next = next->next) {
		if (lowpan_cb(next)->d_offset >= lowpan_cb(skb)->d_offset)
			break;	/* bingo! */
		prev = next;
	}

found:
	/* Insert this fragment in the chain of fragments. */
	skb->next = next;
	if (!next)
		fq->q.fragments_tail = skb;
	if (prev)
		prev->next = skb;
	else
		fq->q.fragments = skb;

	/* Detach from the device: the queue may outlive skb->dev. */
	dev = skb->dev;
	if (dev)
		skb->dev = NULL;

	fq->q.stamp = skb->tstamp;
	if (frag_type == LOWPAN_DISPATCH_FRAG1) {
		/* Calculate uncomp. 6lowpan header to estimate full size */
		fq->q.meat += lowpan_uncompress_size(skb, NULL);
		fq->q.last_in |= INET_FRAG_FIRST_IN;
	} else {
		fq->q.meat += skb->len;
	}
	add_frag_mem_limit(&fq->q, skb->truesize);

	/* First and last fragments seen and the accumulated payload
	 * ("meat") covers the whole datagram: reassemble now.
	 */
	if (fq->q.last_in == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) &&
	    fq->q.meat == fq->q.len) {
		int res;
		unsigned long orefdst = skb->_skb_refdst;

		/* Save/restore _skb_refdst around reassembly, mirroring
		 * net/ipv6/reassembly.c which this file is based on.
		 */
		skb->_skb_refdst = 0UL;
		res = lowpan_frag_reasm(fq, prev, dev);
		skb->_skb_refdst = orefdst;
		return res;
	}

	return -1;
err:
	kfree_skb(skb);
	return -1;
}
227
/*	Rebuild the complete datagram from the queued fragments.
 *
 *	Returns 1 on successful reassembly (the merged skb becomes the
 *	list head, rebuilt in place) and -1 on failure (out of memory).
 *	(The previous comment, inherited from net/ipv6/reassembly.c,
 *	wrongly described a NULL/pointer return.)
 *
 *	It is called with locked fq, and caller must check that
 *	queue is eligible for reassembly i.e. it is not COMPLETE,
 *	the last and the first frames arrived and all the bits are here.
 */
static int lowpan_frag_reasm(struct lowpan_frag_queue *fq, struct sk_buff *prev,
			     struct net_device *dev)
{
	struct sk_buff *fp, *head = fq->q.fragments;
	int sum_truesize;

	/* Unhash and stop the timer; no new fragments may join. */
	inet_frag_kill(&fq->q, &lowpan_frags);

	/* Make the one we just received the head. */
	if (prev) {
		head = prev->next;
		fp = skb_clone(head, GFP_ATOMIC);

		if (!fp)
			goto out_oom;

		fp->next = head->next;
		if (!fp->next)
			fq->q.fragments_tail = fp;
		prev->next = fp;

		/* Transfer the old head's identity onto the new head. */
		skb_morph(head, fq->q.fragments);
		head->next = fq->q.fragments->next;

		consume_skb(fq->q.fragments);
		fq->q.fragments = head;
	}

	/* Head of list must not be cloned. */
	if (skb_unclone(head, GFP_ATOMIC))
		goto out_oom;

	/* If the first fragment is fragmented itself, we split
	 * it to two chunks: the first with data and paged part
	 * and the second, holding only fragments.
	 */
	if (skb_has_frag_list(head)) {
		struct sk_buff *clone;
		int i, plen = 0;

		clone = alloc_skb(0, GFP_ATOMIC);
		if (!clone)
			goto out_oom;
		clone->next = head->next;
		head->next = clone;
		skb_shinfo(clone)->frag_list = skb_shinfo(head)->frag_list;
		skb_frag_list_init(head);
		for (i = 0; i < skb_shinfo(head)->nr_frags; i++)
			plen += skb_frag_size(&skb_shinfo(head)->frags[i]);
		clone->len = head->data_len - plen;
		clone->data_len = clone->len;
		head->data_len -= clone->len;
		head->len -= clone->len;
		add_frag_mem_limit(&fq->q, clone->truesize);
	}

	WARN_ON(head == NULL);

	/* Fold every remaining fragment into head: coalesce into its
	 * pages when possible, otherwise chain via head's frag_list.
	 */
	sum_truesize = head->truesize;
	for (fp = head->next; fp;) {
		bool headstolen;
		int delta;
		struct sk_buff *next = fp->next;

		sum_truesize += fp->truesize;
		if (skb_try_coalesce(head, fp, &headstolen, &delta)) {
			kfree_skb_partial(fp, headstolen);
		} else {
			if (!skb_shinfo(head)->frag_list)
				skb_shinfo(head)->frag_list = fp;
			head->data_len += fp->len;
			head->len += fp->len;
			head->truesize += fp->truesize;
		}
		fp = next;
	}
	/* All merged fragments no longer count against the mem limit. */
	sub_frag_mem_limit(&fq->q, sum_truesize);

	head->next = NULL;
	head->dev = dev;
	head->tstamp = fq->q.stamp;

	fq->q.fragments = NULL;
	fq->q.fragments_tail = NULL;

	return 1;
out_oom:
	net_dbg_ratelimited("lowpan_frag_reasm: no memory for reassembly\n");
	return -1;
}
326
327 static int lowpan_get_frag_info(struct sk_buff *skb, const u8 frag_type,
328                                 struct lowpan_frag_info *frag_info)
329 {
330         bool fail;
331         u8 pattern = 0, low = 0;
332
333         fail = lowpan_fetch_skb(skb, &pattern, 1);
334         fail |= lowpan_fetch_skb(skb, &low, 1);
335         frag_info->d_size = (pattern & 7) << 8 | low;
336         fail |= lowpan_fetch_skb(skb, &frag_info->d_tag, 2);
337
338         if (frag_type == LOWPAN_DISPATCH_FRAGN) {
339                 fail |= lowpan_fetch_skb(skb, &frag_info->d_offset, 1);
340         } else {
341                 skb_reset_network_header(skb);
342                 frag_info->d_offset = 0;
343         }
344
345         if (unlikely(fail))
346                 return -EIO;
347
348         return 0;
349 }
350
351 int lowpan_frag_rcv(struct sk_buff *skb, const u8 frag_type)
352 {
353         struct lowpan_frag_queue *fq;
354         struct net *net = dev_net(skb->dev);
355         struct lowpan_frag_info *frag_info = lowpan_cb(skb);
356         struct ieee802154_addr source, dest;
357         struct netns_ieee802154_lowpan *ieee802154_lowpan =
358                 net_ieee802154_lowpan(net);
359         int err;
360
361         source = mac_cb(skb)->source;
362         dest = mac_cb(skb)->dest;
363
364         err = lowpan_get_frag_info(skb, frag_type, frag_info);
365         if (err < 0)
366                 goto err;
367
368         if (frag_info->d_size > ieee802154_lowpan->max_dsize)
369                 goto err;
370
371         fq = fq_find(net, frag_info, &source, &dest);
372         if (fq != NULL) {
373                 int ret;
374
375                 spin_lock(&fq->q.lock);
376                 ret = lowpan_frag_queue(fq, skb, frag_type);
377                 spin_unlock(&fq->q.lock);
378
379                 inet_frag_put(&fq->q, &lowpan_frags);
380                 return ret;
381         }
382
383 err:
384         kfree_skb(skb);
385         return -1;
386 }
387 EXPORT_SYMBOL(lowpan_frag_rcv);
388
389 #ifdef CONFIG_SYSCTL
/* Per-netns sysctls under net/ieee802154/6lowpan/.  The .data pointers
 * reference init_net's fields here; lowpan_frags_ns_sysctl_register()
 * re-points a kmemdup'd copy at each other netns by index, so entry
 * order must stay in sync with that function.
 */
static struct ctl_table lowpan_frags_ns_ctl_table[] = {
	{
		/* Memory high-water mark for queued fragments. */
		.procname	= "6lowpanfrag_high_thresh",
		.data		= &init_net.ieee802154_lowpan.frags.high_thresh,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec
	},
	{
		/* Eviction target once the high threshold is hit. */
		.procname	= "6lowpanfrag_low_thresh",
		.data		= &init_net.ieee802154_lowpan.frags.low_thresh,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec
	},
	{
		/* Reassembly timeout, stored in jiffies. */
		.procname	= "6lowpanfrag_time",
		.data		= &init_net.ieee802154_lowpan.frags.timeout,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{
		/* Largest datagram_size accepted by lowpan_frag_rcv(). */
		.procname	= "6lowpanfrag_max_datagram_size",
		.data		= &init_net.ieee802154_lowpan.max_dsize,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec
	},
	{ }
};
421
/* Global (non-per-netns) sysctls, registered once against init_net. */
static struct ctl_table lowpan_frags_ctl_table[] = {
	{
		/* Hash-secret rekeying interval, in jiffies. */
		.procname	= "6lowpanfrag_secret_interval",
		.data		= &lowpan_frags.secret_interval,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{ }
};
432
/* Register the per-netns 6LoWPAN frag sysctls.  init_net uses the
 * static template table directly; every other netns gets a kmemdup'd
 * copy whose .data pointers are redirected to its own counters.
 *
 * Returns 0 on success, -ENOMEM on allocation or registration failure
 * (the copied table is freed on the error path).
 */
static int __net_init lowpan_frags_ns_sysctl_register(struct net *net)
{
	struct ctl_table *table;
	struct ctl_table_header *hdr;
	struct netns_ieee802154_lowpan *ieee802154_lowpan =
		net_ieee802154_lowpan(net);

	table = lowpan_frags_ns_ctl_table;
	if (!net_eq(net, &init_net)) {
		table = kmemdup(table, sizeof(lowpan_frags_ns_ctl_table),
				GFP_KERNEL);
		if (table == NULL)
			goto err_alloc;

		/* Indices must match lowpan_frags_ns_ctl_table order. */
		table[0].data = &ieee802154_lowpan->frags.high_thresh;
		table[1].data = &ieee802154_lowpan->frags.low_thresh;
		table[2].data = &ieee802154_lowpan->frags.timeout;
		table[3].data = &ieee802154_lowpan->max_dsize;

		/* Don't export sysctls to unprivileged users */
		if (net->user_ns != &init_user_ns)
			table[0].procname = NULL;
	}

	hdr = register_net_sysctl(net, "net/ieee802154/6lowpan", table);
	if (hdr == NULL)
		goto err_reg;

	/* Keep the header so the exit path can unregister and free. */
	ieee802154_lowpan->sysctl.frags_hdr = hdr;
	return 0;

err_reg:
	if (!net_eq(net, &init_net))
		kfree(table);
err_alloc:
	return -ENOMEM;
}
470
471 static void __net_exit lowpan_frags_ns_sysctl_unregister(struct net *net)
472 {
473         struct ctl_table *table;
474         struct netns_ieee802154_lowpan *ieee802154_lowpan =
475                 net_ieee802154_lowpan(net);
476
477         table = ieee802154_lowpan->sysctl.frags_hdr->ctl_table_arg;
478         unregister_net_sysctl_table(ieee802154_lowpan->sysctl.frags_hdr);
479         if (!net_eq(net, &init_net))
480                 kfree(table);
481 }
482
483 static struct ctl_table_header *lowpan_ctl_header;
484
485 static int lowpan_frags_sysctl_register(void)
486 {
487         lowpan_ctl_header = register_net_sysctl(&init_net,
488                                                 "net/ieee802154/6lowpan",
489                                                 lowpan_frags_ctl_table);
490         return lowpan_ctl_header == NULL ? -ENOMEM : 0;
491 }
492
/* Remove the global 6LoWPAN frag sysctls. */
static void lowpan_frags_sysctl_unregister(void)
{
	unregister_net_sysctl_table(lowpan_ctl_header);
}
497 #else
/* CONFIG_SYSCTL=n: all sysctl registration collapses to no-ops. */
static inline int lowpan_frags_ns_sysctl_register(struct net *net)
{
	return 0;
}

static inline void lowpan_frags_ns_sysctl_unregister(struct net *net)
{
}

static inline int lowpan_frags_sysctl_register(void)
{
	return 0;
}

static inline void lowpan_frags_sysctl_unregister(void)
{
}
515 #endif
516
517 static int __net_init lowpan_frags_init_net(struct net *net)
518 {
519         struct netns_ieee802154_lowpan *ieee802154_lowpan =
520                 net_ieee802154_lowpan(net);
521
522         ieee802154_lowpan->frags.high_thresh = IPV6_FRAG_HIGH_THRESH;
523         ieee802154_lowpan->frags.low_thresh = IPV6_FRAG_LOW_THRESH;
524         ieee802154_lowpan->frags.timeout = IPV6_FRAG_TIMEOUT;
525         ieee802154_lowpan->max_dsize = 0xFFFF;
526
527         inet_frags_init_net(&ieee802154_lowpan->frags);
528
529         return lowpan_frags_ns_sysctl_register(net);
530 }
531
532 static void __net_exit lowpan_frags_exit_net(struct net *net)
533 {
534         struct netns_ieee802154_lowpan *ieee802154_lowpan =
535                 net_ieee802154_lowpan(net);
536
537         lowpan_frags_ns_sysctl_unregister(net);
538         inet_frags_exit_net(&ieee802154_lowpan->frags, &lowpan_frags);
539 }
540
/* Hook per-netns setup/teardown into the netns lifecycle. */
static struct pernet_operations lowpan_frags_ops = {
	.init = lowpan_frags_init_net,
	.exit = lowpan_frags_exit_net,
};
545
/* Module init: register the global sysctls and pernet hooks, then set
 * up the global inet_frags descriptor used by all reassembly paths.
 */
int __init lowpan_net_frag_init(void)
{
	int ret;

	ret = lowpan_frags_sysctl_register();
	if (ret)
		return ret;

	ret = register_pernet_subsys(&lowpan_frags_ops);
	if (ret)
		goto err_pernet;

	/* NOTE(review): lowpan_frags is populated only after
	 * register_pernet_subsys() has already run init_net's pernet init;
	 * this assumes that path never dereferences these callbacks —
	 * confirm against inet_frags_init_net() in net/ipv4/inet_fragment.c.
	 */
	lowpan_frags.hashfn = lowpan_hashfn;
	lowpan_frags.constructor = lowpan_frag_init;
	lowpan_frags.destructor = NULL;
	lowpan_frags.skb_free = NULL;
	lowpan_frags.qsize = sizeof(struct frag_queue);
	lowpan_frags.match = lowpan_frag_match;
	lowpan_frags.frag_expire = lowpan_frag_expire;
	lowpan_frags.secret_interval = 10 * 60 * HZ;
	inet_frags_init(&lowpan_frags);

	return ret;
err_pernet:
	lowpan_frags_sysctl_unregister();
	return ret;
}
573
/* Module exit: tear down the global frag state, sysctls and pernet
 * hooks registered by lowpan_net_frag_init().
 */
void lowpan_net_frag_exit(void)
{
	inet_frags_fini(&lowpan_frags);
	lowpan_frags_sysctl_unregister();
	unregister_pernet_subsys(&lowpan_frags_ops);
}