6lowpan: rework fragment-deleting routine
authoralex.bluesman.smirnov@gmail.com <alex.bluesman.smirnov@gmail.com>
Tue, 10 Jul 2012 21:22:48 +0000 (21:22 +0000)
committerDavid S. Miller <davem@davemloft.net>
Thu, 12 Jul 2012 14:54:46 +0000 (07:54 -0700)
The 6lowpan module starts collecting incoming frames and fragments
right after lowpan_module_init(), therefore it is better to
clean up unfinished fragments in lowpan_cleanup_module()
instead of doing it when the link goes down.

Changed the spinlock calls to their _bh variants to prevent a deadlock
with an expired-timer event, and removed an unused per-fragment lock.

Signed-off-by: Alexander Smirnov <alex.bluesman.smirnov@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
net/ieee802154/6lowpan.c

index 2e790fbe848d0345e06c72b8ba76eaa5b7bea311..6871ec1b30f88b724d499db4c307047ed7798869 100644 (file)
@@ -113,7 +113,6 @@ struct lowpan_dev_record {
 
 struct lowpan_fragment {
        struct sk_buff          *skb;           /* skb to be assembled */
-       spinlock_t              lock;           /* concurency lock */
        u16                     length;         /* length to be assemled */
        u32                     bytes_rcv;      /* bytes received */
        u16                     tag;            /* current fragment tag */
@@ -637,10 +636,7 @@ static void lowpan_fragment_timer_expired(unsigned long entry_addr)
 
        pr_debug("timer expired for frame with tag %d\n", entry->tag);
 
-       spin_lock(&flist_lock);
        list_del(&entry->list);
-       spin_unlock(&flist_lock);
-
        dev_kfree_skb(entry->skb);
        kfree(entry);
 }
@@ -727,7 +723,7 @@ lowpan_process_data(struct sk_buff *skb)
                 * check if frame assembling with the same tag is
                 * already in progress
                 */
-               spin_lock(&flist_lock);
+               spin_lock_bh(&flist_lock);
 
                list_for_each_entry(frame, &lowpan_fragments, list)
                        if (frame->tag == tag) {
@@ -761,9 +757,9 @@ lowpan_process_data(struct sk_buff *skb)
                if ((frame->bytes_rcv == frame->length) &&
                     frame->timer.expires > jiffies) {
                        /* if timer haven't expired - first of all delete it */
-                       del_timer(&frame->timer);
+                       del_timer_sync(&frame->timer);
                        list_del(&frame->list);
-                       spin_unlock(&flist_lock);
+                       spin_unlock_bh(&flist_lock);
 
                        dev_kfree_skb(skb);
                        skb = frame->skb;
@@ -774,7 +770,7 @@ lowpan_process_data(struct sk_buff *skb)
 
                        break;
                }
-               spin_unlock(&flist_lock);
+               spin_unlock_bh(&flist_lock);
 
                return kfree_skb(skb), 0;
        }
@@ -929,7 +925,7 @@ lowpan_process_data(struct sk_buff *skb)
        return lowpan_skb_deliver(skb, &hdr);
 
 unlock_and_drop:
-       spin_unlock(&flist_lock);
+       spin_unlock_bh(&flist_lock);
 drop:
        kfree_skb(skb);
        return -EINVAL;
@@ -1196,19 +1192,9 @@ static void lowpan_dellink(struct net_device *dev, struct list_head *head)
        struct lowpan_dev_info *lowpan_dev = lowpan_dev_info(dev);
        struct net_device *real_dev = lowpan_dev->real_dev;
        struct lowpan_dev_record *entry, *tmp;
-       struct lowpan_fragment *frame, *tframe;
 
        ASSERT_RTNL();
 
-       spin_lock(&flist_lock);
-       list_for_each_entry_safe(frame, tframe, &lowpan_fragments, list) {
-               del_timer(&frame->timer);
-               list_del(&frame->list);
-               dev_kfree_skb(frame->skb);
-               kfree(frame);
-       }
-       spin_unlock(&flist_lock);
-
        mutex_lock(&lowpan_dev_info(dev)->dev_list_mtx);
        list_for_each_entry_safe(entry, tmp, &lowpan_devices, list) {
                if (entry->ldev == dev) {
@@ -1264,9 +1250,24 @@ out:
 
 static void __exit lowpan_cleanup_module(void)
 {
+       struct lowpan_fragment *frame, *tframe;
+
        lowpan_netlink_fini();
 
        dev_remove_pack(&lowpan_packet_type);
+
+       /* Now 6lowpan packet_type is removed, so no new fragments are
+        * expected on RX, therefore that's the time to clean incomplete
+        * fragments.
+        */
+       spin_lock_bh(&flist_lock);
+       list_for_each_entry_safe(frame, tframe, &lowpan_fragments, list) {
+               del_timer_sync(&frame->timer);
+               list_del(&frame->list);
+               dev_kfree_skb(frame->skb);
+               kfree(frame);
+       }
+       spin_unlock_bh(&flist_lock);
 }
 
 module_init(lowpan_init_module);