Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
diff --git a/net/core/dev.c b/net/core/dev.c
index 9abc503b19b7dad367b83a8179468ee3e1ae72d5..5367bfba094781bc74f7e67853e96d4105948534 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1661,6 +1661,29 @@ bool is_skb_forwardable(struct net_device *dev, struct sk_buff *skb)
 }
 EXPORT_SYMBOL_GPL(is_skb_forwardable);
 
+int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
+{
+       if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
+               if (skb_copy_ubufs(skb, GFP_ATOMIC)) {
+                       atomic_long_inc(&dev->rx_dropped);
+                       kfree_skb(skb);
+                       return NET_RX_DROP;
+               }
+       }
+
+       if (unlikely(!is_skb_forwardable(dev, skb))) {
+               atomic_long_inc(&dev->rx_dropped);
+               kfree_skb(skb);
+               return NET_RX_DROP;
+       }
+
+       skb_scrub_packet(skb, true);
+       skb->protocol = eth_type_trans(skb, dev);
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(__dev_forward_skb);
+
 /**
  * dev_forward_skb - loopback an skb to another netif
  *
@@ -1681,24 +1704,7 @@ EXPORT_SYMBOL_GPL(is_skb_forwardable);
  */
 int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
 {
-       if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
-               if (skb_copy_ubufs(skb, GFP_ATOMIC)) {
-                       atomic_long_inc(&dev->rx_dropped);
-                       kfree_skb(skb);
-                       return NET_RX_DROP;
-               }
-       }
-
-       if (unlikely(!is_skb_forwardable(dev, skb))) {
-               atomic_long_inc(&dev->rx_dropped);
-               kfree_skb(skb);
-               return NET_RX_DROP;
-       }
-
-       skb_scrub_packet(skb, true);
-       skb->protocol = eth_type_trans(skb, dev);
-
-       return netif_rx_internal(skb);
+       return __dev_forward_skb(dev, skb) ?: netif_rx_internal(skb);
 }
 EXPORT_SYMBOL_GPL(dev_forward_skb);
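The new __dev_forward_skb() helper performs the zerocopy copy-out, the forwardability check, the drop accounting and the scrub, but leaves delivery to the caller, so dev_forward_skb() reduces to the helper followed by netif_rx_internal(). A minimal sketch of a hypothetical caller that delivers synchronously instead (the function name and surrounding context are illustrative, not part of this patch; it assumes the __dev_forward_skb() prototype is visible via <linux/netdevice.h>):

	#include <linux/netdevice.h>
	#include <linux/skbuff.h>

	/* Hypothetical sketch: forward an skb to a peer device and hand it to
	 * the stack in the current context with netif_receive_skb(), rather
	 * than queueing it through netif_rx() as dev_forward_skb() does.
	 */
	static int example_forward_and_receive(struct net_device *peer,
					       struct sk_buff *skb)
	{
		int ret = __dev_forward_skb(peer, skb);	/* checks, scrub, or drop */

		if (likely(ret == 0))
			ret = netif_receive_skb(skb);	/* deliver on the peer */

		return ret;
	}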
 
@@ -2283,8 +2289,8 @@ EXPORT_SYMBOL(skb_checksum_help);
 
 __be16 skb_network_protocol(struct sk_buff *skb, int *depth)
 {
+       unsigned int vlan_depth = skb->mac_len;
        __be16 type = skb->protocol;
-       int vlan_depth = skb->mac_len;
 
        /* Tunnel gso handlers can set protocol to ethernet. */
        if (type == htons(ETH_P_TEB)) {
@@ -2297,15 +2303,30 @@ __be16 skb_network_protocol(struct sk_buff *skb, int *depth)
                type = eth->h_proto;
        }
 
-       while (type == htons(ETH_P_8021Q) || type == htons(ETH_P_8021AD)) {
-               struct vlan_hdr *vh;
+       /* if skb->protocol is 802.1Q/AD then the header should already be
+        * present at mac_len - VLAN_HLEN (if mac_len > 0), or at
+        * ETH_HLEN otherwise
+        */
+       if (type == htons(ETH_P_8021Q) || type == htons(ETH_P_8021AD)) {
+               if (vlan_depth) {
+                       if (unlikely(WARN_ON(vlan_depth < VLAN_HLEN)))
+                               return 0;
+                       vlan_depth -= VLAN_HLEN;
+               } else {
+                       vlan_depth = ETH_HLEN;
+               }
+               do {
+                       struct vlan_hdr *vh;
 
-               if (unlikely(!pskb_may_pull(skb, vlan_depth + VLAN_HLEN)))
-                       return 0;
+                       if (unlikely(!pskb_may_pull(skb,
+                                                   vlan_depth + VLAN_HLEN)))
+                               return 0;
 
-               vh = (struct vlan_hdr *)(skb->data + vlan_depth);
-               type = vh->h_vlan_encapsulated_proto;
-               vlan_depth += VLAN_HLEN;
+                       vh = (struct vlan_hdr *)(skb->data + vlan_depth);
+                       type = vh->h_vlan_encapsulated_proto;
+                       vlan_depth += VLAN_HLEN;
+               } while (type == htons(ETH_P_8021Q) ||
+                        type == htons(ETH_P_8021AD));
        }
 
        *depth = vlan_depth;
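For reference, a worked example of the new starting offset (ETH_HLEN is 14 and VLAN_HLEN is 4; the concrete mac_len values are illustrative):

	/*
	 * skb->mac_len == 18 (Ethernet header plus one VLAN tag already
	 * counted): the first struct vlan_hdr is read at 18 - VLAN_HLEN == 14.
	 *
	 * skb->mac_len == 0 (MAC header length not set): parsing starts at
	 * ETH_HLEN == 14.
	 *
	 * Each nested 802.1Q/802.1ad tag then advances vlan_depth by
	 * VLAN_HLEN, so *depth ends up as the full MAC header length
	 * including all VLAN tags.
	 */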
@@ -5674,10 +5695,6 @@ static void rollback_registered_many(struct list_head *head)
                */
                call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
 
-               if (!dev->rtnl_link_ops ||
-                   dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
-                       rtmsg_ifinfo(RTM_DELLINK, dev, ~0U, GFP_KERNEL);
-
                /*
                 *      Flush the unicast and multicast chains
                 */
@@ -5687,6 +5704,10 @@ static void rollback_registered_many(struct list_head *head)
                if (dev->netdev_ops->ndo_uninit)
                        dev->netdev_ops->ndo_uninit(dev);
 
+               if (!dev->rtnl_link_ops ||
+                   dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
+                       rtmsg_ifinfo(RTM_DELLINK, dev, ~0U, GFP_KERNEL);
+
                /* Notifier chain MUST detach us all upper devices. */
                WARN_ON(netdev_has_any_upper_dev(dev));
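The rtmsg_ifinfo(RTM_DELLINK, ...) call above is what userspace observes on the rtnetlink link multicast group. A minimal, illustrative userspace listener (not part of this patch, error handling trimmed):

	/* Illustrative sketch: subscribe to RTMGRP_LINK and print a line
	 * whenever an RTM_DELLINK notification arrives.
	 */
	#include <stdio.h>
	#include <sys/socket.h>
	#include <linux/netlink.h>
	#include <linux/rtnetlink.h>

	int main(void)
	{
		struct sockaddr_nl addr = {
			.nl_family = AF_NETLINK,
			.nl_groups = RTMGRP_LINK,
		};
		char buf[8192];
		int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);

		if (fd < 0 || bind(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0)
			return 1;

		for (;;) {
			int len = recv(fd, buf, sizeof(buf), 0);
			struct nlmsghdr *nh;

			if (len <= 0)
				break;
			for (nh = (struct nlmsghdr *)buf; NLMSG_OK(nh, len);
			     nh = NLMSG_NEXT(nh, len))
				if (nh->nlmsg_type == RTM_DELLINK)
					printf("RTM_DELLINK received\n");
		}
		return 0;
	}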
 
@@ -6497,11 +6518,6 @@ free_all:
 
 free_pcpu:
        free_percpu(dev->pcpu_refcnt);
-       netif_free_tx_queues(dev);
-#ifdef CONFIG_SYSFS
-       kfree(dev->_rx);
-#endif
-
 free_dev:
        netdev_freemem(dev);
        return NULL;