Merge branch 'core-rcu-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git...
net/core/dev.c
index c5d8882d100facd6f96a70186727f97f94a31ad7..46580b290450edfd2aa8a434ae2c14245bd336bb 100644
 #include "net-sysfs.h"
 
 #define MAX_GRO_SKBS 8
+#define MAX_NEST_DEV 8
 
 /* This should be increased if a protocol with a bigger head is added. */
 #define GRO_MAX_HEAD (MAX_HEADER + 128)
@@ -228,6 +229,122 @@ static inline void rps_unlock(struct softnet_data *sd)
 #endif
 }
 
+static struct netdev_name_node *netdev_name_node_alloc(struct net_device *dev,
+                                                      const char *name)
+{
+       struct netdev_name_node *name_node;
+
+       name_node = kmalloc(sizeof(*name_node), GFP_KERNEL);
+       if (!name_node)
+               return NULL;
+       INIT_HLIST_NODE(&name_node->hlist);
+       name_node->dev = dev;
+       name_node->name = name;
+       return name_node;
+}
+
+static struct netdev_name_node *
+netdev_name_node_head_alloc(struct net_device *dev)
+{
+       struct netdev_name_node *name_node;
+
+       name_node = netdev_name_node_alloc(dev, dev->name);
+       if (!name_node)
+               return NULL;
+       INIT_LIST_HEAD(&name_node->list);
+       return name_node;
+}
+
+static void netdev_name_node_free(struct netdev_name_node *name_node)
+{
+       kfree(name_node);
+}
+
+static void netdev_name_node_add(struct net *net,
+                                struct netdev_name_node *name_node)
+{
+       hlist_add_head_rcu(&name_node->hlist,
+                          dev_name_hash(net, name_node->name));
+}
+
+static void netdev_name_node_del(struct netdev_name_node *name_node)
+{
+       hlist_del_rcu(&name_node->hlist);
+}
+
+static struct netdev_name_node *netdev_name_node_lookup(struct net *net,
+                                                       const char *name)
+{
+       struct hlist_head *head = dev_name_hash(net, name);
+       struct netdev_name_node *name_node;
+
+       hlist_for_each_entry(name_node, head, hlist)
+               if (!strcmp(name_node->name, name))
+                       return name_node;
+       return NULL;
+}
+
+static struct netdev_name_node *netdev_name_node_lookup_rcu(struct net *net,
+                                                           const char *name)
+{
+       struct hlist_head *head = dev_name_hash(net, name);
+       struct netdev_name_node *name_node;
+
+       hlist_for_each_entry_rcu(name_node, head, hlist)
+               if (!strcmp(name_node->name, name))
+                       return name_node;
+       return NULL;
+}
+
+int netdev_name_node_alt_create(struct net_device *dev, const char *name)
+{
+       struct netdev_name_node *name_node;
+       struct net *net = dev_net(dev);
+
+       name_node = netdev_name_node_lookup(net, name);
+       if (name_node)
+               return -EEXIST;
+       name_node = netdev_name_node_alloc(dev, name);
+       if (!name_node)
+               return -ENOMEM;
+       netdev_name_node_add(net, name_node);
+       /* The node that holds dev->name acts as the head of the per-device list. */
+       list_add_tail(&name_node->list, &dev->name_node->list);
+
+       return 0;
+}
+EXPORT_SYMBOL(netdev_name_node_alt_create);
+
+static void __netdev_name_node_alt_destroy(struct netdev_name_node *name_node)
+{
+       list_del(&name_node->list);
+       netdev_name_node_del(name_node);
+       kfree(name_node->name);
+       netdev_name_node_free(name_node);
+}
+
+int netdev_name_node_alt_destroy(struct net_device *dev, const char *name)
+{
+       struct netdev_name_node *name_node;
+       struct net *net = dev_net(dev);
+
+       name_node = netdev_name_node_lookup(net, name);
+       if (!name_node)
+               return -ENOENT;
+       __netdev_name_node_alt_destroy(name_node);
+
+       return 0;
+}
+EXPORT_SYMBOL(netdev_name_node_alt_destroy);
+
+static void netdev_name_node_alt_flush(struct net_device *dev)
+{
+       struct netdev_name_node *name_node, *tmp;
+
+       list_for_each_entry_safe(name_node, tmp, &dev->name_node->list, list)
+               __netdev_name_node_alt_destroy(name_node);
+}
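
These alternative-name helpers take ownership of the string: __netdev_name_node_alt_destroy() kfree()s name_node->name, so a caller has to pass a heap-allocated copy. A minimal caller sketch, assuming rtnl_lock is held; the wrapper name is hypothetical:

static int example_add_alt_name(struct net_device *dev, const char *prop)
{
	char *name;
	int err;

	name = kstrdup(prop, GFP_KERNEL);	/* freed by the destroy path */
	if (!name)
		return -ENOMEM;

	err = netdev_name_node_alt_create(dev, name);
	if (err)
		kfree(name);	/* not linked into the hash, still ours */
	return err;
}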
+
 /* Device list insertion */
 static void list_netdevice(struct net_device *dev)
 {
@@ -237,7 +354,7 @@ static void list_netdevice(struct net_device *dev)
 
        write_lock_bh(&dev_base_lock);
        list_add_tail_rcu(&dev->dev_list, &net->dev_base_head);
-       hlist_add_head_rcu(&dev->name_hlist, dev_name_hash(net, dev->name));
+       netdev_name_node_add(net, dev->name_node);
        hlist_add_head_rcu(&dev->index_hlist,
                           dev_index_hash(net, dev->ifindex));
        write_unlock_bh(&dev_base_lock);
@@ -255,7 +372,7 @@ static void unlist_netdevice(struct net_device *dev)
        /* Unlink dev from the device chain */
        write_lock_bh(&dev_base_lock);
        list_del_rcu(&dev->dev_list);
-       hlist_del_rcu(&dev->name_hlist);
+       netdev_name_node_del(dev->name_node);
        hlist_del_rcu(&dev->index_hlist);
        write_unlock_bh(&dev_base_lock);
 
@@ -276,88 +393,6 @@ static RAW_NOTIFIER_HEAD(netdev_chain);
 DEFINE_PER_CPU_ALIGNED(struct softnet_data, softnet_data);
 EXPORT_PER_CPU_SYMBOL(softnet_data);
 
-#ifdef CONFIG_LOCKDEP
-/*
- * register_netdevice() inits txq->_xmit_lock and sets lockdep class
- * according to dev->type
- */
-static const unsigned short netdev_lock_type[] = {
-        ARPHRD_NETROM, ARPHRD_ETHER, ARPHRD_EETHER, ARPHRD_AX25,
-        ARPHRD_PRONET, ARPHRD_CHAOS, ARPHRD_IEEE802, ARPHRD_ARCNET,
-        ARPHRD_APPLETLK, ARPHRD_DLCI, ARPHRD_ATM, ARPHRD_METRICOM,
-        ARPHRD_IEEE1394, ARPHRD_EUI64, ARPHRD_INFINIBAND, ARPHRD_SLIP,
-        ARPHRD_CSLIP, ARPHRD_SLIP6, ARPHRD_CSLIP6, ARPHRD_RSRVD,
-        ARPHRD_ADAPT, ARPHRD_ROSE, ARPHRD_X25, ARPHRD_HWX25,
-        ARPHRD_PPP, ARPHRD_CISCO, ARPHRD_LAPB, ARPHRD_DDCMP,
-        ARPHRD_RAWHDLC, ARPHRD_TUNNEL, ARPHRD_TUNNEL6, ARPHRD_FRAD,
-        ARPHRD_SKIP, ARPHRD_LOOPBACK, ARPHRD_LOCALTLK, ARPHRD_FDDI,
-        ARPHRD_BIF, ARPHRD_SIT, ARPHRD_IPDDP, ARPHRD_IPGRE,
-        ARPHRD_PIMREG, ARPHRD_HIPPI, ARPHRD_ASH, ARPHRD_ECONET,
-        ARPHRD_IRDA, ARPHRD_FCPP, ARPHRD_FCAL, ARPHRD_FCPL,
-        ARPHRD_FCFABRIC, ARPHRD_IEEE80211, ARPHRD_IEEE80211_PRISM,
-        ARPHRD_IEEE80211_RADIOTAP, ARPHRD_PHONET, ARPHRD_PHONET_PIPE,
-        ARPHRD_IEEE802154, ARPHRD_VOID, ARPHRD_NONE};
-
-static const char *const netdev_lock_name[] = {
-       "_xmit_NETROM", "_xmit_ETHER", "_xmit_EETHER", "_xmit_AX25",
-       "_xmit_PRONET", "_xmit_CHAOS", "_xmit_IEEE802", "_xmit_ARCNET",
-       "_xmit_APPLETLK", "_xmit_DLCI", "_xmit_ATM", "_xmit_METRICOM",
-       "_xmit_IEEE1394", "_xmit_EUI64", "_xmit_INFINIBAND", "_xmit_SLIP",
-       "_xmit_CSLIP", "_xmit_SLIP6", "_xmit_CSLIP6", "_xmit_RSRVD",
-       "_xmit_ADAPT", "_xmit_ROSE", "_xmit_X25", "_xmit_HWX25",
-       "_xmit_PPP", "_xmit_CISCO", "_xmit_LAPB", "_xmit_DDCMP",
-       "_xmit_RAWHDLC", "_xmit_TUNNEL", "_xmit_TUNNEL6", "_xmit_FRAD",
-       "_xmit_SKIP", "_xmit_LOOPBACK", "_xmit_LOCALTLK", "_xmit_FDDI",
-       "_xmit_BIF", "_xmit_SIT", "_xmit_IPDDP", "_xmit_IPGRE",
-       "_xmit_PIMREG", "_xmit_HIPPI", "_xmit_ASH", "_xmit_ECONET",
-       "_xmit_IRDA", "_xmit_FCPP", "_xmit_FCAL", "_xmit_FCPL",
-       "_xmit_FCFABRIC", "_xmit_IEEE80211", "_xmit_IEEE80211_PRISM",
-       "_xmit_IEEE80211_RADIOTAP", "_xmit_PHONET", "_xmit_PHONET_PIPE",
-       "_xmit_IEEE802154", "_xmit_VOID", "_xmit_NONE"};
-
-static struct lock_class_key netdev_xmit_lock_key[ARRAY_SIZE(netdev_lock_type)];
-static struct lock_class_key netdev_addr_lock_key[ARRAY_SIZE(netdev_lock_type)];
-
-static inline unsigned short netdev_lock_pos(unsigned short dev_type)
-{
-       int i;
-
-       for (i = 0; i < ARRAY_SIZE(netdev_lock_type); i++)
-               if (netdev_lock_type[i] == dev_type)
-                       return i;
-       /* the last key is used by default */
-       return ARRAY_SIZE(netdev_lock_type) - 1;
-}
-
-static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
-                                                unsigned short dev_type)
-{
-       int i;
-
-       i = netdev_lock_pos(dev_type);
-       lockdep_set_class_and_name(lock, &netdev_xmit_lock_key[i],
-                                  netdev_lock_name[i]);
-}
-
-static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
-{
-       int i;
-
-       i = netdev_lock_pos(dev->type);
-       lockdep_set_class_and_name(&dev->addr_list_lock,
-                                  &netdev_addr_lock_key[i],
-                                  netdev_lock_name[i]);
-}
-#else
-static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
-                                                unsigned short dev_type)
-{
-}
-static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
-{
-}
-#endif
-
 /*******************************************************************************
  *
  *             Protocol management and registration routines
@@ -733,14 +768,10 @@ EXPORT_SYMBOL_GPL(dev_fill_metadata_dst);
 
 struct net_device *__dev_get_by_name(struct net *net, const char *name)
 {
-       struct net_device *dev;
-       struct hlist_head *head = dev_name_hash(net, name);
-
-       hlist_for_each_entry(dev, head, name_hlist)
-               if (!strncmp(dev->name, name, IFNAMSIZ))
-                       return dev;
+       struct netdev_name_node *node_name;
 
-       return NULL;
+       node_name = netdev_name_node_lookup(net, name);
+       return node_name ? node_name->dev : NULL;
 }
 EXPORT_SYMBOL(__dev_get_by_name);
 
@@ -758,14 +789,10 @@ EXPORT_SYMBOL(__dev_get_by_name);
 
 struct net_device *dev_get_by_name_rcu(struct net *net, const char *name)
 {
-       struct net_device *dev;
-       struct hlist_head *head = dev_name_hash(net, name);
+       struct netdev_name_node *node_name;
 
-       hlist_for_each_entry_rcu(dev, head, name_hlist)
-               if (!strncmp(dev->name, name, IFNAMSIZ))
-                       return dev;
-
-       return NULL;
+       node_name = netdev_name_node_lookup_rcu(net, name);
+       return node_name ? node_name->dev : NULL;
 }
 EXPORT_SYMBOL(dev_get_by_name_rcu);
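
With the name-node hash, names added through netdev_name_node_alt_create() resolve through the same lookup path as dev->name. A small lookup sketch under RCU; the helper and the name it is given are hypothetical:

static bool example_dev_is_running(struct net *net, const char *any_name)
{
	struct net_device *dev;
	bool running = false;

	rcu_read_lock();
	dev = dev_get_by_name_rcu(net, any_name);	/* no reference taken */
	if (dev)
		running = netif_running(dev);
	rcu_read_unlock();

	return running;
}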
 
@@ -1141,8 +1168,8 @@ int dev_alloc_name(struct net_device *dev, const char *name)
 }
 EXPORT_SYMBOL(dev_alloc_name);
 
-int dev_get_valid_name(struct net *net, struct net_device *dev,
-                      const char *name)
+static int dev_get_valid_name(struct net *net, struct net_device *dev,
+                             const char *name)
 {
        BUG_ON(!net);
 
@@ -1158,7 +1185,6 @@ int dev_get_valid_name(struct net *net, struct net_device *dev,
 
        return 0;
 }
-EXPORT_SYMBOL(dev_get_valid_name);
 
 /**
  *     dev_change_name - change name of a device
@@ -1232,13 +1258,13 @@ rollback:
        netdev_adjacent_rename_links(dev, oldname);
 
        write_lock_bh(&dev_base_lock);
-       hlist_del_rcu(&dev->name_hlist);
+       netdev_name_node_del(dev->name_node);
        write_unlock_bh(&dev_base_lock);
 
        synchronize_rcu();
 
        write_lock_bh(&dev_base_lock);
-       hlist_add_head_rcu(&dev->name_hlist, dev_name_hash(net, dev->name));
+       netdev_name_node_add(net, dev->name_node);
        write_unlock_bh(&dev_base_lock);
 
        ret = call_netdevice_notifiers(NETDEV_CHANGENAME, dev);
@@ -1617,6 +1643,62 @@ static int call_netdevice_notifier(struct notifier_block *nb, unsigned long val,
        return nb->notifier_call(nb, val, &info);
 }
 
+static int call_netdevice_register_notifiers(struct notifier_block *nb,
+                                            struct net_device *dev)
+{
+       int err;
+
+       err = call_netdevice_notifier(nb, NETDEV_REGISTER, dev);
+       err = notifier_to_errno(err);
+       if (err)
+               return err;
+
+       if (!(dev->flags & IFF_UP))
+               return 0;
+
+       call_netdevice_notifier(nb, NETDEV_UP, dev);
+       return 0;
+}
+
+static void call_netdevice_unregister_notifiers(struct notifier_block *nb,
+                                               struct net_device *dev)
+{
+       if (dev->flags & IFF_UP) {
+               call_netdevice_notifier(nb, NETDEV_GOING_DOWN, dev);
+               call_netdevice_notifier(nb, NETDEV_DOWN, dev);
+       }
+       call_netdevice_notifier(nb, NETDEV_UNREGISTER, dev);
+}
+
+static int call_netdevice_register_net_notifiers(struct notifier_block *nb,
+                                                struct net *net)
+{
+       struct net_device *dev;
+       int err;
+
+       for_each_netdev(net, dev) {
+               err = call_netdevice_register_notifiers(nb, dev);
+               if (err)
+                       goto rollback;
+       }
+       return 0;
+
+rollback:
+       for_each_netdev_continue_reverse(net, dev)
+               call_netdevice_unregister_notifiers(nb, dev);
+       return err;
+}
+
+static void call_netdevice_unregister_net_notifiers(struct notifier_block *nb,
+                                                   struct net *net)
+{
+       struct net_device *dev;
+
+       for_each_netdev(net, dev)
+               call_netdevice_unregister_notifiers(nb, dev);
+}
+
 static int dev_boot_phase = 1;
 
 /**
@@ -1635,8 +1717,6 @@ static int dev_boot_phase = 1;
 
 int register_netdevice_notifier(struct notifier_block *nb)
 {
-       struct net_device *dev;
-       struct net_device *last;
        struct net *net;
        int err;
 
@@ -1649,17 +1729,9 @@ int register_netdevice_notifier(struct notifier_block *nb)
        if (dev_boot_phase)
                goto unlock;
        for_each_net(net) {
-               for_each_netdev(net, dev) {
-                       err = call_netdevice_notifier(nb, NETDEV_REGISTER, dev);
-                       err = notifier_to_errno(err);
-                       if (err)
-                               goto rollback;
-
-                       if (!(dev->flags & IFF_UP))
-                               continue;
-
-                       call_netdevice_notifier(nb, NETDEV_UP, dev);
-               }
+               err = call_netdevice_register_net_notifiers(nb, net);
+               if (err)
+                       goto rollback;
        }
 
 unlock:
@@ -1668,22 +1740,9 @@ unlock:
        return err;
 
 rollback:
-       last = dev;
-       for_each_net(net) {
-               for_each_netdev(net, dev) {
-                       if (dev == last)
-                               goto outroll;
-
-                       if (dev->flags & IFF_UP) {
-                               call_netdevice_notifier(nb, NETDEV_GOING_DOWN,
-                                                       dev);
-                               call_netdevice_notifier(nb, NETDEV_DOWN, dev);
-                       }
-                       call_netdevice_notifier(nb, NETDEV_UNREGISTER, dev);
-               }
-       }
+       for_each_net_continue_reverse(net)
+               call_netdevice_unregister_net_notifiers(nb, net);
 
-outroll:
        raw_notifier_chain_unregister(&netdev_chain, nb);
        goto unlock;
 }
@@ -1733,6 +1792,80 @@ unlock:
 }
 EXPORT_SYMBOL(unregister_netdevice_notifier);
 
+/**
+ * register_netdevice_notifier_net - register a per-netns network notifier block
+ * @net: network namespace
+ * @nb: notifier
+ *
+ * Register a notifier to be called when network device events occur.
+ * The notifier passed is linked into the kernel structures and must
+ * not be reused until it has been unregistered. A negative errno code
+ * is returned on a failure.
+ *
+ * When registered, all registration and up events are replayed
+ * to the new notifier to allow it to have a race-free
+ * view of the network device list.
+ */
+
+int register_netdevice_notifier_net(struct net *net, struct notifier_block *nb)
+{
+       int err;
+
+       rtnl_lock();
+       err = raw_notifier_chain_register(&net->netdev_chain, nb);
+       if (err)
+               goto unlock;
+       if (dev_boot_phase)
+               goto unlock;
+
+       err = call_netdevice_register_net_notifiers(nb, net);
+       if (err)
+               goto chain_unregister;
+
+unlock:
+       rtnl_unlock();
+       return err;
+
+chain_unregister:
+       raw_notifier_chain_unregister(&net->netdev_chain, nb);
+       goto unlock;
+}
+EXPORT_SYMBOL(register_netdevice_notifier_net);
+
+/**
+ * unregister_netdevice_notifier_net - unregister a per-netns
+ *                                     network notifier block
+ * @net: network namespace
+ * @nb: notifier
+ *
+ * Unregister a notifier previously registered by
+ * register_netdevice_notifier_net(). The notifier is unlinked from the
+ * kernel structures and may then be reused. A negative errno code
+ * is returned on a failure.
+ *
+ * After unregistering, unregister and down device events are synthesized
+ * for all devices on the device list to the removed notifier to remove
+ * the need for special case cleanup code.
+ */
+
+int unregister_netdevice_notifier_net(struct net *net,
+                                     struct notifier_block *nb)
+{
+       int err;
+
+       rtnl_lock();
+       err = raw_notifier_chain_unregister(&net->netdev_chain, nb);
+       if (err)
+               goto unlock;
+
+       call_netdevice_unregister_net_notifiers(nb, net);
+
+unlock:
+       rtnl_unlock();
+       return err;
+}
+EXPORT_SYMBOL(unregister_netdevice_notifier_net);
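
A usage sketch for the per-netns notifier API, registering against init_net only; all names here are hypothetical. Note that a notifier_block can sit on only one chain at a time, so code covering several namespaces needs one block per namespace:

static int example_netdev_event(struct notifier_block *nb,
				unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);

	/* NETDEV_REGISTER (and NETDEV_UP for running devices) is replayed
	 * for devices that already exist at registration time.
	 */
	if (event == NETDEV_REGISTER || event == NETDEV_UNREGISTER)
		netdev_dbg(dev, "seen event %lu\n", event);
	return NOTIFY_DONE;
}

static struct notifier_block example_nb = {
	.notifier_call = example_netdev_event,
};

static int __init example_init(void)
{
	return register_netdevice_notifier_net(&init_net, &example_nb);
}

static void __exit example_exit(void)
{
	unregister_netdevice_notifier_net(&init_net, &example_nb);
}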
+
 /**
  *     call_netdevice_notifiers_info - call all network notifier blocks
  *     @val: value passed unmodified to notifier function
@@ -1745,7 +1878,18 @@ EXPORT_SYMBOL(unregister_netdevice_notifier);
 static int call_netdevice_notifiers_info(unsigned long val,
                                         struct netdev_notifier_info *info)
 {
+       struct net *net = dev_net(info->dev);
+       int ret;
+
        ASSERT_RTNL();
+
+       /* Run per-netns notifier block chain first, then run the global one.
+        * Hopefully, one day, the global one is going to be removed after
+        * all notifier block registrators get converted to be per-netns.
+        */
+       ret = raw_notifier_call_chain(&net->netdev_chain, val, info);
+       if (ret & NOTIFY_STOP_MASK)
+               return ret;
        return raw_notifier_call_chain(&netdev_chain, val, info);
 }
 
@@ -2771,7 +2915,7 @@ static struct dev_kfree_skb_cb *get_kfree_skb_cb(const struct sk_buff *skb)
 void netif_schedule_queue(struct netdev_queue *txq)
 {
        rcu_read_lock();
-       if (!(txq->state & QUEUE_STATE_ANY_XOFF)) {
+       if (!netif_xmit_stopped(txq)) {
                struct Qdisc *q = rcu_dereference(txq->qdisc);
 
                __netif_schedule(q);
@@ -2939,12 +3083,9 @@ int skb_checksum_help(struct sk_buff *skb)
        offset += skb->csum_offset;
        BUG_ON(offset + sizeof(__sum16) > skb_headlen(skb));
 
-       if (skb_cloned(skb) &&
-           !skb_clone_writable(skb, offset + sizeof(__sum16))) {
-               ret = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
-               if (ret)
-                       goto out;
-       }
+       ret = skb_ensure_writable(skb, offset + sizeof(__sum16));
+       if (ret)
+               goto out;
 
        *(__sum16 *)(skb->data + offset) = csum_fold(csum) ?: CSUM_MANGLED_0;
 out_set_summed:
@@ -2979,12 +3120,11 @@ int skb_crc32c_csum_help(struct sk_buff *skb)
                ret = -EINVAL;
                goto out;
        }
-       if (skb_cloned(skb) &&
-           !skb_clone_writable(skb, offset + sizeof(__le32))) {
-               ret = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
-               if (ret)
-                       goto out;
-       }
+
+       ret = skb_ensure_writable(skb, offset + sizeof(__le32));
+       if (ret)
+               goto out;
+
        crc32c_csum = cpu_to_le32(~__skb_checksum(skb, start,
                                                  skb->len - start, ~(__u32)0,
                                                  crc32c_csum_stub));
@@ -3467,7 +3607,7 @@ static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
        qdisc_calculate_pkt_len(skb, q);
 
        if (q->flags & TCQ_F_NOLOCK) {
-               if ((q->flags & TCQ_F_CAN_BYPASS) && q->empty &&
+               if ((q->flags & TCQ_F_CAN_BYPASS) && READ_ONCE(q->empty) &&
                    qdisc_run_begin(q)) {
                        if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED,
                                              &q->state))) {
@@ -5446,7 +5586,7 @@ static struct list_head *gro_list_prepare(struct napi_struct *napi,
                diffs = (unsigned long)p->dev ^ (unsigned long)skb->dev;
                diffs |= skb_vlan_tag_present(p) ^ skb_vlan_tag_present(skb);
                if (skb_vlan_tag_present(p))
-                       diffs |= p->vlan_tci ^ skb->vlan_tci;
+                       diffs |= skb_vlan_tag_get(p) ^ skb_vlan_tag_get(skb);
                diffs |= skb_metadata_dst_cmp(p, skb);
                diffs |= skb_metadata_differs(p, skb);
                if (maclen == ETH_HLEN)
@@ -5471,8 +5611,7 @@ static void skb_gro_reset_offset(struct sk_buff *skb)
        NAPI_GRO_CB(skb)->frag0 = NULL;
        NAPI_GRO_CB(skb)->frag0_len = 0;
 
-       if (skb_mac_header(skb) == skb_tail_pointer(skb) &&
-           pinfo->nr_frags &&
+       if (!skb_headlen(skb) && pinfo->nr_frags &&
            !PageHighMem(skb_frag_page(frag0))) {
                NAPI_GRO_CB(skb)->frag0 = skb_frag_address(frag0);
                NAPI_GRO_CB(skb)->frag0_len = min_t(unsigned int,
@@ -5663,6 +5802,26 @@ struct packet_offload *gro_find_complete_by_type(__be16 type)
 }
 EXPORT_SYMBOL(gro_find_complete_by_type);
 
+/* Pass the currently batched GRO_NORMAL SKBs up to the stack. */
+static void gro_normal_list(struct napi_struct *napi)
+{
+       if (!napi->rx_count)
+               return;
+       netif_receive_skb_list_internal(&napi->rx_list);
+       INIT_LIST_HEAD(&napi->rx_list);
+       napi->rx_count = 0;
+}
+
+/* Queue one GRO_NORMAL SKB up for list processing. If batch size exceeded,
+ * pass the whole batch up to the stack.
+ */
+static void gro_normal_one(struct napi_struct *napi, struct sk_buff *skb)
+{
+       list_add_tail(&skb->list, &napi->rx_list);
+       if (++napi->rx_count >= gro_normal_batch)
+               gro_normal_list(napi);
+}
+
 static void napi_skb_free_stolen_head(struct sk_buff *skb)
 {
        skb_dst_drop(skb);
@@ -5670,12 +5829,13 @@ static void napi_skb_free_stolen_head(struct sk_buff *skb)
        kmem_cache_free(skbuff_head_cache, skb);
 }
 
-static gro_result_t napi_skb_finish(gro_result_t ret, struct sk_buff *skb)
+static gro_result_t napi_skb_finish(struct napi_struct *napi,
+                                   struct sk_buff *skb,
+                                   gro_result_t ret)
 {
        switch (ret) {
        case GRO_NORMAL:
-               if (netif_receive_skb_internal(skb))
-                       ret = GRO_DROP;
+               gro_normal_one(napi, skb);
                break;
 
        case GRO_DROP:
@@ -5707,7 +5867,7 @@ gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
 
        skb_gro_reset_offset(skb);
 
-       ret = napi_skb_finish(dev_gro_receive(napi, skb), skb);
+       ret = napi_skb_finish(napi, skb, dev_gro_receive(napi, skb));
        trace_napi_gro_receive_exit(ret);
 
        return ret;
@@ -5753,26 +5913,6 @@ struct sk_buff *napi_get_frags(struct napi_struct *napi)
 }
 EXPORT_SYMBOL(napi_get_frags);
 
-/* Pass the currently batched GRO_NORMAL SKBs up to the stack. */
-static void gro_normal_list(struct napi_struct *napi)
-{
-       if (!napi->rx_count)
-               return;
-       netif_receive_skb_list_internal(&napi->rx_list);
-       INIT_LIST_HEAD(&napi->rx_list);
-       napi->rx_count = 0;
-}
-
-/* Queue one GRO_NORMAL SKB up for list processing.  If batch size exceeded,
- * pass the whole batch up to the stack.
- */
-static void gro_normal_one(struct napi_struct *napi, struct sk_buff *skb)
-{
-       list_add_tail(&skb->list, &napi->rx_list);
-       if (++napi->rx_count >= gro_normal_batch)
-               gro_normal_list(napi);
-}
-
 static gro_result_t napi_frags_finish(struct napi_struct *napi,
                                      struct sk_buff *skb,
                                      gro_result_t ret)
@@ -6489,6 +6629,9 @@ struct netdev_adjacent {
        /* upper master flag, there can only be one master device per list */
        bool master;
 
+       /* lookup ignore flag */
+       bool ignore;
+
        /* counter for the number of times this device was added to us */
        u16 ref_nr;
 
@@ -6511,7 +6654,7 @@ static struct netdev_adjacent *__netdev_find_adj(struct net_device *adj_dev,
        return NULL;
 }
 
-static int __netdev_has_upper_dev(struct net_device *upper_dev, void *data)
+static int ____netdev_has_upper_dev(struct net_device *upper_dev, void *data)
 {
        struct net_device *dev = data;
 
@@ -6532,7 +6675,7 @@ bool netdev_has_upper_dev(struct net_device *dev,
 {
        ASSERT_RTNL();
 
-       return netdev_walk_all_upper_dev_rcu(dev, __netdev_has_upper_dev,
+       return netdev_walk_all_upper_dev_rcu(dev, ____netdev_has_upper_dev,
                                             upper_dev);
 }
 EXPORT_SYMBOL(netdev_has_upper_dev);
@@ -6550,7 +6693,7 @@ EXPORT_SYMBOL(netdev_has_upper_dev);
 bool netdev_has_upper_dev_all_rcu(struct net_device *dev,
                                  struct net_device *upper_dev)
 {
-       return !!netdev_walk_all_upper_dev_rcu(dev, __netdev_has_upper_dev,
+       return !!netdev_walk_all_upper_dev_rcu(dev, ____netdev_has_upper_dev,
                                               upper_dev);
 }
 EXPORT_SYMBOL(netdev_has_upper_dev_all_rcu);
@@ -6594,6 +6737,22 @@ struct net_device *netdev_master_upper_dev_get(struct net_device *dev)
 }
 EXPORT_SYMBOL(netdev_master_upper_dev_get);
 
+static struct net_device *__netdev_master_upper_dev_get(struct net_device *dev)
+{
+       struct netdev_adjacent *upper;
+
+       ASSERT_RTNL();
+
+       if (list_empty(&dev->adj_list.upper))
+               return NULL;
+
+       upper = list_first_entry(&dev->adj_list.upper,
+                                struct netdev_adjacent, list);
+       if (likely(upper->master) && !upper->ignore)
+               return upper->dev;
+       return NULL;
+}
+
 /**
  * netdev_has_any_lower_dev - Check if device is linked to some device
  * @dev: device
@@ -6644,6 +6803,23 @@ struct net_device *netdev_upper_get_next_dev_rcu(struct net_device *dev,
 }
 EXPORT_SYMBOL(netdev_upper_get_next_dev_rcu);
 
+static struct net_device *__netdev_next_upper_dev(struct net_device *dev,
+                                                 struct list_head **iter,
+                                                 bool *ignore)
+{
+       struct netdev_adjacent *upper;
+
+       upper = list_entry((*iter)->next, struct netdev_adjacent, list);
+
+       if (&upper->list == &dev->adj_list.upper)
+               return NULL;
+
+       *iter = &upper->list;
+       *ignore = upper->ignore;
+
+       return upper->dev;
+}
+
 static struct net_device *netdev_next_upper_dev_rcu(struct net_device *dev,
                                                    struct list_head **iter)
 {
@@ -6661,34 +6837,111 @@ static struct net_device *netdev_next_upper_dev_rcu(struct net_device *dev,
        return upper->dev;
 }
 
+static int __netdev_walk_all_upper_dev(struct net_device *dev,
+                                      int (*fn)(struct net_device *dev,
+                                                void *data),
+                                      void *data)
+{
+       struct net_device *udev, *next, *now, *dev_stack[MAX_NEST_DEV + 1];
+       struct list_head *niter, *iter, *iter_stack[MAX_NEST_DEV + 1];
+       int ret, cur = 0;
+       bool ignore;
+
+       now = dev;
+       iter = &dev->adj_list.upper;
+
+       while (1) {
+               if (now != dev) {
+                       ret = fn(now, data);
+                       if (ret)
+                               return ret;
+               }
+
+               next = NULL;
+               while (1) {
+                       udev = __netdev_next_upper_dev(now, &iter, &ignore);
+                       if (!udev)
+                               break;
+                       if (ignore)
+                               continue;
+
+                       next = udev;
+                       niter = &udev->adj_list.upper;
+                       dev_stack[cur] = now;
+                       iter_stack[cur++] = iter;
+                       break;
+               }
+
+               if (!next) {
+                       if (!cur)
+                               return 0;
+                       next = dev_stack[--cur];
+                       niter = iter_stack[cur];
+               }
+
+               now = next;
+               iter = niter;
+       }
+
+       return 0;
+}
+
 int netdev_walk_all_upper_dev_rcu(struct net_device *dev,
                                  int (*fn)(struct net_device *dev,
                                            void *data),
                                  void *data)
 {
-       struct net_device *udev;
-       struct list_head *iter;
-       int ret;
+       struct net_device *udev, *next, *now, *dev_stack[MAX_NEST_DEV + 1];
+       struct list_head *niter, *iter, *iter_stack[MAX_NEST_DEV + 1];
+       int ret, cur = 0;
 
-       for (iter = &dev->adj_list.upper,
-            udev = netdev_next_upper_dev_rcu(dev, &iter);
-            udev;
-            udev = netdev_next_upper_dev_rcu(dev, &iter)) {
-               /* first is the upper device itself */
-               ret = fn(udev, data);
-               if (ret)
-                       return ret;
+       now = dev;
+       iter = &dev->adj_list.upper;
 
-               /* then look at all of its upper devices */
-               ret = netdev_walk_all_upper_dev_rcu(udev, fn, data);
-               if (ret)
-                       return ret;
+       while (1) {
+               if (now != dev) {
+                       ret = fn(now, data);
+                       if (ret)
+                               return ret;
+               }
+
+               next = NULL;
+               while (1) {
+                       udev = netdev_next_upper_dev_rcu(now, &iter);
+                       if (!udev)
+                               break;
+
+                       next = udev;
+                       niter = &udev->adj_list.upper;
+                       dev_stack[cur] = now;
+                       iter_stack[cur++] = iter;
+                       break;
+               }
+
+               if (!next) {
+                       if (!cur)
+                               return 0;
+                       next = dev_stack[--cur];
+                       niter = iter_stack[cur];
+               }
+
+               now = next;
+               iter = niter;
        }
 
        return 0;
 }
 EXPORT_SYMBOL_GPL(netdev_walk_all_upper_dev_rcu);
 
+static bool __netdev_has_upper_dev(struct net_device *dev,
+                                  struct net_device *upper_dev)
+{
+       ASSERT_RTNL();
+
+       return __netdev_walk_all_upper_dev(dev, ____netdev_has_upper_dev,
+                                          upper_dev);
+}
+
 /**
  * netdev_lower_get_next_private - Get the next ->private from the
  *                                lower neighbour list
@@ -6785,34 +7038,119 @@ static struct net_device *netdev_next_lower_dev(struct net_device *dev,
        return lower->dev;
 }
 
+static struct net_device *__netdev_next_lower_dev(struct net_device *dev,
+                                                 struct list_head **iter,
+                                                 bool *ignore)
+{
+       struct netdev_adjacent *lower;
+
+       lower = list_entry((*iter)->next, struct netdev_adjacent, list);
+
+       if (&lower->list == &dev->adj_list.lower)
+               return NULL;
+
+       *iter = &lower->list;
+       *ignore = lower->ignore;
+
+       return lower->dev;
+}
+
 int netdev_walk_all_lower_dev(struct net_device *dev,
                              int (*fn)(struct net_device *dev,
                                        void *data),
                              void *data)
 {
-       struct net_device *ldev;
-       struct list_head *iter;
-       int ret;
+       struct net_device *ldev, *next, *now, *dev_stack[MAX_NEST_DEV + 1];
+       struct list_head *niter, *iter, *iter_stack[MAX_NEST_DEV + 1];
+       int ret, cur = 0;
 
-       for (iter = &dev->adj_list.lower,
-            ldev = netdev_next_lower_dev(dev, &iter);
-            ldev;
-            ldev = netdev_next_lower_dev(dev, &iter)) {
-               /* first is the lower device itself */
-               ret = fn(ldev, data);
-               if (ret)
-                       return ret;
+       now = dev;
+       iter = &dev->adj_list.lower;
 
-               /* then look at all of its lower devices */
-               ret = netdev_walk_all_lower_dev(ldev, fn, data);
-               if (ret)
-                       return ret;
+       while (1) {
+               if (now != dev) {
+                       ret = fn(now, data);
+                       if (ret)
+                               return ret;
+               }
+
+               next = NULL;
+               while (1) {
+                       ldev = netdev_next_lower_dev(now, &iter);
+                       if (!ldev)
+                               break;
+
+                       next = ldev;
+                       niter = &ldev->adj_list.lower;
+                       dev_stack[cur] = now;
+                       iter_stack[cur++] = iter;
+                       break;
+               }
+
+               if (!next) {
+                       if (!cur)
+                               return 0;
+                       next = dev_stack[--cur];
+                       niter = iter_stack[cur];
+               }
+
+               now = next;
+               iter = niter;
        }
 
        return 0;
 }
 EXPORT_SYMBOL_GPL(netdev_walk_all_lower_dev);
 
+static int __netdev_walk_all_lower_dev(struct net_device *dev,
+                                      int (*fn)(struct net_device *dev,
+                                                void *data),
+                                      void *data)
+{
+       struct net_device *ldev, *next, *now, *dev_stack[MAX_NEST_DEV + 1];
+       struct list_head *niter, *iter, *iter_stack[MAX_NEST_DEV + 1];
+       int ret, cur = 0;
+       bool ignore;
+
+       now = dev;
+       iter = &dev->adj_list.lower;
+
+       while (1) {
+               if (now != dev) {
+                       ret = fn(now, data);
+                       if (ret)
+                               return ret;
+               }
+
+               next = NULL;
+               while (1) {
+                       ldev = __netdev_next_lower_dev(now, &iter, &ignore);
+                       if (!ldev)
+                               break;
+                       if (ignore)
+                               continue;
+
+                       next = ldev;
+                       niter = &ldev->adj_list.lower;
+                       dev_stack[cur] = now;
+                       iter_stack[cur++] = iter;
+                       break;
+               }
+
+               if (!next) {
+                       if (!cur)
+                               return 0;
+                       next = dev_stack[--cur];
+                       niter = iter_stack[cur];
+               }
+
+               now = next;
+               iter = niter;
+       }
+
+       return 0;
+}
+
 static struct net_device *netdev_next_lower_dev_rcu(struct net_device *dev,
                                                    struct list_head **iter)
 {
@@ -6827,28 +7165,99 @@ static struct net_device *netdev_next_lower_dev_rcu(struct net_device *dev,
        return lower->dev;
 }
 
-int netdev_walk_all_lower_dev_rcu(struct net_device *dev,
-                                 int (*fn)(struct net_device *dev,
-                                           void *data),
-                                 void *data)
+static u8 __netdev_upper_depth(struct net_device *dev)
+{
+       struct net_device *udev;
+       struct list_head *iter;
+       u8 max_depth = 0;
+       bool ignore;
+
+       for (iter = &dev->adj_list.upper,
+            udev = __netdev_next_upper_dev(dev, &iter, &ignore);
+            udev;
+            udev = __netdev_next_upper_dev(dev, &iter, &ignore)) {
+               if (ignore)
+                       continue;
+               if (max_depth < udev->upper_level)
+                       max_depth = udev->upper_level;
+       }
+
+       return max_depth;
+}
+
+static u8 __netdev_lower_depth(struct net_device *dev)
 {
        struct net_device *ldev;
        struct list_head *iter;
-       int ret;
+       u8 max_depth = 0;
+       bool ignore;
 
        for (iter = &dev->adj_list.lower,
-            ldev = netdev_next_lower_dev_rcu(dev, &iter);
+            ldev = __netdev_next_lower_dev(dev, &iter, &ignore);
             ldev;
-            ldev = netdev_next_lower_dev_rcu(dev, &iter)) {
-               /* first is the lower device itself */
-               ret = fn(ldev, data);
-               if (ret)
-                       return ret;
+            ldev = __netdev_next_lower_dev(dev, &iter, &ignore)) {
+               if (ignore)
+                       continue;
+               if (max_depth < ldev->lower_level)
+                       max_depth = ldev->lower_level;
+       }
 
-               /* then look at all of its lower devices */
-               ret = netdev_walk_all_lower_dev_rcu(ldev, fn, data);
-               if (ret)
-                       return ret;
+       return max_depth;
+}
+
+static int __netdev_update_upper_level(struct net_device *dev, void *data)
+{
+       dev->upper_level = __netdev_upper_depth(dev) + 1;
+       return 0;
+}
+
+static int __netdev_update_lower_level(struct net_device *dev, void *data)
+{
+       dev->lower_level = __netdev_lower_depth(dev) + 1;
+       return 0;
+}
+
+int netdev_walk_all_lower_dev_rcu(struct net_device *dev,
+                                 int (*fn)(struct net_device *dev,
+                                           void *data),
+                                 void *data)
+{
+       struct net_device *ldev, *next, *now, *dev_stack[MAX_NEST_DEV + 1];
+       struct list_head *niter, *iter, *iter_stack[MAX_NEST_DEV + 1];
+       int ret, cur = 0;
+
+       now = dev;
+       iter = &dev->adj_list.lower;
+
+       while (1) {
+               if (now != dev) {
+                       ret = fn(now, data);
+                       if (ret)
+                               return ret;
+               }
+
+               next = NULL;
+               while (1) {
+                       ldev = netdev_next_lower_dev_rcu(now, &iter);
+                       if (!ldev)
+                               break;
+
+                       next = ldev;
+                       niter = &ldev->adj_list.lower;
+                       dev_stack[cur] = now;
+                       iter_stack[cur++] = iter;
+                       break;
+               }
+
+               if (!next) {
+                       if (!cur)
+                               return 0;
+                       next = dev_stack[--cur];
+                       niter = iter_stack[cur];
+               }
+
+               now = next;
+               iter = niter;
        }
 
        return 0;
@@ -6952,6 +7361,7 @@ static int __netdev_adjacent_dev_insert(struct net_device *dev,
        adj->master = master;
        adj->ref_nr = 1;
        adj->private = private;
+       adj->ignore = false;
        dev_hold(adj_dev);
 
        pr_debug("Insert adjacency: dev %s adj_dev %s adj->ref_nr %d; dev_hold on %s\n",
@@ -7102,14 +7512,17 @@ static int __netdev_upper_dev_link(struct net_device *dev,
                return -EBUSY;
 
        /* To prevent loops, check if dev is not upper device to upper_dev. */
-       if (netdev_has_upper_dev(upper_dev, dev))
+       if (__netdev_has_upper_dev(upper_dev, dev))
                return -EBUSY;
 
+       if ((dev->lower_level + upper_dev->upper_level) > MAX_NEST_DEV)
+               return -EMLINK;
+
        if (!master) {
-               if (netdev_has_upper_dev(dev, upper_dev))
+               if (__netdev_has_upper_dev(dev, upper_dev))
                        return -EEXIST;
        } else {
-               master_dev = netdev_master_upper_dev_get(dev);
+               master_dev = __netdev_master_upper_dev_get(dev);
                if (master_dev)
                        return master_dev == upper_dev ? -EEXIST : -EBUSY;
        }
@@ -7131,6 +7544,13 @@ static int __netdev_upper_dev_link(struct net_device *dev,
        if (ret)
                goto rollback;
 
+       __netdev_update_upper_level(dev, NULL);
+       __netdev_walk_all_lower_dev(dev, __netdev_update_upper_level, NULL);
+
+       __netdev_update_lower_level(upper_dev, NULL);
+       __netdev_walk_all_upper_dev(upper_dev, __netdev_update_lower_level,
+                                   NULL);
+
        return 0;
 
 rollback:
@@ -7213,9 +7633,96 @@ void netdev_upper_dev_unlink(struct net_device *dev,
 
        call_netdevice_notifiers_info(NETDEV_CHANGEUPPER,
                                      &changeupper_info.info);
+
+       __netdev_update_upper_level(dev, NULL);
+       __netdev_walk_all_lower_dev(dev, __netdev_update_upper_level, NULL);
+
+       __netdev_update_lower_level(upper_dev, NULL);
+       __netdev_walk_all_upper_dev(upper_dev, __netdev_update_lower_level,
+                                   NULL);
 }
 EXPORT_SYMBOL(netdev_upper_dev_unlink);
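
The -EMLINK check in __netdev_upper_dev_link() and the level updates above rely on two counters: lower_level counts the device plus everything stacked below it, upper_level counts the device plus everything stacked above it, and a link is refused when the combined stack would exceed MAX_NEST_DEV. A standalone toy model of just that arithmetic (not kernel code):

#define MAX_NEST_DEV 8

struct toy_dev {
	unsigned char upper_level;	/* stack above, including self */
	unsigned char lower_level;	/* stack below, including self */
};

/* Could "dev" be linked under "upper_dev"? With lower_level == 3 and
 * upper_level == 6 the resulting stack would be 9 deep, so it is refused.
 */
static int toy_may_link(const struct toy_dev *dev,
			const struct toy_dev *upper_dev)
{
	return dev->lower_level + upper_dev->upper_level <= MAX_NEST_DEV;
}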
 
+static void __netdev_adjacent_dev_set(struct net_device *upper_dev,
+                                     struct net_device *lower_dev,
+                                     bool val)
+{
+       struct netdev_adjacent *adj;
+
+       adj = __netdev_find_adj(lower_dev, &upper_dev->adj_list.lower);
+       if (adj)
+               adj->ignore = val;
+
+       adj = __netdev_find_adj(upper_dev, &lower_dev->adj_list.upper);
+       if (adj)
+               adj->ignore = val;
+}
+
+static void netdev_adjacent_dev_disable(struct net_device *upper_dev,
+                                       struct net_device *lower_dev)
+{
+       __netdev_adjacent_dev_set(upper_dev, lower_dev, true);
+}
+
+static void netdev_adjacent_dev_enable(struct net_device *upper_dev,
+                                      struct net_device *lower_dev)
+{
+       __netdev_adjacent_dev_set(upper_dev, lower_dev, false);
+}
+
+int netdev_adjacent_change_prepare(struct net_device *old_dev,
+                                  struct net_device *new_dev,
+                                  struct net_device *dev,
+                                  struct netlink_ext_ack *extack)
+{
+       int err;
+
+       if (!new_dev)
+               return 0;
+
+       if (old_dev && new_dev != old_dev)
+               netdev_adjacent_dev_disable(dev, old_dev);
+
+       err = netdev_upper_dev_link(new_dev, dev, extack);
+       if (err) {
+               if (old_dev && new_dev != old_dev)
+                       netdev_adjacent_dev_enable(dev, old_dev);
+               return err;
+       }
+
+       return 0;
+}
+EXPORT_SYMBOL(netdev_adjacent_change_prepare);
+
+void netdev_adjacent_change_commit(struct net_device *old_dev,
+                                  struct net_device *new_dev,
+                                  struct net_device *dev)
+{
+       if (!new_dev || !old_dev)
+               return;
+
+       if (new_dev == old_dev)
+               return;
+
+       netdev_adjacent_dev_enable(dev, old_dev);
+       netdev_upper_dev_unlink(old_dev, dev);
+}
+EXPORT_SYMBOL(netdev_adjacent_change_commit);
+
+void netdev_adjacent_change_abort(struct net_device *old_dev,
+                                 struct net_device *new_dev,
+                                 struct net_device *dev)
+{
+       if (!new_dev)
+               return;
+
+       if (old_dev && new_dev != old_dev)
+               netdev_adjacent_dev_enable(dev, old_dev);
+
+       netdev_upper_dev_unlink(new_dev, dev);
+}
+EXPORT_SYMBOL(netdev_adjacent_change_abort);
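
netdev_adjacent_change_prepare/commit/abort implement a two-phase swap of the lower device under an upper: prepare marks the old adjacency as ignored (so the duplicate and loop checks skip it) and links the new device, commit finally unlinks the old one, and abort restores the old adjacency when the caller's own reconfiguration fails. A hedged caller sketch with hypothetical names, assuming rtnl_lock is held:

static int example_swap_lower(struct net_device *upper,
			      struct net_device *old_lower,
			      struct net_device *new_lower,
			      struct netlink_ext_ack *extack)
{
	int err;

	err = netdev_adjacent_change_prepare(old_lower, new_lower, upper,
					     extack);
	if (err)		/* prepare undoes its own work on failure */
		return err;

	/* Driver-specific reconfiguration would go here; on failure call
	 * netdev_adjacent_change_abort(old_lower, new_lower, upper) and
	 * return the error instead of committing.
	 */

	netdev_adjacent_change_commit(old_lower, new_lower, upper);
	return 0;
}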
+
 /**
  * netdev_bonding_info_change - Dispatch event about slave change
  * @dev: device
@@ -7329,25 +7836,6 @@ void *netdev_lower_dev_get_private(struct net_device *dev,
 EXPORT_SYMBOL(netdev_lower_dev_get_private);
 
 
-int dev_get_nest_level(struct net_device *dev)
-{
-       struct net_device *lower = NULL;
-       struct list_head *iter;
-       int max_nest = -1;
-       int nest;
-
-       ASSERT_RTNL();
-
-       netdev_for_each_lower_dev(dev, lower, iter) {
-               nest = dev_get_nest_level(lower);
-               if (max_nest < nest)
-                       max_nest = nest;
-       }
-
-       return max_nest + 1;
-}
-EXPORT_SYMBOL(dev_get_nest_level);
-
 /**
  * netdev_lower_change - Dispatch event about lower device state change
  * @lower_dev: device
@@ -8154,7 +8642,8 @@ int dev_change_xdp_fd(struct net_device *dev, struct netlink_ext_ack *extack,
                        return -EINVAL;
                }
 
-               if (prog->aux->id == prog_id) {
+               /* prog->aux->id may be 0 for orphaned device-bound progs */
+               if (prog->aux->id && prog->aux->id == prog_id) {
                        bpf_prog_put(prog);
                        return 0;
                }
@@ -8264,6 +8753,9 @@ static void rollback_registered_many(struct list_head *head)
                dev_uc_flush(dev);
                dev_mc_flush(dev);
 
+               netdev_name_node_alt_flush(dev);
+               netdev_name_node_free(dev->name_node);
+
                if (dev->netdev_ops->ndo_uninit)
                        dev->netdev_ops->ndo_uninit(dev);
 
@@ -8619,7 +9111,7 @@ static void netdev_init_one_queue(struct net_device *dev,
 {
        /* Initialize queue lock */
        spin_lock_init(&queue->_xmit_lock);
-       netdev_set_xmit_lockdep_class(&queue->_xmit_lock, dev->type);
+       lockdep_set_class(&queue->_xmit_lock, &dev->qdisc_xmit_lock_key);
        queue->xmit_lock_owner = -1;
        netdev_queue_numa_node_write(queue, NUMA_NO_NODE);
        queue->dev = dev;
@@ -8666,6 +9158,43 @@ void netif_tx_stop_all_queues(struct net_device *dev)
 }
 EXPORT_SYMBOL(netif_tx_stop_all_queues);
 
+static void netdev_register_lockdep_key(struct net_device *dev)
+{
+       lockdep_register_key(&dev->qdisc_tx_busylock_key);
+       lockdep_register_key(&dev->qdisc_running_key);
+       lockdep_register_key(&dev->qdisc_xmit_lock_key);
+       lockdep_register_key(&dev->addr_list_lock_key);
+}
+
+static void netdev_unregister_lockdep_key(struct net_device *dev)
+{
+       lockdep_unregister_key(&dev->qdisc_tx_busylock_key);
+       lockdep_unregister_key(&dev->qdisc_running_key);
+       lockdep_unregister_key(&dev->qdisc_xmit_lock_key);
+       lockdep_unregister_key(&dev->addr_list_lock_key);
+}
+
+void netdev_update_lockdep_key(struct net_device *dev)
+{
+       struct netdev_queue *queue;
+       int i;
+
+       lockdep_unregister_key(&dev->qdisc_xmit_lock_key);
+       lockdep_unregister_key(&dev->addr_list_lock_key);
+
+       lockdep_register_key(&dev->qdisc_xmit_lock_key);
+       lockdep_register_key(&dev->addr_list_lock_key);
+
+       lockdep_set_class(&dev->addr_list_lock, &dev->addr_list_lock_key);
+       for (i = 0; i < dev->num_tx_queues; i++) {
+               queue = netdev_get_tx_queue(dev, i);
+
+               lockdep_set_class(&queue->_xmit_lock,
+                                 &dev->qdisc_xmit_lock_key);
+       }
+}
+EXPORT_SYMBOL(netdev_update_lockdep_key);
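
The helpers above give each net_device its own dynamically registered lockdep keys in place of the removed per-ARPHRD static key tables. The same pattern in isolation, on a hypothetical heap-allocated object (dynamically registered keys must not live in static storage):

struct example_obj {
	spinlock_t lock;
	struct lock_class_key lock_key;	/* one lock class per object */
};

static struct example_obj *example_obj_alloc(void)
{
	struct example_obj *obj = kzalloc(sizeof(*obj), GFP_KERNEL);

	if (!obj)
		return NULL;
	lockdep_register_key(&obj->lock_key);
	spin_lock_init(&obj->lock);
	lockdep_set_class(&obj->lock, &obj->lock_key);
	return obj;
}

static void example_obj_free(struct example_obj *obj)
{
	lockdep_unregister_key(&obj->lock_key);
	kfree(obj);
}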
+
 /**
  *     register_netdevice      - register a network device
  *     @dev: device to register
@@ -8700,12 +9229,17 @@ int register_netdevice(struct net_device *dev)
        BUG_ON(!net);
 
        spin_lock_init(&dev->addr_list_lock);
-       netdev_set_addr_lockdep_class(dev);
+       lockdep_set_class(&dev->addr_list_lock, &dev->addr_list_lock_key);
 
        ret = dev_get_valid_name(net, dev, dev->name);
        if (ret < 0)
                goto out;
 
+       ret = -ENOMEM;
+       dev->name_node = netdev_name_node_head_alloc(dev);
+       if (!dev->name_node)
+               goto out;
+
        /* Init, if this function is available */
        if (dev->netdev_ops->ndo_init) {
                ret = dev->netdev_ops->ndo_init(dev);
@@ -8827,6 +9361,8 @@ out:
        return ret;
 
 err_uninit:
+       if (dev->name_node)
+               netdev_name_node_free(dev->name_node);
        if (dev->netdev_ops->ndo_uninit)
                dev->netdev_ops->ndo_uninit(dev);
        if (dev->priv_destructor)
@@ -9210,8 +9746,12 @@ struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
 
        dev_net_set(dev, &init_net);
 
+       netdev_register_lockdep_key(dev);
+
        dev->gso_max_size = GSO_MAX_SIZE;
        dev->gso_max_segs = GSO_MAX_SEGS;
+       dev->upper_level = 1;
+       dev->lower_level = 1;
 
        INIT_LIST_HEAD(&dev->napi_list);
        INIT_LIST_HEAD(&dev->unreg_list);
@@ -9292,6 +9832,8 @@ void free_netdev(struct net_device *dev)
        free_percpu(dev->pcpu_refcnt);
        dev->pcpu_refcnt = NULL;
 
+       netdev_unregister_lockdep_key(dev);
+
        /*  Compatibility with error handling in drivers */
        if (dev->reg_state == NETREG_UNINITIALIZED) {
                netdev_freemem(dev);
@@ -9460,7 +10002,7 @@ int dev_change_net_namespace(struct net_device *dev, struct net *net, const char
        call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
        rcu_barrier();
 
-       new_nsid = peernet2id_alloc(dev_net(dev), net);
+       new_nsid = peernet2id_alloc(dev_net(dev), net, GFP_KERNEL);
        /* If there is an ifindex conflict assign a new one */
        if (__dev_get_by_index(net, dev->ifindex))
                new_ifindex = dev_new_index(net);
@@ -9635,6 +10177,8 @@ static int __net_init netdev_init(struct net *net)
        if (net->dev_index_head == NULL)
                goto err_idx;
 
+       RAW_INIT_NOTIFIER_HEAD(&net->netdev_chain);
+
        return 0;
 
 err_idx: