1 // SPDX-License-Identifier: GPL-2.0-or-later
3 * NET3 Protocol independent device support routines.
5 * Derived from the non IP parts of dev.c 1.0.19
7 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
8 * Mark Evans, <evansmp@uhura.aston.ac.uk>
11 * Florian la Roche <rzsfl@rz.uni-sb.de>
12 * Alan Cox <gw4pts@gw4pts.ampr.org>
13 * David Hinds <dahinds@users.sourceforge.net>
14 * Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
15 * Adam Sulmicki <adam@cfar.umd.edu>
16 * Pekka Riikonen <priikone@poesidon.pspt.fi>
19 * D.J. Barrow : Fixed bug where dev->refcnt gets set
20 * to 2 if register_netdev gets called
21 * before net_dev_init & also removed a
22 * few lines of code in the process.
23 * Alan Cox : device private ioctl copies fields back.
24 * Alan Cox : Transmit queue code does relevant
25 * stunts to keep the queue safe.
26 * Alan Cox : Fixed double lock.
27 * Alan Cox : Fixed promisc NULL pointer trap
28 * ???????? : Support the full private ioctl range
29 * Alan Cox : Moved ioctl permission check into
31 * Tim Kordas : SIOCADDMULTI/SIOCDELMULTI
32 * Alan Cox : 100 backlog just doesn't cut it when
33 * you start doing multicast video 8)
34 * Alan Cox : Rewrote net_bh and list manager.
35 * Alan Cox : Fix ETH_P_ALL echoback lengths.
36 * Alan Cox : Took out transmit every packet pass
37 * Saved a few bytes in the ioctl handler
38 * Alan Cox : Network driver sets packet type before
39 * calling netif_rx. Saves a function
41 * Alan Cox : Hashed net_bh()
42 * Richard Kooijman: Timestamp fixes.
43 * Alan Cox : Wrong field in SIOCGIFDSTADDR
44 * Alan Cox : Device lock protection.
45 * Alan Cox : Fixed nasty side effect of device close
47 * Rudi Cilibrasi : Pass the right thing to
49 * Dave Miller : 32bit quantity for the device lock to
50 * make it work out on a Sparc.
51 * Bjorn Ekwall : Added KERNELD hack.
52 * Alan Cox : Cleaned up the backlog initialise.
53 * Craig Metz : SIOCGIFCONF fix if space for under
55 * Thomas Bogendoerfer : Return ENODEV for dev_open, if there
56 * is no device open function.
57 * Andi Kleen : Fix error reporting for SIOCGIFCONF
58 * Michael Chastain : Fix signed/unsigned for SIOCGIFCONF
59 * Cyrus Durgin : Cleaned for KMOD
60 * Adam Sulmicki : Bug Fix : Network Device Unload
61 * A network device unload needs to purge
63 * Paul Rusty Russell : SIOCSIFNAME
64 * Pekka Riikonen : Netdev boot-time settings code
65 * Andrew Morton : Make unregister_netdevice wait
66 * indefinitely on dev->refcnt
67 * J Hadi Salim : - Backlog queue sampling
68 * - netif_rx() feedback
71 #include <linux/uaccess.h>
72 #include <linux/bitmap.h>
73 #include <linux/capability.h>
74 #include <linux/cpu.h>
75 #include <linux/types.h>
76 #include <linux/kernel.h>
77 #include <linux/hash.h>
78 #include <linux/slab.h>
79 #include <linux/sched.h>
80 #include <linux/sched/mm.h>
81 #include <linux/mutex.h>
82 #include <linux/rwsem.h>
83 #include <linux/string.h>
85 #include <linux/socket.h>
86 #include <linux/sockios.h>
87 #include <linux/errno.h>
88 #include <linux/interrupt.h>
89 #include <linux/if_ether.h>
90 #include <linux/netdevice.h>
91 #include <linux/etherdevice.h>
92 #include <linux/ethtool.h>
93 #include <linux/skbuff.h>
94 #include <linux/kthread.h>
95 #include <linux/bpf.h>
96 #include <linux/bpf_trace.h>
97 #include <net/net_namespace.h>
99 #include <net/busy_poll.h>
100 #include <linux/rtnetlink.h>
101 #include <linux/stat.h>
104 #include <net/dst_metadata.h>
106 #include <net/pkt_sched.h>
107 #include <net/pkt_cls.h>
108 #include <net/checksum.h>
109 #include <net/xfrm.h>
111 #include <linux/highmem.h>
112 #include <linux/init.h>
113 #include <linux/module.h>
114 #include <linux/netpoll.h>
115 #include <linux/rcupdate.h>
116 #include <linux/delay.h>
117 #include <net/iw_handler.h>
118 #include <asm/current.h>
119 #include <linux/audit.h>
120 #include <linux/dmaengine.h>
121 #include <linux/err.h>
122 #include <linux/ctype.h>
123 #include <linux/if_arp.h>
124 #include <linux/if_vlan.h>
125 #include <linux/ip.h>
127 #include <net/mpls.h>
128 #include <linux/ipv6.h>
129 #include <linux/in.h>
130 #include <linux/jhash.h>
131 #include <linux/random.h>
132 #include <trace/events/napi.h>
133 #include <trace/events/net.h>
134 #include <trace/events/skb.h>
135 #include <trace/events/qdisc.h>
136 #include <trace/events/xdp.h>
137 #include <linux/inetdevice.h>
138 #include <linux/cpu_rmap.h>
139 #include <linux/static_key.h>
140 #include <linux/hashtable.h>
141 #include <linux/vmalloc.h>
142 #include <linux/if_macvlan.h>
143 #include <linux/errqueue.h>
144 #include <linux/hrtimer.h>
145 #include <linux/netfilter_netdev.h>
146 #include <linux/crash_dump.h>
147 #include <linux/sctp.h>
148 #include <net/udp_tunnel.h>
149 #include <linux/net_namespace.h>
150 #include <linux/indirect_call_wrapper.h>
151 #include <net/devlink.h>
152 #include <linux/pm_runtime.h>
153 #include <linux/prandom.h>
154 #include <linux/once_lite.h>
155 #include <net/netdev_rx_queue.h>
156 #include <net/page_pool/types.h>
157 #include <net/page_pool/helpers.h>
161 #include "net-sysfs.h"
163 static DEFINE_SPINLOCK(ptype_lock);
164 struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly;
166 static int netif_rx_internal(struct sk_buff *skb);
167 static int call_netdevice_notifiers_extack(unsigned long val,
168 struct net_device *dev,
169 struct netlink_ext_ack *extack);
171 static DEFINE_MUTEX(ifalias_mutex);
173 /* protects napi_hash addition/deletion and napi_gen_id */
174 static DEFINE_SPINLOCK(napi_hash_lock);
176 static unsigned int napi_gen_id = NR_CPUS;
177 static DEFINE_READ_MOSTLY_HASHTABLE(napi_hash, 8);
179 static DECLARE_RWSEM(devnet_rename_sem);
181 static inline void dev_base_seq_inc(struct net *net)
183 unsigned int val = net->dev_base_seq + 1;
185 WRITE_ONCE(net->dev_base_seq, val ?: 1);
188 static inline struct hlist_head *dev_name_hash(struct net *net, const char *name)
190 unsigned int hash = full_name_hash(net, name, strnlen(name, IFNAMSIZ));
192 return &net->dev_name_head[hash_32(hash, NETDEV_HASHBITS)];
195 static inline struct hlist_head *dev_index_hash(struct net *net, int ifindex)
197 return &net->dev_index_head[ifindex & (NETDEV_HASHENTRIES - 1)];
200 static inline void rps_lock_irqsave(struct softnet_data *sd,
201 unsigned long *flags)
203 if (IS_ENABLED(CONFIG_RPS))
204 spin_lock_irqsave(&sd->input_pkt_queue.lock, *flags);
205 else if (!IS_ENABLED(CONFIG_PREEMPT_RT))
206 local_irq_save(*flags);
209 static inline void rps_lock_irq_disable(struct softnet_data *sd)
211 if (IS_ENABLED(CONFIG_RPS))
212 spin_lock_irq(&sd->input_pkt_queue.lock);
213 else if (!IS_ENABLED(CONFIG_PREEMPT_RT))
217 static inline void rps_unlock_irq_restore(struct softnet_data *sd,
218 unsigned long *flags)
220 if (IS_ENABLED(CONFIG_RPS))
221 spin_unlock_irqrestore(&sd->input_pkt_queue.lock, *flags);
222 else if (!IS_ENABLED(CONFIG_PREEMPT_RT))
223 local_irq_restore(*flags);
226 static inline void rps_unlock_irq_enable(struct softnet_data *sd)
228 if (IS_ENABLED(CONFIG_RPS))
229 spin_unlock_irq(&sd->input_pkt_queue.lock);
230 else if (!IS_ENABLED(CONFIG_PREEMPT_RT))
234 static struct netdev_name_node *netdev_name_node_alloc(struct net_device *dev,
237 struct netdev_name_node *name_node;
239 name_node = kmalloc(sizeof(*name_node), GFP_KERNEL);
242 INIT_HLIST_NODE(&name_node->hlist);
243 name_node->dev = dev;
244 name_node->name = name;
248 static struct netdev_name_node *
249 netdev_name_node_head_alloc(struct net_device *dev)
251 struct netdev_name_node *name_node;
253 name_node = netdev_name_node_alloc(dev, dev->name);
256 INIT_LIST_HEAD(&name_node->list);
260 static void netdev_name_node_free(struct netdev_name_node *name_node)
265 static void netdev_name_node_add(struct net *net,
266 struct netdev_name_node *name_node)
268 hlist_add_head_rcu(&name_node->hlist,
269 dev_name_hash(net, name_node->name));
272 static void netdev_name_node_del(struct netdev_name_node *name_node)
274 hlist_del_rcu(&name_node->hlist);
277 static struct netdev_name_node *netdev_name_node_lookup(struct net *net,
280 struct hlist_head *head = dev_name_hash(net, name);
281 struct netdev_name_node *name_node;
283 hlist_for_each_entry(name_node, head, hlist)
284 if (!strcmp(name_node->name, name))
289 static struct netdev_name_node *netdev_name_node_lookup_rcu(struct net *net,
292 struct hlist_head *head = dev_name_hash(net, name);
293 struct netdev_name_node *name_node;
295 hlist_for_each_entry_rcu(name_node, head, hlist)
296 if (!strcmp(name_node->name, name))
301 bool netdev_name_in_use(struct net *net, const char *name)
303 return netdev_name_node_lookup(net, name);
305 EXPORT_SYMBOL(netdev_name_in_use);
307 int netdev_name_node_alt_create(struct net_device *dev, const char *name)
309 struct netdev_name_node *name_node;
310 struct net *net = dev_net(dev);
312 name_node = netdev_name_node_lookup(net, name);
315 name_node = netdev_name_node_alloc(dev, name);
318 netdev_name_node_add(net, name_node);
319 /* The node that holds dev->name acts as a head of per-device list. */
320 list_add_tail_rcu(&name_node->list, &dev->name_node->list);
325 static void netdev_name_node_alt_free(struct rcu_head *head)
327 struct netdev_name_node *name_node =
328 container_of(head, struct netdev_name_node, rcu);
330 kfree(name_node->name);
331 netdev_name_node_free(name_node);
334 static void __netdev_name_node_alt_destroy(struct netdev_name_node *name_node)
336 netdev_name_node_del(name_node);
337 list_del(&name_node->list);
338 call_rcu(&name_node->rcu, netdev_name_node_alt_free);
341 int netdev_name_node_alt_destroy(struct net_device *dev, const char *name)
343 struct netdev_name_node *name_node;
344 struct net *net = dev_net(dev);
346 name_node = netdev_name_node_lookup(net, name);
349 /* lookup might have found our primary name or a name belonging
352 if (name_node == dev->name_node || name_node->dev != dev)
355 __netdev_name_node_alt_destroy(name_node);
359 static void netdev_name_node_alt_flush(struct net_device *dev)
361 struct netdev_name_node *name_node, *tmp;
363 list_for_each_entry_safe(name_node, tmp, &dev->name_node->list, list) {
364 list_del(&name_node->list);
365 netdev_name_node_alt_free(&name_node->rcu);
369 /* Device list insertion */
370 static void list_netdevice(struct net_device *dev)
372 struct netdev_name_node *name_node;
373 struct net *net = dev_net(dev);
377 list_add_tail_rcu(&dev->dev_list, &net->dev_base_head);
378 netdev_name_node_add(net, dev->name_node);
379 hlist_add_head_rcu(&dev->index_hlist,
380 dev_index_hash(net, dev->ifindex));
382 netdev_for_each_altname(dev, name_node)
383 netdev_name_node_add(net, name_node);
385 /* We reserved the ifindex, this can't fail */
386 WARN_ON(xa_store(&net->dev_by_index, dev->ifindex, dev, GFP_KERNEL));
388 dev_base_seq_inc(net);
391 /* Device list removal
392 * caller must respect a RCU grace period before freeing/reusing dev
394 static void unlist_netdevice(struct net_device *dev)
396 struct netdev_name_node *name_node;
397 struct net *net = dev_net(dev);
401 xa_erase(&net->dev_by_index, dev->ifindex);
403 netdev_for_each_altname(dev, name_node)
404 netdev_name_node_del(name_node);
406 /* Unlink dev from the device chain */
407 list_del_rcu(&dev->dev_list);
408 netdev_name_node_del(dev->name_node);
409 hlist_del_rcu(&dev->index_hlist);
411 dev_base_seq_inc(dev_net(dev));
418 static RAW_NOTIFIER_HEAD(netdev_chain);
421 * Device drivers call our routines to queue packets here. We empty the
422 * queue in the local softnet handler.
425 DEFINE_PER_CPU_ALIGNED(struct softnet_data, softnet_data);
426 EXPORT_PER_CPU_SYMBOL(softnet_data);
428 /* Page_pool has a lockless array/stack to alloc/recycle pages.
429 * PP consumers must pay attention to run APIs in the appropriate context
430 * (e.g. NAPI context).
432 static DEFINE_PER_CPU_ALIGNED(struct page_pool *, system_page_pool);
434 #ifdef CONFIG_LOCKDEP
436 * register_netdevice() inits txq->_xmit_lock and sets lockdep class
437 * according to dev->type
439 static const unsigned short netdev_lock_type[] = {
440 ARPHRD_NETROM, ARPHRD_ETHER, ARPHRD_EETHER, ARPHRD_AX25,
441 ARPHRD_PRONET, ARPHRD_CHAOS, ARPHRD_IEEE802, ARPHRD_ARCNET,
442 ARPHRD_APPLETLK, ARPHRD_DLCI, ARPHRD_ATM, ARPHRD_METRICOM,
443 ARPHRD_IEEE1394, ARPHRD_EUI64, ARPHRD_INFINIBAND, ARPHRD_SLIP,
444 ARPHRD_CSLIP, ARPHRD_SLIP6, ARPHRD_CSLIP6, ARPHRD_RSRVD,
445 ARPHRD_ADAPT, ARPHRD_ROSE, ARPHRD_X25, ARPHRD_HWX25,
446 ARPHRD_PPP, ARPHRD_CISCO, ARPHRD_LAPB, ARPHRD_DDCMP,
447 ARPHRD_RAWHDLC, ARPHRD_TUNNEL, ARPHRD_TUNNEL6, ARPHRD_FRAD,
448 ARPHRD_SKIP, ARPHRD_LOOPBACK, ARPHRD_LOCALTLK, ARPHRD_FDDI,
449 ARPHRD_BIF, ARPHRD_SIT, ARPHRD_IPDDP, ARPHRD_IPGRE,
450 ARPHRD_PIMREG, ARPHRD_HIPPI, ARPHRD_ASH, ARPHRD_ECONET,
451 ARPHRD_IRDA, ARPHRD_FCPP, ARPHRD_FCAL, ARPHRD_FCPL,
452 ARPHRD_FCFABRIC, ARPHRD_IEEE80211, ARPHRD_IEEE80211_PRISM,
453 ARPHRD_IEEE80211_RADIOTAP, ARPHRD_PHONET, ARPHRD_PHONET_PIPE,
454 ARPHRD_IEEE802154, ARPHRD_VOID, ARPHRD_NONE};
456 static const char *const netdev_lock_name[] = {
457 "_xmit_NETROM", "_xmit_ETHER", "_xmit_EETHER", "_xmit_AX25",
458 "_xmit_PRONET", "_xmit_CHAOS", "_xmit_IEEE802", "_xmit_ARCNET",
459 "_xmit_APPLETLK", "_xmit_DLCI", "_xmit_ATM", "_xmit_METRICOM",
460 "_xmit_IEEE1394", "_xmit_EUI64", "_xmit_INFINIBAND", "_xmit_SLIP",
461 "_xmit_CSLIP", "_xmit_SLIP6", "_xmit_CSLIP6", "_xmit_RSRVD",
462 "_xmit_ADAPT", "_xmit_ROSE", "_xmit_X25", "_xmit_HWX25",
463 "_xmit_PPP", "_xmit_CISCO", "_xmit_LAPB", "_xmit_DDCMP",
464 "_xmit_RAWHDLC", "_xmit_TUNNEL", "_xmit_TUNNEL6", "_xmit_FRAD",
465 "_xmit_SKIP", "_xmit_LOOPBACK", "_xmit_LOCALTLK", "_xmit_FDDI",
466 "_xmit_BIF", "_xmit_SIT", "_xmit_IPDDP", "_xmit_IPGRE",
467 "_xmit_PIMREG", "_xmit_HIPPI", "_xmit_ASH", "_xmit_ECONET",
468 "_xmit_IRDA", "_xmit_FCPP", "_xmit_FCAL", "_xmit_FCPL",
469 "_xmit_FCFABRIC", "_xmit_IEEE80211", "_xmit_IEEE80211_PRISM",
470 "_xmit_IEEE80211_RADIOTAP", "_xmit_PHONET", "_xmit_PHONET_PIPE",
471 "_xmit_IEEE802154", "_xmit_VOID", "_xmit_NONE"};
473 static struct lock_class_key netdev_xmit_lock_key[ARRAY_SIZE(netdev_lock_type)];
474 static struct lock_class_key netdev_addr_lock_key[ARRAY_SIZE(netdev_lock_type)];
476 static inline unsigned short netdev_lock_pos(unsigned short dev_type)
480 for (i = 0; i < ARRAY_SIZE(netdev_lock_type); i++)
481 if (netdev_lock_type[i] == dev_type)
483 /* the last key is used by default */
484 return ARRAY_SIZE(netdev_lock_type) - 1;
487 static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
488 unsigned short dev_type)
492 i = netdev_lock_pos(dev_type);
493 lockdep_set_class_and_name(lock, &netdev_xmit_lock_key[i],
494 netdev_lock_name[i]);
497 static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
501 i = netdev_lock_pos(dev->type);
502 lockdep_set_class_and_name(&dev->addr_list_lock,
503 &netdev_addr_lock_key[i],
504 netdev_lock_name[i]);
507 static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
508 unsigned short dev_type)
512 static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
517 /*******************************************************************************
519 * Protocol management and registration routines
521 *******************************************************************************/
525 * Add a protocol ID to the list. Now that the input handler is
526 * smarter we can dispense with all the messy stuff that used to be
529 * BEWARE!!! Protocol handlers, mangling input packets,
530 * MUST BE last in hash buckets and checking protocol handlers
531 * MUST start from promiscuous ptype_all chain in net_bh.
532 * It is true now, do not change it.
533 * Explanation: if a protocol handler that mangles packets were
534 * first on the list, it could not tell that the packet is cloned
535 * and must be copied-on-write, so it would modify the clone in
536 * place and subsequent readers would see a corrupted packet.
540 static inline struct list_head *ptype_head(const struct packet_type *pt)
542 if (pt->type == htons(ETH_P_ALL))
543 return pt->dev ? &pt->dev->ptype_all : &net_hotdata.ptype_all;
545 return pt->dev ? &pt->dev->ptype_specific :
546 &ptype_base[ntohs(pt->type) & PTYPE_HASH_MASK];
550 * dev_add_pack - add packet handler
551 * @pt: packet type declaration
553 * Add a protocol handler to the networking stack. The passed &packet_type
554 * is linked into kernel lists and may not be freed until it has been
555 * removed from the kernel lists.
557 * This call does not sleep, therefore it cannot
558 * guarantee that all CPUs currently in the middle of receiving packets
559 * will see the new packet type (until the next received packet).
562 void dev_add_pack(struct packet_type *pt)
564 struct list_head *head = ptype_head(pt);
566 spin_lock(&ptype_lock);
567 list_add_rcu(&pt->list, head);
568 spin_unlock(&ptype_lock);
570 EXPORT_SYMBOL(dev_add_pack);
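/* Usage sketch (illustrative only): a protocol module might register a
 * handler for IPv4 frames with dev_add_pack() and remove it again with
 * dev_remove_pack().  The my_ipv4_rcv() and my_ipv4_pt names are
 * hypothetical; a real handler would do more than drop the packet.
 *
 *	static int my_ipv4_rcv(struct sk_buff *skb, struct net_device *dev,
 *			       struct packet_type *pt,
 *			       struct net_device *orig_dev)
 *	{
 *		kfree_skb(skb);		// this sketch just consumes the frame
 *		return NET_RX_SUCCESS;
 *	}
 *
 *	static struct packet_type my_ipv4_pt __read_mostly = {
 *		.type	= htons(ETH_P_IP),
 *		.func	= my_ipv4_rcv,
 *	};
 *
 *	dev_add_pack(&my_ipv4_pt);	// e.g. from module init
 *	dev_remove_pack(&my_ipv4_pt);	// e.g. from module exit
 */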
573 * __dev_remove_pack - remove packet handler
574 * @pt: packet type declaration
576 * Remove a protocol handler that was previously added to the kernel
577 * protocol handlers by dev_add_pack(). The passed &packet_type is removed
578 * from the kernel lists and can be freed or reused once this function
581 * The packet type might still be in use by receivers
582 * and must not be freed until after all the CPUs have gone
583 * through a quiescent state.
585 void __dev_remove_pack(struct packet_type *pt)
587 struct list_head *head = ptype_head(pt);
588 struct packet_type *pt1;
590 spin_lock(&ptype_lock);
592 list_for_each_entry(pt1, head, list) {
594 list_del_rcu(&pt->list);
599 pr_warn("dev_remove_pack: %p not found\n", pt);
601 spin_unlock(&ptype_lock);
603 EXPORT_SYMBOL(__dev_remove_pack);
606 * dev_remove_pack - remove packet handler
607 * @pt: packet type declaration
609 * Remove a protocol handler that was previously added to the kernel
610 * protocol handlers by dev_add_pack(). The passed &packet_type is removed
611 * from the kernel lists and can be freed or reused once this function
614 * This call sleeps to guarantee that no CPU is looking at the packet
617 void dev_remove_pack(struct packet_type *pt)
619 __dev_remove_pack(pt);
623 EXPORT_SYMBOL(dev_remove_pack);
626 /*******************************************************************************
628 * Device Interface Subroutines
630 *******************************************************************************/
633 * dev_get_iflink - get 'iflink' value of an interface
634 * @dev: targeted interface
636 * Indicates the ifindex the interface is linked to.
637 * Physical interfaces have the same 'ifindex' and 'iflink' values.
640 int dev_get_iflink(const struct net_device *dev)
642 if (dev->netdev_ops && dev->netdev_ops->ndo_get_iflink)
643 return dev->netdev_ops->ndo_get_iflink(dev);
645 return READ_ONCE(dev->ifindex);
647 EXPORT_SYMBOL(dev_get_iflink);
650 * dev_fill_metadata_dst - Retrieve tunnel egress information.
651 * @dev: targeted interface
654 * For better visibility of tunnel traffic, OVS needs to retrieve
655 * egress tunnel information for a packet. The following API allows
656 * the user to get this info.
658 int dev_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
660 struct ip_tunnel_info *info;
662 if (!dev->netdev_ops || !dev->netdev_ops->ndo_fill_metadata_dst)
665 info = skb_tunnel_info_unclone(skb);
668 if (unlikely(!(info->mode & IP_TUNNEL_INFO_TX)))
671 return dev->netdev_ops->ndo_fill_metadata_dst(dev, skb);
673 EXPORT_SYMBOL_GPL(dev_fill_metadata_dst);
675 static struct net_device_path *dev_fwd_path(struct net_device_path_stack *stack)
677 int k = stack->num_paths++;
679 if (WARN_ON_ONCE(k >= NET_DEVICE_PATH_STACK_MAX))
682 return &stack->path[k];
685 int dev_fill_forward_path(const struct net_device *dev, const u8 *daddr,
686 struct net_device_path_stack *stack)
688 const struct net_device *last_dev;
689 struct net_device_path_ctx ctx = {
692 struct net_device_path *path;
695 memcpy(ctx.daddr, daddr, sizeof(ctx.daddr));
696 stack->num_paths = 0;
697 while (ctx.dev && ctx.dev->netdev_ops->ndo_fill_forward_path) {
699 path = dev_fwd_path(stack);
703 memset(path, 0, sizeof(struct net_device_path));
704 ret = ctx.dev->netdev_ops->ndo_fill_forward_path(&ctx, path);
708 if (WARN_ON_ONCE(last_dev == ctx.dev))
715 path = dev_fwd_path(stack);
718 path->type = DEV_PATH_ETHERNET;
723 EXPORT_SYMBOL_GPL(dev_fill_forward_path);
726 * __dev_get_by_name - find a device by its name
727 * @net: the applicable net namespace
728 * @name: name to find
730 * Find an interface by name. Must be called under RTNL semaphore.
731 * If the name is found a pointer to the device is returned.
732 * If the name is not found then %NULL is returned. The
733 * reference counters are not incremented so the caller must be
734 * careful with locks.
737 struct net_device *__dev_get_by_name(struct net *net, const char *name)
739 struct netdev_name_node *node_name;
741 node_name = netdev_name_node_lookup(net, name);
742 return node_name ? node_name->dev : NULL;
744 EXPORT_SYMBOL(__dev_get_by_name);
747 * dev_get_by_name_rcu - find a device by its name
748 * @net: the applicable net namespace
749 * @name: name to find
751 * Find an interface by name.
752 * If the name is found a pointer to the device is returned.
753 * If the name is not found then %NULL is returned.
754 * The reference counters are not incremented so the caller must be
755 * careful with locks. The caller must hold RCU lock.
758 struct net_device *dev_get_by_name_rcu(struct net *net, const char *name)
760 struct netdev_name_node *node_name;
762 node_name = netdev_name_node_lookup_rcu(net, name);
763 return node_name ? node_name->dev : NULL;
765 EXPORT_SYMBOL(dev_get_by_name_rcu);
767 /* Deprecated for new users, call netdev_get_by_name() instead */
768 struct net_device *dev_get_by_name(struct net *net, const char *name)
770 struct net_device *dev;
773 dev = dev_get_by_name_rcu(net, name);
778 EXPORT_SYMBOL(dev_get_by_name);
781 * netdev_get_by_name() - find a device by its name
782 * @net: the applicable net namespace
783 * @name: name to find
784 * @tracker: tracking object for the acquired reference
785 * @gfp: allocation flags for the tracker
787 * Find an interface by name. This can be called from any
788 * context and does its own locking. The returned handle has
789 * the usage count incremented and the caller must use netdev_put() to
790 * release it when it is no longer needed. %NULL is returned if no
791 * matching device is found.
793 struct net_device *netdev_get_by_name(struct net *net, const char *name,
794 netdevice_tracker *tracker, gfp_t gfp)
796 struct net_device *dev;
798 dev = dev_get_by_name(net, name);
800 netdev_tracker_alloc(dev, tracker, gfp);
803 EXPORT_SYMBOL(netdev_get_by_name);
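/* Usage sketch (illustrative only): looking up a device by name with a
 * reference tracker.  The "eth0" name and the local variable names are
 * assumptions for the sake of the example.
 *
 *	netdevice_tracker tracker;
 *	struct net_device *dev;
 *
 *	dev = netdev_get_by_name(&init_net, "eth0", &tracker, GFP_KERNEL);
 *	if (!dev)
 *		return -ENODEV;
 *	netdev_info(dev, "found, ifindex %d\n", dev->ifindex);
 *	netdev_put(dev, &tracker);	// release the tracked reference
 */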
806 * __dev_get_by_index - find a device by its ifindex
807 * @net: the applicable net namespace
808 * @ifindex: index of device
810 * Search for an interface by index. Returns a pointer to the device,
811 * or %NULL if it is not found. The device has not
812 * had its reference counter increased so the caller must be careful
813 * about locking. The caller must hold the RTNL semaphore.
816 struct net_device *__dev_get_by_index(struct net *net, int ifindex)
818 struct net_device *dev;
819 struct hlist_head *head = dev_index_hash(net, ifindex);
821 hlist_for_each_entry(dev, head, index_hlist)
822 if (dev->ifindex == ifindex)
827 EXPORT_SYMBOL(__dev_get_by_index);
830 * dev_get_by_index_rcu - find a device by its ifindex
831 * @net: the applicable net namespace
832 * @ifindex: index of device
834 * Search for an interface by index. Returns a pointer to the device,
835 * or %NULL if it is not found. The device has not
836 * had its reference counter increased so the caller must be careful
837 * about locking. The caller must hold RCU lock.
840 struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex)
842 struct net_device *dev;
843 struct hlist_head *head = dev_index_hash(net, ifindex);
845 hlist_for_each_entry_rcu(dev, head, index_hlist)
846 if (dev->ifindex == ifindex)
851 EXPORT_SYMBOL(dev_get_by_index_rcu);
853 /* Deprecated for new users, call netdev_get_by_index() instead */
854 struct net_device *dev_get_by_index(struct net *net, int ifindex)
856 struct net_device *dev;
859 dev = dev_get_by_index_rcu(net, ifindex);
864 EXPORT_SYMBOL(dev_get_by_index);
867 * netdev_get_by_index() - find a device by its ifindex
868 * @net: the applicable net namespace
869 * @ifindex: index of device
870 * @tracker: tracking object for the acquired reference
871 * @gfp: allocation flags for the tracker
873 * Search for an interface by index. Returns a pointer to the device,
874 * or NULL if it is not found. The device returned has
875 * had a reference added and the pointer is safe until the user calls
876 * netdev_put() to indicate they have finished with it.
878 struct net_device *netdev_get_by_index(struct net *net, int ifindex,
879 netdevice_tracker *tracker, gfp_t gfp)
881 struct net_device *dev;
883 dev = dev_get_by_index(net, ifindex);
885 netdev_tracker_alloc(dev, tracker, gfp);
888 EXPORT_SYMBOL(netdev_get_by_index);
891 * dev_get_by_napi_id - find a device by napi_id
892 * @napi_id: ID of the NAPI struct
894 * Search for an interface by NAPI ID. Returns a pointer to the device,
895 * or %NULL if it is not found. The device has not had
896 * its reference counter increased so the caller must be careful
897 * about locking. The caller must hold RCU lock.
900 struct net_device *dev_get_by_napi_id(unsigned int napi_id)
902 struct napi_struct *napi;
904 WARN_ON_ONCE(!rcu_read_lock_held());
906 if (napi_id < MIN_NAPI_ID)
909 napi = napi_by_id(napi_id);
911 return napi ? napi->dev : NULL;
913 EXPORT_SYMBOL(dev_get_by_napi_id);
916 * netdev_get_name - get a netdevice name, knowing its ifindex.
917 * @net: network namespace
918 * @name: a pointer to the buffer where the name will be stored.
919 * @ifindex: the ifindex of the interface to get the name from.
921 int netdev_get_name(struct net *net, char *name, int ifindex)
923 struct net_device *dev;
926 down_read(&devnet_rename_sem);
929 dev = dev_get_by_index_rcu(net, ifindex);
935 strcpy(name, dev->name);
940 up_read(&devnet_rename_sem);
945 * dev_getbyhwaddr_rcu - find a device by its hardware address
946 * @net: the applicable net namespace
947 * @type: media type of device
948 * @ha: hardware address
950 * Search for an interface by MAC address. Returns a pointer to the
951 * device, or NULL if it is not found.
952 * The caller must hold RCU or RTNL.
953 * The returned device has not had its ref count increased
954 * and the caller must therefore be careful about locking
958 struct net_device *dev_getbyhwaddr_rcu(struct net *net, unsigned short type,
961 struct net_device *dev;
963 for_each_netdev_rcu(net, dev)
964 if (dev->type == type &&
965 !memcmp(dev->dev_addr, ha, dev->addr_len))
970 EXPORT_SYMBOL(dev_getbyhwaddr_rcu);
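/* Usage sketch (illustrative only): finding an Ethernet device by hardware
 * address under the RCU read lock.  The address value is made up.
 *
 *	static const char example_ha[ETH_ALEN] = {
 *		0x02, 0x00, 0x00, 0x00, 0x00, 0x01
 *	};
 *	struct net_device *dev;
 *
 *	rcu_read_lock();
 *	dev = dev_getbyhwaddr_rcu(&init_net, ARPHRD_ETHER, example_ha);
 *	if (dev)
 *		netdev_info(dev, "matches the example address\n");
 *	rcu_read_unlock();	// dev must not be used past this point
 */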
972 struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type)
974 struct net_device *dev, *ret = NULL;
977 for_each_netdev_rcu(net, dev)
978 if (dev->type == type) {
986 EXPORT_SYMBOL(dev_getfirstbyhwtype);
989 * __dev_get_by_flags - find any device with given flags
990 * @net: the applicable net namespace
991 * @if_flags: IFF_* values
992 * @mask: bitmask of bits in if_flags to check
994 * Search for any interface with the given flags. Returns a pointer
995 * to the first match, or NULL if none is found. Must be called inside
996 * rtnl_lock(), and result refcount is unchanged.
999 struct net_device *__dev_get_by_flags(struct net *net, unsigned short if_flags,
1000 unsigned short mask)
1002 struct net_device *dev, *ret;
1007 for_each_netdev(net, dev) {
1008 if (((dev->flags ^ if_flags) & mask) == 0) {
1015 EXPORT_SYMBOL(__dev_get_by_flags);
1018 * dev_valid_name - check if name is okay for network device
1019 * @name: name string
1021 * Network device names need to be valid file names to
1022 * allow sysfs to work. We also disallow any kind of
1025 bool dev_valid_name(const char *name)
1029 if (strnlen(name, IFNAMSIZ) == IFNAMSIZ)
1031 if (!strcmp(name, ".") || !strcmp(name, ".."))
1035 if (*name == '/' || *name == ':' || isspace(*name))
1041 EXPORT_SYMBOL(dev_valid_name);
1044 * __dev_alloc_name - allocate a name for a device
1045 * @net: network namespace to allocate the device name in
1046 * @name: name format string
1047 * @res: result name string
1049 * Passed a format string - e.g. "lt%d" - it will try to find a suitable
1050 * id. It scans the list of devices to build up a free map, then chooses
1051 * the first empty slot. The caller must hold the dev_base or rtnl lock
1052 * while allocating the name and adding the device in order to avoid
1054 * Limited to bits_per_byte * page size devices (ie 32K on most platforms).
1055 * Returns the number of the unit assigned or a negative errno code.
1058 static int __dev_alloc_name(struct net *net, const char *name, char *res)
1062 const int max_netdevices = 8*PAGE_SIZE;
1063 unsigned long *inuse;
1064 struct net_device *d;
1067 /* Verify the string as this thing may have come from the user.
1068 * There must be one "%d" and no other "%" characters.
1070 p = strchr(name, '%');
1071 if (!p || p[1] != 'd' || strchr(p + 2, '%'))
1074 /* Use one page as a bit array of possible slots */
1075 inuse = bitmap_zalloc(max_netdevices, GFP_ATOMIC);
1079 for_each_netdev(net, d) {
1080 struct netdev_name_node *name_node;
1082 netdev_for_each_altname(d, name_node) {
1083 if (!sscanf(name_node->name, name, &i))
1085 if (i < 0 || i >= max_netdevices)
1088 /* avoid cases where sscanf is not exact inverse of printf */
1089 snprintf(buf, IFNAMSIZ, name, i);
1090 if (!strncmp(buf, name_node->name, IFNAMSIZ))
1091 __set_bit(i, inuse);
1093 if (!sscanf(d->name, name, &i))
1095 if (i < 0 || i >= max_netdevices)
1098 /* avoid cases where sscanf is not exact inverse of printf */
1099 snprintf(buf, IFNAMSIZ, name, i);
1100 if (!strncmp(buf, d->name, IFNAMSIZ))
1101 __set_bit(i, inuse);
1104 i = find_first_zero_bit(inuse, max_netdevices);
1106 if (i == max_netdevices)
1109 /* 'res' and 'name' could overlap, use 'buf' as an intermediate buffer */
1110 strscpy(buf, name, IFNAMSIZ);
1111 snprintf(res, IFNAMSIZ, buf, i);
1115 /* Returns negative errno or allocated unit id (see __dev_alloc_name()) */
1116 static int dev_prep_valid_name(struct net *net, struct net_device *dev,
1117 const char *want_name, char *out_name,
1120 if (!dev_valid_name(want_name))
1123 if (strchr(want_name, '%'))
1124 return __dev_alloc_name(net, want_name, out_name);
1126 if (netdev_name_in_use(net, want_name))
1128 if (out_name != want_name)
1129 strscpy(out_name, want_name, IFNAMSIZ);
1134 * dev_alloc_name - allocate a name for a device
1136 * @name: name format string
1138 * Passed a format string - e.g. "lt%d" - it will try to find a suitable
1139 * id. It scans the list of devices to build up a free map, then chooses
1140 * the first empty slot. The caller must hold the dev_base or rtnl lock
1141 * while allocating the name and adding the device in order to avoid
1143 * Limited to bits_per_byte * page size devices (ie 32K on most platforms).
1144 * Returns the number of the unit assigned or a negative errno code.
1147 int dev_alloc_name(struct net_device *dev, const char *name)
1149 return dev_prep_valid_name(dev_net(dev), dev, name, dev->name, ENFILE);
1151 EXPORT_SYMBOL(dev_alloc_name);
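/* Usage sketch (illustrative only): a driver asking for the next free unit
 * of a "dummy%d"-style name before registering the device.  Must run under
 * rtnl_lock(); error handling is abbreviated.
 *
 *	int unit;
 *
 *	unit = dev_alloc_name(dev, "dummy%d");
 *	if (unit < 0)
 *		return unit;		// e.g. -EINVAL or -ENFILE
 *	// dev->name now holds e.g. "dummy0", "dummy1", ...
 */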
1153 static int dev_get_valid_name(struct net *net, struct net_device *dev,
1158 ret = dev_prep_valid_name(net, dev, name, dev->name, EEXIST);
1159 return ret < 0 ? ret : 0;
1163 * dev_change_name - change name of a device
1165 * @newname: name (or format string) must be at least IFNAMSIZ
1167 * Change name of a device, can pass format strings "eth%d".
1170 int dev_change_name(struct net_device *dev, const char *newname)
1172 unsigned char old_assign_type;
1173 char oldname[IFNAMSIZ];
1179 BUG_ON(!dev_net(dev));
1183 down_write(&devnet_rename_sem);
1185 if (strncmp(newname, dev->name, IFNAMSIZ) == 0) {
1186 up_write(&devnet_rename_sem);
1190 memcpy(oldname, dev->name, IFNAMSIZ);
1192 err = dev_get_valid_name(net, dev, newname);
1194 up_write(&devnet_rename_sem);
1198 if (oldname[0] && !strchr(oldname, '%'))
1199 netdev_info(dev, "renamed from %s%s\n", oldname,
1200 dev->flags & IFF_UP ? " (while UP)" : "");
1202 old_assign_type = dev->name_assign_type;
1203 WRITE_ONCE(dev->name_assign_type, NET_NAME_RENAMED);
1206 ret = device_rename(&dev->dev, dev->name);
1208 memcpy(dev->name, oldname, IFNAMSIZ);
1209 WRITE_ONCE(dev->name_assign_type, old_assign_type);
1210 up_write(&devnet_rename_sem);
1214 up_write(&devnet_rename_sem);
1216 netdev_adjacent_rename_links(dev, oldname);
1218 netdev_name_node_del(dev->name_node);
1222 netdev_name_node_add(net, dev->name_node);
1224 ret = call_netdevice_notifiers(NETDEV_CHANGENAME, dev);
1225 ret = notifier_to_errno(ret);
1228 /* err >= 0 after dev_alloc_name() or stores the first errno */
1231 down_write(&devnet_rename_sem);
1232 memcpy(dev->name, oldname, IFNAMSIZ);
1233 memcpy(oldname, newname, IFNAMSIZ);
1234 WRITE_ONCE(dev->name_assign_type, old_assign_type);
1235 old_assign_type = NET_NAME_RENAMED;
1238 netdev_err(dev, "name change rollback failed: %d\n",
1247 * dev_set_alias - change ifalias of a device
1249 * @alias: name up to IFALIASZ
1250 * @len: limit of bytes to copy from info
1252 * Set ifalias for a device.
1254 int dev_set_alias(struct net_device *dev, const char *alias, size_t len)
1256 struct dev_ifalias *new_alias = NULL;
1258 if (len >= IFALIASZ)
1262 new_alias = kmalloc(sizeof(*new_alias) + len + 1, GFP_KERNEL);
1266 memcpy(new_alias->ifalias, alias, len);
1267 new_alias->ifalias[len] = 0;
1270 mutex_lock(&ifalias_mutex);
1271 new_alias = rcu_replace_pointer(dev->ifalias, new_alias,
1272 mutex_is_locked(&ifalias_mutex));
1273 mutex_unlock(&ifalias_mutex);
1276 kfree_rcu(new_alias, rcuhead);
1280 EXPORT_SYMBOL(dev_set_alias);
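/* Usage sketch (illustrative only): attaching a human-readable description
 * to an interface, as done for IFLA_IFALIAS.  The alias text is arbitrary.
 *
 *	static const char alias[] = "uplink to core switch";
 *	int err;
 *
 *	err = dev_set_alias(dev, alias, strlen(alias));
 *	if (err < 0)
 *		return err;
 *	// dev_set_alias(dev, NULL, 0) would clear the alias again
 */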
1283 * dev_get_alias - get ifalias of a device
1285 * @name: buffer to store name of ifalias
1286 * @len: size of buffer
1288 * get ifalias for a device. Caller must make sure dev cannot go
1289 * away, e.g. by holding the RCU read lock or a reference on the device.
1291 int dev_get_alias(const struct net_device *dev, char *name, size_t len)
1293 const struct dev_ifalias *alias;
1297 alias = rcu_dereference(dev->ifalias);
1299 ret = snprintf(name, len, "%s", alias->ifalias);
1306 * netdev_features_change - device changes features
1307 * @dev: device to cause notification
1309 * Called to indicate a device has changed features.
1311 void netdev_features_change(struct net_device *dev)
1313 call_netdevice_notifiers(NETDEV_FEAT_CHANGE, dev);
1315 EXPORT_SYMBOL(netdev_features_change);
1318 * netdev_state_change - device changes state
1319 * @dev: device to cause notification
1321 * Called to indicate a device has changed state. This function calls
1322 * the notifier chains for netdev_chain and sends a NEWLINK message
1323 * to the routing socket.
1325 void netdev_state_change(struct net_device *dev)
1327 if (dev->flags & IFF_UP) {
1328 struct netdev_notifier_change_info change_info = {
1332 call_netdevice_notifiers_info(NETDEV_CHANGE,
1334 rtmsg_ifinfo(RTM_NEWLINK, dev, 0, GFP_KERNEL, 0, NULL);
1337 EXPORT_SYMBOL(netdev_state_change);
1340 * __netdev_notify_peers - notify network peers about existence of @dev,
1341 * to be called when rtnl lock is already held.
1342 * @dev: network device
1344 * Generate traffic such that interested network peers are aware of
1345 * @dev, such as by generating a gratuitous ARP. This may be used when
1346 * a device wants to inform the rest of the network about some sort of
1347 * reconfiguration such as a failover event or virtual machine
1350 void __netdev_notify_peers(struct net_device *dev)
1353 call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, dev);
1354 call_netdevice_notifiers(NETDEV_RESEND_IGMP, dev);
1356 EXPORT_SYMBOL(__netdev_notify_peers);
1359 * netdev_notify_peers - notify network peers about existence of @dev
1360 * @dev: network device
1362 * Generate traffic such that interested network peers are aware of
1363 * @dev, such as by generating a gratuitous ARP. This may be used when
1364 * a device wants to inform the rest of the network about some sort of
1365 * reconfiguration such as a failover event or virtual machine
1368 void netdev_notify_peers(struct net_device *dev)
1371 __netdev_notify_peers(dev);
1374 EXPORT_SYMBOL(netdev_notify_peers);
1376 static int napi_threaded_poll(void *data);
1378 static int napi_kthread_create(struct napi_struct *n)
1382 /* Create and wake up the kthread once to put it in
1383 * TASK_INTERRUPTIBLE mode to avoid the blocked task
1384 * warning and work with loadavg.
1386 n->thread = kthread_run(napi_threaded_poll, n, "napi/%s-%d",
1387 n->dev->name, n->napi_id);
1388 if (IS_ERR(n->thread)) {
1389 err = PTR_ERR(n->thread);
1390 pr_err("kthread_run failed with err %d\n", err);
1397 static int __dev_open(struct net_device *dev, struct netlink_ext_ack *extack)
1399 const struct net_device_ops *ops = dev->netdev_ops;
1403 dev_addr_check(dev);
1405 if (!netif_device_present(dev)) {
1406 /* may be detached because parent is runtime-suspended */
1407 if (dev->dev.parent)
1408 pm_runtime_resume(dev->dev.parent);
1409 if (!netif_device_present(dev))
1413 /* Block netpoll from trying to do any rx path servicing.
1414 * If we don't do this there is a chance ndo_poll_controller
1415 * or ndo_poll may be running while we open the device
1417 netpoll_poll_disable(dev);
1419 ret = call_netdevice_notifiers_extack(NETDEV_PRE_UP, dev, extack);
1420 ret = notifier_to_errno(ret);
1424 set_bit(__LINK_STATE_START, &dev->state);
1426 if (ops->ndo_validate_addr)
1427 ret = ops->ndo_validate_addr(dev);
1429 if (!ret && ops->ndo_open)
1430 ret = ops->ndo_open(dev);
1432 netpoll_poll_enable(dev);
1435 clear_bit(__LINK_STATE_START, &dev->state);
1437 dev->flags |= IFF_UP;
1438 dev_set_rx_mode(dev);
1440 add_device_randomness(dev->dev_addr, dev->addr_len);
1447 * dev_open - prepare an interface for use.
1448 * @dev: device to open
1449 * @extack: netlink extended ack
1451 * Takes a device from down to up state. The device's private open
1452 * function is invoked and then the multicast lists are loaded. Finally
1453 * the device is moved into the up state and a %NETDEV_UP message is
1454 * sent to the netdev notifier chain.
1456 * Calling this function on an active interface is a nop. On a failure
1457 * a negative errno code is returned.
1459 int dev_open(struct net_device *dev, struct netlink_ext_ack *extack)
1463 if (dev->flags & IFF_UP)
1466 ret = __dev_open(dev, extack);
1470 rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP | IFF_RUNNING, GFP_KERNEL, 0, NULL);
1471 call_netdevice_notifiers(NETDEV_UP, dev);
1475 EXPORT_SYMBOL(dev_open);
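/* Usage sketch (illustrative only): bringing an interface administratively
 * up from kernel code.  Must be called with the RTNL lock held; the "eth0"
 * name is an assumption.
 *
 *	struct net_device *dev;
 *	int err = 0;
 *
 *	rtnl_lock();
 *	dev = __dev_get_by_name(&init_net, "eth0");
 *	if (dev)
 *		err = dev_open(dev, NULL);	// NULL: no extack for errors
 *	rtnl_unlock();
 */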
1477 static void __dev_close_many(struct list_head *head)
1479 struct net_device *dev;
1484 list_for_each_entry(dev, head, close_list) {
1485 /* Temporarily disable netpoll until the interface is down */
1486 netpoll_poll_disable(dev);
1488 call_netdevice_notifiers(NETDEV_GOING_DOWN, dev);
1490 clear_bit(__LINK_STATE_START, &dev->state);
1492 /* Synchronize to scheduled poll. We cannot touch the poll list; it
1493 * can even be on a different CPU. So just clear netif_running().
1495 * dev->stop() will invoke napi_disable() on all of its
1496 * napi_struct instances on this device.
1498 smp_mb__after_atomic(); /* Commit netif_running(). */
1501 dev_deactivate_many(head);
1503 list_for_each_entry(dev, head, close_list) {
1504 const struct net_device_ops *ops = dev->netdev_ops;
1507 * Call the device specific close. This cannot fail.
1508 * Only if device is UP
1510 * We allow it to be called even after a DETACH hot-plug
1516 dev->flags &= ~IFF_UP;
1517 netpoll_poll_enable(dev);
1521 static void __dev_close(struct net_device *dev)
1525 list_add(&dev->close_list, &single);
1526 __dev_close_many(&single);
1530 void dev_close_many(struct list_head *head, bool unlink)
1532 struct net_device *dev, *tmp;
1534 /* Remove the devices that don't need to be closed */
1535 list_for_each_entry_safe(dev, tmp, head, close_list)
1536 if (!(dev->flags & IFF_UP))
1537 list_del_init(&dev->close_list);
1539 __dev_close_many(head);
1541 list_for_each_entry_safe(dev, tmp, head, close_list) {
1542 rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP | IFF_RUNNING, GFP_KERNEL, 0, NULL);
1543 call_netdevice_notifiers(NETDEV_DOWN, dev);
1545 list_del_init(&dev->close_list);
1548 EXPORT_SYMBOL(dev_close_many);
1551 * dev_close - shutdown an interface.
1552 * @dev: device to shutdown
1554 * This function moves an active device into down state. A
1555 * %NETDEV_GOING_DOWN is sent to the netdev notifier chain. The device
1556 * is then deactivated and finally a %NETDEV_DOWN is sent to the notifier
1559 void dev_close(struct net_device *dev)
1561 if (dev->flags & IFF_UP) {
1564 list_add(&dev->close_list, &single);
1565 dev_close_many(&single, true);
1569 EXPORT_SYMBOL(dev_close);
1573 * dev_disable_lro - disable Large Receive Offload on a device
1576 * Disable Large Receive Offload (LRO) on a net device. Must be
1577 * called under RTNL. This is needed if received packets may be
1578 * forwarded to another interface.
1580 void dev_disable_lro(struct net_device *dev)
1582 struct net_device *lower_dev;
1583 struct list_head *iter;
1585 dev->wanted_features &= ~NETIF_F_LRO;
1586 netdev_update_features(dev);
1588 if (unlikely(dev->features & NETIF_F_LRO))
1589 netdev_WARN(dev, "failed to disable LRO!\n");
1591 netdev_for_each_lower_dev(dev, lower_dev, iter)
1592 dev_disable_lro(lower_dev);
1594 EXPORT_SYMBOL(dev_disable_lro);
1597 * dev_disable_gro_hw - disable HW Generic Receive Offload on a device
1600 * Disable HW Generic Receive Offload (GRO_HW) on a net device. Must be
1601 * called under RTNL. This is needed if Generic XDP is installed on
1604 static void dev_disable_gro_hw(struct net_device *dev)
1606 dev->wanted_features &= ~NETIF_F_GRO_HW;
1607 netdev_update_features(dev);
1609 if (unlikely(dev->features & NETIF_F_GRO_HW))
1610 netdev_WARN(dev, "failed to disable GRO_HW!\n");
1613 const char *netdev_cmd_to_name(enum netdev_cmd cmd)
1616 case NETDEV_##val: \
1617 return "NETDEV_" __stringify(val);
1619 N(UP) N(DOWN) N(REBOOT) N(CHANGE) N(REGISTER) N(UNREGISTER)
1620 N(CHANGEMTU) N(CHANGEADDR) N(GOING_DOWN) N(CHANGENAME) N(FEAT_CHANGE)
1621 N(BONDING_FAILOVER) N(PRE_UP) N(PRE_TYPE_CHANGE) N(POST_TYPE_CHANGE)
1622 N(POST_INIT) N(PRE_UNINIT) N(RELEASE) N(NOTIFY_PEERS) N(JOIN)
1623 N(CHANGEUPPER) N(RESEND_IGMP) N(PRECHANGEMTU) N(CHANGEINFODATA)
1624 N(BONDING_INFO) N(PRECHANGEUPPER) N(CHANGELOWERSTATE)
1625 N(UDP_TUNNEL_PUSH_INFO) N(UDP_TUNNEL_DROP_INFO) N(CHANGE_TX_QUEUE_LEN)
1626 N(CVLAN_FILTER_PUSH_INFO) N(CVLAN_FILTER_DROP_INFO)
1627 N(SVLAN_FILTER_PUSH_INFO) N(SVLAN_FILTER_DROP_INFO)
1628 N(PRE_CHANGEADDR) N(OFFLOAD_XSTATS_ENABLE) N(OFFLOAD_XSTATS_DISABLE)
1629 N(OFFLOAD_XSTATS_REPORT_USED) N(OFFLOAD_XSTATS_REPORT_DELTA)
1633 return "UNKNOWN_NETDEV_EVENT";
1635 EXPORT_SYMBOL_GPL(netdev_cmd_to_name);
1637 static int call_netdevice_notifier(struct notifier_block *nb, unsigned long val,
1638 struct net_device *dev)
1640 struct netdev_notifier_info info = {
1644 return nb->notifier_call(nb, val, &info);
1647 static int call_netdevice_register_notifiers(struct notifier_block *nb,
1648 struct net_device *dev)
1652 err = call_netdevice_notifier(nb, NETDEV_REGISTER, dev);
1653 err = notifier_to_errno(err);
1657 if (!(dev->flags & IFF_UP))
1660 call_netdevice_notifier(nb, NETDEV_UP, dev);
1664 static void call_netdevice_unregister_notifiers(struct notifier_block *nb,
1665 struct net_device *dev)
1667 if (dev->flags & IFF_UP) {
1668 call_netdevice_notifier(nb, NETDEV_GOING_DOWN,
1670 call_netdevice_notifier(nb, NETDEV_DOWN, dev);
1672 call_netdevice_notifier(nb, NETDEV_UNREGISTER, dev);
1675 static int call_netdevice_register_net_notifiers(struct notifier_block *nb,
1678 struct net_device *dev;
1681 for_each_netdev(net, dev) {
1682 err = call_netdevice_register_notifiers(nb, dev);
1689 for_each_netdev_continue_reverse(net, dev)
1690 call_netdevice_unregister_notifiers(nb, dev);
1694 static void call_netdevice_unregister_net_notifiers(struct notifier_block *nb,
1697 struct net_device *dev;
1699 for_each_netdev(net, dev)
1700 call_netdevice_unregister_notifiers(nb, dev);
1703 static int dev_boot_phase = 1;
1706 * register_netdevice_notifier - register a network notifier block
1709 * Register a notifier to be called when network device events occur.
1710 * The notifier passed is linked into the kernel structures and must
1711 * not be reused until it has been unregistered. A negative errno code
1712 * is returned on a failure.
1714 * When registered all registration and up events are replayed
1715 * to the new notifier to allow it to have a race-free
1716 * view of the network device list.
1719 int register_netdevice_notifier(struct notifier_block *nb)
1724 /* Close race with setup_net() and cleanup_net() */
1725 down_write(&pernet_ops_rwsem);
1727 err = raw_notifier_chain_register(&netdev_chain, nb);
1733 err = call_netdevice_register_net_notifiers(nb, net);
1740 up_write(&pernet_ops_rwsem);
1744 for_each_net_continue_reverse(net)
1745 call_netdevice_unregister_net_notifiers(nb, net);
1747 raw_notifier_chain_unregister(&netdev_chain, nb);
1750 EXPORT_SYMBOL(register_netdevice_notifier);
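/* Usage sketch (illustrative only): a module reacting to device events.
 * The my_netdev_event() and my_netdev_nb names are hypothetical.
 *
 *	static int my_netdev_event(struct notifier_block *nb,
 *				   unsigned long event, void *ptr)
 *	{
 *		struct net_device *dev = netdev_notifier_info_to_dev(ptr);
 *
 *		switch (event) {
 *		case NETDEV_UP:
 *			netdev_info(dev, "is now up\n");
 *			break;
 *		case NETDEV_UNREGISTER:
 *			netdev_info(dev, "is going away\n");
 *			break;
 *		}
 *		return NOTIFY_DONE;
 *	}
 *
 *	static struct notifier_block my_netdev_nb = {
 *		.notifier_call = my_netdev_event,
 *	};
 *
 *	// register_netdevice_notifier(&my_netdev_nb) at module init,
 *	// unregister_netdevice_notifier(&my_netdev_nb) at module exit.
 */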
1753 * unregister_netdevice_notifier - unregister a network notifier block
1756 * Unregister a notifier previously registered by
1757 * register_netdevice_notifier(). The notifier is unlinked from the
1758 * kernel structures and may then be reused. A negative errno code
1759 * is returned on a failure.
1761 * After unregistering, unregister and down device events are synthesized
1762 * for all devices on the device list to the removed notifier to remove
1763 * the need for special case cleanup code.
1766 int unregister_netdevice_notifier(struct notifier_block *nb)
1771 /* Close race with setup_net() and cleanup_net() */
1772 down_write(&pernet_ops_rwsem);
1774 err = raw_notifier_chain_unregister(&netdev_chain, nb);
1779 call_netdevice_unregister_net_notifiers(nb, net);
1783 up_write(&pernet_ops_rwsem);
1786 EXPORT_SYMBOL(unregister_netdevice_notifier);
1788 static int __register_netdevice_notifier_net(struct net *net,
1789 struct notifier_block *nb,
1790 bool ignore_call_fail)
1794 err = raw_notifier_chain_register(&net->netdev_chain, nb);
1800 err = call_netdevice_register_net_notifiers(nb, net);
1801 if (err && !ignore_call_fail)
1802 goto chain_unregister;
1807 raw_notifier_chain_unregister(&net->netdev_chain, nb);
1811 static int __unregister_netdevice_notifier_net(struct net *net,
1812 struct notifier_block *nb)
1816 err = raw_notifier_chain_unregister(&net->netdev_chain, nb);
1820 call_netdevice_unregister_net_notifiers(nb, net);
1825 * register_netdevice_notifier_net - register a per-netns network notifier block
1826 * @net: network namespace
1829 * Register a notifier to be called when network device events occur.
1830 * The notifier passed is linked into the kernel structures and must
1831 * not be reused until it has been unregistered. A negative errno code
1832 * is returned on a failure.
1834 * When registered all registration and up events are replayed
1835 * to the new notifier to allow it to have a race-free
1836 * view of the network device list.
1839 int register_netdevice_notifier_net(struct net *net, struct notifier_block *nb)
1844 err = __register_netdevice_notifier_net(net, nb, false);
1848 EXPORT_SYMBOL(register_netdevice_notifier_net);
1851 * unregister_netdevice_notifier_net - unregister a per-netns
1852 * network notifier block
1853 * @net: network namespace
1856 * Unregister a notifier previously registered by
1857 * register_netdevice_notifier_net(). The notifier is unlinked from the
1858 * kernel structures and may then be reused. A negative errno code
1859 * is returned on a failure.
1861 * After unregistering, unregister and down device events are synthesized
1862 * for all devices on the device list to the removed notifier to remove
1863 * the need for special case cleanup code.
1866 int unregister_netdevice_notifier_net(struct net *net,
1867 struct notifier_block *nb)
1872 err = __unregister_netdevice_notifier_net(net, nb);
1876 EXPORT_SYMBOL(unregister_netdevice_notifier_net);
1878 static void __move_netdevice_notifier_net(struct net *src_net,
1879 struct net *dst_net,
1880 struct notifier_block *nb)
1882 __unregister_netdevice_notifier_net(src_net, nb);
1883 __register_netdevice_notifier_net(dst_net, nb, true);
1886 int register_netdevice_notifier_dev_net(struct net_device *dev,
1887 struct notifier_block *nb,
1888 struct netdev_net_notifier *nn)
1893 err = __register_netdevice_notifier_net(dev_net(dev), nb, false);
1896 list_add(&nn->list, &dev->net_notifier_list);
1901 EXPORT_SYMBOL(register_netdevice_notifier_dev_net);
1903 int unregister_netdevice_notifier_dev_net(struct net_device *dev,
1904 struct notifier_block *nb,
1905 struct netdev_net_notifier *nn)
1910 list_del(&nn->list);
1911 err = __unregister_netdevice_notifier_net(dev_net(dev), nb);
1915 EXPORT_SYMBOL(unregister_netdevice_notifier_dev_net);
1917 static void move_netdevice_notifiers_dev_net(struct net_device *dev,
1920 struct netdev_net_notifier *nn;
1922 list_for_each_entry(nn, &dev->net_notifier_list, list)
1923 __move_netdevice_notifier_net(dev_net(dev), net, nn->nb);
1927 * call_netdevice_notifiers_info - call all network notifier blocks
1928 * @val: value passed unmodified to notifier function
1929 * @info: notifier information data
1931 * Call all network notifier blocks. Parameters and return value
1932 * are as for raw_notifier_call_chain().
1935 int call_netdevice_notifiers_info(unsigned long val,
1936 struct netdev_notifier_info *info)
1938 struct net *net = dev_net(info->dev);
1943 /* Run per-netns notifier block chain first, then run the global one.
1944 * Hopefully, one day, the global one is going to be removed after
1945 * all notifier block registrators get converted to be per-netns.
1947 ret = raw_notifier_call_chain(&net->netdev_chain, val, info);
1948 if (ret & NOTIFY_STOP_MASK)
1950 return raw_notifier_call_chain(&netdev_chain, val, info);
1954 * call_netdevice_notifiers_info_robust - call per-netns notifier blocks
1955 * for and rollback on error
1956 * @val_up: value passed unmodified to notifier function
1957 * @val_down: value passed unmodified to the notifier function when
1958 * recovering from an error on @val_up
1959 * @info: notifier information data
1961 * Call all per-netns network notifier blocks, but not notifier blocks on
1962 * the global notifier chain. Parameters and return value are as for
1963 * raw_notifier_call_chain_robust().
1967 call_netdevice_notifiers_info_robust(unsigned long val_up,
1968 unsigned long val_down,
1969 struct netdev_notifier_info *info)
1971 struct net *net = dev_net(info->dev);
1975 return raw_notifier_call_chain_robust(&net->netdev_chain,
1976 val_up, val_down, info);
1979 static int call_netdevice_notifiers_extack(unsigned long val,
1980 struct net_device *dev,
1981 struct netlink_ext_ack *extack)
1983 struct netdev_notifier_info info = {
1988 return call_netdevice_notifiers_info(val, &info);
1992 * call_netdevice_notifiers - call all network notifier blocks
1993 * @val: value passed unmodified to notifier function
1994 * @dev: net_device pointer passed unmodified to notifier function
1996 * Call all network notifier blocks. Parameters and return value
1997 * are as for raw_notifier_call_chain().
2000 int call_netdevice_notifiers(unsigned long val, struct net_device *dev)
2002 return call_netdevice_notifiers_extack(val, dev, NULL);
2004 EXPORT_SYMBOL(call_netdevice_notifiers);
2007 * call_netdevice_notifiers_mtu - call all network notifier blocks
2008 * @val: value passed unmodified to notifier function
2009 * @dev: net_device pointer passed unmodified to notifier function
2010 * @arg: additional u32 argument passed to the notifier function
2012 * Call all network notifier blocks. Parameters and return value
2013 * are as for raw_notifier_call_chain().
2015 static int call_netdevice_notifiers_mtu(unsigned long val,
2016 struct net_device *dev, u32 arg)
2018 struct netdev_notifier_info_ext info = {
2023 BUILD_BUG_ON(offsetof(struct netdev_notifier_info_ext, info) != 0);
2025 return call_netdevice_notifiers_info(val, &info.info);
2028 #ifdef CONFIG_NET_INGRESS
2029 static DEFINE_STATIC_KEY_FALSE(ingress_needed_key);
2031 void net_inc_ingress_queue(void)
2033 static_branch_inc(&ingress_needed_key);
2035 EXPORT_SYMBOL_GPL(net_inc_ingress_queue);
2037 void net_dec_ingress_queue(void)
2039 static_branch_dec(&ingress_needed_key);
2041 EXPORT_SYMBOL_GPL(net_dec_ingress_queue);
2044 #ifdef CONFIG_NET_EGRESS
2045 static DEFINE_STATIC_KEY_FALSE(egress_needed_key);
2047 void net_inc_egress_queue(void)
2049 static_branch_inc(&egress_needed_key);
2051 EXPORT_SYMBOL_GPL(net_inc_egress_queue);
2053 void net_dec_egress_queue(void)
2055 static_branch_dec(&egress_needed_key);
2057 EXPORT_SYMBOL_GPL(net_dec_egress_queue);
2060 DEFINE_STATIC_KEY_FALSE(netstamp_needed_key);
2061 EXPORT_SYMBOL(netstamp_needed_key);
2062 #ifdef CONFIG_JUMP_LABEL
2063 static atomic_t netstamp_needed_deferred;
2064 static atomic_t netstamp_wanted;
2065 static void netstamp_clear(struct work_struct *work)
2067 int deferred = atomic_xchg(&netstamp_needed_deferred, 0);
2070 wanted = atomic_add_return(deferred, &netstamp_wanted);
2072 static_branch_enable(&netstamp_needed_key);
2074 static_branch_disable(&netstamp_needed_key);
2076 static DECLARE_WORK(netstamp_work, netstamp_clear);
2079 void net_enable_timestamp(void)
2081 #ifdef CONFIG_JUMP_LABEL
2082 int wanted = atomic_read(&netstamp_wanted);
2084 while (wanted > 0) {
2085 if (atomic_try_cmpxchg(&netstamp_wanted, &wanted, wanted + 1))
2088 atomic_inc(&netstamp_needed_deferred);
2089 schedule_work(&netstamp_work);
2091 static_branch_inc(&netstamp_needed_key);
2094 EXPORT_SYMBOL(net_enable_timestamp);
2096 void net_disable_timestamp(void)
2098 #ifdef CONFIG_JUMP_LABEL
2099 int wanted = atomic_read(&netstamp_wanted);
2101 while (wanted > 1) {
2102 if (atomic_try_cmpxchg(&netstamp_wanted, &wanted, wanted - 1))
2105 atomic_dec(&netstamp_needed_deferred);
2106 schedule_work(&netstamp_work);
2108 static_branch_dec(&netstamp_needed_key);
2111 EXPORT_SYMBOL(net_disable_timestamp);
2113 static inline void net_timestamp_set(struct sk_buff *skb)
2116 skb->mono_delivery_time = 0;
2117 if (static_branch_unlikely(&netstamp_needed_key))
2118 skb->tstamp = ktime_get_real();
2121 #define net_timestamp_check(COND, SKB) \
2122 if (static_branch_unlikely(&netstamp_needed_key)) { \
2123 if ((COND) && !(SKB)->tstamp) \
2124 (SKB)->tstamp = ktime_get_real(); \
2127 bool is_skb_forwardable(const struct net_device *dev, const struct sk_buff *skb)
2129 return __is_skb_forwardable(dev, skb, true);
2131 EXPORT_SYMBOL_GPL(is_skb_forwardable);
2133 static int __dev_forward_skb2(struct net_device *dev, struct sk_buff *skb,
2136 int ret = ____dev_forward_skb(dev, skb, check_mtu);
2139 skb->protocol = eth_type_trans(skb, dev);
2140 skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
2146 int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
2148 return __dev_forward_skb2(dev, skb, true);
2150 EXPORT_SYMBOL_GPL(__dev_forward_skb);
2153 * dev_forward_skb - loopback an skb to another netif
2155 * @dev: destination network device
2156 * @skb: buffer to forward
2159 * NET_RX_SUCCESS (no congestion)
2160 * NET_RX_DROP (packet was dropped, but freed)
2162 * dev_forward_skb can be used for injecting an skb from the
2163 * start_xmit function of one device into the receive queue
2164 * of another device.
2166 * The receiving device may be in another namespace, so
2167 * we have to clear all information in the skb that could
2168 * impact namespace isolation.
2170 int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
2172 return __dev_forward_skb(dev, skb) ?: netif_rx_internal(skb);
2174 EXPORT_SYMBOL_GPL(dev_forward_skb);
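/* Usage sketch (illustrative only): a veth-like virtual driver handing
 * frames it was asked to transmit straight to a peer device's receive
 * path.  The my_priv()/peer lookup is hypothetical.
 *
 *	static netdev_tx_t my_start_xmit(struct sk_buff *skb,
 *					 struct net_device *dev)
 *	{
 *		struct net_device *peer = my_priv(dev)->peer;	// hypothetical
 *
 *		// dev_forward_skb() consumes the skb in both the success and
 *		// the drop case, so NETDEV_TX_OK is always correct here.
 *		if (dev_forward_skb(peer, skb) != NET_RX_SUCCESS)
 *			dev->stats.tx_dropped++;
 *		return NETDEV_TX_OK;
 *	}
 */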
2176 int dev_forward_skb_nomtu(struct net_device *dev, struct sk_buff *skb)
2178 return __dev_forward_skb2(dev, skb, false) ?: netif_rx_internal(skb);
2181 static inline int deliver_skb(struct sk_buff *skb,
2182 struct packet_type *pt_prev,
2183 struct net_device *orig_dev)
2185 if (unlikely(skb_orphan_frags_rx(skb, GFP_ATOMIC)))
2187 refcount_inc(&skb->users);
2188 return pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
2191 static inline void deliver_ptype_list_skb(struct sk_buff *skb,
2192 struct packet_type **pt,
2193 struct net_device *orig_dev,
2195 struct list_head *ptype_list)
2197 struct packet_type *ptype, *pt_prev = *pt;
2199 list_for_each_entry_rcu(ptype, ptype_list, list) {
2200 if (ptype->type != type)
2203 deliver_skb(skb, pt_prev, orig_dev);
2209 static inline bool skb_loop_sk(struct packet_type *ptype, struct sk_buff *skb)
2211 if (!ptype->af_packet_priv || !skb->sk)
2214 if (ptype->id_match)
2215 return ptype->id_match(ptype, skb->sk);
2216 else if ((struct sock *)ptype->af_packet_priv == skb->sk)
2223 * dev_nit_active - return true if any network interface taps are in use
2225 * @dev: network device to check for the presence of taps
2227 bool dev_nit_active(struct net_device *dev)
2229 return !list_empty(&net_hotdata.ptype_all) ||
2230 !list_empty(&dev->ptype_all);
2232 EXPORT_SYMBOL_GPL(dev_nit_active);
2235 * Support routine. Sends outgoing frames to any network
2236 * taps currently in use.
2239 void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
2241 struct list_head *ptype_list = &net_hotdata.ptype_all;
2242 struct packet_type *ptype, *pt_prev = NULL;
2243 struct sk_buff *skb2 = NULL;
2247 list_for_each_entry_rcu(ptype, ptype_list, list) {
2248 if (READ_ONCE(ptype->ignore_outgoing))
2251 /* Never send packets back to the socket
2252 * they originated from - MvS (miquels@drinkel.ow.org)
2254 if (skb_loop_sk(ptype, skb))
2258 deliver_skb(skb2, pt_prev, skb->dev);
2263 /* need to clone skb, done only once */
2264 skb2 = skb_clone(skb, GFP_ATOMIC);
2268 net_timestamp_set(skb2);
2270 /* skb->nh should be correctly
2271 * set by sender, so that the second statement is
2272 * just protection against buggy protocols.
2274 skb_reset_mac_header(skb2);
2276 if (skb_network_header(skb2) < skb2->data ||
2277 skb_network_header(skb2) > skb_tail_pointer(skb2)) {
2278 net_crit_ratelimited("protocol %04x is buggy, dev %s\n",
2279 ntohs(skb2->protocol),
2281 skb_reset_network_header(skb2);
2284 skb2->transport_header = skb2->network_header;
2285 skb2->pkt_type = PACKET_OUTGOING;
2289 if (ptype_list == &net_hotdata.ptype_all) {
2290 ptype_list = &dev->ptype_all;
2295 if (!skb_orphan_frags_rx(skb2, GFP_ATOMIC))
2296 pt_prev->func(skb2, skb->dev, pt_prev, skb->dev);
2302 EXPORT_SYMBOL_GPL(dev_queue_xmit_nit);
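/* Illustrative sketch (not part of this file): how a tap becomes visible to
 * dev_nit_active() and dev_queue_xmit_nit().  Registering an ETH_P_ALL
 * packet_type (as AF_PACKET does) puts it on ptype_all, so every frame sent
 * out any device is cloned to my_tap_rcv().  The "my_tap*" names are invented.
 */
static int my_tap_rcv(struct sk_buff *skb, struct net_device *dev,
		      struct packet_type *pt, struct net_device *orig_dev)
{
	/* inspect the clone here, then release it */
	kfree_skb(skb);
	return 0;
}

static struct packet_type my_tap __read_mostly = {
	.type = cpu_to_be16(ETH_P_ALL),
	.func = my_tap_rcv,
};

/* dev_add_pack(&my_tap) attaches the tap, dev_remove_pack(&my_tap) removes it. */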
2305 * netif_setup_tc - Handle tc mappings on real_num_tx_queues change
2306 * @dev: Network device
2307 * @txq: number of queues available
2309 * If real_num_tx_queues is changed the tc mappings may no longer be
2310 * valid. To resolve this verify the tc mapping remains valid and if
2311 * not, zero the mapping. With no priorities mapping to this
2312 * offset/count pair it will no longer be used. In the worst case, if
2313 * TC0 itself is invalid, nothing can be done, so priority mappings are
2314 * disabled entirely. It is expected that drivers will fix this mapping
2315 * if they can before calling netif_set_real_num_tx_queues.
2317 static void netif_setup_tc(struct net_device *dev, unsigned int txq)
2320 struct netdev_tc_txq *tc = &dev->tc_to_txq[0];
2322 /* If TC0 is invalidated disable TC mapping */
2323 if (tc->offset + tc->count > txq) {
2324 netdev_warn(dev, "Number of in use tx queues changed invalidating tc mappings. Priority traffic classification disabled!\n");
2329 /* Invalidated prio to tc mappings set to TC0 */
2330 for (i = 1; i < TC_BITMASK + 1; i++) {
2331 int q = netdev_get_prio_tc_map(dev, i);
2333 tc = &dev->tc_to_txq[q];
2334 if (tc->offset + tc->count > txq) {
2335 netdev_warn(dev, "Number of in use tx queues changed. Priority %i to tc mapping %i is no longer valid. Setting map to 0\n",
2337 netdev_set_prio_tc_map(dev, i, 0);
2342 int netdev_txq_to_tc(struct net_device *dev, unsigned int txq)
2345 struct netdev_tc_txq *tc = &dev->tc_to_txq[0];
2348 /* walk through the TCs and see if it falls into any of them */
2349 for (i = 0; i < TC_MAX_QUEUE; i++, tc++) {
2350 if ((txq - tc->offset) < tc->count)
2354 /* didn't find it, just return -1 to indicate no match */
2360 EXPORT_SYMBOL(netdev_txq_to_tc);
2363 static struct static_key xps_needed __read_mostly;
2364 static struct static_key xps_rxqs_needed __read_mostly;
2365 static DEFINE_MUTEX(xps_map_mutex);
2366 #define xmap_dereference(P) \
2367 rcu_dereference_protected((P), lockdep_is_held(&xps_map_mutex))
2369 static bool remove_xps_queue(struct xps_dev_maps *dev_maps,
2370 struct xps_dev_maps *old_maps, int tci, u16 index)
2372 struct xps_map *map = NULL;
2375 map = xmap_dereference(dev_maps->attr_map[tci]);
2379 for (pos = map->len; pos--;) {
2380 if (map->queues[pos] != index)
2384 map->queues[pos] = map->queues[--map->len];
2389 RCU_INIT_POINTER(old_maps->attr_map[tci], NULL);
2390 RCU_INIT_POINTER(dev_maps->attr_map[tci], NULL);
2391 kfree_rcu(map, rcu);
2398 static bool remove_xps_queue_cpu(struct net_device *dev,
2399 struct xps_dev_maps *dev_maps,
2400 int cpu, u16 offset, u16 count)
2402 int num_tc = dev_maps->num_tc;
2403 bool active = false;
2406 for (tci = cpu * num_tc; num_tc--; tci++) {
2409 for (i = count, j = offset; i--; j++) {
2410 if (!remove_xps_queue(dev_maps, NULL, tci, j))
2420 static void reset_xps_maps(struct net_device *dev,
2421 struct xps_dev_maps *dev_maps,
2422 enum xps_map_type type)
2424 static_key_slow_dec_cpuslocked(&xps_needed);
2425 if (type == XPS_RXQS)
2426 static_key_slow_dec_cpuslocked(&xps_rxqs_needed);
2428 RCU_INIT_POINTER(dev->xps_maps[type], NULL);
2430 kfree_rcu(dev_maps, rcu);
2433 static void clean_xps_maps(struct net_device *dev, enum xps_map_type type,
2434 u16 offset, u16 count)
2436 struct xps_dev_maps *dev_maps;
2437 bool active = false;
2440 dev_maps = xmap_dereference(dev->xps_maps[type]);
2444 for (j = 0; j < dev_maps->nr_ids; j++)
2445 active |= remove_xps_queue_cpu(dev, dev_maps, j, offset, count);
2447 reset_xps_maps(dev, dev_maps, type);
2449 if (type == XPS_CPUS) {
2450 for (i = offset + (count - 1); count--; i--)
2451 netdev_queue_numa_node_write(
2452 netdev_get_tx_queue(dev, i), NUMA_NO_NODE);
2456 static void netif_reset_xps_queues(struct net_device *dev, u16 offset,
2459 if (!static_key_false(&xps_needed))
2463 mutex_lock(&xps_map_mutex);
2465 if (static_key_false(&xps_rxqs_needed))
2466 clean_xps_maps(dev, XPS_RXQS, offset, count);
2468 clean_xps_maps(dev, XPS_CPUS, offset, count);
2470 mutex_unlock(&xps_map_mutex);
2474 static void netif_reset_xps_queues_gt(struct net_device *dev, u16 index)
2476 netif_reset_xps_queues(dev, index, dev->num_tx_queues - index);
2479 static struct xps_map *expand_xps_map(struct xps_map *map, int attr_index,
2480 u16 index, bool is_rxqs_map)
2482 struct xps_map *new_map;
2483 int alloc_len = XPS_MIN_MAP_ALLOC;
2486 for (pos = 0; map && pos < map->len; pos++) {
2487 if (map->queues[pos] != index)
2492 /* Need to add tx-queue to this CPU's/rx-queue's existing map */
2494 if (pos < map->alloc_len)
2497 alloc_len = map->alloc_len * 2;
2500 /* Need to allocate new map to store tx-queue on this CPU's/rx-queue's
2504 new_map = kzalloc(XPS_MAP_SIZE(alloc_len), GFP_KERNEL);
2506 new_map = kzalloc_node(XPS_MAP_SIZE(alloc_len), GFP_KERNEL,
2507 cpu_to_node(attr_index));
2511 for (i = 0; i < pos; i++)
2512 new_map->queues[i] = map->queues[i];
2513 new_map->alloc_len = alloc_len;
2519 /* Copy xps maps at a given index */
2520 static void xps_copy_dev_maps(struct xps_dev_maps *dev_maps,
2521 struct xps_dev_maps *new_dev_maps, int index,
2522 int tc, bool skip_tc)
2524 int i, tci = index * dev_maps->num_tc;
2525 struct xps_map *map;
2527 /* copy maps belonging to foreign traffic classes */
2528 for (i = 0; i < dev_maps->num_tc; i++, tci++) {
2529 if (i == tc && skip_tc)
2532 /* fill in the new device map from the old device map */
2533 map = xmap_dereference(dev_maps->attr_map[tci]);
2534 RCU_INIT_POINTER(new_dev_maps->attr_map[tci], map);
2538 /* Must be called under cpus_read_lock */
2539 int __netif_set_xps_queue(struct net_device *dev, const unsigned long *mask,
2540 u16 index, enum xps_map_type type)
2542 struct xps_dev_maps *dev_maps, *new_dev_maps = NULL, *old_dev_maps = NULL;
2543 const unsigned long *online_mask = NULL;
2544 bool active = false, copy = false;
2545 int i, j, tci, numa_node_id = -2;
2546 int maps_sz, num_tc = 1, tc = 0;
2547 struct xps_map *map, *new_map;
2548 unsigned int nr_ids;
2550 WARN_ON_ONCE(index >= dev->num_tx_queues);
2553 /* Do not allow XPS on subordinate device directly */
2554 num_tc = dev->num_tc;
2558 /* If queue belongs to subordinate dev use its map */
2559 dev = netdev_get_tx_queue(dev, index)->sb_dev ? : dev;
2561 tc = netdev_txq_to_tc(dev, index);
2566 mutex_lock(&xps_map_mutex);
2568 dev_maps = xmap_dereference(dev->xps_maps[type]);
2569 if (type == XPS_RXQS) {
2570 maps_sz = XPS_RXQ_DEV_MAPS_SIZE(num_tc, dev->num_rx_queues);
2571 nr_ids = dev->num_rx_queues;
2573 maps_sz = XPS_CPU_DEV_MAPS_SIZE(num_tc);
2574 if (num_possible_cpus() > 1)
2575 online_mask = cpumask_bits(cpu_online_mask);
2576 nr_ids = nr_cpu_ids;
2579 if (maps_sz < L1_CACHE_BYTES)
2580 maps_sz = L1_CACHE_BYTES;
2582 /* The old dev_maps could be larger or smaller than the one we're
2583 * setting up now, as dev->num_tc or nr_ids could have been updated in
2584 * between. We could try to be smart, but let's be safe instead and only
2585 * copy foreign traffic classes if the two map sizes match.
2588 dev_maps->num_tc == num_tc && dev_maps->nr_ids == nr_ids)
2591 /* allocate memory for queue storage */
2592 for (j = -1; j = netif_attrmask_next_and(j, online_mask, mask, nr_ids),
2594 if (!new_dev_maps) {
2595 new_dev_maps = kzalloc(maps_sz, GFP_KERNEL);
2596 if (!new_dev_maps) {
2597 mutex_unlock(&xps_map_mutex);
2601 new_dev_maps->nr_ids = nr_ids;
2602 new_dev_maps->num_tc = num_tc;
2605 tci = j * num_tc + tc;
2606 map = copy ? xmap_dereference(dev_maps->attr_map[tci]) : NULL;
2608 map = expand_xps_map(map, j, index, type == XPS_RXQS);
2612 RCU_INIT_POINTER(new_dev_maps->attr_map[tci], map);
2616 goto out_no_new_maps;
2619 /* Increment static keys at most once per type */
2620 static_key_slow_inc_cpuslocked(&xps_needed);
2621 if (type == XPS_RXQS)
2622 static_key_slow_inc_cpuslocked(&xps_rxqs_needed);
2625 for (j = 0; j < nr_ids; j++) {
2626 bool skip_tc = false;
2628 tci = j * num_tc + tc;
2629 if (netif_attr_test_mask(j, mask, nr_ids) &&
2630 netif_attr_test_online(j, online_mask, nr_ids)) {
2631 /* add tx-queue to CPU/rx-queue maps */
2636 map = xmap_dereference(new_dev_maps->attr_map[tci]);
2637 while ((pos < map->len) && (map->queues[pos] != index))
2640 if (pos == map->len)
2641 map->queues[map->len++] = index;
2643 if (type == XPS_CPUS) {
2644 if (numa_node_id == -2)
2645 numa_node_id = cpu_to_node(j);
2646 else if (numa_node_id != cpu_to_node(j))
2653 xps_copy_dev_maps(dev_maps, new_dev_maps, j, tc,
2657 rcu_assign_pointer(dev->xps_maps[type], new_dev_maps);
2659 /* Cleanup old maps */
2661 goto out_no_old_maps;
2663 for (j = 0; j < dev_maps->nr_ids; j++) {
2664 for (i = num_tc, tci = j * dev_maps->num_tc; i--; tci++) {
2665 map = xmap_dereference(dev_maps->attr_map[tci]);
2670 new_map = xmap_dereference(new_dev_maps->attr_map[tci]);
2675 RCU_INIT_POINTER(dev_maps->attr_map[tci], NULL);
2676 kfree_rcu(map, rcu);
2680 old_dev_maps = dev_maps;
2683 dev_maps = new_dev_maps;
2687 if (type == XPS_CPUS)
2688 /* update Tx queue numa node */
2689 netdev_queue_numa_node_write(netdev_get_tx_queue(dev, index),
2690 (numa_node_id >= 0) ?
2691 numa_node_id : NUMA_NO_NODE);
2696 /* removes tx-queue from unused CPUs/rx-queues */
2697 for (j = 0; j < dev_maps->nr_ids; j++) {
2698 tci = j * dev_maps->num_tc;
2700 for (i = 0; i < dev_maps->num_tc; i++, tci++) {
2702 netif_attr_test_mask(j, mask, dev_maps->nr_ids) &&
2703 netif_attr_test_online(j, online_mask, dev_maps->nr_ids))
2706 active |= remove_xps_queue(dev_maps,
2707 copy ? old_dev_maps : NULL,
2713 kfree_rcu(old_dev_maps, rcu);
2715 /* free map if not active */
2717 reset_xps_maps(dev, dev_maps, type);
2720 mutex_unlock(&xps_map_mutex);
2724 /* remove any maps that we added */
2725 for (j = 0; j < nr_ids; j++) {
2726 for (i = num_tc, tci = j * num_tc; i--; tci++) {
2727 new_map = xmap_dereference(new_dev_maps->attr_map[tci]);
2729 xmap_dereference(dev_maps->attr_map[tci]) :
2731 if (new_map && new_map != map)
2736 mutex_unlock(&xps_map_mutex);
2738 kfree(new_dev_maps);
2741 EXPORT_SYMBOL_GPL(__netif_set_xps_queue);
2743 int netif_set_xps_queue(struct net_device *dev, const struct cpumask *mask,
2749 ret = __netif_set_xps_queue(dev, cpumask_bits(mask), index, XPS_CPUS);
2754 EXPORT_SYMBOL(netif_set_xps_queue);
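/* Illustrative sketch (not part of this file): a driver pinning Tx queue i
 * to CPU i with XPS after allocating its queues.  The 1:1 queue-to-CPU
 * layout is an assumption of the example, not a requirement; errors from
 * netif_set_xps_queue() are ignored here for brevity.
 */
static void my_setup_xps(struct net_device *dev)
{
	unsigned int i;

	for (i = 0; i < dev->real_num_tx_queues && i < nr_cpu_ids; i++)
		netif_set_xps_queue(dev, cpumask_of(i), i);
}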
2757 static void netdev_unbind_all_sb_channels(struct net_device *dev)
2759 struct netdev_queue *txq = &dev->_tx[dev->num_tx_queues];
2761 /* Unbind any subordinate channels */
2762 while (txq-- != &dev->_tx[0]) {
2764 netdev_unbind_sb_channel(dev, txq->sb_dev);
2768 void netdev_reset_tc(struct net_device *dev)
2771 netif_reset_xps_queues_gt(dev, 0);
2773 netdev_unbind_all_sb_channels(dev);
2775 /* Reset TC configuration of device */
2777 memset(dev->tc_to_txq, 0, sizeof(dev->tc_to_txq));
2778 memset(dev->prio_tc_map, 0, sizeof(dev->prio_tc_map));
2780 EXPORT_SYMBOL(netdev_reset_tc);
2782 int netdev_set_tc_queue(struct net_device *dev, u8 tc, u16 count, u16 offset)
2784 if (tc >= dev->num_tc)
2788 netif_reset_xps_queues(dev, offset, count);
2790 dev->tc_to_txq[tc].count = count;
2791 dev->tc_to_txq[tc].offset = offset;
2794 EXPORT_SYMBOL(netdev_set_tc_queue);
2796 int netdev_set_num_tc(struct net_device *dev, u8 num_tc)
2798 if (num_tc > TC_MAX_QUEUE)
2802 netif_reset_xps_queues_gt(dev, 0);
2804 netdev_unbind_all_sb_channels(dev);
2806 dev->num_tc = num_tc;
2809 EXPORT_SYMBOL(netdev_set_num_tc);
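/* Illustrative sketch (not part of this file): carving the Tx queues into
 * two traffic classes, e.g. in response to an mqprio offload request.
 * The queue counts and priority map are invented; rtnl_lock is assumed held.
 */
static int my_setup_tc(struct net_device *dev)
{
	int err = netdev_set_num_tc(dev, 2);

	if (err)
		return err;
	netdev_set_tc_queue(dev, 0, 4, 0);	/* tc 0: 4 queues at offset 0 */
	netdev_set_tc_queue(dev, 1, 4, 4);	/* tc 1: 4 queues at offset 4 */
	netdev_set_prio_tc_map(dev, 3, 1);	/* priority 3 -> tc 1 */
	return 0;
}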
2811 void netdev_unbind_sb_channel(struct net_device *dev,
2812 struct net_device *sb_dev)
2814 struct netdev_queue *txq = &dev->_tx[dev->num_tx_queues];
2817 netif_reset_xps_queues_gt(sb_dev, 0);
2819 memset(sb_dev->tc_to_txq, 0, sizeof(sb_dev->tc_to_txq));
2820 memset(sb_dev->prio_tc_map, 0, sizeof(sb_dev->prio_tc_map));
2822 while (txq-- != &dev->_tx[0]) {
2823 if (txq->sb_dev == sb_dev)
2827 EXPORT_SYMBOL(netdev_unbind_sb_channel);
2829 int netdev_bind_sb_channel_queue(struct net_device *dev,
2830 struct net_device *sb_dev,
2831 u8 tc, u16 count, u16 offset)
2833 /* Make certain the sb_dev and dev are already configured */
2834 if (sb_dev->num_tc >= 0 || tc >= dev->num_tc)
2837 /* We cannot hand out queues we don't have */
2838 if ((offset + count) > dev->real_num_tx_queues)
2841 /* Record the mapping */
2842 sb_dev->tc_to_txq[tc].count = count;
2843 sb_dev->tc_to_txq[tc].offset = offset;
2845 /* Provide a way for Tx queue to find the tc_to_txq map or
2846 * XPS map for itself.
2849 netdev_get_tx_queue(dev, count + offset)->sb_dev = sb_dev;
2853 EXPORT_SYMBOL(netdev_bind_sb_channel_queue);
2855 int netdev_set_sb_channel(struct net_device *dev, u16 channel)
2857 /* Do not use a multiqueue device to represent a subordinate channel */
2858 if (netif_is_multiqueue(dev))
2861 /* We allow channels 1 - 32767 to be used for subordinate channels.
2862 * Channel 0 is meant to be "native" mode and used only to represent
2863 * the main root device. We allow writing 0 to reset the device back
2864 * to normal mode after being used as a subordinate channel.
2866 if (channel > S16_MAX)
2869 dev->num_tc = -channel;
2873 EXPORT_SYMBOL(netdev_set_sb_channel);
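/* Illustrative sketch (not part of this file): wiring an upper device that
 * is offloaded onto a lower device (macvlan-offload style) as a subordinate
 * channel: tag the upper device with a channel id, then hand it a slice of
 * the lower device's Tx queues.  Names and the queue slice are invented.
 */
static int my_bind_offload(struct net_device *lower, struct net_device *upper,
			   u16 channel, u16 qcount, u16 qoffset)
{
	int err = netdev_set_sb_channel(upper, channel);

	if (err)
		return err;
	/* tc 0 of the subordinate device uses qcount queues at qoffset */
	return netdev_bind_sb_channel_queue(lower, upper, 0, qcount, qoffset);
}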
2876 * Routine to help set real_num_tx_queues. To avoid skbs mapped to queues
2877 * greater than real_num_tx_queues, stale skbs on the qdisc must be flushed.
2879 int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq)
2884 disabling = txq < dev->real_num_tx_queues;
2886 if (txq < 1 || txq > dev->num_tx_queues)
2889 if (dev->reg_state == NETREG_REGISTERED ||
2890 dev->reg_state == NETREG_UNREGISTERING) {
2893 rc = netdev_queue_update_kobjects(dev, dev->real_num_tx_queues,
2899 netif_setup_tc(dev, txq);
2901 dev_qdisc_change_real_num_tx(dev, txq);
2903 dev->real_num_tx_queues = txq;
2907 qdisc_reset_all_tx_gt(dev, txq);
2909 netif_reset_xps_queues_gt(dev, txq);
2913 dev->real_num_tx_queues = txq;
2918 EXPORT_SYMBOL(netif_set_real_num_tx_queues);
2922 * netif_set_real_num_rx_queues - set actual number of RX queues used
2923 * @dev: Network device
2924 * @rxq: Actual number of RX queues
2926 * This must be called either with the rtnl_lock held or before
2927 * registration of the net device. Returns 0 on success, or a
2928 * negative error code. If called before registration, it always
2931 int netif_set_real_num_rx_queues(struct net_device *dev, unsigned int rxq)
2935 if (rxq < 1 || rxq > dev->num_rx_queues)
2938 if (dev->reg_state == NETREG_REGISTERED) {
2941 rc = net_rx_queue_update_kobjects(dev, dev->real_num_rx_queues,
2947 dev->real_num_rx_queues = rxq;
2950 EXPORT_SYMBOL(netif_set_real_num_rx_queues);
2954 * netif_set_real_num_queues - set actual number of RX and TX queues used
2955 * @dev: Network device
2956 * @txq: Actual number of TX queues
2957 * @rxq: Actual number of RX queues
2959 * Set the real number of both TX and RX queues.
2960 * Does nothing if the number of queues is already correct.
2962 int netif_set_real_num_queues(struct net_device *dev,
2963 unsigned int txq, unsigned int rxq)
2965 unsigned int old_rxq = dev->real_num_rx_queues;
2968 if (txq < 1 || txq > dev->num_tx_queues ||
2969 rxq < 1 || rxq > dev->num_rx_queues)
2972 /* Start from increases, so the error path only does decreases -
2973 * decreases can't fail.
2975 if (rxq > dev->real_num_rx_queues) {
2976 err = netif_set_real_num_rx_queues(dev, rxq);
2980 if (txq > dev->real_num_tx_queues) {
2981 err = netif_set_real_num_tx_queues(dev, txq);
2985 if (rxq < dev->real_num_rx_queues)
2986 WARN_ON(netif_set_real_num_rx_queues(dev, rxq));
2987 if (txq < dev->real_num_tx_queues)
2988 WARN_ON(netif_set_real_num_tx_queues(dev, txq));
2992 WARN_ON(netif_set_real_num_rx_queues(dev, old_rxq));
2995 EXPORT_SYMBOL(netif_set_real_num_queues);
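/* Illustrative sketch (not part of this file): after (re)negotiating how
 * many hardware channels are usable, a driver shrinks or grows both active
 * queue counts in one call.  "hw_channels" is invented; the caller is
 * assumed to hold rtnl_lock or to run before register_netdev().
 */
static int my_set_channels(struct net_device *dev, unsigned int hw_channels)
{
	return netif_set_real_num_queues(dev, hw_channels, hw_channels);
}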
2998 * netif_set_tso_max_size() - set the max size of TSO frames supported
2999 * @dev: netdev to update
3000 * @size: max skb->len of a TSO frame
3002 * Set the limit on the size of TSO super-frames the device can handle.
3003 * Unless explicitly set the stack will assume the value of
3004 * %GSO_LEGACY_MAX_SIZE.
3006 void netif_set_tso_max_size(struct net_device *dev, unsigned int size)
3008 dev->tso_max_size = min(GSO_MAX_SIZE, size);
3009 if (size < READ_ONCE(dev->gso_max_size))
3010 netif_set_gso_max_size(dev, size);
3011 if (size < READ_ONCE(dev->gso_ipv4_max_size))
3012 netif_set_gso_ipv4_max_size(dev, size);
3014 EXPORT_SYMBOL(netif_set_tso_max_size);
3017 * netif_set_tso_max_segs() - set the max number of segs supported for TSO
3018 * @dev: netdev to update
3019 * @segs: max number of TCP segments
3021 * Set the limit on the number of TCP segments the device can generate from
3022 * a single TSO super-frame.
3023 * Unless explicitly set the stack will assume the value of %GSO_MAX_SEGS.
3025 void netif_set_tso_max_segs(struct net_device *dev, unsigned int segs)
3027 dev->tso_max_segs = segs;
3028 if (segs < READ_ONCE(dev->gso_max_segs))
3029 netif_set_gso_max_segs(dev, segs);
3031 EXPORT_SYMBOL(netif_set_tso_max_segs);
3034 * netif_inherit_tso_max() - copy all TSO limits from a lower device to an upper
3035 * @to: netdev to update
3036 * @from: netdev from which to copy the limits
3038 void netif_inherit_tso_max(struct net_device *to, const struct net_device *from)
3040 netif_set_tso_max_size(to, from->tso_max_size);
3041 netif_set_tso_max_segs(to, from->tso_max_segs);
3043 EXPORT_SYMBOL(netif_inherit_tso_max);
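/* Illustrative sketch (not part of this file): a driver whose DMA engine can
 * only chain 32 descriptors and 64 kB per TSO job clamps the limits at probe
 * time; an upper device (e.g. a bond) could later inherit them with
 * netif_inherit_tso_max().  The numeric limits are invented.
 */
static void my_probe_tso_limits(struct net_device *dev)
{
	netif_set_tso_max_size(dev, 64 * 1024);
	netif_set_tso_max_segs(dev, 32);
}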
3046 * netif_get_num_default_rss_queues - default number of RSS queues
3048 * Default value is the number of physical cores if there are only 1 or 2, or
3049 * divided by 2 if there are more.
3051 int netif_get_num_default_rss_queues(void)
3056 if (unlikely(is_kdump_kernel() || !zalloc_cpumask_var(&cpus, GFP_KERNEL)))
3059 cpumask_copy(cpus, cpu_online_mask);
3060 for_each_cpu(cpu, cpus) {
3062 cpumask_andnot(cpus, cpus, topology_sibling_cpumask(cpu));
3064 free_cpumask_var(cpus);
3066 return count > 2 ? DIV_ROUND_UP(count, 2) : count;
3068 EXPORT_SYMBOL(netif_get_num_default_rss_queues);
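/* Illustrative sketch (not part of this file): capping a driver's RSS
 * channel count by both its hardware maximum and the heuristic above.
 * "MY_HW_MAX_QUEUES" is an invented constant.
 */
#define MY_HW_MAX_QUEUES 16	/* invented hardware limit */

static unsigned int my_pick_rss_queues(void)
{
	return min_t(unsigned int, MY_HW_MAX_QUEUES,
		     netif_get_num_default_rss_queues());
}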
3070 static void __netif_reschedule(struct Qdisc *q)
3072 struct softnet_data *sd;
3073 unsigned long flags;
3075 local_irq_save(flags);
3076 sd = this_cpu_ptr(&softnet_data);
3077 q->next_sched = NULL;
3078 *sd->output_queue_tailp = q;
3079 sd->output_queue_tailp = &q->next_sched;
3080 raise_softirq_irqoff(NET_TX_SOFTIRQ);
3081 local_irq_restore(flags);
3084 void __netif_schedule(struct Qdisc *q)
3086 if (!test_and_set_bit(__QDISC_STATE_SCHED, &q->state))
3087 __netif_reschedule(q);
3089 EXPORT_SYMBOL(__netif_schedule);
3091 struct dev_kfree_skb_cb {
3092 enum skb_drop_reason reason;
3095 static struct dev_kfree_skb_cb *get_kfree_skb_cb(const struct sk_buff *skb)
3097 return (struct dev_kfree_skb_cb *)skb->cb;
3100 void netif_schedule_queue(struct netdev_queue *txq)
3103 if (!netif_xmit_stopped(txq)) {
3104 struct Qdisc *q = rcu_dereference(txq->qdisc);
3106 __netif_schedule(q);
3110 EXPORT_SYMBOL(netif_schedule_queue);
3112 void netif_tx_wake_queue(struct netdev_queue *dev_queue)
3114 if (test_and_clear_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state)) {
3118 q = rcu_dereference(dev_queue->qdisc);
3119 __netif_schedule(q);
3123 EXPORT_SYMBOL(netif_tx_wake_queue);
3125 void dev_kfree_skb_irq_reason(struct sk_buff *skb, enum skb_drop_reason reason)
3127 unsigned long flags;
3132 if (likely(refcount_read(&skb->users) == 1)) {
3134 refcount_set(&skb->users, 0);
3135 } else if (likely(!refcount_dec_and_test(&skb->users))) {
3138 get_kfree_skb_cb(skb)->reason = reason;
3139 local_irq_save(flags);
3140 skb->next = __this_cpu_read(softnet_data.completion_queue);
3141 __this_cpu_write(softnet_data.completion_queue, skb);
3142 raise_softirq_irqoff(NET_TX_SOFTIRQ);
3143 local_irq_restore(flags);
3145 EXPORT_SYMBOL(dev_kfree_skb_irq_reason);
3147 void dev_kfree_skb_any_reason(struct sk_buff *skb, enum skb_drop_reason reason)
3149 if (in_hardirq() || irqs_disabled())
3150 dev_kfree_skb_irq_reason(skb, reason);
3152 kfree_skb_reason(skb, reason);
3154 EXPORT_SYMBOL(dev_kfree_skb_any_reason);
3158 * netif_device_detach - mark device as removed
3159 * @dev: network device
3161 * Mark device as removed from system and therefore no longer available.
3163 void netif_device_detach(struct net_device *dev)
3165 if (test_and_clear_bit(__LINK_STATE_PRESENT, &dev->state) &&
3166 netif_running(dev)) {
3167 netif_tx_stop_all_queues(dev);
3170 EXPORT_SYMBOL(netif_device_detach);
3173 * netif_device_attach - mark device as attached
3174 * @dev: network device
3176 * Mark device as attached to the system and restart queues if needed.
3178 void netif_device_attach(struct net_device *dev)
3180 if (!test_and_set_bit(__LINK_STATE_PRESENT, &dev->state) &&
3181 netif_running(dev)) {
3182 netif_tx_wake_all_queues(dev);
3183 __netdev_watchdog_up(dev);
3186 EXPORT_SYMBOL(netif_device_attach);
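/* Illustrative sketch (not part of this file): suspend/resume hooks of a
 * hypothetical driver using the helpers above so the stack stops queueing
 * to hardware that is powered down.  Device-specific power handling is
 * omitted; the "my_*" names are invented.
 */
static int my_suspend(struct device *d)
{
	struct net_device *dev = dev_get_drvdata(d);

	netif_device_detach(dev);
	return 0;
}

static int my_resume(struct device *d)
{
	struct net_device *dev = dev_get_drvdata(d);

	netif_device_attach(dev);
	return 0;
}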
3189 * Returns a Tx hash based on the given packet descriptor and a Tx queue
3190 * count to be used as a distribution range.
3192 static u16 skb_tx_hash(const struct net_device *dev,
3193 const struct net_device *sb_dev,
3194 struct sk_buff *skb)
3198 u16 qcount = dev->real_num_tx_queues;
3201 u8 tc = netdev_get_prio_tc_map(dev, skb->priority);
3203 qoffset = sb_dev->tc_to_txq[tc].offset;
3204 qcount = sb_dev->tc_to_txq[tc].count;
3205 if (unlikely(!qcount)) {
3206 net_warn_ratelimited("%s: invalid qcount, qoffset %u for tc %u\n",
3207 sb_dev->name, qoffset, tc);
3209 qcount = dev->real_num_tx_queues;
3213 if (skb_rx_queue_recorded(skb)) {
3214 DEBUG_NET_WARN_ON_ONCE(qcount == 0);
3215 hash = skb_get_rx_queue(skb);
3216 if (hash >= qoffset)
3218 while (unlikely(hash >= qcount))
3220 return hash + qoffset;
3223 return (u16) reciprocal_scale(skb_get_hash(skb), qcount) + qoffset;
3226 void skb_warn_bad_offload(const struct sk_buff *skb)
3228 static const netdev_features_t null_features;
3229 struct net_device *dev = skb->dev;
3230 const char *name = "";
3232 if (!net_ratelimit())
3236 if (dev->dev.parent)
3237 name = dev_driver_string(dev->dev.parent);
3239 name = netdev_name(dev);
3241 skb_dump(KERN_WARNING, skb, false);
3242 WARN(1, "%s: caps=(%pNF, %pNF)\n",
3243 name, dev ? &dev->features : &null_features,
3244 skb->sk ? &skb->sk->sk_route_caps : &null_features);
3248 * Invalidate hardware checksum when packet is to be mangled, and
3249 * complete checksum manually on outgoing path.
3251 int skb_checksum_help(struct sk_buff *skb)
3254 int ret = 0, offset;
3256 if (skb->ip_summed == CHECKSUM_COMPLETE)
3257 goto out_set_summed;
3259 if (unlikely(skb_is_gso(skb))) {
3260 skb_warn_bad_offload(skb);
3264 /* Before computing a checksum, we should make sure no frag could
3265 * be modified by an external entity : checksum could be wrong.
3267 if (skb_has_shared_frag(skb)) {
3268 ret = __skb_linearize(skb);
3273 offset = skb_checksum_start_offset(skb);
3275 if (unlikely(offset >= skb_headlen(skb))) {
3276 DO_ONCE_LITE(skb_dump, KERN_ERR, skb, false);
3277 WARN_ONCE(true, "offset (%d) >= skb_headlen() (%u)\n",
3278 offset, skb_headlen(skb));
3281 csum = skb_checksum(skb, offset, skb->len - offset, 0);
3283 offset += skb->csum_offset;
3284 if (unlikely(offset + sizeof(__sum16) > skb_headlen(skb))) {
3285 DO_ONCE_LITE(skb_dump, KERN_ERR, skb, false);
3286 WARN_ONCE(true, "offset+2 (%zu) > skb_headlen() (%u)\n",
3287 offset + sizeof(__sum16), skb_headlen(skb));
3290 ret = skb_ensure_writable(skb, offset + sizeof(__sum16));
3294 *(__sum16 *)(skb->data + offset) = csum_fold(csum) ?: CSUM_MANGLED_0;
3296 skb->ip_summed = CHECKSUM_NONE;
3300 EXPORT_SYMBOL(skb_checksum_help);
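/* Illustrative sketch (not part of this file): a driver whose hardware can
 * only checksum a few protocols falls back to skb_checksum_help() for
 * everything else before handing the frame to DMA.  The "my_*" helpers and
 * the capability policy are invented.
 */
static bool my_hw_can_csum(const struct sk_buff *skb)
{
	/* invented capability check: offload only TCP and UDP checksums */
	return skb->csum_offset == offsetof(struct tcphdr, check) ||
	       skb->csum_offset == offsetof(struct udphdr, check);
}

static int my_tx_csum(struct sk_buff *skb)
{
	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return 0;		/* nothing left to offload */
	if (my_hw_can_csum(skb))
		return 0;		/* hardware fills in the checksum */
	return skb_checksum_help(skb);	/* compute it in software instead */
}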
3302 int skb_crc32c_csum_help(struct sk_buff *skb)
3305 int ret = 0, offset, start;
3307 if (skb->ip_summed != CHECKSUM_PARTIAL)
3310 if (unlikely(skb_is_gso(skb)))
3313 /* Before computing a checksum, we should make sure no frag could
3314 * be modified by an external entity : checksum could be wrong.
3316 if (unlikely(skb_has_shared_frag(skb))) {
3317 ret = __skb_linearize(skb);
3321 start = skb_checksum_start_offset(skb);
3322 offset = start + offsetof(struct sctphdr, checksum);
3323 if (WARN_ON_ONCE(offset >= skb_headlen(skb))) {
3328 ret = skb_ensure_writable(skb, offset + sizeof(__le32));
3332 crc32c_csum = cpu_to_le32(~__skb_checksum(skb, start,
3333 skb->len - start, ~(__u32)0,
3335 *(__le32 *)(skb->data + offset) = crc32c_csum;
3336 skb_reset_csum_not_inet(skb);
3341 __be16 skb_network_protocol(struct sk_buff *skb, int *depth)
3343 __be16 type = skb->protocol;
3345 /* Tunnel gso handlers can set protocol to ethernet. */
3346 if (type == htons(ETH_P_TEB)) {
3349 if (unlikely(!pskb_may_pull(skb, sizeof(struct ethhdr))))
3352 eth = (struct ethhdr *)skb->data;
3353 type = eth->h_proto;
3356 return vlan_get_protocol_and_depth(skb, type, depth);
3360 /* Take action when hardware reception checksum errors are detected. */
3362 static void do_netdev_rx_csum_fault(struct net_device *dev, struct sk_buff *skb)
3364 netdev_err(dev, "hw csum failure\n");
3365 skb_dump(KERN_ERR, skb, true);
3369 void netdev_rx_csum_fault(struct net_device *dev, struct sk_buff *skb)
3371 DO_ONCE_LITE(do_netdev_rx_csum_fault, dev, skb);
3373 EXPORT_SYMBOL(netdev_rx_csum_fault);
3376 /* XXX: check that highmem exists at all on the given machine. */
3377 static int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
3379 #ifdef CONFIG_HIGHMEM
3382 if (!(dev->features & NETIF_F_HIGHDMA)) {
3383 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
3384 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3386 if (PageHighMem(skb_frag_page(frag)))
3394 /* If MPLS offload request, verify we are testing hardware MPLS features
3395 * instead of standard features for the netdev.
3397 #if IS_ENABLED(CONFIG_NET_MPLS_GSO)
3398 static netdev_features_t net_mpls_features(struct sk_buff *skb,
3399 netdev_features_t features,
3402 if (eth_p_mpls(type))
3403 features &= skb->dev->mpls_features;
3408 static netdev_features_t net_mpls_features(struct sk_buff *skb,
3409 netdev_features_t features,
3416 static netdev_features_t harmonize_features(struct sk_buff *skb,
3417 netdev_features_t features)
3421 type = skb_network_protocol(skb, NULL);
3422 features = net_mpls_features(skb, features, type);
3424 if (skb->ip_summed != CHECKSUM_NONE &&
3425 !can_checksum_protocol(features, type)) {
3426 features &= ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
3428 if (illegal_highdma(skb->dev, skb))
3429 features &= ~NETIF_F_SG;
3434 netdev_features_t passthru_features_check(struct sk_buff *skb,
3435 struct net_device *dev,
3436 netdev_features_t features)
3440 EXPORT_SYMBOL(passthru_features_check);
3442 static netdev_features_t dflt_features_check(struct sk_buff *skb,
3443 struct net_device *dev,
3444 netdev_features_t features)
3446 return vlan_features_check(skb, features);
3449 static netdev_features_t gso_features_check(const struct sk_buff *skb,
3450 struct net_device *dev,
3451 netdev_features_t features)
3453 u16 gso_segs = skb_shinfo(skb)->gso_segs;
3455 if (gso_segs > READ_ONCE(dev->gso_max_segs))
3456 return features & ~NETIF_F_GSO_MASK;
3458 if (unlikely(skb->len >= READ_ONCE(dev->gso_max_size)))
3459 return features & ~NETIF_F_GSO_MASK;
3461 if (!skb_shinfo(skb)->gso_type) {
3462 skb_warn_bad_offload(skb);
3463 return features & ~NETIF_F_GSO_MASK;
3466 /* Support for GSO partial features requires software
3467 * intervention before we can actually process the packets
3468 * so we need to strip support for any partial features now
3469 * and we can pull them back in after we have partially
3470 * segmented the frame.
3472 if (!(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL))
3473 features &= ~dev->gso_partial_features;
3475 /* Make sure to clear the IPv4 ID mangling feature if the
3476 * IPv4 header has the potential to be fragmented.
3478 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4) {
3479 struct iphdr *iph = skb->encapsulation ?
3480 inner_ip_hdr(skb) : ip_hdr(skb);
3482 if (!(iph->frag_off & htons(IP_DF)))
3483 features &= ~NETIF_F_TSO_MANGLEID;
3489 netdev_features_t netif_skb_features(struct sk_buff *skb)
3491 struct net_device *dev = skb->dev;
3492 netdev_features_t features = dev->features;
3494 if (skb_is_gso(skb))
3495 features = gso_features_check(skb, dev, features);
3497 /* If encapsulation offload request, verify we are testing
3498 * hardware encapsulation features instead of standard
3499 * features for the netdev
3501 if (skb->encapsulation)
3502 features &= dev->hw_enc_features;
3504 if (skb_vlan_tagged(skb))
3505 features = netdev_intersect_features(features,
3506 dev->vlan_features |
3507 NETIF_F_HW_VLAN_CTAG_TX |
3508 NETIF_F_HW_VLAN_STAG_TX);
3510 if (dev->netdev_ops->ndo_features_check)
3511 features &= dev->netdev_ops->ndo_features_check(skb, dev,
3514 features &= dflt_features_check(skb, dev, features);
3516 return harmonize_features(skb, features);
3518 EXPORT_SYMBOL(netif_skb_features);
3520 static int xmit_one(struct sk_buff *skb, struct net_device *dev,
3521 struct netdev_queue *txq, bool more)
3526 if (dev_nit_active(dev))
3527 dev_queue_xmit_nit(skb, dev);
3530 trace_net_dev_start_xmit(skb, dev);
3531 rc = netdev_start_xmit(skb, dev, txq, more);
3532 trace_net_dev_xmit(skb, rc, dev, len);
3537 struct sk_buff *dev_hard_start_xmit(struct sk_buff *first, struct net_device *dev,
3538 struct netdev_queue *txq, int *ret)
3540 struct sk_buff *skb = first;
3541 int rc = NETDEV_TX_OK;
3544 struct sk_buff *next = skb->next;
3546 skb_mark_not_on_list(skb);
3547 rc = xmit_one(skb, dev, txq, next != NULL);
3548 if (unlikely(!dev_xmit_complete(rc))) {
3554 if (netif_tx_queue_stopped(txq) && skb) {
3555 rc = NETDEV_TX_BUSY;
3565 static struct sk_buff *validate_xmit_vlan(struct sk_buff *skb,
3566 netdev_features_t features)
3568 if (skb_vlan_tag_present(skb) &&
3569 !vlan_hw_offload_capable(features, skb->vlan_proto))
3570 skb = __vlan_hwaccel_push_inside(skb);
3574 int skb_csum_hwoffload_help(struct sk_buff *skb,
3575 const netdev_features_t features)
3577 if (unlikely(skb_csum_is_sctp(skb)))
3578 return !!(features & NETIF_F_SCTP_CRC) ? 0 :
3579 skb_crc32c_csum_help(skb);
3581 if (features & NETIF_F_HW_CSUM)
3584 if (features & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)) {
3585 switch (skb->csum_offset) {
3586 case offsetof(struct tcphdr, check):
3587 case offsetof(struct udphdr, check):
3592 return skb_checksum_help(skb);
3594 EXPORT_SYMBOL(skb_csum_hwoffload_help);
3596 static struct sk_buff *validate_xmit_skb(struct sk_buff *skb, struct net_device *dev, bool *again)
3598 netdev_features_t features;
3600 features = netif_skb_features(skb);
3601 skb = validate_xmit_vlan(skb, features);
3605 skb = sk_validate_xmit_skb(skb, dev);
3609 if (netif_needs_gso(skb, features)) {
3610 struct sk_buff *segs;
3612 segs = skb_gso_segment(skb, features);
3620 if (skb_needs_linearize(skb, features) &&
3621 __skb_linearize(skb))
3624 /* If packet is not checksummed and device does not
3625 * support checksumming for this protocol, complete
3626 * checksumming here.
3628 if (skb->ip_summed == CHECKSUM_PARTIAL) {
3629 if (skb->encapsulation)
3630 skb_set_inner_transport_header(skb,
3631 skb_checksum_start_offset(skb));
3633 skb_set_transport_header(skb,
3634 skb_checksum_start_offset(skb));
3635 if (skb_csum_hwoffload_help(skb, features))
3640 skb = validate_xmit_xfrm(skb, features, again);
3647 dev_core_stats_tx_dropped_inc(dev);
3651 struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev, bool *again)
3653 struct sk_buff *next, *head = NULL, *tail;
3655 for (; skb != NULL; skb = next) {
3657 skb_mark_not_on_list(skb);
3659 /* in case skb won't be segmented, point to itself */
3662 skb = validate_xmit_skb(skb, dev, again);
3670 /* If skb was segmented, skb->prev points to
3671 * the last segment. If not, it still contains skb.
3677 EXPORT_SYMBOL_GPL(validate_xmit_skb_list);
3679 static void qdisc_pkt_len_init(struct sk_buff *skb)
3681 const struct skb_shared_info *shinfo = skb_shinfo(skb);
3683 qdisc_skb_cb(skb)->pkt_len = skb->len;
3685 /* To get more precise estimation of bytes sent on wire,
3686 * we add to pkt_len the headers size of all segments
3688 if (shinfo->gso_size && skb_transport_header_was_set(skb)) {
3689 u16 gso_segs = shinfo->gso_segs;
3690 unsigned int hdr_len;
3692 /* mac layer + network layer */
3693 hdr_len = skb_transport_offset(skb);
3695 /* + transport layer */
3696 if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))) {
3697 const struct tcphdr *th;
3698 struct tcphdr _tcphdr;
3700 th = skb_header_pointer(skb, hdr_len,
3701 sizeof(_tcphdr), &_tcphdr);
3703 hdr_len += __tcp_hdrlen(th);
3705 struct udphdr _udphdr;
3707 if (skb_header_pointer(skb, hdr_len,
3708 sizeof(_udphdr), &_udphdr))
3709 hdr_len += sizeof(struct udphdr);
3712 if (shinfo->gso_type & SKB_GSO_DODGY)
3713 gso_segs = DIV_ROUND_UP(skb->len - hdr_len,
3716 qdisc_skb_cb(skb)->pkt_len += (gso_segs - 1) * hdr_len;
3720 static int dev_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *q,
3721 struct sk_buff **to_free,
3722 struct netdev_queue *txq)
3726 rc = q->enqueue(skb, q, to_free) & NET_XMIT_MASK;
3727 if (rc == NET_XMIT_SUCCESS)
3728 trace_qdisc_enqueue(q, txq, skb);
3732 static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
3733 struct net_device *dev,
3734 struct netdev_queue *txq)
3736 spinlock_t *root_lock = qdisc_lock(q);
3737 struct sk_buff *to_free = NULL;
3741 qdisc_calculate_pkt_len(skb, q);
3743 tcf_set_drop_reason(skb, SKB_DROP_REASON_QDISC_DROP);
3745 if (q->flags & TCQ_F_NOLOCK) {
3746 if (q->flags & TCQ_F_CAN_BYPASS && nolock_qdisc_is_empty(q) &&
3747 qdisc_run_begin(q)) {
3748 /* Retest nolock_qdisc_is_empty() within the protection
3749 * of q->seqlock to protect from racing with requeuing.
3751 if (unlikely(!nolock_qdisc_is_empty(q))) {
3752 rc = dev_qdisc_enqueue(skb, q, &to_free, txq);
3759 qdisc_bstats_cpu_update(q, skb);
3760 if (sch_direct_xmit(skb, q, dev, txq, NULL, true) &&
3761 !nolock_qdisc_is_empty(q))
3765 return NET_XMIT_SUCCESS;
3768 rc = dev_qdisc_enqueue(skb, q, &to_free, txq);
3772 if (unlikely(to_free))
3773 kfree_skb_list_reason(to_free,
3774 tcf_get_drop_reason(to_free));
3779 * Heuristic to force contended enqueues to serialize on a
3780 * separate lock before trying to get qdisc main lock.
3781 * This permits qdisc->running owner to get the lock more
3782 * often and dequeue packets faster.
3783 * On PREEMPT_RT it is possible to preempt the qdisc owner during xmit
3784 * and then other tasks will only enqueue packets. The packets will be
3785 * sent after the qdisc owner is scheduled again. To prevent this
3786 * scenario the task always serialize on the lock.
3788 contended = qdisc_is_running(q) || IS_ENABLED(CONFIG_PREEMPT_RT);
3789 if (unlikely(contended))
3790 spin_lock(&q->busylock);
3792 spin_lock(root_lock);
3793 if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) {
3794 __qdisc_drop(skb, &to_free);
3796 } else if ((q->flags & TCQ_F_CAN_BYPASS) && !qdisc_qlen(q) &&
3797 qdisc_run_begin(q)) {
3799 * This is a work-conserving queue; there are no old skbs
3800 * waiting to be sent out; and the qdisc is not running -
3801 * xmit the skb directly.
3804 qdisc_bstats_update(q, skb);
3806 if (sch_direct_xmit(skb, q, dev, txq, root_lock, true)) {
3807 if (unlikely(contended)) {
3808 spin_unlock(&q->busylock);
3815 rc = NET_XMIT_SUCCESS;
3817 rc = dev_qdisc_enqueue(skb, q, &to_free, txq);
3818 if (qdisc_run_begin(q)) {
3819 if (unlikely(contended)) {
3820 spin_unlock(&q->busylock);
3827 spin_unlock(root_lock);
3828 if (unlikely(to_free))
3829 kfree_skb_list_reason(to_free,
3830 tcf_get_drop_reason(to_free));
3831 if (unlikely(contended))
3832 spin_unlock(&q->busylock);
3836 #if IS_ENABLED(CONFIG_CGROUP_NET_PRIO)
3837 static void skb_update_prio(struct sk_buff *skb)
3839 const struct netprio_map *map;
3840 const struct sock *sk;
3841 unsigned int prioidx;
3845 map = rcu_dereference_bh(skb->dev->priomap);
3848 sk = skb_to_full_sk(skb);
3852 prioidx = sock_cgroup_prioidx(&sk->sk_cgrp_data);
3854 if (prioidx < map->priomap_len)
3855 skb->priority = map->priomap[prioidx];
3858 #define skb_update_prio(skb)
3862 * dev_loopback_xmit - loop back @skb
3863 * @net: network namespace this loopback is happening in
3864 * @sk: sk needed to be a netfilter okfn
3865 * @skb: buffer to transmit
3867 int dev_loopback_xmit(struct net *net, struct sock *sk, struct sk_buff *skb)
3869 skb_reset_mac_header(skb);
3870 __skb_pull(skb, skb_network_offset(skb));
3871 skb->pkt_type = PACKET_LOOPBACK;
3872 if (skb->ip_summed == CHECKSUM_NONE)
3873 skb->ip_summed = CHECKSUM_UNNECESSARY;
3874 DEBUG_NET_WARN_ON_ONCE(!skb_dst(skb));
3879 EXPORT_SYMBOL(dev_loopback_xmit);
3881 #ifdef CONFIG_NET_EGRESS
3882 static struct netdev_queue *
3883 netdev_tx_queue_mapping(struct net_device *dev, struct sk_buff *skb)
3885 int qm = skb_get_queue_mapping(skb);
3887 return netdev_get_tx_queue(dev, netdev_cap_txqueue(dev, qm));
3890 static bool netdev_xmit_txqueue_skipped(void)
3892 return __this_cpu_read(softnet_data.xmit.skip_txqueue);
3895 void netdev_xmit_skip_txqueue(bool skip)
3897 __this_cpu_write(softnet_data.xmit.skip_txqueue, skip);
3899 EXPORT_SYMBOL_GPL(netdev_xmit_skip_txqueue);
3900 #endif /* CONFIG_NET_EGRESS */
3902 #ifdef CONFIG_NET_XGRESS
3903 static int tc_run(struct tcx_entry *entry, struct sk_buff *skb,
3904 enum skb_drop_reason *drop_reason)
3906 int ret = TC_ACT_UNSPEC;
3907 #ifdef CONFIG_NET_CLS_ACT
3908 struct mini_Qdisc *miniq = rcu_dereference_bh(entry->miniq);
3909 struct tcf_result res;
3914 tc_skb_cb(skb)->mru = 0;
3915 tc_skb_cb(skb)->post_ct = false;
3916 tcf_set_drop_reason(skb, *drop_reason);
3918 mini_qdisc_bstats_cpu_update(miniq, skb);
3919 ret = tcf_classify(skb, miniq->block, miniq->filter_list, &res, false);
3920 /* Only tcf related quirks below. */
3923 *drop_reason = tcf_get_drop_reason(skb);
3924 mini_qdisc_qstats_cpu_drop(miniq);
3927 case TC_ACT_RECLASSIFY:
3928 skb->tc_index = TC_H_MIN(res.classid);
3931 #endif /* CONFIG_NET_CLS_ACT */
3935 static DEFINE_STATIC_KEY_FALSE(tcx_needed_key);
3939 static_branch_inc(&tcx_needed_key);
3944 static_branch_dec(&tcx_needed_key);
3947 static __always_inline enum tcx_action_base
3948 tcx_run(const struct bpf_mprog_entry *entry, struct sk_buff *skb,
3949 const bool needs_mac)
3951 const struct bpf_mprog_fp *fp;
3952 const struct bpf_prog *prog;
3956 __skb_push(skb, skb->mac_len);
3957 bpf_mprog_foreach_prog(entry, fp, prog) {
3958 bpf_compute_data_pointers(skb);
3959 ret = bpf_prog_run(prog, skb);
3960 if (ret != TCX_NEXT)
3964 __skb_pull(skb, skb->mac_len);
3965 return tcx_action_code(skb, ret);
3968 static __always_inline struct sk_buff *
3969 sch_handle_ingress(struct sk_buff *skb, struct packet_type **pt_prev, int *ret,
3970 struct net_device *orig_dev, bool *another)
3972 struct bpf_mprog_entry *entry = rcu_dereference_bh(skb->dev->tcx_ingress);
3973 enum skb_drop_reason drop_reason = SKB_DROP_REASON_TC_INGRESS;
3979 *ret = deliver_skb(skb, *pt_prev, orig_dev);
3983 qdisc_skb_cb(skb)->pkt_len = skb->len;
3984 tcx_set_ingress(skb, true);
3986 if (static_branch_unlikely(&tcx_needed_key)) {
3987 sch_ret = tcx_run(entry, skb, true);
3988 if (sch_ret != TC_ACT_UNSPEC)
3989 goto ingress_verdict;
3991 sch_ret = tc_run(tcx_entry(entry), skb, &drop_reason);
3994 case TC_ACT_REDIRECT:
3995 /* skb_mac_header check was done by BPF, so we can safely
3996 * push the L2 header back before redirecting to another
3999 __skb_push(skb, skb->mac_len);
4000 if (skb_do_redirect(skb) == -EAGAIN) {
4001 __skb_pull(skb, skb->mac_len);
4005 *ret = NET_RX_SUCCESS;
4008 kfree_skb_reason(skb, drop_reason);
4011 /* used by tc_run */
4017 case TC_ACT_CONSUMED:
4018 *ret = NET_RX_SUCCESS;
4025 static __always_inline struct sk_buff *
4026 sch_handle_egress(struct sk_buff *skb, int *ret, struct net_device *dev)
4028 struct bpf_mprog_entry *entry = rcu_dereference_bh(dev->tcx_egress);
4029 enum skb_drop_reason drop_reason = SKB_DROP_REASON_TC_EGRESS;
4035 /* qdisc_skb_cb(skb)->pkt_len & tcx_set_ingress() was
4036 * already set by the caller.
4038 if (static_branch_unlikely(&tcx_needed_key)) {
4039 sch_ret = tcx_run(entry, skb, false);
4040 if (sch_ret != TC_ACT_UNSPEC)
4041 goto egress_verdict;
4043 sch_ret = tc_run(tcx_entry(entry), skb, &drop_reason);
4046 case TC_ACT_REDIRECT:
4047 /* No need to push/pop skb's mac_header here on egress! */
4048 skb_do_redirect(skb);
4049 *ret = NET_XMIT_SUCCESS;
4052 kfree_skb_reason(skb, drop_reason);
4053 *ret = NET_XMIT_DROP;
4055 /* used by tc_run */
4061 case TC_ACT_CONSUMED:
4062 *ret = NET_XMIT_SUCCESS;
4069 static __always_inline struct sk_buff *
4070 sch_handle_ingress(struct sk_buff *skb, struct packet_type **pt_prev, int *ret,
4071 struct net_device *orig_dev, bool *another)
4076 static __always_inline struct sk_buff *
4077 sch_handle_egress(struct sk_buff *skb, int *ret, struct net_device *dev)
4081 #endif /* CONFIG_NET_XGRESS */
4084 static int __get_xps_queue_idx(struct net_device *dev, struct sk_buff *skb,
4085 struct xps_dev_maps *dev_maps, unsigned int tci)
4087 int tc = netdev_get_prio_tc_map(dev, skb->priority);
4088 struct xps_map *map;
4089 int queue_index = -1;
4091 if (tc >= dev_maps->num_tc || tci >= dev_maps->nr_ids)
4094 tci *= dev_maps->num_tc;
4097 map = rcu_dereference(dev_maps->attr_map[tci]);
4100 queue_index = map->queues[0];
4102 queue_index = map->queues[reciprocal_scale(
4103 skb_get_hash(skb), map->len)];
4104 if (unlikely(queue_index >= dev->real_num_tx_queues))
4111 static int get_xps_queue(struct net_device *dev, struct net_device *sb_dev,
4112 struct sk_buff *skb)
4115 struct xps_dev_maps *dev_maps;
4116 struct sock *sk = skb->sk;
4117 int queue_index = -1;
4119 if (!static_key_false(&xps_needed))
4123 if (!static_key_false(&xps_rxqs_needed))
4126 dev_maps = rcu_dereference(sb_dev->xps_maps[XPS_RXQS]);
4128 int tci = sk_rx_queue_get(sk);
4131 queue_index = __get_xps_queue_idx(dev, skb, dev_maps,
4136 if (queue_index < 0) {
4137 dev_maps = rcu_dereference(sb_dev->xps_maps[XPS_CPUS]);
4139 unsigned int tci = skb->sender_cpu - 1;
4141 queue_index = __get_xps_queue_idx(dev, skb, dev_maps,
4153 u16 dev_pick_tx_zero(struct net_device *dev, struct sk_buff *skb,
4154 struct net_device *sb_dev)
4158 EXPORT_SYMBOL(dev_pick_tx_zero);
4160 u16 dev_pick_tx_cpu_id(struct net_device *dev, struct sk_buff *skb,
4161 struct net_device *sb_dev)
4163 return (u16)raw_smp_processor_id() % dev->real_num_tx_queues;
4165 EXPORT_SYMBOL(dev_pick_tx_cpu_id);
4167 u16 netdev_pick_tx(struct net_device *dev, struct sk_buff *skb,
4168 struct net_device *sb_dev)
4170 struct sock *sk = skb->sk;
4171 int queue_index = sk_tx_queue_get(sk);
4173 sb_dev = sb_dev ? : dev;
4175 if (queue_index < 0 || skb->ooo_okay ||
4176 queue_index >= dev->real_num_tx_queues) {
4177 int new_index = get_xps_queue(dev, sb_dev, skb);
4180 new_index = skb_tx_hash(dev, sb_dev, skb);
4182 if (queue_index != new_index && sk &&
4184 rcu_access_pointer(sk->sk_dst_cache))
4185 sk_tx_queue_set(sk, new_index);
4187 queue_index = new_index;
4192 EXPORT_SYMBOL(netdev_pick_tx);
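/* Illustrative sketch (not part of this file): an ndo_select_queue
 * implementation that reserves the last Tx queue for management traffic and
 * defers all other frames to netdev_pick_tx().  "my_is_mgmt_frame" and its
 * priority-based policy are invented.
 */
static bool my_is_mgmt_frame(const struct sk_buff *skb)
{
	return skb->priority == TC_PRIO_CONTROL;	/* invented policy */
}

static u16 my_select_queue(struct net_device *dev, struct sk_buff *skb,
			   struct net_device *sb_dev)
{
	if (my_is_mgmt_frame(skb))
		return dev->real_num_tx_queues - 1;
	return netdev_pick_tx(dev, skb, sb_dev);
}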
4194 struct netdev_queue *netdev_core_pick_tx(struct net_device *dev,
4195 struct sk_buff *skb,
4196 struct net_device *sb_dev)
4198 int queue_index = 0;
4201 u32 sender_cpu = skb->sender_cpu - 1;
4203 if (sender_cpu >= (u32)NR_CPUS)
4204 skb->sender_cpu = raw_smp_processor_id() + 1;
4207 if (dev->real_num_tx_queues != 1) {
4208 const struct net_device_ops *ops = dev->netdev_ops;
4210 if (ops->ndo_select_queue)
4211 queue_index = ops->ndo_select_queue(dev, skb, sb_dev);
4213 queue_index = netdev_pick_tx(dev, skb, sb_dev);
4215 queue_index = netdev_cap_txqueue(dev, queue_index);
4218 skb_set_queue_mapping(skb, queue_index);
4219 return netdev_get_tx_queue(dev, queue_index);
4223 * __dev_queue_xmit() - transmit a buffer
4224 * @skb: buffer to transmit
4225 * @sb_dev: subordinate device used for L2 forwarding offload
4227 * Queue a buffer for transmission to a network device. The caller must
4228 * have set the device and priority and built the buffer before calling
4229 * this function. The function can be called from an interrupt.
4231 * When calling this method, interrupts MUST be enabled. This is because
4232 * the BH enable code must have IRQs enabled so that it will not deadlock.
4234 * Regardless of the return value, the skb is consumed, so it is currently
4235 * difficult to retry a send to this method. (You can bump the ref count
4236 * before sending to hold a reference for retry if you are careful.)
4239 * * 0 - buffer successfully transmitted
4240 * * positive qdisc return code - NET_XMIT_DROP etc.
4241 * * negative errno - other errors
4243 int __dev_queue_xmit(struct sk_buff *skb, struct net_device *sb_dev)
4245 struct net_device *dev = skb->dev;
4246 struct netdev_queue *txq = NULL;
4251 skb_reset_mac_header(skb);
4252 skb_assert_len(skb);
4254 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_SCHED_TSTAMP))
4255 __skb_tstamp_tx(skb, NULL, NULL, skb->sk, SCM_TSTAMP_SCHED);
4257 /* Disable soft irqs for various locks below. Also
4258 * stops preemption for RCU.
4262 skb_update_prio(skb);
4264 qdisc_pkt_len_init(skb);
4265 tcx_set_ingress(skb, false);
4266 #ifdef CONFIG_NET_EGRESS
4267 if (static_branch_unlikely(&egress_needed_key)) {
4268 if (nf_hook_egress_active()) {
4269 skb = nf_hook_egress(skb, &rc, dev);
4274 netdev_xmit_skip_txqueue(false);
4276 nf_skip_egress(skb, true);
4277 skb = sch_handle_egress(skb, &rc, dev);
4280 nf_skip_egress(skb, false);
4282 if (netdev_xmit_txqueue_skipped())
4283 txq = netdev_tx_queue_mapping(dev, skb);
4286 /* If device/qdisc don't need skb->dst, release it right now while
4287 * it's hot in this cpu cache.
4289 if (dev->priv_flags & IFF_XMIT_DST_RELEASE)
4295 txq = netdev_core_pick_tx(dev, skb, sb_dev);
4297 q = rcu_dereference_bh(txq->qdisc);
4299 trace_net_dev_queue(skb);
4301 rc = __dev_xmit_skb(skb, q, dev, txq);
4305 /* The device has no queue. Common case for software devices:
4306 * loopback, all the sorts of tunnels...
4308 * Really, it is unlikely that netif_tx_lock protection is necessary
4309 * here. (f.e. loopback and IP tunnels are clean ignoring statistics
4311 * However, it is possible that they rely on protection
4314 * Check this and take the lock. It is not prone to deadlocks.
4315 * Either take it for the noqueue qdisc, which is even simpler 8)
4317 if (dev->flags & IFF_UP) {
4318 int cpu = smp_processor_id(); /* ok because BHs are off */
4320 /* Other cpus might concurrently change txq->xmit_lock_owner
4321 * to -1 or to their cpu id, but not to our id.
4323 if (READ_ONCE(txq->xmit_lock_owner) != cpu) {
4324 if (dev_xmit_recursion())
4325 goto recursion_alert;
4327 skb = validate_xmit_skb(skb, dev, &again);
4331 HARD_TX_LOCK(dev, txq, cpu);
4333 if (!netif_xmit_stopped(txq)) {
4334 dev_xmit_recursion_inc();
4335 skb = dev_hard_start_xmit(skb, dev, txq, &rc);
4336 dev_xmit_recursion_dec();
4337 if (dev_xmit_complete(rc)) {
4338 HARD_TX_UNLOCK(dev, txq);
4342 HARD_TX_UNLOCK(dev, txq);
4343 net_crit_ratelimited("Virtual device %s asks to queue packet!\n",
4346 /* Recursion is detected! It is possible,
4350 net_crit_ratelimited("Dead loop on virtual device %s, fix it urgently!\n",
4356 rcu_read_unlock_bh();
4358 dev_core_stats_tx_dropped_inc(dev);
4359 kfree_skb_list(skb);
4362 rcu_read_unlock_bh();
4365 EXPORT_SYMBOL(__dev_queue_xmit);
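/* Illustrative sketch (not part of this file): how a simple protocol hands a
 * fully built frame to this transmit path.  The caller supplies the device
 * and MAC addresses; ETH_P_802_EX1 (a local-experiment ethertype) stands in
 * for a real protocol number, and "my_send" is an invented name.
 */
static int my_send(struct net_device *dev, const u8 *dst, const u8 *src,
		   const void *payload, unsigned int len)
{
	struct sk_buff *skb;

	skb = netdev_alloc_skb(dev, LL_RESERVED_SPACE(dev) + len);
	if (!skb)
		return -ENOMEM;
	skb_reserve(skb, LL_RESERVED_SPACE(dev));
	skb_put_data(skb, payload, len);
	skb->dev = dev;
	skb->protocol = htons(ETH_P_802_EX1);
	if (dev_hard_header(skb, dev, ETH_P_802_EX1, dst, src, skb->len) < 0) {
		kfree_skb(skb);
		return -EINVAL;
	}
	return dev_queue_xmit(skb);	/* consumes the skb in all cases */
}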
4367 int __dev_direct_xmit(struct sk_buff *skb, u16 queue_id)
4369 struct net_device *dev = skb->dev;
4370 struct sk_buff *orig_skb = skb;
4371 struct netdev_queue *txq;
4372 int ret = NETDEV_TX_BUSY;
4375 if (unlikely(!netif_running(dev) ||
4376 !netif_carrier_ok(dev)))
4379 skb = validate_xmit_skb_list(skb, dev, &again);
4380 if (skb != orig_skb)
4383 skb_set_queue_mapping(skb, queue_id);
4384 txq = skb_get_tx_queue(dev, skb);
4388 dev_xmit_recursion_inc();
4389 HARD_TX_LOCK(dev, txq, smp_processor_id());
4390 if (!netif_xmit_frozen_or_drv_stopped(txq))
4391 ret = netdev_start_xmit(skb, dev, txq, false);
4392 HARD_TX_UNLOCK(dev, txq);
4393 dev_xmit_recursion_dec();
4398 dev_core_stats_tx_dropped_inc(dev);
4399 kfree_skb_list(skb);
4400 return NET_XMIT_DROP;
4402 EXPORT_SYMBOL(__dev_direct_xmit);
4404 /*************************************************************************
4406 *************************************************************************/
4408 unsigned int sysctl_skb_defer_max __read_mostly = 64;
4409 int weight_p __read_mostly = 64; /* old backlog weight */
4410 int dev_weight_rx_bias __read_mostly = 1; /* bias for backlog weight */
4411 int dev_weight_tx_bias __read_mostly = 1; /* bias for output_queue quota */
4413 /* Called with irq disabled */
4414 static inline void ____napi_schedule(struct softnet_data *sd,
4415 struct napi_struct *napi)
4417 struct task_struct *thread;
4419 lockdep_assert_irqs_disabled();
4421 if (test_bit(NAPI_STATE_THREADED, &napi->state)) {
4422 /* Paired with smp_mb__before_atomic() in
4423 * napi_enable()/dev_set_threaded().
4424 * Use READ_ONCE() to guarantee a complete
4425 * read on napi->thread. Only call
4426 * wake_up_process() when it's not NULL.
4428 thread = READ_ONCE(napi->thread);
4430 /* Avoid doing set_bit() if the thread is in
4431 * INTERRUPTIBLE state, because napi_thread_wait()
4432 * makes sure to proceed with napi polling
4433 * if the thread is explicitly woken from here.
4435 if (READ_ONCE(thread->__state) != TASK_INTERRUPTIBLE)
4436 set_bit(NAPI_STATE_SCHED_THREADED, &napi->state);
4437 wake_up_process(thread);
4442 list_add_tail(&napi->poll_list, &sd->poll_list);
4443 WRITE_ONCE(napi->list_owner, smp_processor_id());
4444 /* If not called from net_rx_action()
4445 * we have to raise NET_RX_SOFTIRQ.
4447 if (!sd->in_net_rx_action)
4448 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
4453 struct static_key_false rps_needed __read_mostly;
4454 EXPORT_SYMBOL(rps_needed);
4455 struct static_key_false rfs_needed __read_mostly;
4456 EXPORT_SYMBOL(rfs_needed);
4458 static struct rps_dev_flow *
4459 set_rps_cpu(struct net_device *dev, struct sk_buff *skb,
4460 struct rps_dev_flow *rflow, u16 next_cpu)
4462 if (next_cpu < nr_cpu_ids) {
4463 #ifdef CONFIG_RFS_ACCEL
4464 struct netdev_rx_queue *rxqueue;
4465 struct rps_dev_flow_table *flow_table;
4466 struct rps_dev_flow *old_rflow;
4471 /* Should we steer this flow to a different hardware queue? */
4472 if (!skb_rx_queue_recorded(skb) || !dev->rx_cpu_rmap ||
4473 !(dev->features & NETIF_F_NTUPLE))
4475 rxq_index = cpu_rmap_lookup_index(dev->rx_cpu_rmap, next_cpu);
4476 if (rxq_index == skb_get_rx_queue(skb))
4479 rxqueue = dev->_rx + rxq_index;
4480 flow_table = rcu_dereference(rxqueue->rps_flow_table);
4483 flow_id = skb_get_hash(skb) & flow_table->mask;
4484 rc = dev->netdev_ops->ndo_rx_flow_steer(dev, skb,
4485 rxq_index, flow_id);
4489 rflow = &flow_table->flows[flow_id];
4491 if (old_rflow->filter == rflow->filter)
4492 old_rflow->filter = RPS_NO_FILTER;
4496 per_cpu(softnet_data, next_cpu).input_queue_head;
4499 rflow->cpu = next_cpu;
4504 * get_rps_cpu is called from netif_receive_skb and returns the target
4505 * CPU from the RPS map of the receiving queue for a given skb.
4506 * rcu_read_lock must be held on entry.
4508 static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
4509 struct rps_dev_flow **rflowp)
4511 const struct rps_sock_flow_table *sock_flow_table;
4512 struct netdev_rx_queue *rxqueue = dev->_rx;
4513 struct rps_dev_flow_table *flow_table;
4514 struct rps_map *map;
4519 if (skb_rx_queue_recorded(skb)) {
4520 u16 index = skb_get_rx_queue(skb);
4522 if (unlikely(index >= dev->real_num_rx_queues)) {
4523 WARN_ONCE(dev->real_num_rx_queues > 1,
4524 "%s received packet on queue %u, but number "
4525 "of RX queues is %u\n",
4526 dev->name, index, dev->real_num_rx_queues);
4532 /* Avoid computing hash if RFS/RPS is not active for this rxqueue */
4534 flow_table = rcu_dereference(rxqueue->rps_flow_table);
4535 map = rcu_dereference(rxqueue->rps_map);
4536 if (!flow_table && !map)
4539 skb_reset_network_header(skb);
4540 hash = skb_get_hash(skb);
4544 sock_flow_table = rcu_dereference(net_hotdata.rps_sock_flow_table);
4545 if (flow_table && sock_flow_table) {
4546 struct rps_dev_flow *rflow;
4550 /* First check into global flow table if there is a match.
4551 * This READ_ONCE() pairs with WRITE_ONCE() from rps_record_sock_flow().
4553 ident = READ_ONCE(sock_flow_table->ents[hash & sock_flow_table->mask]);
4554 if ((ident ^ hash) & ~net_hotdata.rps_cpu_mask)
4557 next_cpu = ident & net_hotdata.rps_cpu_mask;
4559 /* OK, now we know there is a match,
4560 * we can look at the local (per receive queue) flow table
4562 rflow = &flow_table->flows[hash & flow_table->mask];
4566 * If the desired CPU (where last recvmsg was done) is
4567 * different from current CPU (one in the rx-queue flow
4568 * table entry), switch if one of the following holds:
4569 * - Current CPU is unset (>= nr_cpu_ids).
4570 * - Current CPU is offline.
4571 * - The current CPU's queue tail has advanced beyond the
4572 * last packet that was enqueued using this table entry.
4573 * This guarantees that all previous packets for the flow
4574 * have been dequeued, thus preserving in order delivery.
4576 if (unlikely(tcpu != next_cpu) &&
4577 (tcpu >= nr_cpu_ids || !cpu_online(tcpu) ||
4578 ((int)(per_cpu(softnet_data, tcpu).input_queue_head -
4579 rflow->last_qtail)) >= 0)) {
4581 rflow = set_rps_cpu(dev, skb, rflow, next_cpu);
4584 if (tcpu < nr_cpu_ids && cpu_online(tcpu)) {
4594 tcpu = map->cpus[reciprocal_scale(hash, map->len)];
4595 if (cpu_online(tcpu)) {
4605 #ifdef CONFIG_RFS_ACCEL
4608 * rps_may_expire_flow - check whether an RFS hardware filter may be removed
4609 * @dev: Device on which the filter was set
4610 * @rxq_index: RX queue index
4611 * @flow_id: Flow ID passed to ndo_rx_flow_steer()
4612 * @filter_id: Filter ID returned by ndo_rx_flow_steer()
4614 * Drivers that implement ndo_rx_flow_steer() should periodically call
4615 * this function for each installed filter and remove the filters for
4616 * which it returns %true.
4618 bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index,
4619 u32 flow_id, u16 filter_id)
4621 struct netdev_rx_queue *rxqueue = dev->_rx + rxq_index;
4622 struct rps_dev_flow_table *flow_table;
4623 struct rps_dev_flow *rflow;
4628 flow_table = rcu_dereference(rxqueue->rps_flow_table);
4629 if (flow_table && flow_id <= flow_table->mask) {
4630 rflow = &flow_table->flows[flow_id];
4631 cpu = READ_ONCE(rflow->cpu);
4632 if (rflow->filter == filter_id && cpu < nr_cpu_ids &&
4633 ((int)(per_cpu(softnet_data, cpu).input_queue_head -
4634 rflow->last_qtail) <
4635 (int)(10 * flow_table->mask)))
4641 EXPORT_SYMBOL(rps_may_expire_flow);
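/* Illustrative sketch (not part of this file): the periodic scan an RFS
 * accelerated driver might run to retire stale steering filters it installed
 * from ndo_rx_flow_steer().  The filter bookkeeping ("my_filter", "my_filters")
 * is entirely invented; only rps_may_expire_flow() is real.
 */
#define MY_NFILTERS 256

struct my_filter {
	bool in_use;
	u32  flow_id;
};

static struct my_filter my_filters[MY_NFILTERS];

static void my_expire_rfs_filters(struct net_device *dev, u16 rxq_index)
{
	unsigned int i;

	for (i = 0; i < MY_NFILTERS; i++) {
		struct my_filter *f = &my_filters[i];

		if (!f->in_use)
			continue;
		if (rps_may_expire_flow(dev, rxq_index, f->flow_id, i)) {
			/* remove the hardware filter here (driver specific) */
			f->in_use = false;
		}
	}
}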
4643 #endif /* CONFIG_RFS_ACCEL */
4645 /* Called from hardirq (IPI) context */
4646 static void rps_trigger_softirq(void *data)
4648 struct softnet_data *sd = data;
4650 ____napi_schedule(sd, &sd->backlog);
4654 #endif /* CONFIG_RPS */
4656 /* Called from hardirq (IPI) context */
4657 static void trigger_rx_softirq(void *data)
4659 struct softnet_data *sd = data;
4661 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
4662 smp_store_release(&sd->defer_ipi_scheduled, 0);
4666 * After we queued a packet into sd->input_pkt_queue,
4667 * we need to make sure this queue is serviced soon.
4669 * - If this is another cpu queue, link it to our rps_ipi_list,
4670 * and make sure we will process rps_ipi_list from net_rx_action().
4672 * - If this is our own queue, NAPI schedule our backlog.
4673 * Note that this also raises NET_RX_SOFTIRQ.
4675 static void napi_schedule_rps(struct softnet_data *sd)
4677 struct softnet_data *mysd = this_cpu_ptr(&softnet_data);
4681 sd->rps_ipi_next = mysd->rps_ipi_list;
4682 mysd->rps_ipi_list = sd;
4684 /* If not called from net_rx_action() or napi_threaded_poll()
4685 * we have to raise NET_RX_SOFTIRQ.
4687 if (!mysd->in_net_rx_action && !mysd->in_napi_threaded_poll)
4688 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
4691 #endif /* CONFIG_RPS */
4692 __napi_schedule_irqoff(&mysd->backlog);
4695 #ifdef CONFIG_NET_FLOW_LIMIT
4696 int netdev_flow_limit_table_len __read_mostly = (1 << 12);
4699 static bool skb_flow_limit(struct sk_buff *skb, unsigned int qlen)
4701 #ifdef CONFIG_NET_FLOW_LIMIT
4702 struct sd_flow_limit *fl;
4703 struct softnet_data *sd;
4704 unsigned int old_flow, new_flow;
4706 if (qlen < (READ_ONCE(net_hotdata.max_backlog) >> 1))
4709 sd = this_cpu_ptr(&softnet_data);
4712 fl = rcu_dereference(sd->flow_limit);
4714 new_flow = skb_get_hash(skb) & (fl->num_buckets - 1);
4715 old_flow = fl->history[fl->history_head];
4716 fl->history[fl->history_head] = new_flow;
4719 fl->history_head &= FLOW_LIMIT_HISTORY - 1;
4721 if (likely(fl->buckets[old_flow]))
4722 fl->buckets[old_flow]--;
4724 if (++fl->buckets[new_flow] > (FLOW_LIMIT_HISTORY >> 1)) {
4736 * enqueue_to_backlog is called to queue an skb to a per CPU backlog
4737 * queue (may be a remote CPU queue).
4739 static int enqueue_to_backlog(struct sk_buff *skb, int cpu,
4740 unsigned int *qtail)
4742 enum skb_drop_reason reason;
4743 struct softnet_data *sd;
4744 unsigned long flags;
4747 reason = SKB_DROP_REASON_NOT_SPECIFIED;
4748 sd = &per_cpu(softnet_data, cpu);
4750 rps_lock_irqsave(sd, &flags);
4751 if (!netif_running(skb->dev))
4753 qlen = skb_queue_len(&sd->input_pkt_queue);
4754 if (qlen <= READ_ONCE(net_hotdata.max_backlog) &&
4755 !skb_flow_limit(skb, qlen)) {
4758 __skb_queue_tail(&sd->input_pkt_queue, skb);
4759 input_queue_tail_incr_save(sd, qtail);
4760 rps_unlock_irq_restore(sd, &flags);
4761 return NET_RX_SUCCESS;
4764 /* Schedule NAPI for backlog device
4765 * We can use non atomic operation since we own the queue lock
4767 if (!__test_and_set_bit(NAPI_STATE_SCHED, &sd->backlog.state))
4768 napi_schedule_rps(sd);
4771 reason = SKB_DROP_REASON_CPU_BACKLOG;
4775 rps_unlock_irq_restore(sd, &flags);
4777 dev_core_stats_rx_dropped_inc(skb->dev);
4778 kfree_skb_reason(skb, reason);
4782 static struct netdev_rx_queue *netif_get_rxqueue(struct sk_buff *skb)
4784 struct net_device *dev = skb->dev;
4785 struct netdev_rx_queue *rxqueue;
4789 if (skb_rx_queue_recorded(skb)) {
4790 u16 index = skb_get_rx_queue(skb);
4792 if (unlikely(index >= dev->real_num_rx_queues)) {
4793 WARN_ONCE(dev->real_num_rx_queues > 1,
4794 "%s received packet on queue %u, but number "
4795 "of RX queues is %u\n",
4796 dev->name, index, dev->real_num_rx_queues);
4798 return rxqueue; /* Return first rxqueue */
4805 u32 bpf_prog_run_generic_xdp(struct sk_buff *skb, struct xdp_buff *xdp,
4806 struct bpf_prog *xdp_prog)
4808 void *orig_data, *orig_data_end, *hard_start;
4809 struct netdev_rx_queue *rxqueue;
4810 bool orig_bcast, orig_host;
4811 u32 mac_len, frame_sz;
4812 __be16 orig_eth_type;
4817 /* The XDP program wants to see the packet starting at the MAC header. */
4820 mac_len = skb->data - skb_mac_header(skb);
4821 hard_start = skb->data - skb_headroom(skb);
4823 /* SKB "head" area always has tailroom for skb_shared_info */
4824 frame_sz = (void *)skb_end_pointer(skb) - hard_start;
4825 frame_sz += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
4827 rxqueue = netif_get_rxqueue(skb);
4828 xdp_init_buff(xdp, frame_sz, &rxqueue->xdp_rxq);
4829 xdp_prepare_buff(xdp, hard_start, skb_headroom(skb) - mac_len,
4830 skb_headlen(skb) + mac_len, true);
4831 if (skb_is_nonlinear(skb)) {
4832 skb_shinfo(skb)->xdp_frags_size = skb->data_len;
4833 xdp_buff_set_frags_flag(xdp);
4835 xdp_buff_clear_frags_flag(xdp);
4838 orig_data_end = xdp->data_end;
4839 orig_data = xdp->data;
4840 eth = (struct ethhdr *)xdp->data;
4841 orig_host = ether_addr_equal_64bits(eth->h_dest, skb->dev->dev_addr);
4842 orig_bcast = is_multicast_ether_addr_64bits(eth->h_dest);
4843 orig_eth_type = eth->h_proto;
4845 act = bpf_prog_run_xdp(xdp_prog, xdp);
4847 /* check if bpf_xdp_adjust_head was used */
4848 off = xdp->data - orig_data;
4851 __skb_pull(skb, off);
4853 __skb_push(skb, -off);
4855 skb->mac_header += off;
4856 skb_reset_network_header(skb);
4859 /* check if bpf_xdp_adjust_tail was used */
4860 off = xdp->data_end - orig_data_end;
4862 skb_set_tail_pointer(skb, xdp->data_end - xdp->data);
4863 skb->len += off; /* positive on grow, negative on shrink */
4866 /* XDP frag metadata (e.g. nr_frags) is updated in eBPF helpers
4867 * (e.g. bpf_xdp_adjust_tail), so we need to update data_len here. */
4869 if (xdp_buff_has_frags(xdp))
4870 skb->data_len = skb_shinfo(skb)->xdp_frags_size;
4874 /* check if XDP changed the eth hdr such that the SKB needs an update */
4875 eth = (struct ethhdr *)xdp->data;
4876 if ((orig_eth_type != eth->h_proto) ||
4877 (orig_host != ether_addr_equal_64bits(eth->h_dest,
4878 skb->dev->dev_addr)) ||
4879 (orig_bcast != is_multicast_ether_addr_64bits(eth->h_dest))) {
4880 __skb_push(skb, ETH_HLEN);
4881 skb->pkt_type = PACKET_HOST;
4882 skb->protocol = eth_type_trans(skb, skb->dev);
4885 /* Redirect/Tx gives L2 packet, code that will reuse skb must __skb_pull
4886 * before calling us again on redirect path. We do not call do_redirect
4887 * as we leave that up to the caller.
4889 * Caller is responsible for managing lifetime of skb (i.e. calling
4890 * kfree_skb in response to actions it cannot handle/XDP_DROP).
4895 __skb_push(skb, mac_len);
4898 metalen = xdp->data - xdp->data_meta;
4900 skb_metadata_set(skb, metalen);
4908 netif_skb_check_for_xdp(struct sk_buff **pskb, struct bpf_prog *prog)
4910 struct sk_buff *skb = *pskb;
4911 int err, hroom, troom;
4913 if (!skb_cow_data_for_xdp(this_cpu_read(system_page_pool), pskb, prog))
4916 /* In case we have to go down the path and also linearize,
4917 * then let's do the pskb_expand_head() work just once here. */
4919 hroom = XDP_PACKET_HEADROOM - skb_headroom(skb);
4920 troom = skb->tail + skb->data_len - skb->end;
4921 err = pskb_expand_head(skb,
4922 hroom > 0 ? ALIGN(hroom, NET_SKB_PAD) : 0,
4923 troom > 0 ? troom + 128 : 0, GFP_ATOMIC);
4927 return skb_linearize(skb);
4930 static u32 netif_receive_generic_xdp(struct sk_buff **pskb,
4931 struct xdp_buff *xdp,
4932 struct bpf_prog *xdp_prog)
4934 struct sk_buff *skb = *pskb;
4935 u32 mac_len, act = XDP_DROP;
4937 /* Reinjected packets coming from act_mirred or similar should
4938 * not get XDP generic processing.
4940 if (skb_is_redirected(skb))
4943 /* XDP packets must have sufficient headroom of XDP_PACKET_HEADROOM
4944 * bytes. This is the guarantee that also native XDP provides,
4945 * thus we need to do it here as well.
4947 mac_len = skb->data - skb_mac_header(skb);
4948 __skb_push(skb, mac_len);
4950 if (skb_cloned(skb) || skb_is_nonlinear(skb) ||
4951 skb_headroom(skb) < XDP_PACKET_HEADROOM) {
4952 if (netif_skb_check_for_xdp(pskb, xdp_prog))
4956 __skb_pull(*pskb, mac_len);
4958 act = bpf_prog_run_generic_xdp(*pskb, xdp, xdp_prog);
4965 bpf_warn_invalid_xdp_action((*pskb)->dev, xdp_prog, act);
4968 trace_xdp_exception((*pskb)->dev, xdp_prog, act);
4979 /* When doing generic XDP we have to bypass the qdisc layer and the
4980 * network taps in order to match in-driver-XDP behavior. This also means
4981 * that XDP packets are able to starve other packets going through a qdisc,
4982 * and DDoS attacks will be more effective. In-driver XDP uses dedicated TX
4983 * queues, so it does not have this starvation issue. */
4985 void generic_xdp_tx(struct sk_buff *skb, struct bpf_prog *xdp_prog)
4987 struct net_device *dev = skb->dev;
4988 struct netdev_queue *txq;
4989 bool free_skb = true;
4992 txq = netdev_core_pick_tx(dev, skb, NULL);
4993 cpu = smp_processor_id();
4994 HARD_TX_LOCK(dev, txq, cpu);
4995 if (!netif_xmit_frozen_or_drv_stopped(txq)) {
4996 rc = netdev_start_xmit(skb, dev, txq, 0);
4997 if (dev_xmit_complete(rc))
5000 HARD_TX_UNLOCK(dev, txq);
5002 trace_xdp_exception(dev, xdp_prog, XDP_TX);
5003 dev_core_stats_tx_dropped_inc(dev);
5008 static DEFINE_STATIC_KEY_FALSE(generic_xdp_needed_key);
5010 int do_xdp_generic(struct bpf_prog *xdp_prog, struct sk_buff **pskb)
5013 struct xdp_buff xdp;
5017 act = netif_receive_generic_xdp(pskb, &xdp, xdp_prog);
5018 if (act != XDP_PASS) {
5021 err = xdp_do_generic_redirect((*pskb)->dev, *pskb,
5027 generic_xdp_tx(*pskb, xdp_prog);
5035 kfree_skb_reason(*pskb, SKB_DROP_REASON_XDP);
5038 EXPORT_SYMBOL_GPL(do_xdp_generic);
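/* Illustrative sketch (not part of this file, compiled out): roughly how a
 * tunnel-style driver that builds skbs outside of NAPI might run a generic
 * XDP program via do_xdp_generic() before handing the packet to the stack.
 * priv, its xdp_prog field and mydev_rx() are hypothetical; any verdict
 * other than XDP_PASS means the skb was already consumed (dropped,
 * transmitted or redirected).
 */
#if 0	/* example only */
static void mydev_rx(struct mydev_priv *priv, struct sk_buff *skb)
{
	struct bpf_prog *xdp_prog;

	local_bh_disable();
	rcu_read_lock();
	xdp_prog = rcu_dereference(priv->xdp_prog);	/* hypothetical field */
	if (xdp_prog && do_xdp_generic(xdp_prog, &skb) != XDP_PASS) {
		rcu_read_unlock();
		local_bh_enable();
		return;			/* skb already consumed */
	}
	rcu_read_unlock();

	netif_rx(skb);
	local_bh_enable();
}
#endif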
5040 static int netif_rx_internal(struct sk_buff *skb)
5044 net_timestamp_check(READ_ONCE(net_hotdata.tstamp_prequeue), skb);
5046 trace_netif_rx(skb);
5049 if (static_branch_unlikely(&rps_needed)) {
5050 struct rps_dev_flow voidflow, *rflow = &voidflow;
5055 cpu = get_rps_cpu(skb->dev, skb, &rflow);
5057 cpu = smp_processor_id();
5059 ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
5067 ret = enqueue_to_backlog(skb, smp_processor_id(), &qtail);
5073 * __netif_rx - Slightly optimized version of netif_rx
5074 * @skb: buffer to post
5076 * This behaves as netif_rx except that it does not disable bottom halves.
5077 * As a result this function may only be invoked from the interrupt context
5078 * (either hard or soft interrupt).
5080 int __netif_rx(struct sk_buff *skb)
5084 lockdep_assert_once(hardirq_count() | softirq_count());
5086 trace_netif_rx_entry(skb);
5087 ret = netif_rx_internal(skb);
5088 trace_netif_rx_exit(ret);
5091 EXPORT_SYMBOL(__netif_rx);
5094 * netif_rx - post buffer to the network code
5095 * @skb: buffer to post
5097 * This function receives a packet from a device driver and queues it for
5098 * the upper (protocol) levels to process via the backlog NAPI device. It
5099 * always succeeds. The buffer may be dropped during processing for
5100 * congestion control or by the protocol layers.
5101 * The network buffer is passed via the backlog NAPI device. Modern NIC
5102 * drivers should use NAPI and GRO.
5103 * This function can be used from interrupt and from process context. The
5104 * caller from process context must not disable interrupts before invoking this API.
 * Return values:
5108 * NET_RX_SUCCESS (no congestion)
5109 * NET_RX_DROP (packet was dropped)
5112 int netif_rx(struct sk_buff *skb)
5114 bool need_bh_off = !(hardirq_count() | softirq_count());
5119 trace_netif_rx_entry(skb);
5120 ret = netif_rx_internal(skb);
5121 trace_netif_rx_exit(ret);
5126 EXPORT_SYMBOL(netif_rx);
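/* Illustrative sketch (not part of this file, compiled out): a loopback-style
 * xmit handler that feeds the packet straight back into the receive path with
 * netif_rx(). eth_type_trans() sets skb->dev and the packet type before the
 * skb is queued to the backlog NAPI. mydev_xmit() is hypothetical.
 */
#if 0	/* example only */
static netdev_tx_t mydev_xmit(struct sk_buff *skb, struct net_device *dev)
{
	skb_orphan(skb);				/* packet never leaves the host */
	skb->protocol = eth_type_trans(skb, dev);	/* sets pkt_type and skb->dev */
	netif_rx(skb);					/* queue to the backlog NAPI */
	return NETDEV_TX_OK;
}
#endif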
5128 static __latent_entropy void net_tx_action(struct softirq_action *h)
5130 struct softnet_data *sd = this_cpu_ptr(&softnet_data);
5132 if (sd->completion_queue) {
5133 struct sk_buff *clist;
5135 local_irq_disable();
5136 clist = sd->completion_queue;
5137 sd->completion_queue = NULL;
5141 struct sk_buff *skb = clist;
5143 clist = clist->next;
5145 WARN_ON(refcount_read(&skb->users));
5146 if (likely(get_kfree_skb_cb(skb)->reason == SKB_CONSUMED))
5147 trace_consume_skb(skb, net_tx_action);
5149 trace_kfree_skb(skb, net_tx_action,
5150 get_kfree_skb_cb(skb)->reason);
5152 if (skb->fclone != SKB_FCLONE_UNAVAILABLE)
5155 __napi_kfree_skb(skb,
5156 get_kfree_skb_cb(skb)->reason);
5160 if (sd->output_queue) {
5163 local_irq_disable();
5164 head = sd->output_queue;
5165 sd->output_queue = NULL;
5166 sd->output_queue_tailp = &sd->output_queue;
5172 struct Qdisc *q = head;
5173 spinlock_t *root_lock = NULL;
5175 head = head->next_sched;
5177 /* We need to make sure head->next_sched is read
5178 * before clearing __QDISC_STATE_SCHED
5180 smp_mb__before_atomic();
5182 if (!(q->flags & TCQ_F_NOLOCK)) {
5183 root_lock = qdisc_lock(q);
5184 spin_lock(root_lock);
5185 } else if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED,
5187 /* There is a synchronize_net() between
5188 * STATE_DEACTIVATED flag being set and
5189 * qdisc_reset()/some_qdisc_is_busy() in
5190 * dev_deactivate(), so we can safely bail out
5191 * early here to avoid data race between
5192 * qdisc_deactivate() and some_qdisc_is_busy()
5193 * for lockless qdisc.
5195 clear_bit(__QDISC_STATE_SCHED, &q->state);
5199 clear_bit(__QDISC_STATE_SCHED, &q->state);
5202 spin_unlock(root_lock);
5208 xfrm_dev_backlog(sd);
5211 #if IS_ENABLED(CONFIG_BRIDGE) && IS_ENABLED(CONFIG_ATM_LANE)
5212 /* This hook is defined here for ATM LANE */
5213 int (*br_fdb_test_addr_hook)(struct net_device *dev,
5214 unsigned char *addr) __read_mostly;
5215 EXPORT_SYMBOL_GPL(br_fdb_test_addr_hook);
5219 * netdev_is_rx_handler_busy - check if receive handler is registered
5220 * @dev: device to check
5222 * Check if a receive handler is already registered for a given device.
5223 * Return true if there is one.
5225 * The caller must hold the rtnl_mutex.
5227 bool netdev_is_rx_handler_busy(struct net_device *dev)
5230 return dev && rtnl_dereference(dev->rx_handler);
5232 EXPORT_SYMBOL_GPL(netdev_is_rx_handler_busy);
5235 * netdev_rx_handler_register - register receive handler
5236 * @dev: device to register a handler for
5237 * @rx_handler: receive handler to register
5238 * @rx_handler_data: data pointer that is used by rx handler
5240 * Register a receive handler for a device. This handler will then be
5241 * called from __netif_receive_skb. A negative errno code is returned on a failure.
5244 * The caller must hold the rtnl_mutex.
5246 * For a general description of rx_handler, see enum rx_handler_result.
5248 int netdev_rx_handler_register(struct net_device *dev,
5249 rx_handler_func_t *rx_handler,
5250 void *rx_handler_data)
5252 if (netdev_is_rx_handler_busy(dev))
5255 if (dev->priv_flags & IFF_NO_RX_HANDLER)
5258 /* Note: rx_handler_data must be set before rx_handler */
5259 rcu_assign_pointer(dev->rx_handler_data, rx_handler_data);
5260 rcu_assign_pointer(dev->rx_handler, rx_handler);
5264 EXPORT_SYMBOL_GPL(netdev_rx_handler_register);
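/* Illustrative sketch (not part of this file, compiled out): registering an
 * rx_handler the way bridge/team-like upper devices do. The handler, the
 * mydev_* helpers and the port structure are hypothetical; registration must
 * run under rtnl_lock(), as the kernel-doc above requires.
 */
#if 0	/* example only */
static rx_handler_result_t mydev_handle_frame(struct sk_buff **pskb)
{
	struct sk_buff *skb = *pskb;
	struct mydev_port *port = rcu_dereference(skb->dev->rx_handler_data);

	if (!mydev_port_active(port))		/* hypothetical check */
		return RX_HANDLER_PASS;		/* let the normal path see it */

	mydev_enqueue(port, skb);		/* hypothetical consumption */
	return RX_HANDLER_CONSUMED;
}

static int mydev_attach_port(struct net_device *lower, struct mydev_port *port)
{
	int err;

	rtnl_lock();
	err = netdev_rx_handler_register(lower, mydev_handle_frame, port);
	rtnl_unlock();
	return err;
}
#endif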
5267 * netdev_rx_handler_unregister - unregister receive handler
5268 * @dev: device to unregister a handler from
5270 * Unregister a receive handler from a device.
5272 * The caller must hold the rtnl_mutex.
5274 void netdev_rx_handler_unregister(struct net_device *dev)
5278 RCU_INIT_POINTER(dev->rx_handler, NULL);
5279 /* a reader seeing a non NULL rx_handler in a rcu_read_lock()
5280 * section has a guarantee to see a non NULL rx_handler_data as well. */
5284 RCU_INIT_POINTER(dev->rx_handler_data, NULL);
5286 EXPORT_SYMBOL_GPL(netdev_rx_handler_unregister);
5289 * Limit the use of PFMEMALLOC reserves to those protocols that implement
5290 * the special handling of PFMEMALLOC skbs.
5292 static bool skb_pfmemalloc_protocol(struct sk_buff *skb)
5294 switch (skb->protocol) {
5295 case htons(ETH_P_ARP):
5296 case htons(ETH_P_IP):
5297 case htons(ETH_P_IPV6):
5298 case htons(ETH_P_8021Q):
5299 case htons(ETH_P_8021AD):
5306 static inline int nf_ingress(struct sk_buff *skb, struct packet_type **pt_prev,
5307 int *ret, struct net_device *orig_dev)
5309 if (nf_hook_ingress_active(skb)) {
5313 *ret = deliver_skb(skb, *pt_prev, orig_dev);
5318 ingress_retval = nf_hook_ingress(skb);
5320 return ingress_retval;
5325 static int __netif_receive_skb_core(struct sk_buff **pskb, bool pfmemalloc,
5326 struct packet_type **ppt_prev)
5328 struct packet_type *ptype, *pt_prev;
5329 rx_handler_func_t *rx_handler;
5330 struct sk_buff *skb = *pskb;
5331 struct net_device *orig_dev;
5332 bool deliver_exact = false;
5333 int ret = NET_RX_DROP;
5336 net_timestamp_check(!READ_ONCE(net_hotdata.tstamp_prequeue), skb);
5338 trace_netif_receive_skb(skb);
5340 orig_dev = skb->dev;
5342 skb_reset_network_header(skb);
5343 if (!skb_transport_header_was_set(skb))
5344 skb_reset_transport_header(skb);
5345 skb_reset_mac_len(skb);
5350 skb->skb_iif = skb->dev->ifindex;
5352 __this_cpu_inc(softnet_data.processed);
5354 if (static_branch_unlikely(&generic_xdp_needed_key)) {
5358 ret2 = do_xdp_generic(rcu_dereference(skb->dev->xdp_prog),
5362 if (ret2 != XDP_PASS) {
5368 if (eth_type_vlan(skb->protocol)) {
5369 skb = skb_vlan_untag(skb);
5374 if (skb_skip_tc_classify(skb))
5380 list_for_each_entry_rcu(ptype, &net_hotdata.ptype_all, list) {
5382 ret = deliver_skb(skb, pt_prev, orig_dev);
5386 list_for_each_entry_rcu(ptype, &skb->dev->ptype_all, list) {
5388 ret = deliver_skb(skb, pt_prev, orig_dev);
5393 #ifdef CONFIG_NET_INGRESS
5394 if (static_branch_unlikely(&ingress_needed_key)) {
5395 bool another = false;
5397 nf_skip_egress(skb, true);
5398 skb = sch_handle_ingress(skb, &pt_prev, &ret, orig_dev,
5405 nf_skip_egress(skb, false);
5406 if (nf_ingress(skb, &pt_prev, &ret, orig_dev) < 0)
5410 skb_reset_redirect(skb);
5412 if (pfmemalloc && !skb_pfmemalloc_protocol(skb))
5415 if (skb_vlan_tag_present(skb)) {
5417 ret = deliver_skb(skb, pt_prev, orig_dev);
5420 if (vlan_do_receive(&skb))
5422 else if (unlikely(!skb))
5426 rx_handler = rcu_dereference(skb->dev->rx_handler);
5429 ret = deliver_skb(skb, pt_prev, orig_dev);
5432 switch (rx_handler(&skb)) {
5433 case RX_HANDLER_CONSUMED:
5434 ret = NET_RX_SUCCESS;
5436 case RX_HANDLER_ANOTHER:
5438 case RX_HANDLER_EXACT:
5439 deliver_exact = true;
5441 case RX_HANDLER_PASS:
5448 if (unlikely(skb_vlan_tag_present(skb)) && !netdev_uses_dsa(skb->dev)) {
5450 if (skb_vlan_tag_get_id(skb)) {
5451 /* Vlan id is non 0 and vlan_do_receive() above couldn't find a vlan device. */
5454 skb->pkt_type = PACKET_OTHERHOST;
5455 } else if (eth_type_vlan(skb->protocol)) {
5456 /* Outer header is 802.1P with vlan 0, inner header is
5457 * 802.1Q or 802.1AD and vlan_do_receive() above could
5458 * not find vlan dev for vlan id 0.
5460 __vlan_hwaccel_clear_tag(skb);
5461 skb = skb_vlan_untag(skb);
5464 if (vlan_do_receive(&skb))
5465 /* After stripping off 802.1P header with vlan 0
5466 * vlan dev is found for inner header.
5469 else if (unlikely(!skb))
5472 /* We have stripped the outer 802.1P vlan 0 header,
5473 * but could not find a vlan dev.
5474 * Check the vlan id again to set OTHERHOST. */
5478 /* Note: we might in the future use prio bits
5479 * and set skb->priority like in vlan_do_receive()
5480 * For the time being, just ignore Priority Code Point
5482 __vlan_hwaccel_clear_tag(skb);
5485 type = skb->protocol;
5487 /* deliver only exact match when indicated */
5488 if (likely(!deliver_exact)) {
5489 deliver_ptype_list_skb(skb, &pt_prev, orig_dev, type,
5490 &ptype_base[ntohs(type) &
5494 deliver_ptype_list_skb(skb, &pt_prev, orig_dev, type,
5495 &orig_dev->ptype_specific);
5497 if (unlikely(skb->dev != orig_dev)) {
5498 deliver_ptype_list_skb(skb, &pt_prev, orig_dev, type,
5499 &skb->dev->ptype_specific);
5503 if (unlikely(skb_orphan_frags_rx(skb, GFP_ATOMIC)))
5505 *ppt_prev = pt_prev;
5509 dev_core_stats_rx_dropped_inc(skb->dev);
5511 dev_core_stats_rx_nohandler_inc(skb->dev);
5512 kfree_skb_reason(skb, SKB_DROP_REASON_UNHANDLED_PROTO);
5513 /* Jamal, now you will not be able to escape explaining
5514 * to me how you were going to use this. :-) */
5520 /* The invariant here is that if *ppt_prev is not NULL
5521 * then skb should also be non-NULL.
5523 * Apparently *ppt_prev assignment above holds this invariant due to
5524 * skb dereferencing near it.
5530 static int __netif_receive_skb_one_core(struct sk_buff *skb, bool pfmemalloc)
5532 struct net_device *orig_dev = skb->dev;
5533 struct packet_type *pt_prev = NULL;
5536 ret = __netif_receive_skb_core(&skb, pfmemalloc, &pt_prev);
5538 ret = INDIRECT_CALL_INET(pt_prev->func, ipv6_rcv, ip_rcv, skb,
5539 skb->dev, pt_prev, orig_dev);
5544 * netif_receive_skb_core - special purpose version of netif_receive_skb
5545 * @skb: buffer to process
5547 * More direct receive version of netif_receive_skb(). It should
5548 * only be used by callers that have a need to skip RPS and Generic XDP.
5549 * Caller must also take care of handling if ``(page_is_)pfmemalloc``.
5551 * This function may only be called from softirq context and interrupts
5552 * should be enabled.
5554 * Return values (usually ignored):
5555 * NET_RX_SUCCESS: no congestion
5556 * NET_RX_DROP: packet was dropped
5558 int netif_receive_skb_core(struct sk_buff *skb)
5563 ret = __netif_receive_skb_one_core(skb, false);
5568 EXPORT_SYMBOL(netif_receive_skb_core);
5570 static inline void __netif_receive_skb_list_ptype(struct list_head *head,
5571 struct packet_type *pt_prev,
5572 struct net_device *orig_dev)
5574 struct sk_buff *skb, *next;
5578 if (list_empty(head))
5580 if (pt_prev->list_func != NULL)
5581 INDIRECT_CALL_INET(pt_prev->list_func, ipv6_list_rcv,
5582 ip_list_rcv, head, pt_prev, orig_dev);
5584 list_for_each_entry_safe(skb, next, head, list) {
5585 skb_list_del_init(skb);
5586 pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
5590 static void __netif_receive_skb_list_core(struct list_head *head, bool pfmemalloc)
5592 /* Fast-path assumptions:
5593 * - There is no RX handler.
5594 * - Only one packet_type matches.
5595 * If either of these fails, we will end up doing some per-packet
5596 * processing in-line, then handling the 'last ptype' for the whole
5597 * sublist. This can't cause out-of-order delivery to any single ptype,
5598 * because the 'last ptype' must be constant across the sublist, and all
5599 * other ptypes are handled per-packet.
5601 /* Current (common) ptype of sublist */
5602 struct packet_type *pt_curr = NULL;
5603 /* Current (common) orig_dev of sublist */
5604 struct net_device *od_curr = NULL;
5605 struct list_head sublist;
5606 struct sk_buff *skb, *next;
5608 INIT_LIST_HEAD(&sublist);
5609 list_for_each_entry_safe(skb, next, head, list) {
5610 struct net_device *orig_dev = skb->dev;
5611 struct packet_type *pt_prev = NULL;
5613 skb_list_del_init(skb);
5614 __netif_receive_skb_core(&skb, pfmemalloc, &pt_prev);
5617 if (pt_curr != pt_prev || od_curr != orig_dev) {
5618 /* dispatch old sublist */
5619 __netif_receive_skb_list_ptype(&sublist, pt_curr, od_curr);
5620 /* start new sublist */
5621 INIT_LIST_HEAD(&sublist);
5625 list_add_tail(&skb->list, &sublist);
5628 /* dispatch final sublist */
5629 __netif_receive_skb_list_ptype(&sublist, pt_curr, od_curr);
5632 static int __netif_receive_skb(struct sk_buff *skb)
5636 if (sk_memalloc_socks() && skb_pfmemalloc(skb)) {
5637 unsigned int noreclaim_flag;
5640 * PFMEMALLOC skbs are special, they should
5641 * - be delivered to SOCK_MEMALLOC sockets only
5642 * - stay away from userspace
5643 * - have bounded memory usage
5645 * Use PF_MEMALLOC as this saves us from propagating the allocation
5646 * context down to all allocation sites.
5648 noreclaim_flag = memalloc_noreclaim_save();
5649 ret = __netif_receive_skb_one_core(skb, true);
5650 memalloc_noreclaim_restore(noreclaim_flag);
5652 ret = __netif_receive_skb_one_core(skb, false);
5657 static void __netif_receive_skb_list(struct list_head *head)
5659 unsigned long noreclaim_flag = 0;
5660 struct sk_buff *skb, *next;
5661 bool pfmemalloc = false; /* Is current sublist PF_MEMALLOC? */
5663 list_for_each_entry_safe(skb, next, head, list) {
5664 if ((sk_memalloc_socks() && skb_pfmemalloc(skb)) != pfmemalloc) {
5665 struct list_head sublist;
5667 /* Handle the previous sublist */
5668 list_cut_before(&sublist, head, &skb->list);
5669 if (!list_empty(&sublist))
5670 __netif_receive_skb_list_core(&sublist, pfmemalloc);
5671 pfmemalloc = !pfmemalloc;
5672 /* See comments in __netif_receive_skb */
5674 noreclaim_flag = memalloc_noreclaim_save();
5676 memalloc_noreclaim_restore(noreclaim_flag);
5679 /* Handle the remaining sublist */
5680 if (!list_empty(head))
5681 __netif_receive_skb_list_core(head, pfmemalloc);
5682 /* Restore pflags */
5684 memalloc_noreclaim_restore(noreclaim_flag);
5687 static int generic_xdp_install(struct net_device *dev, struct netdev_bpf *xdp)
5689 struct bpf_prog *old = rtnl_dereference(dev->xdp_prog);
5690 struct bpf_prog *new = xdp->prog;
5693 switch (xdp->command) {
5694 case XDP_SETUP_PROG:
5695 rcu_assign_pointer(dev->xdp_prog, new);
5700 static_branch_dec(&generic_xdp_needed_key);
5701 } else if (new && !old) {
5702 static_branch_inc(&generic_xdp_needed_key);
5703 dev_disable_lro(dev);
5704 dev_disable_gro_hw(dev);
5716 static int netif_receive_skb_internal(struct sk_buff *skb)
5720 net_timestamp_check(READ_ONCE(net_hotdata.tstamp_prequeue), skb);
5722 if (skb_defer_rx_timestamp(skb))
5723 return NET_RX_SUCCESS;
5727 if (static_branch_unlikely(&rps_needed)) {
5728 struct rps_dev_flow voidflow, *rflow = &voidflow;
5729 int cpu = get_rps_cpu(skb->dev, skb, &rflow);
5732 ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
5738 ret = __netif_receive_skb(skb);
5743 void netif_receive_skb_list_internal(struct list_head *head)
5745 struct sk_buff *skb, *next;
5746 struct list_head sublist;
5748 INIT_LIST_HEAD(&sublist);
5749 list_for_each_entry_safe(skb, next, head, list) {
5750 net_timestamp_check(READ_ONCE(net_hotdata.tstamp_prequeue),
5752 skb_list_del_init(skb);
5753 if (!skb_defer_rx_timestamp(skb))
5754 list_add_tail(&skb->list, &sublist);
5756 list_splice_init(&sublist, head);
5760 if (static_branch_unlikely(&rps_needed)) {
5761 list_for_each_entry_safe(skb, next, head, list) {
5762 struct rps_dev_flow voidflow, *rflow = &voidflow;
5763 int cpu = get_rps_cpu(skb->dev, skb, &rflow);
5766 /* Will be handled, remove from list */
5767 skb_list_del_init(skb);
5768 enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
5773 __netif_receive_skb_list(head);
5778 * netif_receive_skb - process receive buffer from network
5779 * @skb: buffer to process
5781 * netif_receive_skb() is the main receive data processing function.
5782 * It always succeeds. The buffer may be dropped during processing
5783 * for congestion control or by the protocol layers.
5785 * This function may only be called from softirq context and interrupts
5786 * should be enabled.
5788 * Return values (usually ignored):
5789 * NET_RX_SUCCESS: no congestion
5790 * NET_RX_DROP: packet was dropped
5792 int netif_receive_skb(struct sk_buff *skb)
5796 trace_netif_receive_skb_entry(skb);
5798 ret = netif_receive_skb_internal(skb);
5799 trace_netif_receive_skb_exit(ret);
5803 EXPORT_SYMBOL(netif_receive_skb);
5806 * netif_receive_skb_list - process many receive buffers from network
5807 * @head: list of skbs to process.
5809 * Since return value of netif_receive_skb() is normally ignored, and
5810 * wouldn't be meaningful for a list, this function returns void.
5812 * This function may only be called from softirq context and interrupts
5813 * should be enabled.
5815 void netif_receive_skb_list(struct list_head *head)
5817 struct sk_buff *skb;
5819 if (list_empty(head))
5821 if (trace_netif_receive_skb_list_entry_enabled()) {
5822 list_for_each_entry(skb, head, list)
5823 trace_netif_receive_skb_list_entry(skb);
5825 netif_receive_skb_list_internal(head);
5826 trace_netif_receive_skb_list_exit(0);
5828 EXPORT_SYMBOL(netif_receive_skb_list);
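/* Illustrative sketch (not part of this file, compiled out): batching
 * completed receive buffers on a list and handing them to
 * netif_receive_skb_list() in one go, instead of calling netif_receive_skb()
 * per packet. The mydev_* names are hypothetical.
 */
#if 0	/* example only */
static void mydev_rx_batch(struct mydev_priv *priv, int budget)
{
	struct sk_buff *skb;
	LIST_HEAD(rx_list);
	int n = 0;

	while (n < budget && (skb = mydev_next_completed_skb(priv))) {
		skb->protocol = eth_type_trans(skb, priv->netdev);
		list_add_tail(&skb->list, &rx_list);
		n++;
	}
	if (!list_empty(&rx_list))
		netif_receive_skb_list(&rx_list);
}
#endif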
5830 static DEFINE_PER_CPU(struct work_struct, flush_works);
5832 /* Network device is going away, flush any packets still pending */
5833 static void flush_backlog(struct work_struct *work)
5835 struct sk_buff *skb, *tmp;
5836 struct softnet_data *sd;
5839 sd = this_cpu_ptr(&softnet_data);
5841 rps_lock_irq_disable(sd);
5842 skb_queue_walk_safe(&sd->input_pkt_queue, skb, tmp) {
5843 if (skb->dev->reg_state == NETREG_UNREGISTERING) {
5844 __skb_unlink(skb, &sd->input_pkt_queue);
5845 dev_kfree_skb_irq(skb);
5846 input_queue_head_incr(sd);
5849 rps_unlock_irq_enable(sd);
5851 skb_queue_walk_safe(&sd->process_queue, skb, tmp) {
5852 if (skb->dev->reg_state == NETREG_UNREGISTERING) {
5853 __skb_unlink(skb, &sd->process_queue);
5855 input_queue_head_incr(sd);
5861 static bool flush_required(int cpu)
5863 #if IS_ENABLED(CONFIG_RPS)
5864 struct softnet_data *sd = &per_cpu(softnet_data, cpu);
5867 rps_lock_irq_disable(sd);
5869 /* as insertion into process_queue happens with the rps lock held,
5870 * process_queue access may race only with dequeue
5872 do_flush = !skb_queue_empty(&sd->input_pkt_queue) ||
5873 !skb_queue_empty_lockless(&sd->process_queue);
5874 rps_unlock_irq_enable(sd);
5878 /* without RPS we can't safely check input_pkt_queue: during a
5879 * concurrent remote skb_queue_splice() we can detect as empty both
5880 * input_pkt_queue and process_queue even if the latter could end up
5881 * containing a lot of packets.
5886 static void flush_all_backlogs(void)
5888 static cpumask_t flush_cpus;
5891 /* since we are under rtnl lock protection we can use static data
5892 * for the cpumask and avoid allocating on stack the possibly large cpumask. */
5899 cpumask_clear(&flush_cpus);
5900 for_each_online_cpu(cpu) {
5901 if (flush_required(cpu)) {
5902 queue_work_on(cpu, system_highpri_wq,
5903 per_cpu_ptr(&flush_works, cpu));
5904 cpumask_set_cpu(cpu, &flush_cpus);
5908 /* we can have in flight packet[s] on the cpus we are not flushing,
5909 * synchronize_net() in unregister_netdevice_many() will take care of
5912 for_each_cpu(cpu, &flush_cpus)
5913 flush_work(per_cpu_ptr(&flush_works, cpu));
5918 static void net_rps_send_ipi(struct softnet_data *remsd)
5922 struct softnet_data *next = remsd->rps_ipi_next;
5924 if (cpu_online(remsd->cpu))
5925 smp_call_function_single_async(remsd->cpu, &remsd->csd);
5932 * net_rps_action_and_irq_enable sends any pending IPI's for rps.
5933 * Note: called with local irq disabled, but exits with local irq enabled.
5935 static void net_rps_action_and_irq_enable(struct softnet_data *sd)
5938 struct softnet_data *remsd = sd->rps_ipi_list;
5941 sd->rps_ipi_list = NULL;
5945 /* Send pending IPI's to kick RPS processing on remote cpus. */
5946 net_rps_send_ipi(remsd);
5952 static bool sd_has_rps_ipi_waiting(struct softnet_data *sd)
5955 return sd->rps_ipi_list != NULL;
5961 static int process_backlog(struct napi_struct *napi, int quota)
5963 struct softnet_data *sd = container_of(napi, struct softnet_data, backlog);
5967 /* Check if we have pending IPIs; it's better to send them now
5968 * rather than waiting for net_rx_action() to end. */
5970 if (sd_has_rps_ipi_waiting(sd)) {
5971 local_irq_disable();
5972 net_rps_action_and_irq_enable(sd);
5975 napi->weight = READ_ONCE(net_hotdata.dev_rx_weight);
5977 struct sk_buff *skb;
5979 while ((skb = __skb_dequeue(&sd->process_queue))) {
5981 __netif_receive_skb(skb);
5983 input_queue_head_incr(sd);
5984 if (++work >= quota)
5989 rps_lock_irq_disable(sd);
5990 if (skb_queue_empty(&sd->input_pkt_queue)) {
5992 /* Inline a custom version of __napi_complete().
5993 * Only the current cpu owns and manipulates this napi,
5994 * and NAPI_STATE_SCHED is the only possible flag set on backlog.
5996 * We can use a plain write instead of clear_bit(),
5997 * and we don't need an smp_mb() memory barrier. */
6002 skb_queue_splice_tail_init(&sd->input_pkt_queue,
6003 &sd->process_queue);
6005 rps_unlock_irq_enable(sd);
6012 * __napi_schedule - schedule for receive
6013 * @n: entry to schedule
6015 * The entry's receive function will be scheduled to run.
6016 * Consider using __napi_schedule_irqoff() if hard irqs are masked.
6018 void __napi_schedule(struct napi_struct *n)
6020 unsigned long flags;
6022 local_irq_save(flags);
6023 ____napi_schedule(this_cpu_ptr(&softnet_data), n);
6024 local_irq_restore(flags);
6026 EXPORT_SYMBOL(__napi_schedule);
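/* Illustrative sketch (not part of this file, compiled out): the classic
 * interrupt-side half of the NAPI contract - mask the device's RX interrupt
 * and schedule the napi instance; the poll callback (see the
 * napi_complete_done() example further down) re-enables the interrupt once
 * it runs dry. The mydev_* names are hypothetical.
 */
#if 0	/* example only */
static irqreturn_t mydev_interrupt(int irq, void *dev_id)
{
	struct mydev_priv *priv = dev_id;

	mydev_mask_rx_irq(priv);	/* hypothetical register write */
	napi_schedule(&priv->napi);	/* raises NET_RX_SOFTIRQ if needed */
	return IRQ_HANDLED;
}
#endif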
6029 * napi_schedule_prep - check if napi can be scheduled
6032 * Test if NAPI routine is already running, and if not mark
6033 * it as running. This is used as a condition variable to
6034 * ensure only one NAPI poll instance runs. We also make
6035 * sure there is no pending NAPI disable.
6037 bool napi_schedule_prep(struct napi_struct *n)
6039 unsigned long new, val = READ_ONCE(n->state);
6042 if (unlikely(val & NAPIF_STATE_DISABLE))
6044 new = val | NAPIF_STATE_SCHED;
6046 /* Sets STATE_MISSED bit if STATE_SCHED was already set
6047 * This was suggested by Alexander Duyck, as compiler
6048 * emits better code than :
6049 * if (val & NAPIF_STATE_SCHED)
6050 * new |= NAPIF_STATE_MISSED;
6052 new |= (val & NAPIF_STATE_SCHED) / NAPIF_STATE_SCHED *
6054 } while (!try_cmpxchg(&n->state, &val, new));
6056 return !(val & NAPIF_STATE_SCHED);
6058 EXPORT_SYMBOL(napi_schedule_prep);
6061 * __napi_schedule_irqoff - schedule for receive
6062 * @n: entry to schedule
6064 * Variant of __napi_schedule() assuming hard irqs are masked.
6066 * On PREEMPT_RT enabled kernels this maps to __napi_schedule()
6067 * because the interrupt disabled assumption might not be true
6068 * due to force-threaded interrupts and spinlock substitution.
6070 void __napi_schedule_irqoff(struct napi_struct *n)
6072 if (!IS_ENABLED(CONFIG_PREEMPT_RT))
6073 ____napi_schedule(this_cpu_ptr(&softnet_data), n);
6077 EXPORT_SYMBOL(__napi_schedule_irqoff);
6079 bool napi_complete_done(struct napi_struct *n, int work_done)
6081 unsigned long flags, val, new, timeout = 0;
6085 * 1) Don't let napi dequeue from the cpu poll list
6086 * just in case it's running on a different cpu.
6087 * 2) If we are busy polling, do nothing here, we have
6088 * the guarantee we will be called later.
6090 if (unlikely(n->state & (NAPIF_STATE_NPSVC |
6091 NAPIF_STATE_IN_BUSY_POLL)))
6096 timeout = READ_ONCE(n->dev->gro_flush_timeout);
6097 n->defer_hard_irqs_count = READ_ONCE(n->dev->napi_defer_hard_irqs);
6099 if (n->defer_hard_irqs_count > 0) {
6100 n->defer_hard_irqs_count--;
6101 timeout = READ_ONCE(n->dev->gro_flush_timeout);
6105 if (n->gro_bitmask) {
6106 /* When the NAPI instance uses a timeout and keeps postponing
6107 * it, we need to bound somehow the time packets are kept in the GRO layer. */
6110 napi_gro_flush(n, !!timeout);
6115 if (unlikely(!list_empty(&n->poll_list))) {
6116 /* If n->poll_list is not empty, we need to mask irqs */
6117 local_irq_save(flags);
6118 list_del_init(&n->poll_list);
6119 local_irq_restore(flags);
6121 WRITE_ONCE(n->list_owner, -1);
6123 val = READ_ONCE(n->state);
6125 WARN_ON_ONCE(!(val & NAPIF_STATE_SCHED));
6127 new = val & ~(NAPIF_STATE_MISSED | NAPIF_STATE_SCHED |
6128 NAPIF_STATE_SCHED_THREADED |
6129 NAPIF_STATE_PREFER_BUSY_POLL);
6131 /* If STATE_MISSED was set, leave STATE_SCHED set,
6132 * because we will call napi->poll() one more time.
6133 * This C code was suggested by Alexander Duyck to help gcc.
6135 new |= (val & NAPIF_STATE_MISSED) / NAPIF_STATE_MISSED *
6137 } while (!try_cmpxchg(&n->state, &val, new));
6139 if (unlikely(val & NAPIF_STATE_MISSED)) {
6145 hrtimer_start(&n->timer, ns_to_ktime(timeout),
6146 HRTIMER_MODE_REL_PINNED);
6149 EXPORT_SYMBOL(napi_complete_done);
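/* Illustrative sketch (not part of this file, compiled out): the poll-side
 * half of the NAPI contract. Process up to @budget packets; if the budget
 * was not exhausted, complete the napi with napi_complete_done() and only
 * then re-enable the device interrupt. The mydev_* names are hypothetical.
 */
#if 0	/* example only */
static int mydev_poll(struct napi_struct *napi, int budget)
{
	struct mydev_priv *priv = container_of(napi, struct mydev_priv, napi);
	int work_done = 0;

	while (work_done < budget) {
		struct sk_buff *skb = mydev_next_completed_skb(priv);

		if (!skb)
			break;
		skb->protocol = eth_type_trans(skb, priv->netdev);
		napi_gro_receive(napi, skb);
		work_done++;
	}

	if (work_done < budget && napi_complete_done(napi, work_done))
		mydev_unmask_rx_irq(priv);	/* hypothetical register write */

	return work_done;
}
#endif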
6151 /* must be called under rcu_read_lock(), as we don't take a reference */
6152 struct napi_struct *napi_by_id(unsigned int napi_id)
6154 unsigned int hash = napi_id % HASH_SIZE(napi_hash);
6155 struct napi_struct *napi;
6157 hlist_for_each_entry_rcu(napi, &napi_hash[hash], napi_hash_node)
6158 if (napi->napi_id == napi_id)
6164 static void skb_defer_free_flush(struct softnet_data *sd)
6166 struct sk_buff *skb, *next;
6168 /* Paired with WRITE_ONCE() in skb_attempt_defer_free() */
6169 if (!READ_ONCE(sd->defer_list))
6172 spin_lock(&sd->defer_lock);
6173 skb = sd->defer_list;
6174 sd->defer_list = NULL;
6175 sd->defer_count = 0;
6176 spin_unlock(&sd->defer_lock);
6178 while (skb != NULL) {
6180 napi_consume_skb(skb, 1);
6185 #if defined(CONFIG_NET_RX_BUSY_POLL)
6187 static void __busy_poll_stop(struct napi_struct *napi, bool skip_schedule)
6189 if (!skip_schedule) {
6190 gro_normal_list(napi);
6191 __napi_schedule(napi);
6195 if (napi->gro_bitmask) {
6196 /* flush too old packets
6197 * If HZ < 1000, flush all packets.
6199 napi_gro_flush(napi, HZ >= 1000);
6202 gro_normal_list(napi);
6203 clear_bit(NAPI_STATE_SCHED, &napi->state);
6207 NAPI_F_PREFER_BUSY_POLL = 1,
6208 NAPI_F_END_ON_RESCHED = 2,
6211 static void busy_poll_stop(struct napi_struct *napi, void *have_poll_lock,
6212 unsigned flags, u16 budget)
6214 bool skip_schedule = false;
6215 unsigned long timeout;
6218 /* Busy polling means there is a high chance device driver hard irq
6219 * could not grab NAPI_STATE_SCHED, and that NAPI_STATE_MISSED was
6220 * set in napi_schedule_prep().
6221 * Since we are about to call napi->poll() once more, we can safely
6222 * clear NAPI_STATE_MISSED.
6224 * Note: x86 could use a single "lock and ..." instruction
6225 * to perform these two clear_bit()
6227 clear_bit(NAPI_STATE_MISSED, &napi->state);
6228 clear_bit(NAPI_STATE_IN_BUSY_POLL, &napi->state);
6232 if (flags & NAPI_F_PREFER_BUSY_POLL) {
6233 napi->defer_hard_irqs_count = READ_ONCE(napi->dev->napi_defer_hard_irqs);
6234 timeout = READ_ONCE(napi->dev->gro_flush_timeout);
6235 if (napi->defer_hard_irqs_count && timeout) {
6236 hrtimer_start(&napi->timer, ns_to_ktime(timeout), HRTIMER_MODE_REL_PINNED);
6237 skip_schedule = true;
6241 /* All we really want here is to re-enable device interrupts.
6242 * Ideally, a new ndo_busy_poll_stop() could avoid another round.
6244 rc = napi->poll(napi, budget);
6245 /* We can't gro_normal_list() here, because napi->poll() might have
6246 * rearmed the napi (napi_complete_done()) in which case it could
6247 * already be running on another CPU.
6249 trace_napi_poll(napi, rc, budget);
6250 netpoll_poll_unlock(have_poll_lock);
6252 __busy_poll_stop(napi, skip_schedule);
6256 static void __napi_busy_loop(unsigned int napi_id,
6257 bool (*loop_end)(void *, unsigned long),
6258 void *loop_end_arg, unsigned flags, u16 budget)
6260 unsigned long start_time = loop_end ? busy_loop_current_time() : 0;
6261 int (*napi_poll)(struct napi_struct *napi, int budget);
6262 void *have_poll_lock = NULL;
6263 struct napi_struct *napi;
6265 WARN_ON_ONCE(!rcu_read_lock_held());
6270 napi = napi_by_id(napi_id);
6274 if (!IS_ENABLED(CONFIG_PREEMPT_RT))
6281 unsigned long val = READ_ONCE(napi->state);
6283 /* If multiple threads are competing for this napi,
6284 * we avoid dirtying napi->state as much as we can.
6286 if (val & (NAPIF_STATE_DISABLE | NAPIF_STATE_SCHED |
6287 NAPIF_STATE_IN_BUSY_POLL)) {
6288 if (flags & NAPI_F_PREFER_BUSY_POLL)
6289 set_bit(NAPI_STATE_PREFER_BUSY_POLL, &napi->state);
6292 if (cmpxchg(&napi->state, val,
6293 val | NAPIF_STATE_IN_BUSY_POLL |
6294 NAPIF_STATE_SCHED) != val) {
6295 if (flags & NAPI_F_PREFER_BUSY_POLL)
6296 set_bit(NAPI_STATE_PREFER_BUSY_POLL, &napi->state);
6299 have_poll_lock = netpoll_poll_lock(napi);
6300 napi_poll = napi->poll;
6302 work = napi_poll(napi, budget);
6303 trace_napi_poll(napi, work, budget);
6304 gro_normal_list(napi);
6307 __NET_ADD_STATS(dev_net(napi->dev),
6308 LINUX_MIB_BUSYPOLLRXPACKETS, work);
6309 skb_defer_free_flush(this_cpu_ptr(&softnet_data));
6312 if (!loop_end || loop_end(loop_end_arg, start_time))
6315 if (unlikely(need_resched())) {
6316 if (flags & NAPI_F_END_ON_RESCHED)
6319 busy_poll_stop(napi, have_poll_lock, flags, budget);
6320 if (!IS_ENABLED(CONFIG_PREEMPT_RT))
6325 if (loop_end(loop_end_arg, start_time))
6332 busy_poll_stop(napi, have_poll_lock, flags, budget);
6333 if (!IS_ENABLED(CONFIG_PREEMPT_RT))
6337 void napi_busy_loop_rcu(unsigned int napi_id,
6338 bool (*loop_end)(void *, unsigned long),
6339 void *loop_end_arg, bool prefer_busy_poll, u16 budget)
6341 unsigned flags = NAPI_F_END_ON_RESCHED;
6343 if (prefer_busy_poll)
6344 flags |= NAPI_F_PREFER_BUSY_POLL;
6346 __napi_busy_loop(napi_id, loop_end, loop_end_arg, flags, budget);
6349 void napi_busy_loop(unsigned int napi_id,
6350 bool (*loop_end)(void *, unsigned long),
6351 void *loop_end_arg, bool prefer_busy_poll, u16 budget)
6353 unsigned flags = prefer_busy_poll ? NAPI_F_PREFER_BUSY_POLL : 0;
6356 __napi_busy_loop(napi_id, loop_end, loop_end_arg, flags, budget);
6359 EXPORT_SYMBOL(napi_busy_loop);
6361 #endif /* CONFIG_NET_RX_BUSY_POLL */
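/* Illustrative sketch (not part of this file, compiled out): how a caller in
 * the style of sk_busy_loop() might drive napi_busy_loop(). The loop_end
 * callback decides when to give up based on the start timestamp it receives;
 * my_poll_done() and the 64-packet budget are hypothetical.
 */
#if 0	/* example only */
static bool my_loop_end(void *arg, unsigned long start_time)
{
	return my_poll_done(arg) ||		/* hypothetical completion test */
	       busy_loop_timeout(start_time);
}

static void my_busy_poll(unsigned int napi_id, void *arg)
{
	napi_busy_loop(napi_id, my_loop_end, arg,
		       false /* prefer_busy_poll */, 64 /* budget */);
}
#endif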
6363 static void napi_hash_add(struct napi_struct *napi)
6365 if (test_bit(NAPI_STATE_NO_BUSY_POLL, &napi->state))
6368 spin_lock(&napi_hash_lock);
6370 /* 0..NR_CPUS range is reserved for sender_cpu use */
6372 if (unlikely(++napi_gen_id < MIN_NAPI_ID))
6373 napi_gen_id = MIN_NAPI_ID;
6374 } while (napi_by_id(napi_gen_id));
6375 napi->napi_id = napi_gen_id;
6377 hlist_add_head_rcu(&napi->napi_hash_node,
6378 &napi_hash[napi->napi_id % HASH_SIZE(napi_hash)]);
6380 spin_unlock(&napi_hash_lock);
6383 /* Warning : caller is responsible for making sure an rcu grace period
6384 * is respected before freeing memory containing @napi
6386 static void napi_hash_del(struct napi_struct *napi)
6388 spin_lock(&napi_hash_lock);
6390 hlist_del_init_rcu(&napi->napi_hash_node);
6392 spin_unlock(&napi_hash_lock);
6395 static enum hrtimer_restart napi_watchdog(struct hrtimer *timer)
6397 struct napi_struct *napi;
6399 napi = container_of(timer, struct napi_struct, timer);
6401 /* Note : we use a relaxed variant of napi_schedule_prep() not setting
6402 * NAPI_STATE_MISSED, since we do not react to a device IRQ.
6404 if (!napi_disable_pending(napi) &&
6405 !test_and_set_bit(NAPI_STATE_SCHED, &napi->state)) {
6406 clear_bit(NAPI_STATE_PREFER_BUSY_POLL, &napi->state);
6407 __napi_schedule_irqoff(napi);
6410 return HRTIMER_NORESTART;
6413 static void init_gro_hash(struct napi_struct *napi)
6417 for (i = 0; i < GRO_HASH_BUCKETS; i++) {
6418 INIT_LIST_HEAD(&napi->gro_hash[i].list);
6419 napi->gro_hash[i].count = 0;
6421 napi->gro_bitmask = 0;
6424 int dev_set_threaded(struct net_device *dev, bool threaded)
6426 struct napi_struct *napi;
6429 if (dev->threaded == threaded)
6433 list_for_each_entry(napi, &dev->napi_list, dev_list) {
6434 if (!napi->thread) {
6435 err = napi_kthread_create(napi);
6444 dev->threaded = threaded;
6446 /* Make sure the kthread is created before the THREADED bit is set. */
6449 smp_mb__before_atomic();
6451 /* Setting/unsetting threaded mode on a napi might not immediately
6452 * take effect, if the current napi instance is actively being
6453 * polled. In this case, the switch between threaded mode and
6454 * softirq mode will happen in the next round of napi_schedule().
6455 * This should not cause hiccups/stalls to the live traffic.
6457 list_for_each_entry(napi, &dev->napi_list, dev_list)
6458 assign_bit(NAPI_STATE_THREADED, &napi->state, threaded);
6462 EXPORT_SYMBOL(dev_set_threaded);
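/* Illustrative sketch (not part of this file, compiled out): a driver opting
 * its device into threaded NAPI after its napi instances exist, e.g. from
 * ndo_open(). Administrators can flip the same knob at run time through
 * /sys/class/net/<dev>/threaded. mydev_open() is hypothetical.
 */
#if 0	/* example only */
static int mydev_open(struct net_device *dev)
{
	/* one kthread per napi will be created (or reused) */
	return dev_set_threaded(dev, true);
}
#endif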
6465 * netif_queue_set_napi - Associate queue with the napi
6466 * @dev: device to which NAPI and queue belong
6467 * @queue_index: Index of queue
6468 * @type: queue type as RX or TX
6469 * @napi: NAPI context, pass NULL to clear previously set NAPI
6471 * Associate the queue with its corresponding napi context. This should be done
6472 * after the NAPI handler for the queue-vector has been registered and the queues
6473 * have been mapped to the corresponding interrupt vector. */
6475 void netif_queue_set_napi(struct net_device *dev, unsigned int queue_index,
6476 enum netdev_queue_type type, struct napi_struct *napi)
6478 struct netdev_rx_queue *rxq;
6479 struct netdev_queue *txq;
6481 if (WARN_ON_ONCE(napi && !napi->dev))
6483 if (dev->reg_state >= NETREG_REGISTERED)
6487 case NETDEV_QUEUE_TYPE_RX:
6488 rxq = __netif_get_rx_queue(dev, queue_index);
6491 case NETDEV_QUEUE_TYPE_TX:
6492 txq = netdev_get_tx_queue(dev, queue_index);
6499 EXPORT_SYMBOL(netif_queue_set_napi);
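/* Illustrative sketch (not part of this file, compiled out): tying RX/TX
 * queues to the napi instance of their interrupt vector, as the kernel-doc
 * above asks for, once the vectors have been set up. priv->vec[] and the
 * surrounding helper are hypothetical.
 */
#if 0	/* example only */
static void mydev_map_queues(struct net_device *dev, struct mydev_priv *priv)
{
	unsigned int i;

	for (i = 0; i < priv->num_vectors; i++) {
		netif_queue_set_napi(dev, i, NETDEV_QUEUE_TYPE_RX,
				     &priv->vec[i].napi);
		netif_queue_set_napi(dev, i, NETDEV_QUEUE_TYPE_TX,
				     &priv->vec[i].napi);
	}
}
#endif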
6501 void netif_napi_add_weight(struct net_device *dev, struct napi_struct *napi,
6502 int (*poll)(struct napi_struct *, int), int weight)
6504 if (WARN_ON(test_and_set_bit(NAPI_STATE_LISTED, &napi->state)))
6507 INIT_LIST_HEAD(&napi->poll_list);
6508 INIT_HLIST_NODE(&napi->napi_hash_node);
6509 hrtimer_init(&napi->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_PINNED);
6510 napi->timer.function = napi_watchdog;
6511 init_gro_hash(napi);
6513 INIT_LIST_HEAD(&napi->rx_list);
6516 if (weight > NAPI_POLL_WEIGHT)
6517 netdev_err_once(dev, "%s() called with weight %d\n", __func__,
6519 napi->weight = weight;
6521 #ifdef CONFIG_NETPOLL
6522 napi->poll_owner = -1;
6524 napi->list_owner = -1;
6525 set_bit(NAPI_STATE_SCHED, &napi->state);
6526 set_bit(NAPI_STATE_NPSVC, &napi->state);
6527 list_add_rcu(&napi->dev_list, &dev->napi_list);
6528 napi_hash_add(napi);
6529 napi_get_frags_check(napi);
6530 /* Create kthread for this napi if dev->threaded is set.
6531 * Clear dev->threaded if kthread creation failed so that
6532 * threaded mode will not be enabled in napi_enable().
6534 if (dev->threaded && napi_kthread_create(napi))
6536 netif_napi_set_irq(napi, -1);
6538 EXPORT_SYMBOL(netif_napi_add_weight);
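/* Illustrative sketch (not part of this file, compiled out): typical napi
 * setup in a driver probe/open path. Most drivers use the netif_napi_add()
 * wrapper, which calls netif_napi_add_weight() with the default
 * NAPI_POLL_WEIGHT, and then enable the instance once the device is ready.
 * The mydev_* names are hypothetical.
 */
#if 0	/* example only */
static int mydev_setup_napi(struct net_device *dev, struct mydev_priv *priv)
{
	netif_napi_add(dev, &priv->napi, mydev_poll);
	napi_enable(&priv->napi);
	return 0;
}
#endif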
6540 void napi_disable(struct napi_struct *n)
6542 unsigned long val, new;
6545 set_bit(NAPI_STATE_DISABLE, &n->state);
6547 val = READ_ONCE(n->state);
6549 while (val & (NAPIF_STATE_SCHED | NAPIF_STATE_NPSVC)) {
6550 usleep_range(20, 200);
6551 val = READ_ONCE(n->state);
6554 new = val | NAPIF_STATE_SCHED | NAPIF_STATE_NPSVC;
6555 new &= ~(NAPIF_STATE_THREADED | NAPIF_STATE_PREFER_BUSY_POLL);
6556 } while (!try_cmpxchg(&n->state, &val, new));
6558 hrtimer_cancel(&n->timer);
6560 clear_bit(NAPI_STATE_DISABLE, &n->state);
6562 EXPORT_SYMBOL(napi_disable);
6565 * napi_enable - enable NAPI scheduling
6568 * Resume NAPI from being scheduled on this context.
6569 * Must be paired with napi_disable.
6571 void napi_enable(struct napi_struct *n)
6573 unsigned long new, val = READ_ONCE(n->state);
6576 BUG_ON(!test_bit(NAPI_STATE_SCHED, &val));
6578 new = val & ~(NAPIF_STATE_SCHED | NAPIF_STATE_NPSVC);
6579 if (n->dev->threaded && n->thread)
6580 new |= NAPIF_STATE_THREADED;
6581 } while (!try_cmpxchg(&n->state, &val, new));
6583 EXPORT_SYMBOL(napi_enable);
6585 static void flush_gro_hash(struct napi_struct *napi)
6589 for (i = 0; i < GRO_HASH_BUCKETS; i++) {
6590 struct sk_buff *skb, *n;
6592 list_for_each_entry_safe(skb, n, &napi->gro_hash[i].list, list)
6594 napi->gro_hash[i].count = 0;
6598 /* Must be called in process context */
6599 void __netif_napi_del(struct napi_struct *napi)
6601 if (!test_and_clear_bit(NAPI_STATE_LISTED, &napi->state))
6604 napi_hash_del(napi);
6605 list_del_rcu(&napi->dev_list);
6606 napi_free_frags(napi);
6608 flush_gro_hash(napi);
6609 napi->gro_bitmask = 0;
6612 kthread_stop(napi->thread);
6613 napi->thread = NULL;
6616 EXPORT_SYMBOL(__netif_napi_del);
6618 static int __napi_poll(struct napi_struct *n, bool *repoll)
6624 /* This NAPI_STATE_SCHED test is for avoiding a race
6625 * with netpoll's poll_napi(). Only the entity which
6626 * obtains the lock and sees NAPI_STATE_SCHED set will
6627 * actually make the ->poll() call. Therefore we avoid
6628 * accidentally calling ->poll() when NAPI is not scheduled.
6631 if (napi_is_scheduled(n)) {
6632 work = n->poll(n, weight);
6633 trace_napi_poll(n, work, weight);
6635 xdp_do_check_flushed(n);
6638 if (unlikely(work > weight))
6639 netdev_err_once(n->dev, "NAPI poll function %pS returned %d, exceeding its budget of %d.\n",
6640 n->poll, work, weight);
6642 if (likely(work < weight))
6645 /* Drivers must not modify the NAPI state if they
6646 * consume the entire weight. In such cases this code
6647 * still "owns" the NAPI instance and therefore can
6648 * move the instance around on the list at will. */
6650 if (unlikely(napi_disable_pending(n))) {
6655 /* The NAPI context has more processing work, but busy-polling
6656 * is preferred. Exit early.
6658 if (napi_prefer_busy_poll(n)) {
6659 if (napi_complete_done(n, work)) {
6660 /* If timeout is not set, we need to make sure
6661 * that the NAPI is re-scheduled.
6668 if (n->gro_bitmask) {
6669 /* flush too old packets
6670 * If HZ < 1000, flush all packets.
6672 napi_gro_flush(n, HZ >= 1000);
6677 /* Some drivers may have called napi_schedule
6678 * prior to exhausting their budget.
6680 if (unlikely(!list_empty(&n->poll_list))) {
6681 pr_warn_once("%s: Budget exhausted after napi rescheduled\n",
6682 n->dev ? n->dev->name : "backlog");
6691 static int napi_poll(struct napi_struct *n, struct list_head *repoll)
6693 bool do_repoll = false;
6697 list_del_init(&n->poll_list);
6699 have = netpoll_poll_lock(n);
6701 work = __napi_poll(n, &do_repoll);
6704 list_add_tail(&n->poll_list, repoll);
6706 netpoll_poll_unlock(have);
6711 static int napi_thread_wait(struct napi_struct *napi)
6715 set_current_state(TASK_INTERRUPTIBLE);
6717 while (!kthread_should_stop()) {
6718 /* Testing SCHED_THREADED bit here to make sure the current
6719 * kthread owns this napi and could poll on this napi.
6720 * Testing SCHED bit is not enough because SCHED bit might be
6721 * set by some other busy poll thread or by napi_disable().
6723 if (test_bit(NAPI_STATE_SCHED_THREADED, &napi->state) || woken) {
6724 WARN_ON(!list_empty(&napi->poll_list));
6725 __set_current_state(TASK_RUNNING);
6730 /* woken being true indicates this thread owns this napi. */
6732 set_current_state(TASK_INTERRUPTIBLE);
6734 __set_current_state(TASK_RUNNING);
6739 static int napi_threaded_poll(void *data)
6741 struct napi_struct *napi = data;
6742 struct softnet_data *sd;
6745 while (!napi_thread_wait(napi)) {
6746 unsigned long last_qs = jiffies;
6749 bool repoll = false;
6752 sd = this_cpu_ptr(&softnet_data);
6753 sd->in_napi_threaded_poll = true;
6755 have = netpoll_poll_lock(napi);
6756 __napi_poll(napi, &repoll);
6757 netpoll_poll_unlock(have);
6759 sd->in_napi_threaded_poll = false;
6762 if (sd_has_rps_ipi_waiting(sd)) {
6763 local_irq_disable();
6764 net_rps_action_and_irq_enable(sd);
6766 skb_defer_free_flush(sd);
6772 rcu_softirq_qs_periodic(last_qs);
6779 static __latent_entropy void net_rx_action(struct softirq_action *h)
6781 struct softnet_data *sd = this_cpu_ptr(&softnet_data);
6782 unsigned long time_limit = jiffies +
6783 usecs_to_jiffies(READ_ONCE(net_hotdata.netdev_budget_usecs));
6784 int budget = READ_ONCE(net_hotdata.netdev_budget);
6789 sd->in_net_rx_action = true;
6790 local_irq_disable();
6791 list_splice_init(&sd->poll_list, &list);
6795 struct napi_struct *n;
6797 skb_defer_free_flush(sd);
6799 if (list_empty(&list)) {
6800 if (list_empty(&repoll)) {
6801 sd->in_net_rx_action = false;
6803 /* We need to check if ____napi_schedule()
6804 * had refilled poll_list while
6805 * sd->in_net_rx_action was true.
6807 if (!list_empty(&sd->poll_list))
6809 if (!sd_has_rps_ipi_waiting(sd))
6815 n = list_first_entry(&list, struct napi_struct, poll_list);
6816 budget -= napi_poll(n, &repoll);
6818 /* If softirq window is exhausted then punt.
6819 * Allow this to run for 2 jiffies, which allows
6820 * an average latency of 1.5/HZ. */
6822 if (unlikely(budget <= 0 ||
6823 time_after_eq(jiffies, time_limit))) {
6829 local_irq_disable();
6831 list_splice_tail_init(&sd->poll_list, &list);
6832 list_splice_tail(&repoll, &list);
6833 list_splice(&list, &sd->poll_list);
6834 if (!list_empty(&sd->poll_list))
6835 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
6837 sd->in_net_rx_action = false;
6839 net_rps_action_and_irq_enable(sd);
6843 struct netdev_adjacent {
6844 struct net_device *dev;
6845 netdevice_tracker dev_tracker;
6847 /* upper master flag, there can only be one master device per list */
6850 /* lookup ignore flag */
6853 /* counter for the number of times this device was added to us */
6856 /* private field for the users */
6859 struct list_head list;
6860 struct rcu_head rcu;
6863 static struct netdev_adjacent *__netdev_find_adj(struct net_device *adj_dev,
6864 struct list_head *adj_list)
6866 struct netdev_adjacent *adj;
6868 list_for_each_entry(adj, adj_list, list) {
6869 if (adj->dev == adj_dev)
6875 static int ____netdev_has_upper_dev(struct net_device *upper_dev,
6876 struct netdev_nested_priv *priv)
6878 struct net_device *dev = (struct net_device *)priv->data;
6880 return upper_dev == dev;
6884 * netdev_has_upper_dev - Check if device is linked to an upper device
6886 * @upper_dev: upper device to check
6888 * Find out if a device is linked to specified upper device and return true
6889 * in case it is. Note that this checks only immediate upper device,
6890 * not through a complete stack of devices. The caller must hold the RTNL lock.
6892 bool netdev_has_upper_dev(struct net_device *dev,
6893 struct net_device *upper_dev)
6895 struct netdev_nested_priv priv = {
6896 .data = (void *)upper_dev,
6901 return netdev_walk_all_upper_dev_rcu(dev, ____netdev_has_upper_dev,
6904 EXPORT_SYMBOL(netdev_has_upper_dev);
6907 * netdev_has_upper_dev_all_rcu - Check if device is linked to an upper device
6909 * @upper_dev: upper device to check
6911 * Find out if a device is linked to specified upper device and return true
6912 * in case it is. Note that this checks the entire upper device chain.
6913 * The caller must hold rcu lock.
6916 bool netdev_has_upper_dev_all_rcu(struct net_device *dev,
6917 struct net_device *upper_dev)
6919 struct netdev_nested_priv priv = {
6920 .data = (void *)upper_dev,
6923 return !!netdev_walk_all_upper_dev_rcu(dev, ____netdev_has_upper_dev,
6926 EXPORT_SYMBOL(netdev_has_upper_dev_all_rcu);
6929 * netdev_has_any_upper_dev - Check if device is linked to some device
6932 * Find out if a device is linked to an upper device and return true in case
6933 * it is. The caller must hold the RTNL lock.
6935 bool netdev_has_any_upper_dev(struct net_device *dev)
6939 return !list_empty(&dev->adj_list.upper);
6941 EXPORT_SYMBOL(netdev_has_any_upper_dev);
6944 * netdev_master_upper_dev_get - Get master upper device
6947 * Find a master upper device and return pointer to it or NULL in case
6948 * it's not there. The caller must hold the RTNL lock.
6950 struct net_device *netdev_master_upper_dev_get(struct net_device *dev)
6952 struct netdev_adjacent *upper;
6956 if (list_empty(&dev->adj_list.upper))
6959 upper = list_first_entry(&dev->adj_list.upper,
6960 struct netdev_adjacent, list);
6961 if (likely(upper->master))
6965 EXPORT_SYMBOL(netdev_master_upper_dev_get);
6967 static struct net_device *__netdev_master_upper_dev_get(struct net_device *dev)
6969 struct netdev_adjacent *upper;
6973 if (list_empty(&dev->adj_list.upper))
6976 upper = list_first_entry(&dev->adj_list.upper,
6977 struct netdev_adjacent, list);
6978 if (likely(upper->master) && !upper->ignore)
6984 * netdev_has_any_lower_dev - Check if device is linked to some device
6987 * Find out if a device is linked to a lower device and return true in case
6988 * it is. The caller must hold the RTNL lock.
6990 static bool netdev_has_any_lower_dev(struct net_device *dev)
6994 return !list_empty(&dev->adj_list.lower);
6997 void *netdev_adjacent_get_private(struct list_head *adj_list)
6999 struct netdev_adjacent *adj;
7001 adj = list_entry(adj_list, struct netdev_adjacent, list);
7003 return adj->private;
7005 EXPORT_SYMBOL(netdev_adjacent_get_private);
7008 * netdev_upper_get_next_dev_rcu - Get the next dev from upper list
7010 * @iter: list_head ** of the current position
7012 * Gets the next device from the dev's upper list, starting from iter
7013 * position. The caller must hold RCU read lock.
7015 struct net_device *netdev_upper_get_next_dev_rcu(struct net_device *dev,
7016 struct list_head **iter)
7018 struct netdev_adjacent *upper;
7020 WARN_ON_ONCE(!rcu_read_lock_held() && !lockdep_rtnl_is_held());
7022 upper = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);
7024 if (&upper->list == &dev->adj_list.upper)
7027 *iter = &upper->list;
7031 EXPORT_SYMBOL(netdev_upper_get_next_dev_rcu);
7033 static struct net_device *__netdev_next_upper_dev(struct net_device *dev,
7034 struct list_head **iter,
7037 struct netdev_adjacent *upper;
7039 upper = list_entry((*iter)->next, struct netdev_adjacent, list);
7041 if (&upper->list == &dev->adj_list.upper)
7044 *iter = &upper->list;
7045 *ignore = upper->ignore;
7050 static struct net_device *netdev_next_upper_dev_rcu(struct net_device *dev,
7051 struct list_head **iter)
7053 struct netdev_adjacent *upper;
7055 WARN_ON_ONCE(!rcu_read_lock_held() && !lockdep_rtnl_is_held());
7057 upper = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);
7059 if (&upper->list == &dev->adj_list.upper)
7062 *iter = &upper->list;
7067 static int __netdev_walk_all_upper_dev(struct net_device *dev,
7068 int (*fn)(struct net_device *dev,
7069 struct netdev_nested_priv *priv),
7070 struct netdev_nested_priv *priv)
7072 struct net_device *udev, *next, *now, *dev_stack[MAX_NEST_DEV + 1];
7073 struct list_head *niter, *iter, *iter_stack[MAX_NEST_DEV + 1];
7078 iter = &dev->adj_list.upper;
7082 ret = fn(now, priv);
7089 udev = __netdev_next_upper_dev(now, &iter, &ignore);
7096 niter = &udev->adj_list.upper;
7097 dev_stack[cur] = now;
7098 iter_stack[cur++] = iter;
7105 next = dev_stack[--cur];
7106 niter = iter_stack[cur];
7116 int netdev_walk_all_upper_dev_rcu(struct net_device *dev,
7117 int (*fn)(struct net_device *dev,
7118 struct netdev_nested_priv *priv),
7119 struct netdev_nested_priv *priv)
7121 struct net_device *udev, *next, *now, *dev_stack[MAX_NEST_DEV + 1];
7122 struct list_head *niter, *iter, *iter_stack[MAX_NEST_DEV + 1];
7126 iter = &dev->adj_list.upper;
7130 ret = fn(now, priv);
7137 udev = netdev_next_upper_dev_rcu(now, &iter);
7142 niter = &udev->adj_list.upper;
7143 dev_stack[cur] = now;
7144 iter_stack[cur++] = iter;
7151 next = dev_stack[--cur];
7152 niter = iter_stack[cur];
7161 EXPORT_SYMBOL_GPL(netdev_walk_all_upper_dev_rcu);
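/* Illustrative sketch (not part of this file, compiled out): walking the
 * whole upper-device graph with netdev_walk_all_upper_dev_rcu(), passing
 * state through struct netdev_nested_priv. Here the callback just counts
 * the uppers; count_upper() and its caller are hypothetical.
 */
#if 0	/* example only */
static int count_upper(struct net_device *upper, struct netdev_nested_priv *priv)
{
	unsigned int *count = (unsigned int *)priv->data;

	(*count)++;
	return 0;			/* non-zero would stop the walk */
}

static unsigned int mydev_count_uppers(struct net_device *dev)
{
	unsigned int count = 0;
	struct netdev_nested_priv priv = {
		.data = (void *)&count,
	};

	rcu_read_lock();
	netdev_walk_all_upper_dev_rcu(dev, count_upper, &priv);
	rcu_read_unlock();
	return count;
}
#endif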
7163 static bool __netdev_has_upper_dev(struct net_device *dev,
7164 struct net_device *upper_dev)
7166 struct netdev_nested_priv priv = {
7168 .data = (void *)upper_dev,
7173 return __netdev_walk_all_upper_dev(dev, ____netdev_has_upper_dev,
7178 * netdev_lower_get_next_private - Get the next ->private from the
7179 * lower neighbour list
7181 * @iter: list_head ** of the current position
7183 * Gets the next netdev_adjacent->private from the dev's lower neighbour
7184 * list, starting from iter position. The caller must either hold the
7185 * RTNL lock or its own locking that guarantees that the neighbour lower
7186 * list will remain unchanged.
7188 void *netdev_lower_get_next_private(struct net_device *dev,
7189 struct list_head **iter)
7191 struct netdev_adjacent *lower;
7193 lower = list_entry(*iter, struct netdev_adjacent, list);
7195 if (&lower->list == &dev->adj_list.lower)
7198 *iter = lower->list.next;
7200 return lower->private;
7202 EXPORT_SYMBOL(netdev_lower_get_next_private);
7205 * netdev_lower_get_next_private_rcu - Get the next ->private from the
7206 * lower neighbour list, RCU
7209 * @iter: list_head ** of the current position
7211 * Gets the next netdev_adjacent->private from the dev's lower neighbour
7212 * list, starting from iter position. The caller must hold RCU read lock.
7214 void *netdev_lower_get_next_private_rcu(struct net_device *dev,
7215 struct list_head **iter)
7217 struct netdev_adjacent *lower;
7219 WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_bh_held());
7221 lower = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);
7223 if (&lower->list == &dev->adj_list.lower)
7226 *iter = &lower->list;
7228 return lower->private;
7230 EXPORT_SYMBOL(netdev_lower_get_next_private_rcu);
7233 * netdev_lower_get_next - Get the next device from the lower neighbour
7236 * @iter: list_head ** of the current position
7238 * Gets the next netdev_adjacent from the dev's lower neighbour
7239 * list, starting from iter position. The caller must hold RTNL lock or
7240 * its own locking that guarantees that the neighbour lower
7241 * list will remain unchanged.
7243 void *netdev_lower_get_next(struct net_device *dev, struct list_head **iter)
7245 struct netdev_adjacent *lower;
7247 lower = list_entry(*iter, struct netdev_adjacent, list);
7249 if (&lower->list == &dev->adj_list.lower)
7252 *iter = lower->list.next;
7256 EXPORT_SYMBOL(netdev_lower_get_next);
7258 static struct net_device *netdev_next_lower_dev(struct net_device *dev,
7259 struct list_head **iter)
7261 struct netdev_adjacent *lower;
7263 lower = list_entry((*iter)->next, struct netdev_adjacent, list);
7265 if (&lower->list == &dev->adj_list.lower)
7268 *iter = &lower->list;
7273 static struct net_device *__netdev_next_lower_dev(struct net_device *dev,
7274 struct list_head **iter,
7277 struct netdev_adjacent *lower;
7279 lower = list_entry((*iter)->next, struct netdev_adjacent, list);
7281 if (&lower->list == &dev->adj_list.lower)
7284 *iter = &lower->list;
7285 *ignore = lower->ignore;
7290 int netdev_walk_all_lower_dev(struct net_device *dev,
7291 int (*fn)(struct net_device *dev,
7292 struct netdev_nested_priv *priv),
7293 struct netdev_nested_priv *priv)
7295 struct net_device *ldev, *next, *now, *dev_stack[MAX_NEST_DEV + 1];
7296 struct list_head *niter, *iter, *iter_stack[MAX_NEST_DEV + 1];
7300 iter = &dev->adj_list.lower;
7304 ret = fn(now, priv);
7311 ldev = netdev_next_lower_dev(now, &iter);
7316 niter = &ldev->adj_list.lower;
7317 dev_stack[cur] = now;
7318 iter_stack[cur++] = iter;
7325 next = dev_stack[--cur];
7326 niter = iter_stack[cur];
7335 EXPORT_SYMBOL_GPL(netdev_walk_all_lower_dev);
7337 static int __netdev_walk_all_lower_dev(struct net_device *dev,
7338 int (*fn)(struct net_device *dev,
7339 struct netdev_nested_priv *priv),
7340 struct netdev_nested_priv *priv)
7342 struct net_device *ldev, *next, *now, *dev_stack[MAX_NEST_DEV + 1];
7343 struct list_head *niter, *iter, *iter_stack[MAX_NEST_DEV + 1];
7348 iter = &dev->adj_list.lower;
7352 ret = fn(now, priv);
7359 ldev = __netdev_next_lower_dev(now, &iter, &ignore);
7366 niter = &ldev->adj_list.lower;
7367 dev_stack[cur] = now;
7368 iter_stack[cur++] = iter;
7375 next = dev_stack[--cur];
7376 niter = iter_stack[cur];
7386 struct net_device *netdev_next_lower_dev_rcu(struct net_device *dev,
7387 struct list_head **iter)
7389 struct netdev_adjacent *lower;
7391 lower = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);
7392 if (&lower->list == &dev->adj_list.lower)
7395 *iter = &lower->list;
7399 EXPORT_SYMBOL(netdev_next_lower_dev_rcu);
7401 static u8 __netdev_upper_depth(struct net_device *dev)
7403 struct net_device *udev;
7404 struct list_head *iter;
7408 for (iter = &dev->adj_list.upper,
7409 udev = __netdev_next_upper_dev(dev, &iter, &ignore);
7411 udev = __netdev_next_upper_dev(dev, &iter, &ignore)) {
7414 if (max_depth < udev->upper_level)
7415 max_depth = udev->upper_level;
7421 static u8 __netdev_lower_depth(struct net_device *dev)
7423 struct net_device *ldev;
7424 struct list_head *iter;
7428 for (iter = &dev->adj_list.lower,
7429 ldev = __netdev_next_lower_dev(dev, &iter, &ignore);
7431 ldev = __netdev_next_lower_dev(dev, &iter, &ignore)) {
7434 if (max_depth < ldev->lower_level)
7435 max_depth = ldev->lower_level;
7441 static int __netdev_update_upper_level(struct net_device *dev,
7442 struct netdev_nested_priv *__unused)
7444 dev->upper_level = __netdev_upper_depth(dev) + 1;
7448 #ifdef CONFIG_LOCKDEP
7449 static LIST_HEAD(net_unlink_list);
7451 static void net_unlink_todo(struct net_device *dev)
7453 if (list_empty(&dev->unlink_list))
7454 list_add_tail(&dev->unlink_list, &net_unlink_list);
7458 static int __netdev_update_lower_level(struct net_device *dev,
7459 struct netdev_nested_priv *priv)
7461 dev->lower_level = __netdev_lower_depth(dev) + 1;
7463 #ifdef CONFIG_LOCKDEP
7467 if (priv->flags & NESTED_SYNC_IMM)
7468 dev->nested_level = dev->lower_level - 1;
7469 if (priv->flags & NESTED_SYNC_TODO)
7470 net_unlink_todo(dev);
7475 int netdev_walk_all_lower_dev_rcu(struct net_device *dev,
7476 int (*fn)(struct net_device *dev,
7477 struct netdev_nested_priv *priv),
7478 struct netdev_nested_priv *priv)
7480 struct net_device *ldev, *next, *now, *dev_stack[MAX_NEST_DEV + 1];
7481 struct list_head *niter, *iter, *iter_stack[MAX_NEST_DEV + 1];
7485 iter = &dev->adj_list.lower;
7489 ret = fn(now, priv);
7496 ldev = netdev_next_lower_dev_rcu(now, &iter);
7501 niter = &ldev->adj_list.lower;
7502 dev_stack[cur] = now;
7503 iter_stack[cur++] = iter;
7510 next = dev_stack[--cur];
7511 niter = iter_stack[cur];
7520 EXPORT_SYMBOL_GPL(netdev_walk_all_lower_dev_rcu);
7523 * netdev_lower_get_first_private_rcu - Get the first ->private from the
7524 * lower neighbour list, RCU
7528 * Gets the first netdev_adjacent->private from the dev's lower neighbour
7529 * list. The caller must hold RCU read lock.
7531 void *netdev_lower_get_first_private_rcu(struct net_device *dev)
7533 struct netdev_adjacent *lower;
7535 lower = list_first_or_null_rcu(&dev->adj_list.lower,
7536 struct netdev_adjacent, list);
7538 return lower->private;
7541 EXPORT_SYMBOL(netdev_lower_get_first_private_rcu);
7544 * netdev_master_upper_dev_get_rcu - Get master upper device
7547 * Find a master upper device and return a pointer to it, or NULL if
7548 * there is none. The caller must hold the RCU read lock.
7550 struct net_device *netdev_master_upper_dev_get_rcu(struct net_device *dev)
7552 struct netdev_adjacent *upper;
7554 upper = list_first_or_null_rcu(&dev->adj_list.upper,
7555 struct netdev_adjacent, list);
7556 if (upper && likely(upper->master))
7560 EXPORT_SYMBOL(netdev_master_upper_dev_get_rcu);
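/*
 * Illustrative sketch (editorial addition): reading a port's master from a
 * lockless context. No reference is taken on the returned device, so it may
 * only be used inside the RCU read-side critical section; "port_dev" is a
 * hypothetical name.
 *
 *	rcu_read_lock();
 *	master = netdev_master_upper_dev_get_rcu(port_dev);
 *	if (master)
 *		netdev_dbg(port_dev, "enslaved to %s\n", master->name);
 *	rcu_read_unlock();
 */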
7562 static int netdev_adjacent_sysfs_add(struct net_device *dev,
7563 struct net_device *adj_dev,
7564 struct list_head *dev_list)
7566 char linkname[IFNAMSIZ+7];
7568 sprintf(linkname, dev_list == &dev->adj_list.upper ?
7569 "upper_%s" : "lower_%s", adj_dev->name);
7570 return sysfs_create_link(&(dev->dev.kobj), &(adj_dev->dev.kobj),
7573 static void netdev_adjacent_sysfs_del(struct net_device *dev,
7575 struct list_head *dev_list)
7577 char linkname[IFNAMSIZ+7];
7579 sprintf(linkname, dev_list == &dev->adj_list.upper ?
7580 "upper_%s" : "lower_%s", name);
7581 sysfs_remove_link(&(dev->dev.kobj), linkname);
7584 static inline bool netdev_adjacent_is_neigh_list(struct net_device *dev,
7585 struct net_device *adj_dev,
7586 struct list_head *dev_list)
7588 return (dev_list == &dev->adj_list.upper ||
7589 dev_list == &dev->adj_list.lower) &&
7590 net_eq(dev_net(dev), dev_net(adj_dev));
7593 static int __netdev_adjacent_dev_insert(struct net_device *dev,
7594 struct net_device *adj_dev,
7595 struct list_head *dev_list,
7596 void *private, bool master)
7598 struct netdev_adjacent *adj;
7601 adj = __netdev_find_adj(adj_dev, dev_list);
7605 pr_debug("Insert adjacency: dev %s adj_dev %s adj->ref_nr %d\n",
7606 dev->name, adj_dev->name, adj->ref_nr);
7611 adj = kmalloc(sizeof(*adj), GFP_KERNEL);
7616 adj->master = master;
7618 adj->private = private;
7619 adj->ignore = false;
7620 netdev_hold(adj_dev, &adj->dev_tracker, GFP_KERNEL);
7622 pr_debug("Insert adjacency: dev %s adj_dev %s adj->ref_nr %d; dev_hold on %s\n",
7623 dev->name, adj_dev->name, adj->ref_nr, adj_dev->name);
7625 if (netdev_adjacent_is_neigh_list(dev, adj_dev, dev_list)) {
7626 ret = netdev_adjacent_sysfs_add(dev, adj_dev, dev_list);
7631 /* Ensure that master link is always the first item in list. */
7633 ret = sysfs_create_link(&(dev->dev.kobj),
7634 &(adj_dev->dev.kobj), "master");
7636 goto remove_symlinks;
7638 list_add_rcu(&adj->list, dev_list);
7640 list_add_tail_rcu(&adj->list, dev_list);
7646 if (netdev_adjacent_is_neigh_list(dev, adj_dev, dev_list))
7647 netdev_adjacent_sysfs_del(dev, adj_dev->name, dev_list);
7649 netdev_put(adj_dev, &adj->dev_tracker);
7655 static void __netdev_adjacent_dev_remove(struct net_device *dev,
7656 struct net_device *adj_dev,
7658 struct list_head *dev_list)
7660 struct netdev_adjacent *adj;
7662 pr_debug("Remove adjacency: dev %s adj_dev %s ref_nr %d\n",
7663 dev->name, adj_dev->name, ref_nr);
7665 adj = __netdev_find_adj(adj_dev, dev_list);
7668 pr_err("Adjacency does not exist for device %s from %s\n",
7669 dev->name, adj_dev->name);
7674 if (adj->ref_nr > ref_nr) {
7675 pr_debug("adjacency: %s to %s ref_nr - %d = %d\n",
7676 dev->name, adj_dev->name, ref_nr,
7677 adj->ref_nr - ref_nr);
7678 adj->ref_nr -= ref_nr;
7683 sysfs_remove_link(&(dev->dev.kobj), "master");
7685 if (netdev_adjacent_is_neigh_list(dev, adj_dev, dev_list))
7686 netdev_adjacent_sysfs_del(dev, adj_dev->name, dev_list);
7688 list_del_rcu(&adj->list);
7689 pr_debug("adjacency: dev_put for %s, because link removed from %s to %s\n",
7690 adj_dev->name, dev->name, adj_dev->name);
7691 netdev_put(adj_dev, &adj->dev_tracker);
7692 kfree_rcu(adj, rcu);
7695 static int __netdev_adjacent_dev_link_lists(struct net_device *dev,
7696 struct net_device *upper_dev,
7697 struct list_head *up_list,
7698 struct list_head *down_list,
7699 void *private, bool master)
7703 ret = __netdev_adjacent_dev_insert(dev, upper_dev, up_list,
7708 ret = __netdev_adjacent_dev_insert(upper_dev, dev, down_list,
7711 __netdev_adjacent_dev_remove(dev, upper_dev, 1, up_list);
7718 static void __netdev_adjacent_dev_unlink_lists(struct net_device *dev,
7719 struct net_device *upper_dev,
7721 struct list_head *up_list,
7722 struct list_head *down_list)
7724 __netdev_adjacent_dev_remove(dev, upper_dev, ref_nr, up_list);
7725 __netdev_adjacent_dev_remove(upper_dev, dev, ref_nr, down_list);
7728 static int __netdev_adjacent_dev_link_neighbour(struct net_device *dev,
7729 struct net_device *upper_dev,
7730 void *private, bool master)
7732 return __netdev_adjacent_dev_link_lists(dev, upper_dev,
7733 &dev->adj_list.upper,
7734 &upper_dev->adj_list.lower,
7738 static void __netdev_adjacent_dev_unlink_neighbour(struct net_device *dev,
7739 struct net_device *upper_dev)
7741 __netdev_adjacent_dev_unlink_lists(dev, upper_dev, 1,
7742 &dev->adj_list.upper,
7743 &upper_dev->adj_list.lower);
7746 static int __netdev_upper_dev_link(struct net_device *dev,
7747 struct net_device *upper_dev, bool master,
7748 void *upper_priv, void *upper_info,
7749 struct netdev_nested_priv *priv,
7750 struct netlink_ext_ack *extack)
7752 struct netdev_notifier_changeupper_info changeupper_info = {
7757 .upper_dev = upper_dev,
7760 .upper_info = upper_info,
7762 struct net_device *master_dev;
7767 if (dev == upper_dev)
7770 /* To prevent loops, check that dev is not already an upper device of upper_dev. */
7771 if (__netdev_has_upper_dev(upper_dev, dev))
7774 if ((dev->lower_level + upper_dev->upper_level) > MAX_NEST_DEV)
7778 if (__netdev_has_upper_dev(dev, upper_dev))
7781 master_dev = __netdev_master_upper_dev_get(dev);
7783 return master_dev == upper_dev ? -EEXIST : -EBUSY;
7786 ret = call_netdevice_notifiers_info(NETDEV_PRECHANGEUPPER,
7787 &changeupper_info.info);
7788 ret = notifier_to_errno(ret);
7792 ret = __netdev_adjacent_dev_link_neighbour(dev, upper_dev, upper_priv,
7797 ret = call_netdevice_notifiers_info(NETDEV_CHANGEUPPER,
7798 &changeupper_info.info);
7799 ret = notifier_to_errno(ret);
7803 __netdev_update_upper_level(dev, NULL);
7804 __netdev_walk_all_lower_dev(dev, __netdev_update_upper_level, NULL);
7806 __netdev_update_lower_level(upper_dev, priv);
7807 __netdev_walk_all_upper_dev(upper_dev, __netdev_update_lower_level,
7813 __netdev_adjacent_dev_unlink_neighbour(dev, upper_dev);
7819 * netdev_upper_dev_link - Add a link to the upper device
7821 * @upper_dev: new upper device
7822 * @extack: netlink extended ack
7824 * Adds a link to a device which is upper to this one. The caller must hold
7825 * the RTNL lock. On a failure a negative errno code is returned.
7826 * On success the reference counts are adjusted and the function returns zero.
7829 int netdev_upper_dev_link(struct net_device *dev,
7830 struct net_device *upper_dev,
7831 struct netlink_ext_ack *extack)
7833 struct netdev_nested_priv priv = {
7834 .flags = NESTED_SYNC_IMM | NESTED_SYNC_TODO,
7838 return __netdev_upper_dev_link(dev, upper_dev, false,
7839 NULL, NULL, &priv, extack);
7841 EXPORT_SYMBOL(netdev_upper_dev_link);
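/*
 * Illustrative sketch (editorial addition): a driver stacking a virtual
 * device on top of a real one records the relationship under RTNL and tears
 * it down again on unwind; "virt_dev" and "real_dev" are hypothetical names.
 *
 *	ASSERT_RTNL();
 *	err = netdev_upper_dev_link(real_dev, virt_dev, extack);
 *	if (err)
 *		goto err_unwind;
 *	...
 *	netdev_upper_dev_unlink(real_dev, virt_dev);	(on teardown)
 */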
7844 * netdev_master_upper_dev_link - Add a master link to the upper device
7846 * @upper_dev: new upper device
7847 * @upper_priv: upper device private
7848 * @upper_info: upper info to be passed down via notifier
7849 * @extack: netlink extended ack
7851 * Adds a link to a device which is upper to this one. In this case, only
7852 * one master upper device can be linked, although other non-master devices
7853 * might be linked as well. The caller must hold the RTNL lock.
7854 * On a failure a negative errno code is returned. On success the reference
7855 * counts are adjusted and the function returns zero.
7857 int netdev_master_upper_dev_link(struct net_device *dev,
7858 struct net_device *upper_dev,
7859 void *upper_priv, void *upper_info,
7860 struct netlink_ext_ack *extack)
7862 struct netdev_nested_priv priv = {
7863 .flags = NESTED_SYNC_IMM | NESTED_SYNC_TODO,
7867 return __netdev_upper_dev_link(dev, upper_dev, true,
7868 upper_priv, upper_info, &priv, extack);
7870 EXPORT_SYMBOL(netdev_master_upper_dev_link);
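/*
 * Illustrative sketch (editorial addition): a bonding/team-style driver
 * enslaving a port records the master relationship, so at most one master
 * upper device can exist per slave; "bond_dev", "slave_dev" and "slave_info"
 * are hypothetical names, and the info is passed down to listeners via the
 * CHANGEUPPER notifier.
 *
 *	err = netdev_master_upper_dev_link(slave_dev, bond_dev,
 *					   NULL, slave_info, extack);
 *	if (err)
 *		return err;
 */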
7872 static void __netdev_upper_dev_unlink(struct net_device *dev,
7873 struct net_device *upper_dev,
7874 struct netdev_nested_priv *priv)
7876 struct netdev_notifier_changeupper_info changeupper_info = {
7880 .upper_dev = upper_dev,
7886 changeupper_info.master = netdev_master_upper_dev_get(dev) == upper_dev;
7888 call_netdevice_notifiers_info(NETDEV_PRECHANGEUPPER,
7889 &changeupper_info.info);
7891 __netdev_adjacent_dev_unlink_neighbour(dev, upper_dev);
7893 call_netdevice_notifiers_info(NETDEV_CHANGEUPPER,
7894 &changeupper_info.info);
7896 __netdev_update_upper_level(dev, NULL);
7897 __netdev_walk_all_lower_dev(dev, __netdev_update_upper_level, NULL);
7899 __netdev_update_lower_level(upper_dev, priv);
7900 __netdev_walk_all_upper_dev(upper_dev, __netdev_update_lower_level,
7905 * netdev_upper_dev_unlink - Removes a link to upper device
7907 * @upper_dev: upper device to unlink
7909 * Removes a link to a device which is upper to this one. The caller must hold the RTNL lock.
7912 void netdev_upper_dev_unlink(struct net_device *dev,
7913 struct net_device *upper_dev)
7915 struct netdev_nested_priv priv = {
7916 .flags = NESTED_SYNC_TODO,
7920 __netdev_upper_dev_unlink(dev, upper_dev, &priv);
7922 EXPORT_SYMBOL(netdev_upper_dev_unlink);
7924 static void __netdev_adjacent_dev_set(struct net_device *upper_dev,
7925 struct net_device *lower_dev,
7928 struct netdev_adjacent *adj;
7930 adj = __netdev_find_adj(lower_dev, &upper_dev->adj_list.lower);
7934 adj = __netdev_find_adj(upper_dev, &lower_dev->adj_list.upper);
7939 static void netdev_adjacent_dev_disable(struct net_device *upper_dev,
7940 struct net_device *lower_dev)
7942 __netdev_adjacent_dev_set(upper_dev, lower_dev, true);
7945 static void netdev_adjacent_dev_enable(struct net_device *upper_dev,
7946 struct net_device *lower_dev)
7948 __netdev_adjacent_dev_set(upper_dev, lower_dev, false);
7951 int netdev_adjacent_change_prepare(struct net_device *old_dev,
7952 struct net_device *new_dev,
7953 struct net_device *dev,
7954 struct netlink_ext_ack *extack)
7956 struct netdev_nested_priv priv = {
7965 if (old_dev && new_dev != old_dev)
7966 netdev_adjacent_dev_disable(dev, old_dev);
7967 err = __netdev_upper_dev_link(new_dev, dev, false, NULL, NULL, &priv,
7970 if (old_dev && new_dev != old_dev)
7971 netdev_adjacent_dev_enable(dev, old_dev);
7977 EXPORT_SYMBOL(netdev_adjacent_change_prepare);
7979 void netdev_adjacent_change_commit(struct net_device *old_dev,
7980 struct net_device *new_dev,
7981 struct net_device *dev)
7983 struct netdev_nested_priv priv = {
7984 .flags = NESTED_SYNC_IMM | NESTED_SYNC_TODO,
7988 if (!new_dev || !old_dev)
7991 if (new_dev == old_dev)
7994 netdev_adjacent_dev_enable(dev, old_dev);
7995 __netdev_upper_dev_unlink(old_dev, dev, &priv);
7997 EXPORT_SYMBOL(netdev_adjacent_change_commit);
7999 void netdev_adjacent_change_abort(struct net_device *old_dev,
8000 struct net_device *new_dev,
8001 struct net_device *dev)
8003 struct netdev_nested_priv priv = {
8011 if (old_dev && new_dev != old_dev)
8012 netdev_adjacent_dev_enable(dev, old_dev);
8014 __netdev_upper_dev_unlink(new_dev, dev, &priv);
8016 EXPORT_SYMBOL(netdev_adjacent_change_abort);
8019 * netdev_bonding_info_change - Dispatch event about slave change
8021 * @bonding_info: info to dispatch
8023 * Send NETDEV_BONDING_INFO to netdev notifiers with info.
8024 * The caller must hold the RTNL lock.
8026 void netdev_bonding_info_change(struct net_device *dev,
8027 struct netdev_bonding_info *bonding_info)
8029 struct netdev_notifier_bonding_info info = {
8033 memcpy(&info.bonding_info, bonding_info,
8034 sizeof(struct netdev_bonding_info));
8035 call_netdevice_notifiers_info(NETDEV_BONDING_INFO,
8038 EXPORT_SYMBOL(netdev_bonding_info_change);
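/*
 * Illustrative sketch (editorial addition): after a slave's state changes, a
 * bonding-style driver fills a struct netdev_bonding_info describing the new
 * slave/master state and dispatches it to interested listeners; "slave_dev"
 * and "binfo" are hypothetical names.
 *
 *	struct netdev_bonding_info binfo = {};
 *
 *	... fill in the slave/master fields of binfo ...
 *	netdev_bonding_info_change(slave_dev, &binfo);
 */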
8040 static int netdev_offload_xstats_enable_l3(struct net_device *dev,
8041 struct netlink_ext_ack *extack)
8043 struct netdev_notifier_offload_xstats_info info = {
8045 .info.extack = extack,
8046 .type = NETDEV_OFFLOAD_XSTATS_TYPE_L3,
8051 dev->offload_xstats_l3 = kzalloc(sizeof(*dev->offload_xstats_l3),
8053 if (!dev->offload_xstats_l3)
8056 rc = call_netdevice_notifiers_info_robust(NETDEV_OFFLOAD_XSTATS_ENABLE,
8057 NETDEV_OFFLOAD_XSTATS_DISABLE,
8059 err = notifier_to_errno(rc);
8066 kfree(dev->offload_xstats_l3);
8067 dev->offload_xstats_l3 = NULL;
8071 int netdev_offload_xstats_enable(struct net_device *dev,
8072 enum netdev_offload_xstats_type type,
8073 struct netlink_ext_ack *extack)
8077 if (netdev_offload_xstats_enabled(dev, type))
8081 case NETDEV_OFFLOAD_XSTATS_TYPE_L3:
8082 return netdev_offload_xstats_enable_l3(dev, extack);
8088 EXPORT_SYMBOL(netdev_offload_xstats_enable);
8090 static void netdev_offload_xstats_disable_l3(struct net_device *dev)
8092 struct netdev_notifier_offload_xstats_info info = {
8094 .type = NETDEV_OFFLOAD_XSTATS_TYPE_L3,
8097 call_netdevice_notifiers_info(NETDEV_OFFLOAD_XSTATS_DISABLE,
8099 kfree(dev->offload_xstats_l3);
8100 dev->offload_xstats_l3 = NULL;
8103 int netdev_offload_xstats_disable(struct net_device *dev,
8104 enum netdev_offload_xstats_type type)
8108 if (!netdev_offload_xstats_enabled(dev, type))
8112 case NETDEV_OFFLOAD_XSTATS_TYPE_L3:
8113 netdev_offload_xstats_disable_l3(dev);
8120 EXPORT_SYMBOL(netdev_offload_xstats_disable);
8122 static void netdev_offload_xstats_disable_all(struct net_device *dev)
8124 netdev_offload_xstats_disable(dev, NETDEV_OFFLOAD_XSTATS_TYPE_L3);
8127 static struct rtnl_hw_stats64 *
8128 netdev_offload_xstats_get_ptr(const struct net_device *dev,
8129 enum netdev_offload_xstats_type type)
8132 case NETDEV_OFFLOAD_XSTATS_TYPE_L3:
8133 return dev->offload_xstats_l3;
8140 bool netdev_offload_xstats_enabled(const struct net_device *dev,
8141 enum netdev_offload_xstats_type type)
8145 return netdev_offload_xstats_get_ptr(dev, type);
8147 EXPORT_SYMBOL(netdev_offload_xstats_enabled);
8149 struct netdev_notifier_offload_xstats_ru {
8153 struct netdev_notifier_offload_xstats_rd {
8154 struct rtnl_hw_stats64 stats;
8158 static void netdev_hw_stats64_add(struct rtnl_hw_stats64 *dest,
8159 const struct rtnl_hw_stats64 *src)
8161 dest->rx_packets += src->rx_packets;
8162 dest->tx_packets += src->tx_packets;
8163 dest->rx_bytes += src->rx_bytes;
8164 dest->tx_bytes += src->tx_bytes;
8165 dest->rx_errors += src->rx_errors;
8166 dest->tx_errors += src->tx_errors;
8167 dest->rx_dropped += src->rx_dropped;
8168 dest->tx_dropped += src->tx_dropped;
8169 dest->multicast += src->multicast;
8172 static int netdev_offload_xstats_get_used(struct net_device *dev,
8173 enum netdev_offload_xstats_type type,
8175 struct netlink_ext_ack *extack)
8177 struct netdev_notifier_offload_xstats_ru report_used = {};
8178 struct netdev_notifier_offload_xstats_info info = {
8180 .info.extack = extack,
8182 .report_used = &report_used,
8186 WARN_ON(!netdev_offload_xstats_enabled(dev, type));
8187 rc = call_netdevice_notifiers_info(NETDEV_OFFLOAD_XSTATS_REPORT_USED,
8189 *p_used = report_used.used;
8190 return notifier_to_errno(rc);
8193 static int netdev_offload_xstats_get_stats(struct net_device *dev,
8194 enum netdev_offload_xstats_type type,
8195 struct rtnl_hw_stats64 *p_stats,
8197 struct netlink_ext_ack *extack)
8199 struct netdev_notifier_offload_xstats_rd report_delta = {};
8200 struct netdev_notifier_offload_xstats_info info = {
8202 .info.extack = extack,
8204 .report_delta = &report_delta,
8206 struct rtnl_hw_stats64 *stats;
8209 stats = netdev_offload_xstats_get_ptr(dev, type);
8210 if (WARN_ON(!stats))
8213 rc = call_netdevice_notifiers_info(NETDEV_OFFLOAD_XSTATS_REPORT_DELTA,
8216 /* Cache whatever we got, even if there was an error, otherwise the
8217 * successful stats retrievals would get lost.
8219 netdev_hw_stats64_add(stats, &report_delta.stats);
8223 *p_used = report_delta.used;
8225 return notifier_to_errno(rc);
8228 int netdev_offload_xstats_get(struct net_device *dev,
8229 enum netdev_offload_xstats_type type,
8230 struct rtnl_hw_stats64 *p_stats, bool *p_used,
8231 struct netlink_ext_ack *extack)
8236 return netdev_offload_xstats_get_stats(dev, type, p_stats,
8239 return netdev_offload_xstats_get_used(dev, type, p_used,
8242 EXPORT_SYMBOL(netdev_offload_xstats_get);
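/*
 * Illustrative sketch (editorial addition): a user of hardware-offloaded L3
 * statistics first enables collection and later queries the accumulated
 * counters; "stats" (a struct rtnl_hw_stats64) and "used" (a bool) are
 * hypothetical local variables.
 *
 *	err = netdev_offload_xstats_enable(dev, NETDEV_OFFLOAD_XSTATS_TYPE_L3,
 *					   extack);
 *	...
 *	err = netdev_offload_xstats_get(dev, NETDEV_OFFLOAD_XSTATS_TYPE_L3,
 *					&stats, &used, extack);
 */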
8245 netdev_offload_xstats_report_delta(struct netdev_notifier_offload_xstats_rd *report_delta,
8246 const struct rtnl_hw_stats64 *stats)
8248 report_delta->used = true;
8249 netdev_hw_stats64_add(&report_delta->stats, stats);
8251 EXPORT_SYMBOL(netdev_offload_xstats_report_delta);
8254 netdev_offload_xstats_report_used(struct netdev_notifier_offload_xstats_ru *report_used)
8256 report_used->used = true;
8258 EXPORT_SYMBOL(netdev_offload_xstats_report_used);
8260 void netdev_offload_xstats_push_delta(struct net_device *dev,
8261 enum netdev_offload_xstats_type type,
8262 const struct rtnl_hw_stats64 *p_stats)
8264 struct rtnl_hw_stats64 *stats;
8268 stats = netdev_offload_xstats_get_ptr(dev, type);
8269 if (WARN_ON(!stats))
8272 netdev_hw_stats64_add(stats, p_stats);
8274 EXPORT_SYMBOL(netdev_offload_xstats_push_delta);
8277 * netdev_get_xmit_slave - Get the xmit slave of master device
8280 * @all_slaves: assume all the slaves are active
8282 * The reference counters are not incremented so the caller must be
8283 * careful with locks. The caller must hold the RCU read lock.
8284 * %NULL is returned if no slave is found.
8287 struct net_device *netdev_get_xmit_slave(struct net_device *dev,
8288 struct sk_buff *skb,
8291 const struct net_device_ops *ops = dev->netdev_ops;
8293 if (!ops->ndo_get_xmit_slave)
8295 return ops->ndo_get_xmit_slave(dev, skb, all_slaves);
8297 EXPORT_SYMBOL(netdev_get_xmit_slave);
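/*
 * Illustrative sketch (editorial addition): a caller that wants to know
 * which slave a bonding-style master would transmit a given skb on asks
 * under the RCU read lock; no reference is taken on the returned device.
 * "bond_dev" is a hypothetical name.
 *
 *	rcu_read_lock();
 *	slave = netdev_get_xmit_slave(bond_dev, skb, false);
 *	if (slave)
 *		... use slave ...
 *	rcu_read_unlock();
 */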
8299 static struct net_device *netdev_sk_get_lower_dev(struct net_device *dev,
8302 const struct net_device_ops *ops = dev->netdev_ops;
8304 if (!ops->ndo_sk_get_lower_dev)
8306 return ops->ndo_sk_get_lower_dev(dev, sk);
8310 * netdev_sk_get_lowest_dev - Get the lowest device in chain given device and socket
8314 * %NULL is returned if no lower device is found.
8317 struct net_device *netdev_sk_get_lowest_dev(struct net_device *dev,
8320 struct net_device *lower;
8322 lower = netdev_sk_get_lower_dev(dev, sk);
8325 lower = netdev_sk_get_lower_dev(dev, sk);
8330 EXPORT_SYMBOL(netdev_sk_get_lowest_dev);
8332 static void netdev_adjacent_add_links(struct net_device *dev)
8334 struct netdev_adjacent *iter;
8336 struct net *net = dev_net(dev);
8338 list_for_each_entry(iter, &dev->adj_list.upper, list) {
8339 if (!net_eq(net, dev_net(iter->dev)))
8341 netdev_adjacent_sysfs_add(iter->dev, dev,
8342 &iter->dev->adj_list.lower);
8343 netdev_adjacent_sysfs_add(dev, iter->dev,
8344 &dev->adj_list.upper);
8347 list_for_each_entry(iter, &dev->adj_list.lower, list) {
8348 if (!net_eq(net, dev_net(iter->dev)))
8350 netdev_adjacent_sysfs_add(iter->dev, dev,
8351 &iter->dev->adj_list.upper);
8352 netdev_adjacent_sysfs_add(dev, iter->dev,
8353 &dev->adj_list.lower);
8357 static void netdev_adjacent_del_links(struct net_device *dev)
8359 struct netdev_adjacent *iter;
8361 struct net *net = dev_net(dev);
8363 list_for_each_entry(iter, &dev->adj_list.upper, list) {
8364 if (!net_eq(net, dev_net(iter->dev)))
8366 netdev_adjacent_sysfs_del(iter->dev, dev->name,
8367 &iter->dev->adj_list.lower);
8368 netdev_adjacent_sysfs_del(dev, iter->dev->name,
8369 &dev->adj_list.upper);
8372 list_for_each_entry(iter, &dev->adj_list.lower, list) {
8373 if (!net_eq(net, dev_net(iter->dev)))
8375 netdev_adjacent_sysfs_del(iter->dev, dev->name,
8376 &iter->dev->adj_list.upper);
8377 netdev_adjacent_sysfs_del(dev, iter->dev->name,
8378 &dev->adj_list.lower);
8382 void netdev_adjacent_rename_links(struct net_device *dev, char *oldname)
8384 struct netdev_adjacent *iter;
8386 struct net *net = dev_net(dev);
8388 list_for_each_entry(iter, &dev->adj_list.upper, list) {
8389 if (!net_eq(net, dev_net(iter->dev)))
8391 netdev_adjacent_sysfs_del(iter->dev, oldname,
8392 &iter->dev->adj_list.lower);
8393 netdev_adjacent_sysfs_add(iter->dev, dev,
8394 &iter->dev->adj_list.lower);
8397 list_for_each_entry(iter, &dev->adj_list.lower, list) {
8398 if (!net_eq(net, dev_net(iter->dev)))
8400 netdev_adjacent_sysfs_del(iter->dev, oldname,
8401 &iter->dev->adj_list.upper);
8402 netdev_adjacent_sysfs_add(iter->dev, dev,
8403 &iter->dev->adj_list.upper);
8407 void *netdev_lower_dev_get_private(struct net_device *dev,
8408 struct net_device *lower_dev)
8410 struct netdev_adjacent *lower;
8414 lower = __netdev_find_adj(lower_dev, &dev->adj_list.lower);
8418 return lower->private;
8420 EXPORT_SYMBOL(netdev_lower_dev_get_private);
8424 * netdev_lower_state_changed - Dispatch event about lower device state change
8425 * @lower_dev: device
8426 * @lower_state_info: state to dispatch
8428 * Send NETDEV_CHANGELOWERSTATE to netdev notifiers with info.
8429 * The caller must hold the RTNL lock.
8431 void netdev_lower_state_changed(struct net_device *lower_dev,
8432 void *lower_state_info)
8434 struct netdev_notifier_changelowerstate_info changelowerstate_info = {
8435 .info.dev = lower_dev,
8439 changelowerstate_info.lower_state_info = lower_state_info;
8440 call_netdevice_notifiers_info(NETDEV_CHANGELOWERSTATE,
8441 &changelowerstate_info.info);
8443 EXPORT_SYMBOL(netdev_lower_state_changed);
8445 static void dev_change_rx_flags(struct net_device *dev, int flags)
8447 const struct net_device_ops *ops = dev->netdev_ops;
8449 if (ops->ndo_change_rx_flags)
8450 ops->ndo_change_rx_flags(dev, flags);
8453 static int __dev_set_promiscuity(struct net_device *dev, int inc, bool notify)
8455 unsigned int old_flags = dev->flags;
8461 dev->flags |= IFF_PROMISC;
8462 dev->promiscuity += inc;
8463 if (dev->promiscuity == 0) {
8466 * If inc causes overflow, untouch promisc and return error.
8469 dev->flags &= ~IFF_PROMISC;
8471 dev->promiscuity -= inc;
8472 netdev_warn(dev, "promiscuity counter overflowed, could not enable promiscuous mode; the promiscuity feature of this device might be broken.\n");
8476 if (dev->flags != old_flags) {
8477 netdev_info(dev, "%s promiscuous mode\n",
8478 dev->flags & IFF_PROMISC ? "entered" : "left");
8479 if (audit_enabled) {
8480 current_uid_gid(&uid, &gid);
8481 audit_log(audit_context(), GFP_ATOMIC,
8482 AUDIT_ANOM_PROMISCUOUS,
8483 "dev=%s prom=%d old_prom=%d auid=%u uid=%u gid=%u ses=%u",
8484 dev->name, (dev->flags & IFF_PROMISC),
8485 (old_flags & IFF_PROMISC),
8486 from_kuid(&init_user_ns, audit_get_loginuid(current)),
8487 from_kuid(&init_user_ns, uid),
8488 from_kgid(&init_user_ns, gid),
8489 audit_get_sessionid(current));
8492 dev_change_rx_flags(dev, IFF_PROMISC);
8495 __dev_notify_flags(dev, old_flags, IFF_PROMISC, 0, NULL);
8500 * dev_set_promiscuity - update promiscuity count on a device
8504 * Add or remove promiscuity from a device. While the count in the device
8505 * remains above zero the interface remains promiscuous. Once it hits zero
8506 * the device reverts to normal filtering operation. A negative @inc
8507 * value is used to drop promiscuity on the device.
8508 * Return 0 if successful or a negative errno code on error.
8510 int dev_set_promiscuity(struct net_device *dev, int inc)
8512 unsigned int old_flags = dev->flags;
8515 err = __dev_set_promiscuity(dev, inc, true);
8518 if (dev->flags != old_flags)
8519 dev_set_rx_mode(dev);
8522 EXPORT_SYMBOL(dev_set_promiscuity);
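/*
 * Illustrative sketch (editorial addition): a packet-capture style user
 * bumps the promiscuity count while capturing and drops it again when done,
 * so nested users compose correctly; dev_set_allmulti() follows the same
 * counting pattern. Called under RTNL.
 *
 *	err = dev_set_promiscuity(dev, 1);	(start capturing)
 *	if (err)
 *		return err;
 *	...
 *	dev_set_promiscuity(dev, -1);		(done capturing)
 */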
8524 static int __dev_set_allmulti(struct net_device *dev, int inc, bool notify)
8526 unsigned int old_flags = dev->flags, old_gflags = dev->gflags;
8530 dev->flags |= IFF_ALLMULTI;
8531 dev->allmulti += inc;
8532 if (dev->allmulti == 0) {
8535 * If inc causes overflow, untouch allmulti and return error.
8538 dev->flags &= ~IFF_ALLMULTI;
8540 dev->allmulti -= inc;
8541 netdev_warn(dev, "allmulti counter overflowed, could not enable allmulti mode; the allmulti feature of this device might be broken.\n");
8545 if (dev->flags ^ old_flags) {
8546 netdev_info(dev, "%s allmulticast mode\n",
8547 dev->flags & IFF_ALLMULTI ? "entered" : "left");
8548 dev_change_rx_flags(dev, IFF_ALLMULTI);
8549 dev_set_rx_mode(dev);
8551 __dev_notify_flags(dev, old_flags,
8552 dev->gflags ^ old_gflags, 0, NULL);
8558 * dev_set_allmulti - update allmulti count on a device
8562 * Add or remove reception of all multicast frames to a device. While the
8563 * count in the device remains above zero the interface remains listening
8564 * to all multicast frames. Once it hits zero the device reverts to normal
8565 * filtering operation. A negative @inc value is used to drop the counter
8566 * when releasing a resource needing all multicasts.
8567 * Return 0 if successful or a negative errno code on error.
8570 int dev_set_allmulti(struct net_device *dev, int inc)
8572 return __dev_set_allmulti(dev, inc, true);
8574 EXPORT_SYMBOL(dev_set_allmulti);
8577 * Upload unicast and multicast address lists to device and
8578 * configure RX filtering. When the device doesn't support unicast
8579 * filtering it is put in promiscuous mode while unicast addresses are added.
8582 void __dev_set_rx_mode(struct net_device *dev)
8584 const struct net_device_ops *ops = dev->netdev_ops;
8586 /* dev_open will call this function so the list will stay sane. */
8587 if (!(dev->flags&IFF_UP))
8590 if (!netif_device_present(dev))
8593 if (!(dev->priv_flags & IFF_UNICAST_FLT)) {
8594 /* Unicast address changes may only happen under the rtnl,
8595 * therefore calling __dev_set_promiscuity here is safe.
8597 if (!netdev_uc_empty(dev) && !dev->uc_promisc) {
8598 __dev_set_promiscuity(dev, 1, false);
8599 dev->uc_promisc = true;
8600 } else if (netdev_uc_empty(dev) && dev->uc_promisc) {
8601 __dev_set_promiscuity(dev, -1, false);
8602 dev->uc_promisc = false;
8606 if (ops->ndo_set_rx_mode)
8607 ops->ndo_set_rx_mode(dev);
8610 void dev_set_rx_mode(struct net_device *dev)
8612 netif_addr_lock_bh(dev);
8613 __dev_set_rx_mode(dev);
8614 netif_addr_unlock_bh(dev);
8618 * dev_get_flags - get flags reported to userspace
8621 * Get the combination of flag bits exported through APIs to userspace.
8623 unsigned int dev_get_flags(const struct net_device *dev)
8627 flags = (READ_ONCE(dev->flags) & ~(IFF_PROMISC |
8632 (READ_ONCE(dev->gflags) & (IFF_PROMISC |
8635 if (netif_running(dev)) {
8636 if (netif_oper_up(dev))
8637 flags |= IFF_RUNNING;
8638 if (netif_carrier_ok(dev))
8639 flags |= IFF_LOWER_UP;
8640 if (netif_dormant(dev))
8641 flags |= IFF_DORMANT;
8646 EXPORT_SYMBOL(dev_get_flags);
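/*
 * Illustrative sketch (editorial addition): checking the userspace-visible
 * state of an interface, e.g. whether it is administratively up and
 * operational.
 *
 *	unsigned int flags = dev_get_flags(dev);
 *
 *	if ((flags & IFF_UP) && (flags & IFF_RUNNING))
 *		... interface is up and operational ...
 */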
8648 int __dev_change_flags(struct net_device *dev, unsigned int flags,
8649 struct netlink_ext_ack *extack)
8651 unsigned int old_flags = dev->flags;
8657 * Set the flags on our device.
8660 dev->flags = (flags & (IFF_DEBUG | IFF_NOTRAILERS | IFF_NOARP |
8661 IFF_DYNAMIC | IFF_MULTICAST | IFF_PORTSEL |
8663 (dev->flags & (IFF_UP | IFF_VOLATILE | IFF_PROMISC |
8667 * Load in the correct multicast list now that the flags have changed.
8670 if ((old_flags ^ flags) & IFF_MULTICAST)
8671 dev_change_rx_flags(dev, IFF_MULTICAST);
8673 dev_set_rx_mode(dev);
8676 * Have we downed the interface? We handle IFF_UP ourselves
8677 * according to user attempts to set it, rather than blindly setting it.
8682 if ((old_flags ^ flags) & IFF_UP) {
8683 if (old_flags & IFF_UP)
8686 ret = __dev_open(dev, extack);
8689 if ((flags ^ dev->gflags) & IFF_PROMISC) {
8690 int inc = (flags & IFF_PROMISC) ? 1 : -1;
8691 unsigned int old_flags = dev->flags;
8693 dev->gflags ^= IFF_PROMISC;
8695 if (__dev_set_promiscuity(dev, inc, false) >= 0)
8696 if (dev->flags != old_flags)
8697 dev_set_rx_mode(dev);
8700 /* NOTE: order of synchronization of IFF_PROMISC and IFF_ALLMULTI
8701 * is important. Some (broken) drivers set IFF_PROMISC when
8702 * IFF_ALLMULTI is requested, without asking us and without reporting it.
8704 if ((flags ^ dev->gflags) & IFF_ALLMULTI) {
8705 int inc = (flags & IFF_ALLMULTI) ? 1 : -1;
8707 dev->gflags ^= IFF_ALLMULTI;
8708 __dev_set_allmulti(dev, inc, false);
8714 void __dev_notify_flags(struct net_device *dev, unsigned int old_flags,
8715 unsigned int gchanges, u32 portid,
8716 const struct nlmsghdr *nlh)
8718 unsigned int changes = dev->flags ^ old_flags;
8721 rtmsg_ifinfo(RTM_NEWLINK, dev, gchanges, GFP_ATOMIC, portid, nlh);
8723 if (changes & IFF_UP) {
8724 if (dev->flags & IFF_UP)
8725 call_netdevice_notifiers(NETDEV_UP, dev);
8727 call_netdevice_notifiers(NETDEV_DOWN, dev);
8730 if (dev->flags & IFF_UP &&
8731 (changes & ~(IFF_UP | IFF_PROMISC | IFF_ALLMULTI | IFF_VOLATILE))) {
8732 struct netdev_notifier_change_info change_info = {
8736 .flags_changed = changes,
8739 call_netdevice_notifiers_info(NETDEV_CHANGE, &change_info.info);
8744 * dev_change_flags - change device settings
8746 * @flags: device state flags
8747 * @extack: netlink extended ack
8749 * Change settings on a device based on state flags. The flags are
8750 * in the userspace-exported format.
8752 int dev_change_flags(struct net_device *dev, unsigned int flags,
8753 struct netlink_ext_ack *extack)
8756 unsigned int changes, old_flags = dev->flags, old_gflags = dev->gflags;
8758 ret = __dev_change_flags(dev, flags, extack);
8762 changes = (old_flags ^ dev->flags) | (old_gflags ^ dev->gflags);
8763 __dev_notify_flags(dev, old_flags, changes, 0, NULL);
8766 EXPORT_SYMBOL(dev_change_flags);
8768 int __dev_set_mtu(struct net_device *dev, int new_mtu)
8770 const struct net_device_ops *ops = dev->netdev_ops;
8772 if (ops->ndo_change_mtu)
8773 return ops->ndo_change_mtu(dev, new_mtu);
8775 /* Pairs with all the lockless reads of dev->mtu in the stack */
8776 WRITE_ONCE(dev->mtu, new_mtu);
8779 EXPORT_SYMBOL(__dev_set_mtu);
8781 int dev_validate_mtu(struct net_device *dev, int new_mtu,
8782 struct netlink_ext_ack *extack)
8784 /* MTU must be positive, and in range */
8785 if (new_mtu < 0 || new_mtu < dev->min_mtu) {
8786 NL_SET_ERR_MSG(extack, "mtu less than device minimum");
8790 if (dev->max_mtu > 0 && new_mtu > dev->max_mtu) {
8791 NL_SET_ERR_MSG(extack, "mtu greater than device maximum");
8798 * dev_set_mtu_ext - Change maximum transfer unit
8800 * @new_mtu: new transfer unit
8801 * @extack: netlink extended ack
8803 * Change the maximum transfer size of the network device.
8805 int dev_set_mtu_ext(struct net_device *dev, int new_mtu,
8806 struct netlink_ext_ack *extack)
8810 if (new_mtu == dev->mtu)
8813 err = dev_validate_mtu(dev, new_mtu, extack);
8817 if (!netif_device_present(dev))
8820 err = call_netdevice_notifiers(NETDEV_PRECHANGEMTU, dev);
8821 err = notifier_to_errno(err);
8825 orig_mtu = dev->mtu;
8826 err = __dev_set_mtu(dev, new_mtu);
8829 err = call_netdevice_notifiers_mtu(NETDEV_CHANGEMTU, dev,
8831 err = notifier_to_errno(err);
8833 /* setting mtu back and notifying everyone again,
8834 * so that they have a chance to revert changes.
8836 __dev_set_mtu(dev, orig_mtu);
8837 call_netdevice_notifiers_mtu(NETDEV_CHANGEMTU, dev,
8844 int dev_set_mtu(struct net_device *dev, int new_mtu)
8846 struct netlink_ext_ack extack;
8849 memset(&extack, 0, sizeof(extack));
8850 err = dev_set_mtu_ext(dev, new_mtu, &extack);
8851 if (err && extack._msg)
8852 net_err_ratelimited("%s: %s\n", dev->name, extack._msg);
8855 EXPORT_SYMBOL(dev_set_mtu);
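/*
 * Illustrative sketch (editorial addition): changing the MTU from kernel
 * code. The call is made under RTNL; range checking against
 * dev->min_mtu/dev->max_mtu and the CHANGEMTU notifications are handled
 * internally.
 *
 *	rtnl_lock();
 *	err = dev_set_mtu(dev, 9000);
 *	rtnl_unlock();
 */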
8858 * dev_change_tx_queue_len - Change TX queue length of a netdevice
8860 * @new_len: new tx queue length
8862 int dev_change_tx_queue_len(struct net_device *dev, unsigned long new_len)
8864 unsigned int orig_len = dev->tx_queue_len;
8867 if (new_len != (unsigned int)new_len)
8870 if (new_len != orig_len) {
8871 dev->tx_queue_len = new_len;
8872 res = call_netdevice_notifiers(NETDEV_CHANGE_TX_QUEUE_LEN, dev);
8873 res = notifier_to_errno(res);
8876 res = dev_qdisc_change_tx_queue_len(dev);
8884 netdev_err(dev, "refused to change device tx_queue_len\n");
8885 dev->tx_queue_len = orig_len;
8890 * dev_set_group - Change group this device belongs to
8892 * @new_group: group this device should belong to
8894 void dev_set_group(struct net_device *dev, int new_group)
8896 dev->group = new_group;
8900 * dev_pre_changeaddr_notify - Call NETDEV_PRE_CHANGEADDR.
8902 * @addr: new address
8903 * @extack: netlink extended ack
8905 int dev_pre_changeaddr_notify(struct net_device *dev, const char *addr,
8906 struct netlink_ext_ack *extack)
8908 struct netdev_notifier_pre_changeaddr_info info = {
8910 .info.extack = extack,
8915 rc = call_netdevice_notifiers_info(NETDEV_PRE_CHANGEADDR, &info.info);
8916 return notifier_to_errno(rc);
8918 EXPORT_SYMBOL(dev_pre_changeaddr_notify);
8921 * dev_set_mac_address - Change Media Access Control Address
8924 * @extack: netlink extended ack
8926 * Change the hardware (MAC) address of the device
8928 int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa,
8929 struct netlink_ext_ack *extack)
8931 const struct net_device_ops *ops = dev->netdev_ops;
8934 if (!ops->ndo_set_mac_address)
8936 if (sa->sa_family != dev->type)
8938 if (!netif_device_present(dev))
8940 err = dev_pre_changeaddr_notify(dev, sa->sa_data, extack);
8943 if (memcmp(dev->dev_addr, sa->sa_data, dev->addr_len)) {
8944 err = ops->ndo_set_mac_address(dev, sa);
8948 dev->addr_assign_type = NET_ADDR_SET;
8949 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
8950 add_device_randomness(dev->dev_addr, dev->addr_len);
8953 EXPORT_SYMBOL(dev_set_mac_address);
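/*
 * Illustrative sketch (editorial addition): setting a device's MAC address
 * from kernel code under RTNL; "new_addr" is a hypothetical buffer of
 * dev->addr_len bytes.
 *
 *	struct sockaddr sa;
 *
 *	sa.sa_family = dev->type;
 *	memcpy(sa.sa_data, new_addr, dev->addr_len);
 *	err = dev_set_mac_address(dev, &sa, extack);
 */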
8955 DECLARE_RWSEM(dev_addr_sem);
8957 int dev_set_mac_address_user(struct net_device *dev, struct sockaddr *sa,
8958 struct netlink_ext_ack *extack)
8962 down_write(&dev_addr_sem);
8963 ret = dev_set_mac_address(dev, sa, extack);
8964 up_write(&dev_addr_sem);
8967 EXPORT_SYMBOL(dev_set_mac_address_user);
8969 int dev_get_mac_address(struct sockaddr *sa, struct net *net, char *dev_name)
8971 size_t size = sizeof(sa->sa_data_min);
8972 struct net_device *dev;
8975 down_read(&dev_addr_sem);
8978 dev = dev_get_by_name_rcu(net, dev_name);
8984 memset(sa->sa_data, 0, size);
8986 memcpy(sa->sa_data, dev->dev_addr,
8987 min_t(size_t, size, dev->addr_len));
8988 sa->sa_family = dev->type;
8992 up_read(&dev_addr_sem);
8995 EXPORT_SYMBOL(dev_get_mac_address);
8998 * dev_change_carrier - Change device carrier
9000 * @new_carrier: new value
9002 * Change device carrier
9004 int dev_change_carrier(struct net_device *dev, bool new_carrier)
9006 const struct net_device_ops *ops = dev->netdev_ops;
9008 if (!ops->ndo_change_carrier)
9010 if (!netif_device_present(dev))
9012 return ops->ndo_change_carrier(dev, new_carrier);
9016 * dev_get_phys_port_id - Get device physical port ID
9020 * Get device physical port ID
9022 int dev_get_phys_port_id(struct net_device *dev,
9023 struct netdev_phys_item_id *ppid)
9025 const struct net_device_ops *ops = dev->netdev_ops;
9027 if (!ops->ndo_get_phys_port_id)
9029 return ops->ndo_get_phys_port_id(dev, ppid);
9033 * dev_get_phys_port_name - Get device physical port name
9036 * @len: limit of bytes to copy to name
9038 * Get device physical port name
9040 int dev_get_phys_port_name(struct net_device *dev,
9041 char *name, size_t len)
9043 const struct net_device_ops *ops = dev->netdev_ops;
9046 if (ops->ndo_get_phys_port_name) {
9047 err = ops->ndo_get_phys_port_name(dev, name, len);
9048 if (err != -EOPNOTSUPP)
9051 return devlink_compat_phys_port_name_get(dev, name, len);
9055 * dev_get_port_parent_id - Get the device's port parent identifier
9056 * @dev: network device
9057 * @ppid: pointer to a storage for the port's parent identifier
9058 * @recurse: allow/disallow recursion to lower devices
9060 * Get the device's port parent identifier
9062 int dev_get_port_parent_id(struct net_device *dev,
9063 struct netdev_phys_item_id *ppid,
9066 const struct net_device_ops *ops = dev->netdev_ops;
9067 struct netdev_phys_item_id first = { };
9068 struct net_device *lower_dev;
9069 struct list_head *iter;
9072 if (ops->ndo_get_port_parent_id) {
9073 err = ops->ndo_get_port_parent_id(dev, ppid);
9074 if (err != -EOPNOTSUPP)
9078 err = devlink_compat_switch_id_get(dev, ppid);
9079 if (!recurse || err != -EOPNOTSUPP)
9082 netdev_for_each_lower_dev(dev, lower_dev, iter) {
9083 err = dev_get_port_parent_id(lower_dev, ppid, true);
9088 else if (memcmp(&first, ppid, sizeof(*ppid)))
9094 EXPORT_SYMBOL(dev_get_port_parent_id);
9097 * netdev_port_same_parent_id - Indicate if two network devices have
9098 * the same port parent identifier
9099 * @a: first network device
9100 * @b: second network device
9102 bool netdev_port_same_parent_id(struct net_device *a, struct net_device *b)
9104 struct netdev_phys_item_id a_id = { };
9105 struct netdev_phys_item_id b_id = { };
9107 if (dev_get_port_parent_id(a, &a_id, true) ||
9108 dev_get_port_parent_id(b, &b_id, true))
9111 return netdev_phys_item_id_same(&a_id, &b_id);
9113 EXPORT_SYMBOL(netdev_port_same_parent_id);
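/*
 * Illustrative sketch (editorial addition): deciding whether two ports sit
 * behind the same switch ASIC, e.g. to allow hardware offload of a bridge
 * spanning both of them; "port_a" and "port_b" are hypothetical names.
 *
 *	if (netdev_port_same_parent_id(port_a, port_b))
 *		... both ports share a parent; offload is possible ...
 */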
9116 * dev_change_proto_down - set carrier according to proto_down.
9119 * @proto_down: new value
9121 int dev_change_proto_down(struct net_device *dev, bool proto_down)
9123 if (!(dev->priv_flags & IFF_CHANGE_PROTO_DOWN))
9125 if (!netif_device_present(dev))
9128 netif_carrier_off(dev);
9130 netif_carrier_on(dev);
9131 dev->proto_down = proto_down;
9136 * dev_change_proto_down_reason - update proto_down reason bits
9139 * @mask: proto down mask
9140 * @value: proto down value
9142 void dev_change_proto_down_reason(struct net_device *dev, unsigned long mask,
9148 dev->proto_down_reason = value;
9150 for_each_set_bit(b, &mask, 32) {
9151 if (value & (1 << b))
9152 dev->proto_down_reason |= BIT(b);
9154 dev->proto_down_reason &= ~BIT(b);
9159 struct bpf_xdp_link {
9160 struct bpf_link link;
9161 struct net_device *dev; /* protected by rtnl_lock, no refcnt held */
9165 static enum bpf_xdp_mode dev_xdp_mode(struct net_device *dev, u32 flags)
9167 if (flags & XDP_FLAGS_HW_MODE)
9169 if (flags & XDP_FLAGS_DRV_MODE)
9170 return XDP_MODE_DRV;
9171 if (flags & XDP_FLAGS_SKB_MODE)
9172 return XDP_MODE_SKB;
9173 return dev->netdev_ops->ndo_bpf ? XDP_MODE_DRV : XDP_MODE_SKB;
9176 static bpf_op_t dev_xdp_bpf_op(struct net_device *dev, enum bpf_xdp_mode mode)
9180 return generic_xdp_install;
9183 return dev->netdev_ops->ndo_bpf;
9189 static struct bpf_xdp_link *dev_xdp_link(struct net_device *dev,
9190 enum bpf_xdp_mode mode)
9192 return dev->xdp_state[mode].link;
9195 static struct bpf_prog *dev_xdp_prog(struct net_device *dev,
9196 enum bpf_xdp_mode mode)
9198 struct bpf_xdp_link *link = dev_xdp_link(dev, mode);
9201 return link->link.prog;
9202 return dev->xdp_state[mode].prog;
9205 u8 dev_xdp_prog_count(struct net_device *dev)
9210 for (i = 0; i < __MAX_XDP_MODE; i++)
9211 if (dev->xdp_state[i].prog || dev->xdp_state[i].link)
9215 EXPORT_SYMBOL_GPL(dev_xdp_prog_count);
9217 u32 dev_xdp_prog_id(struct net_device *dev, enum bpf_xdp_mode mode)
9219 struct bpf_prog *prog = dev_xdp_prog(dev, mode);
9221 return prog ? prog->aux->id : 0;
9224 static void dev_xdp_set_link(struct net_device *dev, enum bpf_xdp_mode mode,
9225 struct bpf_xdp_link *link)
9227 dev->xdp_state[mode].link = link;
9228 dev->xdp_state[mode].prog = NULL;
9231 static void dev_xdp_set_prog(struct net_device *dev, enum bpf_xdp_mode mode,
9232 struct bpf_prog *prog)
9234 dev->xdp_state[mode].link = NULL;
9235 dev->xdp_state[mode].prog = prog;
9238 static int dev_xdp_install(struct net_device *dev, enum bpf_xdp_mode mode,
9239 bpf_op_t bpf_op, struct netlink_ext_ack *extack,
9240 u32 flags, struct bpf_prog *prog)
9242 struct netdev_bpf xdp;
9245 memset(&xdp, 0, sizeof(xdp));
9246 xdp.command = mode == XDP_MODE_HW ? XDP_SETUP_PROG_HW : XDP_SETUP_PROG;
9247 xdp.extack = extack;
9251 /* Drivers assume refcnt is already incremented (i.e., prog pointer is
9252 * "moved" into driver), so they don't increment it on their own, but
9253 * they do decrement refcnt when program is detached or replaced.
9254 * Given net_device also owns link/prog, we need to bump refcnt here
9255 * to prevent drivers from underflowing it.
9259 err = bpf_op(dev, &xdp);
9266 if (mode != XDP_MODE_HW)
9267 bpf_prog_change_xdp(dev_xdp_prog(dev, mode), prog);
9272 static void dev_xdp_uninstall(struct net_device *dev)
9274 struct bpf_xdp_link *link;
9275 struct bpf_prog *prog;
9276 enum bpf_xdp_mode mode;
9281 for (mode = XDP_MODE_SKB; mode < __MAX_XDP_MODE; mode++) {
9282 prog = dev_xdp_prog(dev, mode);
9286 bpf_op = dev_xdp_bpf_op(dev, mode);
9290 WARN_ON(dev_xdp_install(dev, mode, bpf_op, NULL, 0, NULL));
9292 /* auto-detach link from net device */
9293 link = dev_xdp_link(dev, mode);
9299 dev_xdp_set_link(dev, mode, NULL);
9303 static int dev_xdp_attach(struct net_device *dev, struct netlink_ext_ack *extack,
9304 struct bpf_xdp_link *link, struct bpf_prog *new_prog,
9305 struct bpf_prog *old_prog, u32 flags)
9307 unsigned int num_modes = hweight32(flags & XDP_FLAGS_MODES);
9308 struct bpf_prog *cur_prog;
9309 struct net_device *upper;
9310 struct list_head *iter;
9311 enum bpf_xdp_mode mode;
9317 /* either link or prog attachment, never both */
9318 if (link && (new_prog || old_prog))
9320 /* link supports only XDP mode flags */
9321 if (link && (flags & ~XDP_FLAGS_MODES)) {
9322 NL_SET_ERR_MSG(extack, "Invalid XDP flags for BPF link attachment");
9325 /* just one XDP mode bit should be set, zero defaults to drv/skb mode */
9326 if (num_modes > 1) {
9327 NL_SET_ERR_MSG(extack, "Only one XDP mode flag can be set");
9330 /* avoid ambiguity if offload + drv/skb mode progs are both loaded */
9331 if (!num_modes && dev_xdp_prog_count(dev) > 1) {
9332 NL_SET_ERR_MSG(extack,
9333 "More than one program loaded, unset mode is ambiguous");
9336 /* old_prog != NULL implies XDP_FLAGS_REPLACE is set */
9337 if (old_prog && !(flags & XDP_FLAGS_REPLACE)) {
9338 NL_SET_ERR_MSG(extack, "XDP_FLAGS_REPLACE is not specified");
9342 mode = dev_xdp_mode(dev, flags);
9343 /* can't replace attached link */
9344 if (dev_xdp_link(dev, mode)) {
9345 NL_SET_ERR_MSG(extack, "Can't replace active BPF XDP link");
9349 /* don't allow if an upper device already has a program */
9350 netdev_for_each_upper_dev_rcu(dev, upper, iter) {
9351 if (dev_xdp_prog_count(upper) > 0) {
9352 NL_SET_ERR_MSG(extack, "Cannot attach when an upper device already has a program");
9357 cur_prog = dev_xdp_prog(dev, mode);
9358 /* can't replace attached prog with link */
9359 if (link && cur_prog) {
9360 NL_SET_ERR_MSG(extack, "Can't replace active XDP program with BPF link");
9363 if ((flags & XDP_FLAGS_REPLACE) && cur_prog != old_prog) {
9364 NL_SET_ERR_MSG(extack, "Active program does not match expected");
9368 /* put effective new program into new_prog */
9370 new_prog = link->link.prog;
9373 bool offload = mode == XDP_MODE_HW;
9374 enum bpf_xdp_mode other_mode = mode == XDP_MODE_SKB
9375 ? XDP_MODE_DRV : XDP_MODE_SKB;
9377 if ((flags & XDP_FLAGS_UPDATE_IF_NOEXIST) && cur_prog) {
9378 NL_SET_ERR_MSG(extack, "XDP program already attached");
9381 if (!offload && dev_xdp_prog(dev, other_mode)) {
9382 NL_SET_ERR_MSG(extack, "Native and generic XDP can't be active at the same time");
9385 if (!offload && bpf_prog_is_offloaded(new_prog->aux)) {
9386 NL_SET_ERR_MSG(extack, "Using offloaded program without HW_MODE flag is not supported");
9389 if (bpf_prog_is_dev_bound(new_prog->aux) && !bpf_offload_dev_match(new_prog, dev)) {
9390 NL_SET_ERR_MSG(extack, "Program bound to different device");
9393 if (new_prog->expected_attach_type == BPF_XDP_DEVMAP) {
9394 NL_SET_ERR_MSG(extack, "BPF_XDP_DEVMAP programs can not be attached to a device");
9397 if (new_prog->expected_attach_type == BPF_XDP_CPUMAP) {
9398 NL_SET_ERR_MSG(extack, "BPF_XDP_CPUMAP programs can not be attached to a device");
9403 /* don't call drivers if the effective program didn't change */
9404 if (new_prog != cur_prog) {
9405 bpf_op = dev_xdp_bpf_op(dev, mode);
9407 NL_SET_ERR_MSG(extack, "Underlying driver does not support XDP in native mode");
9411 err = dev_xdp_install(dev, mode, bpf_op, extack, flags, new_prog);
9417 dev_xdp_set_link(dev, mode, link);
9419 dev_xdp_set_prog(dev, mode, new_prog);
9421 bpf_prog_put(cur_prog);
9426 static int dev_xdp_attach_link(struct net_device *dev,
9427 struct netlink_ext_ack *extack,
9428 struct bpf_xdp_link *link)
9430 return dev_xdp_attach(dev, extack, link, NULL, NULL, link->flags);
9433 static int dev_xdp_detach_link(struct net_device *dev,
9434 struct netlink_ext_ack *extack,
9435 struct bpf_xdp_link *link)
9437 enum bpf_xdp_mode mode;
9442 mode = dev_xdp_mode(dev, link->flags);
9443 if (dev_xdp_link(dev, mode) != link)
9446 bpf_op = dev_xdp_bpf_op(dev, mode);
9447 WARN_ON(dev_xdp_install(dev, mode, bpf_op, NULL, 0, NULL));
9448 dev_xdp_set_link(dev, mode, NULL);
9452 static void bpf_xdp_link_release(struct bpf_link *link)
9454 struct bpf_xdp_link *xdp_link = container_of(link, struct bpf_xdp_link, link);
9458 /* if racing with net_device's tear down, xdp_link->dev might be
9459 * already NULL, in which case link was already auto-detached
9461 if (xdp_link->dev) {
9462 WARN_ON(dev_xdp_detach_link(xdp_link->dev, NULL, xdp_link));
9463 xdp_link->dev = NULL;
9469 static int bpf_xdp_link_detach(struct bpf_link *link)
9471 bpf_xdp_link_release(link);
9475 static void bpf_xdp_link_dealloc(struct bpf_link *link)
9477 struct bpf_xdp_link *xdp_link = container_of(link, struct bpf_xdp_link, link);
9482 static void bpf_xdp_link_show_fdinfo(const struct bpf_link *link,
9483 struct seq_file *seq)
9485 struct bpf_xdp_link *xdp_link = container_of(link, struct bpf_xdp_link, link);
9490 ifindex = xdp_link->dev->ifindex;
9493 seq_printf(seq, "ifindex:\t%u\n", ifindex);
9496 static int bpf_xdp_link_fill_link_info(const struct bpf_link *link,
9497 struct bpf_link_info *info)
9499 struct bpf_xdp_link *xdp_link = container_of(link, struct bpf_xdp_link, link);
9504 ifindex = xdp_link->dev->ifindex;
9507 info->xdp.ifindex = ifindex;
9511 static int bpf_xdp_link_update(struct bpf_link *link, struct bpf_prog *new_prog,
9512 struct bpf_prog *old_prog)
9514 struct bpf_xdp_link *xdp_link = container_of(link, struct bpf_xdp_link, link);
9515 enum bpf_xdp_mode mode;
9521 /* link might have been auto-released already, so fail */
9522 if (!xdp_link->dev) {
9527 if (old_prog && link->prog != old_prog) {
9531 old_prog = link->prog;
9532 if (old_prog->type != new_prog->type ||
9533 old_prog->expected_attach_type != new_prog->expected_attach_type) {
9538 if (old_prog == new_prog) {
9539 /* no-op, don't disturb drivers */
9540 bpf_prog_put(new_prog);
9544 mode = dev_xdp_mode(xdp_link->dev, xdp_link->flags);
9545 bpf_op = dev_xdp_bpf_op(xdp_link->dev, mode);
9546 err = dev_xdp_install(xdp_link->dev, mode, bpf_op, NULL,
9547 xdp_link->flags, new_prog);
9551 old_prog = xchg(&link->prog, new_prog);
9552 bpf_prog_put(old_prog);
9559 static const struct bpf_link_ops bpf_xdp_link_lops = {
9560 .release = bpf_xdp_link_release,
9561 .dealloc = bpf_xdp_link_dealloc,
9562 .detach = bpf_xdp_link_detach,
9563 .show_fdinfo = bpf_xdp_link_show_fdinfo,
9564 .fill_link_info = bpf_xdp_link_fill_link_info,
9565 .update_prog = bpf_xdp_link_update,
9568 int bpf_xdp_link_attach(const union bpf_attr *attr, struct bpf_prog *prog)
9570 struct net *net = current->nsproxy->net_ns;
9571 struct bpf_link_primer link_primer;
9572 struct netlink_ext_ack extack = {};
9573 struct bpf_xdp_link *link;
9574 struct net_device *dev;
9578 dev = dev_get_by_index(net, attr->link_create.target_ifindex);
9584 link = kzalloc(sizeof(*link), GFP_USER);
9590 bpf_link_init(&link->link, BPF_LINK_TYPE_XDP, &bpf_xdp_link_lops, prog);
9592 link->flags = attr->link_create.flags;
9594 err = bpf_link_prime(&link->link, &link_primer);
9600 err = dev_xdp_attach_link(dev, &extack, link);
9605 bpf_link_cleanup(&link_primer);
9606 trace_bpf_xdp_link_attach_failed(extack._msg);
9610 fd = bpf_link_settle(&link_primer);
9611 /* the link itself doesn't hold dev's refcnt, to avoid complicating shutdown */
9624 * dev_change_xdp_fd - set or clear a bpf program for a device rx path
9626 * @extack: netlink extended ack
9627 * @fd: new program fd or negative value to clear
9628 * @expected_fd: old program fd that userspace expects to replace or clear
9629 * @flags: xdp-related flags
9631 * Set or clear a bpf program for a device
9633 int dev_change_xdp_fd(struct net_device *dev, struct netlink_ext_ack *extack,
9634 int fd, int expected_fd, u32 flags)
9636 enum bpf_xdp_mode mode = dev_xdp_mode(dev, flags);
9637 struct bpf_prog *new_prog = NULL, *old_prog = NULL;
9643 new_prog = bpf_prog_get_type_dev(fd, BPF_PROG_TYPE_XDP,
9644 mode != XDP_MODE_SKB);
9645 if (IS_ERR(new_prog))
9646 return PTR_ERR(new_prog);
9649 if (expected_fd >= 0) {
9650 old_prog = bpf_prog_get_type_dev(expected_fd, BPF_PROG_TYPE_XDP,
9651 mode != XDP_MODE_SKB);
9652 if (IS_ERR(old_prog)) {
9653 err = PTR_ERR(old_prog);
9659 err = dev_xdp_attach(dev, extack, NULL, new_prog, old_prog, flags);
9662 if (err && new_prog)
9663 bpf_prog_put(new_prog);
9665 bpf_prog_put(old_prog);
9670 * dev_index_reserve() - allocate an ifindex in a namespace
9671 * @net: the applicable net namespace
9672 * @ifindex: requested ifindex, pass %0 to get one allocated
9674 * Allocate an ifindex for a new device. Caller must either use the ifindex
9675 * to store the device (via list_netdevice()) or call dev_index_release()
9676 * to give the index up.
9678 * Return: a suitable unique value for a new device interface number or -errno.
9680 static int dev_index_reserve(struct net *net, u32 ifindex)
9684 if (ifindex > INT_MAX) {
9685 DEBUG_NET_WARN_ON_ONCE(1);
9690 err = xa_alloc_cyclic(&net->dev_by_index, &ifindex, NULL,
9691 xa_limit_31b, &net->ifindex, GFP_KERNEL);
9693 err = xa_insert(&net->dev_by_index, ifindex, NULL, GFP_KERNEL);
9700 static void dev_index_release(struct net *net, int ifindex)
9702 /* Expect only unused indexes, unlist_netdevice() removes the used */
9703 WARN_ON(xa_erase(&net->dev_by_index, ifindex));
9706 /* Delayed registration/unregistration */
9707 LIST_HEAD(net_todo_list);
9708 DECLARE_WAIT_QUEUE_HEAD(netdev_unregistering_wq);
9709 atomic_t dev_unreg_count = ATOMIC_INIT(0);
9711 static void net_set_todo(struct net_device *dev)
9713 list_add_tail(&dev->todo_list, &net_todo_list);
9716 static netdev_features_t netdev_sync_upper_features(struct net_device *lower,
9717 struct net_device *upper, netdev_features_t features)
9719 netdev_features_t upper_disables = NETIF_F_UPPER_DISABLES;
9720 netdev_features_t feature;
9723 for_each_netdev_feature(upper_disables, feature_bit) {
9724 feature = __NETIF_F_BIT(feature_bit);
9725 if (!(upper->wanted_features & feature)
9726 && (features & feature)) {
9727 netdev_dbg(lower, "Dropping feature %pNF, upper dev %s has it off.\n",
9728 &feature, upper->name);
9729 features &= ~feature;
9736 static void netdev_sync_lower_features(struct net_device *upper,
9737 struct net_device *lower, netdev_features_t features)
9739 netdev_features_t upper_disables = NETIF_F_UPPER_DISABLES;
9740 netdev_features_t feature;
9743 for_each_netdev_feature(upper_disables, feature_bit) {
9744 feature = __NETIF_F_BIT(feature_bit);
9745 if (!(features & feature) && (lower->features & feature)) {
9746 netdev_dbg(upper, "Disabling feature %pNF on lower dev %s.\n",
9747 &feature, lower->name);
9748 lower->wanted_features &= ~feature;
9749 __netdev_update_features(lower);
9751 if (unlikely(lower->features & feature))
9752 netdev_WARN(upper, "failed to disable %pNF on %s!\n",
9753 &feature, lower->name);
9755 netdev_features_change(lower);
9760 static netdev_features_t netdev_fix_features(struct net_device *dev,
9761 netdev_features_t features)
9763 /* Fix illegal checksum combinations */
9764 if ((features & NETIF_F_HW_CSUM) &&
9765 (features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
9766 netdev_warn(dev, "mixed HW and IP checksum settings.\n");
9767 features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM);
9770 /* TSO requires that SG is present as well. */
9771 if ((features & NETIF_F_ALL_TSO) && !(features & NETIF_F_SG)) {
9772 netdev_dbg(dev, "Dropping TSO features since no SG feature.\n");
9773 features &= ~NETIF_F_ALL_TSO;
9776 if ((features & NETIF_F_TSO) && !(features & NETIF_F_HW_CSUM) &&
9777 !(features & NETIF_F_IP_CSUM)) {
9778 netdev_dbg(dev, "Dropping TSO features since no CSUM feature.\n");
9779 features &= ~NETIF_F_TSO;
9780 features &= ~NETIF_F_TSO_ECN;
9783 if ((features & NETIF_F_TSO6) && !(features & NETIF_F_HW_CSUM) &&
9784 !(features & NETIF_F_IPV6_CSUM)) {
9785 netdev_dbg(dev, "Dropping TSO6 features since no CSUM feature.\n");
9786 features &= ~NETIF_F_TSO6;
9789 /* TSO with IPv4 ID mangling requires IPv4 TSO be enabled */
9790 if ((features & NETIF_F_TSO_MANGLEID) && !(features & NETIF_F_TSO))
9791 features &= ~NETIF_F_TSO_MANGLEID;
9793 /* TSO ECN requires that TSO is present as well. */
9794 if ((features & NETIF_F_ALL_TSO) == NETIF_F_TSO_ECN)
9795 features &= ~NETIF_F_TSO_ECN;
9797 /* Software GSO depends on SG. */
9798 if ((features & NETIF_F_GSO) && !(features & NETIF_F_SG)) {
9799 netdev_dbg(dev, "Dropping NETIF_F_GSO since no SG feature.\n");
9800 features &= ~NETIF_F_GSO;
9803 /* GSO partial features require GSO partial be set */
9804 if ((features & dev->gso_partial_features) &&
9805 !(features & NETIF_F_GSO_PARTIAL)) {
9807 "Dropping partially supported GSO features since no GSO partial.\n");
9808 features &= ~dev->gso_partial_features;
9811 if (!(features & NETIF_F_RXCSUM)) {
9812 /* NETIF_F_GRO_HW implies doing RXCSUM since every packet
9813 * successfully merged by hardware must also have the
9814 * checksum verified by hardware. If the user does not
9815 * want to enable RXCSUM, logically, we should disable GRO_HW.
9817 if (features & NETIF_F_GRO_HW) {
9818 netdev_dbg(dev, "Dropping NETIF_F_GRO_HW since no RXCSUM feature.\n");
9819 features &= ~NETIF_F_GRO_HW;
9823 /* LRO/HW-GRO features cannot be combined with RX-FCS */
9824 if (features & NETIF_F_RXFCS) {
9825 if (features & NETIF_F_LRO) {
9826 netdev_dbg(dev, "Dropping LRO feature since RX-FCS is requested.\n");
9827 features &= ~NETIF_F_LRO;
9830 if (features & NETIF_F_GRO_HW) {
9831 netdev_dbg(dev, "Dropping HW-GRO feature since RX-FCS is requested.\n");
9832 features &= ~NETIF_F_GRO_HW;
9836 if ((features & NETIF_F_GRO_HW) && (features & NETIF_F_LRO)) {
9837 netdev_dbg(dev, "Dropping LRO feature since HW-GRO is requested.\n");
9838 features &= ~NETIF_F_LRO;
9841 if (features & NETIF_F_HW_TLS_TX) {
9842 bool ip_csum = (features & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)) ==
9843 (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
9844 bool hw_csum = features & NETIF_F_HW_CSUM;
9846 if (!ip_csum && !hw_csum) {
9847 netdev_dbg(dev, "Dropping TLS TX HW offload feature since no CSUM feature.\n");
9848 features &= ~NETIF_F_HW_TLS_TX;
9852 if ((features & NETIF_F_HW_TLS_RX) && !(features & NETIF_F_RXCSUM)) {
9853 netdev_dbg(dev, "Dropping TLS RX HW offload feature since no RXCSUM feature.\n");
9854 features &= ~NETIF_F_HW_TLS_RX;
9860 int __netdev_update_features(struct net_device *dev)
9862 struct net_device *upper, *lower;
9863 netdev_features_t features;
9864 struct list_head *iter;
9869 features = netdev_get_wanted_features(dev);
9871 if (dev->netdev_ops->ndo_fix_features)
9872 features = dev->netdev_ops->ndo_fix_features(dev, features);
9874 /* driver might be less strict about feature dependencies */
9875 features = netdev_fix_features(dev, features);
9877 /* some features can't be enabled if they're off on an upper device */
9878 netdev_for_each_upper_dev_rcu(dev, upper, iter)
9879 features = netdev_sync_upper_features(dev, upper, features);
9881 if (dev->features == features)
9884 netdev_dbg(dev, "Features changed: %pNF -> %pNF\n",
9885 &dev->features, &features);
9887 if (dev->netdev_ops->ndo_set_features)
9888 err = dev->netdev_ops->ndo_set_features(dev, features);
9892 if (unlikely(err < 0)) {
9894 "set_features() failed (%d); wanted %pNF, left %pNF\n",
9895 err, &features, &dev->features);
9896 /* return non-0 since some features might have changed and
9897 * it's better to fire a spurious notification than miss it
9903 /* some features must be disabled on lower devices when disabled
9904 * on an upper device (think: bonding master or bridge)
9906 netdev_for_each_lower_dev(dev, lower, iter)
9907 netdev_sync_lower_features(dev, lower, features);
9910 netdev_features_t diff = features ^ dev->features;
9912 if (diff & NETIF_F_RX_UDP_TUNNEL_PORT) {
9913 /* udp_tunnel_{get,drop}_rx_info both need
9914 * NETIF_F_RX_UDP_TUNNEL_PORT enabled on the
9915 * device, or they won't do anything.
9916 * Thus we need to update dev->features
9917 * *before* calling udp_tunnel_get_rx_info,
9918 * but *after* calling udp_tunnel_drop_rx_info.
9920 if (features & NETIF_F_RX_UDP_TUNNEL_PORT) {
9921 dev->features = features;
9922 udp_tunnel_get_rx_info(dev);
9924 udp_tunnel_drop_rx_info(dev);
9928 if (diff & NETIF_F_HW_VLAN_CTAG_FILTER) {
9929 if (features & NETIF_F_HW_VLAN_CTAG_FILTER) {
9930 dev->features = features;
9931 err |= vlan_get_rx_ctag_filter_info(dev);
9933 vlan_drop_rx_ctag_filter_info(dev);
9937 if (diff & NETIF_F_HW_VLAN_STAG_FILTER) {
9938 if (features & NETIF_F_HW_VLAN_STAG_FILTER) {
9939 dev->features = features;
9940 err |= vlan_get_rx_stag_filter_info(dev);
9942 vlan_drop_rx_stag_filter_info(dev);
9946 dev->features = features;
9949 return err < 0 ? 0 : 1;
9953 * netdev_update_features - recalculate device features
9954 * @dev: the device to check
9956 * Recalculate dev->features set and send notifications if it
9957 * has changed. Should be called after driver or hardware dependent
9958 * conditions might have changed that influence the features.
9960 void netdev_update_features(struct net_device *dev)
9962 if (__netdev_update_features(dev))
9963 netdev_features_change(dev);
9965 EXPORT_SYMBOL(netdev_update_features);
9968 * netdev_change_features - recalculate device features
9969 * @dev: the device to check
9971 * Recalculate dev->features set and send notifications even
9972 * if they have not changed. Should be called instead of
9973 * netdev_update_features() if also dev->vlan_features might
9974 * have changed to allow the changes to be propagated to stacked
9977 void netdev_change_features(struct net_device *dev)
9979 __netdev_update_features(dev);
9980 netdev_features_change(dev);
9982 EXPORT_SYMBOL(netdev_change_features);
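/*
 * Illustrative sketch, not part of the original file: how a driver might
 * ask the core to re-evaluate dev->features after a hardware-dependent
 * condition changed at runtime.  The "firmware lost TSO" condition is a
 * hypothetical example.
 */
static void example_recheck_features(struct net_device *dev, bool fw_tso_ok)
{
	ASSERT_RTNL();

	if (!fw_tso_ok)
		dev->hw_features &= ~NETIF_F_ALL_TSO;

	/* Re-runs ndo_fix_features()/netdev_fix_features() and only sends
	 * a notification if the resulting feature set actually changed;
	 * netdev_change_features() would notify unconditionally.
	 */
	netdev_update_features(dev);
}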
9985 * netif_stacked_transfer_operstate - transfer operstate
9986 * @rootdev: the root or lower level device to transfer state from
9987 * @dev: the device to transfer operstate to
9989 * Transfer operational state from root to device. This is normally
9990 * called when a stacking relationship exists between the root
9991	 * device and the device (a leaf device).
9993 void netif_stacked_transfer_operstate(const struct net_device *rootdev,
9994 struct net_device *dev)
9996 if (rootdev->operstate == IF_OPER_DORMANT)
9997 netif_dormant_on(dev);
9999 netif_dormant_off(dev);
10001 if (rootdev->operstate == IF_OPER_TESTING)
10002 netif_testing_on(dev);
10004 netif_testing_off(dev);
10006 if (netif_carrier_ok(rootdev))
10007 netif_carrier_on(dev);
10009 netif_carrier_off(dev);
10011 EXPORT_SYMBOL(netif_stacked_transfer_operstate);
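/*
 * Illustrative sketch, not part of the original file: a stacked driver
 * mirroring its lower device's carrier/dormant/testing state from a
 * netdevice notifier (notifiers run under RTNL).  example_upper_dev is a
 * hypothetical pointer to the driver's stacked device.
 */
static struct net_device *example_upper_dev;

static int example_stack_notifier(struct notifier_block *nb,
				  unsigned long event, void *ptr)
{
	struct net_device *lower = netdev_notifier_info_to_dev(ptr);

	if (event == NETDEV_CHANGE && example_upper_dev &&
	    netdev_has_upper_dev(lower, example_upper_dev))
		netif_stacked_transfer_operstate(lower, example_upper_dev);

	return NOTIFY_DONE;
}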
10013 static int netif_alloc_rx_queues(struct net_device *dev)
10015 unsigned int i, count = dev->num_rx_queues;
10016 struct netdev_rx_queue *rx;
10017 size_t sz = count * sizeof(*rx);
10022 rx = kvzalloc(sz, GFP_KERNEL_ACCOUNT | __GFP_RETRY_MAYFAIL);
10028 for (i = 0; i < count; i++) {
10031 /* XDP RX-queue setup */
10032 err = xdp_rxq_info_reg(&rx[i].xdp_rxq, dev, i, 0);
10039 /* Rollback successful reg's and free other resources */
10041 xdp_rxq_info_unreg(&rx[i].xdp_rxq);
10047 static void netif_free_rx_queues(struct net_device *dev)
10049 unsigned int i, count = dev->num_rx_queues;
10051 /* netif_alloc_rx_queues alloc failed, resources have been unreg'ed */
10055 for (i = 0; i < count; i++)
10056 xdp_rxq_info_unreg(&dev->_rx[i].xdp_rxq);
10061 static void netdev_init_one_queue(struct net_device *dev,
10062 struct netdev_queue *queue, void *_unused)
10064 /* Initialize queue lock */
10065 spin_lock_init(&queue->_xmit_lock);
10066 netdev_set_xmit_lockdep_class(&queue->_xmit_lock, dev->type);
10067 queue->xmit_lock_owner = -1;
10068 netdev_queue_numa_node_write(queue, NUMA_NO_NODE);
10071 dql_init(&queue->dql, HZ);
10075 static void netif_free_tx_queues(struct net_device *dev)
10080 static int netif_alloc_netdev_queues(struct net_device *dev)
10082 unsigned int count = dev->num_tx_queues;
10083 struct netdev_queue *tx;
10084 size_t sz = count * sizeof(*tx);
10086 if (count < 1 || count > 0xffff)
10089 tx = kvzalloc(sz, GFP_KERNEL_ACCOUNT | __GFP_RETRY_MAYFAIL);
10095 netdev_for_each_tx_queue(dev, netdev_init_one_queue, NULL);
10096 spin_lock_init(&dev->tx_global_lock);
10101 void netif_tx_stop_all_queues(struct net_device *dev)
10105 for (i = 0; i < dev->num_tx_queues; i++) {
10106 struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
10108 netif_tx_stop_queue(txq);
10111 EXPORT_SYMBOL(netif_tx_stop_all_queues);
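/*
 * Illustrative sketch, not part of the original file: a driver pausing
 * and resuming transmission around a link event, pairing the queue
 * stop/wake helpers with the carrier state.
 */
static void example_handle_link(struct net_device *dev, bool link_up)
{
	if (link_up) {
		netif_carrier_on(dev);
		netif_tx_wake_all_queues(dev);
	} else {
		netif_tx_stop_all_queues(dev);
		netif_carrier_off(dev);
	}
}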
10113 static int netdev_do_alloc_pcpu_stats(struct net_device *dev)
10117 /* Drivers implementing ndo_get_peer_dev must support tstat
10118 * accounting, so that skb_do_redirect() can bump the dev's
10119 * RX stats upon network namespace switch.
10121 if (dev->netdev_ops->ndo_get_peer_dev &&
10122 dev->pcpu_stat_type != NETDEV_PCPU_STAT_TSTATS)
10123 return -EOPNOTSUPP;
10125 switch (dev->pcpu_stat_type) {
10126 case NETDEV_PCPU_STAT_NONE:
10128 case NETDEV_PCPU_STAT_LSTATS:
10129 v = dev->lstats = netdev_alloc_pcpu_stats(struct pcpu_lstats);
10131 case NETDEV_PCPU_STAT_TSTATS:
10132 v = dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
10134 case NETDEV_PCPU_STAT_DSTATS:
10135 v = dev->dstats = netdev_alloc_pcpu_stats(struct pcpu_dstats);
10141 return v ? 0 : -ENOMEM;
10144 static void netdev_do_free_pcpu_stats(struct net_device *dev)
10146 switch (dev->pcpu_stat_type) {
10147 case NETDEV_PCPU_STAT_NONE:
10149 case NETDEV_PCPU_STAT_LSTATS:
10150 free_percpu(dev->lstats);
10152 case NETDEV_PCPU_STAT_TSTATS:
10153 free_percpu(dev->tstats);
10155 case NETDEV_PCPU_STAT_DSTATS:
10156 free_percpu(dev->dstats);
10162 * register_netdevice() - register a network device
10163 * @dev: device to register
10165 * Take a prepared network device structure and make it externally accessible.
10166 * A %NETDEV_REGISTER message is sent to the netdev notifier chain.
10167 * Callers must hold the rtnl lock - you may want register_netdev()
10170 int register_netdevice(struct net_device *dev)
10173 struct net *net = dev_net(dev);
10175 BUILD_BUG_ON(sizeof(netdev_features_t) * BITS_PER_BYTE <
10176 NETDEV_FEATURE_COUNT);
10177 BUG_ON(dev_boot_phase);
10182 /* When net_device's are persistent, this will be fatal. */
10183 BUG_ON(dev->reg_state != NETREG_UNINITIALIZED);
10186 ret = ethtool_check_ops(dev->ethtool_ops);
10190 spin_lock_init(&dev->addr_list_lock);
10191 netdev_set_addr_lockdep_class(dev);
10193 ret = dev_get_valid_name(net, dev, dev->name);
10198 dev->name_node = netdev_name_node_head_alloc(dev);
10199 if (!dev->name_node)
10202 /* Init, if this function is available */
10203 if (dev->netdev_ops->ndo_init) {
10204 ret = dev->netdev_ops->ndo_init(dev);
10208 goto err_free_name;
10212 if (((dev->hw_features | dev->features) &
10213 NETIF_F_HW_VLAN_CTAG_FILTER) &&
10214 (!dev->netdev_ops->ndo_vlan_rx_add_vid ||
10215 !dev->netdev_ops->ndo_vlan_rx_kill_vid)) {
10216 netdev_WARN(dev, "Buggy VLAN acceleration in driver!\n");
10221 ret = netdev_do_alloc_pcpu_stats(dev);
10225 ret = dev_index_reserve(net, dev->ifindex);
10227 goto err_free_pcpu;
10228 dev->ifindex = ret;
10230 /* Transfer changeable features to wanted_features and enable
10231 * software offloads (GSO and GRO).
10233 dev->hw_features |= (NETIF_F_SOFT_FEATURES | NETIF_F_SOFT_FEATURES_OFF);
10234 dev->features |= NETIF_F_SOFT_FEATURES;
10236 if (dev->udp_tunnel_nic_info) {
10237 dev->features |= NETIF_F_RX_UDP_TUNNEL_PORT;
10238 dev->hw_features |= NETIF_F_RX_UDP_TUNNEL_PORT;
10241 dev->wanted_features = dev->features & dev->hw_features;
10243 if (!(dev->flags & IFF_LOOPBACK))
10244 dev->hw_features |= NETIF_F_NOCACHE_COPY;
10246 /* If IPv4 TCP segmentation offload is supported we should also
10247 * allow the device to enable segmenting the frame with the option
10248 * of ignoring a static IP ID value. This doesn't enable the
10249 * feature itself but allows the user to enable it later.
10251 if (dev->hw_features & NETIF_F_TSO)
10252 dev->hw_features |= NETIF_F_TSO_MANGLEID;
10253 if (dev->vlan_features & NETIF_F_TSO)
10254 dev->vlan_features |= NETIF_F_TSO_MANGLEID;
10255 if (dev->mpls_features & NETIF_F_TSO)
10256 dev->mpls_features |= NETIF_F_TSO_MANGLEID;
10257 if (dev->hw_enc_features & NETIF_F_TSO)
10258 dev->hw_enc_features |= NETIF_F_TSO_MANGLEID;
10260 /* Make NETIF_F_HIGHDMA inheritable to VLAN devices.
10262 dev->vlan_features |= NETIF_F_HIGHDMA;
10264 /* Make NETIF_F_SG inheritable to tunnel devices.
10266 dev->hw_enc_features |= NETIF_F_SG | NETIF_F_GSO_PARTIAL;
10268 /* Make NETIF_F_SG inheritable to MPLS.
10270 dev->mpls_features |= NETIF_F_SG;
10272 ret = call_netdevice_notifiers(NETDEV_POST_INIT, dev);
10273 ret = notifier_to_errno(ret);
10275 goto err_ifindex_release;
10277 ret = netdev_register_kobject(dev);
10279 WRITE_ONCE(dev->reg_state, ret ? NETREG_UNREGISTERED : NETREG_REGISTERED);
10282 goto err_uninit_notify;
10284 __netdev_update_features(dev);
10287 * Default initial state at registry is that the
10288 * device is present.
10291 set_bit(__LINK_STATE_PRESENT, &dev->state);
10293 linkwatch_init_dev(dev);
10295 dev_init_scheduler(dev);
10297 netdev_hold(dev, &dev->dev_registered_tracker, GFP_KERNEL);
10298 list_netdevice(dev);
10300 add_device_randomness(dev->dev_addr, dev->addr_len);
10302 /* If the device has permanent device address, driver should
10303 * set dev_addr and also addr_assign_type should be set to
10304 * NET_ADDR_PERM (default value).
10306 if (dev->addr_assign_type == NET_ADDR_PERM)
10307 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
10309 /* Notify protocols, that a new device appeared. */
10310 ret = call_netdevice_notifiers(NETDEV_REGISTER, dev);
10311 ret = notifier_to_errno(ret);
10313 /* Expect explicit free_netdev() on failure */
10314 dev->needs_free_netdev = false;
10315 unregister_netdevice_queue(dev, NULL);
10319 * Prevent userspace races by waiting until the network
10320 * device is fully setup before sending notifications.
10322 if (!dev->rtnl_link_ops ||
10323 dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
10324 rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U, GFP_KERNEL, 0, NULL);
10330 call_netdevice_notifiers(NETDEV_PRE_UNINIT, dev);
10331 err_ifindex_release:
10332 dev_index_release(net, dev->ifindex);
10334 netdev_do_free_pcpu_stats(dev);
10336 if (dev->netdev_ops->ndo_uninit)
10337 dev->netdev_ops->ndo_uninit(dev);
10338 if (dev->priv_destructor)
10339 dev->priv_destructor(dev);
10341 netdev_name_node_free(dev->name_node);
10344 EXPORT_SYMBOL(register_netdevice);
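/*
 * Illustrative sketch, not part of the original file: code that already
 * runs under rtnl_lock() (for example an rtnl_link_ops ->newlink()
 * implementation) calls register_netdevice() directly rather than the
 * register_netdev() wrapper below.
 */
static int example_newlink(struct net_device *dev)
{
	ASSERT_RTNL();

	return register_netdevice(dev);
}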
10347 * init_dummy_netdev - init a dummy network device for NAPI
10348 * @dev: device to init
10350	 * This takes a network device structure and initializes the minimum
10351 * amount of fields so it can be used to schedule NAPI polls without
10352 * registering a full blown interface. This is to be used by drivers
10353 * that need to tie several hardware interfaces to a single NAPI
10354 * poll scheduler due to HW limitations.
10356 void init_dummy_netdev(struct net_device *dev)
10358 /* Clear everything. Note we don't initialize spinlocks
10359	 * as they aren't supposed to be taken by any of the
10360 * NAPI code and this dummy netdev is supposed to be
10361 * only ever used for NAPI polls
10363 memset(dev, 0, sizeof(struct net_device));
10365 /* make sure we BUG if trying to hit standard
10366 * register/unregister code path
10368 dev->reg_state = NETREG_DUMMY;
10370 /* NAPI wants this */
10371 INIT_LIST_HEAD(&dev->napi_list);
10373 /* a dummy interface is started by default */
10374 set_bit(__LINK_STATE_PRESENT, &dev->state);
10375 set_bit(__LINK_STATE_START, &dev->state);
10377 /* napi_busy_loop stats accounting wants this */
10378 dev_net_set(dev, &init_net);
10380	/* Note: We don't allocate pcpu_refcnt for dummy devices,
10381	 * because users of this 'device' don't need to change
10385 EXPORT_SYMBOL_GPL(init_dummy_netdev);
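/*
 * Illustrative sketch, not part of the original file: a driver funnelling
 * several hardware ports into one NAPI context backed by a dummy netdev.
 * struct example_hw and example_poll() are hypothetical.
 */
struct example_hw {
	struct net_device napi_dev;	/* dummy device, never registered */
	struct napi_struct napi;
};

static int example_poll(struct napi_struct *napi, int budget)
{
	/* ... service the RX rings of all ports here ... */
	napi_complete_done(napi, 0);
	return 0;
}

static void example_hw_init_napi(struct example_hw *hw)
{
	init_dummy_netdev(&hw->napi_dev);
	netif_napi_add(&hw->napi_dev, &hw->napi, example_poll);
	napi_enable(&hw->napi);
}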
10389 * register_netdev - register a network device
10390 * @dev: device to register
10392 * Take a completed network device structure and add it to the kernel
10393 * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
10394 * chain. 0 is returned on success. A negative errno code is returned
10395 * on a failure to set up the device, or if the name is a duplicate.
10397 * This is a wrapper around register_netdevice that takes the rtnl semaphore
10398 * and expands the device name if you passed a format string to
10401 int register_netdev(struct net_device *dev)
10405 if (rtnl_lock_killable())
10407 err = register_netdevice(dev);
10411 EXPORT_SYMBOL(register_netdev);
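/*
 * Illustrative sketch, not part of the original file: the usual
 * probe-time pairing of alloc_etherdev()/register_netdev(), with
 * free_netdev() on the error path.  struct example_priv and the
 * example_netdev_ops argument are hypothetical.
 */
struct example_priv {
	int dummy;
};

static int example_probe(const struct net_device_ops *example_netdev_ops)
{
	struct net_device *dev;
	int err;

	dev = alloc_etherdev(sizeof(struct example_priv));
	if (!dev)
		return -ENOMEM;

	dev->netdev_ops = example_netdev_ops;

	/* takes and drops the rtnl lock, expands "eth%d" if used */
	err = register_netdev(dev);
	if (err) {
		free_netdev(dev);
		return err;
	}
	return 0;
}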
10413 int netdev_refcnt_read(const struct net_device *dev)
10415 #ifdef CONFIG_PCPU_DEV_REFCNT
10418 for_each_possible_cpu(i)
10419 refcnt += *per_cpu_ptr(dev->pcpu_refcnt, i);
10422 return refcount_read(&dev->dev_refcnt);
10425 EXPORT_SYMBOL(netdev_refcnt_read);
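/*
 * Illustrative sketch, not part of the original file: the tracked
 * reference API whose count netdev_refcnt_read() reports.  Every
 * netdev_hold() must be balanced by a netdev_put() with the same tracker,
 * otherwise netdev_wait_allrefs_any() below stalls at unregister time.
 * struct example_ctx is hypothetical.
 */
struct example_ctx {
	struct net_device *dev;
	netdevice_tracker tracker;
};

static void example_take_dev(struct example_ctx *ctx, struct net_device *dev)
{
	ctx->dev = dev;
	netdev_hold(dev, &ctx->tracker, GFP_KERNEL);
}

static void example_release_dev(struct example_ctx *ctx)
{
	netdev_put(ctx->dev, &ctx->tracker);
	ctx->dev = NULL;
}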
10427 int netdev_unregister_timeout_secs __read_mostly = 10;
10429 #define WAIT_REFS_MIN_MSECS 1
10430 #define WAIT_REFS_MAX_MSECS 250
10432 * netdev_wait_allrefs_any - wait until all references are gone.
10433 * @list: list of net_devices to wait on
10435 * This is called when unregistering network devices.
10437 * Any protocol or device that holds a reference should register
10438 * for netdevice notification, and cleanup and put back the
10439 * reference if they receive an UNREGISTER event.
10440 * We can get stuck here if buggy protocols don't correctly
10443 static struct net_device *netdev_wait_allrefs_any(struct list_head *list)
10445 unsigned long rebroadcast_time, warning_time;
10446 struct net_device *dev;
10449 rebroadcast_time = warning_time = jiffies;
10451 list_for_each_entry(dev, list, todo_list)
10452 if (netdev_refcnt_read(dev) == 1)
10456 if (time_after(jiffies, rebroadcast_time + 1 * HZ)) {
10459 /* Rebroadcast unregister notification */
10460 list_for_each_entry(dev, list, todo_list)
10461 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
10467 list_for_each_entry(dev, list, todo_list)
10468 if (test_bit(__LINK_STATE_LINKWATCH_PENDING,
10470 /* We must not have linkwatch events
10471 * pending on unregister. If this
10472 * happens, we simply run the queue
10473 * unscheduled, resulting in a noop
10476 linkwatch_run_queue();
10482 rebroadcast_time = jiffies;
10487 wait = WAIT_REFS_MIN_MSECS;
10490 wait = min(wait << 1, WAIT_REFS_MAX_MSECS);
10493 list_for_each_entry(dev, list, todo_list)
10494 if (netdev_refcnt_read(dev) == 1)
10497 if (time_after(jiffies, warning_time +
10498 READ_ONCE(netdev_unregister_timeout_secs) * HZ)) {
10499 list_for_each_entry(dev, list, todo_list) {
10500 pr_emerg("unregister_netdevice: waiting for %s to become free. Usage count = %d\n",
10501 dev->name, netdev_refcnt_read(dev));
10502 ref_tracker_dir_print(&dev->refcnt_tracker, 10);
10505 warning_time = jiffies;
10510 /* The sequence is:
10514 * register_netdevice(x1);
10515 * register_netdevice(x2);
10517 * unregister_netdevice(y1);
10518 * unregister_netdevice(y2);
10524 * We are invoked by rtnl_unlock().
10525 * This allows us to deal with problems:
10526 * 1) We can delete sysfs objects which invoke hotplug
10527 * without deadlocking with linkwatch via keventd.
10528 * 2) Since we run with the RTNL semaphore not held, we can sleep
10529 * safely in order to wait for the netdev refcnt to drop to zero.
10531 * We must not return until all unregister events added during
10532 * the interval the lock was held have been completed.
10534 void netdev_run_todo(void)
10536 struct net_device *dev, *tmp;
10537 struct list_head list;
10539 #ifdef CONFIG_LOCKDEP
10540 struct list_head unlink_list;
10542 list_replace_init(&net_unlink_list, &unlink_list);
10544 while (!list_empty(&unlink_list)) {
10545 struct net_device *dev = list_first_entry(&unlink_list,
10548 list_del_init(&dev->unlink_list);
10549 dev->nested_level = dev->lower_level - 1;
10553 /* Snapshot list, allow later requests */
10554 list_replace_init(&net_todo_list, &list);
10558 /* Wait for rcu callbacks to finish before next phase */
10559 if (!list_empty(&list))
10562 list_for_each_entry_safe(dev, tmp, &list, todo_list) {
10563 if (unlikely(dev->reg_state != NETREG_UNREGISTERING)) {
10564 netdev_WARN(dev, "run_todo but not unregistering\n");
10565 list_del(&dev->todo_list);
10569 WRITE_ONCE(dev->reg_state, NETREG_UNREGISTERED);
10570 linkwatch_sync_dev(dev);
10574 while (!list_empty(&list)) {
10575 dev = netdev_wait_allrefs_any(&list);
10576 list_del(&dev->todo_list);
10579 BUG_ON(netdev_refcnt_read(dev) != 1);
10580 BUG_ON(!list_empty(&dev->ptype_all));
10581 BUG_ON(!list_empty(&dev->ptype_specific));
10582 WARN_ON(rcu_access_pointer(dev->ip_ptr));
10583 WARN_ON(rcu_access_pointer(dev->ip6_ptr));
10585 netdev_do_free_pcpu_stats(dev);
10586 if (dev->priv_destructor)
10587 dev->priv_destructor(dev);
10588 if (dev->needs_free_netdev)
10593 /* Free network device */
10594 kobject_put(&dev->dev.kobj);
10596 if (cnt && atomic_sub_and_test(cnt, &dev_unreg_count))
10597 wake_up(&netdev_unregistering_wq);
10600 /* Convert net_device_stats to rtnl_link_stats64. rtnl_link_stats64 has
10601 * all the same fields in the same order as net_device_stats, with only
10602 * the type differing, but rtnl_link_stats64 may have additional fields
10603 * at the end for newer counters.
10605 void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64,
10606 const struct net_device_stats *netdev_stats)
10608 size_t i, n = sizeof(*netdev_stats) / sizeof(atomic_long_t);
10609 const atomic_long_t *src = (atomic_long_t *)netdev_stats;
10610 u64 *dst = (u64 *)stats64;
10612 BUILD_BUG_ON(n > sizeof(*stats64) / sizeof(u64));
10613 for (i = 0; i < n; i++)
10614 dst[i] = (unsigned long)atomic_long_read(&src[i]);
10615 /* zero out counters that only exist in rtnl_link_stats64 */
10616 memset((char *)stats64 + n * sizeof(u64), 0,
10617 sizeof(*stats64) - n * sizeof(u64));
10619 EXPORT_SYMBOL(netdev_stats_to_stats64);
10621 static __cold struct net_device_core_stats __percpu *netdev_core_stats_alloc(
10622 struct net_device *dev)
10624 struct net_device_core_stats __percpu *p;
10626 p = alloc_percpu_gfp(struct net_device_core_stats,
10627 GFP_ATOMIC | __GFP_NOWARN);
10629 if (p && cmpxchg(&dev->core_stats, NULL, p))
10632 /* This READ_ONCE() pairs with the cmpxchg() above */
10633 return READ_ONCE(dev->core_stats);
10636 noinline void netdev_core_stats_inc(struct net_device *dev, u32 offset)
10638 /* This READ_ONCE() pairs with the write in netdev_core_stats_alloc() */
10639 struct net_device_core_stats __percpu *p = READ_ONCE(dev->core_stats);
10640 unsigned long __percpu *field;
10642 if (unlikely(!p)) {
10643 p = netdev_core_stats_alloc(dev);
10648 field = (__force unsigned long __percpu *)((__force void *)p + offset);
10649 this_cpu_inc(*field);
10651 EXPORT_SYMBOL_GPL(netdev_core_stats_inc);
10654 * dev_get_stats - get network device statistics
10655 * @dev: device to get statistics from
10656 * @storage: place to store stats
10658 * Get network statistics from device. Return @storage.
10659 * The device driver may provide its own method by setting
10660 * dev->netdev_ops->get_stats64 or dev->netdev_ops->get_stats;
10661 * otherwise the internal statistics structure is used.
10663 struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
10664 struct rtnl_link_stats64 *storage)
10666 const struct net_device_ops *ops = dev->netdev_ops;
10667 const struct net_device_core_stats __percpu *p;
10669 if (ops->ndo_get_stats64) {
10670 memset(storage, 0, sizeof(*storage));
10671 ops->ndo_get_stats64(dev, storage);
10672 } else if (ops->ndo_get_stats) {
10673 netdev_stats_to_stats64(storage, ops->ndo_get_stats(dev));
10674 } else if (dev->pcpu_stat_type == NETDEV_PCPU_STAT_TSTATS) {
10675 dev_get_tstats64(dev, storage);
10677 netdev_stats_to_stats64(storage, &dev->stats);
10680 /* This READ_ONCE() pairs with the write in netdev_core_stats_alloc() */
10681 p = READ_ONCE(dev->core_stats);
10683 const struct net_device_core_stats *core_stats;
10686 for_each_possible_cpu(i) {
10687 core_stats = per_cpu_ptr(p, i);
10688 storage->rx_dropped += READ_ONCE(core_stats->rx_dropped);
10689 storage->tx_dropped += READ_ONCE(core_stats->tx_dropped);
10690 storage->rx_nohandler += READ_ONCE(core_stats->rx_nohandler);
10691 storage->rx_otherhost_dropped += READ_ONCE(core_stats->rx_otherhost_dropped);
10696 EXPORT_SYMBOL(dev_get_stats);
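/*
 * Illustrative sketch, not part of the original file: a reader of the
 * aggregated counters.  dev_get_stats() fills @storage on the caller's
 * stack; the caller is responsible for keeping @dev alive (e.g. by
 * holding RTNL or a reference).
 */
static u64 example_rx_packets(struct net_device *dev)
{
	struct rtnl_link_stats64 stats;

	dev_get_stats(dev, &stats);
	return stats.rx_packets;
}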
10699 * dev_fetch_sw_netstats - get per-cpu network device statistics
10700 * @s: place to store stats
10701 * @netstats: per-cpu network stats to read from
10703 * Read per-cpu network statistics and populate the related fields in @s.
10705 void dev_fetch_sw_netstats(struct rtnl_link_stats64 *s,
10706 const struct pcpu_sw_netstats __percpu *netstats)
10710 for_each_possible_cpu(cpu) {
10711 u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
10712 const struct pcpu_sw_netstats *stats;
10713 unsigned int start;
10715 stats = per_cpu_ptr(netstats, cpu);
10717 start = u64_stats_fetch_begin(&stats->syncp);
10718 rx_packets = u64_stats_read(&stats->rx_packets);
10719 rx_bytes = u64_stats_read(&stats->rx_bytes);
10720 tx_packets = u64_stats_read(&stats->tx_packets);
10721 tx_bytes = u64_stats_read(&stats->tx_bytes);
10722 } while (u64_stats_fetch_retry(&stats->syncp, start));
10724 s->rx_packets += rx_packets;
10725 s->rx_bytes += rx_bytes;
10726 s->tx_packets += tx_packets;
10727 s->tx_bytes += tx_bytes;
10730 EXPORT_SYMBOL_GPL(dev_fetch_sw_netstats);
10733 * dev_get_tstats64 - ndo_get_stats64 implementation
10734 * @dev: device to get statistics from
10735 * @s: place to store stats
10737 * Populate @s from dev->stats and dev->tstats. Can be used as
10738 * ndo_get_stats64() callback.
10740 void dev_get_tstats64(struct net_device *dev, struct rtnl_link_stats64 *s)
10742 netdev_stats_to_stats64(s, &dev->stats);
10743 dev_fetch_sw_netstats(s, dev->tstats);
10745 EXPORT_SYMBOL_GPL(dev_get_tstats64);
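/*
 * Illustrative sketch, not part of the original file: the producer side
 * of the per-CPU tstats that dev_get_tstats64() folds back together.  A
 * tunnel-style driver bumps the counters locklessly per packet.
 */
static void example_count_tx(struct net_device *dev, struct sk_buff *skb)
{
	/* updates this CPU's pcpu_sw_netstats under its u64_stats syncp */
	dev_sw_netstats_tx_add(dev, 1, skb->len);
}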
10747 struct netdev_queue *dev_ingress_queue_create(struct net_device *dev)
10749 struct netdev_queue *queue = dev_ingress_queue(dev);
10751 #ifdef CONFIG_NET_CLS_ACT
10754 queue = kzalloc(sizeof(*queue), GFP_KERNEL);
10757 netdev_init_one_queue(dev, queue, NULL);
10758 RCU_INIT_POINTER(queue->qdisc, &noop_qdisc);
10759 RCU_INIT_POINTER(queue->qdisc_sleeping, &noop_qdisc);
10760 rcu_assign_pointer(dev->ingress_queue, queue);
10765 static const struct ethtool_ops default_ethtool_ops;
10767 void netdev_set_default_ethtool_ops(struct net_device *dev,
10768 const struct ethtool_ops *ops)
10770 if (dev->ethtool_ops == &default_ethtool_ops)
10771 dev->ethtool_ops = ops;
10773 EXPORT_SYMBOL_GPL(netdev_set_default_ethtool_ops);
10776 * netdev_sw_irq_coalesce_default_on() - enable SW IRQ coalescing by default
10777 * @dev: netdev to enable the IRQ coalescing on
10779 * Sets a conservative default for SW IRQ coalescing. Users can use
10780 * sysfs attributes to override the default values.
10782 void netdev_sw_irq_coalesce_default_on(struct net_device *dev)
10784 WARN_ON(dev->reg_state == NETREG_REGISTERED);
10786 if (!IS_ENABLED(CONFIG_PREEMPT_RT)) {
10787 dev->gro_flush_timeout = 20000;
10788 dev->napi_defer_hard_irqs = 1;
10791 EXPORT_SYMBOL_GPL(netdev_sw_irq_coalesce_default_on);
10793 void netdev_freemem(struct net_device *dev)
10795 char *addr = (char *)dev - dev->padded;
10801 * alloc_netdev_mqs - allocate network device
10802 * @sizeof_priv: size of private data to allocate space for
10803 * @name: device name format string
10804 * @name_assign_type: origin of device name
10805 * @setup: callback to initialize device
10806 * @txqs: the number of TX subqueues to allocate
10807 * @rxqs: the number of RX subqueues to allocate
10809 * Allocates a struct net_device with private data area for driver use
10810 * and performs basic initialization. Also allocates subqueue structs
10811 * for each queue on the device.
10813 struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
10814 unsigned char name_assign_type,
10815 void (*setup)(struct net_device *),
10816 unsigned int txqs, unsigned int rxqs)
10818 struct net_device *dev;
10819 unsigned int alloc_size;
10820 struct net_device *p;
10822 BUG_ON(strlen(name) >= sizeof(dev->name));
10825 pr_err("alloc_netdev: Unable to allocate device with zero queues\n");
10830 pr_err("alloc_netdev: Unable to allocate device with zero RX queues\n");
10834 alloc_size = sizeof(struct net_device);
10836 /* ensure 32-byte alignment of private area */
10837 alloc_size = ALIGN(alloc_size, NETDEV_ALIGN);
10838 alloc_size += sizeof_priv;
10840 /* ensure 32-byte alignment of whole construct */
10841 alloc_size += NETDEV_ALIGN - 1;
10843 p = kvzalloc(alloc_size, GFP_KERNEL_ACCOUNT | __GFP_RETRY_MAYFAIL);
10847 dev = PTR_ALIGN(p, NETDEV_ALIGN);
10848 dev->padded = (char *)dev - (char *)p;
10850 ref_tracker_dir_init(&dev->refcnt_tracker, 128, name);
10851 #ifdef CONFIG_PCPU_DEV_REFCNT
10852 dev->pcpu_refcnt = alloc_percpu(int);
10853 if (!dev->pcpu_refcnt)
10857 refcount_set(&dev->dev_refcnt, 1);
10860 if (dev_addr_init(dev))
10866 dev_net_set(dev, &init_net);
10868 dev->gso_max_size = GSO_LEGACY_MAX_SIZE;
10869 dev->xdp_zc_max_segs = 1;
10870 dev->gso_max_segs = GSO_MAX_SEGS;
10871 dev->gro_max_size = GRO_LEGACY_MAX_SIZE;
10872 dev->gso_ipv4_max_size = GSO_LEGACY_MAX_SIZE;
10873 dev->gro_ipv4_max_size = GRO_LEGACY_MAX_SIZE;
10874 dev->tso_max_size = TSO_LEGACY_MAX_SIZE;
10875 dev->tso_max_segs = TSO_MAX_SEGS;
10876 dev->upper_level = 1;
10877 dev->lower_level = 1;
10878 #ifdef CONFIG_LOCKDEP
10879 dev->nested_level = 0;
10880 INIT_LIST_HEAD(&dev->unlink_list);
10883 INIT_LIST_HEAD(&dev->napi_list);
10884 INIT_LIST_HEAD(&dev->unreg_list);
10885 INIT_LIST_HEAD(&dev->close_list);
10886 INIT_LIST_HEAD(&dev->link_watch_list);
10887 INIT_LIST_HEAD(&dev->adj_list.upper);
10888 INIT_LIST_HEAD(&dev->adj_list.lower);
10889 INIT_LIST_HEAD(&dev->ptype_all);
10890 INIT_LIST_HEAD(&dev->ptype_specific);
10891 INIT_LIST_HEAD(&dev->net_notifier_list);
10892 #ifdef CONFIG_NET_SCHED
10893 hash_init(dev->qdisc_hash);
10895 dev->priv_flags = IFF_XMIT_DST_RELEASE | IFF_XMIT_DST_RELEASE_PERM;
10898 if (!dev->tx_queue_len) {
10899 dev->priv_flags |= IFF_NO_QUEUE;
10900 dev->tx_queue_len = DEFAULT_TX_QUEUE_LEN;
10903 dev->num_tx_queues = txqs;
10904 dev->real_num_tx_queues = txqs;
10905 if (netif_alloc_netdev_queues(dev))
10908 dev->num_rx_queues = rxqs;
10909 dev->real_num_rx_queues = rxqs;
10910 if (netif_alloc_rx_queues(dev))
10913 strcpy(dev->name, name);
10914 dev->name_assign_type = name_assign_type;
10915 dev->group = INIT_NETDEV_GROUP;
10916 if (!dev->ethtool_ops)
10917 dev->ethtool_ops = &default_ethtool_ops;
10919 nf_hook_netdev_init(dev);
10928 #ifdef CONFIG_PCPU_DEV_REFCNT
10929 free_percpu(dev->pcpu_refcnt);
10932 netdev_freemem(dev);
10935 EXPORT_SYMBOL(alloc_netdev_mqs);
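/*
 * Illustrative sketch, not part of the original file: allocating a
 * multiqueue device directly with alloc_netdev_mqs(); the alloc_netdev()
 * and alloc_etherdev() helpers are thin wrappers around it.  The setup
 * callback and queue counts are hypothetical.
 */
static void example_setup(struct net_device *dev)
{
	ether_setup(dev);	/* Ethernet defaults: type, MTU, broadcast */
}

static struct net_device *example_alloc(unsigned int txqs, unsigned int rxqs)
{
	return alloc_netdev_mqs(0, "example%d", NET_NAME_ENUM,
				example_setup, txqs, rxqs);
}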
10938 * free_netdev - free network device
10941 * This function does the last stage of destroying an allocated device
10942 * interface. The reference to the device object is released. If this
10943	 * is the last reference then it will be freed. Must be called in process
10946 void free_netdev(struct net_device *dev)
10948 struct napi_struct *p, *n;
10952 /* When called immediately after register_netdevice() failed the unwind
10953 * handling may still be dismantling the device. Handle that case by
10954 * deferring the free.
10956 if (dev->reg_state == NETREG_UNREGISTERING) {
10958 dev->needs_free_netdev = true;
10962 netif_free_tx_queues(dev);
10963 netif_free_rx_queues(dev);
10965 kfree(rcu_dereference_protected(dev->ingress_queue, 1));
10967 /* Flush device addresses */
10968 dev_addr_flush(dev);
10970 list_for_each_entry_safe(p, n, &dev->napi_list, dev_list)
10973 ref_tracker_dir_exit(&dev->refcnt_tracker);
10974 #ifdef CONFIG_PCPU_DEV_REFCNT
10975 free_percpu(dev->pcpu_refcnt);
10976 dev->pcpu_refcnt = NULL;
10978 free_percpu(dev->core_stats);
10979 dev->core_stats = NULL;
10980 free_percpu(dev->xdp_bulkq);
10981 dev->xdp_bulkq = NULL;
10983 /* Compatibility with error handling in drivers */
10984 if (dev->reg_state == NETREG_UNINITIALIZED) {
10985 netdev_freemem(dev);
10989 BUG_ON(dev->reg_state != NETREG_UNREGISTERED);
10990 WRITE_ONCE(dev->reg_state, NETREG_RELEASED);
10992 /* will free via device release */
10993 put_device(&dev->dev);
10995 EXPORT_SYMBOL(free_netdev);
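/*
 * Illustrative sketch, not part of the original file: virtual drivers
 * that do not want an explicit free_netdev() after unregistering can ask
 * the core to free the device from netdev_run_todo() instead.
 */
static void example_virt_setup(struct net_device *dev)
{
	dev->needs_free_netdev = true;	/* freed automatically on unregister */
}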
10998 * synchronize_net - Synchronize with packet receive processing
11000 * Wait for packets currently being received to be done.
11001 * Does not block later packets from starting.
11003 void synchronize_net(void)
11006 if (rtnl_is_locked())
11007 synchronize_rcu_expedited();
11011 EXPORT_SYMBOL(synchronize_net);
11014 * unregister_netdevice_queue - remove device from the kernel
11018 * This function shuts down a device interface and removes it
11019 * from the kernel tables.
11020 * If head not NULL, device is queued to be unregistered later.
11022 * Callers must hold the rtnl semaphore. You may want
11023 * unregister_netdev() instead of this.
11026 void unregister_netdevice_queue(struct net_device *dev, struct list_head *head)
11031 list_move_tail(&dev->unreg_list, head);
11035 list_add(&dev->unreg_list, &single);
11036 unregister_netdevice_many(&single);
11039 EXPORT_SYMBOL(unregister_netdevice_queue);
11041 void unregister_netdevice_many_notify(struct list_head *head,
11042 u32 portid, const struct nlmsghdr *nlh)
11044 struct net_device *dev, *tmp;
11045 LIST_HEAD(close_head);
11048 BUG_ON(dev_boot_phase);
11051 if (list_empty(head))
11054 list_for_each_entry_safe(dev, tmp, head, unreg_list) {
11055 /* Some devices call without registering
11056 * for initialization unwind. Remove those
11057 * devices and proceed with the remaining.
11059 if (dev->reg_state == NETREG_UNINITIALIZED) {
11060 pr_debug("unregister_netdevice: device %s/%p never was registered\n",
11064 list_del(&dev->unreg_list);
11067 dev->dismantle = true;
11068 BUG_ON(dev->reg_state != NETREG_REGISTERED);
11071 /* If device is running, close it first. */
11072 list_for_each_entry(dev, head, unreg_list)
11073 list_add_tail(&dev->close_list, &close_head);
11074 dev_close_many(&close_head, true);
11076 list_for_each_entry(dev, head, unreg_list) {
11077 /* And unlink it from device chain. */
11078 unlist_netdevice(dev);
11079 WRITE_ONCE(dev->reg_state, NETREG_UNREGISTERING);
11081 flush_all_backlogs();
11085 list_for_each_entry(dev, head, unreg_list) {
11086 struct sk_buff *skb = NULL;
11088 /* Shutdown queueing discipline. */
11090 dev_tcx_uninstall(dev);
11091 dev_xdp_uninstall(dev);
11092 bpf_dev_bound_netdev_unregister(dev);
11094 netdev_offload_xstats_disable_all(dev);
11096 /* Notify protocols, that we are about to destroy
11097 * this device. They should clean all the things.
11099 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
11101 if (!dev->rtnl_link_ops ||
11102 dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
11103 skb = rtmsg_ifinfo_build_skb(RTM_DELLINK, dev, ~0U, 0,
11104 GFP_KERNEL, NULL, 0,
11108 * Flush the unicast and multicast chains
11113 netdev_name_node_alt_flush(dev);
11114 netdev_name_node_free(dev->name_node);
11116 call_netdevice_notifiers(NETDEV_PRE_UNINIT, dev);
11118 if (dev->netdev_ops->ndo_uninit)
11119 dev->netdev_ops->ndo_uninit(dev);
11122 rtmsg_ifinfo_send(skb, dev, GFP_KERNEL, portid, nlh);
11124 /* Notifier chain MUST detach us all upper devices. */
11125 WARN_ON(netdev_has_any_upper_dev(dev));
11126 WARN_ON(netdev_has_any_lower_dev(dev));
11128 /* Remove entries from kobject tree */
11129 netdev_unregister_kobject(dev);
11131 /* Remove XPS queueing entries */
11132 netif_reset_xps_queues_gt(dev, 0);
11138 list_for_each_entry(dev, head, unreg_list) {
11139 netdev_put(dev, &dev->dev_registered_tracker);
11143 atomic_add(cnt, &dev_unreg_count);
11149 * unregister_netdevice_many - unregister many devices
11150 * @head: list of devices
11152 * Note: As most callers use a stack allocated list_head,
11153	 * we force a list_del() to make sure stack won't be corrupted later.
11155 void unregister_netdevice_many(struct list_head *head)
11157 unregister_netdevice_many_notify(head, 0, NULL);
11159 EXPORT_SYMBOL(unregister_netdevice_many);
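/*
 * Illustrative sketch, not part of the original file: tearing several
 * devices down in one RTNL section.  Queueing them on a local list and
 * unregistering them together amortises the notifier storms and the
 * refcount waits over the whole batch.
 */
static void example_unregister_group(struct net_device **devs, int n)
{
	LIST_HEAD(kill_list);
	int i;

	ASSERT_RTNL();

	for (i = 0; i < n; i++)
		unregister_netdevice_queue(devs[i], &kill_list);
	unregister_netdevice_many(&kill_list);
}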
11162 * unregister_netdev - remove device from the kernel
11165 * This function shuts down a device interface and removes it
11166 * from the kernel tables.
11168 * This is just a wrapper for unregister_netdevice that takes
11169 * the rtnl semaphore. In general you want to use this and not
11170 * unregister_netdevice.
11172 void unregister_netdev(struct net_device *dev)
11175 unregister_netdevice(dev);
11178 EXPORT_SYMBOL(unregister_netdev);
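/*
 * Illustrative sketch, not part of the original file: the usual
 * remove-time pairing for a driver that used register_netdev() and did
 * not set needs_free_netdev.
 */
static void example_remove(struct net_device *dev)
{
	unregister_netdev(dev);		/* takes and drops the rtnl lock */
	free_netdev(dev);
}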
11181	 *	__dev_change_net_namespace - move device to a different network namespace
11183 * @net: network namespace
11184 * @pat: If not NULL name pattern to try if the current device name
11185 * is already taken in the destination network namespace.
11186 * @new_ifindex: If not zero, specifies device index in the target
11189 * This function shuts down a device interface and moves it
11190 * to a new network namespace. On success 0 is returned, on
11191	 *	a failure a negative errno code is returned.
11193 * Callers must hold the rtnl semaphore.
11196 int __dev_change_net_namespace(struct net_device *dev, struct net *net,
11197 const char *pat, int new_ifindex)
11199 struct netdev_name_node *name_node;
11200 struct net *net_old = dev_net(dev);
11201 char new_name[IFNAMSIZ] = {};
11206 /* Don't allow namespace local devices to be moved. */
11208 if (dev->features & NETIF_F_NETNS_LOCAL)
11211	/* Ensure the device has been registered */
11212 if (dev->reg_state != NETREG_REGISTERED)
11215	/* Get out if there is nothing to do */
11217 if (net_eq(net_old, net))
11220 /* Pick the destination device name, and ensure
11221 * we can use it in the destination network namespace.
11224 if (netdev_name_in_use(net, dev->name)) {
11225 /* We get here if we can't use the current device name */
11228 err = dev_prep_valid_name(net, dev, pat, new_name, EEXIST);
11232 /* Check that none of the altnames conflicts. */
11234 netdev_for_each_altname(dev, name_node)
11235 if (netdev_name_in_use(net, name_node->name))
11238 /* Check that new_ifindex isn't used yet. */
11240 err = dev_index_reserve(net, new_ifindex);
11244 /* If there is an ifindex conflict assign a new one */
11245 err = dev_index_reserve(net, dev->ifindex);
11247 err = dev_index_reserve(net, 0);
11254	 * And now a mini version of register_netdevice() and unregister_netdevice().
11257 /* If device is running close it first. */
11260 /* And unlink it from device chain */
11261 unlist_netdevice(dev);
11265 /* Shutdown queueing discipline. */
11268 /* Notify protocols, that we are about to destroy
11269 * this device. They should clean all the things.
11271 * Note that dev->reg_state stays at NETREG_REGISTERED.
11272 * This is wanted because this way 8021q and macvlan know
11273 * the device is just moving and can keep their slaves up.
11275 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
11278 new_nsid = peernet2id_alloc(dev_net(dev), net, GFP_KERNEL);
11280 rtmsg_ifinfo_newnet(RTM_DELLINK, dev, ~0U, GFP_KERNEL, &new_nsid,
11284 * Flush the unicast and multicast chains
11289 /* Send a netdev-removed uevent to the old namespace */
11290 kobject_uevent(&dev->dev.kobj, KOBJ_REMOVE);
11291 netdev_adjacent_del_links(dev);
11293 /* Move per-net netdevice notifiers that are following the netdevice */
11294 move_netdevice_notifiers_dev_net(dev, net);
11296 /* Actually switch the network namespace */
11297 dev_net_set(dev, net);
11298 dev->ifindex = new_ifindex;
11300 if (new_name[0]) /* Rename the netdev to prepared name */
11301 strscpy(dev->name, new_name, IFNAMSIZ);
11303 /* Fixup kobjects */
11304 dev_set_uevent_suppress(&dev->dev, 1);
11305 err = device_rename(&dev->dev, dev->name);
11306 dev_set_uevent_suppress(&dev->dev, 0);
11309 /* Send a netdev-add uevent to the new namespace */
11310 kobject_uevent(&dev->dev.kobj, KOBJ_ADD);
11311 netdev_adjacent_add_links(dev);
11313 /* Adapt owner in case owning user namespace of target network
11314 * namespace is different from the original one.
11316 err = netdev_change_owner(dev, net_old, net);
11319 /* Add the device back in the hashes */
11320 list_netdevice(dev);
11322 /* Notify protocols, that a new device appeared. */
11323 call_netdevice_notifiers(NETDEV_REGISTER, dev);
11326 * Prevent userspace races by waiting until the network
11327 * device is fully setup before sending notifications.
11329 rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U, GFP_KERNEL, 0, NULL);
11336 EXPORT_SYMBOL_GPL(__dev_change_net_namespace);
11338 static int dev_cpu_dead(unsigned int oldcpu)
11340 struct sk_buff **list_skb;
11341 struct sk_buff *skb;
11343 struct softnet_data *sd, *oldsd, *remsd = NULL;
11345 local_irq_disable();
11346 cpu = smp_processor_id();
11347 sd = &per_cpu(softnet_data, cpu);
11348 oldsd = &per_cpu(softnet_data, oldcpu);
11350 /* Find end of our completion_queue. */
11351 list_skb = &sd->completion_queue;
11353 list_skb = &(*list_skb)->next;
11354 /* Append completion queue from offline CPU. */
11355 *list_skb = oldsd->completion_queue;
11356 oldsd->completion_queue = NULL;
11358 /* Append output queue from offline CPU. */
11359 if (oldsd->output_queue) {
11360 *sd->output_queue_tailp = oldsd->output_queue;
11361 sd->output_queue_tailp = oldsd->output_queue_tailp;
11362 oldsd->output_queue = NULL;
11363 oldsd->output_queue_tailp = &oldsd->output_queue;
11365 /* Append NAPI poll list from offline CPU, with one exception :
11366 * process_backlog() must be called by cpu owning percpu backlog.
11367 * We properly handle process_queue & input_pkt_queue later.
11369 while (!list_empty(&oldsd->poll_list)) {
11370 struct napi_struct *napi = list_first_entry(&oldsd->poll_list,
11371 struct napi_struct,
11374 list_del_init(&napi->poll_list);
11375 if (napi->poll == process_backlog)
11378 ____napi_schedule(sd, napi);
11381 raise_softirq_irqoff(NET_TX_SOFTIRQ);
11382 local_irq_enable();
11385 remsd = oldsd->rps_ipi_list;
11386 oldsd->rps_ipi_list = NULL;
11388 /* send out pending IPI's on offline CPU */
11389 net_rps_send_ipi(remsd);
11391 /* Process offline CPU's input_pkt_queue */
11392 while ((skb = __skb_dequeue(&oldsd->process_queue))) {
11394 input_queue_head_incr(oldsd);
11396 while ((skb = skb_dequeue(&oldsd->input_pkt_queue))) {
11398 input_queue_head_incr(oldsd);
11405 * netdev_increment_features - increment feature set by one
11406 * @all: current feature set
11407 * @one: new feature set
11408 * @mask: mask feature set
11410 * Computes a new feature set after adding a device with feature set
11411 * @one to the master device with current feature set @all. Will not
11412 * enable anything that is off in @mask. Returns the new feature set.
11414 netdev_features_t netdev_increment_features(netdev_features_t all,
11415 netdev_features_t one, netdev_features_t mask)
11417 if (mask & NETIF_F_HW_CSUM)
11418 mask |= NETIF_F_CSUM_MASK;
11419 mask |= NETIF_F_VLAN_CHALLENGED;
11421 all |= one & (NETIF_F_ONE_FOR_ALL | NETIF_F_CSUM_MASK) & mask;
11422 all &= one | ~NETIF_F_ALL_FOR_ALL;
11424 /* If one device supports hw checksumming, set for all. */
11425 if (all & NETIF_F_HW_CSUM)
11426 all &= ~(NETIF_F_CSUM_MASK & ~NETIF_F_HW_CSUM);
11430 EXPORT_SYMBOL(netdev_increment_features);
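/*
 * Illustrative sketch, not part of the original file: how a bonding or
 * bridge style master might fold each slave's feature set into its own,
 * in the spirit of bond_compute_features().  The slave array is
 * hypothetical.
 */
static netdev_features_t example_master_features(struct net_device *master,
						 struct net_device **slaves,
						 int n)
{
	netdev_features_t mask = master->features;
	netdev_features_t features;
	int i;

	/* start with "one for all" bits cleared and "all for all" bits set */
	features = (mask & ~NETIF_F_ONE_FOR_ALL) | NETIF_F_ALL_FOR_ALL;

	for (i = 0; i < n; i++)
		features = netdev_increment_features(features,
						     slaves[i]->features, mask);

	return features;
}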
11432 static struct hlist_head * __net_init netdev_create_hash(void)
11435 struct hlist_head *hash;
11437 hash = kmalloc_array(NETDEV_HASHENTRIES, sizeof(*hash), GFP_KERNEL);
11439 for (i = 0; i < NETDEV_HASHENTRIES; i++)
11440 INIT_HLIST_HEAD(&hash[i]);
11445 /* Initialize per network namespace state */
11446 static int __net_init netdev_init(struct net *net)
11448 BUILD_BUG_ON(GRO_HASH_BUCKETS >
11449 8 * sizeof_field(struct napi_struct, gro_bitmask));
11451 INIT_LIST_HEAD(&net->dev_base_head);
11453 net->dev_name_head = netdev_create_hash();
11454 if (net->dev_name_head == NULL)
11457 net->dev_index_head = netdev_create_hash();
11458 if (net->dev_index_head == NULL)
11461 xa_init_flags(&net->dev_by_index, XA_FLAGS_ALLOC1);
11463 RAW_INIT_NOTIFIER_HEAD(&net->netdev_chain);
11468 kfree(net->dev_name_head);
11474 * netdev_drivername - network driver for the device
11475 * @dev: network device
11477 * Determine network driver for device.
11479 const char *netdev_drivername(const struct net_device *dev)
11481 const struct device_driver *driver;
11482 const struct device *parent;
11483 const char *empty = "";
11485 parent = dev->dev.parent;
11489 driver = parent->driver;
11490 if (driver && driver->name)
11491 return driver->name;
11495 static void __netdev_printk(const char *level, const struct net_device *dev,
11496 struct va_format *vaf)
11498 if (dev && dev->dev.parent) {
11499 dev_printk_emit(level[1] - '0',
11502 dev_driver_string(dev->dev.parent),
11503 dev_name(dev->dev.parent),
11504 netdev_name(dev), netdev_reg_state(dev),
11507 printk("%s%s%s: %pV",
11508 level, netdev_name(dev), netdev_reg_state(dev), vaf);
11510 printk("%s(NULL net_device): %pV", level, vaf);
11514 void netdev_printk(const char *level, const struct net_device *dev,
11515 const char *format, ...)
11517 struct va_format vaf;
11520 va_start(args, format);
11525 __netdev_printk(level, dev, &vaf);
11529 EXPORT_SYMBOL(netdev_printk);
11531 #define define_netdev_printk_level(func, level) \
11532 void func(const struct net_device *dev, const char *fmt, ...) \
11534 struct va_format vaf; \
11537 va_start(args, fmt); \
11542 __netdev_printk(level, dev, &vaf); \
11546 EXPORT_SYMBOL(func);
11548 define_netdev_printk_level(netdev_emerg, KERN_EMERG);
11549 define_netdev_printk_level(netdev_alert, KERN_ALERT);
11550 define_netdev_printk_level(netdev_crit, KERN_CRIT);
11551 define_netdev_printk_level(netdev_err, KERN_ERR);
11552 define_netdev_printk_level(netdev_warn, KERN_WARNING);
11553 define_netdev_printk_level(netdev_notice, KERN_NOTICE);
11554 define_netdev_printk_level(netdev_info, KERN_INFO);
11556 static void __net_exit netdev_exit(struct net *net)
11558 kfree(net->dev_name_head);
11559 kfree(net->dev_index_head);
11560 xa_destroy(&net->dev_by_index);
11561 if (net != &init_net)
11562 WARN_ON_ONCE(!list_empty(&net->dev_base_head));
11565 static struct pernet_operations __net_initdata netdev_net_ops = {
11566 .init = netdev_init,
11567 .exit = netdev_exit,
11570 static void __net_exit default_device_exit_net(struct net *net)
11572 struct netdev_name_node *name_node, *tmp;
11573 struct net_device *dev, *aux;
11575 * Push all migratable network devices back to the
11576 * initial network namespace
11579 for_each_netdev_safe(net, dev, aux) {
11581 char fb_name[IFNAMSIZ];
11583 /* Ignore unmoveable devices (i.e. loopback) */
11584 if (dev->features & NETIF_F_NETNS_LOCAL)
11587 /* Leave virtual devices for the generic cleanup */
11588 if (dev->rtnl_link_ops && !dev->rtnl_link_ops->netns_refund)
11591 /* Push remaining network devices to init_net */
11592 snprintf(fb_name, IFNAMSIZ, "dev%d", dev->ifindex);
11593 if (netdev_name_in_use(&init_net, fb_name))
11594 snprintf(fb_name, IFNAMSIZ, "dev%%d");
11596 netdev_for_each_altname_safe(dev, name_node, tmp)
11597 if (netdev_name_in_use(&init_net, name_node->name))
11598 __netdev_name_node_alt_destroy(name_node);
11600 err = dev_change_net_namespace(dev, &init_net, fb_name);
11602 pr_emerg("%s: failed to move %s to init_net: %d\n",
11603 __func__, dev->name, err);
11609 static void __net_exit default_device_exit_batch(struct list_head *net_list)
11611	/* At exit all network devices must be removed from a network
11612 * namespace. Do this in the reverse order of registration.
11613 * Do this across as many network namespaces as possible to
11614 * improve batching efficiency.
11616 struct net_device *dev;
11618 LIST_HEAD(dev_kill_list);
11621 list_for_each_entry(net, net_list, exit_list) {
11622 default_device_exit_net(net);
11626 list_for_each_entry(net, net_list, exit_list) {
11627 for_each_netdev_reverse(net, dev) {
11628 if (dev->rtnl_link_ops && dev->rtnl_link_ops->dellink)
11629 dev->rtnl_link_ops->dellink(dev, &dev_kill_list);
11631 unregister_netdevice_queue(dev, &dev_kill_list);
11634 unregister_netdevice_many(&dev_kill_list);
11638 static struct pernet_operations __net_initdata default_device_ops = {
11639 .exit_batch = default_device_exit_batch,
11642 static void __init net_dev_struct_check(void)
11644 /* TX read-mostly hotpath */
11645 CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_tx, priv_flags);
11646 CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_tx, netdev_ops);
11647 CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_tx, header_ops);
11648 CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_tx, _tx);
11649 CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_tx, real_num_tx_queues);
11650 CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_tx, gso_max_size);
11651 CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_tx, gso_ipv4_max_size);
11652 CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_tx, gso_max_segs);
11653 CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_tx, gso_partial_features);
11654 CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_tx, num_tc);
11655 CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_tx, mtu);
11656 CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_tx, needed_headroom);
11657 CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_tx, tc_to_txq);
11659 CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_tx, xps_maps);
11661 #ifdef CONFIG_NETFILTER_EGRESS
11662 CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_tx, nf_hooks_egress);
11664 #ifdef CONFIG_NET_XGRESS
11665 CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_tx, tcx_egress);
11667 CACHELINE_ASSERT_GROUP_SIZE(struct net_device, net_device_read_tx, 160);
11669 /* TXRX read-mostly hotpath */
11670 CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_txrx, lstats);
11671 CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_txrx, state);
11672 CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_txrx, flags);
11673 CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_txrx, hard_header_len);
11674 CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_txrx, features);
11675 CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_txrx, ip6_ptr);
11676 CACHELINE_ASSERT_GROUP_SIZE(struct net_device, net_device_read_txrx, 46);
11678 /* RX read-mostly hotpath */
11679 CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_rx, ptype_specific);
11680 CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_rx, ifindex);
11681 CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_rx, real_num_rx_queues);
11682 CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_rx, _rx);
11683 CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_rx, gro_flush_timeout);
11684 CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_rx, napi_defer_hard_irqs);
11685 CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_rx, gro_max_size);
11686 CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_rx, gro_ipv4_max_size);
11687 CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_rx, rx_handler);
11688 CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_rx, rx_handler_data);
11689 CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_rx, nd_net);
11690 #ifdef CONFIG_NETPOLL
11691 CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_rx, npinfo);
11693 #ifdef CONFIG_NET_XGRESS
11694 CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_rx, tcx_ingress);
11696 CACHELINE_ASSERT_GROUP_SIZE(struct net_device, net_device_read_rx, 104);
11700 * Initialize the DEV module. At boot time this walks the device list and
11701 * unhooks any devices that fail to initialise (normally hardware not
11702 * present) and leaves us with a valid list of present and active devices.
11706 /* We allocate 256 pages for each CPU if PAGE_SHIFT is 12 */
11707 #define SYSTEM_PERCPU_PAGE_POOL_SIZE ((1 << 20) / PAGE_SIZE)
11709 static int net_page_pool_create(int cpuid)
11711 #if IS_ENABLED(CONFIG_PAGE_POOL)
11712 struct page_pool_params page_pool_params = {
11713 .pool_size = SYSTEM_PERCPU_PAGE_POOL_SIZE,
11714 .flags = PP_FLAG_SYSTEM_POOL,
11715 .nid = NUMA_NO_NODE,
11717 struct page_pool *pp_ptr;
11719 pp_ptr = page_pool_create_percpu(&page_pool_params, cpuid);
11720 if (IS_ERR(pp_ptr))
11723 per_cpu(system_page_pool, cpuid) = pp_ptr;
11729 * This is called single threaded during boot, so no need
11730 * to take the rtnl semaphore.
11732 static int __init net_dev_init(void)
11734 int i, rc = -ENOMEM;
11736 BUG_ON(!dev_boot_phase);
11738 net_dev_struct_check();
11740 if (dev_proc_init())
11743 if (netdev_kobject_init())
11746 for (i = 0; i < PTYPE_HASH_SIZE; i++)
11747 INIT_LIST_HEAD(&ptype_base[i]);
11749 if (register_pernet_subsys(&netdev_net_ops))
11753 * Initialise the packet receive queues.
11756 for_each_possible_cpu(i) {
11757 struct work_struct *flush = per_cpu_ptr(&flush_works, i);
11758 struct softnet_data *sd = &per_cpu(softnet_data, i);
11760 INIT_WORK(flush, flush_backlog);
11762 skb_queue_head_init(&sd->input_pkt_queue);
11763 skb_queue_head_init(&sd->process_queue);
11764 #ifdef CONFIG_XFRM_OFFLOAD
11765 skb_queue_head_init(&sd->xfrm_backlog);
11767 INIT_LIST_HEAD(&sd->poll_list);
11768 sd->output_queue_tailp = &sd->output_queue;
11770 INIT_CSD(&sd->csd, rps_trigger_softirq, sd);
11773 INIT_CSD(&sd->defer_csd, trigger_rx_softirq, sd);
11774 spin_lock_init(&sd->defer_lock);
11776 init_gro_hash(&sd->backlog);
11777 sd->backlog.poll = process_backlog;
11778 sd->backlog.weight = weight_p;
11780 if (net_page_pool_create(i))
11784 dev_boot_phase = 0;
11786	/* The loopback device is special: if any other network device
11787	 * is present in a network namespace, the loopback device must
11788	 * be present. Since we now dynamically allocate and free the
11789	 * loopback device, ensure this invariant is maintained by
11790	 * keeping the loopback device as the first device on the
11791	 * list of network devices. Ensuring the loopback device
11792	 * is the first device that appears and the last network device
11795 if (register_pernet_device(&loopback_net_ops))
11798 if (register_pernet_device(&default_device_ops))
11801 open_softirq(NET_TX_SOFTIRQ, net_tx_action);
11802 open_softirq(NET_RX_SOFTIRQ, net_rx_action);
11804 rc = cpuhp_setup_state_nocalls(CPUHP_NET_DEV_DEAD, "net/dev:dead",
11805 NULL, dev_cpu_dead);
11810 for_each_possible_cpu(i) {
11811 struct page_pool *pp_ptr;
11813 pp_ptr = per_cpu(system_page_pool, i);
11817 page_pool_destroy(pp_ptr);
11818 per_cpu(system_page_pool, i) = NULL;
11825 subsys_initcall(net_dev_init);