// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *	NET3	Protocol independent device support routines.
 *
 *	Derived from the non IP parts of dev.c 1.0.19
 *		Authors:	Ross Biro
 *				Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *				Mark Evans, <evansmp@uhura.aston.ac.uk>
 *
 *	Additional Authors:
 *		Florian la Roche <rzsfl@rz.uni-sb.de>
 *		Alan Cox <gw4pts@gw4pts.ampr.org>
 *		David Hinds <dahinds@users.sourceforge.net>
 *		Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
 *		Adam Sulmicki <adam@cfar.umd.edu>
 *		Pekka Riikonen <priikone@poesidon.pspt.fi>
 *
 *	Changes:
 *		D.J. Barrow	:	Fixed bug where dev->refcnt gets set
 *					to 2 if register_netdev gets called
 *					before net_dev_init & also removed a
 *					few lines of code in the process.
 *		Alan Cox	:	device private ioctl copies fields back.
 *		Alan Cox	:	Transmit queue code does relevant
 *					stunts to keep the queue safe.
 *		Alan Cox	:	Fixed double lock.
 *		Alan Cox	:	Fixed promisc NULL pointer trap
 *		????????	:	Support the full private ioctl range
 *		Alan Cox	:	Moved ioctl permission check into
 *					drivers
 *		Tim Kordas	:	SIOCADDMULTI/SIOCDELMULTI
 *		Alan Cox	:	100 backlog just doesn't cut it when
 *					you start doing multicast video 8)
 *		Alan Cox	:	Rewrote net_bh and list manager.
 *		Alan Cox	:	Fix ETH_P_ALL echoback lengths.
 *		Alan Cox	:	Took out transmit every packet pass
 *					Saved a few bytes in the ioctl handler
 *		Alan Cox	:	Network driver sets packet type before
 *					calling netif_rx. Saves a function
 *					call a packet.
 *		Alan Cox	:	Hashed net_bh()
 *		Richard Kooijman:	Timestamp fixes.
 *		Alan Cox	:	Wrong field in SIOCGIFDSTADDR
 *		Alan Cox	:	Device lock protection.
 *		Alan Cox	:	Fixed nasty side effect of device close
 *					changes.
 *		Rudi Cilibrasi	:	Pass the right thing to
 *					set_mac_address()
 *		Dave Miller	:	32bit quantity for the device lock to
 *					make it work out on a Sparc.
 *		Bjorn Ekwall	:	Added KERNELD hack.
 *		Alan Cox	:	Cleaned up the backlog initialise.
 *		Craig Metz	:	SIOCGIFCONF fix if space for under
 *					1 device.
 *		Thomas Bogendoerfer :	Return ENODEV for dev_open, if there
 *					is no device open function.
 *		Andi Kleen	:	Fix error reporting for SIOCGIFCONF
 *		Michael Chastain:	Fix signed/unsigned for SIOCGIFCONF
 *		Cyrus Durgin	:	Cleaned for KMOD
 *		Adam Sulmicki	:	Bug Fix : Network Device Unload
 *					A network device unload needs to purge
 *					the backlog queue.
 *		Paul Rusty Russell :	SIOCSIFNAME
 *		Pekka Riikonen	:	Netdev boot-time settings code
 *		Andrew Morton	:	Make unregister_netdevice wait
 *					indefinitely on dev->refcnt
 *		J Hadi Salim	:	- Backlog queue sampling
 *					- netif_rx() feedback
 */

#include <linux/uaccess.h>
#include <linux/bitops.h>
#include <linux/capability.h>
#include <linux/cpu.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/hash.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/mutex.h>
#include <linux/rwsem.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/skbuff.h>
#include <linux/kthread.h>
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/busy_poll.h>
#include <linux/rtnetlink.h>
#include <linux/stat.h>
#include <net/dsa.h>
#include <net/dst.h>
#include <net/dst_metadata.h>
#include <net/gro.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <net/checksum.h>
#include <net/xfrm.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/netpoll.h>
#include <linux/rcupdate.h>
#include <linux/delay.h>
#include <net/iw_handler.h>
#include <asm/current.h>
#include <linux/audit.h>
#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/ctype.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <net/ip.h>
#include <net/mpls.h>
#include <linux/ipv6.h>
#include <linux/in.h>
#include <linux/jhash.h>
#include <linux/random.h>
#include <trace/events/napi.h>
#include <trace/events/net.h>
#include <trace/events/skb.h>
#include <linux/inetdevice.h>
#include <linux/cpu_rmap.h>
#include <linux/static_key.h>
#include <linux/hashtable.h>
#include <linux/vmalloc.h>
#include <linux/if_macvlan.h>
#include <linux/errqueue.h>
#include <linux/hrtimer.h>
#include <linux/netfilter_ingress.h>
#include <linux/crash_dump.h>
#include <linux/sctp.h>
#include <net/udp_tunnel.h>
#include <linux/net_namespace.h>
#include <linux/indirect_call_wrapper.h>
#include <net/devlink.h>
#include <linux/pm_runtime.h>
#include <linux/prandom.h>

#include "net-sysfs.h"

#define MAX_GRO_SKBS 8

/* This should be increased if a protocol with a bigger head is added. */
#define GRO_MAX_HEAD (MAX_HEADER + 128)

static DEFINE_SPINLOCK(ptype_lock);
static DEFINE_SPINLOCK(offload_lock);
struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly;
struct list_head ptype_all __read_mostly;	/* Taps */
static struct list_head offload_base __read_mostly;

static int netif_rx_internal(struct sk_buff *skb);
static int call_netdevice_notifiers_info(unsigned long val,
					 struct netdev_notifier_info *info);
static int call_netdevice_notifiers_extack(unsigned long val,
					   struct net_device *dev,
					   struct netlink_ext_ack *extack);
static struct napi_struct *napi_by_id(unsigned int napi_id);

/*
 * The @dev_base_head list is protected by @dev_base_lock and the rtnl
 * semaphore.
 *
 * Pure readers hold dev_base_lock for reading, or rcu_read_lock()
 *
 * Writers must hold the rtnl semaphore while they loop through the
 * dev_base_head list, and hold dev_base_lock for writing when they do the
 * actual updates.  This allows pure readers to access the list even
 * while a writer is preparing to update it.
 *
 * To put it another way, dev_base_lock is held for writing only to
 * protect against pure readers; the rtnl semaphore provides the
 * protection against other writers.
 *
 * See, for example usages, register_netdevice() and
 * unregister_netdevice(), which must be called with the rtnl
 * semaphore held.
 */
DEFINE_RWLOCK(dev_base_lock);
EXPORT_SYMBOL(dev_base_lock);

static DEFINE_MUTEX(ifalias_mutex);

/* protects napi_hash addition/deletion and napi_gen_id */
static DEFINE_SPINLOCK(napi_hash_lock);

static unsigned int napi_gen_id = NR_CPUS;
static DEFINE_READ_MOSTLY_HASHTABLE(napi_hash, 8);

static DECLARE_RWSEM(devnet_rename_sem);

static inline void dev_base_seq_inc(struct net *net)
{
	while (++net->dev_base_seq == 0)
		;
}

static inline struct hlist_head *dev_name_hash(struct net *net, const char *name)
{
	unsigned int hash = full_name_hash(net, name, strnlen(name, IFNAMSIZ));

	return &net->dev_name_head[hash_32(hash, NETDEV_HASHBITS)];
}

static inline struct hlist_head *dev_index_hash(struct net *net, int ifindex)
{
	return &net->dev_index_head[ifindex & (NETDEV_HASHENTRIES - 1)];
}

static inline void rps_lock(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
	spin_lock(&sd->input_pkt_queue.lock);
#endif
}

static inline void rps_unlock(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
	spin_unlock(&sd->input_pkt_queue.lock);
#endif
}

static struct netdev_name_node *netdev_name_node_alloc(struct net_device *dev,
							const char *name)
{
	struct netdev_name_node *name_node;

	name_node = kmalloc(sizeof(*name_node), GFP_KERNEL);
	if (!name_node)
		return NULL;
	INIT_HLIST_NODE(&name_node->hlist);
	name_node->dev = dev;
	name_node->name = name;
	return name_node;
}

static struct netdev_name_node *
netdev_name_node_head_alloc(struct net_device *dev)
{
	struct netdev_name_node *name_node;

	name_node = netdev_name_node_alloc(dev, dev->name);
	if (!name_node)
		return NULL;
	INIT_LIST_HEAD(&name_node->list);
	return name_node;
}

static void netdev_name_node_free(struct netdev_name_node *name_node)
{
	kfree(name_node);
}

static void netdev_name_node_add(struct net *net,
				 struct netdev_name_node *name_node)
{
	hlist_add_head_rcu(&name_node->hlist,
			   dev_name_hash(net, name_node->name));
}

static void netdev_name_node_del(struct netdev_name_node *name_node)
{
	hlist_del_rcu(&name_node->hlist);
}

static struct netdev_name_node *netdev_name_node_lookup(struct net *net,
							 const char *name)
{
	struct hlist_head *head = dev_name_hash(net, name);
	struct netdev_name_node *name_node;

	hlist_for_each_entry(name_node, head, hlist)
		if (!strcmp(name_node->name, name))
			return name_node;
	return NULL;
}

static struct netdev_name_node *netdev_name_node_lookup_rcu(struct net *net,
							     const char *name)
{
	struct hlist_head *head = dev_name_hash(net, name);
	struct netdev_name_node *name_node;

	hlist_for_each_entry_rcu(name_node, head, hlist)
		if (!strcmp(name_node->name, name))
			return name_node;
	return NULL;
}

int netdev_name_node_alt_create(struct net_device *dev, const char *name)
{
	struct netdev_name_node *name_node;
	struct net *net = dev_net(dev);

	name_node = netdev_name_node_lookup(net, name);
	if (name_node)
		return -EEXIST;
	name_node = netdev_name_node_alloc(dev, name);
	if (!name_node)
		return -ENOMEM;
	netdev_name_node_add(net, name_node);
	/* The node that holds dev->name acts as a head of per-device list. */
	list_add_tail(&name_node->list, &dev->name_node->list);

	return 0;
}
EXPORT_SYMBOL(netdev_name_node_alt_create);

static void __netdev_name_node_alt_destroy(struct netdev_name_node *name_node)
{
	list_del(&name_node->list);
	netdev_name_node_del(name_node);
	kfree(name_node->name);
	netdev_name_node_free(name_node);
}

int netdev_name_node_alt_destroy(struct net_device *dev, const char *name)
{
	struct netdev_name_node *name_node;
	struct net *net = dev_net(dev);

	name_node = netdev_name_node_lookup(net, name);
	if (!name_node)
		return -ENOENT;
	/* lookup might have found our primary name or a name belonging
	 * to another device.
	 */
	if (name_node == dev->name_node || name_node->dev != dev)
		return -EINVAL;

	__netdev_name_node_alt_destroy(name_node);

	return 0;
}
EXPORT_SYMBOL(netdev_name_node_alt_destroy);

static void netdev_name_node_alt_flush(struct net_device *dev)
{
	struct netdev_name_node *name_node, *tmp;

	list_for_each_entry_safe(name_node, tmp, &dev->name_node->list, list)
		__netdev_name_node_alt_destroy(name_node);
}

/* Device list insertion */
static void list_netdevice(struct net_device *dev)
{
	struct net *net = dev_net(dev);

	ASSERT_RTNL();

	write_lock_bh(&dev_base_lock);
	list_add_tail_rcu(&dev->dev_list, &net->dev_base_head);
	netdev_name_node_add(net, dev->name_node);
	hlist_add_head_rcu(&dev->index_hlist,
			   dev_index_hash(net, dev->ifindex));
	write_unlock_bh(&dev_base_lock);

	dev_base_seq_inc(net);
}

/* Device list removal
 * caller must respect a RCU grace period before freeing/reusing dev
 */
static void unlist_netdevice(struct net_device *dev)
{
	ASSERT_RTNL();

	/* Unlink dev from the device chain */
	write_lock_bh(&dev_base_lock);
	list_del_rcu(&dev->dev_list);
	netdev_name_node_del(dev->name_node);
	hlist_del_rcu(&dev->index_hlist);
	write_unlock_bh(&dev_base_lock);

	dev_base_seq_inc(dev_net(dev));
}

/*
 * Our notifier list
 */

static RAW_NOTIFIER_HEAD(netdev_chain);

/*
 * Device drivers call our routines to queue packets here. We empty the
 * queue in the local softnet handler.
 */

DEFINE_PER_CPU_ALIGNED(struct softnet_data, softnet_data);
EXPORT_PER_CPU_SYMBOL(softnet_data);

#ifdef CONFIG_LOCKDEP
/*
 * register_netdevice() inits txq->_xmit_lock and sets lockdep class
 * according to dev->type
 */
static const unsigned short netdev_lock_type[] = {
	ARPHRD_NETROM, ARPHRD_ETHER, ARPHRD_EETHER, ARPHRD_AX25,
	ARPHRD_PRONET, ARPHRD_CHAOS, ARPHRD_IEEE802, ARPHRD_ARCNET,
	ARPHRD_APPLETLK, ARPHRD_DLCI, ARPHRD_ATM, ARPHRD_METRICOM,
	ARPHRD_IEEE1394, ARPHRD_EUI64, ARPHRD_INFINIBAND, ARPHRD_SLIP,
	ARPHRD_CSLIP, ARPHRD_SLIP6, ARPHRD_CSLIP6, ARPHRD_RSRVD,
	ARPHRD_ADAPT, ARPHRD_ROSE, ARPHRD_X25, ARPHRD_HWX25,
	ARPHRD_PPP, ARPHRD_CISCO, ARPHRD_LAPB, ARPHRD_DDCMP,
	ARPHRD_RAWHDLC, ARPHRD_TUNNEL, ARPHRD_TUNNEL6, ARPHRD_FRAD,
	ARPHRD_SKIP, ARPHRD_LOOPBACK, ARPHRD_LOCALTLK, ARPHRD_FDDI,
	ARPHRD_BIF, ARPHRD_SIT, ARPHRD_IPDDP, ARPHRD_IPGRE,
	ARPHRD_PIMREG, ARPHRD_HIPPI, ARPHRD_ASH, ARPHRD_ECONET,
	ARPHRD_IRDA, ARPHRD_FCPP, ARPHRD_FCAL, ARPHRD_FCPL,
	ARPHRD_FCFABRIC, ARPHRD_IEEE80211, ARPHRD_IEEE80211_PRISM,
	ARPHRD_IEEE80211_RADIOTAP, ARPHRD_PHONET, ARPHRD_PHONET_PIPE,
	ARPHRD_IEEE802154, ARPHRD_VOID, ARPHRD_NONE};

static const char *const netdev_lock_name[] = {
	"_xmit_NETROM", "_xmit_ETHER", "_xmit_EETHER", "_xmit_AX25",
	"_xmit_PRONET", "_xmit_CHAOS", "_xmit_IEEE802", "_xmit_ARCNET",
	"_xmit_APPLETLK", "_xmit_DLCI", "_xmit_ATM", "_xmit_METRICOM",
	"_xmit_IEEE1394", "_xmit_EUI64", "_xmit_INFINIBAND", "_xmit_SLIP",
	"_xmit_CSLIP", "_xmit_SLIP6", "_xmit_CSLIP6", "_xmit_RSRVD",
	"_xmit_ADAPT", "_xmit_ROSE", "_xmit_X25", "_xmit_HWX25",
	"_xmit_PPP", "_xmit_CISCO", "_xmit_LAPB", "_xmit_DDCMP",
	"_xmit_RAWHDLC", "_xmit_TUNNEL", "_xmit_TUNNEL6", "_xmit_FRAD",
	"_xmit_SKIP", "_xmit_LOOPBACK", "_xmit_LOCALTLK", "_xmit_FDDI",
	"_xmit_BIF", "_xmit_SIT", "_xmit_IPDDP", "_xmit_IPGRE",
	"_xmit_PIMREG", "_xmit_HIPPI", "_xmit_ASH", "_xmit_ECONET",
	"_xmit_IRDA", "_xmit_FCPP", "_xmit_FCAL", "_xmit_FCPL",
	"_xmit_FCFABRIC", "_xmit_IEEE80211", "_xmit_IEEE80211_PRISM",
	"_xmit_IEEE80211_RADIOTAP", "_xmit_PHONET", "_xmit_PHONET_PIPE",
	"_xmit_IEEE802154", "_xmit_VOID", "_xmit_NONE"};

static struct lock_class_key netdev_xmit_lock_key[ARRAY_SIZE(netdev_lock_type)];
static struct lock_class_key netdev_addr_lock_key[ARRAY_SIZE(netdev_lock_type)];

static inline unsigned short netdev_lock_pos(unsigned short dev_type)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(netdev_lock_type); i++)
		if (netdev_lock_type[i] == dev_type)
			return i;
	/* the last key is used by default */
	return ARRAY_SIZE(netdev_lock_type) - 1;
}

static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
						 unsigned short dev_type)
{
	int i;

	i = netdev_lock_pos(dev_type);
	lockdep_set_class_and_name(lock, &netdev_xmit_lock_key[i],
				   netdev_lock_name[i]);
}

static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
{
	int i;

	i = netdev_lock_pos(dev->type);
	lockdep_set_class_and_name(&dev->addr_list_lock,
				   &netdev_addr_lock_key[i],
				   netdev_lock_name[i]);
}
#else
static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
						 unsigned short dev_type)
{
}

static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
{
}
#endif

/*******************************************************************************
 *
 *		Protocol management and registration routines
 *
 *******************************************************************************/


/*
 *	Add a protocol ID to the list. Now that the input handler is
 *	smarter we can dispense with all the messy stuff that used to be
 *	here.
 *
 *	BEWARE!!! Protocol handlers, mangling input packets,
 *	MUST BE last in hash buckets and checking protocol handlers
 *	MUST start from promiscuous ptype_all chain in net_bh.
 *	It is true now, do not change it.
 *	Explanation follows: if protocol handler, mangling packet, will
 *	be the first on list, it is not able to sense, that packet
 *	is cloned and should be copied-on-write, so that it will
 *	change it and subsequent readers will get broken packet.
 *							--ANK (980803)
 */

static inline struct list_head *ptype_head(const struct packet_type *pt)
{
	if (pt->type == htons(ETH_P_ALL))
		return pt->dev ? &pt->dev->ptype_all : &ptype_all;
	else
		return pt->dev ? &pt->dev->ptype_specific :
				 &ptype_base[ntohs(pt->type) & PTYPE_HASH_MASK];
}

/**
 *	dev_add_pack - add packet handler
 *	@pt: packet type declaration
 *
 *	Add a protocol handler to the networking stack. The passed &packet_type
 *	is linked into kernel lists and may not be freed until it has been
 *	removed from the kernel lists.
 *
 *	This call does not sleep, therefore it cannot
 *	guarantee all CPUs that are in the middle of receiving packets
 *	will see the new packet type (until the next received packet).
 */

void dev_add_pack(struct packet_type *pt)
{
	struct list_head *head = ptype_head(pt);

	spin_lock(&ptype_lock);
	list_add_rcu(&pt->list, head);
	spin_unlock(&ptype_lock);
}
EXPORT_SYMBOL(dev_add_pack);
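
/* Illustrative sketch (not part of the original file): a module registering
 * a &packet_type to receive IPv4 packets alongside the stack's own ip_rcv().
 * The handler and variable names (my_ipv4_rcv, my_ptype) are hypothetical.
 *
 *	static int my_ipv4_rcv(struct sk_buff *skb, struct net_device *dev,
 *			       struct packet_type *pt,
 *			       struct net_device *orig_dev)
 *	{
 *		kfree_skb(skb);		// consume; a real handler would process it
 *		return NET_RX_SUCCESS;
 *	}
 *
 *	static struct packet_type my_ptype __read_mostly = {
 *		.type = cpu_to_be16(ETH_P_IP),
 *		.func = my_ipv4_rcv,
 *	};
 *
 *	dev_add_pack(&my_ptype);	// typically from module init
 */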

/**
 *	__dev_remove_pack - remove packet handler
 *	@pt: packet type declaration
 *
 *	Remove a protocol handler that was previously added to the kernel
 *	protocol handlers by dev_add_pack(). The passed &packet_type is removed
 *	from the kernel lists and can be freed or reused once this function
 *	returns.
 *
 *	The packet type might still be in use by receivers
 *	and must not be freed until after all the CPU's have gone
 *	through a quiescent state.
 */
void __dev_remove_pack(struct packet_type *pt)
{
	struct list_head *head = ptype_head(pt);
	struct packet_type *pt1;

	spin_lock(&ptype_lock);

	list_for_each_entry(pt1, head, list) {
		if (pt == pt1) {
			list_del_rcu(&pt->list);
			goto out;
		}
	}

	pr_warn("dev_remove_pack: %p not found\n", pt);
out:
	spin_unlock(&ptype_lock);
}
EXPORT_SYMBOL(__dev_remove_pack);

/**
 *	dev_remove_pack - remove packet handler
 *	@pt: packet type declaration
 *
 *	Remove a protocol handler that was previously added to the kernel
 *	protocol handlers by dev_add_pack(). The passed &packet_type is removed
 *	from the kernel lists and can be freed or reused once this function
 *	returns.
 *
 *	This call sleeps to guarantee that no CPU is looking at the packet
 *	type after return.
 */
void dev_remove_pack(struct packet_type *pt)
{
	__dev_remove_pack(pt);

	synchronize_net();
}
EXPORT_SYMBOL(dev_remove_pack);
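
/* Illustrative sketch (not part of the original file): the counterpart to
 * the dev_add_pack() example above, typically called from module exit.
 * Because dev_remove_pack() ends with synchronize_net(), the &packet_type
 * (and the module owning the handler) may safely go away once it returns:
 *
 *	dev_remove_pack(&my_ptype);	// my_ptype is the hypothetical tap above
 */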

/**
 *	dev_add_offload - register offload handlers
 *	@po: protocol offload declaration
 *
 *	Add protocol offload handlers to the networking stack. The passed
 *	&proto_offload is linked into kernel lists and may not be freed until
 *	it has been removed from the kernel lists.
 *
 *	This call does not sleep, therefore it cannot
 *	guarantee all CPUs that are in the middle of receiving packets
 *	will see the new offload handlers (until the next received packet).
 */
void dev_add_offload(struct packet_offload *po)
{
	struct packet_offload *elem;

	spin_lock(&offload_lock);
	list_for_each_entry(elem, &offload_base, list) {
		if (po->priority < elem->priority)
			break;
	}
	list_add_rcu(&po->list, elem->list.prev);
	spin_unlock(&offload_lock);
}
EXPORT_SYMBOL(dev_add_offload);

/**
 *	__dev_remove_offload - remove offload handler
 *	@po: packet offload declaration
 *
 *	Remove a protocol offload handler that was previously added to the
 *	kernel offload handlers by dev_add_offload(). The passed &offload_type
 *	is removed from the kernel lists and can be freed or reused once this
 *	function returns.
 *
 *	The packet type might still be in use by receivers
 *	and must not be freed until after all the CPU's have gone
 *	through a quiescent state.
 */
static void __dev_remove_offload(struct packet_offload *po)
{
	struct list_head *head = &offload_base;
	struct packet_offload *po1;

	spin_lock(&offload_lock);

	list_for_each_entry(po1, head, list) {
		if (po == po1) {
			list_del_rcu(&po->list);
			goto out;
		}
	}

	pr_warn("dev_remove_offload: %p not found\n", po);
out:
	spin_unlock(&offload_lock);
}

/**
 *	dev_remove_offload - remove packet offload handler
 *	@po: packet offload declaration
 *
 *	Remove a packet offload handler that was previously added to the kernel
 *	offload handlers by dev_add_offload(). The passed &offload_type is
 *	removed from the kernel lists and can be freed or reused once this
 *	function returns.
 *
 *	This call sleeps to guarantee that no CPU is looking at the packet
 *	type after return.
 */
void dev_remove_offload(struct packet_offload *po)
{
	__dev_remove_offload(po);

	synchronize_net();
}
EXPORT_SYMBOL(dev_remove_offload);

/******************************************************************************
 *
 *		      Device Boot-time Settings Routines
 *
 ******************************************************************************/

/* Boot time configuration table */
static struct netdev_boot_setup dev_boot_setup[NETDEV_BOOT_SETUP_MAX];

/**
 *	netdev_boot_setup_add - add new setup entry
 *	@name: name of the device
 *	@map: configured settings for the device
 *
 *	Adds new setup entry to the dev_boot_setup list.  The function
 *	returns 0 on error and 1 on success.  This is a generic routine
 *	for all netdevices.
 */
static int netdev_boot_setup_add(char *name, struct ifmap *map)
{
	struct netdev_boot_setup *s;
	int i;

	s = dev_boot_setup;
	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
		if (s[i].name[0] == '\0' || s[i].name[0] == ' ') {
			memset(s[i].name, 0, sizeof(s[i].name));
			strlcpy(s[i].name, name, IFNAMSIZ);
			memcpy(&s[i].map, map, sizeof(s[i].map));
			break;
		}
	}

	return i >= NETDEV_BOOT_SETUP_MAX ? 0 : 1;
}

/**
 * netdev_boot_setup_check - check boot time settings
 * @dev: the netdevice
 *
 * Check boot time settings for the device.
 * The found settings are set for the device to be used
 * later in the device probing.
 * Returns 0 if no settings found, 1 if they are.
 */
int netdev_boot_setup_check(struct net_device *dev)
{
	struct netdev_boot_setup *s = dev_boot_setup;
	int i;

	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
		if (s[i].name[0] != '\0' && s[i].name[0] != ' ' &&
		    !strcmp(dev->name, s[i].name)) {
			dev->irq = s[i].map.irq;
			dev->base_addr = s[i].map.base_addr;
			dev->mem_start = s[i].map.mem_start;
			dev->mem_end = s[i].map.mem_end;
			return 1;
		}
	}
	return 0;
}
EXPORT_SYMBOL(netdev_boot_setup_check);


/**
 * netdev_boot_base - get address from boot time settings
 * @prefix: prefix for network device
 * @unit: id for network device
 *
 * Check boot time settings for the base address of device.
 * The found settings are set for the device to be used
 * later in the device probing.
 * Returns 0 if no settings found.
 */
unsigned long netdev_boot_base(const char *prefix, int unit)
{
	const struct netdev_boot_setup *s = dev_boot_setup;
	char name[IFNAMSIZ];
	int i;

	sprintf(name, "%s%d", prefix, unit);

	/*
	 * If device already registered then return base of 1
	 * to indicate not to probe for this interface
	 */
	if (__dev_get_by_name(&init_net, name))
		return 1;

	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++)
		if (!strcmp(name, s[i].name))
			return s[i].map.base_addr;
	return 0;
}

/*
 * Saves at boot time configured settings for any netdevice.
 */
int __init netdev_boot_setup(char *str)
{
	int ints[5];
	struct ifmap map;

	str = get_options(str, ARRAY_SIZE(ints), ints);
	if (!str || !*str)
		return 0;

	/* Save settings */
	memset(&map, 0, sizeof(map));
	if (ints[0] > 0)
		map.irq = ints[1];
	if (ints[0] > 1)
		map.base_addr = ints[2];
	if (ints[0] > 2)
		map.mem_start = ints[3];
	if (ints[0] > 3)
		map.mem_end = ints[4];

	/* Add new entry to the list */
	return netdev_boot_setup_add(str, &map);
}

__setup("netdev=", netdev_boot_setup);
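
/* Illustrative note (not part of the original file): with the parser above,
 * a kernel command line such as
 *
 *	netdev=9,0x300,0,0,eth0
 *
 * stores irq=9 and base_addr=0x300 for "eth0"; get_options() consumes the
 * leading integers and leaves the name in @str for netdev_boot_setup_add().
 */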

/*******************************************************************************
 *
 *			    Device Interface Subroutines
 *
 *******************************************************************************/

/**
 *	dev_get_iflink - get 'iflink' value of an interface
 *	@dev: targeted interface
 *
 *	Indicates the ifindex the interface is linked to.
 *	Physical interfaces have the same 'ifindex' and 'iflink' values.
 */

int dev_get_iflink(const struct net_device *dev)
{
	if (dev->netdev_ops && dev->netdev_ops->ndo_get_iflink)
		return dev->netdev_ops->ndo_get_iflink(dev);

	return dev->ifindex;
}
EXPORT_SYMBOL(dev_get_iflink);

/**
 *	dev_fill_metadata_dst - Retrieve tunnel egress information.
 *	@dev: targeted interface
 *	@skb: The packet.
 *
 *	For better visibility of tunnel traffic OVS needs to retrieve
 *	egress tunnel information for a packet. The following API allows
 *	the user to get this info.
 */
int dev_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
{
	struct ip_tunnel_info *info;

	if (!dev->netdev_ops || !dev->netdev_ops->ndo_fill_metadata_dst)
		return -EINVAL;

	info = skb_tunnel_info_unclone(skb);
	if (!info)
		return -ENOMEM;
	if (unlikely(!(info->mode & IP_TUNNEL_INFO_TX)))
		return -EINVAL;

	return dev->netdev_ops->ndo_fill_metadata_dst(dev, skb);
}
EXPORT_SYMBOL_GPL(dev_fill_metadata_dst);

/**
 *	__dev_get_by_name - find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name. Must be called under RTNL semaphore
 *	or @dev_base_lock. If the name is found a pointer to the device
 *	is returned. If the name is not found then %NULL is returned. The
 *	reference counters are not incremented so the caller must be
 *	careful with locks.
 */

struct net_device *__dev_get_by_name(struct net *net, const char *name)
{
	struct netdev_name_node *node_name;

	node_name = netdev_name_node_lookup(net, name);
	return node_name ? node_name->dev : NULL;
}
EXPORT_SYMBOL(__dev_get_by_name);

/**
 * dev_get_by_name_rcu - find a device by its name
 * @net: the applicable net namespace
 * @name: name to find
 *
 * Find an interface by name.
 * If the name is found a pointer to the device is returned.
 * If the name is not found then %NULL is returned.
 * The reference counters are not incremented so the caller must be
 * careful with locks. The caller must hold RCU lock.
 */

struct net_device *dev_get_by_name_rcu(struct net *net, const char *name)
{
	struct netdev_name_node *node_name;

	node_name = netdev_name_node_lookup_rcu(net, name);
	return node_name ? node_name->dev : NULL;
}
EXPORT_SYMBOL(dev_get_by_name_rcu);

/**
 *	dev_get_by_name - find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name. This can be called from any
 *	context and does its own locking. The returned handle has
 *	the usage count incremented and the caller must use dev_put() to
 *	release it when it is no longer needed. %NULL is returned if no
 *	matching device is found.
 */

struct net_device *dev_get_by_name(struct net *net, const char *name)
{
	struct net_device *dev;

	rcu_read_lock();
	dev = dev_get_by_name_rcu(net, name);
	if (dev)
		dev_hold(dev);
	rcu_read_unlock();
	return dev;
}
EXPORT_SYMBOL(dev_get_by_name);
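
/* Illustrative sketch (not part of the original file): the refcounted lookup
 * pairs with dev_put() once the caller is done with the device:
 *
 *	struct net_device *dev = dev_get_by_name(&init_net, "eth0");
 *
 *	if (dev) {
 *		pr_info("ifindex of %s is %d\n", dev->name, dev->ifindex);
 *		dev_put(dev);	// drop the reference taken by the lookup
 *	}
 */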

/**
 *	__dev_get_by_index - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns %NULL if the device
 *	is not found or a pointer to the device. The device has not
 *	had its reference counter increased so the caller must be careful
 *	about locking. The caller must hold either the RTNL semaphore
 *	or @dev_base_lock.
 */

struct net_device *__dev_get_by_index(struct net *net, int ifindex)
{
	struct net_device *dev;
	struct hlist_head *head = dev_index_hash(net, ifindex);

	hlist_for_each_entry(dev, head, index_hlist)
		if (dev->ifindex == ifindex)
			return dev;

	return NULL;
}
EXPORT_SYMBOL(__dev_get_by_index);

/**
 *	dev_get_by_index_rcu - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns %NULL if the device
 *	is not found or a pointer to the device. The device has not
 *	had its reference counter increased so the caller must be careful
 *	about locking. The caller must hold RCU lock.
 */

struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex)
{
	struct net_device *dev;
	struct hlist_head *head = dev_index_hash(net, ifindex);

	hlist_for_each_entry_rcu(dev, head, index_hlist)
		if (dev->ifindex == ifindex)
			return dev;

	return NULL;
}
EXPORT_SYMBOL(dev_get_by_index_rcu);


/**
 *	dev_get_by_index - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns NULL if the device
 *	is not found or a pointer to the device. The device returned has
 *	had a reference added and the pointer is safe until the user calls
 *	dev_put to indicate they have finished with it.
 */

struct net_device *dev_get_by_index(struct net *net, int ifindex)
{
	struct net_device *dev;

	rcu_read_lock();
	dev = dev_get_by_index_rcu(net, ifindex);
	if (dev)
		dev_hold(dev);
	rcu_read_unlock();
	return dev;
}
EXPORT_SYMBOL(dev_get_by_index);
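
/* Illustrative sketch (not part of the original file): the RCU variant
 * avoids refcount traffic when the device is only touched inside the read
 * section (net, ifindex and mtu are assumed to be declared by the caller):
 *
 *	rcu_read_lock();
 *	dev = dev_get_by_index_rcu(net, ifindex);
 *	if (dev)
 *		mtu = READ_ONCE(dev->mtu);	// use dev only under RCU
 *	rcu_read_unlock();
 */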

/**
 *	dev_get_by_napi_id - find a device by napi_id
 *	@napi_id: ID of the NAPI struct
 *
 *	Search for an interface by NAPI ID. Returns %NULL if the device
 *	is not found or a pointer to the device. The device has not had
 *	its reference counter increased so the caller must be careful
 *	about locking. The caller must hold RCU lock.
 */

struct net_device *dev_get_by_napi_id(unsigned int napi_id)
{
	struct napi_struct *napi;

	WARN_ON_ONCE(!rcu_read_lock_held());

	if (napi_id < MIN_NAPI_ID)
		return NULL;

	napi = napi_by_id(napi_id);

	return napi ? napi->dev : NULL;
}
EXPORT_SYMBOL(dev_get_by_napi_id);

/**
 *	netdev_get_name - get a netdevice name, knowing its ifindex.
 *	@net: network namespace
 *	@name: a pointer to the buffer where the name will be stored.
 *	@ifindex: the ifindex of the interface to get the name from.
 */
int netdev_get_name(struct net *net, char *name, int ifindex)
{
	struct net_device *dev;
	int ret;

	down_read(&devnet_rename_sem);
	rcu_read_lock();

	dev = dev_get_by_index_rcu(net, ifindex);
	if (!dev) {
		ret = -ENODEV;
		goto out;
	}

	strcpy(name, dev->name);

	ret = 0;
out:
	rcu_read_unlock();
	up_read(&devnet_rename_sem);
	return ret;
}

/**
 *	dev_getbyhwaddr_rcu - find a device by its hardware address
 *	@net: the applicable net namespace
 *	@type: media type of device
 *	@ha: hardware address
 *
 *	Search for an interface by MAC address. Returns NULL if the device
 *	is not found or a pointer to the device.
 *	The caller must hold RCU or RTNL.
 *	The returned device has not had its ref count increased
 *	and the caller must therefore be careful about locking
 *
 */

struct net_device *dev_getbyhwaddr_rcu(struct net *net, unsigned short type,
				       const char *ha)
{
	struct net_device *dev;

	for_each_netdev_rcu(net, dev)
		if (dev->type == type &&
		    !memcmp(dev->dev_addr, ha, dev->addr_len))
			return dev;

	return NULL;
}
EXPORT_SYMBOL(dev_getbyhwaddr_rcu);

struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type)
{
	struct net_device *dev, *ret = NULL;

	rcu_read_lock();
	for_each_netdev_rcu(net, dev)
		if (dev->type == type) {
			dev_hold(dev);
			ret = dev;
			break;
		}
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL(dev_getfirstbyhwtype);

/**
 *	__dev_get_by_flags - find any device with given flags
 *	@net: the applicable net namespace
 *	@if_flags: IFF_* values
 *	@mask: bitmask of bits in if_flags to check
 *
 *	Search for any interface with the given flags. Returns NULL if a device
 *	is not found or a pointer to the device. Must be called inside
 *	rtnl_lock(), and result refcount is unchanged.
 */

struct net_device *__dev_get_by_flags(struct net *net, unsigned short if_flags,
				      unsigned short mask)
{
	struct net_device *dev, *ret;

	ASSERT_RTNL();

	ret = NULL;
	for_each_netdev(net, dev) {
		if (((dev->flags ^ if_flags) & mask) == 0) {
			ret = dev;
			break;
		}
	}
	return ret;
}
EXPORT_SYMBOL(__dev_get_by_flags);

/**
 *	dev_valid_name - check if name is okay for network device
 *	@name: name string
 *
 *	Network device names need to be valid file names to
 *	allow sysfs to work.  We also disallow any kind of
 *	whitespace.
 */
bool dev_valid_name(const char *name)
{
	if (*name == '\0')
		return false;
	if (strnlen(name, IFNAMSIZ) == IFNAMSIZ)
		return false;
	if (!strcmp(name, ".") || !strcmp(name, ".."))
		return false;

	while (*name) {
		if (*name == '/' || *name == ':' || isspace(*name))
			return false;
		name++;
	}
	return true;
}
EXPORT_SYMBOL(dev_valid_name);
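
/* Illustrative examples (not part of the original file) of what the checks
 * above accept and reject:
 *
 *	dev_valid_name("eth0")	-> true
 *	dev_valid_name("")	-> false (empty)
 *	dev_valid_name("a/b")	-> false ('/' would break sysfs paths)
 *	dev_valid_name("..")	-> false (reserved directory name)
 *	dev_valid_name("averylongname0123") -> false once it fills IFNAMSIZ
 */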

/**
 *	__dev_alloc_name - allocate a name for a device
 *	@net: network namespace to allocate the device name in
 *	@name: name format string
 *	@buf:  scratch buffer and result name string
 *
 *	Passed a format string - eg "lt%d" it will try and find a suitable
 *	id. It scans list of devices to build up a free map, then chooses
 *	the first empty slot. The caller must hold the dev_base or rtnl lock
 *	while allocating the name and adding the device in order to avoid
 *	duplicates.
 *	Limited to bits_per_byte * page size devices (ie 32K on most platforms).
 *	Returns the number of the unit assigned or a negative errno code.
 */

static int __dev_alloc_name(struct net *net, const char *name, char *buf)
{
	int i = 0;
	const char *p;
	const int max_netdevices = 8*PAGE_SIZE;
	unsigned long *inuse;
	struct net_device *d;

	if (!dev_valid_name(name))
		return -EINVAL;

	p = strchr(name, '%');
	if (p) {
		/*
		 * Verify the string as this thing may have come from
		 * the user.  There must be either one "%d" and no other "%"
		 * characters.
		 */
		if (p[1] != 'd' || strchr(p + 2, '%'))
			return -EINVAL;

		/* Use one page as a bit array of possible slots */
		inuse = (unsigned long *) get_zeroed_page(GFP_ATOMIC);
		if (!inuse)
			return -ENOMEM;

		for_each_netdev(net, d) {
			if (!sscanf(d->name, name, &i))
				continue;
			if (i < 0 || i >= max_netdevices)
				continue;

			/* avoid cases where sscanf is not exact inverse of printf */
			snprintf(buf, IFNAMSIZ, name, i);
			if (!strncmp(buf, d->name, IFNAMSIZ))
				set_bit(i, inuse);
		}

		i = find_first_zero_bit(inuse, max_netdevices);
		free_page((unsigned long) inuse);
	}

	snprintf(buf, IFNAMSIZ, name, i);
	if (!__dev_get_by_name(net, buf))
		return i;

	/* It is possible to run out of possible slots
	 * when the name is long and there isn't enough space left
	 * for the digits, or if all bits are used.
	 */
	return -ENFILE;
}

static int dev_alloc_name_ns(struct net *net,
			     struct net_device *dev,
			     const char *name)
{
	char buf[IFNAMSIZ];
	int ret;

	BUG_ON(!net);
	ret = __dev_alloc_name(net, name, buf);
	if (ret >= 0)
		strlcpy(dev->name, buf, IFNAMSIZ);
	return ret;
}

/**
 *	dev_alloc_name - allocate a name for a device
 *	@dev: device
 *	@name: name format string
 *
 *	Passed a format string - eg "lt%d" it will try and find a suitable
 *	id. It scans list of devices to build up a free map, then chooses
 *	the first empty slot. The caller must hold the dev_base or rtnl lock
 *	while allocating the name and adding the device in order to avoid
 *	duplicates.
 *	Limited to bits_per_byte * page size devices (ie 32K on most platforms).
 *	Returns the number of the unit assigned or a negative errno code.
 */

int dev_alloc_name(struct net_device *dev, const char *name)
{
	return dev_alloc_name_ns(dev_net(dev), dev, name);
}
EXPORT_SYMBOL(dev_alloc_name);
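
/* Illustrative note (not part of the original file): given existing devices
 * "eth0" and "eth2", dev_alloc_name(dev, "eth%d") fills dev->name with the
 * first free slot, "eth1", and returns 1. A plain name without '%' is used
 * as-is by dev_get_valid_name() below.
 */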

static int dev_get_valid_name(struct net *net, struct net_device *dev,
			      const char *name)
{
	BUG_ON(!net);

	if (!dev_valid_name(name))
		return -EINVAL;

	if (strchr(name, '%'))
		return dev_alloc_name_ns(net, dev, name);
	else if (__dev_get_by_name(net, name))
		return -EEXIST;
	else if (dev->name != name)
		strlcpy(dev->name, name, IFNAMSIZ);

	return 0;
}

/**
 *	dev_change_name - change name of a device
 *	@dev: device
 *	@newname: name (or format string) must be at least IFNAMSIZ
 *
 *	Change name of a device, can pass format strings "eth%d".
 *	for wildcarding.
 */
int dev_change_name(struct net_device *dev, const char *newname)
{
	unsigned char old_assign_type;
	char oldname[IFNAMSIZ];
	int err = 0;
	int ret;
	struct net *net;

	ASSERT_RTNL();
	BUG_ON(!dev_net(dev));

	net = dev_net(dev);

	/* Some auto-enslaved devices e.g. failover slaves are
	 * special, as userspace might rename the device after
	 * the interface had been brought up and running since
	 * the point kernel initiated auto-enslavement. Allow
	 * live name change even when these slave devices are
	 * up and running.
	 *
	 * Typically, users of these auto-enslaving devices
	 * don't actually care about slave name change, as
	 * they are supposed to operate on master interface
	 * directly.
	 */
	if (dev->flags & IFF_UP &&
	    likely(!(dev->priv_flags & IFF_LIVE_RENAME_OK)))
		return -EBUSY;

	down_write(&devnet_rename_sem);

	if (strncmp(newname, dev->name, IFNAMSIZ) == 0) {
		up_write(&devnet_rename_sem);
		return 0;
	}

	memcpy(oldname, dev->name, IFNAMSIZ);

	err = dev_get_valid_name(net, dev, newname);
	if (err < 0) {
		up_write(&devnet_rename_sem);
		return err;
	}

	if (oldname[0] && !strchr(oldname, '%'))
		netdev_info(dev, "renamed from %s\n", oldname);

	old_assign_type = dev->name_assign_type;
	dev->name_assign_type = NET_NAME_RENAMED;

rollback:
	ret = device_rename(&dev->dev, dev->name);
	if (ret) {
		memcpy(dev->name, oldname, IFNAMSIZ);
		dev->name_assign_type = old_assign_type;
		up_write(&devnet_rename_sem);
		return ret;
	}

	up_write(&devnet_rename_sem);

	netdev_adjacent_rename_links(dev, oldname);

	write_lock_bh(&dev_base_lock);
	netdev_name_node_del(dev->name_node);
	write_unlock_bh(&dev_base_lock);

	synchronize_rcu();

	write_lock_bh(&dev_base_lock);
	netdev_name_node_add(net, dev->name_node);
	write_unlock_bh(&dev_base_lock);

	ret = call_netdevice_notifiers(NETDEV_CHANGENAME, dev);
	ret = notifier_to_errno(ret);

	if (ret) {
		/* err >= 0 after dev_alloc_name() or stores the first errno */
		if (err >= 0) {
			err = ret;
			down_write(&devnet_rename_sem);
			memcpy(dev->name, oldname, IFNAMSIZ);
			memcpy(oldname, newname, IFNAMSIZ);
			dev->name_assign_type = old_assign_type;
			old_assign_type = NET_NAME_RENAMED;
			goto rollback;
		} else {
			pr_err("%s: name change rollback failed: %d\n",
			       dev->name, ret);
		}
	}

	return err;
}

/**
 *	dev_set_alias - change ifalias of a device
 *	@dev: device
 *	@alias: name up to IFALIASZ
 *	@len: limit of bytes to copy from info
 *
 *	Set ifalias for a device.
 */
int dev_set_alias(struct net_device *dev, const char *alias, size_t len)
{
	struct dev_ifalias *new_alias = NULL;

	if (len >= IFALIASZ)
		return -EINVAL;

	if (len) {
		new_alias = kmalloc(sizeof(*new_alias) + len + 1, GFP_KERNEL);
		if (!new_alias)
			return -ENOMEM;

		memcpy(new_alias->ifalias, alias, len);
		new_alias->ifalias[len] = 0;
	}

	mutex_lock(&ifalias_mutex);
	new_alias = rcu_replace_pointer(dev->ifalias, new_alias,
					mutex_is_locked(&ifalias_mutex));
	mutex_unlock(&ifalias_mutex);

	if (new_alias)
		kfree_rcu(new_alias, rcuhead);

	return len;
}
EXPORT_SYMBOL(dev_set_alias);
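
/* Illustrative note (not part of the original file): dev_set_alias() is what
 * backs "ip link set dev eth0 alias uplink" via rtnetlink. The old alias
 * block is freed only after an RCU grace period, so concurrent readers in
 * dev_get_alias() below never see a torn string.
 */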

/**
 *	dev_get_alias - get ifalias of a device
 *	@dev: device
 *	@name: buffer to store name of ifalias
 *	@len: size of buffer
 *
 *	get ifalias for a device. Caller must make sure dev cannot go
 *	away, e.g. rcu read lock or own a reference count to device.
 */
int dev_get_alias(const struct net_device *dev, char *name, size_t len)
{
	const struct dev_ifalias *alias;
	int ret = 0;

	rcu_read_lock();
	alias = rcu_dereference(dev->ifalias);
	if (alias)
		ret = snprintf(name, len, "%s", alias->ifalias);
	rcu_read_unlock();

	return ret;
}

/**
 *	netdev_features_change - device changes features
 *	@dev: device to cause notification
 *
 *	Called to indicate a device has changed features.
 */
void netdev_features_change(struct net_device *dev)
{
	call_netdevice_notifiers(NETDEV_FEAT_CHANGE, dev);
}
EXPORT_SYMBOL(netdev_features_change);

/**
 *	netdev_state_change - device changes state
 *	@dev: device to cause notification
 *
 *	Called to indicate a device has changed state. This function calls
 *	the notifier chains for netdev_chain and sends a NEWLINK message
 *	to the routing socket.
 */
void netdev_state_change(struct net_device *dev)
{
	if (dev->flags & IFF_UP) {
		struct netdev_notifier_change_info change_info = {
			.info.dev = dev,
		};

		call_netdevice_notifiers_info(NETDEV_CHANGE,
					      &change_info.info);
		rtmsg_ifinfo(RTM_NEWLINK, dev, 0, GFP_KERNEL);
	}
}
EXPORT_SYMBOL(netdev_state_change);

/**
 * __netdev_notify_peers - notify network peers about existence of @dev,
 * to be called when rtnl lock is already held.
 * @dev: network device
 *
 * Generate traffic such that interested network peers are aware of
 * @dev, such as by generating a gratuitous ARP. This may be used when
 * a device wants to inform the rest of the network about some sort of
 * reconfiguration such as a failover event or virtual machine
 * migration.
 */
void __netdev_notify_peers(struct net_device *dev)
{
	ASSERT_RTNL();
	call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, dev);
	call_netdevice_notifiers(NETDEV_RESEND_IGMP, dev);
}
EXPORT_SYMBOL(__netdev_notify_peers);

/**
 * netdev_notify_peers - notify network peers about existence of @dev
 * @dev: network device
 *
 * Generate traffic such that interested network peers are aware of
 * @dev, such as by generating a gratuitous ARP. This may be used when
 * a device wants to inform the rest of the network about some sort of
 * reconfiguration such as a failover event or virtual machine
 * migration.
 */
void netdev_notify_peers(struct net_device *dev)
{
	rtnl_lock();
	__netdev_notify_peers(dev);
	rtnl_unlock();
}
EXPORT_SYMBOL(netdev_notify_peers);

static int napi_threaded_poll(void *data);

static int napi_kthread_create(struct napi_struct *n)
{
	int err = 0;

	/* Create and wake up the kthread once to put it in
	 * TASK_INTERRUPTIBLE mode to avoid the blocked task
	 * warning and work with loadavg.
	 */
	n->thread = kthread_run(napi_threaded_poll, n, "napi/%s-%d",
				n->dev->name, n->napi_id);
	if (IS_ERR(n->thread)) {
		err = PTR_ERR(n->thread);
		pr_err("kthread_run failed with err %d\n", err);
		n->thread = NULL;
	}

	return err;
}

static int __dev_open(struct net_device *dev, struct netlink_ext_ack *extack)
{
	const struct net_device_ops *ops = dev->netdev_ops;
	int ret;

	ASSERT_RTNL();

	if (!netif_device_present(dev)) {
		/* may be detached because parent is runtime-suspended */
		if (dev->dev.parent)
			pm_runtime_resume(dev->dev.parent);
		if (!netif_device_present(dev))
			return -ENODEV;
	}

	/* Block netpoll from trying to do any rx path servicing.
	 * If we don't do this there is a chance ndo_poll_controller
	 * or ndo_poll may be running while we open the device
	 */
	netpoll_poll_disable(dev);

	ret = call_netdevice_notifiers_extack(NETDEV_PRE_UP, dev, extack);
	ret = notifier_to_errno(ret);
	if (ret)
		return ret;

	set_bit(__LINK_STATE_START, &dev->state);

	if (ops->ndo_validate_addr)
		ret = ops->ndo_validate_addr(dev);

	if (!ret && ops->ndo_open)
		ret = ops->ndo_open(dev);

	netpoll_poll_enable(dev);

	if (ret)
		clear_bit(__LINK_STATE_START, &dev->state);
	else {
		dev->flags |= IFF_UP;
		dev_set_rx_mode(dev);
		dev_activate(dev);
		add_device_randomness(dev->dev_addr, dev->addr_len);
	}

	return ret;
}

/**
 *	dev_open - prepare an interface for use.
 *	@dev: device to open
 *	@extack: netlink extended ack
 *
 *	Takes a device from down to up state. The device's private open
 *	function is invoked and then the multicast lists are loaded. Finally
 *	the device is moved into the up state and a %NETDEV_UP message is
 *	sent to the netdev notifier chain.
 *
 *	Calling this function on an active interface is a nop. On a failure
 *	a negative errno code is returned.
 */
int dev_open(struct net_device *dev, struct netlink_ext_ack *extack)
{
	int ret;

	if (dev->flags & IFF_UP)
		return 0;

	ret = __dev_open(dev, extack);
	if (ret < 0)
		return ret;

	rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING, GFP_KERNEL);
	call_netdevice_notifiers(NETDEV_UP, dev);

	return ret;
}
EXPORT_SYMBOL(dev_open);
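
/* Illustrative sketch (not part of the original file): bringing a device up
 * from kernel code mirrors what "ip link set eth0 up" triggers; the caller
 * must hold RTNL:
 *
 *	rtnl_lock();
 *	err = dev_open(dev, NULL);	// NULL: no netlink extack available
 *	rtnl_unlock();
 */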
1597
7051b88a 1598static void __dev_close_many(struct list_head *head)
1da177e4 1599{
44345724 1600 struct net_device *dev;
e46b66bc 1601
bd380811 1602 ASSERT_RTNL();
9d5010db
DM
1603 might_sleep();
1604
5cde2829 1605 list_for_each_entry(dev, head, close_list) {
3f4df206 1606 /* Temporarily disable netpoll until the interface is down */
66b5552f 1607 netpoll_poll_disable(dev);
3f4df206 1608
44345724 1609 call_netdevice_notifiers(NETDEV_GOING_DOWN, dev);
1da177e4 1610
44345724 1611 clear_bit(__LINK_STATE_START, &dev->state);
1da177e4 1612
44345724
OP
1613 /* Synchronize to scheduled poll. We cannot touch poll list, it
1614 * can be even on different cpu. So just clear netif_running().
1615 *
1616 * dev->stop() will invoke napi_disable() on all of it's
1617 * napi_struct instances on this device.
1618 */
4e857c58 1619 smp_mb__after_atomic(); /* Commit netif_running(). */
44345724 1620 }
1da177e4 1621
44345724 1622 dev_deactivate_many(head);
d8b2a4d2 1623
5cde2829 1624 list_for_each_entry(dev, head, close_list) {
44345724 1625 const struct net_device_ops *ops = dev->netdev_ops;
1da177e4 1626
44345724
OP
1627 /*
1628 * Call the device specific close. This cannot fail.
1629 * Only if device is UP
1630 *
1631 * We allow it to be called even after a DETACH hot-plug
1632 * event.
1633 */
1634 if (ops->ndo_stop)
1635 ops->ndo_stop(dev);
1636
44345724 1637 dev->flags &= ~IFF_UP;
66b5552f 1638 netpoll_poll_enable(dev);
44345724 1639 }
44345724
OP
1640}
1641
7051b88a 1642static void __dev_close(struct net_device *dev)
44345724
OP
1643{
1644 LIST_HEAD(single);
1645
5cde2829 1646 list_add(&dev->close_list, &single);
7051b88a 1647 __dev_close_many(&single);
f87e6f47 1648 list_del(&single);
44345724
OP
1649}
1650
7051b88a 1651void dev_close_many(struct list_head *head, bool unlink)
44345724
OP
1652{
1653 struct net_device *dev, *tmp;
1da177e4 1654
5cde2829
EB
1655 /* Remove the devices that don't need to be closed */
1656 list_for_each_entry_safe(dev, tmp, head, close_list)
44345724 1657 if (!(dev->flags & IFF_UP))
5cde2829 1658 list_del_init(&dev->close_list);
44345724
OP
1659
1660 __dev_close_many(head);
1da177e4 1661
5cde2829 1662 list_for_each_entry_safe(dev, tmp, head, close_list) {
7f294054 1663 rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING, GFP_KERNEL);
44345724 1664 call_netdevice_notifiers(NETDEV_DOWN, dev);
99c4a26a
DM
1665 if (unlink)
1666 list_del_init(&dev->close_list);
44345724 1667 }
bd380811 1668}
99c4a26a 1669EXPORT_SYMBOL(dev_close_many);
bd380811
PM
1670
1671/**
1672 * dev_close - shutdown an interface.
1673 * @dev: device to shutdown
1674 *
1675 * This function moves an active device into down state. A
1676 * %NETDEV_GOING_DOWN is sent to the netdev notifier chain. The device
1677 * is then deactivated and finally a %NETDEV_DOWN is sent to the notifier
1678 * chain.
1679 */
7051b88a 1680void dev_close(struct net_device *dev)
bd380811 1681{
e14a5993
ED
1682 if (dev->flags & IFF_UP) {
1683 LIST_HEAD(single);
1da177e4 1684
5cde2829 1685 list_add(&dev->close_list, &single);
99c4a26a 1686 dev_close_many(&single, true);
e14a5993
ED
1687 list_del(&single);
1688 }
1da177e4 1689}
d1b19dff 1690EXPORT_SYMBOL(dev_close);
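/* Companion sketch to the dev_open() example above: taking the device
 * back down. dev_close() also requires RTNL and is a no-op if the
 * device is not up. example_take_down() is hypothetical.
 */
static void example_take_down(struct net_device *dev)
{
	rtnl_lock();
	dev_close(dev);
	rtnl_unlock();
}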
1da177e4
LT
1691
1692
0187bdfb
BH
1693/**
1694 * dev_disable_lro - disable Large Receive Offload on a device
1695 * @dev: device
1696 *
1697 * Disable Large Receive Offload (LRO) on a net device. Must be
1698 * called under RTNL. This is needed if received packets may be
1699 * forwarded to another interface.
1700 */
1701void dev_disable_lro(struct net_device *dev)
1702{
fbe168ba
MK
1703 struct net_device *lower_dev;
1704 struct list_head *iter;
529d0489 1705
bc5787c6
MM
1706 dev->wanted_features &= ~NETIF_F_LRO;
1707 netdev_update_features(dev);
27660515 1708
22d5969f
MM
1709 if (unlikely(dev->features & NETIF_F_LRO))
1710 netdev_WARN(dev, "failed to disable LRO!\n");
fbe168ba
MK
1711
1712 netdev_for_each_lower_dev(dev, lower_dev, iter)
1713 dev_disable_lro(lower_dev);
0187bdfb
BH
1714}
1715EXPORT_SYMBOL(dev_disable_lro);
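/* Usage sketch: stacked devices (bridge, bond, team) disable LRO on a
 * newly enslaved lower device because its packets may be forwarded.
 * example_enslave() is hypothetical; the RTNL requirement and the
 * recursion into lower devices are the real contract.
 */
static int example_enslave(struct net_device *upper, struct net_device *lower)
{
	ASSERT_RTNL();
	dev_disable_lro(lower);
	/* ... proceed to link lower under upper ... */
	return 0;
}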
1716
56f5aa77
MC
1717/**
1718 * dev_disable_gro_hw - disable HW Generic Receive Offload on a device
1719 * @dev: device
1720 *
1721 * Disable HW Generic Receive Offload (GRO_HW) on a net device. Must be
1722 * called under RTNL. This is needed if Generic XDP is installed on
1723 * the device.
1724 */
1725static void dev_disable_gro_hw(struct net_device *dev)
1726{
1727 dev->wanted_features &= ~NETIF_F_GRO_HW;
1728 netdev_update_features(dev);
1729
1730 if (unlikely(dev->features & NETIF_F_GRO_HW))
1731 netdev_WARN(dev, "failed to disable GRO_HW!\n");
1732}
1733
ede2762d
KT
1734const char *netdev_cmd_to_name(enum netdev_cmd cmd)
1735{
1736#define N(val) \
1737 case NETDEV_##val: \
1738 return "NETDEV_" __stringify(val);
1739 switch (cmd) {
1740 N(UP) N(DOWN) N(REBOOT) N(CHANGE) N(REGISTER) N(UNREGISTER)
1741 N(CHANGEMTU) N(CHANGEADDR) N(GOING_DOWN) N(CHANGENAME) N(FEAT_CHANGE)
1742 N(BONDING_FAILOVER) N(PRE_UP) N(PRE_TYPE_CHANGE) N(POST_TYPE_CHANGE)
1743 N(POST_INIT) N(RELEASE) N(NOTIFY_PEERS) N(JOIN) N(CHANGEUPPER)
1744 N(RESEND_IGMP) N(PRECHANGEMTU) N(CHANGEINFODATA) N(BONDING_INFO)
1745 N(PRECHANGEUPPER) N(CHANGELOWERSTATE) N(UDP_TUNNEL_PUSH_INFO)
1746 N(UDP_TUNNEL_DROP_INFO) N(CHANGE_TX_QUEUE_LEN)
9daae9bd
GP
1747 N(CVLAN_FILTER_PUSH_INFO) N(CVLAN_FILTER_DROP_INFO)
1748 N(SVLAN_FILTER_PUSH_INFO) N(SVLAN_FILTER_DROP_INFO)
1570415f 1749 N(PRE_CHANGEADDR)
3f5ecd8a 1750 }
ede2762d
KT
1751#undef N
1752 return "UNKNOWN_NETDEV_EVENT";
1753}
1754EXPORT_SYMBOL_GPL(netdev_cmd_to_name);
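/* Usage sketch (not from this file): in a notifier callback, the opaque
 * event code can be made readable for tracing, e.g.:
 *
 *	pr_debug("%s: event %s\n", dev->name, netdev_cmd_to_name(event));
 *
 * where "event" is the unsigned long the callback received.
 */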
1755
351638e7
JP
1756static int call_netdevice_notifier(struct notifier_block *nb, unsigned long val,
1757 struct net_device *dev)
1758{
51d0c047
DA
1759 struct netdev_notifier_info info = {
1760 .dev = dev,
1761 };
351638e7 1762
351638e7
JP
1763 return nb->notifier_call(nb, val, &info);
1764}
0187bdfb 1765
afa0df59
JP
1766static int call_netdevice_register_notifiers(struct notifier_block *nb,
1767 struct net_device *dev)
1768{
1769 int err;
1770
1771 err = call_netdevice_notifier(nb, NETDEV_REGISTER, dev);
1772 err = notifier_to_errno(err);
1773 if (err)
1774 return err;
1775
1776 if (!(dev->flags & IFF_UP))
1777 return 0;
1778
1779 call_netdevice_notifier(nb, NETDEV_UP, dev);
1780 return 0;
1781}
1782
1783static void call_netdevice_unregister_notifiers(struct notifier_block *nb,
1784 struct net_device *dev)
1785{
1786 if (dev->flags & IFF_UP) {
1787 call_netdevice_notifier(nb, NETDEV_GOING_DOWN,
1788 dev);
1789 call_netdevice_notifier(nb, NETDEV_DOWN, dev);
1790 }
1791 call_netdevice_notifier(nb, NETDEV_UNREGISTER, dev);
1792}
1793
1794static int call_netdevice_register_net_notifiers(struct notifier_block *nb,
1795 struct net *net)
1796{
1797 struct net_device *dev;
1798 int err;
1799
1800 for_each_netdev(net, dev) {
1801 err = call_netdevice_register_notifiers(nb, dev);
1802 if (err)
1803 goto rollback;
1804 }
1805 return 0;
1806
1807rollback:
1808 for_each_netdev_continue_reverse(net, dev)
1809 call_netdevice_unregister_notifiers(nb, dev);
1810 return err;
1811}
1812
1813static void call_netdevice_unregister_net_notifiers(struct notifier_block *nb,
1814 struct net *net)
1815{
1816 struct net_device *dev;
1817
1818 for_each_netdev(net, dev)
1819 call_netdevice_unregister_notifiers(nb, dev);
1820}
1821
881d966b
EB
1822static int dev_boot_phase = 1;
1823
1da177e4 1824/**
722c9a0c 1825 * register_netdevice_notifier - register a network notifier block
1826 * @nb: notifier
1da177e4 1827 *
722c9a0c 1828 * Register a notifier to be called when network device events occur.
1829 * The notifier passed is linked into the kernel structures and must
1830 * not be reused until it has been unregistered. A negative errno code
1831 * is returned on a failure.
1da177e4 1832 *
722c9a0c 1833 * When registered, all registration and up events are replayed
 1834 * to the new notifier so that it has a race-free
 1835 * view of the network device list.
1da177e4
LT
1836 */
1837
1838int register_netdevice_notifier(struct notifier_block *nb)
1839{
881d966b 1840 struct net *net;
1da177e4
LT
1841 int err;
1842
328fbe74
KT
1843 /* Close race with setup_net() and cleanup_net() */
1844 down_write(&pernet_ops_rwsem);
1da177e4 1845 rtnl_lock();
f07d5b94 1846 err = raw_notifier_chain_register(&netdev_chain, nb);
fcc5a03a
HX
1847 if (err)
1848 goto unlock;
881d966b
EB
1849 if (dev_boot_phase)
1850 goto unlock;
1851 for_each_net(net) {
afa0df59
JP
1852 err = call_netdevice_register_net_notifiers(nb, net);
1853 if (err)
1854 goto rollback;
1da177e4 1855 }
fcc5a03a
HX
1856
1857unlock:
1da177e4 1858 rtnl_unlock();
328fbe74 1859 up_write(&pernet_ops_rwsem);
1da177e4 1860 return err;
fcc5a03a
HX
1861
1862rollback:
afa0df59
JP
1863 for_each_net_continue_reverse(net)
1864 call_netdevice_unregister_net_notifiers(nb, net);
c67625a1
PE
1865
1866 raw_notifier_chain_unregister(&netdev_chain, nb);
fcc5a03a 1867 goto unlock;
1da177e4 1868}
d1b19dff 1869EXPORT_SYMBOL(register_netdevice_notifier);
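/* Registration sketch (hypothetical module, not part of this file): the
 * callback sees every device event, plus replayed REGISTER/UP events for
 * devices that already existed when the notifier was added.
 */
static int example_netdev_event(struct notifier_block *nb,
				unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);

	if (event == NETDEV_UP)
		pr_info("%s is up\n", dev->name);
	return NOTIFY_DONE;
}

static struct notifier_block example_nb = {
	.notifier_call = example_netdev_event,
};

/* module init calls register_netdevice_notifier(&example_nb);
 * module exit calls unregister_netdevice_notifier(&example_nb).
 */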
1da177e4
LT
1870
1871/**
722c9a0c 1872 * unregister_netdevice_notifier - unregister a network notifier block
1873 * @nb: notifier
1da177e4 1874 *
722c9a0c 1875 * Unregister a notifier previously registered by
 1876 * register_netdevice_notifier(). The notifier is unlinked from the
 1877 * kernel structures and may then be reused. A negative errno code
1878 * is returned on a failure.
7d3d43da 1879 *
722c9a0c 1880 * After unregistering, unregister and down device events are synthesized
 1881 * for all devices on the device list and sent to the removed notifier,
 1882 * removing the need for special-case cleanup code.
1da177e4
LT
1883 */
1884
1885int unregister_netdevice_notifier(struct notifier_block *nb)
1886{
7d3d43da 1887 struct net *net;
9f514950
HX
1888 int err;
1889
328fbe74
KT
1890 /* Close race with setup_net() and cleanup_net() */
1891 down_write(&pernet_ops_rwsem);
9f514950 1892 rtnl_lock();
f07d5b94 1893 err = raw_notifier_chain_unregister(&netdev_chain, nb);
7d3d43da
EB
1894 if (err)
1895 goto unlock;
1896
48b3a137
JP
1897 for_each_net(net)
1898 call_netdevice_unregister_net_notifiers(nb, net);
1899
7d3d43da 1900unlock:
9f514950 1901 rtnl_unlock();
328fbe74 1902 up_write(&pernet_ops_rwsem);
9f514950 1903 return err;
1da177e4 1904}
d1b19dff 1905EXPORT_SYMBOL(unregister_netdevice_notifier);
1da177e4 1906
1f637703
JP
1907static int __register_netdevice_notifier_net(struct net *net,
1908 struct notifier_block *nb,
1909 bool ignore_call_fail)
1910{
1911 int err;
1912
1913 err = raw_notifier_chain_register(&net->netdev_chain, nb);
1914 if (err)
1915 return err;
1916 if (dev_boot_phase)
1917 return 0;
1918
1919 err = call_netdevice_register_net_notifiers(nb, net);
1920 if (err && !ignore_call_fail)
1921 goto chain_unregister;
1922
1923 return 0;
1924
1925chain_unregister:
1926 raw_notifier_chain_unregister(&net->netdev_chain, nb);
1927 return err;
1928}
1929
1930static int __unregister_netdevice_notifier_net(struct net *net,
1931 struct notifier_block *nb)
1932{
1933 int err;
1934
1935 err = raw_notifier_chain_unregister(&net->netdev_chain, nb);
1936 if (err)
1937 return err;
1938
1939 call_netdevice_unregister_net_notifiers(nb, net);
1940 return 0;
1941}
1942
a30c7b42
JP
1943/**
1944 * register_netdevice_notifier_net - register a per-netns network notifier block
1945 * @net: network namespace
1946 * @nb: notifier
1947 *
1948 * Register a notifier to be called when network device events occur.
1949 * The notifier passed is linked into the kernel structures and must
1950 * not be reused until it has been unregistered. A negative errno code
1951 * is returned on a failure.
1952 *
 1953 * When registered, all registration and up events are replayed
 1954 * to the new notifier so that it has a race-free
 1955 * view of the network device list.
1956 */
1957
1958int register_netdevice_notifier_net(struct net *net, struct notifier_block *nb)
1959{
1960 int err;
1961
1962 rtnl_lock();
1f637703 1963 err = __register_netdevice_notifier_net(net, nb, false);
a30c7b42
JP
1964 rtnl_unlock();
1965 return err;
a30c7b42
JP
1966}
1967EXPORT_SYMBOL(register_netdevice_notifier_net);
1968
1969/**
1970 * unregister_netdevice_notifier_net - unregister a per-netns
1971 * network notifier block
1972 * @net: network namespace
1973 * @nb: notifier
1974 *
1975 * Unregister a notifier previously registered by
 1976 * register_netdevice_notifier_net(). The notifier is unlinked from the
 1977 * kernel structures and may then be reused. A negative errno code
1978 * is returned on a failure.
1979 *
 1980 * After unregistering, unregister and down device events are synthesized
 1981 * for all devices on the device list and sent to the removed notifier,
 1982 * removing the need for special-case cleanup code.
1983 */
1984
1985int unregister_netdevice_notifier_net(struct net *net,
1986 struct notifier_block *nb)
1987{
1988 int err;
1989
1990 rtnl_lock();
1f637703 1991 err = __unregister_netdevice_notifier_net(net, nb);
a30c7b42
JP
1992 rtnl_unlock();
1993 return err;
1994}
1995EXPORT_SYMBOL(unregister_netdevice_notifier_net);
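/* Per-netns sketch: the same hypothetical example_nb as above, scoped to
 * one namespace so events from other namespaces are never delivered;
 * &init_net is used purely for illustration.
 */
static int example_register_in_netns(void)
{
	return register_netdevice_notifier_net(&init_net, &example_nb);
}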
a30c7b42 1996
93642e14
JP
1997int register_netdevice_notifier_dev_net(struct net_device *dev,
1998 struct notifier_block *nb,
1999 struct netdev_net_notifier *nn)
2000{
2001 int err;
a30c7b42 2002
93642e14
JP
2003 rtnl_lock();
2004 err = __register_netdevice_notifier_net(dev_net(dev), nb, false);
2005 if (!err) {
2006 nn->nb = nb;
2007 list_add(&nn->list, &dev->net_notifier_list);
2008 }
a30c7b42
JP
2009 rtnl_unlock();
2010 return err;
2011}
93642e14
JP
2012EXPORT_SYMBOL(register_netdevice_notifier_dev_net);
2013
2014int unregister_netdevice_notifier_dev_net(struct net_device *dev,
2015 struct notifier_block *nb,
2016 struct netdev_net_notifier *nn)
2017{
2018 int err;
2019
2020 rtnl_lock();
2021 list_del(&nn->list);
2022 err = __unregister_netdevice_notifier_net(dev_net(dev), nb);
2023 rtnl_unlock();
2024 return err;
2025}
2026EXPORT_SYMBOL(unregister_netdevice_notifier_dev_net);
2027
2028static void move_netdevice_notifiers_dev_net(struct net_device *dev,
2029 struct net *net)
2030{
2031 struct netdev_net_notifier *nn;
2032
2033 list_for_each_entry(nn, &dev->net_notifier_list, list) {
2034 __unregister_netdevice_notifier_net(dev_net(dev), nn->nb);
2035 __register_netdevice_notifier_net(net, nn->nb, true);
2036 }
2037}
a30c7b42 2038
351638e7
JP
2039/**
2040 * call_netdevice_notifiers_info - call all network notifier blocks
2041 * @val: value passed unmodified to notifier function
351638e7
JP
2042 * @info: notifier information data
2043 *
2044 * Call all network notifier blocks. Parameters and return value
2045 * are as for raw_notifier_call_chain().
2046 */
2047
1d143d9f 2048static int call_netdevice_notifiers_info(unsigned long val,
1d143d9f 2049 struct netdev_notifier_info *info)
351638e7 2050{
a30c7b42
JP
2051 struct net *net = dev_net(info->dev);
2052 int ret;
2053
351638e7 2054 ASSERT_RTNL();
a30c7b42
JP
2055
2056 /* Run per-netns notifier block chain first, then run the global one.
2057 * Hopefully, one day, the global one is going to be removed after
 2058 * all notifier block registrants get converted to be per-netns.
2059 */
2060 ret = raw_notifier_call_chain(&net->netdev_chain, val, info);
2061 if (ret & NOTIFY_STOP_MASK)
2062 return ret;
351638e7
JP
2063 return raw_notifier_call_chain(&netdev_chain, val, info);
2064}
351638e7 2065
26372605
PM
2066static int call_netdevice_notifiers_extack(unsigned long val,
2067 struct net_device *dev,
2068 struct netlink_ext_ack *extack)
2069{
2070 struct netdev_notifier_info info = {
2071 .dev = dev,
2072 .extack = extack,
2073 };
2074
2075 return call_netdevice_notifiers_info(val, &info);
2076}
2077
1da177e4
LT
2078/**
2079 * call_netdevice_notifiers - call all network notifier blocks
2080 * @val: value passed unmodified to notifier function
c4ea43c5 2081 * @dev: net_device pointer passed unmodified to notifier function
1da177e4
LT
2082 *
2083 * Call all network notifier blocks. Parameters and return value
f07d5b94 2084 * are as for raw_notifier_call_chain().
1da177e4
LT
2085 */
2086
ad7379d4 2087int call_netdevice_notifiers(unsigned long val, struct net_device *dev)
1da177e4 2088{
26372605 2089 return call_netdevice_notifiers_extack(val, dev, NULL);
1da177e4 2090}
edf947f1 2091EXPORT_SYMBOL(call_netdevice_notifiers);
1da177e4 2092
af7d6cce
SD
2093/**
2094 * call_netdevice_notifiers_mtu - call all network notifier blocks
2095 * @val: value passed unmodified to notifier function
2096 * @dev: net_device pointer passed unmodified to notifier function
2097 * @arg: additional u32 argument passed to the notifier function
2098 *
2099 * Call all network notifier blocks. Parameters and return value
2100 * are as for raw_notifier_call_chain().
2101 */
2102static int call_netdevice_notifiers_mtu(unsigned long val,
2103 struct net_device *dev, u32 arg)
2104{
2105 struct netdev_notifier_info_ext info = {
2106 .info.dev = dev,
2107 .ext.mtu = arg,
2108 };
2109
2110 BUILD_BUG_ON(offsetof(struct netdev_notifier_info_ext, info) != 0);
2111
2112 return call_netdevice_notifiers_info(val, &info.info);
2113}
2114
1cf51900 2115#ifdef CONFIG_NET_INGRESS
aabf6772 2116static DEFINE_STATIC_KEY_FALSE(ingress_needed_key);
4577139b
DB
2117
2118void net_inc_ingress_queue(void)
2119{
aabf6772 2120 static_branch_inc(&ingress_needed_key);
4577139b
DB
2121}
2122EXPORT_SYMBOL_GPL(net_inc_ingress_queue);
2123
2124void net_dec_ingress_queue(void)
2125{
aabf6772 2126 static_branch_dec(&ingress_needed_key);
4577139b
DB
2127}
2128EXPORT_SYMBOL_GPL(net_dec_ingress_queue);
2129#endif
2130
1f211a1b 2131#ifdef CONFIG_NET_EGRESS
aabf6772 2132static DEFINE_STATIC_KEY_FALSE(egress_needed_key);
1f211a1b
DB
2133
2134void net_inc_egress_queue(void)
2135{
aabf6772 2136 static_branch_inc(&egress_needed_key);
1f211a1b
DB
2137}
2138EXPORT_SYMBOL_GPL(net_inc_egress_queue);
2139
2140void net_dec_egress_queue(void)
2141{
aabf6772 2142 static_branch_dec(&egress_needed_key);
1f211a1b
DB
2143}
2144EXPORT_SYMBOL_GPL(net_dec_egress_queue);
2145#endif
2146
39e83922 2147static DEFINE_STATIC_KEY_FALSE(netstamp_needed_key);
e9666d10 2148#ifdef CONFIG_JUMP_LABEL
b90e5794 2149static atomic_t netstamp_needed_deferred;
13baa00a 2150static atomic_t netstamp_wanted;
5fa8bbda 2151static void netstamp_clear(struct work_struct *work)
1da177e4 2152{
b90e5794 2153 int deferred = atomic_xchg(&netstamp_needed_deferred, 0);
13baa00a 2154 int wanted;
b90e5794 2155
13baa00a
ED
2156 wanted = atomic_add_return(deferred, &netstamp_wanted);
2157 if (wanted > 0)
39e83922 2158 static_branch_enable(&netstamp_needed_key);
13baa00a 2159 else
39e83922 2160 static_branch_disable(&netstamp_needed_key);
5fa8bbda
ED
2161}
2162static DECLARE_WORK(netstamp_work, netstamp_clear);
b90e5794 2163#endif
5fa8bbda
ED
2164
2165void net_enable_timestamp(void)
2166{
e9666d10 2167#ifdef CONFIG_JUMP_LABEL
13baa00a
ED
2168 int wanted;
2169
2170 while (1) {
2171 wanted = atomic_read(&netstamp_wanted);
2172 if (wanted <= 0)
2173 break;
2174 if (atomic_cmpxchg(&netstamp_wanted, wanted, wanted + 1) == wanted)
2175 return;
2176 }
2177 atomic_inc(&netstamp_needed_deferred);
2178 schedule_work(&netstamp_work);
2179#else
39e83922 2180 static_branch_inc(&netstamp_needed_key);
13baa00a 2181#endif
1da177e4 2182}
d1b19dff 2183EXPORT_SYMBOL(net_enable_timestamp);
1da177e4
LT
2184
2185void net_disable_timestamp(void)
2186{
e9666d10 2187#ifdef CONFIG_JUMP_LABEL
13baa00a
ED
2188 int wanted;
2189
2190 while (1) {
2191 wanted = atomic_read(&netstamp_wanted);
2192 if (wanted <= 1)
2193 break;
2194 if (atomic_cmpxchg(&netstamp_wanted, wanted, wanted - 1) == wanted)
2195 return;
2196 }
2197 atomic_dec(&netstamp_needed_deferred);
5fa8bbda
ED
2198 schedule_work(&netstamp_work);
2199#else
39e83922 2200 static_branch_dec(&netstamp_needed_key);
5fa8bbda 2201#endif
1da177e4 2202}
d1b19dff 2203EXPORT_SYMBOL(net_disable_timestamp);
1da177e4 2204
3b098e2d 2205static inline void net_timestamp_set(struct sk_buff *skb)
1da177e4 2206{
2456e855 2207 skb->tstamp = 0;
39e83922 2208 if (static_branch_unlikely(&netstamp_needed_key))
a61bbcf2 2209 __net_timestamp(skb);
1da177e4
LT
2210}
2211
39e83922
DB
2212#define net_timestamp_check(COND, SKB) \
2213 if (static_branch_unlikely(&netstamp_needed_key)) { \
2214 if ((COND) && !(SKB)->tstamp) \
2215 __net_timestamp(SKB); \
2216 } \
3b098e2d 2217
f4b05d27 2218bool is_skb_forwardable(const struct net_device *dev, const struct sk_buff *skb)
79b569f0 2219{
5f7d5728 2220 return __is_skb_forwardable(dev, skb, true);
79b569f0 2221}
1ee481fb 2222EXPORT_SYMBOL_GPL(is_skb_forwardable);
79b569f0 2223
5f7d5728
JDB
2224static int __dev_forward_skb2(struct net_device *dev, struct sk_buff *skb,
2225 bool check_mtu)
a0265d28 2226{
5f7d5728 2227 int ret = ____dev_forward_skb(dev, skb, check_mtu);
a0265d28 2228
4e3264d2
MKL
2229 if (likely(!ret)) {
2230 skb->protocol = eth_type_trans(skb, dev);
2231 skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
2232 }
a0265d28 2233
4e3264d2 2234 return ret;
a0265d28 2235}
5f7d5728
JDB
2236
2237int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
2238{
2239 return __dev_forward_skb2(dev, skb, true);
2240}
a0265d28
HX
2241EXPORT_SYMBOL_GPL(__dev_forward_skb);
2242
44540960
AB
2243/**
2244 * dev_forward_skb - loopback an skb to another netif
2245 *
2246 * @dev: destination network device
2247 * @skb: buffer to forward
2248 *
2249 * return values:
2250 * NET_RX_SUCCESS (no congestion)
6ec82562 2251 * NET_RX_DROP (packet was dropped, but freed)
44540960
AB
2252 *
2253 * dev_forward_skb can be used for injecting an skb from the
2254 * start_xmit function of one device into the receive queue
2255 * of another device.
2256 *
2257 * The receiving device may be in another namespace, so
2258 * we have to clear all information in the skb that could
2259 * impact namespace isolation.
2260 */
2261int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
2262{
a0265d28 2263 return __dev_forward_skb(dev, skb) ?: netif_rx_internal(skb);
44540960
AB
2264}
2265EXPORT_SYMBOL_GPL(dev_forward_skb);
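/* Forwarding sketch: a veth-like pair device injects every transmitted
 * skb into its peer's receive path. example_get_peer() is hypothetical;
 * the real contract is that dev_forward_skb() always consumes the skb.
 */
static netdev_tx_t example_pair_xmit(struct sk_buff *skb,
				     struct net_device *dev)
{
	struct net_device *peer = example_get_peer(dev);	/* hypothetical */

	if (dev_forward_skb(peer, skb) != NET_RX_SUCCESS)
		dev->stats.tx_dropped++;
	return NETDEV_TX_OK;
}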
2266
5f7d5728
JDB
2267int dev_forward_skb_nomtu(struct net_device *dev, struct sk_buff *skb)
2268{
2269 return __dev_forward_skb2(dev, skb, false) ?: netif_rx_internal(skb);
2270}
2271
71d9dec2
CG
2272static inline int deliver_skb(struct sk_buff *skb,
2273 struct packet_type *pt_prev,
2274 struct net_device *orig_dev)
2275{
1f8b977a 2276 if (unlikely(skb_orphan_frags_rx(skb, GFP_ATOMIC)))
1080e512 2277 return -ENOMEM;
63354797 2278 refcount_inc(&skb->users);
71d9dec2
CG
2279 return pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
2280}
2281
7866a621
SN
2282static inline void deliver_ptype_list_skb(struct sk_buff *skb,
2283 struct packet_type **pt,
fbcb2170
JP
2284 struct net_device *orig_dev,
2285 __be16 type,
7866a621
SN
2286 struct list_head *ptype_list)
2287{
2288 struct packet_type *ptype, *pt_prev = *pt;
2289
2290 list_for_each_entry_rcu(ptype, ptype_list, list) {
2291 if (ptype->type != type)
2292 continue;
2293 if (pt_prev)
fbcb2170 2294 deliver_skb(skb, pt_prev, orig_dev);
7866a621
SN
2295 pt_prev = ptype;
2296 }
2297 *pt = pt_prev;
2298}
2299
c0de08d0
EL
2300static inline bool skb_loop_sk(struct packet_type *ptype, struct sk_buff *skb)
2301{
a3d744e9 2302 if (!ptype->af_packet_priv || !skb->sk)
c0de08d0
EL
2303 return false;
2304
2305 if (ptype->id_match)
2306 return ptype->id_match(ptype, skb->sk);
2307 else if ((struct sock *)ptype->af_packet_priv == skb->sk)
2308 return true;
2309
2310 return false;
2311}
2312
9f9a742d
MR
2313/**
2314 * dev_nit_active - return true if any network interface taps are in use
2315 *
2316 * @dev: network device to check for the presence of taps
2317 */
2318bool dev_nit_active(struct net_device *dev)
2319{
2320 return !list_empty(&ptype_all) || !list_empty(&dev->ptype_all);
2321}
2322EXPORT_SYMBOL_GPL(dev_nit_active);
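/* Tap sketch: software devices that bypass dev_hard_start_xmit() feed
 * outgoing packets to taps themselves, gated by dev_nit_active() so the
 * common no-tap case stays cheap. example_tx_to_taps() is hypothetical.
 */
static void example_tx_to_taps(struct sk_buff *skb, struct net_device *dev)
{
	if (dev_nit_active(dev))
		dev_queue_xmit_nit(skb, dev);
}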
2323
1da177e4
LT
2324/*
2325 * Support routine. Sends outgoing frames to any network
2326 * taps currently in use.
2327 */
2328
74b20582 2329void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
1da177e4
LT
2330{
2331 struct packet_type *ptype;
71d9dec2
CG
2332 struct sk_buff *skb2 = NULL;
2333 struct packet_type *pt_prev = NULL;
7866a621 2334 struct list_head *ptype_list = &ptype_all;
a61bbcf2 2335
1da177e4 2336 rcu_read_lock();
7866a621
SN
2337again:
2338 list_for_each_entry_rcu(ptype, ptype_list, list) {
fa788d98
VW
2339 if (ptype->ignore_outgoing)
2340 continue;
2341
1da177e4
LT
2342 /* Never send packets back to the socket
2343 * they originated from - MvS (miquels@drinkel.ow.org)
2344 */
7866a621
SN
2345 if (skb_loop_sk(ptype, skb))
2346 continue;
71d9dec2 2347
7866a621
SN
2348 if (pt_prev) {
2349 deliver_skb(skb2, pt_prev, skb->dev);
2350 pt_prev = ptype;
2351 continue;
2352 }
1da177e4 2353
7866a621
SN
2354 /* need to clone skb, done only once */
2355 skb2 = skb_clone(skb, GFP_ATOMIC);
2356 if (!skb2)
2357 goto out_unlock;
70978182 2358
7866a621 2359 net_timestamp_set(skb2);
1da177e4 2360
7866a621
SN
2361 /* skb->nh should be correctly
2362 * set by sender, so that the second statement is
2363 * just protection against buggy protocols.
2364 */
2365 skb_reset_mac_header(skb2);
2366
2367 if (skb_network_header(skb2) < skb2->data ||
2368 skb_network_header(skb2) > skb_tail_pointer(skb2)) {
2369 net_crit_ratelimited("protocol %04x is buggy, dev %s\n",
2370 ntohs(skb2->protocol),
2371 dev->name);
2372 skb_reset_network_header(skb2);
1da177e4 2373 }
7866a621
SN
2374
2375 skb2->transport_header = skb2->network_header;
2376 skb2->pkt_type = PACKET_OUTGOING;
2377 pt_prev = ptype;
2378 }
2379
2380 if (ptype_list == &ptype_all) {
2381 ptype_list = &dev->ptype_all;
2382 goto again;
1da177e4 2383 }
7866a621 2384out_unlock:
581fe0ea
WB
2385 if (pt_prev) {
2386 if (!skb_orphan_frags_rx(skb2, GFP_ATOMIC))
2387 pt_prev->func(skb2, skb->dev, pt_prev, skb->dev);
2388 else
2389 kfree_skb(skb2);
2390 }
1da177e4
LT
2391 rcu_read_unlock();
2392}
74b20582 2393EXPORT_SYMBOL_GPL(dev_queue_xmit_nit);
1da177e4 2394
2c53040f
BH
2395/**
2396 * netif_setup_tc - Handle tc mappings on real_num_tx_queues change
4f57c087
JF
2397 * @dev: Network device
2398 * @txq: number of queues available
2399 *
2400 * If real_num_tx_queues is changed the tc mappings may no longer be
 2401 * valid. To resolve this, verify the tc mapping remains valid and if
 2402 * not, NULL the mapping. With no priorities mapping to this
 2403 * offset/count pair it will no longer be used. In the worst case, if TC0
 2404 * is invalid, nothing can be done, so disable priority mappings. It is
 2405 * expected that drivers will fix this mapping if they can before
2406 * calling netif_set_real_num_tx_queues.
2407 */
bb134d22 2408static void netif_setup_tc(struct net_device *dev, unsigned int txq)
4f57c087
JF
2409{
2410 int i;
2411 struct netdev_tc_txq *tc = &dev->tc_to_txq[0];
2412
2413 /* If TC0 is invalidated disable TC mapping */
2414 if (tc->offset + tc->count > txq) {
7b6cd1ce 2415 pr_warn("Number of in use tx queues changed invalidating tc mappings. Priority traffic classification disabled!\n");
4f57c087
JF
2416 dev->num_tc = 0;
2417 return;
2418 }
2419
2420 /* Invalidated prio to tc mappings set to TC0 */
2421 for (i = 1; i < TC_BITMASK + 1; i++) {
2422 int q = netdev_get_prio_tc_map(dev, i);
2423
2424 tc = &dev->tc_to_txq[q];
2425 if (tc->offset + tc->count > txq) {
7b6cd1ce
JP
2426 pr_warn("Number of in use tx queues changed. Priority %i to tc mapping %i is no longer valid. Setting map to 0\n",
2427 i, q);
4f57c087
JF
2428 netdev_set_prio_tc_map(dev, i, 0);
2429 }
2430 }
2431}
2432
8d059b0f
AD
2433int netdev_txq_to_tc(struct net_device *dev, unsigned int txq)
2434{
2435 if (dev->num_tc) {
2436 struct netdev_tc_txq *tc = &dev->tc_to_txq[0];
2437 int i;
2438
ffcfe25b 2439 /* walk through the TCs and see if it falls into any of them */
8d059b0f
AD
2440 for (i = 0; i < TC_MAX_QUEUE; i++, tc++) {
2441 if ((txq - tc->offset) < tc->count)
2442 return i;
2443 }
2444
ffcfe25b 2445 /* didn't find it, just return -1 to indicate no match */
8d059b0f
AD
2446 return -1;
2447 }
2448
2449 return 0;
2450}
8a5f2166 2451EXPORT_SYMBOL(netdev_txq_to_tc);
8d059b0f 2452
537c00de 2453#ifdef CONFIG_XPS
04157469
AN
2454struct static_key xps_needed __read_mostly;
2455EXPORT_SYMBOL(xps_needed);
2456struct static_key xps_rxqs_needed __read_mostly;
2457EXPORT_SYMBOL(xps_rxqs_needed);
537c00de
AD
2458static DEFINE_MUTEX(xps_map_mutex);
2459#define xmap_dereference(P) \
2460 rcu_dereference_protected((P), lockdep_is_held(&xps_map_mutex))
2461
6234f874
AD
2462static bool remove_xps_queue(struct xps_dev_maps *dev_maps,
2463 int tci, u16 index)
537c00de 2464{
10cdc3f3
AD
2465 struct xps_map *map = NULL;
2466 int pos;
537c00de 2467
10cdc3f3 2468 if (dev_maps)
80d19669 2469 map = xmap_dereference(dev_maps->attr_map[tci]);
6234f874
AD
2470 if (!map)
2471 return false;
537c00de 2472
6234f874
AD
2473 for (pos = map->len; pos--;) {
2474 if (map->queues[pos] != index)
2475 continue;
2476
2477 if (map->len > 1) {
2478 map->queues[pos] = map->queues[--map->len];
10cdc3f3 2479 break;
537c00de 2480 }
6234f874 2481
80d19669 2482 RCU_INIT_POINTER(dev_maps->attr_map[tci], NULL);
6234f874
AD
2483 kfree_rcu(map, rcu);
2484 return false;
537c00de
AD
2485 }
2486
6234f874 2487 return true;
10cdc3f3
AD
2488}
2489
6234f874
AD
2490static bool remove_xps_queue_cpu(struct net_device *dev,
2491 struct xps_dev_maps *dev_maps,
2492 int cpu, u16 offset, u16 count)
2493{
184c449f
AD
2494 int num_tc = dev->num_tc ? : 1;
2495 bool active = false;
2496 int tci;
6234f874 2497
184c449f
AD
2498 for (tci = cpu * num_tc; num_tc--; tci++) {
2499 int i, j;
2500
2501 for (i = count, j = offset; i--; j++) {
6358d49a 2502 if (!remove_xps_queue(dev_maps, tci, j))
184c449f
AD
2503 break;
2504 }
2505
2506 active |= i < 0;
6234f874
AD
2507 }
2508
184c449f 2509 return active;
6234f874
AD
2510}
2511
867d0ad4
SD
2512static void reset_xps_maps(struct net_device *dev,
2513 struct xps_dev_maps *dev_maps,
2514 bool is_rxqs_map)
2515{
2516 if (is_rxqs_map) {
2517 static_key_slow_dec_cpuslocked(&xps_rxqs_needed);
2518 RCU_INIT_POINTER(dev->xps_rxqs_map, NULL);
2519 } else {
2520 RCU_INIT_POINTER(dev->xps_cpus_map, NULL);
2521 }
2522 static_key_slow_dec_cpuslocked(&xps_needed);
2523 kfree_rcu(dev_maps, rcu);
2524}
2525
80d19669
AN
2526static void clean_xps_maps(struct net_device *dev, const unsigned long *mask,
2527 struct xps_dev_maps *dev_maps, unsigned int nr_ids,
2528 u16 offset, u16 count, bool is_rxqs_map)
2529{
2530 bool active = false;
2531 int i, j;
2532
2533 for (j = -1; j = netif_attrmask_next(j, mask, nr_ids),
2534 j < nr_ids;)
2535 active |= remove_xps_queue_cpu(dev, dev_maps, j, offset,
2536 count);
867d0ad4
SD
2537 if (!active)
2538 reset_xps_maps(dev, dev_maps, is_rxqs_map);
80d19669 2539
f28c020f
SD
2540 if (!is_rxqs_map) {
2541 for (i = offset + (count - 1); count--; i--) {
2542 netdev_queue_numa_node_write(
2543 netdev_get_tx_queue(dev, i),
2544 NUMA_NO_NODE);
80d19669 2545 }
80d19669
AN
2546 }
2547}
2548
6234f874
AD
2549static void netif_reset_xps_queues(struct net_device *dev, u16 offset,
2550 u16 count)
10cdc3f3 2551{
80d19669 2552 const unsigned long *possible_mask = NULL;
10cdc3f3 2553 struct xps_dev_maps *dev_maps;
80d19669 2554 unsigned int nr_ids;
10cdc3f3 2555
04157469
AN
2556 if (!static_key_false(&xps_needed))
2557 return;
10cdc3f3 2558
4d99f660 2559 cpus_read_lock();
04157469 2560 mutex_lock(&xps_map_mutex);
10cdc3f3 2561
04157469
AN
2562 if (static_key_false(&xps_rxqs_needed)) {
2563 dev_maps = xmap_dereference(dev->xps_rxqs_map);
2564 if (dev_maps) {
2565 nr_ids = dev->num_rx_queues;
2566 clean_xps_maps(dev, possible_mask, dev_maps, nr_ids,
2567 offset, count, true);
2568 }
537c00de
AD
2569 }
2570
80d19669
AN
2571 dev_maps = xmap_dereference(dev->xps_cpus_map);
2572 if (!dev_maps)
2573 goto out_no_maps;
2574
2575 if (num_possible_cpus() > 1)
2576 possible_mask = cpumask_bits(cpu_possible_mask);
2577 nr_ids = nr_cpu_ids;
2578 clean_xps_maps(dev, possible_mask, dev_maps, nr_ids, offset, count,
2579 false);
024e9679 2580
537c00de
AD
2581out_no_maps:
2582 mutex_unlock(&xps_map_mutex);
4d99f660 2583 cpus_read_unlock();
537c00de
AD
2584}
2585
6234f874
AD
2586static void netif_reset_xps_queues_gt(struct net_device *dev, u16 index)
2587{
2588 netif_reset_xps_queues(dev, index, dev->num_tx_queues - index);
2589}
2590
80d19669
AN
2591static struct xps_map *expand_xps_map(struct xps_map *map, int attr_index,
2592 u16 index, bool is_rxqs_map)
01c5f864
AD
2593{
2594 struct xps_map *new_map;
2595 int alloc_len = XPS_MIN_MAP_ALLOC;
2596 int i, pos;
2597
2598 for (pos = 0; map && pos < map->len; pos++) {
2599 if (map->queues[pos] != index)
2600 continue;
2601 return map;
2602 }
2603
80d19669 2604 /* Need to add tx-queue to this CPU's/rx-queue's existing map */
01c5f864
AD
2605 if (map) {
2606 if (pos < map->alloc_len)
2607 return map;
2608
2609 alloc_len = map->alloc_len * 2;
2610 }
2611
80d19669
AN
2612 /* Need to allocate new map to store tx-queue on this CPU's/rx-queue's
2613 * map
2614 */
2615 if (is_rxqs_map)
2616 new_map = kzalloc(XPS_MAP_SIZE(alloc_len), GFP_KERNEL);
2617 else
2618 new_map = kzalloc_node(XPS_MAP_SIZE(alloc_len), GFP_KERNEL,
2619 cpu_to_node(attr_index));
01c5f864
AD
2620 if (!new_map)
2621 return NULL;
2622
2623 for (i = 0; i < pos; i++)
2624 new_map->queues[i] = map->queues[i];
2625 new_map->alloc_len = alloc_len;
2626 new_map->len = pos;
2627
2628 return new_map;
2629}
2630
4d99f660 2631/* Must be called under cpus_read_lock */
80d19669
AN
2632int __netif_set_xps_queue(struct net_device *dev, const unsigned long *mask,
2633 u16 index, bool is_rxqs_map)
537c00de 2634{
80d19669 2635 const unsigned long *online_mask = NULL, *possible_mask = NULL;
01c5f864 2636 struct xps_dev_maps *dev_maps, *new_dev_maps = NULL;
80d19669 2637 int i, j, tci, numa_node_id = -2;
184c449f 2638 int maps_sz, num_tc = 1, tc = 0;
537c00de 2639 struct xps_map *map, *new_map;
01c5f864 2640 bool active = false;
80d19669 2641 unsigned int nr_ids;
537c00de 2642
184c449f 2643 if (dev->num_tc) {
ffcfe25b 2644 /* Do not allow XPS on subordinate device directly */
184c449f 2645 num_tc = dev->num_tc;
ffcfe25b
AD
2646 if (num_tc < 0)
2647 return -EINVAL;
2648
2649 /* If queue belongs to subordinate dev use its map */
2650 dev = netdev_get_tx_queue(dev, index)->sb_dev ? : dev;
2651
184c449f
AD
2652 tc = netdev_txq_to_tc(dev, index);
2653 if (tc < 0)
2654 return -EINVAL;
2655 }
2656
537c00de 2657 mutex_lock(&xps_map_mutex);
80d19669
AN
2658 if (is_rxqs_map) {
2659 maps_sz = XPS_RXQ_DEV_MAPS_SIZE(num_tc, dev->num_rx_queues);
2660 dev_maps = xmap_dereference(dev->xps_rxqs_map);
2661 nr_ids = dev->num_rx_queues;
2662 } else {
2663 maps_sz = XPS_CPU_DEV_MAPS_SIZE(num_tc);
2664 if (num_possible_cpus() > 1) {
2665 online_mask = cpumask_bits(cpu_online_mask);
2666 possible_mask = cpumask_bits(cpu_possible_mask);
2667 }
2668 dev_maps = xmap_dereference(dev->xps_cpus_map);
2669 nr_ids = nr_cpu_ids;
2670 }
537c00de 2671
80d19669
AN
2672 if (maps_sz < L1_CACHE_BYTES)
2673 maps_sz = L1_CACHE_BYTES;
537c00de 2674
01c5f864 2675 /* allocate memory for queue storage */
80d19669
AN
2676 for (j = -1; j = netif_attrmask_next_and(j, online_mask, mask, nr_ids),
2677 j < nr_ids;) {
01c5f864
AD
2678 if (!new_dev_maps)
2679 new_dev_maps = kzalloc(maps_sz, GFP_KERNEL);
2bb60cb9
AD
2680 if (!new_dev_maps) {
2681 mutex_unlock(&xps_map_mutex);
01c5f864 2682 return -ENOMEM;
2bb60cb9 2683 }
01c5f864 2684
80d19669
AN
2685 tci = j * num_tc + tc;
2686 map = dev_maps ? xmap_dereference(dev_maps->attr_map[tci]) :
01c5f864
AD
2687 NULL;
2688
80d19669 2689 map = expand_xps_map(map, j, index, is_rxqs_map);
01c5f864
AD
2690 if (!map)
2691 goto error;
2692
80d19669 2693 RCU_INIT_POINTER(new_dev_maps->attr_map[tci], map);
01c5f864
AD
2694 }
2695
2696 if (!new_dev_maps)
2697 goto out_no_new_maps;
2698
867d0ad4
SD
2699 if (!dev_maps) {
2700 /* Increment static keys at most once per type */
2701 static_key_slow_inc_cpuslocked(&xps_needed);
2702 if (is_rxqs_map)
2703 static_key_slow_inc_cpuslocked(&xps_rxqs_needed);
2704 }
04157469 2705
80d19669
AN
2706 for (j = -1; j = netif_attrmask_next(j, possible_mask, nr_ids),
2707 j < nr_ids;) {
184c449f 2708 /* copy maps belonging to foreign traffic classes */
80d19669 2709 for (i = tc, tci = j * num_tc; dev_maps && i--; tci++) {
184c449f 2710 /* fill in the new device map from the old device map */
80d19669
AN
2711 map = xmap_dereference(dev_maps->attr_map[tci]);
2712 RCU_INIT_POINTER(new_dev_maps->attr_map[tci], map);
184c449f
AD
2713 }
2714
 2715 /* We need to explicitly update tci as the previous loop
2716 * could break out early if dev_maps is NULL.
2717 */
80d19669 2718 tci = j * num_tc + tc;
184c449f 2719
80d19669
AN
2720 if (netif_attr_test_mask(j, mask, nr_ids) &&
2721 netif_attr_test_online(j, online_mask, nr_ids)) {
2722 /* add tx-queue to CPU/rx-queue maps */
01c5f864
AD
2723 int pos = 0;
2724
80d19669 2725 map = xmap_dereference(new_dev_maps->attr_map[tci]);
01c5f864
AD
2726 while ((pos < map->len) && (map->queues[pos] != index))
2727 pos++;
2728
2729 if (pos == map->len)
2730 map->queues[map->len++] = index;
537c00de 2731#ifdef CONFIG_NUMA
80d19669
AN
2732 if (!is_rxqs_map) {
2733 if (numa_node_id == -2)
2734 numa_node_id = cpu_to_node(j);
2735 else if (numa_node_id != cpu_to_node(j))
2736 numa_node_id = -1;
2737 }
537c00de 2738#endif
01c5f864
AD
2739 } else if (dev_maps) {
2740 /* fill in the new device map from the old device map */
80d19669
AN
2741 map = xmap_dereference(dev_maps->attr_map[tci]);
2742 RCU_INIT_POINTER(new_dev_maps->attr_map[tci], map);
537c00de 2743 }
01c5f864 2744
184c449f
AD
2745 /* copy maps belonging to foreign traffic classes */
2746 for (i = num_tc - tc, tci++; dev_maps && --i; tci++) {
2747 /* fill in the new device map from the old device map */
80d19669
AN
2748 map = xmap_dereference(dev_maps->attr_map[tci]);
2749 RCU_INIT_POINTER(new_dev_maps->attr_map[tci], map);
184c449f 2750 }
537c00de
AD
2751 }
2752
80d19669
AN
2753 if (is_rxqs_map)
2754 rcu_assign_pointer(dev->xps_rxqs_map, new_dev_maps);
2755 else
2756 rcu_assign_pointer(dev->xps_cpus_map, new_dev_maps);
01c5f864 2757
537c00de 2758 /* Cleanup old maps */
184c449f
AD
2759 if (!dev_maps)
2760 goto out_no_old_maps;
2761
80d19669
AN
2762 for (j = -1; j = netif_attrmask_next(j, possible_mask, nr_ids),
2763 j < nr_ids;) {
2764 for (i = num_tc, tci = j * num_tc; i--; tci++) {
2765 new_map = xmap_dereference(new_dev_maps->attr_map[tci]);
2766 map = xmap_dereference(dev_maps->attr_map[tci]);
01c5f864
AD
2767 if (map && map != new_map)
2768 kfree_rcu(map, rcu);
2769 }
537c00de
AD
2770 }
2771
184c449f
AD
2772 kfree_rcu(dev_maps, rcu);
2773
2774out_no_old_maps:
01c5f864
AD
2775 dev_maps = new_dev_maps;
2776 active = true;
537c00de 2777
01c5f864 2778out_no_new_maps:
80d19669
AN
2779 if (!is_rxqs_map) {
2780 /* update Tx queue numa node */
2781 netdev_queue_numa_node_write(netdev_get_tx_queue(dev, index),
2782 (numa_node_id >= 0) ?
2783 numa_node_id : NUMA_NO_NODE);
2784 }
537c00de 2785
01c5f864
AD
2786 if (!dev_maps)
2787 goto out_no_maps;
2788
80d19669
AN
2789 /* removes tx-queue from unused CPUs/rx-queues */
2790 for (j = -1; j = netif_attrmask_next(j, possible_mask, nr_ids),
2791 j < nr_ids;) {
2792 for (i = tc, tci = j * num_tc; i--; tci++)
184c449f 2793 active |= remove_xps_queue(dev_maps, tci, index);
80d19669
AN
2794 if (!netif_attr_test_mask(j, mask, nr_ids) ||
2795 !netif_attr_test_online(j, online_mask, nr_ids))
184c449f
AD
2796 active |= remove_xps_queue(dev_maps, tci, index);
2797 for (i = num_tc - tc, tci++; --i; tci++)
2798 active |= remove_xps_queue(dev_maps, tci, index);
01c5f864
AD
2799 }
2800
2801 /* free map if not active */
867d0ad4
SD
2802 if (!active)
2803 reset_xps_maps(dev, dev_maps, is_rxqs_map);
01c5f864
AD
2804
2805out_no_maps:
537c00de
AD
2806 mutex_unlock(&xps_map_mutex);
2807
2808 return 0;
2809error:
01c5f864 2810 /* remove any maps that we added */
80d19669
AN
2811 for (j = -1; j = netif_attrmask_next(j, possible_mask, nr_ids),
2812 j < nr_ids;) {
2813 for (i = num_tc, tci = j * num_tc; i--; tci++) {
2814 new_map = xmap_dereference(new_dev_maps->attr_map[tci]);
184c449f 2815 map = dev_maps ?
80d19669 2816 xmap_dereference(dev_maps->attr_map[tci]) :
184c449f
AD
2817 NULL;
2818 if (new_map && new_map != map)
2819 kfree(new_map);
2820 }
01c5f864
AD
2821 }
2822
537c00de
AD
2823 mutex_unlock(&xps_map_mutex);
2824
537c00de
AD
2825 kfree(new_dev_maps);
2826 return -ENOMEM;
2827}
4d99f660 2828EXPORT_SYMBOL_GPL(__netif_set_xps_queue);
80d19669
AN
2829
2830int netif_set_xps_queue(struct net_device *dev, const struct cpumask *mask,
2831 u16 index)
2832{
4d99f660
AV
2833 int ret;
2834
2835 cpus_read_lock();
2836 ret = __netif_set_xps_queue(dev, cpumask_bits(mask), index, false);
2837 cpus_read_unlock();
2838
2839 return ret;
80d19669 2840}
537c00de
AD
2841EXPORT_SYMBOL(netif_set_xps_queue);
2842
2843#endif
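/* XPS sketch: pin each transmit queue to the CPU of the same index. A
 * real driver would derive the masks from IRQ affinity; cpumask_of() is
 * used here only to keep the hypothetical example_setup_xps() small.
 */
static void example_setup_xps(struct net_device *dev)
{
	int i;

	for (i = 0; i < dev->real_num_tx_queues && i < nr_cpu_ids; i++)
		netif_set_xps_queue(dev, cpumask_of(i), i);
}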
ffcfe25b
AD
2844static void netdev_unbind_all_sb_channels(struct net_device *dev)
2845{
2846 struct netdev_queue *txq = &dev->_tx[dev->num_tx_queues];
2847
2848 /* Unbind any subordinate channels */
2849 while (txq-- != &dev->_tx[0]) {
2850 if (txq->sb_dev)
2851 netdev_unbind_sb_channel(dev, txq->sb_dev);
2852 }
2853}
2854
9cf1f6a8
AD
2855void netdev_reset_tc(struct net_device *dev)
2856{
6234f874
AD
2857#ifdef CONFIG_XPS
2858 netif_reset_xps_queues_gt(dev, 0);
2859#endif
ffcfe25b
AD
2860 netdev_unbind_all_sb_channels(dev);
2861
2862 /* Reset TC configuration of device */
9cf1f6a8
AD
2863 dev->num_tc = 0;
2864 memset(dev->tc_to_txq, 0, sizeof(dev->tc_to_txq));
2865 memset(dev->prio_tc_map, 0, sizeof(dev->prio_tc_map));
2866}
2867EXPORT_SYMBOL(netdev_reset_tc);
2868
2869int netdev_set_tc_queue(struct net_device *dev, u8 tc, u16 count, u16 offset)
2870{
2871 if (tc >= dev->num_tc)
2872 return -EINVAL;
2873
6234f874
AD
2874#ifdef CONFIG_XPS
2875 netif_reset_xps_queues(dev, offset, count);
2876#endif
9cf1f6a8
AD
2877 dev->tc_to_txq[tc].count = count;
2878 dev->tc_to_txq[tc].offset = offset;
2879 return 0;
2880}
2881EXPORT_SYMBOL(netdev_set_tc_queue);
2882
2883int netdev_set_num_tc(struct net_device *dev, u8 num_tc)
2884{
2885 if (num_tc > TC_MAX_QUEUE)
2886 return -EINVAL;
2887
6234f874
AD
2888#ifdef CONFIG_XPS
2889 netif_reset_xps_queues_gt(dev, 0);
2890#endif
ffcfe25b
AD
2891 netdev_unbind_all_sb_channels(dev);
2892
9cf1f6a8
AD
2893 dev->num_tc = num_tc;
2894 return 0;
2895}
2896EXPORT_SYMBOL(netdev_set_num_tc);
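/* Traffic-class sketch: carve eight real queues into two TCs of four
 * queues each, then map priority 5 to TC1 and everything else to TC0.
 * The numbers and example_setup_tc() itself are illustrative only.
 */
static void example_setup_tc(struct net_device *dev)
{
	int prio;

	netdev_set_num_tc(dev, 2);
	netdev_set_tc_queue(dev, 0, 4, 0);	/* TC0: queues 0-3 */
	netdev_set_tc_queue(dev, 1, 4, 4);	/* TC1: queues 4-7 */
	for (prio = 0; prio <= TC_BITMASK; prio++)
		netdev_set_prio_tc_map(dev, prio, prio == 5 ? 1 : 0);
}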
2897
ffcfe25b
AD
2898void netdev_unbind_sb_channel(struct net_device *dev,
2899 struct net_device *sb_dev)
2900{
2901 struct netdev_queue *txq = &dev->_tx[dev->num_tx_queues];
2902
2903#ifdef CONFIG_XPS
2904 netif_reset_xps_queues_gt(sb_dev, 0);
2905#endif
2906 memset(sb_dev->tc_to_txq, 0, sizeof(sb_dev->tc_to_txq));
2907 memset(sb_dev->prio_tc_map, 0, sizeof(sb_dev->prio_tc_map));
2908
2909 while (txq-- != &dev->_tx[0]) {
2910 if (txq->sb_dev == sb_dev)
2911 txq->sb_dev = NULL;
2912 }
2913}
2914EXPORT_SYMBOL(netdev_unbind_sb_channel);
2915
2916int netdev_bind_sb_channel_queue(struct net_device *dev,
2917 struct net_device *sb_dev,
2918 u8 tc, u16 count, u16 offset)
2919{
2920 /* Make certain the sb_dev and dev are already configured */
2921 if (sb_dev->num_tc >= 0 || tc >= dev->num_tc)
2922 return -EINVAL;
2923
2924 /* We cannot hand out queues we don't have */
2925 if ((offset + count) > dev->real_num_tx_queues)
2926 return -EINVAL;
2927
2928 /* Record the mapping */
2929 sb_dev->tc_to_txq[tc].count = count;
2930 sb_dev->tc_to_txq[tc].offset = offset;
2931
2932 /* Provide a way for Tx queue to find the tc_to_txq map or
2933 * XPS map for itself.
2934 */
2935 while (count--)
2936 netdev_get_tx_queue(dev, count + offset)->sb_dev = sb_dev;
2937
2938 return 0;
2939}
2940EXPORT_SYMBOL(netdev_bind_sb_channel_queue);
2941
2942int netdev_set_sb_channel(struct net_device *dev, u16 channel)
2943{
2944 /* Do not use a multiqueue device to represent a subordinate channel */
2945 if (netif_is_multiqueue(dev))
2946 return -ENODEV;
2947
2948 /* We allow channels 1 - 32767 to be used for subordinate channels.
2949 * Channel 0 is meant to be "native" mode and used only to represent
2950 * the main root device. We allow writing 0 to reset the device back
2951 * to normal mode after being used as a subordinate channel.
2952 */
2953 if (channel > S16_MAX)
2954 return -EINVAL;
2955
2956 dev->num_tc = -channel;
2957
2958 return 0;
2959}
2960EXPORT_SYMBOL(netdev_set_sb_channel);
2961
f0796d5c
JF
2962/*
2963 * Routine to help set real_num_tx_queues. To avoid skbs mapped to queues
3a053b1a 2964 * greater than real_num_tx_queues, stale skbs on the qdisc must be flushed.
f0796d5c 2965 */
e6484930 2966int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq)
f0796d5c 2967{
ac5b7019 2968 bool disabling;
1d24eb48
TH
2969 int rc;
2970
ac5b7019
JK
2971 disabling = txq < dev->real_num_tx_queues;
2972
e6484930
TH
2973 if (txq < 1 || txq > dev->num_tx_queues)
2974 return -EINVAL;
f0796d5c 2975
5c56580b
BH
2976 if (dev->reg_state == NETREG_REGISTERED ||
2977 dev->reg_state == NETREG_UNREGISTERING) {
e6484930
TH
2978 ASSERT_RTNL();
2979
1d24eb48
TH
2980 rc = netdev_queue_update_kobjects(dev, dev->real_num_tx_queues,
2981 txq);
bf264145
TH
2982 if (rc)
2983 return rc;
2984
4f57c087
JF
2985 if (dev->num_tc)
2986 netif_setup_tc(dev, txq);
2987
ac5b7019
JK
2988 dev->real_num_tx_queues = txq;
2989
2990 if (disabling) {
2991 synchronize_net();
e6484930 2992 qdisc_reset_all_tx_gt(dev, txq);
024e9679
AD
2993#ifdef CONFIG_XPS
2994 netif_reset_xps_queues_gt(dev, txq);
2995#endif
2996 }
ac5b7019
JK
2997 } else {
2998 dev->real_num_tx_queues = txq;
f0796d5c 2999 }
e6484930 3000
e6484930 3001 return 0;
f0796d5c
JF
3002}
3003EXPORT_SYMBOL(netif_set_real_num_tx_queues);
56079431 3004
a953be53 3005#ifdef CONFIG_SYSFS
62fe0b40
BH
3006/**
3007 * netif_set_real_num_rx_queues - set actual number of RX queues used
3008 * @dev: Network device
3009 * @rxq: Actual number of RX queues
3010 *
3011 * This must be called either with the rtnl_lock held or before
3012 * registration of the net device. Returns 0 on success, or a
4e7f7951
BH
3013 * negative error code. If called before registration, it always
3014 * succeeds.
62fe0b40
BH
3015 */
3016int netif_set_real_num_rx_queues(struct net_device *dev, unsigned int rxq)
3017{
3018 int rc;
3019
bd25fa7b
TH
3020 if (rxq < 1 || rxq > dev->num_rx_queues)
3021 return -EINVAL;
3022
62fe0b40
BH
3023 if (dev->reg_state == NETREG_REGISTERED) {
3024 ASSERT_RTNL();
3025
62fe0b40
BH
3026 rc = net_rx_queue_update_kobjects(dev, dev->real_num_rx_queues,
3027 rxq);
3028 if (rc)
3029 return rc;
62fe0b40
BH
3030 }
3031
3032 dev->real_num_rx_queues = rxq;
3033 return 0;
3034}
3035EXPORT_SYMBOL(netif_set_real_num_rx_queues);
3036#endif
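/* Resizing sketch: drivers typically allocate the maximum number of
 * queues at probe time and later trim the active set, e.g. on an
 * ethtool channel change. example_set_channels() is hypothetical; both
 * calls require RTNL once the device is registered.
 */
static int example_set_channels(struct net_device *dev,
				unsigned int txq, unsigned int rxq)
{
	int err;

	ASSERT_RTNL();
	err = netif_set_real_num_tx_queues(dev, txq);
	if (err)
		return err;
	return netif_set_real_num_rx_queues(dev, rxq);
}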
3037
2c53040f
BH
3038/**
3039 * netif_get_num_default_rss_queues - default number of RSS queues
16917b87
YM
3040 *
3041 * This routine should set an upper limit on the number of RSS queues
3042 * used by default by multiqueue devices.
3043 */
a55b138b 3044int netif_get_num_default_rss_queues(void)
16917b87 3045{
40e4e713
HS
3046 return is_kdump_kernel() ?
3047 1 : min_t(int, DEFAULT_MAX_NUM_RSS_QUEUES, num_online_cpus());
16917b87
YM
3048}
3049EXPORT_SYMBOL(netif_get_num_default_rss_queues);
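/* Sizing sketch: a multiqueue driver caps its default channel count by
 * both a hardware limit and the RSS default, e.g.:
 *
 *	nch = min_t(int, hw_max_queues, netif_get_num_default_rss_queues());
 *
 * where hw_max_queues is a hypothetical per-device limit.
 */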
3050
3bcb846c 3051static void __netif_reschedule(struct Qdisc *q)
56079431 3052{
def82a1d
JP
3053 struct softnet_data *sd;
3054 unsigned long flags;
56079431 3055
def82a1d 3056 local_irq_save(flags);
903ceff7 3057 sd = this_cpu_ptr(&softnet_data);
a9cbd588
CG
3058 q->next_sched = NULL;
3059 *sd->output_queue_tailp = q;
3060 sd->output_queue_tailp = &q->next_sched;
def82a1d
JP
3061 raise_softirq_irqoff(NET_TX_SOFTIRQ);
3062 local_irq_restore(flags);
3063}
3064
3065void __netif_schedule(struct Qdisc *q)
3066{
3067 if (!test_and_set_bit(__QDISC_STATE_SCHED, &q->state))
3068 __netif_reschedule(q);
56079431
DV
3069}
3070EXPORT_SYMBOL(__netif_schedule);
3071
e6247027
ED
3072struct dev_kfree_skb_cb {
3073 enum skb_free_reason reason;
3074};
3075
3076static struct dev_kfree_skb_cb *get_kfree_skb_cb(const struct sk_buff *skb)
56079431 3077{
e6247027
ED
3078 return (struct dev_kfree_skb_cb *)skb->cb;
3079}
3080
46e5da40
JF
3081void netif_schedule_queue(struct netdev_queue *txq)
3082{
3083 rcu_read_lock();
5be5515a 3084 if (!netif_xmit_stopped(txq)) {
46e5da40
JF
3085 struct Qdisc *q = rcu_dereference(txq->qdisc);
3086
3087 __netif_schedule(q);
3088 }
3089 rcu_read_unlock();
3090}
3091EXPORT_SYMBOL(netif_schedule_queue);
3092
46e5da40
JF
3093void netif_tx_wake_queue(struct netdev_queue *dev_queue)
3094{
3095 if (test_and_clear_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state)) {
3096 struct Qdisc *q;
3097
3098 rcu_read_lock();
3099 q = rcu_dereference(dev_queue->qdisc);
3100 __netif_schedule(q);
3101 rcu_read_unlock();
3102 }
3103}
3104EXPORT_SYMBOL(netif_tx_wake_queue);
3105
e6247027 3106void __dev_kfree_skb_irq(struct sk_buff *skb, enum skb_free_reason reason)
56079431 3107{
e6247027 3108 unsigned long flags;
56079431 3109
9899886d
MJ
3110 if (unlikely(!skb))
3111 return;
3112
63354797 3113 if (likely(refcount_read(&skb->users) == 1)) {
e6247027 3114 smp_rmb();
63354797
RE
3115 refcount_set(&skb->users, 0);
3116 } else if (likely(!refcount_dec_and_test(&skb->users))) {
e6247027 3117 return;
bea3348e 3118 }
e6247027
ED
3119 get_kfree_skb_cb(skb)->reason = reason;
3120 local_irq_save(flags);
3121 skb->next = __this_cpu_read(softnet_data.completion_queue);
3122 __this_cpu_write(softnet_data.completion_queue, skb);
3123 raise_softirq_irqoff(NET_TX_SOFTIRQ);
3124 local_irq_restore(flags);
56079431 3125}
e6247027 3126EXPORT_SYMBOL(__dev_kfree_skb_irq);
56079431 3127
e6247027 3128void __dev_kfree_skb_any(struct sk_buff *skb, enum skb_free_reason reason)
56079431
DV
3129{
3130 if (in_irq() || irqs_disabled())
e6247027 3131 __dev_kfree_skb_irq(skb, reason);
56079431
DV
3132 else
3133 dev_kfree_skb(skb);
3134}
e6247027 3135EXPORT_SYMBOL(__dev_kfree_skb_any);
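/* Completion sketch: drivers whose TX cleanup can run in hard-IRQ
 * context free skbs with the _any variants, which defer the free to
 * softirq when needed:
 *
 *	dev_consume_skb_any(skb);	transmitted successfully
 *	dev_kfree_skb_any(skb);		dropped
 *
 * Both are wrappers around __dev_kfree_skb_any() with the matching
 * skb_free_reason.
 */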
56079431
DV
3136
3137
bea3348e
SH
3138/**
3139 * netif_device_detach - mark device as removed
3140 * @dev: network device
3141 *
3142 * Mark device as removed from system and therefore no longer available.
3143 */
56079431
DV
3144void netif_device_detach(struct net_device *dev)
3145{
3146 if (test_and_clear_bit(__LINK_STATE_PRESENT, &dev->state) &&
3147 netif_running(dev)) {
d543103a 3148 netif_tx_stop_all_queues(dev);
56079431
DV
3149 }
3150}
3151EXPORT_SYMBOL(netif_device_detach);
3152
bea3348e
SH
3153/**
3154 * netif_device_attach - mark device as attached
3155 * @dev: network device
3156 *
3157 * Mark device as attached from system and restart if needed.
3158 */
56079431
DV
3159void netif_device_attach(struct net_device *dev)
3160{
3161 if (!test_and_set_bit(__LINK_STATE_PRESENT, &dev->state) &&
3162 netif_running(dev)) {
d543103a 3163 netif_tx_wake_all_queues(dev);
4ec93edb 3164 __netdev_watchdog_up(dev);
56079431
DV
3165 }
3166}
3167EXPORT_SYMBOL(netif_device_attach);
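/* Suspend/resume sketch (schematic PCI driver, not from this file):
 * mark the device absent before powering down and present again on
 * resume; example_suspend()/example_resume() are hypothetical.
 */
static int example_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	netif_device_detach(dev);	/* stops all TX queues if running */
	/* ... save state and power down ... */
	return 0;
}

static int example_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	/* ... power up and restore state ... */
	netif_device_attach(dev);	/* restarts queues and watchdog */
	return 0;
}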
3168
5605c762
JP
3169/*
 3170 * Returns a Tx hash based on the given packet descriptor and a number
 3171 * of Tx queues to be used as a distribution range.
3172 */
eadec877
AD
3173static u16 skb_tx_hash(const struct net_device *dev,
3174 const struct net_device *sb_dev,
3175 struct sk_buff *skb)
5605c762
JP
3176{
3177 u32 hash;
3178 u16 qoffset = 0;
1b837d48 3179 u16 qcount = dev->real_num_tx_queues;
5605c762 3180
eadec877
AD
3181 if (dev->num_tc) {
3182 u8 tc = netdev_get_prio_tc_map(dev, skb->priority);
3183
3184 qoffset = sb_dev->tc_to_txq[tc].offset;
3185 qcount = sb_dev->tc_to_txq[tc].count;
3186 }
3187
5605c762
JP
3188 if (skb_rx_queue_recorded(skb)) {
3189 hash = skb_get_rx_queue(skb);
6e11d157
AN
3190 if (hash >= qoffset)
3191 hash -= qoffset;
1b837d48
AD
3192 while (unlikely(hash >= qcount))
3193 hash -= qcount;
eadec877 3194 return hash + qoffset;
5605c762
JP
3195 }
3196
3197 return (u16) reciprocal_scale(skb_get_hash(skb), qcount) + qoffset;
3198}
5605c762 3199
36c92474
BH
3200static void skb_warn_bad_offload(const struct sk_buff *skb)
3201{
84d15ae5 3202 static const netdev_features_t null_features;
36c92474 3203 struct net_device *dev = skb->dev;
88ad4175 3204 const char *name = "";
36c92474 3205
c846ad9b
BG
3206 if (!net_ratelimit())
3207 return;
3208
88ad4175
BM
3209 if (dev) {
3210 if (dev->dev.parent)
3211 name = dev_driver_string(dev->dev.parent);
3212 else
3213 name = netdev_name(dev);
3214 }
6413139d
WB
3215 skb_dump(KERN_WARNING, skb, false);
3216 WARN(1, "%s: caps=(%pNF, %pNF)\n",
88ad4175 3217 name, dev ? &dev->features : &null_features,
6413139d 3218 skb->sk ? &skb->sk->sk_route_caps : &null_features);
36c92474
BH
3219}
3220
1da177e4
LT
3221/*
3222 * Invalidate hardware checksum when packet is to be mangled, and
3223 * complete checksum manually on outgoing path.
3224 */
84fa7933 3225int skb_checksum_help(struct sk_buff *skb)
1da177e4 3226{
d3bc23e7 3227 __wsum csum;
663ead3b 3228 int ret = 0, offset;
1da177e4 3229
84fa7933 3230 if (skb->ip_summed == CHECKSUM_COMPLETE)
a430a43d
HX
3231 goto out_set_summed;
3232
3aefd7d6 3233 if (unlikely(skb_is_gso(skb))) {
36c92474
BH
3234 skb_warn_bad_offload(skb);
3235 return -EINVAL;
1da177e4
LT
3236 }
3237
cef401de
ED
3238 /* Before computing a checksum, we should make sure no frag could
3239 * be modified by an external entity : checksum could be wrong.
3240 */
3241 if (skb_has_shared_frag(skb)) {
3242 ret = __skb_linearize(skb);
3243 if (ret)
3244 goto out;
3245 }
3246
55508d60 3247 offset = skb_checksum_start_offset(skb);
a030847e
HX
3248 BUG_ON(offset >= skb_headlen(skb));
3249 csum = skb_checksum(skb, offset, skb->len - offset, 0);
3250
3251 offset += skb->csum_offset;
3252 BUG_ON(offset + sizeof(__sum16) > skb_headlen(skb));
3253
8211fbfa
HK
3254 ret = skb_ensure_writable(skb, offset + sizeof(__sum16));
3255 if (ret)
3256 goto out;
1da177e4 3257
4f2e4ad5 3258 *(__sum16 *)(skb->data + offset) = csum_fold(csum) ?: CSUM_MANGLED_0;
a430a43d 3259out_set_summed:
1da177e4 3260 skb->ip_summed = CHECKSUM_NONE;
4ec93edb 3261out:
1da177e4
LT
3262 return ret;
3263}
d1b19dff 3264EXPORT_SYMBOL(skb_checksum_help);
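/* Offload-fallback sketch: an xmit path resolves CHECKSUM_PARTIAL in
 * software when the hardware cannot checksum this particular packet.
 * example_hw_can_csum() and example_xmit() are hypothetical.
 */
static netdev_tx_t example_xmit(struct sk_buff *skb, struct net_device *dev)
{
	if (skb->ip_summed == CHECKSUM_PARTIAL &&
	    !example_hw_can_csum(skb) &&
	    skb_checksum_help(skb))
		goto drop;
	/* ... hand the skb to hardware ... */
	return NETDEV_TX_OK;
drop:
	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
}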
1da177e4 3265
b72b5bf6
DC
3266int skb_crc32c_csum_help(struct sk_buff *skb)
3267{
3268 __le32 crc32c_csum;
3269 int ret = 0, offset, start;
3270
3271 if (skb->ip_summed != CHECKSUM_PARTIAL)
3272 goto out;
3273
3274 if (unlikely(skb_is_gso(skb)))
3275 goto out;
3276
3277 /* Before computing a checksum, we should make sure no frag could
3278 * be modified by an external entity : checksum could be wrong.
3279 */
3280 if (unlikely(skb_has_shared_frag(skb))) {
3281 ret = __skb_linearize(skb);
3282 if (ret)
3283 goto out;
3284 }
3285 start = skb_checksum_start_offset(skb);
3286 offset = start + offsetof(struct sctphdr, checksum);
3287 if (WARN_ON_ONCE(offset >= skb_headlen(skb))) {
3288 ret = -EINVAL;
3289 goto out;
3290 }
8211fbfa
HK
3291
3292 ret = skb_ensure_writable(skb, offset + sizeof(__le32));
3293 if (ret)
3294 goto out;
3295
b72b5bf6
DC
3296 crc32c_csum = cpu_to_le32(~__skb_checksum(skb, start,
3297 skb->len - start, ~(__u32)0,
3298 crc32c_csum_stub));
3299 *(__le32 *)(skb->data + offset) = crc32c_csum;
3300 skb->ip_summed = CHECKSUM_NONE;
dba00306 3301 skb->csum_not_inet = 0;
b72b5bf6
DC
3302out:
3303 return ret;
3304}
3305
53d6471c 3306__be16 skb_network_protocol(struct sk_buff *skb, int *depth)
f6a78bfc 3307{
252e3346 3308 __be16 type = skb->protocol;
f6a78bfc 3309
19acc327
PS
3310 /* Tunnel gso handlers can set protocol to ethernet. */
3311 if (type == htons(ETH_P_TEB)) {
3312 struct ethhdr *eth;
3313
3314 if (unlikely(!pskb_may_pull(skb, sizeof(struct ethhdr))))
3315 return 0;
3316
1dfe82eb 3317 eth = (struct ethhdr *)skb->data;
19acc327
PS
3318 type = eth->h_proto;
3319 }
3320
d4bcef3f 3321 return __vlan_get_protocol(skb, type, depth);
ec5f0615
PS
3322}
3323
3324/**
3325 * skb_mac_gso_segment - mac layer segmentation handler.
3326 * @skb: buffer to segment
3327 * @features: features for the output path (see dev->features)
3328 */
3329struct sk_buff *skb_mac_gso_segment(struct sk_buff *skb,
3330 netdev_features_t features)
3331{
3332 struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
3333 struct packet_offload *ptype;
53d6471c
VY
3334 int vlan_depth = skb->mac_len;
3335 __be16 type = skb_network_protocol(skb, &vlan_depth);
ec5f0615
PS
3336
3337 if (unlikely(!type))
3338 return ERR_PTR(-EINVAL);
3339
53d6471c 3340 __skb_pull(skb, vlan_depth);
f6a78bfc
HX
3341
3342 rcu_read_lock();
22061d80 3343 list_for_each_entry_rcu(ptype, &offload_base, list) {
f191a1d1 3344 if (ptype->type == type && ptype->callbacks.gso_segment) {
f191a1d1 3345 segs = ptype->callbacks.gso_segment(skb, features);
f6a78bfc
HX
3346 break;
3347 }
3348 }
3349 rcu_read_unlock();
3350
98e399f8 3351 __skb_push(skb, skb->data - skb_mac_header(skb));
576a30eb 3352
f6a78bfc
HX
3353 return segs;
3354}
05e8ef4a
PS
3355EXPORT_SYMBOL(skb_mac_gso_segment);
3356
3357
3358/* openvswitch calls this on rx path, so we need a different check.
3359 */
3360static inline bool skb_needs_check(struct sk_buff *skb, bool tx_path)
3361{
3362 if (tx_path)
0c19f846
WB
3363 return skb->ip_summed != CHECKSUM_PARTIAL &&
3364 skb->ip_summed != CHECKSUM_UNNECESSARY;
6e7bc478
ED
3365
3366 return skb->ip_summed == CHECKSUM_NONE;
05e8ef4a
PS
3367}
3368
3369/**
3370 * __skb_gso_segment - Perform segmentation on skb.
3371 * @skb: buffer to segment
3372 * @features: features for the output path (see dev->features)
3373 * @tx_path: whether it is called in TX path
3374 *
3375 * This function segments the given skb and returns a list of segments.
3376 *
3377 * It may return NULL if the skb requires no segmentation. This is
3378 * only possible when GSO is used for verifying header integrity.
9207f9d4 3379 *
a08e7fd9 3380 * Segmentation preserves SKB_GSO_CB_OFFSET bytes of previous skb cb.
05e8ef4a
PS
3381 */
3382struct sk_buff *__skb_gso_segment(struct sk_buff *skb,
3383 netdev_features_t features, bool tx_path)
3384{
b2504a5d
ED
3385 struct sk_buff *segs;
3386
05e8ef4a
PS
3387 if (unlikely(skb_needs_check(skb, tx_path))) {
3388 int err;
3389
b2504a5d 3390 /* We're going to init ->check field in TCP or UDP header */
a40e0a66 3391 err = skb_cow_head(skb, 0);
3392 if (err < 0)
05e8ef4a
PS
3393 return ERR_PTR(err);
3394 }
3395
802ab55a
AD
3396 /* Only report GSO partial support if it will enable us to
3397 * support segmentation on this frame without needing additional
3398 * work.
3399 */
3400 if (features & NETIF_F_GSO_PARTIAL) {
3401 netdev_features_t partial_features = NETIF_F_GSO_ROBUST;
3402 struct net_device *dev = skb->dev;
3403
3404 partial_features |= dev->features & dev->gso_partial_features;
3405 if (!skb_gso_ok(skb, features | partial_features))
3406 features &= ~NETIF_F_GSO_PARTIAL;
3407 }
3408
a08e7fd9 3409 BUILD_BUG_ON(SKB_GSO_CB_OFFSET +
9207f9d4
KK
3410 sizeof(*SKB_GSO_CB(skb)) > sizeof(skb->cb));
3411
68c33163 3412 SKB_GSO_CB(skb)->mac_offset = skb_headroom(skb);
3347c960
ED
3413 SKB_GSO_CB(skb)->encap_level = 0;
3414
05e8ef4a
PS
3415 skb_reset_mac_header(skb);
3416 skb_reset_mac_len(skb);
3417
b2504a5d
ED
3418 segs = skb_mac_gso_segment(skb, features);
3419
3a1296a3 3420 if (segs != skb && unlikely(skb_needs_check(skb, tx_path) && !IS_ERR(segs)))
b2504a5d
ED
3421 skb_warn_bad_offload(skb);
3422
3423 return segs;
05e8ef4a 3424}
12b0004d 3425EXPORT_SYMBOL(__skb_gso_segment);
f6a78bfc 3426
fb286bb2
HX
3427/* Take action when hardware reception checksum errors are detected. */
3428#ifdef CONFIG_BUG
7fe50ac8 3429void netdev_rx_csum_fault(struct net_device *dev, struct sk_buff *skb)
fb286bb2
HX
3430{
3431 if (net_ratelimit()) {
7b6cd1ce 3432 pr_err("%s: hw csum failure\n", dev ? dev->name : "<unknown>");
6413139d 3433 skb_dump(KERN_ERR, skb, true);
fb286bb2
HX
3434 dump_stack();
3435 }
3436}
3437EXPORT_SYMBOL(netdev_rx_csum_fault);
3438#endif
3439
ab74cfeb 3440/* XXX: check that highmem exists at all on the given machine. */
c1e756bf 3441static int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
1da177e4 3442{
3d3a8533 3443#ifdef CONFIG_HIGHMEM
1da177e4 3444 int i;
f4563a75 3445
5acbbd42 3446 if (!(dev->features & NETIF_F_HIGHDMA)) {
3447 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
3448 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
f4563a75 3449
ea2ab693 3450 if (PageHighMem(skb_frag_page(frag)))
5acbbd42 3451 return 1;
ea2ab693 3452 }
5acbbd42 3453 }
3d3a8533 3454#endif
3455 return 0;
3456}
1da177e4 3457
3458/* If MPLS offload request, verify we are testing hardware MPLS features
3459 * instead of standard features for the netdev.
3460 */
d0edc7bf 3461#if IS_ENABLED(CONFIG_NET_MPLS_GSO)
3462static netdev_features_t net_mpls_features(struct sk_buff *skb,
3463 netdev_features_t features,
3464 __be16 type)
3465{
25cd9ba0 3466 if (eth_p_mpls(type))
3467 features &= skb->dev->mpls_features;
3468
3469 return features;
3470}
3471#else
3472static netdev_features_t net_mpls_features(struct sk_buff *skb,
3473 netdev_features_t features,
3474 __be16 type)
3475{
3476 return features;
3477}
3478#endif
3479
c8f44aff 3480static netdev_features_t harmonize_features(struct sk_buff *skb,
c1e756bf 3481 netdev_features_t features)
f01a5236 3482{
3483 __be16 type;
3484
9fc95f50 3485 type = skb_network_protocol(skb, NULL);
3b392ddb 3486 features = net_mpls_features(skb, features, type);
53d6471c 3487
c0d680e5 3488 if (skb->ip_summed != CHECKSUM_NONE &&
3b392ddb 3489 !can_checksum_protocol(features, type)) {
996e8021 3490 features &= ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
f01a5236 3491 }
3492 if (illegal_highdma(skb->dev, skb))
3493 features &= ~NETIF_F_SG;
3494
3495 return features;
3496}
3497
3498netdev_features_t passthru_features_check(struct sk_buff *skb,
3499 struct net_device *dev,
3500 netdev_features_t features)
3501{
3502 return features;
3503}
3504EXPORT_SYMBOL(passthru_features_check);
3505
7ce23672 3506static netdev_features_t dflt_features_check(struct sk_buff *skb,
3507 struct net_device *dev,
3508 netdev_features_t features)
3509{
3510 return vlan_features_check(skb, features);
3511}
3512
3513static netdev_features_t gso_features_check(const struct sk_buff *skb,
3514 struct net_device *dev,
3515 netdev_features_t features)
3516{
3517 u16 gso_segs = skb_shinfo(skb)->gso_segs;
3518
3519 if (gso_segs > dev->gso_max_segs)
3520 return features & ~NETIF_F_GSO_MASK;
3521
3522 if (!skb_shinfo(skb)->gso_type) {
3523 skb_warn_bad_offload(skb);
3524 return features & ~NETIF_F_GSO_MASK;
3525 }
3526
3527	/* Support for GSO partial features requires software
3528	 * intervention before we can actually process the packets,
3529	 * so we need to strip support for any partial features now;
3530	 * we can pull them back in after we have partially
3531	 * segmented the frame.
3532 */
3533 if (!(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL))
3534 features &= ~dev->gso_partial_features;
3535
3536 /* Make sure to clear the IPv4 ID mangling feature if the
3537 * IPv4 header has the potential to be fragmented.
3538 */
3539 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4) {
3540 struct iphdr *iph = skb->encapsulation ?
3541 inner_ip_hdr(skb) : ip_hdr(skb);
3542
3543 if (!(iph->frag_off & htons(IP_DF)))
3544 features &= ~NETIF_F_TSO_MANGLEID;
3545 }
3546
3547 return features;
3548}
3549
c1e756bf 3550netdev_features_t netif_skb_features(struct sk_buff *skb)
58e998c6 3551{
5f35227e 3552 struct net_device *dev = skb->dev;
fcbeb976 3553 netdev_features_t features = dev->features;
58e998c6 3554
3555 if (skb_is_gso(skb))
3556 features = gso_features_check(skb, dev, features);
30b678d8 3557
3558 /* If encapsulation offload request, verify we are testing
3559 * hardware encapsulation features instead of standard
3560 * features for the netdev
3561 */
3562 if (skb->encapsulation)
3563 features &= dev->hw_enc_features;
3564
3565 if (skb_vlan_tagged(skb))
3566 features = netdev_intersect_features(features,
3567 dev->vlan_features |
3568 NETIF_F_HW_VLAN_CTAG_TX |
3569 NETIF_F_HW_VLAN_STAG_TX);
f01a5236 3570
3571 if (dev->netdev_ops->ndo_features_check)
3572 features &= dev->netdev_ops->ndo_features_check(skb, dev,
3573 features);
3574 else
3575 features &= dflt_features_check(skb, dev, features);
5f35227e 3576
c1e756bf 3577 return harmonize_features(skb, features);
58e998c6 3578}
c1e756bf 3579EXPORT_SYMBOL(netif_skb_features);
58e998c6 3580
2ea25513 3581static int xmit_one(struct sk_buff *skb, struct net_device *dev,
95f6b3dd 3582 struct netdev_queue *txq, bool more)
f6a78bfc 3583{
3584 unsigned int len;
3585 int rc;
00829823 3586
9f9a742d 3587 if (dev_nit_active(dev))
2ea25513 3588 dev_queue_xmit_nit(skb, dev);
fc741216 3589
2ea25513 3590 len = skb->len;
3744741a 3591 PRANDOM_ADD_NOISE(skb, dev, txq, len + jiffies);
2ea25513 3592 trace_net_dev_start_xmit(skb, dev);
95f6b3dd 3593 rc = netdev_start_xmit(skb, dev, txq, more);
2ea25513 3594 trace_net_dev_xmit(skb, rc, dev, len);
adf30907 3595
3596 return rc;
3597}
7b9c6090 3598
3599struct sk_buff *dev_hard_start_xmit(struct sk_buff *first, struct net_device *dev,
3600 struct netdev_queue *txq, int *ret)
3601{
3602 struct sk_buff *skb = first;
3603 int rc = NETDEV_TX_OK;
7b9c6090 3604
3605 while (skb) {
3606 struct sk_buff *next = skb->next;
fc70fb64 3607
a8305bff 3608 skb_mark_not_on_list(skb);
95f6b3dd 3609 rc = xmit_one(skb, dev, txq, next != NULL);
3610 if (unlikely(!dev_xmit_complete(rc))) {
3611 skb->next = next;
3612 goto out;
3613 }
6afff0ca 3614
7f2e870f 3615 skb = next;
fe60faa5 3616 if (netif_tx_queue_stopped(txq) && skb) {
3617 rc = NETDEV_TX_BUSY;
3618 break;
9ccb8975 3619 }
7f2e870f 3620 }
9ccb8975 3621
3622out:
3623 *ret = rc;
3624 return skb;
3625}
b40863c6 3626
3627static struct sk_buff *validate_xmit_vlan(struct sk_buff *skb,
3628 netdev_features_t features)
f6a78bfc 3629{
df8a39de 3630 if (skb_vlan_tag_present(skb) &&
3631 !vlan_hw_offload_capable(features, skb->vlan_proto))
3632 skb = __vlan_hwaccel_push_inside(skb);
3633 return skb;
3634}
f6a78bfc 3635
3636int skb_csum_hwoffload_help(struct sk_buff *skb,
3637 const netdev_features_t features)
3638{
fa821170 3639 if (unlikely(skb_csum_is_sctp(skb)))
3640 return !!(features & NETIF_F_SCTP_CRC) ? 0 :
3641 skb_crc32c_csum_help(skb);
3642
3643 if (features & NETIF_F_HW_CSUM)
3644 return 0;
3645
3646 if (features & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)) {
3647 switch (skb->csum_offset) {
3648 case offsetof(struct tcphdr, check):
3649 case offsetof(struct udphdr, check):
3650 return 0;
3651 }
3652 }
3653
3654 return skb_checksum_help(skb);
3655}
3656EXPORT_SYMBOL(skb_csum_hwoffload_help);
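/* Worked example (illustrative): for a CHECKSUM_PARTIAL TCP/IPv4 skb,
 * skb->csum_offset is offsetof(struct tcphdr, check) == 16, so a device
 * advertising only NETIF_F_IP_CSUM may keep the offload and the helper
 * above returns 0.  An SCTP packet instead needs CRC32c, so unless the
 * device has NETIF_F_SCTP_CRC the helper falls back to
 * skb_crc32c_csum_help(); any other unsupported case is completed in
 * software by skb_checksum_help().
 */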
3657
f53c7239 3658static struct sk_buff *validate_xmit_skb(struct sk_buff *skb, struct net_device *dev, bool *again)
3659{
3660 netdev_features_t features;
f6a78bfc 3661
3662 features = netif_skb_features(skb);
3663 skb = validate_xmit_vlan(skb, features);
3664 if (unlikely(!skb))
3665 goto out_null;
7b9c6090 3666
3667 skb = sk_validate_xmit_skb(skb, dev);
3668 if (unlikely(!skb))
3669 goto out_null;
3670
8b86a61d 3671 if (netif_needs_gso(skb, features)) {
3672 struct sk_buff *segs;
3673
3674 segs = skb_gso_segment(skb, features);
cecda693 3675 if (IS_ERR(segs)) {
af6dabc9 3676 goto out_kfree_skb;
3677 } else if (segs) {
3678 consume_skb(skb);
3679 skb = segs;
f6a78bfc 3680 }
3681 } else {
3682 if (skb_needs_linearize(skb, features) &&
3683 __skb_linearize(skb))
3684 goto out_kfree_skb;
4ec93edb 3685
3686 /* If packet is not checksummed and device does not
3687 * support checksumming for this protocol, complete
3688 * checksumming here.
3689 */
3690 if (skb->ip_summed == CHECKSUM_PARTIAL) {
3691 if (skb->encapsulation)
3692 skb_set_inner_transport_header(skb,
3693 skb_checksum_start_offset(skb));
3694 else
3695 skb_set_transport_header(skb,
3696 skb_checksum_start_offset(skb));
43c26a1a 3697 if (skb_csum_hwoffload_help(skb, features))
eae3f88e 3698 goto out_kfree_skb;
7b9c6090 3699 }
0c772159 3700 }
7b9c6090 3701
f53c7239 3702 skb = validate_xmit_xfrm(skb, features, again);
3dca3f38 3703
eae3f88e 3704 return skb;
fc70fb64 3705
3706out_kfree_skb:
3707 kfree_skb(skb);
eae3f88e 3708out_null:
d21fd63e 3709 atomic_long_inc(&dev->tx_dropped);
3710 return NULL;
3711}
6afff0ca 3712
f53c7239 3713struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev, bool *again)
3714{
3715 struct sk_buff *next, *head = NULL, *tail;
3716
bec3cfdc 3717 for (; skb != NULL; skb = next) {
55a93b3e 3718 next = skb->next;
a8305bff 3719 skb_mark_not_on_list(skb);
3720
3721		/* in case skb won't be segmented, point to itself */
3722 skb->prev = skb;
3723
f53c7239 3724 skb = validate_xmit_skb(skb, dev, again);
3725 if (!skb)
3726 continue;
55a93b3e 3727
3728 if (!head)
3729 head = skb;
3730 else
3731 tail->next = skb;
3732 /* If skb was segmented, skb->prev points to
3733 * the last segment. If not, it still contains skb.
3734 */
3735 tail = skb->prev;
3736 }
3737 return head;
f6a78bfc 3738}
104ba78c 3739EXPORT_SYMBOL_GPL(validate_xmit_skb_list);
f6a78bfc 3740
3741static void qdisc_pkt_len_init(struct sk_buff *skb)
3742{
3743 const struct skb_shared_info *shinfo = skb_shinfo(skb);
3744
3745 qdisc_skb_cb(skb)->pkt_len = skb->len;
3746
3747	/* To get a more precise estimate of the bytes sent on the wire,
3748	 * we add the header size of all segments to pkt_len
3749 */
a0dce875 3750 if (shinfo->gso_size && skb_transport_header_was_set(skb)) {
757b8b1d 3751 unsigned int hdr_len;
15e5a030 3752 u16 gso_segs = shinfo->gso_segs;
1def9238 3753
3754 /* mac layer + network layer */
3755 hdr_len = skb_transport_header(skb) - skb_mac_header(skb);
3756
3757 /* + transport layer */
3758 if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))) {
3759 const struct tcphdr *th;
3760 struct tcphdr _tcphdr;
3761
3762 th = skb_header_pointer(skb, skb_transport_offset(skb),
3763 sizeof(_tcphdr), &_tcphdr);
3764 if (likely(th))
3765 hdr_len += __tcp_hdrlen(th);
3766 } else {
3767 struct udphdr _udphdr;
3768
3769 if (skb_header_pointer(skb, skb_transport_offset(skb),
3770 sizeof(_udphdr), &_udphdr))
3771 hdr_len += sizeof(struct udphdr);
3772 }
3773
3774 if (shinfo->gso_type & SKB_GSO_DODGY)
3775 gso_segs = DIV_ROUND_UP(skb->len - hdr_len,
3776 shinfo->gso_size);
3777
3778 qdisc_skb_cb(skb)->pkt_len += (gso_segs - 1) * hdr_len;
3779 }
3780}
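/* Worked example (illustrative): a TSO skb carrying 3 segments of
 * gso_size 1448 behind 66 bytes of headers (14 Ethernet + 20 IPv4 +
 * 32 TCP incl. options) has skb->len = 66 + 3 * 1448 = 4410, so
 *
 *	pkt_len = 4410 + (3 - 1) * 66 = 4542
 *
 * which also accounts for the duplicated headers of the 2nd and 3rd
 * segments that the hardware will emit.
 */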
3781
3782static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
3783 struct net_device *dev,
3784 struct netdev_queue *txq)
3785{
3786 spinlock_t *root_lock = qdisc_lock(q);
520ac30f 3787 struct sk_buff *to_free = NULL;
a2da570d 3788 bool contended;
3789 int rc;
3790
a2da570d 3791 qdisc_calculate_pkt_len(skb, q);
3792
3793 if (q->flags & TCQ_F_NOLOCK) {
ac5c66f2 3794 rc = q->enqueue(skb, q, &to_free) & NET_XMIT_MASK;
379349e9 3795 qdisc_run(q);
3796
3797 if (unlikely(to_free))
3798 kfree_skb_list(to_free);
3799 return rc;
3800 }
3801
3802 /*
3803 * Heuristic to force contended enqueues to serialize on a
3804 * separate lock before trying to get qdisc main lock.
f9eb8aea 3805 * This permits qdisc->running owner to get the lock more
9bf2b8c2 3806 * often and dequeue packets faster.
79640a4c 3807 */
a2da570d 3808 contended = qdisc_is_running(q);
3809 if (unlikely(contended))
3810 spin_lock(&q->busylock);
3811
3812 spin_lock(root_lock);
3813 if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) {
520ac30f 3814 __qdisc_drop(skb, &to_free);
3815 rc = NET_XMIT_DROP;
3816 } else if ((q->flags & TCQ_F_CAN_BYPASS) && !qdisc_qlen(q) &&
bc135b23 3817 qdisc_run_begin(q)) {
3818 /*
3819 * This is a work-conserving queue; there are no old skbs
3820 * waiting to be sent out; and the qdisc is not running -
3821 * xmit the skb directly.
3822 */
bfe0d029 3823
3824 qdisc_bstats_update(q, skb);
3825
55a93b3e 3826 if (sch_direct_xmit(skb, q, dev, txq, root_lock, true)) {
3827 if (unlikely(contended)) {
3828 spin_unlock(&q->busylock);
3829 contended = false;
3830 }
bbd8a0d3 3831 __qdisc_run(q);
6c148184 3832 }
bbd8a0d3 3833
6c148184 3834 qdisc_run_end(q);
3835 rc = NET_XMIT_SUCCESS;
3836 } else {
ac5c66f2 3837 rc = q->enqueue(skb, q, &to_free) & NET_XMIT_MASK;
3838 if (qdisc_run_begin(q)) {
3839 if (unlikely(contended)) {
3840 spin_unlock(&q->busylock);
3841 contended = false;
3842 }
3843 __qdisc_run(q);
6c148184 3844 qdisc_run_end(q);
79640a4c 3845 }
3846 }
3847 spin_unlock(root_lock);
3848 if (unlikely(to_free))
3849 kfree_skb_list(to_free);
3850 if (unlikely(contended))
3851 spin_unlock(&q->busylock);
3852 return rc;
3853}
3854
86f8515f 3855#if IS_ENABLED(CONFIG_CGROUP_NET_PRIO)
3856static void skb_update_prio(struct sk_buff *skb)
3857{
3858 const struct netprio_map *map;
3859 const struct sock *sk;
3860 unsigned int prioidx;
5bc1421e 3861
3862 if (skb->priority)
3863 return;
3864 map = rcu_dereference_bh(skb->dev->priomap);
3865 if (!map)
3866 return;
3867 sk = skb_to_full_sk(skb);
3868 if (!sk)
3869 return;
91c68ce2 3870
3871 prioidx = sock_cgroup_prioidx(&sk->sk_cgrp_data);
3872
3873 if (prioidx < map->priomap_len)
3874 skb->priority = map->priomap[prioidx];
3875}
3876#else
3877#define skb_update_prio(skb)
3878#endif
3879
3880/**
3881 * dev_loopback_xmit - loop back @skb
3882 * @net: network namespace this loopback is happening in
3883 * @sk: sk needed to be a netfilter okfn
3884 * @skb: buffer to transmit
3885 */
0c4b51f0 3886int dev_loopback_xmit(struct net *net, struct sock *sk, struct sk_buff *skb)
3887{
3888 skb_reset_mac_header(skb);
3889 __skb_pull(skb, skb_network_offset(skb));
3890 skb->pkt_type = PACKET_LOOPBACK;
3891 skb->ip_summed = CHECKSUM_UNNECESSARY;
3892 WARN_ON(!skb_dst(skb));
3893 skb_dst_force(skb);
3894 netif_rx_ni(skb);
3895 return 0;
3896}
3897EXPORT_SYMBOL(dev_loopback_xmit);
3898
3899#ifdef CONFIG_NET_EGRESS
3900static struct sk_buff *
3901sch_handle_egress(struct sk_buff *skb, int *ret, struct net_device *dev)
3902{
46209401 3903 struct mini_Qdisc *miniq = rcu_dereference_bh(dev->miniq_egress);
3904 struct tcf_result cl_res;
3905
46209401 3906 if (!miniq)
3907 return skb;
3908
8dc07fdb 3909 /* qdisc_skb_cb(skb)->pkt_len was already set by the caller. */
aadaca9e 3910 qdisc_skb_cb(skb)->mru = 0;
7baf2429 3911 qdisc_skb_cb(skb)->post_ct = false;
46209401 3912 mini_qdisc_bstats_cpu_update(miniq, skb);
1f211a1b 3913
46209401 3914 switch (tcf_classify(skb, miniq->filter_list, &cl_res, false)) {
3915 case TC_ACT_OK:
3916 case TC_ACT_RECLASSIFY:
3917 skb->tc_index = TC_H_MIN(cl_res.classid);
3918 break;
3919 case TC_ACT_SHOT:
46209401 3920 mini_qdisc_qstats_cpu_drop(miniq);
1f211a1b 3921 *ret = NET_XMIT_DROP;
3922 kfree_skb(skb);
3923 return NULL;
3924 case TC_ACT_STOLEN:
3925 case TC_ACT_QUEUED:
e25ea21f 3926 case TC_ACT_TRAP:
1f211a1b 3927 *ret = NET_XMIT_SUCCESS;
7e2c3aea 3928 consume_skb(skb);
3929 return NULL;
3930 case TC_ACT_REDIRECT:
3931 /* No need to push/pop skb's mac_header here on egress! */
3932 skb_do_redirect(skb);
3933 *ret = NET_XMIT_SUCCESS;
3934 return NULL;
3935 default:
3936 break;
3937 }
357b6cc5 3938
3939 return skb;
3940}
3941#endif /* CONFIG_NET_EGRESS */
3942
3943#ifdef CONFIG_XPS
3944static int __get_xps_queue_idx(struct net_device *dev, struct sk_buff *skb,
3945 struct xps_dev_maps *dev_maps, unsigned int tci)
3946{
3947 struct xps_map *map;
3948 int queue_index = -1;
3949
3950 if (dev->num_tc) {
3951 tci *= dev->num_tc;
3952 tci += netdev_get_prio_tc_map(dev, skb->priority);
3953 }
3954
3955 map = rcu_dereference(dev_maps->attr_map[tci]);
3956 if (map) {
3957 if (map->len == 1)
3958 queue_index = map->queues[0];
3959 else
3960 queue_index = map->queues[reciprocal_scale(
3961 skb_get_hash(skb), map->len)];
3962 if (unlikely(queue_index >= dev->real_num_tx_queues))
3963 queue_index = -1;
3964 }
3965 return queue_index;
3966}
3967#endif
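/* Worked example (illustrative): with dev->num_tc = 2, an skb whose
 * priority maps to traffic class 1 and an incoming tci of 1 (CPU 1)
 * gives a lookup index of tci = 1 * 2 + 1 = 3; the queue is then taken
 * from dev_maps->attr_map[3], hashed across map->len entries when that
 * map holds more than one queue.
 */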
3968
3969static int get_xps_queue(struct net_device *dev, struct net_device *sb_dev,
3970 struct sk_buff *skb)
3971{
3972#ifdef CONFIG_XPS
3973 struct xps_dev_maps *dev_maps;
fc9bab24 3974 struct sock *sk = skb->sk;
3975 int queue_index = -1;
3976
3977 if (!static_key_false(&xps_needed))
3978 return -1;
3979
638b2a69 3980 rcu_read_lock();
3981 if (!static_key_false(&xps_rxqs_needed))
3982 goto get_cpus_map;
3983
eadec877 3984 dev_maps = rcu_dereference(sb_dev->xps_rxqs_map);
638b2a69 3985 if (dev_maps) {
fc9bab24 3986 int tci = sk_rx_queue_get(sk);
184c449f 3987
3988 if (tci >= 0 && tci < dev->num_rx_queues)
3989 queue_index = __get_xps_queue_idx(dev, skb, dev_maps,
3990 tci);
3991 }
184c449f 3992
3993get_cpus_map:
3994 if (queue_index < 0) {
eadec877 3995 dev_maps = rcu_dereference(sb_dev->xps_cpus_map);
3996 if (dev_maps) {
3997 unsigned int tci = skb->sender_cpu - 1;
3998
3999 queue_index = __get_xps_queue_idx(dev, skb, dev_maps,
4000 tci);
4001 }
4002 }
4003 rcu_read_unlock();
4004
4005 return queue_index;
4006#else
4007 return -1;
4008#endif
4009}
4010
a4ea8a3d 4011u16 dev_pick_tx_zero(struct net_device *dev, struct sk_buff *skb,
a350ecce 4012 struct net_device *sb_dev)
4013{
4014 return 0;
4015}
4016EXPORT_SYMBOL(dev_pick_tx_zero);
4017
4018u16 dev_pick_tx_cpu_id(struct net_device *dev, struct sk_buff *skb,
a350ecce 4019 struct net_device *sb_dev)
4020{
4021 return (u16)raw_smp_processor_id() % dev->real_num_tx_queues;
4022}
4023EXPORT_SYMBOL(dev_pick_tx_cpu_id);
4024
4025u16 netdev_pick_tx(struct net_device *dev, struct sk_buff *skb,
4026 struct net_device *sb_dev)
4027{
4028 struct sock *sk = skb->sk;
4029 int queue_index = sk_tx_queue_get(sk);
4030
4031 sb_dev = sb_dev ? : dev;
4032
4033 if (queue_index < 0 || skb->ooo_okay ||
4034 queue_index >= dev->real_num_tx_queues) {
eadec877 4035 int new_index = get_xps_queue(dev, sb_dev, skb);
f4563a75 4036
638b2a69 4037 if (new_index < 0)
eadec877 4038 new_index = skb_tx_hash(dev, sb_dev, skb);
4039
4040 if (queue_index != new_index && sk &&
004a5d01 4041 sk_fullsock(sk) &&
4042 rcu_access_pointer(sk->sk_dst_cache))
4043 sk_tx_queue_set(sk, new_index);
4044
4045 queue_index = new_index;
4046 }
4047
4048 return queue_index;
4049}
b71b5837 4050EXPORT_SYMBOL(netdev_pick_tx);
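/* Illustrative sketch (hypothetical "mydrv" names): a driver that only
 * needs to special-case some traffic can fall back to this default from
 * its own ndo_select_queue():
 *
 *	static u16 mydrv_select_queue(struct net_device *dev,
 *				      struct sk_buff *skb,
 *				      struct net_device *sb_dev)
 *	{
 *		if (skb->priority == TC_PRIO_CONTROL)
 *			return dev->real_num_tx_queues - 1;
 *		return netdev_pick_tx(dev, skb, sb_dev);
 *	}
 */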
638b2a69 4051
4052struct netdev_queue *netdev_core_pick_tx(struct net_device *dev,
4053 struct sk_buff *skb,
4054 struct net_device *sb_dev)
4055{
4056 int queue_index = 0;
4057
4058#ifdef CONFIG_XPS
4059 u32 sender_cpu = skb->sender_cpu - 1;
4060
4061 if (sender_cpu >= (u32)NR_CPUS)
4062 skb->sender_cpu = raw_smp_processor_id() + 1;
4063#endif
4064
4065 if (dev->real_num_tx_queues != 1) {
4066 const struct net_device_ops *ops = dev->netdev_ops;
f4563a75 4067
638b2a69 4068 if (ops->ndo_select_queue)
a350ecce 4069 queue_index = ops->ndo_select_queue(dev, skb, sb_dev);
638b2a69 4070 else
4bd97d51 4071 queue_index = netdev_pick_tx(dev, skb, sb_dev);
638b2a69 4072
d584527c 4073 queue_index = netdev_cap_txqueue(dev, queue_index);
4074 }
4075
4076 skb_set_queue_mapping(skb, queue_index);
4077 return netdev_get_tx_queue(dev, queue_index);
4078}
4079
d29f749e 4080/**
9d08dd3d 4081 * __dev_queue_xmit - transmit a buffer
d29f749e 4082 * @skb: buffer to transmit
eadec877 4083 * @sb_dev: subordinate device used for L2 forwarding offload
4084 *
4085 * Queue a buffer for transmission to a network device. The caller must
4086 * have set the device and priority and built the buffer before calling
4087 * this function. The function can be called from an interrupt.
4088 *
4089 * A negative errno code is returned on a failure. A success does not
4090 * guarantee the frame will be transmitted as it may be dropped due
4091 * to congestion or traffic shaping.
4092 *
4093 * -----------------------------------------------------------------------------------
4094 * I notice this method can also return errors from the queue disciplines,
4095 * including NET_XMIT_DROP, which is a positive value. So, errors can also
4096 * be positive.
4097 *
4098 * Regardless of the return value, the skb is consumed, so it is currently
4099 * difficult to retry a send to this method. (You can bump the ref count
4100 * before sending to hold a reference for retry if you are careful.)
4101 *
4102 * When calling this method, interrupts MUST be enabled. This is because
4103 * the BH enable code must have IRQs enabled so that it will not deadlock.
4104 * --BLG
4105 */
eadec877 4106static int __dev_queue_xmit(struct sk_buff *skb, struct net_device *sb_dev)
4107{
4108 struct net_device *dev = skb->dev;
dc2b4847 4109 struct netdev_queue *txq;
4110 struct Qdisc *q;
4111 int rc = -ENOMEM;
f53c7239 4112 bool again = false;
1da177e4 4113
4114 skb_reset_mac_header(skb);
4115
e7fd2885 4116 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_SCHED_TSTAMP))
e7ed11ee 4117 __skb_tstamp_tx(skb, NULL, NULL, skb->sk, SCM_TSTAMP_SCHED);
e7fd2885 4118
4119 /* Disable soft irqs for various locks below. Also
4120 * stops preemption for RCU.
1da177e4 4121 */
4ec93edb 4122 rcu_read_lock_bh();
1da177e4 4123
4124 skb_update_prio(skb);
4125
4126 qdisc_pkt_len_init(skb);
4127#ifdef CONFIG_NET_CLS_ACT
8dc07fdb 4128 skb->tc_at_ingress = 0;
357b6cc5 4129# ifdef CONFIG_NET_EGRESS
aabf6772 4130 if (static_branch_unlikely(&egress_needed_key)) {
4131 skb = sch_handle_egress(skb, &rc, dev);
4132 if (!skb)
4133 goto out;
4134 }
357b6cc5 4135# endif
1f211a1b 4136#endif
4137 /* If device/qdisc don't need skb->dst, release it right now while
4138	 * it's hot in this cpu cache.
4139 */
4140 if (dev->priv_flags & IFF_XMIT_DST_RELEASE)
4141 skb_dst_drop(skb);
4142 else
4143 skb_dst_force(skb);
4144
4bd97d51 4145 txq = netdev_core_pick_tx(dev, skb, sb_dev);
a898def2 4146 q = rcu_dereference_bh(txq->qdisc);
37437bb2 4147
cf66ba58 4148 trace_net_dev_queue(skb);
1da177e4 4149 if (q->enqueue) {
bbd8a0d3 4150 rc = __dev_xmit_skb(skb, q, dev, txq);
37437bb2 4151 goto out;
4152 }
4153
4154	/* The device has no queue. Common case for software devices:
eb13da1a 4155	 * loopback, all sorts of tunnels...
1da177e4 4156	 *
eb13da1a 4157	 * Really, it is unlikely that netif_tx_lock protection is necessary
4158	 * here. (f.e. loopback and IP tunnels are clean, ignoring statistics
4159	 * counters.)
4160	 * However, it is possible that they rely on the protection
4161	 * made by us here.
1da177e4 4162	 *
eb13da1a 4163	 * Check this and take the lock. It is not prone to deadlocks.
4164	 * Either way, taking it for a noqueue qdisc is even simpler 8)
4165	 */
4166 if (dev->flags & IFF_UP) {
4167 int cpu = smp_processor_id(); /* ok because BHs are off */
4168
c773e847 4169 if (txq->xmit_lock_owner != cpu) {
97cdcf37 4170 if (dev_xmit_recursion())
4171 goto recursion_alert;
4172
f53c7239 4173 skb = validate_xmit_skb(skb, dev, &again);
1f59533f 4174 if (!skb)
d21fd63e 4175 goto out;
1f59533f 4176
3744741a 4177 PRANDOM_ADD_NOISE(skb, dev, txq, jiffies);
c773e847 4178 HARD_TX_LOCK(dev, txq, cpu);
1da177e4 4179
73466498 4180 if (!netif_xmit_stopped(txq)) {
97cdcf37 4181 dev_xmit_recursion_inc();
ce93718f 4182 skb = dev_hard_start_xmit(skb, dev, txq, &rc);
97cdcf37 4183 dev_xmit_recursion_dec();
572a9d7b 4184 if (dev_xmit_complete(rc)) {
c773e847 4185 HARD_TX_UNLOCK(dev, txq);
4186 goto out;
4187 }
4188 }
c773e847 4189 HARD_TX_UNLOCK(dev, txq);
4190 net_crit_ratelimited("Virtual device %s asks to queue packet!\n",
4191 dev->name);
4192 } else {
4193 /* Recursion is detected! It is possible,
4194 * unfortunately
4195 */
4196recursion_alert:
4197 net_crit_ratelimited("Dead loop on virtual device %s, fix it urgently!\n",
4198 dev->name);
4199 }
4200 }
4201
4202 rc = -ENETDOWN;
d4828d85 4203 rcu_read_unlock_bh();
1da177e4 4204
015f0688 4205 atomic_long_inc(&dev->tx_dropped);
1f59533f 4206 kfree_skb_list(skb);
4207 return rc;
4208out:
d4828d85 4209 rcu_read_unlock_bh();
4210 return rc;
4211}
f663dd9a 4212
2b4aa3ce 4213int dev_queue_xmit(struct sk_buff *skb)
4214{
4215 return __dev_queue_xmit(skb, NULL);
4216}
2b4aa3ce 4217EXPORT_SYMBOL(dev_queue_xmit);
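/* One possible pattern for the "bump the ref count" note in the comment
 * above (illustrative only, and only safe if you are careful about skb
 * ownership):
 *
 *	skb_get(skb);
 *	rc = dev_queue_xmit(skb);
 *	if (rc == NET_XMIT_SUCCESS || rc == NET_XMIT_CN)
 *		consume_skb(skb);
 *	else
 *		... the extra reference keeps skb valid for a retry ...
 */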
1da177e4 4218
eadec877 4219int dev_queue_xmit_accel(struct sk_buff *skb, struct net_device *sb_dev)
f663dd9a 4220{
eadec877 4221 return __dev_queue_xmit(skb, sb_dev);
4222}
4223EXPORT_SYMBOL(dev_queue_xmit_accel);
4224
36ccdf85 4225int __dev_direct_xmit(struct sk_buff *skb, u16 queue_id)
4226{
4227 struct net_device *dev = skb->dev;
4228 struct sk_buff *orig_skb = skb;
4229 struct netdev_queue *txq;
4230 int ret = NETDEV_TX_BUSY;
4231 bool again = false;
4232
4233 if (unlikely(!netif_running(dev) ||
4234 !netif_carrier_ok(dev)))
4235 goto drop;
4236
4237 skb = validate_xmit_skb_list(skb, dev, &again);
4238 if (skb != orig_skb)
4239 goto drop;
4240
4241 skb_set_queue_mapping(skb, queue_id);
4242 txq = skb_get_tx_queue(dev, skb);
3744741a 4243 PRANDOM_ADD_NOISE(skb, dev, txq, jiffies);
4244
4245 local_bh_disable();
4246
0ad6f6e7 4247 dev_xmit_recursion_inc();
4248 HARD_TX_LOCK(dev, txq, smp_processor_id());
4249 if (!netif_xmit_frozen_or_drv_stopped(txq))
4250 ret = netdev_start_xmit(skb, dev, txq, false);
4251 HARD_TX_UNLOCK(dev, txq);
0ad6f6e7 4252 dev_xmit_recursion_dec();
4253
4254 local_bh_enable();
4255 return ret;
4256drop:
4257 atomic_long_inc(&dev->tx_dropped);
4258 kfree_skb_list(skb);
4259 return NET_XMIT_DROP;
4260}
36ccdf85 4261EXPORT_SYMBOL(__dev_direct_xmit);
1da177e4 4262
eb13da1a 4263/*************************************************************************
4264 * Receiver routines
4265 *************************************************************************/
1da177e4 4266
6b2bedc3 4267int netdev_max_backlog __read_mostly = 1000;
4268EXPORT_SYMBOL(netdev_max_backlog);
4269
3b098e2d 4270int netdev_tstamp_prequeue __read_mostly = 1;
6b2bedc3 4271int netdev_budget __read_mostly = 300;
4272/* Must be at least 2 jiffies to guarantee 1 jiffy timeout */
4273unsigned int __read_mostly netdev_budget_usecs = 2 * USEC_PER_SEC / HZ;
4274int weight_p __read_mostly = 64; /* old backlog weight */
4275int dev_weight_rx_bias __read_mostly = 1; /* bias for backlog weight */
4276int dev_weight_tx_bias __read_mostly = 1; /* bias for output_queue quota */
4277int dev_rx_weight __read_mostly = 64;
4278int dev_tx_weight __read_mostly = 64;
4279/* Maximum number of GRO_NORMAL skbs to batch up for list-RX */
4280int gro_normal_batch __read_mostly = 8;
1da177e4 4281
4282/* Called with irq disabled */
4283static inline void ____napi_schedule(struct softnet_data *sd,
4284 struct napi_struct *napi)
4285{
4286 struct task_struct *thread;
4287
4288 if (test_bit(NAPI_STATE_THREADED, &napi->state)) {
4289 /* Paired with smp_mb__before_atomic() in
4290 * napi_enable()/dev_set_threaded().
4291 * Use READ_ONCE() to guarantee a complete
4292 * read on napi->thread. Only call
4293 * wake_up_process() when it's not NULL.
4294 */
4295 thread = READ_ONCE(napi->thread);
4296 if (thread) {
4297 wake_up_process(thread);
4298 return;
4299 }
4300 }
4301
4302 list_add_tail(&napi->poll_list, &sd->poll_list);
4303 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
4304}
4305
4306#ifdef CONFIG_RPS
4307
4308/* One global table that all flow-based protocols share. */
6e3f7faf 4309struct rps_sock_flow_table __rcu *rps_sock_flow_table __read_mostly;
bfb564e7 4310EXPORT_SYMBOL(rps_sock_flow_table);
4311u32 rps_cpu_mask __read_mostly;
4312EXPORT_SYMBOL(rps_cpu_mask);
bfb564e7 4313
dc05360f 4314struct static_key_false rps_needed __read_mostly;
3df97ba8 4315EXPORT_SYMBOL(rps_needed);
dc05360f 4316struct static_key_false rfs_needed __read_mostly;
13bfff25 4317EXPORT_SYMBOL(rfs_needed);
adc9300e 4318
4319static struct rps_dev_flow *
4320set_rps_cpu(struct net_device *dev, struct sk_buff *skb,
4321 struct rps_dev_flow *rflow, u16 next_cpu)
4322{
a31196b0 4323 if (next_cpu < nr_cpu_ids) {
4324#ifdef CONFIG_RFS_ACCEL
4325 struct netdev_rx_queue *rxqueue;
4326 struct rps_dev_flow_table *flow_table;
4327 struct rps_dev_flow *old_rflow;
4328 u32 flow_id;
4329 u16 rxq_index;
4330 int rc;
4331
4332 /* Should we steer this flow to a different hardware queue? */
4333 if (!skb_rx_queue_recorded(skb) || !dev->rx_cpu_rmap ||
4334 !(dev->features & NETIF_F_NTUPLE))
4335 goto out;
4336 rxq_index = cpu_rmap_lookup_index(dev->rx_cpu_rmap, next_cpu);
4337 if (rxq_index == skb_get_rx_queue(skb))
4338 goto out;
4339
4340 rxqueue = dev->_rx + rxq_index;
4341 flow_table = rcu_dereference(rxqueue->rps_flow_table);
4342 if (!flow_table)
4343 goto out;
61b905da 4344 flow_id = skb_get_hash(skb) & flow_table->mask;
4345 rc = dev->netdev_ops->ndo_rx_flow_steer(dev, skb,
4346 rxq_index, flow_id);
4347 if (rc < 0)
4348 goto out;
4349 old_rflow = rflow;
4350 rflow = &flow_table->flows[flow_id];
4351 rflow->filter = rc;
4352 if (old_rflow->filter == rflow->filter)
4353 old_rflow->filter = RPS_NO_FILTER;
4354 out:
4355#endif
4356 rflow->last_qtail =
09994d1b 4357 per_cpu(softnet_data, next_cpu).input_queue_head;
4358 }
4359
09994d1b 4360 rflow->cpu = next_cpu;
4361 return rflow;
4362}
4363
4364/*
4365 * get_rps_cpu is called from netif_receive_skb and returns the target
4366 * CPU from the RPS map of the receiving queue for a given skb.
4367 * rcu_read_lock must be held on entry.
4368 */
4369static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
4370 struct rps_dev_flow **rflowp)
4371{
4372 const struct rps_sock_flow_table *sock_flow_table;
4373 struct netdev_rx_queue *rxqueue = dev->_rx;
bfb564e7 4374 struct rps_dev_flow_table *flow_table;
567e4b79 4375 struct rps_map *map;
bfb564e7 4376 int cpu = -1;
567e4b79 4377 u32 tcpu;
61b905da 4378 u32 hash;
4379
4380 if (skb_rx_queue_recorded(skb)) {
4381 u16 index = skb_get_rx_queue(skb);
567e4b79 4382
4383 if (unlikely(index >= dev->real_num_rx_queues)) {
4384 WARN_ONCE(dev->real_num_rx_queues > 1,
4385 "%s received packet on queue %u, but number "
4386 "of RX queues is %u\n",
4387 dev->name, index, dev->real_num_rx_queues);
4388 goto done;
4389 }
4390 rxqueue += index;
4391 }
bfb564e7 4392
4393 /* Avoid computing hash if RFS/RPS is not active for this rxqueue */
4394
4395 flow_table = rcu_dereference(rxqueue->rps_flow_table);
6e3f7faf 4396 map = rcu_dereference(rxqueue->rps_map);
567e4b79 4397 if (!flow_table && !map)
bfb564e7
KK
4398 goto done;
4399
2d47b459 4400 skb_reset_network_header(skb);
4401 hash = skb_get_hash(skb);
4402 if (!hash)
4403 goto done;
4404
4405 sock_flow_table = rcu_dereference(rps_sock_flow_table);
4406 if (flow_table && sock_flow_table) {
fec5e652 4407 struct rps_dev_flow *rflow;
4408 u32 next_cpu;
4409 u32 ident;
4410
4411 /* First check into global flow table if there is a match */
4412 ident = sock_flow_table->ents[hash & sock_flow_table->mask];
4413 if ((ident ^ hash) & ~rps_cpu_mask)
4414 goto try_rps;
fec5e652 4415
4416 next_cpu = ident & rps_cpu_mask;
4417
4418 /* OK, now we know there is a match,
4419 * we can look at the local (per receive queue) flow table
4420 */
61b905da 4421 rflow = &flow_table->flows[hash & flow_table->mask];
4422 tcpu = rflow->cpu;
4423
4424 /*
4425 * If the desired CPU (where last recvmsg was done) is
4426 * different from current CPU (one in the rx-queue flow
4427 * table entry), switch if one of the following holds:
a31196b0 4428 * - Current CPU is unset (>= nr_cpu_ids).
4429 * - Current CPU is offline.
4430 * - The current CPU's queue tail has advanced beyond the
4431 * last packet that was enqueued using this table entry.
4432 * This guarantees that all previous packets for the flow
4433 * have been dequeued, thus preserving in order delivery.
4434 */
4435 if (unlikely(tcpu != next_cpu) &&
a31196b0 4436 (tcpu >= nr_cpu_ids || !cpu_online(tcpu) ||
fec5e652 4437 ((int)(per_cpu(softnet_data, tcpu).input_queue_head -
4438 rflow->last_qtail)) >= 0)) {
4439 tcpu = next_cpu;
c445477d 4440 rflow = set_rps_cpu(dev, skb, rflow, next_cpu);
baefa31d 4441 }
c445477d 4442
a31196b0 4443 if (tcpu < nr_cpu_ids && cpu_online(tcpu)) {
4444 *rflowp = rflow;
4445 cpu = tcpu;
4446 goto done;
4447 }
4448 }
4449
4450try_rps:
4451
0a9627f2 4452 if (map) {
8fc54f68 4453 tcpu = map->cpus[reciprocal_scale(hash, map->len)];
4454 if (cpu_online(tcpu)) {
4455 cpu = tcpu;
4456 goto done;
4457 }
4458 }
4459
4460done:
4461 return cpu;
4462}
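/* Worked example (illustrative, assuming 64 possible CPUs so that
 * rps_cpu_mask == 0x3f): a sock flow table entry recorded on CPU 5 for
 * flow hash 0xabcd1234 holds
 *
 *	ident = (0xabcd1234 & ~0x3f) | 5 == 0xabcd1205
 *
 * so (ident ^ hash) & ~rps_cpu_mask is 0 (the upper bits match) and
 * next_cpu = ident & rps_cpu_mask = 5, the CPU that last ran recvmsg.
 */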
4463
4464#ifdef CONFIG_RFS_ACCEL
4465
4466/**
4467 * rps_may_expire_flow - check whether an RFS hardware filter may be removed
4468 * @dev: Device on which the filter was set
4469 * @rxq_index: RX queue index
4470 * @flow_id: Flow ID passed to ndo_rx_flow_steer()
4471 * @filter_id: Filter ID returned by ndo_rx_flow_steer()
4472 *
4473 * Drivers that implement ndo_rx_flow_steer() should periodically call
4474 * this function for each installed filter and remove the filters for
4475 * which it returns %true.
4476 */
4477bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index,
4478 u32 flow_id, u16 filter_id)
4479{
4480 struct netdev_rx_queue *rxqueue = dev->_rx + rxq_index;
4481 struct rps_dev_flow_table *flow_table;
4482 struct rps_dev_flow *rflow;
4483 bool expire = true;
a31196b0 4484 unsigned int cpu;
4485
4486 rcu_read_lock();
4487 flow_table = rcu_dereference(rxqueue->rps_flow_table);
4488 if (flow_table && flow_id <= flow_table->mask) {
4489 rflow = &flow_table->flows[flow_id];
6aa7de05 4490 cpu = READ_ONCE(rflow->cpu);
a31196b0 4491 if (rflow->filter == filter_id && cpu < nr_cpu_ids &&
4492 ((int)(per_cpu(softnet_data, cpu).input_queue_head -
4493 rflow->last_qtail) <
4494 (int)(10 * flow_table->mask)))
4495 expire = false;
4496 }
4497 rcu_read_unlock();
4498 return expire;
4499}
4500EXPORT_SYMBOL(rps_may_expire_flow);
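/* Illustrative sketch of the periodic scan described above (driver-side
 * names are hypothetical):
 *
 *	for (i = 0; i < nr_installed_filters; i++) {
 *		struct mydrv_filter *f = &filters[i];
 *
 *		if (rps_may_expire_flow(netdev, f->rxq_index,
 *					f->flow_id, f->filter_id))
 *			mydrv_remove_hw_filter(f);
 *	}
 */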
4501
4502#endif /* CONFIG_RFS_ACCEL */
4503
0a9627f2 4504/* Called from hardirq (IPI) context */
e36fa2f7 4505static void rps_trigger_softirq(void *data)
0a9627f2 4506{
4507 struct softnet_data *sd = data;
4508
eecfd7c4 4509 ____napi_schedule(sd, &sd->backlog);
dee42870 4510 sd->received_rps++;
0a9627f2 4511}
e36fa2f7 4512
fec5e652 4513#endif /* CONFIG_RPS */
0a9627f2 4514
4515/*
4516 * Check if this softnet_data structure belongs to another CPU.
4517 * If yes, queue it to our IPI list and return 1
4518 * If no, return 0
4519 */
4520static int rps_ipi_queued(struct softnet_data *sd)
4521{
4522#ifdef CONFIG_RPS
903ceff7 4523 struct softnet_data *mysd = this_cpu_ptr(&softnet_data);
4524
4525 if (sd != mysd) {
4526 sd->rps_ipi_next = mysd->rps_ipi_list;
4527 mysd->rps_ipi_list = sd;
4528
4529 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
4530 return 1;
4531 }
4532#endif /* CONFIG_RPS */
4533 return 0;
4534}
4535
4536#ifdef CONFIG_NET_FLOW_LIMIT
4537int netdev_flow_limit_table_len __read_mostly = (1 << 12);
4538#endif
4539
4540static bool skb_flow_limit(struct sk_buff *skb, unsigned int qlen)
4541{
4542#ifdef CONFIG_NET_FLOW_LIMIT
4543 struct sd_flow_limit *fl;
4544 struct softnet_data *sd;
4545 unsigned int old_flow, new_flow;
4546
4547 if (qlen < (netdev_max_backlog >> 1))
4548 return false;
4549
903ceff7 4550 sd = this_cpu_ptr(&softnet_data);
4551
4552 rcu_read_lock();
4553 fl = rcu_dereference(sd->flow_limit);
4554 if (fl) {
3958afa1 4555 new_flow = skb_get_hash(skb) & (fl->num_buckets - 1);
4556 old_flow = fl->history[fl->history_head];
4557 fl->history[fl->history_head] = new_flow;
4558
4559 fl->history_head++;
4560 fl->history_head &= FLOW_LIMIT_HISTORY - 1;
4561
4562 if (likely(fl->buckets[old_flow]))
4563 fl->buckets[old_flow]--;
4564
4565 if (++fl->buckets[new_flow] > (FLOW_LIMIT_HISTORY >> 1)) {
4566 fl->count++;
4567 rcu_read_unlock();
4568 return true;
4569 }
4570 }
4571 rcu_read_unlock();
4572#endif
4573 return false;
4574}
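/* Worked example (illustrative, with the default FLOW_LIMIT_HISTORY of
 * 128): once the backlog is more than half full, a flow whose hash
 * bucket accounts for more than 64 of the last 128 enqueued packets has
 * its new packets dropped, while lighter flows keep being admitted.
 */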
4575
4576/*
4577 * enqueue_to_backlog is called to queue an skb to a per CPU backlog
4578 * queue (may be a remote CPU queue).
4579 */
4580static int enqueue_to_backlog(struct sk_buff *skb, int cpu,
4581 unsigned int *qtail)
0a9627f2 4582{
e36fa2f7 4583 struct softnet_data *sd;
0a9627f2 4584 unsigned long flags;
99bbc707 4585 unsigned int qlen;
0a9627f2 4586
e36fa2f7 4587 sd = &per_cpu(softnet_data, cpu);
4588
4589 local_irq_save(flags);
0a9627f2 4590
e36fa2f7 4591 rps_lock(sd);
4592 if (!netif_running(skb->dev))
4593 goto drop;
4594 qlen = skb_queue_len(&sd->input_pkt_queue);
4595 if (qlen <= netdev_max_backlog && !skb_flow_limit(skb, qlen)) {
e008f3f0 4596 if (qlen) {
0a9627f2 4597enqueue:
e36fa2f7 4598 __skb_queue_tail(&sd->input_pkt_queue, skb);
76cc8b13 4599 input_queue_tail_incr_save(sd, qtail);
e36fa2f7 4600 rps_unlock(sd);
152102c7 4601 local_irq_restore(flags);
4602 return NET_RX_SUCCESS;
4603 }
4604
4605 /* Schedule NAPI for backlog device
4606	 * We can use a non-atomic operation since we own the queue lock
4607 */
4608 if (!__test_and_set_bit(NAPI_STATE_SCHED, &sd->backlog.state)) {
e36fa2f7 4609 if (!rps_ipi_queued(sd))
eecfd7c4 4610 ____napi_schedule(sd, &sd->backlog);
4611 }
4612 goto enqueue;
4613 }
4614
e9e4dd32 4615drop:
dee42870 4616 sd->dropped++;
e36fa2f7 4617 rps_unlock(sd);
0a9627f2 4618
4619 local_irq_restore(flags);
4620
caf586e5 4621 atomic_long_inc(&skb->dev->rx_dropped);
4622 kfree_skb(skb);
4623 return NET_RX_DROP;
4624}
1da177e4 4625
4626static struct netdev_rx_queue *netif_get_rxqueue(struct sk_buff *skb)
4627{
4628 struct net_device *dev = skb->dev;
4629 struct netdev_rx_queue *rxqueue;
4630
4631 rxqueue = dev->_rx;
4632
4633 if (skb_rx_queue_recorded(skb)) {
4634 u16 index = skb_get_rx_queue(skb);
4635
4636 if (unlikely(index >= dev->real_num_rx_queues)) {
4637 WARN_ONCE(dev->real_num_rx_queues > 1,
4638 "%s received packet on queue %u, but number "
4639 "of RX queues is %u\n",
4640 dev->name, index, dev->real_num_rx_queues);
4641
4642 return rxqueue; /* Return first rxqueue */
4643 }
4644 rxqueue += index;
4645 }
4646 return rxqueue;
4647}
4648
d4455169 4649static u32 netif_receive_generic_xdp(struct sk_buff *skb,
02671e23 4650 struct xdp_buff *xdp,
4651 struct bpf_prog *xdp_prog)
4652{
be9df4af 4653 void *orig_data, *orig_data_end, *hard_start;
e817f856 4654 struct netdev_rx_queue *rxqueue;
de8f3a83 4655 u32 metalen, act = XDP_DROP;
43b5169d 4656 u32 mac_len, frame_sz;
4657 __be16 orig_eth_type;
4658 struct ethhdr *eth;
4659 bool orig_bcast;
be9df4af 4660 int off;
4661
4662 /* Reinjected packets coming from act_mirred or similar should
4663 * not get XDP generic processing.
4664 */
2c64605b 4665 if (skb_is_redirected(skb))
4666 return XDP_PASS;
4667
4668 /* XDP packets must be linear and must have sufficient headroom
4669 * of XDP_PACKET_HEADROOM bytes. This is the guarantee that also
4670 * native XDP provides, thus we need to do it here as well.
4671 */
ad1e03b2 4672 if (skb_cloned(skb) || skb_is_nonlinear(skb) ||
4673 skb_headroom(skb) < XDP_PACKET_HEADROOM) {
4674 int hroom = XDP_PACKET_HEADROOM - skb_headroom(skb);
4675 int troom = skb->tail + skb->data_len - skb->end;
4676
4677 /* In case we have to go down the path and also linearize,
4678	 * then let's do the pskb_expand_head() work just once here.
4679 */
4680 if (pskb_expand_head(skb,
4681 hroom > 0 ? ALIGN(hroom, NET_SKB_PAD) : 0,
4682 troom > 0 ? troom + 128 : 0, GFP_ATOMIC))
4683 goto do_drop;
2d17d8d7 4684 if (skb_linearize(skb))
4685 goto do_drop;
4686 }
4687
4688 /* The XDP program wants to see the packet starting at the MAC
4689 * header.
4690 */
4691 mac_len = skb->data - skb_mac_header(skb);
be9df4af 4692 hard_start = skb->data - skb_headroom(skb);
4693
4694	 /* SKB "head" area always has tailroom for skb_shared_info */
be9df4af 4695 frame_sz = (void *)skb_end_pointer(skb) - hard_start;
43b5169d 4696 frame_sz += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
a075767b 4697
4698 rxqueue = netif_get_rxqueue(skb);
4699 xdp_init_buff(xdp, frame_sz, &rxqueue->xdp_rxq);
4700 xdp_prepare_buff(xdp, hard_start, skb_headroom(skb) - mac_len,
4701 skb_headlen(skb) + mac_len, true);
a075767b 4702
4703 orig_data_end = xdp->data_end;
4704 orig_data = xdp->data;
4705 eth = (struct ethhdr *)xdp->data;
4706 orig_bcast = is_multicast_ether_addr_64bits(eth->h_dest);
4707 orig_eth_type = eth->h_proto;
d4455169 4708
02671e23 4709 act = bpf_prog_run_xdp(xdp_prog, xdp);
d4455169 4710
065af355 4711 /* check if bpf_xdp_adjust_head was used */
02671e23 4712 off = xdp->data - orig_data;
4713 if (off) {
4714 if (off > 0)
4715 __skb_pull(skb, off);
4716 else if (off < 0)
4717 __skb_push(skb, -off);
4718
4719 skb->mac_header += off;
4720 skb_reset_network_header(skb);
4721 }
d4455169 4722
4723 /* check if bpf_xdp_adjust_tail was used */
4724 off = xdp->data_end - orig_data_end;
f7613120 4725 if (off != 0) {
02671e23 4726 skb_set_tail_pointer(skb, xdp->data_end - xdp->data);
a075767b 4727 skb->len += off; /* positive on grow, negative on shrink */
f7613120 4728 }
198d83bb 4729
4730	 /* check if XDP changed the eth hdr such that the SKB needs an update */
4731 eth = (struct ethhdr *)xdp->data;
4732 if ((orig_eth_type != eth->h_proto) ||
4733 (orig_bcast != is_multicast_ether_addr_64bits(eth->h_dest))) {
4734 __skb_push(skb, ETH_HLEN);
4735 skb->protocol = eth_type_trans(skb, skb->dev);
4736 }
4737
d4455169 4738 switch (act) {
6103aa96 4739 case XDP_REDIRECT:
4740 case XDP_TX:
4741 __skb_push(skb, mac_len);
de8f3a83 4742 break;
d4455169 4743 case XDP_PASS:
02671e23 4744 metalen = xdp->data - xdp->data_meta;
4745 if (metalen)
4746 skb_metadata_set(skb, metalen);
d4455169 4747 break;
4748 default:
4749 bpf_warn_invalid_xdp_action(act);
df561f66 4750 fallthrough;
4751 case XDP_ABORTED:
4752 trace_xdp_exception(skb->dev, xdp_prog, act);
df561f66 4753 fallthrough;
4754 case XDP_DROP:
4755 do_drop:
4756 kfree_skb(skb);
4757 break;
4758 }
4759
4760 return act;
4761}
4762
4763/* When doing generic XDP we have to bypass the qdisc layer and the
4764 * network taps in order to match in-driver-XDP behavior.
4765 */
7c497478 4766void generic_xdp_tx(struct sk_buff *skb, struct bpf_prog *xdp_prog)
4767{
4768 struct net_device *dev = skb->dev;
4769 struct netdev_queue *txq;
4770 bool free_skb = true;
4771 int cpu, rc;
4772
4bd97d51 4773 txq = netdev_core_pick_tx(dev, skb, NULL);
4774 cpu = smp_processor_id();
4775 HARD_TX_LOCK(dev, txq, cpu);
4776 if (!netif_xmit_stopped(txq)) {
4777 rc = netdev_start_xmit(skb, dev, txq, 0);
4778 if (dev_xmit_complete(rc))
4779 free_skb = false;
4780 }
4781 HARD_TX_UNLOCK(dev, txq);
4782 if (free_skb) {
4783 trace_xdp_exception(dev, xdp_prog, XDP_TX);
4784 kfree_skb(skb);
4785 }
4786}
4787
02786475 4788static DEFINE_STATIC_KEY_FALSE(generic_xdp_needed_key);
d4455169 4789
7c497478 4790int do_xdp_generic(struct bpf_prog *xdp_prog, struct sk_buff *skb)
d4455169 4791{
d4455169 4792 if (xdp_prog) {
4793 struct xdp_buff xdp;
4794 u32 act;
6103aa96 4795 int err;
d4455169 4796
02671e23 4797 act = netif_receive_generic_xdp(skb, &xdp, xdp_prog);
d4455169 4798 if (act != XDP_PASS) {
4799 switch (act) {
4800 case XDP_REDIRECT:
2facaad6 4801 err = xdp_do_generic_redirect(skb->dev, skb,
02671e23 4802 &xdp, xdp_prog);
4803 if (err)
4804 goto out_redir;
02671e23 4805 break;
6103aa96 4806 case XDP_TX:
d4455169 4807 generic_xdp_tx(skb, xdp_prog);
4808 break;
4809 }
4810 return XDP_DROP;
4811 }
4812 }
4813 return XDP_PASS;
6103aa96 4814out_redir:
4815 kfree_skb(skb);
4816 return XDP_DROP;
d4455169 4817}
7c497478 4818EXPORT_SYMBOL_GPL(do_xdp_generic);
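/* Illustrative fallback (hypothetical context): a driver without native
 * XDP support can run the generic hook itself; on anything but XDP_PASS
 * the skb has already been consumed (dropped, redirected or sent):
 *
 *	rcu_read_lock();
 *	if (do_xdp_generic(rcu_dereference(dev->xdp_prog), skb) == XDP_PASS) {
 *		rcu_read_unlock();
 *		netif_receive_skb(skb);
 *	} else {
 *		rcu_read_unlock();
 *	}
 */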
d4455169 4819
ae78dbfa 4820static int netif_rx_internal(struct sk_buff *skb)
1da177e4 4821{
b0e28f1e 4822 int ret;
1da177e4 4823
588f0330 4824 net_timestamp_check(netdev_tstamp_prequeue, skb);
1da177e4 4825
cf66ba58 4826 trace_netif_rx(skb);
d4455169 4827
df334545 4828#ifdef CONFIG_RPS
dc05360f 4829 if (static_branch_unlikely(&rps_needed)) {
fec5e652 4830 struct rps_dev_flow voidflow, *rflow = &voidflow;
4831 int cpu;
4832
cece1945 4833 preempt_disable();
b0e28f1e 4834 rcu_read_lock();
4835
4836 cpu = get_rps_cpu(skb->dev, skb, &rflow);
4837 if (cpu < 0)
4838 cpu = smp_processor_id();
4839
4840 ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
4841
b0e28f1e 4842 rcu_read_unlock();
cece1945 4843 preempt_enable();
4844 } else
4845#endif
4846 {
4847 unsigned int qtail;
f4563a75 4848
4849 ret = enqueue_to_backlog(skb, get_cpu(), &qtail);
4850 put_cpu();
4851 }
b0e28f1e 4852 return ret;
1da177e4 4853}
4854
4855/**
4856 * netif_rx - post buffer to the network code
4857 * @skb: buffer to post
4858 *
4859 * This function receives a packet from a device driver and queues it for
4860 * the upper (protocol) levels to process. It always succeeds. The buffer
4861 * may be dropped during processing for congestion control or by the
4862 * protocol layers.
4863 *
4864 * return values:
4865 * NET_RX_SUCCESS (no congestion)
4866 * NET_RX_DROP (packet was dropped)
4867 *
4868 */
4869
4870int netif_rx(struct sk_buff *skb)
4871{
4872 int ret;
4873
4874 trace_netif_rx_entry(skb);
4875
4876 ret = netif_rx_internal(skb);
4877 trace_netif_rx_exit(ret);
4878
4879 return ret;
ae78dbfa 4880}
d1b19dff 4881EXPORT_SYMBOL(netif_rx);
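/* Typical driver-side usage (illustrative): called once a received frame
 * has been pulled off the hardware ring, often from the IRQ handler:
 *
 *	skb->protocol = eth_type_trans(skb, dev);
 *	netif_rx(skb);
 *
 * The skb is queued to the per-CPU backlog and processed later in
 * softirq context.
 */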
4882
4883int netif_rx_ni(struct sk_buff *skb)
4884{
4885 int err;
4886
4887 trace_netif_rx_ni_entry(skb);
4888
1da177e4 4889 preempt_disable();
ae78dbfa 4890 err = netif_rx_internal(skb);
4891 if (local_softirq_pending())
4892 do_softirq();
4893 preempt_enable();
b0e3f1bd 4894 trace_netif_rx_ni_exit(err);
4895
4896 return err;
4897}
4898EXPORT_SYMBOL(netif_rx_ni);
4899
4900int netif_rx_any_context(struct sk_buff *skb)
4901{
4902 /*
4903 * If invoked from contexts which do not invoke bottom half
4904	 * processing either at return from interrupt or when softirqs are
4905	 * re-enabled, use netif_rx_ni() which invokes bottom-half processing
4906 * directly.
4907 */
4908 if (in_interrupt())
4909 return netif_rx(skb);
4910 else
4911 return netif_rx_ni(skb);
4912}
4913EXPORT_SYMBOL(netif_rx_any_context);
4914
0766f788 4915static __latent_entropy void net_tx_action(struct softirq_action *h)
1da177e4 4916{
903ceff7 4917 struct softnet_data *sd = this_cpu_ptr(&softnet_data);
4918
4919 if (sd->completion_queue) {
4920 struct sk_buff *clist;
4921
4922 local_irq_disable();
4923 clist = sd->completion_queue;
4924 sd->completion_queue = NULL;
4925 local_irq_enable();
4926
4927 while (clist) {
4928 struct sk_buff *skb = clist;
f4563a75 4929
4930 clist = clist->next;
4931
63354797 4932 WARN_ON(refcount_read(&skb->users));
4933 if (likely(get_kfree_skb_cb(skb)->reason == SKB_REASON_CONSUMED))
4934 trace_consume_skb(skb);
4935 else
4936 trace_kfree_skb(skb, net_tx_action);
4937
4938 if (skb->fclone != SKB_FCLONE_UNAVAILABLE)
4939 __kfree_skb(skb);
4940 else
4941 __kfree_skb_defer(skb);
4942 }
4943 }
4944
4945 if (sd->output_queue) {
37437bb2 4946 struct Qdisc *head;
4947
4948 local_irq_disable();
4949 head = sd->output_queue;
4950 sd->output_queue = NULL;
a9cbd588 4951 sd->output_queue_tailp = &sd->output_queue;
4952 local_irq_enable();
4953
4954 while (head) {
37437bb2 4955 struct Qdisc *q = head;
6b3ba914 4956 spinlock_t *root_lock = NULL;
37437bb2 4957
4958 head = head->next_sched;
4959
4960 if (!(q->flags & TCQ_F_NOLOCK)) {
4961 root_lock = qdisc_lock(q);
4962 spin_lock(root_lock);
4963 }
4964 /* We need to make sure head->next_sched is read
4965 * before clearing __QDISC_STATE_SCHED
4966 */
4967 smp_mb__before_atomic();
4968 clear_bit(__QDISC_STATE_SCHED, &q->state);
4969 qdisc_run(q);
4970 if (root_lock)
4971 spin_unlock(root_lock);
4972 }
4973 }
4974
4975 xfrm_dev_backlog(sd);
4976}
4977
181402a5 4978#if IS_ENABLED(CONFIG_BRIDGE) && IS_ENABLED(CONFIG_ATM_LANE)
4979/* This hook is defined here for ATM LANE */
4980int (*br_fdb_test_addr_hook)(struct net_device *dev,
4981 unsigned char *addr) __read_mostly;
4fb019a0 4982EXPORT_SYMBOL_GPL(br_fdb_test_addr_hook);
da678292 4983#endif
1da177e4 4984
4985static inline struct sk_buff *
4986sch_handle_ingress(struct sk_buff *skb, struct packet_type **pt_prev, int *ret,
9aa1206e 4987 struct net_device *orig_dev, bool *another)
f697c3e8 4988{
e7582bab 4989#ifdef CONFIG_NET_CLS_ACT
46209401 4990 struct mini_Qdisc *miniq = rcu_dereference_bh(skb->dev->miniq_ingress);
d2788d34 4991 struct tcf_result cl_res;
24824a09 4992
4993 /* If there's at least one ingress present somewhere (so
4994 * we get here via enabled static key), remaining devices
4995 * that are not configured with an ingress qdisc will bail
d2788d34 4996 * out here.
c9e99fd0 4997 */
46209401 4998 if (!miniq)
4577139b 4999 return skb;
46209401 5000
5001 if (*pt_prev) {
5002 *ret = deliver_skb(skb, *pt_prev, orig_dev);
5003 *pt_prev = NULL;
5004 }
5005
3365495c 5006 qdisc_skb_cb(skb)->pkt_len = skb->len;
aadaca9e 5007 qdisc_skb_cb(skb)->mru = 0;
7baf2429 5008 qdisc_skb_cb(skb)->post_ct = false;
8dc07fdb 5009 skb->tc_at_ingress = 1;
46209401 5010 mini_qdisc_bstats_cpu_update(miniq, skb);
c9e99fd0 5011
5012 switch (tcf_classify_ingress(skb, miniq->block, miniq->filter_list,
5013 &cl_res, false)) {
5014 case TC_ACT_OK:
5015 case TC_ACT_RECLASSIFY:
5016 skb->tc_index = TC_H_MIN(cl_res.classid);
5017 break;
5018 case TC_ACT_SHOT:
46209401 5019 mini_qdisc_qstats_cpu_drop(miniq);
5020 kfree_skb(skb);
5021 return NULL;
5022 case TC_ACT_STOLEN:
5023 case TC_ACT_QUEUED:
e25ea21f 5024 case TC_ACT_TRAP:
8a3a4c6e 5025 consume_skb(skb);
d2788d34 5026 return NULL;
5027 case TC_ACT_REDIRECT:
5028 /* skb_mac_header check was done by cls/act_bpf, so
5029 * we can safely push the L2 header back before
5030 * redirecting to another netdev
5031 */
5032 __skb_push(skb, skb->mac_len);
5033 if (skb_do_redirect(skb) == -EAGAIN) {
5034 __skb_pull(skb, skb->mac_len);
5035 *another = true;
5036 break;
5037 }
27b29f63 5038 return NULL;
720f22fe 5039 case TC_ACT_CONSUMED:
cd11b164 5040 return NULL;
5041 default:
5042 break;
f697c3e8 5043 }
e7582bab 5044#endif /* CONFIG_NET_CLS_ACT */
5045 return skb;
5046}
1da177e4 5047
5048/**
5049 * netdev_is_rx_handler_busy - check if receive handler is registered
5050 * @dev: device to check
5051 *
5052 * Check if a receive handler is already registered for a given device.
5054 * Return true if there is one.
5054 *
5055 * The caller must hold the rtnl_mutex.
5056 */
5057bool netdev_is_rx_handler_busy(struct net_device *dev)
5058{
5059 ASSERT_RTNL();
5060 return dev && rtnl_dereference(dev->rx_handler);
5061}
5062EXPORT_SYMBOL_GPL(netdev_is_rx_handler_busy);
5063
5064/**
5065 * netdev_rx_handler_register - register receive handler
5066 * @dev: device to register a handler for
5067 * @rx_handler: receive handler to register
93e2c32b 5068 * @rx_handler_data: data pointer that is used by rx handler
ab95bfe0 5069 *
e227867f 5070 * Register a receive handler for a device. This handler will then be
5071 * called from __netif_receive_skb. A negative errno code is returned
5072 * on a failure.
5073 *
5074 * The caller must hold the rtnl_mutex.
5075 *
5076 * For a general description of rx_handler, see enum rx_handler_result.
5077 */
5078int netdev_rx_handler_register(struct net_device *dev,
5079 rx_handler_func_t *rx_handler,
5080 void *rx_handler_data)
ab95bfe0 5081{
1b7cd004 5082 if (netdev_is_rx_handler_busy(dev))
ab95bfe0
JP
5083 return -EBUSY;
5084
f5426250
PA
5085 if (dev->priv_flags & IFF_NO_RX_HANDLER)
5086 return -EINVAL;
5087
00cfec37 5088 /* Note: rx_handler_data must be set before rx_handler */
93e2c32b 5089 rcu_assign_pointer(dev->rx_handler_data, rx_handler_data);
5090 rcu_assign_pointer(dev->rx_handler, rx_handler);
5091
5092 return 0;
5093}
5094EXPORT_SYMBOL_GPL(netdev_rx_handler_register);
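/* Illustrative usage (hypothetical "myteam" names): an aggregating
 * virtual device claims frames arriving on an enslaved port and feeds
 * them back through the stack as the master's traffic:
 *
 *	static rx_handler_result_t myteam_handle_frame(struct sk_buff **pskb)
 *	{
 *		struct myteam_port *port =
 *			rcu_dereference((*pskb)->dev->rx_handler_data);
 *
 *		(*pskb)->dev = port->master_dev;
 *		return RX_HANDLER_ANOTHER;
 *	}
 *
 *	err = netdev_rx_handler_register(slave_dev, myteam_handle_frame, port);
 */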
5095
5096/**
5097 * netdev_rx_handler_unregister - unregister receive handler
5098 * @dev: device to unregister a handler from
5099 *
166ec369 5100 * Unregister a receive handler from a device.
5101 *
5102 * The caller must hold the rtnl_mutex.
5103 */
5104void netdev_rx_handler_unregister(struct net_device *dev)
5105{
5106
5107 ASSERT_RTNL();
a9b3cd7f 5108 RCU_INIT_POINTER(dev->rx_handler, NULL);
5109	 /* a reader seeing a non-NULL rx_handler in an rcu_read_lock()
5110	 * section is guaranteed to see a non-NULL rx_handler_data
5111	 * as well.
5112 */
5113 synchronize_net();
a9b3cd7f 5114 RCU_INIT_POINTER(dev->rx_handler_data, NULL);
5115}
5116EXPORT_SYMBOL_GPL(netdev_rx_handler_unregister);
5117
5118/*
5119 * Limit the use of PFMEMALLOC reserves to those protocols that implement
5120 * the special handling of PFMEMALLOC skbs.
5121 */
5122static bool skb_pfmemalloc_protocol(struct sk_buff *skb)
5123{
5124 switch (skb->protocol) {
5125 case htons(ETH_P_ARP):
5126 case htons(ETH_P_IP):
5127 case htons(ETH_P_IPV6):
5128 case htons(ETH_P_8021Q):
5129 case htons(ETH_P_8021AD):
5130 return true;
5131 default:
5132 return false;
5133 }
5134}
5135
5136static inline int nf_ingress(struct sk_buff *skb, struct packet_type **pt_prev,
5137 int *ret, struct net_device *orig_dev)
5138{
5139 if (nf_hook_ingress_active(skb)) {
5140 int ingress_retval;
5141
5142 if (*pt_prev) {
5143 *ret = deliver_skb(skb, *pt_prev, orig_dev);
5144 *pt_prev = NULL;
5145 }
5146
5147 rcu_read_lock();
5148 ingress_retval = nf_hook_ingress(skb);
5149 rcu_read_unlock();
5150 return ingress_retval;
5151 }
5152 return 0;
5153}
e687ad60 5154
c0bbbdc3 5155static int __netif_receive_skb_core(struct sk_buff **pskb, bool pfmemalloc,
88eb1944 5156 struct packet_type **ppt_prev)
5157{
5158 struct packet_type *ptype, *pt_prev;
ab95bfe0 5159 rx_handler_func_t *rx_handler;
c0bbbdc3 5160 struct sk_buff *skb = *pskb;
f2ccd8fa 5161 struct net_device *orig_dev;
8a4eb573 5162 bool deliver_exact = false;
1da177e4 5163 int ret = NET_RX_DROP;
252e3346 5164 __be16 type;
81bbb3d4 5167
cf66ba58 5168 trace_netif_receive_skb(skb);
9b22ea56 5169
cc9bd5ce 5170 orig_dev = skb->dev;
8f903c70 5171
c1d2bbe1 5172 skb_reset_network_header(skb);
5173 if (!skb_transport_header_was_set(skb))
5174 skb_reset_transport_header(skb);
0b5c9db1 5175 skb_reset_mac_len(skb);
5176
5177 pt_prev = NULL;
5178
63d8ea7f 5179another_round:
b6858177 5180 skb->skb_iif = skb->dev->ifindex;
5181
5182 __this_cpu_inc(softnet_data.processed);
5183
5184 if (static_branch_unlikely(&generic_xdp_needed_key)) {
5185 int ret2;
5186
5187 preempt_disable();
5188 ret2 = do_xdp_generic(rcu_dereference(skb->dev->xdp_prog), skb);
5189 preempt_enable();
5190
5191 if (ret2 != XDP_PASS) {
5192 ret = NET_RX_DROP;
5193 goto out;
5194 }
5195 skb_reset_mac_len(skb);
5196 }
5197
324cefaf 5198 if (eth_type_vlan(skb->protocol)) {
0d5501c1 5199 skb = skb_vlan_untag(skb);
bcc6d479 5200 if (unlikely(!skb))
2c17d27c 5201 goto out;
bcc6d479
JP
5202 }
5203
e7246e12
WB
5204 if (skb_skip_tc_classify(skb))
5205 goto skip_classify;
1da177e4 5206
9754e293 5207 if (pfmemalloc)
b4b9e355
MG
5208 goto skip_taps;
5209
1da177e4 5210 list_for_each_entry_rcu(ptype, &ptype_all, list) {
7866a621
SN
5211 if (pt_prev)
5212 ret = deliver_skb(skb, pt_prev, orig_dev);
5213 pt_prev = ptype;
5214 }
5215
5216 list_for_each_entry_rcu(ptype, &skb->dev->ptype_all, list) {
5217 if (pt_prev)
5218 ret = deliver_skb(skb, pt_prev, orig_dev);
5219 pt_prev = ptype;
1da177e4
LT
5220 }
5221
b4b9e355 5222skip_taps:
1cf51900 5223#ifdef CONFIG_NET_INGRESS
aabf6772 5224 if (static_branch_unlikely(&ingress_needed_key)) {
9aa1206e
DB
5225 bool another = false;
5226
5227 skb = sch_handle_ingress(skb, &pt_prev, &ret, orig_dev,
5228 &another);
5229 if (another)
5230 goto another_round;
4577139b 5231 if (!skb)
2c17d27c 5232 goto out;
e687ad60
PN
5233
5234 if (nf_ingress(skb, &pt_prev, &ret, orig_dev) < 0)
2c17d27c 5235 goto out;
4577139b 5236 }
1cf51900 5237#endif
2c64605b 5238 skb_reset_redirect(skb);
e7246e12 5239skip_classify:
9754e293 5240 if (pfmemalloc && !skb_pfmemalloc_protocol(skb))
b4b9e355
MG
5241 goto drop;
5242
df8a39de 5243 if (skb_vlan_tag_present(skb)) {
2425717b
JF
5244 if (pt_prev) {
5245 ret = deliver_skb(skb, pt_prev, orig_dev);
5246 pt_prev = NULL;
5247 }
48cc32d3 5248 if (vlan_do_receive(&skb))
2425717b
JF
5249 goto another_round;
5250 else if (unlikely(!skb))
2c17d27c 5251 goto out;
2425717b
JF
5252 }
5253
48cc32d3 5254 rx_handler = rcu_dereference(skb->dev->rx_handler);
ab95bfe0
JP
5255 if (rx_handler) {
5256 if (pt_prev) {
5257 ret = deliver_skb(skb, pt_prev, orig_dev);
5258 pt_prev = NULL;
5259 }
8a4eb573
JP
5260 switch (rx_handler(&skb)) {
5261 case RX_HANDLER_CONSUMED:
3bc1b1ad 5262 ret = NET_RX_SUCCESS;
2c17d27c 5263 goto out;
8a4eb573 5264 case RX_HANDLER_ANOTHER:
63d8ea7f 5265 goto another_round;
8a4eb573
JP
5266 case RX_HANDLER_EXACT:
5267 deliver_exact = true;
b1866bff 5268 break;
8a4eb573
JP
5269 case RX_HANDLER_PASS:
5270 break;
5271 default:
5272 BUG();
5273 }
ab95bfe0 5274 }
1da177e4 5275
b14a9fc4 5276 if (unlikely(skb_vlan_tag_present(skb)) && !netdev_uses_dsa(skb->dev)) {
36b2f61a
GV
5277check_vlan_id:
5278 if (skb_vlan_tag_get_id(skb)) {
5279 /* The vlan id is non-zero and vlan_do_receive() above couldn't
5280 * find the vlan device.
5281 */
d4b812de 5282 skb->pkt_type = PACKET_OTHERHOST;
324cefaf 5283 } else if (eth_type_vlan(skb->protocol)) {
36b2f61a
GV
5284 /* Outer header is 802.1P with vlan 0, inner header is
5285 * 802.1Q or 802.1AD and vlan_do_receive() above could
5286 * not find vlan dev for vlan id 0.
5287 */
5288 __vlan_hwaccel_clear_tag(skb);
5289 skb = skb_vlan_untag(skb);
5290 if (unlikely(!skb))
5291 goto out;
5292 if (vlan_do_receive(&skb))
5293 /* After stripping off 802.1P header with vlan 0
5294 * vlan dev is found for inner header.
5295 */
5296 goto another_round;
5297 else if (unlikely(!skb))
5298 goto out;
5299 else
5300 /* We have stripped the outer 802.1P vlan 0 header
5301 * but could not find a vlan dev.
5302 * Check the vlan id again to set OTHERHOST.
5303 */
5304 goto check_vlan_id;
5305 }
d4b812de
ED
5306 /* Note: we might in the future use prio bits
5307 * and set skb->priority like in vlan_do_receive().
5308 * For the time being, just ignore the Priority Code Point.
5309 */
b1817524 5310 __vlan_hwaccel_clear_tag(skb);
d4b812de 5311 }
48cc32d3 5312
7866a621
SN
5313 type = skb->protocol;
5314
63d8ea7f 5315 /* deliver only exact match when indicated */
7866a621
SN
5316 if (likely(!deliver_exact)) {
5317 deliver_ptype_list_skb(skb, &pt_prev, orig_dev, type,
5318 &ptype_base[ntohs(type) &
5319 PTYPE_HASH_MASK]);
5320 }
1f3c8804 5321
7866a621
SN
5322 deliver_ptype_list_skb(skb, &pt_prev, orig_dev, type,
5323 &orig_dev->ptype_specific);
5324
5325 if (unlikely(skb->dev != orig_dev)) {
5326 deliver_ptype_list_skb(skb, &pt_prev, orig_dev, type,
5327 &skb->dev->ptype_specific);
1da177e4
LT
5328 }
5329
5330 if (pt_prev) {
1f8b977a 5331 if (unlikely(skb_orphan_frags_rx(skb, GFP_ATOMIC)))
0e698bf6 5332 goto drop;
88eb1944 5333 *ppt_prev = pt_prev;
1da177e4 5334 } else {
b4b9e355 5335drop:
6e7333d3
JW
5336 if (!deliver_exact)
5337 atomic_long_inc(&skb->dev->rx_dropped);
5338 else
5339 atomic_long_inc(&skb->dev->rx_nohandler);
1da177e4
LT
5340 kfree_skb(skb);
5341 /* Jamal, now you will not be able to escape explaining
5342 * to me how you were going to use this. :-)
5343 */
5344 ret = NET_RX_DROP;
5345 }
5346
2c17d27c 5347out:
c0bbbdc3
BS
5348 /* The invariant here is that if *ppt_prev is not NULL
5349 * then skb should also be non-NULL.
5350 *
5351 * The *ppt_prev assignment above preserves this invariant,
5352 * as skb is dereferenced right next to it.
5353 */
5354 *pskb = skb;
9754e293
DM
5355 return ret;
5356}
5357
88eb1944
EC
5358static int __netif_receive_skb_one_core(struct sk_buff *skb, bool pfmemalloc)
5359{
5360 struct net_device *orig_dev = skb->dev;
5361 struct packet_type *pt_prev = NULL;
5362 int ret;
5363
c0bbbdc3 5364 ret = __netif_receive_skb_core(&skb, pfmemalloc, &pt_prev);
88eb1944 5365 if (pt_prev)
f5737cba
PA
5366 ret = INDIRECT_CALL_INET(pt_prev->func, ipv6_rcv, ip_rcv, skb,
5367 skb->dev, pt_prev, orig_dev);
88eb1944
EC
5368 return ret;
5369}
5370
1c601d82
JDB
5371/**
5372 * netif_receive_skb_core - special purpose version of netif_receive_skb
5373 * @skb: buffer to process
5374 *
5375 * More direct receive version of netif_receive_skb(). It should
5376 * only be used by callers that have a need to skip RPS and Generic XDP.
2de9780f 5377 * Callers must also take care of handling ``(page_is_)pfmemalloc`` skbs.
1c601d82
JDB
5378 *
5379 * This function may only be called from softirq context and interrupts
5380 * should be enabled.
5381 *
5382 * Return values (usually ignored):
5383 * NET_RX_SUCCESS: no congestion
5384 * NET_RX_DROP: packet was dropped
5385 */
5386int netif_receive_skb_core(struct sk_buff *skb)
5387{
5388 int ret;
5389
5390 rcu_read_lock();
88eb1944 5391 ret = __netif_receive_skb_one_core(skb, false);
1c601d82
JDB
5392 rcu_read_unlock();
5393
5394 return ret;
5395}
5396EXPORT_SYMBOL(netif_receive_skb_core);
5397
88eb1944
EC
5398static inline void __netif_receive_skb_list_ptype(struct list_head *head,
5399 struct packet_type *pt_prev,
5400 struct net_device *orig_dev)
4ce0017a
EC
5401{
5402 struct sk_buff *skb, *next;
5403
88eb1944
EC
5404 if (!pt_prev)
5405 return;
5406 if (list_empty(head))
5407 return;
17266ee9 5408 if (pt_prev->list_func != NULL)
fdf71426
PA
5409 INDIRECT_CALL_INET(pt_prev->list_func, ipv6_list_rcv,
5410 ip_list_rcv, head, pt_prev, orig_dev);
17266ee9 5411 else
9a5a90d1
AL
5412 list_for_each_entry_safe(skb, next, head, list) {
5413 skb_list_del_init(skb);
fdf71426 5414 pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
9a5a90d1 5415 }
88eb1944
EC
5416}
5417
5418static void __netif_receive_skb_list_core(struct list_head *head, bool pfmemalloc)
5419{
5420 /* Fast-path assumptions:
5421 * - There is no RX handler.
5422 * - Only one packet_type matches.
5423 * If either of these fails, we will end up doing some per-packet
5424 * processing in-line, then handling the 'last ptype' for the whole
5425 * sublist. This can't cause out-of-order delivery to any single ptype,
5426 * because the 'last ptype' must be constant across the sublist, and all
5427 * other ptypes are handled per-packet.
5428 */
5429 /* Current (common) ptype of sublist */
5430 struct packet_type *pt_curr = NULL;
5431 /* Current (common) orig_dev of sublist */
5432 struct net_device *od_curr = NULL;
5433 struct list_head sublist;
5434 struct sk_buff *skb, *next;
5435
9af86f93 5436 INIT_LIST_HEAD(&sublist);
88eb1944
EC
5437 list_for_each_entry_safe(skb, next, head, list) {
5438 struct net_device *orig_dev = skb->dev;
5439 struct packet_type *pt_prev = NULL;
5440
22f6bbb7 5441 skb_list_del_init(skb);
c0bbbdc3 5442 __netif_receive_skb_core(&skb, pfmemalloc, &pt_prev);
9af86f93
EC
5443 if (!pt_prev)
5444 continue;
88eb1944
EC
5445 if (pt_curr != pt_prev || od_curr != orig_dev) {
5446 /* dispatch old sublist */
88eb1944
EC
5447 __netif_receive_skb_list_ptype(&sublist, pt_curr, od_curr);
5448 /* start new sublist */
9af86f93 5449 INIT_LIST_HEAD(&sublist);
88eb1944
EC
5450 pt_curr = pt_prev;
5451 od_curr = orig_dev;
5452 }
9af86f93 5453 list_add_tail(&skb->list, &sublist);
88eb1944
EC
5454 }
5455
5456 /* dispatch final sublist */
9af86f93 5457 __netif_receive_skb_list_ptype(&sublist, pt_curr, od_curr);
4ce0017a
EC
5458}
5459
9754e293
DM
5460static int __netif_receive_skb(struct sk_buff *skb)
5461{
5462 int ret;
5463
5464 if (sk_memalloc_socks() && skb_pfmemalloc(skb)) {
f1083048 5465 unsigned int noreclaim_flag;
9754e293
DM
5466
5467 /*
5468 * PFMEMALLOC skbs are special, they should
5469 * - be delivered to SOCK_MEMALLOC sockets only
5470 * - stay away from userspace
5471 * - have bounded memory usage
5472 *
5473 * Use PF_MEMALLOC as this saves us from propagating the allocation
5474 * context down to all allocation sites.
5475 */
f1083048 5476 noreclaim_flag = memalloc_noreclaim_save();
88eb1944 5477 ret = __netif_receive_skb_one_core(skb, true);
f1083048 5478 memalloc_noreclaim_restore(noreclaim_flag);
9754e293 5479 } else
88eb1944 5480 ret = __netif_receive_skb_one_core(skb, false);
9754e293 5481
1da177e4
LT
5482 return ret;
5483}
0a9627f2 5484
4ce0017a
EC
5485static void __netif_receive_skb_list(struct list_head *head)
5486{
5487 unsigned long noreclaim_flag = 0;
5488 struct sk_buff *skb, *next;
5489 bool pfmemalloc = false; /* Is current sublist PF_MEMALLOC? */
5490
5491 list_for_each_entry_safe(skb, next, head, list) {
5492 if ((sk_memalloc_socks() && skb_pfmemalloc(skb)) != pfmemalloc) {
5493 struct list_head sublist;
5494
5495 /* Handle the previous sublist */
5496 list_cut_before(&sublist, head, &skb->list);
b9f463d6
EC
5497 if (!list_empty(&sublist))
5498 __netif_receive_skb_list_core(&sublist, pfmemalloc);
4ce0017a
EC
5499 pfmemalloc = !pfmemalloc;
5500 /* See comments in __netif_receive_skb */
5501 if (pfmemalloc)
5502 noreclaim_flag = memalloc_noreclaim_save();
5503 else
5504 memalloc_noreclaim_restore(noreclaim_flag);
5505 }
5506 }
5507 /* Handle the remaining sublist */
b9f463d6
EC
5508 if (!list_empty(head))
5509 __netif_receive_skb_list_core(head, pfmemalloc);
4ce0017a
EC
5510 /* Restore pflags */
5511 if (pfmemalloc)
5512 memalloc_noreclaim_restore(noreclaim_flag);
5513}
5514
f4e63525 5515static int generic_xdp_install(struct net_device *dev, struct netdev_bpf *xdp)
b5cdae32 5516{
58038695 5517 struct bpf_prog *old = rtnl_dereference(dev->xdp_prog);
b5cdae32
DM
5518 struct bpf_prog *new = xdp->prog;
5519 int ret = 0;
5520
fbee97fe
DA
5521 if (new) {
5522 u32 i;
5523
984fe94f
YZ
5524 mutex_lock(&new->aux->used_maps_mutex);
5525
fbee97fe
DA
5526 /* generic XDP does not work with DEVMAPs that can
5527 * have a bpf_prog installed on an entry
5528 */
5529 for (i = 0; i < new->aux->used_map_cnt; i++) {
984fe94f
YZ
5530 if (dev_map_can_have_prog(new->aux->used_maps[i]) ||
5531 cpu_map_prog_allowed(new->aux->used_maps[i])) {
5532 mutex_unlock(&new->aux->used_maps_mutex);
92164774 5533 return -EINVAL;
984fe94f 5534 }
fbee97fe 5535 }
984fe94f
YZ
5536
5537 mutex_unlock(&new->aux->used_maps_mutex);
fbee97fe
DA
5538 }
5539
b5cdae32 5540 switch (xdp->command) {
58038695 5541 case XDP_SETUP_PROG:
b5cdae32
DM
5542 rcu_assign_pointer(dev->xdp_prog, new);
5543 if (old)
5544 bpf_prog_put(old);
5545
5546 if (old && !new) {
02786475 5547 static_branch_dec(&generic_xdp_needed_key);
b5cdae32 5548 } else if (new && !old) {
02786475 5549 static_branch_inc(&generic_xdp_needed_key);
b5cdae32 5550 dev_disable_lro(dev);
56f5aa77 5551 dev_disable_gro_hw(dev);
b5cdae32
DM
5552 }
5553 break;
b5cdae32 5554
b5cdae32
DM
5555 default:
5556 ret = -EINVAL;
5557 break;
5558 }
5559
5560 return ret;
5561}
5562
ae78dbfa 5563static int netif_receive_skb_internal(struct sk_buff *skb)
0a9627f2 5564{
2c17d27c
JA
5565 int ret;
5566
588f0330 5567 net_timestamp_check(netdev_tstamp_prequeue, skb);
3b098e2d 5568
c1f19b51
RC
5569 if (skb_defer_rx_timestamp(skb))
5570 return NET_RX_SUCCESS;
5571
bbbe211c 5572 rcu_read_lock();
df334545 5573#ifdef CONFIG_RPS
dc05360f 5574 if (static_branch_unlikely(&rps_needed)) {
3b098e2d 5575 struct rps_dev_flow voidflow, *rflow = &voidflow;
2c17d27c 5576 int cpu = get_rps_cpu(skb->dev, skb, &rflow);
0a9627f2 5577
3b098e2d
ED
5578 if (cpu >= 0) {
5579 ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
5580 rcu_read_unlock();
adc9300e 5581 return ret;
3b098e2d 5582 }
fec5e652 5583 }
1e94d72f 5584#endif
2c17d27c
JA
5585 ret = __netif_receive_skb(skb);
5586 rcu_read_unlock();
5587 return ret;
0a9627f2 5588}
ae78dbfa 5589
7da517a3
EC
5590static void netif_receive_skb_list_internal(struct list_head *head)
5591{
7da517a3 5592 struct sk_buff *skb, *next;
8c057efa 5593 struct list_head sublist;
7da517a3 5594
8c057efa 5595 INIT_LIST_HEAD(&sublist);
7da517a3
EC
5596 list_for_each_entry_safe(skb, next, head, list) {
5597 net_timestamp_check(netdev_tstamp_prequeue, skb);
22f6bbb7 5598 skb_list_del_init(skb);
8c057efa
EC
5599 if (!skb_defer_rx_timestamp(skb))
5600 list_add_tail(&skb->list, &sublist);
7da517a3 5601 }
8c057efa 5602 list_splice_init(&sublist, head);
7da517a3 5603
7da517a3
EC
5604 rcu_read_lock();
5605#ifdef CONFIG_RPS
dc05360f 5606 if (static_branch_unlikely(&rps_needed)) {
7da517a3
EC
5607 list_for_each_entry_safe(skb, next, head, list) {
5608 struct rps_dev_flow voidflow, *rflow = &voidflow;
5609 int cpu = get_rps_cpu(skb->dev, skb, &rflow);
5610
5611 if (cpu >= 0) {
8c057efa 5612 /* Will be handled, remove from list */
22f6bbb7 5613 skb_list_del_init(skb);
8c057efa 5614 enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
7da517a3
EC
5615 }
5616 }
5617 }
5618#endif
5619 __netif_receive_skb_list(head);
5620 rcu_read_unlock();
5621}
5622
ae78dbfa
BH
5623/**
5624 * netif_receive_skb - process receive buffer from network
5625 * @skb: buffer to process
5626 *
5627 * netif_receive_skb() is the main receive data processing function.
5628 * It always succeeds. The buffer may be dropped during processing
5629 * for congestion control or by the protocol layers.
5630 *
5631 * This function may only be called from softirq context and interrupts
5632 * should be enabled.
5633 *
5634 * Return values (usually ignored):
5635 * NET_RX_SUCCESS: no congestion
5636 * NET_RX_DROP: packet was dropped
5637 */
04eb4489 5638int netif_receive_skb(struct sk_buff *skb)
ae78dbfa 5639{
b0e3f1bd
GB
5640 int ret;
5641
ae78dbfa
BH
5642 trace_netif_receive_skb_entry(skb);
5643
b0e3f1bd
GB
5644 ret = netif_receive_skb_internal(skb);
5645 trace_netif_receive_skb_exit(ret);
5646
5647 return ret;
ae78dbfa 5648}
04eb4489 5649EXPORT_SYMBOL(netif_receive_skb);
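
/* Example (illustrative sketch, not part of this file): the classic
 * single-skb delivery path; my_rx_one is hypothetical. The driver fills
 * in skb->protocol via eth_type_trans() before handing the packet over,
 * and the return value is typically ignored.
 */
static void my_rx_one(struct net_device *dev, struct sk_buff *skb)
{
	skb->protocol = eth_type_trans(skb, dev);
	netif_receive_skb(skb);
}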
1da177e4 5650
f6ad8c1b
EC
5651/**
5652 * netif_receive_skb_list - process many receive buffers from network
5653 * @head: list of skbs to process.
5654 *
7da517a3
EC
5655 * Since the return value of netif_receive_skb() is normally ignored, and
5656 * wouldn't be meaningful for a list, this function returns void.
f6ad8c1b
EC
5657 *
5658 * This function may only be called from softirq context and interrupts
5659 * should be enabled.
5660 */
5661void netif_receive_skb_list(struct list_head *head)
5662{
7da517a3 5663 struct sk_buff *skb;
f6ad8c1b 5664
b9f463d6
EC
5665 if (list_empty(head))
5666 return;
b0e3f1bd
GB
5667 if (trace_netif_receive_skb_list_entry_enabled()) {
5668 list_for_each_entry(skb, head, list)
5669 trace_netif_receive_skb_list_entry(skb);
5670 }
7da517a3 5671 netif_receive_skb_list_internal(head);
b0e3f1bd 5672 trace_netif_receive_skb_list_exit(0);
f6ad8c1b
EC
5673}
5674EXPORT_SYMBOL(netif_receive_skb_list);
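
/* Example (illustrative sketch, not part of this file): batched receive.
 * my_ring and my_fetch_skb are hypothetical driver helpers; the point is
 * that completed skbs are collected on a list and handed to the stack in
 * one call, amortizing per-packet costs.
 */
static void my_rx_batch(struct my_ring *ring, int budget)
{
	struct sk_buff *skb;
	LIST_HEAD(rx_list);

	while (budget-- && (skb = my_fetch_skb(ring)))
		list_add_tail(&skb->list, &rx_list);

	netif_receive_skb_list(&rx_list);
}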
5675
ce1e2a77 5676static DEFINE_PER_CPU(struct work_struct, flush_works);
145dd5f9
PA
5677
5678/* Network device is going away, flush any packets still pending */
5679static void flush_backlog(struct work_struct *work)
6e583ce5 5680{
6e583ce5 5681 struct sk_buff *skb, *tmp;
145dd5f9
PA
5682 struct softnet_data *sd;
5683
5684 local_bh_disable();
5685 sd = this_cpu_ptr(&softnet_data);
6e583ce5 5686
145dd5f9 5687 local_irq_disable();
e36fa2f7 5688 rps_lock(sd);
6e7676c1 5689 skb_queue_walk_safe(&sd->input_pkt_queue, skb, tmp) {
41852497 5690 if (skb->dev->reg_state == NETREG_UNREGISTERING) {
e36fa2f7 5691 __skb_unlink(skb, &sd->input_pkt_queue);
7df5cb75 5692 dev_kfree_skb_irq(skb);
76cc8b13 5693 input_queue_head_incr(sd);
6e583ce5 5694 }
6e7676c1 5695 }
e36fa2f7 5696 rps_unlock(sd);
145dd5f9 5697 local_irq_enable();
6e7676c1
CG
5698
5699 skb_queue_walk_safe(&sd->process_queue, skb, tmp) {
41852497 5700 if (skb->dev->reg_state == NETREG_UNREGISTERING) {
6e7676c1
CG
5701 __skb_unlink(skb, &sd->process_queue);
5702 kfree_skb(skb);
76cc8b13 5703 input_queue_head_incr(sd);
6e7676c1
CG
5704 }
5705 }
145dd5f9
PA
5706 local_bh_enable();
5707}
5708
2de79ee2
PA
5709static bool flush_required(int cpu)
5710{
5711#if IS_ENABLED(CONFIG_RPS)
5712 struct softnet_data *sd = &per_cpu(softnet_data, cpu);
5713 bool do_flush;
5714
5715 local_irq_disable();
5716 rps_lock(sd);
5717
5718 /* as insertion into process_queue happens with the rps lock held,
5719 * process_queue access may race only with dequeue
5720 */
5721 do_flush = !skb_queue_empty(&sd->input_pkt_queue) ||
5722 !skb_queue_empty_lockless(&sd->process_queue);
5723 rps_unlock(sd);
5724 local_irq_enable();
5725
5726 return do_flush;
5727#endif
5728 /* without RPS we can't safely check input_pkt_queue: during a
5729 * concurrent remote skb_queue_splice() we can detect as empty both
5730 * input_pkt_queue and process_queue even if the latter could end up
5731 * containing a lot of packets.
5732 */
5733 return true;
5734}
5735
41852497 5736static void flush_all_backlogs(void)
145dd5f9 5737{
2de79ee2 5738 static cpumask_t flush_cpus;
145dd5f9
PA
5739 unsigned int cpu;
5740
2de79ee2
PA
5741 /* since we are under rtnl lock protection we can use static data
5742 * for the cpumask and avoid allocating on stack the possibly
5743 * large mask
5744 */
5745 ASSERT_RTNL();
5746
145dd5f9
PA
5747 get_online_cpus();
5748
2de79ee2
PA
5749 cpumask_clear(&flush_cpus);
5750 for_each_online_cpu(cpu) {
5751 if (flush_required(cpu)) {
5752 queue_work_on(cpu, system_highpri_wq,
5753 per_cpu_ptr(&flush_works, cpu));
5754 cpumask_set_cpu(cpu, &flush_cpus);
5755 }
5756 }
145dd5f9 5757
2de79ee2 5758 /* we can have in-flight packets on the cpus we are not flushing,
0cbe1e57 5759 * synchronize_net() in unregister_netdevice_many() will take care of
2de79ee2
PA
5760 * them
5761 */
5762 for_each_cpu(cpu, &flush_cpus)
41852497 5763 flush_work(per_cpu_ptr(&flush_works, cpu));
145dd5f9
PA
5764
5765 put_online_cpus();
6e583ce5
SH
5766}
5767
c8079432
MM
5768/* Pass the currently batched GRO_NORMAL SKBs up to the stack. */
5769static void gro_normal_list(struct napi_struct *napi)
5770{
5771 if (!napi->rx_count)
5772 return;
5773 netif_receive_skb_list_internal(&napi->rx_list);
5774 INIT_LIST_HEAD(&napi->rx_list);
5775 napi->rx_count = 0;
5776}
5777
5778/* Queue one GRO_NORMAL SKB up for list processing. If the batch size is exceeded,
5779 * pass the whole batch up to the stack.
5780 */
8dc1c444 5781static void gro_normal_one(struct napi_struct *napi, struct sk_buff *skb, int segs)
c8079432
MM
5782{
5783 list_add_tail(&skb->list, &napi->rx_list);
8dc1c444
ED
5784 napi->rx_count += segs;
5785 if (napi->rx_count >= gro_normal_batch)
c8079432
MM
5786 gro_normal_list(napi);
5787}
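
/* Note: gro_normal_batch is runtime-tunable through the
 * net.core.gro_normal_batch sysctl; larger values amortize the cost of
 * list delivery at the price of slightly more latency per packet.
 */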
5788
c8079432 5789static int napi_gro_complete(struct napi_struct *napi, struct sk_buff *skb)
d565b0a1 5790{
22061d80 5791 struct packet_offload *ptype;
d565b0a1 5792 __be16 type = skb->protocol;
22061d80 5793 struct list_head *head = &offload_base;
d565b0a1
HX
5794 int err = -ENOENT;
5795
c3c7c254
ED
5796 BUILD_BUG_ON(sizeof(struct napi_gro_cb) > sizeof(skb->cb));
5797
fc59f9a3
HX
5798 if (NAPI_GRO_CB(skb)->count == 1) {
5799 skb_shinfo(skb)->gso_size = 0;
d565b0a1 5800 goto out;
fc59f9a3 5801 }
d565b0a1
HX
5802
5803 rcu_read_lock();
5804 list_for_each_entry_rcu(ptype, head, list) {
f191a1d1 5805 if (ptype->type != type || !ptype->callbacks.gro_complete)
d565b0a1
HX
5806 continue;
5807
aaa5d90b
PA
5808 err = INDIRECT_CALL_INET(ptype->callbacks.gro_complete,
5809 ipv6_gro_complete, inet_gro_complete,
5810 skb, 0);
d565b0a1
HX
5811 break;
5812 }
5813 rcu_read_unlock();
5814
5815 if (err) {
5816 WARN_ON(&ptype->list == head);
5817 kfree_skb(skb);
5818 return NET_RX_SUCCESS;
5819 }
5820
5821out:
8dc1c444 5822 gro_normal_one(napi, skb, NAPI_GRO_CB(skb)->count);
c8079432 5823 return NET_RX_SUCCESS;
d565b0a1
HX
5824}
5825
6312fe77 5826static void __napi_gro_flush_chain(struct napi_struct *napi, u32 index,
07d78363 5827 bool flush_old)
d565b0a1 5828{
6312fe77 5829 struct list_head *head = &napi->gro_hash[index].list;
d4546c25 5830 struct sk_buff *skb, *p;
2e71a6f8 5831
07d78363 5832 list_for_each_entry_safe_reverse(skb, p, head, list) {
2e71a6f8
ED
5833 if (flush_old && NAPI_GRO_CB(skb)->age == jiffies)
5834 return;
992cba7e 5835 skb_list_del_init(skb);
c8079432 5836 napi_gro_complete(napi, skb);
6312fe77 5837 napi->gro_hash[index].count--;
d565b0a1 5838 }
d9f37d01
LR
5839
5840 if (!napi->gro_hash[index].count)
5841 __clear_bit(index, &napi->gro_bitmask);
d565b0a1 5842}
07d78363 5843
6312fe77 5844/* napi->gro_hash[].list contains packets ordered by age,
07d78363
DM
5845 * with the youngest packets at the head.
5846 * Complete skbs in reverse order to reduce latencies.
5847 */
5848void napi_gro_flush(struct napi_struct *napi, bool flush_old)
5849{
42519ede
ED
5850 unsigned long bitmask = napi->gro_bitmask;
5851 unsigned int i, base = ~0U;
07d78363 5852
42519ede
ED
5853 while ((i = ffs(bitmask)) != 0) {
5854 bitmask >>= i;
5855 base += i;
5856 __napi_gro_flush_chain(napi, base, flush_old);
d9f37d01 5857 }
07d78363 5858}
86cac58b 5859EXPORT_SYMBOL(napi_gro_flush);
d565b0a1 5860
07d78363
DM
5861static struct list_head *gro_list_prepare(struct napi_struct *napi,
5862 struct sk_buff *skb)
89c5fa33 5863{
89c5fa33 5864 unsigned int maclen = skb->dev->hard_header_len;
0b4cec8c 5865 u32 hash = skb_get_hash_raw(skb);
07d78363 5866 struct list_head *head;
d4546c25 5867 struct sk_buff *p;
89c5fa33 5868
6312fe77 5869 head = &napi->gro_hash[hash & (GRO_HASH_BUCKETS - 1)].list;
07d78363 5870 list_for_each_entry(p, head, list) {
89c5fa33
ED
5871 unsigned long diffs;
5872
0b4cec8c
TH
5873 NAPI_GRO_CB(p)->flush = 0;
5874
5875 if (hash != skb_get_hash_raw(p)) {
5876 NAPI_GRO_CB(p)->same_flow = 0;
5877 continue;
5878 }
5879
89c5fa33 5880 diffs = (unsigned long)p->dev ^ (unsigned long)skb->dev;
b1817524
MM
5881 diffs |= skb_vlan_tag_present(p) ^ skb_vlan_tag_present(skb);
5882 if (skb_vlan_tag_present(p))
fc5141cb 5883 diffs |= skb_vlan_tag_get(p) ^ skb_vlan_tag_get(skb);
ce87fc6c 5884 diffs |= skb_metadata_dst_cmp(p, skb);
de8f3a83 5885 diffs |= skb_metadata_differs(p, skb);
89c5fa33
ED
5886 if (maclen == ETH_HLEN)
5887 diffs |= compare_ether_header(skb_mac_header(p),
a50e233c 5888 skb_mac_header(skb));
89c5fa33
ED
5889 else if (!diffs)
5890 diffs = memcmp(skb_mac_header(p),
a50e233c 5891 skb_mac_header(skb),
89c5fa33
ED
5892 maclen);
5893 NAPI_GRO_CB(p)->same_flow = !diffs;
89c5fa33 5894 }
07d78363
DM
5895
5896 return head;
89c5fa33
ED
5897}
5898
299603e8
JC
5899static void skb_gro_reset_offset(struct sk_buff *skb)
5900{
5901 const struct skb_shared_info *pinfo = skb_shinfo(skb);
5902 const skb_frag_t *frag0 = &pinfo->frags[0];
5903
5904 NAPI_GRO_CB(skb)->data_offset = 0;
5905 NAPI_GRO_CB(skb)->frag0 = NULL;
5906 NAPI_GRO_CB(skb)->frag0_len = 0;
5907
8aef998d 5908 if (!skb_headlen(skb) && pinfo->nr_frags &&
299603e8
JC
5909 !PageHighMem(skb_frag_page(frag0))) {
5910 NAPI_GRO_CB(skb)->frag0 = skb_frag_address(frag0);
7cfd5fd5
ED
5911 NAPI_GRO_CB(skb)->frag0_len = min_t(unsigned int,
5912 skb_frag_size(frag0),
5913 skb->end - skb->tail);
89c5fa33
ED
5914 }
5915}
5916
a50e233c
ED
5917static void gro_pull_from_frag0(struct sk_buff *skb, int grow)
5918{
5919 struct skb_shared_info *pinfo = skb_shinfo(skb);
5920
5921 BUG_ON(skb->end - skb->tail < grow);
5922
5923 memcpy(skb_tail_pointer(skb), NAPI_GRO_CB(skb)->frag0, grow);
5924
5925 skb->data_len -= grow;
5926 skb->tail += grow;
5927
b54c9d5b 5928 skb_frag_off_add(&pinfo->frags[0], grow);
a50e233c
ED
5929 skb_frag_size_sub(&pinfo->frags[0], grow);
5930
5931 if (unlikely(!skb_frag_size(&pinfo->frags[0]))) {
5932 skb_frag_unref(skb, 0);
5933 memmove(pinfo->frags, pinfo->frags + 1,
5934 --pinfo->nr_frags * sizeof(pinfo->frags[0]));
5935 }
5936}
5937
c8079432 5938static void gro_flush_oldest(struct napi_struct *napi, struct list_head *head)
07d78363 5939{
6312fe77 5940 struct sk_buff *oldest;
07d78363 5941
6312fe77 5942 oldest = list_last_entry(head, struct sk_buff, list);
07d78363 5943
6312fe77 5944 /* We are called with head length >= MAX_GRO_SKBS, so this is
07d78363
DM
5945 * impossible.
5946 */
5947 if (WARN_ON_ONCE(!oldest))
5948 return;
5949
d9f37d01
LR
5950 /* Do not adjust napi->gro_hash[].count, caller is adding a new
5951 * SKB to the chain.
07d78363 5952 */
ece23711 5953 skb_list_del_init(oldest);
c8079432 5954 napi_gro_complete(napi, oldest);
07d78363
DM
5955}
5956
bb728820 5957static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
d565b0a1 5958{
6312fe77 5959 u32 hash = skb_get_hash_raw(skb) & (GRO_HASH_BUCKETS - 1);
d4546c25 5960 struct list_head *head = &offload_base;
22061d80 5961 struct packet_offload *ptype;
d565b0a1 5962 __be16 type = skb->protocol;
07d78363 5963 struct list_head *gro_head;
d4546c25 5964 struct sk_buff *pp = NULL;
5b252f0c 5965 enum gro_result ret;
d4546c25 5966 int same_flow;
a50e233c 5967 int grow;
d565b0a1 5968
b5cdae32 5969 if (netif_elide_gro(skb->dev))
d565b0a1
HX
5970 goto normal;
5971
07d78363 5972 gro_head = gro_list_prepare(napi, skb);
89c5fa33 5973
d565b0a1
HX
5974 rcu_read_lock();
5975 list_for_each_entry_rcu(ptype, head, list) {
f191a1d1 5976 if (ptype->type != type || !ptype->callbacks.gro_receive)
d565b0a1
HX
5977 continue;
5978
86911732 5979 skb_set_network_header(skb, skb_gro_offset(skb));
efd9450e 5980 skb_reset_mac_len(skb);
d565b0a1 5981 NAPI_GRO_CB(skb)->same_flow = 0;
d61d072e 5982 NAPI_GRO_CB(skb)->flush = skb_is_gso(skb) || skb_has_frag_list(skb);
5d38a079 5983 NAPI_GRO_CB(skb)->free = 0;
fac8e0f5 5984 NAPI_GRO_CB(skb)->encap_mark = 0;
fcd91dd4 5985 NAPI_GRO_CB(skb)->recursion_counter = 0;
a0ca153f 5986 NAPI_GRO_CB(skb)->is_fou = 0;
1530545e 5987 NAPI_GRO_CB(skb)->is_atomic = 1;
15e2396d 5988 NAPI_GRO_CB(skb)->gro_remcsum_start = 0;
d565b0a1 5989
662880f4
TH
5990 /* Setup for GRO checksum validation */
5991 switch (skb->ip_summed) {
5992 case CHECKSUM_COMPLETE:
5993 NAPI_GRO_CB(skb)->csum = skb->csum;
5994 NAPI_GRO_CB(skb)->csum_valid = 1;
5995 NAPI_GRO_CB(skb)->csum_cnt = 0;
5996 break;
5997 case CHECKSUM_UNNECESSARY:
5998 NAPI_GRO_CB(skb)->csum_cnt = skb->csum_level + 1;
5999 NAPI_GRO_CB(skb)->csum_valid = 0;
6000 break;
6001 default:
6002 NAPI_GRO_CB(skb)->csum_cnt = 0;
6003 NAPI_GRO_CB(skb)->csum_valid = 0;
6004 }
d565b0a1 6005
aaa5d90b
PA
6006 pp = INDIRECT_CALL_INET(ptype->callbacks.gro_receive,
6007 ipv6_gro_receive, inet_gro_receive,
6008 gro_head, skb);
d565b0a1
HX
6009 break;
6010 }
6011 rcu_read_unlock();
6012
6013 if (&ptype->list == head)
6014 goto normal;
6015
45586c70 6016 if (PTR_ERR(pp) == -EINPROGRESS) {
25393d3f
SK
6017 ret = GRO_CONSUMED;
6018 goto ok;
6019 }
6020
0da2afd5 6021 same_flow = NAPI_GRO_CB(skb)->same_flow;
5d0d9be8 6022 ret = NAPI_GRO_CB(skb)->free ? GRO_MERGED_FREE : GRO_MERGED;
0da2afd5 6023
d565b0a1 6024 if (pp) {
992cba7e 6025 skb_list_del_init(pp);
c8079432 6026 napi_gro_complete(napi, pp);
6312fe77 6027 napi->gro_hash[hash].count--;
d565b0a1
HX
6028 }
6029
0da2afd5 6030 if (same_flow)
d565b0a1
HX
6031 goto ok;
6032
600adc18 6033 if (NAPI_GRO_CB(skb)->flush)
d565b0a1 6034 goto normal;
d565b0a1 6035
6312fe77 6036 if (unlikely(napi->gro_hash[hash].count >= MAX_GRO_SKBS)) {
c8079432 6037 gro_flush_oldest(napi, gro_head);
600adc18 6038 } else {
6312fe77 6039 napi->gro_hash[hash].count++;
600adc18 6040 }
d565b0a1 6041 NAPI_GRO_CB(skb)->count = 1;
2e71a6f8 6042 NAPI_GRO_CB(skb)->age = jiffies;
29e98242 6043 NAPI_GRO_CB(skb)->last = skb;
86911732 6044 skb_shinfo(skb)->gso_size = skb_gro_len(skb);
07d78363 6045 list_add(&skb->list, gro_head);
5d0d9be8 6046 ret = GRO_HELD;
d565b0a1 6047
ad0f9904 6048pull:
a50e233c
ED
6049 grow = skb_gro_offset(skb) - skb_headlen(skb);
6050 if (grow > 0)
6051 gro_pull_from_frag0(skb, grow);
d565b0a1 6052ok:
d9f37d01
LR
6053 if (napi->gro_hash[hash].count) {
6054 if (!test_bit(hash, &napi->gro_bitmask))
6055 __set_bit(hash, &napi->gro_bitmask);
6056 } else if (test_bit(hash, &napi->gro_bitmask)) {
6057 __clear_bit(hash, &napi->gro_bitmask);
6058 }
6059
5d0d9be8 6060 return ret;
d565b0a1
HX
6061
6062normal:
ad0f9904
HX
6063 ret = GRO_NORMAL;
6064 goto pull;
5d38a079 6065}
96e93eab 6066
bf5a755f
JC
6067struct packet_offload *gro_find_receive_by_type(__be16 type)
6068{
6069 struct list_head *offload_head = &offload_base;
6070 struct packet_offload *ptype;
6071
6072 list_for_each_entry_rcu(ptype, offload_head, list) {
6073 if (ptype->type != type || !ptype->callbacks.gro_receive)
6074 continue;
6075 return ptype;
6076 }
6077 return NULL;
6078}
e27a2f83 6079EXPORT_SYMBOL(gro_find_receive_by_type);
bf5a755f
JC
6080
6081struct packet_offload *gro_find_complete_by_type(__be16 type)
6082{
6083 struct list_head *offload_head = &offload_base;
6084 struct packet_offload *ptype;
6085
6086 list_for_each_entry_rcu(ptype, offload_head, list) {
6087 if (ptype->type != type || !ptype->callbacks.gro_complete)
6088 continue;
6089 return ptype;
6090 }
6091 return NULL;
6092}
e27a2f83 6093EXPORT_SYMBOL(gro_find_complete_by_type);
5d38a079 6094
6570bc79
AL
6095static gro_result_t napi_skb_finish(struct napi_struct *napi,
6096 struct sk_buff *skb,
6097 gro_result_t ret)
5d38a079 6098{
5d0d9be8
HX
6099 switch (ret) {
6100 case GRO_NORMAL:
8dc1c444 6101 gro_normal_one(napi, skb, 1);
c7c4b3b6 6102 break;
5d38a079 6103
daa86548 6104 case GRO_MERGED_FREE:
e44699d2
MK
6105 if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD)
6106 napi_skb_free_stolen_head(skb);
6107 else
9243adfc 6108 __kfree_skb_defer(skb);
daa86548
ED
6109 break;
6110
5b252f0c
BH
6111 case GRO_HELD:
6112 case GRO_MERGED:
25393d3f 6113 case GRO_CONSUMED:
5b252f0c 6114 break;
5d38a079
HX
6115 }
6116
c7c4b3b6 6117 return ret;
5d0d9be8 6118}
5d0d9be8 6119
c7c4b3b6 6120gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
5d0d9be8 6121{
b0e3f1bd
GB
6122 gro_result_t ret;
6123
93f93a44 6124 skb_mark_napi_id(skb, napi);
ae78dbfa 6125 trace_napi_gro_receive_entry(skb);
86911732 6126
a50e233c
ED
6127 skb_gro_reset_offset(skb);
6128
6570bc79 6129 ret = napi_skb_finish(napi, skb, dev_gro_receive(napi, skb));
b0e3f1bd
GB
6130 trace_napi_gro_receive_exit(ret);
6131
6132 return ret;
d565b0a1
HX
6133}
6134EXPORT_SYMBOL(napi_gro_receive);
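
/* Example (illustrative sketch, not part of this file): GRO-aware
 * delivery from a driver poll loop; my_priv and my_build_skb are
 * hypothetical. Calling napi_gro_receive() instead of netif_receive_skb()
 * lets eligible flows be coalesced before they reach the stack.
 */
static void my_rx_gro(struct my_priv *priv, void *desc)
{
	struct sk_buff *skb = my_build_skb(priv, desc);

	if (unlikely(!skb))
		return;
	skb->protocol = eth_type_trans(skb, priv->netdev);
	napi_gro_receive(&priv->napi, skb);
}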
6135
d0c2b0d2 6136static void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb)
96e93eab 6137{
93a35f59
ED
6138 if (unlikely(skb->pfmemalloc)) {
6139 consume_skb(skb);
6140 return;
6141 }
96e93eab 6142 __skb_pull(skb, skb_headlen(skb));
2a2a459e
ED
6143 /* restore the reserve we had after netdev_alloc_skb_ip_align() */
6144 skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN - skb_headroom(skb));
b1817524 6145 __vlan_hwaccel_clear_tag(skb);
66c46d74 6146 skb->dev = napi->dev;
6d152e23 6147 skb->skb_iif = 0;
33d9a2c7
ED
6148
6149 /* eth_type_trans() assumes pkt_type is PACKET_HOST */
6150 skb->pkt_type = PACKET_HOST;
6151
c3caf119
JC
6152 skb->encapsulation = 0;
6153 skb_shinfo(skb)->gso_type = 0;
e33d0ba8 6154 skb->truesize = SKB_TRUESIZE(skb_end_offset(skb));
174e2381 6155 skb_ext_reset(skb);
96e93eab
HX
6156
6157 napi->skb = skb;
6158}
96e93eab 6159
76620aaf 6160struct sk_buff *napi_get_frags(struct napi_struct *napi)
5d38a079 6161{
5d38a079 6162 struct sk_buff *skb = napi->skb;
5d38a079
HX
6163
6164 if (!skb) {
fd11a83d 6165 skb = napi_alloc_skb(napi, GRO_MAX_HEAD);
e2f9dc3b
ED
6166 if (skb) {
6167 napi->skb = skb;
6168 skb_mark_napi_id(skb, napi);
6169 }
80595d59 6170 }
96e93eab
HX
6171 return skb;
6172}
76620aaf 6173EXPORT_SYMBOL(napi_get_frags);
96e93eab 6174
a50e233c
ED
6175static gro_result_t napi_frags_finish(struct napi_struct *napi,
6176 struct sk_buff *skb,
6177 gro_result_t ret)
96e93eab 6178{
5d0d9be8
HX
6179 switch (ret) {
6180 case GRO_NORMAL:
a50e233c
ED
6181 case GRO_HELD:
6182 __skb_push(skb, ETH_HLEN);
6183 skb->protocol = eth_type_trans(skb, skb->dev);
323ebb61 6184 if (ret == GRO_NORMAL)
8dc1c444 6185 gro_normal_one(napi, skb, 1);
86911732 6186 break;
5d38a079 6187
e44699d2
MK
6188 case GRO_MERGED_FREE:
6189 if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD)
6190 napi_skb_free_stolen_head(skb);
6191 else
6192 napi_reuse_skb(napi, skb);
6193 break;
6194
5b252f0c 6195 case GRO_MERGED:
25393d3f 6196 case GRO_CONSUMED:
5b252f0c 6197 break;
5d0d9be8 6198 }
5d38a079 6199
c7c4b3b6 6200 return ret;
5d38a079 6201}
5d0d9be8 6202
a50e233c
ED
6203/* Upper GRO stack assumes network header starts at gro_offset=0
6204 * Drivers could call both napi_gro_frags() and napi_gro_receive()
6205 * We copy ethernet header into skb->data to have a common layout.
6206 */
4adb9c4a 6207static struct sk_buff *napi_frags_skb(struct napi_struct *napi)
76620aaf
HX
6208{
6209 struct sk_buff *skb = napi->skb;
a50e233c
ED
6210 const struct ethhdr *eth;
6211 unsigned int hlen = sizeof(*eth);
76620aaf
HX
6212
6213 napi->skb = NULL;
6214
a50e233c
ED
6215 skb_reset_mac_header(skb);
6216 skb_gro_reset_offset(skb);
6217
a50e233c
ED
6218 if (unlikely(skb_gro_header_hard(skb, hlen))) {
6219 eth = skb_gro_header_slow(skb, hlen, 0);
6220 if (unlikely(!eth)) {
4da46ceb
AC
6221 net_warn_ratelimited("%s: dropping impossible skb from %s\n",
6222 __func__, napi->dev->name);
a50e233c
ED
6223 napi_reuse_skb(napi, skb);
6224 return NULL;
6225 }
6226 } else {
a4270d67 6227 eth = (const struct ethhdr *)skb->data;
a50e233c
ED
6228 gro_pull_from_frag0(skb, hlen);
6229 NAPI_GRO_CB(skb)->frag0 += hlen;
6230 NAPI_GRO_CB(skb)->frag0_len -= hlen;
76620aaf 6231 }
a50e233c
ED
6232 __skb_pull(skb, hlen);
6233
6234 /*
6235 * This works because the only protocols we care about don't require
6236 * special handling.
6237 * We'll fix it up properly in napi_frags_finish()
6238 */
6239 skb->protocol = eth->h_proto;
76620aaf 6240
76620aaf
HX
6241 return skb;
6242}
76620aaf 6243
c7c4b3b6 6244gro_result_t napi_gro_frags(struct napi_struct *napi)
5d0d9be8 6245{
b0e3f1bd 6246 gro_result_t ret;
76620aaf 6247 struct sk_buff *skb = napi_frags_skb(napi);
5d0d9be8 6248
ae78dbfa
BH
6249 trace_napi_gro_frags_entry(skb);
6250
b0e3f1bd
GB
6251 ret = napi_frags_finish(napi, skb, dev_gro_receive(napi, skb));
6252 trace_napi_gro_frags_exit(ret);
6253
6254 return ret;
5d0d9be8 6255}
5d38a079
HX
6256EXPORT_SYMBOL(napi_gro_frags);
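
/* Example (illustrative sketch, not part of this file): the frags API;
 * the page/off/len parameters and the PAGE_SIZE truesize are hypothetical
 * driver state. The driver attaches only page fragments and never builds
 * a linear header; GRO pulls the Ethernet header out of frag0 itself
 * (see napi_frags_skb() above).
 */
static void my_rx_frag(struct napi_struct *napi, struct page *page,
		       unsigned int off, unsigned int len)
{
	struct sk_buff *skb = napi_get_frags(napi);

	if (unlikely(!skb))
		return;
	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, off, len,
			PAGE_SIZE);
	napi_gro_frags(napi);
}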
6257
573e8fca
TH
6258/* Compute the checksum from gro_offset and return the folded value
6259 * after adding in any pseudo checksum.
6260 */
6261__sum16 __skb_gro_checksum_complete(struct sk_buff *skb)
6262{
6263 __wsum wsum;
6264 __sum16 sum;
6265
6266 wsum = skb_checksum(skb, skb_gro_offset(skb), skb_gro_len(skb), 0);
6267
6268 /* NAPI_GRO_CB(skb)->csum holds pseudo checksum */
6269 sum = csum_fold(csum_add(NAPI_GRO_CB(skb)->csum, wsum));
14641931 6270 /* See comments in __skb_checksum_complete(). */
573e8fca
TH
6271 if (likely(!sum)) {
6272 if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) &&
6273 !skb->csum_complete_sw)
7fe50ac8 6274 netdev_rx_csum_fault(skb->dev, skb);
573e8fca
TH
6275 }
6276
6277 NAPI_GRO_CB(skb)->csum = wsum;
6278 NAPI_GRO_CB(skb)->csum_valid = 1;
6279
6280 return sum;
6281}
6282EXPORT_SYMBOL(__skb_gro_checksum_complete);
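
/* Usage note: protocol gro_receive handlers rarely call this directly;
 * they normally go through helpers such as skb_gro_checksum_validate(),
 * which fall back to __skb_gro_checksum_complete() only when no
 * pre-validated checksum can vouch for the packet.
 */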
6283
773fc8f6 6284static void net_rps_send_ipi(struct softnet_data *remsd)
6285{
6286#ifdef CONFIG_RPS
6287 while (remsd) {
6288 struct softnet_data *next = remsd->rps_ipi_next;
6289
6290 if (cpu_online(remsd->cpu))
6291 smp_call_function_single_async(remsd->cpu, &remsd->csd);
6292 remsd = next;
6293 }
6294#endif
6295}
6296
e326bed2 6297/*
855abcf0 6298 * net_rps_action_and_irq_enable sends any pending IPI's for rps.
e326bed2
ED
6299 * Note: called with local irq disabled, but exits with local irq enabled.
6300 */
6301static void net_rps_action_and_irq_enable(struct softnet_data *sd)
6302{
6303#ifdef CONFIG_RPS
6304 struct softnet_data *remsd = sd->rps_ipi_list;
6305
6306 if (remsd) {
6307 sd->rps_ipi_list = NULL;
6308
6309 local_irq_enable();
6310
6311 /* Send pending IPI's to kick RPS processing on remote cpus. */
773fc8f6 6312 net_rps_send_ipi(remsd);
e326bed2
ED
6313 } else
6314#endif
6315 local_irq_enable();
6316}
6317
d75b1ade
ED
6318static bool sd_has_rps_ipi_waiting(struct softnet_data *sd)
6319{
6320#ifdef CONFIG_RPS
6321 return sd->rps_ipi_list != NULL;
6322#else
6323 return false;
6324#endif
6325}
6326
bea3348e 6327static int process_backlog(struct napi_struct *napi, int quota)
1da177e4 6328{
eecfd7c4 6329 struct softnet_data *sd = container_of(napi, struct softnet_data, backlog);
145dd5f9
PA
6330 bool again = true;
6331 int work = 0;
1da177e4 6332
e326bed2
ED
6333 /* Check if we have pending IPIs; it's better to send them now
6334 * than to wait for net_rx_action() to end.
6335 */
d75b1ade 6336 if (sd_has_rps_ipi_waiting(sd)) {
e326bed2
ED
6337 local_irq_disable();
6338 net_rps_action_and_irq_enable(sd);
6339 }
d75b1ade 6340
3d48b53f 6341 napi->weight = dev_rx_weight;
145dd5f9 6342 while (again) {
1da177e4 6343 struct sk_buff *skb;
6e7676c1
CG
6344
6345 while ((skb = __skb_dequeue(&sd->process_queue))) {
2c17d27c 6346 rcu_read_lock();
6e7676c1 6347 __netif_receive_skb(skb);
2c17d27c 6348 rcu_read_unlock();
76cc8b13 6349 input_queue_head_incr(sd);
145dd5f9 6350 if (++work >= quota)
76cc8b13 6351 return work;
145dd5f9 6352
6e7676c1 6353 }
1da177e4 6354
145dd5f9 6355 local_irq_disable();
e36fa2f7 6356 rps_lock(sd);
11ef7a89 6357 if (skb_queue_empty(&sd->input_pkt_queue)) {
eecfd7c4
ED
6358 /*
6359 * Inline a custom version of __napi_complete().
6360 * Only the current cpu owns and manipulates this napi,
11ef7a89
TH
6361 * and NAPI_STATE_SCHED is the only possible flag set
6362 * on backlog.
6363 * We can use a plain write instead of clear_bit(),
eecfd7c4
ED
6364 * and we don't need an smp_mb() memory barrier.
6365 */
eecfd7c4 6366 napi->state = 0;
145dd5f9
PA
6367 again = false;
6368 } else {
6369 skb_queue_splice_tail_init(&sd->input_pkt_queue,
6370 &sd->process_queue);
bea3348e 6371 }
e36fa2f7 6372 rps_unlock(sd);
145dd5f9 6373 local_irq_enable();
6e7676c1 6374 }
1da177e4 6375
bea3348e
SH
6376 return work;
6377}
1da177e4 6378
bea3348e
SH
6379/**
6380 * __napi_schedule - schedule for receive
c4ea43c5 6381 * @n: entry to schedule
bea3348e 6382 *
bc9ad166
ED
6383 * The entry's receive function will be scheduled to run.
6384 * Consider using __napi_schedule_irqoff() if hard irqs are masked.
bea3348e 6385 */
b5606c2d 6386void __napi_schedule(struct napi_struct *n)
bea3348e
SH
6387{
6388 unsigned long flags;
1da177e4 6389
bea3348e 6390 local_irq_save(flags);
903ceff7 6391 ____napi_schedule(this_cpu_ptr(&softnet_data), n);
bea3348e 6392 local_irq_restore(flags);
1da177e4 6393}
bea3348e
SH
6394EXPORT_SYMBOL(__napi_schedule);
6395
39e6c820
ED
6396/**
6397 * napi_schedule_prep - check if napi can be scheduled
6398 * @n: napi context
6399 *
6400 * Test if the NAPI routine is already running, and if not mark
ee1a4c84 6401 * it as running. This is used as a condition variable to
39e6c820
ED
6402 * ensure only one NAPI poll instance runs. We also make
6403 * sure there is no pending NAPI disable.
6404 */
6405bool napi_schedule_prep(struct napi_struct *n)
6406{
6407 unsigned long val, new;
6408
6409 do {
6410 val = READ_ONCE(n->state);
6411 if (unlikely(val & NAPIF_STATE_DISABLE))
6412 return false;
6413 new = val | NAPIF_STATE_SCHED;
6414
6415 /* Sets STATE_MISSED bit if STATE_SCHED was already set
6416 * This was suggested by Alexander Duyck, as compiler
6417 * emits better code than:
6418 * if (val & NAPIF_STATE_SCHED)
6419 * new |= NAPIF_STATE_MISSED;
6420 */
6421 new |= (val & NAPIF_STATE_SCHED) / NAPIF_STATE_SCHED *
6422 NAPIF_STATE_MISSED;
6423 } while (cmpxchg(&n->state, val, new) != val);
6424
6425 return !(val & NAPIF_STATE_SCHED);
6426}
6427EXPORT_SYMBOL(napi_schedule_prep);
6428
bc9ad166
ED
6429/**
6430 * __napi_schedule_irqoff - schedule for receive
6431 * @n: entry to schedule
6432 *
6433 * Variant of __napi_schedule() assuming hard irqs are masked
6434 */
6435void __napi_schedule_irqoff(struct napi_struct *n)
6436{
6437 ____napi_schedule(this_cpu_ptr(&softnet_data), n);
6438}
6439EXPORT_SYMBOL(__napi_schedule_irqoff);
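
/* Example (illustrative sketch, not part of this file): the canonical
 * hard-irq pattern built on these primitives; my_irq, my_priv and
 * my_hw_mask_irqs are hypothetical. Interrupts are masked at the device
 * and the rest of the work is deferred to the NAPI poll.
 */
static irqreturn_t my_irq(int irq, void *data)
{
	struct my_priv *priv = data;

	if (napi_schedule_prep(&priv->napi)) {
		my_hw_mask_irqs(priv);
		__napi_schedule_irqoff(&priv->napi);
	}
	return IRQ_HANDLED;
}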
6440
364b6055 6441bool napi_complete_done(struct napi_struct *n, int work_done)
d565b0a1 6442{
6f8b12d6
ED
6443 unsigned long flags, val, new, timeout = 0;
6444 bool ret = true;
d565b0a1
HX
6445
6446 /*
217f6974
ED
6447 * 1) Don't let napi dequeue from the cpu poll list
6448 * just in case it's running on a different cpu.
6449 * 2) If we are busy polling, do nothing here, we have
6450 * the guarantee we will be called later.
d565b0a1 6451 */
217f6974
ED
6452 if (unlikely(n->state & (NAPIF_STATE_NPSVC |
6453 NAPIF_STATE_IN_BUSY_POLL)))
364b6055 6454 return false;
d565b0a1 6455
6f8b12d6
ED
6456 if (work_done) {
6457 if (n->gro_bitmask)
7e417a66
ED
6458 timeout = READ_ONCE(n->dev->gro_flush_timeout);
6459 n->defer_hard_irqs_count = READ_ONCE(n->dev->napi_defer_hard_irqs);
6f8b12d6
ED
6460 }
6461 if (n->defer_hard_irqs_count > 0) {
6462 n->defer_hard_irqs_count--;
7e417a66 6463 timeout = READ_ONCE(n->dev->gro_flush_timeout);
6f8b12d6
ED
6464 if (timeout)
6465 ret = false;
6466 }
6467 if (n->gro_bitmask) {
605108ac
PA
6468 /* When the NAPI instance uses a timeout and keeps postponing
6469 * it, we need to bound somehow the time packets are kept in
6470 * the GRO layer
6471 */
6472 napi_gro_flush(n, !!timeout);
3b47d303 6473 }
c8079432
MM
6474
6475 gro_normal_list(n);
6476
02c1602e 6477 if (unlikely(!list_empty(&n->poll_list))) {
d75b1ade
ED
6478 /* If n->poll_list is not empty, we need to mask irqs */
6479 local_irq_save(flags);
02c1602e 6480 list_del_init(&n->poll_list);
d75b1ade
ED
6481 local_irq_restore(flags);
6482 }
39e6c820
ED
6483
6484 do {
6485 val = READ_ONCE(n->state);
6486
6487 WARN_ON_ONCE(!(val & NAPIF_STATE_SCHED));
6488
7fd3253a
BT
6489 new = val & ~(NAPIF_STATE_MISSED | NAPIF_STATE_SCHED |
6490 NAPIF_STATE_PREFER_BUSY_POLL);
39e6c820
ED
6491
6492 /* If STATE_MISSED was set, leave STATE_SCHED set,
6493 * because we will call napi->poll() one more time.
6494 * This C code was suggested by Alexander Duyck to help gcc.
6495 */
6496 new |= (val & NAPIF_STATE_MISSED) / NAPIF_STATE_MISSED *
6497 NAPIF_STATE_SCHED;
6498 } while (cmpxchg(&n->state, val, new) != val);
6499
6500 if (unlikely(val & NAPIF_STATE_MISSED)) {
6501 __napi_schedule(n);
6502 return false;
6503 }
6504
6f8b12d6
ED
6505 if (timeout)
6506 hrtimer_start(&n->timer, ns_to_ktime(timeout),
6507 HRTIMER_MODE_REL_PINNED);
6508 return ret;
d565b0a1 6509}
3b47d303 6510EXPORT_SYMBOL(napi_complete_done);
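
/* Example (illustrative sketch, not part of this file): the poll side of
 * the NAPI contract; my_poll, my_clean_rx and my_hw_unmask_irqs are
 * hypothetical. Only when strictly less than the budget was consumed may
 * the poller complete, and device interrupts are re-enabled only if
 * napi_complete_done() agrees (it returns false when polling must
 * continue, e.g. for busy polling or a gro_flush_timeout rearm).
 */
static int my_poll(struct napi_struct *napi, int budget)
{
	struct my_priv *priv = container_of(napi, struct my_priv, napi);
	int work = my_clean_rx(priv, budget);

	if (work < budget && napi_complete_done(napi, work))
		my_hw_unmask_irqs(priv);
	return work;
}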
d565b0a1 6511
af12fa6e 6512/* must be called under rcu_read_lock(), as we don't take a reference */
02d62e86 6513static struct napi_struct *napi_by_id(unsigned int napi_id)
af12fa6e
ET
6514{
6515 unsigned int hash = napi_id % HASH_SIZE(napi_hash);
6516 struct napi_struct *napi;
6517
6518 hlist_for_each_entry_rcu(napi, &napi_hash[hash], napi_hash_node)
6519 if (napi->napi_id == napi_id)
6520 return napi;
6521
6522 return NULL;
6523}
02d62e86
ED
6524
6525#if defined(CONFIG_NET_RX_BUSY_POLL)
217f6974 6526
7fd3253a 6527static void __busy_poll_stop(struct napi_struct *napi, bool skip_schedule)
217f6974 6528{
7fd3253a
BT
6529 if (!skip_schedule) {
6530 gro_normal_list(napi);
6531 __napi_schedule(napi);
6532 return;
6533 }
217f6974 6534
7fd3253a
BT
6535 if (napi->gro_bitmask) {
6536 /* flush too old packets
6537 * If HZ < 1000, flush all packets.
6538 */
6539 napi_gro_flush(napi, HZ >= 1000);
6540 }
217f6974 6541
7fd3253a
BT
6542 gro_normal_list(napi);
6543 clear_bit(NAPI_STATE_SCHED, &napi->state);
6544}
6545
7c951caf
BT
6546static void busy_poll_stop(struct napi_struct *napi, void *have_poll_lock, bool prefer_busy_poll,
6547 u16 budget)
217f6974 6548{
7fd3253a
BT
6549 bool skip_schedule = false;
6550 unsigned long timeout;
217f6974
ED
6551 int rc;
6552
39e6c820
ED
6553 /* Busy polling means there is a high chance the device driver's hard irq
6554 * could not grab NAPI_STATE_SCHED, and that NAPI_STATE_MISSED was
6555 * set in napi_schedule_prep().
6556 * Since we are about to call napi->poll() once more, we can safely
6557 * clear NAPI_STATE_MISSED.
6558 *
6559 * Note: x86 could use a single "lock and ..." instruction
6560 * to perform these two clear_bit() calls.
6561 */
6562 clear_bit(NAPI_STATE_MISSED, &napi->state);
217f6974
ED
6563 clear_bit(NAPI_STATE_IN_BUSY_POLL, &napi->state);
6564
6565 local_bh_disable();
6566
7fd3253a
BT
6567 if (prefer_busy_poll) {
6568 napi->defer_hard_irqs_count = READ_ONCE(napi->dev->napi_defer_hard_irqs);
6569 timeout = READ_ONCE(napi->dev->gro_flush_timeout);
6570 if (napi->defer_hard_irqs_count && timeout) {
6571 hrtimer_start(&napi->timer, ns_to_ktime(timeout), HRTIMER_MODE_REL_PINNED);
6572 skip_schedule = true;
6573 }
6574 }
6575
217f6974
ED
6576 /* All we really want here is to re-enable device interrupts.
6577 * Ideally, a new ndo_busy_poll_stop() could avoid another round.
6578 */
7c951caf 6579 rc = napi->poll(napi, budget);
323ebb61
EC
6580 /* We can't gro_normal_list() here, because napi->poll() might have
6581 * rearmed the napi (napi_complete_done()) in which case it could
6582 * already be running on another CPU.
6583 */
7c951caf 6584 trace_napi_poll(napi, rc, budget);
217f6974 6585 netpoll_poll_unlock(have_poll_lock);
7c951caf 6586 if (rc == budget)
7fd3253a 6587 __busy_poll_stop(napi, skip_schedule);
217f6974 6588 local_bh_enable();
217f6974
ED
6589}
6590
7db6b048
SS
6591void napi_busy_loop(unsigned int napi_id,
6592 bool (*loop_end)(void *, unsigned long),
7c951caf 6593 void *loop_end_arg, bool prefer_busy_poll, u16 budget)
02d62e86 6594{
7db6b048 6595 unsigned long start_time = loop_end ? busy_loop_current_time() : 0;
217f6974 6596 int (*napi_poll)(struct napi_struct *napi, int budget);
217f6974 6597 void *have_poll_lock = NULL;
02d62e86 6598 struct napi_struct *napi;
217f6974
ED
6599
6600restart:
217f6974 6601 napi_poll = NULL;
02d62e86 6602
2a028ecb 6603 rcu_read_lock();
02d62e86 6604
545cd5e5 6605 napi = napi_by_id(napi_id);
02d62e86
ED
6606 if (!napi)
6607 goto out;
6608
217f6974
ED
6609 preempt_disable();
6610 for (;;) {
2b5cd0df
AD
6611 int work = 0;
6612
2a028ecb 6613 local_bh_disable();
217f6974
ED
6614 if (!napi_poll) {
6615 unsigned long val = READ_ONCE(napi->state);
6616
6617 /* If multiple threads are competing for this napi,
6618 * we avoid dirtying napi->state as much as we can.
6619 */
6620 if (val & (NAPIF_STATE_DISABLE | NAPIF_STATE_SCHED |
7fd3253a
BT
6621 NAPIF_STATE_IN_BUSY_POLL)) {
6622 if (prefer_busy_poll)
6623 set_bit(NAPI_STATE_PREFER_BUSY_POLL, &napi->state);
217f6974 6624 goto count;
7fd3253a 6625 }
217f6974
ED
6626 if (cmpxchg(&napi->state, val,
6627 val | NAPIF_STATE_IN_BUSY_POLL |
7fd3253a
BT
6628 NAPIF_STATE_SCHED) != val) {
6629 if (prefer_busy_poll)
6630 set_bit(NAPI_STATE_PREFER_BUSY_POLL, &napi->state);
217f6974 6631 goto count;
7fd3253a 6632 }
217f6974
ED
6633 have_poll_lock = netpoll_poll_lock(napi);
6634 napi_poll = napi->poll;
6635 }
7c951caf
BT
6636 work = napi_poll(napi, budget);
6637 trace_napi_poll(napi, work, budget);
323ebb61 6638 gro_normal_list(napi);
217f6974 6639count:
2b5cd0df 6640 if (work > 0)
7db6b048 6641 __NET_ADD_STATS(dev_net(napi->dev),
2b5cd0df 6642 LINUX_MIB_BUSYPOLLRXPACKETS, work);
2a028ecb 6643 local_bh_enable();
02d62e86 6644
7db6b048 6645 if (!loop_end || loop_end(loop_end_arg, start_time))
217f6974 6646 break;
02d62e86 6647
217f6974
ED
6648 if (unlikely(need_resched())) {
6649 if (napi_poll)
7c951caf 6650 busy_poll_stop(napi, have_poll_lock, prefer_busy_poll, budget);
217f6974
ED
6651 preempt_enable();
6652 rcu_read_unlock();
6653 cond_resched();
7db6b048 6654 if (loop_end(loop_end_arg, start_time))
2b5cd0df 6655 return;
217f6974
ED
6656 goto restart;
6657 }
6cdf89b1 6658 cpu_relax();
217f6974
ED
6659 }
6660 if (napi_poll)
7c951caf 6661 busy_poll_stop(napi, have_poll_lock, prefer_busy_poll, budget);
217f6974 6662 preempt_enable();
02d62e86 6663out:
2a028ecb 6664 rcu_read_unlock();
02d62e86 6665}
7db6b048 6666EXPORT_SYMBOL(napi_busy_loop);
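
/* Usage note: drivers do not call napi_busy_loop() directly; it backs
 * socket busy polling (the SO_BUSY_POLL socket option and the
 * net.core.busy_read/busy_poll sysctls) as well as epoll's busy-poll
 * support, spinning on the NAPI instance that delivered a flow.
 */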
02d62e86
ED
6667
6668#endif /* CONFIG_NET_RX_BUSY_POLL */
af12fa6e 6669
149d6ad8 6670static void napi_hash_add(struct napi_struct *napi)
af12fa6e 6671{
4d092dd2 6672 if (test_bit(NAPI_STATE_NO_BUSY_POLL, &napi->state))
52bd2d62 6673 return;
af12fa6e 6674
52bd2d62 6675 spin_lock(&napi_hash_lock);
af12fa6e 6676
545cd5e5 6677 /* 0..NR_CPUS range is reserved for sender_cpu use */
52bd2d62 6678 do {
545cd5e5
AD
6679 if (unlikely(++napi_gen_id < MIN_NAPI_ID))
6680 napi_gen_id = MIN_NAPI_ID;
52bd2d62
ED
6681 } while (napi_by_id(napi_gen_id));
6682 napi->napi_id = napi_gen_id;
af12fa6e 6683
52bd2d62
ED
6684 hlist_add_head_rcu(&napi->napi_hash_node,
6685 &napi_hash[napi->napi_id % HASH_SIZE(napi_hash)]);
af12fa6e 6686
52bd2d62 6687 spin_unlock(&napi_hash_lock);
af12fa6e 6688}
af12fa6e
ET
6689
6690/* Warning: the caller is responsible for making sure an RCU grace period
6691 * elapses before freeing the memory containing @napi.
6692 */
5198d545 6693static void napi_hash_del(struct napi_struct *napi)
af12fa6e
ET
6694{
6695 spin_lock(&napi_hash_lock);
6696
4d092dd2 6697 hlist_del_init_rcu(&napi->napi_hash_node);
5198d545 6698
af12fa6e
ET
6699 spin_unlock(&napi_hash_lock);
6700}
af12fa6e 6701
3b47d303
ED
6702static enum hrtimer_restart napi_watchdog(struct hrtimer *timer)
6703{
6704 struct napi_struct *napi;
6705
6706 napi = container_of(timer, struct napi_struct, timer);
39e6c820
ED
6707
6708 /* Note: we use a relaxed variant of napi_schedule_prep(), not setting
6709 * NAPI_STATE_MISSED, since we do not react to a device IRQ.
6710 */
6f8b12d6 6711 if (!napi_disable_pending(napi) &&
7fd3253a
BT
6712 !test_and_set_bit(NAPI_STATE_SCHED, &napi->state)) {
6713 clear_bit(NAPI_STATE_PREFER_BUSY_POLL, &napi->state);
39e6c820 6714 __napi_schedule_irqoff(napi);
7fd3253a 6715 }
3b47d303
ED
6716
6717 return HRTIMER_NORESTART;
6718}
6719
7c4ec749 6720static void init_gro_hash(struct napi_struct *napi)
d565b0a1 6721{
07d78363
DM
6722 int i;
6723
6312fe77
LR
6724 for (i = 0; i < GRO_HASH_BUCKETS; i++) {
6725 INIT_LIST_HEAD(&napi->gro_hash[i].list);
6726 napi->gro_hash[i].count = 0;
6727 }
7c4ec749
DM
6728 napi->gro_bitmask = 0;
6729}
6730
5fdd2f0e
WW
6731int dev_set_threaded(struct net_device *dev, bool threaded)
6732{
6733 struct napi_struct *napi;
6734 int err = 0;
6735
6736 if (dev->threaded == threaded)
6737 return 0;
6738
6739 if (threaded) {
6740 list_for_each_entry(napi, &dev->napi_list, dev_list) {
6741 if (!napi->thread) {
6742 err = napi_kthread_create(napi);
6743 if (err) {
6744 threaded = false;
6745 break;
6746 }
6747 }
6748 }
6749 }
6750
6751 dev->threaded = threaded;
6752
6753 /* Make sure the kthread is created before the THREADED bit
6754 * is set.
6755 */
6756 smp_mb__before_atomic();
6757
6758 /* Setting/unsetting threaded mode on a napi might not immediately
6759 * take effect, if the current napi instance is actively being
6760 * polled. In this case, the switch between threaded mode and
6761 * softirq mode will happen in the next round of napi_schedule().
6762 * This should not cause hiccups/stalls to the live traffic.
6763 */
6764 list_for_each_entry(napi, &dev->napi_list, dev_list) {
6765 if (threaded)
6766 set_bit(NAPI_STATE_THREADED, &napi->state);
6767 else
6768 clear_bit(NAPI_STATE_THREADED, &napi->state);
6769 }
6770
6771 return err;
6772}
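
/* Usage note: this is typically reached from userspace through the
 * per-device sysfs knob, e.g. "echo 1 > /sys/class/net/eth0/threaded",
 * which toggles threaded NAPI under the rtnl lock.
 */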
6773
7c4ec749
DM
6774void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
6775 int (*poll)(struct napi_struct *, int), int weight)
6776{
4d092dd2
JK
6777 if (WARN_ON(test_and_set_bit(NAPI_STATE_LISTED, &napi->state)))
6778 return;
6779
7c4ec749 6780 INIT_LIST_HEAD(&napi->poll_list);
4d092dd2 6781 INIT_HLIST_NODE(&napi->napi_hash_node);
7c4ec749
DM
6782 hrtimer_init(&napi->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_PINNED);
6783 napi->timer.function = napi_watchdog;
6784 init_gro_hash(napi);
5d38a079 6785 napi->skb = NULL;
323ebb61
EC
6786 INIT_LIST_HEAD(&napi->rx_list);
6787 napi->rx_count = 0;
d565b0a1 6788 napi->poll = poll;
82dc3c63 6789 if (weight > NAPI_POLL_WEIGHT)
bf29e9e9
QC
6790 netdev_err_once(dev, "%s() called with weight %d\n", __func__,
6791 weight);
d565b0a1 6792 napi->weight = weight;
d565b0a1 6793 napi->dev = dev;
5d38a079 6794#ifdef CONFIG_NETPOLL
d565b0a1
HX
6795 napi->poll_owner = -1;
6796#endif
6797 set_bit(NAPI_STATE_SCHED, &napi->state);
96e97bc0
JK
6798 set_bit(NAPI_STATE_NPSVC, &napi->state);
6799 list_add_rcu(&napi->dev_list, &dev->napi_list);
93d05d4a 6800 napi_hash_add(napi);
29863d41
WW
6801 /* Create kthread for this napi if dev->threaded is set.
6802 * Clear dev->threaded if kthread creation failed so that
6803 * threaded mode will not be enabled in napi_enable().
6804 */
6805 if (dev->threaded && napi_kthread_create(napi))
6806 dev->threaded = 0;
d565b0a1
HX
6807}
6808EXPORT_SYMBOL(netif_napi_add);
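
/* Example (illustrative sketch, not part of this file): probe-time
 * wiring; my_priv and my_poll are hypothetical. The instance starts
 * off owned (NAPI_STATE_SCHED set), so the driver must pair this with
 * napi_enable() on its open path and netif_napi_del() on teardown.
 */
static void my_setup_napi(struct net_device *dev)
{
	struct my_priv *priv = netdev_priv(dev);

	netif_napi_add(dev, &priv->napi, my_poll, NAPI_POLL_WEIGHT);
}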
6809
3b47d303
ED
6810void napi_disable(struct napi_struct *n)
6811{
6812 might_sleep();
6813 set_bit(NAPI_STATE_DISABLE, &n->state);
6814
6815 while (test_and_set_bit(NAPI_STATE_SCHED, &n->state))
6816 msleep(1);
2d8bff12
NH
6817 while (test_and_set_bit(NAPI_STATE_NPSVC, &n->state))
6818 msleep(1);
3b47d303
ED
6819
6820 hrtimer_cancel(&n->timer);
6821
7fd3253a 6822 clear_bit(NAPI_STATE_PREFER_BUSY_POLL, &n->state);
3b47d303 6823 clear_bit(NAPI_STATE_DISABLE, &n->state);
29863d41 6824 clear_bit(NAPI_STATE_THREADED, &n->state);
3b47d303
ED
6825}
6826EXPORT_SYMBOL(napi_disable);
6827
29863d41
WW
6828/**
6829 * napi_enable - enable NAPI scheduling
6830 * @n: NAPI context
6831 *
6832 * Resume NAPI from being scheduled on this context.
6833 * Must be paired with napi_disable.
6834 */
6835void napi_enable(struct napi_struct *n)
6836{
6837 BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state));
6838 smp_mb__before_atomic();
6839 clear_bit(NAPI_STATE_SCHED, &n->state);
6840 clear_bit(NAPI_STATE_NPSVC, &n->state);
6841 if (n->dev->threaded && n->thread)
6842 set_bit(NAPI_STATE_THREADED, &n->state);
6843}
6844EXPORT_SYMBOL(napi_enable);
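
/* Example (illustrative sketch, not part of this file): pairing
 * napi_enable()/napi_disable() across the device open and stop paths;
 * my_hw_start and my_hw_stop are hypothetical. Enable before letting the
 * hardware schedule, disable only after the hardware no longer can.
 */
static int my_open(struct net_device *dev)
{
	struct my_priv *priv = netdev_priv(dev);

	napi_enable(&priv->napi);
	my_hw_start(priv);
	return 0;
}

static int my_stop(struct net_device *dev)
{
	struct my_priv *priv = netdev_priv(dev);

	my_hw_stop(priv);
	napi_disable(&priv->napi);
	return 0;
}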
6845
07d78363 6846static void flush_gro_hash(struct napi_struct *napi)
d4546c25 6847{
07d78363 6848 int i;
d4546c25 6849
07d78363
DM
6850 for (i = 0; i < GRO_HASH_BUCKETS; i++) {
6851 struct sk_buff *skb, *n;
6852
6312fe77 6853 list_for_each_entry_safe(skb, n, &napi->gro_hash[i].list, list)
07d78363 6854 kfree_skb(skb);
6312fe77 6855 napi->gro_hash[i].count = 0;
07d78363 6856 }
d4546c25
DM
6857}
6858
93d05d4a 6859/* Must be called in process context */
5198d545 6860void __netif_napi_del(struct napi_struct *napi)
d565b0a1 6861{
4d092dd2
JK
6862 if (!test_and_clear_bit(NAPI_STATE_LISTED, &napi->state))
6863 return;
6864
5198d545 6865 napi_hash_del(napi);
5251ef82 6866 list_del_rcu(&napi->dev_list);
76620aaf 6867 napi_free_frags(napi);
d565b0a1 6868
07d78363 6869 flush_gro_hash(napi);
d9f37d01 6870 napi->gro_bitmask = 0;
29863d41
WW
6871
6872 if (napi->thread) {
6873 kthread_stop(napi->thread);
6874 napi->thread = NULL;
6875 }
d565b0a1 6876}
5198d545 6877EXPORT_SYMBOL(__netif_napi_del);
d565b0a1 6878
898f8015 6879static int __napi_poll(struct napi_struct *n, bool *repoll)
726ce70e 6880{
726ce70e
HX
6881 int work, weight;
6882
726ce70e
HX
6883 weight = n->weight;
6884
6885 /* This NAPI_STATE_SCHED test is for avoiding a race
6886 * with netpoll's poll_napi(). Only the entity which
6887 * obtains the lock and sees NAPI_STATE_SCHED set will
6888 * actually make the ->poll() call. Therefore we avoid
6889 * accidentally calling ->poll() when NAPI is not scheduled.
6890 */
6891 work = 0;
6892 if (test_bit(NAPI_STATE_SCHED, &n->state)) {
6893 work = n->poll(n, weight);
1db19db7 6894 trace_napi_poll(n, work, weight);
726ce70e
HX
6895 }
6896
427d5838
ED
6897 if (unlikely(work > weight))
6898 pr_err_once("NAPI poll function %pS returned %d, exceeding its budget of %d.\n",
6899 n->poll, work, weight);
726ce70e
HX
6900
6901 if (likely(work < weight))
898f8015 6902 return work;
726ce70e
HX
6903
6904 /* Drivers must not modify the NAPI state if they
6905 * consume the entire weight. In such cases this code
6906 * still "owns" the NAPI instance and therefore can
6907 * move the instance around on the list at-will.
6908 */
6909 if (unlikely(napi_disable_pending(n))) {
6910 napi_complete(n);
898f8015 6911 return work;
726ce70e
HX
6912 }
6913
7fd3253a
BT
6914 /* The NAPI context has more processing work, but busy-polling
6915 * is preferred. Exit early.
6916 */
6917 if (napi_prefer_busy_poll(n)) {
6918 if (napi_complete_done(n, work)) {
6919 /* If timeout is not set, we need to make sure
6920 * that the NAPI is re-scheduled.
6921 */
6922 napi_schedule(n);
6923 }
898f8015 6924 return work;
7fd3253a
BT
6925 }
6926
d9f37d01 6927 if (n->gro_bitmask) {
726ce70e
HX
6928 /* flush too old packets
6929 * If HZ < 1000, flush all packets.
6930 */
6931 napi_gro_flush(n, HZ >= 1000);
6932 }
6933
c8079432
MM
6934 gro_normal_list(n);
6935
001ce546
HX
6936 /* Some drivers may have called napi_schedule
6937 * prior to exhausting their budget.
6938 */
6939 if (unlikely(!list_empty(&n->poll_list))) {
6940 pr_warn_once("%s: Budget exhausted after napi rescheduled\n",
6941 n->dev ? n->dev->name : "backlog");
898f8015 6942 return work;
001ce546
HX
6943 }
6944
898f8015
FF
6945 *repoll = true;
6946
6947 return work;
6948}
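The contract enforced above can be seen from the driver side. A minimal sketch of a conforming ->poll() callback, assuming hypothetical helpers my_rx_clean() and my_enable_rx_irq():

#include <linux/netdevice.h>

static int my_rx_clean(struct napi_struct *napi, int budget);	/* hypothetical */
static void my_enable_rx_irq(struct napi_struct *napi);		/* hypothetical */

static int my_poll(struct napi_struct *napi, int budget)
{
	int work_done = my_rx_clean(napi, budget);	/* must never exceed budget */

	if (work_done < budget && napi_complete_done(napi, work_done))
		my_enable_rx_irq(napi);		/* re-arm the device interrupt */

	/* If the whole budget was consumed, return without touching the
	 * NAPI state: __napi_poll() above still owns the instance. */
	return work_done;
}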
6949
6950static int napi_poll(struct napi_struct *n, struct list_head *repoll)
6951{
6952 bool do_repoll = false;
6953 void *have;
6954 int work;
6955
6956 list_del_init(&n->poll_list);
6957
6958 have = netpoll_poll_lock(n);
6959
6960 work = __napi_poll(n, &do_repoll);
6961
6962 if (do_repoll)
6963 list_add_tail(&n->poll_list, repoll);
726ce70e 6964
726ce70e
HX
6965 netpoll_poll_unlock(have);
6966
6967 return work;
6968}
6969
29863d41
WW
6970static int napi_thread_wait(struct napi_struct *napi)
6971{
6972 set_current_state(TASK_INTERRUPTIBLE);
6973
6974 while (!kthread_should_stop() && !napi_disable_pending(napi)) {
6975 if (test_bit(NAPI_STATE_SCHED, &napi->state)) {
6976 WARN_ON(!list_empty(&napi->poll_list));
6977 __set_current_state(TASK_RUNNING);
6978 return 0;
6979 }
6980
6981 schedule();
6982 set_current_state(TASK_INTERRUPTIBLE);
6983 }
6984 __set_current_state(TASK_RUNNING);
6985 return -1;
6986}
6987
6988static int napi_threaded_poll(void *data)
6989{
6990 struct napi_struct *napi = data;
6991 void *have;
6992
6993 while (!napi_thread_wait(napi)) {
6994 for (;;) {
6995 bool repoll = false;
6996
6997 local_bh_disable();
6998
6999 have = netpoll_poll_lock(napi);
7000 __napi_poll(napi, &repoll);
7001 netpoll_poll_unlock(have);
7002
29863d41
WW
7003 local_bh_enable();
7004
7005 if (!repoll)
7006 break;
7007
7008 cond_resched();
7009 }
7010 }
7011 return 0;
7012}
7013
0766f788 7014static __latent_entropy void net_rx_action(struct softirq_action *h)
1da177e4 7015{
903ceff7 7016 struct softnet_data *sd = this_cpu_ptr(&softnet_data);
7acf8a1e
MW
7017 unsigned long time_limit = jiffies +
7018 usecs_to_jiffies(netdev_budget_usecs);
51b0bded 7019 int budget = netdev_budget;
d75b1ade
ED
7020 LIST_HEAD(list);
7021 LIST_HEAD(repoll);
53fb95d3 7022
1da177e4 7023 local_irq_disable();
d75b1ade
ED
7024 list_splice_init(&sd->poll_list, &list);
7025 local_irq_enable();
1da177e4 7026
ceb8d5bf 7027 for (;;) {
bea3348e 7028 struct napi_struct *n;
1da177e4 7029
ceb8d5bf
HX
7030 if (list_empty(&list)) {
7031 if (!sd_has_rps_ipi_waiting(sd) && list_empty(&repoll))
fec6e49b 7032 return;
ceb8d5bf
HX
7033 break;
7034 }
7035
6bd373eb
HX
7036 n = list_first_entry(&list, struct napi_struct, poll_list);
7037 budget -= napi_poll(n, &repoll);
7038
d75b1ade 7039 /* If softirq window is exhausted then punt.
24f8b238
SH
7040 * Allow this to run for 2 jiffies, which allows
7041 * an average latency of 1.5/HZ.
bea3348e 7042 */
ceb8d5bf
HX
7043 if (unlikely(budget <= 0 ||
7044 time_after_eq(jiffies, time_limit))) {
7045 sd->time_squeeze++;
7046 break;
7047 }
1da177e4 7048 }
d75b1ade 7049
d75b1ade
ED
7050 local_irq_disable();
7051
7052 list_splice_tail_init(&sd->poll_list, &list);
7053 list_splice_tail(&repoll, &list);
7054 list_splice(&list, &sd->poll_list);
7055 if (!list_empty(&sd->poll_list))
7056 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
7057
e326bed2 7058 net_rps_action_and_irq_enable(sd);
1da177e4
LT
7059}
7060
aa9d8560 7061struct netdev_adjacent {
9ff162a8 7062 struct net_device *dev;
5d261913
VF
7063
7064 /* upper master flag, there can only be one master device per list */
9ff162a8 7065 bool master;
5d261913 7066
32b6d34f
TY
7067 /* lookup ignore flag */
7068 bool ignore;
7069
5d261913
VF
7070 /* counter for the number of times this device was added to us */
7071 u16 ref_nr;
7072
402dae96
VF
7073 /* private field for the users */
7074 void *private;
7075
9ff162a8
JP
7076 struct list_head list;
7077 struct rcu_head rcu;
9ff162a8
JP
7078};
7079
6ea29da1 7080static struct netdev_adjacent *__netdev_find_adj(struct net_device *adj_dev,
2f268f12 7081 struct list_head *adj_list)
9ff162a8 7082{
5d261913 7083 struct netdev_adjacent *adj;
5d261913 7084
2f268f12 7085 list_for_each_entry(adj, adj_list, list) {
5d261913
VF
7086 if (adj->dev == adj_dev)
7087 return adj;
9ff162a8
JP
7088 }
7089 return NULL;
7090}
7091
eff74233
TY
7092static int ____netdev_has_upper_dev(struct net_device *upper_dev,
7093 struct netdev_nested_priv *priv)
f1170fd4 7094{
eff74233 7095 struct net_device *dev = (struct net_device *)priv->data;
f1170fd4
DA
7096
7097 return upper_dev == dev;
7098}
7099
9ff162a8
JP
7100/**
7101 * netdev_has_upper_dev - Check if device is linked to an upper device
7102 * @dev: device
7103 * @upper_dev: upper device to check
7104 *
7105 * Find out if a device is linked to the specified upper device and return
7106 * true if it is. Note that this checks only the immediate upper device,
7107 * not the complete stack of devices. The caller must hold the RTNL lock.
7108 */
7109bool netdev_has_upper_dev(struct net_device *dev,
7110 struct net_device *upper_dev)
7111{
eff74233
TY
7112 struct netdev_nested_priv priv = {
7113 .data = (void *)upper_dev,
7114 };
7115
9ff162a8
JP
7116 ASSERT_RTNL();
7117
32b6d34f 7118 return netdev_walk_all_upper_dev_rcu(dev, ____netdev_has_upper_dev,
eff74233 7119 &priv);
9ff162a8
JP
7120}
7121EXPORT_SYMBOL(netdev_has_upper_dev);
7122
1a3f060c 7123/**
c1639be9 7124 * netdev_has_upper_dev_all_rcu - Check if device is linked to an upper device
1a3f060c
DA
7125 * @dev: device
7126 * @upper_dev: upper device to check
7127 *
7128 * Find out if a device is linked to the specified upper device and return
7129 * true if it is. Note that this checks the entire upper device chain.
7130 * The caller must hold the RCU read lock.
7131 */
7132
1a3f060c
DA
7133bool netdev_has_upper_dev_all_rcu(struct net_device *dev,
7134 struct net_device *upper_dev)
7135{
eff74233
TY
7136 struct netdev_nested_priv priv = {
7137 .data = (void *)upper_dev,
7138 };
7139
32b6d34f 7140 return !!netdev_walk_all_upper_dev_rcu(dev, ____netdev_has_upper_dev,
eff74233 7141 &priv);
1a3f060c
DA
7142}
7143EXPORT_SYMBOL(netdev_has_upper_dev_all_rcu);
7144
9ff162a8
JP
7145/**
7146 * netdev_has_any_upper_dev - Check if device is linked to some device
7147 * @dev: device
7148 *
7149 * Find out if a device is linked to an upper device and return true in case
7150 * it is. The caller must hold the RTNL lock.
7151 */
25cc72a3 7152bool netdev_has_any_upper_dev(struct net_device *dev)
9ff162a8
JP
7153{
7154 ASSERT_RTNL();
7155
f1170fd4 7156 return !list_empty(&dev->adj_list.upper);
9ff162a8 7157}
25cc72a3 7158EXPORT_SYMBOL(netdev_has_any_upper_dev);
9ff162a8
JP
7159
7160/**
7161 * netdev_master_upper_dev_get - Get master upper device
7162 * @dev: device
7163 *
7164 * Find a master upper device and return pointer to it or NULL in case
7165 * it's not there. The caller must hold the RTNL lock.
7166 */
7167struct net_device *netdev_master_upper_dev_get(struct net_device *dev)
7168{
aa9d8560 7169 struct netdev_adjacent *upper;
9ff162a8
JP
7170
7171 ASSERT_RTNL();
7172
2f268f12 7173 if (list_empty(&dev->adj_list.upper))
9ff162a8
JP
7174 return NULL;
7175
2f268f12 7176 upper = list_first_entry(&dev->adj_list.upper,
aa9d8560 7177 struct netdev_adjacent, list);
9ff162a8
JP
7178 if (likely(upper->master))
7179 return upper->dev;
7180 return NULL;
7181}
7182EXPORT_SYMBOL(netdev_master_upper_dev_get);
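A minimal caller sketch for the helper above, run under the RTNL lock; my_get_bond_master is illustrative and no reference is taken on the returned device:

#include <linux/netdevice.h>
#include <linux/rtnetlink.h>

static struct net_device *my_get_bond_master(struct net_device *dev)
{
	struct net_device *master;

	ASSERT_RTNL();		/* netdev_master_upper_dev_get() requires it */
	master = netdev_master_upper_dev_get(dev);
	if (master)
		netdev_info(dev, "enslaved to %s\n", master->name);
	return master;		/* RTNL keeps the adjacency stable */
}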
7183
32b6d34f
TY
7184static struct net_device *__netdev_master_upper_dev_get(struct net_device *dev)
7185{
7186 struct netdev_adjacent *upper;
7187
7188 ASSERT_RTNL();
7189
7190 if (list_empty(&dev->adj_list.upper))
7191 return NULL;
7192
7193 upper = list_first_entry(&dev->adj_list.upper,
7194 struct netdev_adjacent, list);
7195 if (likely(upper->master) && !upper->ignore)
7196 return upper->dev;
7197 return NULL;
7198}
7199
0f524a80
DA
7200/**
7201 * netdev_has_any_lower_dev - Check if device is linked to some device
7202 * @dev: device
7203 *
7204 * Find out if a device is linked to a lower device and return true in case
7205 * it is. The caller must hold the RTNL lock.
7206 */
7207static bool netdev_has_any_lower_dev(struct net_device *dev)
7208{
7209 ASSERT_RTNL();
7210
7211 return !list_empty(&dev->adj_list.lower);
7212}
7213
b6ccba4c
VF
7214void *netdev_adjacent_get_private(struct list_head *adj_list)
7215{
7216 struct netdev_adjacent *adj;
7217
7218 adj = list_entry(adj_list, struct netdev_adjacent, list);
7219
7220 return adj->private;
7221}
7222EXPORT_SYMBOL(netdev_adjacent_get_private);
7223
44a40855
VY
7224/**
7225 * netdev_upper_get_next_dev_rcu - Get the next dev from upper list
7226 * @dev: device
7227 * @iter: list_head ** of the current position
7228 *
7229 * Gets the next device from the dev's upper list, starting from iter
7230 * position. The caller must hold RCU read lock.
7231 */
7232struct net_device *netdev_upper_get_next_dev_rcu(struct net_device *dev,
7233 struct list_head **iter)
7234{
7235 struct netdev_adjacent *upper;
7236
7237 WARN_ON_ONCE(!rcu_read_lock_held() && !lockdep_rtnl_is_held());
7238
7239 upper = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);
7240
7241 if (&upper->list == &dev->adj_list.upper)
7242 return NULL;
7243
7244 *iter = &upper->list;
7245
7246 return upper->dev;
7247}
7248EXPORT_SYMBOL(netdev_upper_get_next_dev_rcu);
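The iterator above is usually consumed through the netdev_for_each_upper_dev_rcu() macro from netdevice.h, which is built on it. A sketch that lists the immediate upper devices (my_print_uppers is illustrative):

#include <linux/netdevice.h>
#include <linux/rcupdate.h>

static void my_print_uppers(struct net_device *dev)
{
	struct net_device *upper;
	struct list_head *iter;

	rcu_read_lock();
	netdev_for_each_upper_dev_rcu(dev, upper, iter)
		netdev_info(dev, "upper: %s\n", upper->name);
	rcu_read_unlock();
}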
7249
32b6d34f
TY
7250static struct net_device *__netdev_next_upper_dev(struct net_device *dev,
7251 struct list_head **iter,
7252 bool *ignore)
5343da4c
TY
7253{
7254 struct netdev_adjacent *upper;
7255
7256 upper = list_entry((*iter)->next, struct netdev_adjacent, list);
7257
7258 if (&upper->list == &dev->adj_list.upper)
7259 return NULL;
7260
7261 *iter = &upper->list;
32b6d34f 7262 *ignore = upper->ignore;
5343da4c
TY
7263
7264 return upper->dev;
7265}
7266
1a3f060c
DA
7267static struct net_device *netdev_next_upper_dev_rcu(struct net_device *dev,
7268 struct list_head **iter)
7269{
7270 struct netdev_adjacent *upper;
7271
7272 WARN_ON_ONCE(!rcu_read_lock_held() && !lockdep_rtnl_is_held());
7273
7274 upper = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);
7275
7276 if (&upper->list == &dev->adj_list.upper)
7277 return NULL;
7278
7279 *iter = &upper->list;
7280
7281 return upper->dev;
7282}
7283
32b6d34f
TY
7284static int __netdev_walk_all_upper_dev(struct net_device *dev,
7285 int (*fn)(struct net_device *dev,
eff74233
TY
7286 struct netdev_nested_priv *priv),
7287 struct netdev_nested_priv *priv)
5343da4c
TY
7288{
7289 struct net_device *udev, *next, *now, *dev_stack[MAX_NEST_DEV + 1];
7290 struct list_head *niter, *iter, *iter_stack[MAX_NEST_DEV + 1];
7291 int ret, cur = 0;
32b6d34f 7292 bool ignore;
5343da4c
TY
7293
7294 now = dev;
7295 iter = &dev->adj_list.upper;
7296
7297 while (1) {
7298 if (now != dev) {
eff74233 7299 ret = fn(now, priv);
5343da4c
TY
7300 if (ret)
7301 return ret;
7302 }
7303
7304 next = NULL;
7305 while (1) {
32b6d34f 7306 udev = __netdev_next_upper_dev(now, &iter, &ignore);
5343da4c
TY
7307 if (!udev)
7308 break;
32b6d34f
TY
7309 if (ignore)
7310 continue;
5343da4c
TY
7311
7312 next = udev;
7313 niter = &udev->adj_list.upper;
7314 dev_stack[cur] = now;
7315 iter_stack[cur++] = iter;
7316 break;
7317 }
7318
7319 if (!next) {
7320 if (!cur)
7321 return 0;
7322 next = dev_stack[--cur];
7323 niter = iter_stack[cur];
7324 }
7325
7326 now = next;
7327 iter = niter;
7328 }
7329
7330 return 0;
7331}
7332
1a3f060c
DA
7333int netdev_walk_all_upper_dev_rcu(struct net_device *dev,
7334 int (*fn)(struct net_device *dev,
eff74233
TY
7335 struct netdev_nested_priv *priv),
7336 struct netdev_nested_priv *priv)
1a3f060c 7337{
5343da4c
TY
7338 struct net_device *udev, *next, *now, *dev_stack[MAX_NEST_DEV + 1];
7339 struct list_head *niter, *iter, *iter_stack[MAX_NEST_DEV + 1];
7340 int ret, cur = 0;
1a3f060c 7341
5343da4c
TY
7342 now = dev;
7343 iter = &dev->adj_list.upper;
1a3f060c 7344
5343da4c
TY
7345 while (1) {
7346 if (now != dev) {
eff74233 7347 ret = fn(now, priv);
5343da4c
TY
7348 if (ret)
7349 return ret;
7350 }
7351
7352 next = NULL;
7353 while (1) {
7354 udev = netdev_next_upper_dev_rcu(now, &iter);
7355 if (!udev)
7356 break;
7357
7358 next = udev;
7359 niter = &udev->adj_list.upper;
7360 dev_stack[cur] = now;
7361 iter_stack[cur++] = iter;
7362 break;
7363 }
7364
7365 if (!next) {
7366 if (!cur)
7367 return 0;
7368 next = dev_stack[--cur];
7369 niter = iter_stack[cur];
7370 }
7371
7372 now = next;
7373 iter = niter;
1a3f060c
DA
7374 }
7375
7376 return 0;
7377}
7378EXPORT_SYMBOL_GPL(netdev_walk_all_upper_dev_rcu);
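A sketch of how the walker above is driven: the callback sees every device in the upper graph and aborts the walk by returning non-zero. The counting callback here is illustrative only:

#include <linux/netdevice.h>
#include <linux/rcupdate.h>

static int my_count_one(struct net_device *dev,
			struct netdev_nested_priv *priv)
{
	(*(int *)priv->data)++;
	return 0;			/* non-zero would stop the walk */
}

static int my_count_all_uppers(struct net_device *dev)
{
	int count = 0;
	struct netdev_nested_priv priv = {
		.data = (void *)&count,
	};

	rcu_read_lock();
	netdev_walk_all_upper_dev_rcu(dev, my_count_one, &priv);
	rcu_read_unlock();
	return count;
}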
7379
32b6d34f
TY
7380static bool __netdev_has_upper_dev(struct net_device *dev,
7381 struct net_device *upper_dev)
7382{
eff74233 7383 struct netdev_nested_priv priv = {
1fc70edb 7384 .flags = 0,
eff74233
TY
7385 .data = (void *)upper_dev,
7386 };
7387
32b6d34f
TY
7388 ASSERT_RTNL();
7389
7390 return __netdev_walk_all_upper_dev(dev, ____netdev_has_upper_dev,
eff74233 7391 &priv);
32b6d34f
TY
7392}
7393
31088a11
VF
7394/**
7395 * netdev_lower_get_next_private - Get the next ->private from the
7396 * lower neighbour list
7397 * @dev: device
7398 * @iter: list_head ** of the current position
7399 *
7400 * Gets the next netdev_adjacent->private from the dev's lower neighbour
7401 * list, starting from iter position. The caller must either hold the
7402 * RTNL lock or its own locking that guarantees that the neighbour lower
b469139e 7403 * list will remain unchanged.
31088a11
VF
7404 */
7405void *netdev_lower_get_next_private(struct net_device *dev,
7406 struct list_head **iter)
7407{
7408 struct netdev_adjacent *lower;
7409
7410 lower = list_entry(*iter, struct netdev_adjacent, list);
7411
7412 if (&lower->list == &dev->adj_list.lower)
7413 return NULL;
7414
6859e7df 7415 *iter = lower->list.next;
31088a11
VF
7416
7417 return lower->private;
7418}
7419EXPORT_SYMBOL(netdev_lower_get_next_private);
7420
7421/**
7422 * netdev_lower_get_next_private_rcu - Get the next ->private from the
7423 * lower neighbour list, RCU
7424 * variant
7425 * @dev: device
7426 * @iter: list_head ** of the current position
7427 *
7428 * Gets the next netdev_adjacent->private from the dev's lower neighbour
7429 * list, starting from iter position. The caller must hold RCU read lock.
7430 */
7431void *netdev_lower_get_next_private_rcu(struct net_device *dev,
7432 struct list_head **iter)
7433{
7434 struct netdev_adjacent *lower;
7435
7436 WARN_ON_ONCE(!rcu_read_lock_held());
7437
7438 lower = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);
7439
7440 if (&lower->list == &dev->adj_list.lower)
7441 return NULL;
7442
6859e7df 7443 *iter = &lower->list;
31088a11
VF
7444
7445 return lower->private;
7446}
7447EXPORT_SYMBOL(netdev_lower_get_next_private_rcu);
7448
4085ebe8
VY
7449/**
7450 * netdev_lower_get_next - Get the next device from the lower neighbour
7451 * list
7452 * @dev: device
7453 * @iter: list_head ** of the current position
7454 *
7455 * Gets the next netdev_adjacent from the dev's lower neighbour
7456 * list, starting from iter position. The caller must hold RTNL lock or
7457 * its own locking that guarantees that the neighbour lower
b469139e 7458 * list will remain unchanged.
4085ebe8
VY
7459 */
7460void *netdev_lower_get_next(struct net_device *dev, struct list_head **iter)
7461{
7462 struct netdev_adjacent *lower;
7463
cfdd28be 7464 lower = list_entry(*iter, struct netdev_adjacent, list);
4085ebe8
VY
7465
7466 if (&lower->list == &dev->adj_list.lower)
7467 return NULL;
7468
cfdd28be 7469 *iter = lower->list.next;
4085ebe8
VY
7470
7471 return lower->dev;
7472}
7473EXPORT_SYMBOL(netdev_lower_get_next);
7474
1a3f060c
DA
7475static struct net_device *netdev_next_lower_dev(struct net_device *dev,
7476 struct list_head **iter)
7477{
7478 struct netdev_adjacent *lower;
7479
46b5ab1a 7480 lower = list_entry((*iter)->next, struct netdev_adjacent, list);
1a3f060c
DA
7481
7482 if (&lower->list == &dev->adj_list.lower)
7483 return NULL;
7484
46b5ab1a 7485 *iter = &lower->list;
1a3f060c
DA
7486
7487 return lower->dev;
7488}
7489
32b6d34f
TY
7490static struct net_device *__netdev_next_lower_dev(struct net_device *dev,
7491 struct list_head **iter,
7492 bool *ignore)
7493{
7494 struct netdev_adjacent *lower;
7495
7496 lower = list_entry((*iter)->next, struct netdev_adjacent, list);
7497
7498 if (&lower->list == &dev->adj_list.lower)
7499 return NULL;
7500
7501 *iter = &lower->list;
7502 *ignore = lower->ignore;
7503
7504 return lower->dev;
7505}
7506
1a3f060c
DA
7507int netdev_walk_all_lower_dev(struct net_device *dev,
7508 int (*fn)(struct net_device *dev,
eff74233
TY
7509 struct netdev_nested_priv *priv),
7510 struct netdev_nested_priv *priv)
1a3f060c 7511{
5343da4c
TY
7512 struct net_device *ldev, *next, *now, *dev_stack[MAX_NEST_DEV + 1];
7513 struct list_head *niter, *iter, *iter_stack[MAX_NEST_DEV + 1];
7514 int ret, cur = 0;
1a3f060c 7515
5343da4c
TY
7516 now = dev;
7517 iter = &dev->adj_list.lower;
1a3f060c 7518
5343da4c
TY
7519 while (1) {
7520 if (now != dev) {
eff74233 7521 ret = fn(now, priv);
5343da4c
TY
7522 if (ret)
7523 return ret;
7524 }
7525
7526 next = NULL;
7527 while (1) {
7528 ldev = netdev_next_lower_dev(now, &iter);
7529 if (!ldev)
7530 break;
7531
7532 next = ldev;
7533 niter = &ldev->adj_list.lower;
7534 dev_stack[cur] = now;
7535 iter_stack[cur++] = iter;
7536 break;
7537 }
7538
7539 if (!next) {
7540 if (!cur)
7541 return 0;
7542 next = dev_stack[--cur];
7543 niter = iter_stack[cur];
7544 }
7545
7546 now = next;
7547 iter = niter;
1a3f060c
DA
7548 }
7549
7550 return 0;
7551}
7552EXPORT_SYMBOL_GPL(netdev_walk_all_lower_dev);
7553
32b6d34f
TY
7554static int __netdev_walk_all_lower_dev(struct net_device *dev,
7555 int (*fn)(struct net_device *dev,
eff74233
TY
7556 struct netdev_nested_priv *priv),
7557 struct netdev_nested_priv *priv)
32b6d34f
TY
7558{
7559 struct net_device *ldev, *next, *now, *dev_stack[MAX_NEST_DEV + 1];
7560 struct list_head *niter, *iter, *iter_stack[MAX_NEST_DEV + 1];
7561 int ret, cur = 0;
7562 bool ignore;
7563
7564 now = dev;
7565 iter = &dev->adj_list.lower;
7566
7567 while (1) {
7568 if (now != dev) {
eff74233 7569 ret = fn(now, priv);
32b6d34f
TY
7570 if (ret)
7571 return ret;
7572 }
7573
7574 next = NULL;
7575 while (1) {
7576 ldev = __netdev_next_lower_dev(now, &iter, &ignore);
7577 if (!ldev)
7578 break;
7579 if (ignore)
7580 continue;
7581
7582 next = ldev;
7583 niter = &ldev->adj_list.lower;
7584 dev_stack[cur] = now;
7585 iter_stack[cur++] = iter;
7586 break;
7587 }
7588
7589 if (!next) {
7590 if (!cur)
7591 return 0;
7592 next = dev_stack[--cur];
7593 niter = iter_stack[cur];
7594 }
7595
7596 now = next;
7597 iter = niter;
7598 }
7599
7600 return 0;
7601}
7602
7151affe
TY
7603struct net_device *netdev_next_lower_dev_rcu(struct net_device *dev,
7604 struct list_head **iter)
1a3f060c
DA
7605{
7606 struct netdev_adjacent *lower;
7607
7608 lower = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);
7609 if (&lower->list == &dev->adj_list.lower)
7610 return NULL;
7611
7612 *iter = &lower->list;
7613
7614 return lower->dev;
7615}
7151affe 7616EXPORT_SYMBOL(netdev_next_lower_dev_rcu);
1a3f060c 7617
5343da4c
TY
7618static u8 __netdev_upper_depth(struct net_device *dev)
7619{
7620 struct net_device *udev;
7621 struct list_head *iter;
7622 u8 max_depth = 0;
32b6d34f 7623 bool ignore;
5343da4c
TY
7624
7625 for (iter = &dev->adj_list.upper,
32b6d34f 7626 udev = __netdev_next_upper_dev(dev, &iter, &ignore);
5343da4c 7627 udev;
32b6d34f
TY
7628 udev = __netdev_next_upper_dev(dev, &iter, &ignore)) {
7629 if (ignore)
7630 continue;
5343da4c
TY
7631 if (max_depth < udev->upper_level)
7632 max_depth = udev->upper_level;
7633 }
7634
7635 return max_depth;
7636}
7637
7638static u8 __netdev_lower_depth(struct net_device *dev)
1a3f060c
DA
7639{
7640 struct net_device *ldev;
7641 struct list_head *iter;
5343da4c 7642 u8 max_depth = 0;
32b6d34f 7643 bool ignore;
1a3f060c
DA
7644
7645 for (iter = &dev->adj_list.lower,
32b6d34f 7646 ldev = __netdev_next_lower_dev(dev, &iter, &ignore);
1a3f060c 7647 ldev;
32b6d34f
TY
7648 ldev = __netdev_next_lower_dev(dev, &iter, &ignore)) {
7649 if (ignore)
7650 continue;
5343da4c
TY
7651 if (max_depth < ldev->lower_level)
7652 max_depth = ldev->lower_level;
7653 }
1a3f060c 7654
5343da4c
TY
7655 return max_depth;
7656}
7657
eff74233
TY
7658static int __netdev_update_upper_level(struct net_device *dev,
7659 struct netdev_nested_priv *__unused)
5343da4c
TY
7660{
7661 dev->upper_level = __netdev_upper_depth(dev) + 1;
7662 return 0;
7663}
7664
eff74233 7665static int __netdev_update_lower_level(struct net_device *dev,
1fc70edb 7666 struct netdev_nested_priv *priv)
5343da4c
TY
7667{
7668 dev->lower_level = __netdev_lower_depth(dev) + 1;
1fc70edb
TY
7669
7670#ifdef CONFIG_LOCKDEP
7671 if (!priv)
7672 return 0;
7673
7674 if (priv->flags & NESTED_SYNC_IMM)
7675 dev->nested_level = dev->lower_level - 1;
7676 if (priv->flags & NESTED_SYNC_TODO)
7677 net_unlink_todo(dev);
7678#endif
5343da4c
TY
7679 return 0;
7680}
7681
7682int netdev_walk_all_lower_dev_rcu(struct net_device *dev,
7683 int (*fn)(struct net_device *dev,
eff74233
TY
7684 struct netdev_nested_priv *priv),
7685 struct netdev_nested_priv *priv)
5343da4c
TY
7686{
7687 struct net_device *ldev, *next, *now, *dev_stack[MAX_NEST_DEV + 1];
7688 struct list_head *niter, *iter, *iter_stack[MAX_NEST_DEV + 1];
7689 int ret, cur = 0;
7690
7691 now = dev;
7692 iter = &dev->adj_list.lower;
7693
7694 while (1) {
7695 if (now != dev) {
eff74233 7696 ret = fn(now, priv);
5343da4c
TY
7697 if (ret)
7698 return ret;
7699 }
7700
7701 next = NULL;
7702 while (1) {
7703 ldev = netdev_next_lower_dev_rcu(now, &iter);
7704 if (!ldev)
7705 break;
7706
7707 next = ldev;
7708 niter = &ldev->adj_list.lower;
7709 dev_stack[cur] = now;
7710 iter_stack[cur++] = iter;
7711 break;
7712 }
7713
7714 if (!next) {
7715 if (!cur)
7716 return 0;
7717 next = dev_stack[--cur];
7718 niter = iter_stack[cur];
7719 }
7720
7721 now = next;
7722 iter = niter;
1a3f060c
DA
7723 }
7724
7725 return 0;
7726}
7727EXPORT_SYMBOL_GPL(netdev_walk_all_lower_dev_rcu);
7728
e001bfad 7729/**
7730 * netdev_lower_get_first_private_rcu - Get the first ->private from the
7731 * lower neighbour list, RCU
7732 * variant
7733 * @dev: device
7734 *
7735 * Gets the first netdev_adjacent->private from the dev's lower neighbour
7736 * list. The caller must hold RCU read lock.
7737 */
7738void *netdev_lower_get_first_private_rcu(struct net_device *dev)
7739{
7740 struct netdev_adjacent *lower;
7741
7742 lower = list_first_or_null_rcu(&dev->adj_list.lower,
7743 struct netdev_adjacent, list);
7744 if (lower)
7745 return lower->private;
7746 return NULL;
7747}
7748EXPORT_SYMBOL(netdev_lower_get_first_private_rcu);
7749
9ff162a8
JP
7750/**
7751 * netdev_master_upper_dev_get_rcu - Get master upper device
7752 * @dev: device
7753 *
7754 * Find a master upper device and return pointer to it or NULL in case
7755 * it's not there. The caller must hold the RCU read lock.
7756 */
7757struct net_device *netdev_master_upper_dev_get_rcu(struct net_device *dev)
7758{
aa9d8560 7759 struct netdev_adjacent *upper;
9ff162a8 7760
2f268f12 7761 upper = list_first_or_null_rcu(&dev->adj_list.upper,
aa9d8560 7762 struct netdev_adjacent, list);
9ff162a8
JP
7763 if (upper && likely(upper->master))
7764 return upper->dev;
7765 return NULL;
7766}
7767EXPORT_SYMBOL(netdev_master_upper_dev_get_rcu);
7768
0a59f3a9 7769static int netdev_adjacent_sysfs_add(struct net_device *dev,
3ee32707
VF
7770 struct net_device *adj_dev,
7771 struct list_head *dev_list)
7772{
7773 char linkname[IFNAMSIZ+7];
f4563a75 7774
3ee32707
VF
7775 sprintf(linkname, dev_list == &dev->adj_list.upper ?
7776 "upper_%s" : "lower_%s", adj_dev->name);
7777 return sysfs_create_link(&(dev->dev.kobj), &(adj_dev->dev.kobj),
7778 linkname);
7779}
0a59f3a9 7780static void netdev_adjacent_sysfs_del(struct net_device *dev,
3ee32707
VF
7781 char *name,
7782 struct list_head *dev_list)
7783{
7784 char linkname[IFNAMSIZ+7];
f4563a75 7785
3ee32707
VF
7786 sprintf(linkname, dev_list == &dev->adj_list.upper ?
7787 "upper_%s" : "lower_%s", name);
7788 sysfs_remove_link(&(dev->dev.kobj), linkname);
7789}
7790
7ce64c79
AF
7791static inline bool netdev_adjacent_is_neigh_list(struct net_device *dev,
7792 struct net_device *adj_dev,
7793 struct list_head *dev_list)
7794{
7795 return (dev_list == &dev->adj_list.upper ||
7796 dev_list == &dev->adj_list.lower) &&
7797 net_eq(dev_net(dev), dev_net(adj_dev));
7798}
3ee32707 7799
5d261913
VF
7800static int __netdev_adjacent_dev_insert(struct net_device *dev,
7801 struct net_device *adj_dev,
7863c054 7802 struct list_head *dev_list,
402dae96 7803 void *private, bool master)
5d261913
VF
7804{
7805 struct netdev_adjacent *adj;
842d67a7 7806 int ret;
5d261913 7807
6ea29da1 7808 adj = __netdev_find_adj(adj_dev, dev_list);
5d261913
VF
7809
7810 if (adj) {
790510d9 7811 adj->ref_nr += 1;
67b62f98
DA
7812 pr_debug("Insert adjacency: dev %s adj_dev %s adj->ref_nr %d\n",
7813 dev->name, adj_dev->name, adj->ref_nr);
7814
5d261913
VF
7815 return 0;
7816 }
7817
7818 adj = kmalloc(sizeof(*adj), GFP_KERNEL);
7819 if (!adj)
7820 return -ENOMEM;
7821
7822 adj->dev = adj_dev;
7823 adj->master = master;
790510d9 7824 adj->ref_nr = 1;
402dae96 7825 adj->private = private;
32b6d34f 7826 adj->ignore = false;
5d261913 7827 dev_hold(adj_dev);
2f268f12 7828
67b62f98
DA
7829 pr_debug("Insert adjacency: dev %s adj_dev %s adj->ref_nr %d; dev_hold on %s\n",
7830 dev->name, adj_dev->name, adj->ref_nr, adj_dev->name);
5d261913 7831
7ce64c79 7832 if (netdev_adjacent_is_neigh_list(dev, adj_dev, dev_list)) {
3ee32707 7833 ret = netdev_adjacent_sysfs_add(dev, adj_dev, dev_list);
5831d66e
VF
7834 if (ret)
7835 goto free_adj;
7836 }
7837
7863c054 7838 /* Ensure that master link is always the first item in list. */
842d67a7
VF
7839 if (master) {
7840 ret = sysfs_create_link(&(dev->dev.kobj),
7841 &(adj_dev->dev.kobj), "master");
7842 if (ret)
5831d66e 7843 goto remove_symlinks;
842d67a7 7844
7863c054 7845 list_add_rcu(&adj->list, dev_list);
842d67a7 7846 } else {
7863c054 7847 list_add_tail_rcu(&adj->list, dev_list);
842d67a7 7848 }
5d261913
VF
7849
7850 return 0;
842d67a7 7851
5831d66e 7852remove_symlinks:
7ce64c79 7853 if (netdev_adjacent_is_neigh_list(dev, adj_dev, dev_list))
3ee32707 7854 netdev_adjacent_sysfs_del(dev, adj_dev->name, dev_list);
842d67a7
VF
7855free_adj:
7856 kfree(adj);
974daef7 7857 dev_put(adj_dev);
842d67a7
VF
7858
7859 return ret;
5d261913
VF
7860}
7861
1d143d9f 7862static void __netdev_adjacent_dev_remove(struct net_device *dev,
7863 struct net_device *adj_dev,
93409033 7864 u16 ref_nr,
1d143d9f 7865 struct list_head *dev_list)
5d261913
VF
7866{
7867 struct netdev_adjacent *adj;
7868
67b62f98
DA
7869 pr_debug("Remove adjacency: dev %s adj_dev %s ref_nr %d\n",
7870 dev->name, adj_dev->name, ref_nr);
7871
6ea29da1 7872 adj = __netdev_find_adj(adj_dev, dev_list);
5d261913 7873
2f268f12 7874 if (!adj) {
67b62f98 7875 pr_err("Adjacency does not exist for device %s from %s\n",
2f268f12 7876 dev->name, adj_dev->name);
67b62f98
DA
7877 WARN_ON(1);
7878 return;
2f268f12 7879 }
5d261913 7880
93409033 7881 if (adj->ref_nr > ref_nr) {
67b62f98
DA
7882 pr_debug("adjacency: %s to %s ref_nr - %d = %d\n",
7883 dev->name, adj_dev->name, ref_nr,
7884 adj->ref_nr - ref_nr);
93409033 7885 adj->ref_nr -= ref_nr;
5d261913
VF
7886 return;
7887 }
7888
842d67a7
VF
7889 if (adj->master)
7890 sysfs_remove_link(&(dev->dev.kobj), "master");
7891
7ce64c79 7892 if (netdev_adjacent_is_neigh_list(dev, adj_dev, dev_list))
3ee32707 7893 netdev_adjacent_sysfs_del(dev, adj_dev->name, dev_list);
5831d66e 7894
5d261913 7895 list_del_rcu(&adj->list);
67b62f98 7896 pr_debug("adjacency: dev_put for %s, because link removed from %s to %s\n",
2f268f12 7897 adj_dev->name, dev->name, adj_dev->name);
5d261913
VF
7898 dev_put(adj_dev);
7899 kfree_rcu(adj, rcu);
7900}
7901
1d143d9f 7902static int __netdev_adjacent_dev_link_lists(struct net_device *dev,
7903 struct net_device *upper_dev,
7904 struct list_head *up_list,
7905 struct list_head *down_list,
7906 void *private, bool master)
5d261913
VF
7907{
7908 int ret;
7909
790510d9 7910 ret = __netdev_adjacent_dev_insert(dev, upper_dev, up_list,
93409033 7911 private, master);
5d261913
VF
7912 if (ret)
7913 return ret;
7914
790510d9 7915 ret = __netdev_adjacent_dev_insert(upper_dev, dev, down_list,
93409033 7916 private, false);
5d261913 7917 if (ret) {
790510d9 7918 __netdev_adjacent_dev_remove(dev, upper_dev, 1, up_list);
5d261913
VF
7919 return ret;
7920 }
7921
7922 return 0;
7923}
7924
1d143d9f 7925static void __netdev_adjacent_dev_unlink_lists(struct net_device *dev,
7926 struct net_device *upper_dev,
93409033 7927 u16 ref_nr,
1d143d9f 7928 struct list_head *up_list,
7929 struct list_head *down_list)
5d261913 7930{
93409033
AC
7931 __netdev_adjacent_dev_remove(dev, upper_dev, ref_nr, up_list);
7932 __netdev_adjacent_dev_remove(upper_dev, dev, ref_nr, down_list);
5d261913
VF
7933}
7934
1d143d9f 7935static int __netdev_adjacent_dev_link_neighbour(struct net_device *dev,
7936 struct net_device *upper_dev,
7937 void *private, bool master)
2f268f12 7938{
f1170fd4
DA
7939 return __netdev_adjacent_dev_link_lists(dev, upper_dev,
7940 &dev->adj_list.upper,
7941 &upper_dev->adj_list.lower,
7942 private, master);
5d261913
VF
7943}
7944
1d143d9f 7945static void __netdev_adjacent_dev_unlink_neighbour(struct net_device *dev,
7946 struct net_device *upper_dev)
2f268f12 7947{
93409033 7948 __netdev_adjacent_dev_unlink_lists(dev, upper_dev, 1,
2f268f12
VF
7949 &dev->adj_list.upper,
7950 &upper_dev->adj_list.lower);
7951}
5d261913 7952
9ff162a8 7953static int __netdev_upper_dev_link(struct net_device *dev,
402dae96 7954 struct net_device *upper_dev, bool master,
42ab19ee 7955 void *upper_priv, void *upper_info,
1fc70edb 7956 struct netdev_nested_priv *priv,
42ab19ee 7957 struct netlink_ext_ack *extack)
9ff162a8 7958{
51d0c047
DA
7959 struct netdev_notifier_changeupper_info changeupper_info = {
7960 .info = {
7961 .dev = dev,
42ab19ee 7962 .extack = extack,
51d0c047
DA
7963 },
7964 .upper_dev = upper_dev,
7965 .master = master,
7966 .linking = true,
7967 .upper_info = upper_info,
7968 };
50d629e7 7969 struct net_device *master_dev;
5d261913 7970 int ret = 0;
9ff162a8
JP
7971
7972 ASSERT_RTNL();
7973
7974 if (dev == upper_dev)
7975 return -EBUSY;
7976
7977 /* To prevent loops, check if dev is not upper device to upper_dev. */
32b6d34f 7978 if (__netdev_has_upper_dev(upper_dev, dev))
9ff162a8
JP
7979 return -EBUSY;
7980
5343da4c
TY
7981 if ((dev->lower_level + upper_dev->upper_level) > MAX_NEST_DEV)
7982 return -EMLINK;
7983
50d629e7 7984 if (!master) {
32b6d34f 7985 if (__netdev_has_upper_dev(dev, upper_dev))
50d629e7
MM
7986 return -EEXIST;
7987 } else {
32b6d34f 7988 master_dev = __netdev_master_upper_dev_get(dev);
50d629e7
MM
7989 if (master_dev)
7990 return master_dev == upper_dev ? -EEXIST : -EBUSY;
7991 }
9ff162a8 7992
51d0c047 7993 ret = call_netdevice_notifiers_info(NETDEV_PRECHANGEUPPER,
573c7ba0
JP
7994 &changeupper_info.info);
7995 ret = notifier_to_errno(ret);
7996 if (ret)
7997 return ret;
7998
6dffb044 7999 ret = __netdev_adjacent_dev_link_neighbour(dev, upper_dev, upper_priv,
402dae96 8000 master);
5d261913
VF
8001 if (ret)
8002 return ret;
9ff162a8 8003
51d0c047 8004 ret = call_netdevice_notifiers_info(NETDEV_CHANGEUPPER,
b03804e7
IS
8005 &changeupper_info.info);
8006 ret = notifier_to_errno(ret);
8007 if (ret)
f1170fd4 8008 goto rollback;
b03804e7 8009
5343da4c 8010 __netdev_update_upper_level(dev, NULL);
32b6d34f 8011 __netdev_walk_all_lower_dev(dev, __netdev_update_upper_level, NULL);
5343da4c 8012
1fc70edb 8013 __netdev_update_lower_level(upper_dev, priv);
32b6d34f 8014 __netdev_walk_all_upper_dev(upper_dev, __netdev_update_lower_level,
1fc70edb 8015 priv);
5343da4c 8016
9ff162a8 8017 return 0;
5d261913 8018
f1170fd4 8019rollback:
2f268f12 8020 __netdev_adjacent_dev_unlink_neighbour(dev, upper_dev);
5d261913
VF
8021
8022 return ret;
9ff162a8
JP
8023}
8024
8025/**
8026 * netdev_upper_dev_link - Add a link to the upper device
8027 * @dev: device
8028 * @upper_dev: new upper device
7a006d59 8029 * @extack: netlink extended ack
9ff162a8
JP
8030 *
8031 * Adds a link to a device which is upper to this one. The caller must hold
8032 * the RTNL lock. On a failure a negative errno code is returned.
8033 * On success the reference counts are adjusted and the function
8034 * returns zero.
8035 */
8036int netdev_upper_dev_link(struct net_device *dev,
42ab19ee
DA
8037 struct net_device *upper_dev,
8038 struct netlink_ext_ack *extack)
9ff162a8 8039{
1fc70edb
TY
8040 struct netdev_nested_priv priv = {
8041 .flags = NESTED_SYNC_IMM | NESTED_SYNC_TODO,
8042 .data = NULL,
8043 };
8044
42ab19ee 8045 return __netdev_upper_dev_link(dev, upper_dev, false,
1fc70edb 8046 NULL, NULL, &priv, extack);
9ff162a8
JP
8047}
8048EXPORT_SYMBOL(netdev_upper_dev_link);
8049
8050/**
8051 * netdev_master_upper_dev_link - Add a master link to the upper device
8052 * @dev: device
8053 * @upper_dev: new upper device
6dffb044 8054 * @upper_priv: upper device private
29bf24af 8055 * @upper_info: upper info to be passed down via notifier
7a006d59 8056 * @extack: netlink extended ack
9ff162a8
JP
8057 *
8058 * Adds a link to a device which is upper to this one. In this case, only
8059 * one master upper device can be linked, although other non-master devices
8060 * might be linked as well. The caller must hold the RTNL lock.
8061 * On a failure a negative errno code is returned. On success the reference
8062 * counts are adjusted and the function returns zero.
8063 */
8064int netdev_master_upper_dev_link(struct net_device *dev,
6dffb044 8065 struct net_device *upper_dev,
42ab19ee
DA
8066 void *upper_priv, void *upper_info,
8067 struct netlink_ext_ack *extack)
9ff162a8 8068{
1fc70edb
TY
8069 struct netdev_nested_priv priv = {
8070 .flags = NESTED_SYNC_IMM | NESTED_SYNC_TODO,
8071 .data = NULL,
8072 };
8073
29bf24af 8074 return __netdev_upper_dev_link(dev, upper_dev, true,
1fc70edb 8075 upper_priv, upper_info, &priv, extack);
9ff162a8
JP
8076}
8077EXPORT_SYMBOL(netdev_master_upper_dev_link);
8078
fe8300fd 8079static void __netdev_upper_dev_unlink(struct net_device *dev,
1fc70edb
TY
8080 struct net_device *upper_dev,
8081 struct netdev_nested_priv *priv)
9ff162a8 8082{
51d0c047
DA
8083 struct netdev_notifier_changeupper_info changeupper_info = {
8084 .info = {
8085 .dev = dev,
8086 },
8087 .upper_dev = upper_dev,
8088 .linking = false,
8089 };
f4563a75 8090
9ff162a8
JP
8091 ASSERT_RTNL();
8092
0e4ead9d 8093 changeupper_info.master = netdev_master_upper_dev_get(dev) == upper_dev;
0e4ead9d 8094
51d0c047 8095 call_netdevice_notifiers_info(NETDEV_PRECHANGEUPPER,
573c7ba0
JP
8096 &changeupper_info.info);
8097
2f268f12 8098 __netdev_adjacent_dev_unlink_neighbour(dev, upper_dev);
5d261913 8099
51d0c047 8100 call_netdevice_notifiers_info(NETDEV_CHANGEUPPER,
0e4ead9d 8101 &changeupper_info.info);
5343da4c
TY
8102
8103 __netdev_update_upper_level(dev, NULL);
32b6d34f 8104 __netdev_walk_all_lower_dev(dev, __netdev_update_upper_level, NULL);
5343da4c 8105
1fc70edb 8106 __netdev_update_lower_level(upper_dev, priv);
32b6d34f 8107 __netdev_walk_all_upper_dev(upper_dev, __netdev_update_lower_level,
1fc70edb 8108 priv);
9ff162a8 8109}
fe8300fd
TY
8110
8111/**
8112 * netdev_upper_dev_unlink - Removes a link to upper device
8113 * @dev: device
8114 * @upper_dev: upper device to be removed
8115 *
8116 * Removes a link to device which is upper to this one. The caller must hold
8117 * the RTNL lock.
8118 */
8119void netdev_upper_dev_unlink(struct net_device *dev,
8120 struct net_device *upper_dev)
8121{
1fc70edb
TY
8122 struct netdev_nested_priv priv = {
8123 .flags = NESTED_SYNC_TODO,
8124 .data = NULL,
8125 };
8126
8127 __netdev_upper_dev_unlink(dev, upper_dev, &priv);
9ff162a8
JP
8128}
8129EXPORT_SYMBOL(netdev_upper_dev_unlink);
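A sketch of the link/unlink pairing as a stacking driver might use it; my_attach and my_detach are illustrative and error handling is trimmed:

#include <linux/netdevice.h>
#include <linux/rtnetlink.h>

static int my_attach(struct net_device *lower, struct net_device *upper,
		     struct netlink_ext_ack *extack)
{
	int err;

	ASSERT_RTNL();
	err = netdev_upper_dev_link(lower, upper, extack);
	if (err)		/* -EBUSY on a loop, -EMLINK past MAX_NEST_DEV */
		return err;
	/* ... driver-private setup ... */
	return 0;
}

static void my_detach(struct net_device *lower, struct net_device *upper)
{
	ASSERT_RTNL();
	netdev_upper_dev_unlink(lower, upper);
}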
8130
32b6d34f
TY
8131static void __netdev_adjacent_dev_set(struct net_device *upper_dev,
8132 struct net_device *lower_dev,
8133 bool val)
8134{
8135 struct netdev_adjacent *adj;
8136
8137 adj = __netdev_find_adj(lower_dev, &upper_dev->adj_list.lower);
8138 if (adj)
8139 adj->ignore = val;
8140
8141 adj = __netdev_find_adj(upper_dev, &lower_dev->adj_list.upper);
8142 if (adj)
8143 adj->ignore = val;
8144}
8145
8146static void netdev_adjacent_dev_disable(struct net_device *upper_dev,
8147 struct net_device *lower_dev)
8148{
8149 __netdev_adjacent_dev_set(upper_dev, lower_dev, true);
8150}
8151
8152static void netdev_adjacent_dev_enable(struct net_device *upper_dev,
8153 struct net_device *lower_dev)
8154{
8155 __netdev_adjacent_dev_set(upper_dev, lower_dev, false);
8156}
8157
8158int netdev_adjacent_change_prepare(struct net_device *old_dev,
8159 struct net_device *new_dev,
8160 struct net_device *dev,
8161 struct netlink_ext_ack *extack)
8162{
1fc70edb
TY
8163 struct netdev_nested_priv priv = {
8164 .flags = 0,
8165 .data = NULL,
8166 };
32b6d34f
TY
8167 int err;
8168
8169 if (!new_dev)
8170 return 0;
8171
8172 if (old_dev && new_dev != old_dev)
8173 netdev_adjacent_dev_disable(dev, old_dev);
1fc70edb
TY
8174 err = __netdev_upper_dev_link(new_dev, dev, false, NULL, NULL, &priv,
8175 extack);
32b6d34f
TY
8176 if (err) {
8177 if (old_dev && new_dev != old_dev)
8178 netdev_adjacent_dev_enable(dev, old_dev);
8179 return err;
8180 }
8181
8182 return 0;
8183}
8184EXPORT_SYMBOL(netdev_adjacent_change_prepare);
8185
8186void netdev_adjacent_change_commit(struct net_device *old_dev,
8187 struct net_device *new_dev,
8188 struct net_device *dev)
8189{
1fc70edb
TY
8190 struct netdev_nested_priv priv = {
8191 .flags = NESTED_SYNC_IMM | NESTED_SYNC_TODO,
8192 .data = NULL,
8193 };
8194
32b6d34f
TY
8195 if (!new_dev || !old_dev)
8196 return;
8197
8198 if (new_dev == old_dev)
8199 return;
8200
8201 netdev_adjacent_dev_enable(dev, old_dev);
1fc70edb 8202 __netdev_upper_dev_unlink(old_dev, dev, &priv);
32b6d34f
TY
8203}
8204EXPORT_SYMBOL(netdev_adjacent_change_commit);
8205
8206void netdev_adjacent_change_abort(struct net_device *old_dev,
8207 struct net_device *new_dev,
8208 struct net_device *dev)
8209{
1fc70edb
TY
8210 struct netdev_nested_priv priv = {
8211 .flags = 0,
8212 .data = NULL,
8213 };
8214
32b6d34f
TY
8215 if (!new_dev)
8216 return;
8217
8218 if (old_dev && new_dev != old_dev)
8219 netdev_adjacent_dev_enable(dev, old_dev);
8220
1fc70edb 8221 __netdev_upper_dev_unlink(new_dev, dev, &priv);
32b6d34f
TY
8222}
8223EXPORT_SYMBOL(netdev_adjacent_change_abort);
8224
61bd3857
MS
8225/**
8226 * netdev_bonding_info_change - Dispatch event about slave change
8227 * @dev: device
4a26e453 8228 * @bonding_info: info to dispatch
61bd3857
MS
8229 *
8230 * Send NETDEV_BONDING_INFO to netdev notifiers with info.
8231 * The caller must hold the RTNL lock.
8232 */
8233void netdev_bonding_info_change(struct net_device *dev,
8234 struct netdev_bonding_info *bonding_info)
8235{
51d0c047
DA
8236 struct netdev_notifier_bonding_info info = {
8237 .info.dev = dev,
8238 };
61bd3857
MS
8239
8240 memcpy(&info.bonding_info, bonding_info,
8241 sizeof(struct netdev_bonding_info));
51d0c047 8242 call_netdevice_notifiers_info(NETDEV_BONDING_INFO,
61bd3857
MS
8243 &info.info);
8244}
8245EXPORT_SYMBOL(netdev_bonding_info_change);
8246
cff9f12b
MG
8247/**
8248 * netdev_get_xmit_slave - Get the xmit slave of master device
8842500d 8249 * @dev: device
cff9f12b
MG
8250 * @skb: The packet
8251 * @all_slaves: assume all the slaves are active
8252 *
8253 * The reference counters are not incremented so the caller must be
8254 * careful with locks. The caller must hold the RCU read lock.
8255 * %NULL is returned if no slave is found.
8256 */
8257
8258struct net_device *netdev_get_xmit_slave(struct net_device *dev,
8259 struct sk_buff *skb,
8260 bool all_slaves)
8261{
8262 const struct net_device_ops *ops = dev->netdev_ops;
8263
8264 if (!ops->ndo_get_xmit_slave)
8265 return NULL;
8266 return ops->ndo_get_xmit_slave(dev, skb, all_slaves);
8267}
8268EXPORT_SYMBOL(netdev_get_xmit_slave);
8269
719a402c
TT
8270static struct net_device *netdev_sk_get_lower_dev(struct net_device *dev,
8271 struct sock *sk)
8272{
8273 const struct net_device_ops *ops = dev->netdev_ops;
8274
8275 if (!ops->ndo_sk_get_lower_dev)
8276 return NULL;
8277 return ops->ndo_sk_get_lower_dev(dev, sk);
8278}
8279
8280/**
8281 * netdev_sk_get_lowest_dev - Get the lowest device in chain given device and socket
8282 * @dev: device
8283 * @sk: the socket
8284 *
8285 * %NULL is returned if no lower device is found.
8286 */
8287
8288struct net_device *netdev_sk_get_lowest_dev(struct net_device *dev,
8289 struct sock *sk)
8290{
8291 struct net_device *lower;
8292
8293 lower = netdev_sk_get_lower_dev(dev, sk);
8294 while (lower) {
8295 dev = lower;
8296 lower = netdev_sk_get_lower_dev(dev, sk);
8297 }
8298
8299 return dev;
8300}
8301EXPORT_SYMBOL(netdev_sk_get_lowest_dev);
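A caller sketch for the helper above, e.g. resolving the physical port under a bond for a given socket; my_log_lowest_dev is illustrative:

#include <linux/netdevice.h>
#include <net/sock.h>

static void my_log_lowest_dev(struct net_device *dev, struct sock *sk)
{
	struct net_device *lowest;

	rcu_read_lock();
	lowest = netdev_sk_get_lowest_dev(dev, sk);
	if (lowest)
		netdev_info(lowest, "bottom-most xmit device for this socket\n");
	/* No reference was taken, so @lowest is only valid inside the
	 * RCU read-side section. */
	rcu_read_unlock();
}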
8302
2ce1ee17 8303static void netdev_adjacent_add_links(struct net_device *dev)
4c75431a
AF
8304{
8305 struct netdev_adjacent *iter;
8306
8307 struct net *net = dev_net(dev);
8308
8309 list_for_each_entry(iter, &dev->adj_list.upper, list) {
be4da0e3 8310 if (!net_eq(net, dev_net(iter->dev)))
4c75431a
AF
8311 continue;
8312 netdev_adjacent_sysfs_add(iter->dev, dev,
8313 &iter->dev->adj_list.lower);
8314 netdev_adjacent_sysfs_add(dev, iter->dev,
8315 &dev->adj_list.upper);
8316 }
8317
8318 list_for_each_entry(iter, &dev->adj_list.lower, list) {
be4da0e3 8319 if (!net_eq(net, dev_net(iter->dev)))
4c75431a
AF
8320 continue;
8321 netdev_adjacent_sysfs_add(iter->dev, dev,
8322 &iter->dev->adj_list.upper);
8323 netdev_adjacent_sysfs_add(dev, iter->dev,
8324 &dev->adj_list.lower);
8325 }
8326}
8327
2ce1ee17 8328static void netdev_adjacent_del_links(struct net_device *dev)
4c75431a
AF
8329{
8330 struct netdev_adjacent *iter;
8331
8332 struct net *net = dev_net(dev);
8333
8334 list_for_each_entry(iter, &dev->adj_list.upper, list) {
be4da0e3 8335 if (!net_eq(net, dev_net(iter->dev)))
4c75431a
AF
8336 continue;
8337 netdev_adjacent_sysfs_del(iter->dev, dev->name,
8338 &iter->dev->adj_list.lower);
8339 netdev_adjacent_sysfs_del(dev, iter->dev->name,
8340 &dev->adj_list.upper);
8341 }
8342
8343 list_for_each_entry(iter, &dev->adj_list.lower, list) {
be4da0e3 8344 if (!net_eq(net, dev_net(iter->dev)))
4c75431a
AF
8345 continue;
8346 netdev_adjacent_sysfs_del(iter->dev, dev->name,
8347 &iter->dev->adj_list.upper);
8348 netdev_adjacent_sysfs_del(dev, iter->dev->name,
8349 &dev->adj_list.lower);
8350 }
8351}
8352
5bb025fa 8353void netdev_adjacent_rename_links(struct net_device *dev, char *oldname)
402dae96 8354{
5bb025fa 8355 struct netdev_adjacent *iter;
402dae96 8356
4c75431a
AF
8357 struct net *net = dev_net(dev);
8358
5bb025fa 8359 list_for_each_entry(iter, &dev->adj_list.upper, list) {
be4da0e3 8360 if (!net_eq(net, dev_net(iter->dev)))
4c75431a 8361 continue;
5bb025fa
VF
8362 netdev_adjacent_sysfs_del(iter->dev, oldname,
8363 &iter->dev->adj_list.lower);
8364 netdev_adjacent_sysfs_add(iter->dev, dev,
8365 &iter->dev->adj_list.lower);
8366 }
402dae96 8367
5bb025fa 8368 list_for_each_entry(iter, &dev->adj_list.lower, list) {
be4da0e3 8369 if (!net_eq(net, dev_net(iter->dev)))
4c75431a 8370 continue;
5bb025fa
VF
8371 netdev_adjacent_sysfs_del(iter->dev, oldname,
8372 &iter->dev->adj_list.upper);
8373 netdev_adjacent_sysfs_add(iter->dev, dev,
8374 &iter->dev->adj_list.upper);
8375 }
402dae96 8376}
402dae96
VF
8377
8378void *netdev_lower_dev_get_private(struct net_device *dev,
8379 struct net_device *lower_dev)
8380{
8381 struct netdev_adjacent *lower;
8382
8383 if (!lower_dev)
8384 return NULL;
6ea29da1 8385 lower = __netdev_find_adj(lower_dev, &dev->adj_list.lower);
402dae96
VF
8386 if (!lower)
8387 return NULL;
8388
8389 return lower->private;
8390}
8391EXPORT_SYMBOL(netdev_lower_dev_get_private);
8392
4085ebe8 8393
04d48266 8394/**
c1639be9 8395 * netdev_lower_state_changed - Dispatch event about lower device state change
04d48266
JP
8396 * @lower_dev: device
8397 * @lower_state_info: state to dispatch
8398 *
8399 * Send NETDEV_CHANGELOWERSTATE to netdev notifiers with info.
8400 * The caller must hold the RTNL lock.
8401 */
8402void netdev_lower_state_changed(struct net_device *lower_dev,
8403 void *lower_state_info)
8404{
51d0c047
DA
8405 struct netdev_notifier_changelowerstate_info changelowerstate_info = {
8406 .info.dev = lower_dev,
8407 };
04d48266
JP
8408
8409 ASSERT_RTNL();
8410 changelowerstate_info.lower_state_info = lower_state_info;
51d0c047 8411 call_netdevice_notifiers_info(NETDEV_CHANGELOWERSTATE,
04d48266
JP
8412 &changelowerstate_info.info);
8413}
8414EXPORT_SYMBOL(netdev_lower_state_changed);
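A sketch of a dispatcher in the style of bonding: the lower device's driver publishes an opaque state blob to NETDEV_CHANGELOWERSTATE listeners. The info structure here is a hypothetical payload; real users define their own layout:

#include <linux/netdevice.h>
#include <linux/rtnetlink.h>

struct my_lower_state_info {		/* hypothetical payload */
	bool link_up;
	bool tx_enabled;
};

static void my_publish_lower_state(struct net_device *slave_dev, bool up)
{
	struct my_lower_state_info info = {
		.link_up    = up,
		.tx_enabled = up,
	};

	ASSERT_RTNL();
	netdev_lower_state_changed(slave_dev, &info);
}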
8415
b6c40d68
PM
8416static void dev_change_rx_flags(struct net_device *dev, int flags)
8417{
d314774c
SH
8418 const struct net_device_ops *ops = dev->netdev_ops;
8419
d2615bf4 8420 if (ops->ndo_change_rx_flags)
d314774c 8421 ops->ndo_change_rx_flags(dev, flags);
b6c40d68
PM
8422}
8423
991fb3f7 8424static int __dev_set_promiscuity(struct net_device *dev, int inc, bool notify)
1da177e4 8425{
b536db93 8426 unsigned int old_flags = dev->flags;
d04a48b0
EB
8427 kuid_t uid;
8428 kgid_t gid;
1da177e4 8429
24023451
PM
8430 ASSERT_RTNL();
8431
dad9b335
WC
8432 dev->flags |= IFF_PROMISC;
8433 dev->promiscuity += inc;
8434 if (dev->promiscuity == 0) {
8435 /*
8436 * Avoid overflow.
8437 * If inc causes overflow, untouch promisc and return error.
8438 */
8439 if (inc < 0)
8440 dev->flags &= ~IFF_PROMISC;
8441 else {
8442 dev->promiscuity -= inc;
7b6cd1ce
JP
8443 pr_warn("%s: promiscuity touches roof, set promiscuity failed. promiscuity feature of device might be broken.\n",
8444 dev->name);
dad9b335
WC
8445 return -EOVERFLOW;
8446 }
8447 }
52609c0b 8448 if (dev->flags != old_flags) {
7b6cd1ce
JP
8449 pr_info("device %s %s promiscuous mode\n",
8450 dev->name,
8451 dev->flags & IFF_PROMISC ? "entered" : "left");
8192b0c4
DH
8452 if (audit_enabled) {
8453 current_uid_gid(&uid, &gid);
cdfb6b34
RGB
8454 audit_log(audit_context(), GFP_ATOMIC,
8455 AUDIT_ANOM_PROMISCUOUS,
8456 "dev=%s prom=%d old_prom=%d auid=%u uid=%u gid=%u ses=%u",
8457 dev->name, (dev->flags & IFF_PROMISC),
8458 (old_flags & IFF_PROMISC),
8459 from_kuid(&init_user_ns, audit_get_loginuid(current)),
8460 from_kuid(&init_user_ns, uid),
8461 from_kgid(&init_user_ns, gid),
8462 audit_get_sessionid(current));
8192b0c4 8463 }
24023451 8464
b6c40d68 8465 dev_change_rx_flags(dev, IFF_PROMISC);
1da177e4 8466 }
991fb3f7
ND
8467 if (notify)
8468 __dev_notify_flags(dev, old_flags, IFF_PROMISC);
dad9b335 8469 return 0;
1da177e4
LT
8470}
8471
4417da66
PM
8472/**
8473 * dev_set_promiscuity - update promiscuity count on a device
8474 * @dev: device
8475 * @inc: modifier
8476 *
8477 * Add or remove promiscuity from a device. While the count in the device
8478 * remains above zero the interface remains promiscuous. Once it hits zero
8479 * the device reverts to normal filtering operation. A negative inc
8480 * value is used to drop promiscuity on the device.
dad9b335 8481 * Return 0 if successful or a negative errno code on error.
4417da66 8482 */
dad9b335 8483int dev_set_promiscuity(struct net_device *dev, int inc)
4417da66 8484{
b536db93 8485 unsigned int old_flags = dev->flags;
dad9b335 8486 int err;
4417da66 8487
991fb3f7 8488 err = __dev_set_promiscuity(dev, inc, true);
4b5a698e 8489 if (err < 0)
dad9b335 8490 return err;
4417da66
PM
8491 if (dev->flags != old_flags)
8492 dev_set_rx_mode(dev);
dad9b335 8493 return err;
4417da66 8494}
d1b19dff 8495EXPORT_SYMBOL(dev_set_promiscuity);
4417da66 8496
991fb3f7 8497static int __dev_set_allmulti(struct net_device *dev, int inc, bool notify)
1da177e4 8498{
991fb3f7 8499 unsigned int old_flags = dev->flags, old_gflags = dev->gflags;
1da177e4 8500
24023451
PM
8501 ASSERT_RTNL();
8502
1da177e4 8503 dev->flags |= IFF_ALLMULTI;
dad9b335
WC
8504 dev->allmulti += inc;
8505 if (dev->allmulti == 0) {
8506 /*
8507 * Avoid overflow.
8508 * If inc causes overflow, untouch allmulti and return error.
8509 */
8510 if (inc < 0)
8511 dev->flags &= ~IFF_ALLMULTI;
8512 else {
8513 dev->allmulti -= inc;
7b6cd1ce
JP
8514 pr_warn("%s: allmulti touches roof, set allmulti failed. allmulti feature of device might be broken.\n",
8515 dev->name);
dad9b335
WC
8516 return -EOVERFLOW;
8517 }
8518 }
24023451 8519 if (dev->flags ^ old_flags) {
b6c40d68 8520 dev_change_rx_flags(dev, IFF_ALLMULTI);
4417da66 8521 dev_set_rx_mode(dev);
991fb3f7
ND
8522 if (notify)
8523 __dev_notify_flags(dev, old_flags,
8524 dev->gflags ^ old_gflags);
24023451 8525 }
dad9b335 8526 return 0;
4417da66 8527}
991fb3f7
ND
8528
8529/**
8530 * dev_set_allmulti - update allmulti count on a device
8531 * @dev: device
8532 * @inc: modifier
8533 *
8534 * Add or remove reception of all multicast frames to a device. While the
8535 * count in the device remains above zero the interface keeps listening
8536 * to all multicast frames. Once it hits zero the device reverts to normal
8537 * filtering operation. A negative @inc value is used to drop the counter
8538 * when releasing a resource needing all multicasts.
8539 * Return 0 if successful or a negative errno code on error.
8540 */
8541
8542int dev_set_allmulti(struct net_device *dev, int inc)
8543{
8544 return __dev_set_allmulti(dev, inc, true);
8545}
d1b19dff 8546EXPORT_SYMBOL(dev_set_allmulti);
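Because both counters are reference counts, every increment must eventually be balanced by a decrement. A sketch for a hypothetical capture feature (my_capture_start and my_capture_stop are illustrative):

#include <linux/netdevice.h>
#include <linux/rtnetlink.h>

static int my_capture_start(struct net_device *dev)
{
	int err;

	ASSERT_RTNL();
	err = dev_set_promiscuity(dev, 1);
	if (err)
		return err;		/* -EOVERFLOW if the count would wrap */
	err = dev_set_allmulti(dev, 1);
	if (err)
		dev_set_promiscuity(dev, -1);	/* roll back the first step */
	return err;
}

static void my_capture_stop(struct net_device *dev)
{
	ASSERT_RTNL();
	dev_set_allmulti(dev, -1);
	dev_set_promiscuity(dev, -1);
}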
4417da66
PM
8547
8548/*
8549 * Upload unicast and multicast address lists to device and
8550 * configure RX filtering. When the device doesn't support unicast
53ccaae1 8551 * filtering it is put in promiscuous mode while unicast addresses
4417da66
PM
8552 * are present.
8553 */
8554void __dev_set_rx_mode(struct net_device *dev)
8555{
d314774c
SH
8556 const struct net_device_ops *ops = dev->netdev_ops;
8557
4417da66
PM
8558 /* dev_open will call this function so the list will stay sane. */
8559 if (!(dev->flags&IFF_UP))
8560 return;
8561
8562 if (!netif_device_present(dev))
40b77c94 8563 return;
4417da66 8564
01789349 8565 if (!(dev->priv_flags & IFF_UNICAST_FLT)) {
4417da66
PM
8566 /* Unicast addresses changes may only happen under the rtnl,
8567 * therefore calling __dev_set_promiscuity here is safe.
8568 */
32e7bfc4 8569 if (!netdev_uc_empty(dev) && !dev->uc_promisc) {
991fb3f7 8570 __dev_set_promiscuity(dev, 1, false);
2d348d1f 8571 dev->uc_promisc = true;
32e7bfc4 8572 } else if (netdev_uc_empty(dev) && dev->uc_promisc) {
991fb3f7 8573 __dev_set_promiscuity(dev, -1, false);
2d348d1f 8574 dev->uc_promisc = false;
4417da66 8575 }
4417da66 8576 }
01789349
JP
8577
8578 if (ops->ndo_set_rx_mode)
8579 ops->ndo_set_rx_mode(dev);
4417da66
PM
8580}
8581
8582void dev_set_rx_mode(struct net_device *dev)
8583{
b9e40857 8584 netif_addr_lock_bh(dev);
4417da66 8585 __dev_set_rx_mode(dev);
b9e40857 8586 netif_addr_unlock_bh(dev);
1da177e4
LT
8587}
8588
f0db275a
SH
8589/**
8590 * dev_get_flags - get flags reported to userspace
8591 * @dev: device
8592 *
8593 * Get the combination of flag bits exported through APIs to userspace.
8594 */
95c96174 8595unsigned int dev_get_flags(const struct net_device *dev)
1da177e4 8596{
95c96174 8597 unsigned int flags;
1da177e4
LT
8598
8599 flags = (dev->flags & ~(IFF_PROMISC |
8600 IFF_ALLMULTI |
b00055aa
SR
8601 IFF_RUNNING |
8602 IFF_LOWER_UP |
8603 IFF_DORMANT)) |
1da177e4
LT
8604 (dev->gflags & (IFF_PROMISC |
8605 IFF_ALLMULTI));
8606
b00055aa
SR
8607 if (netif_running(dev)) {
8608 if (netif_oper_up(dev))
8609 flags |= IFF_RUNNING;
8610 if (netif_carrier_ok(dev))
8611 flags |= IFF_LOWER_UP;
8612 if (netif_dormant(dev))
8613 flags |= IFF_DORMANT;
8614 }
1da177e4
LT
8615
8616 return flags;
8617}
d1b19dff 8618EXPORT_SYMBOL(dev_get_flags);
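A sketch decoding the userspace view assembled above; my_log_oper_flags is illustrative:

#include <linux/netdevice.h>

static void my_log_oper_flags(struct net_device *dev)
{
	unsigned int flags = dev_get_flags(dev);

	netdev_info(dev, "up=%d running=%d lower_up=%d dormant=%d\n",
		    !!(flags & IFF_UP), !!(flags & IFF_RUNNING),
		    !!(flags & IFF_LOWER_UP), !!(flags & IFF_DORMANT));
}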
1da177e4 8619
6d040321
PM
8620int __dev_change_flags(struct net_device *dev, unsigned int flags,
8621 struct netlink_ext_ack *extack)
1da177e4 8622{
b536db93 8623 unsigned int old_flags = dev->flags;
bd380811 8624 int ret;
1da177e4 8625
24023451
PM
8626 ASSERT_RTNL();
8627
1da177e4
LT
8628 /*
8629 * Set the flags on our device.
8630 */
8631
8632 dev->flags = (flags & (IFF_DEBUG | IFF_NOTRAILERS | IFF_NOARP |
8633 IFF_DYNAMIC | IFF_MULTICAST | IFF_PORTSEL |
8634 IFF_AUTOMEDIA)) |
8635 (dev->flags & (IFF_UP | IFF_VOLATILE | IFF_PROMISC |
8636 IFF_ALLMULTI));
8637
8638 /*
8639 * Load in the correct multicast list now the flags have changed.
8640 */
8641
b6c40d68
PM
8642 if ((old_flags ^ flags) & IFF_MULTICAST)
8643 dev_change_rx_flags(dev, IFF_MULTICAST);
24023451 8644
4417da66 8645 dev_set_rx_mode(dev);
1da177e4
LT
8646
8647 /*
8648 * Have we downed the interface? We handle IFF_UP ourselves
8649 * according to user attempts to set it, rather than blindly
8650 * setting it.
8651 */
8652
8653 ret = 0;
7051b88a 8654 if ((old_flags ^ flags) & IFF_UP) {
8655 if (old_flags & IFF_UP)
8656 __dev_close(dev);
8657 else
40c900aa 8658 ret = __dev_open(dev, extack);
7051b88a 8659 }
1da177e4 8660
1da177e4 8661 if ((flags ^ dev->gflags) & IFF_PROMISC) {
d1b19dff 8662 int inc = (flags & IFF_PROMISC) ? 1 : -1;
991fb3f7 8663 unsigned int old_flags = dev->flags;
d1b19dff 8664
1da177e4 8665 dev->gflags ^= IFF_PROMISC;
991fb3f7
ND
8666
8667 if (__dev_set_promiscuity(dev, inc, false) >= 0)
8668 if (dev->flags != old_flags)
8669 dev_set_rx_mode(dev);
1da177e4
LT
8670 }
8671
8672 /* NOTE: order of synchronization of IFF_PROMISC and IFF_ALLMULTI
eb13da1a 8673 * is important. Some (broken) drivers set IFF_PROMISC when
 8674 * IFF_ALLMULTI is requested, without asking us and without reporting.
1da177e4
LT
8675 */
8676 if ((flags ^ dev->gflags) & IFF_ALLMULTI) {
d1b19dff
ED
8677 int inc = (flags & IFF_ALLMULTI) ? 1 : -1;
8678
1da177e4 8679 dev->gflags ^= IFF_ALLMULTI;
991fb3f7 8680 __dev_set_allmulti(dev, inc, false);
1da177e4
LT
8681 }
8682
bd380811
PM
8683 return ret;
8684}
8685
a528c219
ND
8686void __dev_notify_flags(struct net_device *dev, unsigned int old_flags,
8687 unsigned int gchanges)
bd380811
PM
8688{
8689 unsigned int changes = dev->flags ^ old_flags;
8690
a528c219 8691 if (gchanges)
7f294054 8692 rtmsg_ifinfo(RTM_NEWLINK, dev, gchanges, GFP_ATOMIC);
a528c219 8693
bd380811
PM
8694 if (changes & IFF_UP) {
8695 if (dev->flags & IFF_UP)
8696 call_netdevice_notifiers(NETDEV_UP, dev);
8697 else
8698 call_netdevice_notifiers(NETDEV_DOWN, dev);
8699 }
8700
8701 if (dev->flags & IFF_UP &&
be9efd36 8702 (changes & ~(IFF_UP | IFF_PROMISC | IFF_ALLMULTI | IFF_VOLATILE))) {
51d0c047
DA
8703 struct netdev_notifier_change_info change_info = {
8704 .info = {
8705 .dev = dev,
8706 },
8707 .flags_changed = changes,
8708 };
be9efd36 8709
51d0c047 8710 call_netdevice_notifiers_info(NETDEV_CHANGE, &change_info.info);
be9efd36 8711 }
bd380811
PM
8712}
8713
8714/**
8715 * dev_change_flags - change device settings
8716 * @dev: device
8717 * @flags: device state flags
567c5e13 8718 * @extack: netlink extended ack
bd380811
PM
8719 *
8720 * Change settings on a device based on state flags. The flags are
8721 * in the userspace exported format.
8722 */
567c5e13
PM
8723int dev_change_flags(struct net_device *dev, unsigned int flags,
8724 struct netlink_ext_ack *extack)
bd380811 8725{
b536db93 8726 int ret;
991fb3f7 8727 unsigned int changes, old_flags = dev->flags, old_gflags = dev->gflags;
bd380811 8728
6d040321 8729 ret = __dev_change_flags(dev, flags, extack);
bd380811
PM
8730 if (ret < 0)
8731 return ret;
8732
991fb3f7 8733 changes = (old_flags ^ dev->flags) | (old_gflags ^ dev->gflags);
a528c219 8734 __dev_notify_flags(dev, old_flags, changes);
1da177e4
LT
8735 return ret;
8736}
d1b19dff 8737EXPORT_SYMBOL(dev_change_flags);
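A sketch of an administrative up/down toggle in the style of an ioctl handler, preserving all other flag bits; my_set_admin_up is illustrative and passes a NULL extack, as many in-kernel callers do:

#include <linux/netdevice.h>
#include <linux/rtnetlink.h>

static int my_set_admin_up(struct net_device *dev, bool up)
{
	unsigned int flags;

	ASSERT_RTNL();
	flags = dev_get_flags(dev);
	flags = up ? (flags | IFF_UP) : (flags & ~IFF_UP);
	return dev_change_flags(dev, flags, NULL);
}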

int __dev_set_mtu(struct net_device *dev, int new_mtu)
{
	const struct net_device_ops *ops = dev->netdev_ops;

	if (ops->ndo_change_mtu)
		return ops->ndo_change_mtu(dev, new_mtu);

	/* Pairs with all the lockless reads of dev->mtu in the stack */
	WRITE_ONCE(dev->mtu, new_mtu);
	return 0;
}
EXPORT_SYMBOL(__dev_set_mtu);

int dev_validate_mtu(struct net_device *dev, int new_mtu,
		     struct netlink_ext_ack *extack)
{
	/* MTU must be positive, and in range */
	if (new_mtu < 0 || new_mtu < dev->min_mtu) {
		NL_SET_ERR_MSG(extack, "mtu less than device minimum");
		return -EINVAL;
	}

	if (dev->max_mtu > 0 && new_mtu > dev->max_mtu) {
		NL_SET_ERR_MSG(extack, "mtu greater than device maximum");
		return -EINVAL;
	}
	return 0;
}

/**
 * dev_set_mtu_ext - Change maximum transmission unit
 * @dev: device
 * @new_mtu: new MTU value
 * @extack: netlink extended ack
 *
 * Change the maximum transmission unit of the network device.
 */
int dev_set_mtu_ext(struct net_device *dev, int new_mtu,
		    struct netlink_ext_ack *extack)
{
	int err, orig_mtu;

	if (new_mtu == dev->mtu)
		return 0;

	err = dev_validate_mtu(dev, new_mtu, extack);
	if (err)
		return err;

	if (!netif_device_present(dev))
		return -ENODEV;

	err = call_netdevice_notifiers(NETDEV_PRECHANGEMTU, dev);
	err = notifier_to_errno(err);
	if (err)
		return err;

	orig_mtu = dev->mtu;
	err = __dev_set_mtu(dev, new_mtu);

	if (!err) {
		err = call_netdevice_notifiers_mtu(NETDEV_CHANGEMTU, dev,
						   orig_mtu);
		err = notifier_to_errno(err);
		if (err) {
			/* setting mtu back and notifying everyone again,
			 * so that they have a chance to revert changes.
			 */
			__dev_set_mtu(dev, orig_mtu);
			call_netdevice_notifiers_mtu(NETDEV_CHANGEMTU, dev,
						     new_mtu);
		}
	}
	return err;
}

int dev_set_mtu(struct net_device *dev, int new_mtu)
{
	struct netlink_ext_ack extack;
	int err;

	memset(&extack, 0, sizeof(extack));
	err = dev_set_mtu_ext(dev, new_mtu, &extack);
	if (err && extack._msg)
		net_err_ratelimited("%s: %s\n", dev->name, extack._msg);
	return err;
}
EXPORT_SYMBOL(dev_set_mtu);
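
/* Illustrative sketch (an assumption, not upstream code): a driver typically
 * publishes its MTU bounds at probe time, and dev_set_mtu() then enforces
 * them via dev_validate_mtu() before ndo_change_mtu() is ever called:
 *
 *	dev->min_mtu = ETH_MIN_MTU;
 *	dev->max_mtu = 9000;		// hypothetical jumbo-capable hardware
 *	...
 *	err = dev_set_mtu(dev, 9000);	// ok
 *	err = dev_set_mtu(dev, 9216);	// -EINVAL, "mtu greater than device maximum"
 */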

/**
 * dev_change_tx_queue_len - Change TX queue length of a netdevice
 * @dev: device
 * @new_len: new tx queue length
 */
int dev_change_tx_queue_len(struct net_device *dev, unsigned long new_len)
{
	unsigned int orig_len = dev->tx_queue_len;
	int res;

	if (new_len != (unsigned int)new_len)
		return -ERANGE;

	if (new_len != orig_len) {
		dev->tx_queue_len = new_len;
		res = call_netdevice_notifiers(NETDEV_CHANGE_TX_QUEUE_LEN, dev);
		res = notifier_to_errno(res);
		if (res)
			goto err_rollback;
		res = dev_qdisc_change_tx_queue_len(dev);
		if (res)
			goto err_rollback;
	}

	return 0;

err_rollback:
	netdev_err(dev, "refused to change device tx_queue_len\n");
	dev->tx_queue_len = orig_len;
	return res;
}

/**
 * dev_set_group - Change group this device belongs to
 * @dev: device
 * @new_group: group this device should belong to
 */
void dev_set_group(struct net_device *dev, int new_group)
{
	dev->group = new_group;
}
EXPORT_SYMBOL(dev_set_group);

/**
 * dev_pre_changeaddr_notify - Call NETDEV_PRE_CHANGEADDR.
 * @dev: device
 * @addr: new address
 * @extack: netlink extended ack
 */
int dev_pre_changeaddr_notify(struct net_device *dev, const char *addr,
			      struct netlink_ext_ack *extack)
{
	struct netdev_notifier_pre_changeaddr_info info = {
		.info.dev = dev,
		.info.extack = extack,
		.dev_addr = addr,
	};
	int rc;

	rc = call_netdevice_notifiers_info(NETDEV_PRE_CHANGEADDR, &info.info);
	return notifier_to_errno(rc);
}
EXPORT_SYMBOL(dev_pre_changeaddr_notify);

/**
 * dev_set_mac_address - Change Media Access Control Address
 * @dev: device
 * @sa: new address
 * @extack: netlink extended ack
 *
 * Change the hardware (MAC) address of the device
 */
int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa,
			struct netlink_ext_ack *extack)
{
	const struct net_device_ops *ops = dev->netdev_ops;
	int err;

	if (!ops->ndo_set_mac_address)
		return -EOPNOTSUPP;
	if (sa->sa_family != dev->type)
		return -EINVAL;
	if (!netif_device_present(dev))
		return -ENODEV;
	err = dev_pre_changeaddr_notify(dev, sa->sa_data, extack);
	if (err)
		return err;
	err = ops->ndo_set_mac_address(dev, sa);
	if (err)
		return err;
	dev->addr_assign_type = NET_ADDR_SET;
	call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
	add_device_randomness(dev->dev_addr, dev->addr_len);
	return 0;
}
EXPORT_SYMBOL(dev_set_mac_address);

static DECLARE_RWSEM(dev_addr_sem);

int dev_set_mac_address_user(struct net_device *dev, struct sockaddr *sa,
			     struct netlink_ext_ack *extack)
{
	int ret;

	down_write(&dev_addr_sem);
	ret = dev_set_mac_address(dev, sa, extack);
	up_write(&dev_addr_sem);
	return ret;
}
EXPORT_SYMBOL(dev_set_mac_address_user);

int dev_get_mac_address(struct sockaddr *sa, struct net *net, char *dev_name)
{
	size_t size = sizeof(sa->sa_data);
	struct net_device *dev;
	int ret = 0;

	down_read(&dev_addr_sem);
	rcu_read_lock();

	dev = dev_get_by_name_rcu(net, dev_name);
	if (!dev) {
		ret = -ENODEV;
		goto unlock;
	}
	if (!dev->addr_len)
		memset(sa->sa_data, 0, size);
	else
		memcpy(sa->sa_data, dev->dev_addr,
		       min_t(size_t, size, dev->addr_len));
	sa->sa_family = dev->type;

unlock:
	rcu_read_unlock();
	up_read(&dev_addr_sem);
	return ret;
}
EXPORT_SYMBOL(dev_get_mac_address);
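
/* Illustrative sketch (not upstream code): dev_get_mac_address() serves the
 * SIOCGIFHWADDR path. A hypothetical in-kernel caller passes a plain
 * struct sockaddr and a device name, and gets type plus address back:
 *
 *	struct sockaddr sa;
 *
 *	if (!dev_get_mac_address(&sa, net, "eth0"))
 *		pr_info("type %d addr %pM\n", sa.sa_family, sa.sa_data);
 */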

/**
 * dev_change_carrier - Change device carrier
 * @dev: device
 * @new_carrier: new value
 *
 * Change device carrier
 */
int dev_change_carrier(struct net_device *dev, bool new_carrier)
{
	const struct net_device_ops *ops = dev->netdev_ops;

	if (!ops->ndo_change_carrier)
		return -EOPNOTSUPP;
	if (!netif_device_present(dev))
		return -ENODEV;
	return ops->ndo_change_carrier(dev, new_carrier);
}
EXPORT_SYMBOL(dev_change_carrier);

/**
 * dev_get_phys_port_id - Get device physical port ID
 * @dev: device
 * @ppid: port ID
 *
 * Get device physical port ID
 */
int dev_get_phys_port_id(struct net_device *dev,
			 struct netdev_phys_item_id *ppid)
{
	const struct net_device_ops *ops = dev->netdev_ops;

	if (!ops->ndo_get_phys_port_id)
		return -EOPNOTSUPP;
	return ops->ndo_get_phys_port_id(dev, ppid);
}
EXPORT_SYMBOL(dev_get_phys_port_id);

/**
 * dev_get_phys_port_name - Get device physical port name
 * @dev: device
 * @name: port name
 * @len: limit of bytes to copy to name
 *
 * Get device physical port name
 */
int dev_get_phys_port_name(struct net_device *dev,
			   char *name, size_t len)
{
	const struct net_device_ops *ops = dev->netdev_ops;
	int err;

	if (ops->ndo_get_phys_port_name) {
		err = ops->ndo_get_phys_port_name(dev, name, len);
		if (err != -EOPNOTSUPP)
			return err;
	}
	return devlink_compat_phys_port_name_get(dev, name, len);
}
EXPORT_SYMBOL(dev_get_phys_port_name);

/**
 * dev_get_port_parent_id - Get the device's port parent identifier
 * @dev: network device
 * @ppid: pointer to a storage for the port's parent identifier
 * @recurse: allow/disallow recursion to lower devices
 *
 * Get the device's port parent identifier
 */
int dev_get_port_parent_id(struct net_device *dev,
			   struct netdev_phys_item_id *ppid,
			   bool recurse)
{
	const struct net_device_ops *ops = dev->netdev_ops;
	struct netdev_phys_item_id first = { };
	struct net_device *lower_dev;
	struct list_head *iter;
	int err;

	if (ops->ndo_get_port_parent_id) {
		err = ops->ndo_get_port_parent_id(dev, ppid);
		if (err != -EOPNOTSUPP)
			return err;
	}

	err = devlink_compat_switch_id_get(dev, ppid);
	if (!err || err != -EOPNOTSUPP)
		return err;

	if (!recurse)
		return -EOPNOTSUPP;

	netdev_for_each_lower_dev(dev, lower_dev, iter) {
		err = dev_get_port_parent_id(lower_dev, ppid, recurse);
		if (err)
			break;
		if (!first.id_len)
			first = *ppid;
		else if (memcmp(&first, ppid, sizeof(*ppid)))
			return -EOPNOTSUPP;
	}

	return err;
}
EXPORT_SYMBOL(dev_get_port_parent_id);

/**
 * netdev_port_same_parent_id - Indicate if two network devices have
 *	the same port parent identifier
 * @a: first network device
 * @b: second network device
 */
bool netdev_port_same_parent_id(struct net_device *a, struct net_device *b)
{
	struct netdev_phys_item_id a_id = { };
	struct netdev_phys_item_id b_id = { };

	if (dev_get_port_parent_id(a, &a_id, true) ||
	    dev_get_port_parent_id(b, &b_id, true))
		return false;

	return netdev_phys_item_id_same(&a_id, &b_id);
}
EXPORT_SYMBOL(netdev_port_same_parent_id);

/**
 * dev_change_proto_down - update protocol port state information
 * @dev: device
 * @proto_down: new value
 *
 * This info can be used by switch drivers to set the phys state of the
 * port.
 */
int dev_change_proto_down(struct net_device *dev, bool proto_down)
{
	const struct net_device_ops *ops = dev->netdev_ops;

	if (!ops->ndo_change_proto_down)
		return -EOPNOTSUPP;
	if (!netif_device_present(dev))
		return -ENODEV;
	return ops->ndo_change_proto_down(dev, proto_down);
}
EXPORT_SYMBOL(dev_change_proto_down);

/**
 * dev_change_proto_down_generic - generic implementation for
 *	ndo_change_proto_down that sets carrier according to
 *	proto_down.
 *
 * @dev: device
 * @proto_down: new value
 */
int dev_change_proto_down_generic(struct net_device *dev, bool proto_down)
{
	if (proto_down)
		netif_carrier_off(dev);
	else
		netif_carrier_on(dev);
	dev->proto_down = proto_down;
	return 0;
}
EXPORT_SYMBOL(dev_change_proto_down_generic);
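
/* Illustrative sketch (hypothetical driver, an assumption): a driver with no
 * special PHY handling can point its ndo at the generic helper, so that
 * "ip link set dev ... protodown on" simply drops carrier:
 *
 *	static const struct net_device_ops foo_netdev_ops = {
 *		...
 *		.ndo_change_proto_down = dev_change_proto_down_generic,
 *	};
 */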

/**
 * dev_change_proto_down_reason - proto down reason
 *
 * @dev: device
 * @mask: proto down mask
 * @value: proto down value
 */
void dev_change_proto_down_reason(struct net_device *dev, unsigned long mask,
				  u32 value)
{
	int b;

	if (!mask) {
		dev->proto_down_reason = value;
	} else {
		for_each_set_bit(b, &mask, 32) {
			if (value & (1 << b))
				dev->proto_down_reason |= BIT(b);
			else
				dev->proto_down_reason &= ~BIT(b);
		}
	}
}
EXPORT_SYMBOL(dev_change_proto_down_reason);
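
/* Worked example (values chosen for illustration): with mask = 0x6 and
 * value = 0x2, the loop visits bits 1 and 2 only. Bit 1 is set in value,
 * so it is set in proto_down_reason; bit 2 is clear in value, so it is
 * cleared; every bit outside the mask keeps its previous state. A zero
 * mask instead overwrites the whole word with value.
 */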

struct bpf_xdp_link {
	struct bpf_link link;
	struct net_device *dev; /* protected by rtnl_lock, no refcnt held */
	int flags;
};

static enum bpf_xdp_mode dev_xdp_mode(struct net_device *dev, u32 flags)
{
	if (flags & XDP_FLAGS_HW_MODE)
		return XDP_MODE_HW;
	if (flags & XDP_FLAGS_DRV_MODE)
		return XDP_MODE_DRV;
	if (flags & XDP_FLAGS_SKB_MODE)
		return XDP_MODE_SKB;
	return dev->netdev_ops->ndo_bpf ? XDP_MODE_DRV : XDP_MODE_SKB;
}
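
/* Example of the resolution order above (illustrative): the flags are
 * checked HW, then DRV, then SKB, and with no mode flag set a driver that
 * implements ndo_bpf gets native mode while one that doesn't falls back to
 * generic XDP:
 *
 *	dev_xdp_mode(dev, XDP_FLAGS_SKB_MODE) == XDP_MODE_SKB
 *	dev_xdp_mode(dev, 0) == XDP_MODE_DRV	// when dev->netdev_ops->ndo_bpf is set
 *	dev_xdp_mode(dev, 0) == XDP_MODE_SKB	// otherwise
 */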

static bpf_op_t dev_xdp_bpf_op(struct net_device *dev, enum bpf_xdp_mode mode)
{
	switch (mode) {
	case XDP_MODE_SKB:
		return generic_xdp_install;
	case XDP_MODE_DRV:
	case XDP_MODE_HW:
		return dev->netdev_ops->ndo_bpf;
	default:
		return NULL;
	}
}

static struct bpf_xdp_link *dev_xdp_link(struct net_device *dev,
					 enum bpf_xdp_mode mode)
{
	return dev->xdp_state[mode].link;
}

static struct bpf_prog *dev_xdp_prog(struct net_device *dev,
				     enum bpf_xdp_mode mode)
{
	struct bpf_xdp_link *link = dev_xdp_link(dev, mode);

	if (link)
		return link->link.prog;
	return dev->xdp_state[mode].prog;
}

static u8 dev_xdp_prog_count(struct net_device *dev)
{
	u8 count = 0;
	int i;

	for (i = 0; i < __MAX_XDP_MODE; i++)
		if (dev->xdp_state[i].prog || dev->xdp_state[i].link)
			count++;
	return count;
}

u32 dev_xdp_prog_id(struct net_device *dev, enum bpf_xdp_mode mode)
{
	struct bpf_prog *prog = dev_xdp_prog(dev, mode);

	return prog ? prog->aux->id : 0;
}

static void dev_xdp_set_link(struct net_device *dev, enum bpf_xdp_mode mode,
			     struct bpf_xdp_link *link)
{
	dev->xdp_state[mode].link = link;
	dev->xdp_state[mode].prog = NULL;
}

static void dev_xdp_set_prog(struct net_device *dev, enum bpf_xdp_mode mode,
			     struct bpf_prog *prog)
{
	dev->xdp_state[mode].link = NULL;
	dev->xdp_state[mode].prog = prog;
}

static int dev_xdp_install(struct net_device *dev, enum bpf_xdp_mode mode,
			   bpf_op_t bpf_op, struct netlink_ext_ack *extack,
			   u32 flags, struct bpf_prog *prog)
{
	struct netdev_bpf xdp;
	int err;

	memset(&xdp, 0, sizeof(xdp));
	xdp.command = mode == XDP_MODE_HW ? XDP_SETUP_PROG_HW : XDP_SETUP_PROG;
	xdp.extack = extack;
	xdp.flags = flags;
	xdp.prog = prog;

	/* Drivers assume refcnt is already incremented (i.e., prog pointer is
	 * "moved" into driver), so they don't increment it on their own, but
	 * they do decrement refcnt when program is detached or replaced.
	 * Given net_device also owns link/prog, we need to bump refcnt here
	 * to prevent drivers from underflowing it.
	 */
	if (prog)
		bpf_prog_inc(prog);
	err = bpf_op(dev, &xdp);
	if (err) {
		if (prog)
			bpf_prog_put(prog);
		return err;
	}

	if (mode != XDP_MODE_HW)
		bpf_prog_change_xdp(dev_xdp_prog(dev, mode), prog);

	return 0;
}

static void dev_xdp_uninstall(struct net_device *dev)
{
	struct bpf_xdp_link *link;
	struct bpf_prog *prog;
	enum bpf_xdp_mode mode;
	bpf_op_t bpf_op;

	ASSERT_RTNL();

	for (mode = XDP_MODE_SKB; mode < __MAX_XDP_MODE; mode++) {
		prog = dev_xdp_prog(dev, mode);
		if (!prog)
			continue;

		bpf_op = dev_xdp_bpf_op(dev, mode);
		if (!bpf_op)
			continue;

		WARN_ON(dev_xdp_install(dev, mode, bpf_op, NULL, 0, NULL));

		/* auto-detach link from net device */
		link = dev_xdp_link(dev, mode);
		if (link)
			link->dev = NULL;
		else
			bpf_prog_put(prog);

		dev_xdp_set_link(dev, mode, NULL);
	}
}

static int dev_xdp_attach(struct net_device *dev, struct netlink_ext_ack *extack,
			  struct bpf_xdp_link *link, struct bpf_prog *new_prog,
			  struct bpf_prog *old_prog, u32 flags)
{
	unsigned int num_modes = hweight32(flags & XDP_FLAGS_MODES);
	struct bpf_prog *cur_prog;
	enum bpf_xdp_mode mode;
	bpf_op_t bpf_op;
	int err;

	ASSERT_RTNL();

	/* either link or prog attachment, never both */
	if (link && (new_prog || old_prog))
		return -EINVAL;
	/* link supports only XDP mode flags */
	if (link && (flags & ~XDP_FLAGS_MODES)) {
		NL_SET_ERR_MSG(extack, "Invalid XDP flags for BPF link attachment");
		return -EINVAL;
	}
	/* just one XDP mode bit should be set, zero defaults to drv/skb mode */
	if (num_modes > 1) {
		NL_SET_ERR_MSG(extack, "Only one XDP mode flag can be set");
		return -EINVAL;
	}
	/* avoid ambiguity if offload + drv/skb mode progs are both loaded */
	if (!num_modes && dev_xdp_prog_count(dev) > 1) {
		NL_SET_ERR_MSG(extack,
			       "More than one program loaded, unset mode is ambiguous");
		return -EINVAL;
	}
	/* old_prog != NULL implies XDP_FLAGS_REPLACE is set */
	if (old_prog && !(flags & XDP_FLAGS_REPLACE)) {
		NL_SET_ERR_MSG(extack, "XDP_FLAGS_REPLACE is not specified");
		return -EINVAL;
	}

	mode = dev_xdp_mode(dev, flags);
	/* can't replace attached link */
	if (dev_xdp_link(dev, mode)) {
		NL_SET_ERR_MSG(extack, "Can't replace active BPF XDP link");
		return -EBUSY;
	}

	cur_prog = dev_xdp_prog(dev, mode);
	/* can't replace attached prog with link */
	if (link && cur_prog) {
		NL_SET_ERR_MSG(extack, "Can't replace active XDP program with BPF link");
		return -EBUSY;
	}
	if ((flags & XDP_FLAGS_REPLACE) && cur_prog != old_prog) {
		NL_SET_ERR_MSG(extack, "Active program does not match expected");
		return -EEXIST;
	}

	/* put effective new program into new_prog */
	if (link)
		new_prog = link->link.prog;

	if (new_prog) {
		bool offload = mode == XDP_MODE_HW;
		enum bpf_xdp_mode other_mode = mode == XDP_MODE_SKB
					       ? XDP_MODE_DRV : XDP_MODE_SKB;

		if ((flags & XDP_FLAGS_UPDATE_IF_NOEXIST) && cur_prog) {
			NL_SET_ERR_MSG(extack, "XDP program already attached");
			return -EBUSY;
		}
		if (!offload && dev_xdp_prog(dev, other_mode)) {
			NL_SET_ERR_MSG(extack, "Native and generic XDP can't be active at the same time");
			return -EEXIST;
		}
		if (!offload && bpf_prog_is_dev_bound(new_prog->aux)) {
			NL_SET_ERR_MSG(extack, "Using device-bound program without HW_MODE flag is not supported");
			return -EINVAL;
		}
		if (new_prog->expected_attach_type == BPF_XDP_DEVMAP) {
			NL_SET_ERR_MSG(extack, "BPF_XDP_DEVMAP programs can not be attached to a device");
			return -EINVAL;
		}
		if (new_prog->expected_attach_type == BPF_XDP_CPUMAP) {
			NL_SET_ERR_MSG(extack, "BPF_XDP_CPUMAP programs can not be attached to a device");
			return -EINVAL;
		}
	}

	/* don't call drivers if the effective program didn't change */
	if (new_prog != cur_prog) {
		bpf_op = dev_xdp_bpf_op(dev, mode);
		if (!bpf_op) {
			NL_SET_ERR_MSG(extack, "Underlying driver does not support XDP in native mode");
			return -EOPNOTSUPP;
		}

		err = dev_xdp_install(dev, mode, bpf_op, extack, flags, new_prog);
		if (err)
			return err;
	}

	if (link)
		dev_xdp_set_link(dev, mode, link);
	else
		dev_xdp_set_prog(dev, mode, new_prog);
	if (cur_prog)
		bpf_prog_put(cur_prog);

	return 0;
}

static int dev_xdp_attach_link(struct net_device *dev,
			       struct netlink_ext_ack *extack,
			       struct bpf_xdp_link *link)
{
	return dev_xdp_attach(dev, extack, link, NULL, NULL, link->flags);
}

static int dev_xdp_detach_link(struct net_device *dev,
			       struct netlink_ext_ack *extack,
			       struct bpf_xdp_link *link)
{
	enum bpf_xdp_mode mode;
	bpf_op_t bpf_op;

	ASSERT_RTNL();

	mode = dev_xdp_mode(dev, link->flags);
	if (dev_xdp_link(dev, mode) != link)
		return -EINVAL;

	bpf_op = dev_xdp_bpf_op(dev, mode);
	WARN_ON(dev_xdp_install(dev, mode, bpf_op, NULL, 0, NULL));
	dev_xdp_set_link(dev, mode, NULL);
	return 0;
}

static void bpf_xdp_link_release(struct bpf_link *link)
{
	struct bpf_xdp_link *xdp_link = container_of(link, struct bpf_xdp_link, link);

	rtnl_lock();

	/* if racing with net_device's tear down, xdp_link->dev might be
	 * already NULL, in which case link was already auto-detached
	 */
	if (xdp_link->dev) {
		WARN_ON(dev_xdp_detach_link(xdp_link->dev, NULL, xdp_link));
		xdp_link->dev = NULL;
	}

	rtnl_unlock();
}

static int bpf_xdp_link_detach(struct bpf_link *link)
{
	bpf_xdp_link_release(link);
	return 0;
}

static void bpf_xdp_link_dealloc(struct bpf_link *link)
{
	struct bpf_xdp_link *xdp_link = container_of(link, struct bpf_xdp_link, link);

	kfree(xdp_link);
}

static void bpf_xdp_link_show_fdinfo(const struct bpf_link *link,
				     struct seq_file *seq)
{
	struct bpf_xdp_link *xdp_link = container_of(link, struct bpf_xdp_link, link);
	u32 ifindex = 0;

	rtnl_lock();
	if (xdp_link->dev)
		ifindex = xdp_link->dev->ifindex;
	rtnl_unlock();

	seq_printf(seq, "ifindex:\t%u\n", ifindex);
}

static int bpf_xdp_link_fill_link_info(const struct bpf_link *link,
				       struct bpf_link_info *info)
{
	struct bpf_xdp_link *xdp_link = container_of(link, struct bpf_xdp_link, link);
	u32 ifindex = 0;

	rtnl_lock();
	if (xdp_link->dev)
		ifindex = xdp_link->dev->ifindex;
	rtnl_unlock();

	info->xdp.ifindex = ifindex;
	return 0;
}

static int bpf_xdp_link_update(struct bpf_link *link, struct bpf_prog *new_prog,
			       struct bpf_prog *old_prog)
{
	struct bpf_xdp_link *xdp_link = container_of(link, struct bpf_xdp_link, link);
	enum bpf_xdp_mode mode;
	bpf_op_t bpf_op;
	int err = 0;

	rtnl_lock();

	/* link might have been auto-released already, so fail */
	if (!xdp_link->dev) {
		err = -ENOLINK;
		goto out_unlock;
	}

	if (old_prog && link->prog != old_prog) {
		err = -EPERM;
		goto out_unlock;
	}
	old_prog = link->prog;
	if (old_prog == new_prog) {
		/* no-op, don't disturb drivers */
		bpf_prog_put(new_prog);
		goto out_unlock;
	}

	mode = dev_xdp_mode(xdp_link->dev, xdp_link->flags);
	bpf_op = dev_xdp_bpf_op(xdp_link->dev, mode);
	err = dev_xdp_install(xdp_link->dev, mode, bpf_op, NULL,
			      xdp_link->flags, new_prog);
	if (err)
		goto out_unlock;

	old_prog = xchg(&link->prog, new_prog);
	bpf_prog_put(old_prog);

out_unlock:
	rtnl_unlock();
	return err;
}

static const struct bpf_link_ops bpf_xdp_link_lops = {
	.release = bpf_xdp_link_release,
	.dealloc = bpf_xdp_link_dealloc,
	.detach = bpf_xdp_link_detach,
	.show_fdinfo = bpf_xdp_link_show_fdinfo,
	.fill_link_info = bpf_xdp_link_fill_link_info,
	.update_prog = bpf_xdp_link_update,
};

int bpf_xdp_link_attach(const union bpf_attr *attr, struct bpf_prog *prog)
{
	struct net *net = current->nsproxy->net_ns;
	struct bpf_link_primer link_primer;
	struct bpf_xdp_link *link;
	struct net_device *dev;
	int err, fd;

	dev = dev_get_by_index(net, attr->link_create.target_ifindex);
	if (!dev)
		return -EINVAL;

	link = kzalloc(sizeof(*link), GFP_USER);
	if (!link) {
		err = -ENOMEM;
		goto out_put_dev;
	}

	bpf_link_init(&link->link, BPF_LINK_TYPE_XDP, &bpf_xdp_link_lops, prog);
	link->dev = dev;
	link->flags = attr->link_create.flags;

	err = bpf_link_prime(&link->link, &link_primer);
	if (err) {
		kfree(link);
		goto out_put_dev;
	}

	rtnl_lock();
	err = dev_xdp_attach_link(dev, NULL, link);
	rtnl_unlock();

	if (err) {
		bpf_link_cleanup(&link_primer);
		goto out_put_dev;
	}

	fd = bpf_link_settle(&link_primer);
	/* link itself doesn't hold dev's refcnt to not complicate shutdown */
	dev_put(dev);
	return fd;

out_put_dev:
	dev_put(dev);
	return err;
}

/**
 * dev_change_xdp_fd - set or clear a bpf program for a device rx path
 * @dev: device
 * @extack: netlink extended ack
 * @fd: new program fd or negative value to clear
 * @expected_fd: old program fd that userspace expects to replace or clear
 * @flags: xdp-related flags
 *
 * Set or clear a bpf program for a device
 */
int dev_change_xdp_fd(struct net_device *dev, struct netlink_ext_ack *extack,
		      int fd, int expected_fd, u32 flags)
{
	enum bpf_xdp_mode mode = dev_xdp_mode(dev, flags);
	struct bpf_prog *new_prog = NULL, *old_prog = NULL;
	int err;

	ASSERT_RTNL();

	if (fd >= 0) {
		new_prog = bpf_prog_get_type_dev(fd, BPF_PROG_TYPE_XDP,
						 mode != XDP_MODE_SKB);
		if (IS_ERR(new_prog))
			return PTR_ERR(new_prog);
	}

	if (expected_fd >= 0) {
		old_prog = bpf_prog_get_type_dev(expected_fd, BPF_PROG_TYPE_XDP,
						 mode != XDP_MODE_SKB);
		if (IS_ERR(old_prog)) {
			err = PTR_ERR(old_prog);
			old_prog = NULL;
			goto err_out;
		}
	}

	err = dev_xdp_attach(dev, extack, NULL, new_prog, old_prog, flags);

err_out:
	if (err && new_prog)
		bpf_prog_put(new_prog);
	if (old_prog)
		bpf_prog_put(old_prog);
	return err;
}
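
/* Illustrative sketch (not upstream code): rtnetlink's IFLA_XDP handling is
 * the real caller. Detaching any driver-mode program would look roughly
 * like this (a negative fd means "clear"):
 *
 *	ASSERT_RTNL();
 *	err = dev_change_xdp_fd(dev, NULL, -1, -1, XDP_FLAGS_DRV_MODE);
 */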

/**
 * dev_new_index - allocate an ifindex
 * @net: the applicable net namespace
 *
 * Returns a suitable unique value for a new device interface
 * number. The caller must hold the rtnl semaphore or the
 * dev_base_lock to be sure it remains unique.
 */
static int dev_new_index(struct net *net)
{
	int ifindex = net->ifindex;

	for (;;) {
		if (++ifindex <= 0)
			ifindex = 1;
		if (!__dev_get_by_index(net, ifindex))
			return net->ifindex = ifindex;
	}
}

/* Delayed registration/unregistration */
static LIST_HEAD(net_todo_list);
DECLARE_WAIT_QUEUE_HEAD(netdev_unregistering_wq);

static void net_set_todo(struct net_device *dev)
{
	list_add_tail(&dev->todo_list, &net_todo_list);
	dev_net(dev)->dev_unreg_count++;
}

static netdev_features_t netdev_sync_upper_features(struct net_device *lower,
	struct net_device *upper, netdev_features_t features)
{
	netdev_features_t upper_disables = NETIF_F_UPPER_DISABLES;
	netdev_features_t feature;
	int feature_bit;

	for_each_netdev_feature(upper_disables, feature_bit) {
		feature = __NETIF_F_BIT(feature_bit);
		if (!(upper->wanted_features & feature)
		    && (features & feature)) {
			netdev_dbg(lower, "Dropping feature %pNF, upper dev %s has it off.\n",
				   &feature, upper->name);
			features &= ~feature;
		}
	}

	return features;
}

static void netdev_sync_lower_features(struct net_device *upper,
	struct net_device *lower, netdev_features_t features)
{
	netdev_features_t upper_disables = NETIF_F_UPPER_DISABLES;
	netdev_features_t feature;
	int feature_bit;

	for_each_netdev_feature(upper_disables, feature_bit) {
		feature = __NETIF_F_BIT(feature_bit);
		if (!(features & feature) && (lower->features & feature)) {
			netdev_dbg(upper, "Disabling feature %pNF on lower dev %s.\n",
				   &feature, lower->name);
			lower->wanted_features &= ~feature;
			__netdev_update_features(lower);

			if (unlikely(lower->features & feature))
				netdev_WARN(upper, "failed to disable %pNF on %s!\n",
					    &feature, lower->name);
			else
				netdev_features_change(lower);
		}
	}
}

static netdev_features_t netdev_fix_features(struct net_device *dev,
	netdev_features_t features)
{
	/* Fix illegal checksum combinations */
	if ((features & NETIF_F_HW_CSUM) &&
	    (features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
		netdev_warn(dev, "mixed HW and IP checksum settings.\n");
		features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM);
	}

	/* TSO requires that SG is present as well. */
	if ((features & NETIF_F_ALL_TSO) && !(features & NETIF_F_SG)) {
		netdev_dbg(dev, "Dropping TSO features since no SG feature.\n");
		features &= ~NETIF_F_ALL_TSO;
	}

	if ((features & NETIF_F_TSO) && !(features & NETIF_F_HW_CSUM) &&
	    !(features & NETIF_F_IP_CSUM)) {
		netdev_dbg(dev, "Dropping TSO features since no CSUM feature.\n");
		features &= ~NETIF_F_TSO;
		features &= ~NETIF_F_TSO_ECN;
	}

	if ((features & NETIF_F_TSO6) && !(features & NETIF_F_HW_CSUM) &&
	    !(features & NETIF_F_IPV6_CSUM)) {
		netdev_dbg(dev, "Dropping TSO6 features since no CSUM feature.\n");
		features &= ~NETIF_F_TSO6;
	}

	/* TSO with IPv4 ID mangling requires IPv4 TSO be enabled */
	if ((features & NETIF_F_TSO_MANGLEID) && !(features & NETIF_F_TSO))
		features &= ~NETIF_F_TSO_MANGLEID;

	/* TSO ECN requires that TSO is present as well. */
	if ((features & NETIF_F_ALL_TSO) == NETIF_F_TSO_ECN)
		features &= ~NETIF_F_TSO_ECN;

	/* Software GSO depends on SG. */
	if ((features & NETIF_F_GSO) && !(features & NETIF_F_SG)) {
		netdev_dbg(dev, "Dropping NETIF_F_GSO since no SG feature.\n");
		features &= ~NETIF_F_GSO;
	}

	/* GSO partial features require GSO partial be set */
	if ((features & dev->gso_partial_features) &&
	    !(features & NETIF_F_GSO_PARTIAL)) {
		netdev_dbg(dev,
			   "Dropping partially supported GSO features since no GSO partial.\n");
		features &= ~dev->gso_partial_features;
	}

	if (!(features & NETIF_F_RXCSUM)) {
		/* NETIF_F_GRO_HW implies doing RXCSUM since every packet
		 * successfully merged by hardware must also have the
		 * checksum verified by hardware. If the user does not
		 * want to enable RXCSUM, logically, we should disable GRO_HW.
		 */
		if (features & NETIF_F_GRO_HW) {
			netdev_dbg(dev, "Dropping NETIF_F_GRO_HW since no RXCSUM feature.\n");
			features &= ~NETIF_F_GRO_HW;
		}
	}

	/* LRO/HW-GRO features cannot be combined with RX-FCS */
	if (features & NETIF_F_RXFCS) {
		if (features & NETIF_F_LRO) {
			netdev_dbg(dev, "Dropping LRO feature since RX-FCS is requested.\n");
			features &= ~NETIF_F_LRO;
		}

		if (features & NETIF_F_GRO_HW) {
			netdev_dbg(dev, "Dropping HW-GRO feature since RX-FCS is requested.\n");
			features &= ~NETIF_F_GRO_HW;
		}
	}

	if (features & NETIF_F_HW_TLS_TX) {
		bool ip_csum = (features & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)) ==
			(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
		bool hw_csum = features & NETIF_F_HW_CSUM;

		if (!ip_csum && !hw_csum) {
			netdev_dbg(dev, "Dropping TLS TX HW offload feature since no CSUM feature.\n");
			features &= ~NETIF_F_HW_TLS_TX;
		}
	}

	if ((features & NETIF_F_HW_TLS_RX) && !(features & NETIF_F_RXCSUM)) {
		netdev_dbg(dev, "Dropping TLS RX HW offload feature since no RXCSUM feature.\n");
		features &= ~NETIF_F_HW_TLS_RX;
	}

	return features;
}

int __netdev_update_features(struct net_device *dev)
{
	struct net_device *upper, *lower;
	netdev_features_t features;
	struct list_head *iter;
	int err = -1;

	ASSERT_RTNL();

	features = netdev_get_wanted_features(dev);

	if (dev->netdev_ops->ndo_fix_features)
		features = dev->netdev_ops->ndo_fix_features(dev, features);

	/* driver might be less strict about feature dependencies */
	features = netdev_fix_features(dev, features);

	/* some features can't be enabled if they're off on an upper device */
	netdev_for_each_upper_dev_rcu(dev, upper, iter)
		features = netdev_sync_upper_features(dev, upper, features);

	if (dev->features == features)
		goto sync_lower;

	netdev_dbg(dev, "Features changed: %pNF -> %pNF\n",
		&dev->features, &features);

	if (dev->netdev_ops->ndo_set_features)
		err = dev->netdev_ops->ndo_set_features(dev, features);
	else
		err = 0;

	if (unlikely(err < 0)) {
		netdev_err(dev,
			"set_features() failed (%d); wanted %pNF, left %pNF\n",
			err, &features, &dev->features);
		/* return non-0 since some features might have changed and
		 * it's better to fire a spurious notification than miss it
		 */
		return -1;
	}

sync_lower:
	/* some features must be disabled on lower devices when disabled
	 * on an upper device (think: bonding master or bridge)
	 */
	netdev_for_each_lower_dev(dev, lower, iter)
		netdev_sync_lower_features(dev, lower, features);

	if (!err) {
		netdev_features_t diff = features ^ dev->features;

		if (diff & NETIF_F_RX_UDP_TUNNEL_PORT) {
			/* udp_tunnel_{get,drop}_rx_info both need
			 * NETIF_F_RX_UDP_TUNNEL_PORT enabled on the
			 * device, or they won't do anything.
			 * Thus we need to update dev->features
			 * *before* calling udp_tunnel_get_rx_info,
			 * but *after* calling udp_tunnel_drop_rx_info.
			 */
			if (features & NETIF_F_RX_UDP_TUNNEL_PORT) {
				dev->features = features;
				udp_tunnel_get_rx_info(dev);
			} else {
				udp_tunnel_drop_rx_info(dev);
			}
		}

		if (diff & NETIF_F_HW_VLAN_CTAG_FILTER) {
			if (features & NETIF_F_HW_VLAN_CTAG_FILTER) {
				dev->features = features;
				err |= vlan_get_rx_ctag_filter_info(dev);
			} else {
				vlan_drop_rx_ctag_filter_info(dev);
			}
		}

		if (diff & NETIF_F_HW_VLAN_STAG_FILTER) {
			if (features & NETIF_F_HW_VLAN_STAG_FILTER) {
				dev->features = features;
				err |= vlan_get_rx_stag_filter_info(dev);
			} else {
				vlan_drop_rx_stag_filter_info(dev);
			}
		}

		dev->features = features;
	}

	return err < 0 ? 0 : 1;
}

/**
 * netdev_update_features - recalculate device features
 * @dev: the device to check
 *
 * Recalculate dev->features set and send notifications if it
 * has changed. Should be called after driver or hardware dependent
 * conditions might have changed that influence the features.
 */
void netdev_update_features(struct net_device *dev)
{
	if (__netdev_update_features(dev))
		netdev_features_change(dev);
}
EXPORT_SYMBOL(netdev_update_features);
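
/* Illustrative sketch (hypothetical driver, an assumption): after a runtime
 * event changes what the hardware can do (firmware reload, reconfiguration),
 * a driver re-runs the feature negotiation under rtnl_lock:
 *
 *	dev->hw_features &= ~NETIF_F_LRO;	// capability lost, hypothetical
 *	netdev_update_features(dev);		// re-fix, apply, notify if changed
 */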

/**
 * netdev_change_features - recalculate device features
 * @dev: the device to check
 *
 * Recalculate dev->features set and send notifications even
 * if they have not changed. Should be called instead of
 * netdev_update_features() if also dev->vlan_features might
 * have changed to allow the changes to be propagated to stacked
 * VLAN devices.
 */
void netdev_change_features(struct net_device *dev)
{
	__netdev_update_features(dev);
	netdev_features_change(dev);
}
EXPORT_SYMBOL(netdev_change_features);

/**
 * netif_stacked_transfer_operstate - transfer operstate
 * @rootdev: the root or lower level device to transfer state from
 * @dev: the device to transfer operstate to
 *
 * Transfer operational state from root to device. This is normally
 * called when a stacking relationship exists between the root
 * device and the device (a leaf device).
 */
void netif_stacked_transfer_operstate(const struct net_device *rootdev,
				      struct net_device *dev)
{
	if (rootdev->operstate == IF_OPER_DORMANT)
		netif_dormant_on(dev);
	else
		netif_dormant_off(dev);

	if (rootdev->operstate == IF_OPER_TESTING)
		netif_testing_on(dev);
	else
		netif_testing_off(dev);

	if (netif_carrier_ok(rootdev))
		netif_carrier_on(dev);
	else
		netif_carrier_off(dev);
}
EXPORT_SYMBOL(netif_stacked_transfer_operstate);

static int netif_alloc_rx_queues(struct net_device *dev)
{
	unsigned int i, count = dev->num_rx_queues;
	struct netdev_rx_queue *rx;
	size_t sz = count * sizeof(*rx);
	int err = 0;

	BUG_ON(count < 1);

	rx = kvzalloc(sz, GFP_KERNEL | __GFP_RETRY_MAYFAIL);
	if (!rx)
		return -ENOMEM;

	dev->_rx = rx;

	for (i = 0; i < count; i++) {
		rx[i].dev = dev;

		/* XDP RX-queue setup */
		err = xdp_rxq_info_reg(&rx[i].xdp_rxq, dev, i, 0);
		if (err < 0)
			goto err_rxq_info;
	}
	return 0;

err_rxq_info:
	/* Rollback successful reg's and free other resources */
	while (i--)
		xdp_rxq_info_unreg(&rx[i].xdp_rxq);
	kvfree(dev->_rx);
	dev->_rx = NULL;
	return err;
}

static void netif_free_rx_queues(struct net_device *dev)
{
	unsigned int i, count = dev->num_rx_queues;

	/* netif_alloc_rx_queues alloc failed, resources have been unreg'ed */
	if (!dev->_rx)
		return;

	for (i = 0; i < count; i++)
		xdp_rxq_info_unreg(&dev->_rx[i].xdp_rxq);

	kvfree(dev->_rx);
}

static void netdev_init_one_queue(struct net_device *dev,
				  struct netdev_queue *queue, void *_unused)
{
	/* Initialize queue lock */
	spin_lock_init(&queue->_xmit_lock);
	netdev_set_xmit_lockdep_class(&queue->_xmit_lock, dev->type);
	queue->xmit_lock_owner = -1;
	netdev_queue_numa_node_write(queue, NUMA_NO_NODE);
	queue->dev = dev;
#ifdef CONFIG_BQL
	dql_init(&queue->dql, HZ);
#endif
}

static void netif_free_tx_queues(struct net_device *dev)
{
	kvfree(dev->_tx);
}

static int netif_alloc_netdev_queues(struct net_device *dev)
{
	unsigned int count = dev->num_tx_queues;
	struct netdev_queue *tx;
	size_t sz = count * sizeof(*tx);

	if (count < 1 || count > 0xffff)
		return -EINVAL;

	tx = kvzalloc(sz, GFP_KERNEL | __GFP_RETRY_MAYFAIL);
	if (!tx)
		return -ENOMEM;

	dev->_tx = tx;

	netdev_for_each_tx_queue(dev, netdev_init_one_queue, NULL);
	spin_lock_init(&dev->tx_global_lock);

	return 0;
}

void netif_tx_stop_all_queues(struct net_device *dev)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);

		netif_tx_stop_queue(txq);
	}
}
EXPORT_SYMBOL(netif_tx_stop_all_queues);

/**
 * register_netdevice - register a network device
 * @dev: device to register
 *
 * Take a completed network device structure and add it to the kernel
 * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
 * chain. 0 is returned on success. A negative errno code is returned
 * on a failure to set up the device, or if the name is a duplicate.
 *
 * Callers must hold the rtnl semaphore. You may want
 * register_netdev() instead of this.
 *
 * BUGS:
 * The locking appears insufficient to guarantee two parallel registers
 * will not get the same name.
 */

int register_netdevice(struct net_device *dev)
{
	int ret;
	struct net *net = dev_net(dev);

	BUILD_BUG_ON(sizeof(netdev_features_t) * BITS_PER_BYTE <
		     NETDEV_FEATURE_COUNT);
	BUG_ON(dev_boot_phase);
	ASSERT_RTNL();

	might_sleep();

	/* When net_device's are persistent, this will be fatal. */
	BUG_ON(dev->reg_state != NETREG_UNINITIALIZED);
	BUG_ON(!net);

	ret = ethtool_check_ops(dev->ethtool_ops);
	if (ret)
		return ret;

	spin_lock_init(&dev->addr_list_lock);
	netdev_set_addr_lockdep_class(dev);

	ret = dev_get_valid_name(net, dev, dev->name);
	if (ret < 0)
		goto out;

	ret = -ENOMEM;
	dev->name_node = netdev_name_node_head_alloc(dev);
	if (!dev->name_node)
		goto out;

	/* Init, if this function is available */
	if (dev->netdev_ops->ndo_init) {
		ret = dev->netdev_ops->ndo_init(dev);
		if (ret) {
			if (ret > 0)
				ret = -EIO;
			goto err_free_name;
		}
	}

	if (((dev->hw_features | dev->features) &
	     NETIF_F_HW_VLAN_CTAG_FILTER) &&
	    (!dev->netdev_ops->ndo_vlan_rx_add_vid ||
	     !dev->netdev_ops->ndo_vlan_rx_kill_vid)) {
		netdev_WARN(dev, "Buggy VLAN acceleration in driver!\n");
		ret = -EINVAL;
		goto err_uninit;
	}

	ret = -EBUSY;
	if (!dev->ifindex)
		dev->ifindex = dev_new_index(net);
	else if (__dev_get_by_index(net, dev->ifindex))
		goto err_uninit;

	/* Transfer changeable features to wanted_features and enable
	 * software offloads (GSO and GRO).
	 */
	dev->hw_features |= (NETIF_F_SOFT_FEATURES | NETIF_F_SOFT_FEATURES_OFF);
	dev->features |= NETIF_F_SOFT_FEATURES;

	if (dev->udp_tunnel_nic_info) {
		dev->features |= NETIF_F_RX_UDP_TUNNEL_PORT;
		dev->hw_features |= NETIF_F_RX_UDP_TUNNEL_PORT;
	}

	dev->wanted_features = dev->features & dev->hw_features;

	if (!(dev->flags & IFF_LOOPBACK))
		dev->hw_features |= NETIF_F_NOCACHE_COPY;

	/* If IPv4 TCP segmentation offload is supported we should also
	 * allow the device to enable segmenting the frame with the option
	 * of ignoring a static IP ID value. This doesn't enable the
	 * feature itself but allows the user to enable it later.
	 */
	if (dev->hw_features & NETIF_F_TSO)
		dev->hw_features |= NETIF_F_TSO_MANGLEID;
	if (dev->vlan_features & NETIF_F_TSO)
		dev->vlan_features |= NETIF_F_TSO_MANGLEID;
	if (dev->mpls_features & NETIF_F_TSO)
		dev->mpls_features |= NETIF_F_TSO_MANGLEID;
	if (dev->hw_enc_features & NETIF_F_TSO)
		dev->hw_enc_features |= NETIF_F_TSO_MANGLEID;

	/* Make NETIF_F_HIGHDMA inheritable to VLAN devices.
	 */
	dev->vlan_features |= NETIF_F_HIGHDMA;

	/* Make NETIF_F_SG inheritable to tunnel devices.
	 */
	dev->hw_enc_features |= NETIF_F_SG | NETIF_F_GSO_PARTIAL;

	/* Make NETIF_F_SG inheritable to MPLS.
	 */
	dev->mpls_features |= NETIF_F_SG;

	ret = call_netdevice_notifiers(NETDEV_POST_INIT, dev);
	ret = notifier_to_errno(ret);
	if (ret)
		goto err_uninit;

	ret = netdev_register_kobject(dev);
	if (ret) {
		dev->reg_state = NETREG_UNREGISTERED;
		goto err_uninit;
	}
	dev->reg_state = NETREG_REGISTERED;

	__netdev_update_features(dev);

	/* Default initial state at registry is that the
	 * device is present.
	 */

	set_bit(__LINK_STATE_PRESENT, &dev->state);

	linkwatch_init_dev(dev);

	dev_init_scheduler(dev);
	dev_hold(dev);
	list_netdevice(dev);
	add_device_randomness(dev->dev_addr, dev->addr_len);

	/* If the device has permanent device address, driver should
	 * set dev_addr and also addr_assign_type should be set to
	 * NET_ADDR_PERM (default value).
	 */
	if (dev->addr_assign_type == NET_ADDR_PERM)
		memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);

	/* Notify protocols that a new device appeared. */
	ret = call_netdevice_notifiers(NETDEV_REGISTER, dev);
	ret = notifier_to_errno(ret);
	if (ret) {
		/* Expect explicit free_netdev() on failure */
		dev->needs_free_netdev = false;
		unregister_netdevice_queue(dev, NULL);
		goto out;
	}
	/* Prevent userspace races by waiting until the network
	 * device is fully setup before sending notifications.
	 */
	if (!dev->rtnl_link_ops ||
	    dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
		rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U, GFP_KERNEL);

out:
	return ret;

err_uninit:
	if (dev->netdev_ops->ndo_uninit)
		dev->netdev_ops->ndo_uninit(dev);
	if (dev->priv_destructor)
		dev->priv_destructor(dev);
err_free_name:
	netdev_name_node_free(dev->name_node);
	goto out;
}
EXPORT_SYMBOL(register_netdevice);

/**
 * init_dummy_netdev - init a dummy network device for NAPI
 * @dev: device to init
 *
 * This takes a network device structure and initializes the minimum
 * amount of fields so it can be used to schedule NAPI polls without
 * registering a full blown interface. This is to be used by drivers
 * that need to tie several hardware interfaces to a single NAPI
 * poll scheduler due to HW limitations.
 */
int init_dummy_netdev(struct net_device *dev)
{
	/* Clear everything. Note we don't initialize spinlocks
	 * as they aren't supposed to be taken by any of the
	 * NAPI code and this dummy netdev is supposed to be
	 * only ever used for NAPI polls
	 */
	memset(dev, 0, sizeof(struct net_device));

	/* make sure we BUG if trying to hit standard
	 * register/unregister code path
	 */
	dev->reg_state = NETREG_DUMMY;

	/* NAPI wants this */
	INIT_LIST_HEAD(&dev->napi_list);

	/* a dummy interface is started by default */
	set_bit(__LINK_STATE_PRESENT, &dev->state);
	set_bit(__LINK_STATE_START, &dev->state);

	/* napi_busy_loop stats accounting wants this */
	dev_net_set(dev, &init_net);

	/* Note: we don't allocate pcpu_refcnt for dummy devices,
	 * because users of this 'device' don't need to change
	 * its refcount.
	 */

	return 0;
}
EXPORT_SYMBOL_GPL(init_dummy_netdev);

/**
 * register_netdev - register a network device
 * @dev: device to register
 *
 * Take a completed network device structure and add it to the kernel
 * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
 * chain. 0 is returned on success. A negative errno code is returned
 * on a failure to set up the device, or if the name is a duplicate.
 *
 * This is a wrapper around register_netdevice that takes the rtnl semaphore
 * and expands the device name if you passed a format string to
 * alloc_netdev.
 */
int register_netdev(struct net_device *dev)
{
	int err;

	if (rtnl_lock_killable())
		return -EINTR;
	err = register_netdevice(dev);
	rtnl_unlock();
	return err;
}
EXPORT_SYMBOL(register_netdev);
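
/* Illustrative sketch (hypothetical driver, an assumption): the classic
 * probe-time pattern around register_netdev(). The foo_* names are
 * placeholders:
 *
 *	struct net_device *dev = alloc_etherdev(sizeof(struct foo_priv));
 *	if (!dev)
 *		return -ENOMEM;
 *	dev->netdev_ops = &foo_netdev_ops;
 *	err = register_netdev(dev);	// takes rtnl_lock internally
 *	if (err)
 *		free_netdev(dev);
 */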

int netdev_refcnt_read(const struct net_device *dev)
{
	int i, refcnt = 0;

	for_each_possible_cpu(i)
		refcnt += *per_cpu_ptr(dev->pcpu_refcnt, i);
	return refcnt;
}
EXPORT_SYMBOL(netdev_refcnt_read);

#define WAIT_REFS_MIN_MSECS 1
#define WAIT_REFS_MAX_MSECS 250
/**
 * netdev_wait_allrefs - wait until all references are gone.
 * @dev: target net_device
 *
 * This is called when unregistering network devices.
 *
 * Any protocol or device that holds a reference should register
 * for netdevice notification, and cleanup and put back the
 * reference if they receive an UNREGISTER event.
 * We can get stuck here if buggy protocols don't correctly
 * call dev_put.
 */
static void netdev_wait_allrefs(struct net_device *dev)
{
	unsigned long rebroadcast_time, warning_time;
	int wait = 0, refcnt;

	linkwatch_forget_dev(dev);

	rebroadcast_time = warning_time = jiffies;
	refcnt = netdev_refcnt_read(dev);

	while (refcnt != 0) {
		if (time_after(jiffies, rebroadcast_time + 1 * HZ)) {
			rtnl_lock();

			/* Rebroadcast unregister notification */
			call_netdevice_notifiers(NETDEV_UNREGISTER, dev);

			__rtnl_unlock();
			rcu_barrier();
			rtnl_lock();

			if (test_bit(__LINK_STATE_LINKWATCH_PENDING,
				     &dev->state)) {
				/* We must not have linkwatch events
				 * pending on unregister. If this
				 * happens, we simply run the queue
				 * unscheduled, resulting in a noop
				 * for this device.
				 */
				linkwatch_run_queue();
			}

			__rtnl_unlock();

			rebroadcast_time = jiffies;
		}

		if (!wait) {
			rcu_barrier();
			wait = WAIT_REFS_MIN_MSECS;
		} else {
			msleep(wait);
			wait = min(wait << 1, WAIT_REFS_MAX_MSECS);
		}

		refcnt = netdev_refcnt_read(dev);

		if (refcnt && time_after(jiffies, warning_time + 10 * HZ)) {
			pr_emerg("unregister_netdevice: waiting for %s to become free. Usage count = %d\n",
				 dev->name, refcnt);
			warning_time = jiffies;
		}
	}
}

/* The sequence is:
 *
 *	rtnl_lock();
 *	...
 *	register_netdevice(x1);
 *	register_netdevice(x2);
 *	...
 *	unregister_netdevice(y1);
 *	unregister_netdevice(y2);
 *	...
 *	rtnl_unlock();
 *	free_netdev(y1);
 *	free_netdev(y2);
 *
 * We are invoked by rtnl_unlock().
 * This allows us to deal with problems:
 * 1) We can delete sysfs objects which invoke hotplug
 *    without deadlocking with linkwatch via keventd.
 * 2) Since we run with the RTNL semaphore not held, we can sleep
 *    safely in order to wait for the netdev refcnt to drop to zero.
 *
 * We must not return until all unregister events added during
 * the interval the lock was held have been completed.
 */
void netdev_run_todo(void)
{
	struct list_head list;
#ifdef CONFIG_LOCKDEP
	struct list_head unlink_list;

	list_replace_init(&net_unlink_list, &unlink_list);

	while (!list_empty(&unlink_list)) {
		struct net_device *dev = list_first_entry(&unlink_list,
							  struct net_device,
							  unlink_list);
		list_del_init(&dev->unlink_list);
		dev->nested_level = dev->lower_level - 1;
	}
#endif

	/* Snapshot list, allow later requests */
	list_replace_init(&net_todo_list, &list);

	__rtnl_unlock();

	/* Wait for rcu callbacks to finish before next phase */
	if (!list_empty(&list))
		rcu_barrier();

	while (!list_empty(&list)) {
		struct net_device *dev
			= list_first_entry(&list, struct net_device, todo_list);
		list_del(&dev->todo_list);

		if (unlikely(dev->reg_state != NETREG_UNREGISTERING)) {
			pr_err("network todo '%s' but state %d\n",
			       dev->name, dev->reg_state);
			dump_stack();
			continue;
		}

		dev->reg_state = NETREG_UNREGISTERED;

		netdev_wait_allrefs(dev);

		/* paranoia */
		BUG_ON(netdev_refcnt_read(dev));
		BUG_ON(!list_empty(&dev->ptype_all));
		BUG_ON(!list_empty(&dev->ptype_specific));
		WARN_ON(rcu_access_pointer(dev->ip_ptr));
		WARN_ON(rcu_access_pointer(dev->ip6_ptr));
#if IS_ENABLED(CONFIG_DECNET)
		WARN_ON(dev->dn_ptr);
#endif
		if (dev->priv_destructor)
			dev->priv_destructor(dev);
		if (dev->needs_free_netdev)
			free_netdev(dev);

		/* Report a network device has been unregistered */
		rtnl_lock();
		dev_net(dev)->dev_unreg_count--;
		__rtnl_unlock();
		wake_up(&netdev_unregistering_wq);

		/* Free network device */
		kobject_put(&dev->dev.kobj);
	}
}
10479
/* Convert net_device_stats to rtnl_link_stats64. rtnl_link_stats64 has
 * all the same fields in the same order as net_device_stats, with only
 * the type differing, but rtnl_link_stats64 may have additional fields
 * at the end for newer counters.
 */
void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64,
			     const struct net_device_stats *netdev_stats)
{
#if BITS_PER_LONG == 64
	BUILD_BUG_ON(sizeof(*stats64) < sizeof(*netdev_stats));
	memcpy(stats64, netdev_stats, sizeof(*netdev_stats));
	/* zero out counters that only exist in rtnl_link_stats64 */
	memset((char *)stats64 + sizeof(*netdev_stats), 0,
	       sizeof(*stats64) - sizeof(*netdev_stats));
#else
	size_t i, n = sizeof(*netdev_stats) / sizeof(unsigned long);
	const unsigned long *src = (const unsigned long *)netdev_stats;
	u64 *dst = (u64 *)stats64;

	BUILD_BUG_ON(n > sizeof(*stats64) / sizeof(u64));
	for (i = 0; i < n; i++)
		dst[i] = src[i];
	/* zero out counters that only exist in rtnl_link_stats64 */
	memset((char *)stats64 + n * sizeof(u64), 0,
	       sizeof(*stats64) - n * sizeof(u64));
#endif
}
EXPORT_SYMBOL(netdev_stats_to_stats64);

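/* A minimal usage sketch, not part of this file: a hypothetical driver
 * whose counters still live in dev->stats can build its 64-bit view with
 * the helper above; the "foo" name is illustrative only.
 */
#if 0
static void foo_get_stats64(struct net_device *dev,
			    struct rtnl_link_stats64 *storage)
{
	/* widens each unsigned long counter and zeroes the 64-bit-only tail */
	netdev_stats_to_stats64(storage, &dev->stats);
}
#endif
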
/**
 * dev_get_stats - get network device statistics
 * @dev: device to get statistics from
 * @storage: place to store stats
 *
 * Get network statistics from device. Return @storage.
 * The device driver may provide its own method by setting
 * dev->netdev_ops->get_stats64 or dev->netdev_ops->get_stats;
 * otherwise the internal statistics structure is used.
 */
struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
					struct rtnl_link_stats64 *storage)
{
	const struct net_device_ops *ops = dev->netdev_ops;

	if (ops->ndo_get_stats64) {
		memset(storage, 0, sizeof(*storage));
		ops->ndo_get_stats64(dev, storage);
	} else if (ops->ndo_get_stats) {
		netdev_stats_to_stats64(storage, ops->ndo_get_stats(dev));
	} else {
		netdev_stats_to_stats64(storage, &dev->stats);
	}
	storage->rx_dropped += (unsigned long)atomic_long_read(&dev->rx_dropped);
	storage->tx_dropped += (unsigned long)atomic_long_read(&dev->tx_dropped);
	storage->rx_nohandler += (unsigned long)atomic_long_read(&dev->rx_nohandler);
	return storage;
}
EXPORT_SYMBOL(dev_get_stats);

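/* Usage sketch (illustrative, not part of this file): snapshot device
 * statistics into a stack buffer; dev_get_stats() picks the driver's
 * preferred method and folds in the core drop counters.
 */
#if 0
static void example_dump_stats(struct net_device *dev)
{
	struct rtnl_link_stats64 stats;

	dev_get_stats(dev, &stats);
	pr_info("%s: rx %llu tx %llu\n", dev->name,
		(unsigned long long)stats.rx_packets,
		(unsigned long long)stats.tx_packets);
}
#endif
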
/**
 * dev_fetch_sw_netstats - get per-cpu network device statistics
 * @s: place to store stats
 * @netstats: per-cpu network stats to read from
 *
 * Read per-cpu network statistics and populate the related fields in @s.
 */
void dev_fetch_sw_netstats(struct rtnl_link_stats64 *s,
			   const struct pcpu_sw_netstats __percpu *netstats)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		const struct pcpu_sw_netstats *stats;
		struct pcpu_sw_netstats tmp;
		unsigned int start;

		stats = per_cpu_ptr(netstats, cpu);
		do {
			start = u64_stats_fetch_begin_irq(&stats->syncp);
			tmp.rx_packets = stats->rx_packets;
			tmp.rx_bytes = stats->rx_bytes;
			tmp.tx_packets = stats->tx_packets;
			tmp.tx_bytes = stats->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&stats->syncp, start));

		s->rx_packets += tmp.rx_packets;
		s->rx_bytes += tmp.rx_bytes;
		s->tx_packets += tmp.tx_packets;
		s->tx_bytes += tmp.tx_bytes;
	}
}
EXPORT_SYMBOL_GPL(dev_fetch_sw_netstats);

/**
 * dev_get_tstats64 - ndo_get_stats64 implementation
 * @dev: device to get statistics from
 * @s: place to store stats
 *
 * Populate @s from dev->stats and dev->tstats. Can be used as
 * ndo_get_stats64() callback.
 */
void dev_get_tstats64(struct net_device *dev, struct rtnl_link_stats64 *s)
{
	netdev_stats_to_stats64(s, &dev->stats);
	dev_fetch_sw_netstats(s, dev->tstats);
}
EXPORT_SYMBOL_GPL(dev_get_tstats64);

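/* Wiring sketch (hypothetical driver, not part of this file): a driver
 * that accounts into dev->tstats can plug the generic helper straight
 * into its ops; "foo_netdev_ops" is an illustrative name.
 */
#if 0
static const struct net_device_ops foo_netdev_ops = {
	.ndo_get_stats64	= dev_get_tstats64,
};
#endif
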
struct netdev_queue *dev_ingress_queue_create(struct net_device *dev)
{
	struct netdev_queue *queue = dev_ingress_queue(dev);

#ifdef CONFIG_NET_CLS_ACT
	if (queue)
		return queue;
	queue = kzalloc(sizeof(*queue), GFP_KERNEL);
	if (!queue)
		return NULL;
	netdev_init_one_queue(dev, queue, NULL);
	RCU_INIT_POINTER(queue->qdisc, &noop_qdisc);
	queue->qdisc_sleeping = &noop_qdisc;
	rcu_assign_pointer(dev->ingress_queue, queue);
#endif
	return queue;
}

static const struct ethtool_ops default_ethtool_ops;

void netdev_set_default_ethtool_ops(struct net_device *dev,
				    const struct ethtool_ops *ops)
{
	if (dev->ethtool_ops == &default_ethtool_ops)
		dev->ethtool_ops = ops;
}
EXPORT_SYMBOL_GPL(netdev_set_default_ethtool_ops);

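/* Usage sketch (illustrative, not part of this file): a bus or platform
 * layer can install fallback ethtool ops without clobbering ops a driver
 * already set; "foo_attach" and "foo_ethtool_ops" are hypothetical names.
 */
#if 0
static void foo_attach(struct net_device *dev)
{
	netdev_set_default_ethtool_ops(dev, &foo_ethtool_ops);
}
#endif
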
void netdev_freemem(struct net_device *dev)
{
	char *addr = (char *)dev - dev->padded;

	kvfree(addr);
}

/**
 * alloc_netdev_mqs - allocate network device
 * @sizeof_priv: size of private data to allocate space for
 * @name: device name format string
 * @name_assign_type: origin of device name
 * @setup: callback to initialize device
 * @txqs: the number of TX subqueues to allocate
 * @rxqs: the number of RX subqueues to allocate
 *
 * Allocates a struct net_device with private data area for driver use
 * and performs basic initialization. Also allocates subqueue structs
 * for each queue on the device.
 */
struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
				    unsigned char name_assign_type,
				    void (*setup)(struct net_device *),
				    unsigned int txqs, unsigned int rxqs)
{
	struct net_device *dev;
	unsigned int alloc_size;
	struct net_device *p;

	BUG_ON(strlen(name) >= sizeof(dev->name));

	if (txqs < 1) {
		pr_err("alloc_netdev: Unable to allocate device with zero queues\n");
		return NULL;
	}

	if (rxqs < 1) {
		pr_err("alloc_netdev: Unable to allocate device with zero RX queues\n");
		return NULL;
	}

	alloc_size = sizeof(struct net_device);
	if (sizeof_priv) {
		/* ensure 32-byte alignment of private area */
		alloc_size = ALIGN(alloc_size, NETDEV_ALIGN);
		alloc_size += sizeof_priv;
	}
	/* ensure 32-byte alignment of whole construct */
	alloc_size += NETDEV_ALIGN - 1;

	p = kvzalloc(alloc_size, GFP_KERNEL | __GFP_RETRY_MAYFAIL);
	if (!p)
		return NULL;

	dev = PTR_ALIGN(p, NETDEV_ALIGN);
	dev->padded = (char *)dev - (char *)p;

	dev->pcpu_refcnt = alloc_percpu(int);
	if (!dev->pcpu_refcnt)
		goto free_dev;

	if (dev_addr_init(dev))
		goto free_pcpu;

	dev_mc_init(dev);
	dev_uc_init(dev);

	dev_net_set(dev, &init_net);

	dev->gso_max_size = GSO_MAX_SIZE;
	dev->gso_max_segs = GSO_MAX_SEGS;
	dev->upper_level = 1;
	dev->lower_level = 1;
#ifdef CONFIG_LOCKDEP
	dev->nested_level = 0;
	INIT_LIST_HEAD(&dev->unlink_list);
#endif

	INIT_LIST_HEAD(&dev->napi_list);
	INIT_LIST_HEAD(&dev->unreg_list);
	INIT_LIST_HEAD(&dev->close_list);
	INIT_LIST_HEAD(&dev->link_watch_list);
	INIT_LIST_HEAD(&dev->adj_list.upper);
	INIT_LIST_HEAD(&dev->adj_list.lower);
	INIT_LIST_HEAD(&dev->ptype_all);
	INIT_LIST_HEAD(&dev->ptype_specific);
	INIT_LIST_HEAD(&dev->net_notifier_list);
#ifdef CONFIG_NET_SCHED
	hash_init(dev->qdisc_hash);
#endif
	dev->priv_flags = IFF_XMIT_DST_RELEASE | IFF_XMIT_DST_RELEASE_PERM;
	setup(dev);

	if (!dev->tx_queue_len) {
		dev->priv_flags |= IFF_NO_QUEUE;
		dev->tx_queue_len = DEFAULT_TX_QUEUE_LEN;
	}

	dev->num_tx_queues = txqs;
	dev->real_num_tx_queues = txqs;
	if (netif_alloc_netdev_queues(dev))
		goto free_all;

	dev->num_rx_queues = rxqs;
	dev->real_num_rx_queues = rxqs;
	if (netif_alloc_rx_queues(dev))
		goto free_all;

	strcpy(dev->name, name);
	dev->name_assign_type = name_assign_type;
	dev->group = INIT_NETDEV_GROUP;
	if (!dev->ethtool_ops)
		dev->ethtool_ops = &default_ethtool_ops;

	nf_hook_ingress_init(dev);

	return dev;

free_all:
	free_netdev(dev);
	return NULL;

free_pcpu:
	free_percpu(dev->pcpu_refcnt);
free_dev:
	netdev_freemem(dev);
	return NULL;
}
EXPORT_SYMBOL(alloc_netdev_mqs);

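/* Allocation sketch (hypothetical driver, not part of this file): one TX
 * and one RX queue plus a private area; most Ethernet drivers reach this
 * through the alloc_etherdev()/alloc_etherdev_mq() wrappers instead.
 */
#if 0
struct foo_priv {
	int link_up;
};

static struct net_device *foo_alloc(void)
{
	return alloc_netdev_mqs(sizeof(struct foo_priv), "foo%d",
				NET_NAME_UNKNOWN, ether_setup, 1, 1);
}
#endif
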
/**
 * free_netdev - free network device
 * @dev: device
 *
 * This function does the last stage of destroying an allocated device
 * interface. The reference to the device object is released. If this
 * is the last reference then it will be freed. Must be called in
 * process context.
 */
void free_netdev(struct net_device *dev)
{
	struct napi_struct *p, *n;

	might_sleep();

	/* When called immediately after register_netdevice() failed the unwind
	 * handling may still be dismantling the device. Handle that case by
	 * deferring the free.
	 */
	if (dev->reg_state == NETREG_UNREGISTERING) {
		ASSERT_RTNL();
		dev->needs_free_netdev = true;
		return;
	}

	netif_free_tx_queues(dev);
	netif_free_rx_queues(dev);

	kfree(rcu_dereference_protected(dev->ingress_queue, 1));

	/* Flush device addresses */
	dev_addr_flush(dev);

	list_for_each_entry_safe(p, n, &dev->napi_list, dev_list)
		netif_napi_del(p);

	free_percpu(dev->pcpu_refcnt);
	dev->pcpu_refcnt = NULL;
	free_percpu(dev->xdp_bulkq);
	dev->xdp_bulkq = NULL;

	/* Compatibility with error handling in drivers */
	if (dev->reg_state == NETREG_UNINITIALIZED) {
		netdev_freemem(dev);
		return;
	}

	BUG_ON(dev->reg_state != NETREG_UNREGISTERED);
	dev->reg_state = NETREG_RELEASED;

	/* will free via device release */
	put_device(&dev->dev);
}
EXPORT_SYMBOL(free_netdev);

/**
 * synchronize_net - Synchronize with packet receive processing
 *
 * Wait for packets currently being received to be done.
 * Does not block later packets from starting.
 */
void synchronize_net(void)
{
	might_sleep();
	if (rtnl_is_locked())
		synchronize_rcu_expedited();
	else
		synchronize_rcu();
}
EXPORT_SYMBOL(synchronize_net);

/**
 * unregister_netdevice_queue - remove device from the kernel
 * @dev: device
 * @head: list
 *
 * This function shuts down a device interface and removes it
 * from the kernel tables.
 * If @head is not NULL, the device is queued to be unregistered later.
 *
 * Callers must hold the rtnl semaphore. You may want
 * unregister_netdev() instead of this.
 */

void unregister_netdevice_queue(struct net_device *dev, struct list_head *head)
{
	ASSERT_RTNL();

	if (head) {
		list_move_tail(&dev->unreg_list, head);
	} else {
		LIST_HEAD(single);

		list_add(&dev->unreg_list, &single);
		unregister_netdevice_many(&single);
	}
}
EXPORT_SYMBOL(unregister_netdevice_queue);

/**
 * unregister_netdevice_many - unregister many devices
 * @head: list of devices
 *
 * Note: As most callers use a stack allocated list_head,
 * we force a list_del() to make sure stack won't be corrupted later.
 */
void unregister_netdevice_many(struct list_head *head)
{
	struct net_device *dev, *tmp;
	LIST_HEAD(close_head);

	BUG_ON(dev_boot_phase);
	ASSERT_RTNL();

	if (list_empty(head))
		return;

	list_for_each_entry_safe(dev, tmp, head, unreg_list) {
		/* Some devices call without registering
		 * for initialization unwind. Remove those
		 * devices and proceed with the remaining.
		 */
		if (dev->reg_state == NETREG_UNINITIALIZED) {
			pr_debug("unregister_netdevice: device %s/%p never was registered\n",
				 dev->name, dev);

			WARN_ON(1);
			list_del(&dev->unreg_list);
			continue;
		}
		dev->dismantle = true;
		BUG_ON(dev->reg_state != NETREG_REGISTERED);
	}

	/* If device is running, close it first. */
	list_for_each_entry(dev, head, unreg_list)
		list_add_tail(&dev->close_list, &close_head);
	dev_close_many(&close_head, true);

	list_for_each_entry(dev, head, unreg_list) {
		/* And unlink it from device chain. */
		unlist_netdevice(dev);

		dev->reg_state = NETREG_UNREGISTERING;
	}
	flush_all_backlogs();

	synchronize_net();

	list_for_each_entry(dev, head, unreg_list) {
		struct sk_buff *skb = NULL;

		/* Shutdown queueing discipline. */
		dev_shutdown(dev);

		dev_xdp_uninstall(dev);

		/* Notify protocols, that we are about to destroy
		 * this device. They should clean all the things.
		 */
		call_netdevice_notifiers(NETDEV_UNREGISTER, dev);

		if (!dev->rtnl_link_ops ||
		    dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
			skb = rtmsg_ifinfo_build_skb(RTM_DELLINK, dev, ~0U, 0,
						     GFP_KERNEL, NULL, 0);

		/*
		 * Flush the unicast and multicast chains
		 */
		dev_uc_flush(dev);
		dev_mc_flush(dev);

		netdev_name_node_alt_flush(dev);
		netdev_name_node_free(dev->name_node);

		if (dev->netdev_ops->ndo_uninit)
			dev->netdev_ops->ndo_uninit(dev);

		if (skb)
			rtmsg_ifinfo_send(skb, dev, GFP_KERNEL);

		/* Notifier chain MUST detach us all upper devices. */
		WARN_ON(netdev_has_any_upper_dev(dev));
		WARN_ON(netdev_has_any_lower_dev(dev));

		/* Remove entries from kobject tree */
		netdev_unregister_kobject(dev);
#ifdef CONFIG_XPS
		/* Remove XPS queueing entries */
		netif_reset_xps_queues_gt(dev, 0);
#endif
	}

	synchronize_net();

	list_for_each_entry(dev, head, unreg_list) {
		dev_put(dev);
		net_set_todo(dev);
	}

	list_del(head);
}
EXPORT_SYMBOL(unregister_netdevice_many);

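/* Batching sketch (illustrative, not part of this file): queue several
 * devices on one stack list and tear them all down in a single rtnl-held
 * pass, amortizing the synchronize_net() cost across the batch.
 */
#if 0
static void example_unregister_pair(struct net_device *a,
				    struct net_device *b)
{
	LIST_HEAD(kill_list);

	ASSERT_RTNL();
	unregister_netdevice_queue(a, &kill_list);
	unregister_netdevice_queue(b, &kill_list);
	unregister_netdevice_many(&kill_list);
}
#endif
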
/**
 * unregister_netdev - remove device from the kernel
 * @dev: device
 *
 * This function shuts down a device interface and removes it
 * from the kernel tables.
 *
 * This is just a wrapper for unregister_netdevice that takes
 * the rtnl semaphore. In general you want to use this and not
 * unregister_netdevice.
 */
void unregister_netdev(struct net_device *dev)
{
	rtnl_lock();
	unregister_netdevice(dev);
	rtnl_unlock();
}
EXPORT_SYMBOL(unregister_netdev);

/**
 * dev_change_net_namespace - move device to a different network namespace
 * @dev: device
 * @net: network namespace
 * @pat: If not NULL name pattern to try if the current device name
 *       is already taken in the destination network namespace.
 *
 * This function shuts down a device interface and moves it
 * to a new network namespace. On success 0 is returned, on
 * a failure a negative errno code is returned.
 *
 * Callers must hold the rtnl semaphore.
 */

int dev_change_net_namespace(struct net_device *dev, struct net *net, const char *pat)
{
	struct net *net_old = dev_net(dev);
	int err, new_nsid, new_ifindex;

	ASSERT_RTNL();

	/* Don't allow namespace local devices to be moved. */
	err = -EINVAL;
	if (dev->features & NETIF_F_NETNS_LOCAL)
		goto out;

	/* Ensure the device has been registered */
	if (dev->reg_state != NETREG_REGISTERED)
		goto out;

	/* Get out if there is nothing to do */
	err = 0;
	if (net_eq(net_old, net))
		goto out;

	/* Pick the destination device name, and ensure
	 * we can use it in the destination network namespace.
	 */
	err = -EEXIST;
	if (__dev_get_by_name(net, dev->name)) {
		/* We get here if we can't use the current device name */
		if (!pat)
			goto out;
		err = dev_get_valid_name(net, dev, pat);
		if (err < 0)
			goto out;
	}

	/*
	 * And now a mini version of register_netdevice and unregister_netdevice.
	 */

	/* If device is running close it first. */
	dev_close(dev);

	/* And unlink it from device chain */
	unlist_netdevice(dev);

	synchronize_net();

	/* Shutdown queueing discipline. */
	dev_shutdown(dev);

	/* Notify protocols, that we are about to destroy
	 * this device. They should clean all the things.
	 *
	 * Note that dev->reg_state stays at NETREG_REGISTERED.
	 * This is wanted because this way 8021q and macvlan know
	 * the device is just moving and can keep their slaves up.
	 */
	call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
	rcu_barrier();

	new_nsid = peernet2id_alloc(dev_net(dev), net, GFP_KERNEL);
	/* If there is an ifindex conflict assign a new one */
	if (__dev_get_by_index(net, dev->ifindex))
		new_ifindex = dev_new_index(net);
	else
		new_ifindex = dev->ifindex;

	rtmsg_ifinfo_newnet(RTM_DELLINK, dev, ~0U, GFP_KERNEL, &new_nsid,
			    new_ifindex);

	/*
	 * Flush the unicast and multicast chains
	 */
	dev_uc_flush(dev);
	dev_mc_flush(dev);

	/* Send a netdev-removed uevent to the old namespace */
	kobject_uevent(&dev->dev.kobj, KOBJ_REMOVE);
	netdev_adjacent_del_links(dev);

	/* Move per-net netdevice notifiers that are following the netdevice */
	move_netdevice_notifiers_dev_net(dev, net);

	/* Actually switch the network namespace */
	dev_net_set(dev, net);
	dev->ifindex = new_ifindex;

	/* Send a netdev-add uevent to the new namespace */
	kobject_uevent(&dev->dev.kobj, KOBJ_ADD);
	netdev_adjacent_add_links(dev);

	/* Fixup kobjects */
	err = device_rename(&dev->dev, dev->name);
	WARN_ON(err);

	/* Adapt owner in case owning user namespace of target network
	 * namespace is different from the original one.
	 */
	err = netdev_change_owner(dev, net_old, net);
	WARN_ON(err);

	/* Add the device back in the hashes */
	list_netdevice(dev);

	/* Notify protocols, that a new device appeared. */
	call_netdevice_notifiers(NETDEV_REGISTER, dev);

	/*
	 * Prevent userspace races by waiting until the network
	 * device is fully setup before sending notifications.
	 */
	rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U, GFP_KERNEL);

	synchronize_net();
	err = 0;
out:
	return err;
}
EXPORT_SYMBOL_GPL(dev_change_net_namespace);

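/* Usage sketch (illustrative, not part of this file): move a device into
 * another namespace under rtnl, falling back to an "eth%d" pattern if
 * its current name is already taken there.
 */
#if 0
static int example_move(struct net_device *dev, struct net *target)
{
	int err;

	rtnl_lock();
	err = dev_change_net_namespace(dev, target, "eth%d");
	rtnl_unlock();
	return err;
}
#endif
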
static int dev_cpu_dead(unsigned int oldcpu)
{
	struct sk_buff **list_skb;
	struct sk_buff *skb;
	unsigned int cpu;
	struct softnet_data *sd, *oldsd, *remsd = NULL;

	local_irq_disable();
	cpu = smp_processor_id();
	sd = &per_cpu(softnet_data, cpu);
	oldsd = &per_cpu(softnet_data, oldcpu);

	/* Find end of our completion_queue. */
	list_skb = &sd->completion_queue;
	while (*list_skb)
		list_skb = &(*list_skb)->next;
	/* Append completion queue from offline CPU. */
	*list_skb = oldsd->completion_queue;
	oldsd->completion_queue = NULL;

	/* Append output queue from offline CPU. */
	if (oldsd->output_queue) {
		*sd->output_queue_tailp = oldsd->output_queue;
		sd->output_queue_tailp = oldsd->output_queue_tailp;
		oldsd->output_queue = NULL;
		oldsd->output_queue_tailp = &oldsd->output_queue;
	}
	/* Append NAPI poll list from offline CPU, with one exception :
	 * process_backlog() must be called by cpu owning percpu backlog.
	 * We properly handle process_queue & input_pkt_queue later.
	 */
	while (!list_empty(&oldsd->poll_list)) {
		struct napi_struct *napi = list_first_entry(&oldsd->poll_list,
							    struct napi_struct,
							    poll_list);

		list_del_init(&napi->poll_list);
		if (napi->poll == process_backlog)
			napi->state = 0;
		else
			____napi_schedule(sd, napi);
	}

	raise_softirq_irqoff(NET_TX_SOFTIRQ);
	local_irq_enable();

#ifdef CONFIG_RPS
	remsd = oldsd->rps_ipi_list;
	oldsd->rps_ipi_list = NULL;
#endif
	/* send out pending IPI's on offline CPU */
	net_rps_send_ipi(remsd);

	/* Process offline CPU's input_pkt_queue */
	while ((skb = __skb_dequeue(&oldsd->process_queue))) {
		netif_rx_ni(skb);
		input_queue_head_incr(oldsd);
	}
	while ((skb = skb_dequeue(&oldsd->input_pkt_queue))) {
		netif_rx_ni(skb);
		input_queue_head_incr(oldsd);
	}

	return 0;
}

/**
 * netdev_increment_features - increment feature set by one
 * @all: current feature set
 * @one: new feature set
 * @mask: mask feature set
 *
 * Computes a new feature set after adding a device with feature set
 * @one to the master device with current feature set @all. Will not
 * enable anything that is off in @mask. Returns the new feature set.
 */
netdev_features_t netdev_increment_features(netdev_features_t all,
	netdev_features_t one, netdev_features_t mask)
{
	if (mask & NETIF_F_HW_CSUM)
		mask |= NETIF_F_CSUM_MASK;
	mask |= NETIF_F_VLAN_CHALLENGED;

	all |= one & (NETIF_F_ONE_FOR_ALL | NETIF_F_CSUM_MASK) & mask;
	all &= one | ~NETIF_F_ALL_FOR_ALL;

	/* If one device supports hw checksumming, set for all. */
	if (all & NETIF_F_HW_CSUM)
		all &= ~(NETIF_F_CSUM_MASK & ~NETIF_F_HW_CSUM);

	return all;
}
EXPORT_SYMBOL(netdev_increment_features);

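/* Folding sketch (illustrative, not part of this file): a bonding/team
 * style master recomputes its feature set across its slaves;
 * "example_slave" and the caller-supplied mask are hypothetical.
 */
#if 0
static netdev_features_t example_fold(struct list_head *slaves,
				      netdev_features_t mask)
{
	netdev_features_t all = mask & NETIF_F_ALL_FOR_ALL;
	struct example_slave *s;

	list_for_each_entry(s, slaves, list)
		all = netdev_increment_features(all, s->dev->features, mask);
	return all;
}
#endif
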
static struct hlist_head * __net_init netdev_create_hash(void)
{
	int i;
	struct hlist_head *hash;

	hash = kmalloc_array(NETDEV_HASHENTRIES, sizeof(*hash), GFP_KERNEL);
	if (hash != NULL)
		for (i = 0; i < NETDEV_HASHENTRIES; i++)
			INIT_HLIST_HEAD(&hash[i]);

	return hash;
}

/* Initialize per network namespace state */
static int __net_init netdev_init(struct net *net)
{
	BUILD_BUG_ON(GRO_HASH_BUCKETS >
		     8 * sizeof_field(struct napi_struct, gro_bitmask));

	if (net != &init_net)
		INIT_LIST_HEAD(&net->dev_base_head);

	net->dev_name_head = netdev_create_hash();
	if (net->dev_name_head == NULL)
		goto err_name;

	net->dev_index_head = netdev_create_hash();
	if (net->dev_index_head == NULL)
		goto err_idx;

	RAW_INIT_NOTIFIER_HEAD(&net->netdev_chain);

	return 0;

err_idx:
	kfree(net->dev_name_head);
err_name:
	return -ENOMEM;
}

/**
 * netdev_drivername - network driver for the device
 * @dev: network device
 *
 * Determine network driver for device.
 */
const char *netdev_drivername(const struct net_device *dev)
{
	const struct device_driver *driver;
	const struct device *parent;
	const char *empty = "";

	parent = dev->dev.parent;
	if (!parent)
		return empty;

	driver = parent->driver;
	if (driver && driver->name)
		return driver->name;
	return empty;
}

static void __netdev_printk(const char *level, const struct net_device *dev,
			    struct va_format *vaf)
{
	if (dev && dev->dev.parent) {
		dev_printk_emit(level[1] - '0',
				dev->dev.parent,
				"%s %s %s%s: %pV",
				dev_driver_string(dev->dev.parent),
				dev_name(dev->dev.parent),
				netdev_name(dev), netdev_reg_state(dev),
				vaf);
	} else if (dev) {
		printk("%s%s%s: %pV",
		       level, netdev_name(dev), netdev_reg_state(dev), vaf);
	} else {
		printk("%s(NULL net_device): %pV", level, vaf);
	}
}

void netdev_printk(const char *level, const struct net_device *dev,
		   const char *format, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, format);

	vaf.fmt = format;
	vaf.va = &args;

	__netdev_printk(level, dev, &vaf);

	va_end(args);
}
EXPORT_SYMBOL(netdev_printk);

#define define_netdev_printk_level(func, level)			\
void func(const struct net_device *dev, const char *fmt, ...)	\
{								\
	struct va_format vaf;					\
	va_list args;						\
								\
	va_start(args, fmt);					\
								\
	vaf.fmt = fmt;						\
	vaf.va = &args;						\
								\
	__netdev_printk(level, dev, &vaf);			\
								\
	va_end(args);						\
}								\
EXPORT_SYMBOL(func);

define_netdev_printk_level(netdev_emerg, KERN_EMERG);
define_netdev_printk_level(netdev_alert, KERN_ALERT);
define_netdev_printk_level(netdev_crit, KERN_CRIT);
define_netdev_printk_level(netdev_err, KERN_ERR);
define_netdev_printk_level(netdev_warn, KERN_WARNING);
define_netdev_printk_level(netdev_notice, KERN_NOTICE);
define_netdev_printk_level(netdev_info, KERN_INFO);

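/* Logging sketch (illustrative, not part of this file): the generated
 * helpers take the device so messages carry driver, bus and interface
 * prefixes automatically.
 */
#if 0
static void example_log(struct net_device *dev)
{
	netdev_info(dev, "link is up\n");
	netdev_warn(dev, "queue %u stalled\n", 0U);
}
#endif
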
static void __net_exit netdev_exit(struct net *net)
{
	kfree(net->dev_name_head);
	kfree(net->dev_index_head);
	if (net != &init_net)
		WARN_ON_ONCE(!list_empty(&net->dev_base_head));
}

static struct pernet_operations __net_initdata netdev_net_ops = {
	.init = netdev_init,
	.exit = netdev_exit,
};

static void __net_exit default_device_exit(struct net *net)
{
	struct net_device *dev, *aux;
	/*
	 * Push all migratable network devices back to the
	 * initial network namespace
	 */
	rtnl_lock();
	for_each_netdev_safe(net, dev, aux) {
		int err;
		char fb_name[IFNAMSIZ];

		/* Ignore unmovable devices (i.e. loopback) */
		if (dev->features & NETIF_F_NETNS_LOCAL)
			continue;

		/* Leave virtual devices for the generic cleanup */
		if (dev->rtnl_link_ops)
			continue;

		/* Push remaining network devices to init_net */
		snprintf(fb_name, IFNAMSIZ, "dev%d", dev->ifindex);
		if (__dev_get_by_name(&init_net, fb_name))
			snprintf(fb_name, IFNAMSIZ, "dev%%d");
		err = dev_change_net_namespace(dev, &init_net, fb_name);
		if (err) {
			pr_emerg("%s: failed to move %s to init_net: %d\n",
				 __func__, dev->name, err);
			BUG();
		}
	}
	rtnl_unlock();
}

static void __net_exit rtnl_lock_unregistering(struct list_head *net_list)
{
	/* Return with the rtnl_lock held when there are no network
	 * devices unregistering in any network namespace in net_list.
	 */
	struct net *net;
	bool unregistering;
	DEFINE_WAIT_FUNC(wait, woken_wake_function);

	add_wait_queue(&netdev_unregistering_wq, &wait);
	for (;;) {
		unregistering = false;
		rtnl_lock();
		list_for_each_entry(net, net_list, exit_list) {
			if (net->dev_unreg_count > 0) {
				unregistering = true;
				break;
			}
		}
		if (!unregistering)
			break;
		__rtnl_unlock();

		wait_woken(&wait, TASK_UNINTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
	}
	remove_wait_queue(&netdev_unregistering_wq, &wait);
}

static void __net_exit default_device_exit_batch(struct list_head *net_list)
{
	/* At exit all network devices must be removed from a network
	 * namespace. Do this in the reverse order of registration.
	 * Do this across as many network namespaces as possible to
	 * improve batching efficiency.
	 */
	struct net_device *dev;
	struct net *net;
	LIST_HEAD(dev_kill_list);

	/* To prevent network device cleanup code from dereferencing
	 * loopback devices or network devices that have been freed
	 * wait here for all pending unregistrations to complete,
	 * before unregistering the loopback device and allowing the
	 * network namespace to be freed.
	 *
	 * The netdev todo list containing all network devices
	 * unregistrations that happen in default_device_exit_batch
	 * will run in the rtnl_unlock() at the end of
	 * default_device_exit_batch.
	 */
	rtnl_lock_unregistering(net_list);
	list_for_each_entry(net, net_list, exit_list) {
		for_each_netdev_reverse(net, dev) {
			if (dev->rtnl_link_ops && dev->rtnl_link_ops->dellink)
				dev->rtnl_link_ops->dellink(dev, &dev_kill_list);
			else
				unregister_netdevice_queue(dev, &dev_kill_list);
		}
	}
	unregister_netdevice_many(&dev_kill_list);
	rtnl_unlock();
}

static struct pernet_operations __net_initdata default_device_ops = {
	.exit = default_device_exit,
	.exit_batch = default_device_exit_batch,
};

/*
 * Initialize the DEV module. At boot time this walks the device list and
 * unhooks any devices that fail to initialise (normally hardware not
 * present) and leaves us with a valid list of present and active devices.
 *
 */

/*
 * This is called single threaded during boot, so no need
 * to take the rtnl semaphore.
 */
static int __init net_dev_init(void)
{
	int i, rc = -ENOMEM;

	BUG_ON(!dev_boot_phase);

	if (dev_proc_init())
		goto out;

	if (netdev_kobject_init())
		goto out;

	INIT_LIST_HEAD(&ptype_all);
	for (i = 0; i < PTYPE_HASH_SIZE; i++)
		INIT_LIST_HEAD(&ptype_base[i]);

	INIT_LIST_HEAD(&offload_base);

	if (register_pernet_subsys(&netdev_net_ops))
		goto out;

	/*
	 * Initialise the packet receive queues.
	 */

	for_each_possible_cpu(i) {
		struct work_struct *flush = per_cpu_ptr(&flush_works, i);
		struct softnet_data *sd = &per_cpu(softnet_data, i);

		INIT_WORK(flush, flush_backlog);

		skb_queue_head_init(&sd->input_pkt_queue);
		skb_queue_head_init(&sd->process_queue);
#ifdef CONFIG_XFRM_OFFLOAD
		skb_queue_head_init(&sd->xfrm_backlog);
#endif
		INIT_LIST_HEAD(&sd->poll_list);
		sd->output_queue_tailp = &sd->output_queue;
#ifdef CONFIG_RPS
		INIT_CSD(&sd->csd, rps_trigger_softirq, sd);
		sd->cpu = i;
#endif

		init_gro_hash(&sd->backlog);
		sd->backlog.poll = process_backlog;
		sd->backlog.weight = weight_p;
	}

	dev_boot_phase = 0;

	/* The loopback device is special: if any other network device
	 * is present in a network namespace, the loopback device must
	 * be present too. Since we now dynamically allocate and free the
	 * loopback device, ensure this invariant is maintained by
	 * keeping the loopback device as the first device on the
	 * list of network devices, so that it is the first device that
	 * appears and the last network device that disappears.
	 */
	if (register_pernet_device(&loopback_net_ops))
		goto out;

	if (register_pernet_device(&default_device_ops))
		goto out;

	open_softirq(NET_TX_SOFTIRQ, net_tx_action);
	open_softirq(NET_RX_SOFTIRQ, net_rx_action);

	rc = cpuhp_setup_state_nocalls(CPUHP_NET_DEV_DEAD, "net/dev:dead",
				       NULL, dev_cpu_dead);
	WARN_ON(rc < 0);
	rc = 0;
out:
	return rc;
}

subsys_initcall(net_dev_init);