net: bulk free infrastructure for NAPI context, use napi_consume_skb
[linux-2.6-block.git] / net / core / dev.c
/*
 * NET3 Protocol independent device support routines.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Derived from the non IP parts of dev.c 1.0.19
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Mark Evans, <evansmp@uhura.aston.ac.uk>
 *
 * Additional Authors:
 *		Florian la Roche <rzsfl@rz.uni-sb.de>
 *		Alan Cox <gw4pts@gw4pts.ampr.org>
 *		David Hinds <dahinds@users.sourceforge.net>
 *		Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
 *		Adam Sulmicki <adam@cfar.umd.edu>
 *		Pekka Riikonen <priikone@poesidon.pspt.fi>
 *
 * Changes:
 *		D.J. Barrow : Fixed bug where dev->refcnt gets set
 *			to 2 if register_netdev gets called
 *			before net_dev_init & also removed a
 *			few lines of code in the process.
 *		Alan Cox : device private ioctl copies fields back.
 *		Alan Cox : Transmit queue code does relevant
 *			stunts to keep the queue safe.
 *		Alan Cox : Fixed double lock.
 *		Alan Cox : Fixed promisc NULL pointer trap
 *		???????? : Support the full private ioctl range
 *		Alan Cox : Moved ioctl permission check into
 *			drivers
 *		Tim Kordas : SIOCADDMULTI/SIOCDELMULTI
 *		Alan Cox : 100 backlog just doesn't cut it when
 *			you start doing multicast video 8)
 *		Alan Cox : Rewrote net_bh and list manager.
 *		Alan Cox : Fix ETH_P_ALL echoback lengths.
 *		Alan Cox : Took out transmit every packet pass
 *			Saved a few bytes in the ioctl handler
 *		Alan Cox : Network driver sets packet type before
 *			calling netif_rx. Saves a function
 *			call a packet.
 *		Alan Cox : Hashed net_bh()
 *		Richard Kooijman : Timestamp fixes.
 *		Alan Cox : Wrong field in SIOCGIFDSTADDR
 *		Alan Cox : Device lock protection.
 *		Alan Cox : Fixed nasty side effect of device close
 *			changes.
 *		Rudi Cilibrasi : Pass the right thing to
 *			set_mac_address()
 *		Dave Miller : 32bit quantity for the device lock to
 *			make it work out on a Sparc.
 *		Bjorn Ekwall : Added KERNELD hack.
 *		Alan Cox : Cleaned up the backlog initialise.
 *		Craig Metz : SIOCGIFCONF fix if space for under
 *			1 device.
 *		Thomas Bogendoerfer : Return ENODEV for dev_open, if there
 *			is no device open function.
 *		Andi Kleen : Fix error reporting for SIOCGIFCONF
 *		Michael Chastain : Fix signed/unsigned for SIOCGIFCONF
 *		Cyrus Durgin : Cleaned for KMOD
 *		Adam Sulmicki : Bug Fix : Network Device Unload
 *			A network device unload needs to purge
 *			the backlog queue.
 *		Paul Rusty Russell : SIOCSIFNAME
 *		Pekka Riikonen : Netdev boot-time settings code
 *		Andrew Morton : Make unregister_netdevice wait
 *			indefinitely on dev->refcnt
 *		J Hadi Salim : - Backlog queue sampling
 *			       - netif_rx() feedback
 */

#include <asm/uaccess.h>
#include <linux/bitops.h>
#include <linux/capability.h>
#include <linux/cpu.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/hash.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/mutex.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/notifier.h>
#include <linux/skbuff.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/busy_poll.h>
#include <linux/rtnetlink.h>
#include <linux/stat.h>
#include <net/dst.h>
#include <net/dst_metadata.h>
#include <net/pkt_sched.h>
#include <net/checksum.h>
#include <net/xfrm.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/netpoll.h>
#include <linux/rcupdate.h>
#include <linux/delay.h>
#include <net/iw_handler.h>
#include <asm/current.h>
#include <linux/audit.h>
#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/ctype.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <net/ip.h>
#include <net/mpls.h>
#include <linux/ipv6.h>
#include <linux/in.h>
#include <linux/jhash.h>
#include <linux/random.h>
#include <trace/events/napi.h>
#include <trace/events/net.h>
#include <trace/events/skb.h>
#include <linux/pci.h>
#include <linux/inetdevice.h>
#include <linux/cpu_rmap.h>
#include <linux/static_key.h>
#include <linux/hashtable.h>
#include <linux/vmalloc.h>
#include <linux/if_macvlan.h>
#include <linux/errqueue.h>
#include <linux/hrtimer.h>
#include <linux/netfilter_ingress.h>
#include <linux/sctp.h>

#include "net-sysfs.h"

/* Instead of increasing this, you should create a hash table. */
#define MAX_GRO_SKBS 8

/* This should be increased if a protocol with a bigger head is added. */
#define GRO_MAX_HEAD (MAX_HEADER + 128)

static DEFINE_SPINLOCK(ptype_lock);
static DEFINE_SPINLOCK(offload_lock);
struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly;
struct list_head ptype_all __read_mostly;	/* Taps */
static struct list_head offload_base __read_mostly;

static int netif_rx_internal(struct sk_buff *skb);
static int call_netdevice_notifiers_info(unsigned long val,
					 struct net_device *dev,
					 struct netdev_notifier_info *info);

/*
 * The @dev_base_head list is protected by @dev_base_lock and the rtnl
 * semaphore.
 *
 * Pure readers hold dev_base_lock for reading, or rcu_read_lock()
 *
 * Writers must hold the rtnl semaphore while they loop through the
 * dev_base_head list, and hold dev_base_lock for writing when they do the
 * actual updates.  This allows pure readers to access the list even
 * while a writer is preparing to update it.
 *
 * To put it another way, dev_base_lock is held for writing only to
 * protect against pure readers; the rtnl semaphore provides the
 * protection against other writers.
 *
 * See, for example usages, register_netdevice() and
 * unregister_netdevice(), which must be called with the rtnl
 * semaphore held.
 */
DEFINE_RWLOCK(dev_base_lock);
EXPORT_SYMBOL(dev_base_lock);

/* protects napi_hash addition/deletion and napi_gen_id */
static DEFINE_SPINLOCK(napi_hash_lock);

static unsigned int napi_gen_id = NR_CPUS;
static DEFINE_READ_MOSTLY_HASHTABLE(napi_hash, 8);

static seqcount_t devnet_rename_seq;

static inline void dev_base_seq_inc(struct net *net)
{
	while (++net->dev_base_seq == 0);
}

static inline struct hlist_head *dev_name_hash(struct net *net, const char *name)
{
	unsigned int hash = full_name_hash(name, strnlen(name, IFNAMSIZ));

	return &net->dev_name_head[hash_32(hash, NETDEV_HASHBITS)];
}

static inline struct hlist_head *dev_index_hash(struct net *net, int ifindex)
{
	return &net->dev_index_head[ifindex & (NETDEV_HASHENTRIES - 1)];
}

static inline void rps_lock(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
	spin_lock(&sd->input_pkt_queue.lock);
#endif
}

static inline void rps_unlock(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
	spin_unlock(&sd->input_pkt_queue.lock);
#endif
}

/* Device list insertion */
static void list_netdevice(struct net_device *dev)
{
	struct net *net = dev_net(dev);

	ASSERT_RTNL();

	write_lock_bh(&dev_base_lock);
	list_add_tail_rcu(&dev->dev_list, &net->dev_base_head);
	hlist_add_head_rcu(&dev->name_hlist, dev_name_hash(net, dev->name));
	hlist_add_head_rcu(&dev->index_hlist,
			   dev_index_hash(net, dev->ifindex));
	write_unlock_bh(&dev_base_lock);

	dev_base_seq_inc(net);
}

/* Device list removal
 * caller must respect a RCU grace period before freeing/reusing dev
 */
static void unlist_netdevice(struct net_device *dev)
{
	ASSERT_RTNL();

	/* Unlink dev from the device chain */
	write_lock_bh(&dev_base_lock);
	list_del_rcu(&dev->dev_list);
	hlist_del_rcu(&dev->name_hlist);
	hlist_del_rcu(&dev->index_hlist);
	write_unlock_bh(&dev_base_lock);

	dev_base_seq_inc(dev_net(dev));
}

/*
 * Our notifier list
 */

static RAW_NOTIFIER_HEAD(netdev_chain);

/*
 * Device drivers call our routines to queue packets here. We empty the
 * queue in the local softnet handler.
 */

DEFINE_PER_CPU_ALIGNED(struct softnet_data, softnet_data);
EXPORT_PER_CPU_SYMBOL(softnet_data);

cf508b12 271#ifdef CONFIG_LOCKDEP
723e98b7 272/*
c773e847 273 * register_netdevice() inits txq->_xmit_lock and sets lockdep class
723e98b7
JP
274 * according to dev->type
275 */
276static const unsigned short netdev_lock_type[] =
277 {ARPHRD_NETROM, ARPHRD_ETHER, ARPHRD_EETHER, ARPHRD_AX25,
278 ARPHRD_PRONET, ARPHRD_CHAOS, ARPHRD_IEEE802, ARPHRD_ARCNET,
279 ARPHRD_APPLETLK, ARPHRD_DLCI, ARPHRD_ATM, ARPHRD_METRICOM,
280 ARPHRD_IEEE1394, ARPHRD_EUI64, ARPHRD_INFINIBAND, ARPHRD_SLIP,
281 ARPHRD_CSLIP, ARPHRD_SLIP6, ARPHRD_CSLIP6, ARPHRD_RSRVD,
282 ARPHRD_ADAPT, ARPHRD_ROSE, ARPHRD_X25, ARPHRD_HWX25,
283 ARPHRD_PPP, ARPHRD_CISCO, ARPHRD_LAPB, ARPHRD_DDCMP,
284 ARPHRD_RAWHDLC, ARPHRD_TUNNEL, ARPHRD_TUNNEL6, ARPHRD_FRAD,
285 ARPHRD_SKIP, ARPHRD_LOOPBACK, ARPHRD_LOCALTLK, ARPHRD_FDDI,
286 ARPHRD_BIF, ARPHRD_SIT, ARPHRD_IPDDP, ARPHRD_IPGRE,
287 ARPHRD_PIMREG, ARPHRD_HIPPI, ARPHRD_ASH, ARPHRD_ECONET,
288 ARPHRD_IRDA, ARPHRD_FCPP, ARPHRD_FCAL, ARPHRD_FCPL,
211ed865
PG
289 ARPHRD_FCFABRIC, ARPHRD_IEEE80211, ARPHRD_IEEE80211_PRISM,
290 ARPHRD_IEEE80211_RADIOTAP, ARPHRD_PHONET, ARPHRD_PHONET_PIPE,
291 ARPHRD_IEEE802154, ARPHRD_VOID, ARPHRD_NONE};
723e98b7 292
36cbd3dc 293static const char *const netdev_lock_name[] =
723e98b7
JP
294 {"_xmit_NETROM", "_xmit_ETHER", "_xmit_EETHER", "_xmit_AX25",
295 "_xmit_PRONET", "_xmit_CHAOS", "_xmit_IEEE802", "_xmit_ARCNET",
296 "_xmit_APPLETLK", "_xmit_DLCI", "_xmit_ATM", "_xmit_METRICOM",
297 "_xmit_IEEE1394", "_xmit_EUI64", "_xmit_INFINIBAND", "_xmit_SLIP",
298 "_xmit_CSLIP", "_xmit_SLIP6", "_xmit_CSLIP6", "_xmit_RSRVD",
299 "_xmit_ADAPT", "_xmit_ROSE", "_xmit_X25", "_xmit_HWX25",
300 "_xmit_PPP", "_xmit_CISCO", "_xmit_LAPB", "_xmit_DDCMP",
301 "_xmit_RAWHDLC", "_xmit_TUNNEL", "_xmit_TUNNEL6", "_xmit_FRAD",
302 "_xmit_SKIP", "_xmit_LOOPBACK", "_xmit_LOCALTLK", "_xmit_FDDI",
303 "_xmit_BIF", "_xmit_SIT", "_xmit_IPDDP", "_xmit_IPGRE",
304 "_xmit_PIMREG", "_xmit_HIPPI", "_xmit_ASH", "_xmit_ECONET",
305 "_xmit_IRDA", "_xmit_FCPP", "_xmit_FCAL", "_xmit_FCPL",
211ed865
PG
306 "_xmit_FCFABRIC", "_xmit_IEEE80211", "_xmit_IEEE80211_PRISM",
307 "_xmit_IEEE80211_RADIOTAP", "_xmit_PHONET", "_xmit_PHONET_PIPE",
308 "_xmit_IEEE802154", "_xmit_VOID", "_xmit_NONE"};
723e98b7
JP
309
310static struct lock_class_key netdev_xmit_lock_key[ARRAY_SIZE(netdev_lock_type)];
cf508b12 311static struct lock_class_key netdev_addr_lock_key[ARRAY_SIZE(netdev_lock_type)];
723e98b7
JP
312
313static inline unsigned short netdev_lock_pos(unsigned short dev_type)
314{
315 int i;
316
317 for (i = 0; i < ARRAY_SIZE(netdev_lock_type); i++)
318 if (netdev_lock_type[i] == dev_type)
319 return i;
320 /* the last key is used by default */
321 return ARRAY_SIZE(netdev_lock_type) - 1;
322}
323
cf508b12
DM
324static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
325 unsigned short dev_type)
723e98b7
JP
326{
327 int i;
328
329 i = netdev_lock_pos(dev_type);
330 lockdep_set_class_and_name(lock, &netdev_xmit_lock_key[i],
331 netdev_lock_name[i]);
332}
cf508b12
DM
333
334static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
335{
336 int i;
337
338 i = netdev_lock_pos(dev->type);
339 lockdep_set_class_and_name(&dev->addr_list_lock,
340 &netdev_addr_lock_key[i],
341 netdev_lock_name[i]);
342}
723e98b7 343#else
cf508b12
DM
344static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
345 unsigned short dev_type)
346{
347}
348static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
723e98b7
JP
349{
350}
351#endif

/*******************************************************************************

		Protocol management and registration routines

*******************************************************************************/

/*
 * Add a protocol ID to the list. Now that the input handler is
 * smarter we can dispense with all the messy stuff that used to be
 * here.
 *
 * BEWARE!!! Protocol handlers, mangling input packets,
 * MUST BE last in hash buckets and checking protocol handlers
 * MUST start from promiscuous ptype_all chain in net_bh.
 * It is true now, do not change it.
 * Explanation follows: if protocol handler, mangling packet, will
 * be the first on list, it is not able to sense, that packet
 * is cloned and should be copied-on-write, so that it will
 * change it and subsequent readers will get broken packet.
 *						--ANK (980803)
 */

static inline struct list_head *ptype_head(const struct packet_type *pt)
{
	if (pt->type == htons(ETH_P_ALL))
		return pt->dev ? &pt->dev->ptype_all : &ptype_all;
	else
		return pt->dev ? &pt->dev->ptype_specific :
				 &ptype_base[ntohs(pt->type) & PTYPE_HASH_MASK];
}

/**
 *	dev_add_pack - add packet handler
 *	@pt: packet type declaration
 *
 *	Add a protocol handler to the networking stack. The passed &packet_type
 *	is linked into kernel lists and may not be freed until it has been
 *	removed from the kernel lists.
 *
 *	This call does not sleep, therefore it cannot
 *	guarantee that all CPUs that are in the middle of receiving packets
 *	will see the new packet type (until the next received packet).
 */

void dev_add_pack(struct packet_type *pt)
{
	struct list_head *head = ptype_head(pt);

	spin_lock(&ptype_lock);
	list_add_rcu(&pt->list, head);
	spin_unlock(&ptype_lock);
}
EXPORT_SYMBOL(dev_add_pack);

/**
 *	__dev_remove_pack - remove packet handler
 *	@pt: packet type declaration
 *
 *	Remove a protocol handler that was previously added to the kernel
 *	protocol handlers by dev_add_pack(). The passed &packet_type is removed
 *	from the kernel lists and can be freed or reused once this function
 *	returns.
 *
 *	The packet type might still be in use by receivers
 *	and must not be freed until after all the CPUs have gone
 *	through a quiescent state.
 */
void __dev_remove_pack(struct packet_type *pt)
{
	struct list_head *head = ptype_head(pt);
	struct packet_type *pt1;

	spin_lock(&ptype_lock);

	list_for_each_entry(pt1, head, list) {
		if (pt == pt1) {
			list_del_rcu(&pt->list);
			goto out;
		}
	}

	pr_warn("dev_remove_pack: %p not found\n", pt);
out:
	spin_unlock(&ptype_lock);
}
EXPORT_SYMBOL(__dev_remove_pack);

/**
 *	dev_remove_pack - remove packet handler
 *	@pt: packet type declaration
 *
 *	Remove a protocol handler that was previously added to the kernel
 *	protocol handlers by dev_add_pack(). The passed &packet_type is removed
 *	from the kernel lists and can be freed or reused once this function
 *	returns.
 *
 *	This call sleeps to guarantee that no CPU is looking at the packet
 *	type after return.
 */
void dev_remove_pack(struct packet_type *pt)
{
	__dev_remove_pack(pt);

	synchronize_net();
}
EXPORT_SYMBOL(dev_remove_pack);

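/* Illustrative usage sketch (not from the kernel sources): a protocol module
 * typically embeds a struct packet_type and registers/unregisters it with the
 * two calls above.  The names my_proto_rcv/my_proto_type are hypothetical.
 */
#if 0
static int my_proto_rcv(struct sk_buff *skb, struct net_device *dev,
			struct packet_type *pt, struct net_device *orig_dev)
{
	/* look at the packet, then release it */
	kfree_skb(skb);
	return NET_RX_SUCCESS;
}

static struct packet_type my_proto_type __read_mostly = {
	.type = htons(ETH_P_ALL),
	.func = my_proto_rcv,
};

	/* module init: */
	dev_add_pack(&my_proto_type);
	/* module exit (may sleep, see dev_remove_pack above): */
	dev_remove_pack(&my_proto_type);
#endif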
461/**
462 * dev_add_offload - register offload handlers
463 * @po: protocol offload declaration
464 *
465 * Add protocol offload handlers to the networking stack. The passed
466 * &proto_offload is linked into kernel lists and may not be freed until
467 * it has been removed from the kernel lists.
468 *
 469 * This call does not sleep, therefore it cannot
 470 * guarantee that all CPUs that are in the middle of receiving packets
 471 * will see the new offload handlers (until the next received packet).
472 */
473void dev_add_offload(struct packet_offload *po)
474{
bdef7de4 475 struct packet_offload *elem;
62532da9
VY
476
477 spin_lock(&offload_lock);
bdef7de4
DM
478 list_for_each_entry(elem, &offload_base, list) {
479 if (po->priority < elem->priority)
480 break;
481 }
482 list_add_rcu(&po->list, elem->list.prev);
62532da9
VY
483 spin_unlock(&offload_lock);
484}
485EXPORT_SYMBOL(dev_add_offload);
486
487/**
488 * __dev_remove_offload - remove offload handler
489 * @po: packet offload declaration
490 *
491 * Remove a protocol offload handler that was previously added to the
492 * kernel offload handlers by dev_add_offload(). The passed &offload_type
493 * is removed from the kernel lists and can be freed or reused once this
494 * function returns.
495 *
496 * The packet type might still be in use by receivers
497 * and must not be freed until after all the CPU's have gone
498 * through a quiescent state.
499 */
1d143d9f 500static void __dev_remove_offload(struct packet_offload *po)
62532da9
VY
501{
502 struct list_head *head = &offload_base;
503 struct packet_offload *po1;
504
c53aa505 505 spin_lock(&offload_lock);
62532da9
VY
506
507 list_for_each_entry(po1, head, list) {
508 if (po == po1) {
509 list_del_rcu(&po->list);
510 goto out;
511 }
512 }
513
514 pr_warn("dev_remove_offload: %p not found\n", po);
515out:
c53aa505 516 spin_unlock(&offload_lock);
62532da9 517}
62532da9
VY
518
519/**
520 * dev_remove_offload - remove packet offload handler
521 * @po: packet offload declaration
522 *
523 * Remove a packet offload handler that was previously added to the kernel
524 * offload handlers by dev_add_offload(). The passed &offload_type is
525 * removed from the kernel lists and can be freed or reused once this
526 * function returns.
527 *
528 * This call sleeps to guarantee that no CPU is looking at the packet
529 * type after return.
530 */
531void dev_remove_offload(struct packet_offload *po)
532{
533 __dev_remove_offload(po);
534
535 synchronize_net();
536}
537EXPORT_SYMBOL(dev_remove_offload);
538
1da177e4
LT
539/******************************************************************************
540
541 Device Boot-time Settings Routines
542
543*******************************************************************************/
544
545/* Boot time configuration table */
546static struct netdev_boot_setup dev_boot_setup[NETDEV_BOOT_SETUP_MAX];
547
548/**
549 * netdev_boot_setup_add - add new setup entry
550 * @name: name of the device
551 * @map: configured settings for the device
552 *
 553 * Adds a new setup entry to the dev_boot_setup list. The function
 554 * returns 0 on error and 1 on success. This is a generic routine for
 555 * all netdevices.
556 */
557static int netdev_boot_setup_add(char *name, struct ifmap *map)
558{
559 struct netdev_boot_setup *s;
560 int i;
561
562 s = dev_boot_setup;
563 for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
564 if (s[i].name[0] == '\0' || s[i].name[0] == ' ') {
565 memset(s[i].name, 0, sizeof(s[i].name));
93b3cff9 566 strlcpy(s[i].name, name, IFNAMSIZ);
1da177e4
LT
567 memcpy(&s[i].map, map, sizeof(s[i].map));
568 break;
569 }
570 }
571
572 return i >= NETDEV_BOOT_SETUP_MAX ? 0 : 1;
573}
574
575/**
576 * netdev_boot_setup_check - check boot time settings
577 * @dev: the netdevice
578 *
579 * Check boot time settings for the device.
580 * The found settings are set for the device to be used
581 * later in the device probing.
582 * Returns 0 if no settings found, 1 if they are.
583 */
584int netdev_boot_setup_check(struct net_device *dev)
585{
586 struct netdev_boot_setup *s = dev_boot_setup;
587 int i;
588
589 for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
590 if (s[i].name[0] != '\0' && s[i].name[0] != ' ' &&
93b3cff9 591 !strcmp(dev->name, s[i].name)) {
1da177e4
LT
592 dev->irq = s[i].map.irq;
593 dev->base_addr = s[i].map.base_addr;
594 dev->mem_start = s[i].map.mem_start;
595 dev->mem_end = s[i].map.mem_end;
596 return 1;
597 }
598 }
599 return 0;
600}
d1b19dff 601EXPORT_SYMBOL(netdev_boot_setup_check);
1da177e4
LT
602
603
604/**
605 * netdev_boot_base - get address from boot time settings
606 * @prefix: prefix for network device
607 * @unit: id for network device
608 *
609 * Check boot time settings for the base address of device.
610 * The found settings are set for the device to be used
611 * later in the device probing.
612 * Returns 0 if no settings found.
613 */
614unsigned long netdev_boot_base(const char *prefix, int unit)
615{
616 const struct netdev_boot_setup *s = dev_boot_setup;
617 char name[IFNAMSIZ];
618 int i;
619
620 sprintf(name, "%s%d", prefix, unit);
621
622 /*
623 * If device already registered then return base of 1
624 * to indicate not to probe for this interface
625 */
881d966b 626 if (__dev_get_by_name(&init_net, name))
1da177e4
LT
627 return 1;
628
629 for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++)
630 if (!strcmp(name, s[i].name))
631 return s[i].map.base_addr;
632 return 0;
633}
634
635/*
636 * Saves at boot time configured settings for any netdevice.
637 */
638int __init netdev_boot_setup(char *str)
639{
640 int ints[5];
641 struct ifmap map;
642
643 str = get_options(str, ARRAY_SIZE(ints), ints);
644 if (!str || !*str)
645 return 0;
646
647 /* Save settings */
648 memset(&map, 0, sizeof(map));
649 if (ints[0] > 0)
650 map.irq = ints[1];
651 if (ints[0] > 1)
652 map.base_addr = ints[2];
653 if (ints[0] > 2)
654 map.mem_start = ints[3];
655 if (ints[0] > 3)
656 map.mem_end = ints[4];
657
658 /* Add new entry to the list */
659 return netdev_boot_setup_add(str, &map);
660}
661
662__setup("netdev=", netdev_boot_setup);
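/* Illustrative example of the boot parameter parsed above (the values are
 * made up): "netdev=5,0x300,0,0,eth1" yields map.irq = 5,
 * map.base_addr = 0x300, map.mem_start = 0, map.mem_end = 0, and the
 * remaining string "eth1" becomes the entry name passed to
 * netdev_boot_setup_add().
 */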
663
664/*******************************************************************************
665
666 Device Interface Subroutines
667
668*******************************************************************************/
669
a54acb3a
ND
670/**
 671 * dev_get_iflink - get 'iflink' value of an interface
672 * @dev: targeted interface
673 *
674 * Indicates the ifindex the interface is linked to.
675 * Physical interfaces have the same 'ifindex' and 'iflink' values.
676 */
677
678int dev_get_iflink(const struct net_device *dev)
679{
680 if (dev->netdev_ops && dev->netdev_ops->ndo_get_iflink)
681 return dev->netdev_ops->ndo_get_iflink(dev);
682
7a66bbc9 683 return dev->ifindex;
a54acb3a
ND
684}
685EXPORT_SYMBOL(dev_get_iflink);
686
fc4099f1
PS
687/**
688 * dev_fill_metadata_dst - Retrieve tunnel egress information.
689 * @dev: targeted interface
690 * @skb: The packet.
691 *
 692 * For better visibility of tunnel traffic, OVS needs to retrieve
 693 * egress tunnel information for a packet. The following API allows
 694 * the user to get this info.
695 */
696int dev_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
697{
698 struct ip_tunnel_info *info;
699
700 if (!dev->netdev_ops || !dev->netdev_ops->ndo_fill_metadata_dst)
701 return -EINVAL;
702
703 info = skb_tunnel_info_unclone(skb);
704 if (!info)
705 return -ENOMEM;
706 if (unlikely(!(info->mode & IP_TUNNEL_INFO_TX)))
707 return -EINVAL;
708
709 return dev->netdev_ops->ndo_fill_metadata_dst(dev, skb);
710}
711EXPORT_SYMBOL_GPL(dev_fill_metadata_dst);
712
1da177e4
LT
713/**
714 * __dev_get_by_name - find a device by its name
c4ea43c5 715 * @net: the applicable net namespace
1da177e4
LT
716 * @name: name to find
717 *
718 * Find an interface by name. Must be called under RTNL semaphore
719 * or @dev_base_lock. If the name is found a pointer to the device
720 * is returned. If the name is not found then %NULL is returned. The
721 * reference counters are not incremented so the caller must be
722 * careful with locks.
723 */
724
881d966b 725struct net_device *__dev_get_by_name(struct net *net, const char *name)
1da177e4 726{
0bd8d536
ED
727 struct net_device *dev;
728 struct hlist_head *head = dev_name_hash(net, name);
1da177e4 729
b67bfe0d 730 hlist_for_each_entry(dev, head, name_hlist)
1da177e4
LT
731 if (!strncmp(dev->name, name, IFNAMSIZ))
732 return dev;
0bd8d536 733
1da177e4
LT
734 return NULL;
735}
d1b19dff 736EXPORT_SYMBOL(__dev_get_by_name);
1da177e4 737
72c9528b
ED
738/**
739 * dev_get_by_name_rcu - find a device by its name
740 * @net: the applicable net namespace
741 * @name: name to find
742 *
743 * Find an interface by name.
744 * If the name is found a pointer to the device is returned.
745 * If the name is not found then %NULL is returned.
746 * The reference counters are not incremented so the caller must be
747 * careful with locks. The caller must hold RCU lock.
748 */
749
750struct net_device *dev_get_by_name_rcu(struct net *net, const char *name)
751{
72c9528b
ED
752 struct net_device *dev;
753 struct hlist_head *head = dev_name_hash(net, name);
754
b67bfe0d 755 hlist_for_each_entry_rcu(dev, head, name_hlist)
72c9528b
ED
756 if (!strncmp(dev->name, name, IFNAMSIZ))
757 return dev;
758
759 return NULL;
760}
761EXPORT_SYMBOL(dev_get_by_name_rcu);
762
1da177e4
LT
763/**
764 * dev_get_by_name - find a device by its name
c4ea43c5 765 * @net: the applicable net namespace
1da177e4
LT
766 * @name: name to find
767 *
768 * Find an interface by name. This can be called from any
769 * context and does its own locking. The returned handle has
770 * the usage count incremented and the caller must use dev_put() to
771 * release it when it is no longer needed. %NULL is returned if no
772 * matching device is found.
773 */
774
881d966b 775struct net_device *dev_get_by_name(struct net *net, const char *name)
1da177e4
LT
776{
777 struct net_device *dev;
778
72c9528b
ED
779 rcu_read_lock();
780 dev = dev_get_by_name_rcu(net, name);
1da177e4
LT
781 if (dev)
782 dev_hold(dev);
72c9528b 783 rcu_read_unlock();
1da177e4
LT
784 return dev;
785}
d1b19dff 786EXPORT_SYMBOL(dev_get_by_name);
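/* Illustrative usage sketch (not from the kernel sources): dev_get_by_name()
 * takes a reference that the caller must drop with dev_put() when done.
 * "eth0" is just an example name; error handling is elided.
 */
#if 0
	struct net_device *dev = dev_get_by_name(&init_net, "eth0");

	if (dev) {
		/* ... use dev ... */
		dev_put(dev);
	}
#endif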
1da177e4
LT
787
788/**
789 * __dev_get_by_index - find a device by its ifindex
c4ea43c5 790 * @net: the applicable net namespace
1da177e4
LT
791 * @ifindex: index of device
792 *
793 * Search for an interface by index. Returns %NULL if the device
794 * is not found or a pointer to the device. The device has not
795 * had its reference counter increased so the caller must be careful
796 * about locking. The caller must hold either the RTNL semaphore
797 * or @dev_base_lock.
798 */
799
881d966b 800struct net_device *__dev_get_by_index(struct net *net, int ifindex)
1da177e4 801{
0bd8d536
ED
802 struct net_device *dev;
803 struct hlist_head *head = dev_index_hash(net, ifindex);
1da177e4 804
b67bfe0d 805 hlist_for_each_entry(dev, head, index_hlist)
1da177e4
LT
806 if (dev->ifindex == ifindex)
807 return dev;
0bd8d536 808
1da177e4
LT
809 return NULL;
810}
d1b19dff 811EXPORT_SYMBOL(__dev_get_by_index);
1da177e4 812
fb699dfd
ED
813/**
814 * dev_get_by_index_rcu - find a device by its ifindex
815 * @net: the applicable net namespace
816 * @ifindex: index of device
817 *
818 * Search for an interface by index. Returns %NULL if the device
819 * is not found or a pointer to the device. The device has not
820 * had its reference counter increased so the caller must be careful
821 * about locking. The caller must hold RCU lock.
822 */
823
824struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex)
825{
fb699dfd
ED
826 struct net_device *dev;
827 struct hlist_head *head = dev_index_hash(net, ifindex);
828
b67bfe0d 829 hlist_for_each_entry_rcu(dev, head, index_hlist)
fb699dfd
ED
830 if (dev->ifindex == ifindex)
831 return dev;
832
833 return NULL;
834}
835EXPORT_SYMBOL(dev_get_by_index_rcu);
836
1da177e4
LT
837
838/**
839 * dev_get_by_index - find a device by its ifindex
c4ea43c5 840 * @net: the applicable net namespace
1da177e4
LT
841 * @ifindex: index of device
842 *
843 * Search for an interface by index. Returns NULL if the device
844 * is not found or a pointer to the device. The device returned has
845 * had a reference added and the pointer is safe until the user calls
846 * dev_put to indicate they have finished with it.
847 */
848
881d966b 849struct net_device *dev_get_by_index(struct net *net, int ifindex)
1da177e4
LT
850{
851 struct net_device *dev;
852
fb699dfd
ED
853 rcu_read_lock();
854 dev = dev_get_by_index_rcu(net, ifindex);
1da177e4
LT
855 if (dev)
856 dev_hold(dev);
fb699dfd 857 rcu_read_unlock();
1da177e4
LT
858 return dev;
859}
d1b19dff 860EXPORT_SYMBOL(dev_get_by_index);
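/* Illustrative usage sketch (not from the kernel sources): the _rcu lookup
 * above takes no reference, so the pointer is only valid inside the RCU
 * read-side critical section.  net and ifindex are assumed to be in scope.
 */
#if 0
	struct net_device *dev;

	rcu_read_lock();
	dev = dev_get_by_index_rcu(net, ifindex);
	if (dev)
		netdev_info(dev, "found ifindex %d\n", ifindex);
	rcu_read_unlock();
#endif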
1da177e4 861
5dbe7c17
NS
862/**
863 * netdev_get_name - get a netdevice name, knowing its ifindex.
864 * @net: network namespace
865 * @name: a pointer to the buffer where the name will be stored.
866 * @ifindex: the ifindex of the interface to get the name from.
867 *
868 * The use of raw_seqcount_begin() and cond_resched() before
869 * retrying is required as we want to give the writers a chance
870 * to complete when CONFIG_PREEMPT is not set.
871 */
872int netdev_get_name(struct net *net, char *name, int ifindex)
873{
874 struct net_device *dev;
875 unsigned int seq;
876
877retry:
878 seq = raw_seqcount_begin(&devnet_rename_seq);
879 rcu_read_lock();
880 dev = dev_get_by_index_rcu(net, ifindex);
881 if (!dev) {
882 rcu_read_unlock();
883 return -ENODEV;
884 }
885
886 strcpy(name, dev->name);
887 rcu_read_unlock();
888 if (read_seqcount_retry(&devnet_rename_seq, seq)) {
889 cond_resched();
890 goto retry;
891 }
892
893 return 0;
894}
895
1da177e4 896/**
941666c2 897 * dev_getbyhwaddr_rcu - find a device by its hardware address
c4ea43c5 898 * @net: the applicable net namespace
1da177e4
LT
899 * @type: media type of device
900 * @ha: hardware address
901 *
902 * Search for an interface by MAC address. Returns NULL if the device
c506653d
ED
903 * is not found or a pointer to the device.
904 * The caller must hold RCU or RTNL.
941666c2 905 * The returned device has not had its ref count increased
1da177e4
LT
906 * and the caller must therefore be careful about locking
907 *
1da177e4
LT
908 */
909
941666c2
ED
910struct net_device *dev_getbyhwaddr_rcu(struct net *net, unsigned short type,
911 const char *ha)
1da177e4
LT
912{
913 struct net_device *dev;
914
941666c2 915 for_each_netdev_rcu(net, dev)
1da177e4
LT
916 if (dev->type == type &&
917 !memcmp(dev->dev_addr, ha, dev->addr_len))
7562f876
PE
918 return dev;
919
920 return NULL;
1da177e4 921}
941666c2 922EXPORT_SYMBOL(dev_getbyhwaddr_rcu);
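/* Illustrative usage sketch (not from the kernel sources): callers must hold
 * RCU (or RTNL) across the lookup; "addr" is assumed to point at addr_len
 * bytes of hardware address.
 */
#if 0
	struct net_device *dev;

	rcu_read_lock();
	dev = dev_getbyhwaddr_rcu(net, ARPHRD_ETHER, addr);
	if (dev)
		dev_hold(dev);	/* take a reference before leaving RCU */
	rcu_read_unlock();
#endif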
cf309e3f 923
881d966b 924struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type)
1da177e4
LT
925{
926 struct net_device *dev;
927
4e9cac2b 928 ASSERT_RTNL();
881d966b 929 for_each_netdev(net, dev)
4e9cac2b 930 if (dev->type == type)
7562f876
PE
931 return dev;
932
933 return NULL;
4e9cac2b 934}
4e9cac2b
PM
935EXPORT_SYMBOL(__dev_getfirstbyhwtype);
936
881d966b 937struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type)
4e9cac2b 938{
99fe3c39 939 struct net_device *dev, *ret = NULL;
4e9cac2b 940
99fe3c39
ED
941 rcu_read_lock();
942 for_each_netdev_rcu(net, dev)
943 if (dev->type == type) {
944 dev_hold(dev);
945 ret = dev;
946 break;
947 }
948 rcu_read_unlock();
949 return ret;
1da177e4 950}
1da177e4
LT
951EXPORT_SYMBOL(dev_getfirstbyhwtype);
952
953/**
6c555490 954 * __dev_get_by_flags - find any device with given flags
c4ea43c5 955 * @net: the applicable net namespace
1da177e4
LT
956 * @if_flags: IFF_* values
957 * @mask: bitmask of bits in if_flags to check
958 *
959 * Search for any interface with the given flags. Returns NULL if a device
bb69ae04 960 * is not found or a pointer to the device. Must be called inside
6c555490 961 * rtnl_lock(), and result refcount is unchanged.
1da177e4
LT
962 */
963
6c555490
WC
964struct net_device *__dev_get_by_flags(struct net *net, unsigned short if_flags,
965 unsigned short mask)
1da177e4 966{
7562f876 967 struct net_device *dev, *ret;
1da177e4 968
6c555490
WC
969 ASSERT_RTNL();
970
7562f876 971 ret = NULL;
6c555490 972 for_each_netdev(net, dev) {
1da177e4 973 if (((dev->flags ^ if_flags) & mask) == 0) {
7562f876 974 ret = dev;
1da177e4
LT
975 break;
976 }
977 }
7562f876 978 return ret;
1da177e4 979}
6c555490 980EXPORT_SYMBOL(__dev_get_by_flags);
1da177e4
LT
981
982/**
983 * dev_valid_name - check if name is okay for network device
984 * @name: name string
985 *
 986 * Network device names need to be valid file names
 987 * to allow sysfs to work. We also disallow any kind of
 988 * whitespace.
1da177e4 989 */
95f050bf 990bool dev_valid_name(const char *name)
1da177e4 991{
c7fa9d18 992 if (*name == '\0')
95f050bf 993 return false;
b6fe17d6 994 if (strlen(name) >= IFNAMSIZ)
95f050bf 995 return false;
c7fa9d18 996 if (!strcmp(name, ".") || !strcmp(name, ".."))
95f050bf 997 return false;
c7fa9d18
DM
998
999 while (*name) {
a4176a93 1000 if (*name == '/' || *name == ':' || isspace(*name))
95f050bf 1001 return false;
c7fa9d18
DM
1002 name++;
1003 }
95f050bf 1004 return true;
1da177e4 1005}
d1b19dff 1006EXPORT_SYMBOL(dev_valid_name);
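/* Illustrative examples for the check above: dev_valid_name("eth0") and
 * dev_valid_name("veth%d") are true, while "", ".", "..", "a/b", names
 * containing ':' or whitespace, and names of IFNAMSIZ or more characters
 * are rejected.
 */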
1da177e4
LT
1007
1008/**
b267b179
EB
1009 * __dev_alloc_name - allocate a name for a device
1010 * @net: network namespace to allocate the device name in
1da177e4 1011 * @name: name format string
b267b179 1012 * @buf: scratch buffer and result name string
1da177e4
LT
1013 *
1014 * Passed a format string - eg "lt%d" it will try and find a suitable
3041a069
SH
1015 * id. It scans list of devices to build up a free map, then chooses
1016 * the first empty slot. The caller must hold the dev_base or rtnl lock
1017 * while allocating the name and adding the device in order to avoid
1018 * duplicates.
1019 * Limited to bits_per_byte * page size devices (ie 32K on most platforms).
1020 * Returns the number of the unit assigned or a negative errno code.
1da177e4
LT
1021 */
1022
b267b179 1023static int __dev_alloc_name(struct net *net, const char *name, char *buf)
1da177e4
LT
1024{
1025 int i = 0;
1da177e4
LT
1026 const char *p;
1027 const int max_netdevices = 8*PAGE_SIZE;
cfcabdcc 1028 unsigned long *inuse;
1da177e4
LT
1029 struct net_device *d;
1030
1031 p = strnchr(name, IFNAMSIZ-1, '%');
1032 if (p) {
1033 /*
1034 * Verify the string as this thing may have come from
1035 * the user. There must be either one "%d" and no other "%"
1036 * characters.
1037 */
1038 if (p[1] != 'd' || strchr(p + 2, '%'))
1039 return -EINVAL;
1040
1041 /* Use one page as a bit array of possible slots */
cfcabdcc 1042 inuse = (unsigned long *) get_zeroed_page(GFP_ATOMIC);
1da177e4
LT
1043 if (!inuse)
1044 return -ENOMEM;
1045
881d966b 1046 for_each_netdev(net, d) {
1da177e4
LT
1047 if (!sscanf(d->name, name, &i))
1048 continue;
1049 if (i < 0 || i >= max_netdevices)
1050 continue;
1051
1052 /* avoid cases where sscanf is not exact inverse of printf */
b267b179 1053 snprintf(buf, IFNAMSIZ, name, i);
1da177e4
LT
1054 if (!strncmp(buf, d->name, IFNAMSIZ))
1055 set_bit(i, inuse);
1056 }
1057
1058 i = find_first_zero_bit(inuse, max_netdevices);
1059 free_page((unsigned long) inuse);
1060 }
1061
d9031024
OP
1062 if (buf != name)
1063 snprintf(buf, IFNAMSIZ, name, i);
b267b179 1064 if (!__dev_get_by_name(net, buf))
1da177e4 1065 return i;
1da177e4
LT
1066
1067 /* It is possible to run out of possible slots
1068 * when the name is long and there isn't enough space left
1069 * for the digits, or if all bits are used.
1070 */
1071 return -ENFILE;
1072}
1073
b267b179
EB
1074/**
1075 * dev_alloc_name - allocate a name for a device
1076 * @dev: device
1077 * @name: name format string
1078 *
1079 * Passed a format string - eg "lt%d" it will try and find a suitable
1080 * id. It scans list of devices to build up a free map, then chooses
1081 * the first empty slot. The caller must hold the dev_base or rtnl lock
1082 * while allocating the name and adding the device in order to avoid
1083 * duplicates.
1084 * Limited to bits_per_byte * page size devices (ie 32K on most platforms).
1085 * Returns the number of the unit assigned or a negative errno code.
1086 */
1087
1088int dev_alloc_name(struct net_device *dev, const char *name)
1089{
1090 char buf[IFNAMSIZ];
1091 struct net *net;
1092 int ret;
1093
c346dca1
YH
1094 BUG_ON(!dev_net(dev));
1095 net = dev_net(dev);
b267b179
EB
1096 ret = __dev_alloc_name(net, name, buf);
1097 if (ret >= 0)
1098 strlcpy(dev->name, buf, IFNAMSIZ);
1099 return ret;
1100}
d1b19dff 1101EXPORT_SYMBOL(dev_alloc_name);
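/* Illustrative usage sketch (not from the kernel sources): a driver that
 * wants an automatically numbered name passes a "%d" template before
 * registering the device.  err and out_free are assumed from surrounding
 * driver code; "dummy%d" is a hypothetical template.
 */
#if 0
	err = dev_alloc_name(dev, "dummy%d");	/* e.g. becomes "dummy0" */
	if (err < 0)
		goto out_free;
#endif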
b267b179 1102
828de4f6
G
1103static int dev_alloc_name_ns(struct net *net,
1104 struct net_device *dev,
1105 const char *name)
d9031024 1106{
828de4f6
G
1107 char buf[IFNAMSIZ];
1108 int ret;
8ce6cebc 1109
828de4f6
G
1110 ret = __dev_alloc_name(net, name, buf);
1111 if (ret >= 0)
1112 strlcpy(dev->name, buf, IFNAMSIZ);
1113 return ret;
1114}
1115
1116static int dev_get_valid_name(struct net *net,
1117 struct net_device *dev,
1118 const char *name)
1119{
1120 BUG_ON(!net);
8ce6cebc 1121
d9031024
OP
1122 if (!dev_valid_name(name))
1123 return -EINVAL;
1124
1c5cae81 1125 if (strchr(name, '%'))
828de4f6 1126 return dev_alloc_name_ns(net, dev, name);
d9031024
OP
1127 else if (__dev_get_by_name(net, name))
1128 return -EEXIST;
8ce6cebc
DL
1129 else if (dev->name != name)
1130 strlcpy(dev->name, name, IFNAMSIZ);
d9031024
OP
1131
1132 return 0;
1133}
1da177e4
LT
1134
1135/**
1136 * dev_change_name - change name of a device
1137 * @dev: device
1138 * @newname: name (or format string) must be at least IFNAMSIZ
1139 *
1140 * Change name of a device, can pass format strings "eth%d".
1141 * for wildcarding.
1142 */
cf04a4c7 1143int dev_change_name(struct net_device *dev, const char *newname)
1da177e4 1144{
238fa362 1145 unsigned char old_assign_type;
fcc5a03a 1146 char oldname[IFNAMSIZ];
1da177e4 1147 int err = 0;
fcc5a03a 1148 int ret;
881d966b 1149 struct net *net;
1da177e4
LT
1150
1151 ASSERT_RTNL();
c346dca1 1152 BUG_ON(!dev_net(dev));
1da177e4 1153
c346dca1 1154 net = dev_net(dev);
1da177e4
LT
1155 if (dev->flags & IFF_UP)
1156 return -EBUSY;
1157
30e6c9fa 1158 write_seqcount_begin(&devnet_rename_seq);
c91f6df2
BH
1159
1160 if (strncmp(newname, dev->name, IFNAMSIZ) == 0) {
30e6c9fa 1161 write_seqcount_end(&devnet_rename_seq);
c8d90dca 1162 return 0;
c91f6df2 1163 }
c8d90dca 1164
fcc5a03a
HX
1165 memcpy(oldname, dev->name, IFNAMSIZ);
1166
828de4f6 1167 err = dev_get_valid_name(net, dev, newname);
c91f6df2 1168 if (err < 0) {
30e6c9fa 1169 write_seqcount_end(&devnet_rename_seq);
d9031024 1170 return err;
c91f6df2 1171 }
1da177e4 1172
6fe82a39
VF
1173 if (oldname[0] && !strchr(oldname, '%'))
1174 netdev_info(dev, "renamed from %s\n", oldname);
1175
238fa362
TG
1176 old_assign_type = dev->name_assign_type;
1177 dev->name_assign_type = NET_NAME_RENAMED;
1178
fcc5a03a 1179rollback:
a1b3f594
EB
1180 ret = device_rename(&dev->dev, dev->name);
1181 if (ret) {
1182 memcpy(dev->name, oldname, IFNAMSIZ);
238fa362 1183 dev->name_assign_type = old_assign_type;
30e6c9fa 1184 write_seqcount_end(&devnet_rename_seq);
a1b3f594 1185 return ret;
dcc99773 1186 }
7f988eab 1187
30e6c9fa 1188 write_seqcount_end(&devnet_rename_seq);
c91f6df2 1189
5bb025fa
VF
1190 netdev_adjacent_rename_links(dev, oldname);
1191
7f988eab 1192 write_lock_bh(&dev_base_lock);
372b2312 1193 hlist_del_rcu(&dev->name_hlist);
72c9528b
ED
1194 write_unlock_bh(&dev_base_lock);
1195
1196 synchronize_rcu();
1197
1198 write_lock_bh(&dev_base_lock);
1199 hlist_add_head_rcu(&dev->name_hlist, dev_name_hash(net, dev->name));
7f988eab
HX
1200 write_unlock_bh(&dev_base_lock);
1201
056925ab 1202 ret = call_netdevice_notifiers(NETDEV_CHANGENAME, dev);
fcc5a03a
HX
1203 ret = notifier_to_errno(ret);
1204
1205 if (ret) {
91e9c07b
ED
1206 /* err >= 0 after dev_alloc_name() or stores the first errno */
1207 if (err >= 0) {
fcc5a03a 1208 err = ret;
30e6c9fa 1209 write_seqcount_begin(&devnet_rename_seq);
fcc5a03a 1210 memcpy(dev->name, oldname, IFNAMSIZ);
5bb025fa 1211 memcpy(oldname, newname, IFNAMSIZ);
238fa362
TG
1212 dev->name_assign_type = old_assign_type;
1213 old_assign_type = NET_NAME_RENAMED;
fcc5a03a 1214 goto rollback;
91e9c07b 1215 } else {
7b6cd1ce 1216 pr_err("%s: name change rollback failed: %d\n",
91e9c07b 1217 dev->name, ret);
fcc5a03a
HX
1218 }
1219 }
1da177e4
LT
1220
1221 return err;
1222}
1223
0b815a1a
SH
1224/**
1225 * dev_set_alias - change ifalias of a device
1226 * @dev: device
1227 * @alias: name up to IFALIASZ
f0db275a 1228 * @len: limit of bytes to copy from info
0b815a1a
SH
1229 *
1230 * Set ifalias for a device,
1231 */
1232int dev_set_alias(struct net_device *dev, const char *alias, size_t len)
1233{
7364e445
AK
1234 char *new_ifalias;
1235
0b815a1a
SH
1236 ASSERT_RTNL();
1237
1238 if (len >= IFALIASZ)
1239 return -EINVAL;
1240
96ca4a2c 1241 if (!len) {
388dfc2d
SK
1242 kfree(dev->ifalias);
1243 dev->ifalias = NULL;
96ca4a2c
OH
1244 return 0;
1245 }
1246
7364e445
AK
1247 new_ifalias = krealloc(dev->ifalias, len + 1, GFP_KERNEL);
1248 if (!new_ifalias)
0b815a1a 1249 return -ENOMEM;
7364e445 1250 dev->ifalias = new_ifalias;
0b815a1a
SH
1251
1252 strlcpy(dev->ifalias, alias, len+1);
1253 return len;
1254}
1255
1256
d8a33ac4 1257/**
3041a069 1258 * netdev_features_change - device changes features
d8a33ac4
SH
1259 * @dev: device to cause notification
1260 *
1261 * Called to indicate a device has changed features.
1262 */
1263void netdev_features_change(struct net_device *dev)
1264{
056925ab 1265 call_netdevice_notifiers(NETDEV_FEAT_CHANGE, dev);
d8a33ac4
SH
1266}
1267EXPORT_SYMBOL(netdev_features_change);
1268
1da177e4
LT
1269/**
1270 * netdev_state_change - device changes state
1271 * @dev: device to cause notification
1272 *
1273 * Called to indicate a device has changed state. This function calls
1274 * the notifier chains for netdev_chain and sends a NEWLINK message
1275 * to the routing socket.
1276 */
1277void netdev_state_change(struct net_device *dev)
1278{
1279 if (dev->flags & IFF_UP) {
54951194
LP
1280 struct netdev_notifier_change_info change_info;
1281
1282 change_info.flags_changed = 0;
1283 call_netdevice_notifiers_info(NETDEV_CHANGE, dev,
1284 &change_info.info);
7f294054 1285 rtmsg_ifinfo(RTM_NEWLINK, dev, 0, GFP_KERNEL);
1da177e4
LT
1286 }
1287}
d1b19dff 1288EXPORT_SYMBOL(netdev_state_change);
1da177e4 1289
ee89bab1
AW
1290/**
1291 * netdev_notify_peers - notify network peers about existence of @dev
1292 * @dev: network device
1293 *
1294 * Generate traffic such that interested network peers are aware of
1295 * @dev, such as by generating a gratuitous ARP. This may be used when
1296 * a device wants to inform the rest of the network about some sort of
1297 * reconfiguration such as a failover event or virtual machine
1298 * migration.
1299 */
1300void netdev_notify_peers(struct net_device *dev)
c1da4ac7 1301{
ee89bab1
AW
1302 rtnl_lock();
1303 call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, dev);
1304 rtnl_unlock();
c1da4ac7 1305}
ee89bab1 1306EXPORT_SYMBOL(netdev_notify_peers);
c1da4ac7 1307
bd380811 1308static int __dev_open(struct net_device *dev)
1da177e4 1309{
d314774c 1310 const struct net_device_ops *ops = dev->netdev_ops;
3b8bcfd5 1311 int ret;
1da177e4 1312
e46b66bc
BH
1313 ASSERT_RTNL();
1314
1da177e4
LT
1315 if (!netif_device_present(dev))
1316 return -ENODEV;
1317
ca99ca14
NH
1318 /* Block netpoll from trying to do any rx path servicing.
1319 * If we don't do this there is a chance ndo_poll_controller
1320 * or ndo_poll may be running while we open the device
1321 */
66b5552f 1322 netpoll_poll_disable(dev);
ca99ca14 1323
3b8bcfd5
JB
1324 ret = call_netdevice_notifiers(NETDEV_PRE_UP, dev);
1325 ret = notifier_to_errno(ret);
1326 if (ret)
1327 return ret;
1328
1da177e4 1329 set_bit(__LINK_STATE_START, &dev->state);
bada339b 1330
d314774c
SH
1331 if (ops->ndo_validate_addr)
1332 ret = ops->ndo_validate_addr(dev);
bada339b 1333
d314774c
SH
1334 if (!ret && ops->ndo_open)
1335 ret = ops->ndo_open(dev);
1da177e4 1336
66b5552f 1337 netpoll_poll_enable(dev);
ca99ca14 1338
bada339b
JG
1339 if (ret)
1340 clear_bit(__LINK_STATE_START, &dev->state);
1341 else {
1da177e4 1342 dev->flags |= IFF_UP;
4417da66 1343 dev_set_rx_mode(dev);
1da177e4 1344 dev_activate(dev);
7bf23575 1345 add_device_randomness(dev->dev_addr, dev->addr_len);
1da177e4 1346 }
bada339b 1347
1da177e4
LT
1348 return ret;
1349}
1350
1351/**
bd380811
PM
1352 * dev_open - prepare an interface for use.
1353 * @dev: device to open
1da177e4 1354 *
bd380811
PM
1355 * Takes a device from down to up state. The device's private open
1356 * function is invoked and then the multicast lists are loaded. Finally
1357 * the device is moved into the up state and a %NETDEV_UP message is
1358 * sent to the netdev notifier chain.
1359 *
1360 * Calling this function on an active interface is a nop. On a failure
1361 * a negative errno code is returned.
1da177e4 1362 */
bd380811
PM
1363int dev_open(struct net_device *dev)
1364{
1365 int ret;
1366
bd380811
PM
1367 if (dev->flags & IFF_UP)
1368 return 0;
1369
bd380811
PM
1370 ret = __dev_open(dev);
1371 if (ret < 0)
1372 return ret;
1373
7f294054 1374 rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING, GFP_KERNEL);
bd380811
PM
1375 call_netdevice_notifiers(NETDEV_UP, dev);
1376
1377 return ret;
1378}
1379EXPORT_SYMBOL(dev_open);
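/* Illustrative usage sketch (not from the kernel sources): dev_open() must
 * run under RTNL, as the ASSERT_RTNL() in __dev_open() above enforces.
 */
#if 0
	rtnl_lock();
	err = dev_open(dev);
	rtnl_unlock();
#endif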
1380
44345724 1381static int __dev_close_many(struct list_head *head)
1da177e4 1382{
44345724 1383 struct net_device *dev;
e46b66bc 1384
bd380811 1385 ASSERT_RTNL();
9d5010db
DM
1386 might_sleep();
1387
5cde2829 1388 list_for_each_entry(dev, head, close_list) {
3f4df206 1389 /* Temporarily disable netpoll until the interface is down */
66b5552f 1390 netpoll_poll_disable(dev);
3f4df206 1391
44345724 1392 call_netdevice_notifiers(NETDEV_GOING_DOWN, dev);
1da177e4 1393
44345724 1394 clear_bit(__LINK_STATE_START, &dev->state);
1da177e4 1395
44345724
OP
1396 /* Synchronize to scheduled poll. We cannot touch poll list, it
1397 * can be even on different cpu. So just clear netif_running().
1398 *
 1399 * dev->stop() will invoke napi_disable() on all of its
1400 * napi_struct instances on this device.
1401 */
4e857c58 1402 smp_mb__after_atomic(); /* Commit netif_running(). */
44345724 1403 }
1da177e4 1404
44345724 1405 dev_deactivate_many(head);
d8b2a4d2 1406
5cde2829 1407 list_for_each_entry(dev, head, close_list) {
44345724 1408 const struct net_device_ops *ops = dev->netdev_ops;
1da177e4 1409
44345724
OP
1410 /*
1411 * Call the device specific close. This cannot fail.
1412 * Only if device is UP
1413 *
1414 * We allow it to be called even after a DETACH hot-plug
1415 * event.
1416 */
1417 if (ops->ndo_stop)
1418 ops->ndo_stop(dev);
1419
44345724 1420 dev->flags &= ~IFF_UP;
66b5552f 1421 netpoll_poll_enable(dev);
44345724
OP
1422 }
1423
1424 return 0;
1425}
1426
1427static int __dev_close(struct net_device *dev)
1428{
f87e6f47 1429 int retval;
44345724
OP
1430 LIST_HEAD(single);
1431
5cde2829 1432 list_add(&dev->close_list, &single);
f87e6f47
LT
1433 retval = __dev_close_many(&single);
1434 list_del(&single);
ca99ca14 1435
f87e6f47 1436 return retval;
44345724
OP
1437}
1438
99c4a26a 1439int dev_close_many(struct list_head *head, bool unlink)
44345724
OP
1440{
1441 struct net_device *dev, *tmp;
1da177e4 1442
5cde2829
EB
1443 /* Remove the devices that don't need to be closed */
1444 list_for_each_entry_safe(dev, tmp, head, close_list)
44345724 1445 if (!(dev->flags & IFF_UP))
5cde2829 1446 list_del_init(&dev->close_list);
44345724
OP
1447
1448 __dev_close_many(head);
1da177e4 1449
5cde2829 1450 list_for_each_entry_safe(dev, tmp, head, close_list) {
7f294054 1451 rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING, GFP_KERNEL);
44345724 1452 call_netdevice_notifiers(NETDEV_DOWN, dev);
99c4a26a
DM
1453 if (unlink)
1454 list_del_init(&dev->close_list);
44345724 1455 }
bd380811
PM
1456
1457 return 0;
1458}
99c4a26a 1459EXPORT_SYMBOL(dev_close_many);
bd380811
PM
1460
1461/**
1462 * dev_close - shutdown an interface.
1463 * @dev: device to shutdown
1464 *
1465 * This function moves an active device into down state. A
1466 * %NETDEV_GOING_DOWN is sent to the netdev notifier chain. The device
1467 * is then deactivated and finally a %NETDEV_DOWN is sent to the notifier
1468 * chain.
1469 */
1470int dev_close(struct net_device *dev)
1471{
e14a5993
ED
1472 if (dev->flags & IFF_UP) {
1473 LIST_HEAD(single);
1da177e4 1474
5cde2829 1475 list_add(&dev->close_list, &single);
99c4a26a 1476 dev_close_many(&single, true);
e14a5993
ED
1477 list_del(&single);
1478 }
da6e378b 1479 return 0;
1da177e4 1480}
d1b19dff 1481EXPORT_SYMBOL(dev_close);
1da177e4
LT
1482
1483
0187bdfb
BH
1484/**
1485 * dev_disable_lro - disable Large Receive Offload on a device
1486 * @dev: device
1487 *
1488 * Disable Large Receive Offload (LRO) on a net device. Must be
1489 * called under RTNL. This is needed if received packets may be
1490 * forwarded to another interface.
1491 */
1492void dev_disable_lro(struct net_device *dev)
1493{
fbe168ba
MK
1494 struct net_device *lower_dev;
1495 struct list_head *iter;
529d0489 1496
bc5787c6
MM
1497 dev->wanted_features &= ~NETIF_F_LRO;
1498 netdev_update_features(dev);
27660515 1499
22d5969f
MM
1500 if (unlikely(dev->features & NETIF_F_LRO))
1501 netdev_WARN(dev, "failed to disable LRO!\n");
fbe168ba
MK
1502
1503 netdev_for_each_lower_dev(dev, lower_dev, iter)
1504 dev_disable_lro(lower_dev);
0187bdfb
BH
1505}
1506EXPORT_SYMBOL(dev_disable_lro);
1507
351638e7
JP
1508static int call_netdevice_notifier(struct notifier_block *nb, unsigned long val,
1509 struct net_device *dev)
1510{
1511 struct netdev_notifier_info info;
1512
1513 netdev_notifier_info_init(&info, dev);
1514 return nb->notifier_call(nb, val, &info);
1515}
0187bdfb 1516
881d966b
EB
1517static int dev_boot_phase = 1;
1518
1da177e4
LT
1519/**
1520 * register_netdevice_notifier - register a network notifier block
1521 * @nb: notifier
1522 *
1523 * Register a notifier to be called when network device events occur.
1524 * The notifier passed is linked into the kernel structures and must
1525 * not be reused until it has been unregistered. A negative errno code
1526 * is returned on a failure.
1527 *
1528 * When registered all registration and up events are replayed
4ec93edb 1529 * to the new notifier to allow device to have a race free
1da177e4
LT
1530 * view of the network device list.
1531 */
1532
1533int register_netdevice_notifier(struct notifier_block *nb)
1534{
1535 struct net_device *dev;
fcc5a03a 1536 struct net_device *last;
881d966b 1537 struct net *net;
1da177e4
LT
1538 int err;
1539
1540 rtnl_lock();
f07d5b94 1541 err = raw_notifier_chain_register(&netdev_chain, nb);
fcc5a03a
HX
1542 if (err)
1543 goto unlock;
881d966b
EB
1544 if (dev_boot_phase)
1545 goto unlock;
1546 for_each_net(net) {
1547 for_each_netdev(net, dev) {
351638e7 1548 err = call_netdevice_notifier(nb, NETDEV_REGISTER, dev);
881d966b
EB
1549 err = notifier_to_errno(err);
1550 if (err)
1551 goto rollback;
1552
1553 if (!(dev->flags & IFF_UP))
1554 continue;
1da177e4 1555
351638e7 1556 call_netdevice_notifier(nb, NETDEV_UP, dev);
881d966b 1557 }
1da177e4 1558 }
fcc5a03a
HX
1559
1560unlock:
1da177e4
LT
1561 rtnl_unlock();
1562 return err;
fcc5a03a
HX
1563
1564rollback:
1565 last = dev;
881d966b
EB
1566 for_each_net(net) {
1567 for_each_netdev(net, dev) {
1568 if (dev == last)
8f891489 1569 goto outroll;
fcc5a03a 1570
881d966b 1571 if (dev->flags & IFF_UP) {
351638e7
JP
1572 call_netdevice_notifier(nb, NETDEV_GOING_DOWN,
1573 dev);
1574 call_netdevice_notifier(nb, NETDEV_DOWN, dev);
881d966b 1575 }
351638e7 1576 call_netdevice_notifier(nb, NETDEV_UNREGISTER, dev);
fcc5a03a 1577 }
fcc5a03a 1578 }
c67625a1 1579
8f891489 1580outroll:
c67625a1 1581 raw_notifier_chain_unregister(&netdev_chain, nb);
fcc5a03a 1582 goto unlock;
1da177e4 1583}
d1b19dff 1584EXPORT_SYMBOL(register_netdevice_notifier);
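/* Illustrative usage sketch (not from the kernel sources): a subsystem
 * interested in device events registers a notifier_block; my_netdev_event
 * and my_netdev_notifier are hypothetical names.
 */
#if 0
static int my_netdev_event(struct notifier_block *nb,
			   unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);

	if (event == NETDEV_UP)
		netdev_info(dev, "device is up\n");
	return NOTIFY_DONE;
}

static struct notifier_block my_netdev_notifier = {
	.notifier_call = my_netdev_event,
};

	/* ... */
	register_netdevice_notifier(&my_netdev_notifier);
#endif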
1da177e4
LT
1585
1586/**
1587 * unregister_netdevice_notifier - unregister a network notifier block
1588 * @nb: notifier
1589 *
1590 * Unregister a notifier previously registered by
 1591 * register_netdevice_notifier(). The notifier is unlinked from the
1592 * kernel structures and may then be reused. A negative errno code
1593 * is returned on a failure.
7d3d43da
EB
1594 *
1595 * After unregistering unregister and down device events are synthesized
1596 * for all devices on the device list to the removed notifier to remove
1597 * the need for special case cleanup code.
1da177e4
LT
1598 */
1599
1600int unregister_netdevice_notifier(struct notifier_block *nb)
1601{
7d3d43da
EB
1602 struct net_device *dev;
1603 struct net *net;
9f514950
HX
1604 int err;
1605
1606 rtnl_lock();
f07d5b94 1607 err = raw_notifier_chain_unregister(&netdev_chain, nb);
7d3d43da
EB
1608 if (err)
1609 goto unlock;
1610
1611 for_each_net(net) {
1612 for_each_netdev(net, dev) {
1613 if (dev->flags & IFF_UP) {
351638e7
JP
1614 call_netdevice_notifier(nb, NETDEV_GOING_DOWN,
1615 dev);
1616 call_netdevice_notifier(nb, NETDEV_DOWN, dev);
7d3d43da 1617 }
351638e7 1618 call_netdevice_notifier(nb, NETDEV_UNREGISTER, dev);
7d3d43da
EB
1619 }
1620 }
1621unlock:
9f514950
HX
1622 rtnl_unlock();
1623 return err;
1da177e4 1624}
d1b19dff 1625EXPORT_SYMBOL(unregister_netdevice_notifier);
1da177e4 1626
351638e7
JP
1627/**
1628 * call_netdevice_notifiers_info - call all network notifier blocks
1629 * @val: value passed unmodified to notifier function
1630 * @dev: net_device pointer passed unmodified to notifier function
1631 * @info: notifier information data
1632 *
1633 * Call all network notifier blocks. Parameters and return value
1634 * are as for raw_notifier_call_chain().
1635 */
1636
1d143d9f 1637static int call_netdevice_notifiers_info(unsigned long val,
1638 struct net_device *dev,
1639 struct netdev_notifier_info *info)
351638e7
JP
1640{
1641 ASSERT_RTNL();
1642 netdev_notifier_info_init(info, dev);
1643 return raw_notifier_call_chain(&netdev_chain, val, info);
1644}
351638e7 1645
1da177e4
LT
1646/**
1647 * call_netdevice_notifiers - call all network notifier blocks
1648 * @val: value passed unmodified to notifier function
c4ea43c5 1649 * @dev: net_device pointer passed unmodified to notifier function
1da177e4
LT
1650 *
1651 * Call all network notifier blocks. Parameters and return value
f07d5b94 1652 * are as for raw_notifier_call_chain().
1da177e4
LT
1653 */
1654
ad7379d4 1655int call_netdevice_notifiers(unsigned long val, struct net_device *dev)
1da177e4 1656{
351638e7
JP
1657 struct netdev_notifier_info info;
1658
1659 return call_netdevice_notifiers_info(val, dev, &info);
1da177e4 1660}
edf947f1 1661EXPORT_SYMBOL(call_netdevice_notifiers);
1da177e4 1662
1cf51900 1663#ifdef CONFIG_NET_INGRESS
4577139b
DB
1664static struct static_key ingress_needed __read_mostly;
1665
1666void net_inc_ingress_queue(void)
1667{
1668 static_key_slow_inc(&ingress_needed);
1669}
1670EXPORT_SYMBOL_GPL(net_inc_ingress_queue);
1671
1672void net_dec_ingress_queue(void)
1673{
1674 static_key_slow_dec(&ingress_needed);
1675}
1676EXPORT_SYMBOL_GPL(net_dec_ingress_queue);
1677#endif
1678
1f211a1b
DB
1679#ifdef CONFIG_NET_EGRESS
1680static struct static_key egress_needed __read_mostly;
1681
1682void net_inc_egress_queue(void)
1683{
1684 static_key_slow_inc(&egress_needed);
1685}
1686EXPORT_SYMBOL_GPL(net_inc_egress_queue);
1687
1688void net_dec_egress_queue(void)
1689{
1690 static_key_slow_dec(&egress_needed);
1691}
1692EXPORT_SYMBOL_GPL(net_dec_egress_queue);
1693#endif
1694
c5905afb 1695static struct static_key netstamp_needed __read_mostly;
b90e5794 1696#ifdef HAVE_JUMP_LABEL
c5905afb 1697/* We are not allowed to call static_key_slow_dec() from irq context
b90e5794 1698 * If net_disable_timestamp() is called from irq context, defer the
c5905afb 1699 * static_key_slow_dec() calls.
1700 */
1701static atomic_t netstamp_needed_deferred;
1702#endif
1703
1704void net_enable_timestamp(void)
1705{
1706#ifdef HAVE_JUMP_LABEL
1707 int deferred = atomic_xchg(&netstamp_needed_deferred, 0);
1708
1709 if (deferred) {
1710 while (--deferred)
c5905afb 1711 static_key_slow_dec(&netstamp_needed);
1712 return;
1713 }
1714#endif
c5905afb 1715 static_key_slow_inc(&netstamp_needed);
1da177e4 1716}
d1b19dff 1717EXPORT_SYMBOL(net_enable_timestamp);
1718
1719void net_disable_timestamp(void)
1720{
1721#ifdef HAVE_JUMP_LABEL
1722 if (in_interrupt()) {
1723 atomic_inc(&netstamp_needed_deferred);
1724 return;
1725 }
1726#endif
c5905afb 1727 static_key_slow_dec(&netstamp_needed);
1da177e4 1728}
d1b19dff 1729EXPORT_SYMBOL(net_disable_timestamp);
1da177e4 1730
3b098e2d 1731static inline void net_timestamp_set(struct sk_buff *skb)
1da177e4 1732{
588f0330 1733 skb->tstamp.tv64 = 0;
c5905afb 1734 if (static_key_false(&netstamp_needed))
a61bbcf2 1735 __net_timestamp(skb);
1736}
1737
588f0330 1738#define net_timestamp_check(COND, SKB) \
c5905afb 1739 if (static_key_false(&netstamp_needed)) { \
1740 if ((COND) && !(SKB)->tstamp.tv64) \
1741 __net_timestamp(SKB); \
1742 } \
3b098e2d 1743
1ee481fb 1744bool is_skb_forwardable(struct net_device *dev, struct sk_buff *skb)
1745{
1746 unsigned int len;
1747
1748 if (!(dev->flags & IFF_UP))
1749 return false;
1750
1751 len = dev->mtu + dev->hard_header_len + VLAN_HLEN;
1752 if (skb->len <= len)
1753 return true;
1754
1755 /* if TSO is enabled, we don't care about the length as the packet
1756 * could be forwarded without being segmented first
1757 */
1758 if (skb_is_gso(skb))
1759 return true;
1760
1761 return false;
1762}
1ee481fb 1763EXPORT_SYMBOL_GPL(is_skb_forwardable);
79b569f0 1764
1765int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
1766{
1767 if (skb_orphan_frags(skb, GFP_ATOMIC) ||
1768 unlikely(!is_skb_forwardable(dev, skb))) {
1769 atomic_long_inc(&dev->rx_dropped);
1770 kfree_skb(skb);
1771 return NET_RX_DROP;
1772 }
1773
1774 skb_scrub_packet(skb, true);
08b4b8ea 1775 skb->priority = 0;
a0265d28 1776 skb->protocol = eth_type_trans(skb, dev);
2c26d34b 1777 skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
1778
1779 return 0;
1780}
1781EXPORT_SYMBOL_GPL(__dev_forward_skb);
1782
1783/**
1784 * dev_forward_skb - loopback an skb to another netif
1785 *
1786 * @dev: destination network device
1787 * @skb: buffer to forward
1788 *
1789 * return values:
1790 * NET_RX_SUCCESS (no congestion)
6ec82562 1791 * NET_RX_DROP (packet was dropped, but freed)
1792 *
1793 * dev_forward_skb can be used for injecting an skb from the
1794 * start_xmit function of one device into the receive queue
1795 * of another device.
1796 *
1797 * The receiving device may be in another namespace, so
1798 * we have to clear all information in the skb that could
1799 * impact namespace isolation.
1800 */
1801int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
1802{
a0265d28 1803 return __dev_forward_skb(dev, skb) ?: netif_rx_internal(skb);
1804}
1805EXPORT_SYMBOL_GPL(dev_forward_skb);
1806
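/*
 * Illustrative sketch (not part of dev.c): a veth-style driver could hand a
 * transmitted skb to its peer's receive path from ndo_start_xmit().
 * "example_xmit", "example_priv" and the peer pointer are hypothetical.
 */
#if 0
static netdev_tx_t example_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct net_device *peer = example_priv(dev)->peer;

	if (dev_forward_skb(peer, skb) != NET_RX_SUCCESS)
		dev->stats.tx_dropped++;
	return NETDEV_TX_OK;
}
#endif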
1807static inline int deliver_skb(struct sk_buff *skb,
1808 struct packet_type *pt_prev,
1809 struct net_device *orig_dev)
1810{
1811 if (unlikely(skb_orphan_frags(skb, GFP_ATOMIC)))
1812 return -ENOMEM;
1813 atomic_inc(&skb->users);
1814 return pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
1815}
1816
1817static inline void deliver_ptype_list_skb(struct sk_buff *skb,
1818 struct packet_type **pt,
1819 struct net_device *orig_dev,
1820 __be16 type,
1821 struct list_head *ptype_list)
1822{
1823 struct packet_type *ptype, *pt_prev = *pt;
1824
1825 list_for_each_entry_rcu(ptype, ptype_list, list) {
1826 if (ptype->type != type)
1827 continue;
1828 if (pt_prev)
fbcb2170 1829 deliver_skb(skb, pt_prev, orig_dev);
1830 pt_prev = ptype;
1831 }
1832 *pt = pt_prev;
1833}
1834
1835static inline bool skb_loop_sk(struct packet_type *ptype, struct sk_buff *skb)
1836{
a3d744e9 1837 if (!ptype->af_packet_priv || !skb->sk)
1838 return false;
1839
1840 if (ptype->id_match)
1841 return ptype->id_match(ptype, skb->sk);
1842 else if ((struct sock *)ptype->af_packet_priv == skb->sk)
1843 return true;
1844
1845 return false;
1846}
1847
1848/*
1849 * Support routine. Sends outgoing frames to any network
1850 * taps currently in use.
1851 */
1852
f6a78bfc 1853static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
1854{
1855 struct packet_type *ptype;
1856 struct sk_buff *skb2 = NULL;
1857 struct packet_type *pt_prev = NULL;
7866a621 1858 struct list_head *ptype_list = &ptype_all;
a61bbcf2 1859
1da177e4 1860 rcu_read_lock();
1861again:
1862 list_for_each_entry_rcu(ptype, ptype_list, list) {
1863 /* Never send packets back to the socket
1864 * they originated from - MvS (miquels@drinkel.ow.org)
1865 */
1866 if (skb_loop_sk(ptype, skb))
1867 continue;
71d9dec2 1868
1869 if (pt_prev) {
1870 deliver_skb(skb2, pt_prev, skb->dev);
1871 pt_prev = ptype;
1872 continue;
1873 }
1da177e4 1874
1875 /* need to clone skb, done only once */
1876 skb2 = skb_clone(skb, GFP_ATOMIC);
1877 if (!skb2)
1878 goto out_unlock;
70978182 1879
7866a621 1880 net_timestamp_set(skb2);
1da177e4 1881
1882 /* skb->nh should be correctly
1883 * set by sender, so that the second statement is
1884 * just protection against buggy protocols.
1885 */
1886 skb_reset_mac_header(skb2);
1887
1888 if (skb_network_header(skb2) < skb2->data ||
1889 skb_network_header(skb2) > skb_tail_pointer(skb2)) {
1890 net_crit_ratelimited("protocol %04x is buggy, dev %s\n",
1891 ntohs(skb2->protocol),
1892 dev->name);
1893 skb_reset_network_header(skb2);
1da177e4 1894 }
1895
1896 skb2->transport_header = skb2->network_header;
1897 skb2->pkt_type = PACKET_OUTGOING;
1898 pt_prev = ptype;
1899 }
1900
1901 if (ptype_list == &ptype_all) {
1902 ptype_list = &dev->ptype_all;
1903 goto again;
1da177e4 1904 }
7866a621 1905out_unlock:
1906 if (pt_prev)
1907 pt_prev->func(skb2, skb->dev, pt_prev, skb->dev);
1908 rcu_read_unlock();
1909}
1910
1911/**
1912 * netif_setup_tc - Handle tc mappings on real_num_tx_queues change
1913 * @dev: Network device
1914 * @txq: number of queues available
1915 *
1916 * If real_num_tx_queues is changed the tc mappings may no longer be
1917 * valid. To resolve this verify the tc mapping remains valid and if
1918 * not, zero the mapping. With no priorities mapping to this
1919 * offset/count pair it will no longer be used. In the worst case, if
1920 * TC0 is invalid, nothing can be done, so disable priority mappings.
1921 * It is expected that drivers will fix this mapping if they can before
1922 * calling netif_set_real_num_tx_queues.
1923 */
bb134d22 1924static void netif_setup_tc(struct net_device *dev, unsigned int txq)
1925{
1926 int i;
1927 struct netdev_tc_txq *tc = &dev->tc_to_txq[0];
1928
1929 /* If TC0 is invalidated disable TC mapping */
1930 if (tc->offset + tc->count > txq) {
7b6cd1ce 1931 pr_warn("Number of in use tx queues changed invalidating tc mappings. Priority traffic classification disabled!\n");
1932 dev->num_tc = 0;
1933 return;
1934 }
1935
1936 /* Invalidated prio to tc mappings set to TC0 */
1937 for (i = 1; i < TC_BITMASK + 1; i++) {
1938 int q = netdev_get_prio_tc_map(dev, i);
1939
1940 tc = &dev->tc_to_txq[q];
1941 if (tc->offset + tc->count > txq) {
1942 pr_warn("Number of in use tx queues changed. Priority %i to tc mapping %i is no longer valid. Setting map to 0\n",
1943 i, q);
1944 netdev_set_prio_tc_map(dev, i, 0);
1945 }
1946 }
1947}
1948
1949#ifdef CONFIG_XPS
1950static DEFINE_MUTEX(xps_map_mutex);
1951#define xmap_dereference(P) \
1952 rcu_dereference_protected((P), lockdep_is_held(&xps_map_mutex))
1953
1954static struct xps_map *remove_xps_queue(struct xps_dev_maps *dev_maps,
1955 int cpu, u16 index)
537c00de 1956{
1957 struct xps_map *map = NULL;
1958 int pos;
537c00de 1959
1960 if (dev_maps)
1961 map = xmap_dereference(dev_maps->cpu_map[cpu]);
537c00de 1962
1963 for (pos = 0; map && pos < map->len; pos++) {
1964 if (map->queues[pos] == index) {
1965 if (map->len > 1) {
1966 map->queues[pos] = map->queues[--map->len];
1967 } else {
10cdc3f3 1968 RCU_INIT_POINTER(dev_maps->cpu_map[cpu], NULL);
1969 kfree_rcu(map, rcu);
1970 map = NULL;
1971 }
10cdc3f3 1972 break;
537c00de 1973 }
1974 }
1975
1976 return map;
1977}
1978
024e9679 1979static void netif_reset_xps_queues_gt(struct net_device *dev, u16 index)
1980{
1981 struct xps_dev_maps *dev_maps;
024e9679 1982 int cpu, i;
1983 bool active = false;
1984
1985 mutex_lock(&xps_map_mutex);
1986 dev_maps = xmap_dereference(dev->xps_maps);
1987
1988 if (!dev_maps)
1989 goto out_no_maps;
1990
1991 for_each_possible_cpu(cpu) {
024e9679
AD
1992 for (i = index; i < dev->num_tx_queues; i++) {
1993 if (!remove_xps_queue(dev_maps, cpu, i))
1994 break;
1995 }
1996 if (i == dev->num_tx_queues)
1997 active = true;
1998 }
1999
2000 if (!active) {
2001 RCU_INIT_POINTER(dev->xps_maps, NULL);
2002 kfree_rcu(dev_maps, rcu);
2003 }
2004
2005 for (i = index; i < dev->num_tx_queues; i++)
2006 netdev_queue_numa_node_write(netdev_get_tx_queue(dev, i),
2007 NUMA_NO_NODE);
2008
2009out_no_maps:
2010 mutex_unlock(&xps_map_mutex);
2011}
2012
2013static struct xps_map *expand_xps_map(struct xps_map *map,
2014 int cpu, u16 index)
2015{
2016 struct xps_map *new_map;
2017 int alloc_len = XPS_MIN_MAP_ALLOC;
2018 int i, pos;
2019
2020 for (pos = 0; map && pos < map->len; pos++) {
2021 if (map->queues[pos] != index)
2022 continue;
2023 return map;
2024 }
2025
2026 /* Need to add queue to this CPU's existing map */
2027 if (map) {
2028 if (pos < map->alloc_len)
2029 return map;
2030
2031 alloc_len = map->alloc_len * 2;
2032 }
2033
2034 /* Need to allocate new map to store queue on this CPU's map */
2035 new_map = kzalloc_node(XPS_MAP_SIZE(alloc_len), GFP_KERNEL,
2036 cpu_to_node(cpu));
2037 if (!new_map)
2038 return NULL;
2039
2040 for (i = 0; i < pos; i++)
2041 new_map->queues[i] = map->queues[i];
2042 new_map->alloc_len = alloc_len;
2043 new_map->len = pos;
2044
2045 return new_map;
2046}
2047
2048int netif_set_xps_queue(struct net_device *dev, const struct cpumask *mask,
2049 u16 index)
537c00de 2050{
01c5f864 2051 struct xps_dev_maps *dev_maps, *new_dev_maps = NULL;
537c00de 2052 struct xps_map *map, *new_map;
537c00de 2053 int maps_sz = max_t(unsigned int, XPS_DEV_MAPS_SIZE, L1_CACHE_BYTES);
2054 int cpu, numa_node_id = -2;
2055 bool active = false;
2056
2057 mutex_lock(&xps_map_mutex);
2058
2059 dev_maps = xmap_dereference(dev->xps_maps);
2060
2061 /* allocate memory for queue storage */
2062 for_each_online_cpu(cpu) {
2063 if (!cpumask_test_cpu(cpu, mask))
2064 continue;
2065
2066 if (!new_dev_maps)
2067 new_dev_maps = kzalloc(maps_sz, GFP_KERNEL);
2068 if (!new_dev_maps) {
2069 mutex_unlock(&xps_map_mutex);
01c5f864 2070 return -ENOMEM;
2bb60cb9 2071 }
2072
2073 map = dev_maps ? xmap_dereference(dev_maps->cpu_map[cpu]) :
2074 NULL;
2075
2076 map = expand_xps_map(map, cpu, index);
2077 if (!map)
2078 goto error;
2079
2080 RCU_INIT_POINTER(new_dev_maps->cpu_map[cpu], map);
2081 }
2082
2083 if (!new_dev_maps)
2084 goto out_no_new_maps;
2085
537c00de 2086 for_each_possible_cpu(cpu) {
2087 if (cpumask_test_cpu(cpu, mask) && cpu_online(cpu)) {
2088 /* add queue to CPU maps */
2089 int pos = 0;
2090
2091 map = xmap_dereference(new_dev_maps->cpu_map[cpu]);
2092 while ((pos < map->len) && (map->queues[pos] != index))
2093 pos++;
2094
2095 if (pos == map->len)
2096 map->queues[map->len++] = index;
537c00de 2097#ifdef CONFIG_NUMA
2098 if (numa_node_id == -2)
2099 numa_node_id = cpu_to_node(cpu);
2100 else if (numa_node_id != cpu_to_node(cpu))
2101 numa_node_id = -1;
537c00de 2102#endif
2103 } else if (dev_maps) {
2104 /* fill in the new device map from the old device map */
2105 map = xmap_dereference(dev_maps->cpu_map[cpu]);
2106 RCU_INIT_POINTER(new_dev_maps->cpu_map[cpu], map);
537c00de 2107 }
01c5f864 2108
2109 }
2110
2111 rcu_assign_pointer(dev->xps_maps, new_dev_maps);
2112
537c00de 2113 /* Cleanup old maps */
2114 if (dev_maps) {
2115 for_each_possible_cpu(cpu) {
2116 new_map = xmap_dereference(new_dev_maps->cpu_map[cpu]);
2117 map = xmap_dereference(dev_maps->cpu_map[cpu]);
2118 if (map && map != new_map)
2119 kfree_rcu(map, rcu);
2120 }
537c00de 2121
01c5f864 2122 kfree_rcu(dev_maps, rcu);
2123 }
2124
2125 dev_maps = new_dev_maps;
2126 active = true;
537c00de 2127
2128out_no_new_maps:
2129 /* update Tx queue numa node */
2130 netdev_queue_numa_node_write(netdev_get_tx_queue(dev, index),
2131 (numa_node_id >= 0) ? numa_node_id :
2132 NUMA_NO_NODE);
2133
2134 if (!dev_maps)
2135 goto out_no_maps;
2136
2137 /* removes queue from unused CPUs */
2138 for_each_possible_cpu(cpu) {
2139 if (cpumask_test_cpu(cpu, mask) && cpu_online(cpu))
2140 continue;
2141
2142 if (remove_xps_queue(dev_maps, cpu, index))
2143 active = true;
2144 }
2145
2146 /* free map if not active */
2147 if (!active) {
2148 RCU_INIT_POINTER(dev->xps_maps, NULL);
2149 kfree_rcu(dev_maps, rcu);
2150 }
2151
2152out_no_maps:
2153 mutex_unlock(&xps_map_mutex);
2154
2155 return 0;
2156error:
2157 /* remove any maps that we added */
2158 for_each_possible_cpu(cpu) {
2159 new_map = xmap_dereference(new_dev_maps->cpu_map[cpu]);
2160 map = dev_maps ? xmap_dereference(dev_maps->cpu_map[cpu]) :
2161 NULL;
2162 if (new_map && new_map != map)
2163 kfree(new_map);
2164 }
2165
2166 mutex_unlock(&xps_map_mutex);
2167
2168 kfree(new_dev_maps);
2169 return -ENOMEM;
2170}
2171EXPORT_SYMBOL(netif_set_xps_queue);
2172
2173#endif
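/*
 * Illustrative sketch (not part of dev.c): a multiqueue driver could pin
 * each TX queue to one CPU with netif_set_xps_queue() at probe time. The
 * cpumask helpers are standard kernel APIs; the placement policy and the
 * surrounding probe function are hypothetical.
 */
#if 0
	cpumask_var_t mask;
	int i;

	if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;
	for (i = 0; i < dev->real_num_tx_queues; i++) {
		cpumask_clear(mask);
		cpumask_set_cpu(i % num_online_cpus(), mask);
		netif_set_xps_queue(dev, mask, i);
	}
	free_cpumask_var(mask);
#endif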
2174/*
2175 * Routine to help set real_num_tx_queues. To avoid skbs mapped to queues
2176 * greater than real_num_tx_queues, stale skbs on the qdisc must be flushed.
2177 */
e6484930 2178int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq)
f0796d5c 2179{
2180 int rc;
2181
2182 if (txq < 1 || txq > dev->num_tx_queues)
2183 return -EINVAL;
f0796d5c 2184
2185 if (dev->reg_state == NETREG_REGISTERED ||
2186 dev->reg_state == NETREG_UNREGISTERING) {
2187 ASSERT_RTNL();
2188
2189 rc = netdev_queue_update_kobjects(dev, dev->real_num_tx_queues,
2190 txq);
2191 if (rc)
2192 return rc;
2193
2194 if (dev->num_tc)
2195 netif_setup_tc(dev, txq);
2196
024e9679 2197 if (txq < dev->real_num_tx_queues) {
e6484930 2198 qdisc_reset_all_tx_gt(dev, txq);
2199#ifdef CONFIG_XPS
2200 netif_reset_xps_queues_gt(dev, txq);
2201#endif
2202 }
f0796d5c 2203 }
2204
2205 dev->real_num_tx_queues = txq;
2206 return 0;
2207}
2208EXPORT_SYMBOL(netif_set_real_num_tx_queues);
56079431 2209
a953be53 2210#ifdef CONFIG_SYSFS
2211/**
2212 * netif_set_real_num_rx_queues - set actual number of RX queues used
2213 * @dev: Network device
2214 * @rxq: Actual number of RX queues
2215 *
2216 * This must be called either with the rtnl_lock held or before
2217 * registration of the net device. Returns 0 on success, or a
2218 * negative error code. If called before registration, it always
2219 * succeeds.
2220 */
2221int netif_set_real_num_rx_queues(struct net_device *dev, unsigned int rxq)
2222{
2223 int rc;
2224
2225 if (rxq < 1 || rxq > dev->num_rx_queues)
2226 return -EINVAL;
2227
2228 if (dev->reg_state == NETREG_REGISTERED) {
2229 ASSERT_RTNL();
2230
2231 rc = net_rx_queue_update_kobjects(dev, dev->real_num_rx_queues,
2232 rxq);
2233 if (rc)
2234 return rc;
2235 }
2236
2237 dev->real_num_rx_queues = rxq;
2238 return 0;
2239}
2240EXPORT_SYMBOL(netif_set_real_num_rx_queues);
2241#endif
2242
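/*
 * Illustrative sketch (not part of dev.c): after discovering how many queues
 * the hardware actually enabled, a driver shrinks the visible queue counts
 * under RTNL. "hw_txqs" and "hw_rxqs" are hypothetical.
 */
#if 0
	rtnl_lock();
	err = netif_set_real_num_tx_queues(dev, hw_txqs);
	if (!err)
		err = netif_set_real_num_rx_queues(dev, hw_rxqs);
	rtnl_unlock();
#endif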
2243/**
2244 * netif_get_num_default_rss_queues - default number of RSS queues
2245 *
2246 * This routine should set an upper limit on the number of RSS queues
2247 * used by default by multiqueue devices.
2248 */
a55b138b 2249int netif_get_num_default_rss_queues(void)
2250{
2251 return min_t(int, DEFAULT_MAX_NUM_RSS_QUEUES, num_online_cpus());
2252}
2253EXPORT_SYMBOL(netif_get_num_default_rss_queues);
2254
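/*
 * Illustrative sketch (not part of dev.c): drivers typically clamp their RSS
 * queue count to this default rather than one queue per online CPU.
 * "EXAMPLE_HW_MAX_QUEUES" is hypothetical.
 */
#if 0
	num_rss = min_t(int, netif_get_num_default_rss_queues(),
			EXAMPLE_HW_MAX_QUEUES);
#endif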
def82a1d 2255static inline void __netif_reschedule(struct Qdisc *q)
56079431 2256{
def82a1d
JP
2257 struct softnet_data *sd;
2258 unsigned long flags;
56079431 2259
def82a1d 2260 local_irq_save(flags);
903ceff7 2261 sd = this_cpu_ptr(&softnet_data);
2262 q->next_sched = NULL;
2263 *sd->output_queue_tailp = q;
2264 sd->output_queue_tailp = &q->next_sched;
2265 raise_softirq_irqoff(NET_TX_SOFTIRQ);
2266 local_irq_restore(flags);
2267}
2268
2269void __netif_schedule(struct Qdisc *q)
2270{
2271 if (!test_and_set_bit(__QDISC_STATE_SCHED, &q->state))
2272 __netif_reschedule(q);
2273}
2274EXPORT_SYMBOL(__netif_schedule);
2275
2276struct dev_kfree_skb_cb {
2277 enum skb_free_reason reason;
2278};
2279
2280static struct dev_kfree_skb_cb *get_kfree_skb_cb(const struct sk_buff *skb)
56079431 2281{
2282 return (struct dev_kfree_skb_cb *)skb->cb;
2283}
2284
2285void netif_schedule_queue(struct netdev_queue *txq)
2286{
2287 rcu_read_lock();
2288 if (!(txq->state & QUEUE_STATE_ANY_XOFF)) {
2289 struct Qdisc *q = rcu_dereference(txq->qdisc);
2290
2291 __netif_schedule(q);
2292 }
2293 rcu_read_unlock();
2294}
2295EXPORT_SYMBOL(netif_schedule_queue);
2296
2297/**
2298 * netif_wake_subqueue - allow sending packets on subqueue
2299 * @dev: network device
2300 * @queue_index: sub queue index
2301 *
2302 * Resume individual transmit queue of a device with multiple transmit queues.
2303 */
2304void netif_wake_subqueue(struct net_device *dev, u16 queue_index)
2305{
2306 struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
2307
2308 if (test_and_clear_bit(__QUEUE_STATE_DRV_XOFF, &txq->state)) {
2309 struct Qdisc *q;
2310
2311 rcu_read_lock();
2312 q = rcu_dereference(txq->qdisc);
2313 __netif_schedule(q);
2314 rcu_read_unlock();
2315 }
2316}
2317EXPORT_SYMBOL(netif_wake_subqueue);
2318
2319void netif_tx_wake_queue(struct netdev_queue *dev_queue)
2320{
2321 if (test_and_clear_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state)) {
2322 struct Qdisc *q;
2323
2324 rcu_read_lock();
2325 q = rcu_dereference(dev_queue->qdisc);
2326 __netif_schedule(q);
2327 rcu_read_unlock();
2328 }
2329}
2330EXPORT_SYMBOL(netif_tx_wake_queue);
2331
e6247027 2332void __dev_kfree_skb_irq(struct sk_buff *skb, enum skb_free_reason reason)
56079431 2333{
e6247027 2334 unsigned long flags;
56079431 2335
2336 if (likely(atomic_read(&skb->users) == 1)) {
2337 smp_rmb();
2338 atomic_set(&skb->users, 0);
2339 } else if (likely(!atomic_dec_and_test(&skb->users))) {
2340 return;
bea3348e 2341 }
2342 get_kfree_skb_cb(skb)->reason = reason;
2343 local_irq_save(flags);
2344 skb->next = __this_cpu_read(softnet_data.completion_queue);
2345 __this_cpu_write(softnet_data.completion_queue, skb);
2346 raise_softirq_irqoff(NET_TX_SOFTIRQ);
2347 local_irq_restore(flags);
56079431 2348}
e6247027 2349EXPORT_SYMBOL(__dev_kfree_skb_irq);
56079431 2350
e6247027 2351void __dev_kfree_skb_any(struct sk_buff *skb, enum skb_free_reason reason)
2352{
2353 if (in_irq() || irqs_disabled())
e6247027 2354 __dev_kfree_skb_irq(skb, reason);
2355 else
2356 dev_kfree_skb(skb);
2357}
e6247027 2358EXPORT_SYMBOL(__dev_kfree_skb_any);
2359
2360
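/*
 * Illustrative sketch (not part of dev.c): a TX completion handler that may
 * run in hard-irq context frees skbs through the _any helpers (which land in
 * __dev_kfree_skb_irq() above when IRQs are off) rather than kfree_skb().
 * The "example_" ring and iterator are hypothetical.
 */
#if 0
static void example_clean_tx_irq(struct example_ring *ring)
{
	struct sk_buff *skb;

	while ((skb = example_next_completed(ring)) != NULL)
		dev_consume_skb_any(skb);	/* delivered, not dropped */
}
#endif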
2361/**
2362 * netif_device_detach - mark device as removed
2363 * @dev: network device
2364 *
2365 * Mark device as removed from system and therefore no longer available.
2366 */
2367void netif_device_detach(struct net_device *dev)
2368{
2369 if (test_and_clear_bit(__LINK_STATE_PRESENT, &dev->state) &&
2370 netif_running(dev)) {
d543103a 2371 netif_tx_stop_all_queues(dev);
2372 }
2373}
2374EXPORT_SYMBOL(netif_device_detach);
2375
2376/**
2377 * netif_device_attach - mark device as attached
2378 * @dev: network device
2379 *
2380 * Mark device as attached to the system and restart if needed.
2381 */
2382void netif_device_attach(struct net_device *dev)
2383{
2384 if (!test_and_set_bit(__LINK_STATE_PRESENT, &dev->state) &&
2385 netif_running(dev)) {
d543103a 2386 netif_tx_wake_all_queues(dev);
4ec93edb 2387 __netdev_watchdog_up(dev);
2388 }
2389}
2390EXPORT_SYMBOL(netif_device_attach);
2391
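/*
 * Illustrative sketch (not part of dev.c): suspend/resume hooks commonly
 * bracket the power transition with detach/attach so the stack stops handing
 * the driver packets while the hardware is down. "example_" names are
 * hypothetical.
 */
#if 0
static int example_suspend(struct device *d)
{
	struct net_device *dev = dev_get_drvdata(d);

	netif_device_detach(dev);
	/* ... quiesce hardware ... */
	return 0;
}

static int example_resume(struct device *d)
{
	struct net_device *dev = dev_get_drvdata(d);

	/* ... re-initialize hardware ... */
	netif_device_attach(dev);
	return 0;
}
#endif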
2392/*
2393 * Returns a Tx hash based on the given packet descriptor and the number of
2394 * Tx queues to be used as a distribution range.
2395 */
2396u16 __skb_tx_hash(const struct net_device *dev, struct sk_buff *skb,
2397 unsigned int num_tx_queues)
2398{
2399 u32 hash;
2400 u16 qoffset = 0;
2401 u16 qcount = num_tx_queues;
2402
2403 if (skb_rx_queue_recorded(skb)) {
2404 hash = skb_get_rx_queue(skb);
2405 while (unlikely(hash >= num_tx_queues))
2406 hash -= num_tx_queues;
2407 return hash;
2408 }
2409
2410 if (dev->num_tc) {
2411 u8 tc = netdev_get_prio_tc_map(dev, skb->priority);
2412 qoffset = dev->tc_to_txq[tc].offset;
2413 qcount = dev->tc_to_txq[tc].count;
2414 }
2415
2416 return (u16) reciprocal_scale(skb_get_hash(skb), qcount) + qoffset;
2417}
2418EXPORT_SYMBOL(__skb_tx_hash);
2419
2420static void skb_warn_bad_offload(const struct sk_buff *skb)
2421{
65e9d2fa 2422 static const netdev_features_t null_features = 0;
36c92474 2423 struct net_device *dev = skb->dev;
88ad4175 2424 const char *name = "";
36c92474 2425
2426 if (!net_ratelimit())
2427 return;
2428
2429 if (dev) {
2430 if (dev->dev.parent)
2431 name = dev_driver_string(dev->dev.parent);
2432 else
2433 name = netdev_name(dev);
2434 }
2435 WARN(1, "%s: caps=(%pNF, %pNF) len=%d data_len=%d gso_size=%d "
2436 "gso_type=%d ip_summed=%d\n",
88ad4175 2437 name, dev ? &dev->features : &null_features,
65e9d2fa 2438 skb->sk ? &skb->sk->sk_route_caps : &null_features,
2439 skb->len, skb->data_len, skb_shinfo(skb)->gso_size,
2440 skb_shinfo(skb)->gso_type, skb->ip_summed);
2441}
2442
2443/*
2444 * Invalidate hardware checksum when packet is to be mangled, and
2445 * complete checksum manually on outgoing path.
2446 */
84fa7933 2447int skb_checksum_help(struct sk_buff *skb)
1da177e4 2448{
d3bc23e7 2449 __wsum csum;
663ead3b 2450 int ret = 0, offset;
1da177e4 2451
84fa7933 2452 if (skb->ip_summed == CHECKSUM_COMPLETE)
2453 goto out_set_summed;
2454
2455 if (unlikely(skb_shinfo(skb)->gso_size)) {
2456 skb_warn_bad_offload(skb);
2457 return -EINVAL;
2458 }
2459
2460 /* Before computing a checksum, we should make sure no frag could
2461 * be modified by an external entity : checksum could be wrong.
2462 */
2463 if (skb_has_shared_frag(skb)) {
2464 ret = __skb_linearize(skb);
2465 if (ret)
2466 goto out;
2467 }
2468
55508d60 2469 offset = skb_checksum_start_offset(skb);
2470 BUG_ON(offset >= skb_headlen(skb));
2471 csum = skb_checksum(skb, offset, skb->len - offset, 0);
2472
2473 offset += skb->csum_offset;
2474 BUG_ON(offset + sizeof(__sum16) > skb_headlen(skb));
2475
2476 if (skb_cloned(skb) &&
2477 !skb_clone_writable(skb, offset + sizeof(__sum16))) {
2478 ret = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
2479 if (ret)
2480 goto out;
2481 }
2482
a030847e 2483 *(__sum16 *)(skb->data + offset) = csum_fold(csum);
a430a43d 2484out_set_summed:
1da177e4 2485 skb->ip_summed = CHECKSUM_NONE;
4ec93edb 2486out:
2487 return ret;
2488}
d1b19dff 2489EXPORT_SYMBOL(skb_checksum_help);
1da177e4 2490
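/*
 * Illustrative sketch (not part of dev.c): a driver whose hardware cannot
 * checksum a given packet falls back to resolving CHECKSUM_PARTIAL in
 * software before queueing the frame. "example_hw_can_csum" is hypothetical.
 */
#if 0
	if (skb->ip_summed == CHECKSUM_PARTIAL &&
	    !example_hw_can_csum(skb) && skb_checksum_help(skb))
		goto drop;	/* could not complete the checksum */
#endif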
2491/* __skb_csum_offload_chk - Driver helper function to determine if a device
2492 * with limited checksum offload capabilities is able to offload the checksum
2493 * for a given packet.
2494 *
2495 * Arguments:
2496 * skb - sk_buff for the packet in question
2497 * spec - contains the description of what device can offload
2498 * csum_encapped - returns true if the checksum being offloaded is
2499 * encapsulated. That is, it is the checksum for the transport header
2500 * in the inner headers.
2501 * checksum_help - when set indicates that helper function should
2502 * call skb_checksum_help if offload checks fail
2503 *
2504 * Returns:
2505 * true: Packet has passed the checksum checks and should be offloadable to
2506 * the device (a driver may still need to check for additional
2507 * restrictions of its device)
2508 * false: Checksum is not offloadable. If checksum_help was set then
2509 * skb_checksum_help was called to resolve checksum for non-GSO
2510 * packets and when IP protocol is not SCTP
2511 */
2512bool __skb_csum_offload_chk(struct sk_buff *skb,
2513 const struct skb_csum_offl_spec *spec,
2514 bool *csum_encapped,
2515 bool csum_help)
2516{
2517 struct iphdr *iph;
2518 struct ipv6hdr *ipv6;
2519 void *nhdr;
2520 int protocol;
2521 u8 ip_proto;
2522
2523 if (skb->protocol == htons(ETH_P_8021Q) ||
2524 skb->protocol == htons(ETH_P_8021AD)) {
2525 if (!spec->vlan_okay)
2526 goto need_help;
2527 }
2528
2529 /* We check whether the checksum refers to a transport layer checksum in
2530 * the outermost header or an encapsulated transport layer checksum that
2531 * corresponds to the inner headers of the skb. If the checksum is for
2532 * something else in the packet we need help.
2533 */
2534 if (skb_checksum_start_offset(skb) == skb_transport_offset(skb)) {
2535 /* Non-encapsulated checksum */
2536 protocol = eproto_to_ipproto(vlan_get_protocol(skb));
2537 nhdr = skb_network_header(skb);
2538 *csum_encapped = false;
2539 if (spec->no_not_encapped)
2540 goto need_help;
2541 } else if (skb->encapsulation && spec->encap_okay &&
2542 skb_checksum_start_offset(skb) ==
2543 skb_inner_transport_offset(skb)) {
2544 /* Encapsulated checksum */
2545 *csum_encapped = true;
2546 switch (skb->inner_protocol_type) {
2547 case ENCAP_TYPE_ETHER:
2548 protocol = eproto_to_ipproto(skb->inner_protocol);
2549 break;
2550 case ENCAP_TYPE_IPPROTO:
2551 protocol = skb->inner_protocol;
2552 break;
2553 }
2554 nhdr = skb_inner_network_header(skb);
2555 } else {
2556 goto need_help;
2557 }
2558
2559 switch (protocol) {
2560 case IPPROTO_IP:
2561 if (!spec->ipv4_okay)
2562 goto need_help;
2563 iph = nhdr;
2564 ip_proto = iph->protocol;
2565 if (iph->ihl != 5 && !spec->ip_options_okay)
2566 goto need_help;
2567 break;
2568 case IPPROTO_IPV6:
2569 if (!spec->ipv6_okay)
2570 goto need_help;
2571 if (spec->no_encapped_ipv6 && *csum_encapped)
2572 goto need_help;
2573 ipv6 = nhdr;
2574 nhdr += sizeof(*ipv6);
2575 ip_proto = ipv6->nexthdr;
2576 break;
2577 default:
2578 goto need_help;
2579 }
2580
2581ip_proto_again:
2582 switch (ip_proto) {
2583 case IPPROTO_TCP:
2584 if (!spec->tcp_okay ||
2585 skb->csum_offset != offsetof(struct tcphdr, check))
2586 goto need_help;
2587 break;
2588 case IPPROTO_UDP:
2589 if (!spec->udp_okay ||
2590 skb->csum_offset != offsetof(struct udphdr, check))
2591 goto need_help;
2592 break;
2593 case IPPROTO_SCTP:
2594 if (!spec->sctp_okay ||
2595 skb->csum_offset != offsetof(struct sctphdr, checksum))
2596 goto cant_help;
2597 break;
2598 case NEXTHDR_HOP:
2599 case NEXTHDR_ROUTING:
2600 case NEXTHDR_DEST: {
2601 u8 *opthdr = nhdr;
2602
2603 if (protocol != IPPROTO_IPV6 || !spec->ext_hdrs_okay)
2604 goto need_help;
2605
2606 ip_proto = opthdr[0];
2607 nhdr += (opthdr[1] + 1) << 3;
2608
2609 goto ip_proto_again;
2610 }
2611 default:
2612 goto need_help;
2613 }
2614
2615 /* Passed the tests for offloading checksum */
2616 return true;
2617
2618need_help:
2619 if (csum_help && !skb_shinfo(skb)->gso_size)
2620 skb_checksum_help(skb);
2621cant_help:
2622 return false;
2623}
2624EXPORT_SYMBOL(__skb_csum_offload_chk);
2625
53d6471c 2626__be16 skb_network_protocol(struct sk_buff *skb, int *depth)
f6a78bfc 2627{
252e3346 2628 __be16 type = skb->protocol;
f6a78bfc 2629
2630 /* Tunnel gso handlers can set protocol to ethernet. */
2631 if (type == htons(ETH_P_TEB)) {
2632 struct ethhdr *eth;
2633
2634 if (unlikely(!pskb_may_pull(skb, sizeof(struct ethhdr))))
2635 return 0;
2636
2637 eth = (struct ethhdr *)skb_mac_header(skb);
2638 type = eth->h_proto;
2639 }
2640
d4bcef3f 2641 return __vlan_get_protocol(skb, type, depth);
2642}
2643
2644/**
2645 * skb_mac_gso_segment - mac layer segmentation handler.
2646 * @skb: buffer to segment
2647 * @features: features for the output path (see dev->features)
2648 */
2649struct sk_buff *skb_mac_gso_segment(struct sk_buff *skb,
2650 netdev_features_t features)
2651{
2652 struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
2653 struct packet_offload *ptype;
2654 int vlan_depth = skb->mac_len;
2655 __be16 type = skb_network_protocol(skb, &vlan_depth);
2656
2657 if (unlikely(!type))
2658 return ERR_PTR(-EINVAL);
2659
53d6471c 2660 __skb_pull(skb, vlan_depth);
2661
2662 rcu_read_lock();
22061d80 2663 list_for_each_entry_rcu(ptype, &offload_base, list) {
f191a1d1 2664 if (ptype->type == type && ptype->callbacks.gso_segment) {
f191a1d1 2665 segs = ptype->callbacks.gso_segment(skb, features);
2666 break;
2667 }
2668 }
2669 rcu_read_unlock();
2670
98e399f8 2671 __skb_push(skb, skb->data - skb_mac_header(skb));
576a30eb 2672
2673 return segs;
2674}
2675EXPORT_SYMBOL(skb_mac_gso_segment);
2676
2677
2678/* openvswitch calls this on rx path, so we need a different check.
2679 */
2680static inline bool skb_needs_check(struct sk_buff *skb, bool tx_path)
2681{
2682 if (tx_path)
2683 return skb->ip_summed != CHECKSUM_PARTIAL;
2684 else
2685 return skb->ip_summed == CHECKSUM_NONE;
2686}
2687
2688/**
2689 * __skb_gso_segment - Perform segmentation on skb.
2690 * @skb: buffer to segment
2691 * @features: features for the output path (see dev->features)
2692 * @tx_path: whether it is called in TX path
2693 *
2694 * This function segments the given skb and returns a list of segments.
2695 *
2696 * It may return NULL if the skb requires no segmentation. This is
2697 * only possible when GSO is used for verifying header integrity.
2698 *
2699 * Segmentation preserves SKB_SGO_CB_OFFSET bytes of previous skb cb.
2700 */
2701struct sk_buff *__skb_gso_segment(struct sk_buff *skb,
2702 netdev_features_t features, bool tx_path)
2703{
2704 if (unlikely(skb_needs_check(skb, tx_path))) {
2705 int err;
2706
2707 skb_warn_bad_offload(skb);
2708
a40e0a66 2709 err = skb_cow_head(skb, 0);
2710 if (err < 0)
2711 return ERR_PTR(err);
2712 }
2713
2714 BUILD_BUG_ON(SKB_SGO_CB_OFFSET +
2715 sizeof(*SKB_GSO_CB(skb)) > sizeof(skb->cb));
2716
68c33163 2717 SKB_GSO_CB(skb)->mac_offset = skb_headroom(skb);
2718 SKB_GSO_CB(skb)->encap_level = 0;
2719
2720 skb_reset_mac_header(skb);
2721 skb_reset_mac_len(skb);
2722
2723 return skb_mac_gso_segment(skb, features);
2724}
12b0004d 2725EXPORT_SYMBOL(__skb_gso_segment);
f6a78bfc 2726
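/*
 * Illustrative sketch (not part of dev.c): a driver or tunnel that cannot
 * transmit a GSO skb as-is segments it first and sends each piece. The
 * "example_xmit_one" helper is hypothetical.
 */
#if 0
	struct sk_buff *segs;

	segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);
	if (IS_ERR(segs))
		goto drop;
	if (segs) {			/* NULL means no segmentation needed */
		consume_skb(skb);
		skb = segs;
	}
	while (skb) {
		struct sk_buff *next = skb->next;

		skb->next = NULL;
		example_xmit_one(skb);
		skb = next;
	}
#endif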
2727/* Take action when hardware reception checksum errors are detected. */
2728#ifdef CONFIG_BUG
2729void netdev_rx_csum_fault(struct net_device *dev)
2730{
2731 if (net_ratelimit()) {
7b6cd1ce 2732 pr_err("%s: hw csum failure\n", dev ? dev->name : "<unknown>");
2733 dump_stack();
2734 }
2735}
2736EXPORT_SYMBOL(netdev_rx_csum_fault);
2737#endif
2738
2739/* Actually, we should eliminate this check as soon as we know that:
2740 * 1. An IOMMU is present and allows all of the memory to be mapped.
2741 * 2. No high memory really exists on this machine.
2742 */
2743
c1e756bf 2744static int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
1da177e4 2745{
3d3a8533 2746#ifdef CONFIG_HIGHMEM
1da177e4 2747 int i;
5acbbd42 2748 if (!(dev->features & NETIF_F_HIGHDMA)) {
2749 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2750 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2751 if (PageHighMem(skb_frag_page(frag)))
5acbbd42 2752 return 1;
ea2ab693 2753 }
5acbbd42 2754 }
1da177e4 2755
2756 if (PCI_DMA_BUS_IS_PHYS) {
2757 struct device *pdev = dev->dev.parent;
1da177e4 2758
2759 if (!pdev)
2760 return 0;
5acbbd42 2761 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2762 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2763 dma_addr_t addr = page_to_phys(skb_frag_page(frag));
2764 if (!pdev->dma_mask || addr + PAGE_SIZE - 1 > *pdev->dma_mask)
2765 return 1;
2766 }
2767 }
3d3a8533 2768#endif
2769 return 0;
2770}
1da177e4 2771
2772/* If MPLS offload request, verify we are testing hardware MPLS features
2773 * instead of standard features for the netdev.
2774 */
d0edc7bf 2775#if IS_ENABLED(CONFIG_NET_MPLS_GSO)
2776static netdev_features_t net_mpls_features(struct sk_buff *skb,
2777 netdev_features_t features,
2778 __be16 type)
2779{
25cd9ba0 2780 if (eth_p_mpls(type))
2781 features &= skb->dev->mpls_features;
2782
2783 return features;
2784}
2785#else
2786static netdev_features_t net_mpls_features(struct sk_buff *skb,
2787 netdev_features_t features,
2788 __be16 type)
2789{
2790 return features;
2791}
2792#endif
2793
c8f44aff 2794static netdev_features_t harmonize_features(struct sk_buff *skb,
c1e756bf 2795 netdev_features_t features)
f01a5236 2796{
53d6471c 2797 int tmp;
2798 __be16 type;
2799
2800 type = skb_network_protocol(skb, &tmp);
2801 features = net_mpls_features(skb, features, type);
53d6471c 2802
c0d680e5 2803 if (skb->ip_summed != CHECKSUM_NONE &&
3b392ddb 2804 !can_checksum_protocol(features, type)) {
a188222b 2805 features &= ~NETIF_F_CSUM_MASK;
c1e756bf 2806 } else if (illegal_highdma(skb->dev, skb)) {
2807 features &= ~NETIF_F_SG;
2808 }
2809
2810 return features;
2811}
2812
2813netdev_features_t passthru_features_check(struct sk_buff *skb,
2814 struct net_device *dev,
2815 netdev_features_t features)
2816{
2817 return features;
2818}
2819EXPORT_SYMBOL(passthru_features_check);
2820
2821static netdev_features_t dflt_features_check(const struct sk_buff *skb,
2822 struct net_device *dev,
2823 netdev_features_t features)
2824{
2825 return vlan_features_check(skb, features);
2826}
2827
c1e756bf 2828netdev_features_t netif_skb_features(struct sk_buff *skb)
58e998c6 2829{
5f35227e 2830 struct net_device *dev = skb->dev;
2831 netdev_features_t features = dev->features;
2832 u16 gso_segs = skb_shinfo(skb)->gso_segs;
58e998c6 2833
fcbeb976 2834 if (gso_segs > dev->gso_max_segs || gso_segs < dev->gso_min_segs)
2835 features &= ~NETIF_F_GSO_MASK;
2836
2837 /* If encapsulation offload request, verify we are testing
2838 * hardware encapsulation features instead of standard
2839 * features for the netdev
2840 */
2841 if (skb->encapsulation)
2842 features &= dev->hw_enc_features;
2843
2844 if (skb_vlan_tagged(skb))
2845 features = netdev_intersect_features(features,
2846 dev->vlan_features |
2847 NETIF_F_HW_VLAN_CTAG_TX |
2848 NETIF_F_HW_VLAN_STAG_TX);
f01a5236 2849
2850 if (dev->netdev_ops->ndo_features_check)
2851 features &= dev->netdev_ops->ndo_features_check(skb, dev,
2852 features);
2853 else
2854 features &= dflt_features_check(skb, dev, features);
5f35227e 2855
c1e756bf 2856 return harmonize_features(skb, features);
58e998c6 2857}
c1e756bf 2858EXPORT_SYMBOL(netif_skb_features);
58e998c6 2859
2ea25513 2860static int xmit_one(struct sk_buff *skb, struct net_device *dev,
95f6b3dd 2861 struct netdev_queue *txq, bool more)
f6a78bfc 2862{
2863 unsigned int len;
2864 int rc;
00829823 2865
7866a621 2866 if (!list_empty(&ptype_all) || !list_empty(&dev->ptype_all))
2ea25513 2867 dev_queue_xmit_nit(skb, dev);
fc741216 2868
2869 len = skb->len;
2870 trace_net_dev_start_xmit(skb, dev);
95f6b3dd 2871 rc = netdev_start_xmit(skb, dev, txq, more);
2ea25513 2872 trace_net_dev_xmit(skb, rc, dev, len);
adf30907 2873
2874 return rc;
2875}
7b9c6090 2876
2877struct sk_buff *dev_hard_start_xmit(struct sk_buff *first, struct net_device *dev,
2878 struct netdev_queue *txq, int *ret)
2879{
2880 struct sk_buff *skb = first;
2881 int rc = NETDEV_TX_OK;
7b9c6090 2882
2883 while (skb) {
2884 struct sk_buff *next = skb->next;
fc70fb64 2885
7f2e870f 2886 skb->next = NULL;
95f6b3dd 2887 rc = xmit_one(skb, dev, txq, next != NULL);
2888 if (unlikely(!dev_xmit_complete(rc))) {
2889 skb->next = next;
2890 goto out;
2891 }
6afff0ca 2892
2893 skb = next;
2894 if (netif_xmit_stopped(txq) && skb) {
2895 rc = NETDEV_TX_BUSY;
2896 break;
9ccb8975 2897 }
7f2e870f 2898 }
9ccb8975 2899
2900out:
2901 *ret = rc;
2902 return skb;
2903}
b40863c6 2904
2905static struct sk_buff *validate_xmit_vlan(struct sk_buff *skb,
2906 netdev_features_t features)
f6a78bfc 2907{
df8a39de 2908 if (skb_vlan_tag_present(skb) &&
2909 !vlan_hw_offload_capable(features, skb->vlan_proto))
2910 skb = __vlan_hwaccel_push_inside(skb);
2911 return skb;
2912}
f6a78bfc 2913
55a93b3e 2914static struct sk_buff *validate_xmit_skb(struct sk_buff *skb, struct net_device *dev)
2915{
2916 netdev_features_t features;
f6a78bfc 2917
2918 if (skb->next)
2919 return skb;
068a2de5 2920
2921 features = netif_skb_features(skb);
2922 skb = validate_xmit_vlan(skb, features);
2923 if (unlikely(!skb))
2924 goto out_null;
7b9c6090 2925
8b86a61d 2926 if (netif_needs_gso(skb, features)) {
2927 struct sk_buff *segs;
2928
2929 segs = skb_gso_segment(skb, features);
cecda693 2930 if (IS_ERR(segs)) {
af6dabc9 2931 goto out_kfree_skb;
2932 } else if (segs) {
2933 consume_skb(skb);
2934 skb = segs;
f6a78bfc 2935 }
2936 } else {
2937 if (skb_needs_linearize(skb, features) &&
2938 __skb_linearize(skb))
2939 goto out_kfree_skb;
4ec93edb 2940
2941 /* If packet is not checksummed and device does not
2942 * support checksumming for this protocol, complete
2943 * checksumming here.
2944 */
2945 if (skb->ip_summed == CHECKSUM_PARTIAL) {
2946 if (skb->encapsulation)
2947 skb_set_inner_transport_header(skb,
2948 skb_checksum_start_offset(skb));
2949 else
2950 skb_set_transport_header(skb,
2951 skb_checksum_start_offset(skb));
a188222b 2952 if (!(features & NETIF_F_CSUM_MASK) &&
2953 skb_checksum_help(skb))
2954 goto out_kfree_skb;
7b9c6090 2955 }
0c772159 2956 }
7b9c6090 2957
eae3f88e 2958 return skb;
fc70fb64 2959
2960out_kfree_skb:
2961 kfree_skb(skb);
2962out_null:
2963 return NULL;
2964}
6afff0ca 2965
2966struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev)
2967{
2968 struct sk_buff *next, *head = NULL, *tail;
2969
bec3cfdc 2970 for (; skb != NULL; skb = next) {
2971 next = skb->next;
2972 skb->next = NULL;
2973
2974 /* in case skb won't be segmented, point to itself */
2975 skb->prev = skb;
2976
55a93b3e 2977 skb = validate_xmit_skb(skb, dev);
2978 if (!skb)
2979 continue;
55a93b3e 2980
2981 if (!head)
2982 head = skb;
2983 else
2984 tail->next = skb;
2985 /* If skb was segmented, skb->prev points to
2986 * the last segment. If not, it still contains skb.
2987 */
2988 tail = skb->prev;
2989 }
2990 return head;
2991}
2992
2993static void qdisc_pkt_len_init(struct sk_buff *skb)
2994{
2995 const struct skb_shared_info *shinfo = skb_shinfo(skb);
2996
2997 qdisc_skb_cb(skb)->pkt_len = skb->len;
2998
2999 /* To get a more precise estimate of the bytes sent on the wire,
3000 * we add to pkt_len the header size of all segments
3001 */
3002 if (shinfo->gso_size) {
757b8b1d 3003 unsigned int hdr_len;
15e5a030 3004 u16 gso_segs = shinfo->gso_segs;
1def9238 3005
3006 /* mac layer + network layer */
3007 hdr_len = skb_transport_header(skb) - skb_mac_header(skb);
3008
3009 /* + transport layer */
1def9238
ED
3010 if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)))
3011 hdr_len += tcp_hdrlen(skb);
3012 else
3013 hdr_len += sizeof(struct udphdr);
3014
3015 if (shinfo->gso_type & SKB_GSO_DODGY)
3016 gso_segs = DIV_ROUND_UP(skb->len - hdr_len,
3017 shinfo->gso_size);
3018
3019 qdisc_skb_cb(skb)->pkt_len += (gso_segs - 1) * hdr_len;
3020 }
3021}
3022
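/*
 * Worked example (illustrative numbers): a TCP GSO skb with skb->len = 65754,
 * gso_size = 1460 and hdr_len = 54 (14 ethernet + 20 IPv4 + 20 TCP) carries
 * gso_segs = 45, so qdisc_pkt_len_init() above reports
 * pkt_len = 65754 + (45 - 1) * 54 = 68130 bytes, i.e. roughly what will hit
 * the wire once the skb has been segmented.
 */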
3023static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
3024 struct net_device *dev,
3025 struct netdev_queue *txq)
3026{
3027 spinlock_t *root_lock = qdisc_lock(q);
a2da570d 3028 bool contended;
3029 int rc;
3030
a2da570d 3031 qdisc_calculate_pkt_len(skb, q);
3032 /*
3033 * Heuristic to force contended enqueues to serialize on a
3034 * separate lock before trying to get qdisc main lock.
3035 * This permits __QDISC___STATE_RUNNING owner to get the lock more
3036 * often and dequeue packets faster.
79640a4c 3037 */
a2da570d 3038 contended = qdisc_is_running(q);
3039 if (unlikely(contended))
3040 spin_lock(&q->busylock);
3041
3042 spin_lock(root_lock);
3043 if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) {
3044 kfree_skb(skb);
3045 rc = NET_XMIT_DROP;
3046 } else if ((q->flags & TCQ_F_CAN_BYPASS) && !qdisc_qlen(q) &&
bc135b23 3047 qdisc_run_begin(q)) {
3048 /*
3049 * This is a work-conserving queue; there are no old skbs
3050 * waiting to be sent out; and the qdisc is not running -
3051 * xmit the skb directly.
3052 */
ED
3054 qdisc_bstats_update(q, skb);
3055
55a93b3e 3056 if (sch_direct_xmit(skb, q, dev, txq, root_lock, true)) {
3057 if (unlikely(contended)) {
3058 spin_unlock(&q->busylock);
3059 contended = false;
3060 }
bbd8a0d3 3061 __qdisc_run(q);
79640a4c 3062 } else
bc135b23 3063 qdisc_run_end(q);
3064
3065 rc = NET_XMIT_SUCCESS;
3066 } else {
a2da570d 3067 rc = q->enqueue(skb, q) & NET_XMIT_MASK;
3068 if (qdisc_run_begin(q)) {
3069 if (unlikely(contended)) {
3070 spin_unlock(&q->busylock);
3071 contended = false;
3072 }
3073 __qdisc_run(q);
3074 }
3075 }
3076 spin_unlock(root_lock);
3077 if (unlikely(contended))
3078 spin_unlock(&q->busylock);
3079 return rc;
3080}
3081
86f8515f 3082#if IS_ENABLED(CONFIG_CGROUP_NET_PRIO)
3083static void skb_update_prio(struct sk_buff *skb)
3084{
6977a79d 3085 struct netprio_map *map = rcu_dereference_bh(skb->dev->priomap);
5bc1421e 3086
91c68ce2 3087 if (!skb->priority && skb->sk && map) {
3088 unsigned int prioidx =
3089 sock_cgroup_prioidx(&skb->sk->sk_cgrp_data);
3090
3091 if (prioidx < map->priomap_len)
3092 skb->priority = map->priomap[prioidx];
3093 }
3094}
3095#else
3096#define skb_update_prio(skb)
3097#endif
3098
f60e5990 3099DEFINE_PER_CPU(int, xmit_recursion);
3100EXPORT_SYMBOL(xmit_recursion);
3101
11a766ce 3102#define RECURSION_LIMIT 10
745e20f1 3103
3104/**
3105 * dev_loopback_xmit - loop back @skb
3106 * @net: network namespace this loopback is happening in
3107 * @sk: sk needed to be a netfilter okfn
3108 * @skb: buffer to transmit
3109 */
0c4b51f0 3110int dev_loopback_xmit(struct net *net, struct sock *sk, struct sk_buff *skb)
3111{
3112 skb_reset_mac_header(skb);
3113 __skb_pull(skb, skb_network_offset(skb));
3114 skb->pkt_type = PACKET_LOOPBACK;
3115 skb->ip_summed = CHECKSUM_UNNECESSARY;
3116 WARN_ON(!skb_dst(skb));
3117 skb_dst_force(skb);
3118 netif_rx_ni(skb);
3119 return 0;
3120}
3121EXPORT_SYMBOL(dev_loopback_xmit);
3122
3123#ifdef CONFIG_NET_EGRESS
3124static struct sk_buff *
3125sch_handle_egress(struct sk_buff *skb, int *ret, struct net_device *dev)
3126{
3127 struct tcf_proto *cl = rcu_dereference_bh(dev->egress_cl_list);
3128 struct tcf_result cl_res;
3129
3130 if (!cl)
3131 return skb;
3132
3133 /* skb->tc_verd and qdisc_skb_cb(skb)->pkt_len were already set
3134 * earlier by the caller.
3135 */
3136 qdisc_bstats_cpu_update(cl->q, skb);
3137
3138 switch (tc_classify(skb, cl, &cl_res, false)) {
3139 case TC_ACT_OK:
3140 case TC_ACT_RECLASSIFY:
3141 skb->tc_index = TC_H_MIN(cl_res.classid);
3142 break;
3143 case TC_ACT_SHOT:
3144 qdisc_qstats_cpu_drop(cl->q);
3145 *ret = NET_XMIT_DROP;
3146 goto drop;
3147 case TC_ACT_STOLEN:
3148 case TC_ACT_QUEUED:
3149 *ret = NET_XMIT_SUCCESS;
3150drop:
3151 kfree_skb(skb);
3152 return NULL;
3153 case TC_ACT_REDIRECT:
3154 /* No need to push/pop skb's mac_header here on egress! */
3155 skb_do_redirect(skb);
3156 *ret = NET_XMIT_SUCCESS;
3157 return NULL;
3158 default:
3159 break;
3160 }
3161
3162 return skb;
3163}
3164#endif /* CONFIG_NET_EGRESS */
3165
3166static inline int get_xps_queue(struct net_device *dev, struct sk_buff *skb)
3167{
3168#ifdef CONFIG_XPS
3169 struct xps_dev_maps *dev_maps;
3170 struct xps_map *map;
3171 int queue_index = -1;
3172
3173 rcu_read_lock();
3174 dev_maps = rcu_dereference(dev->xps_maps);
3175 if (dev_maps) {
3176 map = rcu_dereference(
3177 dev_maps->cpu_map[skb->sender_cpu - 1]);
3178 if (map) {
3179 if (map->len == 1)
3180 queue_index = map->queues[0];
3181 else
3182 queue_index = map->queues[reciprocal_scale(skb_get_hash(skb),
3183 map->len)];
3184 if (unlikely(queue_index >= dev->real_num_tx_queues))
3185 queue_index = -1;
3186 }
3187 }
3188 rcu_read_unlock();
3189
3190 return queue_index;
3191#else
3192 return -1;
3193#endif
3194}
3195
3196static u16 __netdev_pick_tx(struct net_device *dev, struct sk_buff *skb)
3197{
3198 struct sock *sk = skb->sk;
3199 int queue_index = sk_tx_queue_get(sk);
3200
3201 if (queue_index < 0 || skb->ooo_okay ||
3202 queue_index >= dev->real_num_tx_queues) {
3203 int new_index = get_xps_queue(dev, skb);
3204 if (new_index < 0)
3205 new_index = skb_tx_hash(dev, skb);
3206
3207 if (queue_index != new_index && sk &&
004a5d01 3208 sk_fullsock(sk) &&
3209 rcu_access_pointer(sk->sk_dst_cache))
3210 sk_tx_queue_set(sk, new_index);
3211
3212 queue_index = new_index;
3213 }
3214
3215 return queue_index;
3216}
3217
3218struct netdev_queue *netdev_pick_tx(struct net_device *dev,
3219 struct sk_buff *skb,
3220 void *accel_priv)
3221{
3222 int queue_index = 0;
3223
3224#ifdef CONFIG_XPS
3225 u32 sender_cpu = skb->sender_cpu - 1;
3226
3227 if (sender_cpu >= (u32)NR_CPUS)
3228 skb->sender_cpu = raw_smp_processor_id() + 1;
3229#endif
3230
3231 if (dev->real_num_tx_queues != 1) {
3232 const struct net_device_ops *ops = dev->netdev_ops;
3233 if (ops->ndo_select_queue)
3234 queue_index = ops->ndo_select_queue(dev, skb, accel_priv,
3235 __netdev_pick_tx);
3236 else
3237 queue_index = __netdev_pick_tx(dev, skb);
3238
3239 if (!accel_priv)
3240 queue_index = netdev_cap_txqueue(dev, queue_index);
3241 }
3242
3243 skb_set_queue_mapping(skb, queue_index);
3244 return netdev_get_tx_queue(dev, queue_index);
3245}
3246
d29f749e 3247/**
9d08dd3d 3248 * __dev_queue_xmit - transmit a buffer
d29f749e 3249 * @skb: buffer to transmit
9d08dd3d 3250 * @accel_priv: private data used for L2 forwarding offload
3251 *
3252 * Queue a buffer for transmission to a network device. The caller must
3253 * have set the device and priority and built the buffer before calling
3254 * this function. The function can be called from an interrupt.
3255 *
3256 * A negative errno code is returned on a failure. A success does not
3257 * guarantee the frame will be transmitted as it may be dropped due
3258 * to congestion or traffic shaping.
3259 *
3260 * -----------------------------------------------------------------------------------
3261 * I notice this method can also return errors from the queue disciplines,
3262 * including NET_XMIT_DROP, which is a positive value. So, errors can also
3263 * be positive.
3264 *
3265 * Regardless of the return value, the skb is consumed, so it is currently
3266 * difficult to retry a send to this method. (You can bump the ref count
3267 * before sending to hold a reference for retry if you are careful.)
3268 *
3269 * When calling this method, interrupts MUST be enabled. This is because
3270 * the BH enable code must have IRQs enabled so that it will not deadlock.
3271 * --BLG
3272 */
0a59f3a9 3273static int __dev_queue_xmit(struct sk_buff *skb, void *accel_priv)
3274{
3275 struct net_device *dev = skb->dev;
dc2b4847 3276 struct netdev_queue *txq;
3277 struct Qdisc *q;
3278 int rc = -ENOMEM;
3279
3280 skb_reset_mac_header(skb);
3281
3282 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_SCHED_TSTAMP))
3283 __skb_tstamp_tx(skb, NULL, skb->sk, SCM_TSTAMP_SCHED);
3284
3285 /* Disable soft irqs for various locks below. Also
3286 * stops preemption for RCU.
1da177e4 3287 */
4ec93edb 3288 rcu_read_lock_bh();
1da177e4 3289
3290 skb_update_prio(skb);
3291
3292 qdisc_pkt_len_init(skb);
3293#ifdef CONFIG_NET_CLS_ACT
3294 skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_EGRESS);
3295# ifdef CONFIG_NET_EGRESS
3296 if (static_key_false(&egress_needed)) {
3297 skb = sch_handle_egress(skb, &rc, dev);
3298 if (!skb)
3299 goto out;
3300 }
3301# endif
3302#endif
3303 /* If device/qdisc don't need skb->dst, release it right now while
3304 * it's still hot in this cpu's cache.
3305 */
3306 if (dev->priv_flags & IFF_XMIT_DST_RELEASE)
3307 skb_dst_drop(skb);
3308 else
3309 skb_dst_force(skb);
3310
3311#ifdef CONFIG_NET_SWITCHDEV
3312 /* Don't forward if offload device already forwarded */
3313 if (skb->offload_fwd_mark &&
3314 skb->offload_fwd_mark == dev->offload_fwd_mark) {
3315 consume_skb(skb);
3316 rc = NET_XMIT_SUCCESS;
3317 goto out;
3318 }
3319#endif
3320
f663dd9a 3321 txq = netdev_pick_tx(dev, skb, accel_priv);
a898def2 3322 q = rcu_dereference_bh(txq->qdisc);
37437bb2 3323
cf66ba58 3324 trace_net_dev_queue(skb);
1da177e4 3325 if (q->enqueue) {
bbd8a0d3 3326 rc = __dev_xmit_skb(skb, q, dev, txq);
37437bb2 3327 goto out;
3328 }
3329
3330 /* The device has no queue. Common case for software devices:
3331 loopback, all sorts of tunnels...
3332
3333 Really, it is unlikely that netif_tx_lock protection is necessary
3334 here. (f.e. loopback and IP tunnels are clean ignoring statistics
3335 counters.)
3336 However, it is possible that they rely on the protection
3337 made by us here.
3338
3339 Check this and take the lock. It is not prone to deadlocks.
3340 Either way, a noqueue qdisc is even simpler 8)
3341 */
3342 if (dev->flags & IFF_UP) {
3343 int cpu = smp_processor_id(); /* ok because BHs are off */
3344
c773e847 3345 if (txq->xmit_lock_owner != cpu) {
1da177e4 3346
3347 if (__this_cpu_read(xmit_recursion) > RECURSION_LIMIT)
3348 goto recursion_alert;
3349
3350 skb = validate_xmit_skb(skb, dev);
3351 if (!skb)
3352 goto drop;
3353
c773e847 3354 HARD_TX_LOCK(dev, txq, cpu);
1da177e4 3355
73466498 3356 if (!netif_xmit_stopped(txq)) {
745e20f1 3357 __this_cpu_inc(xmit_recursion);
ce93718f 3358 skb = dev_hard_start_xmit(skb, dev, txq, &rc);
745e20f1 3359 __this_cpu_dec(xmit_recursion);
572a9d7b 3360 if (dev_xmit_complete(rc)) {
c773e847 3361 HARD_TX_UNLOCK(dev, txq);
3362 goto out;
3363 }
3364 }
c773e847 3365 HARD_TX_UNLOCK(dev, txq);
3366 net_crit_ratelimited("Virtual device %s asks to queue packet!\n",
3367 dev->name);
3368 } else {
3369 /* Recursion is detected! It is possible,
3370 * unfortunately
3371 */
3372recursion_alert:
3373 net_crit_ratelimited("Dead loop on virtual device %s, fix it urgently!\n",
3374 dev->name);
3375 }
3376 }
3377
3378 rc = -ENETDOWN;
1f59533f 3379drop:
d4828d85 3380 rcu_read_unlock_bh();
1da177e4 3381
015f0688 3382 atomic_long_inc(&dev->tx_dropped);
1f59533f 3383 kfree_skb_list(skb);
3384 return rc;
3385out:
d4828d85 3386 rcu_read_unlock_bh();
3387 return rc;
3388}
f663dd9a 3389
2b4aa3ce 3390int dev_queue_xmit(struct sk_buff *skb)
3391{
3392 return __dev_queue_xmit(skb, NULL);
3393}
2b4aa3ce 3394EXPORT_SYMBOL(dev_queue_xmit);
1da177e4 3395
3396int dev_queue_xmit_accel(struct sk_buff *skb, void *accel_priv)
3397{
3398 return __dev_queue_xmit(skb, accel_priv);
3399}
3400EXPORT_SYMBOL(dev_queue_xmit_accel);
3401
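/*
 * Illustrative sketch (not part of dev.c): a packet generated inside the
 * kernel is handed to the qdisc/driver layer by setting skb->dev, building
 * the link-layer header and calling dev_queue_xmit(); the skb is consumed
 * regardless of the return value. "dest_mac" is hypothetical.
 */
#if 0
	skb->dev = dev;
	skb->protocol = htons(ETH_P_IP);
	if (dev_hard_header(skb, dev, ETH_P_IP, dest_mac, NULL, skb->len) < 0) {
		kfree_skb(skb);
		return;
	}
	dev_queue_xmit(skb);
#endif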
3402
3403/*=======================================================================
3404 Receiver routines
3405 =======================================================================*/
3406
6b2bedc3 3407int netdev_max_backlog __read_mostly = 1000;
3408EXPORT_SYMBOL(netdev_max_backlog);
3409
3b098e2d 3410int netdev_tstamp_prequeue __read_mostly = 1;
3411int netdev_budget __read_mostly = 300;
3412int weight_p __read_mostly = 64; /* old backlog weight */
1da177e4 3413
3414/* Called with irq disabled */
3415static inline void ____napi_schedule(struct softnet_data *sd,
3416 struct napi_struct *napi)
3417{
3418 list_add_tail(&napi->poll_list, &sd->poll_list);
3419 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
3420}
3421
3422#ifdef CONFIG_RPS
3423
3424/* One global table that all flow-based protocols share. */
6e3f7faf 3425struct rps_sock_flow_table __rcu *rps_sock_flow_table __read_mostly;
bfb564e7 3426EXPORT_SYMBOL(rps_sock_flow_table);
3427u32 rps_cpu_mask __read_mostly;
3428EXPORT_SYMBOL(rps_cpu_mask);
bfb564e7 3429
c5905afb 3430struct static_key rps_needed __read_mostly;
adc9300e 3431
3432static struct rps_dev_flow *
3433set_rps_cpu(struct net_device *dev, struct sk_buff *skb,
3434 struct rps_dev_flow *rflow, u16 next_cpu)
3435{
a31196b0 3436 if (next_cpu < nr_cpu_ids) {
3437#ifdef CONFIG_RFS_ACCEL
3438 struct netdev_rx_queue *rxqueue;
3439 struct rps_dev_flow_table *flow_table;
3440 struct rps_dev_flow *old_rflow;
3441 u32 flow_id;
3442 u16 rxq_index;
3443 int rc;
3444
3445 /* Should we steer this flow to a different hardware queue? */
3446 if (!skb_rx_queue_recorded(skb) || !dev->rx_cpu_rmap ||
3447 !(dev->features & NETIF_F_NTUPLE))
3448 goto out;
3449 rxq_index = cpu_rmap_lookup_index(dev->rx_cpu_rmap, next_cpu);
3450 if (rxq_index == skb_get_rx_queue(skb))
3451 goto out;
3452
3453 rxqueue = dev->_rx + rxq_index;
3454 flow_table = rcu_dereference(rxqueue->rps_flow_table);
3455 if (!flow_table)
3456 goto out;
61b905da 3457 flow_id = skb_get_hash(skb) & flow_table->mask;
3458 rc = dev->netdev_ops->ndo_rx_flow_steer(dev, skb,
3459 rxq_index, flow_id);
3460 if (rc < 0)
3461 goto out;
3462 old_rflow = rflow;
3463 rflow = &flow_table->flows[flow_id];
3464 rflow->filter = rc;
3465 if (old_rflow->filter == rflow->filter)
3466 old_rflow->filter = RPS_NO_FILTER;
3467 out:
3468#endif
3469 rflow->last_qtail =
09994d1b 3470 per_cpu(softnet_data, next_cpu).input_queue_head;
3471 }
3472
09994d1b 3473 rflow->cpu = next_cpu;
3474 return rflow;
3475}
3476
3477/*
3478 * get_rps_cpu is called from netif_receive_skb and returns the target
3479 * CPU from the RPS map of the receiving queue for a given skb.
3480 * rcu_read_lock must be held on entry.
3481 */
3482static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
3483 struct rps_dev_flow **rflowp)
3484{
3485 const struct rps_sock_flow_table *sock_flow_table;
3486 struct netdev_rx_queue *rxqueue = dev->_rx;
bfb564e7 3487 struct rps_dev_flow_table *flow_table;
567e4b79 3488 struct rps_map *map;
bfb564e7 3489 int cpu = -1;
567e4b79 3490 u32 tcpu;
61b905da 3491 u32 hash;
3492
3493 if (skb_rx_queue_recorded(skb)) {
3494 u16 index = skb_get_rx_queue(skb);
3496 if (unlikely(index >= dev->real_num_rx_queues)) {
3497 WARN_ONCE(dev->real_num_rx_queues > 1,
3498 "%s received packet on queue %u, but number "
3499 "of RX queues is %u\n",
3500 dev->name, index, dev->real_num_rx_queues);
3501 goto done;
3502 }
3503 rxqueue += index;
3504 }
bfb564e7 3505
567e4b79
ED
3506 /* Avoid computing hash if RFS/RPS is not active for this rxqueue */
3507
3508 flow_table = rcu_dereference(rxqueue->rps_flow_table);
6e3f7faf 3509 map = rcu_dereference(rxqueue->rps_map);
567e4b79 3510 if (!flow_table && !map)
bfb564e7
KK
3511 goto done;
3512
2d47b459 3513 skb_reset_network_header(skb);
61b905da
TH
3514 hash = skb_get_hash(skb);
3515 if (!hash)
bfb564e7
KK
3516 goto done;
3517
fec5e652
TH
3518 sock_flow_table = rcu_dereference(rps_sock_flow_table);
3519 if (flow_table && sock_flow_table) {
fec5e652 3520 struct rps_dev_flow *rflow;
567e4b79
ED
3521 u32 next_cpu;
3522 u32 ident;
3523
3524 /* First check into global flow table if there is a match */
3525 ident = sock_flow_table->ents[hash & sock_flow_table->mask];
3526 if ((ident ^ hash) & ~rps_cpu_mask)
3527 goto try_rps;
fec5e652 3528
567e4b79
ED
3529 next_cpu = ident & rps_cpu_mask;
3530
3531 /* OK, now we know there is a match,
3532 * we can look at the local (per receive queue) flow table
3533 */
61b905da 3534 rflow = &flow_table->flows[hash & flow_table->mask];
fec5e652
TH
3535 tcpu = rflow->cpu;
3536
fec5e652
TH
3537 /*
3538 * If the desired CPU (where last recvmsg was done) is
3539 * different from current CPU (one in the rx-queue flow
3540 * table entry), switch if one of the following holds:
a31196b0 3541 * - Current CPU is unset (>= nr_cpu_ids).
fec5e652
TH
3542 * - Current CPU is offline.
3543 * - The current CPU's queue tail has advanced beyond the
3544 * last packet that was enqueued using this table entry.
3545 * This guarantees that all previous packets for the flow
 3546 * have been dequeued, thus preserving in-order delivery.
3547 */
3548 if (unlikely(tcpu != next_cpu) &&
a31196b0 3549 (tcpu >= nr_cpu_ids || !cpu_online(tcpu) ||
fec5e652 3550 ((int)(per_cpu(softnet_data, tcpu).input_queue_head -
baefa31d
TH
3551 rflow->last_qtail)) >= 0)) {
3552 tcpu = next_cpu;
c445477d 3553 rflow = set_rps_cpu(dev, skb, rflow, next_cpu);
baefa31d 3554 }
c445477d 3555
a31196b0 3556 if (tcpu < nr_cpu_ids && cpu_online(tcpu)) {
fec5e652
TH
3557 *rflowp = rflow;
3558 cpu = tcpu;
3559 goto done;
3560 }
3561 }
3562
567e4b79
ED
3563try_rps:
3564
0a9627f2 3565 if (map) {
8fc54f68 3566 tcpu = map->cpus[reciprocal_scale(hash, map->len)];
0a9627f2
TH
3567 if (cpu_online(tcpu)) {
3568 cpu = tcpu;
3569 goto done;
3570 }
3571 }
3572
3573done:
0a9627f2
TH
3574 return cpu;
3575}
3576
c445477d
BH
3577#ifdef CONFIG_RFS_ACCEL
3578
3579/**
3580 * rps_may_expire_flow - check whether an RFS hardware filter may be removed
3581 * @dev: Device on which the filter was set
3582 * @rxq_index: RX queue index
3583 * @flow_id: Flow ID passed to ndo_rx_flow_steer()
3584 * @filter_id: Filter ID returned by ndo_rx_flow_steer()
3585 *
3586 * Drivers that implement ndo_rx_flow_steer() should periodically call
3587 * this function for each installed filter and remove the filters for
3588 * which it returns %true.
3589 */
3590bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index,
3591 u32 flow_id, u16 filter_id)
3592{
3593 struct netdev_rx_queue *rxqueue = dev->_rx + rxq_index;
3594 struct rps_dev_flow_table *flow_table;
3595 struct rps_dev_flow *rflow;
3596 bool expire = true;
a31196b0 3597 unsigned int cpu;
c445477d
BH
3598
3599 rcu_read_lock();
3600 flow_table = rcu_dereference(rxqueue->rps_flow_table);
3601 if (flow_table && flow_id <= flow_table->mask) {
3602 rflow = &flow_table->flows[flow_id];
3603 cpu = ACCESS_ONCE(rflow->cpu);
a31196b0 3604 if (rflow->filter == filter_id && cpu < nr_cpu_ids &&
c445477d
BH
3605 ((int)(per_cpu(softnet_data, cpu).input_queue_head -
3606 rflow->last_qtail) <
3607 (int)(10 * flow_table->mask)))
3608 expire = false;
3609 }
3610 rcu_read_unlock();
3611 return expire;
3612}
3613EXPORT_SYMBOL(rps_may_expire_flow);
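/* Illustrative sketch (not part of dev.c): a driver that installs RFS
 * filters through ndo_rx_flow_steer() might age them out from a periodic
 * work item roughly as below.  The bookkeeping (struct my_filter,
 * my_hw_remove_filter(), using the table index as the filter id) is
 * hypothetical; only the call to rps_may_expire_flow() follows the
 * interface documented above.
 */
struct my_filter {
	bool	in_use;
	u16	rxq_index;	/* queue passed to ndo_rx_flow_steer() */
	u32	flow_id;	/* flow_id passed to ndo_rx_flow_steer() */
};

static void my_hw_remove_filter(struct net_device *netdev, unsigned int idx); /* hypothetical */

static void my_arfs_expire(struct net_device *netdev, struct my_filter *tbl,
			   unsigned int n_filters)
{
	unsigned int i;

	for (i = 0; i < n_filters; i++) {
		if (!tbl[i].in_use)
			continue;
		/* here the filter id returned from ndo_rx_flow_steer() is
		 * assumed to be the table index i
		 */
		if (rps_may_expire_flow(netdev, tbl[i].rxq_index,
					tbl[i].flow_id, i)) {
			my_hw_remove_filter(netdev, i);
			tbl[i].in_use = false;
		}
	}
}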
3614
3615#endif /* CONFIG_RFS_ACCEL */
3616
0a9627f2 3617/* Called from hardirq (IPI) context */
e36fa2f7 3618static void rps_trigger_softirq(void *data)
0a9627f2 3619{
e36fa2f7
ED
3620 struct softnet_data *sd = data;
3621
eecfd7c4 3622 ____napi_schedule(sd, &sd->backlog);
dee42870 3623 sd->received_rps++;
0a9627f2 3624}
e36fa2f7 3625
fec5e652 3626#endif /* CONFIG_RPS */
0a9627f2 3627
e36fa2f7
ED
3628/*
3629 * Check if this softnet_data structure is another cpu one
3630 * If yes, queue it to our IPI list and return 1
3631 * If no, return 0
3632 */
3633static int rps_ipi_queued(struct softnet_data *sd)
3634{
3635#ifdef CONFIG_RPS
903ceff7 3636 struct softnet_data *mysd = this_cpu_ptr(&softnet_data);
e36fa2f7
ED
3637
3638 if (sd != mysd) {
3639 sd->rps_ipi_next = mysd->rps_ipi_list;
3640 mysd->rps_ipi_list = sd;
3641
3642 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
3643 return 1;
3644 }
3645#endif /* CONFIG_RPS */
3646 return 0;
3647}
3648
99bbc707
WB
3649#ifdef CONFIG_NET_FLOW_LIMIT
3650int netdev_flow_limit_table_len __read_mostly = (1 << 12);
3651#endif
3652
3653static bool skb_flow_limit(struct sk_buff *skb, unsigned int qlen)
3654{
3655#ifdef CONFIG_NET_FLOW_LIMIT
3656 struct sd_flow_limit *fl;
3657 struct softnet_data *sd;
3658 unsigned int old_flow, new_flow;
3659
3660 if (qlen < (netdev_max_backlog >> 1))
3661 return false;
3662
903ceff7 3663 sd = this_cpu_ptr(&softnet_data);
99bbc707
WB
3664
3665 rcu_read_lock();
3666 fl = rcu_dereference(sd->flow_limit);
3667 if (fl) {
3958afa1 3668 new_flow = skb_get_hash(skb) & (fl->num_buckets - 1);
99bbc707
WB
3669 old_flow = fl->history[fl->history_head];
3670 fl->history[fl->history_head] = new_flow;
3671
3672 fl->history_head++;
3673 fl->history_head &= FLOW_LIMIT_HISTORY - 1;
3674
3675 if (likely(fl->buckets[old_flow]))
3676 fl->buckets[old_flow]--;
3677
3678 if (++fl->buckets[new_flow] > (FLOW_LIMIT_HISTORY >> 1)) {
3679 fl->count++;
3680 rcu_read_unlock();
3681 return true;
3682 }
3683 }
3684 rcu_read_unlock();
3685#endif
3686 return false;
3687}
3688
0a9627f2
TH
3689/*
3690 * enqueue_to_backlog is called to queue an skb to a per CPU backlog
3691 * queue (may be a remote CPU queue).
3692 */
fec5e652
TH
3693static int enqueue_to_backlog(struct sk_buff *skb, int cpu,
3694 unsigned int *qtail)
0a9627f2 3695{
e36fa2f7 3696 struct softnet_data *sd;
0a9627f2 3697 unsigned long flags;
99bbc707 3698 unsigned int qlen;
0a9627f2 3699
e36fa2f7 3700 sd = &per_cpu(softnet_data, cpu);
0a9627f2
TH
3701
3702 local_irq_save(flags);
0a9627f2 3703
e36fa2f7 3704 rps_lock(sd);
e9e4dd32
JA
3705 if (!netif_running(skb->dev))
3706 goto drop;
99bbc707
WB
3707 qlen = skb_queue_len(&sd->input_pkt_queue);
3708 if (qlen <= netdev_max_backlog && !skb_flow_limit(skb, qlen)) {
e008f3f0 3709 if (qlen) {
0a9627f2 3710enqueue:
e36fa2f7 3711 __skb_queue_tail(&sd->input_pkt_queue, skb);
76cc8b13 3712 input_queue_tail_incr_save(sd, qtail);
e36fa2f7 3713 rps_unlock(sd);
152102c7 3714 local_irq_restore(flags);
0a9627f2
TH
3715 return NET_RX_SUCCESS;
3716 }
3717
ebda37c2
ED
3718 /* Schedule NAPI for backlog device
 3719 * We can use a non-atomic operation since we own the queue lock
3720 */
3721 if (!__test_and_set_bit(NAPI_STATE_SCHED, &sd->backlog.state)) {
e36fa2f7 3722 if (!rps_ipi_queued(sd))
eecfd7c4 3723 ____napi_schedule(sd, &sd->backlog);
0a9627f2
TH
3724 }
3725 goto enqueue;
3726 }
3727
e9e4dd32 3728drop:
dee42870 3729 sd->dropped++;
e36fa2f7 3730 rps_unlock(sd);
0a9627f2 3731
0a9627f2
TH
3732 local_irq_restore(flags);
3733
caf586e5 3734 atomic_long_inc(&skb->dev->rx_dropped);
0a9627f2
TH
3735 kfree_skb(skb);
3736 return NET_RX_DROP;
3737}
1da177e4 3738
ae78dbfa 3739static int netif_rx_internal(struct sk_buff *skb)
1da177e4 3740{
b0e28f1e 3741 int ret;
1da177e4 3742
588f0330 3743 net_timestamp_check(netdev_tstamp_prequeue, skb);
1da177e4 3744
cf66ba58 3745 trace_netif_rx(skb);
df334545 3746#ifdef CONFIG_RPS
c5905afb 3747 if (static_key_false(&rps_needed)) {
fec5e652 3748 struct rps_dev_flow voidflow, *rflow = &voidflow;
b0e28f1e
ED
3749 int cpu;
3750
cece1945 3751 preempt_disable();
b0e28f1e 3752 rcu_read_lock();
fec5e652
TH
3753
3754 cpu = get_rps_cpu(skb->dev, skb, &rflow);
b0e28f1e
ED
3755 if (cpu < 0)
3756 cpu = smp_processor_id();
fec5e652
TH
3757
3758 ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
3759
b0e28f1e 3760 rcu_read_unlock();
cece1945 3761 preempt_enable();
adc9300e
ED
3762 } else
3763#endif
fec5e652
TH
3764 {
3765 unsigned int qtail;
3766 ret = enqueue_to_backlog(skb, get_cpu(), &qtail);
3767 put_cpu();
3768 }
b0e28f1e 3769 return ret;
1da177e4 3770}
ae78dbfa
BH
3771
3772/**
3773 * netif_rx - post buffer to the network code
3774 * @skb: buffer to post
3775 *
3776 * This function receives a packet from a device driver and queues it for
3777 * the upper (protocol) levels to process. It always succeeds. The buffer
3778 * may be dropped during processing for congestion control or by the
3779 * protocol layers.
3780 *
3781 * return values:
3782 * NET_RX_SUCCESS (no congestion)
3783 * NET_RX_DROP (packet was dropped)
3784 *
3785 */
3786
3787int netif_rx(struct sk_buff *skb)
3788{
3789 trace_netif_rx_entry(skb);
3790
3791 return netif_rx_internal(skb);
3792}
d1b19dff 3793EXPORT_SYMBOL(netif_rx);
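/* Illustrative sketch (not part of dev.c): the classic non-NAPI receive
 * path.  A legacy driver that has copied one received frame into "buf"
 * hands it to the stack with netif_rx(); everything except the core calls
 * (netdev_alloc_skb_ip_align, skb_put, eth_type_trans, netif_rx) is
 * hypothetical driver glue.
 */
static void my_legacy_rx(struct net_device *dev, const void *buf,
			 unsigned int len)
{
	struct sk_buff *skb = netdev_alloc_skb_ip_align(dev, len);

	if (!skb) {
		dev->stats.rx_dropped++;
		return;
	}
	memcpy(skb_put(skb, len), buf, len);
	skb->protocol = eth_type_trans(skb, dev);
	netif_rx(skb);			/* safe from hard irq context */
}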
1da177e4
LT
3794
3795int netif_rx_ni(struct sk_buff *skb)
3796{
3797 int err;
3798
ae78dbfa
BH
3799 trace_netif_rx_ni_entry(skb);
3800
1da177e4 3801 preempt_disable();
ae78dbfa 3802 err = netif_rx_internal(skb);
1da177e4
LT
3803 if (local_softirq_pending())
3804 do_softirq();
3805 preempt_enable();
3806
3807 return err;
3808}
1da177e4
LT
3809EXPORT_SYMBOL(netif_rx_ni);
3810
1da177e4
LT
3811static void net_tx_action(struct softirq_action *h)
3812{
903ceff7 3813 struct softnet_data *sd = this_cpu_ptr(&softnet_data);
1da177e4
LT
3814
3815 if (sd->completion_queue) {
3816 struct sk_buff *clist;
3817
3818 local_irq_disable();
3819 clist = sd->completion_queue;
3820 sd->completion_queue = NULL;
3821 local_irq_enable();
3822
3823 while (clist) {
3824 struct sk_buff *skb = clist;
3825 clist = clist->next;
3826
547b792c 3827 WARN_ON(atomic_read(&skb->users));
e6247027
ED
3828 if (likely(get_kfree_skb_cb(skb)->reason == SKB_REASON_CONSUMED))
3829 trace_consume_skb(skb);
3830 else
3831 trace_kfree_skb(skb, net_tx_action);
1da177e4
LT
3832 __kfree_skb(skb);
3833 }
3834 }
3835
3836 if (sd->output_queue) {
37437bb2 3837 struct Qdisc *head;
1da177e4
LT
3838
3839 local_irq_disable();
3840 head = sd->output_queue;
3841 sd->output_queue = NULL;
a9cbd588 3842 sd->output_queue_tailp = &sd->output_queue;
1da177e4
LT
3843 local_irq_enable();
3844
3845 while (head) {
37437bb2
DM
3846 struct Qdisc *q = head;
3847 spinlock_t *root_lock;
3848
1da177e4
LT
3849 head = head->next_sched;
3850
5fb66229 3851 root_lock = qdisc_lock(q);
37437bb2 3852 if (spin_trylock(root_lock)) {
4e857c58 3853 smp_mb__before_atomic();
def82a1d
JP
3854 clear_bit(__QDISC_STATE_SCHED,
3855 &q->state);
37437bb2
DM
3856 qdisc_run(q);
3857 spin_unlock(root_lock);
1da177e4 3858 } else {
195648bb 3859 if (!test_bit(__QDISC_STATE_DEACTIVATED,
e8a83e10 3860 &q->state)) {
195648bb 3861 __netif_reschedule(q);
e8a83e10 3862 } else {
4e857c58 3863 smp_mb__before_atomic();
e8a83e10
JP
3864 clear_bit(__QDISC_STATE_SCHED,
3865 &q->state);
3866 }
1da177e4
LT
3867 }
3868 }
3869 }
3870}
3871
ab95bfe0
JP
3872#if (defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)) && \
3873 (defined(CONFIG_ATM_LANE) || defined(CONFIG_ATM_LANE_MODULE))
da678292
MM
3874/* This hook is defined here for ATM LANE */
3875int (*br_fdb_test_addr_hook)(struct net_device *dev,
3876 unsigned char *addr) __read_mostly;
4fb019a0 3877EXPORT_SYMBOL_GPL(br_fdb_test_addr_hook);
da678292 3878#endif
1da177e4 3879
1f211a1b
DB
3880static inline struct sk_buff *
3881sch_handle_ingress(struct sk_buff *skb, struct packet_type **pt_prev, int *ret,
3882 struct net_device *orig_dev)
f697c3e8 3883{
e7582bab 3884#ifdef CONFIG_NET_CLS_ACT
d2788d34
DB
3885 struct tcf_proto *cl = rcu_dereference_bh(skb->dev->ingress_cl_list);
3886 struct tcf_result cl_res;
24824a09 3887
c9e99fd0
DB
3888 /* If there's at least one ingress present somewhere (so
3889 * we get here via enabled static key), remaining devices
3890 * that are not configured with an ingress qdisc will bail
d2788d34 3891 * out here.
c9e99fd0 3892 */
d2788d34 3893 if (!cl)
4577139b 3894 return skb;
f697c3e8
HX
3895 if (*pt_prev) {
3896 *ret = deliver_skb(skb, *pt_prev, orig_dev);
3897 *pt_prev = NULL;
1da177e4
LT
3898 }
3899
3365495c 3900 qdisc_skb_cb(skb)->pkt_len = skb->len;
c9e99fd0 3901 skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_INGRESS);
24ea591d 3902 qdisc_bstats_cpu_update(cl->q, skb);
c9e99fd0 3903
3b3ae880 3904 switch (tc_classify(skb, cl, &cl_res, false)) {
d2788d34
DB
3905 case TC_ACT_OK:
3906 case TC_ACT_RECLASSIFY:
3907 skb->tc_index = TC_H_MIN(cl_res.classid);
3908 break;
3909 case TC_ACT_SHOT:
24ea591d 3910 qdisc_qstats_cpu_drop(cl->q);
d2788d34
DB
3911 case TC_ACT_STOLEN:
3912 case TC_ACT_QUEUED:
3913 kfree_skb(skb);
3914 return NULL;
27b29f63
AS
3915 case TC_ACT_REDIRECT:
3916 /* skb_mac_header check was done by cls/act_bpf, so
3917 * we can safely push the L2 header back before
3918 * redirecting to another netdev
3919 */
3920 __skb_push(skb, skb->mac_len);
3921 skb_do_redirect(skb);
3922 return NULL;
d2788d34
DB
3923 default:
3924 break;
f697c3e8 3925 }
e7582bab 3926#endif /* CONFIG_NET_CLS_ACT */
e687ad60
PN
3927 return skb;
3928}
1da177e4 3929
ab95bfe0
JP
3930/**
3931 * netdev_rx_handler_register - register receive handler
3932 * @dev: device to register a handler for
3933 * @rx_handler: receive handler to register
93e2c32b 3934 * @rx_handler_data: data pointer that is used by rx handler
ab95bfe0 3935 *
e227867f 3936 * Register a receive handler for a device. This handler will then be
ab95bfe0
JP
3937 * called from __netif_receive_skb. A negative errno code is returned
3938 * on a failure.
3939 *
3940 * The caller must hold the rtnl_mutex.
8a4eb573
JP
3941 *
3942 * For a general description of rx_handler, see enum rx_handler_result.
ab95bfe0
JP
3943 */
3944int netdev_rx_handler_register(struct net_device *dev,
93e2c32b
JP
3945 rx_handler_func_t *rx_handler,
3946 void *rx_handler_data)
ab95bfe0
JP
3947{
3948 ASSERT_RTNL();
3949
3950 if (dev->rx_handler)
3951 return -EBUSY;
3952
00cfec37 3953 /* Note: rx_handler_data must be set before rx_handler */
93e2c32b 3954 rcu_assign_pointer(dev->rx_handler_data, rx_handler_data);
ab95bfe0
JP
3955 rcu_assign_pointer(dev->rx_handler, rx_handler);
3956
3957 return 0;
3958}
3959EXPORT_SYMBOL_GPL(netdev_rx_handler_register);
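/* Illustrative sketch (not part of dev.c): how a bridge/bond-like module
 * might claim a port device with the register call documented above.
 * my_handle_frame() and my_port_attach() are hypothetical; the RTNL
 * locking and the rx_handler_data pairing follow the rules stated in the
 * kernel-doc above.
 */
static rx_handler_result_t my_handle_frame(struct sk_buff **pskb)
{
	/* The private pointer registered below is reachable as
	 * rcu_dereference((*pskb)->dev->rx_handler_data).  A real handler
	 * would inspect *pskb and may consume it (RX_HANDLER_CONSUMED) or
	 * hand it to another device (RX_HANDLER_ANOTHER); here we just let
	 * the normal stack see it.
	 */
	return RX_HANDLER_PASS;
}

static int my_port_attach(struct net_device *port_dev, void *port_priv)
{
	int err;

	rtnl_lock();
	err = netdev_rx_handler_register(port_dev, my_handle_frame, port_priv);
	rtnl_unlock();
	return err;
}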
3960
3961/**
3962 * netdev_rx_handler_unregister - unregister receive handler
3963 * @dev: device to unregister a handler from
3964 *
166ec369 3965 * Unregister a receive handler from a device.
ab95bfe0
JP
3966 *
3967 * The caller must hold the rtnl_mutex.
3968 */
3969void netdev_rx_handler_unregister(struct net_device *dev)
3970{
3971
3972 ASSERT_RTNL();
a9b3cd7f 3973 RCU_INIT_POINTER(dev->rx_handler, NULL);
00cfec37
ED
 3974 /* a reader seeing a non-NULL rx_handler in a rcu_read_lock()
 3975 * section is guaranteed to see a non-NULL rx_handler_data
3976 * as well.
3977 */
3978 synchronize_net();
a9b3cd7f 3979 RCU_INIT_POINTER(dev->rx_handler_data, NULL);
ab95bfe0
JP
3980}
3981EXPORT_SYMBOL_GPL(netdev_rx_handler_unregister);
3982
b4b9e355
MG
3983/*
3984 * Limit the use of PFMEMALLOC reserves to those protocols that implement
3985 * the special handling of PFMEMALLOC skbs.
3986 */
3987static bool skb_pfmemalloc_protocol(struct sk_buff *skb)
3988{
3989 switch (skb->protocol) {
2b8837ae
JP
3990 case htons(ETH_P_ARP):
3991 case htons(ETH_P_IP):
3992 case htons(ETH_P_IPV6):
3993 case htons(ETH_P_8021Q):
3994 case htons(ETH_P_8021AD):
b4b9e355
MG
3995 return true;
3996 default:
3997 return false;
3998 }
3999}
4000
e687ad60
PN
4001static inline int nf_ingress(struct sk_buff *skb, struct packet_type **pt_prev,
4002 int *ret, struct net_device *orig_dev)
4003{
e7582bab 4004#ifdef CONFIG_NETFILTER_INGRESS
e687ad60
PN
4005 if (nf_hook_ingress_active(skb)) {
4006 if (*pt_prev) {
4007 *ret = deliver_skb(skb, *pt_prev, orig_dev);
4008 *pt_prev = NULL;
4009 }
4010
4011 return nf_hook_ingress(skb);
4012 }
e7582bab 4013#endif /* CONFIG_NETFILTER_INGRESS */
e687ad60
PN
4014 return 0;
4015}
e687ad60 4016
9754e293 4017static int __netif_receive_skb_core(struct sk_buff *skb, bool pfmemalloc)
1da177e4
LT
4018{
4019 struct packet_type *ptype, *pt_prev;
ab95bfe0 4020 rx_handler_func_t *rx_handler;
f2ccd8fa 4021 struct net_device *orig_dev;
8a4eb573 4022 bool deliver_exact = false;
1da177e4 4023 int ret = NET_RX_DROP;
252e3346 4024 __be16 type;
1da177e4 4025
588f0330 4026 net_timestamp_check(!netdev_tstamp_prequeue, skb);
81bbb3d4 4027
cf66ba58 4028 trace_netif_receive_skb(skb);
9b22ea56 4029
cc9bd5ce 4030 orig_dev = skb->dev;
8f903c70 4031
c1d2bbe1 4032 skb_reset_network_header(skb);
fda55eca
ED
4033 if (!skb_transport_header_was_set(skb))
4034 skb_reset_transport_header(skb);
0b5c9db1 4035 skb_reset_mac_len(skb);
1da177e4
LT
4036
4037 pt_prev = NULL;
4038
63d8ea7f 4039another_round:
b6858177 4040 skb->skb_iif = skb->dev->ifindex;
63d8ea7f
DM
4041
4042 __this_cpu_inc(softnet_data.processed);
4043
8ad227ff
PM
4044 if (skb->protocol == cpu_to_be16(ETH_P_8021Q) ||
4045 skb->protocol == cpu_to_be16(ETH_P_8021AD)) {
0d5501c1 4046 skb = skb_vlan_untag(skb);
bcc6d479 4047 if (unlikely(!skb))
2c17d27c 4048 goto out;
bcc6d479
JP
4049 }
4050
1da177e4
LT
4051#ifdef CONFIG_NET_CLS_ACT
4052 if (skb->tc_verd & TC_NCLS) {
4053 skb->tc_verd = CLR_TC_NCLS(skb->tc_verd);
4054 goto ncls;
4055 }
4056#endif
4057
9754e293 4058 if (pfmemalloc)
b4b9e355
MG
4059 goto skip_taps;
4060
1da177e4 4061 list_for_each_entry_rcu(ptype, &ptype_all, list) {
7866a621
SN
4062 if (pt_prev)
4063 ret = deliver_skb(skb, pt_prev, orig_dev);
4064 pt_prev = ptype;
4065 }
4066
4067 list_for_each_entry_rcu(ptype, &skb->dev->ptype_all, list) {
4068 if (pt_prev)
4069 ret = deliver_skb(skb, pt_prev, orig_dev);
4070 pt_prev = ptype;
1da177e4
LT
4071 }
4072
b4b9e355 4073skip_taps:
1cf51900 4074#ifdef CONFIG_NET_INGRESS
4577139b 4075 if (static_key_false(&ingress_needed)) {
1f211a1b 4076 skb = sch_handle_ingress(skb, &pt_prev, &ret, orig_dev);
4577139b 4077 if (!skb)
2c17d27c 4078 goto out;
e687ad60
PN
4079
4080 if (nf_ingress(skb, &pt_prev, &ret, orig_dev) < 0)
2c17d27c 4081 goto out;
4577139b 4082 }
1cf51900
PN
4083#endif
4084#ifdef CONFIG_NET_CLS_ACT
4577139b 4085 skb->tc_verd = 0;
1da177e4
LT
4086ncls:
4087#endif
9754e293 4088 if (pfmemalloc && !skb_pfmemalloc_protocol(skb))
b4b9e355
MG
4089 goto drop;
4090
df8a39de 4091 if (skb_vlan_tag_present(skb)) {
2425717b
JF
4092 if (pt_prev) {
4093 ret = deliver_skb(skb, pt_prev, orig_dev);
4094 pt_prev = NULL;
4095 }
48cc32d3 4096 if (vlan_do_receive(&skb))
2425717b
JF
4097 goto another_round;
4098 else if (unlikely(!skb))
2c17d27c 4099 goto out;
2425717b
JF
4100 }
4101
48cc32d3 4102 rx_handler = rcu_dereference(skb->dev->rx_handler);
ab95bfe0
JP
4103 if (rx_handler) {
4104 if (pt_prev) {
4105 ret = deliver_skb(skb, pt_prev, orig_dev);
4106 pt_prev = NULL;
4107 }
8a4eb573
JP
4108 switch (rx_handler(&skb)) {
4109 case RX_HANDLER_CONSUMED:
3bc1b1ad 4110 ret = NET_RX_SUCCESS;
2c17d27c 4111 goto out;
8a4eb573 4112 case RX_HANDLER_ANOTHER:
63d8ea7f 4113 goto another_round;
8a4eb573
JP
4114 case RX_HANDLER_EXACT:
4115 deliver_exact = true;
4116 case RX_HANDLER_PASS:
4117 break;
4118 default:
4119 BUG();
4120 }
ab95bfe0 4121 }
1da177e4 4122
df8a39de
JP
4123 if (unlikely(skb_vlan_tag_present(skb))) {
4124 if (skb_vlan_tag_get_id(skb))
d4b812de
ED
4125 skb->pkt_type = PACKET_OTHERHOST;
4126 /* Note: we might in the future use prio bits
4127 * and set skb->priority like in vlan_do_receive()
4128 * For the time being, just ignore Priority Code Point
4129 */
4130 skb->vlan_tci = 0;
4131 }
48cc32d3 4132
7866a621
SN
4133 type = skb->protocol;
4134
63d8ea7f 4135 /* deliver only exact match when indicated */
7866a621
SN
4136 if (likely(!deliver_exact)) {
4137 deliver_ptype_list_skb(skb, &pt_prev, orig_dev, type,
4138 &ptype_base[ntohs(type) &
4139 PTYPE_HASH_MASK]);
4140 }
1f3c8804 4141
7866a621
SN
4142 deliver_ptype_list_skb(skb, &pt_prev, orig_dev, type,
4143 &orig_dev->ptype_specific);
4144
4145 if (unlikely(skb->dev != orig_dev)) {
4146 deliver_ptype_list_skb(skb, &pt_prev, orig_dev, type,
4147 &skb->dev->ptype_specific);
1da177e4
LT
4148 }
4149
4150 if (pt_prev) {
1080e512 4151 if (unlikely(skb_orphan_frags(skb, GFP_ATOMIC)))
0e698bf6 4152 goto drop;
1080e512
MT
4153 else
4154 ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
1da177e4 4155 } else {
b4b9e355 4156drop:
6e7333d3
JW
4157 if (!deliver_exact)
4158 atomic_long_inc(&skb->dev->rx_dropped);
4159 else
4160 atomic_long_inc(&skb->dev->rx_nohandler);
1da177e4
LT
4161 kfree_skb(skb);
 4162 /* Jamal, now you will not be able to escape explaining
 4163 * to me how you were going to use this. :-)
4164 */
4165 ret = NET_RX_DROP;
4166 }
4167
2c17d27c 4168out:
9754e293
DM
4169 return ret;
4170}
4171
4172static int __netif_receive_skb(struct sk_buff *skb)
4173{
4174 int ret;
4175
4176 if (sk_memalloc_socks() && skb_pfmemalloc(skb)) {
4177 unsigned long pflags = current->flags;
4178
4179 /*
4180 * PFMEMALLOC skbs are special, they should
4181 * - be delivered to SOCK_MEMALLOC sockets only
4182 * - stay away from userspace
4183 * - have bounded memory usage
4184 *
4185 * Use PF_MEMALLOC as this saves us from propagating the allocation
4186 * context down to all allocation sites.
4187 */
4188 current->flags |= PF_MEMALLOC;
4189 ret = __netif_receive_skb_core(skb, true);
4190 tsk_restore_flags(current, pflags, PF_MEMALLOC);
4191 } else
4192 ret = __netif_receive_skb_core(skb, false);
4193
1da177e4
LT
4194 return ret;
4195}
0a9627f2 4196
ae78dbfa 4197static int netif_receive_skb_internal(struct sk_buff *skb)
0a9627f2 4198{
2c17d27c
JA
4199 int ret;
4200
588f0330 4201 net_timestamp_check(netdev_tstamp_prequeue, skb);
3b098e2d 4202
c1f19b51
RC
4203 if (skb_defer_rx_timestamp(skb))
4204 return NET_RX_SUCCESS;
4205
2c17d27c
JA
4206 rcu_read_lock();
4207
df334545 4208#ifdef CONFIG_RPS
c5905afb 4209 if (static_key_false(&rps_needed)) {
3b098e2d 4210 struct rps_dev_flow voidflow, *rflow = &voidflow;
2c17d27c 4211 int cpu = get_rps_cpu(skb->dev, skb, &rflow);
0a9627f2 4212
3b098e2d
ED
4213 if (cpu >= 0) {
4214 ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
4215 rcu_read_unlock();
adc9300e 4216 return ret;
3b098e2d 4217 }
fec5e652 4218 }
1e94d72f 4219#endif
2c17d27c
JA
4220 ret = __netif_receive_skb(skb);
4221 rcu_read_unlock();
4222 return ret;
0a9627f2 4223}
ae78dbfa
BH
4224
4225/**
4226 * netif_receive_skb - process receive buffer from network
4227 * @skb: buffer to process
4228 *
4229 * netif_receive_skb() is the main receive data processing function.
4230 * It always succeeds. The buffer may be dropped during processing
4231 * for congestion control or by the protocol layers.
4232 *
4233 * This function may only be called from softirq context and interrupts
4234 * should be enabled.
4235 *
4236 * Return values (usually ignored):
4237 * NET_RX_SUCCESS: no congestion
4238 * NET_RX_DROP: packet was dropped
4239 */
04eb4489 4240int netif_receive_skb(struct sk_buff *skb)
ae78dbfa
BH
4241{
4242 trace_netif_receive_skb_entry(skb);
4243
4244 return netif_receive_skb_internal(skb);
4245}
04eb4489 4246EXPORT_SYMBOL(netif_receive_skb);
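/* Illustrative sketch (not part of dev.c): a NAPI poll callback that does
 * not use GRO pushes each completed frame up with netif_receive_skb()
 * from softirq context, and completes NAPI when the budget was not
 * exhausted.  my_rx_ring_next() is a hypothetical helper returning the
 * next fully received skb, or NULL when the RX ring is empty.
 */
static struct sk_buff *my_rx_ring_next(struct napi_struct *napi); /* hypothetical */

static int my_poll(struct napi_struct *napi, int budget)
{
	int work_done = 0;
	struct sk_buff *skb;

	while (work_done < budget && (skb = my_rx_ring_next(napi)) != NULL) {
		skb->protocol = eth_type_trans(skb, napi->dev);
		netif_receive_skb(skb);
		work_done++;
	}

	if (work_done < budget) {
		napi_complete_done(napi, work_done);
		/* a real driver would re-enable its RX interrupt here */
	}
	return work_done;
}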
1da177e4 4247
88751275
ED
 4248/* Network device is going away; flush any packets still pending.
4249 * Called with irqs disabled.
4250 */
152102c7 4251static void flush_backlog(void *arg)
6e583ce5 4252{
152102c7 4253 struct net_device *dev = arg;
903ceff7 4254 struct softnet_data *sd = this_cpu_ptr(&softnet_data);
6e583ce5
SH
4255 struct sk_buff *skb, *tmp;
4256
e36fa2f7 4257 rps_lock(sd);
6e7676c1 4258 skb_queue_walk_safe(&sd->input_pkt_queue, skb, tmp) {
6e583ce5 4259 if (skb->dev == dev) {
e36fa2f7 4260 __skb_unlink(skb, &sd->input_pkt_queue);
6e583ce5 4261 kfree_skb(skb);
76cc8b13 4262 input_queue_head_incr(sd);
6e583ce5 4263 }
6e7676c1 4264 }
e36fa2f7 4265 rps_unlock(sd);
6e7676c1
CG
4266
4267 skb_queue_walk_safe(&sd->process_queue, skb, tmp) {
4268 if (skb->dev == dev) {
4269 __skb_unlink(skb, &sd->process_queue);
4270 kfree_skb(skb);
76cc8b13 4271 input_queue_head_incr(sd);
6e7676c1
CG
4272 }
4273 }
6e583ce5
SH
4274}
4275
d565b0a1
HX
4276static int napi_gro_complete(struct sk_buff *skb)
4277{
22061d80 4278 struct packet_offload *ptype;
d565b0a1 4279 __be16 type = skb->protocol;
22061d80 4280 struct list_head *head = &offload_base;
d565b0a1
HX
4281 int err = -ENOENT;
4282
c3c7c254
ED
4283 BUILD_BUG_ON(sizeof(struct napi_gro_cb) > sizeof(skb->cb));
4284
fc59f9a3
HX
4285 if (NAPI_GRO_CB(skb)->count == 1) {
4286 skb_shinfo(skb)->gso_size = 0;
d565b0a1 4287 goto out;
fc59f9a3 4288 }
d565b0a1
HX
4289
4290 rcu_read_lock();
4291 list_for_each_entry_rcu(ptype, head, list) {
f191a1d1 4292 if (ptype->type != type || !ptype->callbacks.gro_complete)
d565b0a1
HX
4293 continue;
4294
299603e8 4295 err = ptype->callbacks.gro_complete(skb, 0);
d565b0a1
HX
4296 break;
4297 }
4298 rcu_read_unlock();
4299
4300 if (err) {
4301 WARN_ON(&ptype->list == head);
4302 kfree_skb(skb);
4303 return NET_RX_SUCCESS;
4304 }
4305
4306out:
ae78dbfa 4307 return netif_receive_skb_internal(skb);
d565b0a1
HX
4308}
4309
2e71a6f8
ED
4310/* napi->gro_list contains packets ordered by age.
 4311 * The youngest packets are at the head of it.
4312 * Complete skbs in reverse order to reduce latencies.
4313 */
4314void napi_gro_flush(struct napi_struct *napi, bool flush_old)
d565b0a1 4315{
2e71a6f8 4316 struct sk_buff *skb, *prev = NULL;
d565b0a1 4317
2e71a6f8
ED
4318 /* scan list and build reverse chain */
4319 for (skb = napi->gro_list; skb != NULL; skb = skb->next) {
4320 skb->prev = prev;
4321 prev = skb;
4322 }
4323
4324 for (skb = prev; skb; skb = prev) {
d565b0a1 4325 skb->next = NULL;
2e71a6f8
ED
4326
4327 if (flush_old && NAPI_GRO_CB(skb)->age == jiffies)
4328 return;
4329
4330 prev = skb->prev;
d565b0a1 4331 napi_gro_complete(skb);
2e71a6f8 4332 napi->gro_count--;
d565b0a1
HX
4333 }
4334
4335 napi->gro_list = NULL;
4336}
86cac58b 4337EXPORT_SYMBOL(napi_gro_flush);
d565b0a1 4338
89c5fa33
ED
4339static void gro_list_prepare(struct napi_struct *napi, struct sk_buff *skb)
4340{
4341 struct sk_buff *p;
4342 unsigned int maclen = skb->dev->hard_header_len;
0b4cec8c 4343 u32 hash = skb_get_hash_raw(skb);
89c5fa33
ED
4344
4345 for (p = napi->gro_list; p; p = p->next) {
4346 unsigned long diffs;
4347
0b4cec8c
TH
4348 NAPI_GRO_CB(p)->flush = 0;
4349
4350 if (hash != skb_get_hash_raw(p)) {
4351 NAPI_GRO_CB(p)->same_flow = 0;
4352 continue;
4353 }
4354
89c5fa33
ED
4355 diffs = (unsigned long)p->dev ^ (unsigned long)skb->dev;
4356 diffs |= p->vlan_tci ^ skb->vlan_tci;
ce87fc6c 4357 diffs |= skb_metadata_dst_cmp(p, skb);
89c5fa33
ED
4358 if (maclen == ETH_HLEN)
4359 diffs |= compare_ether_header(skb_mac_header(p),
a50e233c 4360 skb_mac_header(skb));
89c5fa33
ED
4361 else if (!diffs)
4362 diffs = memcmp(skb_mac_header(p),
a50e233c 4363 skb_mac_header(skb),
89c5fa33
ED
4364 maclen);
4365 NAPI_GRO_CB(p)->same_flow = !diffs;
89c5fa33
ED
4366 }
4367}
4368
299603e8
JC
4369static void skb_gro_reset_offset(struct sk_buff *skb)
4370{
4371 const struct skb_shared_info *pinfo = skb_shinfo(skb);
4372 const skb_frag_t *frag0 = &pinfo->frags[0];
4373
4374 NAPI_GRO_CB(skb)->data_offset = 0;
4375 NAPI_GRO_CB(skb)->frag0 = NULL;
4376 NAPI_GRO_CB(skb)->frag0_len = 0;
4377
4378 if (skb_mac_header(skb) == skb_tail_pointer(skb) &&
4379 pinfo->nr_frags &&
4380 !PageHighMem(skb_frag_page(frag0))) {
4381 NAPI_GRO_CB(skb)->frag0 = skb_frag_address(frag0);
4382 NAPI_GRO_CB(skb)->frag0_len = skb_frag_size(frag0);
89c5fa33
ED
4383 }
4384}
4385
a50e233c
ED
4386static void gro_pull_from_frag0(struct sk_buff *skb, int grow)
4387{
4388 struct skb_shared_info *pinfo = skb_shinfo(skb);
4389
4390 BUG_ON(skb->end - skb->tail < grow);
4391
4392 memcpy(skb_tail_pointer(skb), NAPI_GRO_CB(skb)->frag0, grow);
4393
4394 skb->data_len -= grow;
4395 skb->tail += grow;
4396
4397 pinfo->frags[0].page_offset += grow;
4398 skb_frag_size_sub(&pinfo->frags[0], grow);
4399
4400 if (unlikely(!skb_frag_size(&pinfo->frags[0]))) {
4401 skb_frag_unref(skb, 0);
4402 memmove(pinfo->frags, pinfo->frags + 1,
4403 --pinfo->nr_frags * sizeof(pinfo->frags[0]));
4404 }
4405}
4406
bb728820 4407static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
d565b0a1
HX
4408{
4409 struct sk_buff **pp = NULL;
22061d80 4410 struct packet_offload *ptype;
d565b0a1 4411 __be16 type = skb->protocol;
22061d80 4412 struct list_head *head = &offload_base;
0da2afd5 4413 int same_flow;
5b252f0c 4414 enum gro_result ret;
a50e233c 4415 int grow;
d565b0a1 4416
9c62a68d 4417 if (!(skb->dev->features & NETIF_F_GRO))
d565b0a1
HX
4418 goto normal;
4419
5a212329 4420 if (skb_is_gso(skb) || skb_has_frag_list(skb) || skb->csum_bad)
f17f5c91
HX
4421 goto normal;
4422
89c5fa33
ED
4423 gro_list_prepare(napi, skb);
4424
d565b0a1
HX
4425 rcu_read_lock();
4426 list_for_each_entry_rcu(ptype, head, list) {
f191a1d1 4427 if (ptype->type != type || !ptype->callbacks.gro_receive)
d565b0a1
HX
4428 continue;
4429
86911732 4430 skb_set_network_header(skb, skb_gro_offset(skb));
efd9450e 4431 skb_reset_mac_len(skb);
d565b0a1
HX
4432 NAPI_GRO_CB(skb)->same_flow = 0;
4433 NAPI_GRO_CB(skb)->flush = 0;
5d38a079 4434 NAPI_GRO_CB(skb)->free = 0;
b582ef09 4435 NAPI_GRO_CB(skb)->udp_mark = 0;
15e2396d 4436 NAPI_GRO_CB(skb)->gro_remcsum_start = 0;
d565b0a1 4437
662880f4
TH
4438 /* Setup for GRO checksum validation */
4439 switch (skb->ip_summed) {
4440 case CHECKSUM_COMPLETE:
4441 NAPI_GRO_CB(skb)->csum = skb->csum;
4442 NAPI_GRO_CB(skb)->csum_valid = 1;
4443 NAPI_GRO_CB(skb)->csum_cnt = 0;
4444 break;
4445 case CHECKSUM_UNNECESSARY:
4446 NAPI_GRO_CB(skb)->csum_cnt = skb->csum_level + 1;
4447 NAPI_GRO_CB(skb)->csum_valid = 0;
4448 break;
4449 default:
4450 NAPI_GRO_CB(skb)->csum_cnt = 0;
4451 NAPI_GRO_CB(skb)->csum_valid = 0;
4452 }
d565b0a1 4453
f191a1d1 4454 pp = ptype->callbacks.gro_receive(&napi->gro_list, skb);
d565b0a1
HX
4455 break;
4456 }
4457 rcu_read_unlock();
4458
4459 if (&ptype->list == head)
4460 goto normal;
4461
0da2afd5 4462 same_flow = NAPI_GRO_CB(skb)->same_flow;
5d0d9be8 4463 ret = NAPI_GRO_CB(skb)->free ? GRO_MERGED_FREE : GRO_MERGED;
0da2afd5 4464
d565b0a1
HX
4465 if (pp) {
4466 struct sk_buff *nskb = *pp;
4467
4468 *pp = nskb->next;
4469 nskb->next = NULL;
4470 napi_gro_complete(nskb);
4ae5544f 4471 napi->gro_count--;
d565b0a1
HX
4472 }
4473
0da2afd5 4474 if (same_flow)
d565b0a1
HX
4475 goto ok;
4476
600adc18 4477 if (NAPI_GRO_CB(skb)->flush)
d565b0a1 4478 goto normal;
d565b0a1 4479
600adc18
ED
4480 if (unlikely(napi->gro_count >= MAX_GRO_SKBS)) {
4481 struct sk_buff *nskb = napi->gro_list;
4482
4483 /* locate the end of the list to select the 'oldest' flow */
4484 while (nskb->next) {
4485 pp = &nskb->next;
4486 nskb = *pp;
4487 }
4488 *pp = NULL;
4489 nskb->next = NULL;
4490 napi_gro_complete(nskb);
4491 } else {
4492 napi->gro_count++;
4493 }
d565b0a1 4494 NAPI_GRO_CB(skb)->count = 1;
2e71a6f8 4495 NAPI_GRO_CB(skb)->age = jiffies;
29e98242 4496 NAPI_GRO_CB(skb)->last = skb;
86911732 4497 skb_shinfo(skb)->gso_size = skb_gro_len(skb);
d565b0a1
HX
4498 skb->next = napi->gro_list;
4499 napi->gro_list = skb;
5d0d9be8 4500 ret = GRO_HELD;
d565b0a1 4501
ad0f9904 4502pull:
a50e233c
ED
4503 grow = skb_gro_offset(skb) - skb_headlen(skb);
4504 if (grow > 0)
4505 gro_pull_from_frag0(skb, grow);
d565b0a1 4506ok:
5d0d9be8 4507 return ret;
d565b0a1
HX
4508
4509normal:
ad0f9904
HX
4510 ret = GRO_NORMAL;
4511 goto pull;
5d38a079 4512}
96e93eab 4513
bf5a755f
JC
4514struct packet_offload *gro_find_receive_by_type(__be16 type)
4515{
4516 struct list_head *offload_head = &offload_base;
4517 struct packet_offload *ptype;
4518
4519 list_for_each_entry_rcu(ptype, offload_head, list) {
4520 if (ptype->type != type || !ptype->callbacks.gro_receive)
4521 continue;
4522 return ptype;
4523 }
4524 return NULL;
4525}
e27a2f83 4526EXPORT_SYMBOL(gro_find_receive_by_type);
bf5a755f
JC
4527
4528struct packet_offload *gro_find_complete_by_type(__be16 type)
4529{
4530 struct list_head *offload_head = &offload_base;
4531 struct packet_offload *ptype;
4532
4533 list_for_each_entry_rcu(ptype, offload_head, list) {
4534 if (ptype->type != type || !ptype->callbacks.gro_complete)
4535 continue;
4536 return ptype;
4537 }
4538 return NULL;
4539}
e27a2f83 4540EXPORT_SYMBOL(gro_find_complete_by_type);
5d38a079 4541
bb728820 4542static gro_result_t napi_skb_finish(gro_result_t ret, struct sk_buff *skb)
5d38a079 4543{
5d0d9be8
HX
4544 switch (ret) {
4545 case GRO_NORMAL:
ae78dbfa 4546 if (netif_receive_skb_internal(skb))
c7c4b3b6
BH
4547 ret = GRO_DROP;
4548 break;
5d38a079 4549
5d0d9be8 4550 case GRO_DROP:
5d38a079
HX
4551 kfree_skb(skb);
4552 break;
5b252f0c 4553
daa86548 4554 case GRO_MERGED_FREE:
ce87fc6c
JG
4555 if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD) {
4556 skb_dst_drop(skb);
d7e8883c 4557 kmem_cache_free(skbuff_head_cache, skb);
ce87fc6c 4558 } else {
d7e8883c 4559 __kfree_skb(skb);
ce87fc6c 4560 }
daa86548
ED
4561 break;
4562
5b252f0c
BH
4563 case GRO_HELD:
4564 case GRO_MERGED:
4565 break;
5d38a079
HX
4566 }
4567
c7c4b3b6 4568 return ret;
5d0d9be8 4569}
5d0d9be8 4570
c7c4b3b6 4571gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
5d0d9be8 4572{
93f93a44 4573 skb_mark_napi_id(skb, napi);
ae78dbfa 4574 trace_napi_gro_receive_entry(skb);
86911732 4575
a50e233c
ED
4576 skb_gro_reset_offset(skb);
4577
89c5fa33 4578 return napi_skb_finish(dev_gro_receive(napi, skb), skb);
d565b0a1
HX
4579}
4580EXPORT_SYMBOL(napi_gro_receive);
4581
d0c2b0d2 4582static void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb)
96e93eab 4583{
93a35f59
ED
4584 if (unlikely(skb->pfmemalloc)) {
4585 consume_skb(skb);
4586 return;
4587 }
96e93eab 4588 __skb_pull(skb, skb_headlen(skb));
2a2a459e
ED
4589 /* restore the reserve we had after netdev_alloc_skb_ip_align() */
4590 skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN - skb_headroom(skb));
3701e513 4591 skb->vlan_tci = 0;
66c46d74 4592 skb->dev = napi->dev;
6d152e23 4593 skb->skb_iif = 0;
c3caf119
JC
4594 skb->encapsulation = 0;
4595 skb_shinfo(skb)->gso_type = 0;
e33d0ba8 4596 skb->truesize = SKB_TRUESIZE(skb_end_offset(skb));
96e93eab
HX
4597
4598 napi->skb = skb;
4599}
96e93eab 4600
76620aaf 4601struct sk_buff *napi_get_frags(struct napi_struct *napi)
5d38a079 4602{
5d38a079 4603 struct sk_buff *skb = napi->skb;
5d38a079
HX
4604
4605 if (!skb) {
fd11a83d 4606 skb = napi_alloc_skb(napi, GRO_MAX_HEAD);
e2f9dc3b
ED
4607 if (skb) {
4608 napi->skb = skb;
4609 skb_mark_napi_id(skb, napi);
4610 }
80595d59 4611 }
96e93eab
HX
4612 return skb;
4613}
76620aaf 4614EXPORT_SYMBOL(napi_get_frags);
96e93eab 4615
a50e233c
ED
4616static gro_result_t napi_frags_finish(struct napi_struct *napi,
4617 struct sk_buff *skb,
4618 gro_result_t ret)
96e93eab 4619{
5d0d9be8
HX
4620 switch (ret) {
4621 case GRO_NORMAL:
a50e233c
ED
4622 case GRO_HELD:
4623 __skb_push(skb, ETH_HLEN);
4624 skb->protocol = eth_type_trans(skb, skb->dev);
4625 if (ret == GRO_NORMAL && netif_receive_skb_internal(skb))
c7c4b3b6 4626 ret = GRO_DROP;
86911732 4627 break;
5d38a079 4628
5d0d9be8 4629 case GRO_DROP:
5d0d9be8
HX
4630 case GRO_MERGED_FREE:
4631 napi_reuse_skb(napi, skb);
4632 break;
5b252f0c
BH
4633
4634 case GRO_MERGED:
4635 break;
5d0d9be8 4636 }
5d38a079 4637
c7c4b3b6 4638 return ret;
5d38a079 4639}
5d0d9be8 4640
a50e233c
ED
4641/* Upper GRO stack assumes network header starts at gro_offset=0
4642 * Drivers could call both napi_gro_frags() and napi_gro_receive()
4643 * We copy ethernet header into skb->data to have a common layout.
4644 */
4adb9c4a 4645static struct sk_buff *napi_frags_skb(struct napi_struct *napi)
76620aaf
HX
4646{
4647 struct sk_buff *skb = napi->skb;
a50e233c
ED
4648 const struct ethhdr *eth;
4649 unsigned int hlen = sizeof(*eth);
76620aaf
HX
4650
4651 napi->skb = NULL;
4652
a50e233c
ED
4653 skb_reset_mac_header(skb);
4654 skb_gro_reset_offset(skb);
4655
4656 eth = skb_gro_header_fast(skb, 0);
4657 if (unlikely(skb_gro_header_hard(skb, hlen))) {
4658 eth = skb_gro_header_slow(skb, hlen, 0);
4659 if (unlikely(!eth)) {
4660 napi_reuse_skb(napi, skb);
4661 return NULL;
4662 }
4663 } else {
4664 gro_pull_from_frag0(skb, hlen);
4665 NAPI_GRO_CB(skb)->frag0 += hlen;
4666 NAPI_GRO_CB(skb)->frag0_len -= hlen;
76620aaf 4667 }
a50e233c
ED
4668 __skb_pull(skb, hlen);
4669
4670 /*
4671 * This works because the only protocols we care about don't require
4672 * special handling.
4673 * We'll fix it up properly in napi_frags_finish()
4674 */
4675 skb->protocol = eth->h_proto;
76620aaf 4676
76620aaf
HX
4677 return skb;
4678}
76620aaf 4679
c7c4b3b6 4680gro_result_t napi_gro_frags(struct napi_struct *napi)
5d0d9be8 4681{
76620aaf 4682 struct sk_buff *skb = napi_frags_skb(napi);
5d0d9be8
HX
4683
4684 if (!skb)
c7c4b3b6 4685 return GRO_DROP;
5d0d9be8 4686
ae78dbfa
BH
4687 trace_napi_gro_frags_entry(skb);
4688
89c5fa33 4689 return napi_frags_finish(napi, skb, dev_gro_receive(napi, skb));
5d0d9be8 4690}
5d38a079
HX
4691EXPORT_SYMBOL(napi_gro_frags);
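/* Illustrative sketch (not part of dev.c): how a page-fragment based
 * driver typically feeds GRO with the napi_get_frags()/napi_gro_frags()
 * pair documented above.  The descriptor fields (page, offset, len) are
 * hypothetical, and the truesize accounting is simplified to one full
 * page.
 */
static void my_rx_one_frag(struct napi_struct *napi, struct page *page,
			   unsigned int offset, unsigned int len)
{
	struct sk_buff *skb = napi_get_frags(napi);

	if (unlikely(!skb)) {
		put_page(page);		/* drop: no skb available */
		return;
	}

	skb_fill_page_desc(skb, 0, page, offset, len);
	skb->len += len;
	skb->data_len += len;
	skb->truesize += PAGE_SIZE;

	napi_gro_frags(napi);		/* consumes napi->skb */
}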
4692
573e8fca
TH
4693/* Compute the checksum from gro_offset and return the folded value
4694 * after adding in any pseudo checksum.
4695 */
4696__sum16 __skb_gro_checksum_complete(struct sk_buff *skb)
4697{
4698 __wsum wsum;
4699 __sum16 sum;
4700
4701 wsum = skb_checksum(skb, skb_gro_offset(skb), skb_gro_len(skb), 0);
4702
4703 /* NAPI_GRO_CB(skb)->csum holds pseudo checksum */
4704 sum = csum_fold(csum_add(NAPI_GRO_CB(skb)->csum, wsum));
4705 if (likely(!sum)) {
4706 if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) &&
4707 !skb->csum_complete_sw)
4708 netdev_rx_csum_fault(skb->dev);
4709 }
4710
4711 NAPI_GRO_CB(skb)->csum = wsum;
4712 NAPI_GRO_CB(skb)->csum_valid = 1;
4713
4714 return sum;
4715}
4716EXPORT_SYMBOL(__skb_gro_checksum_complete);
4717
e326bed2 4718/*
855abcf0 4719 * net_rps_action_and_irq_enable sends any pending IPIs for RPS.
e326bed2
ED
4720 * Note: called with local irq disabled, but exits with local irq enabled.
4721 */
4722static void net_rps_action_and_irq_enable(struct softnet_data *sd)
4723{
4724#ifdef CONFIG_RPS
4725 struct softnet_data *remsd = sd->rps_ipi_list;
4726
4727 if (remsd) {
4728 sd->rps_ipi_list = NULL;
4729
4730 local_irq_enable();
4731
 4732 /* Send pending IPIs to kick RPS processing on remote CPUs. */
4733 while (remsd) {
4734 struct softnet_data *next = remsd->rps_ipi_next;
4735
4736 if (cpu_online(remsd->cpu))
c46fff2a 4737 smp_call_function_single_async(remsd->cpu,
fce8ad15 4738 &remsd->csd);
e326bed2
ED
4739 remsd = next;
4740 }
4741 } else
4742#endif
4743 local_irq_enable();
4744}
4745
d75b1ade
ED
4746static bool sd_has_rps_ipi_waiting(struct softnet_data *sd)
4747{
4748#ifdef CONFIG_RPS
4749 return sd->rps_ipi_list != NULL;
4750#else
4751 return false;
4752#endif
4753}
4754
bea3348e 4755static int process_backlog(struct napi_struct *napi, int quota)
1da177e4
LT
4756{
4757 int work = 0;
eecfd7c4 4758 struct softnet_data *sd = container_of(napi, struct softnet_data, backlog);
1da177e4 4759
e326bed2
ED
 4760 /* Check if we have pending IPIs; it's better to send them now
 4761 * instead of waiting for net_rx_action() to end.
4762 */
d75b1ade 4763 if (sd_has_rps_ipi_waiting(sd)) {
e326bed2
ED
4764 local_irq_disable();
4765 net_rps_action_and_irq_enable(sd);
4766 }
d75b1ade 4767
bea3348e 4768 napi->weight = weight_p;
6e7676c1 4769 local_irq_disable();
11ef7a89 4770 while (1) {
1da177e4 4771 struct sk_buff *skb;
6e7676c1
CG
4772
4773 while ((skb = __skb_dequeue(&sd->process_queue))) {
2c17d27c 4774 rcu_read_lock();
6e7676c1
CG
4775 local_irq_enable();
4776 __netif_receive_skb(skb);
2c17d27c 4777 rcu_read_unlock();
6e7676c1 4778 local_irq_disable();
76cc8b13
TH
4779 input_queue_head_incr(sd);
4780 if (++work >= quota) {
4781 local_irq_enable();
4782 return work;
4783 }
6e7676c1 4784 }
1da177e4 4785
e36fa2f7 4786 rps_lock(sd);
11ef7a89 4787 if (skb_queue_empty(&sd->input_pkt_queue)) {
eecfd7c4
ED
4788 /*
4789 * Inline a custom version of __napi_complete().
 4790 * Only the current CPU owns and manipulates this napi,
11ef7a89
TH
4791 * and NAPI_STATE_SCHED is the only possible flag set
4792 * on backlog.
4793 * We can use a plain write instead of clear_bit(),
eecfd7c4
ED
 4794 * and we don't need an smp_mb() memory barrier.
4795 */
eecfd7c4 4796 napi->state = 0;
11ef7a89 4797 rps_unlock(sd);
eecfd7c4 4798
11ef7a89 4799 break;
bea3348e 4800 }
11ef7a89
TH
4801
4802 skb_queue_splice_tail_init(&sd->input_pkt_queue,
4803 &sd->process_queue);
e36fa2f7 4804 rps_unlock(sd);
6e7676c1
CG
4805 }
4806 local_irq_enable();
1da177e4 4807
bea3348e
SH
4808 return work;
4809}
1da177e4 4810
bea3348e
SH
4811/**
4812 * __napi_schedule - schedule for receive
c4ea43c5 4813 * @n: entry to schedule
bea3348e 4814 *
bc9ad166
ED
4815 * The entry's receive function will be scheduled to run.
4816 * Consider using __napi_schedule_irqoff() if hard irqs are masked.
bea3348e 4817 */
b5606c2d 4818void __napi_schedule(struct napi_struct *n)
bea3348e
SH
4819{
4820 unsigned long flags;
1da177e4 4821
bea3348e 4822 local_irq_save(flags);
903ceff7 4823 ____napi_schedule(this_cpu_ptr(&softnet_data), n);
bea3348e 4824 local_irq_restore(flags);
1da177e4 4825}
bea3348e
SH
4826EXPORT_SYMBOL(__napi_schedule);
4827
bc9ad166
ED
4828/**
4829 * __napi_schedule_irqoff - schedule for receive
4830 * @n: entry to schedule
4831 *
4832 * Variant of __napi_schedule() assuming hard irqs are masked
4833 */
4834void __napi_schedule_irqoff(struct napi_struct *n)
4835{
4836 ____napi_schedule(this_cpu_ptr(&softnet_data), n);
4837}
4838EXPORT_SYMBOL(__napi_schedule_irqoff);
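/* Illustrative sketch (not part of dev.c): the usual pairing for the
 * schedule helpers above.  Hard irqs are already masked inside the
 * interrupt handler, so __napi_schedule_irqoff() can skip the flags
 * save/restore.  struct my_priv and my_disable_rx_irq() are hypothetical
 * driver glue.
 */
struct my_priv {
	struct napi_struct napi;
	/* ... device specific state ... */
};

static void my_disable_rx_irq(struct my_priv *priv);	/* hypothetical */

static irqreturn_t my_rx_interrupt(int irq, void *data)
{
	struct my_priv *priv = data;

	if (napi_schedule_prep(&priv->napi)) {
		my_disable_rx_irq(priv);	/* mask device RX interrupts */
		__napi_schedule_irqoff(&priv->napi);
	}
	return IRQ_HANDLED;
}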
4839
d565b0a1
HX
4840void __napi_complete(struct napi_struct *n)
4841{
4842 BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state));
d565b0a1 4843
d75b1ade 4844 list_del_init(&n->poll_list);
4e857c58 4845 smp_mb__before_atomic();
d565b0a1
HX
4846 clear_bit(NAPI_STATE_SCHED, &n->state);
4847}
4848EXPORT_SYMBOL(__napi_complete);
4849
3b47d303 4850void napi_complete_done(struct napi_struct *n, int work_done)
d565b0a1
HX
4851{
4852 unsigned long flags;
4853
4854 /*
4855 * don't let napi dequeue from the cpu poll list
4856 * just in case its running on a different cpu
4857 */
4858 if (unlikely(test_bit(NAPI_STATE_NPSVC, &n->state)))
4859 return;
4860
3b47d303
ED
4861 if (n->gro_list) {
4862 unsigned long timeout = 0;
d75b1ade 4863
3b47d303
ED
4864 if (work_done)
4865 timeout = n->dev->gro_flush_timeout;
4866
4867 if (timeout)
4868 hrtimer_start(&n->timer, ns_to_ktime(timeout),
4869 HRTIMER_MODE_REL_PINNED);
4870 else
4871 napi_gro_flush(n, false);
4872 }
d75b1ade
ED
4873 if (likely(list_empty(&n->poll_list))) {
4874 WARN_ON_ONCE(!test_and_clear_bit(NAPI_STATE_SCHED, &n->state));
4875 } else {
4876 /* If n->poll_list is not empty, we need to mask irqs */
4877 local_irq_save(flags);
4878 __napi_complete(n);
4879 local_irq_restore(flags);
4880 }
d565b0a1 4881}
3b47d303 4882EXPORT_SYMBOL(napi_complete_done);
d565b0a1 4883
af12fa6e 4884/* must be called under rcu_read_lock(), as we dont take a reference */
02d62e86 4885static struct napi_struct *napi_by_id(unsigned int napi_id)
af12fa6e
ET
4886{
4887 unsigned int hash = napi_id % HASH_SIZE(napi_hash);
4888 struct napi_struct *napi;
4889
4890 hlist_for_each_entry_rcu(napi, &napi_hash[hash], napi_hash_node)
4891 if (napi->napi_id == napi_id)
4892 return napi;
4893
4894 return NULL;
4895}
02d62e86
ED
4896
4897#if defined(CONFIG_NET_RX_BUSY_POLL)
ce6aea93 4898#define BUSY_POLL_BUDGET 8
02d62e86
ED
4899bool sk_busy_loop(struct sock *sk, int nonblock)
4900{
4901 unsigned long end_time = !nonblock ? sk_busy_loop_end_time(sk) : 0;
ce6aea93 4902 int (*busy_poll)(struct napi_struct *dev);
02d62e86
ED
4903 struct napi_struct *napi;
4904 int rc = false;
4905
2a028ecb 4906 rcu_read_lock();
02d62e86
ED
4907
4908 napi = napi_by_id(sk->sk_napi_id);
4909 if (!napi)
4910 goto out;
4911
ce6aea93
ED
4912 /* Note: ndo_busy_poll method is optional in linux-4.5 */
4913 busy_poll = napi->dev->netdev_ops->ndo_busy_poll;
02d62e86
ED
4914
4915 do {
ce6aea93 4916 rc = 0;
2a028ecb 4917 local_bh_disable();
ce6aea93
ED
4918 if (busy_poll) {
4919 rc = busy_poll(napi);
4920 } else if (napi_schedule_prep(napi)) {
4921 void *have = netpoll_poll_lock(napi);
4922
4923 if (test_bit(NAPI_STATE_SCHED, &napi->state)) {
4924 rc = napi->poll(napi, BUSY_POLL_BUDGET);
4925 trace_napi_poll(napi);
4926 if (rc == BUSY_POLL_BUDGET) {
4927 napi_complete_done(napi, rc);
4928 napi_schedule(napi);
4929 }
4930 }
4931 netpoll_poll_unlock(have);
4932 }
2a028ecb
ED
4933 if (rc > 0)
4934 NET_ADD_STATS_BH(sock_net(sk),
4935 LINUX_MIB_BUSYPOLLRXPACKETS, rc);
4936 local_bh_enable();
02d62e86
ED
4937
4938 if (rc == LL_FLUSH_FAILED)
4939 break; /* permanent failure */
4940
02d62e86 4941 cpu_relax();
02d62e86
ED
4942 } while (!nonblock && skb_queue_empty(&sk->sk_receive_queue) &&
4943 !need_resched() && !busy_loop_timeout(end_time));
4944
4945 rc = !skb_queue_empty(&sk->sk_receive_queue);
4946out:
2a028ecb 4947 rcu_read_unlock();
02d62e86
ED
4948 return rc;
4949}
4950EXPORT_SYMBOL(sk_busy_loop);
4951
4952#endif /* CONFIG_NET_RX_BUSY_POLL */
af12fa6e
ET
4953
4954void napi_hash_add(struct napi_struct *napi)
4955{
d64b5e85
ED
4956 if (test_bit(NAPI_STATE_NO_BUSY_POLL, &napi->state) ||
4957 test_and_set_bit(NAPI_STATE_HASHED, &napi->state))
52bd2d62 4958 return;
af12fa6e 4959
52bd2d62 4960 spin_lock(&napi_hash_lock);
af12fa6e 4961
52bd2d62
ED
4962 /* 0..NR_CPUS+1 range is reserved for sender_cpu use */
4963 do {
4964 if (unlikely(++napi_gen_id < NR_CPUS + 1))
4965 napi_gen_id = NR_CPUS + 1;
4966 } while (napi_by_id(napi_gen_id));
4967 napi->napi_id = napi_gen_id;
af12fa6e 4968
52bd2d62
ED
4969 hlist_add_head_rcu(&napi->napi_hash_node,
4970 &napi_hash[napi->napi_id % HASH_SIZE(napi_hash)]);
af12fa6e 4971
52bd2d62 4972 spin_unlock(&napi_hash_lock);
af12fa6e
ET
4973}
4974EXPORT_SYMBOL_GPL(napi_hash_add);
4975
 4976/* Warning: the caller is responsible for making sure an RCU grace period
4977 * is respected before freeing memory containing @napi
4978 */
34cbe27e 4979bool napi_hash_del(struct napi_struct *napi)
af12fa6e 4980{
34cbe27e
ED
4981 bool rcu_sync_needed = false;
4982
af12fa6e
ET
4983 spin_lock(&napi_hash_lock);
4984
34cbe27e
ED
4985 if (test_and_clear_bit(NAPI_STATE_HASHED, &napi->state)) {
4986 rcu_sync_needed = true;
af12fa6e 4987 hlist_del_rcu(&napi->napi_hash_node);
34cbe27e 4988 }
af12fa6e 4989 spin_unlock(&napi_hash_lock);
34cbe27e 4990 return rcu_sync_needed;
af12fa6e
ET
4991}
4992EXPORT_SYMBOL_GPL(napi_hash_del);
4993
3b47d303
ED
4994static enum hrtimer_restart napi_watchdog(struct hrtimer *timer)
4995{
4996 struct napi_struct *napi;
4997
4998 napi = container_of(timer, struct napi_struct, timer);
4999 if (napi->gro_list)
5000 napi_schedule(napi);
5001
5002 return HRTIMER_NORESTART;
5003}
5004
d565b0a1
HX
5005void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
5006 int (*poll)(struct napi_struct *, int), int weight)
5007{
5008 INIT_LIST_HEAD(&napi->poll_list);
3b47d303
ED
5009 hrtimer_init(&napi->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_PINNED);
5010 napi->timer.function = napi_watchdog;
4ae5544f 5011 napi->gro_count = 0;
d565b0a1 5012 napi->gro_list = NULL;
5d38a079 5013 napi->skb = NULL;
d565b0a1 5014 napi->poll = poll;
82dc3c63
ED
5015 if (weight > NAPI_POLL_WEIGHT)
5016 pr_err_once("netif_napi_add() called with weight %d on device %s\n",
5017 weight, dev->name);
d565b0a1
HX
5018 napi->weight = weight;
5019 list_add(&napi->dev_list, &dev->napi_list);
d565b0a1 5020 napi->dev = dev;
5d38a079 5021#ifdef CONFIG_NETPOLL
d565b0a1
HX
5022 spin_lock_init(&napi->poll_lock);
5023 napi->poll_owner = -1;
5024#endif
5025 set_bit(NAPI_STATE_SCHED, &napi->state);
93d05d4a 5026 napi_hash_add(napi);
d565b0a1
HX
5027}
5028EXPORT_SYMBOL(netif_napi_add);
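/* Illustrative sketch (not part of dev.c): typical driver wiring for the
 * registration helper above.  The NAPI instance is added once at probe
 * time with the recommended NAPI_POLL_WEIGHT (64) and enabled from
 * ndo_open.  struct my_netdev_priv and my_poll() are hypothetical; see
 * process_backlog() above for the shape of a poll callback.
 */
struct my_netdev_priv {
	struct napi_struct napi;
	/* ... */
};

static int my_poll(struct napi_struct *napi, int budget);	/* hypothetical */

static void my_setup_napi(struct net_device *dev, struct my_netdev_priv *priv)
{
	netif_napi_add(dev, &priv->napi, my_poll, NAPI_POLL_WEIGHT);
}

static int my_open(struct net_device *dev)
{
	struct my_netdev_priv *priv = netdev_priv(dev);

	napi_enable(&priv->napi);
	/* ... enable queues and device interrupts ...
	 * teardown mirrors this with napi_disable() and netif_napi_del().
	 */
	return 0;
}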
5029
3b47d303
ED
5030void napi_disable(struct napi_struct *n)
5031{
5032 might_sleep();
5033 set_bit(NAPI_STATE_DISABLE, &n->state);
5034
5035 while (test_and_set_bit(NAPI_STATE_SCHED, &n->state))
5036 msleep(1);
2d8bff12
NH
5037 while (test_and_set_bit(NAPI_STATE_NPSVC, &n->state))
5038 msleep(1);
3b47d303
ED
5039
5040 hrtimer_cancel(&n->timer);
5041
5042 clear_bit(NAPI_STATE_DISABLE, &n->state);
5043}
5044EXPORT_SYMBOL(napi_disable);
5045
93d05d4a 5046/* Must be called in process context */
d565b0a1
HX
5047void netif_napi_del(struct napi_struct *napi)
5048{
93d05d4a
ED
5049 might_sleep();
5050 if (napi_hash_del(napi))
5051 synchronize_net();
d7b06636 5052 list_del_init(&napi->dev_list);
76620aaf 5053 napi_free_frags(napi);
d565b0a1 5054
289dccbe 5055 kfree_skb_list(napi->gro_list);
d565b0a1 5056 napi->gro_list = NULL;
4ae5544f 5057 napi->gro_count = 0;
d565b0a1
HX
5058}
5059EXPORT_SYMBOL(netif_napi_del);
5060
726ce70e
HX
5061static int napi_poll(struct napi_struct *n, struct list_head *repoll)
5062{
5063 void *have;
5064 int work, weight;
5065
5066 list_del_init(&n->poll_list);
5067
5068 have = netpoll_poll_lock(n);
5069
5070 weight = n->weight;
5071
5072 /* This NAPI_STATE_SCHED test is for avoiding a race
5073 * with netpoll's poll_napi(). Only the entity which
5074 * obtains the lock and sees NAPI_STATE_SCHED set will
5075 * actually make the ->poll() call. Therefore we avoid
5076 * accidentally calling ->poll() when NAPI is not scheduled.
5077 */
5078 work = 0;
5079 if (test_bit(NAPI_STATE_SCHED, &n->state)) {
5080 work = n->poll(n, weight);
5081 trace_napi_poll(n);
5082 }
5083
5084 WARN_ON_ONCE(work > weight);
5085
5086 if (likely(work < weight))
5087 goto out_unlock;
5088
5089 /* Drivers must not modify the NAPI state if they
5090 * consume the entire weight. In such cases this code
5091 * still "owns" the NAPI instance and therefore can
5092 * move the instance around on the list at-will.
5093 */
5094 if (unlikely(napi_disable_pending(n))) {
5095 napi_complete(n);
5096 goto out_unlock;
5097 }
5098
5099 if (n->gro_list) {
5100 /* flush too old packets
5101 * If HZ < 1000, flush all packets.
5102 */
5103 napi_gro_flush(n, HZ >= 1000);
5104 }
5105
001ce546
HX
5106 /* Some drivers may have called napi_schedule
5107 * prior to exhausting their budget.
5108 */
5109 if (unlikely(!list_empty(&n->poll_list))) {
5110 pr_warn_once("%s: Budget exhausted after napi rescheduled\n",
5111 n->dev ? n->dev->name : "backlog");
5112 goto out_unlock;
5113 }
5114
726ce70e
HX
5115 list_add_tail(&n->poll_list, repoll);
5116
5117out_unlock:
5118 netpoll_poll_unlock(have);
5119
5120 return work;
5121}
5122
1da177e4
LT
5123static void net_rx_action(struct softirq_action *h)
5124{
903ceff7 5125 struct softnet_data *sd = this_cpu_ptr(&softnet_data);
24f8b238 5126 unsigned long time_limit = jiffies + 2;
51b0bded 5127 int budget = netdev_budget;
d75b1ade
ED
5128 LIST_HEAD(list);
5129 LIST_HEAD(repoll);
53fb95d3 5130
1da177e4 5131 local_irq_disable();
d75b1ade
ED
5132 list_splice_init(&sd->poll_list, &list);
5133 local_irq_enable();
1da177e4 5134
ceb8d5bf 5135 for (;;) {
bea3348e 5136 struct napi_struct *n;
1da177e4 5137
ceb8d5bf
HX
5138 if (list_empty(&list)) {
5139 if (!sd_has_rps_ipi_waiting(sd) && list_empty(&repoll))
5140 return;
5141 break;
5142 }
5143
6bd373eb
HX
5144 n = list_first_entry(&list, struct napi_struct, poll_list);
5145 budget -= napi_poll(n, &repoll);
5146
d75b1ade 5147 /* If softirq window is exhausted then punt.
24f8b238
SH
 5148 * Allow this to run for 2 jiffies, which allows
5149 * an average latency of 1.5/HZ.
bea3348e 5150 */
ceb8d5bf
HX
5151 if (unlikely(budget <= 0 ||
5152 time_after_eq(jiffies, time_limit))) {
5153 sd->time_squeeze++;
5154 break;
5155 }
1da177e4 5156 }
d75b1ade 5157
795bb1c0 5158 __kfree_skb_flush();
d75b1ade
ED
5159 local_irq_disable();
5160
5161 list_splice_tail_init(&sd->poll_list, &list);
5162 list_splice_tail(&repoll, &list);
5163 list_splice(&list, &sd->poll_list);
5164 if (!list_empty(&sd->poll_list))
5165 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
5166
e326bed2 5167 net_rps_action_and_irq_enable(sd);
1da177e4
LT
5168}
5169
aa9d8560 5170struct netdev_adjacent {
9ff162a8 5171 struct net_device *dev;
5d261913
VF
5172
5173 /* upper master flag, there can only be one master device per list */
9ff162a8 5174 bool master;
5d261913 5175
5d261913
VF
5176 /* counter for the number of times this device was added to us */
5177 u16 ref_nr;
5178
402dae96
VF
5179 /* private field for the users */
5180 void *private;
5181
9ff162a8
JP
5182 struct list_head list;
5183 struct rcu_head rcu;
9ff162a8
JP
5184};
5185
6ea29da1 5186static struct netdev_adjacent *__netdev_find_adj(struct net_device *adj_dev,
2f268f12 5187 struct list_head *adj_list)
9ff162a8 5188{
5d261913 5189 struct netdev_adjacent *adj;
5d261913 5190
2f268f12 5191 list_for_each_entry(adj, adj_list, list) {
5d261913
VF
5192 if (adj->dev == adj_dev)
5193 return adj;
9ff162a8
JP
5194 }
5195 return NULL;
5196}
5197
5198/**
5199 * netdev_has_upper_dev - Check if device is linked to an upper device
5200 * @dev: device
5201 * @upper_dev: upper device to check
5202 *
5203 * Find out if a device is linked to specified upper device and return true
5204 * in case it is. Note that this checks only immediate upper device,
5205 * not through a complete stack of devices. The caller must hold the RTNL lock.
5206 */
5207bool netdev_has_upper_dev(struct net_device *dev,
5208 struct net_device *upper_dev)
5209{
5210 ASSERT_RTNL();
5211
6ea29da1 5212 return __netdev_find_adj(upper_dev, &dev->all_adj_list.upper);
9ff162a8
JP
5213}
5214EXPORT_SYMBOL(netdev_has_upper_dev);
5215
5216/**
5217 * netdev_has_any_upper_dev - Check if device is linked to some device
5218 * @dev: device
5219 *
5220 * Find out if a device is linked to an upper device and return true in case
5221 * it is. The caller must hold the RTNL lock.
5222 */
1d143d9f 5223static bool netdev_has_any_upper_dev(struct net_device *dev)
9ff162a8
JP
5224{
5225 ASSERT_RTNL();
5226
2f268f12 5227 return !list_empty(&dev->all_adj_list.upper);
9ff162a8 5228}
9ff162a8
JP
5229
5230/**
5231 * netdev_master_upper_dev_get - Get master upper device
5232 * @dev: device
5233 *
5234 * Find a master upper device and return pointer to it or NULL in case
5235 * it's not there. The caller must hold the RTNL lock.
5236 */
5237struct net_device *netdev_master_upper_dev_get(struct net_device *dev)
5238{
aa9d8560 5239 struct netdev_adjacent *upper;
9ff162a8
JP
5240
5241 ASSERT_RTNL();
5242
2f268f12 5243 if (list_empty(&dev->adj_list.upper))
9ff162a8
JP
5244 return NULL;
5245
2f268f12 5246 upper = list_first_entry(&dev->adj_list.upper,
aa9d8560 5247 struct netdev_adjacent, list);
9ff162a8
JP
5248 if (likely(upper->master))
5249 return upper->dev;
5250 return NULL;
5251}
5252EXPORT_SYMBOL(netdev_master_upper_dev_get);
5253
b6ccba4c
VF
5254void *netdev_adjacent_get_private(struct list_head *adj_list)
5255{
5256 struct netdev_adjacent *adj;
5257
5258 adj = list_entry(adj_list, struct netdev_adjacent, list);
5259
5260 return adj->private;
5261}
5262EXPORT_SYMBOL(netdev_adjacent_get_private);
5263
44a40855
VY
5264/**
5265 * netdev_upper_get_next_dev_rcu - Get the next dev from upper list
5266 * @dev: device
5267 * @iter: list_head ** of the current position
5268 *
5269 * Gets the next device from the dev's upper list, starting from iter
5270 * position. The caller must hold RCU read lock.
5271 */
5272struct net_device *netdev_upper_get_next_dev_rcu(struct net_device *dev,
5273 struct list_head **iter)
5274{
5275 struct netdev_adjacent *upper;
5276
5277 WARN_ON_ONCE(!rcu_read_lock_held() && !lockdep_rtnl_is_held());
5278
5279 upper = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);
5280
5281 if (&upper->list == &dev->adj_list.upper)
5282 return NULL;
5283
5284 *iter = &upper->list;
5285
5286 return upper->dev;
5287}
5288EXPORT_SYMBOL(netdev_upper_get_next_dev_rcu);
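/* Illustrative sketch (not part of dev.c): walking the immediate upper
 * devices with the iterator above.  In-tree callers normally use the
 * netdev_for_each_upper_dev_rcu() wrapper from netdevice.h; the
 * open-coded loop below relies only on the function defined here.
 */
static void my_log_uppers(struct net_device *dev)
{
	struct net_device *upper;
	struct list_head *iter;

	rcu_read_lock();
	iter = &dev->adj_list.upper;
	while ((upper = netdev_upper_get_next_dev_rcu(dev, &iter)) != NULL)
		netdev_info(dev, "upper device: %s\n", upper->name);
	rcu_read_unlock();
}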
5289
31088a11
VF
5290/**
5291 * netdev_all_upper_get_next_dev_rcu - Get the next dev from upper list
48311f46
VF
5292 * @dev: device
5293 * @iter: list_head ** of the current position
5294 *
5295 * Gets the next device from the dev's upper list, starting from iter
5296 * position. The caller must hold RCU read lock.
5297 */
2f268f12
VF
5298struct net_device *netdev_all_upper_get_next_dev_rcu(struct net_device *dev,
5299 struct list_head **iter)
48311f46
VF
5300{
5301 struct netdev_adjacent *upper;
5302
85328240 5303 WARN_ON_ONCE(!rcu_read_lock_held() && !lockdep_rtnl_is_held());
48311f46
VF
5304
5305 upper = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);
5306
2f268f12 5307 if (&upper->list == &dev->all_adj_list.upper)
48311f46
VF
5308 return NULL;
5309
5310 *iter = &upper->list;
5311
5312 return upper->dev;
5313}
2f268f12 5314EXPORT_SYMBOL(netdev_all_upper_get_next_dev_rcu);
48311f46 5315
31088a11
VF
5316/**
5317 * netdev_lower_get_next_private - Get the next ->private from the
5318 * lower neighbour list
5319 * @dev: device
5320 * @iter: list_head ** of the current position
5321 *
5322 * Gets the next netdev_adjacent->private from the dev's lower neighbour
 5323 * list, starting from iter position. The caller must either hold the
5324 * RTNL lock or its own locking that guarantees that the neighbour lower
b469139e 5325 * list will remain unchanged.
31088a11
VF
5326 */
5327void *netdev_lower_get_next_private(struct net_device *dev,
5328 struct list_head **iter)
5329{
5330 struct netdev_adjacent *lower;
5331
5332 lower = list_entry(*iter, struct netdev_adjacent, list);
5333
5334 if (&lower->list == &dev->adj_list.lower)
5335 return NULL;
5336
6859e7df 5337 *iter = lower->list.next;
31088a11
VF
5338
5339 return lower->private;
5340}
5341EXPORT_SYMBOL(netdev_lower_get_next_private);
5342
5343/**
5344 * netdev_lower_get_next_private_rcu - Get the next ->private from the
5345 * lower neighbour list, RCU
5346 * variant
5347 * @dev: device
5348 * @iter: list_head ** of the current position
5349 *
5350 * Gets the next netdev_adjacent->private from the dev's lower neighbour
5351 * list, starting from iter position. The caller must hold RCU read lock.
5352 */
5353void *netdev_lower_get_next_private_rcu(struct net_device *dev,
5354 struct list_head **iter)
5355{
5356 struct netdev_adjacent *lower;
5357
5358 WARN_ON_ONCE(!rcu_read_lock_held());
5359
5360 lower = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);
5361
5362 if (&lower->list == &dev->adj_list.lower)
5363 return NULL;
5364
6859e7df 5365 *iter = &lower->list;
31088a11
VF
5366
5367 return lower->private;
5368}
5369EXPORT_SYMBOL(netdev_lower_get_next_private_rcu);
5370
4085ebe8
VY
5371/**
5372 * netdev_lower_get_next - Get the next device from the lower neighbour
5373 * list
5374 * @dev: device
5375 * @iter: list_head ** of the current position
5376 *
5377 * Gets the next netdev_adjacent from the dev's lower neighbour
5378 * list, starting from iter position. The caller must hold RTNL lock or
5379 * its own locking that guarantees that the neighbour lower
b469139e 5380 * list will remain unchanged.
4085ebe8
VY
5381 */
5382void *netdev_lower_get_next(struct net_device *dev, struct list_head **iter)
5383{
5384 struct netdev_adjacent *lower;
5385
5386 lower = list_entry((*iter)->next, struct netdev_adjacent, list);
5387
5388 if (&lower->list == &dev->adj_list.lower)
5389 return NULL;
5390
5391 *iter = &lower->list;
5392
5393 return lower->dev;
5394}
5395EXPORT_SYMBOL(netdev_lower_get_next);
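
/*
 * Usage sketch (illustrative only): walking the direct lower devices under
 * RTNL, here via the netdev_for_each_lower_dev() helper from
 * <linux/netdevice.h>, which is built on this iterator:
 *
 *        struct net_device *lower;
 *        struct list_head *iter;
 *
 *        ASSERT_RTNL();
 *        netdev_for_each_lower_dev(dev, lower, iter)
 *                pr_debug("%s is a lower device of %s\n", lower->name, dev->name);
 */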
5396
e001bfad 5397/**
5398 * netdev_lower_get_first_private_rcu - Get the first ->private from the
5399 * lower neighbour list, RCU
5400 * variant
5401 * @dev: device
5402 *
5403 * Gets the first netdev_adjacent->private from the dev's lower neighbour
5404 * list. The caller must hold RCU read lock.
5405 */
5406void *netdev_lower_get_first_private_rcu(struct net_device *dev)
5407{
5408 struct netdev_adjacent *lower;
5409
5410 lower = list_first_or_null_rcu(&dev->adj_list.lower,
5411 struct netdev_adjacent, list);
5412 if (lower)
5413 return lower->private;
5414 return NULL;
5415}
5416EXPORT_SYMBOL(netdev_lower_get_first_private_rcu);
5417
9ff162a8
JP
5418/**
5419 * netdev_master_upper_dev_get_rcu - Get master upper device
5420 * @dev: device
5421 *
5422 * Find a master upper device and return pointer to it or NULL in case
5423 * it's not there. The caller must hold the RCU read lock.
5424 */
5425struct net_device *netdev_master_upper_dev_get_rcu(struct net_device *dev)
5426{
aa9d8560 5427 struct netdev_adjacent *upper;
9ff162a8 5428
2f268f12 5429 upper = list_first_or_null_rcu(&dev->adj_list.upper,
aa9d8560 5430 struct netdev_adjacent, list);
9ff162a8
JP
5431 if (upper && likely(upper->master))
5432 return upper->dev;
5433 return NULL;
5434}
5435EXPORT_SYMBOL(netdev_master_upper_dev_get_rcu);
5436
0a59f3a9 5437static int netdev_adjacent_sysfs_add(struct net_device *dev,
3ee32707
VF
5438 struct net_device *adj_dev,
5439 struct list_head *dev_list)
5440{
5441 char linkname[IFNAMSIZ+7];
5442 sprintf(linkname, dev_list == &dev->adj_list.upper ?
5443 "upper_%s" : "lower_%s", adj_dev->name);
5444 return sysfs_create_link(&(dev->dev.kobj), &(adj_dev->dev.kobj),
5445 linkname);
5446}
0a59f3a9 5447static void netdev_adjacent_sysfs_del(struct net_device *dev,
3ee32707
VF
5448 char *name,
5449 struct list_head *dev_list)
5450{
5451 char linkname[IFNAMSIZ+7];
5452 sprintf(linkname, dev_list == &dev->adj_list.upper ?
5453 "upper_%s" : "lower_%s", name);
5454 sysfs_remove_link(&(dev->dev.kobj), linkname);
5455}
5456
7ce64c79
AF
5457static inline bool netdev_adjacent_is_neigh_list(struct net_device *dev,
5458 struct net_device *adj_dev,
5459 struct list_head *dev_list)
5460{
5461 return (dev_list == &dev->adj_list.upper ||
5462 dev_list == &dev->adj_list.lower) &&
5463 net_eq(dev_net(dev), dev_net(adj_dev));
5464}
3ee32707 5465
5d261913
VF
5466static int __netdev_adjacent_dev_insert(struct net_device *dev,
5467 struct net_device *adj_dev,
7863c054 5468 struct list_head *dev_list,
402dae96 5469 void *private, bool master)
5d261913
VF
5470{
5471 struct netdev_adjacent *adj;
842d67a7 5472 int ret;
5d261913 5473
6ea29da1 5474 adj = __netdev_find_adj(adj_dev, dev_list);
5d261913
VF
5475
5476 if (adj) {
5d261913
VF
5477 adj->ref_nr++;
5478 return 0;
5479 }
5480
5481 adj = kmalloc(sizeof(*adj), GFP_KERNEL);
5482 if (!adj)
5483 return -ENOMEM;
5484
5485 adj->dev = adj_dev;
5486 adj->master = master;
5d261913 5487 adj->ref_nr = 1;
402dae96 5488 adj->private = private;
5d261913 5489 dev_hold(adj_dev);
2f268f12
VF
5490
5491 pr_debug("dev_hold for %s, because of link added from %s to %s\n",
5492 adj_dev->name, dev->name, adj_dev->name);
5d261913 5493
7ce64c79 5494 if (netdev_adjacent_is_neigh_list(dev, adj_dev, dev_list)) {
3ee32707 5495 ret = netdev_adjacent_sysfs_add(dev, adj_dev, dev_list);
5831d66e
VF
5496 if (ret)
5497 goto free_adj;
5498 }
5499
7863c054 5500 /* Ensure that master link is always the first item in list. */
842d67a7
VF
5501 if (master) {
5502 ret = sysfs_create_link(&(dev->dev.kobj),
5503 &(adj_dev->dev.kobj), "master");
5504 if (ret)
5831d66e 5505 goto remove_symlinks;
842d67a7 5506
7863c054 5507 list_add_rcu(&adj->list, dev_list);
842d67a7 5508 } else {
7863c054 5509 list_add_tail_rcu(&adj->list, dev_list);
842d67a7 5510 }
5d261913
VF
5511
5512 return 0;
842d67a7 5513
5831d66e 5514remove_symlinks:
7ce64c79 5515 if (netdev_adjacent_is_neigh_list(dev, adj_dev, dev_list))
3ee32707 5516 netdev_adjacent_sysfs_del(dev, adj_dev->name, dev_list);
842d67a7
VF
5517free_adj:
5518 kfree(adj);
974daef7 5519 dev_put(adj_dev);
842d67a7
VF
5520
5521 return ret;
5d261913
VF
5522}
5523
1d143d9f 5524static void __netdev_adjacent_dev_remove(struct net_device *dev,
5525 struct net_device *adj_dev,
5526 struct list_head *dev_list)
5d261913
VF
5527{
5528 struct netdev_adjacent *adj;
5529
6ea29da1 5530 adj = __netdev_find_adj(adj_dev, dev_list);
5d261913 5531
2f268f12
VF
5532 if (!adj) {
5533 pr_err("tried to remove device %s from %s\n",
5534 adj_dev->name, dev->name);
5d261913 5535 BUG();
2f268f12 5536 }
5d261913
VF
5537
5538 if (adj->ref_nr > 1) {
2f268f12
VF
5539 pr_debug("%s to %s ref_nr-- = %d\n", dev->name, adj_dev->name,
5540 adj->ref_nr-1);
5d261913
VF
5541 adj->ref_nr--;
5542 return;
5543 }
5544
842d67a7
VF
5545 if (adj->master)
5546 sysfs_remove_link(&(dev->dev.kobj), "master");
5547
7ce64c79 5548 if (netdev_adjacent_is_neigh_list(dev, adj_dev, dev_list))
3ee32707 5549 netdev_adjacent_sysfs_del(dev, adj_dev->name, dev_list);
5831d66e 5550
5d261913 5551 list_del_rcu(&adj->list);
2f268f12
VF
5552 pr_debug("dev_put for %s, because link removed from %s to %s\n",
5553 adj_dev->name, dev->name, adj_dev->name);
5d261913
VF
5554 dev_put(adj_dev);
5555 kfree_rcu(adj, rcu);
5556}
5557
1d143d9f 5558static int __netdev_adjacent_dev_link_lists(struct net_device *dev,
5559 struct net_device *upper_dev,
5560 struct list_head *up_list,
5561 struct list_head *down_list,
5562 void *private, bool master)
5d261913
VF
5563{
5564 int ret;
5565
402dae96
VF
5566 ret = __netdev_adjacent_dev_insert(dev, upper_dev, up_list, private,
5567 master);
5d261913
VF
5568 if (ret)
5569 return ret;
5570
402dae96
VF
5571 ret = __netdev_adjacent_dev_insert(upper_dev, dev, down_list, private,
5572 false);
5d261913 5573 if (ret) {
2f268f12 5574 __netdev_adjacent_dev_remove(dev, upper_dev, up_list);
5d261913
VF
5575 return ret;
5576 }
5577
5578 return 0;
5579}
5580
1d143d9f 5581static int __netdev_adjacent_dev_link(struct net_device *dev,
5582 struct net_device *upper_dev)
5d261913 5583{
2f268f12
VF
5584 return __netdev_adjacent_dev_link_lists(dev, upper_dev,
5585 &dev->all_adj_list.upper,
5586 &upper_dev->all_adj_list.lower,
402dae96 5587 NULL, false);
5d261913
VF
5588}
5589
1d143d9f 5590static void __netdev_adjacent_dev_unlink_lists(struct net_device *dev,
5591 struct net_device *upper_dev,
5592 struct list_head *up_list,
5593 struct list_head *down_list)
5d261913 5594{
2f268f12
VF
5595 __netdev_adjacent_dev_remove(dev, upper_dev, up_list);
5596 __netdev_adjacent_dev_remove(upper_dev, dev, down_list);
5d261913
VF
5597}
5598
1d143d9f 5599static void __netdev_adjacent_dev_unlink(struct net_device *dev,
5600 struct net_device *upper_dev)
5d261913 5601{
2f268f12
VF
5602 __netdev_adjacent_dev_unlink_lists(dev, upper_dev,
5603 &dev->all_adj_list.upper,
5604 &upper_dev->all_adj_list.lower);
5605}
5606
1d143d9f 5607static int __netdev_adjacent_dev_link_neighbour(struct net_device *dev,
5608 struct net_device *upper_dev,
5609 void *private, bool master)
2f268f12
VF
5610{
5611 int ret = __netdev_adjacent_dev_link(dev, upper_dev);
5612
5613 if (ret)
5614 return ret;
5615
5616 ret = __netdev_adjacent_dev_link_lists(dev, upper_dev,
5617 &dev->adj_list.upper,
5618 &upper_dev->adj_list.lower,
402dae96 5619 private, master);
2f268f12
VF
5620 if (ret) {
5621 __netdev_adjacent_dev_unlink(dev, upper_dev);
5622 return ret;
5623 }
5624
5625 return 0;
5d261913
VF
5626}
5627
1d143d9f 5628static void __netdev_adjacent_dev_unlink_neighbour(struct net_device *dev,
5629 struct net_device *upper_dev)
2f268f12
VF
5630{
5631 __netdev_adjacent_dev_unlink(dev, upper_dev);
5632 __netdev_adjacent_dev_unlink_lists(dev, upper_dev,
5633 &dev->adj_list.upper,
5634 &upper_dev->adj_list.lower);
5635}
5d261913 5636
9ff162a8 5637static int __netdev_upper_dev_link(struct net_device *dev,
402dae96 5638 struct net_device *upper_dev, bool master,
29bf24af 5639 void *upper_priv, void *upper_info)
9ff162a8 5640{
0e4ead9d 5641 struct netdev_notifier_changeupper_info changeupper_info;
5d261913
VF
5642 struct netdev_adjacent *i, *j, *to_i, *to_j;
5643 int ret = 0;
9ff162a8
JP
5644
5645 ASSERT_RTNL();
5646
5647 if (dev == upper_dev)
5648 return -EBUSY;
5649
5650 /* To prevent loops, check if dev is not upper device to upper_dev. */
6ea29da1 5651 if (__netdev_find_adj(dev, &upper_dev->all_adj_list.upper))
9ff162a8
JP
5652 return -EBUSY;
5653
6ea29da1 5654 if (__netdev_find_adj(upper_dev, &dev->adj_list.upper))
9ff162a8
JP
5655 return -EEXIST;
5656
5657 if (master && netdev_master_upper_dev_get(dev))
5658 return -EBUSY;
5659
0e4ead9d
JP
5660 changeupper_info.upper_dev = upper_dev;
5661 changeupper_info.master = master;
5662 changeupper_info.linking = true;
29bf24af 5663 changeupper_info.upper_info = upper_info;
0e4ead9d 5664
573c7ba0
JP
5665 ret = call_netdevice_notifiers_info(NETDEV_PRECHANGEUPPER, dev,
5666 &changeupper_info.info);
5667 ret = notifier_to_errno(ret);
5668 if (ret)
5669 return ret;
5670
6dffb044 5671 ret = __netdev_adjacent_dev_link_neighbour(dev, upper_dev, upper_priv,
402dae96 5672 master);
5d261913
VF
5673 if (ret)
5674 return ret;
9ff162a8 5675
5d261913 5676 /* Now that we linked these devs, make all the upper_dev's
2f268f12 5677 * all_adj_list.upper visible to every dev's all_adj_list.lower and vice
5d261913
VF
5678 * versa, and don't forget the devices themselves. All of these
5679 * links are non-neighbours.
5680 */
2f268f12
VF
5681 list_for_each_entry(i, &dev->all_adj_list.lower, list) {
5682 list_for_each_entry(j, &upper_dev->all_adj_list.upper, list) {
5683 pr_debug("Interlinking %s with %s, non-neighbour\n",
5684 i->dev->name, j->dev->name);
5d261913
VF
5685 ret = __netdev_adjacent_dev_link(i->dev, j->dev);
5686 if (ret)
5687 goto rollback_mesh;
5688 }
5689 }
5690
5691 /* add dev to every upper_dev's upper device */
2f268f12
VF
5692 list_for_each_entry(i, &upper_dev->all_adj_list.upper, list) {
5693 pr_debug("linking %s's upper device %s with %s\n",
5694 upper_dev->name, i->dev->name, dev->name);
5d261913
VF
5695 ret = __netdev_adjacent_dev_link(dev, i->dev);
5696 if (ret)
5697 goto rollback_upper_mesh;
5698 }
5699
5700 /* add upper_dev to every dev's lower device */
2f268f12
VF
5701 list_for_each_entry(i, &dev->all_adj_list.lower, list) {
5702 pr_debug("linking %s's lower device %s with %s\n", dev->name,
5703 i->dev->name, upper_dev->name);
5d261913
VF
5704 ret = __netdev_adjacent_dev_link(i->dev, upper_dev);
5705 if (ret)
5706 goto rollback_lower_mesh;
5707 }
9ff162a8 5708
b03804e7
IS
5709 ret = call_netdevice_notifiers_info(NETDEV_CHANGEUPPER, dev,
5710 &changeupper_info.info);
5711 ret = notifier_to_errno(ret);
5712 if (ret)
5713 goto rollback_lower_mesh;
5714
9ff162a8 5715 return 0;
5d261913
VF
5716
5717rollback_lower_mesh:
5718 to_i = i;
2f268f12 5719 list_for_each_entry(i, &dev->all_adj_list.lower, list) {
5d261913
VF
5720 if (i == to_i)
5721 break;
5722 __netdev_adjacent_dev_unlink(i->dev, upper_dev);
5723 }
5724
5725 i = NULL;
5726
5727rollback_upper_mesh:
5728 to_i = i;
2f268f12 5729 list_for_each_entry(i, &upper_dev->all_adj_list.upper, list) {
5d261913
VF
5730 if (i == to_i)
5731 break;
5732 __netdev_adjacent_dev_unlink(dev, i->dev);
5733 }
5734
5735 i = j = NULL;
5736
5737rollback_mesh:
5738 to_i = i;
5739 to_j = j;
2f268f12
VF
5740 list_for_each_entry(i, &dev->all_adj_list.lower, list) {
5741 list_for_each_entry(j, &upper_dev->all_adj_list.upper, list) {
5d261913
VF
5742 if (i == to_i && j == to_j)
5743 break;
5744 __netdev_adjacent_dev_unlink(i->dev, j->dev);
5745 }
5746 if (i == to_i)
5747 break;
5748 }
5749
2f268f12 5750 __netdev_adjacent_dev_unlink_neighbour(dev, upper_dev);
5d261913
VF
5751
5752 return ret;
9ff162a8
JP
5753}
5754
5755/**
5756 * netdev_upper_dev_link - Add a link to the upper device
5757 * @dev: device
5758 * @upper_dev: new upper device
5759 *
5760 * Adds a link to a device which is upper to this one. The caller must hold
5761 * the RTNL lock. On a failure a negative errno code is returned.
5762 * On success the reference counts are adjusted and the function
5763 * returns zero.
5764 */
5765int netdev_upper_dev_link(struct net_device *dev,
5766 struct net_device *upper_dev)
5767{
29bf24af 5768 return __netdev_upper_dev_link(dev, upper_dev, false, NULL, NULL);
9ff162a8
JP
5769}
5770EXPORT_SYMBOL(netdev_upper_dev_link);
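
/*
 * Usage sketch (illustrative only): a driver stacking itself on top of a
 * lower device (a vlan-like setup) could link the two under RTNL. The
 * "vdev"/"real_dev" names and the error handling are assumptions:
 *
 *        rtnl_lock();
 *        err = netdev_upper_dev_link(real_dev, vdev);
 *        if (err)
 *                netdev_err(vdev, "failed to link above %s\n", real_dev->name);
 *        rtnl_unlock();
 */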
5771
5772/**
5773 * netdev_master_upper_dev_link - Add a master link to the upper device
5774 * @dev: device
5775 * @upper_dev: new upper device
6dffb044 5776 * @upper_priv: upper device private
29bf24af 5777 * @upper_info: upper info to be passed down via notifier
9ff162a8
JP
5778 *
5779 * Adds a link to a device which is upper to this one. In this case, only
5780 * one master upper device can be linked, although other non-master devices
5781 * might be linked as well. The caller must hold the RTNL lock.
5782 * On a failure a negative errno code is returned. On success the reference
5783 * counts are adjusted and the function returns zero.
5784 */
5785int netdev_master_upper_dev_link(struct net_device *dev,
6dffb044 5786 struct net_device *upper_dev,
29bf24af 5787 void *upper_priv, void *upper_info)
9ff162a8 5788{
29bf24af
JP
5789 return __netdev_upper_dev_link(dev, upper_dev, true,
5790 upper_priv, upper_info);
9ff162a8
JP
5791}
5792EXPORT_SYMBOL(netdev_master_upper_dev_link);
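
/*
 * Usage sketch (illustrative only): a bonding-like master enslaving a port
 * under RTNL. The private pointer and the info argument are optional, and
 * the "slave_dev"/"bond_dev"/"slave_priv" names are assumptions:
 *
 *        rtnl_lock();
 *        err = netdev_master_upper_dev_link(slave_dev, bond_dev,
 *                                           slave_priv, NULL);
 *        if (err)
 *                netdev_err(bond_dev, "cannot enslave %s\n", slave_dev->name);
 *        rtnl_unlock();
 */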
5793
5794/**
5795 * netdev_upper_dev_unlink - Removes a link to upper device
5796 * @dev: device
5797 * @upper_dev: upper device to remove the link to
5798 *
5799 * Removes a link to a device which is upper to this one. The caller must hold
5800 * the RTNL lock.
5801 */
5802void netdev_upper_dev_unlink(struct net_device *dev,
5803 struct net_device *upper_dev)
5804{
0e4ead9d 5805 struct netdev_notifier_changeupper_info changeupper_info;
5d261913 5806 struct netdev_adjacent *i, *j;
9ff162a8
JP
5807 ASSERT_RTNL();
5808
0e4ead9d
JP
5809 changeupper_info.upper_dev = upper_dev;
5810 changeupper_info.master = netdev_master_upper_dev_get(dev) == upper_dev;
5811 changeupper_info.linking = false;
5812
573c7ba0
JP
5813 call_netdevice_notifiers_info(NETDEV_PRECHANGEUPPER, dev,
5814 &changeupper_info.info);
5815
2f268f12 5816 __netdev_adjacent_dev_unlink_neighbour(dev, upper_dev);
5d261913
VF
5817
5818 /* Here is the tricky part. We must remove all dev's lower
5819 * devices from all upper_dev's upper devices and vice
5820 * versa, to maintain the graph relationship.
5821 */
2f268f12
VF
5822 list_for_each_entry(i, &dev->all_adj_list.lower, list)
5823 list_for_each_entry(j, &upper_dev->all_adj_list.upper, list)
5d261913
VF
5824 __netdev_adjacent_dev_unlink(i->dev, j->dev);
5825
5826 /* remove also the devices itself from lower/upper device
5827 * list
5828 */
2f268f12 5829 list_for_each_entry(i, &dev->all_adj_list.lower, list)
5d261913
VF
5830 __netdev_adjacent_dev_unlink(i->dev, upper_dev);
5831
2f268f12 5832 list_for_each_entry(i, &upper_dev->all_adj_list.upper, list)
5d261913
VF
5833 __netdev_adjacent_dev_unlink(dev, i->dev);
5834
0e4ead9d
JP
5835 call_netdevice_notifiers_info(NETDEV_CHANGEUPPER, dev,
5836 &changeupper_info.info);
9ff162a8
JP
5837}
5838EXPORT_SYMBOL(netdev_upper_dev_unlink);
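
/*
 * Usage sketch (illustrative only): undoing a link created above, again
 * under RTNL; the device names are assumptions:
 *
 *        rtnl_lock();
 *        netdev_upper_dev_unlink(slave_dev, bond_dev);
 *        rtnl_unlock();
 */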
5839
61bd3857
MS
5840/**
5841 * netdev_bonding_info_change - Dispatch event about slave change
5842 * @dev: device
4a26e453 5843 * @bonding_info: info to dispatch
61bd3857
MS
5844 *
5845 * Send NETDEV_BONDING_INFO to netdev notifiers with info.
5846 * The caller must hold the RTNL lock.
5847 */
5848void netdev_bonding_info_change(struct net_device *dev,
5849 struct netdev_bonding_info *bonding_info)
5850{
5851 struct netdev_notifier_bonding_info info;
5852
5853 memcpy(&info.bonding_info, bonding_info,
5854 sizeof(struct netdev_bonding_info));
5855 call_netdevice_notifiers_info(NETDEV_BONDING_INFO, dev,
5856 &info.info);
5857}
5858EXPORT_SYMBOL(netdev_bonding_info_change);
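
/*
 * Usage sketch (illustrative only): a bonding-like driver reporting a slave
 * state change under RTNL. The fields of the structure, describing the
 * slave and its master, are driver specific and elided here; "slave_dev"
 * is an assumption:
 *
 *        struct netdev_bonding_info binfo = { };
 *
 *        ASSERT_RTNL();
 *        netdev_bonding_info_change(slave_dev, &binfo);
 */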
5859
2ce1ee17 5860static void netdev_adjacent_add_links(struct net_device *dev)
4c75431a
AF
5861{
5862 struct netdev_adjacent *iter;
5863
5864 struct net *net = dev_net(dev);
5865
5866 list_for_each_entry(iter, &dev->adj_list.upper, list) {
5867 if (!net_eq(net, dev_net(iter->dev)))
5868 continue;
5869 netdev_adjacent_sysfs_add(iter->dev, dev,
5870 &iter->dev->adj_list.lower);
5871 netdev_adjacent_sysfs_add(dev, iter->dev,
5872 &dev->adj_list.upper);
5873 }
5874
5875 list_for_each_entry(iter, &dev->adj_list.lower, list) {
5876 if (!net_eq(net, dev_net(iter->dev)))
5877 continue;
5878 netdev_adjacent_sysfs_add(iter->dev, dev,
5879 &iter->dev->adj_list.upper);
5880 netdev_adjacent_sysfs_add(dev, iter->dev,
5881 &dev->adj_list.lower);
5882 }
5883}
5884
2ce1ee17 5885static void netdev_adjacent_del_links(struct net_device *dev)
4c75431a
AF
5886{
5887 struct netdev_adjacent *iter;
5888
5889 struct net *net = dev_net(dev);
5890
5891 list_for_each_entry(iter, &dev->adj_list.upper, list) {
5892 if (!net_eq(net, dev_net(iter->dev)))
5893 continue;
5894 netdev_adjacent_sysfs_del(iter->dev, dev->name,
5895 &iter->dev->adj_list.lower);
5896 netdev_adjacent_sysfs_del(dev, iter->dev->name,
5897 &dev->adj_list.upper);
5898 }
5899
5900 list_for_each_entry(iter, &dev->adj_list.lower, list) {
5901 if (!net_eq(net, dev_net(iter->dev)))
5902 continue;
5903 netdev_adjacent_sysfs_del(iter->dev, dev->name,
5904 &iter->dev->adj_list.upper);
5905 netdev_adjacent_sysfs_del(dev, iter->dev->name,
5906 &dev->adj_list.lower);
5907 }
5908}
5909
5bb025fa 5910void netdev_adjacent_rename_links(struct net_device *dev, char *oldname)
402dae96 5911{
5bb025fa 5912 struct netdev_adjacent *iter;
402dae96 5913
4c75431a
AF
5914 struct net *net = dev_net(dev);
5915
5bb025fa 5916 list_for_each_entry(iter, &dev->adj_list.upper, list) {
4c75431a
AF
5917 if (!net_eq(net, dev_net(iter->dev)))
5918 continue;
5bb025fa
VF
5919 netdev_adjacent_sysfs_del(iter->dev, oldname,
5920 &iter->dev->adj_list.lower);
5921 netdev_adjacent_sysfs_add(iter->dev, dev,
5922 &iter->dev->adj_list.lower);
5923 }
402dae96 5924
5bb025fa 5925 list_for_each_entry(iter, &dev->adj_list.lower, list) {
4c75431a
AF
5926 if (!net_eq(net, dev_net(iter->dev)))
5927 continue;
5bb025fa
VF
5928 netdev_adjacent_sysfs_del(iter->dev, oldname,
5929 &iter->dev->adj_list.upper);
5930 netdev_adjacent_sysfs_add(iter->dev, dev,
5931 &iter->dev->adj_list.upper);
5932 }
402dae96 5933}
402dae96
VF
5934
5935void *netdev_lower_dev_get_private(struct net_device *dev,
5936 struct net_device *lower_dev)
5937{
5938 struct netdev_adjacent *lower;
5939
5940 if (!lower_dev)
5941 return NULL;
6ea29da1 5942 lower = __netdev_find_adj(lower_dev, &dev->adj_list.lower);
402dae96
VF
5943 if (!lower)
5944 return NULL;
5945
5946 return lower->private;
5947}
5948EXPORT_SYMBOL(netdev_lower_dev_get_private);
5949
4085ebe8
VY
5950
5951int dev_get_nest_level(struct net_device *dev,
b618aaa9 5952 bool (*type_check)(const struct net_device *dev))
4085ebe8
VY
5953{
5954 struct net_device *lower = NULL;
5955 struct list_head *iter;
5956 int max_nest = -1;
5957 int nest;
5958
5959 ASSERT_RTNL();
5960
5961 netdev_for_each_lower_dev(dev, lower, iter) {
5962 nest = dev_get_nest_level(lower, type_check);
5963 if (max_nest < nest)
5964 max_nest = nest;
5965 }
5966
5967 if (type_check(dev))
5968 max_nest++;
5969
5970 return max_nest;
5971}
5972EXPORT_SYMBOL(dev_get_nest_level);
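
/*
 * Usage sketch (illustrative only): a stackable device type can compute its
 * nesting depth, e.g. for lockdep class selection, by passing a type-check
 * predicate. example_is_foo_dev() and example_foo_ops are hypothetical:
 *
 *        static bool example_is_foo_dev(const struct net_device *dev)
 *        {
 *                return dev->netdev_ops == &example_foo_ops;
 *        }
 *
 *        ...
 *        nest_level = dev_get_nest_level(dev, example_is_foo_dev);
 */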
5973
04d48266
JP
5974/**
5975 * netdev_lower_state_changed - Dispatch event about lower device state change
5976 * @lower_dev: device
5977 * @lower_state_info: state to dispatch
5978 *
5979 * Send NETDEV_CHANGELOWERSTATE to netdev notifiers with info.
5980 * The caller must hold the RTNL lock.
5981 */
5982void netdev_lower_state_changed(struct net_device *lower_dev,
5983 void *lower_state_info)
5984{
5985 struct netdev_notifier_changelowerstate_info changelowerstate_info;
5986
5987 ASSERT_RTNL();
5988 changelowerstate_info.lower_state_info = lower_state_info;
5989 call_netdevice_notifiers_info(NETDEV_CHANGELOWERSTATE, lower_dev,
5990 &changelowerstate_info.info);
5991}
5992EXPORT_SYMBOL(netdev_lower_state_changed);
5993
b6c40d68
PM
5994static void dev_change_rx_flags(struct net_device *dev, int flags)
5995{
d314774c
SH
5996 const struct net_device_ops *ops = dev->netdev_ops;
5997
d2615bf4 5998 if (ops->ndo_change_rx_flags)
d314774c 5999 ops->ndo_change_rx_flags(dev, flags);
b6c40d68
PM
6000}
6001
991fb3f7 6002static int __dev_set_promiscuity(struct net_device *dev, int inc, bool notify)
1da177e4 6003{
b536db93 6004 unsigned int old_flags = dev->flags;
d04a48b0
EB
6005 kuid_t uid;
6006 kgid_t gid;
1da177e4 6007
24023451
PM
6008 ASSERT_RTNL();
6009
dad9b335
WC
6010 dev->flags |= IFF_PROMISC;
6011 dev->promiscuity += inc;
6012 if (dev->promiscuity == 0) {
6013 /*
6014 * Avoid overflow.
6015 * If inc causes overflow, untouch promisc and return error.
6016 */
6017 if (inc < 0)
6018 dev->flags &= ~IFF_PROMISC;
6019 else {
6020 dev->promiscuity -= inc;
7b6cd1ce
JP
6021 pr_warn("%s: promiscuity touches roof, set promiscuity failed. promiscuity feature of device might be broken.\n",
6022 dev->name);
dad9b335
WC
6023 return -EOVERFLOW;
6024 }
6025 }
52609c0b 6026 if (dev->flags != old_flags) {
7b6cd1ce
JP
6027 pr_info("device %s %s promiscuous mode\n",
6028 dev->name,
6029 dev->flags & IFF_PROMISC ? "entered" : "left");
8192b0c4
DH
6030 if (audit_enabled) {
6031 current_uid_gid(&uid, &gid);
7759db82
KHK
6032 audit_log(current->audit_context, GFP_ATOMIC,
6033 AUDIT_ANOM_PROMISCUOUS,
6034 "dev=%s prom=%d old_prom=%d auid=%u uid=%u gid=%u ses=%u",
6035 dev->name, (dev->flags & IFF_PROMISC),
6036 (old_flags & IFF_PROMISC),
e1760bd5 6037 from_kuid(&init_user_ns, audit_get_loginuid(current)),
d04a48b0
EB
6038 from_kuid(&init_user_ns, uid),
6039 from_kgid(&init_user_ns, gid),
7759db82 6040 audit_get_sessionid(current));
8192b0c4 6041 }
24023451 6042
b6c40d68 6043 dev_change_rx_flags(dev, IFF_PROMISC);
1da177e4 6044 }
991fb3f7
ND
6045 if (notify)
6046 __dev_notify_flags(dev, old_flags, IFF_PROMISC);
dad9b335 6047 return 0;
1da177e4
LT
6048}
6049
4417da66
PM
6050/**
6051 * dev_set_promiscuity - update promiscuity count on a device
6052 * @dev: device
6053 * @inc: modifier
6054 *
6055 * Add or remove promiscuity from a device. While the count in the device
6056 * remains above zero the interface remains promiscuous. Once it hits zero
6057 * the device reverts back to normal filtering operation. A negative inc
6058 * value is used to drop promiscuity on the device.
dad9b335 6059 * Return 0 if successful or a negative errno code on error.
4417da66 6060 */
dad9b335 6061int dev_set_promiscuity(struct net_device *dev, int inc)
4417da66 6062{
b536db93 6063 unsigned int old_flags = dev->flags;
dad9b335 6064 int err;
4417da66 6065
991fb3f7 6066 err = __dev_set_promiscuity(dev, inc, true);
4b5a698e 6067 if (err < 0)
dad9b335 6068 return err;
4417da66
PM
6069 if (dev->flags != old_flags)
6070 dev_set_rx_mode(dev);
dad9b335 6071 return err;
4417da66 6072}
d1b19dff 6073EXPORT_SYMBOL(dev_set_promiscuity);
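
/*
 * Usage sketch (illustrative only): a packet-capture-like user takes a
 * promiscuity reference while capturing and drops it when done, both under
 * RTNL; error handling is abbreviated:
 *
 *        rtnl_lock();
 *        err = dev_set_promiscuity(dev, 1);
 *        rtnl_unlock();
 *        ...
 *        rtnl_lock();
 *        dev_set_promiscuity(dev, -1);
 *        rtnl_unlock();
 */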
4417da66 6074
991fb3f7 6075static int __dev_set_allmulti(struct net_device *dev, int inc, bool notify)
1da177e4 6076{
991fb3f7 6077 unsigned int old_flags = dev->flags, old_gflags = dev->gflags;
1da177e4 6078
24023451
PM
6079 ASSERT_RTNL();
6080
1da177e4 6081 dev->flags |= IFF_ALLMULTI;
dad9b335
WC
6082 dev->allmulti += inc;
6083 if (dev->allmulti == 0) {
6084 /*
6085 * Avoid overflow.
6086 * If inc causes overflow, untouch allmulti and return error.
6087 */
6088 if (inc < 0)
6089 dev->flags &= ~IFF_ALLMULTI;
6090 else {
6091 dev->allmulti -= inc;
7b6cd1ce
JP
6092 pr_warn("%s: allmulti touches roof, set allmulti failed. allmulti feature of device might be broken.\n",
6093 dev->name);
dad9b335
WC
6094 return -EOVERFLOW;
6095 }
6096 }
24023451 6097 if (dev->flags ^ old_flags) {
b6c40d68 6098 dev_change_rx_flags(dev, IFF_ALLMULTI);
4417da66 6099 dev_set_rx_mode(dev);
991fb3f7
ND
6100 if (notify)
6101 __dev_notify_flags(dev, old_flags,
6102 dev->gflags ^ old_gflags);
24023451 6103 }
dad9b335 6104 return 0;
4417da66 6105}
991fb3f7
ND
6106
6107/**
6108 * dev_set_allmulti - update allmulti count on a device
6109 * @dev: device
6110 * @inc: modifier
6111 *
6112 * Add or remove reception of all multicast frames to a device. While the
6113 * count in the device remains above zero the interface remains listening
6114 * to all multicast frames. Once it hits zero the device reverts back to normal
6115 * filtering operation. A negative @inc value is used to drop the counter
6116 * when releasing a resource needing all multicasts.
6117 * Return 0 if successful or a negative errno code on error.
6118 */
6119
6120int dev_set_allmulti(struct net_device *dev, int inc)
6121{
6122 return __dev_set_allmulti(dev, inc, true);
6123}
d1b19dff 6124EXPORT_SYMBOL(dev_set_allmulti);
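
/*
 * Usage sketch (illustrative only): a protocol that needs to receive every
 * multicast frame pairs an increment with a later decrement, both called
 * with RTNL held:
 *
 *        err = dev_set_allmulti(dev, 1);
 *        ...
 *        dev_set_allmulti(dev, -1);
 */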
4417da66
PM
6125
6126/*
6127 * Upload unicast and multicast address lists to device and
6128 * configure RX filtering. When the device doesn't support unicast
53ccaae1 6129 * filtering it is put in promiscuous mode while unicast addresses
4417da66
PM
6130 * are present.
6131 */
6132void __dev_set_rx_mode(struct net_device *dev)
6133{
d314774c
SH
6134 const struct net_device_ops *ops = dev->netdev_ops;
6135
4417da66
PM
6136 /* dev_open will call this function so the list will stay sane. */
6137 if (!(dev->flags & IFF_UP))
6138 return;
6139
6140 if (!netif_device_present(dev))
40b77c94 6141 return;
4417da66 6142
01789349 6143 if (!(dev->priv_flags & IFF_UNICAST_FLT)) {
4417da66
PM
6144 /* Unicast addresses changes may only happen under the rtnl,
6145 * therefore calling __dev_set_promiscuity here is safe.
6146 */
32e7bfc4 6147 if (!netdev_uc_empty(dev) && !dev->uc_promisc) {
991fb3f7 6148 __dev_set_promiscuity(dev, 1, false);
2d348d1f 6149 dev->uc_promisc = true;
32e7bfc4 6150 } else if (netdev_uc_empty(dev) && dev->uc_promisc) {
991fb3f7 6151 __dev_set_promiscuity(dev, -1, false);
2d348d1f 6152 dev->uc_promisc = false;
4417da66 6153 }
4417da66 6154 }
01789349
JP
6155
6156 if (ops->ndo_set_rx_mode)
6157 ops->ndo_set_rx_mode(dev);
4417da66
PM
6158}
6159
6160void dev_set_rx_mode(struct net_device *dev)
6161{
b9e40857 6162 netif_addr_lock_bh(dev);
4417da66 6163 __dev_set_rx_mode(dev);
b9e40857 6164 netif_addr_unlock_bh(dev);
1da177e4
LT
6165}
6166
f0db275a
SH
6167/**
6168 * dev_get_flags - get flags reported to userspace
6169 * @dev: device
6170 *
6171 * Get the combination of flag bits exported through APIs to userspace.
6172 */
95c96174 6173unsigned int dev_get_flags(const struct net_device *dev)
1da177e4 6174{
95c96174 6175 unsigned int flags;
1da177e4
LT
6176
6177 flags = (dev->flags & ~(IFF_PROMISC |
6178 IFF_ALLMULTI |
b00055aa
SR
6179 IFF_RUNNING |
6180 IFF_LOWER_UP |
6181 IFF_DORMANT)) |
1da177e4
LT
6182 (dev->gflags & (IFF_PROMISC |
6183 IFF_ALLMULTI));
6184
b00055aa
SR
6185 if (netif_running(dev)) {
6186 if (netif_oper_up(dev))
6187 flags |= IFF_RUNNING;
6188 if (netif_carrier_ok(dev))
6189 flags |= IFF_LOWER_UP;
6190 if (netif_dormant(dev))
6191 flags |= IFF_DORMANT;
6192 }
1da177e4
LT
6193
6194 return flags;
6195}
d1b19dff 6196EXPORT_SYMBOL(dev_get_flags);
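
/*
 * Usage sketch (illustrative only): checking the userspace-visible state of
 * an interface from the combined flag word:
 *
 *        unsigned int flags = dev_get_flags(dev);
 *
 *        if ((flags & (IFF_UP | IFF_RUNNING)) == (IFF_UP | IFF_RUNNING))
 *                pr_debug("%s is up and operational\n", dev->name);
 */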
1da177e4 6197
bd380811 6198int __dev_change_flags(struct net_device *dev, unsigned int flags)
1da177e4 6199{
b536db93 6200 unsigned int old_flags = dev->flags;
bd380811 6201 int ret;
1da177e4 6202
24023451
PM
6203 ASSERT_RTNL();
6204
1da177e4
LT
6205 /*
6206 * Set the flags on our device.
6207 */
6208
6209 dev->flags = (flags & (IFF_DEBUG | IFF_NOTRAILERS | IFF_NOARP |
6210 IFF_DYNAMIC | IFF_MULTICAST | IFF_PORTSEL |
6211 IFF_AUTOMEDIA)) |
6212 (dev->flags & (IFF_UP | IFF_VOLATILE | IFF_PROMISC |
6213 IFF_ALLMULTI));
6214
6215 /*
6216 * Load in the correct multicast list now the flags have changed.
6217 */
6218
b6c40d68
PM
6219 if ((old_flags ^ flags) & IFF_MULTICAST)
6220 dev_change_rx_flags(dev, IFF_MULTICAST);
24023451 6221
4417da66 6222 dev_set_rx_mode(dev);
1da177e4
LT
6223
6224 /*
6225 * Have we downed the interface? We handle IFF_UP ourselves
6226 * according to user attempts to set it, rather than blindly
6227 * setting it.
6228 */
6229
6230 ret = 0;
d215d10f 6231 if ((old_flags ^ flags) & IFF_UP)
bd380811 6232 ret = ((old_flags & IFF_UP) ? __dev_close : __dev_open)(dev);
1da177e4 6233
1da177e4 6234 if ((flags ^ dev->gflags) & IFF_PROMISC) {
d1b19dff 6235 int inc = (flags & IFF_PROMISC) ? 1 : -1;
991fb3f7 6236 unsigned int old_flags = dev->flags;
d1b19dff 6237
1da177e4 6238 dev->gflags ^= IFF_PROMISC;
991fb3f7
ND
6239
6240 if (__dev_set_promiscuity(dev, inc, false) >= 0)
6241 if (dev->flags != old_flags)
6242 dev_set_rx_mode(dev);
1da177e4
LT
6243 }
6244
6245 /* NOTE: order of synchronization of IFF_PROMISC and IFF_ALLMULTI
6246 is important. Some (broken) drivers set IFF_PROMISC when
6247 IFF_ALLMULTI is requested, without asking us and without reporting.
6248 */
6249 if ((flags ^ dev->gflags) & IFF_ALLMULTI) {
d1b19dff
ED
6250 int inc = (flags & IFF_ALLMULTI) ? 1 : -1;
6251
1da177e4 6252 dev->gflags ^= IFF_ALLMULTI;
991fb3f7 6253 __dev_set_allmulti(dev, inc, false);
1da177e4
LT
6254 }
6255
bd380811
PM
6256 return ret;
6257}
6258
a528c219
ND
6259void __dev_notify_flags(struct net_device *dev, unsigned int old_flags,
6260 unsigned int gchanges)
bd380811
PM
6261{
6262 unsigned int changes = dev->flags ^ old_flags;
6263
a528c219 6264 if (gchanges)
7f294054 6265 rtmsg_ifinfo(RTM_NEWLINK, dev, gchanges, GFP_ATOMIC);
a528c219 6266
bd380811
PM
6267 if (changes & IFF_UP) {
6268 if (dev->flags & IFF_UP)
6269 call_netdevice_notifiers(NETDEV_UP, dev);
6270 else
6271 call_netdevice_notifiers(NETDEV_DOWN, dev);
6272 }
6273
6274 if (dev->flags & IFF_UP &&
be9efd36
JP
6275 (changes & ~(IFF_UP | IFF_PROMISC | IFF_ALLMULTI | IFF_VOLATILE))) {
6276 struct netdev_notifier_change_info change_info;
6277
6278 change_info.flags_changed = changes;
6279 call_netdevice_notifiers_info(NETDEV_CHANGE, dev,
6280 &change_info.info);
6281 }
bd380811
PM
6282}
6283
6284/**
6285 * dev_change_flags - change device settings
6286 * @dev: device
6287 * @flags: device state flags
6288 *
6289 * Change settings on device based state flags. The flags are
6290 * in the userspace exported format.
6291 */
b536db93 6292int dev_change_flags(struct net_device *dev, unsigned int flags)
bd380811 6293{
b536db93 6294 int ret;
991fb3f7 6295 unsigned int changes, old_flags = dev->flags, old_gflags = dev->gflags;
bd380811
PM
6296
6297 ret = __dev_change_flags(dev, flags);
6298 if (ret < 0)
6299 return ret;
6300
991fb3f7 6301 changes = (old_flags ^ dev->flags) | (old_gflags ^ dev->gflags);
a528c219 6302 __dev_notify_flags(dev, old_flags, changes);
1da177e4
LT
6303 return ret;
6304}
d1b19dff 6305EXPORT_SYMBOL(dev_change_flags);
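
/*
 * Usage sketch (illustrative only): bringing an interface administratively
 * up, similar to what the SIOCSIFFLAGS ioctl path does, by feeding back the
 * userspace-format flags with IFF_UP added, under RTNL:
 *
 *        rtnl_lock();
 *        err = dev_change_flags(dev, dev_get_flags(dev) | IFF_UP);
 *        rtnl_unlock();
 */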
1da177e4 6306
2315dc91
VF
6307static int __dev_set_mtu(struct net_device *dev, int new_mtu)
6308{
6309 const struct net_device_ops *ops = dev->netdev_ops;
6310
6311 if (ops->ndo_change_mtu)
6312 return ops->ndo_change_mtu(dev, new_mtu);
6313
6314 dev->mtu = new_mtu;
6315 return 0;
6316}
6317
f0db275a
SH
6318/**
6319 * dev_set_mtu - Change maximum transfer unit
6320 * @dev: device
6321 * @new_mtu: new transfer unit
6322 *
6323 * Change the maximum transfer size of the network device.
6324 */
1da177e4
LT
6325int dev_set_mtu(struct net_device *dev, int new_mtu)
6326{
2315dc91 6327 int err, orig_mtu;
1da177e4
LT
6328
6329 if (new_mtu == dev->mtu)
6330 return 0;
6331
6332 /* MTU must not be negative. */
6333 if (new_mtu < 0)
6334 return -EINVAL;
6335
6336 if (!netif_device_present(dev))
6337 return -ENODEV;
6338
1d486bfb
VF
6339 err = call_netdevice_notifiers(NETDEV_PRECHANGEMTU, dev);
6340 err = notifier_to_errno(err);
6341 if (err)
6342 return err;
d314774c 6343
2315dc91
VF
6344 orig_mtu = dev->mtu;
6345 err = __dev_set_mtu(dev, new_mtu);
d314774c 6346
2315dc91
VF
6347 if (!err) {
6348 err = call_netdevice_notifiers(NETDEV_CHANGEMTU, dev);
6349 err = notifier_to_errno(err);
6350 if (err) {
6351 /* setting mtu back and notifying everyone again,
6352 * so that they have a chance to revert changes.
6353 */
6354 __dev_set_mtu(dev, orig_mtu);
6355 call_netdevice_notifiers(NETDEV_CHANGEMTU, dev);
6356 }
6357 }
1da177e4
LT
6358 return err;
6359}
d1b19dff 6360EXPORT_SYMBOL(dev_set_mtu);
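
/*
 * Usage sketch (illustrative only): changing the MTU under RTNL and
 * reporting failure; the value 9000 is just an example:
 *
 *        rtnl_lock();
 *        err = dev_set_mtu(dev, 9000);
 *        if (err)
 *                netdev_err(dev, "cannot set MTU to 9000: %d\n", err);
 *        rtnl_unlock();
 */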
1da177e4 6361
cbda10fa
VD
6362/**
6363 * dev_set_group - Change group this device belongs to
6364 * @dev: device
6365 * @new_group: group this device should belong to
6366 */
6367void dev_set_group(struct net_device *dev, int new_group)
6368{
6369 dev->group = new_group;
6370}
6371EXPORT_SYMBOL(dev_set_group);
6372
f0db275a
SH
6373/**
6374 * dev_set_mac_address - Change Media Access Control Address
6375 * @dev: device
6376 * @sa: new address
6377 *
6378 * Change the hardware (MAC) address of the device
6379 */
1da177e4
LT
6380int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa)
6381{
d314774c 6382 const struct net_device_ops *ops = dev->netdev_ops;
1da177e4
LT
6383 int err;
6384
d314774c 6385 if (!ops->ndo_set_mac_address)
1da177e4
LT
6386 return -EOPNOTSUPP;
6387 if (sa->sa_family != dev->type)
6388 return -EINVAL;
6389 if (!netif_device_present(dev))
6390 return -ENODEV;
d314774c 6391 err = ops->ndo_set_mac_address(dev, sa);
f6521516
JP
6392 if (err)
6393 return err;
fbdeca2d 6394 dev->addr_assign_type = NET_ADDR_SET;
f6521516 6395 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
7bf23575 6396 add_device_randomness(dev->dev_addr, dev->addr_len);
f6521516 6397 return 0;
1da177e4 6398}
d1b19dff 6399EXPORT_SYMBOL(dev_set_mac_address);
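
/*
 * Usage sketch (illustrative only): setting a new hardware address under
 * RTNL. "new_mac" is assumed to be a buffer of dev->addr_len bytes
 * (ETH_ALEN for Ethernet):
 *
 *        struct sockaddr sa;
 *
 *        sa.sa_family = dev->type;
 *        memcpy(sa.sa_data, new_mac, dev->addr_len);
 *        rtnl_lock();
 *        err = dev_set_mac_address(dev, &sa);
 *        rtnl_unlock();
 */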
1da177e4 6400
4bf84c35
JP
6401/**
6402 * dev_change_carrier - Change device carrier
6403 * @dev: device
691b3b7e 6404 * @new_carrier: new value
4bf84c35
JP
6405 *
6406 * Change device carrier
6407 */
6408int dev_change_carrier(struct net_device *dev, bool new_carrier)
6409{
6410 const struct net_device_ops *ops = dev->netdev_ops;
6411
6412 if (!ops->ndo_change_carrier)
6413 return -EOPNOTSUPP;
6414 if (!netif_device_present(dev))
6415 return -ENODEV;
6416 return ops->ndo_change_carrier(dev, new_carrier);
6417}
6418EXPORT_SYMBOL(dev_change_carrier);
6419
66b52b0d
JP
6420/**
6421 * dev_get_phys_port_id - Get device physical port ID
6422 * @dev: device
6423 * @ppid: port ID
6424 *
6425 * Get device physical port ID
6426 */
6427int dev_get_phys_port_id(struct net_device *dev,
02637fce 6428 struct netdev_phys_item_id *ppid)
66b52b0d
JP
6429{
6430 const struct net_device_ops *ops = dev->netdev_ops;
6431
6432 if (!ops->ndo_get_phys_port_id)
6433 return -EOPNOTSUPP;
6434 return ops->ndo_get_phys_port_id(dev, ppid);
6435}
6436EXPORT_SYMBOL(dev_get_phys_port_id);
6437
db24a904
DA
6438/**
6439 * dev_get_phys_port_name - Get device physical port name
6440 * @dev: device
6441 * @name: port name
6442 *
6443 * Get device physical port name
6444 */
6445int dev_get_phys_port_name(struct net_device *dev,
6446 char *name, size_t len)
6447{
6448 const struct net_device_ops *ops = dev->netdev_ops;
6449
6450 if (!ops->ndo_get_phys_port_name)
6451 return -EOPNOTSUPP;
6452 return ops->ndo_get_phys_port_name(dev, name, len);
6453}
6454EXPORT_SYMBOL(dev_get_phys_port_name);
6455
d746d707
AK
6456/**
6457 * dev_change_proto_down - update protocol port state information
6458 * @dev: device
6459 * @proto_down: new value
6460 *
6461 * This info can be used by switch drivers to set the phys state of the
6462 * port.
6463 */
6464int dev_change_proto_down(struct net_device *dev, bool proto_down)
6465{
6466 const struct net_device_ops *ops = dev->netdev_ops;
6467
6468 if (!ops->ndo_change_proto_down)
6469 return -EOPNOTSUPP;
6470 if (!netif_device_present(dev))
6471 return -ENODEV;
6472 return ops->ndo_change_proto_down(dev, proto_down);
6473}
6474EXPORT_SYMBOL(dev_change_proto_down);
6475
1da177e4
LT
6476/**
6477 * dev_new_index - allocate an ifindex
c4ea43c5 6478 * @net: the applicable net namespace
1da177e4
LT
6479 *
6480 * Returns a suitable unique value for a new device interface
6481 * number. The caller must hold the rtnl semaphore or the
6482 * dev_base_lock to be sure it remains unique.
6483 */
881d966b 6484static int dev_new_index(struct net *net)
1da177e4 6485{
aa79e66e 6486 int ifindex = net->ifindex;
1da177e4
LT
6487 for (;;) {
6488 if (++ifindex <= 0)
6489 ifindex = 1;
881d966b 6490 if (!__dev_get_by_index(net, ifindex))
aa79e66e 6491 return net->ifindex = ifindex;
1da177e4
LT
6492 }
6493}
6494
1da177e4 6495/* Delayed registration/unregistration */
3b5b34fd 6496static LIST_HEAD(net_todo_list);
200b916f 6497DECLARE_WAIT_QUEUE_HEAD(netdev_unregistering_wq);
1da177e4 6498
6f05f629 6499static void net_set_todo(struct net_device *dev)
1da177e4 6500{
1da177e4 6501 list_add_tail(&dev->todo_list, &net_todo_list);
50624c93 6502 dev_net(dev)->dev_unreg_count++;
1da177e4
LT
6503}
6504
9b5e383c 6505static void rollback_registered_many(struct list_head *head)
93ee31f1 6506{
e93737b0 6507 struct net_device *dev, *tmp;
5cde2829 6508 LIST_HEAD(close_head);
9b5e383c 6509
93ee31f1
DL
6510 BUG_ON(dev_boot_phase);
6511 ASSERT_RTNL();
6512
e93737b0 6513 list_for_each_entry_safe(dev, tmp, head, unreg_list) {
9b5e383c 6514 /* Some devices call this without ever having registered, to
e93737b0
KK
6515 * unwind a failed initialization. Remove those
6516 * devices and proceed with the remaining.
9b5e383c
ED
6517 */
6518 if (dev->reg_state == NETREG_UNINITIALIZED) {
7b6cd1ce
JP
6519 pr_debug("unregister_netdevice: device %s/%p never was registered\n",
6520 dev->name, dev);
93ee31f1 6521
9b5e383c 6522 WARN_ON(1);
e93737b0
KK
6523 list_del(&dev->unreg_list);
6524 continue;
9b5e383c 6525 }
449f4544 6526 dev->dismantle = true;
9b5e383c 6527 BUG_ON(dev->reg_state != NETREG_REGISTERED);
44345724 6528 }
93ee31f1 6529
44345724 6530 /* If device is running, close it first. */
5cde2829
EB
6531 list_for_each_entry(dev, head, unreg_list)
6532 list_add_tail(&dev->close_list, &close_head);
99c4a26a 6533 dev_close_many(&close_head, true);
93ee31f1 6534
44345724 6535 list_for_each_entry(dev, head, unreg_list) {
9b5e383c
ED
6536 /* And unlink it from device chain. */
6537 unlist_netdevice(dev);
93ee31f1 6538
9b5e383c 6539 dev->reg_state = NETREG_UNREGISTERING;
e9e4dd32 6540 on_each_cpu(flush_backlog, dev, 1);
9b5e383c 6541 }
93ee31f1
DL
6542
6543 synchronize_net();
6544
9b5e383c 6545 list_for_each_entry(dev, head, unreg_list) {
395eea6c
MB
6546 struct sk_buff *skb = NULL;
6547
9b5e383c
ED
6548 /* Shutdown queueing discipline. */
6549 dev_shutdown(dev);
93ee31f1
DL
6550
6551
9b5e383c
ED
6552 /* Notify protocols that we are about to destroy
6553 this device. They should clean all the things.
6554 */
6555 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
93ee31f1 6556
395eea6c
MB
6557 if (!dev->rtnl_link_ops ||
6558 dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
6559 skb = rtmsg_ifinfo_build_skb(RTM_DELLINK, dev, ~0U,
6560 GFP_KERNEL);
6561
9b5e383c
ED
6562 /*
6563 * Flush the unicast and multicast chains
6564 */
a748ee24 6565 dev_uc_flush(dev);
22bedad3 6566 dev_mc_flush(dev);
93ee31f1 6567
9b5e383c
ED
6568 if (dev->netdev_ops->ndo_uninit)
6569 dev->netdev_ops->ndo_uninit(dev);
93ee31f1 6570
395eea6c
MB
6571 if (skb)
6572 rtmsg_ifinfo_send(skb, dev, GFP_KERNEL);
56bfa7ee 6573
9ff162a8
JP
6574 /* Notifier chain MUST detach us all upper devices. */
6575 WARN_ON(netdev_has_any_upper_dev(dev));
93ee31f1 6576
9b5e383c
ED
6577 /* Remove entries from kobject tree */
6578 netdev_unregister_kobject(dev);
024e9679
AD
6579#ifdef CONFIG_XPS
6580 /* Remove XPS queueing entries */
6581 netif_reset_xps_queues_gt(dev, 0);
6582#endif
9b5e383c 6583 }
93ee31f1 6584
850a545b 6585 synchronize_net();
395264d5 6586
a5ee1551 6587 list_for_each_entry(dev, head, unreg_list)
9b5e383c
ED
6588 dev_put(dev);
6589}
6590
6591static void rollback_registered(struct net_device *dev)
6592{
6593 LIST_HEAD(single);
6594
6595 list_add(&dev->unreg_list, &single);
6596 rollback_registered_many(&single);
ceaaec98 6597 list_del(&single);
93ee31f1
DL
6598}
6599
fd867d51
JW
6600static netdev_features_t netdev_sync_upper_features(struct net_device *lower,
6601 struct net_device *upper, netdev_features_t features)
6602{
6603 netdev_features_t upper_disables = NETIF_F_UPPER_DISABLES;
6604 netdev_features_t feature;
5ba3f7d6 6605 int feature_bit;
fd867d51 6606
5ba3f7d6
JW
6607 for_each_netdev_feature(&upper_disables, feature_bit) {
6608 feature = __NETIF_F_BIT(feature_bit);
fd867d51
JW
6609 if (!(upper->wanted_features & feature)
6610 && (features & feature)) {
6611 netdev_dbg(lower, "Dropping feature %pNF, upper dev %s has it off.\n",
6612 &feature, upper->name);
6613 features &= ~feature;
6614 }
6615 }
6616
6617 return features;
6618}
6619
6620static void netdev_sync_lower_features(struct net_device *upper,
6621 struct net_device *lower, netdev_features_t features)
6622{
6623 netdev_features_t upper_disables = NETIF_F_UPPER_DISABLES;
6624 netdev_features_t feature;
5ba3f7d6 6625 int feature_bit;
fd867d51 6626
5ba3f7d6
JW
6627 for_each_netdev_feature(&upper_disables, feature_bit) {
6628 feature = __NETIF_F_BIT(feature_bit);
fd867d51
JW
6629 if (!(features & feature) && (lower->features & feature)) {
6630 netdev_dbg(upper, "Disabling feature %pNF on lower dev %s.\n",
6631 &feature, lower->name);
6632 lower->wanted_features &= ~feature;
6633 netdev_update_features(lower);
6634
6635 if (unlikely(lower->features & feature))
6636 netdev_WARN(upper, "failed to disable %pNF on %s!\n",
6637 &feature, lower->name);
6638 }
6639 }
6640}
6641
c8f44aff
MM
6642static netdev_features_t netdev_fix_features(struct net_device *dev,
6643 netdev_features_t features)
b63365a2 6644{
57422dc5
MM
6645 /* Fix illegal checksum combinations */
6646 if ((features & NETIF_F_HW_CSUM) &&
6647 (features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
6f404e44 6648 netdev_warn(dev, "mixed HW and IP checksum settings.\n");
57422dc5
MM
6649 features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM);
6650 }
6651
b63365a2 6652 /* TSO requires that SG is present as well. */
ea2d3688 6653 if ((features & NETIF_F_ALL_TSO) && !(features & NETIF_F_SG)) {
6f404e44 6654 netdev_dbg(dev, "Dropping TSO features since no SG feature.\n");
ea2d3688 6655 features &= ~NETIF_F_ALL_TSO;
b63365a2
HX
6656 }
6657
ec5f0615
PS
6658 if ((features & NETIF_F_TSO) && !(features & NETIF_F_HW_CSUM) &&
6659 !(features & NETIF_F_IP_CSUM)) {
6660 netdev_dbg(dev, "Dropping TSO features since no CSUM feature.\n");
6661 features &= ~NETIF_F_TSO;
6662 features &= ~NETIF_F_TSO_ECN;
6663 }
6664
6665 if ((features & NETIF_F_TSO6) && !(features & NETIF_F_HW_CSUM) &&
6666 !(features & NETIF_F_IPV6_CSUM)) {
6667 netdev_dbg(dev, "Dropping TSO6 features since no CSUM feature.\n");
6668 features &= ~NETIF_F_TSO6;
6669 }
6670
31d8b9e0
BH
6671 /* TSO ECN requires that TSO is present as well. */
6672 if ((features & NETIF_F_ALL_TSO) == NETIF_F_TSO_ECN)
6673 features &= ~NETIF_F_TSO_ECN;
6674
212b573f
MM
6675 /* Software GSO depends on SG. */
6676 if ((features & NETIF_F_GSO) && !(features & NETIF_F_SG)) {
6f404e44 6677 netdev_dbg(dev, "Dropping NETIF_F_GSO since no SG feature.\n");
212b573f
MM
6678 features &= ~NETIF_F_GSO;
6679 }
6680
acd1130e 6681 /* UFO needs SG and checksumming */
b63365a2 6682 if (features & NETIF_F_UFO) {
79032644 6683 /* maybe split UFO into V4 and V6? */
c8cd0989
TH
6684 if (!(features & NETIF_F_HW_CSUM) &&
6685 ((features & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)) !=
6686 (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM))) {
6f404e44 6687 netdev_dbg(dev,
acd1130e 6688 "Dropping NETIF_F_UFO since no checksum offload features.\n");
b63365a2
HX
6689 features &= ~NETIF_F_UFO;
6690 }
6691
6692 if (!(features & NETIF_F_SG)) {
6f404e44 6693 netdev_dbg(dev,
acd1130e 6694 "Dropping NETIF_F_UFO since no NETIF_F_SG feature.\n");
b63365a2
HX
6695 features &= ~NETIF_F_UFO;
6696 }
6697 }
6698
d0290214
JP
6699#ifdef CONFIG_NET_RX_BUSY_POLL
6700 if (dev->netdev_ops->ndo_busy_poll)
6701 features |= NETIF_F_BUSY_POLL;
6702 else
6703#endif
6704 features &= ~NETIF_F_BUSY_POLL;
6705
b63365a2
HX
6706 return features;
6707}
b63365a2 6708
6cb6a27c 6709int __netdev_update_features(struct net_device *dev)
5455c699 6710{
fd867d51 6711 struct net_device *upper, *lower;
c8f44aff 6712 netdev_features_t features;
fd867d51 6713 struct list_head *iter;
e7868a85 6714 int err = -1;
5455c699 6715
87267485
MM
6716 ASSERT_RTNL();
6717
5455c699
MM
6718 features = netdev_get_wanted_features(dev);
6719
6720 if (dev->netdev_ops->ndo_fix_features)
6721 features = dev->netdev_ops->ndo_fix_features(dev, features);
6722
6723 /* driver might be less strict about feature dependencies */
6724 features = netdev_fix_features(dev, features);
6725
fd867d51
JW
6726 /* some features can't be enabled if they're off an an upper device */
6727 netdev_for_each_upper_dev_rcu(dev, upper, iter)
6728 features = netdev_sync_upper_features(dev, upper, features);
6729
5455c699 6730 if (dev->features == features)
e7868a85 6731 goto sync_lower;
5455c699 6732
c8f44aff
MM
6733 netdev_dbg(dev, "Features changed: %pNF -> %pNF\n",
6734 &dev->features, &features);
5455c699
MM
6735
6736 if (dev->netdev_ops->ndo_set_features)
6737 err = dev->netdev_ops->ndo_set_features(dev, features);
5f8dc33e
NA
6738 else
6739 err = 0;
5455c699 6740
6cb6a27c 6741 if (unlikely(err < 0)) {
5455c699 6742 netdev_err(dev,
c8f44aff
MM
6743 "set_features() failed (%d); wanted %pNF, left %pNF\n",
6744 err, &features, &dev->features);
17b85d29
NA
6745 /* return non-0 since some features might have changed and
6746 * it's better to fire a spurious notification than miss it
6747 */
6748 return -1;
6cb6a27c
MM
6749 }
6750
e7868a85 6751sync_lower:
fd867d51
JW
6752 /* some features must be disabled on lower devices when disabled
6753 * on an upper device (think: bonding master or bridge)
6754 */
6755 netdev_for_each_lower_dev(dev, lower, iter)
6756 netdev_sync_lower_features(dev, lower, features);
6757
6cb6a27c
MM
6758 if (!err)
6759 dev->features = features;
6760
e7868a85 6761 return err < 0 ? 0 : 1;
6cb6a27c
MM
6762}
6763
afe12cc8
MM
6764/**
6765 * netdev_update_features - recalculate device features
6766 * @dev: the device to check
6767 *
6768 * Recalculate dev->features set and send notifications if it
6769 * has changed. Should be called after driver or hardware dependent
6770 * conditions might have changed that influence the features.
6771 */
6cb6a27c
MM
6772void netdev_update_features(struct net_device *dev)
6773{
6774 if (__netdev_update_features(dev))
6775 netdev_features_change(dev);
5455c699
MM
6776}
6777EXPORT_SYMBOL(netdev_update_features);
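
/*
 * Usage sketch (illustrative only): after a driver reconfiguration that
 * changes what the hardware can offload (say, disabling a checksum engine),
 * the driver refreshes dev->features under RTNL:
 *
 *        ASSERT_RTNL();
 *        dev->hw_features &= ~NETIF_F_IP_CSUM;
 *        netdev_update_features(dev);
 */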
6778
afe12cc8
MM
6779/**
6780 * netdev_change_features - recalculate device features
6781 * @dev: the device to check
6782 *
6783 * Recalculate dev->features set and send notifications even
6784 * if they have not changed. Should be called instead of
6785 * netdev_update_features() if also dev->vlan_features might
6786 * have changed to allow the changes to be propagated to stacked
6787 * VLAN devices.
6788 */
6789void netdev_change_features(struct net_device *dev)
6790{
6791 __netdev_update_features(dev);
6792 netdev_features_change(dev);
6793}
6794EXPORT_SYMBOL(netdev_change_features);
6795
fc4a7489
PM
6796/**
6797 * netif_stacked_transfer_operstate - transfer operstate
6798 * @rootdev: the root or lower level device to transfer state from
6799 * @dev: the device to transfer operstate to
6800 *
6801 * Transfer operational state from root to device. This is normally
6802 * called when a stacking relationship exists between the root
6803 * device and the device (a leaf device).
6804 */
6805void netif_stacked_transfer_operstate(const struct net_device *rootdev,
6806 struct net_device *dev)
6807{
6808 if (rootdev->operstate == IF_OPER_DORMANT)
6809 netif_dormant_on(dev);
6810 else
6811 netif_dormant_off(dev);
6812
6813 if (netif_carrier_ok(rootdev)) {
6814 if (!netif_carrier_ok(dev))
6815 netif_carrier_on(dev);
6816 } else {
6817 if (netif_carrier_ok(dev))
6818 netif_carrier_off(dev);
6819 }
6820}
6821EXPORT_SYMBOL(netif_stacked_transfer_operstate);
6822
a953be53 6823#ifdef CONFIG_SYSFS
1b4bf461
ED
6824static int netif_alloc_rx_queues(struct net_device *dev)
6825{
1b4bf461 6826 unsigned int i, count = dev->num_rx_queues;
bd25fa7b 6827 struct netdev_rx_queue *rx;
10595902 6828 size_t sz = count * sizeof(*rx);
1b4bf461 6829
bd25fa7b 6830 BUG_ON(count < 1);
1b4bf461 6831
10595902
PG
6832 rx = kzalloc(sz, GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT);
6833 if (!rx) {
6834 rx = vzalloc(sz);
6835 if (!rx)
6836 return -ENOMEM;
6837 }
bd25fa7b
TH
6838 dev->_rx = rx;
6839
bd25fa7b 6840 for (i = 0; i < count; i++)
fe822240 6841 rx[i].dev = dev;
1b4bf461
ED
6842 return 0;
6843}
bf264145 6844#endif
1b4bf461 6845
aa942104
CG
6846static void netdev_init_one_queue(struct net_device *dev,
6847 struct netdev_queue *queue, void *_unused)
6848{
6849 /* Initialize queue lock */
6850 spin_lock_init(&queue->_xmit_lock);
6851 netdev_set_xmit_lockdep_class(&queue->_xmit_lock, dev->type);
6852 queue->xmit_lock_owner = -1;
b236da69 6853 netdev_queue_numa_node_write(queue, NUMA_NO_NODE);
aa942104 6854 queue->dev = dev;
114cf580
TH
6855#ifdef CONFIG_BQL
6856 dql_init(&queue->dql, HZ);
6857#endif
aa942104
CG
6858}
6859
60877a32
ED
6860static void netif_free_tx_queues(struct net_device *dev)
6861{
4cb28970 6862 kvfree(dev->_tx);
60877a32
ED
6863}
6864
e6484930
TH
6865static int netif_alloc_netdev_queues(struct net_device *dev)
6866{
6867 unsigned int count = dev->num_tx_queues;
6868 struct netdev_queue *tx;
60877a32 6869 size_t sz = count * sizeof(*tx);
e6484930 6870
d339727c
ED
6871 if (count < 1 || count > 0xffff)
6872 return -EINVAL;
62b5942a 6873
60877a32
ED
6874 tx = kzalloc(sz, GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT);
6875 if (!tx) {
6876 tx = vzalloc(sz);
6877 if (!tx)
6878 return -ENOMEM;
6879 }
e6484930 6880 dev->_tx = tx;
1d24eb48 6881
e6484930
TH
6882 netdev_for_each_tx_queue(dev, netdev_init_one_queue, NULL);
6883 spin_lock_init(&dev->tx_global_lock);
aa942104
CG
6884
6885 return 0;
e6484930
TH
6886}
6887
a2029240
DV
6888void netif_tx_stop_all_queues(struct net_device *dev)
6889{
6890 unsigned int i;
6891
6892 for (i = 0; i < dev->num_tx_queues; i++) {
6893 struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
6894 netif_tx_stop_queue(txq);
6895 }
6896}
6897EXPORT_SYMBOL(netif_tx_stop_all_queues);
6898
1da177e4
LT
6899/**
6900 * register_netdevice - register a network device
6901 * @dev: device to register
6902 *
6903 * Take a completed network device structure and add it to the kernel
6904 * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
6905 * chain. 0 is returned on success. A negative errno code is returned
6906 * on a failure to set up the device, or if the name is a duplicate.
6907 *
6908 * Callers must hold the rtnl semaphore. You may want
6909 * register_netdev() instead of this.
6910 *
6911 * BUGS:
6912 * The locking appears insufficient to guarantee two parallel registers
6913 * will not get the same name.
6914 */
6915
6916int register_netdevice(struct net_device *dev)
6917{
1da177e4 6918 int ret;
d314774c 6919 struct net *net = dev_net(dev);
1da177e4
LT
6920
6921 BUG_ON(dev_boot_phase);
6922 ASSERT_RTNL();
6923
b17a7c17
SH
6924 might_sleep();
6925
1da177e4
LT
6926 /* When net_device's are persistent, this will be fatal. */
6927 BUG_ON(dev->reg_state != NETREG_UNINITIALIZED);
d314774c 6928 BUG_ON(!net);
1da177e4 6929
f1f28aa3 6930 spin_lock_init(&dev->addr_list_lock);
cf508b12 6931 netdev_set_addr_lockdep_class(dev);
1da177e4 6932
828de4f6 6933 ret = dev_get_valid_name(net, dev, dev->name);
0696c3a8
PP
6934 if (ret < 0)
6935 goto out;
6936
1da177e4 6937 /* Init, if this function is available */
d314774c
SH
6938 if (dev->netdev_ops->ndo_init) {
6939 ret = dev->netdev_ops->ndo_init(dev);
1da177e4
LT
6940 if (ret) {
6941 if (ret > 0)
6942 ret = -EIO;
90833aa4 6943 goto out;
1da177e4
LT
6944 }
6945 }
4ec93edb 6946
f646968f
PM
6947 if (((dev->hw_features | dev->features) &
6948 NETIF_F_HW_VLAN_CTAG_FILTER) &&
d2ed273d
MM
6949 (!dev->netdev_ops->ndo_vlan_rx_add_vid ||
6950 !dev->netdev_ops->ndo_vlan_rx_kill_vid)) {
6951 netdev_WARN(dev, "Buggy VLAN acceleration in driver!\n");
6952 ret = -EINVAL;
6953 goto err_uninit;
6954 }
6955
9c7dafbf
PE
6956 ret = -EBUSY;
6957 if (!dev->ifindex)
6958 dev->ifindex = dev_new_index(net);
6959 else if (__dev_get_by_index(net, dev->ifindex))
6960 goto err_uninit;
6961
5455c699
MM
6962 /* Transfer changeable features to wanted_features and enable
6963 * software offloads (GSO and GRO).
6964 */
6965 dev->hw_features |= NETIF_F_SOFT_FEATURES;
14d1232f
MM
6966 dev->features |= NETIF_F_SOFT_FEATURES;
6967 dev->wanted_features = dev->features & dev->hw_features;
1da177e4 6968
34324dc2
MM
6969 if (!(dev->flags & IFF_LOOPBACK)) {
6970 dev->hw_features |= NETIF_F_NOCACHE_COPY;
c6e1a0d1
TH
6971 }
6972
1180e7d6 6973 /* Make NETIF_F_HIGHDMA inheritable to VLAN devices.
16c3ea78 6974 */
1180e7d6 6975 dev->vlan_features |= NETIF_F_HIGHDMA;
16c3ea78 6976
ee579677
PS
6977 /* Make NETIF_F_SG inheritable to tunnel devices.
6978 */
6979 dev->hw_enc_features |= NETIF_F_SG;
6980
0d89d203
SH
6981 /* Make NETIF_F_SG inheritable to MPLS.
6982 */
6983 dev->mpls_features |= NETIF_F_SG;
6984
7ffbe3fd
JB
6985 ret = call_netdevice_notifiers(NETDEV_POST_INIT, dev);
6986 ret = notifier_to_errno(ret);
6987 if (ret)
6988 goto err_uninit;
6989
8b41d188 6990 ret = netdev_register_kobject(dev);
b17a7c17 6991 if (ret)
7ce1b0ed 6992 goto err_uninit;
b17a7c17
SH
6993 dev->reg_state = NETREG_REGISTERED;
6994
6cb6a27c 6995 __netdev_update_features(dev);
8e9b59b2 6996
1da177e4
LT
6997 /*
6998 * Default initial state at registry is that the
6999 * device is present.
7000 */
7001
7002 set_bit(__LINK_STATE_PRESENT, &dev->state);
7003
8f4cccbb
BH
7004 linkwatch_init_dev(dev);
7005
1da177e4 7006 dev_init_scheduler(dev);
1da177e4 7007 dev_hold(dev);
ce286d32 7008 list_netdevice(dev);
7bf23575 7009 add_device_randomness(dev->dev_addr, dev->addr_len);
1da177e4 7010
948b337e
JP
7011 /* If the device has a permanent device address, the driver should
7012 * set dev_addr and also addr_assign_type should be set to
7013 * NET_ADDR_PERM (default value).
7014 */
7015 if (dev->addr_assign_type == NET_ADDR_PERM)
7016 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
7017
1da177e4 7018 /* Notify protocols, that a new device appeared. */
056925ab 7019 ret = call_netdevice_notifiers(NETDEV_REGISTER, dev);
fcc5a03a 7020 ret = notifier_to_errno(ret);
93ee31f1
DL
7021 if (ret) {
7022 rollback_registered(dev);
7023 dev->reg_state = NETREG_UNREGISTERED;
7024 }
d90a909e
EB
7025 /*
7026 * Prevent userspace races by waiting until the network
7027 * device is fully setup before sending notifications.
7028 */
a2835763
PM
7029 if (!dev->rtnl_link_ops ||
7030 dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
7f294054 7031 rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U, GFP_KERNEL);
1da177e4
LT
7032
7033out:
7034 return ret;
7ce1b0ed
HX
7035
7036err_uninit:
d314774c
SH
7037 if (dev->netdev_ops->ndo_uninit)
7038 dev->netdev_ops->ndo_uninit(dev);
7ce1b0ed 7039 goto out;
1da177e4 7040}
d1b19dff 7041EXPORT_SYMBOL(register_netdevice);
1da177e4 7042
937f1ba5
BH
7043/**
7044 * init_dummy_netdev - init a dummy network device for NAPI
7045 * @dev: device to init
7046 *
7047 * This takes a network device structure and initializes the minimum
7048 * amount of fields so it can be used to schedule NAPI polls without
7049 * registering a full blown interface. This is to be used by drivers
7050 * that need to tie several hardware interfaces to a single NAPI
7051 * poll scheduler due to HW limitations.
7052 */
7053int init_dummy_netdev(struct net_device *dev)
7054{
7055 /* Clear everything. Note we don't initialize spinlocks
7056 * as they aren't supposed to be taken by any of the
7057 * NAPI code and this dummy netdev is supposed to be
7058 * only ever used for NAPI polls
7059 */
7060 memset(dev, 0, sizeof(struct net_device));
7061
7062 /* make sure we BUG if trying to hit standard
7063 * register/unregister code path
7064 */
7065 dev->reg_state = NETREG_DUMMY;
7066
937f1ba5
BH
7067 /* NAPI wants this */
7068 INIT_LIST_HEAD(&dev->napi_list);
7069
7070 /* a dummy interface is started by default */
7071 set_bit(__LINK_STATE_PRESENT, &dev->state);
7072 set_bit(__LINK_STATE_START, &dev->state);
7073
29b4433d
ED
7074 /* Note: We don't allocate pcpu_refcnt for dummy devices,
7075 * because users of this 'device' don't need to change
7076 * its refcount.
7077 */
7078
937f1ba5
BH
7079 return 0;
7080}
7081EXPORT_SYMBOL_GPL(init_dummy_netdev);
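/*
 * Illustrative sketch (not part of dev.c): a driver whose hardware hangs
 * several interfaces off one interrupt can host its NAPI context on a dummy
 * netdev as described above.  foo_priv, foo_poll() and foo_init_napi() are
 * hypothetical names; the embedded net_device is never registered.
 */
#include <linux/netdevice.h>

struct foo_priv {
	struct net_device napi_dev;	/* dummy device, used only for NAPI */
	struct napi_struct napi;
};

static int foo_poll(struct napi_struct *napi, int budget)
{
	int work_done = 0;

	/* ... process up to @budget packets, counting them in work_done ... */
	if (work_done < budget)
		napi_complete(napi);
	return work_done;
}

static void foo_init_napi(struct foo_priv *priv)
{
	init_dummy_netdev(&priv->napi_dev);
	netif_napi_add(&priv->napi_dev, &priv->napi, foo_poll,
		       NAPI_POLL_WEIGHT);
	napi_enable(&priv->napi);
}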
7082
7083
1da177e4
LT
7084/**
7085 * register_netdev - register a network device
7086 * @dev: device to register
7087 *
7088 * Take a completed network device structure and add it to the kernel
7089 * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
7090 * chain. 0 is returned on success. A negative errno code is returned
7091 * on a failure to set up the device, or if the name is a duplicate.
7092 *
38b4da38 7093 * This is a wrapper around register_netdevice that takes the rtnl semaphore
1da177e4
LT
7094 * and expands the device name if you passed a format string to
7095 * alloc_netdev.
7096 */
7097int register_netdev(struct net_device *dev)
7098{
7099 int err;
7100
7101 rtnl_lock();
1da177e4 7102 err = register_netdevice(dev);
1da177e4
LT
7103 rtnl_unlock();
7104 return err;
7105}
7106EXPORT_SYMBOL(register_netdev);
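/*
 * Illustrative sketch (not part of dev.c): the usual pattern for drivers
 * that rely on register_netdev()/unregister_netdev() and therefore never
 * take the rtnl semaphore themselves.  bar_priv, bar_netdev_ops, bar_probe()
 * and bar_remove() are hypothetical names.
 */
#include <linux/etherdevice.h>
#include <linux/netdevice.h>

struct bar_priv {
	struct net_device *dev;
};

static const struct net_device_ops bar_netdev_ops; /* .ndo_open etc. set elsewhere */

static int bar_probe(struct device *parent)
{
	struct net_device *dev;
	struct bar_priv *priv;
	int err;

	dev = alloc_etherdev(sizeof(*priv));
	if (!dev)
		return -ENOMEM;

	SET_NETDEV_DEV(dev, parent);
	dev->netdev_ops = &bar_netdev_ops;
	eth_hw_addr_random(dev);

	priv = netdev_priv(dev);
	priv->dev = dev;

	err = register_netdev(dev);	/* takes and releases the rtnl lock */
	if (err) {
		free_netdev(dev);	/* never registered, a plain free is fine */
		return err;
	}
	return 0;
}

static void bar_remove(struct net_device *dev)
{
	unregister_netdev(dev);		/* also takes the rtnl lock */
	free_netdev(dev);
}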
7107
29b4433d
ED
7108int netdev_refcnt_read(const struct net_device *dev)
7109{
7110 int i, refcnt = 0;
7111
7112 for_each_possible_cpu(i)
7113 refcnt += *per_cpu_ptr(dev->pcpu_refcnt, i);
7114 return refcnt;
7115}
7116EXPORT_SYMBOL(netdev_refcnt_read);
7117
2c53040f 7118/**
1da177e4 7119 * netdev_wait_allrefs - wait until all references are gone.
3de7a37b 7120 * @dev: target net_device
1da177e4
LT
7121 *
7122 * This is called when unregistering network devices.
7123 *
7124 * Any protocol or device that holds a reference should register
7125 * for netdevice notification, and cleanup and put back the
7126 * reference if they receive an UNREGISTER event.
7127 * We can get stuck here if buggy protocols don't correctly
4ec93edb 7128 * call dev_put.
1da177e4
LT
7129 */
7130static void netdev_wait_allrefs(struct net_device *dev)
7131{
7132 unsigned long rebroadcast_time, warning_time;
29b4433d 7133 int refcnt;
1da177e4 7134
e014debe
ED
7135 linkwatch_forget_dev(dev);
7136
1da177e4 7137 rebroadcast_time = warning_time = jiffies;
29b4433d
ED
7138 refcnt = netdev_refcnt_read(dev);
7139
7140 while (refcnt != 0) {
1da177e4 7141 if (time_after(jiffies, rebroadcast_time + 1 * HZ)) {
6756ae4b 7142 rtnl_lock();
1da177e4
LT
7143
7144 /* Rebroadcast unregister notification */
056925ab 7145 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
1da177e4 7146
748e2d93 7147 __rtnl_unlock();
0115e8e3 7148 rcu_barrier();
748e2d93
ED
7149 rtnl_lock();
7150
0115e8e3 7151 call_netdevice_notifiers(NETDEV_UNREGISTER_FINAL, dev);
1da177e4
LT
7152 if (test_bit(__LINK_STATE_LINKWATCH_PENDING,
7153 &dev->state)) {
7154 /* We must not have linkwatch events
7155 * pending on unregister. If this
7156 * happens, we simply run the queue
7157 * unscheduled, resulting in a noop
7158 * for this device.
7159 */
7160 linkwatch_run_queue();
7161 }
7162
6756ae4b 7163 __rtnl_unlock();
1da177e4
LT
7164
7165 rebroadcast_time = jiffies;
7166 }
7167
7168 msleep(250);
7169
29b4433d
ED
7170 refcnt = netdev_refcnt_read(dev);
7171
1da177e4 7172 if (time_after(jiffies, warning_time + 10 * HZ)) {
7b6cd1ce
JP
7173 pr_emerg("unregister_netdevice: waiting for %s to become free. Usage count = %d\n",
7174 dev->name, refcnt);
1da177e4
LT
7175 warning_time = jiffies;
7176 }
7177 }
7178}
7179
7180/* The sequence is:
7181 *
7182 * rtnl_lock();
7183 * ...
7184 * register_netdevice(x1);
7185 * register_netdevice(x2);
7186 * ...
7187 * unregister_netdevice(y1);
7188 * unregister_netdevice(y2);
7189 * ...
7190 * rtnl_unlock();
7191 * free_netdev(y1);
7192 * free_netdev(y2);
7193 *
58ec3b4d 7194 * We are invoked by rtnl_unlock().
1da177e4 7195 * This allows us to deal with problems:
b17a7c17 7196 * 1) We can delete sysfs objects which invoke hotplug
1da177e4
LT
7197 * without deadlocking with linkwatch via keventd.
7198 * 2) Since we run with the RTNL semaphore not held, we can sleep
7199 * safely in order to wait for the netdev refcnt to drop to zero.
58ec3b4d
HX
7200 *
7201 * We must not return until all unregister events added during
7202 * the interval the lock was held have been completed.
1da177e4 7203 */
1da177e4
LT
7204void netdev_run_todo(void)
7205{
626ab0e6 7206 struct list_head list;
1da177e4 7207
1da177e4 7208 /* Snapshot list, allow later requests */
626ab0e6 7209 list_replace_init(&net_todo_list, &list);
58ec3b4d
HX
7210
7211 __rtnl_unlock();
626ab0e6 7212
0115e8e3
ED
7213
7214 /* Wait for rcu callbacks to finish before next phase */
850a545b
EB
7215 if (!list_empty(&list))
7216 rcu_barrier();
7217
1da177e4
LT
7218 while (!list_empty(&list)) {
7219 struct net_device *dev
e5e26d75 7220 = list_first_entry(&list, struct net_device, todo_list);
1da177e4
LT
7221 list_del(&dev->todo_list);
7222
748e2d93 7223 rtnl_lock();
0115e8e3 7224 call_netdevice_notifiers(NETDEV_UNREGISTER_FINAL, dev);
748e2d93 7225 __rtnl_unlock();
0115e8e3 7226
b17a7c17 7227 if (unlikely(dev->reg_state != NETREG_UNREGISTERING)) {
7b6cd1ce 7228 pr_err("network todo '%s' but state %d\n",
b17a7c17
SH
7229 dev->name, dev->reg_state);
7230 dump_stack();
7231 continue;
7232 }
1da177e4 7233
b17a7c17 7234 dev->reg_state = NETREG_UNREGISTERED;
1da177e4 7235
b17a7c17 7236 netdev_wait_allrefs(dev);
1da177e4 7237
b17a7c17 7238 /* paranoia */
29b4433d 7239 BUG_ON(netdev_refcnt_read(dev));
7866a621
SN
7240 BUG_ON(!list_empty(&dev->ptype_all));
7241 BUG_ON(!list_empty(&dev->ptype_specific));
33d480ce
ED
7242 WARN_ON(rcu_access_pointer(dev->ip_ptr));
7243 WARN_ON(rcu_access_pointer(dev->ip6_ptr));
547b792c 7244 WARN_ON(dev->dn_ptr);
1da177e4 7245
b17a7c17
SH
7246 if (dev->destructor)
7247 dev->destructor(dev);
9093bbb2 7248
50624c93
EB
7249 /* Report a network device has been unregistered */
7250 rtnl_lock();
7251 dev_net(dev)->dev_unreg_count--;
7252 __rtnl_unlock();
7253 wake_up(&netdev_unregistering_wq);
7254
9093bbb2
SH
7255 /* Free network device */
7256 kobject_put(&dev->dev.kobj);
1da177e4 7257 }
1da177e4
LT
7258}
7259
9256645a
JW
7260/* Convert net_device_stats to rtnl_link_stats64. rtnl_link_stats64 has
7261 * all the same fields in the same order as net_device_stats, with only
7262 * the type differing, but rtnl_link_stats64 may have additional fields
7263 * at the end for newer counters.
3cfde79c 7264 */
77a1abf5
ED
7265void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64,
7266 const struct net_device_stats *netdev_stats)
3cfde79c
BH
7267{
7268#if BITS_PER_LONG == 64
9256645a 7269 BUILD_BUG_ON(sizeof(*stats64) < sizeof(*netdev_stats));
77a1abf5 7270 memcpy(stats64, netdev_stats, sizeof(*netdev_stats));
9256645a
JW
7271 /* zero out counters that only exist in rtnl_link_stats64 */
7272 memset((char *)stats64 + sizeof(*netdev_stats), 0,
7273 sizeof(*stats64) - sizeof(*netdev_stats));
3cfde79c 7274#else
9256645a 7275 size_t i, n = sizeof(*netdev_stats) / sizeof(unsigned long);
3cfde79c
BH
7276 const unsigned long *src = (const unsigned long *)netdev_stats;
7277 u64 *dst = (u64 *)stats64;
7278
9256645a 7279 BUILD_BUG_ON(n > sizeof(*stats64) / sizeof(u64));
3cfde79c
BH
7280 for (i = 0; i < n; i++)
7281 dst[i] = src[i];
9256645a
JW
7282 /* zero out counters that only exist in rtnl_link_stats64 */
7283 memset((char *)stats64 + n * sizeof(u64), 0,
7284 sizeof(*stats64) - n * sizeof(u64));
3cfde79c
BH
7285#endif
7286}
77a1abf5 7287EXPORT_SYMBOL(netdev_stats_to_stats64);
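/*
 * Illustrative sketch (not part of dev.c): a driver that only maintains the
 * legacy dev->stats counters can still report 64-bit statistics by letting
 * netdev_stats_to_stats64() widen them.  baz_get_stats64() is a hypothetical
 * helper, not a real ndo hook.
 */
#include <linux/netdevice.h>

static void baz_get_stats64(struct net_device *dev,
			    struct rtnl_link_stats64 *stats)
{
	netdev_stats_to_stats64(stats, &dev->stats);
	/* a real driver would add its own 64-bit counters here */
}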
3cfde79c 7288
eeda3fd6
SH
7289/**
7290 * dev_get_stats - get network device statistics
7291 * @dev: device to get statistics from
28172739 7292 * @storage: place to store stats
eeda3fd6 7293 *
d7753516
BH
7294 * Get network statistics from device. Return @storage.
7295 * The device driver may provide its own method by setting
7296 * dev->netdev_ops->ndo_get_stats64 or dev->netdev_ops->ndo_get_stats;
7297 * otherwise the internal statistics structure is used.
eeda3fd6 7298 */
d7753516
BH
7299struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
7300 struct rtnl_link_stats64 *storage)
7004bf25 7301{
eeda3fd6
SH
7302 const struct net_device_ops *ops = dev->netdev_ops;
7303
28172739
ED
7304 if (ops->ndo_get_stats64) {
7305 memset(storage, 0, sizeof(*storage));
caf586e5
ED
7306 ops->ndo_get_stats64(dev, storage);
7307 } else if (ops->ndo_get_stats) {
3cfde79c 7308 netdev_stats_to_stats64(storage, ops->ndo_get_stats(dev));
caf586e5
ED
7309 } else {
7310 netdev_stats_to_stats64(storage, &dev->stats);
28172739 7311 }
caf586e5 7312 storage->rx_dropped += atomic_long_read(&dev->rx_dropped);
015f0688 7313 storage->tx_dropped += atomic_long_read(&dev->tx_dropped);
6e7333d3 7314 storage->rx_nohandler += atomic_long_read(&dev->rx_nohandler);
28172739 7315 return storage;
c45d286e 7316}
eeda3fd6 7317EXPORT_SYMBOL(dev_get_stats);
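/*
 * Illustrative sketch (not part of dev.c): reading a device's aggregate
 * counters with dev_get_stats() into an on-stack rtnl_link_stats64, the same
 * pattern the rtnetlink and sysfs code use.  show_rx_packets() is a
 * hypothetical caller.
 */
#include <linux/netdevice.h>

static u64 show_rx_packets(struct net_device *dev)
{
	struct rtnl_link_stats64 temp;
	const struct rtnl_link_stats64 *stats = dev_get_stats(dev, &temp);

	return stats->rx_packets;
}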
c45d286e 7318
24824a09 7319struct netdev_queue *dev_ingress_queue_create(struct net_device *dev)
dc2b4847 7320{
24824a09 7321 struct netdev_queue *queue = dev_ingress_queue(dev);
dc2b4847 7322
24824a09
ED
7323#ifdef CONFIG_NET_CLS_ACT
7324 if (queue)
7325 return queue;
7326 queue = kzalloc(sizeof(*queue), GFP_KERNEL);
7327 if (!queue)
7328 return NULL;
7329 netdev_init_one_queue(dev, queue, NULL);
2ce1ee17 7330 RCU_INIT_POINTER(queue->qdisc, &noop_qdisc);
24824a09
ED
7331 queue->qdisc_sleeping = &noop_qdisc;
7332 rcu_assign_pointer(dev->ingress_queue, queue);
7333#endif
7334 return queue;
bb949fbd
DM
7335}
7336
2c60db03
ED
7337static const struct ethtool_ops default_ethtool_ops;
7338
d07d7507
SG
7339void netdev_set_default_ethtool_ops(struct net_device *dev,
7340 const struct ethtool_ops *ops)
7341{
7342 if (dev->ethtool_ops == &default_ethtool_ops)
7343 dev->ethtool_ops = ops;
7344}
7345EXPORT_SYMBOL_GPL(netdev_set_default_ethtool_ops);
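/*
 * Illustrative sketch (not part of dev.c): a bus-level framework (usbnet is
 * an in-tree user) can install fallback ethtool ops without clobbering ops a
 * specific driver already set.  qux_ethtool_ops and qux_setup_ethtool() are
 * hypothetical names.
 */
#include <linux/ethtool.h>
#include <linux/netdevice.h>

static const struct ethtool_ops qux_ethtool_ops = {
	.get_link	= ethtool_op_get_link,
};

static void qux_setup_ethtool(struct net_device *dev)
{
	netdev_set_default_ethtool_ops(dev, &qux_ethtool_ops);
}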
7346
74d332c1
ED
7347void netdev_freemem(struct net_device *dev)
7348{
7349 char *addr = (char *)dev - dev->padded;
7350
4cb28970 7351 kvfree(addr);
74d332c1
ED
7352}
7353
1da177e4 7354/**
36909ea4 7355 * alloc_netdev_mqs - allocate network device
c835a677
TG
7356 * @sizeof_priv: size of private data to allocate space for
7357 * @name: device name format string
7358 * @name_assign_type: origin of device name
7359 * @setup: callback to initialize device
7360 * @txqs: the number of TX subqueues to allocate
7361 * @rxqs: the number of RX subqueues to allocate
1da177e4
LT
7362 *
7363 * Allocates a struct net_device with private data area for driver use
90e51adf 7364 * and performs basic initialization. Also allocates subqueue structs
36909ea4 7365 * for each queue on the device.
1da177e4 7366 */
36909ea4 7367struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
c835a677 7368 unsigned char name_assign_type,
36909ea4
TH
7369 void (*setup)(struct net_device *),
7370 unsigned int txqs, unsigned int rxqs)
1da177e4 7371{
1da177e4 7372 struct net_device *dev;
7943986c 7373 size_t alloc_size;
1ce8e7b5 7374 struct net_device *p;
1da177e4 7375
b6fe17d6
SH
7376 BUG_ON(strlen(name) >= sizeof(dev->name));
7377
36909ea4 7378 if (txqs < 1) {
7b6cd1ce 7379 pr_err("alloc_netdev: Unable to allocate device with zero queues\n");
55513fb4
TH
7380 return NULL;
7381 }
7382
a953be53 7383#ifdef CONFIG_SYSFS
36909ea4 7384 if (rxqs < 1) {
7b6cd1ce 7385 pr_err("alloc_netdev: Unable to allocate device with zero RX queues\n");
36909ea4
TH
7386 return NULL;
7387 }
7388#endif
7389
fd2ea0a7 7390 alloc_size = sizeof(struct net_device);
d1643d24
AD
7391 if (sizeof_priv) {
7392 /* ensure 32-byte alignment of private area */
1ce8e7b5 7393 alloc_size = ALIGN(alloc_size, NETDEV_ALIGN);
d1643d24
AD
7394 alloc_size += sizeof_priv;
7395 }
7396 /* ensure 32-byte alignment of whole construct */
1ce8e7b5 7397 alloc_size += NETDEV_ALIGN - 1;
1da177e4 7398
74d332c1
ED
7399 p = kzalloc(alloc_size, GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT);
7400 if (!p)
7401 p = vzalloc(alloc_size);
62b5942a 7402 if (!p)
1da177e4 7403 return NULL;
1da177e4 7404
1ce8e7b5 7405 dev = PTR_ALIGN(p, NETDEV_ALIGN);
1da177e4 7406 dev->padded = (char *)dev - (char *)p;
ab9c73cc 7407
29b4433d
ED
7408 dev->pcpu_refcnt = alloc_percpu(int);
7409 if (!dev->pcpu_refcnt)
74d332c1 7410 goto free_dev;
ab9c73cc 7411
ab9c73cc 7412 if (dev_addr_init(dev))
29b4433d 7413 goto free_pcpu;
ab9c73cc 7414
22bedad3 7415 dev_mc_init(dev);
a748ee24 7416 dev_uc_init(dev);
ccffad25 7417
c346dca1 7418 dev_net_set(dev, &init_net);
1da177e4 7419
8d3bdbd5 7420 dev->gso_max_size = GSO_MAX_SIZE;
30b678d8 7421 dev->gso_max_segs = GSO_MAX_SEGS;
fcbeb976 7422 dev->gso_min_segs = 0;
8d3bdbd5 7423
8d3bdbd5
DM
7424 INIT_LIST_HEAD(&dev->napi_list);
7425 INIT_LIST_HEAD(&dev->unreg_list);
5cde2829 7426 INIT_LIST_HEAD(&dev->close_list);
8d3bdbd5 7427 INIT_LIST_HEAD(&dev->link_watch_list);
2f268f12
VF
7428 INIT_LIST_HEAD(&dev->adj_list.upper);
7429 INIT_LIST_HEAD(&dev->adj_list.lower);
7430 INIT_LIST_HEAD(&dev->all_adj_list.upper);
7431 INIT_LIST_HEAD(&dev->all_adj_list.lower);
7866a621
SN
7432 INIT_LIST_HEAD(&dev->ptype_all);
7433 INIT_LIST_HEAD(&dev->ptype_specific);
02875878 7434 dev->priv_flags = IFF_XMIT_DST_RELEASE | IFF_XMIT_DST_RELEASE_PERM;
8d3bdbd5
DM
7435 setup(dev);
7436
906470c1 7437 if (!dev->tx_queue_len)
f84bb1ea 7438 dev->priv_flags |= IFF_NO_QUEUE;
906470c1 7439
36909ea4
TH
7440 dev->num_tx_queues = txqs;
7441 dev->real_num_tx_queues = txqs;
ed9af2e8 7442 if (netif_alloc_netdev_queues(dev))
8d3bdbd5 7443 goto free_all;
e8a0464c 7444
a953be53 7445#ifdef CONFIG_SYSFS
36909ea4
TH
7446 dev->num_rx_queues = rxqs;
7447 dev->real_num_rx_queues = rxqs;
fe822240 7448 if (netif_alloc_rx_queues(dev))
8d3bdbd5 7449 goto free_all;
df334545 7450#endif
0a9627f2 7451
1da177e4 7452 strcpy(dev->name, name);
c835a677 7453 dev->name_assign_type = name_assign_type;
cbda10fa 7454 dev->group = INIT_NETDEV_GROUP;
2c60db03
ED
7455 if (!dev->ethtool_ops)
7456 dev->ethtool_ops = &default_ethtool_ops;
e687ad60
PN
7457
7458 nf_hook_ingress_init(dev);
7459
1da177e4 7460 return dev;
ab9c73cc 7461
8d3bdbd5
DM
7462free_all:
7463 free_netdev(dev);
7464 return NULL;
7465
29b4433d
ED
7466free_pcpu:
7467 free_percpu(dev->pcpu_refcnt);
74d332c1
ED
7468free_dev:
7469 netdev_freemem(dev);
ab9c73cc 7470 return NULL;
1da177e4 7471}
36909ea4 7472EXPORT_SYMBOL(alloc_netdev_mqs);
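/*
 * Illustrative sketch (not part of dev.c): allocating a multiqueue device
 * directly with alloc_netdev_mqs() and a setup callback, much as the
 * alloc_etherdev_mqs()/alloc_netdev() wrappers do internally.  quux_setup(),
 * quux_alloc() and QUUX_NUM_QUEUES are hypothetical names.
 */
#include <linux/etherdevice.h>
#include <linux/netdevice.h>

#define QUUX_NUM_QUEUES	4

static void quux_setup(struct net_device *dev)
{
	ether_setup(dev);		/* sane Ethernet defaults */
	dev->flags |= IFF_NOARP;	/* example of a per-type tweak */
}

static struct net_device *quux_alloc(int priv_size)
{
	/* "%d" in the name is expanded when the device is registered */
	return alloc_netdev_mqs(priv_size, "quux%d", NET_NAME_UNKNOWN,
				quux_setup, QUUX_NUM_QUEUES, QUUX_NUM_QUEUES);
}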
1da177e4
LT
7473
7474/**
7475 * free_netdev - free network device
7476 * @dev: device
7477 *
4ec93edb
YH
7478 * This function does the last stage of destroying an allocated device
7479 * interface. The reference to the device object is released.
1da177e4 7480 * If this is the last reference then it will be freed.
93d05d4a 7481 * Must be called in process context.
1da177e4
LT
7482 */
7483void free_netdev(struct net_device *dev)
7484{
d565b0a1
HX
7485 struct napi_struct *p, *n;
7486
93d05d4a 7487 might_sleep();
60877a32 7488 netif_free_tx_queues(dev);
a953be53 7489#ifdef CONFIG_SYSFS
10595902 7490 kvfree(dev->_rx);
fe822240 7491#endif
e8a0464c 7492
33d480ce 7493 kfree(rcu_dereference_protected(dev->ingress_queue, 1));
24824a09 7494
f001fde5
JP
7495 /* Flush device addresses */
7496 dev_addr_flush(dev);
7497
d565b0a1
HX
7498 list_for_each_entry_safe(p, n, &dev->napi_list, dev_list)
7499 netif_napi_del(p);
7500
29b4433d
ED
7501 free_percpu(dev->pcpu_refcnt);
7502 dev->pcpu_refcnt = NULL;
7503
3041a069 7504 /* Compatibility with error handling in drivers */
1da177e4 7505 if (dev->reg_state == NETREG_UNINITIALIZED) {
74d332c1 7506 netdev_freemem(dev);
1da177e4
LT
7507 return;
7508 }
7509
7510 BUG_ON(dev->reg_state != NETREG_UNREGISTERED);
7511 dev->reg_state = NETREG_RELEASED;
7512
43cb76d9
GKH
7513 /* will free via device release */
7514 put_device(&dev->dev);
1da177e4 7515}
d1b19dff 7516EXPORT_SYMBOL(free_netdev);
4ec93edb 7517
f0db275a
SH
7518/**
7519 * synchronize_net - Synchronize with packet receive processing
7520 *
7521 * Wait for packets currently being received to be done.
7522 * Does not block later packets from starting.
7523 */
4ec93edb 7524void synchronize_net(void)
1da177e4
LT
7525{
7526 might_sleep();
be3fc413
ED
7527 if (rtnl_is_locked())
7528 synchronize_rcu_expedited();
7529 else
7530 synchronize_rcu();
1da177e4 7531}
d1b19dff 7532EXPORT_SYMBOL(synchronize_net);
1da177e4
LT
7533
7534/**
44a0873d 7535 * unregister_netdevice_queue - remove device from the kernel
1da177e4 7536 * @dev: device
44a0873d 7537 * @head: list
6ebfbc06 7538 *
1da177e4 7539 * This function shuts down a device interface and removes it
d59b54b1 7540 * from the kernel tables.
44a0873d 7541 * If @head is not NULL, the device is queued to be unregistered later.
1da177e4
LT
7542 *
7543 * Callers must hold the rtnl semaphore. You may want
7544 * unregister_netdev() instead of this.
7545 */
7546
44a0873d 7547void unregister_netdevice_queue(struct net_device *dev, struct list_head *head)
1da177e4 7548{
a6620712
HX
7549 ASSERT_RTNL();
7550
44a0873d 7551 if (head) {
9fdce099 7552 list_move_tail(&dev->unreg_list, head);
44a0873d
ED
7553 } else {
7554 rollback_registered(dev);
7555 /* Finish processing unregister after unlock */
7556 net_set_todo(dev);
7557 }
1da177e4 7558}
44a0873d 7559EXPORT_SYMBOL(unregister_netdevice_queue);
1da177e4 7560
9b5e383c
ED
7561/**
7562 * unregister_netdevice_many - unregister many devices
7563 * @head: list of devices
87757a91
ED
7564 *
7565 * Note: As most callers use a stack-allocated list_head,
7566 * we force a list_del() to make sure the stack won't be corrupted later.
9b5e383c
ED
7567 */
7568void unregister_netdevice_many(struct list_head *head)
7569{
7570 struct net_device *dev;
7571
7572 if (!list_empty(head)) {
7573 rollback_registered_many(head);
7574 list_for_each_entry(dev, head, unreg_list)
7575 net_set_todo(dev);
87757a91 7576 list_del(head);
9b5e383c
ED
7577 }
7578}
63c8099d 7579EXPORT_SYMBOL(unregister_netdevice_many);
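/*
 * Illustrative sketch (not part of dev.c): batching several unregistrations
 * under one rtnl_lock()/rtnl_unlock() cycle, the pattern the list_del() note
 * above protects.  corge_netdev_ops and corge_kill_all() are hypothetical,
 * and the devices are assumed to set dev->destructor = free_netdev so no
 * explicit free is needed here.
 */
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>

static const struct net_device_ops corge_netdev_ops;

static void corge_kill_all(struct net *net)
{
	struct net_device *dev, *aux;
	LIST_HEAD(kill_list);		/* on the stack, as the note says */

	rtnl_lock();
	for_each_netdev_safe(net, dev, aux)
		if (dev->netdev_ops == &corge_netdev_ops)
			unregister_netdevice_queue(dev, &kill_list);
	unregister_netdevice_many(&kill_list);
	rtnl_unlock();			/* netdev_run_todo() finishes the teardown */
}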
9b5e383c 7580
1da177e4
LT
7581/**
7582 * unregister_netdev - remove device from the kernel
7583 * @dev: device
7584 *
7585 * This function shuts down a device interface and removes it
d59b54b1 7586 * from the kernel tables.
1da177e4
LT
7587 *
7588 * This is just a wrapper for unregister_netdevice that takes
7589 * the rtnl semaphore. In general you want to use this and not
7590 * unregister_netdevice.
7591 */
7592void unregister_netdev(struct net_device *dev)
7593{
7594 rtnl_lock();
7595 unregister_netdevice(dev);
7596 rtnl_unlock();
7597}
1da177e4
LT
7598EXPORT_SYMBOL(unregister_netdev);
7599
ce286d32
EB
7600/**
7601 * dev_change_net_namespace - move device to a different network namespace
7602 * @dev: device
7603 * @net: network namespace
7604 * @pat: If not NULL name pattern to try if the current device name
7605 * is already taken in the destination network namespace.
7606 *
7607 * This function shuts down a device interface and moves it
7608 * to a new network namespace. On success 0 is returned, on
7609 * a failure a negative errno code is returned.
7610 *
7611 * Callers must hold the rtnl semaphore.
7612 */
7613
7614int dev_change_net_namespace(struct net_device *dev, struct net *net, const char *pat)
7615{
ce286d32
EB
7616 int err;
7617
7618 ASSERT_RTNL();
7619
7620 /* Don't allow namespace local devices to be moved. */
7621 err = -EINVAL;
7622 if (dev->features & NETIF_F_NETNS_LOCAL)
7623 goto out;
7624
7625 /* Ensure the device has been registered */
ce286d32
EB
7626 if (dev->reg_state != NETREG_REGISTERED)
7627 goto out;
7628
7629 /* Get out if there is nothing to do */
7630 err = 0;
878628fb 7631 if (net_eq(dev_net(dev), net))
ce286d32
EB
7632 goto out;
7633
7634 /* Pick the destination device name, and ensure
7635 * we can use it in the destination network namespace.
7636 */
7637 err = -EEXIST;
d9031024 7638 if (__dev_get_by_name(net, dev->name)) {
ce286d32
EB
7639 /* We get here if we can't use the current device name */
7640 if (!pat)
7641 goto out;
828de4f6 7642 if (dev_get_valid_name(net, dev, pat) < 0)
ce286d32
EB
7643 goto out;
7644 }
7645
7646 /*
7647 * And now a mini version of register_netdevice and unregister_netdevice.
7648 */
7649
7650 /* If device is running close it first. */
9b772652 7651 dev_close(dev);
ce286d32
EB
7652
7653 /* And unlink it from device chain */
7654 err = -ENODEV;
7655 unlist_netdevice(dev);
7656
7657 synchronize_net();
7658
7659 /* Shutdown queueing discipline. */
7660 dev_shutdown(dev);
7661
7662 /* Notify protocols that we are about to destroy
7663 this device. They should clean up all of their state.
3b27e105
DL
7664
7665 Note that dev->reg_state stays at NETREG_REGISTERED.
7666 This is wanted because this way 8021q and macvlan know
7667 the device is just moving and can keep their slaves up.
ce286d32
EB
7668 */
7669 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
6549dd43
G
7670 rcu_barrier();
7671 call_netdevice_notifiers(NETDEV_UNREGISTER_FINAL, dev);
7f294054 7672 rtmsg_ifinfo(RTM_DELLINK, dev, ~0U, GFP_KERNEL);
ce286d32
EB
7673
7674 /*
7675 * Flush the unicast and multicast chains
7676 */
a748ee24 7677 dev_uc_flush(dev);
22bedad3 7678 dev_mc_flush(dev);
ce286d32 7679
4e66ae2e
SH
7680 /* Send a netdev-removed uevent to the old namespace */
7681 kobject_uevent(&dev->dev.kobj, KOBJ_REMOVE);
4c75431a 7682 netdev_adjacent_del_links(dev);
4e66ae2e 7683
ce286d32 7684 /* Actually switch the network namespace */
c346dca1 7685 dev_net_set(dev, net);
ce286d32 7686
ce286d32 7687 /* If there is an ifindex conflict assign a new one */
7a66bbc9 7688 if (__dev_get_by_index(net, dev->ifindex))
ce286d32 7689 dev->ifindex = dev_new_index(net);
ce286d32 7690
4e66ae2e
SH
7691 /* Send a netdev-add uevent to the new namespace */
7692 kobject_uevent(&dev->dev.kobj, KOBJ_ADD);
4c75431a 7693 netdev_adjacent_add_links(dev);
4e66ae2e 7694
8b41d188 7695 /* Fixup kobjects */
a1b3f594 7696 err = device_rename(&dev->dev, dev->name);
8b41d188 7697 WARN_ON(err);
ce286d32
EB
7698
7699 /* Add the device back in the hashes */
7700 list_netdevice(dev);
7701
7702 /* Notify protocols, that a new device appeared. */
7703 call_netdevice_notifiers(NETDEV_REGISTER, dev);
7704
d90a909e
EB
7705 /*
7706 * Prevent userspace races by waiting until the network
7707 * device is fully set up before sending notifications.
7708 */
7f294054 7709 rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U, GFP_KERNEL);
d90a909e 7710
ce286d32
EB
7711 synchronize_net();
7712 err = 0;
7713out:
7714 return err;
7715}
463d0183 7716EXPORT_SYMBOL_GPL(dev_change_net_namespace);
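/*
 * Illustrative sketch (not part of dev.c): moving a device into another
 * network namespace under the rtnl lock, falling back to a "moved%d" name if
 * the current name is already taken there.  grault_move() is a hypothetical
 * caller, and @net is assumed to be a namespace the caller holds a reference
 * on.
 */
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>

static int grault_move(struct net_device *dev, struct net *net)
{
	int err;

	rtnl_lock();
	err = dev_change_net_namespace(dev, net, "moved%d");
	rtnl_unlock();

	return err;
}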
ce286d32 7717
1da177e4
LT
7718static int dev_cpu_callback(struct notifier_block *nfb,
7719 unsigned long action,
7720 void *ocpu)
7721{
7722 struct sk_buff **list_skb;
1da177e4
LT
7723 struct sk_buff *skb;
7724 unsigned int cpu, oldcpu = (unsigned long)ocpu;
7725 struct softnet_data *sd, *oldsd;
7726
8bb78442 7727 if (action != CPU_DEAD && action != CPU_DEAD_FROZEN)
1da177e4
LT
7728 return NOTIFY_OK;
7729
7730 local_irq_disable();
7731 cpu = smp_processor_id();
7732 sd = &per_cpu(softnet_data, cpu);
7733 oldsd = &per_cpu(softnet_data, oldcpu);
7734
7735 /* Find end of our completion_queue. */
7736 list_skb = &sd->completion_queue;
7737 while (*list_skb)
7738 list_skb = &(*list_skb)->next;
7739 /* Append completion queue from offline CPU. */
7740 *list_skb = oldsd->completion_queue;
7741 oldsd->completion_queue = NULL;
7742
1da177e4 7743 /* Append output queue from offline CPU. */
a9cbd588
CG
7744 if (oldsd->output_queue) {
7745 *sd->output_queue_tailp = oldsd->output_queue;
7746 sd->output_queue_tailp = oldsd->output_queue_tailp;
7747 oldsd->output_queue = NULL;
7748 oldsd->output_queue_tailp = &oldsd->output_queue;
7749 }
ac64da0b
ED
7750 /* Append NAPI poll list from offline CPU, with one exception :
7751 * process_backlog() must be called by cpu owning percpu backlog.
7752 * We properly handle process_queue & input_pkt_queue later.
7753 */
7754 while (!list_empty(&oldsd->poll_list)) {
7755 struct napi_struct *napi = list_first_entry(&oldsd->poll_list,
7756 struct napi_struct,
7757 poll_list);
7758
7759 list_del_init(&napi->poll_list);
7760 if (napi->poll == process_backlog)
7761 napi->state = 0;
7762 else
7763 ____napi_schedule(sd, napi);
264524d5 7764 }
1da177e4
LT
7765
7766 raise_softirq_irqoff(NET_TX_SOFTIRQ);
7767 local_irq_enable();
7768
7769 /* Process offline CPU's input_pkt_queue */
76cc8b13 7770 while ((skb = __skb_dequeue(&oldsd->process_queue))) {
91e83133 7771 netif_rx_ni(skb);
76cc8b13 7772 input_queue_head_incr(oldsd);
fec5e652 7773 }
ac64da0b 7774 while ((skb = skb_dequeue(&oldsd->input_pkt_queue))) {
91e83133 7775 netif_rx_ni(skb);
76cc8b13
TH
7776 input_queue_head_incr(oldsd);
7777 }
1da177e4
LT
7778
7779 return NOTIFY_OK;
7780}
1da177e4
LT
7781
7782
7f353bf2 7783/**
b63365a2
HX
7784 * netdev_increment_features - increment feature set by one
7785 * @all: current feature set
7786 * @one: new feature set
7787 * @mask: mask feature set
7f353bf2
HX
7788 *
7789 * Computes a new feature set after adding a device with feature set
b63365a2
HX
7790 * @one to the master device with current feature set @all. Will not
7791 * enable anything that is off in @mask. Returns the new feature set.
7f353bf2 7792 */
c8f44aff
MM
7793netdev_features_t netdev_increment_features(netdev_features_t all,
7794 netdev_features_t one, netdev_features_t mask)
b63365a2 7795{
c8cd0989 7796 if (mask & NETIF_F_HW_CSUM)
a188222b 7797 mask |= NETIF_F_CSUM_MASK;
1742f183 7798 mask |= NETIF_F_VLAN_CHALLENGED;
7f353bf2 7799
a188222b 7800 all |= one & (NETIF_F_ONE_FOR_ALL | NETIF_F_CSUM_MASK) & mask;
1742f183 7801 all &= one | ~NETIF_F_ALL_FOR_ALL;
c6e1a0d1 7802
1742f183 7803 /* If one device supports hw checksumming, set for all. */
c8cd0989
TH
7804 if (all & NETIF_F_HW_CSUM)
7805 all &= ~(NETIF_F_CSUM_MASK & ~NETIF_F_HW_CSUM);
7f353bf2
HX
7806
7807 return all;
7808}
b63365a2 7809EXPORT_SYMBOL(netdev_increment_features);
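/*
 * Illustrative sketch (not part of dev.c): an aggregating (master) device
 * recomputing its feature set from its lower devices, in the shape of an
 * ndo_fix_features handler, the way bonding and team do.  garply_priv, its
 * slave list and garply_fix_features() are hypothetical names.
 */
#include <linux/netdevice.h>

struct garply_slave {
	struct list_head list;
	struct net_device *dev;
};

struct garply_priv {
	struct list_head slaves;
};

static netdev_features_t garply_fix_features(struct net_device *master,
					     netdev_features_t features)
{
	struct garply_priv *priv = netdev_priv(master);
	struct garply_slave *slave;
	netdev_features_t mask = features;	/* caller-requested set */

	features &= ~NETIF_F_ONE_FOR_ALL;
	features |= NETIF_F_ALL_FOR_ALL;

	/* fold each slave's features in; the mask keeps disabled bits off */
	list_for_each_entry(slave, &priv->slaves, list)
		features = netdev_increment_features(features,
						     slave->dev->features,
						     mask);
	return features;
}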
7f353bf2 7810
430f03cd 7811static struct hlist_head * __net_init netdev_create_hash(void)
30d97d35
PE
7812{
7813 int i;
7814 struct hlist_head *hash;
7815
7816 hash = kmalloc(sizeof(*hash) * NETDEV_HASHENTRIES, GFP_KERNEL);
7817 if (hash != NULL)
7818 for (i = 0; i < NETDEV_HASHENTRIES; i++)
7819 INIT_HLIST_HEAD(&hash[i]);
7820
7821 return hash;
7822}
7823
881d966b 7824/* Initialize per network namespace state */
4665079c 7825static int __net_init netdev_init(struct net *net)
881d966b 7826{
734b6541
RM
7827 if (net != &init_net)
7828 INIT_LIST_HEAD(&net->dev_base_head);
881d966b 7829
30d97d35
PE
7830 net->dev_name_head = netdev_create_hash();
7831 if (net->dev_name_head == NULL)
7832 goto err_name;
881d966b 7833
30d97d35
PE
7834 net->dev_index_head = netdev_create_hash();
7835 if (net->dev_index_head == NULL)
7836 goto err_idx;
881d966b
EB
7837
7838 return 0;
30d97d35
PE
7839
7840err_idx:
7841 kfree(net->dev_name_head);
7842err_name:
7843 return -ENOMEM;
881d966b
EB
7844}
7845
f0db275a
SH
7846/**
7847 * netdev_drivername - network driver for the device
7848 * @dev: network device
f0db275a
SH
7849 *
7850 * Determine network driver for device.
7851 */
3019de12 7852const char *netdev_drivername(const struct net_device *dev)
6579e57b 7853{
cf04a4c7
SH
7854 const struct device_driver *driver;
7855 const struct device *parent;
3019de12 7856 const char *empty = "";
6579e57b
AV
7857
7858 parent = dev->dev.parent;
6579e57b 7859 if (!parent)
3019de12 7860 return empty;
6579e57b
AV
7861
7862 driver = parent->driver;
7863 if (driver && driver->name)
3019de12
DM
7864 return driver->name;
7865 return empty;
6579e57b
AV
7866}
7867
6ea754eb
JP
7868static void __netdev_printk(const char *level, const struct net_device *dev,
7869 struct va_format *vaf)
256df2f3 7870{
b004ff49 7871 if (dev && dev->dev.parent) {
6ea754eb
JP
7872 dev_printk_emit(level[1] - '0',
7873 dev->dev.parent,
7874 "%s %s %s%s: %pV",
7875 dev_driver_string(dev->dev.parent),
7876 dev_name(dev->dev.parent),
7877 netdev_name(dev), netdev_reg_state(dev),
7878 vaf);
b004ff49 7879 } else if (dev) {
6ea754eb
JP
7880 printk("%s%s%s: %pV",
7881 level, netdev_name(dev), netdev_reg_state(dev), vaf);
b004ff49 7882 } else {
6ea754eb 7883 printk("%s(NULL net_device): %pV", level, vaf);
b004ff49 7884 }
256df2f3
JP
7885}
7886
6ea754eb
JP
7887void netdev_printk(const char *level, const struct net_device *dev,
7888 const char *format, ...)
256df2f3
JP
7889{
7890 struct va_format vaf;
7891 va_list args;
256df2f3
JP
7892
7893 va_start(args, format);
7894
7895 vaf.fmt = format;
7896 vaf.va = &args;
7897
6ea754eb 7898 __netdev_printk(level, dev, &vaf);
b004ff49 7899
256df2f3 7900 va_end(args);
256df2f3
JP
7901}
7902EXPORT_SYMBOL(netdev_printk);
7903
7904#define define_netdev_printk_level(func, level) \
6ea754eb 7905void func(const struct net_device *dev, const char *fmt, ...) \
256df2f3 7906{ \
256df2f3
JP
7907 struct va_format vaf; \
7908 va_list args; \
7909 \
7910 va_start(args, fmt); \
7911 \
7912 vaf.fmt = fmt; \
7913 vaf.va = &args; \
7914 \
6ea754eb 7915 __netdev_printk(level, dev, &vaf); \
b004ff49 7916 \
256df2f3 7917 va_end(args); \
256df2f3
JP
7918} \
7919EXPORT_SYMBOL(func);
7920
7921define_netdev_printk_level(netdev_emerg, KERN_EMERG);
7922define_netdev_printk_level(netdev_alert, KERN_ALERT);
7923define_netdev_printk_level(netdev_crit, KERN_CRIT);
7924define_netdev_printk_level(netdev_err, KERN_ERR);
7925define_netdev_printk_level(netdev_warn, KERN_WARNING);
7926define_netdev_printk_level(netdev_notice, KERN_NOTICE);
7927define_netdev_printk_level(netdev_info, KERN_INFO);
7928
4665079c 7929static void __net_exit netdev_exit(struct net *net)
881d966b
EB
7930{
7931 kfree(net->dev_name_head);
7932 kfree(net->dev_index_head);
7933}
7934
022cbae6 7935static struct pernet_operations __net_initdata netdev_net_ops = {
881d966b
EB
7936 .init = netdev_init,
7937 .exit = netdev_exit,
7938};
7939
4665079c 7940static void __net_exit default_device_exit(struct net *net)
ce286d32 7941{
e008b5fc 7942 struct net_device *dev, *aux;
ce286d32 7943 /*
e008b5fc 7944 * Push all migratable network devices back to the
ce286d32
EB
7945 * initial network namespace
7946 */
7947 rtnl_lock();
e008b5fc 7948 for_each_netdev_safe(net, dev, aux) {
ce286d32 7949 int err;
aca51397 7950 char fb_name[IFNAMSIZ];
ce286d32
EB
7951
7952 /* Ignore unmoveable devices (i.e. loopback) */
7953 if (dev->features & NETIF_F_NETNS_LOCAL)
7954 continue;
7955
e008b5fc
EB
7956 /* Leave virtual devices for the generic cleanup */
7957 if (dev->rtnl_link_ops)
7958 continue;
d0c082ce 7959
25985edc 7960 /* Push remaining network devices to init_net */
aca51397
PE
7961 snprintf(fb_name, IFNAMSIZ, "dev%d", dev->ifindex);
7962 err = dev_change_net_namespace(dev, &init_net, fb_name);
ce286d32 7963 if (err) {
7b6cd1ce
JP
7964 pr_emerg("%s: failed to move %s to init_net: %d\n",
7965 __func__, dev->name, err);
aca51397 7966 BUG();
ce286d32
EB
7967 }
7968 }
7969 rtnl_unlock();
7970}
7971
50624c93
EB
7972static void __net_exit rtnl_lock_unregistering(struct list_head *net_list)
7973{
7974 /* Return with the rtnl_lock held when there are no network
7975 * devices unregistering in any network namespace in net_list.
7976 */
7977 struct net *net;
7978 bool unregistering;
ff960a73 7979 DEFINE_WAIT_FUNC(wait, woken_wake_function);
50624c93 7980
ff960a73 7981 add_wait_queue(&netdev_unregistering_wq, &wait);
50624c93 7982 for (;;) {
50624c93
EB
7983 unregistering = false;
7984 rtnl_lock();
7985 list_for_each_entry(net, net_list, exit_list) {
7986 if (net->dev_unreg_count > 0) {
7987 unregistering = true;
7988 break;
7989 }
7990 }
7991 if (!unregistering)
7992 break;
7993 __rtnl_unlock();
ff960a73
PZ
7994
7995 wait_woken(&wait, TASK_UNINTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
50624c93 7996 }
ff960a73 7997 remove_wait_queue(&netdev_unregistering_wq, &wait);
50624c93
EB
7998}
7999
04dc7f6b
EB
8000static void __net_exit default_device_exit_batch(struct list_head *net_list)
8001{
8002 /* At exit all network devices must be removed from a network
b595076a 8003 * namespace. Do this in the reverse order of registration.
04dc7f6b
EB
8004 * Do this across as many network namespaces as possible to
8005 * improve batching efficiency.
8006 */
8007 struct net_device *dev;
8008 struct net *net;
8009 LIST_HEAD(dev_kill_list);
8010
50624c93
EB
8011 /* To prevent network device cleanup code from dereferencing
8012 * loopback devices or network devices that have been freed
8013 * wait here for all pending unregistrations to complete,
8014 * before unregistering the loopback device and allowing the
8015 * network namespace to be freed.
8016 *
8017 * The netdev todo list containing all network devices
8018 * unregistrations that happen in default_device_exit_batch
8019 * will run in the rtnl_unlock() at the end of
8020 * default_device_exit_batch.
8021 */
8022 rtnl_lock_unregistering(net_list);
04dc7f6b
EB
8023 list_for_each_entry(net, net_list, exit_list) {
8024 for_each_netdev_reverse(net, dev) {
b0ab2fab 8025 if (dev->rtnl_link_ops && dev->rtnl_link_ops->dellink)
04dc7f6b
EB
8026 dev->rtnl_link_ops->dellink(dev, &dev_kill_list);
8027 else
8028 unregister_netdevice_queue(dev, &dev_kill_list);
8029 }
8030 }
8031 unregister_netdevice_many(&dev_kill_list);
8032 rtnl_unlock();
8033}
8034
022cbae6 8035static struct pernet_operations __net_initdata default_device_ops = {
ce286d32 8036 .exit = default_device_exit,
04dc7f6b 8037 .exit_batch = default_device_exit_batch,
ce286d32
EB
8038};
8039
1da177e4
LT
8040/*
8041 * Initialize the DEV module. At boot time this walks the device list and
8042 * unhooks any devices that fail to initialise (normally hardware not
8043 * present) and leaves us with a valid list of present and active devices.
8044 *
8045 */
8046
8047/*
8048 * This is called single threaded during boot, so no need
8049 * to take the rtnl semaphore.
8050 */
8051static int __init net_dev_init(void)
8052{
8053 int i, rc = -ENOMEM;
8054
8055 BUG_ON(!dev_boot_phase);
8056
1da177e4
LT
8057 if (dev_proc_init())
8058 goto out;
8059
8b41d188 8060 if (netdev_kobject_init())
1da177e4
LT
8061 goto out;
8062
8063 INIT_LIST_HEAD(&ptype_all);
82d8a867 8064 for (i = 0; i < PTYPE_HASH_SIZE; i++)
1da177e4
LT
8065 INIT_LIST_HEAD(&ptype_base[i]);
8066
62532da9
VY
8067 INIT_LIST_HEAD(&offload_base);
8068
881d966b
EB
8069 if (register_pernet_subsys(&netdev_net_ops))
8070 goto out;
1da177e4
LT
8071
8072 /*
8073 * Initialise the packet receive queues.
8074 */
8075
6f912042 8076 for_each_possible_cpu(i) {
e36fa2f7 8077 struct softnet_data *sd = &per_cpu(softnet_data, i);
1da177e4 8078
e36fa2f7 8079 skb_queue_head_init(&sd->input_pkt_queue);
6e7676c1 8080 skb_queue_head_init(&sd->process_queue);
e36fa2f7 8081 INIT_LIST_HEAD(&sd->poll_list);
a9cbd588 8082 sd->output_queue_tailp = &sd->output_queue;
df334545 8083#ifdef CONFIG_RPS
e36fa2f7
ED
8084 sd->csd.func = rps_trigger_softirq;
8085 sd->csd.info = sd;
e36fa2f7 8086 sd->cpu = i;
1e94d72f 8087#endif
0a9627f2 8088
e36fa2f7
ED
8089 sd->backlog.poll = process_backlog;
8090 sd->backlog.weight = weight_p;
1da177e4
LT
8091 }
8092
1da177e4
LT
8093 dev_boot_phase = 0;
8094
505d4f73
EB
8095 /* The loopback device is special: if any other network device
8096 * is present in a network namespace, the loopback device must
8097 * be present as well. Since we now dynamically allocate and free
8098 * the loopback device, ensure this invariant is maintained by
8099 * keeping the loopback device as the first device on the
8100 * list of network devices. This ensures the loopback device
8101 * is the first device that appears and the last network device
8102 * that disappears.
8103 */
8104 if (register_pernet_device(&loopback_net_ops))
8105 goto out;
8106
8107 if (register_pernet_device(&default_device_ops))
8108 goto out;
8109
962cf36c
CM
8110 open_softirq(NET_TX_SOFTIRQ, net_tx_action);
8111 open_softirq(NET_RX_SOFTIRQ, net_rx_action);
1da177e4
LT
8112
8113 hotcpu_notifier(dev_cpu_callback, 0);
f38a9eb1 8114 dst_subsys_init();
1da177e4
LT
8115 rc = 0;
8116out:
8117 return rc;
8118}
8119
8120subsys_initcall(net_dev_init);