/*
 *	NET3	Protocol independent device support routines.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 *	Derived from the non IP parts of dev.c 1.0.19
 *		Authors:	Ross Biro
 *				Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *				Mark Evans, <evansmp@uhura.aston.ac.uk>
 *
 *	Additional Authors:
 *		Florian la Roche <rzsfl@rz.uni-sb.de>
 *		Alan Cox <gw4pts@gw4pts.ampr.org>
 *		David Hinds <dahinds@users.sourceforge.net>
 *		Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
 *		Adam Sulmicki <adam@cfar.umd.edu>
 *		Pekka Riikonen <priikone@poesidon.pspt.fi>
 *
 *	Changes:
 *		D.J. Barrow	:	Fixed bug where dev->refcnt gets set
 *					to 2 if register_netdev gets called
 *					before net_dev_init & also removed a
 *					few lines of code in the process.
 *		Alan Cox	:	device private ioctl copies fields back.
 *		Alan Cox	:	Transmit queue code does relevant
 *					stunts to keep the queue safe.
 *		Alan Cox	:	Fixed double lock.
 *		Alan Cox	:	Fixed promisc NULL pointer trap
 *		????????	:	Support the full private ioctl range
 *		Alan Cox	:	Moved ioctl permission check into
 *					drivers
 *		Tim Kordas	:	SIOCADDMULTI/SIOCDELMULTI
 *		Alan Cox	:	100 backlog just doesn't cut it when
 *					you start doing multicast video 8)
 *		Alan Cox	:	Rewrote net_bh and list manager.
 *		Alan Cox	:	Fix ETH_P_ALL echoback lengths.
 *		Alan Cox	:	Took out transmit every packet pass
 *					Saved a few bytes in the ioctl handler
 *		Alan Cox	:	Network driver sets packet type before
 *					calling netif_rx. Saves a function
 *					call a packet.
 *		Alan Cox	:	Hashed net_bh()
 *		Richard Kooijman:	Timestamp fixes.
 *		Alan Cox	:	Wrong field in SIOCGIFDSTADDR
 *		Alan Cox	:	Device lock protection.
 *		Alan Cox	:	Fixed nasty side effect of device close
 *					changes.
 *		Rudi Cilibrasi	:	Pass the right thing to
 *					set_mac_address()
 *		Dave Miller	:	32bit quantity for the device lock to
 *					make it work out on a Sparc.
 *		Bjorn Ekwall	:	Added KERNELD hack.
 *		Alan Cox	:	Cleaned up the backlog initialise.
 *		Craig Metz	:	SIOCGIFCONF fix if space for under
 *					1 device.
 *		Thomas Bogendoerfer :	Return ENODEV for dev_open, if there
 *					is no device open function.
 *		Andi Kleen	:	Fix error reporting for SIOCGIFCONF
 *		Michael Chastain:	Fix signed/unsigned for SIOCGIFCONF
 *		Cyrus Durgin	:	Cleaned for KMOD
 *		Adam Sulmicki	:	Bug Fix : Network Device Unload
 *					A network device unload needs to purge
 *					the backlog queue.
 *		Paul Rusty Russell	:	SIOCSIFNAME
 *		Pekka Riikonen	:	Netdev boot-time settings code
 *		Andrew Morton	:	Make unregister_netdevice wait
 *					indefinitely on dev->refcnt
 *		J Hadi Salim	:	- Backlog queue sampling
 *					- netif_rx() feedback
 */

#include <asm/uaccess.h>
#include <linux/bitops.h>
#include <linux/capability.h>
#include <linux/cpu.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/hash.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/mutex.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/notifier.h>
#include <linux/skbuff.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <linux/rtnetlink.h>
#include <linux/stat.h>
#include <net/dst.h>
#include <net/pkt_sched.h>
#include <net/checksum.h>
#include <net/xfrm.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/netpoll.h>
#include <linux/rcupdate.h>
#include <linux/delay.h>
#include <net/iw_handler.h>
#include <asm/current.h>
#include <linux/audit.h>
#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/ctype.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <net/ip.h>
#include <net/mpls.h>
#include <linux/ipv6.h>
#include <linux/in.h>
#include <linux/jhash.h>
#include <linux/random.h>
#include <trace/events/napi.h>
#include <trace/events/net.h>
#include <trace/events/skb.h>
#include <linux/pci.h>
#include <linux/inetdevice.h>
#include <linux/cpu_rmap.h>
#include <linux/static_key.h>
#include <linux/hashtable.h>
#include <linux/vmalloc.h>
#include <linux/if_macvlan.h>
#include <linux/errqueue.h>
#include <linux/hrtimer.h>
#include <linux/netfilter_ingress.h>

#include "net-sysfs.h"

/* Instead of increasing this, you should create a hash table. */
#define MAX_GRO_SKBS 8

/* This should be increased if a protocol with a bigger head is added. */
#define GRO_MAX_HEAD (MAX_HEADER + 128)

static DEFINE_SPINLOCK(ptype_lock);
static DEFINE_SPINLOCK(offload_lock);
struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly;
struct list_head ptype_all __read_mostly;	/* Taps */
static struct list_head offload_base __read_mostly;

static int netif_rx_internal(struct sk_buff *skb);
static int call_netdevice_notifiers_info(unsigned long val,
					 struct net_device *dev,
					 struct netdev_notifier_info *info);

/*
 * The @dev_base_head list is protected by @dev_base_lock and the rtnl
 * semaphore.
 *
 * Pure readers hold dev_base_lock for reading, or rcu_read_lock()
 *
 * Writers must hold the rtnl semaphore while they loop through the
 * dev_base_head list, and hold dev_base_lock for writing when they do the
 * actual updates.  This allows pure readers to access the list even
 * while a writer is preparing to update it.
 *
 * To put it another way, dev_base_lock is held for writing only to
 * protect against pure readers; the rtnl semaphore provides the
 * protection against other writers.
 *
 * See, for example usages, register_netdevice() and
 * unregister_netdevice(), which must be called with the rtnl
 * semaphore held.
 */
DEFINE_RWLOCK(dev_base_lock);
EXPORT_SYMBOL(dev_base_lock);

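/* Illustrative sketch (not part of this file): the two reader patterns
 * the comment above describes.  A pure reader may either take the
 * rwlock for reading or, preferably, enter an RCU read-side section:
 *
 *	struct net_device *dev;
 *
 *	rcu_read_lock();
 *	for_each_netdev_rcu(net, dev)
 *		pr_info("%s\n", dev->name);
 *	rcu_read_unlock();
 *
 * Writers instead run under rtnl_lock() and take dev_base_lock only
 * around the actual list update, as list_netdevice() below does.
 */
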
/* protects napi_hash addition/deletion and napi_gen_id */
static DEFINE_SPINLOCK(napi_hash_lock);

static unsigned int napi_gen_id;
static DEFINE_HASHTABLE(napi_hash, 8);

static seqcount_t devnet_rename_seq;

static inline void dev_base_seq_inc(struct net *net)
{
	while (++net->dev_base_seq == 0)
		;
}

static inline struct hlist_head *dev_name_hash(struct net *net, const char *name)
{
	unsigned int hash = full_name_hash(name, strnlen(name, IFNAMSIZ));

	return &net->dev_name_head[hash_32(hash, NETDEV_HASHBITS)];
}

static inline struct hlist_head *dev_index_hash(struct net *net, int ifindex)
{
	return &net->dev_index_head[ifindex & (NETDEV_HASHENTRIES - 1)];
}

static inline void rps_lock(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
	spin_lock(&sd->input_pkt_queue.lock);
#endif
}

static inline void rps_unlock(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
	spin_unlock(&sd->input_pkt_queue.lock);
#endif
}

/* Device list insertion */
static void list_netdevice(struct net_device *dev)
{
	struct net *net = dev_net(dev);

	ASSERT_RTNL();

	write_lock_bh(&dev_base_lock);
	list_add_tail_rcu(&dev->dev_list, &net->dev_base_head);
	hlist_add_head_rcu(&dev->name_hlist, dev_name_hash(net, dev->name));
	hlist_add_head_rcu(&dev->index_hlist,
			   dev_index_hash(net, dev->ifindex));
	write_unlock_bh(&dev_base_lock);

	dev_base_seq_inc(net);
}

/* Device list removal
 * caller must respect a RCU grace period before freeing/reusing dev
 */
static void unlist_netdevice(struct net_device *dev)
{
	ASSERT_RTNL();

	/* Unlink dev from the device chain */
	write_lock_bh(&dev_base_lock);
	list_del_rcu(&dev->dev_list);
	hlist_del_rcu(&dev->name_hlist);
	hlist_del_rcu(&dev->index_hlist);
	write_unlock_bh(&dev_base_lock);

	dev_base_seq_inc(dev_net(dev));
}

/*
 *	Our notifier list
 */

static RAW_NOTIFIER_HEAD(netdev_chain);

/*
 *	Device drivers call our routines to queue packets here. We empty the
 *	queue in the local softnet handler.
 */

DEFINE_PER_CPU_ALIGNED(struct softnet_data, softnet_data);
EXPORT_PER_CPU_SYMBOL(softnet_data);

#ifdef CONFIG_LOCKDEP
/*
 * register_netdevice() inits txq->_xmit_lock and sets lockdep class
 * according to dev->type
 */
static const unsigned short netdev_lock_type[] =
	{ARPHRD_NETROM, ARPHRD_ETHER, ARPHRD_EETHER, ARPHRD_AX25,
	 ARPHRD_PRONET, ARPHRD_CHAOS, ARPHRD_IEEE802, ARPHRD_ARCNET,
	 ARPHRD_APPLETLK, ARPHRD_DLCI, ARPHRD_ATM, ARPHRD_METRICOM,
	 ARPHRD_IEEE1394, ARPHRD_EUI64, ARPHRD_INFINIBAND, ARPHRD_SLIP,
	 ARPHRD_CSLIP, ARPHRD_SLIP6, ARPHRD_CSLIP6, ARPHRD_RSRVD,
	 ARPHRD_ADAPT, ARPHRD_ROSE, ARPHRD_X25, ARPHRD_HWX25,
	 ARPHRD_PPP, ARPHRD_CISCO, ARPHRD_LAPB, ARPHRD_DDCMP,
	 ARPHRD_RAWHDLC, ARPHRD_TUNNEL, ARPHRD_TUNNEL6, ARPHRD_FRAD,
	 ARPHRD_SKIP, ARPHRD_LOOPBACK, ARPHRD_LOCALTLK, ARPHRD_FDDI,
	 ARPHRD_BIF, ARPHRD_SIT, ARPHRD_IPDDP, ARPHRD_IPGRE,
	 ARPHRD_PIMREG, ARPHRD_HIPPI, ARPHRD_ASH, ARPHRD_ECONET,
	 ARPHRD_IRDA, ARPHRD_FCPP, ARPHRD_FCAL, ARPHRD_FCPL,
	 ARPHRD_FCFABRIC, ARPHRD_IEEE80211, ARPHRD_IEEE80211_PRISM,
	 ARPHRD_IEEE80211_RADIOTAP, ARPHRD_PHONET, ARPHRD_PHONET_PIPE,
	 ARPHRD_IEEE802154, ARPHRD_VOID, ARPHRD_NONE};

static const char *const netdev_lock_name[] =
	{"_xmit_NETROM", "_xmit_ETHER", "_xmit_EETHER", "_xmit_AX25",
	 "_xmit_PRONET", "_xmit_CHAOS", "_xmit_IEEE802", "_xmit_ARCNET",
	 "_xmit_APPLETLK", "_xmit_DLCI", "_xmit_ATM", "_xmit_METRICOM",
	 "_xmit_IEEE1394", "_xmit_EUI64", "_xmit_INFINIBAND", "_xmit_SLIP",
	 "_xmit_CSLIP", "_xmit_SLIP6", "_xmit_CSLIP6", "_xmit_RSRVD",
	 "_xmit_ADAPT", "_xmit_ROSE", "_xmit_X25", "_xmit_HWX25",
	 "_xmit_PPP", "_xmit_CISCO", "_xmit_LAPB", "_xmit_DDCMP",
	 "_xmit_RAWHDLC", "_xmit_TUNNEL", "_xmit_TUNNEL6", "_xmit_FRAD",
	 "_xmit_SKIP", "_xmit_LOOPBACK", "_xmit_LOCALTLK", "_xmit_FDDI",
	 "_xmit_BIF", "_xmit_SIT", "_xmit_IPDDP", "_xmit_IPGRE",
	 "_xmit_PIMREG", "_xmit_HIPPI", "_xmit_ASH", "_xmit_ECONET",
	 "_xmit_IRDA", "_xmit_FCPP", "_xmit_FCAL", "_xmit_FCPL",
	 "_xmit_FCFABRIC", "_xmit_IEEE80211", "_xmit_IEEE80211_PRISM",
	 "_xmit_IEEE80211_RADIOTAP", "_xmit_PHONET", "_xmit_PHONET_PIPE",
	 "_xmit_IEEE802154", "_xmit_VOID", "_xmit_NONE"};

static struct lock_class_key netdev_xmit_lock_key[ARRAY_SIZE(netdev_lock_type)];
static struct lock_class_key netdev_addr_lock_key[ARRAY_SIZE(netdev_lock_type)];

static inline unsigned short netdev_lock_pos(unsigned short dev_type)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(netdev_lock_type); i++)
		if (netdev_lock_type[i] == dev_type)
			return i;
	/* the last key is used by default */
	return ARRAY_SIZE(netdev_lock_type) - 1;
}

static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
						 unsigned short dev_type)
{
	int i;

	i = netdev_lock_pos(dev_type);
	lockdep_set_class_and_name(lock, &netdev_xmit_lock_key[i],
				   netdev_lock_name[i]);
}

static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
{
	int i;

	i = netdev_lock_pos(dev->type);
	lockdep_set_class_and_name(&dev->addr_list_lock,
				   &netdev_addr_lock_key[i],
				   netdev_lock_name[i]);
}
#else
static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
						 unsigned short dev_type)
{
}
static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
{
}
#endif

/*******************************************************************************

		Protocol management and registration routines

*******************************************************************************/

/*
 *	Add a protocol ID to the list. Now that the input handler is
 *	smarter we can dispense with all the messy stuff that used to be
 *	here.
 *
 *	BEWARE!!! Protocol handlers, mangling input packets,
 *	MUST BE last in hash buckets and checking protocol handlers
 *	MUST start from promiscuous ptype_all chain in net_bh.
 *	It is true now, do not change it.
 *	Explanation follows: if protocol handler, mangling packet, will
 *	be the first on list, it is not able to sense, that packet
 *	is cloned and should be copied-on-write, so that it will
 *	change it and subsequent readers will get broken packet.
 *							--ANK (980803)
 */

static inline struct list_head *ptype_head(const struct packet_type *pt)
{
	if (pt->type == htons(ETH_P_ALL))
		return pt->dev ? &pt->dev->ptype_all : &ptype_all;
	else
		return pt->dev ? &pt->dev->ptype_specific :
				 &ptype_base[ntohs(pt->type) & PTYPE_HASH_MASK];
}

/**
 *	dev_add_pack - add packet handler
 *	@pt: packet type declaration
 *
 *	Add a protocol handler to the networking stack. The passed &packet_type
 *	is linked into kernel lists and may not be freed until it has been
 *	removed from the kernel lists.
 *
 *	This call does not sleep and therefore cannot guarantee that all
 *	CPUs which are in the middle of receiving packets will see the new
 *	packet type (until the next received packet).
 */

void dev_add_pack(struct packet_type *pt)
{
	struct list_head *head = ptype_head(pt);

	spin_lock(&ptype_lock);
	list_add_rcu(&pt->list, head);
	spin_unlock(&ptype_lock);
}
EXPORT_SYMBOL(dev_add_pack);

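/* Illustrative sketch (not part of the kernel tree): a minimal tap that
 * registers a handler for every IPv4 frame.  The handler runs in
 * softirq context and must free the skbs it consumes:
 *
 *	static int my_rcv(struct sk_buff *skb, struct net_device *dev,
 *			  struct packet_type *pt, struct net_device *orig_dev)
 *	{
 *		kfree_skb(skb);
 *		return NET_RX_SUCCESS;
 *	}
 *
 *	static struct packet_type my_ptype __read_mostly = {
 *		.type = cpu_to_be16(ETH_P_IP),
 *		.func = my_rcv,
 *	};
 *
 *	dev_add_pack(&my_ptype);
 */
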
/**
 *	__dev_remove_pack	 - remove packet handler
 *	@pt: packet type declaration
 *
 *	Remove a protocol handler that was previously added to the kernel
 *	protocol handlers by dev_add_pack(). The passed &packet_type is removed
 *	from the kernel lists and can be freed or reused once this function
 *	returns.
 *
 *	The packet type might still be in use by receivers
 *	and must not be freed until after all the CPUs have gone
 *	through a quiescent state.
 */
void __dev_remove_pack(struct packet_type *pt)
{
	struct list_head *head = ptype_head(pt);
	struct packet_type *pt1;

	spin_lock(&ptype_lock);

	list_for_each_entry(pt1, head, list) {
		if (pt == pt1) {
			list_del_rcu(&pt->list);
			goto out;
		}
	}

	pr_warn("dev_remove_pack: %p not found\n", pt);
out:
	spin_unlock(&ptype_lock);
}
EXPORT_SYMBOL(__dev_remove_pack);

/**
 *	dev_remove_pack	 - remove packet handler
 *	@pt: packet type declaration
 *
 *	Remove a protocol handler that was previously added to the kernel
 *	protocol handlers by dev_add_pack(). The passed &packet_type is removed
 *	from the kernel lists and can be freed or reused once this function
 *	returns.
 *
 *	This call sleeps to guarantee that no CPU is looking at the packet
 *	type after return.
 */
void dev_remove_pack(struct packet_type *pt)
{
	__dev_remove_pack(pt);

	synchronize_net();
}
EXPORT_SYMBOL(dev_remove_pack);

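/* Continuing the sketch above: on module unload the tap is removed with
 * dev_remove_pack(&my_ptype).  Because that call has already waited for
 * a grace period via synchronize_net(), the packet_type and any data
 * its handler uses may then be freed safely.
 */
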

/**
 *	dev_add_offload - register offload handlers
 *	@po: protocol offload declaration
 *
 *	Add protocol offload handlers to the networking stack. The passed
 *	&proto_offload is linked into kernel lists and may not be freed until
 *	it has been removed from the kernel lists.
 *
 *	This call does not sleep and therefore cannot guarantee that all
 *	CPUs which are in the middle of receiving packets will see the new
 *	offload handlers (until the next received packet).
 */
void dev_add_offload(struct packet_offload *po)
{
	struct packet_offload *elem;

	spin_lock(&offload_lock);
	list_for_each_entry(elem, &offload_base, list) {
		if (po->priority < elem->priority)
			break;
	}
	list_add_rcu(&po->list, elem->list.prev);
	spin_unlock(&offload_lock);
}
EXPORT_SYMBOL(dev_add_offload);

/**
 *	__dev_remove_offload	 - remove offload handler
 *	@po: packet offload declaration
 *
 *	Remove a protocol offload handler that was previously added to the
 *	kernel offload handlers by dev_add_offload(). The passed &offload_type
 *	is removed from the kernel lists and can be freed or reused once this
 *	function returns.
 *
 *	The packet type might still be in use by receivers
 *	and must not be freed until after all the CPUs have gone
 *	through a quiescent state.
 */
static void __dev_remove_offload(struct packet_offload *po)
{
	struct list_head *head = &offload_base;
	struct packet_offload *po1;

	spin_lock(&offload_lock);

	list_for_each_entry(po1, head, list) {
		if (po == po1) {
			list_del_rcu(&po->list);
			goto out;
		}
	}

	pr_warn("dev_remove_offload: %p not found\n", po);
out:
	spin_unlock(&offload_lock);
}

/**
 *	dev_remove_offload	 - remove packet offload handler
 *	@po: packet offload declaration
 *
 *	Remove a packet offload handler that was previously added to the kernel
 *	offload handlers by dev_add_offload(). The passed &offload_type is
 *	removed from the kernel lists and can be freed or reused once this
 *	function returns.
 *
 *	This call sleeps to guarantee that no CPU is looking at the packet
 *	type after return.
 */
void dev_remove_offload(struct packet_offload *po)
{
	__dev_remove_offload(po);

	synchronize_net();
}
EXPORT_SYMBOL(dev_remove_offload);

/******************************************************************************

		      Device Boot-time Settings Routines

*******************************************************************************/

/* Boot time configuration table */
static struct netdev_boot_setup dev_boot_setup[NETDEV_BOOT_SETUP_MAX];

/**
 *	netdev_boot_setup_add	- add new setup entry
 *	@name: name of the device
 *	@map: configured settings for the device
 *
 *	Adds new setup entry to the dev_boot_setup list.  The function
 *	returns 0 on error and 1 on success.  This is a generic routine to
 *	all netdevices.
 */
static int netdev_boot_setup_add(char *name, struct ifmap *map)
{
	struct netdev_boot_setup *s;
	int i;

	s = dev_boot_setup;
	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
		if (s[i].name[0] == '\0' || s[i].name[0] == ' ') {
			memset(s[i].name, 0, sizeof(s[i].name));
			strlcpy(s[i].name, name, IFNAMSIZ);
			memcpy(&s[i].map, map, sizeof(s[i].map));
			break;
		}
	}

	return i >= NETDEV_BOOT_SETUP_MAX ? 0 : 1;
}

/**
 *	netdev_boot_setup_check	- check boot time settings
 *	@dev: the netdevice
 *
 *	Check boot time settings for the device.
 *	The found settings are set for the device to be used
 *	later in the device probing.
 *	Returns 0 if no settings found, 1 if they are.
 */
int netdev_boot_setup_check(struct net_device *dev)
{
	struct netdev_boot_setup *s = dev_boot_setup;
	int i;

	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
		if (s[i].name[0] != '\0' && s[i].name[0] != ' ' &&
		    !strcmp(dev->name, s[i].name)) {
			dev->irq = s[i].map.irq;
			dev->base_addr = s[i].map.base_addr;
			dev->mem_start = s[i].map.mem_start;
			dev->mem_end = s[i].map.mem_end;
			return 1;
		}
	}
	return 0;
}
EXPORT_SYMBOL(netdev_boot_setup_check);


/**
 *	netdev_boot_base	- get address from boot time settings
 *	@prefix: prefix for network device
 *	@unit: id for network device
 *
 *	Check boot time settings for the base address of device.
 *	The found settings are set for the device to be used
 *	later in the device probing.
 *	Returns 0 if no settings found.
 */
unsigned long netdev_boot_base(const char *prefix, int unit)
{
	const struct netdev_boot_setup *s = dev_boot_setup;
	char name[IFNAMSIZ];
	int i;

	sprintf(name, "%s%d", prefix, unit);

	/*
	 * If device already registered then return base of 1
	 * to indicate not to probe for this interface
	 */
	if (__dev_get_by_name(&init_net, name))
		return 1;

	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++)
		if (!strcmp(name, s[i].name))
			return s[i].map.base_addr;
	return 0;
}

/*
 * Saves at boot time configured settings for any netdevice.
 */
int __init netdev_boot_setup(char *str)
{
	int ints[5];
	struct ifmap map;

	str = get_options(str, ARRAY_SIZE(ints), ints);
	if (!str || !*str)
		return 0;

	/* Save settings */
	memset(&map, 0, sizeof(map));
	if (ints[0] > 0)
		map.irq = ints[1];
	if (ints[0] > 1)
		map.base_addr = ints[2];
	if (ints[0] > 2)
		map.mem_start = ints[3];
	if (ints[0] > 3)
		map.mem_end = ints[4];

	/* Add new entry to the list */
	return netdev_boot_setup_add(str, &map);
}

__setup("netdev=", netdev_boot_setup);

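/* For example (assuming a driver that honours boot-time settings), the
 * kernel command line
 *
 *	netdev=9,0x300,0xd0000,0xd4000,eth0
 *
 * is parsed above into map.irq = 9, map.base_addr = 0x300,
 * map.mem_start = 0xd0000 and map.mem_end = 0xd4000, stored under the
 * name "eth0" for netdev_boot_setup_check() to pick up during probe.
 */
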
/*******************************************************************************

			Device Interface Subroutines

*******************************************************************************/

/**
 *	dev_get_iflink	- get 'iflink' value of an interface
 *	@dev: targeted interface
 *
 *	Indicates the ifindex the interface is linked to.
 *	Physical interfaces have the same 'ifindex' and 'iflink' values.
 */

int dev_get_iflink(const struct net_device *dev)
{
	if (dev->netdev_ops && dev->netdev_ops->ndo_get_iflink)
		return dev->netdev_ops->ndo_get_iflink(dev);

	return dev->ifindex;
}
EXPORT_SYMBOL(dev_get_iflink);

/**
 *	__dev_get_by_name	- find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name. Must be called under RTNL semaphore
 *	or @dev_base_lock. If the name is found a pointer to the device
 *	is returned. If the name is not found then %NULL is returned. The
 *	reference counters are not incremented so the caller must be
 *	careful with locks.
 */

struct net_device *__dev_get_by_name(struct net *net, const char *name)
{
	struct net_device *dev;
	struct hlist_head *head = dev_name_hash(net, name);

	hlist_for_each_entry(dev, head, name_hlist)
		if (!strncmp(dev->name, name, IFNAMSIZ))
			return dev;

	return NULL;
}
EXPORT_SYMBOL(__dev_get_by_name);

/**
 *	dev_get_by_name_rcu	- find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name.
 *	If the name is found a pointer to the device is returned.
 *	If the name is not found then %NULL is returned.
 *	The reference counters are not incremented so the caller must be
 *	careful with locks. The caller must hold RCU lock.
 */

struct net_device *dev_get_by_name_rcu(struct net *net, const char *name)
{
	struct net_device *dev;
	struct hlist_head *head = dev_name_hash(net, name);

	hlist_for_each_entry_rcu(dev, head, name_hlist)
		if (!strncmp(dev->name, name, IFNAMSIZ))
			return dev;

	return NULL;
}
EXPORT_SYMBOL(dev_get_by_name_rcu);

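/* Illustrative use (not part of this file): the lockless lookup is only
 * valid while the RCU read lock is held:
 *
 *	rcu_read_lock();
 *	dev = dev_get_by_name_rcu(net, "eth0");
 *	if (dev)
 *		pr_info("ifindex=%d\n", dev->ifindex);
 *	rcu_read_unlock();
 *
 * The pointer must not be used after rcu_read_unlock() unless a
 * reference was taken with dev_hold() inside the read section.
 */
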
/**
 *	dev_get_by_name		- find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name. This can be called from any
 *	context and does its own locking. The returned handle has
 *	the usage count incremented and the caller must use dev_put() to
 *	release it when it is no longer needed. %NULL is returned if no
 *	matching device is found.
 */

struct net_device *dev_get_by_name(struct net *net, const char *name)
{
	struct net_device *dev;

	rcu_read_lock();
	dev = dev_get_by_name_rcu(net, name);
	if (dev)
		dev_hold(dev);
	rcu_read_unlock();
	return dev;
}
EXPORT_SYMBOL(dev_get_by_name);

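/* Illustrative use: the refcounted variant may be called from any
 * context, but every successful lookup must be paired with dev_put():
 *
 *	dev = dev_get_by_name(net, "eth0");
 *	if (dev) {
 *		... use dev ...
 *		dev_put(dev);
 *	}
 */
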
/**
 *	__dev_get_by_index - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns %NULL if the device
 *	is not found or a pointer to the device. The device has not
 *	had its reference counter increased so the caller must be careful
 *	about locking. The caller must hold either the RTNL semaphore
 *	or @dev_base_lock.
 */

struct net_device *__dev_get_by_index(struct net *net, int ifindex)
{
	struct net_device *dev;
	struct hlist_head *head = dev_index_hash(net, ifindex);

	hlist_for_each_entry(dev, head, index_hlist)
		if (dev->ifindex == ifindex)
			return dev;

	return NULL;
}
EXPORT_SYMBOL(__dev_get_by_index);

/**
 *	dev_get_by_index_rcu - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns %NULL if the device
 *	is not found or a pointer to the device. The device has not
 *	had its reference counter increased so the caller must be careful
 *	about locking. The caller must hold RCU lock.
 */

struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex)
{
	struct net_device *dev;
	struct hlist_head *head = dev_index_hash(net, ifindex);

	hlist_for_each_entry_rcu(dev, head, index_hlist)
		if (dev->ifindex == ifindex)
			return dev;

	return NULL;
}
EXPORT_SYMBOL(dev_get_by_index_rcu);


/**
 *	dev_get_by_index - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns NULL if the device
 *	is not found or a pointer to the device. The device returned has
 *	had a reference added and the pointer is safe until the user calls
 *	dev_put() to indicate they have finished with it.
 */

struct net_device *dev_get_by_index(struct net *net, int ifindex)
{
	struct net_device *dev;

	rcu_read_lock();
	dev = dev_get_by_index_rcu(net, ifindex);
	if (dev)
		dev_hold(dev);
	rcu_read_unlock();
	return dev;
}
EXPORT_SYMBOL(dev_get_by_index);

/**
 *	netdev_get_name - get a netdevice name, knowing its ifindex.
 *	@net: network namespace
 *	@name: a pointer to the buffer where the name will be stored.
 *	@ifindex: the ifindex of the interface to get the name from.
 *
 *	The use of raw_seqcount_begin() and cond_resched() before
 *	retrying is required as we want to give the writers a chance
 *	to complete when CONFIG_PREEMPT is not set.
 */
int netdev_get_name(struct net *net, char *name, int ifindex)
{
	struct net_device *dev;
	unsigned int seq;

retry:
	seq = raw_seqcount_begin(&devnet_rename_seq);
	rcu_read_lock();
	dev = dev_get_by_index_rcu(net, ifindex);
	if (!dev) {
		rcu_read_unlock();
		return -ENODEV;
	}

	strcpy(name, dev->name);
	rcu_read_unlock();
	if (read_seqcount_retry(&devnet_rename_seq, seq)) {
		cond_resched();
		goto retry;
	}

	return 0;
}

/**
 *	dev_getbyhwaddr_rcu - find a device by its hardware address
 *	@net: the applicable net namespace
 *	@type: media type of device
 *	@ha: hardware address
 *
 *	Search for an interface by MAC address. Returns NULL if the device
 *	is not found or a pointer to the device.
 *	The caller must hold RCU or RTNL.
 *	The returned device has not had its ref count increased
 *	and the caller must therefore be careful about locking.
 */

struct net_device *dev_getbyhwaddr_rcu(struct net *net, unsigned short type,
				       const char *ha)
{
	struct net_device *dev;

	for_each_netdev_rcu(net, dev)
		if (dev->type == type &&
		    !memcmp(dev->dev_addr, ha, dev->addr_len))
			return dev;

	return NULL;
}
EXPORT_SYMBOL(dev_getbyhwaddr_rcu);

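/* Illustrative sketch (not part of this file): looking up an Ethernet
 * device by MAC address under RCU, taking a reference before the read
 * section ends:
 *
 *	static const char mac[ETH_ALEN] = {
 *		0x00, 0x11, 0x22, 0x33, 0x44, 0x55
 *	};
 *	struct net_device *dev;
 *
 *	rcu_read_lock();
 *	dev = dev_getbyhwaddr_rcu(net, ARPHRD_ETHER, mac);
 *	if (dev)
 *		dev_hold(dev);
 *	rcu_read_unlock();
 */
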
struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type)
{
	struct net_device *dev;

	ASSERT_RTNL();
	for_each_netdev(net, dev)
		if (dev->type == type)
			return dev;

	return NULL;
}
EXPORT_SYMBOL(__dev_getfirstbyhwtype);

struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type)
{
	struct net_device *dev, *ret = NULL;

	rcu_read_lock();
	for_each_netdev_rcu(net, dev)
		if (dev->type == type) {
			dev_hold(dev);
			ret = dev;
			break;
		}
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL(dev_getfirstbyhwtype);

/**
 *	__dev_get_by_flags - find any device with given flags
 *	@net: the applicable net namespace
 *	@if_flags: IFF_* values
 *	@mask: bitmask of bits in if_flags to check
 *
 *	Search for any interface with the given flags. Returns NULL if a device
 *	is not found or a pointer to the device. Must be called inside
 *	rtnl_lock(), and result refcount is unchanged.
 */

struct net_device *__dev_get_by_flags(struct net *net, unsigned short if_flags,
				      unsigned short mask)
{
	struct net_device *dev, *ret;

	ASSERT_RTNL();

	ret = NULL;
	for_each_netdev(net, dev) {
		if (((dev->flags ^ if_flags) & mask) == 0) {
			ret = dev;
			break;
		}
	}
	return ret;
}
EXPORT_SYMBOL(__dev_get_by_flags);

/**
 *	dev_valid_name - check if name is okay for network device
 *	@name: name string
 *
 *	Network device names need to be valid file names to
 *	allow sysfs to work.  We also disallow any kind of
 *	whitespace.
 */
bool dev_valid_name(const char *name)
{
	if (*name == '\0')
		return false;
	if (strlen(name) >= IFNAMSIZ)
		return false;
	if (!strcmp(name, ".") || !strcmp(name, ".."))
		return false;

	while (*name) {
		if (*name == '/' || *name == ':' || isspace(*name))
			return false;
		name++;
	}
	return true;
}
EXPORT_SYMBOL(dev_valid_name);

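/* For example: dev_valid_name("eth0") and dev_valid_name("tap%d") are
 * true, while "" (empty), ".", "..", "a/b", "a:b", names containing
 * whitespace, and names of IFNAMSIZ (16) or more characters are all
 * rejected.  Format characters such as '%' are not rejected here; they
 * are validated later by __dev_alloc_name().
 */
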
/**
 *	__dev_alloc_name - allocate a name for a device
 *	@net: network namespace to allocate the device name in
 *	@name: name format string
 *	@buf:  scratch buffer and result name string
 *
 *	Passed a format string - eg "lt%d" it will try and find a suitable
 *	id. It scans list of devices to build up a free map, then chooses
 *	the first empty slot. The caller must hold the dev_base or rtnl lock
 *	while allocating the name and adding the device in order to avoid
 *	duplicates.
 *	Limited to bits_per_byte * page size devices (ie 32K on most platforms).
 *	Returns the number of the unit assigned or a negative errno code.
 */

static int __dev_alloc_name(struct net *net, const char *name, char *buf)
{
	int i = 0;
	const char *p;
	const int max_netdevices = 8*PAGE_SIZE;
	unsigned long *inuse;
	struct net_device *d;

	p = strnchr(name, IFNAMSIZ-1, '%');
	if (p) {
		/*
		 * Verify the string as this thing may have come from
		 * the user.  There must be either one "%d" and no other "%"
		 * characters.
		 */
		if (p[1] != 'd' || strchr(p + 2, '%'))
			return -EINVAL;

		/* Use one page as a bit array of possible slots */
		inuse = (unsigned long *) get_zeroed_page(GFP_ATOMIC);
		if (!inuse)
			return -ENOMEM;

		for_each_netdev(net, d) {
			if (!sscanf(d->name, name, &i))
				continue;
			if (i < 0 || i >= max_netdevices)
				continue;

			/* avoid cases where sscanf is not exact inverse of printf */
			snprintf(buf, IFNAMSIZ, name, i);
			if (!strncmp(buf, d->name, IFNAMSIZ))
				set_bit(i, inuse);
		}

		i = find_first_zero_bit(inuse, max_netdevices);
		free_page((unsigned long) inuse);
	}

	if (buf != name)
		snprintf(buf, IFNAMSIZ, name, i);
	if (!__dev_get_by_name(net, buf))
		return i;

	/* It is possible to run out of possible slots
	 * when the name is long and there isn't enough space left
	 * for the digits, or if all bits are used.
	 */
	return -ENFILE;
}

/**
 *	dev_alloc_name - allocate a name for a device
 *	@dev: device
 *	@name: name format string
 *
 *	Passed a format string - eg "lt%d" it will try and find a suitable
 *	id. It scans list of devices to build up a free map, then chooses
 *	the first empty slot. The caller must hold the dev_base or rtnl lock
 *	while allocating the name and adding the device in order to avoid
 *	duplicates.
 *	Limited to bits_per_byte * page size devices (ie 32K on most platforms).
 *	Returns the number of the unit assigned or a negative errno code.
 */

int dev_alloc_name(struct net_device *dev, const char *name)
{
	char buf[IFNAMSIZ];
	struct net *net;
	int ret;

	BUG_ON(!dev_net(dev));
	net = dev_net(dev);
	ret = __dev_alloc_name(net, name, buf);
	if (ret >= 0)
		strlcpy(dev->name, buf, IFNAMSIZ);
	return ret;
}
EXPORT_SYMBOL(dev_alloc_name);

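/* Illustrative use: drivers typically pass a format such as "eth%d".
 * With eth0 and eth1 already registered, this would set dev->name to
 * "eth2" and return 2:
 *
 *	err = dev_alloc_name(dev, "eth%d");
 *	if (err < 0)
 *		goto fail;
 */
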
static int dev_alloc_name_ns(struct net *net,
			     struct net_device *dev,
			     const char *name)
{
	char buf[IFNAMSIZ];
	int ret;

	ret = __dev_alloc_name(net, name, buf);
	if (ret >= 0)
		strlcpy(dev->name, buf, IFNAMSIZ);
	return ret;
}

static int dev_get_valid_name(struct net *net,
			      struct net_device *dev,
			      const char *name)
{
	BUG_ON(!net);

	if (!dev_valid_name(name))
		return -EINVAL;

	if (strchr(name, '%'))
		return dev_alloc_name_ns(net, dev, name);
	else if (__dev_get_by_name(net, name))
		return -EEXIST;
	else if (dev->name != name)
		strlcpy(dev->name, name, IFNAMSIZ);

	return 0;
}

/**
 *	dev_change_name - change name of a device
 *	@dev: device
 *	@newname: name (or format string) must be at least IFNAMSIZ
 *
 *	Change name of a device, can pass format strings "eth%d"
 *	for wildcarding.
 */
int dev_change_name(struct net_device *dev, const char *newname)
{
	unsigned char old_assign_type;
	char oldname[IFNAMSIZ];
	int err = 0;
	int ret;
	struct net *net;

	ASSERT_RTNL();
	BUG_ON(!dev_net(dev));

	net = dev_net(dev);
	if (dev->flags & IFF_UP)
		return -EBUSY;

	write_seqcount_begin(&devnet_rename_seq);

	if (strncmp(newname, dev->name, IFNAMSIZ) == 0) {
		write_seqcount_end(&devnet_rename_seq);
		return 0;
	}

	memcpy(oldname, dev->name, IFNAMSIZ);

	err = dev_get_valid_name(net, dev, newname);
	if (err < 0) {
		write_seqcount_end(&devnet_rename_seq);
		return err;
	}

	if (oldname[0] && !strchr(oldname, '%'))
		netdev_info(dev, "renamed from %s\n", oldname);

	old_assign_type = dev->name_assign_type;
	dev->name_assign_type = NET_NAME_RENAMED;

rollback:
	ret = device_rename(&dev->dev, dev->name);
	if (ret) {
		memcpy(dev->name, oldname, IFNAMSIZ);
		dev->name_assign_type = old_assign_type;
		write_seqcount_end(&devnet_rename_seq);
		return ret;
	}

	write_seqcount_end(&devnet_rename_seq);

	netdev_adjacent_rename_links(dev, oldname);

	write_lock_bh(&dev_base_lock);
	hlist_del_rcu(&dev->name_hlist);
	write_unlock_bh(&dev_base_lock);

	synchronize_rcu();

	write_lock_bh(&dev_base_lock);
	hlist_add_head_rcu(&dev->name_hlist, dev_name_hash(net, dev->name));
	write_unlock_bh(&dev_base_lock);

	ret = call_netdevice_notifiers(NETDEV_CHANGENAME, dev);
	ret = notifier_to_errno(ret);

	if (ret) {
		/* err >= 0 after dev_alloc_name() or stores the first errno */
		if (err >= 0) {
			err = ret;
			write_seqcount_begin(&devnet_rename_seq);
			memcpy(dev->name, oldname, IFNAMSIZ);
			memcpy(oldname, newname, IFNAMSIZ);
			dev->name_assign_type = old_assign_type;
			old_assign_type = NET_NAME_RENAMED;
			goto rollback;
		} else {
			pr_err("%s: name change rollback failed: %d\n",
			       dev->name, ret);
		}
	}

	return err;
}

/**
 *	dev_set_alias - change ifalias of a device
 *	@dev: device
 *	@alias: name up to IFALIASZ
 *	@len: limit of bytes to copy from info
 *
 *	Set ifalias for a device.
 */
int dev_set_alias(struct net_device *dev, const char *alias, size_t len)
{
	char *new_ifalias;

	ASSERT_RTNL();

	if (len >= IFALIASZ)
		return -EINVAL;

	if (!len) {
		kfree(dev->ifalias);
		dev->ifalias = NULL;
		return 0;
	}

	new_ifalias = krealloc(dev->ifalias, len + 1, GFP_KERNEL);
	if (!new_ifalias)
		return -ENOMEM;
	dev->ifalias = new_ifalias;

	strlcpy(dev->ifalias, alias, len+1);
	return len;
}


/**
 *	netdev_features_change - device changes features
 *	@dev: device to cause notification
 *
 *	Called to indicate a device has changed features.
 */
void netdev_features_change(struct net_device *dev)
{
	call_netdevice_notifiers(NETDEV_FEAT_CHANGE, dev);
}
EXPORT_SYMBOL(netdev_features_change);

/**
 *	netdev_state_change - device changes state
 *	@dev: device to cause notification
 *
 *	Called to indicate a device has changed state. This function calls
 *	the notifier chains for netdev_chain and sends a NEWLINK message
 *	to the routing socket.
 */
void netdev_state_change(struct net_device *dev)
{
	if (dev->flags & IFF_UP) {
		struct netdev_notifier_change_info change_info;

		change_info.flags_changed = 0;
		call_netdevice_notifiers_info(NETDEV_CHANGE, dev,
					      &change_info.info);
		rtmsg_ifinfo(RTM_NEWLINK, dev, 0, GFP_KERNEL);
	}
}
EXPORT_SYMBOL(netdev_state_change);

/**
 *	netdev_notify_peers - notify network peers about existence of @dev
 *	@dev: network device
 *
 *	Generate traffic such that interested network peers are aware of
 *	@dev, such as by generating a gratuitous ARP. This may be used when
 *	a device wants to inform the rest of the network about some sort of
 *	reconfiguration such as a failover event or virtual machine
 *	migration.
 */
void netdev_notify_peers(struct net_device *dev)
{
	rtnl_lock();
	call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, dev);
	rtnl_unlock();
}
EXPORT_SYMBOL(netdev_notify_peers);

static int __dev_open(struct net_device *dev)
{
	const struct net_device_ops *ops = dev->netdev_ops;
	int ret;

	ASSERT_RTNL();

	if (!netif_device_present(dev))
		return -ENODEV;

	/* Block netpoll from trying to do any rx path servicing.
	 * If we don't do this there is a chance ndo_poll_controller
	 * or ndo_poll may be running while we open the device
	 */
	netpoll_poll_disable(dev);

	ret = call_netdevice_notifiers(NETDEV_PRE_UP, dev);
	ret = notifier_to_errno(ret);
	if (ret)
		return ret;

	set_bit(__LINK_STATE_START, &dev->state);

	if (ops->ndo_validate_addr)
		ret = ops->ndo_validate_addr(dev);

	if (!ret && ops->ndo_open)
		ret = ops->ndo_open(dev);

	netpoll_poll_enable(dev);

	if (ret)
		clear_bit(__LINK_STATE_START, &dev->state);
	else {
		dev->flags |= IFF_UP;
		dev_set_rx_mode(dev);
		dev_activate(dev);
		add_device_randomness(dev->dev_addr, dev->addr_len);
	}

	return ret;
}

/**
 *	dev_open	- prepare an interface for use.
 *	@dev:	device to open
 *
 *	Takes a device from down to up state. The device's private open
 *	function is invoked and then the multicast lists are loaded. Finally
 *	the device is moved into the up state and a %NETDEV_UP message is
 *	sent to the netdev notifier chain.
 *
 *	Calling this function on an active interface is a nop. On a failure
 *	a negative errno code is returned.
 */
int dev_open(struct net_device *dev)
{
	int ret;

	if (dev->flags & IFF_UP)
		return 0;

	ret = __dev_open(dev);
	if (ret < 0)
		return ret;

	rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING, GFP_KERNEL);
	call_netdevice_notifiers(NETDEV_UP, dev);

	return ret;
}
EXPORT_SYMBOL(dev_open);

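/* Illustrative sketch: administratively bringing an interface up and
 * back down from kernel code.  Both calls expect the RTNL lock to be
 * held by the caller:
 *
 *	rtnl_lock();
 *	err = dev_open(dev);
 *	...
 *	dev_close(dev);
 *	rtnl_unlock();
 */
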
static int __dev_close_many(struct list_head *head)
{
	struct net_device *dev;

	ASSERT_RTNL();
	might_sleep();

	list_for_each_entry(dev, head, close_list) {
		/* Temporarily disable netpoll until the interface is down */
		netpoll_poll_disable(dev);

		call_netdevice_notifiers(NETDEV_GOING_DOWN, dev);

		clear_bit(__LINK_STATE_START, &dev->state);

		/* Synchronize to scheduled poll. We cannot touch poll list, it
		 * can be even on different cpu. So just clear netif_running().
		 *
		 * dev->stop() will invoke napi_disable() on all of its
		 * napi_struct instances on this device.
		 */
		smp_mb__after_atomic(); /* Commit netif_running(). */
	}

	dev_deactivate_many(head);

	list_for_each_entry(dev, head, close_list) {
		const struct net_device_ops *ops = dev->netdev_ops;

		/*
		 *	Call the device specific close. This cannot fail.
		 *	Only if device is UP
		 *
		 *	We allow it to be called even after a DETACH hot-plug
		 *	event.
		 */
		if (ops->ndo_stop)
			ops->ndo_stop(dev);

		dev->flags &= ~IFF_UP;
		netpoll_poll_enable(dev);
	}

	return 0;
}

static int __dev_close(struct net_device *dev)
{
	int retval;
	LIST_HEAD(single);

	list_add(&dev->close_list, &single);
	retval = __dev_close_many(&single);
	list_del(&single);

	return retval;
}

int dev_close_many(struct list_head *head, bool unlink)
{
	struct net_device *dev, *tmp;

	/* Remove the devices that don't need to be closed */
	list_for_each_entry_safe(dev, tmp, head, close_list)
		if (!(dev->flags & IFF_UP))
			list_del_init(&dev->close_list);

	__dev_close_many(head);

	list_for_each_entry_safe(dev, tmp, head, close_list) {
		rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING, GFP_KERNEL);
		call_netdevice_notifiers(NETDEV_DOWN, dev);
		if (unlink)
			list_del_init(&dev->close_list);
	}

	return 0;
}
EXPORT_SYMBOL(dev_close_many);

/**
 *	dev_close - shutdown an interface.
 *	@dev: device to shutdown
 *
 *	This function moves an active device into down state. A
 *	%NETDEV_GOING_DOWN is sent to the netdev notifier chain. The device
 *	is then deactivated and finally a %NETDEV_DOWN is sent to the notifier
 *	chain.
 */
int dev_close(struct net_device *dev)
{
	if (dev->flags & IFF_UP) {
		LIST_HEAD(single);

		list_add(&dev->close_list, &single);
		dev_close_many(&single, true);
		list_del(&single);
	}
	return 0;
}
EXPORT_SYMBOL(dev_close);


/**
 *	dev_disable_lro - disable Large Receive Offload on a device
 *	@dev: device
 *
 *	Disable Large Receive Offload (LRO) on a net device.  Must be
 *	called under RTNL.  This is needed if received packets may be
 *	forwarded to another interface.
 */
void dev_disable_lro(struct net_device *dev)
{
	struct net_device *lower_dev;
	struct list_head *iter;

	dev->wanted_features &= ~NETIF_F_LRO;
	netdev_update_features(dev);

	if (unlikely(dev->features & NETIF_F_LRO))
		netdev_WARN(dev, "failed to disable LRO!\n");

	netdev_for_each_lower_dev(dev, lower_dev, iter)
		dev_disable_lro(lower_dev);
}
EXPORT_SYMBOL(dev_disable_lro);

static int call_netdevice_notifier(struct notifier_block *nb, unsigned long val,
				   struct net_device *dev)
{
	struct netdev_notifier_info info;

	netdev_notifier_info_init(&info, dev);
	return nb->notifier_call(nb, val, &info);
}

static int dev_boot_phase = 1;

/**
 *	register_netdevice_notifier - register a network notifier block
 *	@nb: notifier
 *
 *	Register a notifier to be called when network device events occur.
 *	The notifier passed is linked into the kernel structures and must
 *	not be reused until it has been unregistered. A negative errno code
 *	is returned on a failure.
 *
 *	When registered, all registration and up events are replayed
 *	to the new notifier to allow the device to have a race-free
 *	view of the network device list.
 */

int register_netdevice_notifier(struct notifier_block *nb)
{
	struct net_device *dev;
	struct net_device *last;
	struct net *net;
	int err;

	rtnl_lock();
	err = raw_notifier_chain_register(&netdev_chain, nb);
	if (err)
		goto unlock;
	if (dev_boot_phase)
		goto unlock;
	for_each_net(net) {
		for_each_netdev(net, dev) {
			err = call_netdevice_notifier(nb, NETDEV_REGISTER, dev);
			err = notifier_to_errno(err);
			if (err)
				goto rollback;

			if (!(dev->flags & IFF_UP))
				continue;

			call_netdevice_notifier(nb, NETDEV_UP, dev);
		}
	}

unlock:
	rtnl_unlock();
	return err;

rollback:
	last = dev;
	for_each_net(net) {
		for_each_netdev(net, dev) {
			if (dev == last)
				goto outroll;

			if (dev->flags & IFF_UP) {
				call_netdevice_notifier(nb, NETDEV_GOING_DOWN,
							dev);
				call_netdevice_notifier(nb, NETDEV_DOWN, dev);
			}
			call_netdevice_notifier(nb, NETDEV_UNREGISTER, dev);
		}
	}

outroll:
	raw_notifier_chain_unregister(&netdev_chain, nb);
	goto unlock;
}
EXPORT_SYMBOL(register_netdevice_notifier);

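/* Illustrative sketch (assumed names, not a real in-tree user): a
 * subsystem watching devices come and go.  Registration replays
 * NETDEV_REGISTER and NETDEV_UP for devices that already exist:
 *
 *	static int my_netdev_event(struct notifier_block *nb,
 *				   unsigned long event, void *ptr)
 *	{
 *		struct net_device *dev = netdev_notifier_info_to_dev(ptr);
 *
 *		switch (event) {
 *		case NETDEV_UP:
 *			pr_info("%s is up\n", dev->name);
 *			break;
 *		case NETDEV_UNREGISTER:
 *			pr_info("%s is gone\n", dev->name);
 *			break;
 *		}
 *		return NOTIFY_DONE;
 *	}
 *
 *	static struct notifier_block my_nb = {
 *		.notifier_call = my_netdev_event,
 *	};
 *
 *	register_netdevice_notifier(&my_nb);
 */
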
/**
 *	unregister_netdevice_notifier - unregister a network notifier block
 *	@nb: notifier
 *
 *	Unregister a notifier previously registered by
 *	register_netdevice_notifier(). The notifier is unlinked from the
 *	kernel structures and may then be reused. A negative errno code
 *	is returned on a failure.
 *
 *	After unregistering, unregister and down device events are synthesized
 *	for all devices on the device list to the removed notifier to remove
 *	the need for special case cleanup code.
 */

int unregister_netdevice_notifier(struct notifier_block *nb)
{
	struct net_device *dev;
	struct net *net;
	int err;

	rtnl_lock();
	err = raw_notifier_chain_unregister(&netdev_chain, nb);
	if (err)
		goto unlock;

	for_each_net(net) {
		for_each_netdev(net, dev) {
			if (dev->flags & IFF_UP) {
				call_netdevice_notifier(nb, NETDEV_GOING_DOWN,
							dev);
				call_netdevice_notifier(nb, NETDEV_DOWN, dev);
			}
			call_netdevice_notifier(nb, NETDEV_UNREGISTER, dev);
		}
	}
unlock:
	rtnl_unlock();
	return err;
}
EXPORT_SYMBOL(unregister_netdevice_notifier);

/**
 *	call_netdevice_notifiers_info - call all network notifier blocks
 *	@val: value passed unmodified to notifier function
 *	@dev: net_device pointer passed unmodified to notifier function
 *	@info: notifier information data
 *
 *	Call all network notifier blocks.  Parameters and return value
 *	are as for raw_notifier_call_chain().
 */

static int call_netdevice_notifiers_info(unsigned long val,
					 struct net_device *dev,
					 struct netdev_notifier_info *info)
{
	ASSERT_RTNL();
	netdev_notifier_info_init(info, dev);
	return raw_notifier_call_chain(&netdev_chain, val, info);
}

/**
 *	call_netdevice_notifiers - call all network notifier blocks
 *	@val: value passed unmodified to notifier function
 *	@dev: net_device pointer passed unmodified to notifier function
 *
 *	Call all network notifier blocks.  Parameters and return value
 *	are as for raw_notifier_call_chain().
 */

int call_netdevice_notifiers(unsigned long val, struct net_device *dev)
{
	struct netdev_notifier_info info;

	return call_netdevice_notifiers_info(val, dev, &info);
}
EXPORT_SYMBOL(call_netdevice_notifiers);

#ifdef CONFIG_NET_INGRESS
static struct static_key ingress_needed __read_mostly;

void net_inc_ingress_queue(void)
{
	static_key_slow_inc(&ingress_needed);
}
EXPORT_SYMBOL_GPL(net_inc_ingress_queue);

void net_dec_ingress_queue(void)
{
	static_key_slow_dec(&ingress_needed);
}
EXPORT_SYMBOL_GPL(net_dec_ingress_queue);
#endif

static struct static_key netstamp_needed __read_mostly;
#ifdef HAVE_JUMP_LABEL
/* We are not allowed to call static_key_slow_dec() from irq context
 * If net_disable_timestamp() is called from irq context, defer the
 * static_key_slow_dec() calls.
 */
static atomic_t netstamp_needed_deferred;
#endif

void net_enable_timestamp(void)
{
#ifdef HAVE_JUMP_LABEL
	int deferred = atomic_xchg(&netstamp_needed_deferred, 0);

	if (deferred) {
		while (--deferred)
			static_key_slow_dec(&netstamp_needed);
		return;
	}
#endif
	static_key_slow_inc(&netstamp_needed);
}
EXPORT_SYMBOL(net_enable_timestamp);

void net_disable_timestamp(void)
{
#ifdef HAVE_JUMP_LABEL
	if (in_interrupt()) {
		atomic_inc(&netstamp_needed_deferred);
		return;
	}
#endif
	static_key_slow_dec(&netstamp_needed);
}
EXPORT_SYMBOL(net_disable_timestamp);

3b098e2d 1686static inline void net_timestamp_set(struct sk_buff *skb)
1da177e4 1687{
588f0330 1688 skb->tstamp.tv64 = 0;
c5905afb 1689 if (static_key_false(&netstamp_needed))
a61bbcf2 1690 __net_timestamp(skb);
1da177e4
LT
1691}
1692
588f0330 1693#define net_timestamp_check(COND, SKB) \
c5905afb 1694 if (static_key_false(&netstamp_needed)) { \
588f0330
ED
1695 if ((COND) && !(SKB)->tstamp.tv64) \
1696 __net_timestamp(SKB); \
1697 } \
3b098e2d 1698
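/* Illustrative sketch, not part of the original file: the intended pairing
 * of the static-key helpers above. A feature that needs packet timestamps
 * holds a reference while active, so the fast paths only pay for
 * __net_timestamp() when somebody is listening. Names are invented.
 */
static void example_tstamp_feature_start(void)
{
	net_enable_timestamp();		/* may sleep, so not from irq context */
}

static void example_tstamp_feature_stop(void)
{
	net_disable_timestamp();	/* safe in irq: the dec is deferred */
}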
1ee481fb 1699bool is_skb_forwardable(struct net_device *dev, struct sk_buff *skb)
79b569f0
DL
1700{
1701 unsigned int len;
1702
1703 if (!(dev->flags & IFF_UP))
1704 return false;
1705
1706 len = dev->mtu + dev->hard_header_len + VLAN_HLEN;
1707 if (skb->len <= len)
1708 return true;
1709
 1710 /* if TSO is enabled, we don't care about the length, as the packet
 1711 * may be forwarded without being segmented first
1712 */
1713 if (skb_is_gso(skb))
1714 return true;
1715
1716 return false;
1717}
1ee481fb 1718EXPORT_SYMBOL_GPL(is_skb_forwardable);
79b569f0 1719
a0265d28
HX
1720int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
1721{
bbbf2df0
WB
1722 if (skb_orphan_frags(skb, GFP_ATOMIC) ||
1723 unlikely(!is_skb_forwardable(dev, skb))) {
a0265d28
HX
1724 atomic_long_inc(&dev->rx_dropped);
1725 kfree_skb(skb);
1726 return NET_RX_DROP;
1727 }
1728
1729 skb_scrub_packet(skb, true);
08b4b8ea 1730 skb->priority = 0;
a0265d28 1731 skb->protocol = eth_type_trans(skb, dev);
2c26d34b 1732 skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
a0265d28
HX
1733
1734 return 0;
1735}
1736EXPORT_SYMBOL_GPL(__dev_forward_skb);
1737
44540960
AB
1738/**
1739 * dev_forward_skb - loopback an skb to another netif
1740 *
1741 * @dev: destination network device
1742 * @skb: buffer to forward
1743 *
1744 * return values:
1745 * NET_RX_SUCCESS (no congestion)
6ec82562 1746 * NET_RX_DROP (packet was dropped, but freed)
44540960
AB
1747 *
1748 * dev_forward_skb can be used for injecting an skb from the
1749 * start_xmit function of one device into the receive queue
1750 * of another device.
1751 *
1752 * The receiving device may be in another namespace, so
1753 * we have to clear all information in the skb that could
1754 * impact namespace isolation.
1755 */
1756int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
1757{
a0265d28 1758 return __dev_forward_skb(dev, skb) ?: netif_rx_internal(skb);
44540960
AB
1759}
1760EXPORT_SYMBOL_GPL(dev_forward_skb);
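/* Illustrative sketch, not part of the original file: the veth-style use
 * case described in the kernel-doc above. A paired device's
 * ndo_start_xmit hands each skb to its peer's receive path.
 * example_get_peer() is a hypothetical helper.
 */
static struct net_device *example_get_peer(struct net_device *dev);

static netdev_tx_t example_pair_xmit(struct sk_buff *skb,
				     struct net_device *dev)
{
	struct net_device *peer = example_get_peer(dev);

	if (dev_forward_skb(peer, skb) == NET_RX_SUCCESS)
		dev->stats.tx_packets++;	/* skb is consumed either way */

	return NETDEV_TX_OK;
}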
1761
71d9dec2
CG
1762static inline int deliver_skb(struct sk_buff *skb,
1763 struct packet_type *pt_prev,
1764 struct net_device *orig_dev)
1765{
1080e512
MT
1766 if (unlikely(skb_orphan_frags(skb, GFP_ATOMIC)))
1767 return -ENOMEM;
71d9dec2
CG
1768 atomic_inc(&skb->users);
1769 return pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
1770}
1771
7866a621
SN
1772static inline void deliver_ptype_list_skb(struct sk_buff *skb,
1773 struct packet_type **pt,
fbcb2170
JP
1774 struct net_device *orig_dev,
1775 __be16 type,
7866a621
SN
1776 struct list_head *ptype_list)
1777{
1778 struct packet_type *ptype, *pt_prev = *pt;
1779
1780 list_for_each_entry_rcu(ptype, ptype_list, list) {
1781 if (ptype->type != type)
1782 continue;
1783 if (pt_prev)
fbcb2170 1784 deliver_skb(skb, pt_prev, orig_dev);
7866a621
SN
1785 pt_prev = ptype;
1786 }
1787 *pt = pt_prev;
1788}
1789
c0de08d0
EL
1790static inline bool skb_loop_sk(struct packet_type *ptype, struct sk_buff *skb)
1791{
a3d744e9 1792 if (!ptype->af_packet_priv || !skb->sk)
c0de08d0
EL
1793 return false;
1794
1795 if (ptype->id_match)
1796 return ptype->id_match(ptype, skb->sk);
1797 else if ((struct sock *)ptype->af_packet_priv == skb->sk)
1798 return true;
1799
1800 return false;
1801}
1802
1da177e4
LT
1803/*
1804 * Support routine. Sends outgoing frames to any network
1805 * taps currently in use.
1806 */
1807
f6a78bfc 1808static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
1da177e4
LT
1809{
1810 struct packet_type *ptype;
71d9dec2
CG
1811 struct sk_buff *skb2 = NULL;
1812 struct packet_type *pt_prev = NULL;
7866a621 1813 struct list_head *ptype_list = &ptype_all;
a61bbcf2 1814
1da177e4 1815 rcu_read_lock();
7866a621
SN
1816again:
1817 list_for_each_entry_rcu(ptype, ptype_list, list) {
1da177e4
LT
1818 /* Never send packets back to the socket
1819 * they originated from - MvS (miquels@drinkel.ow.org)
1820 */
7866a621
SN
1821 if (skb_loop_sk(ptype, skb))
1822 continue;
71d9dec2 1823
7866a621
SN
1824 if (pt_prev) {
1825 deliver_skb(skb2, pt_prev, skb->dev);
1826 pt_prev = ptype;
1827 continue;
1828 }
1da177e4 1829
7866a621
SN
1830 /* need to clone skb, done only once */
1831 skb2 = skb_clone(skb, GFP_ATOMIC);
1832 if (!skb2)
1833 goto out_unlock;
70978182 1834
7866a621 1835 net_timestamp_set(skb2);
1da177e4 1836
7866a621
SN
1837 /* skb->nh should be correctly
 1838 * set by the sender, so that the second statement is
1839 * just protection against buggy protocols.
1840 */
1841 skb_reset_mac_header(skb2);
1842
1843 if (skb_network_header(skb2) < skb2->data ||
1844 skb_network_header(skb2) > skb_tail_pointer(skb2)) {
1845 net_crit_ratelimited("protocol %04x is buggy, dev %s\n",
1846 ntohs(skb2->protocol),
1847 dev->name);
1848 skb_reset_network_header(skb2);
1da177e4 1849 }
7866a621
SN
1850
1851 skb2->transport_header = skb2->network_header;
1852 skb2->pkt_type = PACKET_OUTGOING;
1853 pt_prev = ptype;
1854 }
1855
1856 if (ptype_list == &ptype_all) {
1857 ptype_list = &dev->ptype_all;
1858 goto again;
1da177e4 1859 }
7866a621 1860out_unlock:
71d9dec2
CG
1861 if (pt_prev)
1862 pt_prev->func(skb2, skb->dev, pt_prev, skb->dev);
1da177e4
LT
1863 rcu_read_unlock();
1864}
1865
2c53040f
BH
1866/**
1867 * netif_setup_tc - Handle tc mappings on real_num_tx_queues change
4f57c087
JF
1868 * @dev: Network device
1869 * @txq: number of queues available
1870 *
 1871 * If real_num_tx_queues is changed the tc mappings may no longer be
 1872 * valid. To resolve this, verify that the tc mapping remains valid, and
 1873 * if not, NULL the mapping. With no priorities mapping to this
 1874 * offset/count pair, it will no longer be used. In the worst case, if
 1875 * TC0 is invalid nothing can be done, so disable priority mappings.
 1876 * It is expected that drivers will fix this mapping if they can
 1877 * before calling netif_set_real_num_tx_queues.
1878 */
bb134d22 1879static void netif_setup_tc(struct net_device *dev, unsigned int txq)
4f57c087
JF
1880{
1881 int i;
1882 struct netdev_tc_txq *tc = &dev->tc_to_txq[0];
1883
1884 /* If TC0 is invalidated disable TC mapping */
1885 if (tc->offset + tc->count > txq) {
7b6cd1ce 1886 pr_warn("Number of in use tx queues changed invalidating tc mappings. Priority traffic classification disabled!\n");
4f57c087
JF
1887 dev->num_tc = 0;
1888 return;
1889 }
1890
1891 /* Invalidated prio to tc mappings set to TC0 */
1892 for (i = 1; i < TC_BITMASK + 1; i++) {
1893 int q = netdev_get_prio_tc_map(dev, i);
1894
1895 tc = &dev->tc_to_txq[q];
1896 if (tc->offset + tc->count > txq) {
7b6cd1ce
JP
1897 pr_warn("Number of in use tx queues changed. Priority %i to tc mapping %i is no longer valid. Setting map to 0\n",
1898 i, q);
4f57c087
JF
1899 netdev_set_prio_tc_map(dev, i, 0);
1900 }
1901 }
1902}
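/* Worked example, illustrative only: with TC0 = {offset 0, count 4} and
 * TC1 = {offset 4, count 4}, shrinking txq to 6 leaves TC0 valid but
 * invalidates TC1 (4 + 4 > 6), so priorities mapped to TC1 are reset to
 * TC0; shrinking txq to 2 invalidates TC0 itself (0 + 4 > 2) and the tc
 * mapping is disabled entirely (num_tc = 0).
 */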
1903
537c00de
AD
1904#ifdef CONFIG_XPS
1905static DEFINE_MUTEX(xps_map_mutex);
1906#define xmap_dereference(P) \
1907 rcu_dereference_protected((P), lockdep_is_held(&xps_map_mutex))
1908
10cdc3f3
AD
1909static struct xps_map *remove_xps_queue(struct xps_dev_maps *dev_maps,
1910 int cpu, u16 index)
537c00de 1911{
10cdc3f3
AD
1912 struct xps_map *map = NULL;
1913 int pos;
537c00de 1914
10cdc3f3
AD
1915 if (dev_maps)
1916 map = xmap_dereference(dev_maps->cpu_map[cpu]);
537c00de 1917
10cdc3f3
AD
1918 for (pos = 0; map && pos < map->len; pos++) {
1919 if (map->queues[pos] == index) {
537c00de
AD
1920 if (map->len > 1) {
1921 map->queues[pos] = map->queues[--map->len];
1922 } else {
10cdc3f3 1923 RCU_INIT_POINTER(dev_maps->cpu_map[cpu], NULL);
537c00de
AD
1924 kfree_rcu(map, rcu);
1925 map = NULL;
1926 }
10cdc3f3 1927 break;
537c00de 1928 }
537c00de
AD
1929 }
1930
10cdc3f3
AD
1931 return map;
1932}
1933
024e9679 1934static void netif_reset_xps_queues_gt(struct net_device *dev, u16 index)
10cdc3f3
AD
1935{
1936 struct xps_dev_maps *dev_maps;
024e9679 1937 int cpu, i;
10cdc3f3
AD
1938 bool active = false;
1939
1940 mutex_lock(&xps_map_mutex);
1941 dev_maps = xmap_dereference(dev->xps_maps);
1942
1943 if (!dev_maps)
1944 goto out_no_maps;
1945
1946 for_each_possible_cpu(cpu) {
024e9679
AD
1947 for (i = index; i < dev->num_tx_queues; i++) {
1948 if (!remove_xps_queue(dev_maps, cpu, i))
1949 break;
1950 }
1951 if (i == dev->num_tx_queues)
10cdc3f3
AD
1952 active = true;
1953 }
1954
1955 if (!active) {
537c00de
AD
1956 RCU_INIT_POINTER(dev->xps_maps, NULL);
1957 kfree_rcu(dev_maps, rcu);
1958 }
1959
024e9679
AD
1960 for (i = index; i < dev->num_tx_queues; i++)
1961 netdev_queue_numa_node_write(netdev_get_tx_queue(dev, i),
1962 NUMA_NO_NODE);
1963
537c00de
AD
1964out_no_maps:
1965 mutex_unlock(&xps_map_mutex);
1966}
1967
01c5f864
AD
1968static struct xps_map *expand_xps_map(struct xps_map *map,
1969 int cpu, u16 index)
1970{
1971 struct xps_map *new_map;
1972 int alloc_len = XPS_MIN_MAP_ALLOC;
1973 int i, pos;
1974
1975 for (pos = 0; map && pos < map->len; pos++) {
1976 if (map->queues[pos] != index)
1977 continue;
1978 return map;
1979 }
1980
1981 /* Need to add queue to this CPU's existing map */
1982 if (map) {
1983 if (pos < map->alloc_len)
1984 return map;
1985
1986 alloc_len = map->alloc_len * 2;
1987 }
1988
 1989 /* Need to allocate a new map to store the queue on this CPU */
1990 new_map = kzalloc_node(XPS_MAP_SIZE(alloc_len), GFP_KERNEL,
1991 cpu_to_node(cpu));
1992 if (!new_map)
1993 return NULL;
1994
1995 for (i = 0; i < pos; i++)
1996 new_map->queues[i] = map->queues[i];
1997 new_map->alloc_len = alloc_len;
1998 new_map->len = pos;
1999
2000 return new_map;
2001}
2002
3573540c
MT
2003int netif_set_xps_queue(struct net_device *dev, const struct cpumask *mask,
2004 u16 index)
537c00de 2005{
01c5f864 2006 struct xps_dev_maps *dev_maps, *new_dev_maps = NULL;
537c00de 2007 struct xps_map *map, *new_map;
537c00de 2008 int maps_sz = max_t(unsigned int, XPS_DEV_MAPS_SIZE, L1_CACHE_BYTES);
01c5f864
AD
2009 int cpu, numa_node_id = -2;
2010 bool active = false;
537c00de
AD
2011
2012 mutex_lock(&xps_map_mutex);
2013
2014 dev_maps = xmap_dereference(dev->xps_maps);
2015
01c5f864
AD
2016 /* allocate memory for queue storage */
2017 for_each_online_cpu(cpu) {
2018 if (!cpumask_test_cpu(cpu, mask))
2019 continue;
2020
2021 if (!new_dev_maps)
2022 new_dev_maps = kzalloc(maps_sz, GFP_KERNEL);
2bb60cb9
AD
2023 if (!new_dev_maps) {
2024 mutex_unlock(&xps_map_mutex);
01c5f864 2025 return -ENOMEM;
2bb60cb9 2026 }
01c5f864
AD
2027
2028 map = dev_maps ? xmap_dereference(dev_maps->cpu_map[cpu]) :
2029 NULL;
2030
2031 map = expand_xps_map(map, cpu, index);
2032 if (!map)
2033 goto error;
2034
2035 RCU_INIT_POINTER(new_dev_maps->cpu_map[cpu], map);
2036 }
2037
2038 if (!new_dev_maps)
2039 goto out_no_new_maps;
2040
537c00de 2041 for_each_possible_cpu(cpu) {
01c5f864
AD
2042 if (cpumask_test_cpu(cpu, mask) && cpu_online(cpu)) {
2043 /* add queue to CPU maps */
2044 int pos = 0;
2045
2046 map = xmap_dereference(new_dev_maps->cpu_map[cpu]);
2047 while ((pos < map->len) && (map->queues[pos] != index))
2048 pos++;
2049
2050 if (pos == map->len)
2051 map->queues[map->len++] = index;
537c00de 2052#ifdef CONFIG_NUMA
537c00de
AD
2053 if (numa_node_id == -2)
2054 numa_node_id = cpu_to_node(cpu);
2055 else if (numa_node_id != cpu_to_node(cpu))
2056 numa_node_id = -1;
537c00de 2057#endif
01c5f864
AD
2058 } else if (dev_maps) {
2059 /* fill in the new device map from the old device map */
2060 map = xmap_dereference(dev_maps->cpu_map[cpu]);
2061 RCU_INIT_POINTER(new_dev_maps->cpu_map[cpu], map);
537c00de 2062 }
01c5f864 2063
537c00de
AD
2064 }
2065
01c5f864
AD
2066 rcu_assign_pointer(dev->xps_maps, new_dev_maps);
2067
537c00de 2068 /* Cleanup old maps */
01c5f864
AD
2069 if (dev_maps) {
2070 for_each_possible_cpu(cpu) {
2071 new_map = xmap_dereference(new_dev_maps->cpu_map[cpu]);
2072 map = xmap_dereference(dev_maps->cpu_map[cpu]);
2073 if (map && map != new_map)
2074 kfree_rcu(map, rcu);
2075 }
537c00de 2076
01c5f864 2077 kfree_rcu(dev_maps, rcu);
537c00de
AD
2078 }
2079
01c5f864
AD
2080 dev_maps = new_dev_maps;
2081 active = true;
537c00de 2082
01c5f864
AD
2083out_no_new_maps:
2084 /* update Tx queue numa node */
537c00de
AD
2085 netdev_queue_numa_node_write(netdev_get_tx_queue(dev, index),
2086 (numa_node_id >= 0) ? numa_node_id :
2087 NUMA_NO_NODE);
2088
01c5f864
AD
2089 if (!dev_maps)
2090 goto out_no_maps;
2091
2092 /* removes queue from unused CPUs */
2093 for_each_possible_cpu(cpu) {
2094 if (cpumask_test_cpu(cpu, mask) && cpu_online(cpu))
2095 continue;
2096
2097 if (remove_xps_queue(dev_maps, cpu, index))
2098 active = true;
2099 }
2100
2101 /* free map if not active */
2102 if (!active) {
2103 RCU_INIT_POINTER(dev->xps_maps, NULL);
2104 kfree_rcu(dev_maps, rcu);
2105 }
2106
2107out_no_maps:
537c00de
AD
2108 mutex_unlock(&xps_map_mutex);
2109
2110 return 0;
2111error:
01c5f864
AD
2112 /* remove any maps that we added */
2113 for_each_possible_cpu(cpu) {
2114 new_map = xmap_dereference(new_dev_maps->cpu_map[cpu]);
2115 map = dev_maps ? xmap_dereference(dev_maps->cpu_map[cpu]) :
2116 NULL;
2117 if (new_map && new_map != map)
2118 kfree(new_map);
2119 }
2120
537c00de
AD
2121 mutex_unlock(&xps_map_mutex);
2122
537c00de
AD
2123 kfree(new_dev_maps);
2124 return -ENOMEM;
2125}
2126EXPORT_SYMBOL(netif_set_xps_queue);
2127
2128#endif
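/* Illustrative sketch, not part of the original file: a driver pinning
 * each tx queue to the CPU of the same index at setup time, under
 * CONFIG_XPS and assuming at least one CPU per queue. Error handling is
 * elided and the function name is invented.
 */
static void example_setup_xps(struct net_device *dev)
{
	int i;

	for (i = 0; i < dev->real_num_tx_queues; i++)
		netif_set_xps_queue(dev, cpumask_of(i), i);
}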
f0796d5c
JF
2129/*
2130 * Routine to help set real_num_tx_queues. To avoid skbs mapped to queues
 2131 * greater than real_num_tx_queues, stale skbs on the qdisc must be flushed.
2132 */
e6484930 2133int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq)
f0796d5c 2134{
1d24eb48
TH
2135 int rc;
2136
e6484930
TH
2137 if (txq < 1 || txq > dev->num_tx_queues)
2138 return -EINVAL;
f0796d5c 2139
5c56580b
BH
2140 if (dev->reg_state == NETREG_REGISTERED ||
2141 dev->reg_state == NETREG_UNREGISTERING) {
e6484930
TH
2142 ASSERT_RTNL();
2143
1d24eb48
TH
2144 rc = netdev_queue_update_kobjects(dev, dev->real_num_tx_queues,
2145 txq);
bf264145
TH
2146 if (rc)
2147 return rc;
2148
4f57c087
JF
2149 if (dev->num_tc)
2150 netif_setup_tc(dev, txq);
2151
024e9679 2152 if (txq < dev->real_num_tx_queues) {
e6484930 2153 qdisc_reset_all_tx_gt(dev, txq);
024e9679
AD
2154#ifdef CONFIG_XPS
2155 netif_reset_xps_queues_gt(dev, txq);
2156#endif
2157 }
f0796d5c 2158 }
e6484930
TH
2159
2160 dev->real_num_tx_queues = txq;
2161 return 0;
f0796d5c
JF
2162}
2163EXPORT_SYMBOL(netif_set_real_num_tx_queues);
56079431 2164
a953be53 2165#ifdef CONFIG_SYSFS
62fe0b40
BH
2166/**
2167 * netif_set_real_num_rx_queues - set actual number of RX queues used
2168 * @dev: Network device
2169 * @rxq: Actual number of RX queues
2170 *
2171 * This must be called either with the rtnl_lock held or before
2172 * registration of the net device. Returns 0 on success, or a
4e7f7951
BH
2173 * negative error code. If called before registration, it always
2174 * succeeds.
62fe0b40
BH
2175 */
2176int netif_set_real_num_rx_queues(struct net_device *dev, unsigned int rxq)
2177{
2178 int rc;
2179
bd25fa7b
TH
2180 if (rxq < 1 || rxq > dev->num_rx_queues)
2181 return -EINVAL;
2182
62fe0b40
BH
2183 if (dev->reg_state == NETREG_REGISTERED) {
2184 ASSERT_RTNL();
2185
62fe0b40
BH
2186 rc = net_rx_queue_update_kobjects(dev, dev->real_num_rx_queues,
2187 rxq);
2188 if (rc)
2189 return rc;
62fe0b40
BH
2190 }
2191
2192 dev->real_num_rx_queues = rxq;
2193 return 0;
2194}
2195EXPORT_SYMBOL(netif_set_real_num_rx_queues);
2196#endif
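/* Illustrative sketch, not part of the original file: applying a new
 * channel count in both directions with rtnl held, as an ethtool
 * set_channels handler might. The name is invented for the example.
 */
static int example_apply_channels(struct net_device *dev, unsigned int n)
{
	int err;

	ASSERT_RTNL();

	err = netif_set_real_num_tx_queues(dev, n);
	if (err)
		return err;

	return netif_set_real_num_rx_queues(dev, n);
}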
2197
2c53040f
BH
2198/**
2199 * netif_get_num_default_rss_queues - default number of RSS queues
16917b87
YM
2200 *
2201 * This routine should set an upper limit on the number of RSS queues
2202 * used by default by multiqueue devices.
2203 */
a55b138b 2204int netif_get_num_default_rss_queues(void)
16917b87
YM
2205{
2206 return min_t(int, DEFAULT_MAX_NUM_RSS_QUEUES, num_online_cpus());
2207}
2208EXPORT_SYMBOL(netif_get_num_default_rss_queues);
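/* Illustrative sketch, not part of the original file: the usual caller
 * pattern, clamping a hardware queue limit by the default RSS bound
 * before allocating queues. "max_hw_queues" is an invented driver limit.
 */
static unsigned int example_pick_queue_count(unsigned int max_hw_queues)
{
	return min_t(unsigned int, max_hw_queues,
		     netif_get_num_default_rss_queues());
}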
2209
def82a1d 2210static inline void __netif_reschedule(struct Qdisc *q)
56079431 2211{
def82a1d
JP
2212 struct softnet_data *sd;
2213 unsigned long flags;
56079431 2214
def82a1d 2215 local_irq_save(flags);
903ceff7 2216 sd = this_cpu_ptr(&softnet_data);
a9cbd588
CG
2217 q->next_sched = NULL;
2218 *sd->output_queue_tailp = q;
2219 sd->output_queue_tailp = &q->next_sched;
def82a1d
JP
2220 raise_softirq_irqoff(NET_TX_SOFTIRQ);
2221 local_irq_restore(flags);
2222}
2223
2224void __netif_schedule(struct Qdisc *q)
2225{
2226 if (!test_and_set_bit(__QDISC_STATE_SCHED, &q->state))
2227 __netif_reschedule(q);
56079431
DV
2228}
2229EXPORT_SYMBOL(__netif_schedule);
2230
e6247027
ED
2231struct dev_kfree_skb_cb {
2232 enum skb_free_reason reason;
2233};
2234
2235static struct dev_kfree_skb_cb *get_kfree_skb_cb(const struct sk_buff *skb)
56079431 2236{
e6247027
ED
2237 return (struct dev_kfree_skb_cb *)skb->cb;
2238}
2239
46e5da40
JF
2240void netif_schedule_queue(struct netdev_queue *txq)
2241{
2242 rcu_read_lock();
2243 if (!(txq->state & QUEUE_STATE_ANY_XOFF)) {
2244 struct Qdisc *q = rcu_dereference(txq->qdisc);
2245
2246 __netif_schedule(q);
2247 }
2248 rcu_read_unlock();
2249}
2250EXPORT_SYMBOL(netif_schedule_queue);
2251
2252/**
2253 * netif_wake_subqueue - allow sending packets on subqueue
2254 * @dev: network device
2255 * @queue_index: sub queue index
2256 *
2257 * Resume individual transmit queue of a device with multiple transmit queues.
2258 */
2259void netif_wake_subqueue(struct net_device *dev, u16 queue_index)
2260{
2261 struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
2262
2263 if (test_and_clear_bit(__QUEUE_STATE_DRV_XOFF, &txq->state)) {
2264 struct Qdisc *q;
2265
2266 rcu_read_lock();
2267 q = rcu_dereference(txq->qdisc);
2268 __netif_schedule(q);
2269 rcu_read_unlock();
2270 }
2271}
2272EXPORT_SYMBOL(netif_wake_subqueue);
2273
2274void netif_tx_wake_queue(struct netdev_queue *dev_queue)
2275{
2276 if (test_and_clear_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state)) {
2277 struct Qdisc *q;
2278
2279 rcu_read_lock();
2280 q = rcu_dereference(dev_queue->qdisc);
2281 __netif_schedule(q);
2282 rcu_read_unlock();
2283 }
2284}
2285EXPORT_SYMBOL(netif_tx_wake_queue);
2286
e6247027 2287void __dev_kfree_skb_irq(struct sk_buff *skb, enum skb_free_reason reason)
56079431 2288{
e6247027 2289 unsigned long flags;
56079431 2290
e6247027
ED
2291 if (likely(atomic_read(&skb->users) == 1)) {
2292 smp_rmb();
2293 atomic_set(&skb->users, 0);
2294 } else if (likely(!atomic_dec_and_test(&skb->users))) {
2295 return;
bea3348e 2296 }
e6247027
ED
2297 get_kfree_skb_cb(skb)->reason = reason;
2298 local_irq_save(flags);
2299 skb->next = __this_cpu_read(softnet_data.completion_queue);
2300 __this_cpu_write(softnet_data.completion_queue, skb);
2301 raise_softirq_irqoff(NET_TX_SOFTIRQ);
2302 local_irq_restore(flags);
56079431 2303}
e6247027 2304EXPORT_SYMBOL(__dev_kfree_skb_irq);
56079431 2305
e6247027 2306void __dev_kfree_skb_any(struct sk_buff *skb, enum skb_free_reason reason)
56079431
DV
2307{
2308 if (in_irq() || irqs_disabled())
e6247027 2309 __dev_kfree_skb_irq(skb, reason);
56079431
DV
2310 else
2311 dev_kfree_skb(skb);
2312}
e6247027 2313EXPORT_SYMBOL(__dev_kfree_skb_any);
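/* Illustrative sketch, not part of the original file: a tx-completion
 * handler that may run in hardirq context, using the context-safe
 * wrappers built on __dev_kfree_skb_any(): dev_consume_skb_any() for
 * normal completions, dev_kfree_skb_any() for drops. Names are invented.
 */
static void example_tx_complete(struct sk_buff *skb, bool sent_ok)
{
	if (sent_ok)
		dev_consume_skb_any(skb);	/* SKB_REASON_CONSUMED */
	else
		dev_kfree_skb_any(skb);		/* SKB_REASON_DROPPED */
}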
56079431
DV
2314
2315
bea3348e
SH
2316/**
2317 * netif_device_detach - mark device as removed
2318 * @dev: network device
2319 *
2320 * Mark device as removed from system and therefore no longer available.
2321 */
56079431
DV
2322void netif_device_detach(struct net_device *dev)
2323{
2324 if (test_and_clear_bit(__LINK_STATE_PRESENT, &dev->state) &&
2325 netif_running(dev)) {
d543103a 2326 netif_tx_stop_all_queues(dev);
56079431
DV
2327 }
2328}
2329EXPORT_SYMBOL(netif_device_detach);
2330
bea3348e
SH
2331/**
2332 * netif_device_attach - mark device as attached
2333 * @dev: network device
2334 *
2335 * Mark device as attached from system and restart if needed.
2336 */
56079431
DV
2337void netif_device_attach(struct net_device *dev)
2338{
2339 if (!test_and_set_bit(__LINK_STATE_PRESENT, &dev->state) &&
2340 netif_running(dev)) {
d543103a 2341 netif_tx_wake_all_queues(dev);
4ec93edb 2342 __netdev_watchdog_up(dev);
56079431
DV
2343 }
2344}
2345EXPORT_SYMBOL(netif_device_attach);
2346
5605c762
JP
2347/*
 2348 * Returns a Tx hash based on the given packet descriptor and a Tx queues'
 2349 * number to be used as a distribution range.
2350 */
2351u16 __skb_tx_hash(const struct net_device *dev, struct sk_buff *skb,
2352 unsigned int num_tx_queues)
2353{
2354 u32 hash;
2355 u16 qoffset = 0;
2356 u16 qcount = num_tx_queues;
2357
2358 if (skb_rx_queue_recorded(skb)) {
2359 hash = skb_get_rx_queue(skb);
2360 while (unlikely(hash >= num_tx_queues))
2361 hash -= num_tx_queues;
2362 return hash;
2363 }
2364
2365 if (dev->num_tc) {
2366 u8 tc = netdev_get_prio_tc_map(dev, skb->priority);
2367 qoffset = dev->tc_to_txq[tc].offset;
2368 qcount = dev->tc_to_txq[tc].count;
2369 }
2370
2371 return (u16) reciprocal_scale(skb_get_hash(skb), qcount) + qoffset;
2372}
2373EXPORT_SYMBOL(__skb_tx_hash);
2374
36c92474
BH
2375static void skb_warn_bad_offload(const struct sk_buff *skb)
2376{
65e9d2fa 2377 static const netdev_features_t null_features = 0;
36c92474
BH
2378 struct net_device *dev = skb->dev;
2379 const char *driver = "";
2380
c846ad9b
BG
2381 if (!net_ratelimit())
2382 return;
2383
36c92474
BH
2384 if (dev && dev->dev.parent)
2385 driver = dev_driver_string(dev->dev.parent);
2386
2387 WARN(1, "%s: caps=(%pNF, %pNF) len=%d data_len=%d gso_size=%d "
2388 "gso_type=%d ip_summed=%d\n",
65e9d2fa
MM
2389 driver, dev ? &dev->features : &null_features,
2390 skb->sk ? &skb->sk->sk_route_caps : &null_features,
36c92474
BH
2391 skb->len, skb->data_len, skb_shinfo(skb)->gso_size,
2392 skb_shinfo(skb)->gso_type, skb->ip_summed);
2393}
2394
1da177e4
LT
2395/*
2396 * Invalidate hardware checksum when packet is to be mangled, and
2397 * complete checksum manually on outgoing path.
2398 */
84fa7933 2399int skb_checksum_help(struct sk_buff *skb)
1da177e4 2400{
d3bc23e7 2401 __wsum csum;
663ead3b 2402 int ret = 0, offset;
1da177e4 2403
84fa7933 2404 if (skb->ip_summed == CHECKSUM_COMPLETE)
a430a43d
HX
2405 goto out_set_summed;
2406
2407 if (unlikely(skb_shinfo(skb)->gso_size)) {
36c92474
BH
2408 skb_warn_bad_offload(skb);
2409 return -EINVAL;
1da177e4
LT
2410 }
2411
cef401de
ED
2412 /* Before computing a checksum, we should make sure no frag could
2413 * be modified by an external entity : checksum could be wrong.
2414 */
2415 if (skb_has_shared_frag(skb)) {
2416 ret = __skb_linearize(skb);
2417 if (ret)
2418 goto out;
2419 }
2420
55508d60 2421 offset = skb_checksum_start_offset(skb);
a030847e
HX
2422 BUG_ON(offset >= skb_headlen(skb));
2423 csum = skb_checksum(skb, offset, skb->len - offset, 0);
2424
2425 offset += skb->csum_offset;
2426 BUG_ON(offset + sizeof(__sum16) > skb_headlen(skb));
2427
2428 if (skb_cloned(skb) &&
2429 !skb_clone_writable(skb, offset + sizeof(__sum16))) {
1da177e4
LT
2430 ret = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
2431 if (ret)
2432 goto out;
2433 }
2434
a030847e 2435 *(__sum16 *)(skb->data + offset) = csum_fold(csum);
a430a43d 2436out_set_summed:
1da177e4 2437 skb->ip_summed = CHECKSUM_NONE;
4ec93edb 2438out:
1da177e4
LT
2439 return ret;
2440}
d1b19dff 2441EXPORT_SYMBOL(skb_checksum_help);
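/* Illustrative sketch, not part of the original file: the classic driver
 * fallback skb_checksum_help() exists for. If the hardware cannot
 * checksum this frame, resolve the CHECKSUM_PARTIAL request in software
 * before posting the skb to the DMA ring. Names are invented.
 */
static int example_tx_csum_prep(struct sk_buff *skb, bool hw_can_csum)
{
	if (skb->ip_summed == CHECKSUM_PARTIAL && !hw_can_csum)
		return skb_checksum_help(skb);

	return 0;
}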
1da177e4 2442
53d6471c 2443__be16 skb_network_protocol(struct sk_buff *skb, int *depth)
f6a78bfc 2444{
252e3346 2445 __be16 type = skb->protocol;
f6a78bfc 2446
19acc327
PS
2447 /* Tunnel gso handlers can set protocol to ethernet. */
2448 if (type == htons(ETH_P_TEB)) {
2449 struct ethhdr *eth;
2450
2451 if (unlikely(!pskb_may_pull(skb, sizeof(struct ethhdr))))
2452 return 0;
2453
2454 eth = (struct ethhdr *)skb_mac_header(skb);
2455 type = eth->h_proto;
2456 }
2457
d4bcef3f 2458 return __vlan_get_protocol(skb, type, depth);
ec5f0615
PS
2459}
2460
2461/**
2462 * skb_mac_gso_segment - mac layer segmentation handler.
2463 * @skb: buffer to segment
2464 * @features: features for the output path (see dev->features)
2465 */
2466struct sk_buff *skb_mac_gso_segment(struct sk_buff *skb,
2467 netdev_features_t features)
2468{
2469 struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
2470 struct packet_offload *ptype;
53d6471c
VY
2471 int vlan_depth = skb->mac_len;
2472 __be16 type = skb_network_protocol(skb, &vlan_depth);
ec5f0615
PS
2473
2474 if (unlikely(!type))
2475 return ERR_PTR(-EINVAL);
2476
53d6471c 2477 __skb_pull(skb, vlan_depth);
f6a78bfc
HX
2478
2479 rcu_read_lock();
22061d80 2480 list_for_each_entry_rcu(ptype, &offload_base, list) {
f191a1d1 2481 if (ptype->type == type && ptype->callbacks.gso_segment) {
f191a1d1 2482 segs = ptype->callbacks.gso_segment(skb, features);
f6a78bfc
HX
2483 break;
2484 }
2485 }
2486 rcu_read_unlock();
2487
98e399f8 2488 __skb_push(skb, skb->data - skb_mac_header(skb));
576a30eb 2489
f6a78bfc
HX
2490 return segs;
2491}
05e8ef4a
PS
2492EXPORT_SYMBOL(skb_mac_gso_segment);
2493
2494
2495/* openvswitch calls this on rx path, so we need a different check.
2496 */
2497static inline bool skb_needs_check(struct sk_buff *skb, bool tx_path)
2498{
2499 if (tx_path)
2500 return skb->ip_summed != CHECKSUM_PARTIAL;
2501 else
2502 return skb->ip_summed == CHECKSUM_NONE;
2503}
2504
2505/**
2506 * __skb_gso_segment - Perform segmentation on skb.
2507 * @skb: buffer to segment
2508 * @features: features for the output path (see dev->features)
2509 * @tx_path: whether it is called in TX path
2510 *
2511 * This function segments the given skb and returns a list of segments.
2512 *
2513 * It may return NULL if the skb requires no segmentation. This is
2514 * only possible when GSO is used for verifying header integrity.
2515 */
2516struct sk_buff *__skb_gso_segment(struct sk_buff *skb,
2517 netdev_features_t features, bool tx_path)
2518{
2519 if (unlikely(skb_needs_check(skb, tx_path))) {
2520 int err;
2521
2522 skb_warn_bad_offload(skb);
2523
a40e0a66 2524 err = skb_cow_head(skb, 0);
2525 if (err < 0)
05e8ef4a
PS
2526 return ERR_PTR(err);
2527 }
2528
68c33163 2529 SKB_GSO_CB(skb)->mac_offset = skb_headroom(skb);
3347c960
ED
2530 SKB_GSO_CB(skb)->encap_level = 0;
2531
05e8ef4a
PS
2532 skb_reset_mac_header(skb);
2533 skb_reset_mac_len(skb);
2534
2535 return skb_mac_gso_segment(skb, features);
2536}
12b0004d 2537EXPORT_SYMBOL(__skb_gso_segment);
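/* Illustrative sketch, not part of the original file: software GSO as a
 * fallback, walking the segment list much like validate_xmit_skb() and
 * dev_hard_start_xmit() do further down. example_xmit_one_frame() is a
 * hypothetical per-frame transmit helper.
 */
static int example_xmit_one_frame(struct sk_buff *skb);

static int example_sw_gso(struct sk_buff *skb, netdev_features_t features)
{
	struct sk_buff *segs;

	segs = skb_gso_segment(skb, features);
	if (IS_ERR(segs))
		return PTR_ERR(segs);
	if (!segs)			/* no segmentation was needed */
		return example_xmit_one_frame(skb);

	consume_skb(skb);		/* the original is now redundant */
	while (segs) {
		struct sk_buff *seg = segs;

		segs = segs->next;
		seg->next = NULL;
		example_xmit_one_frame(seg);
	}
	return 0;
}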
f6a78bfc 2538
fb286bb2
HX
2539/* Take action when hardware reception checksum errors are detected. */
2540#ifdef CONFIG_BUG
2541void netdev_rx_csum_fault(struct net_device *dev)
2542{
2543 if (net_ratelimit()) {
7b6cd1ce 2544 pr_err("%s: hw csum failure\n", dev ? dev->name : "<unknown>");
fb286bb2
HX
2545 dump_stack();
2546 }
2547}
2548EXPORT_SYMBOL(netdev_rx_csum_fault);
2549#endif
2550
1da177e4
LT
2551/* Actually, we should eliminate this check as soon as we know that:
 2552 * 1. An IOMMU is present and allows mapping all the memory.
2553 * 2. No high memory really exists on this machine.
2554 */
2555
c1e756bf 2556static int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
1da177e4 2557{
3d3a8533 2558#ifdef CONFIG_HIGHMEM
1da177e4 2559 int i;
5acbbd42 2560 if (!(dev->features & NETIF_F_HIGHDMA)) {
ea2ab693
IC
2561 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2562 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2563 if (PageHighMem(skb_frag_page(frag)))
5acbbd42 2564 return 1;
ea2ab693 2565 }
5acbbd42 2566 }
1da177e4 2567
5acbbd42
FT
2568 if (PCI_DMA_BUS_IS_PHYS) {
2569 struct device *pdev = dev->dev.parent;
1da177e4 2570
9092c658
ED
2571 if (!pdev)
2572 return 0;
5acbbd42 2573 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
ea2ab693
IC
2574 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2575 dma_addr_t addr = page_to_phys(skb_frag_page(frag));
5acbbd42
FT
2576 if (!pdev->dma_mask || addr + PAGE_SIZE - 1 > *pdev->dma_mask)
2577 return 1;
2578 }
2579 }
3d3a8533 2580#endif
1da177e4
LT
2581 return 0;
2582}
1da177e4 2583
3b392ddb
SH
2584/* If MPLS offload request, verify we are testing hardware MPLS features
2585 * instead of standard features for the netdev.
2586 */
d0edc7bf 2587#if IS_ENABLED(CONFIG_NET_MPLS_GSO)
3b392ddb
SH
2588static netdev_features_t net_mpls_features(struct sk_buff *skb,
2589 netdev_features_t features,
2590 __be16 type)
2591{
25cd9ba0 2592 if (eth_p_mpls(type))
3b392ddb
SH
2593 features &= skb->dev->mpls_features;
2594
2595 return features;
2596}
2597#else
2598static netdev_features_t net_mpls_features(struct sk_buff *skb,
2599 netdev_features_t features,
2600 __be16 type)
2601{
2602 return features;
2603}
2604#endif
2605
c8f44aff 2606static netdev_features_t harmonize_features(struct sk_buff *skb,
c1e756bf 2607 netdev_features_t features)
f01a5236 2608{
53d6471c 2609 int tmp;
3b392ddb
SH
2610 __be16 type;
2611
2612 type = skb_network_protocol(skb, &tmp);
2613 features = net_mpls_features(skb, features, type);
53d6471c 2614
c0d680e5 2615 if (skb->ip_summed != CHECKSUM_NONE &&
3b392ddb 2616 !can_checksum_protocol(features, type)) {
f01a5236 2617 features &= ~NETIF_F_ALL_CSUM;
c1e756bf 2618 } else if (illegal_highdma(skb->dev, skb)) {
f01a5236
JG
2619 features &= ~NETIF_F_SG;
2620 }
2621
2622 return features;
2623}
2624
e38f3025
TM
2625netdev_features_t passthru_features_check(struct sk_buff *skb,
2626 struct net_device *dev,
2627 netdev_features_t features)
2628{
2629 return features;
2630}
2631EXPORT_SYMBOL(passthru_features_check);
2632
8cb65d00
TM
2633static netdev_features_t dflt_features_check(const struct sk_buff *skb,
2634 struct net_device *dev,
2635 netdev_features_t features)
2636{
2637 return vlan_features_check(skb, features);
2638}
2639
c1e756bf 2640netdev_features_t netif_skb_features(struct sk_buff *skb)
58e998c6 2641{
5f35227e 2642 struct net_device *dev = skb->dev;
fcbeb976
ED
2643 netdev_features_t features = dev->features;
2644 u16 gso_segs = skb_shinfo(skb)->gso_segs;
58e998c6 2645
fcbeb976 2646 if (gso_segs > dev->gso_max_segs || gso_segs < dev->gso_min_segs)
30b678d8
BH
2647 features &= ~NETIF_F_GSO_MASK;
2648
5f35227e
JG
2649 /* If encapsulation offload request, verify we are testing
2650 * hardware encapsulation features instead of standard
2651 * features for the netdev
2652 */
2653 if (skb->encapsulation)
2654 features &= dev->hw_enc_features;
2655
f5a7fb88
TM
2656 if (skb_vlan_tagged(skb))
2657 features = netdev_intersect_features(features,
2658 dev->vlan_features |
2659 NETIF_F_HW_VLAN_CTAG_TX |
2660 NETIF_F_HW_VLAN_STAG_TX);
f01a5236 2661
5f35227e
JG
2662 if (dev->netdev_ops->ndo_features_check)
2663 features &= dev->netdev_ops->ndo_features_check(skb, dev,
2664 features);
8cb65d00
TM
2665 else
2666 features &= dflt_features_check(skb, dev, features);
5f35227e 2667
c1e756bf 2668 return harmonize_features(skb, features);
58e998c6 2669}
c1e756bf 2670EXPORT_SYMBOL(netif_skb_features);
58e998c6 2671
2ea25513 2672static int xmit_one(struct sk_buff *skb, struct net_device *dev,
95f6b3dd 2673 struct netdev_queue *txq, bool more)
f6a78bfc 2674{
2ea25513
DM
2675 unsigned int len;
2676 int rc;
00829823 2677
7866a621 2678 if (!list_empty(&ptype_all) || !list_empty(&dev->ptype_all))
2ea25513 2679 dev_queue_xmit_nit(skb, dev);
fc741216 2680
2ea25513
DM
2681 len = skb->len;
2682 trace_net_dev_start_xmit(skb, dev);
95f6b3dd 2683 rc = netdev_start_xmit(skb, dev, txq, more);
2ea25513 2684 trace_net_dev_xmit(skb, rc, dev, len);
adf30907 2685
2ea25513
DM
2686 return rc;
2687}
7b9c6090 2688
8dcda22a
DM
2689struct sk_buff *dev_hard_start_xmit(struct sk_buff *first, struct net_device *dev,
2690 struct netdev_queue *txq, int *ret)
7f2e870f
DM
2691{
2692 struct sk_buff *skb = first;
2693 int rc = NETDEV_TX_OK;
7b9c6090 2694
7f2e870f
DM
2695 while (skb) {
2696 struct sk_buff *next = skb->next;
fc70fb64 2697
7f2e870f 2698 skb->next = NULL;
95f6b3dd 2699 rc = xmit_one(skb, dev, txq, next != NULL);
7f2e870f
DM
2700 if (unlikely(!dev_xmit_complete(rc))) {
2701 skb->next = next;
2702 goto out;
2703 }
6afff0ca 2704
7f2e870f
DM
2705 skb = next;
2706 if (netif_xmit_stopped(txq) && skb) {
2707 rc = NETDEV_TX_BUSY;
2708 break;
9ccb8975 2709 }
7f2e870f 2710 }
9ccb8975 2711
7f2e870f
DM
2712out:
2713 *ret = rc;
2714 return skb;
2715}
b40863c6 2716
1ff0dc94
ED
2717static struct sk_buff *validate_xmit_vlan(struct sk_buff *skb,
2718 netdev_features_t features)
f6a78bfc 2719{
df8a39de 2720 if (skb_vlan_tag_present(skb) &&
5968250c
JP
2721 !vlan_hw_offload_capable(features, skb->vlan_proto))
2722 skb = __vlan_hwaccel_push_inside(skb);
eae3f88e
DM
2723 return skb;
2724}
f6a78bfc 2725
55a93b3e 2726static struct sk_buff *validate_xmit_skb(struct sk_buff *skb, struct net_device *dev)
eae3f88e
DM
2727{
2728 netdev_features_t features;
f6a78bfc 2729
eae3f88e
DM
2730 if (skb->next)
2731 return skb;
068a2de5 2732
eae3f88e
DM
2733 features = netif_skb_features(skb);
2734 skb = validate_xmit_vlan(skb, features);
2735 if (unlikely(!skb))
2736 goto out_null;
7b9c6090 2737
8b86a61d 2738 if (netif_needs_gso(skb, features)) {
ce93718f
DM
2739 struct sk_buff *segs;
2740
2741 segs = skb_gso_segment(skb, features);
cecda693 2742 if (IS_ERR(segs)) {
af6dabc9 2743 goto out_kfree_skb;
cecda693
JW
2744 } else if (segs) {
2745 consume_skb(skb);
2746 skb = segs;
f6a78bfc 2747 }
eae3f88e
DM
2748 } else {
2749 if (skb_needs_linearize(skb, features) &&
2750 __skb_linearize(skb))
2751 goto out_kfree_skb;
4ec93edb 2752
eae3f88e
DM
2753 /* If packet is not checksummed and device does not
2754 * support checksumming for this protocol, complete
2755 * checksumming here.
2756 */
2757 if (skb->ip_summed == CHECKSUM_PARTIAL) {
2758 if (skb->encapsulation)
2759 skb_set_inner_transport_header(skb,
2760 skb_checksum_start_offset(skb));
2761 else
2762 skb_set_transport_header(skb,
2763 skb_checksum_start_offset(skb));
2764 if (!(features & NETIF_F_ALL_CSUM) &&
2765 skb_checksum_help(skb))
2766 goto out_kfree_skb;
7b9c6090 2767 }
0c772159 2768 }
7b9c6090 2769
eae3f88e 2770 return skb;
fc70fb64 2771
f6a78bfc
HX
2772out_kfree_skb:
2773 kfree_skb(skb);
eae3f88e
DM
2774out_null:
2775 return NULL;
2776}
6afff0ca 2777
55a93b3e
ED
2778struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev)
2779{
2780 struct sk_buff *next, *head = NULL, *tail;
2781
bec3cfdc 2782 for (; skb != NULL; skb = next) {
55a93b3e
ED
2783 next = skb->next;
2784 skb->next = NULL;
bec3cfdc
ED
2785
 2786 /* in case skb won't be segmented, point it to itself */
2787 skb->prev = skb;
2788
55a93b3e 2789 skb = validate_xmit_skb(skb, dev);
bec3cfdc
ED
2790 if (!skb)
2791 continue;
55a93b3e 2792
bec3cfdc
ED
2793 if (!head)
2794 head = skb;
2795 else
2796 tail->next = skb;
2797 /* If skb was segmented, skb->prev points to
2798 * the last segment. If not, it still contains skb.
2799 */
2800 tail = skb->prev;
55a93b3e
ED
2801 }
2802 return head;
f6a78bfc
HX
2803}
2804
1def9238
ED
2805static void qdisc_pkt_len_init(struct sk_buff *skb)
2806{
2807 const struct skb_shared_info *shinfo = skb_shinfo(skb);
2808
2809 qdisc_skb_cb(skb)->pkt_len = skb->len;
2810
 2811 /* To get a more precise estimate of bytes sent on the wire,
 2812 * we add the header size of all segments to pkt_len
2813 */
2814 if (shinfo->gso_size) {
757b8b1d 2815 unsigned int hdr_len;
15e5a030 2816 u16 gso_segs = shinfo->gso_segs;
1def9238 2817
757b8b1d
ED
2818 /* mac layer + network layer */
2819 hdr_len = skb_transport_header(skb) - skb_mac_header(skb);
2820
2821 /* + transport layer */
1def9238
ED
2822 if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)))
2823 hdr_len += tcp_hdrlen(skb);
2824 else
2825 hdr_len += sizeof(struct udphdr);
15e5a030
JW
2826
2827 if (shinfo->gso_type & SKB_GSO_DODGY)
2828 gso_segs = DIV_ROUND_UP(skb->len - hdr_len,
2829 shinfo->gso_size);
2830
2831 qdisc_skb_cb(skb)->pkt_len += (gso_segs - 1) * hdr_len;
1def9238
ED
2832 }
2833}
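/* Worked example, illustrative only: a TCPv4 skb with gso_size = 1448,
 * 7240 bytes of payload (5 segments) and hdr_len = 66 (14 mac + 20 ip +
 * 32 tcp) has skb->len = 7306 and gso_segs = 5, so pkt_len becomes
 * 7306 + (5 - 1) * 66 = 7570, i.e. 5 * 1514 bytes on the wire.
 */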
2834
bbd8a0d3
KK
2835static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
2836 struct net_device *dev,
2837 struct netdev_queue *txq)
2838{
2839 spinlock_t *root_lock = qdisc_lock(q);
a2da570d 2840 bool contended;
bbd8a0d3
KK
2841 int rc;
2842
1def9238 2843 qdisc_pkt_len_init(skb);
a2da570d 2844 qdisc_calculate_pkt_len(skb, q);
79640a4c
ED
2845 /*
2846 * Heuristic to force contended enqueues to serialize on a
2847 * separate lock before trying to get qdisc main lock.
9bf2b8c2
YX
2848 * This permits __QDISC___STATE_RUNNING owner to get the lock more
2849 * often and dequeue packets faster.
79640a4c 2850 */
a2da570d 2851 contended = qdisc_is_running(q);
79640a4c
ED
2852 if (unlikely(contended))
2853 spin_lock(&q->busylock);
2854
bbd8a0d3
KK
2855 spin_lock(root_lock);
2856 if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) {
2857 kfree_skb(skb);
2858 rc = NET_XMIT_DROP;
2859 } else if ((q->flags & TCQ_F_CAN_BYPASS) && !qdisc_qlen(q) &&
bc135b23 2860 qdisc_run_begin(q)) {
bbd8a0d3
KK
2861 /*
2862 * This is a work-conserving queue; there are no old skbs
2863 * waiting to be sent out; and the qdisc is not running -
2864 * xmit the skb directly.
2865 */
bfe0d029 2866
bfe0d029
ED
2867 qdisc_bstats_update(q, skb);
2868
55a93b3e 2869 if (sch_direct_xmit(skb, q, dev, txq, root_lock, true)) {
79640a4c
ED
2870 if (unlikely(contended)) {
2871 spin_unlock(&q->busylock);
2872 contended = false;
2873 }
bbd8a0d3 2874 __qdisc_run(q);
79640a4c 2875 } else
bc135b23 2876 qdisc_run_end(q);
bbd8a0d3
KK
2877
2878 rc = NET_XMIT_SUCCESS;
2879 } else {
a2da570d 2880 rc = q->enqueue(skb, q) & NET_XMIT_MASK;
79640a4c
ED
2881 if (qdisc_run_begin(q)) {
2882 if (unlikely(contended)) {
2883 spin_unlock(&q->busylock);
2884 contended = false;
2885 }
2886 __qdisc_run(q);
2887 }
bbd8a0d3
KK
2888 }
2889 spin_unlock(root_lock);
79640a4c
ED
2890 if (unlikely(contended))
2891 spin_unlock(&q->busylock);
bbd8a0d3
KK
2892 return rc;
2893}
2894
86f8515f 2895#if IS_ENABLED(CONFIG_CGROUP_NET_PRIO)
5bc1421e
NH
2896static void skb_update_prio(struct sk_buff *skb)
2897{
6977a79d 2898 struct netprio_map *map = rcu_dereference_bh(skb->dev->priomap);
5bc1421e 2899
91c68ce2
ED
2900 if (!skb->priority && skb->sk && map) {
2901 unsigned int prioidx = skb->sk->sk_cgrp_prioidx;
2902
2903 if (prioidx < map->priomap_len)
2904 skb->priority = map->priomap[prioidx];
2905 }
5bc1421e
NH
2906}
2907#else
2908#define skb_update_prio(skb)
2909#endif
2910
f60e5990 2911DEFINE_PER_CPU(int, xmit_recursion);
2912EXPORT_SYMBOL(xmit_recursion);
2913
11a766ce 2914#define RECURSION_LIMIT 10
745e20f1 2915
95603e22
MM
2916/**
2917 * dev_loopback_xmit - loop back @skb
0c4b51f0
EB
2918 * @net: network namespace this loopback is happening in
2919 * @sk: sk needed to be a netfilter okfn
95603e22
MM
2920 * @skb: buffer to transmit
2921 */
0c4b51f0 2922int dev_loopback_xmit(struct net *net, struct sock *sk, struct sk_buff *skb)
95603e22
MM
2923{
2924 skb_reset_mac_header(skb);
2925 __skb_pull(skb, skb_network_offset(skb));
2926 skb->pkt_type = PACKET_LOOPBACK;
2927 skb->ip_summed = CHECKSUM_UNNECESSARY;
2928 WARN_ON(!skb_dst(skb));
2929 skb_dst_force(skb);
2930 netif_rx_ni(skb);
2931 return 0;
2932}
2933EXPORT_SYMBOL(dev_loopback_xmit);
2934
638b2a69
JP
2935static inline int get_xps_queue(struct net_device *dev, struct sk_buff *skb)
2936{
2937#ifdef CONFIG_XPS
2938 struct xps_dev_maps *dev_maps;
2939 struct xps_map *map;
2940 int queue_index = -1;
2941
2942 rcu_read_lock();
2943 dev_maps = rcu_dereference(dev->xps_maps);
2944 if (dev_maps) {
2945 map = rcu_dereference(
2946 dev_maps->cpu_map[skb->sender_cpu - 1]);
2947 if (map) {
2948 if (map->len == 1)
2949 queue_index = map->queues[0];
2950 else
2951 queue_index = map->queues[reciprocal_scale(skb_get_hash(skb),
2952 map->len)];
2953 if (unlikely(queue_index >= dev->real_num_tx_queues))
2954 queue_index = -1;
2955 }
2956 }
2957 rcu_read_unlock();
2958
2959 return queue_index;
2960#else
2961 return -1;
2962#endif
2963}
2964
2965static u16 __netdev_pick_tx(struct net_device *dev, struct sk_buff *skb)
2966{
2967 struct sock *sk = skb->sk;
2968 int queue_index = sk_tx_queue_get(sk);
2969
2970 if (queue_index < 0 || skb->ooo_okay ||
2971 queue_index >= dev->real_num_tx_queues) {
2972 int new_index = get_xps_queue(dev, skb);
2973 if (new_index < 0)
2974 new_index = skb_tx_hash(dev, skb);
2975
2976 if (queue_index != new_index && sk &&
2977 rcu_access_pointer(sk->sk_dst_cache))
2978 sk_tx_queue_set(sk, new_index);
2979
2980 queue_index = new_index;
2981 }
2982
2983 return queue_index;
2984}
2985
2986struct netdev_queue *netdev_pick_tx(struct net_device *dev,
2987 struct sk_buff *skb,
2988 void *accel_priv)
2989{
2990 int queue_index = 0;
2991
2992#ifdef CONFIG_XPS
2993 if (skb->sender_cpu == 0)
2994 skb->sender_cpu = raw_smp_processor_id() + 1;
2995#endif
2996
2997 if (dev->real_num_tx_queues != 1) {
2998 const struct net_device_ops *ops = dev->netdev_ops;
2999 if (ops->ndo_select_queue)
3000 queue_index = ops->ndo_select_queue(dev, skb, accel_priv,
3001 __netdev_pick_tx);
3002 else
3003 queue_index = __netdev_pick_tx(dev, skb);
3004
3005 if (!accel_priv)
3006 queue_index = netdev_cap_txqueue(dev, queue_index);
3007 }
3008
3009 skb_set_queue_mapping(skb, queue_index);
3010 return netdev_get_tx_queue(dev, queue_index);
3011}
3012
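/* Illustrative sketch, not part of the original file: a driver
 * ndo_select_queue() of this era, steering one protocol to a dedicated
 * queue and deferring everything else to the fallback that
 * netdev_pick_tx() passes in (__netdev_pick_tx above). Names are
 * invented for the example.
 */
static u16 example_select_queue(struct net_device *dev, struct sk_buff *skb,
				void *accel_priv,
				select_queue_fallback_t fallback)
{
	if (skb->protocol == htons(ETH_P_PAE))	/* e.g. EAPOL frames */
		return dev->real_num_tx_queues - 1;

	return fallback(dev, skb);
}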
d29f749e 3013/**
9d08dd3d 3014 * __dev_queue_xmit - transmit a buffer
d29f749e 3015 * @skb: buffer to transmit
9d08dd3d 3016 * @accel_priv: private data used for L2 forwarding offload
d29f749e
DJ
3017 *
3018 * Queue a buffer for transmission to a network device. The caller must
3019 * have set the device and priority and built the buffer before calling
3020 * this function. The function can be called from an interrupt.
3021 *
3022 * A negative errno code is returned on a failure. A success does not
3023 * guarantee the frame will be transmitted as it may be dropped due
3024 * to congestion or traffic shaping.
3025 *
3026 * -----------------------------------------------------------------------------------
3027 * I notice this method can also return errors from the queue disciplines,
3028 * including NET_XMIT_DROP, which is a positive value. So, errors can also
3029 * be positive.
3030 *
3031 * Regardless of the return value, the skb is consumed, so it is currently
3032 * difficult to retry a send to this method. (You can bump the ref count
3033 * before sending to hold a reference for retry if you are careful.)
3034 *
3035 * When calling this method, interrupts MUST be enabled. This is because
3036 * the BH enable code must have IRQs enabled so that it will not deadlock.
3037 * --BLG
3038 */
0a59f3a9 3039static int __dev_queue_xmit(struct sk_buff *skb, void *accel_priv)
1da177e4
LT
3040{
3041 struct net_device *dev = skb->dev;
dc2b4847 3042 struct netdev_queue *txq;
1da177e4
LT
3043 struct Qdisc *q;
3044 int rc = -ENOMEM;
3045
6d1ccff6
ED
3046 skb_reset_mac_header(skb);
3047
e7fd2885
WB
3048 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_SCHED_TSTAMP))
3049 __skb_tstamp_tx(skb, NULL, skb->sk, SCM_TSTAMP_SCHED);
3050
4ec93edb
YH
3051 /* Disable soft irqs for various locks below. Also
3052 * stops preemption for RCU.
1da177e4 3053 */
4ec93edb 3054 rcu_read_lock_bh();
1da177e4 3055
5bc1421e
NH
3056 skb_update_prio(skb);
3057
02875878
ED
 3058 /* If the device/qdisc doesn't need skb->dst, release it right now while
 3059 * it's hot in this cpu's cache.
3060 */
3061 if (dev->priv_flags & IFF_XMIT_DST_RELEASE)
3062 skb_dst_drop(skb);
3063 else
3064 skb_dst_force(skb);
3065
0c4f691f
SF
3066#ifdef CONFIG_NET_SWITCHDEV
3067 /* Don't forward if offload device already forwarded */
3068 if (skb->offload_fwd_mark &&
3069 skb->offload_fwd_mark == dev->offload_fwd_mark) {
3070 consume_skb(skb);
3071 rc = NET_XMIT_SUCCESS;
3072 goto out;
3073 }
3074#endif
3075
f663dd9a 3076 txq = netdev_pick_tx(dev, skb, accel_priv);
a898def2 3077 q = rcu_dereference_bh(txq->qdisc);
37437bb2 3078
1da177e4 3079#ifdef CONFIG_NET_CLS_ACT
d1b19dff 3080 skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_EGRESS);
1da177e4 3081#endif
cf66ba58 3082 trace_net_dev_queue(skb);
1da177e4 3083 if (q->enqueue) {
bbd8a0d3 3084 rc = __dev_xmit_skb(skb, q, dev, txq);
37437bb2 3085 goto out;
1da177e4
LT
3086 }
3087
 3088 /* The device has no queue. Common case for software devices:
 3089 loopback, all sorts of tunnels...
 3090
932ff279
HX
 3091 Really, it is unlikely that netif_tx_lock protection is necessary
 3092 here. (f.e. loopback and IP tunnels are clean, ignoring statistics
1da177e4
LT
 3093 counters.)
 3094 However, it is possible that they rely on the protection
 3095 we provide here.
 3096
 3097 Check this and shoot the lock. It is not prone to deadlocks.
 3098 Either shoot the noqueue qdisc; it is even simpler 8)
3099 */
3100 if (dev->flags & IFF_UP) {
3101 int cpu = smp_processor_id(); /* ok because BHs are off */
3102
c773e847 3103 if (txq->xmit_lock_owner != cpu) {
1da177e4 3104
745e20f1
ED
3105 if (__this_cpu_read(xmit_recursion) > RECURSION_LIMIT)
3106 goto recursion_alert;
3107
1f59533f
JDB
3108 skb = validate_xmit_skb(skb, dev);
3109 if (!skb)
3110 goto drop;
3111
c773e847 3112 HARD_TX_LOCK(dev, txq, cpu);
1da177e4 3113
73466498 3114 if (!netif_xmit_stopped(txq)) {
745e20f1 3115 __this_cpu_inc(xmit_recursion);
ce93718f 3116 skb = dev_hard_start_xmit(skb, dev, txq, &rc);
745e20f1 3117 __this_cpu_dec(xmit_recursion);
572a9d7b 3118 if (dev_xmit_complete(rc)) {
c773e847 3119 HARD_TX_UNLOCK(dev, txq);
1da177e4
LT
3120 goto out;
3121 }
3122 }
c773e847 3123 HARD_TX_UNLOCK(dev, txq);
e87cc472
JP
3124 net_crit_ratelimited("Virtual device %s asks to queue packet!\n",
3125 dev->name);
1da177e4
LT
3126 } else {
3127 /* Recursion is detected! It is possible,
745e20f1
ED
3128 * unfortunately
3129 */
3130recursion_alert:
e87cc472
JP
3131 net_crit_ratelimited("Dead loop on virtual device %s, fix it urgently!\n",
3132 dev->name);
1da177e4
LT
3133 }
3134 }
3135
3136 rc = -ENETDOWN;
1f59533f 3137drop:
d4828d85 3138 rcu_read_unlock_bh();
1da177e4 3139
015f0688 3140 atomic_long_inc(&dev->tx_dropped);
1f59533f 3141 kfree_skb_list(skb);
1da177e4
LT
3142 return rc;
3143out:
d4828d85 3144 rcu_read_unlock_bh();
1da177e4
LT
3145 return rc;
3146}
f663dd9a 3147
2b4aa3ce 3148int dev_queue_xmit(struct sk_buff *skb)
f663dd9a
JW
3149{
3150 return __dev_queue_xmit(skb, NULL);
3151}
2b4aa3ce 3152EXPORT_SYMBOL(dev_queue_xmit);
1da177e4 3153
f663dd9a
JW
3154int dev_queue_xmit_accel(struct sk_buff *skb, void *accel_priv)
3155{
3156 return __dev_queue_xmit(skb, accel_priv);
3157}
3158EXPORT_SYMBOL(dev_queue_xmit_accel);
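/* Illustrative sketch, not part of the original file: the minimal send
 * path a protocol follows once it has built a frame: set skb->dev and
 * priority, then hand the skb over and never touch it again, since
 * dev_queue_xmit() consumes it whatever the outcome. The name is
 * invented.
 */
static void example_send_frame(struct net_device *dev, struct sk_buff *skb)
{
	skb->dev = dev;
	skb->priority = TC_PRIO_CONTROL;
	dev_queue_xmit(skb);
}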
3159
1da177e4
LT
3160
3161/*=======================================================================
3162 Receiver routines
3163 =======================================================================*/
3164
6b2bedc3 3165int netdev_max_backlog __read_mostly = 1000;
c9e6bc64
ED
3166EXPORT_SYMBOL(netdev_max_backlog);
3167
3b098e2d 3168int netdev_tstamp_prequeue __read_mostly = 1;
6b2bedc3
SH
3169int netdev_budget __read_mostly = 300;
3170int weight_p __read_mostly = 64; /* old backlog weight */
1da177e4 3171
eecfd7c4
ED
3172/* Called with irq disabled */
3173static inline void ____napi_schedule(struct softnet_data *sd,
3174 struct napi_struct *napi)
3175{
3176 list_add_tail(&napi->poll_list, &sd->poll_list);
3177 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
3178}
3179
bfb564e7
KK
3180#ifdef CONFIG_RPS
3181
3182/* One global table that all flow-based protocols share. */
6e3f7faf 3183struct rps_sock_flow_table __rcu *rps_sock_flow_table __read_mostly;
bfb564e7 3184EXPORT_SYMBOL(rps_sock_flow_table);
567e4b79
ED
3185u32 rps_cpu_mask __read_mostly;
3186EXPORT_SYMBOL(rps_cpu_mask);
bfb564e7 3187
c5905afb 3188struct static_key rps_needed __read_mostly;
adc9300e 3189
c445477d
BH
3190static struct rps_dev_flow *
3191set_rps_cpu(struct net_device *dev, struct sk_buff *skb,
3192 struct rps_dev_flow *rflow, u16 next_cpu)
3193{
a31196b0 3194 if (next_cpu < nr_cpu_ids) {
c445477d
BH
3195#ifdef CONFIG_RFS_ACCEL
3196 struct netdev_rx_queue *rxqueue;
3197 struct rps_dev_flow_table *flow_table;
3198 struct rps_dev_flow *old_rflow;
3199 u32 flow_id;
3200 u16 rxq_index;
3201 int rc;
3202
3203 /* Should we steer this flow to a different hardware queue? */
69a19ee6
BH
3204 if (!skb_rx_queue_recorded(skb) || !dev->rx_cpu_rmap ||
3205 !(dev->features & NETIF_F_NTUPLE))
c445477d
BH
3206 goto out;
3207 rxq_index = cpu_rmap_lookup_index(dev->rx_cpu_rmap, next_cpu);
3208 if (rxq_index == skb_get_rx_queue(skb))
3209 goto out;
3210
3211 rxqueue = dev->_rx + rxq_index;
3212 flow_table = rcu_dereference(rxqueue->rps_flow_table);
3213 if (!flow_table)
3214 goto out;
61b905da 3215 flow_id = skb_get_hash(skb) & flow_table->mask;
c445477d
BH
3216 rc = dev->netdev_ops->ndo_rx_flow_steer(dev, skb,
3217 rxq_index, flow_id);
3218 if (rc < 0)
3219 goto out;
3220 old_rflow = rflow;
3221 rflow = &flow_table->flows[flow_id];
c445477d
BH
3222 rflow->filter = rc;
3223 if (old_rflow->filter == rflow->filter)
3224 old_rflow->filter = RPS_NO_FILTER;
3225 out:
3226#endif
3227 rflow->last_qtail =
09994d1b 3228 per_cpu(softnet_data, next_cpu).input_queue_head;
c445477d
BH
3229 }
3230
09994d1b 3231 rflow->cpu = next_cpu;
c445477d
BH
3232 return rflow;
3233}
3234
bfb564e7
KK
3235/*
3236 * get_rps_cpu is called from netif_receive_skb and returns the target
3237 * CPU from the RPS map of the receiving queue for a given skb.
3238 * rcu_read_lock must be held on entry.
3239 */
3240static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
3241 struct rps_dev_flow **rflowp)
3242{
567e4b79
ED
3243 const struct rps_sock_flow_table *sock_flow_table;
3244 struct netdev_rx_queue *rxqueue = dev->_rx;
bfb564e7 3245 struct rps_dev_flow_table *flow_table;
567e4b79 3246 struct rps_map *map;
bfb564e7 3247 int cpu = -1;
567e4b79 3248 u32 tcpu;
61b905da 3249 u32 hash;
bfb564e7
KK
3250
3251 if (skb_rx_queue_recorded(skb)) {
3252 u16 index = skb_get_rx_queue(skb);
567e4b79 3253
62fe0b40
BH
3254 if (unlikely(index >= dev->real_num_rx_queues)) {
3255 WARN_ONCE(dev->real_num_rx_queues > 1,
3256 "%s received packet on queue %u, but number "
3257 "of RX queues is %u\n",
3258 dev->name, index, dev->real_num_rx_queues);
bfb564e7
KK
3259 goto done;
3260 }
567e4b79
ED
3261 rxqueue += index;
3262 }
bfb564e7 3263
567e4b79
ED
3264 /* Avoid computing hash if RFS/RPS is not active for this rxqueue */
3265
3266 flow_table = rcu_dereference(rxqueue->rps_flow_table);
6e3f7faf 3267 map = rcu_dereference(rxqueue->rps_map);
567e4b79 3268 if (!flow_table && !map)
bfb564e7
KK
3269 goto done;
3270
2d47b459 3271 skb_reset_network_header(skb);
61b905da
TH
3272 hash = skb_get_hash(skb);
3273 if (!hash)
bfb564e7
KK
3274 goto done;
3275
fec5e652
TH
3276 sock_flow_table = rcu_dereference(rps_sock_flow_table);
3277 if (flow_table && sock_flow_table) {
fec5e652 3278 struct rps_dev_flow *rflow;
567e4b79
ED
3279 u32 next_cpu;
3280 u32 ident;
3281
3282 /* First check into global flow table if there is a match */
3283 ident = sock_flow_table->ents[hash & sock_flow_table->mask];
3284 if ((ident ^ hash) & ~rps_cpu_mask)
3285 goto try_rps;
fec5e652 3286
567e4b79
ED
3287 next_cpu = ident & rps_cpu_mask;
3288
3289 /* OK, now we know there is a match,
3290 * we can look at the local (per receive queue) flow table
3291 */
61b905da 3292 rflow = &flow_table->flows[hash & flow_table->mask];
fec5e652
TH
3293 tcpu = rflow->cpu;
3294
fec5e652
TH
3295 /*
3296 * If the desired CPU (where last recvmsg was done) is
3297 * different from current CPU (one in the rx-queue flow
3298 * table entry), switch if one of the following holds:
a31196b0 3299 * - Current CPU is unset (>= nr_cpu_ids).
fec5e652
TH
3300 * - Current CPU is offline.
3301 * - The current CPU's queue tail has advanced beyond the
3302 * last packet that was enqueued using this table entry.
3303 * This guarantees that all previous packets for the flow
3304 * have been dequeued, thus preserving in order delivery.
3305 */
3306 if (unlikely(tcpu != next_cpu) &&
a31196b0 3307 (tcpu >= nr_cpu_ids || !cpu_online(tcpu) ||
fec5e652 3308 ((int)(per_cpu(softnet_data, tcpu).input_queue_head -
baefa31d
TH
3309 rflow->last_qtail)) >= 0)) {
3310 tcpu = next_cpu;
c445477d 3311 rflow = set_rps_cpu(dev, skb, rflow, next_cpu);
baefa31d 3312 }
c445477d 3313
a31196b0 3314 if (tcpu < nr_cpu_ids && cpu_online(tcpu)) {
fec5e652
TH
3315 *rflowp = rflow;
3316 cpu = tcpu;
3317 goto done;
3318 }
3319 }
3320
567e4b79
ED
3321try_rps:
3322
0a9627f2 3323 if (map) {
8fc54f68 3324 tcpu = map->cpus[reciprocal_scale(hash, map->len)];
0a9627f2
TH
3325 if (cpu_online(tcpu)) {
3326 cpu = tcpu;
3327 goto done;
3328 }
3329 }
3330
3331done:
0a9627f2
TH
3332 return cpu;
3333}
3334
c445477d
BH
3335#ifdef CONFIG_RFS_ACCEL
3336
3337/**
3338 * rps_may_expire_flow - check whether an RFS hardware filter may be removed
3339 * @dev: Device on which the filter was set
3340 * @rxq_index: RX queue index
3341 * @flow_id: Flow ID passed to ndo_rx_flow_steer()
3342 * @filter_id: Filter ID returned by ndo_rx_flow_steer()
3343 *
3344 * Drivers that implement ndo_rx_flow_steer() should periodically call
3345 * this function for each installed filter and remove the filters for
3346 * which it returns %true.
3347 */
3348bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index,
3349 u32 flow_id, u16 filter_id)
3350{
3351 struct netdev_rx_queue *rxqueue = dev->_rx + rxq_index;
3352 struct rps_dev_flow_table *flow_table;
3353 struct rps_dev_flow *rflow;
3354 bool expire = true;
a31196b0 3355 unsigned int cpu;
c445477d
BH
3356
3357 rcu_read_lock();
3358 flow_table = rcu_dereference(rxqueue->rps_flow_table);
3359 if (flow_table && flow_id <= flow_table->mask) {
3360 rflow = &flow_table->flows[flow_id];
3361 cpu = ACCESS_ONCE(rflow->cpu);
a31196b0 3362 if (rflow->filter == filter_id && cpu < nr_cpu_ids &&
c445477d
BH
3363 ((int)(per_cpu(softnet_data, cpu).input_queue_head -
3364 rflow->last_qtail) <
3365 (int)(10 * flow_table->mask)))
3366 expire = false;
3367 }
3368 rcu_read_unlock();
3369 return expire;
3370}
3371EXPORT_SYMBOL(rps_may_expire_flow);
3372
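/* Illustrative sketch, not part of the original file: the periodic scan
 * the kernel-doc above prescribes for drivers implementing
 * ndo_rx_flow_steer(). The filter table and removal helper are
 * hypothetical; real drivers keep equivalent per-queue state.
 */
struct example_rfs_filter {
	bool in_use;
	u32 flow_id;
};

static void example_remove_hw_filter(struct net_device *dev, unsigned int id);

static void example_expire_rfs_filters(struct net_device *dev, u16 rxq,
				       struct example_rfs_filter *tbl,
				       unsigned int n)
{
	unsigned int i;

	for (i = 0; i < n; i++) {
		if (!tbl[i].in_use)
			continue;
		if (rps_may_expire_flow(dev, rxq, tbl[i].flow_id, i)) {
			example_remove_hw_filter(dev, i);
			tbl[i].in_use = false;
		}
	}
}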
3373#endif /* CONFIG_RFS_ACCEL */
3374
0a9627f2 3375/* Called from hardirq (IPI) context */
e36fa2f7 3376static void rps_trigger_softirq(void *data)
0a9627f2 3377{
e36fa2f7
ED
3378 struct softnet_data *sd = data;
3379
eecfd7c4 3380 ____napi_schedule(sd, &sd->backlog);
dee42870 3381 sd->received_rps++;
0a9627f2 3382}
e36fa2f7 3383
fec5e652 3384#endif /* CONFIG_RPS */
0a9627f2 3385
e36fa2f7
ED
3386/*
 3387 * Check if this softnet_data structure belongs to another cpu.
 3388 * If yes, queue it to our IPI list and return 1.
 3389 * If no, return 0.
3390 */
3391static int rps_ipi_queued(struct softnet_data *sd)
3392{
3393#ifdef CONFIG_RPS
903ceff7 3394 struct softnet_data *mysd = this_cpu_ptr(&softnet_data);
e36fa2f7
ED
3395
3396 if (sd != mysd) {
3397 sd->rps_ipi_next = mysd->rps_ipi_list;
3398 mysd->rps_ipi_list = sd;
3399
3400 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
3401 return 1;
3402 }
3403#endif /* CONFIG_RPS */
3404 return 0;
3405}
3406
99bbc707
WB
3407#ifdef CONFIG_NET_FLOW_LIMIT
3408int netdev_flow_limit_table_len __read_mostly = (1 << 12);
3409#endif
3410
3411static bool skb_flow_limit(struct sk_buff *skb, unsigned int qlen)
3412{
3413#ifdef CONFIG_NET_FLOW_LIMIT
3414 struct sd_flow_limit *fl;
3415 struct softnet_data *sd;
3416 unsigned int old_flow, new_flow;
3417
3418 if (qlen < (netdev_max_backlog >> 1))
3419 return false;
3420
903ceff7 3421 sd = this_cpu_ptr(&softnet_data);
99bbc707
WB
3422
3423 rcu_read_lock();
3424 fl = rcu_dereference(sd->flow_limit);
3425 if (fl) {
3958afa1 3426 new_flow = skb_get_hash(skb) & (fl->num_buckets - 1);
99bbc707
WB
3427 old_flow = fl->history[fl->history_head];
3428 fl->history[fl->history_head] = new_flow;
3429
3430 fl->history_head++;
3431 fl->history_head &= FLOW_LIMIT_HISTORY - 1;
3432
3433 if (likely(fl->buckets[old_flow]))
3434 fl->buckets[old_flow]--;
3435
3436 if (++fl->buckets[new_flow] > (FLOW_LIMIT_HISTORY >> 1)) {
3437 fl->count++;
3438 rcu_read_unlock();
3439 return true;
3440 }
3441 }
3442 rcu_read_unlock();
3443#endif
3444 return false;
3445}
3446
0a9627f2
TH
3447/*
3448 * enqueue_to_backlog is called to queue an skb to a per CPU backlog
3449 * queue (may be a remote CPU queue).
3450 */
fec5e652
TH
3451static int enqueue_to_backlog(struct sk_buff *skb, int cpu,
3452 unsigned int *qtail)
0a9627f2 3453{
e36fa2f7 3454 struct softnet_data *sd;
0a9627f2 3455 unsigned long flags;
99bbc707 3456 unsigned int qlen;
0a9627f2 3457
e36fa2f7 3458 sd = &per_cpu(softnet_data, cpu);
0a9627f2
TH
3459
3460 local_irq_save(flags);
0a9627f2 3461
e36fa2f7 3462 rps_lock(sd);
e9e4dd32
JA
3463 if (!netif_running(skb->dev))
3464 goto drop;
99bbc707
WB
3465 qlen = skb_queue_len(&sd->input_pkt_queue);
3466 if (qlen <= netdev_max_backlog && !skb_flow_limit(skb, qlen)) {
e008f3f0 3467 if (qlen) {
0a9627f2 3468enqueue:
e36fa2f7 3469 __skb_queue_tail(&sd->input_pkt_queue, skb);
76cc8b13 3470 input_queue_tail_incr_save(sd, qtail);
e36fa2f7 3471 rps_unlock(sd);
152102c7 3472 local_irq_restore(flags);
0a9627f2
TH
3473 return NET_RX_SUCCESS;
3474 }
3475
ebda37c2
ED
 3476 /* Schedule NAPI for the backlog device.
 3477 * We can use a non-atomic operation since we own the queue lock.
3478 */
3479 if (!__test_and_set_bit(NAPI_STATE_SCHED, &sd->backlog.state)) {
e36fa2f7 3480 if (!rps_ipi_queued(sd))
eecfd7c4 3481 ____napi_schedule(sd, &sd->backlog);
0a9627f2
TH
3482 }
3483 goto enqueue;
3484 }
3485
e9e4dd32 3486drop:
dee42870 3487 sd->dropped++;
e36fa2f7 3488 rps_unlock(sd);
0a9627f2 3489
3490 local_irq_restore(flags);
3491
caf586e5 3492 atomic_long_inc(&skb->dev->rx_dropped);
3493 kfree_skb(skb);
3494 return NET_RX_DROP;
3495}
1da177e4 3496
ae78dbfa 3497static int netif_rx_internal(struct sk_buff *skb)
1da177e4 3498{
b0e28f1e 3499 int ret;
1da177e4 3500
588f0330 3501 net_timestamp_check(netdev_tstamp_prequeue, skb);
1da177e4 3502
cf66ba58 3503 trace_netif_rx(skb);
df334545 3504#ifdef CONFIG_RPS
c5905afb 3505 if (static_key_false(&rps_needed)) {
fec5e652 3506 struct rps_dev_flow voidflow, *rflow = &voidflow;
3507 int cpu;
3508
cece1945 3509 preempt_disable();
b0e28f1e 3510 rcu_read_lock();
3511
3512 cpu = get_rps_cpu(skb->dev, skb, &rflow);
3513 if (cpu < 0)
3514 cpu = smp_processor_id();
3515
3516 ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
3517
b0e28f1e 3518 rcu_read_unlock();
cece1945 3519 preempt_enable();
3520 } else
3521#endif
3522 {
3523 unsigned int qtail;
3524 ret = enqueue_to_backlog(skb, get_cpu(), &qtail);
3525 put_cpu();
3526 }
b0e28f1e 3527 return ret;
1da177e4 3528}
3529
3530/**
3531 * netif_rx - post buffer to the network code
3532 * @skb: buffer to post
3533 *
3534 * This function receives a packet from a device driver and queues it for
3535 * the upper (protocol) levels to process. It always succeeds. The buffer
3536 * may be dropped during processing for congestion control or by the
3537 * protocol layers.
3538 *
3539 * return values:
3540 * NET_RX_SUCCESS (no congestion)
3541 * NET_RX_DROP (packet was dropped)
3542 *
3543 */
3544
3545int netif_rx(struct sk_buff *skb)
3546{
3547 trace_netif_rx_entry(skb);
3548
3549 return netif_rx_internal(skb);
3550}
d1b19dff 3551EXPORT_SYMBOL(netif_rx);
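
/*
 * [Editorial example, not part of dev.c] A minimal sketch of the driver
 * side of netif_rx(): a hypothetical non-NAPI driver copies one frame out
 * of its hardware buffer and posts it. Everything prefixed my_ is an
 * assumption for illustration; netdev_alloc_skb_ip_align(), skb_put(),
 * eth_type_trans() and netif_rx() are the real entry points.
 */
static void my_rx_one_frame(struct net_device *dev, void *hw_buf, int len)
{
	struct sk_buff *skb;

	skb = netdev_alloc_skb_ip_align(dev, len);
	if (!skb) {
		dev->stats.rx_dropped++;
		return;
	}
	memcpy(skb_put(skb, len), hw_buf, len);		/* copy frame from hw */
	skb->protocol = eth_type_trans(skb, dev);	/* also sets skb->dev */
	netif_rx(skb);		/* enqueue to the per-CPU backlog above */
}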
3552
3553int netif_rx_ni(struct sk_buff *skb)
3554{
3555 int err;
3556
3557 trace_netif_rx_ni_entry(skb);
3558
1da177e4 3559 preempt_disable();
ae78dbfa 3560 err = netif_rx_internal(skb);
3561 if (local_softirq_pending())
3562 do_softirq();
3563 preempt_enable();
3564
3565 return err;
3566}
3567EXPORT_SYMBOL(netif_rx_ni);
3568
3569static void net_tx_action(struct softirq_action *h)
3570{
903ceff7 3571 struct softnet_data *sd = this_cpu_ptr(&softnet_data);
3572
3573 if (sd->completion_queue) {
3574 struct sk_buff *clist;
3575
3576 local_irq_disable();
3577 clist = sd->completion_queue;
3578 sd->completion_queue = NULL;
3579 local_irq_enable();
3580
3581 while (clist) {
3582 struct sk_buff *skb = clist;
3583 clist = clist->next;
3584
547b792c 3585 WARN_ON(atomic_read(&skb->users));
3586 if (likely(get_kfree_skb_cb(skb)->reason == SKB_REASON_CONSUMED))
3587 trace_consume_skb(skb);
3588 else
3589 trace_kfree_skb(skb, net_tx_action);
3590 __kfree_skb(skb);
3591 }
3592 }
3593
3594 if (sd->output_queue) {
37437bb2 3595 struct Qdisc *head;
3596
3597 local_irq_disable();
3598 head = sd->output_queue;
3599 sd->output_queue = NULL;
a9cbd588 3600 sd->output_queue_tailp = &sd->output_queue;
3601 local_irq_enable();
3602
3603 while (head) {
3604 struct Qdisc *q = head;
3605 spinlock_t *root_lock;
3606
3607 head = head->next_sched;
3608
5fb66229 3609 root_lock = qdisc_lock(q);
37437bb2 3610 if (spin_trylock(root_lock)) {
4e857c58 3611 smp_mb__before_atomic();
3612 clear_bit(__QDISC_STATE_SCHED,
3613 &q->state);
3614 qdisc_run(q);
3615 spin_unlock(root_lock);
1da177e4 3616 } else {
195648bb 3617 if (!test_bit(__QDISC_STATE_DEACTIVATED,
e8a83e10 3618 &q->state)) {
195648bb 3619 __netif_reschedule(q);
e8a83e10 3620 } else {
4e857c58 3621 smp_mb__before_atomic();
3622 clear_bit(__QDISC_STATE_SCHED,
3623 &q->state);
3624 }
3625 }
3626 }
3627 }
3628}
3629
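
/*
 * [Editorial example, not part of dev.c] How skbs reach the
 * completion_queue drained by net_tx_action(): a TX-completion hardirq
 * must not free skbs synchronously, so it uses the __dev_kfree_skb_irq()
 * wrappers, which queue the skb per-CPU and raise NET_TX_SOFTIRQ.
 * struct my_priv and my_ring_next_done() are hypothetical placeholders.
 */
struct my_priv {			/* hypothetical driver state */
	struct napi_struct napi;
	/* TX/RX ring bookkeeping would live here */
};

static irqreturn_t my_tx_done_irq(int irq, void *data)
{
	struct my_priv *priv = data;
	struct sk_buff *skb;

	while ((skb = my_ring_next_done(priv)))
		dev_consume_skb_irq(skb);	/* freed later in net_tx_action() */
	return IRQ_HANDLED;
}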
3630#if (defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)) && \
3631 (defined(CONFIG_ATM_LANE) || defined(CONFIG_ATM_LANE_MODULE))
3632/* This hook is defined here for ATM LANE */
3633int (*br_fdb_test_addr_hook)(struct net_device *dev,
3634 unsigned char *addr) __read_mostly;
4fb019a0 3635EXPORT_SYMBOL_GPL(br_fdb_test_addr_hook);
da678292 3636#endif
1da177e4 3637
3638static inline struct sk_buff *handle_ing(struct sk_buff *skb,
3639 struct packet_type **pt_prev,
3640 int *ret, struct net_device *orig_dev)
3641{
e7582bab 3642#ifdef CONFIG_NET_CLS_ACT
3643 struct tcf_proto *cl = rcu_dereference_bh(skb->dev->ingress_cl_list);
3644 struct tcf_result cl_res;
24824a09 3645
3646 /* If there's at least one ingress present somewhere (so
3647 * we get here via enabled static key), remaining devices
3648 * that are not configured with an ingress qdisc will bail
d2788d34 3649 * out here.
c9e99fd0 3650 */
d2788d34 3651 if (!cl)
4577139b 3652 return skb;
3653 if (*pt_prev) {
3654 *ret = deliver_skb(skb, *pt_prev, orig_dev);
3655 *pt_prev = NULL;
3656 }
3657
3365495c 3658 qdisc_skb_cb(skb)->pkt_len = skb->len;
c9e99fd0 3659 skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_INGRESS);
24ea591d 3660 qdisc_bstats_cpu_update(cl->q, skb);
c9e99fd0 3661
3b3ae880 3662 switch (tc_classify(skb, cl, &cl_res, false)) {
3663 case TC_ACT_OK:
3664 case TC_ACT_RECLASSIFY:
3665 skb->tc_index = TC_H_MIN(cl_res.classid);
3666 break;
3667 case TC_ACT_SHOT:
24ea591d 3668 qdisc_qstats_cpu_drop(cl->q);
3669 case TC_ACT_STOLEN:
3670 case TC_ACT_QUEUED:
3671 kfree_skb(skb);
3672 return NULL;
3673 case TC_ACT_REDIRECT:
3674 /* skb_mac_header check was done by cls/act_bpf, so
3675 * we can safely push the L2 header back before
3676 * redirecting to another netdev
3677 */
3678 __skb_push(skb, skb->mac_len);
3679 skb_do_redirect(skb);
3680 return NULL;
3681 default:
3682 break;
f697c3e8 3683 }
e7582bab 3684#endif /* CONFIG_NET_CLS_ACT */
3685 return skb;
3686}
1da177e4 3687
3688/**
3689 * netdev_rx_handler_register - register receive handler
3690 * @dev: device to register a handler for
3691 * @rx_handler: receive handler to register
93e2c32b 3692 * @rx_handler_data: data pointer that is used by rx handler
ab95bfe0 3693 *
e227867f 3694 * Register a receive handler for a device. This handler will then be
3695 * called from __netif_receive_skb. A negative errno code is returned
3696 * on a failure.
3697 *
3698 * The caller must hold the rtnl_mutex.
3699 *
3700 * For a general description of rx_handler, see enum rx_handler_result.
3701 */
3702int netdev_rx_handler_register(struct net_device *dev,
3703 rx_handler_func_t *rx_handler,
3704 void *rx_handler_data)
3705{
3706 ASSERT_RTNL();
3707
3708 if (dev->rx_handler)
3709 return -EBUSY;
3710
00cfec37 3711 /* Note: rx_handler_data must be set before rx_handler */
93e2c32b 3712 rcu_assign_pointer(dev->rx_handler_data, rx_handler_data);
3713 rcu_assign_pointer(dev->rx_handler, rx_handler);
3714
3715 return 0;
3716}
3717EXPORT_SYMBOL_GPL(netdev_rx_handler_register);
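
/*
 * [Editorial example, not part of dev.c] A pass-through rx_handler in the
 * style of bridge/macvlan, registered under RTNL as required above.
 * struct my_port and my_attach() are hypothetical.
 */
static rx_handler_result_t my_handle_frame(struct sk_buff **pskb)
{
	struct my_port *port = rcu_dereference((*pskb)->dev->rx_handler_data);

	if (!port)
		return RX_HANDLER_PASS;
	/* inspect, steal (RX_HANDLER_CONSUMED) or retarget the skb here */
	return RX_HANDLER_PASS;
}

static int my_attach(struct net_device *dev, struct my_port *port)
{
	int err;

	rtnl_lock();
	err = netdev_rx_handler_register(dev, my_handle_frame, port);
	rtnl_unlock();
	return err;	/* tear down later with netdev_rx_handler_unregister() */
}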
3718
3719/**
3720 * netdev_rx_handler_unregister - unregister receive handler
3721 * @dev: device to unregister a handler from
3722 *
166ec369 3723 * Unregister a receive handler from a device.
3724 *
3725 * The caller must hold the rtnl_mutex.
3726 */
3727void netdev_rx_handler_unregister(struct net_device *dev)
3728{
3729
3730 ASSERT_RTNL();
a9b3cd7f 3731 RCU_INIT_POINTER(dev->rx_handler, NULL);
3732 /* a reader seeing a non NULL rx_handler in a rcu_read_lock()
3733 * section has a guarantee to see a non NULL rx_handler_data
3734 * as well.
3735 */
3736 synchronize_net();
a9b3cd7f 3737 RCU_INIT_POINTER(dev->rx_handler_data, NULL);
3738}
3739EXPORT_SYMBOL_GPL(netdev_rx_handler_unregister);
3740
3741/*
3742 * Limit the use of PFMEMALLOC reserves to those protocols that implement
3743 * the special handling of PFMEMALLOC skbs.
3744 */
3745static bool skb_pfmemalloc_protocol(struct sk_buff *skb)
3746{
3747 switch (skb->protocol) {
3748 case htons(ETH_P_ARP):
3749 case htons(ETH_P_IP):
3750 case htons(ETH_P_IPV6):
3751 case htons(ETH_P_8021Q):
3752 case htons(ETH_P_8021AD):
3753 return true;
3754 default:
3755 return false;
3756 }
3757}
3758
3759static inline int nf_ingress(struct sk_buff *skb, struct packet_type **pt_prev,
3760 int *ret, struct net_device *orig_dev)
3761{
e7582bab 3762#ifdef CONFIG_NETFILTER_INGRESS
3763 if (nf_hook_ingress_active(skb)) {
3764 if (*pt_prev) {
3765 *ret = deliver_skb(skb, *pt_prev, orig_dev);
3766 *pt_prev = NULL;
3767 }
3768
3769 return nf_hook_ingress(skb);
3770 }
e7582bab 3771#endif /* CONFIG_NETFILTER_INGRESS */
3772 return 0;
3773}
e687ad60 3774
9754e293 3775static int __netif_receive_skb_core(struct sk_buff *skb, bool pfmemalloc)
3776{
3777 struct packet_type *ptype, *pt_prev;
ab95bfe0 3778 rx_handler_func_t *rx_handler;
f2ccd8fa 3779 struct net_device *orig_dev;
8a4eb573 3780 bool deliver_exact = false;
1da177e4 3781 int ret = NET_RX_DROP;
252e3346 3782 __be16 type;
1da177e4 3783
588f0330 3784 net_timestamp_check(!netdev_tstamp_prequeue, skb);
81bbb3d4 3785
cf66ba58 3786 trace_netif_receive_skb(skb);
9b22ea56 3787
cc9bd5ce 3788 orig_dev = skb->dev;
8f903c70 3789
c1d2bbe1 3790 skb_reset_network_header(skb);
3791 if (!skb_transport_header_was_set(skb))
3792 skb_reset_transport_header(skb);
0b5c9db1 3793 skb_reset_mac_len(skb);
3794
3795 pt_prev = NULL;
3796
63d8ea7f 3797another_round:
b6858177 3798 skb->skb_iif = skb->dev->ifindex;
3799
3800 __this_cpu_inc(softnet_data.processed);
3801
3802 if (skb->protocol == cpu_to_be16(ETH_P_8021Q) ||
3803 skb->protocol == cpu_to_be16(ETH_P_8021AD)) {
0d5501c1 3804 skb = skb_vlan_untag(skb);
bcc6d479 3805 if (unlikely(!skb))
2c17d27c 3806 goto out;
3807 }
3808
3809#ifdef CONFIG_NET_CLS_ACT
3810 if (skb->tc_verd & TC_NCLS) {
3811 skb->tc_verd = CLR_TC_NCLS(skb->tc_verd);
3812 goto ncls;
3813 }
3814#endif
3815
9754e293 3816 if (pfmemalloc)
3817 goto skip_taps;
3818
1da177e4 3819 list_for_each_entry_rcu(ptype, &ptype_all, list) {
3820 if (pt_prev)
3821 ret = deliver_skb(skb, pt_prev, orig_dev);
3822 pt_prev = ptype;
3823 }
3824
3825 list_for_each_entry_rcu(ptype, &skb->dev->ptype_all, list) {
3826 if (pt_prev)
3827 ret = deliver_skb(skb, pt_prev, orig_dev);
3828 pt_prev = ptype;
3829 }
3830
b4b9e355 3831skip_taps:
1cf51900 3832#ifdef CONFIG_NET_INGRESS
3833 if (static_key_false(&ingress_needed)) {
3834 skb = handle_ing(skb, &pt_prev, &ret, orig_dev);
3835 if (!skb)
2c17d27c 3836 goto out;
3837
3838 if (nf_ingress(skb, &pt_prev, &ret, orig_dev) < 0)
2c17d27c 3839 goto out;
4577139b 3840 }
3841#endif
3842#ifdef CONFIG_NET_CLS_ACT
4577139b 3843 skb->tc_verd = 0;
3844ncls:
3845#endif
9754e293 3846 if (pfmemalloc && !skb_pfmemalloc_protocol(skb))
3847 goto drop;
3848
df8a39de 3849 if (skb_vlan_tag_present(skb)) {
3850 if (pt_prev) {
3851 ret = deliver_skb(skb, pt_prev, orig_dev);
3852 pt_prev = NULL;
3853 }
48cc32d3 3854 if (vlan_do_receive(&skb))
3855 goto another_round;
3856 else if (unlikely(!skb))
2c17d27c 3857 goto out;
3858 }
3859
48cc32d3 3860 rx_handler = rcu_dereference(skb->dev->rx_handler);
3861 if (rx_handler) {
3862 if (pt_prev) {
3863 ret = deliver_skb(skb, pt_prev, orig_dev);
3864 pt_prev = NULL;
3865 }
3866 switch (rx_handler(&skb)) {
3867 case RX_HANDLER_CONSUMED:
3bc1b1ad 3868 ret = NET_RX_SUCCESS;
2c17d27c 3869 goto out;
8a4eb573 3870 case RX_HANDLER_ANOTHER:
63d8ea7f 3871 goto another_round;
3872 case RX_HANDLER_EXACT:
3873 deliver_exact = true;
3874 case RX_HANDLER_PASS:
3875 break;
3876 default:
3877 BUG();
3878 }
ab95bfe0 3879 }
1da177e4 3880
3881 if (unlikely(skb_vlan_tag_present(skb))) {
3882 if (skb_vlan_tag_get_id(skb))
3883 skb->pkt_type = PACKET_OTHERHOST;
3884 /* Note: we might in the future use prio bits
3885 * and set skb->priority like in vlan_do_receive()
3886 * For the time being, just ignore Priority Code Point
3887 */
3888 skb->vlan_tci = 0;
3889 }
48cc32d3 3890
3891 type = skb->protocol;
3892
63d8ea7f 3893 /* deliver only exact match when indicated */
3894 if (likely(!deliver_exact)) {
3895 deliver_ptype_list_skb(skb, &pt_prev, orig_dev, type,
3896 &ptype_base[ntohs(type) &
3897 PTYPE_HASH_MASK]);
3898 }
1f3c8804 3899
3900 deliver_ptype_list_skb(skb, &pt_prev, orig_dev, type,
3901 &orig_dev->ptype_specific);
3902
3903 if (unlikely(skb->dev != orig_dev)) {
3904 deliver_ptype_list_skb(skb, &pt_prev, orig_dev, type,
3905 &skb->dev->ptype_specific);
3906 }
3907
3908 if (pt_prev) {
1080e512 3909 if (unlikely(skb_orphan_frags(skb, GFP_ATOMIC)))
0e698bf6 3910 goto drop;
3911 else
3912 ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
1da177e4 3913 } else {
b4b9e355 3914drop:
caf586e5 3915 atomic_long_inc(&skb->dev->rx_dropped);
3916 kfree_skb(skb);
3917 /* Jamal, now you will not be able to escape explaining
3918 * to me how you were going to use this. :-)
3919 */
3920 ret = NET_RX_DROP;
3921 }
3922
2c17d27c 3923out:
3924 return ret;
3925}
3926
3927static int __netif_receive_skb(struct sk_buff *skb)
3928{
3929 int ret;
3930
3931 if (sk_memalloc_socks() && skb_pfmemalloc(skb)) {
3932 unsigned long pflags = current->flags;
3933
3934 /*
3935 * PFMEMALLOC skbs are special, they should
3936 * - be delivered to SOCK_MEMALLOC sockets only
3937 * - stay away from userspace
3938 * - have bounded memory usage
3939 *
3940 * Use PF_MEMALLOC as this saves us from propagating the allocation
3941 * context down to all allocation sites.
3942 */
3943 current->flags |= PF_MEMALLOC;
3944 ret = __netif_receive_skb_core(skb, true);
3945 tsk_restore_flags(current, pflags, PF_MEMALLOC);
3946 } else
3947 ret = __netif_receive_skb_core(skb, false);
3948
3949 return ret;
3950}
0a9627f2 3951
ae78dbfa 3952static int netif_receive_skb_internal(struct sk_buff *skb)
0a9627f2 3953{
3954 int ret;
3955
588f0330 3956 net_timestamp_check(netdev_tstamp_prequeue, skb);
3b098e2d 3957
3958 if (skb_defer_rx_timestamp(skb))
3959 return NET_RX_SUCCESS;
3960
3961 rcu_read_lock();
3962
df334545 3963#ifdef CONFIG_RPS
c5905afb 3964 if (static_key_false(&rps_needed)) {
3b098e2d 3965 struct rps_dev_flow voidflow, *rflow = &voidflow;
2c17d27c 3966 int cpu = get_rps_cpu(skb->dev, skb, &rflow);
0a9627f2 3967
3968 if (cpu >= 0) {
3969 ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
3970 rcu_read_unlock();
adc9300e 3971 return ret;
3b098e2d 3972 }
fec5e652 3973 }
1e94d72f 3974#endif
3975 ret = __netif_receive_skb(skb);
3976 rcu_read_unlock();
3977 return ret;
0a9627f2 3978}
3979
3980/**
3981 * netif_receive_skb - process receive buffer from network
3982 * @skb: buffer to process
3983 *
3984 * netif_receive_skb() is the main receive data processing function.
3985 * It always succeeds. The buffer may be dropped during processing
3986 * for congestion control or by the protocol layers.
3987 *
3988 * This function may only be called from softirq context and interrupts
3989 * should be enabled.
3990 *
3991 * Return values (usually ignored):
3992 * NET_RX_SUCCESS: no congestion
3993 * NET_RX_DROP: packet was dropped
3994 */
04eb4489 3995int netif_receive_skb(struct sk_buff *skb)
3996{
3997 trace_netif_receive_skb_entry(skb);
3998
3999 return netif_receive_skb_internal(skb);
4000}
04eb4489 4001EXPORT_SYMBOL(netif_receive_skb);
1da177e4 4002
4003/* Network device is going away, flush any packets still pending
4004 * Called with irqs disabled.
4005 */
152102c7 4006static void flush_backlog(void *arg)
6e583ce5 4007{
152102c7 4008 struct net_device *dev = arg;
903ceff7 4009 struct softnet_data *sd = this_cpu_ptr(&softnet_data);
4010 struct sk_buff *skb, *tmp;
4011
e36fa2f7 4012 rps_lock(sd);
6e7676c1 4013 skb_queue_walk_safe(&sd->input_pkt_queue, skb, tmp) {
6e583ce5 4014 if (skb->dev == dev) {
e36fa2f7 4015 __skb_unlink(skb, &sd->input_pkt_queue);
6e583ce5 4016 kfree_skb(skb);
76cc8b13 4017 input_queue_head_incr(sd);
6e583ce5 4018 }
6e7676c1 4019 }
e36fa2f7 4020 rps_unlock(sd);
4021
4022 skb_queue_walk_safe(&sd->process_queue, skb, tmp) {
4023 if (skb->dev == dev) {
4024 __skb_unlink(skb, &sd->process_queue);
4025 kfree_skb(skb);
76cc8b13 4026 input_queue_head_incr(sd);
4027 }
4028 }
4029}
4030
4031static int napi_gro_complete(struct sk_buff *skb)
4032{
22061d80 4033 struct packet_offload *ptype;
d565b0a1 4034 __be16 type = skb->protocol;
22061d80 4035 struct list_head *head = &offload_base;
4036 int err = -ENOENT;
4037
4038 BUILD_BUG_ON(sizeof(struct napi_gro_cb) > sizeof(skb->cb));
4039
4040 if (NAPI_GRO_CB(skb)->count == 1) {
4041 skb_shinfo(skb)->gso_size = 0;
d565b0a1 4042 goto out;
fc59f9a3 4043 }
4044
4045 rcu_read_lock();
4046 list_for_each_entry_rcu(ptype, head, list) {
f191a1d1 4047 if (ptype->type != type || !ptype->callbacks.gro_complete)
4048 continue;
4049
299603e8 4050 err = ptype->callbacks.gro_complete(skb, 0);
4051 break;
4052 }
4053 rcu_read_unlock();
4054
4055 if (err) {
4056 WARN_ON(&ptype->list == head);
4057 kfree_skb(skb);
4058 return NET_RX_SUCCESS;
4059 }
4060
4061out:
ae78dbfa 4062 return netif_receive_skb_internal(skb);
4063}
4064
4065 /* napi->gro_list contains packets ordered by age;
4066 * the youngest packets are at its head.
4067 * Complete skbs in reverse order to reduce latencies.
4068 */
4069void napi_gro_flush(struct napi_struct *napi, bool flush_old)
d565b0a1 4070{
2e71a6f8 4071 struct sk_buff *skb, *prev = NULL;
d565b0a1 4072
4073 /* scan list and build reverse chain */
4074 for (skb = napi->gro_list; skb != NULL; skb = skb->next) {
4075 skb->prev = prev;
4076 prev = skb;
4077 }
4078
4079 for (skb = prev; skb; skb = prev) {
d565b0a1 4080 skb->next = NULL;
4081
4082 if (flush_old && NAPI_GRO_CB(skb)->age == jiffies)
4083 return;
4084
4085 prev = skb->prev;
d565b0a1 4086 napi_gro_complete(skb);
2e71a6f8 4087 napi->gro_count--;
4088 }
4089
4090 napi->gro_list = NULL;
4091}
86cac58b 4092EXPORT_SYMBOL(napi_gro_flush);
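
/*
 * [Editorial example, not part of dev.c] The classic explicit-flush
 * pattern at the end of a driver's poll(): push out every held GRO skb
 * before completing so nothing waits for the next interrupt. Sketch only;
 * napi_complete_done() below performs an equivalent flush internally.
 */
static void my_poll_done(struct napi_struct *napi, int work_done, int budget)
{
	if (work_done < budget) {
		napi_gro_flush(napi, false);	/* flush_old == false: flush all */
		napi_complete(napi);
	}
}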
d565b0a1 4093
4094static void gro_list_prepare(struct napi_struct *napi, struct sk_buff *skb)
4095{
4096 struct sk_buff *p;
4097 unsigned int maclen = skb->dev->hard_header_len;
0b4cec8c 4098 u32 hash = skb_get_hash_raw(skb);
4099
4100 for (p = napi->gro_list; p; p = p->next) {
4101 unsigned long diffs;
4102
4103 NAPI_GRO_CB(p)->flush = 0;
4104
4105 if (hash != skb_get_hash_raw(p)) {
4106 NAPI_GRO_CB(p)->same_flow = 0;
4107 continue;
4108 }
4109
4110 diffs = (unsigned long)p->dev ^ (unsigned long)skb->dev;
4111 diffs |= p->vlan_tci ^ skb->vlan_tci;
4112 if (maclen == ETH_HLEN)
4113 diffs |= compare_ether_header(skb_mac_header(p),
a50e233c 4114 skb_mac_header(skb));
4115 else if (!diffs)
4116 diffs = memcmp(skb_mac_header(p),
a50e233c 4117 skb_mac_header(skb),
4118 maclen);
4119 NAPI_GRO_CB(p)->same_flow = !diffs;
4120 }
4121}
4122
4123static void skb_gro_reset_offset(struct sk_buff *skb)
4124{
4125 const struct skb_shared_info *pinfo = skb_shinfo(skb);
4126 const skb_frag_t *frag0 = &pinfo->frags[0];
4127
4128 NAPI_GRO_CB(skb)->data_offset = 0;
4129 NAPI_GRO_CB(skb)->frag0 = NULL;
4130 NAPI_GRO_CB(skb)->frag0_len = 0;
4131
4132 if (skb_mac_header(skb) == skb_tail_pointer(skb) &&
4133 pinfo->nr_frags &&
4134 !PageHighMem(skb_frag_page(frag0))) {
4135 NAPI_GRO_CB(skb)->frag0 = skb_frag_address(frag0);
4136 NAPI_GRO_CB(skb)->frag0_len = skb_frag_size(frag0);
4137 }
4138}
4139
4140static void gro_pull_from_frag0(struct sk_buff *skb, int grow)
4141{
4142 struct skb_shared_info *pinfo = skb_shinfo(skb);
4143
4144 BUG_ON(skb->end - skb->tail < grow);
4145
4146 memcpy(skb_tail_pointer(skb), NAPI_GRO_CB(skb)->frag0, grow);
4147
4148 skb->data_len -= grow;
4149 skb->tail += grow;
4150
4151 pinfo->frags[0].page_offset += grow;
4152 skb_frag_size_sub(&pinfo->frags[0], grow);
4153
4154 if (unlikely(!skb_frag_size(&pinfo->frags[0]))) {
4155 skb_frag_unref(skb, 0);
4156 memmove(pinfo->frags, pinfo->frags + 1,
4157 --pinfo->nr_frags * sizeof(pinfo->frags[0]));
4158 }
4159}
4160
bb728820 4161static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
4162{
4163 struct sk_buff **pp = NULL;
22061d80 4164 struct packet_offload *ptype;
d565b0a1 4165 __be16 type = skb->protocol;
22061d80 4166 struct list_head *head = &offload_base;
0da2afd5 4167 int same_flow;
5b252f0c 4168 enum gro_result ret;
a50e233c 4169 int grow;
d565b0a1 4170
9c62a68d 4171 if (!(skb->dev->features & NETIF_F_GRO))
4172 goto normal;
4173
5a212329 4174 if (skb_is_gso(skb) || skb_has_frag_list(skb) || skb->csum_bad)
4175 goto normal;
4176
4177 gro_list_prepare(napi, skb);
4178
4179 rcu_read_lock();
4180 list_for_each_entry_rcu(ptype, head, list) {
f191a1d1 4181 if (ptype->type != type || !ptype->callbacks.gro_receive)
4182 continue;
4183
86911732 4184 skb_set_network_header(skb, skb_gro_offset(skb));
efd9450e 4185 skb_reset_mac_len(skb);
4186 NAPI_GRO_CB(skb)->same_flow = 0;
4187 NAPI_GRO_CB(skb)->flush = 0;
5d38a079 4188 NAPI_GRO_CB(skb)->free = 0;
b582ef09 4189 NAPI_GRO_CB(skb)->udp_mark = 0;
15e2396d 4190 NAPI_GRO_CB(skb)->gro_remcsum_start = 0;
d565b0a1 4191
4192 /* Setup for GRO checksum validation */
4193 switch (skb->ip_summed) {
4194 case CHECKSUM_COMPLETE:
4195 NAPI_GRO_CB(skb)->csum = skb->csum;
4196 NAPI_GRO_CB(skb)->csum_valid = 1;
4197 NAPI_GRO_CB(skb)->csum_cnt = 0;
4198 break;
4199 case CHECKSUM_UNNECESSARY:
4200 NAPI_GRO_CB(skb)->csum_cnt = skb->csum_level + 1;
4201 NAPI_GRO_CB(skb)->csum_valid = 0;
4202 break;
4203 default:
4204 NAPI_GRO_CB(skb)->csum_cnt = 0;
4205 NAPI_GRO_CB(skb)->csum_valid = 0;
4206 }
d565b0a1 4207
f191a1d1 4208 pp = ptype->callbacks.gro_receive(&napi->gro_list, skb);
4209 break;
4210 }
4211 rcu_read_unlock();
4212
4213 if (&ptype->list == head)
4214 goto normal;
4215
0da2afd5 4216 same_flow = NAPI_GRO_CB(skb)->same_flow;
5d0d9be8 4217 ret = NAPI_GRO_CB(skb)->free ? GRO_MERGED_FREE : GRO_MERGED;
0da2afd5 4218
4219 if (pp) {
4220 struct sk_buff *nskb = *pp;
4221
4222 *pp = nskb->next;
4223 nskb->next = NULL;
4224 napi_gro_complete(nskb);
4ae5544f 4225 napi->gro_count--;
4226 }
4227
0da2afd5 4228 if (same_flow)
4229 goto ok;
4230
600adc18 4231 if (NAPI_GRO_CB(skb)->flush)
d565b0a1 4232 goto normal;
d565b0a1 4233
4234 if (unlikely(napi->gro_count >= MAX_GRO_SKBS)) {
4235 struct sk_buff *nskb = napi->gro_list;
4236
4237 /* locate the end of the list to select the 'oldest' flow */
4238 while (nskb->next) {
4239 pp = &nskb->next;
4240 nskb = *pp;
4241 }
4242 *pp = NULL;
4243 nskb->next = NULL;
4244 napi_gro_complete(nskb);
4245 } else {
4246 napi->gro_count++;
4247 }
d565b0a1 4248 NAPI_GRO_CB(skb)->count = 1;
2e71a6f8 4249 NAPI_GRO_CB(skb)->age = jiffies;
29e98242 4250 NAPI_GRO_CB(skb)->last = skb;
86911732 4251 skb_shinfo(skb)->gso_size = skb_gro_len(skb);
4252 skb->next = napi->gro_list;
4253 napi->gro_list = skb;
5d0d9be8 4254 ret = GRO_HELD;
d565b0a1 4255
ad0f9904 4256pull:
4257 grow = skb_gro_offset(skb) - skb_headlen(skb);
4258 if (grow > 0)
4259 gro_pull_from_frag0(skb, grow);
d565b0a1 4260ok:
5d0d9be8 4261 return ret;
4262
4263normal:
4264 ret = GRO_NORMAL;
4265 goto pull;
5d38a079 4266}
96e93eab 4267
4268struct packet_offload *gro_find_receive_by_type(__be16 type)
4269{
4270 struct list_head *offload_head = &offload_base;
4271 struct packet_offload *ptype;
4272
4273 list_for_each_entry_rcu(ptype, offload_head, list) {
4274 if (ptype->type != type || !ptype->callbacks.gro_receive)
4275 continue;
4276 return ptype;
4277 }
4278 return NULL;
4279}
e27a2f83 4280EXPORT_SYMBOL(gro_find_receive_by_type);
4281
4282struct packet_offload *gro_find_complete_by_type(__be16 type)
4283{
4284 struct list_head *offload_head = &offload_base;
4285 struct packet_offload *ptype;
4286
4287 list_for_each_entry_rcu(ptype, offload_head, list) {
4288 if (ptype->type != type || !ptype->callbacks.gro_complete)
4289 continue;
4290 return ptype;
4291 }
4292 return NULL;
4293}
e27a2f83 4294EXPORT_SYMBOL(gro_find_complete_by_type);
5d38a079 4295
bb728820 4296static gro_result_t napi_skb_finish(gro_result_t ret, struct sk_buff *skb)
5d38a079 4297{
4298 switch (ret) {
4299 case GRO_NORMAL:
ae78dbfa 4300 if (netif_receive_skb_internal(skb))
4301 ret = GRO_DROP;
4302 break;
5d38a079 4303
5d0d9be8 4304 case GRO_DROP:
4305 kfree_skb(skb);
4306 break;
5b252f0c 4307
daa86548 4308 case GRO_MERGED_FREE:
4309 if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD)
4310 kmem_cache_free(skbuff_head_cache, skb);
4311 else
4312 __kfree_skb(skb);
4313 break;
4314
4315 case GRO_HELD:
4316 case GRO_MERGED:
4317 break;
4318 }
4319
c7c4b3b6 4320 return ret;
5d0d9be8 4321}
5d0d9be8 4322
c7c4b3b6 4323gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
5d0d9be8 4324{
ae78dbfa 4325 trace_napi_gro_receive_entry(skb);
86911732 4326
a50e233c
ED
4327 skb_gro_reset_offset(skb);
4328
89c5fa33 4329 return napi_skb_finish(dev_gro_receive(napi, skb), skb);
4330}
4331EXPORT_SYMBOL(napi_gro_receive);
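
/*
 * [Editorial example, not part of dev.c] A NAPI poll() feeding GRO,
 * reusing the hypothetical struct my_priv from the earlier sketch;
 * my_ring_get_skb() is a stand-in for real ring processing.
 */
static int my_napi_poll(struct napi_struct *napi, int budget)
{
	struct my_priv *priv = container_of(napi, struct my_priv, napi);
	int work_done = 0;

	while (work_done < budget) {
		struct sk_buff *skb = my_ring_get_skb(priv);

		if (!skb)
			break;
		skb->protocol = eth_type_trans(skb, napi->dev);
		napi_gro_receive(napi, skb);	/* merge, hold or deliver */
		work_done++;
	}
	if (work_done < budget) {
		napi_complete_done(napi, work_done);
		/* re-enable device RX interrupts here */
	}
	return work_done;
}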
4332
d0c2b0d2 4333static void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb)
96e93eab 4334{
4335 if (unlikely(skb->pfmemalloc)) {
4336 consume_skb(skb);
4337 return;
4338 }
96e93eab 4339 __skb_pull(skb, skb_headlen(skb));
4340 /* restore the reserve we had after netdev_alloc_skb_ip_align() */
4341 skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN - skb_headroom(skb));
3701e513 4342 skb->vlan_tci = 0;
66c46d74 4343 skb->dev = napi->dev;
6d152e23 4344 skb->skb_iif = 0;
4345 skb->encapsulation = 0;
4346 skb_shinfo(skb)->gso_type = 0;
e33d0ba8 4347 skb->truesize = SKB_TRUESIZE(skb_end_offset(skb));
4348
4349 napi->skb = skb;
4350}
96e93eab 4351
76620aaf 4352struct sk_buff *napi_get_frags(struct napi_struct *napi)
5d38a079 4353{
5d38a079 4354 struct sk_buff *skb = napi->skb;
4355
4356 if (!skb) {
fd11a83d 4357 skb = napi_alloc_skb(napi, GRO_MAX_HEAD);
84b9cd63 4358 napi->skb = skb;
80595d59 4359 }
4360 return skb;
4361}
76620aaf 4362EXPORT_SYMBOL(napi_get_frags);
96e93eab 4363
4364static gro_result_t napi_frags_finish(struct napi_struct *napi,
4365 struct sk_buff *skb,
4366 gro_result_t ret)
96e93eab 4367{
4368 switch (ret) {
4369 case GRO_NORMAL:
4370 case GRO_HELD:
4371 __skb_push(skb, ETH_HLEN);
4372 skb->protocol = eth_type_trans(skb, skb->dev);
4373 if (ret == GRO_NORMAL && netif_receive_skb_internal(skb))
c7c4b3b6 4374 ret = GRO_DROP;
86911732 4375 break;
5d38a079 4376
5d0d9be8 4377 case GRO_DROP:
4378 case GRO_MERGED_FREE:
4379 napi_reuse_skb(napi, skb);
4380 break;
4381
4382 case GRO_MERGED:
4383 break;
5d0d9be8 4384 }
5d38a079 4385
c7c4b3b6 4386 return ret;
5d38a079 4387}
5d0d9be8 4388
4389 /* The upper GRO stack assumes the network header starts at gro_offset=0.
4390 * Drivers may call both napi_gro_frags() and napi_gro_receive(),
4391 * so we copy the Ethernet header into skb->data to have a common layout.
4392 */
4adb9c4a 4393static struct sk_buff *napi_frags_skb(struct napi_struct *napi)
4394{
4395 struct sk_buff *skb = napi->skb;
4396 const struct ethhdr *eth;
4397 unsigned int hlen = sizeof(*eth);
4398
4399 napi->skb = NULL;
4400
4401 skb_reset_mac_header(skb);
4402 skb_gro_reset_offset(skb);
4403
4404 eth = skb_gro_header_fast(skb, 0);
4405 if (unlikely(skb_gro_header_hard(skb, hlen))) {
4406 eth = skb_gro_header_slow(skb, hlen, 0);
4407 if (unlikely(!eth)) {
4408 napi_reuse_skb(napi, skb);
4409 return NULL;
4410 }
4411 } else {
4412 gro_pull_from_frag0(skb, hlen);
4413 NAPI_GRO_CB(skb)->frag0 += hlen;
4414 NAPI_GRO_CB(skb)->frag0_len -= hlen;
76620aaf 4415 }
4416 __skb_pull(skb, hlen);
4417
4418 /*
4419 * This works because the only protocols we care about don't require
4420 * special handling.
4421 * We'll fix it up properly in napi_frags_finish()
4422 */
4423 skb->protocol = eth->h_proto;
76620aaf 4424
4425 return skb;
4426}
76620aaf 4427
c7c4b3b6 4428gro_result_t napi_gro_frags(struct napi_struct *napi)
5d0d9be8 4429{
76620aaf 4430 struct sk_buff *skb = napi_frags_skb(napi);
4431
4432 if (!skb)
c7c4b3b6 4433 return GRO_DROP;
5d0d9be8 4434
4435 trace_napi_gro_frags_entry(skb);
4436
89c5fa33 4437 return napi_frags_finish(napi, skb, dev_gro_receive(napi, skb));
5d0d9be8 4438}
4439EXPORT_SYMBOL(napi_gro_frags);
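
/*
 * [Editorial example, not part of dev.c] The frag-based GRO path for
 * drivers that attach page fragments rather than building linear skbs.
 * page/off/len come from a hypothetical RX descriptor; passing len as
 * truesize is a simplification, real drivers pass the buffer size.
 */
static void my_rx_frag(struct napi_struct *napi, struct page *page,
		       unsigned int off, unsigned int len)
{
	struct sk_buff *skb = napi_get_frags(napi);

	if (!skb)
		return;		/* allocation failed: drop this frame */
	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, off, len, len);
	napi_gro_frags(napi);	/* consumes napi->skb; the Ethernet header
				 * is pulled in napi_frags_skb() above */
}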
4440
4441/* Compute the checksum from gro_offset and return the folded value
4442 * after adding in any pseudo checksum.
4443 */
4444__sum16 __skb_gro_checksum_complete(struct sk_buff *skb)
4445{
4446 __wsum wsum;
4447 __sum16 sum;
4448
4449 wsum = skb_checksum(skb, skb_gro_offset(skb), skb_gro_len(skb), 0);
4450
4451 /* NAPI_GRO_CB(skb)->csum holds pseudo checksum */
4452 sum = csum_fold(csum_add(NAPI_GRO_CB(skb)->csum, wsum));
4453 if (likely(!sum)) {
4454 if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) &&
4455 !skb->csum_complete_sw)
4456 netdev_rx_csum_fault(skb->dev);
4457 }
4458
4459 NAPI_GRO_CB(skb)->csum = wsum;
4460 NAPI_GRO_CB(skb)->csum_valid = 1;
4461
4462 return sum;
4463}
4464EXPORT_SYMBOL(__skb_gro_checksum_complete);
4465
4466 /*
4467 * net_rps_action_and_irq_enable() sends any pending IPIs for RPS.
4468 * Note: called with local irq disabled, but exits with local irq enabled.
4469 */
4470static void net_rps_action_and_irq_enable(struct softnet_data *sd)
4471{
4472#ifdef CONFIG_RPS
4473 struct softnet_data *remsd = sd->rps_ipi_list;
4474
4475 if (remsd) {
4476 sd->rps_ipi_list = NULL;
4477
4478 local_irq_enable();
4479
4480 /* Send pending IPIs to kick RPS processing on remote CPUs. */
4481 while (remsd) {
4482 struct softnet_data *next = remsd->rps_ipi_next;
4483
4484 if (cpu_online(remsd->cpu))
c46fff2a 4485 smp_call_function_single_async(remsd->cpu,
fce8ad15 4486 &remsd->csd);
e326bed2
ED
4487 remsd = next;
4488 }
4489 } else
4490#endif
4491 local_irq_enable();
4492}
4493
4494static bool sd_has_rps_ipi_waiting(struct softnet_data *sd)
4495{
4496#ifdef CONFIG_RPS
4497 return sd->rps_ipi_list != NULL;
4498#else
4499 return false;
4500#endif
4501}
4502
bea3348e 4503static int process_backlog(struct napi_struct *napi, int quota)
4504{
4505 int work = 0;
eecfd7c4 4506 struct softnet_data *sd = container_of(napi, struct softnet_data, backlog);
1da177e4 4507
4508 /* Check if we have pending IPIs; it's better to send them now
4509 * than to wait for net_rx_action() to end.
4510 */
d75b1ade 4511 if (sd_has_rps_ipi_waiting(sd)) {
4512 local_irq_disable();
4513 net_rps_action_and_irq_enable(sd);
4514 }
d75b1ade 4515
bea3348e 4516 napi->weight = weight_p;
6e7676c1 4517 local_irq_disable();
11ef7a89 4518 while (1) {
1da177e4 4519 struct sk_buff *skb;
4520
4521 while ((skb = __skb_dequeue(&sd->process_queue))) {
2c17d27c 4522 rcu_read_lock();
4523 local_irq_enable();
4524 __netif_receive_skb(skb);
2c17d27c 4525 rcu_read_unlock();
6e7676c1 4526 local_irq_disable();
4527 input_queue_head_incr(sd);
4528 if (++work >= quota) {
4529 local_irq_enable();
4530 return work;
4531 }
6e7676c1 4532 }
1da177e4 4533
e36fa2f7 4534 rps_lock(sd);
11ef7a89 4535 if (skb_queue_empty(&sd->input_pkt_queue)) {
4536 /*
4537 * Inline a custom version of __napi_complete().
4538 * Only the current CPU owns and manipulates this napi,
4539 * and NAPI_STATE_SCHED is the only possible flag set
4540 * on the backlog.
4541 * We can use a plain write instead of clear_bit(),
4542 * and we don't need an smp_mb() memory barrier.
4543 */
eecfd7c4 4544 napi->state = 0;
11ef7a89 4545 rps_unlock(sd);
eecfd7c4 4546
11ef7a89 4547 break;
bea3348e 4548 }
4549
4550 skb_queue_splice_tail_init(&sd->input_pkt_queue,
4551 &sd->process_queue);
e36fa2f7 4552 rps_unlock(sd);
4553 }
4554 local_irq_enable();
1da177e4 4555
4556 return work;
4557}
1da177e4 4558
4559/**
4560 * __napi_schedule - schedule for receive
c4ea43c5 4561 * @n: entry to schedule
bea3348e 4562 *
4563 * The entry's receive function will be scheduled to run.
4564 * Consider using __napi_schedule_irqoff() if hard irqs are masked.
bea3348e 4565 */
b5606c2d 4566void __napi_schedule(struct napi_struct *n)
4567{
4568 unsigned long flags;
1da177e4 4569
bea3348e 4570 local_irq_save(flags);
903ceff7 4571 ____napi_schedule(this_cpu_ptr(&softnet_data), n);
bea3348e 4572 local_irq_restore(flags);
1da177e4 4573}
4574EXPORT_SYMBOL(__napi_schedule);
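
/*
 * [Editorial example, not part of dev.c] The canonical hardirq half of
 * NAPI, reusing the hypothetical struct my_priv from the earlier sketch:
 * mask device interrupts, then hand off to softirq polling.
 */
static irqreturn_t my_rx_irq(int irq, void *data)
{
	struct my_priv *priv = data;

	my_hw_mask_rx_irqs(priv);		/* hypothetical register write */
	if (napi_schedule_prep(&priv->napi))
		__napi_schedule(&priv->napi);
	return IRQ_HANDLED;
}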
4575
4576/**
4577 * __napi_schedule_irqoff - schedule for receive
4578 * @n: entry to schedule
4579 *
4580 * Variant of __napi_schedule() assuming hard irqs are masked
4581 */
4582void __napi_schedule_irqoff(struct napi_struct *n)
4583{
4584 ____napi_schedule(this_cpu_ptr(&softnet_data), n);
4585}
4586EXPORT_SYMBOL(__napi_schedule_irqoff);
4587
4588void __napi_complete(struct napi_struct *n)
4589{
4590 BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state));
d565b0a1 4591
d75b1ade 4592 list_del_init(&n->poll_list);
4e857c58 4593 smp_mb__before_atomic();
4594 clear_bit(NAPI_STATE_SCHED, &n->state);
4595}
4596EXPORT_SYMBOL(__napi_complete);
4597
3b47d303 4598void napi_complete_done(struct napi_struct *n, int work_done)
4599{
4600 unsigned long flags;
4601
4602 /*
4603 * Don't let napi dequeue from the CPU poll list,
4604 * just in case it's running on a different CPU.
4605 */
4606 if (unlikely(test_bit(NAPI_STATE_NPSVC, &n->state)))
4607 return;
4608
4609 if (n->gro_list) {
4610 unsigned long timeout = 0;
d75b1ade 4611
4612 if (work_done)
4613 timeout = n->dev->gro_flush_timeout;
4614
4615 if (timeout)
4616 hrtimer_start(&n->timer, ns_to_ktime(timeout),
4617 HRTIMER_MODE_REL_PINNED);
4618 else
4619 napi_gro_flush(n, false);
4620 }
4621 if (likely(list_empty(&n->poll_list))) {
4622 WARN_ON_ONCE(!test_and_clear_bit(NAPI_STATE_SCHED, &n->state));
4623 } else {
4624 /* If n->poll_list is not empty, we need to mask irqs */
4625 local_irq_save(flags);
4626 __napi_complete(n);
4627 local_irq_restore(flags);
4628 }
d565b0a1 4629}
3b47d303 4630EXPORT_SYMBOL(napi_complete_done);
d565b0a1 4631
4632/* must be called under rcu_read_lock(), as we dont take a reference */
4633struct napi_struct *napi_by_id(unsigned int napi_id)
4634{
4635 unsigned int hash = napi_id % HASH_SIZE(napi_hash);
4636 struct napi_struct *napi;
4637
4638 hlist_for_each_entry_rcu(napi, &napi_hash[hash], napi_hash_node)
4639 if (napi->napi_id == napi_id)
4640 return napi;
4641
4642 return NULL;
4643}
4644EXPORT_SYMBOL_GPL(napi_by_id);
4645
4646void napi_hash_add(struct napi_struct *napi)
4647{
4648 if (!test_and_set_bit(NAPI_STATE_HASHED, &napi->state)) {
4649
4650 spin_lock(&napi_hash_lock);
4651
4652 /* 0 is not a valid id; we also skip an id that is already taken.
4653 * We expect both events to be extremely rare.
4654 */
4655 napi->napi_id = 0;
4656 while (!napi->napi_id) {
4657 napi->napi_id = ++napi_gen_id;
4658 if (napi_by_id(napi->napi_id))
4659 napi->napi_id = 0;
4660 }
4661
4662 hlist_add_head_rcu(&napi->napi_hash_node,
4663 &napi_hash[napi->napi_id % HASH_SIZE(napi_hash)]);
4664
4665 spin_unlock(&napi_hash_lock);
4666 }
4667}
4668EXPORT_SYMBOL_GPL(napi_hash_add);
4669
4670 /* Warning: the caller is responsible for making sure an RCU grace
4671 * period has elapsed before freeing the memory containing @napi.
4672 */
4673void napi_hash_del(struct napi_struct *napi)
4674{
4675 spin_lock(&napi_hash_lock);
4676
4677 if (test_and_clear_bit(NAPI_STATE_HASHED, &napi->state))
4678 hlist_del_rcu(&napi->napi_hash_node);
4679
4680 spin_unlock(&napi_hash_lock);
4681}
4682EXPORT_SYMBOL_GPL(napi_hash_del);
4683
4684static enum hrtimer_restart napi_watchdog(struct hrtimer *timer)
4685{
4686 struct napi_struct *napi;
4687
4688 napi = container_of(timer, struct napi_struct, timer);
4689 if (napi->gro_list)
4690 napi_schedule(napi);
4691
4692 return HRTIMER_NORESTART;
4693}
4694
4695void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
4696 int (*poll)(struct napi_struct *, int), int weight)
4697{
4698 INIT_LIST_HEAD(&napi->poll_list);
4699 hrtimer_init(&napi->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_PINNED);
4700 napi->timer.function = napi_watchdog;
4ae5544f 4701 napi->gro_count = 0;
d565b0a1 4702 napi->gro_list = NULL;
5d38a079 4703 napi->skb = NULL;
d565b0a1 4704 napi->poll = poll;
4705 if (weight > NAPI_POLL_WEIGHT)
4706 pr_err_once("netif_napi_add() called with weight %d on device %s\n",
4707 weight, dev->name);
4708 napi->weight = weight;
4709 list_add(&napi->dev_list, &dev->napi_list);
d565b0a1 4710 napi->dev = dev;
5d38a079 4711#ifdef CONFIG_NETPOLL
4712 spin_lock_init(&napi->poll_lock);
4713 napi->poll_owner = -1;
4714#endif
4715 set_bit(NAPI_STATE_SCHED, &napi->state);
4716}
4717EXPORT_SYMBOL(netif_napi_add);
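
/*
 * [Editorial example, not part of dev.c] NAPI lifecycle from a driver's
 * perspective, reusing my_priv and my_napi_poll() from earlier sketches.
 * NAPI_POLL_WEIGHT (64) is the recommended weight per the warning above.
 */
static void my_setup_napi(struct net_device *dev, struct my_priv *priv)
{
	netif_napi_add(dev, &priv->napi, my_napi_poll, NAPI_POLL_WEIGHT);
	napi_enable(&priv->napi);
}

static void my_teardown_napi(struct my_priv *priv)
{
	napi_disable(&priv->napi);	/* waits for any in-flight poll */
	netif_napi_del(&priv->napi);
}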
4718
4719void napi_disable(struct napi_struct *n)
4720{
4721 might_sleep();
4722 set_bit(NAPI_STATE_DISABLE, &n->state);
4723
4724 while (test_and_set_bit(NAPI_STATE_SCHED, &n->state))
4725 msleep(1);
4726 while (test_and_set_bit(NAPI_STATE_NPSVC, &n->state))
4727 msleep(1);
4728
4729 hrtimer_cancel(&n->timer);
4730
4731 clear_bit(NAPI_STATE_DISABLE, &n->state);
4732}
4733EXPORT_SYMBOL(napi_disable);
4734
4735void netif_napi_del(struct napi_struct *napi)
4736{
d7b06636 4737 list_del_init(&napi->dev_list);
76620aaf 4738 napi_free_frags(napi);
d565b0a1 4739
289dccbe 4740 kfree_skb_list(napi->gro_list);
d565b0a1 4741 napi->gro_list = NULL;
4ae5544f 4742 napi->gro_count = 0;
4743}
4744EXPORT_SYMBOL(netif_napi_del);
4745
4746static int napi_poll(struct napi_struct *n, struct list_head *repoll)
4747{
4748 void *have;
4749 int work, weight;
4750
4751 list_del_init(&n->poll_list);
4752
4753 have = netpoll_poll_lock(n);
4754
4755 weight = n->weight;
4756
4757 /* This NAPI_STATE_SCHED test is for avoiding a race
4758 * with netpoll's poll_napi(). Only the entity which
4759 * obtains the lock and sees NAPI_STATE_SCHED set will
4760 * actually make the ->poll() call. Therefore we avoid
4761 * accidentally calling ->poll() when NAPI is not scheduled.
4762 */
4763 work = 0;
4764 if (test_bit(NAPI_STATE_SCHED, &n->state)) {
4765 work = n->poll(n, weight);
4766 trace_napi_poll(n);
4767 }
4768
4769 WARN_ON_ONCE(work > weight);
4770
4771 if (likely(work < weight))
4772 goto out_unlock;
4773
4774 /* Drivers must not modify the NAPI state if they
4775 * consume the entire weight. In such cases this code
4776 * still "owns" the NAPI instance and therefore can
4777 * move the instance around on the list at-will.
4778 */
4779 if (unlikely(napi_disable_pending(n))) {
4780 napi_complete(n);
4781 goto out_unlock;
4782 }
4783
4784 if (n->gro_list) {
4785 /* flush too old packets
4786 * If HZ < 1000, flush all packets.
4787 */
4788 napi_gro_flush(n, HZ >= 1000);
4789 }
4790
4791 /* Some drivers may have called napi_schedule
4792 * prior to exhausting their budget.
4793 */
4794 if (unlikely(!list_empty(&n->poll_list))) {
4795 pr_warn_once("%s: Budget exhausted after napi rescheduled\n",
4796 n->dev ? n->dev->name : "backlog");
4797 goto out_unlock;
4798 }
4799
4800 list_add_tail(&n->poll_list, repoll);
4801
4802out_unlock:
4803 netpoll_poll_unlock(have);
4804
4805 return work;
4806}
4807
4808static void net_rx_action(struct softirq_action *h)
4809{
903ceff7 4810 struct softnet_data *sd = this_cpu_ptr(&softnet_data);
24f8b238 4811 unsigned long time_limit = jiffies + 2;
51b0bded 4812 int budget = netdev_budget;
4813 LIST_HEAD(list);
4814 LIST_HEAD(repoll);
53fb95d3 4815
1da177e4 4816 local_irq_disable();
4817 list_splice_init(&sd->poll_list, &list);
4818 local_irq_enable();
1da177e4 4819
ceb8d5bf 4820 for (;;) {
bea3348e 4821 struct napi_struct *n;
1da177e4 4822
4823 if (list_empty(&list)) {
4824 if (!sd_has_rps_ipi_waiting(sd) && list_empty(&repoll))
4825 return;
4826 break;
4827 }
4828
4829 n = list_first_entry(&list, struct napi_struct, poll_list);
4830 budget -= napi_poll(n, &repoll);
4831
4832 /* If the softirq window is exhausted then punt.
4833 * Allow this to run for 2 jiffies, which allows
4834 * an average latency of 1.5/HZ.
bea3348e 4835 */
4836 if (unlikely(budget <= 0 ||
4837 time_after_eq(jiffies, time_limit))) {
4838 sd->time_squeeze++;
4839 break;
4840 }
1da177e4 4841 }
d75b1ade 4842
4843 local_irq_disable();
4844
4845 list_splice_tail_init(&sd->poll_list, &list);
4846 list_splice_tail(&repoll, &list);
4847 list_splice(&list, &sd->poll_list);
4848 if (!list_empty(&sd->poll_list))
4849 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
4850
e326bed2 4851 net_rps_action_and_irq_enable(sd);
4852}
4853
aa9d8560 4854struct netdev_adjacent {
9ff162a8 4855 struct net_device *dev;
4856
4857 /* upper master flag, there can only be one master device per list */
9ff162a8 4858 bool master;
5d261913 4859
4860 /* counter for the number of times this device was added to us */
4861 u16 ref_nr;
4862
4863 /* private field for the users */
4864 void *private;
4865
4866 struct list_head list;
4867 struct rcu_head rcu;
4868};
4869
6ea29da1 4870static struct netdev_adjacent *__netdev_find_adj(struct net_device *adj_dev,
2f268f12 4871 struct list_head *adj_list)
9ff162a8 4872{
5d261913 4873 struct netdev_adjacent *adj;
5d261913 4874
2f268f12 4875 list_for_each_entry(adj, adj_list, list) {
4876 if (adj->dev == adj_dev)
4877 return adj;
4878 }
4879 return NULL;
4880}
4881
4882/**
4883 * netdev_has_upper_dev - Check if device is linked to an upper device
4884 * @dev: device
4885 * @upper_dev: upper device to check
4886 *
4887 * Find out if a device is linked to the specified upper device and return true
4888 * if it is. Note that this checks only the immediate upper device,
4889 * not the complete stack of devices. The caller must hold the RTNL lock.
4890 */
4891bool netdev_has_upper_dev(struct net_device *dev,
4892 struct net_device *upper_dev)
4893{
4894 ASSERT_RTNL();
4895
6ea29da1 4896 return __netdev_find_adj(upper_dev, &dev->all_adj_list.upper);
4897}
4898EXPORT_SYMBOL(netdev_has_upper_dev);
4899
4900/**
4901 * netdev_has_any_upper_dev - Check if device is linked to some device
4902 * @dev: device
4903 *
4904 * Find out if a device is linked to an upper device and return true in case
4905 * it is. The caller must hold the RTNL lock.
4906 */
1d143d9f 4907static bool netdev_has_any_upper_dev(struct net_device *dev)
4908{
4909 ASSERT_RTNL();
4910
2f268f12 4911 return !list_empty(&dev->all_adj_list.upper);
9ff162a8 4912}
4913
4914/**
4915 * netdev_master_upper_dev_get - Get master upper device
4916 * @dev: device
4917 *
4918 * Find a master upper device and return pointer to it or NULL in case
4919 * it's not there. The caller must hold the RTNL lock.
4920 */
4921struct net_device *netdev_master_upper_dev_get(struct net_device *dev)
4922{
aa9d8560 4923 struct netdev_adjacent *upper;
4924
4925 ASSERT_RTNL();
4926
2f268f12 4927 if (list_empty(&dev->adj_list.upper))
4928 return NULL;
4929
2f268f12 4930 upper = list_first_entry(&dev->adj_list.upper,
aa9d8560 4931 struct netdev_adjacent, list);
4932 if (likely(upper->master))
4933 return upper->dev;
4934 return NULL;
4935}
4936EXPORT_SYMBOL(netdev_master_upper_dev_get);
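
/*
 * [Editorial example, not part of dev.c] Querying the adjacency lists
 * under RTNL, e.g. to learn whether a port is enslaved to a bonding
 * master; my_port_is_bonded() is a hypothetical helper.
 */
static bool my_port_is_bonded(struct net_device *dev)
{
	struct net_device *master;

	ASSERT_RTNL();
	master = netdev_master_upper_dev_get(dev);
	return master && netif_is_bond_master(master);
}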
4937
4938void *netdev_adjacent_get_private(struct list_head *adj_list)
4939{
4940 struct netdev_adjacent *adj;
4941
4942 adj = list_entry(adj_list, struct netdev_adjacent, list);
4943
4944 return adj->private;
4945}
4946EXPORT_SYMBOL(netdev_adjacent_get_private);
4947
4948/**
4949 * netdev_upper_get_next_dev_rcu - Get the next dev from upper list
4950 * @dev: device
4951 * @iter: list_head ** of the current position
4952 *
4953 * Gets the next device from the dev's upper list, starting from iter
4954 * position. The caller must hold RCU read lock.
4955 */
4956struct net_device *netdev_upper_get_next_dev_rcu(struct net_device *dev,
4957 struct list_head **iter)
4958{
4959 struct netdev_adjacent *upper;
4960
4961 WARN_ON_ONCE(!rcu_read_lock_held() && !lockdep_rtnl_is_held());
4962
4963 upper = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);
4964
4965 if (&upper->list == &dev->adj_list.upper)
4966 return NULL;
4967
4968 *iter = &upper->list;
4969
4970 return upper->dev;
4971}
4972EXPORT_SYMBOL(netdev_upper_get_next_dev_rcu);
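
/*
 * [Editorial example, not part of dev.c] Walking the direct uppers under
 * RCU; the netdev_for_each_upper_dev_rcu() helper in netdevice.h wraps
 * the iterator above.
 */
static void my_dump_uppers(struct net_device *dev)
{
	struct net_device *upper;
	struct list_head *iter;

	rcu_read_lock();
	netdev_for_each_upper_dev_rcu(dev, upper, iter)
		pr_info("%s is an upper of %s\n", upper->name, dev->name);
	rcu_read_unlock();
}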
4973
4974/**
4975 * netdev_all_upper_get_next_dev_rcu - Get the next dev from upper list
4976 * @dev: device
4977 * @iter: list_head ** of the current position
4978 *
4979 * Gets the next device from the dev's upper list, starting from iter
4980 * position. The caller must hold RCU read lock.
4981 */
4982struct net_device *netdev_all_upper_get_next_dev_rcu(struct net_device *dev,
4983 struct list_head **iter)
4984{
4985 struct netdev_adjacent *upper;
4986
85328240 4987 WARN_ON_ONCE(!rcu_read_lock_held() && !lockdep_rtnl_is_held());
4988
4989 upper = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);
4990
2f268f12 4991 if (&upper->list == &dev->all_adj_list.upper)
4992 return NULL;
4993
4994 *iter = &upper->list;
4995
4996 return upper->dev;
4997}
2f268f12 4998EXPORT_SYMBOL(netdev_all_upper_get_next_dev_rcu);
48311f46 4999
5000/**
5001 * netdev_lower_get_next_private - Get the next ->private from the
5002 * lower neighbour list
5003 * @dev: device
5004 * @iter: list_head ** of the current position
5005 *
5006 * Gets the next netdev_adjacent->private from the dev's lower neighbour
5007 * list, starting from iter position. The caller must either hold the
5008 * RTNL lock or its own locking that guarantees that the neighbour lower
b469139e 5009 * list will remain unchanged.
5010 */
5011void *netdev_lower_get_next_private(struct net_device *dev,
5012 struct list_head **iter)
5013{
5014 struct netdev_adjacent *lower;
5015
5016 lower = list_entry(*iter, struct netdev_adjacent, list);
5017
5018 if (&lower->list == &dev->adj_list.lower)
5019 return NULL;
5020
6859e7df 5021 *iter = lower->list.next;
5022
5023 return lower->private;
5024}
5025EXPORT_SYMBOL(netdev_lower_get_next_private);
5026
5027/**
5028 * netdev_lower_get_next_private_rcu - Get the next ->private from the
5029 * lower neighbour list, RCU
5030 * variant
5031 * @dev: device
5032 * @iter: list_head ** of the current position
5033 *
5034 * Gets the next netdev_adjacent->private from the dev's lower neighbour
5035 * list, starting from iter position. The caller must hold RCU read lock.
5036 */
5037void *netdev_lower_get_next_private_rcu(struct net_device *dev,
5038 struct list_head **iter)
5039{
5040 struct netdev_adjacent *lower;
5041
5042 WARN_ON_ONCE(!rcu_read_lock_held());
5043
5044 lower = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);
5045
5046 if (&lower->list == &dev->adj_list.lower)
5047 return NULL;
5048
6859e7df 5049 *iter = &lower->list;
5050
5051 return lower->private;
5052}
5053EXPORT_SYMBOL(netdev_lower_get_next_private_rcu);
5054
5055/**
5056 * netdev_lower_get_next - Get the next device from the lower neighbour
5057 * list
5058 * @dev: device
5059 * @iter: list_head ** of the current position
5060 *
5061 * Gets the next netdev_adjacent from the dev's lower neighbour
5062 * list, starting from iter position. The caller must hold RTNL lock or
5063 * its own locking that guarantees that the neighbour lower
b469139e 5064 * list will remain unchanged.
5065 */
5066void *netdev_lower_get_next(struct net_device *dev, struct list_head **iter)
5067{
5068 struct netdev_adjacent *lower;
5069
5070 lower = list_entry((*iter)->next, struct netdev_adjacent, list);
5071
5072 if (&lower->list == &dev->adj_list.lower)
5073 return NULL;
5074
5075 *iter = &lower->list;
5076
5077 return lower->dev;
5078}
5079EXPORT_SYMBOL(netdev_lower_get_next);
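
/*
 * [Editorial example, not part of dev.c] The RTNL-side counterpart for
 * lowers, as a master might use to visit each slave; the
 * netdev_for_each_lower_dev() helper in netdevice.h wraps this iterator.
 */
static int my_count_slaves(struct net_device *master)
{
	struct net_device *slave;
	struct list_head *iter;
	int n = 0;

	ASSERT_RTNL();
	netdev_for_each_lower_dev(master, slave, iter)
		n++;
	return n;
}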
5080
e001bfad 5081/**
5082 * netdev_lower_get_first_private_rcu - Get the first ->private from the
5083 * lower neighbour list, RCU
5084 * variant
5085 * @dev: device
5086 *
5087 * Gets the first netdev_adjacent->private from the dev's lower neighbour
5088 * list. The caller must hold RCU read lock.
5089 */
5090void *netdev_lower_get_first_private_rcu(struct net_device *dev)
5091{
5092 struct netdev_adjacent *lower;
5093
5094 lower = list_first_or_null_rcu(&dev->adj_list.lower,
5095 struct netdev_adjacent, list);
5096 if (lower)
5097 return lower->private;
5098 return NULL;
5099}
5100EXPORT_SYMBOL(netdev_lower_get_first_private_rcu);
5101
5102/**
5103 * netdev_master_upper_dev_get_rcu - Get master upper device
5104 * @dev: device
5105 *
5106 * Find a master upper device and return pointer to it or NULL in case
5107 * it's not there. The caller must hold the RCU read lock.
5108 */
5109struct net_device *netdev_master_upper_dev_get_rcu(struct net_device *dev)
5110{
aa9d8560 5111 struct netdev_adjacent *upper;
9ff162a8 5112
2f268f12 5113 upper = list_first_or_null_rcu(&dev->adj_list.upper,
aa9d8560 5114 struct netdev_adjacent, list);
5115 if (upper && likely(upper->master))
5116 return upper->dev;
5117 return NULL;
5118}
5119EXPORT_SYMBOL(netdev_master_upper_dev_get_rcu);
5120
0a59f3a9 5121static int netdev_adjacent_sysfs_add(struct net_device *dev,
5122 struct net_device *adj_dev,
5123 struct list_head *dev_list)
5124{
5125 char linkname[IFNAMSIZ+7];
5126 sprintf(linkname, dev_list == &dev->adj_list.upper ?
5127 "upper_%s" : "lower_%s", adj_dev->name);
5128 return sysfs_create_link(&(dev->dev.kobj), &(adj_dev->dev.kobj),
5129 linkname);
5130}
0a59f3a9 5131static void netdev_adjacent_sysfs_del(struct net_device *dev,
5132 char *name,
5133 struct list_head *dev_list)
5134{
5135 char linkname[IFNAMSIZ+7];
5136 sprintf(linkname, dev_list == &dev->adj_list.upper ?
5137 "upper_%s" : "lower_%s", name);
5138 sysfs_remove_link(&(dev->dev.kobj), linkname);
5139}
5140
5141static inline bool netdev_adjacent_is_neigh_list(struct net_device *dev,
5142 struct net_device *adj_dev,
5143 struct list_head *dev_list)
5144{
5145 return (dev_list == &dev->adj_list.upper ||
5146 dev_list == &dev->adj_list.lower) &&
5147 net_eq(dev_net(dev), dev_net(adj_dev));
5148}
3ee32707 5149
5150static int __netdev_adjacent_dev_insert(struct net_device *dev,
5151 struct net_device *adj_dev,
7863c054 5152 struct list_head *dev_list,
402dae96 5153 void *private, bool master)
5d261913
VF
5154{
5155 struct netdev_adjacent *adj;
842d67a7 5156 int ret;
5d261913 5157
6ea29da1 5158 adj = __netdev_find_adj(adj_dev, dev_list);
5159
5160 if (adj) {
5161 adj->ref_nr++;
5162 return 0;
5163 }
5164
5165 adj = kmalloc(sizeof(*adj), GFP_KERNEL);
5166 if (!adj)
5167 return -ENOMEM;
5168
5169 adj->dev = adj_dev;
5170 adj->master = master;
5d261913 5171 adj->ref_nr = 1;
402dae96 5172 adj->private = private;
5d261913 5173 dev_hold(adj_dev);
5174
5175 pr_debug("dev_hold for %s, because of link added from %s to %s\n",
5176 adj_dev->name, dev->name, adj_dev->name);
5d261913 5177
7ce64c79 5178 if (netdev_adjacent_is_neigh_list(dev, adj_dev, dev_list)) {
3ee32707 5179 ret = netdev_adjacent_sysfs_add(dev, adj_dev, dev_list);
5180 if (ret)
5181 goto free_adj;
5182 }
5183
7863c054 5184 /* Ensure that master link is always the first item in list. */
5185 if (master) {
5186 ret = sysfs_create_link(&(dev->dev.kobj),
5187 &(adj_dev->dev.kobj), "master");
5188 if (ret)
5831d66e 5189 goto remove_symlinks;
842d67a7 5190
7863c054 5191 list_add_rcu(&adj->list, dev_list);
842d67a7 5192 } else {
7863c054 5193 list_add_tail_rcu(&adj->list, dev_list);
842d67a7 5194 }
5195
5196 return 0;
842d67a7 5197
5831d66e 5198remove_symlinks:
7ce64c79 5199 if (netdev_adjacent_is_neigh_list(dev, adj_dev, dev_list))
3ee32707 5200 netdev_adjacent_sysfs_del(dev, adj_dev->name, dev_list);
5201free_adj:
5202 kfree(adj);
974daef7 5203 dev_put(adj_dev);
5204
5205 return ret;
5206}
5207
1d143d9f 5208static void __netdev_adjacent_dev_remove(struct net_device *dev,
5209 struct net_device *adj_dev,
5210 struct list_head *dev_list)
5211{
5212 struct netdev_adjacent *adj;
5213
6ea29da1 5214 adj = __netdev_find_adj(adj_dev, dev_list);
5d261913 5215
5216 if (!adj) {
5217 pr_err("tried to remove device %s from %s\n",
5218 dev->name, adj_dev->name);
5d261913 5219 BUG();
2f268f12 5220 }
5221
5222 if (adj->ref_nr > 1) {
5223 pr_debug("%s to %s ref_nr-- = %d\n", dev->name, adj_dev->name,
5224 adj->ref_nr-1);
5d261913
VF
5225 adj->ref_nr--;
5226 return;
5227 }
5228
5229 if (adj->master)
5230 sysfs_remove_link(&(dev->dev.kobj), "master");
5231
7ce64c79 5232 if (netdev_adjacent_is_neigh_list(dev, adj_dev, dev_list))
3ee32707 5233 netdev_adjacent_sysfs_del(dev, adj_dev->name, dev_list);
5831d66e 5234
5d261913 5235 list_del_rcu(&adj->list);
5236 pr_debug("dev_put for %s, because link removed from %s to %s\n",
5237 adj_dev->name, dev->name, adj_dev->name);
5238 dev_put(adj_dev);
5239 kfree_rcu(adj, rcu);
5240}
5241
1d143d9f 5242static int __netdev_adjacent_dev_link_lists(struct net_device *dev,
5243 struct net_device *upper_dev,
5244 struct list_head *up_list,
5245 struct list_head *down_list,
5246 void *private, bool master)
5247{
5248 int ret;
5249
5250 ret = __netdev_adjacent_dev_insert(dev, upper_dev, up_list, private,
5251 master);
5252 if (ret)
5253 return ret;
5254
5255 ret = __netdev_adjacent_dev_insert(upper_dev, dev, down_list, private,
5256 false);
5d261913 5257 if (ret) {
2f268f12 5258 __netdev_adjacent_dev_remove(dev, upper_dev, up_list);
5259 return ret;
5260 }
5261
5262 return 0;
5263}
5264
1d143d9f 5265static int __netdev_adjacent_dev_link(struct net_device *dev,
5266 struct net_device *upper_dev)
5d261913 5267{
5268 return __netdev_adjacent_dev_link_lists(dev, upper_dev,
5269 &dev->all_adj_list.upper,
5270 &upper_dev->all_adj_list.lower,
402dae96 5271 NULL, false);
5272}
5273
1d143d9f 5274static void __netdev_adjacent_dev_unlink_lists(struct net_device *dev,
5275 struct net_device *upper_dev,
5276 struct list_head *up_list,
5277 struct list_head *down_list)
5d261913 5278{
5279 __netdev_adjacent_dev_remove(dev, upper_dev, up_list);
5280 __netdev_adjacent_dev_remove(upper_dev, dev, down_list);
5281}
5282
1d143d9f 5283static void __netdev_adjacent_dev_unlink(struct net_device *dev,
5284 struct net_device *upper_dev)
5d261913 5285{
5286 __netdev_adjacent_dev_unlink_lists(dev, upper_dev,
5287 &dev->all_adj_list.upper,
5288 &upper_dev->all_adj_list.lower);
5289}
5290
1d143d9f 5291static int __netdev_adjacent_dev_link_neighbour(struct net_device *dev,
5292 struct net_device *upper_dev,
5293 void *private, bool master)
5294{
5295 int ret = __netdev_adjacent_dev_link(dev, upper_dev);
5296
5297 if (ret)
5298 return ret;
5299
5300 ret = __netdev_adjacent_dev_link_lists(dev, upper_dev,
5301 &dev->adj_list.upper,
5302 &upper_dev->adj_list.lower,
402dae96 5303 private, master);
5304 if (ret) {
5305 __netdev_adjacent_dev_unlink(dev, upper_dev);
5306 return ret;
5307 }
5308
5309 return 0;
5d261913
VF
5310}
5311
1d143d9f 5312static void __netdev_adjacent_dev_unlink_neighbour(struct net_device *dev,
5313 struct net_device *upper_dev)
5314{
5315 __netdev_adjacent_dev_unlink(dev, upper_dev);
5316 __netdev_adjacent_dev_unlink_lists(dev, upper_dev,
5317 &dev->adj_list.upper,
5318 &upper_dev->adj_list.lower);
5319}
5d261913 5320
static int __netdev_upper_dev_link(struct net_device *dev,
				   struct net_device *upper_dev, bool master,
				   void *private)
{
	struct netdev_notifier_changeupper_info changeupper_info;
	struct netdev_adjacent *i, *j, *to_i, *to_j;
	int ret = 0;

	ASSERT_RTNL();

	if (dev == upper_dev)
		return -EBUSY;

	/* To prevent loops, check if dev is not upper device to upper_dev. */
	if (__netdev_find_adj(dev, &upper_dev->all_adj_list.upper))
		return -EBUSY;

	if (__netdev_find_adj(upper_dev, &dev->adj_list.upper))
		return -EEXIST;

	if (master && netdev_master_upper_dev_get(dev))
		return -EBUSY;

	changeupper_info.upper_dev = upper_dev;
	changeupper_info.master = master;
	changeupper_info.linking = true;

	ret = __netdev_adjacent_dev_link_neighbour(dev, upper_dev, private,
						   master);
	if (ret)
		return ret;

	/* Now that we linked these devs, make all the upper_dev's
	 * all_adj_list.upper visible to every dev's all_adj_list.lower and
	 * vice versa, and don't forget the devices themselves. All of these
	 * links are non-neighbours.
	 */
	list_for_each_entry(i, &dev->all_adj_list.lower, list) {
		list_for_each_entry(j, &upper_dev->all_adj_list.upper, list) {
			pr_debug("Interlinking %s with %s, non-neighbour\n",
				 i->dev->name, j->dev->name);
			ret = __netdev_adjacent_dev_link(i->dev, j->dev);
			if (ret)
				goto rollback_mesh;
		}
	}

	/* add dev to every upper_dev's upper device */
	list_for_each_entry(i, &upper_dev->all_adj_list.upper, list) {
		pr_debug("linking %s's upper device %s with %s\n",
			 upper_dev->name, i->dev->name, dev->name);
		ret = __netdev_adjacent_dev_link(dev, i->dev);
		if (ret)
			goto rollback_upper_mesh;
	}

	/* add upper_dev to every dev's lower device */
	list_for_each_entry(i, &dev->all_adj_list.lower, list) {
		pr_debug("linking %s's lower device %s with %s\n", dev->name,
			 i->dev->name, upper_dev->name);
		ret = __netdev_adjacent_dev_link(i->dev, upper_dev);
		if (ret)
			goto rollback_lower_mesh;
	}

	call_netdevice_notifiers_info(NETDEV_CHANGEUPPER, dev,
				      &changeupper_info.info);
	return 0;

rollback_lower_mesh:
	to_i = i;
	list_for_each_entry(i, &dev->all_adj_list.lower, list) {
		if (i == to_i)
			break;
		__netdev_adjacent_dev_unlink(i->dev, upper_dev);
	}

	i = NULL;

rollback_upper_mesh:
	to_i = i;
	list_for_each_entry(i, &upper_dev->all_adj_list.upper, list) {
		if (i == to_i)
			break;
		__netdev_adjacent_dev_unlink(dev, i->dev);
	}

	i = j = NULL;

rollback_mesh:
	to_i = i;
	to_j = j;
	list_for_each_entry(i, &dev->all_adj_list.lower, list) {
		list_for_each_entry(j, &upper_dev->all_adj_list.upper, list) {
			if (i == to_i && j == to_j)
				break;
			__netdev_adjacent_dev_unlink(i->dev, j->dev);
		}
		if (i == to_i)
			break;
	}

	__netdev_adjacent_dev_unlink_neighbour(dev, upper_dev);

	return ret;
}

/**
 * netdev_upper_dev_link - Add a link to the upper device
 * @dev: device
 * @upper_dev: new upper device
 *
 * Adds a link to device which is upper to this one. The caller must hold
 * the RTNL lock. On a failure a negative errno code is returned.
 * On success the reference counts are adjusted and the function
 * returns zero.
 */
int netdev_upper_dev_link(struct net_device *dev,
			  struct net_device *upper_dev)
{
	return __netdev_upper_dev_link(dev, upper_dev, false, NULL);
}
EXPORT_SYMBOL(netdev_upper_dev_link);
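
/* Usage sketch (illustrative only, not part of dev.c): a VLAN-style
 * driver would record its lower device under RTNL; "real_dev" and
 * "vlan_dev" are hypothetical names:
 *
 *	ASSERT_RTNL();
 *	err = netdev_upper_dev_link(real_dev, vlan_dev);
 *	if (err)
 *		return err;
 */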

/**
 * netdev_master_upper_dev_link - Add a master link to the upper device
 * @dev: device
 * @upper_dev: new upper device
 *
 * Adds a link to device which is upper to this one. In this case, only
 * one master upper device can be linked, although other non-master devices
 * might be linked as well. The caller must hold the RTNL lock.
 * On a failure a negative errno code is returned. On success the reference
 * counts are adjusted and the function returns zero.
 */
int netdev_master_upper_dev_link(struct net_device *dev,
				 struct net_device *upper_dev)
{
	return __netdev_upper_dev_link(dev, upper_dev, true, NULL);
}
EXPORT_SYMBOL(netdev_master_upper_dev_link);
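
/* Usage sketch (illustrative): a bonding-style master enslaving a port;
 * "slave_dev" and "bond_dev" are hypothetical. The slave is the lower
 * device, the bond the single master upper device:
 *
 *	err = netdev_master_upper_dev_link(slave_dev, bond_dev);
 *	(a second master link on the same slave fails with -EBUSY)
 */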

int netdev_master_upper_dev_link_private(struct net_device *dev,
					 struct net_device *upper_dev,
					 void *private)
{
	return __netdev_upper_dev_link(dev, upper_dev, true, private);
}
EXPORT_SYMBOL(netdev_master_upper_dev_link_private);

/**
 * netdev_upper_dev_unlink - Removes a link to upper device
 * @dev: device
 * @upper_dev: upper device to unlink
 *
 * Removes a link to device which is upper to this one. The caller must hold
 * the RTNL lock.
 */
void netdev_upper_dev_unlink(struct net_device *dev,
			     struct net_device *upper_dev)
{
	struct netdev_notifier_changeupper_info changeupper_info;
	struct netdev_adjacent *i, *j;
	ASSERT_RTNL();

	changeupper_info.upper_dev = upper_dev;
	changeupper_info.master = netdev_master_upper_dev_get(dev) == upper_dev;
	changeupper_info.linking = false;

	__netdev_adjacent_dev_unlink_neighbour(dev, upper_dev);

	/* Here is the tricky part. We must remove all dev's lower
	 * devices from all upper_dev's upper devices and vice
	 * versa, to maintain the graph relationship.
	 */
	list_for_each_entry(i, &dev->all_adj_list.lower, list)
		list_for_each_entry(j, &upper_dev->all_adj_list.upper, list)
			__netdev_adjacent_dev_unlink(i->dev, j->dev);

	/* also remove the devices themselves from the lower/upper device
	 * lists
	 */
	list_for_each_entry(i, &dev->all_adj_list.lower, list)
		__netdev_adjacent_dev_unlink(i->dev, upper_dev);

	list_for_each_entry(i, &upper_dev->all_adj_list.upper, list)
		__netdev_adjacent_dev_unlink(dev, i->dev);

	call_netdevice_notifiers_info(NETDEV_CHANGEUPPER, dev,
				      &changeupper_info.info);
}
EXPORT_SYMBOL(netdev_upper_dev_unlink);
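
/* Usage sketch (illustrative): teardown mirrors the link above, again
 * under RTNL; the names are hypothetical:
 *
 *	netdev_upper_dev_unlink(slave_dev, bond_dev);
 */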

/**
 * netdev_bonding_info_change - Dispatch event about slave change
 * @dev: device
 * @bonding_info: info to dispatch
 *
 * Send NETDEV_BONDING_INFO to netdev notifiers with info.
 * The caller must hold the RTNL lock.
 */
void netdev_bonding_info_change(struct net_device *dev,
				struct netdev_bonding_info *bonding_info)
{
	struct netdev_notifier_bonding_info info;

	memcpy(&info.bonding_info, bonding_info,
	       sizeof(struct netdev_bonding_info));
	call_netdevice_notifiers_info(NETDEV_BONDING_INFO, dev,
				      &info.info);
}
EXPORT_SYMBOL(netdev_bonding_info_change);

static void netdev_adjacent_add_links(struct net_device *dev)
{
	struct netdev_adjacent *iter;

	struct net *net = dev_net(dev);

	list_for_each_entry(iter, &dev->adj_list.upper, list) {
		if (!net_eq(net, dev_net(iter->dev)))
			continue;
		netdev_adjacent_sysfs_add(iter->dev, dev,
					  &iter->dev->adj_list.lower);
		netdev_adjacent_sysfs_add(dev, iter->dev,
					  &dev->adj_list.upper);
	}

	list_for_each_entry(iter, &dev->adj_list.lower, list) {
		if (!net_eq(net, dev_net(iter->dev)))
			continue;
		netdev_adjacent_sysfs_add(iter->dev, dev,
					  &iter->dev->adj_list.upper);
		netdev_adjacent_sysfs_add(dev, iter->dev,
					  &dev->adj_list.lower);
	}
}

static void netdev_adjacent_del_links(struct net_device *dev)
{
	struct netdev_adjacent *iter;

	struct net *net = dev_net(dev);

	list_for_each_entry(iter, &dev->adj_list.upper, list) {
		if (!net_eq(net, dev_net(iter->dev)))
			continue;
		netdev_adjacent_sysfs_del(iter->dev, dev->name,
					  &iter->dev->adj_list.lower);
		netdev_adjacent_sysfs_del(dev, iter->dev->name,
					  &dev->adj_list.upper);
	}

	list_for_each_entry(iter, &dev->adj_list.lower, list) {
		if (!net_eq(net, dev_net(iter->dev)))
			continue;
		netdev_adjacent_sysfs_del(iter->dev, dev->name,
					  &iter->dev->adj_list.upper);
		netdev_adjacent_sysfs_del(dev, iter->dev->name,
					  &dev->adj_list.lower);
	}
}

void netdev_adjacent_rename_links(struct net_device *dev, char *oldname)
{
	struct netdev_adjacent *iter;

	struct net *net = dev_net(dev);

	list_for_each_entry(iter, &dev->adj_list.upper, list) {
		if (!net_eq(net, dev_net(iter->dev)))
			continue;
		netdev_adjacent_sysfs_del(iter->dev, oldname,
					  &iter->dev->adj_list.lower);
		netdev_adjacent_sysfs_add(iter->dev, dev,
					  &iter->dev->adj_list.lower);
	}

	list_for_each_entry(iter, &dev->adj_list.lower, list) {
		if (!net_eq(net, dev_net(iter->dev)))
			continue;
		netdev_adjacent_sysfs_del(iter->dev, oldname,
					  &iter->dev->adj_list.upper);
		netdev_adjacent_sysfs_add(iter->dev, dev,
					  &iter->dev->adj_list.upper);
	}
}

void *netdev_lower_dev_get_private(struct net_device *dev,
				   struct net_device *lower_dev)
{
	struct netdev_adjacent *lower;

	if (!lower_dev)
		return NULL;
	lower = __netdev_find_adj(lower_dev, &dev->adj_list.lower);
	if (!lower)
		return NULL;

	return lower->private;
}
EXPORT_SYMBOL(netdev_lower_dev_get_private);

int dev_get_nest_level(struct net_device *dev,
		       bool (*type_check)(struct net_device *dev))
{
	struct net_device *lower = NULL;
	struct list_head *iter;
	int max_nest = -1;
	int nest;

	ASSERT_RTNL();

	netdev_for_each_lower_dev(dev, lower, iter) {
		nest = dev_get_nest_level(lower, type_check);
		if (max_nest < nest)
			max_nest = nest;
	}

	if (type_check(dev))
		max_nest++;

	return max_nest;
}
EXPORT_SYMBOL(dev_get_nest_level);
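
/* Usage sketch (illustrative): a stackable device type computes its lock
 * nesting depth with a type predicate; "is_vlan_dev" stands in for
 * whatever check the caller uses:
 *
 *	int nest = dev_get_nest_level(dev, is_vlan_dev);
 *	(nest is -1 when neither dev nor anything beneath it matches)
 */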

static void dev_change_rx_flags(struct net_device *dev, int flags)
{
	const struct net_device_ops *ops = dev->netdev_ops;

	if (ops->ndo_change_rx_flags)
		ops->ndo_change_rx_flags(dev, flags);
}

static int __dev_set_promiscuity(struct net_device *dev, int inc, bool notify)
{
	unsigned int old_flags = dev->flags;
	kuid_t uid;
	kgid_t gid;

	ASSERT_RTNL();

	dev->flags |= IFF_PROMISC;
	dev->promiscuity += inc;
	if (dev->promiscuity == 0) {
		/*
		 * Avoid overflow.
		 * If inc causes overflow, untouch promisc and return error.
		 */
		if (inc < 0)
			dev->flags &= ~IFF_PROMISC;
		else {
			dev->promiscuity -= inc;
			pr_warn("%s: promiscuity touches roof, set promiscuity failed. promiscuity feature of device might be broken.\n",
				dev->name);
			return -EOVERFLOW;
		}
	}
	if (dev->flags != old_flags) {
		pr_info("device %s %s promiscuous mode\n",
			dev->name,
			dev->flags & IFF_PROMISC ? "entered" : "left");
		if (audit_enabled) {
			current_uid_gid(&uid, &gid);
			audit_log(current->audit_context, GFP_ATOMIC,
				  AUDIT_ANOM_PROMISCUOUS,
				  "dev=%s prom=%d old_prom=%d auid=%u uid=%u gid=%u ses=%u",
				  dev->name, (dev->flags & IFF_PROMISC),
				  (old_flags & IFF_PROMISC),
				  from_kuid(&init_user_ns, audit_get_loginuid(current)),
				  from_kuid(&init_user_ns, uid),
				  from_kgid(&init_user_ns, gid),
				  audit_get_sessionid(current));
		}

		dev_change_rx_flags(dev, IFF_PROMISC);
	}
	if (notify)
		__dev_notify_flags(dev, old_flags, IFF_PROMISC);
	return 0;
}

/**
 * dev_set_promiscuity - update promiscuity count on a device
 * @dev: device
 * @inc: modifier
 *
 * Add or remove promiscuity from a device. While the count in the device
 * remains above zero the interface remains promiscuous. Once it hits zero
 * the device reverts back to normal filtering operation. A negative inc
 * value is used to drop promiscuity on the device.
 * Return 0 if successful or a negative errno code on error.
 */
int dev_set_promiscuity(struct net_device *dev, int inc)
{
	unsigned int old_flags = dev->flags;
	int err;

	err = __dev_set_promiscuity(dev, inc, true);
	if (err < 0)
		return err;
	if (dev->flags != old_flags)
		dev_set_rx_mode(dev);
	return err;
}
EXPORT_SYMBOL(dev_set_promiscuity);
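
/* Usage sketch (illustrative): packet-tap style callers hold promiscuity
 * while capturing and release it symmetrically, under RTNL:
 *
 *	dev_set_promiscuity(dev, 1);	count 0 -> 1, device enters promisc
 *	...
 *	dev_set_promiscuity(dev, -1);	count back to 0, device leaves promisc
 */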

static int __dev_set_allmulti(struct net_device *dev, int inc, bool notify)
{
	unsigned int old_flags = dev->flags, old_gflags = dev->gflags;

	ASSERT_RTNL();

	dev->flags |= IFF_ALLMULTI;
	dev->allmulti += inc;
	if (dev->allmulti == 0) {
		/*
		 * Avoid overflow.
		 * If inc causes overflow, untouch allmulti and return error.
		 */
		if (inc < 0)
			dev->flags &= ~IFF_ALLMULTI;
		else {
			dev->allmulti -= inc;
			pr_warn("%s: allmulti touches roof, set allmulti failed. allmulti feature of device might be broken.\n",
				dev->name);
			return -EOVERFLOW;
		}
	}
	if (dev->flags ^ old_flags) {
		dev_change_rx_flags(dev, IFF_ALLMULTI);
		dev_set_rx_mode(dev);
		if (notify)
			__dev_notify_flags(dev, old_flags,
					   dev->gflags ^ old_gflags);
	}
	return 0;
}

/**
 * dev_set_allmulti - update allmulti count on a device
 * @dev: device
 * @inc: modifier
 *
 * Add or remove reception of all multicast frames to a device. While the
 * count in the device remains above zero the interface remains listening
 * to all multicast frames. Once it hits zero the device reverts back to
 * normal filtering operation. A negative @inc value is used to drop the
 * counter when releasing a resource needing all multicasts.
 * Return 0 if successful or a negative errno code on error.
 */

int dev_set_allmulti(struct net_device *dev, int inc)
{
	return __dev_set_allmulti(dev, inc, true);
}
EXPORT_SYMBOL(dev_set_allmulti);
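
/* Usage sketch (illustrative): a component that temporarily needs every
 * multicast frame bumps the allmulti count and drops it when done:
 *
 *	dev_set_allmulti(dev, 1);	start receiving all multicast
 *	...
 *	dev_set_allmulti(dev, -1);	release the reference
 */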

/*
 * Upload unicast and multicast address lists to device and
 * configure RX filtering. When the device doesn't support unicast
 * filtering it is put in promiscuous mode while unicast addresses
 * are present.
 */
void __dev_set_rx_mode(struct net_device *dev)
{
	const struct net_device_ops *ops = dev->netdev_ops;

	/* dev_open will call this function so the list will stay sane. */
	if (!(dev->flags & IFF_UP))
		return;

	if (!netif_device_present(dev))
		return;

	if (!(dev->priv_flags & IFF_UNICAST_FLT)) {
		/* Unicast addresses changes may only happen under the rtnl,
		 * therefore calling __dev_set_promiscuity here is safe.
		 */
		if (!netdev_uc_empty(dev) && !dev->uc_promisc) {
			__dev_set_promiscuity(dev, 1, false);
			dev->uc_promisc = true;
		} else if (netdev_uc_empty(dev) && dev->uc_promisc) {
			__dev_set_promiscuity(dev, -1, false);
			dev->uc_promisc = false;
		}
	}

	if (ops->ndo_set_rx_mode)
		ops->ndo_set_rx_mode(dev);
}

void dev_set_rx_mode(struct net_device *dev)
{
	netif_addr_lock_bh(dev);
	__dev_set_rx_mode(dev);
	netif_addr_unlock_bh(dev);
}

/**
 * dev_get_flags - get flags reported to userspace
 * @dev: device
 *
 * Get the combination of flag bits exported through APIs to userspace.
 */
unsigned int dev_get_flags(const struct net_device *dev)
{
	unsigned int flags;

	flags = (dev->flags & ~(IFF_PROMISC |
				IFF_ALLMULTI |
				IFF_RUNNING |
				IFF_LOWER_UP |
				IFF_DORMANT)) |
		(dev->gflags & (IFF_PROMISC |
				IFF_ALLMULTI));

	if (netif_running(dev)) {
		if (netif_oper_up(dev))
			flags |= IFF_RUNNING;
		if (netif_carrier_ok(dev))
			flags |= IFF_LOWER_UP;
		if (netif_dormant(dev))
			flags |= IFF_DORMANT;
	}

	return flags;
}
EXPORT_SYMBOL(dev_get_flags);

int __dev_change_flags(struct net_device *dev, unsigned int flags)
{
	unsigned int old_flags = dev->flags;
	int ret;

	ASSERT_RTNL();

	/*
	 * Set the flags on our device.
	 */

	dev->flags = (flags & (IFF_DEBUG | IFF_NOTRAILERS | IFF_NOARP |
			       IFF_DYNAMIC | IFF_MULTICAST | IFF_PORTSEL |
			       IFF_AUTOMEDIA)) |
		     (dev->flags & (IFF_UP | IFF_VOLATILE | IFF_PROMISC |
				    IFF_ALLMULTI));

	/*
	 * Load in the correct multicast list now the flags have changed.
	 */

	if ((old_flags ^ flags) & IFF_MULTICAST)
		dev_change_rx_flags(dev, IFF_MULTICAST);

	dev_set_rx_mode(dev);

	/*
	 * Have we downed the interface? We handle IFF_UP ourselves
	 * according to user attempts to set it, rather than blindly
	 * setting it.
	 */

	ret = 0;
	if ((old_flags ^ flags) & IFF_UP)
		ret = ((old_flags & IFF_UP) ? __dev_close : __dev_open)(dev);

	if ((flags ^ dev->gflags) & IFF_PROMISC) {
		int inc = (flags & IFF_PROMISC) ? 1 : -1;
		unsigned int old_flags = dev->flags;

		dev->gflags ^= IFF_PROMISC;

		if (__dev_set_promiscuity(dev, inc, false) >= 0)
			if (dev->flags != old_flags)
				dev_set_rx_mode(dev);
	}

	/* NOTE: order of synchronization of IFF_PROMISC and IFF_ALLMULTI
	 * is important. Some (broken) drivers set IFF_PROMISC when
	 * IFF_ALLMULTI is requested, not asking us and not reporting.
	 */
	if ((flags ^ dev->gflags) & IFF_ALLMULTI) {
		int inc = (flags & IFF_ALLMULTI) ? 1 : -1;

		dev->gflags ^= IFF_ALLMULTI;
		__dev_set_allmulti(dev, inc, false);
	}

	return ret;
}

void __dev_notify_flags(struct net_device *dev, unsigned int old_flags,
			unsigned int gchanges)
{
	unsigned int changes = dev->flags ^ old_flags;

	if (gchanges)
		rtmsg_ifinfo(RTM_NEWLINK, dev, gchanges, GFP_ATOMIC);

	if (changes & IFF_UP) {
		if (dev->flags & IFF_UP)
			call_netdevice_notifiers(NETDEV_UP, dev);
		else
			call_netdevice_notifiers(NETDEV_DOWN, dev);
	}

	if (dev->flags & IFF_UP &&
	    (changes & ~(IFF_UP | IFF_PROMISC | IFF_ALLMULTI | IFF_VOLATILE))) {
		struct netdev_notifier_change_info change_info;

		change_info.flags_changed = changes;
		call_netdevice_notifiers_info(NETDEV_CHANGE, dev,
					      &change_info.info);
	}
}

/**
 * dev_change_flags - change device settings
 * @dev: device
 * @flags: device state flags
 *
 * Change settings on device based on state flags. The flags are
 * in the userspace exported format.
 */
int dev_change_flags(struct net_device *dev, unsigned int flags)
{
	int ret;
	unsigned int changes, old_flags = dev->flags, old_gflags = dev->gflags;

	ret = __dev_change_flags(dev, flags);
	if (ret < 0)
		return ret;

	changes = (old_flags ^ dev->flags) | (old_gflags ^ dev->gflags);
	__dev_notify_flags(dev, old_flags, changes);
	return ret;
}
EXPORT_SYMBOL(dev_change_flags);
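
/* Usage sketch (illustrative): the SIOCSIFFLAGS path effectively does
 * the following to bring an interface up, under RTNL:
 *
 *	unsigned int flags = dev_get_flags(dev) | IFF_UP;
 *	int err = dev_change_flags(dev, flags);
 */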

static int __dev_set_mtu(struct net_device *dev, int new_mtu)
{
	const struct net_device_ops *ops = dev->netdev_ops;

	if (ops->ndo_change_mtu)
		return ops->ndo_change_mtu(dev, new_mtu);

	dev->mtu = new_mtu;
	return 0;
}

/**
 * dev_set_mtu - Change maximum transfer unit
 * @dev: device
 * @new_mtu: new transfer unit
 *
 * Change the maximum transfer size of the network device.
 */
int dev_set_mtu(struct net_device *dev, int new_mtu)
{
	int err, orig_mtu;

	if (new_mtu == dev->mtu)
		return 0;

	/* MTU must be positive. */
	if (new_mtu < 0)
		return -EINVAL;

	if (!netif_device_present(dev))
		return -ENODEV;

	err = call_netdevice_notifiers(NETDEV_PRECHANGEMTU, dev);
	err = notifier_to_errno(err);
	if (err)
		return err;

	orig_mtu = dev->mtu;
	err = __dev_set_mtu(dev, new_mtu);

	if (!err) {
		err = call_netdevice_notifiers(NETDEV_CHANGEMTU, dev);
		err = notifier_to_errno(err);
		if (err) {
			/* setting mtu back and notifying everyone again,
			 * so that they have a chance to revert changes.
			 */
			__dev_set_mtu(dev, orig_mtu);
			call_netdevice_notifiers(NETDEV_CHANGEMTU, dev);
		}
	}
	return err;
}
EXPORT_SYMBOL(dev_set_mtu);
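
/* Usage sketch (illustrative): callers just pass the new value; the
 * notifier round-trip above reverts it if a protocol objects:
 *
 *	err = dev_set_mtu(dev, 9000);	request jumbo frames
 *	(err may come from a notifier or from ndo_change_mtu)
 */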

/**
 * dev_set_group - Change group this device belongs to
 * @dev: device
 * @new_group: group this device should belong to
 */
void dev_set_group(struct net_device *dev, int new_group)
{
	dev->group = new_group;
}
EXPORT_SYMBOL(dev_set_group);

/**
 * dev_set_mac_address - Change Media Access Control Address
 * @dev: device
 * @sa: new address
 *
 * Change the hardware (MAC) address of the device
 */
int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa)
{
	const struct net_device_ops *ops = dev->netdev_ops;
	int err;

	if (!ops->ndo_set_mac_address)
		return -EOPNOTSUPP;
	if (sa->sa_family != dev->type)
		return -EINVAL;
	if (!netif_device_present(dev))
		return -ENODEV;
	err = ops->ndo_set_mac_address(dev, sa);
	if (err)
		return err;
	dev->addr_assign_type = NET_ADDR_SET;
	call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
	add_device_randomness(dev->dev_addr, dev->addr_len);
	return 0;
}
EXPORT_SYMBOL(dev_set_mac_address);
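
/* Usage sketch (illustrative): the address family must match dev->type
 * (e.g. ARPHRD_ETHER), so a caller fills a sockaddr first; "new_mac" is
 * a hypothetical u8 buffer of dev->addr_len bytes:
 *
 *	struct sockaddr sa;
 *
 *	sa.sa_family = dev->type;
 *	memcpy(sa.sa_data, new_mac, dev->addr_len);
 *	err = dev_set_mac_address(dev, &sa);
 */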

/**
 * dev_change_carrier - Change device carrier
 * @dev: device
 * @new_carrier: new value
 *
 * Change device carrier
 */
int dev_change_carrier(struct net_device *dev, bool new_carrier)
{
	const struct net_device_ops *ops = dev->netdev_ops;

	if (!ops->ndo_change_carrier)
		return -EOPNOTSUPP;
	if (!netif_device_present(dev))
		return -ENODEV;
	return ops->ndo_change_carrier(dev, new_carrier);
}
EXPORT_SYMBOL(dev_change_carrier);

/**
 * dev_get_phys_port_id - Get device physical port ID
 * @dev: device
 * @ppid: port ID
 *
 * Get device physical port ID
 */
int dev_get_phys_port_id(struct net_device *dev,
			 struct netdev_phys_item_id *ppid)
{
	const struct net_device_ops *ops = dev->netdev_ops;

	if (!ops->ndo_get_phys_port_id)
		return -EOPNOTSUPP;
	return ops->ndo_get_phys_port_id(dev, ppid);
}
EXPORT_SYMBOL(dev_get_phys_port_id);

/**
 * dev_get_phys_port_name - Get device physical port name
 * @dev: device
 * @name: port name
 * @len: limit of bytes to copy to name
 *
 * Get device physical port name
 */
int dev_get_phys_port_name(struct net_device *dev,
			   char *name, size_t len)
{
	const struct net_device_ops *ops = dev->netdev_ops;

	if (!ops->ndo_get_phys_port_name)
		return -EOPNOTSUPP;
	return ops->ndo_get_phys_port_name(dev, name, len);
}
EXPORT_SYMBOL(dev_get_phys_port_name);

/**
 * dev_change_proto_down - update protocol port state information
 * @dev: device
 * @proto_down: new value
 *
 * This info can be used by switch drivers to set the phys state of the
 * port.
 */
int dev_change_proto_down(struct net_device *dev, bool proto_down)
{
	const struct net_device_ops *ops = dev->netdev_ops;

	if (!ops->ndo_change_proto_down)
		return -EOPNOTSUPP;
	if (!netif_device_present(dev))
		return -ENODEV;
	return ops->ndo_change_proto_down(dev, proto_down);
}
EXPORT_SYMBOL(dev_change_proto_down);

/**
 * dev_new_index - allocate an ifindex
 * @net: the applicable net namespace
 *
 * Returns a suitable unique value for a new device interface
 * number. The caller must hold the rtnl semaphore or the
 * dev_base_lock to be sure it remains unique.
 */
static int dev_new_index(struct net *net)
{
	int ifindex = net->ifindex;
	for (;;) {
		if (++ifindex <= 0)
			ifindex = 1;
		if (!__dev_get_by_index(net, ifindex))
			return net->ifindex = ifindex;
	}
}

/* Delayed registration/unregistration */
static LIST_HEAD(net_todo_list);
DECLARE_WAIT_QUEUE_HEAD(netdev_unregistering_wq);

static void net_set_todo(struct net_device *dev)
{
	list_add_tail(&dev->todo_list, &net_todo_list);
	dev_net(dev)->dev_unreg_count++;
}

static void rollback_registered_many(struct list_head *head)
{
	struct net_device *dev, *tmp;
	LIST_HEAD(close_head);

	BUG_ON(dev_boot_phase);
	ASSERT_RTNL();

	list_for_each_entry_safe(dev, tmp, head, unreg_list) {
		/* Some devices call without registering
		 * for initialization unwind. Remove those
		 * devices and proceed with the remaining.
		 */
		if (dev->reg_state == NETREG_UNINITIALIZED) {
			pr_debug("unregister_netdevice: device %s/%p never was registered\n",
				 dev->name, dev);

			WARN_ON(1);
			list_del(&dev->unreg_list);
			continue;
		}
		dev->dismantle = true;
		BUG_ON(dev->reg_state != NETREG_REGISTERED);
	}

	/* If device is running, close it first. */
	list_for_each_entry(dev, head, unreg_list)
		list_add_tail(&dev->close_list, &close_head);
	dev_close_many(&close_head, true);

	list_for_each_entry(dev, head, unreg_list) {
		/* And unlink it from device chain. */
		unlist_netdevice(dev);

		dev->reg_state = NETREG_UNREGISTERING;
		on_each_cpu(flush_backlog, dev, 1);
	}

	synchronize_net();

	list_for_each_entry(dev, head, unreg_list) {
		struct sk_buff *skb = NULL;

		/* Shutdown queueing discipline. */
		dev_shutdown(dev);

		/* Notify protocols that we are about to destroy
		 * this device. They should clean all the things.
		 */
		call_netdevice_notifiers(NETDEV_UNREGISTER, dev);

		if (!dev->rtnl_link_ops ||
		    dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
			skb = rtmsg_ifinfo_build_skb(RTM_DELLINK, dev, ~0U,
						     GFP_KERNEL);

		/*
		 * Flush the unicast and multicast chains
		 */
		dev_uc_flush(dev);
		dev_mc_flush(dev);

		if (dev->netdev_ops->ndo_uninit)
			dev->netdev_ops->ndo_uninit(dev);

		if (skb)
			rtmsg_ifinfo_send(skb, dev, GFP_KERNEL);

		/* Notifier chain MUST detach us all upper devices. */
		WARN_ON(netdev_has_any_upper_dev(dev));

		/* Remove entries from kobject tree */
		netdev_unregister_kobject(dev);
#ifdef CONFIG_XPS
		/* Remove XPS queueing entries */
		netif_reset_xps_queues_gt(dev, 0);
#endif
	}

	synchronize_net();

	list_for_each_entry(dev, head, unreg_list)
		dev_put(dev);
}

static void rollback_registered(struct net_device *dev)
{
	LIST_HEAD(single);

	list_add(&dev->unreg_list, &single);
	rollback_registered_many(&single);
	list_del(&single);
}

static netdev_features_t netdev_fix_features(struct net_device *dev,
	netdev_features_t features)
{
	/* Fix illegal checksum combinations */
	if ((features & NETIF_F_HW_CSUM) &&
	    (features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
		netdev_warn(dev, "mixed HW and IP checksum settings.\n");
		features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM);
	}

	/* TSO requires that SG is present as well. */
	if ((features & NETIF_F_ALL_TSO) && !(features & NETIF_F_SG)) {
		netdev_dbg(dev, "Dropping TSO features since no SG feature.\n");
		features &= ~NETIF_F_ALL_TSO;
	}

	if ((features & NETIF_F_TSO) && !(features & NETIF_F_HW_CSUM) &&
	    !(features & NETIF_F_IP_CSUM)) {
		netdev_dbg(dev, "Dropping TSO features since no CSUM feature.\n");
		features &= ~NETIF_F_TSO;
		features &= ~NETIF_F_TSO_ECN;
	}

	if ((features & NETIF_F_TSO6) && !(features & NETIF_F_HW_CSUM) &&
	    !(features & NETIF_F_IPV6_CSUM)) {
		netdev_dbg(dev, "Dropping TSO6 features since no CSUM feature.\n");
		features &= ~NETIF_F_TSO6;
	}

	/* TSO ECN requires that TSO is present as well. */
	if ((features & NETIF_F_ALL_TSO) == NETIF_F_TSO_ECN)
		features &= ~NETIF_F_TSO_ECN;

	/* Software GSO depends on SG. */
	if ((features & NETIF_F_GSO) && !(features & NETIF_F_SG)) {
		netdev_dbg(dev, "Dropping NETIF_F_GSO since no SG feature.\n");
		features &= ~NETIF_F_GSO;
	}

	/* UFO needs SG and checksumming */
	if (features & NETIF_F_UFO) {
		/* maybe split UFO into V4 and V6? */
		if (!((features & NETIF_F_GEN_CSUM) ||
		    (features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))
			    == (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
			netdev_dbg(dev,
				"Dropping NETIF_F_UFO since no checksum offload features.\n");
			features &= ~NETIF_F_UFO;
		}

		if (!(features & NETIF_F_SG)) {
			netdev_dbg(dev,
				"Dropping NETIF_F_UFO since no NETIF_F_SG feature.\n");
			features &= ~NETIF_F_UFO;
		}
	}

#ifdef CONFIG_NET_RX_BUSY_POLL
	if (dev->netdev_ops->ndo_busy_poll)
		features |= NETIF_F_BUSY_POLL;
	else
#endif
		features &= ~NETIF_F_BUSY_POLL;

	return features;
}

int __netdev_update_features(struct net_device *dev)
{
	netdev_features_t features;
	int err = 0;

	ASSERT_RTNL();

	features = netdev_get_wanted_features(dev);

	if (dev->netdev_ops->ndo_fix_features)
		features = dev->netdev_ops->ndo_fix_features(dev, features);

	/* driver might be less strict about feature dependencies */
	features = netdev_fix_features(dev, features);

	if (dev->features == features)
		return 0;

	netdev_dbg(dev, "Features changed: %pNF -> %pNF\n",
		&dev->features, &features);

	if (dev->netdev_ops->ndo_set_features)
		err = dev->netdev_ops->ndo_set_features(dev, features);

	if (unlikely(err < 0)) {
		netdev_err(dev,
			"set_features() failed (%d); wanted %pNF, left %pNF\n",
			err, &features, &dev->features);
		return -1;
	}

	if (!err)
		dev->features = features;

	return 1;
}

/**
 * netdev_update_features - recalculate device features
 * @dev: the device to check
 *
 * Recalculate dev->features set and send notifications if it
 * has changed. Should be called after driver or hardware dependent
 * conditions might have changed that influence the features.
 */
void netdev_update_features(struct net_device *dev)
{
	if (__netdev_update_features(dev))
		netdev_features_change(dev);
}
EXPORT_SYMBOL(netdev_update_features);
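
/* Usage sketch (illustrative): a driver whose offload capability depends
 * on runtime state calls this after that state changes, under RTNL:
 *
 *	dev->hw_features |= NETIF_F_RXCSUM;	offload newly usable
 *	netdev_update_features(dev);		recompute and notify
 */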

/**
 * netdev_change_features - recalculate device features
 * @dev: the device to check
 *
 * Recalculate dev->features set and send notifications even
 * if they have not changed. Should be called instead of
 * netdev_update_features() if also dev->vlan_features might
 * have changed to allow the changes to be propagated to stacked
 * VLAN devices.
 */
void netdev_change_features(struct net_device *dev)
{
	__netdev_update_features(dev);
	netdev_features_change(dev);
}
EXPORT_SYMBOL(netdev_change_features);

/**
 * netif_stacked_transfer_operstate - transfer operstate
 * @rootdev: the root or lower level device to transfer state from
 * @dev: the device to transfer operstate to
 *
 * Transfer operational state from root to device. This is normally
 * called when a stacking relationship exists between the root
 * device and the device (a leaf device).
 */
void netif_stacked_transfer_operstate(const struct net_device *rootdev,
				      struct net_device *dev)
{
	if (rootdev->operstate == IF_OPER_DORMANT)
		netif_dormant_on(dev);
	else
		netif_dormant_off(dev);

	if (netif_carrier_ok(rootdev)) {
		if (!netif_carrier_ok(dev))
			netif_carrier_on(dev);
	} else {
		if (netif_carrier_ok(dev))
			netif_carrier_off(dev);
	}
}
EXPORT_SYMBOL(netif_stacked_transfer_operstate);

#ifdef CONFIG_SYSFS
static int netif_alloc_rx_queues(struct net_device *dev)
{
	unsigned int i, count = dev->num_rx_queues;
	struct netdev_rx_queue *rx;
	size_t sz = count * sizeof(*rx);

	BUG_ON(count < 1);

	rx = kzalloc(sz, GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT);
	if (!rx) {
		rx = vzalloc(sz);
		if (!rx)
			return -ENOMEM;
	}
	dev->_rx = rx;

	for (i = 0; i < count; i++)
		rx[i].dev = dev;
	return 0;
}
#endif

static void netdev_init_one_queue(struct net_device *dev,
				  struct netdev_queue *queue, void *_unused)
{
	/* Initialize queue lock */
	spin_lock_init(&queue->_xmit_lock);
	netdev_set_xmit_lockdep_class(&queue->_xmit_lock, dev->type);
	queue->xmit_lock_owner = -1;
	netdev_queue_numa_node_write(queue, NUMA_NO_NODE);
	queue->dev = dev;
#ifdef CONFIG_BQL
	dql_init(&queue->dql, HZ);
#endif
}

static void netif_free_tx_queues(struct net_device *dev)
{
	kvfree(dev->_tx);
}

static int netif_alloc_netdev_queues(struct net_device *dev)
{
	unsigned int count = dev->num_tx_queues;
	struct netdev_queue *tx;
	size_t sz = count * sizeof(*tx);

	if (count < 1 || count > 0xffff)
		return -EINVAL;

	tx = kzalloc(sz, GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT);
	if (!tx) {
		tx = vzalloc(sz);
		if (!tx)
			return -ENOMEM;
	}
	dev->_tx = tx;

	netdev_for_each_tx_queue(dev, netdev_init_one_queue, NULL);
	spin_lock_init(&dev->tx_global_lock);

	return 0;
}

void netif_tx_stop_all_queues(struct net_device *dev)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
		netif_tx_stop_queue(txq);
	}
}
EXPORT_SYMBOL(netif_tx_stop_all_queues);

/**
 * register_netdevice - register a network device
 * @dev: device to register
 *
 * Take a completed network device structure and add it to the kernel
 * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
 * chain. 0 is returned on success. A negative errno code is returned
 * on a failure to set up the device, or if the name is a duplicate.
 *
 * Callers must hold the rtnl semaphore. You may want
 * register_netdev() instead of this.
 *
 * BUGS:
 * The locking appears insufficient to guarantee two parallel registers
 * will not get the same name.
 */

int register_netdevice(struct net_device *dev)
{
	int ret;
	struct net *net = dev_net(dev);

	BUG_ON(dev_boot_phase);
	ASSERT_RTNL();

	might_sleep();

	/* When net_device's are persistent, this will be fatal. */
	BUG_ON(dev->reg_state != NETREG_UNINITIALIZED);
	BUG_ON(!net);

	spin_lock_init(&dev->addr_list_lock);
	netdev_set_addr_lockdep_class(dev);

	ret = dev_get_valid_name(net, dev, dev->name);
	if (ret < 0)
		goto out;

	/* Init, if this function is available */
	if (dev->netdev_ops->ndo_init) {
		ret = dev->netdev_ops->ndo_init(dev);
		if (ret) {
			if (ret > 0)
				ret = -EIO;
			goto out;
		}
	}

	if (((dev->hw_features | dev->features) &
	     NETIF_F_HW_VLAN_CTAG_FILTER) &&
	    (!dev->netdev_ops->ndo_vlan_rx_add_vid ||
	     !dev->netdev_ops->ndo_vlan_rx_kill_vid)) {
		netdev_WARN(dev, "Buggy VLAN acceleration in driver!\n");
		ret = -EINVAL;
		goto err_uninit;
	}

	ret = -EBUSY;
	if (!dev->ifindex)
		dev->ifindex = dev_new_index(net);
	else if (__dev_get_by_index(net, dev->ifindex))
		goto err_uninit;

	/* Transfer changeable features to wanted_features and enable
	 * software offloads (GSO and GRO).
	 */
	dev->hw_features |= NETIF_F_SOFT_FEATURES;
	dev->features |= NETIF_F_SOFT_FEATURES;
	dev->wanted_features = dev->features & dev->hw_features;

	if (!(dev->flags & IFF_LOOPBACK)) {
		dev->hw_features |= NETIF_F_NOCACHE_COPY;
	}

	/* Make NETIF_F_HIGHDMA inheritable to VLAN devices.
	 */
	dev->vlan_features |= NETIF_F_HIGHDMA;

	/* Make NETIF_F_SG inheritable to tunnel devices.
	 */
	dev->hw_enc_features |= NETIF_F_SG;

	/* Make NETIF_F_SG inheritable to MPLS.
	 */
	dev->mpls_features |= NETIF_F_SG;

	ret = call_netdevice_notifiers(NETDEV_POST_INIT, dev);
	ret = notifier_to_errno(ret);
	if (ret)
		goto err_uninit;

	ret = netdev_register_kobject(dev);
	if (ret)
		goto err_uninit;
	dev->reg_state = NETREG_REGISTERED;

	__netdev_update_features(dev);

	/*
	 * Default initial state at registry is that the
	 * device is present.
	 */

	set_bit(__LINK_STATE_PRESENT, &dev->state);

	linkwatch_init_dev(dev);

	dev_init_scheduler(dev);
	dev_hold(dev);
	list_netdevice(dev);
	add_device_randomness(dev->dev_addr, dev->addr_len);

	/* If the device has permanent device address, driver should
	 * set dev_addr and also addr_assign_type should be set to
	 * NET_ADDR_PERM (default value).
	 */
	if (dev->addr_assign_type == NET_ADDR_PERM)
		memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);

	/* Notify protocols, that a new device appeared. */
	ret = call_netdevice_notifiers(NETDEV_REGISTER, dev);
	ret = notifier_to_errno(ret);
	if (ret) {
		rollback_registered(dev);
		dev->reg_state = NETREG_UNREGISTERED;
	}
	/*
	 * Prevent userspace races by waiting until the network
	 * device is fully setup before sending notifications.
	 */
	if (!dev->rtnl_link_ops ||
	    dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
		rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U, GFP_KERNEL);

out:
	return ret;

err_uninit:
	if (dev->netdev_ops->ndo_uninit)
		dev->netdev_ops->ndo_uninit(dev);
	goto out;
}
EXPORT_SYMBOL(register_netdevice);

/**
 * init_dummy_netdev - init a dummy network device for NAPI
 * @dev: device to init
 *
 * This takes a network device structure and initializes the minimum
 * amount of fields so it can be used to schedule NAPI polls without
 * registering a full blown interface. This is to be used by drivers
 * that need to tie several hardware interfaces to a single NAPI
 * poll scheduler due to HW limitations.
 */
int init_dummy_netdev(struct net_device *dev)
{
	/* Clear everything. Note we don't initialize spinlocks
	 * as they aren't supposed to be taken by any of the
	 * NAPI code and this dummy netdev is supposed to be
	 * only ever used for NAPI polls
	 */
	memset(dev, 0, sizeof(struct net_device));

	/* make sure we BUG if trying to hit standard
	 * register/unregister code path
	 */
	dev->reg_state = NETREG_DUMMY;

	/* NAPI wants this */
	INIT_LIST_HEAD(&dev->napi_list);

	/* a dummy interface is started by default */
	set_bit(__LINK_STATE_PRESENT, &dev->state);
	set_bit(__LINK_STATE_START, &dev->state);

	/* Note : We don't allocate pcpu_refcnt for dummy devices,
	 * because users of this 'device' don't need to change
	 * its refcount.
	 */

	return 0;
}
EXPORT_SYMBOL_GPL(init_dummy_netdev);


/**
 * register_netdev - register a network device
 * @dev: device to register
 *
 * Take a completed network device structure and add it to the kernel
 * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
 * chain. 0 is returned on success. A negative errno code is returned
 * on a failure to set up the device, or if the name is a duplicate.
 *
 * This is a wrapper around register_netdevice that takes the rtnl semaphore
 * and expands the device name if you passed a format string to
 * alloc_netdev.
 */
int register_netdev(struct net_device *dev)
{
	int err;

	rtnl_lock();
	err = register_netdevice(dev);
	rtnl_unlock();
	return err;
}
EXPORT_SYMBOL(register_netdev);
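
/* Usage sketch (illustrative): the classic probe-time sequence for an
 * Ethernet driver; "my_netdev_ops" and "struct my_priv" are hypothetical:
 *
 *	struct net_device *dev = alloc_etherdev(sizeof(struct my_priv));
 *	if (!dev)
 *		return -ENOMEM;
 *	dev->netdev_ops = &my_netdev_ops;
 *	err = register_netdev(dev);
 *	if (err)
 *		free_netdev(dev);
 */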

int netdev_refcnt_read(const struct net_device *dev)
{
	int i, refcnt = 0;

	for_each_possible_cpu(i)
		refcnt += *per_cpu_ptr(dev->pcpu_refcnt, i);
	return refcnt;
}
EXPORT_SYMBOL(netdev_refcnt_read);

/**
 * netdev_wait_allrefs - wait until all references are gone.
 * @dev: target net_device
 *
 * This is called when unregistering network devices.
 *
 * Any protocol or device that holds a reference should register
 * for netdevice notification, and cleanup and put back the
 * reference if they receive an UNREGISTER event.
 * We can get stuck here if buggy protocols don't correctly
 * call dev_put.
 */
static void netdev_wait_allrefs(struct net_device *dev)
{
	unsigned long rebroadcast_time, warning_time;
	int refcnt;

	linkwatch_forget_dev(dev);

	rebroadcast_time = warning_time = jiffies;
	refcnt = netdev_refcnt_read(dev);

	while (refcnt != 0) {
		if (time_after(jiffies, rebroadcast_time + 1 * HZ)) {
			rtnl_lock();

			/* Rebroadcast unregister notification */
			call_netdevice_notifiers(NETDEV_UNREGISTER, dev);

			__rtnl_unlock();
			rcu_barrier();
			rtnl_lock();

			call_netdevice_notifiers(NETDEV_UNREGISTER_FINAL, dev);
			if (test_bit(__LINK_STATE_LINKWATCH_PENDING,
				     &dev->state)) {
				/* We must not have linkwatch events
				 * pending on unregister. If this
				 * happens, we simply run the queue
				 * unscheduled, resulting in a noop
				 * for this device.
				 */
				linkwatch_run_queue();
			}

			__rtnl_unlock();

			rebroadcast_time = jiffies;
		}

		msleep(250);

		refcnt = netdev_refcnt_read(dev);

		if (time_after(jiffies, warning_time + 10 * HZ)) {
			pr_emerg("unregister_netdevice: waiting for %s to become free. Usage count = %d\n",
				 dev->name, refcnt);
			warning_time = jiffies;
		}
	}
}

/* The sequence is:
 *
 *	rtnl_lock();
 *	...
 *	register_netdevice(x1);
 *	register_netdevice(x2);
 *	...
 *	unregister_netdevice(y1);
 *	unregister_netdevice(y2);
 *	...
 *	rtnl_unlock();
 *	free_netdev(y1);
 *	free_netdev(y2);
 *
 * We are invoked by rtnl_unlock().
 * This allows us to deal with problems:
 * 1) We can delete sysfs objects which invoke hotplug
 *    without deadlocking with linkwatch via keventd.
 * 2) Since we run with the RTNL semaphore not held, we can sleep
 *    safely in order to wait for the netdev refcnt to drop to zero.
 *
 * We must not return until all unregister events added during
 * the interval the lock was held have been completed.
 */
void netdev_run_todo(void)
{
	struct list_head list;

	/* Snapshot list, allow later requests */
	list_replace_init(&net_todo_list, &list);

	__rtnl_unlock();

	/* Wait for rcu callbacks to finish before next phase */
	if (!list_empty(&list))
		rcu_barrier();

	while (!list_empty(&list)) {
		struct net_device *dev
			= list_first_entry(&list, struct net_device, todo_list);
		list_del(&dev->todo_list);

		rtnl_lock();
		call_netdevice_notifiers(NETDEV_UNREGISTER_FINAL, dev);
		__rtnl_unlock();

		if (unlikely(dev->reg_state != NETREG_UNREGISTERING)) {
			pr_err("network todo '%s' but state %d\n",
			       dev->name, dev->reg_state);
			dump_stack();
			continue;
		}

		dev->reg_state = NETREG_UNREGISTERED;

		netdev_wait_allrefs(dev);

		/* paranoia */
		BUG_ON(netdev_refcnt_read(dev));
		BUG_ON(!list_empty(&dev->ptype_all));
		BUG_ON(!list_empty(&dev->ptype_specific));
		WARN_ON(rcu_access_pointer(dev->ip_ptr));
		WARN_ON(rcu_access_pointer(dev->ip6_ptr));
		WARN_ON(dev->dn_ptr);

		if (dev->destructor)
			dev->destructor(dev);

		/* Report a network device has been unregistered */
		rtnl_lock();
		dev_net(dev)->dev_unreg_count--;
		__rtnl_unlock();
		wake_up(&netdev_unregistering_wq);

		/* Free network device */
		kobject_put(&dev->dev.kobj);
	}
}

/* Convert net_device_stats to rtnl_link_stats64. They have the same
 * fields in the same order, with only the type differing.
 */
void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64,
			     const struct net_device_stats *netdev_stats)
{
#if BITS_PER_LONG == 64
	BUILD_BUG_ON(sizeof(*stats64) != sizeof(*netdev_stats));
	memcpy(stats64, netdev_stats, sizeof(*stats64));
#else
	size_t i, n = sizeof(*stats64) / sizeof(u64);
	const unsigned long *src = (const unsigned long *)netdev_stats;
	u64 *dst = (u64 *)stats64;

	BUILD_BUG_ON(sizeof(*netdev_stats) / sizeof(unsigned long) !=
		     sizeof(*stats64) / sizeof(u64));
	for (i = 0; i < n; i++)
		dst[i] = src[i];
#endif
}
EXPORT_SYMBOL(netdev_stats_to_stats64);

/**
 * dev_get_stats - get network device statistics
 * @dev: device to get statistics from
 * @storage: place to store stats
 *
 * Get network statistics from device. Return @storage.
 * The device driver may provide its own method by setting
 * dev->netdev_ops->get_stats64 or dev->netdev_ops->get_stats;
 * otherwise the internal statistics structure is used.
 */
struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
					struct rtnl_link_stats64 *storage)
{
	const struct net_device_ops *ops = dev->netdev_ops;

	if (ops->ndo_get_stats64) {
		memset(storage, 0, sizeof(*storage));
		ops->ndo_get_stats64(dev, storage);
	} else if (ops->ndo_get_stats) {
		netdev_stats_to_stats64(storage, ops->ndo_get_stats(dev));
	} else {
		netdev_stats_to_stats64(storage, &dev->stats);
	}
	storage->rx_dropped += atomic_long_read(&dev->rx_dropped);
	storage->tx_dropped += atomic_long_read(&dev->tx_dropped);
	return storage;
}
EXPORT_SYMBOL(dev_get_stats);
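
/* Usage sketch (illustrative): readers supply their own storage, so the
 * returned pointer is valid for as long as the caller's buffer is:
 *
 *	struct rtnl_link_stats64 temp;
 *	const struct rtnl_link_stats64 *stats = dev_get_stats(dev, &temp);
 *
 *	(stats->rx_packets and friends can now be read from "stats")
 */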

struct netdev_queue *dev_ingress_queue_create(struct net_device *dev)
{
	struct netdev_queue *queue = dev_ingress_queue(dev);

#ifdef CONFIG_NET_CLS_ACT
	if (queue)
		return queue;
	queue = kzalloc(sizeof(*queue), GFP_KERNEL);
	if (!queue)
		return NULL;
	netdev_init_one_queue(dev, queue, NULL);
	RCU_INIT_POINTER(queue->qdisc, &noop_qdisc);
	queue->qdisc_sleeping = &noop_qdisc;
	rcu_assign_pointer(dev->ingress_queue, queue);
#endif
	return queue;
}

static const struct ethtool_ops default_ethtool_ops;

void netdev_set_default_ethtool_ops(struct net_device *dev,
				    const struct ethtool_ops *ops)
{
	if (dev->ethtool_ops == &default_ethtool_ops)
		dev->ethtool_ops = ops;
}
EXPORT_SYMBOL_GPL(netdev_set_default_ethtool_ops);

void netdev_freemem(struct net_device *dev)
{
	char *addr = (char *)dev - dev->padded;

	kvfree(addr);
}

1da177e4 6940/**
36909ea4 6941 * alloc_netdev_mqs - allocate network device
c835a677
TG
6942 * @sizeof_priv: size of private data to allocate space for
6943 * @name: device name format string
6944 * @name_assign_type: origin of device name
6945 * @setup: callback to initialize device
6946 * @txqs: the number of TX subqueues to allocate
6947 * @rxqs: the number of RX subqueues to allocate
1da177e4
LT
6948 *
6949 * Allocates a struct net_device with private data area for driver use
90e51adf 6950 * and performs basic initialization. Also allocates subqueue structs
36909ea4 6951 * for each queue on the device.
1da177e4 6952 */
36909ea4 6953struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
c835a677 6954 unsigned char name_assign_type,
36909ea4
TH
6955 void (*setup)(struct net_device *),
6956 unsigned int txqs, unsigned int rxqs)
1da177e4 6957{
1da177e4 6958 struct net_device *dev;
7943986c 6959 size_t alloc_size;
1ce8e7b5 6960 struct net_device *p;
1da177e4 6961
b6fe17d6
SH
6962 BUG_ON(strlen(name) >= sizeof(dev->name));
6963
36909ea4 6964 if (txqs < 1) {
7b6cd1ce 6965 pr_err("alloc_netdev: Unable to allocate device with zero queues\n");
55513fb4
TH
6966 return NULL;
6967 }
6968
a953be53 6969#ifdef CONFIG_SYSFS
36909ea4 6970 if (rxqs < 1) {
7b6cd1ce 6971 pr_err("alloc_netdev: Unable to allocate device with zero RX queues\n");
36909ea4
TH
6972 return NULL;
6973 }
6974#endif
6975
fd2ea0a7 6976 alloc_size = sizeof(struct net_device);
d1643d24
AD
6977 if (sizeof_priv) {
6978 /* ensure 32-byte alignment of private area */
1ce8e7b5 6979 alloc_size = ALIGN(alloc_size, NETDEV_ALIGN);
d1643d24
AD
6980 alloc_size += sizeof_priv;
6981 }
6982 /* ensure 32-byte alignment of whole construct */
1ce8e7b5 6983 alloc_size += NETDEV_ALIGN - 1;
1da177e4 6984
74d332c1
ED
6985 p = kzalloc(alloc_size, GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT);
6986 if (!p)
6987 p = vzalloc(alloc_size);
62b5942a 6988 if (!p)
1da177e4 6989 return NULL;
1da177e4 6990
1ce8e7b5 6991 dev = PTR_ALIGN(p, NETDEV_ALIGN);
1da177e4 6992 dev->padded = (char *)dev - (char *)p;
ab9c73cc 6993
29b4433d
ED
6994 dev->pcpu_refcnt = alloc_percpu(int);
6995 if (!dev->pcpu_refcnt)
74d332c1 6996 goto free_dev;
ab9c73cc 6997
ab9c73cc 6998 if (dev_addr_init(dev))
29b4433d 6999 goto free_pcpu;
ab9c73cc 7000
22bedad3 7001 dev_mc_init(dev);
a748ee24 7002 dev_uc_init(dev);
ccffad25 7003
c346dca1 7004 dev_net_set(dev, &init_net);
1da177e4 7005
8d3bdbd5 7006 dev->gso_max_size = GSO_MAX_SIZE;
30b678d8 7007 dev->gso_max_segs = GSO_MAX_SEGS;
fcbeb976 7008 dev->gso_min_segs = 0;
8d3bdbd5 7009
8d3bdbd5
DM
7010 INIT_LIST_HEAD(&dev->napi_list);
7011 INIT_LIST_HEAD(&dev->unreg_list);
5cde2829 7012 INIT_LIST_HEAD(&dev->close_list);
8d3bdbd5 7013 INIT_LIST_HEAD(&dev->link_watch_list);
2f268f12
VF
7014 INIT_LIST_HEAD(&dev->adj_list.upper);
7015 INIT_LIST_HEAD(&dev->adj_list.lower);
7016 INIT_LIST_HEAD(&dev->all_adj_list.upper);
7017 INIT_LIST_HEAD(&dev->all_adj_list.lower);
7866a621
SN
7018 INIT_LIST_HEAD(&dev->ptype_all);
7019 INIT_LIST_HEAD(&dev->ptype_specific);
02875878 7020 dev->priv_flags = IFF_XMIT_DST_RELEASE | IFF_XMIT_DST_RELEASE_PERM;
8d3bdbd5
DM
7021 setup(dev);
7022
906470c1 7023 if (!dev->tx_queue_len)
f84bb1ea 7024 dev->priv_flags |= IFF_NO_QUEUE;
906470c1 7025
36909ea4
TH
7026 dev->num_tx_queues = txqs;
7027 dev->real_num_tx_queues = txqs;
ed9af2e8 7028 if (netif_alloc_netdev_queues(dev))
8d3bdbd5 7029 goto free_all;
e8a0464c 7030
a953be53 7031#ifdef CONFIG_SYSFS
36909ea4
TH
7032 dev->num_rx_queues = rxqs;
7033 dev->real_num_rx_queues = rxqs;
fe822240 7034 if (netif_alloc_rx_queues(dev))
8d3bdbd5 7035 goto free_all;
df334545 7036#endif
0a9627f2 7037
1da177e4 7038 strcpy(dev->name, name);
c835a677 7039 dev->name_assign_type = name_assign_type;
cbda10fa 7040 dev->group = INIT_NETDEV_GROUP;
2c60db03
ED
7041 if (!dev->ethtool_ops)
7042 dev->ethtool_ops = &default_ethtool_ops;
e687ad60
PN
7043
7044 nf_hook_ingress_init(dev);
7045
1da177e4 7046 return dev;
ab9c73cc 7047
8d3bdbd5
DM
7048free_all:
7049 free_netdev(dev);
7050 return NULL;
7051
29b4433d
ED
7052free_pcpu:
7053 free_percpu(dev->pcpu_refcnt);
74d332c1
ED
7054free_dev:
7055 netdev_freemem(dev);
ab9c73cc 7056 return NULL;
1da177e4 7057}
36909ea4 7058EXPORT_SYMBOL(alloc_netdev_mqs);
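/* Illustrative sketch, not part of the original source: a hypothetical
 * Ethernet driver allocating a device with four TX and four RX queues
 * and a private area (struct my_priv is an assumed name):
 *
 *	struct net_device *dev;
 *	struct my_priv *priv;
 *
 *	dev = alloc_netdev_mqs(sizeof(struct my_priv), "eth%d",
 *			       NET_NAME_ENUM, ether_setup, 4, 4);
 *	if (!dev)
 *		return -ENOMEM;
 *	priv = netdev_priv(dev);
 *
 * Single-queue callers normally use the alloc_netdev()/alloc_etherdev()
 * wrappers, which end up here with txqs == rxqs == 1.
 */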
1da177e4
LT
7059
7060/**
7061 * free_netdev - free network device
7062 * @dev: device
7063 *
4ec93edb
YH
7064 * This function does the last stage of destroying an allocated device
7065 * interface. The reference to the device object is released.
1da177e4
LT
7066 * If this is the last reference then the device will be freed.
7067 */
7068void free_netdev(struct net_device *dev)
7069{
d565b0a1
HX
7070 struct napi_struct *p, *n;
7071
60877a32 7072 netif_free_tx_queues(dev);
a953be53 7073#ifdef CONFIG_SYSFS
10595902 7074 kvfree(dev->_rx);
fe822240 7075#endif
e8a0464c 7076
33d480ce 7077 kfree(rcu_dereference_protected(dev->ingress_queue, 1));
24824a09 7078
f001fde5
JP
7079 /* Flush device addresses */
7080 dev_addr_flush(dev);
7081
d565b0a1
HX
7082 list_for_each_entry_safe(p, n, &dev->napi_list, dev_list)
7083 netif_napi_del(p);
7084
29b4433d
ED
7085 free_percpu(dev->pcpu_refcnt);
7086 dev->pcpu_refcnt = NULL;
7087
3041a069 7088 /* Compatibility with error handling in drivers */
1da177e4 7089 if (dev->reg_state == NETREG_UNINITIALIZED) {
74d332c1 7090 netdev_freemem(dev);
1da177e4
LT
7091 return;
7092 }
7093
7094 BUG_ON(dev->reg_state != NETREG_UNREGISTERED);
7095 dev->reg_state = NETREG_RELEASED;
7096
43cb76d9
GKH
7097 /* will free via device release */
7098 put_device(&dev->dev);
1da177e4 7099}
d1b19dff 7100EXPORT_SYMBOL(free_netdev);
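/* Illustrative sketch, not part of the original source: free_netdev()
 * pairs with alloc_netdev_mqs() and friends. A typical probe error
 * path for a device that never made it to NETREG_REGISTERED:
 *
 *	err = register_netdevice(dev);
 *	if (err) {
 *		free_netdev(dev);
 *		return err;
 *	}
 *
 * For a device that was registered and then unregistered, the final
 * release is instead deferred to put_device(), as in the code above.
 */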
4ec93edb 7101
f0db275a
SH
7102/**
7103 * synchronize_net - Synchronize with packet receive processing
7104 *
7105 * Wait for packets currently being received to be done.
7106 * Does not block later packets from starting.
7107 */
4ec93edb 7108void synchronize_net(void)
1da177e4
LT
7109{
7110 might_sleep();
be3fc413
ED
7111 if (rtnl_is_locked())
7112 synchronize_rcu_expedited();
7113 else
7114 synchronize_rcu();
1da177e4 7115}
d1b19dff 7116EXPORT_SYMBOL(synchronize_net);
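/* Illustrative sketch, not part of the original source: a caller
 * unhooks a receive handler, waits out in-flight receivers, then frees
 * its state (my_packet_type and my_state are hypothetical):
 *
 *	__dev_remove_pack(&my_packet_type);
 *	synchronize_net();
 *	kfree(my_state);
 */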
1da177e4
LT
7117
7118/**
44a0873d 7119 * unregister_netdevice_queue - remove device from the kernel
1da177e4 7120 * @dev: device
44a0873d 7121 * @head: list
6ebfbc06 7122 *
1da177e4 7123 * This function shuts down a device interface and removes it
d59b54b1 7124 * from the kernel tables.
44a0873d 7125 * If head is not NULL, the device is queued to be unregistered later.
1da177e4
LT
7126 *
7127 * Callers must hold the rtnl semaphore. You may want
7128 * unregister_netdev() instead of this.
7129 */
7130
44a0873d 7131void unregister_netdevice_queue(struct net_device *dev, struct list_head *head)
1da177e4 7132{
a6620712
HX
7133 ASSERT_RTNL();
7134
44a0873d 7135 if (head) {
9fdce099 7136 list_move_tail(&dev->unreg_list, head);
44a0873d
ED
7137 } else {
7138 rollback_registered(dev);
7139 /* Finish processing unregister after unlock */
7140 net_set_todo(dev);
7141 }
1da177e4 7142}
44a0873d 7143EXPORT_SYMBOL(unregister_netdevice_queue);
1da177e4 7144
9b5e383c
ED
7145/**
7146 * unregister_netdevice_many - unregister many devices
7147 * @head: list of devices
87757a91
ED
7148 *
7149 * Note: As most callers use a stack-allocated list_head,
7150 * we force a list_del() to make sure the stack won't be corrupted later.
9b5e383c
ED
7151 */
7152void unregister_netdevice_many(struct list_head *head)
7153{
7154 struct net_device *dev;
7155
7156 if (!list_empty(head)) {
7157 rollback_registered_many(head);
7158 list_for_each_entry(dev, head, unreg_list)
7159 net_set_todo(dev);
87757a91 7160 list_del(head);
9b5e383c
ED
7161 }
7162}
63c8099d 7163EXPORT_SYMBOL(unregister_netdevice_many);
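/* Illustrative sketch, not part of the original source: batching
 * several unregistrations so they share one grace period (my_dev_list
 * and its list linkage are hypothetical):
 *
 *	LIST_HEAD(kill_list);
 *
 *	rtnl_lock();
 *	list_for_each_entry(dev, &my_dev_list, list)
 *		unregister_netdevice_queue(dev, &kill_list);
 *	unregister_netdevice_many(&kill_list);
 *	rtnl_unlock();
 */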
9b5e383c 7164
1da177e4
LT
7165/**
7166 * unregister_netdev - remove device from the kernel
7167 * @dev: device
7168 *
7169 * This function shuts down a device interface and removes it
d59b54b1 7170 * from the kernel tables.
1da177e4
LT
7171 *
7172 * This is just a wrapper for unregister_netdevice that takes
7173 * the rtnl semaphore. In general you want to use this and not
7174 * unregister_netdevice.
7175 */
7176void unregister_netdev(struct net_device *dev)
7177{
7178 rtnl_lock();
7179 unregister_netdevice(dev);
7180 rtnl_unlock();
7181}
1da177e4
LT
7182EXPORT_SYMBOL(unregister_netdev);
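/* Illustrative sketch, not part of the original source: the common
 * module-exit sequence for a driver that used register_netdev()
 * (my_dev is a hypothetical name):
 *
 *	unregister_netdev(my_dev);
 *	free_netdev(my_dev);
 */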
7183
ce286d32
EB
7184/**
7185 * dev_change_net_namespace - move device to a different network namespace
7186 * @dev: device
7187 * @net: network namespace
7188 * @pat: If not NULL, name pattern to try if the current device name
7189 * is already taken in the destination network namespace.
7190 *
7191 * This function shuts down a device interface and moves it
7192 * to a new network namespace. On success 0 is returned; on
7193 * failure a negative errno code is returned.
7194 *
7195 * Callers must hold the rtnl semaphore.
7196 */
7197
7198int dev_change_net_namespace(struct net_device *dev, struct net *net, const char *pat)
7199{
ce286d32
EB
7200 int err;
7201
7202 ASSERT_RTNL();
7203
7204 /* Don't allow namespace local devices to be moved. */
7205 err = -EINVAL;
7206 if (dev->features & NETIF_F_NETNS_LOCAL)
7207 goto out;
7208
7209 /* Ensure the device has been registered */
ce286d32
EB
7210 if (dev->reg_state != NETREG_REGISTERED)
7211 goto out;
7212
7213 /* Get out if there is nothing to do */
7214 err = 0;
878628fb 7215 if (net_eq(dev_net(dev), net))
ce286d32
EB
7216 goto out;
7217
7218 /* Pick the destination device name, and ensure
7219 * we can use it in the destination network namespace.
7220 */
7221 err = -EEXIST;
d9031024 7222 if (__dev_get_by_name(net, dev->name)) {
ce286d32
EB
7223 /* We get here if we can't use the current device name */
7224 if (!pat)
7225 goto out;
828de4f6 7226 if (dev_get_valid_name(net, dev, pat) < 0)
ce286d32
EB
7227 goto out;
7228 }
7229
7230 /*
7231 * And now a mini version of register_netdevice/unregister_netdevice.
7232 */
7233
7234 /* If device is running close it first. */
9b772652 7235 dev_close(dev);
ce286d32
EB
7236
7237 /* And unlink it from device chain */
7238 err = -ENODEV;
7239 unlist_netdevice(dev);
7240
7241 synchronize_net();
7242
7243 /* Shutdown queueing discipline. */
7244 dev_shutdown(dev);
7245
7246 /* Notify protocols that we are about to destroy
7247 this device. They should clean up all their state.
3b27e105
DL
7248
7249 Note that dev->reg_state stays at NETREG_REGISTERED.
7250 This is deliberate: this way 8021q and macvlan know
7251 the device is just moving and can keep their slaves up.
ce286d32
EB
7252 */
7253 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
6549dd43
G
7254 rcu_barrier();
7255 call_netdevice_notifiers(NETDEV_UNREGISTER_FINAL, dev);
7f294054 7256 rtmsg_ifinfo(RTM_DELLINK, dev, ~0U, GFP_KERNEL);
ce286d32
EB
7257
7258 /*
7259 * Flush the unicast and multicast chains
7260 */
a748ee24 7261 dev_uc_flush(dev);
22bedad3 7262 dev_mc_flush(dev);
ce286d32 7263
4e66ae2e
SH
7264 /* Send a netdev-removed uevent to the old namespace */
7265 kobject_uevent(&dev->dev.kobj, KOBJ_REMOVE);
4c75431a 7266 netdev_adjacent_del_links(dev);
4e66ae2e 7267
ce286d32 7268 /* Actually switch the network namespace */
c346dca1 7269 dev_net_set(dev, net);
ce286d32 7270
ce286d32 7271 /* If there is an ifindex conflict assign a new one */
7a66bbc9 7272 if (__dev_get_by_index(net, dev->ifindex))
ce286d32 7273 dev->ifindex = dev_new_index(net);
ce286d32 7274
4e66ae2e
SH
7275 /* Send a netdev-add uevent to the new namespace */
7276 kobject_uevent(&dev->dev.kobj, KOBJ_ADD);
4c75431a 7277 netdev_adjacent_add_links(dev);
4e66ae2e 7278
8b41d188 7279 /* Fixup kobjects */
a1b3f594 7280 err = device_rename(&dev->dev, dev->name);
8b41d188 7281 WARN_ON(err);
ce286d32
EB
7282
7283 /* Add the device back in the hashes */
7284 list_netdevice(dev);
7285
7286 /* Notify protocols that a new device has appeared. */
7287 call_netdevice_notifiers(NETDEV_REGISTER, dev);
7288
d90a909e
EB
7289 /*
7290 * Prevent userspace races by waiting until the network
7291 * device is fully setup before sending notifications.
7292 */
7f294054 7293 rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U, GFP_KERNEL);
d90a909e 7294
ce286d32
EB
7295 synchronize_net();
7296 err = 0;
7297out:
7298 return err;
7299}
463d0183 7300EXPORT_SYMBOL_GPL(dev_change_net_namespace);
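/* Illustrative sketch, not part of the original source: a hypothetical
 * caller moving a device into a peer namespace under RTNL, falling
 * back to a "dev%d" name if the current name is taken there:
 *
 *	rtnl_lock();
 *	err = dev_change_net_namespace(dev, peer_net, "dev%d");
 *	rtnl_unlock();
 */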
ce286d32 7301
1da177e4
LT
7302static int dev_cpu_callback(struct notifier_block *nfb,
7303 unsigned long action,
7304 void *ocpu)
7305{
7306 struct sk_buff **list_skb;
1da177e4
LT
7307 struct sk_buff *skb;
7308 unsigned int cpu, oldcpu = (unsigned long)ocpu;
7309 struct softnet_data *sd, *oldsd;
7310
8bb78442 7311 if (action != CPU_DEAD && action != CPU_DEAD_FROZEN)
1da177e4
LT
7312 return NOTIFY_OK;
7313
7314 local_irq_disable();
7315 cpu = smp_processor_id();
7316 sd = &per_cpu(softnet_data, cpu);
7317 oldsd = &per_cpu(softnet_data, oldcpu);
7318
7319 /* Find end of our completion_queue. */
7320 list_skb = &sd->completion_queue;
7321 while (*list_skb)
7322 list_skb = &(*list_skb)->next;
7323 /* Append completion queue from offline CPU. */
7324 *list_skb = oldsd->completion_queue;
7325 oldsd->completion_queue = NULL;
7326
1da177e4 7327 /* Append output queue from offline CPU. */
a9cbd588
CG
7328 if (oldsd->output_queue) {
7329 *sd->output_queue_tailp = oldsd->output_queue;
7330 sd->output_queue_tailp = oldsd->output_queue_tailp;
7331 oldsd->output_queue = NULL;
7332 oldsd->output_queue_tailp = &oldsd->output_queue;
7333 }
ac64da0b
ED
7334 /* Append NAPI poll list from offline CPU, with one exception:
7335 * process_backlog() must be called by the CPU owning the per-CPU backlog.
7336 * We properly handle process_queue & input_pkt_queue later.
7337 */
7338 while (!list_empty(&oldsd->poll_list)) {
7339 struct napi_struct *napi = list_first_entry(&oldsd->poll_list,
7340 struct napi_struct,
7341 poll_list);
7342
7343 list_del_init(&napi->poll_list);
7344 if (napi->poll == process_backlog)
7345 napi->state = 0;
7346 else
7347 ____napi_schedule(sd, napi);
264524d5 7348 }
1da177e4
LT
7349
7350 raise_softirq_irqoff(NET_TX_SOFTIRQ);
7351 local_irq_enable();
7352
7353 /* Process offline CPU's input_pkt_queue */
76cc8b13 7354 while ((skb = __skb_dequeue(&oldsd->process_queue))) {
91e83133 7355 netif_rx_ni(skb);
76cc8b13 7356 input_queue_head_incr(oldsd);
fec5e652 7357 }
ac64da0b 7358 while ((skb = skb_dequeue(&oldsd->input_pkt_queue))) {
91e83133 7359 netif_rx_ni(skb);
76cc8b13
TH
7360 input_queue_head_incr(oldsd);
7361 }
1da177e4
LT
7362
7363 return NOTIFY_OK;
7364}
1da177e4
LT
7365
7366
7f353bf2 7367/**
b63365a2
HX
7368 * netdev_increment_features - increment feature set by one
7369 * @all: current feature set
7370 * @one: new feature set
7371 * @mask: mask feature set
7f353bf2
HX
7372 *
7373 * Computes a new feature set after adding a device with feature set
b63365a2
HX
7374 * @one to the master device with current feature set @all. Will not
7375 * enable anything that is off in @mask. Returns the new feature set.
7f353bf2 7376 */
c8f44aff
MM
7377netdev_features_t netdev_increment_features(netdev_features_t all,
7378 netdev_features_t one, netdev_features_t mask)
b63365a2 7379{
1742f183
MM
7380 if (mask & NETIF_F_GEN_CSUM)
7381 mask |= NETIF_F_ALL_CSUM;
7382 mask |= NETIF_F_VLAN_CHALLENGED;
7f353bf2 7383
1742f183
MM
7384 all |= one & (NETIF_F_ONE_FOR_ALL|NETIF_F_ALL_CSUM) & mask;
7385 all &= one | ~NETIF_F_ALL_FOR_ALL;
c6e1a0d1 7386
1742f183
MM
7387 /* If one device supports hw checksumming, set for all. */
7388 if (all & NETIF_F_GEN_CSUM)
7389 all &= ~(NETIF_F_ALL_CSUM & ~NETIF_F_GEN_CSUM);
7f353bf2
HX
7390
7391 return all;
7392}
b63365a2 7393EXPORT_SYMBOL(netdev_increment_features);
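/* Illustrative sketch, not part of the original source: a bonding-style
 * master folding each slave's features into its own (the slave list and
 * its field names are hypothetical):
 *
 *	netdev_features_t mask = master->features;
 *	netdev_features_t features;
 *
 *	features = mask & ~NETIF_F_ONE_FOR_ALL;
 *	features |= NETIF_F_ALL_FOR_ALL;
 *	list_for_each_entry(slave, &priv->slaves, list)
 *		features = netdev_increment_features(features,
 *						     slave->dev->features,
 *						     mask);
 *	master->features = features;
 */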
7f353bf2 7394
430f03cd 7395static struct hlist_head * __net_init netdev_create_hash(void)
30d97d35
PE
7396{
7397 int i;
7398 struct hlist_head *hash;
7399
7400 hash = kmalloc(sizeof(*hash) * NETDEV_HASHENTRIES, GFP_KERNEL);
7401 if (hash != NULL)
7402 for (i = 0; i < NETDEV_HASHENTRIES; i++)
7403 INIT_HLIST_HEAD(&hash[i]);
7404
7405 return hash;
7406}
7407
881d966b 7408/* Initialize per network namespace state */
4665079c 7409static int __net_init netdev_init(struct net *net)
881d966b 7410{
734b6541
RM
7411 if (net != &init_net)
7412 INIT_LIST_HEAD(&net->dev_base_head);
881d966b 7413
30d97d35
PE
7414 net->dev_name_head = netdev_create_hash();
7415 if (net->dev_name_head == NULL)
7416 goto err_name;
881d966b 7417
30d97d35
PE
7418 net->dev_index_head = netdev_create_hash();
7419 if (net->dev_index_head == NULL)
7420 goto err_idx;
881d966b
EB
7421
7422 return 0;
30d97d35
PE
7423
7424err_idx:
7425 kfree(net->dev_name_head);
7426err_name:
7427 return -ENOMEM;
881d966b
EB
7428}
7429
f0db275a
SH
7430/**
7431 * netdev_drivername - network driver for the device
7432 * @dev: network device
f0db275a
SH
7433 *
7434 * Determine network driver for device.
7435 */
3019de12 7436const char *netdev_drivername(const struct net_device *dev)
6579e57b 7437{
cf04a4c7
SH
7438 const struct device_driver *driver;
7439 const struct device *parent;
3019de12 7440 const char *empty = "";
6579e57b
AV
7441
7442 parent = dev->dev.parent;
6579e57b 7443 if (!parent)
3019de12 7444 return empty;
6579e57b
AV
7445
7446 driver = parent->driver;
7447 if (driver && driver->name)
3019de12
DM
7448 return driver->name;
7449 return empty;
6579e57b
AV
7450}
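/* Illustrative sketch, not part of the original source: mostly useful
 * in diagnostics, e.g. a watchdog-style message naming the driver:
 *
 *	pr_warn("%s (%s): transmit queue timed out\n",
 *		dev->name, netdev_drivername(dev));
 */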
7451
6ea754eb
JP
7452static void __netdev_printk(const char *level, const struct net_device *dev,
7453 struct va_format *vaf)
256df2f3 7454{
b004ff49 7455 if (dev && dev->dev.parent) {
6ea754eb
JP
7456 dev_printk_emit(level[1] - '0',
7457 dev->dev.parent,
7458 "%s %s %s%s: %pV",
7459 dev_driver_string(dev->dev.parent),
7460 dev_name(dev->dev.parent),
7461 netdev_name(dev), netdev_reg_state(dev),
7462 vaf);
b004ff49 7463 } else if (dev) {
6ea754eb
JP
7464 printk("%s%s%s: %pV",
7465 level, netdev_name(dev), netdev_reg_state(dev), vaf);
b004ff49 7466 } else {
6ea754eb 7467 printk("%s(NULL net_device): %pV", level, vaf);
b004ff49 7468 }
256df2f3
JP
7469}
7470
6ea754eb
JP
7471void netdev_printk(const char *level, const struct net_device *dev,
7472 const char *format, ...)
256df2f3
JP
7473{
7474 struct va_format vaf;
7475 va_list args;
256df2f3
JP
7476
7477 va_start(args, format);
7478
7479 vaf.fmt = format;
7480 vaf.va = &args;
7481
6ea754eb 7482 __netdev_printk(level, dev, &vaf);
b004ff49 7483
256df2f3 7484 va_end(args);
256df2f3
JP
7485}
7486EXPORT_SYMBOL(netdev_printk);
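/* Illustrative sketch, not part of the original source: netdev_printk()
 * takes an explicit level and otherwise prints with netdev naming
 * (ring is a hypothetical variable):
 *
 *	netdev_printk(KERN_DEBUG, dev, "TX ring %d stalled\n", ring);
 */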
7487
7488#define define_netdev_printk_level(func, level) \
6ea754eb 7489void func(const struct net_device *dev, const char *fmt, ...) \
256df2f3 7490{ \
256df2f3
JP
7491 struct va_format vaf; \
7492 va_list args; \
7493 \
7494 va_start(args, fmt); \
7495 \
7496 vaf.fmt = fmt; \
7497 vaf.va = &args; \
7498 \
6ea754eb 7499 __netdev_printk(level, dev, &vaf); \
b004ff49 7500 \
256df2f3 7501 va_end(args); \
256df2f3
JP
7502} \
7503EXPORT_SYMBOL(func);
7504
7505define_netdev_printk_level(netdev_emerg, KERN_EMERG);
7506define_netdev_printk_level(netdev_alert, KERN_ALERT);
7507define_netdev_printk_level(netdev_crit, KERN_CRIT);
7508define_netdev_printk_level(netdev_err, KERN_ERR);
7509define_netdev_printk_level(netdev_warn, KERN_WARNING);
7510define_netdev_printk_level(netdev_notice, KERN_NOTICE);
7511define_netdev_printk_level(netdev_info, KERN_INFO);
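/* Illustrative sketch, not part of the original source: the helpers
 * generated above are used like the dev_*() printing family
 * (fw_name and the messages are hypothetical):
 *
 *	netdev_err(dev, "link down, resetting\n");
 *	netdev_info(dev, "firmware %s loaded\n", fw_name);
 */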
7512
4665079c 7513static void __net_exit netdev_exit(struct net *net)
881d966b
EB
7514{
7515 kfree(net->dev_name_head);
7516 kfree(net->dev_index_head);
7517}
7518
022cbae6 7519static struct pernet_operations __net_initdata netdev_net_ops = {
881d966b
EB
7520 .init = netdev_init,
7521 .exit = netdev_exit,
7522};
7523
4665079c 7524static void __net_exit default_device_exit(struct net *net)
ce286d32 7525{
e008b5fc 7526 struct net_device *dev, *aux;
ce286d32 7527 /*
e008b5fc 7528 * Push all migratable network devices back to the
ce286d32
EB
7529 * initial network namespace
7530 */
7531 rtnl_lock();
e008b5fc 7532 for_each_netdev_safe(net, dev, aux) {
ce286d32 7533 int err;
aca51397 7534 char fb_name[IFNAMSIZ];
ce286d32
EB
7535
7536 /* Ignore unmovable devices (e.g. loopback) */
7537 if (dev->features & NETIF_F_NETNS_LOCAL)
7538 continue;
7539
e008b5fc
EB
7540 /* Leave virtual devices for the generic cleanup */
7541 if (dev->rtnl_link_ops)
7542 continue;
d0c082ce 7543
25985edc 7544 /* Push remaining network devices to init_net */
aca51397
PE
7545 snprintf(fb_name, IFNAMSIZ, "dev%d", dev->ifindex);
7546 err = dev_change_net_namespace(dev, &init_net, fb_name);
ce286d32 7547 if (err) {
7b6cd1ce
JP
7548 pr_emerg("%s: failed to move %s to init_net: %d\n",
7549 __func__, dev->name, err);
aca51397 7550 BUG();
ce286d32
EB
7551 }
7552 }
7553 rtnl_unlock();
7554}
7555
50624c93
EB
7556static void __net_exit rtnl_lock_unregistering(struct list_head *net_list)
7557{
7558 /* Return with the rtnl_lock held when there are no network
7559 * devices unregistering in any network namespace in net_list.
7560 */
7561 struct net *net;
7562 bool unregistering;
ff960a73 7563 DEFINE_WAIT_FUNC(wait, woken_wake_function);
50624c93 7564
ff960a73 7565 add_wait_queue(&netdev_unregistering_wq, &wait);
50624c93 7566 for (;;) {
50624c93
EB
7567 unregistering = false;
7568 rtnl_lock();
7569 list_for_each_entry(net, net_list, exit_list) {
7570 if (net->dev_unreg_count > 0) {
7571 unregistering = true;
7572 break;
7573 }
7574 }
7575 if (!unregistering)
7576 break;
7577 __rtnl_unlock();
ff960a73
PZ
7578
7579 wait_woken(&wait, TASK_UNINTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
50624c93 7580 }
ff960a73 7581 remove_wait_queue(&netdev_unregistering_wq, &wait);
50624c93
EB
7582}
7583
04dc7f6b
EB
7584static void __net_exit default_device_exit_batch(struct list_head *net_list)
7585{
7586 /* At exit, all network devices must be removed from a network
b595076a 7587 * namespace. Do this in the reverse order of registration.
04dc7f6b
EB
7588 * Do this across as many network namespaces as possible to
7589 * improve batching efficiency.
7590 */
7591 struct net_device *dev;
7592 struct net *net;
7593 LIST_HEAD(dev_kill_list);
7594
50624c93
EB
7595 /* To prevent network device cleanup code from dereferencing
7596 * loopback devices or network devices that have been freed,
7597 * wait here for all pending unregistrations to complete
7598 * before unregistering the loopback device and allowing the
7599 * network namespace to be freed.
7600 *
7601 * The netdev todo list containing all network device
7602 * unregistrations that happen in default_device_exit_batch
7603 * will run in the rtnl_unlock() at the end of
7604 * default_device_exit_batch.
7605 */
7606 rtnl_lock_unregistering(net_list);
04dc7f6b
EB
7607 list_for_each_entry(net, net_list, exit_list) {
7608 for_each_netdev_reverse(net, dev) {
b0ab2fab 7609 if (dev->rtnl_link_ops && dev->rtnl_link_ops->dellink)
04dc7f6b
EB
7610 dev->rtnl_link_ops->dellink(dev, &dev_kill_list);
7611 else
7612 unregister_netdevice_queue(dev, &dev_kill_list);
7613 }
7614 }
7615 unregister_netdevice_many(&dev_kill_list);
7616 rtnl_unlock();
7617}
7618
022cbae6 7619static struct pernet_operations __net_initdata default_device_ops = {
ce286d32 7620 .exit = default_device_exit,
04dc7f6b 7621 .exit_batch = default_device_exit_batch,
ce286d32
EB
7622};
7623
1da177e4
LT
7624/*
7625 * Initialize the DEV module. At boot time this walks the device list and
7626 * unhooks any devices that fail to initialise (normally hardware not
7627 * present) and leaves us with a valid list of present and active devices.
7628 *
7629 */
7630
7631/*
7632 * This is called single-threaded during boot, so no need
7633 * to take the rtnl semaphore.
7634 */
7635static int __init net_dev_init(void)
7636{
7637 int i, rc = -ENOMEM;
7638
7639 BUG_ON(!dev_boot_phase);
7640
1da177e4
LT
7641 if (dev_proc_init())
7642 goto out;
7643
8b41d188 7644 if (netdev_kobject_init())
1da177e4
LT
7645 goto out;
7646
7647 INIT_LIST_HEAD(&ptype_all);
82d8a867 7648 for (i = 0; i < PTYPE_HASH_SIZE; i++)
1da177e4
LT
7649 INIT_LIST_HEAD(&ptype_base[i]);
7650
62532da9
VY
7651 INIT_LIST_HEAD(&offload_base);
7652
881d966b
EB
7653 if (register_pernet_subsys(&netdev_net_ops))
7654 goto out;
1da177e4
LT
7655
7656 /*
7657 * Initialise the packet receive queues.
7658 */
7659
6f912042 7660 for_each_possible_cpu(i) {
e36fa2f7 7661 struct softnet_data *sd = &per_cpu(softnet_data, i);
1da177e4 7662
e36fa2f7 7663 skb_queue_head_init(&sd->input_pkt_queue);
6e7676c1 7664 skb_queue_head_init(&sd->process_queue);
e36fa2f7 7665 INIT_LIST_HEAD(&sd->poll_list);
a9cbd588 7666 sd->output_queue_tailp = &sd->output_queue;
df334545 7667#ifdef CONFIG_RPS
e36fa2f7
ED
7668 sd->csd.func = rps_trigger_softirq;
7669 sd->csd.info = sd;
e36fa2f7 7670 sd->cpu = i;
1e94d72f 7671#endif
0a9627f2 7672
e36fa2f7
ED
7673 sd->backlog.poll = process_backlog;
7674 sd->backlog.weight = weight_p;
1da177e4
LT
7675 }
7676
1da177e4
LT
7677 dev_boot_phase = 0;
7678
505d4f73
EB
7679 /* The loopback device is special: if any other network device
7680 * is present in a network namespace, the loopback device must
7681 * be present too. Since we now dynamically allocate and free the
7682 * loopback device, ensure this invariant is maintained by
7683 * keeping the loopback device the first device on the
7684 * list of network devices, so that it is the first device
7685 * that appears and the last network device
7686 * that disappears.
7687 */
7688 if (register_pernet_device(&loopback_net_ops))
7689 goto out;
7690
7691 if (register_pernet_device(&default_device_ops))
7692 goto out;
7693
962cf36c
CM
7694 open_softirq(NET_TX_SOFTIRQ, net_tx_action);
7695 open_softirq(NET_RX_SOFTIRQ, net_rx_action);
1da177e4
LT
7696
7697 hotcpu_notifier(dev_cpu_callback, 0);
f38a9eb1 7698 dst_subsys_init();
1da177e4
LT
7699 rc = 0;
7700out:
7701 return rc;
7702}
7703
7704subsys_initcall(net_dev_init);