/*
 *	NET3	Protocol independent device support routines.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 *	Derived from the non IP parts of dev.c 1.0.19
 *		Authors:	Ross Biro
 *				Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *				Mark Evans, <evansmp@uhura.aston.ac.uk>
 *
 *	Additional Authors:
 *		Florian la Roche <rzsfl@rz.uni-sb.de>
 *		Alan Cox <gw4pts@gw4pts.ampr.org>
 *		David Hinds <dahinds@users.sourceforge.net>
 *		Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
 *		Adam Sulmicki <adam@cfar.umd.edu>
 *		Pekka Riikonen <priikone@poesidon.pspt.fi>
 *
 *	Changes:
 *		D.J. Barrow	:	Fixed bug where dev->refcnt gets set
 *					to 2 if register_netdev gets called
 *					before net_dev_init & also removed a
 *					few lines of code in the process.
 *		Alan Cox	:	device private ioctl copies fields back.
 *		Alan Cox	:	Transmit queue code does relevant
 *					stunts to keep the queue safe.
 *		Alan Cox	:	Fixed double lock.
 *		Alan Cox	:	Fixed promisc NULL pointer trap
 *		????????	:	Support the full private ioctl range
 *		Alan Cox	:	Moved ioctl permission check into
 *					drivers
 *		Tim Kordas	:	SIOCADDMULTI/SIOCDELMULTI
 *		Alan Cox	:	100 backlog just doesn't cut it when
 *					you start doing multicast video 8)
 *		Alan Cox	:	Rewrote net_bh and list manager.
 *		Alan Cox	:	Fix ETH_P_ALL echoback lengths.
 *		Alan Cox	:	Took out transmit every packet pass
 *					Saved a few bytes in the ioctl handler
 *		Alan Cox	:	Network driver sets packet type before
 *					calling netif_rx. Saves a function
 *					call a packet.
 *		Alan Cox	:	Hashed net_bh()
 *		Richard Kooijman:	Timestamp fixes.
 *		Alan Cox	:	Wrong field in SIOCGIFDSTADDR
 *		Alan Cox	:	Device lock protection.
 *		Alan Cox	:	Fixed nasty side effect of device close
 *					changes.
 *		Rudi Cilibrasi	:	Pass the right thing to
 *					set_mac_address()
 *		Dave Miller	:	32bit quantity for the device lock to
 *					make it work out on a Sparc.
 *		Bjorn Ekwall	:	Added KERNELD hack.
 *		Alan Cox	:	Cleaned up the backlog initialise.
 *		Craig Metz	:	SIOCGIFCONF fix if space for under
 *					1 device.
 *		Thomas Bogendoerfer :	Return ENODEV for dev_open, if there
 *					is no device open function.
 *		Andi Kleen	:	Fix error reporting for SIOCGIFCONF
 *		Michael Chastain :	Fix signed/unsigned for SIOCGIFCONF
 *		Cyrus Durgin	:	Cleaned for KMOD
 *		Adam Sulmicki	:	Bug Fix : Network Device Unload
 *					A network device unload needs to purge
 *					the backlog queue.
 *		Paul Rusty Russell :	SIOCSIFNAME
 *		Pekka Riikonen	:	Netdev boot-time settings code
 *		Andrew Morton	:	Make unregister_netdevice wait
 *					indefinitely on dev->refcnt
 *		J Hadi Salim	:	- Backlog queue sampling
 *					- netif_rx() feedback
 */

#include <asm/uaccess.h>
#include <asm/system.h>
#include <linux/bitops.h>
#include <linux/capability.h>
#include <linux/cpu.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/hash.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/mutex.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/notifier.h>
#include <linux/skbuff.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <linux/rtnetlink.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/stat.h>
#include <net/dst.h>
#include <net/pkt_sched.h>
#include <net/checksum.h>
#include <net/xfrm.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/kmod.h>
#include <linux/module.h>
#include <linux/netpoll.h>
#include <linux/rcupdate.h>
#include <linux/delay.h>
#include <net/wext.h>
#include <net/iw_handler.h>
#include <asm/current.h>
#include <linux/audit.h>
#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/ctype.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <net/ip.h>
#include <linux/ipv6.h>
#include <linux/in.h>
#include <linux/jhash.h>
#include <linux/random.h>
#include <trace/events/napi.h>
#include <trace/events/net.h>
#include <trace/events/skb.h>
#include <linux/pci.h>
#include <linux/inetdevice.h>
#include <linux/cpu_rmap.h>
#include <linux/net_tstamp.h>
#include <linux/jump_label.h>
#include <net/flow_keys.h>

#include "net-sysfs.h"

/* Instead of increasing this, you should create a hash table. */
#define MAX_GRO_SKBS 8

/* This should be increased if a protocol with a bigger head is added. */
#define GRO_MAX_HEAD (MAX_HEADER + 128)

/*
 *	The list of packet types we will receive (as opposed to discard)
 *	and the routines to invoke.
 *
 *	Why 16. Because with 16 the only overlap we get on a hash of the
 *	low nibble of the protocol value is RARP/SNAP/X.25.
 *
 *	NOTE:  That is no longer true with the addition of VLAN tags.  Not
 *	       sure which should go first, but I bet it won't make much
 *	       difference if we are running VLANs.  The good news is that
 *	       this protocol won't be in the list unless compiled in, so
 *	       the average user (w/out VLANs) will not be adversely affected.
 *	       --BLG
 *
 *		0800	IP
 *		8100	802.1Q VLAN
 *		0001	802.3
 *		0002	AX.25
 *		0004	802.2
 *		8035	RARP
 *		0005	SNAP
 *		0805	X.25
 *		0806	ARP
 *		8137	IPX
 *		0009	Localtalk
 *		86DD	IPv6
 */

#define PTYPE_HASH_SIZE	(16)
#define PTYPE_HASH_MASK	(PTYPE_HASH_SIZE - 1)

static DEFINE_SPINLOCK(ptype_lock);
static struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly;
static struct list_head ptype_all __read_mostly;	/* Taps */

/*
 * The @dev_base_head list is protected by @dev_base_lock and the rtnl
 * semaphore.
 *
 * Pure readers hold dev_base_lock for reading, or rcu_read_lock()
 *
 * Writers must hold the rtnl semaphore while they loop through the
 * dev_base_head list, and hold dev_base_lock for writing when they do the
 * actual updates.  This allows pure readers to access the list even
 * while a writer is preparing to update it.
 *
 * To put it another way, dev_base_lock is held for writing only to
 * protect against pure readers; the rtnl semaphore provides the
 * protection against other writers.
 *
 * See, for example usages, register_netdevice() and
 * unregister_netdevice(), which must be called with the rtnl
 * semaphore held.
 */
DEFINE_RWLOCK(dev_base_lock);
EXPORT_SYMBOL(dev_base_lock);
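
/*
 * Example (an illustrative sketch, not part of this file): a pure reader
 * walking the device list under dev_base_lock, following the locking
 * rules described above.
 *
 *	struct net_device *dev;
 *
 *	read_lock(&dev_base_lock);
 *	for_each_netdev(net, dev)
 *		pr_info("%s\n", dev->name);
 *	read_unlock(&dev_base_lock);
 */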

static inline void dev_base_seq_inc(struct net *net)
{
	while (++net->dev_base_seq == 0);
}

static inline struct hlist_head *dev_name_hash(struct net *net, const char *name)
{
	unsigned hash = full_name_hash(name, strnlen(name, IFNAMSIZ));
	return &net->dev_name_head[hash_32(hash, NETDEV_HASHBITS)];
}

static inline struct hlist_head *dev_index_hash(struct net *net, int ifindex)
{
	return &net->dev_index_head[ifindex & (NETDEV_HASHENTRIES - 1)];
}

static inline void rps_lock(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
	spin_lock(&sd->input_pkt_queue.lock);
#endif
}

static inline void rps_unlock(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
	spin_unlock(&sd->input_pkt_queue.lock);
#endif
}

/* Device list insertion */
static int list_netdevice(struct net_device *dev)
{
	struct net *net = dev_net(dev);

	ASSERT_RTNL();

	write_lock_bh(&dev_base_lock);
	list_add_tail_rcu(&dev->dev_list, &net->dev_base_head);
	hlist_add_head_rcu(&dev->name_hlist, dev_name_hash(net, dev->name));
	hlist_add_head_rcu(&dev->index_hlist,
			   dev_index_hash(net, dev->ifindex));
	write_unlock_bh(&dev_base_lock);

	dev_base_seq_inc(net);

	return 0;
}

/* Device list removal
 * caller must respect a RCU grace period before freeing/reusing dev
 */
static void unlist_netdevice(struct net_device *dev)
{
	ASSERT_RTNL();

	/* Unlink dev from the device chain */
	write_lock_bh(&dev_base_lock);
	list_del_rcu(&dev->dev_list);
	hlist_del_rcu(&dev->name_hlist);
	hlist_del_rcu(&dev->index_hlist);
	write_unlock_bh(&dev_base_lock);

	dev_base_seq_inc(dev_net(dev));
}

/*
 *	Our notifier list
 */

static RAW_NOTIFIER_HEAD(netdev_chain);

/*
 *	Device drivers call our routines to queue packets here. We empty the
 *	queue in the local softnet handler.
 */

DEFINE_PER_CPU_ALIGNED(struct softnet_data, softnet_data);
EXPORT_PER_CPU_SYMBOL(softnet_data);

#ifdef CONFIG_LOCKDEP
/*
 * register_netdevice() inits txq->_xmit_lock and sets lockdep class
 * according to dev->type
 */
static const unsigned short netdev_lock_type[] =
	{ARPHRD_NETROM, ARPHRD_ETHER, ARPHRD_EETHER, ARPHRD_AX25,
	 ARPHRD_PRONET, ARPHRD_CHAOS, ARPHRD_IEEE802, ARPHRD_ARCNET,
	 ARPHRD_APPLETLK, ARPHRD_DLCI, ARPHRD_ATM, ARPHRD_METRICOM,
	 ARPHRD_IEEE1394, ARPHRD_EUI64, ARPHRD_INFINIBAND, ARPHRD_SLIP,
	 ARPHRD_CSLIP, ARPHRD_SLIP6, ARPHRD_CSLIP6, ARPHRD_RSRVD,
	 ARPHRD_ADAPT, ARPHRD_ROSE, ARPHRD_X25, ARPHRD_HWX25,
	 ARPHRD_PPP, ARPHRD_CISCO, ARPHRD_LAPB, ARPHRD_DDCMP,
	 ARPHRD_RAWHDLC, ARPHRD_TUNNEL, ARPHRD_TUNNEL6, ARPHRD_FRAD,
	 ARPHRD_SKIP, ARPHRD_LOOPBACK, ARPHRD_LOCALTLK, ARPHRD_FDDI,
	 ARPHRD_BIF, ARPHRD_SIT, ARPHRD_IPDDP, ARPHRD_IPGRE,
	 ARPHRD_PIMREG, ARPHRD_HIPPI, ARPHRD_ASH, ARPHRD_ECONET,
	 ARPHRD_IRDA, ARPHRD_FCPP, ARPHRD_FCAL, ARPHRD_FCPL,
	 ARPHRD_FCFABRIC, ARPHRD_IEEE802_TR, ARPHRD_IEEE80211,
	 ARPHRD_IEEE80211_PRISM, ARPHRD_IEEE80211_RADIOTAP, ARPHRD_PHONET,
	 ARPHRD_PHONET_PIPE, ARPHRD_IEEE802154,
	 ARPHRD_VOID, ARPHRD_NONE};

static const char *const netdev_lock_name[] =
	{"_xmit_NETROM", "_xmit_ETHER", "_xmit_EETHER", "_xmit_AX25",
	 "_xmit_PRONET", "_xmit_CHAOS", "_xmit_IEEE802", "_xmit_ARCNET",
	 "_xmit_APPLETLK", "_xmit_DLCI", "_xmit_ATM", "_xmit_METRICOM",
	 "_xmit_IEEE1394", "_xmit_EUI64", "_xmit_INFINIBAND", "_xmit_SLIP",
	 "_xmit_CSLIP", "_xmit_SLIP6", "_xmit_CSLIP6", "_xmit_RSRVD",
	 "_xmit_ADAPT", "_xmit_ROSE", "_xmit_X25", "_xmit_HWX25",
	 "_xmit_PPP", "_xmit_CISCO", "_xmit_LAPB", "_xmit_DDCMP",
	 "_xmit_RAWHDLC", "_xmit_TUNNEL", "_xmit_TUNNEL6", "_xmit_FRAD",
	 "_xmit_SKIP", "_xmit_LOOPBACK", "_xmit_LOCALTLK", "_xmit_FDDI",
	 "_xmit_BIF", "_xmit_SIT", "_xmit_IPDDP", "_xmit_IPGRE",
	 "_xmit_PIMREG", "_xmit_HIPPI", "_xmit_ASH", "_xmit_ECONET",
	 "_xmit_IRDA", "_xmit_FCPP", "_xmit_FCAL", "_xmit_FCPL",
	 "_xmit_FCFABRIC", "_xmit_IEEE802_TR", "_xmit_IEEE80211",
	 "_xmit_IEEE80211_PRISM", "_xmit_IEEE80211_RADIOTAP", "_xmit_PHONET",
	 "_xmit_PHONET_PIPE", "_xmit_IEEE802154",
	 "_xmit_VOID", "_xmit_NONE"};

static struct lock_class_key netdev_xmit_lock_key[ARRAY_SIZE(netdev_lock_type)];
static struct lock_class_key netdev_addr_lock_key[ARRAY_SIZE(netdev_lock_type)];

static inline unsigned short netdev_lock_pos(unsigned short dev_type)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(netdev_lock_type); i++)
		if (netdev_lock_type[i] == dev_type)
			return i;
	/* the last key is used by default */
	return ARRAY_SIZE(netdev_lock_type) - 1;
}

static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
						 unsigned short dev_type)
{
	int i;

	i = netdev_lock_pos(dev_type);
	lockdep_set_class_and_name(lock, &netdev_xmit_lock_key[i],
				   netdev_lock_name[i]);
}

static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
{
	int i;

	i = netdev_lock_pos(dev->type);
	lockdep_set_class_and_name(&dev->addr_list_lock,
				   &netdev_addr_lock_key[i],
				   netdev_lock_name[i]);
}
#else
static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
						 unsigned short dev_type)
{
}
static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
{
}
#endif

/*******************************************************************************

		Protocol management and registration routines

*******************************************************************************/

/*
 *	Add a protocol ID to the list. Now that the input handler is
 *	smarter we can dispense with all the messy stuff that used to be
 *	here.
 *
 *	BEWARE!!! Protocol handlers, mangling input packets,
 *	MUST BE last in hash buckets and checking protocol handlers
 *	MUST start from promiscuous ptype_all chain in net_bh.
 *	It is true now, do not change it.
 *	Explanation follows: if protocol handler, mangling packet, will
 *	be the first on list, it is not able to sense, that packet
 *	is cloned and should be copied-on-write, so that it will
 *	change it and subsequent readers will get broken packet.
 *							--ANK (980803)
 */

static inline struct list_head *ptype_head(const struct packet_type *pt)
{
	if (pt->type == htons(ETH_P_ALL))
		return &ptype_all;
	else
		return &ptype_base[ntohs(pt->type) & PTYPE_HASH_MASK];
}

/**
 *	dev_add_pack - add packet handler
 *	@pt: packet type declaration
 *
 *	Add a protocol handler to the networking stack. The passed &packet_type
 *	is linked into kernel lists and may not be freed until it has been
 *	removed from the kernel lists.
 *
 *	This call does not sleep and therefore cannot guarantee that
 *	all CPUs that are in the middle of receiving packets will see
 *	the new packet type (until the next received packet).
 */

void dev_add_pack(struct packet_type *pt)
{
	struct list_head *head = ptype_head(pt);

	spin_lock(&ptype_lock);
	list_add_rcu(&pt->list, head);
	spin_unlock(&ptype_lock);
}
EXPORT_SYMBOL(dev_add_pack);

/**
 *	__dev_remove_pack - remove packet handler
 *	@pt: packet type declaration
 *
 *	Remove a protocol handler that was previously added to the kernel
 *	protocol handlers by dev_add_pack(). The passed &packet_type is removed
 *	from the kernel lists and can be freed or reused once this function
 *	returns.
 *
 *	The packet type might still be in use by receivers
 *	and must not be freed until after all CPUs have gone
 *	through a quiescent state.
 */
void __dev_remove_pack(struct packet_type *pt)
{
	struct list_head *head = ptype_head(pt);
	struct packet_type *pt1;

	spin_lock(&ptype_lock);

	list_for_each_entry(pt1, head, list) {
		if (pt == pt1) {
			list_del_rcu(&pt->list);
			goto out;
		}
	}

	pr_warn("dev_remove_pack: %p not found\n", pt);
out:
	spin_unlock(&ptype_lock);
}
EXPORT_SYMBOL(__dev_remove_pack);

/**
 *	dev_remove_pack	 - remove packet handler
 *	@pt: packet type declaration
 *
 *	Remove a protocol handler that was previously added to the kernel
 *	protocol handlers by dev_add_pack(). The passed &packet_type is removed
 *	from the kernel lists and can be freed or reused once this function
 *	returns.
 *
 *	This call sleeps to guarantee that no CPU is looking at the packet
 *	type after return.
 */
void dev_remove_pack(struct packet_type *pt)
{
	__dev_remove_pack(pt);

	synchronize_net();
}
EXPORT_SYMBOL(dev_remove_pack);
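
/*
 * Example (hypothetical module code, a sketch only): registering and later
 * removing a tap for IPv4 frames. The handler my_ipv4_rcv() and the
 * packet_type instance are assumptions, not something defined here.
 *
 *	static int my_ipv4_rcv(struct sk_buff *skb, struct net_device *dev,
 *			       struct packet_type *pt,
 *			       struct net_device *orig_dev)
 *	{
 *		kfree_skb(skb);
 *		return NET_RX_SUCCESS;
 *	}
 *
 *	static struct packet_type my_pt __read_mostly = {
 *		.type = cpu_to_be16(ETH_P_IP),
 *		.func = my_ipv4_rcv,
 *	};
 *
 *	dev_add_pack(&my_pt);
 *	...
 *	dev_remove_pack(&my_pt);	(sleeps until no CPU sees the handler)
 */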

/******************************************************************************

		      Device Boot-time Settings Routines

*******************************************************************************/

/* Boot time configuration table */
static struct netdev_boot_setup dev_boot_setup[NETDEV_BOOT_SETUP_MAX];

/**
 *	netdev_boot_setup_add	- add new setup entry
 *	@name: name of the device
 *	@map: configured settings for the device
 *
 *	Adds new setup entry to the dev_boot_setup list.  The function
 *	returns 0 on error and 1 on success.  This is a generic routine
 *	for all netdevices.
 */
static int netdev_boot_setup_add(char *name, struct ifmap *map)
{
	struct netdev_boot_setup *s;
	int i;

	s = dev_boot_setup;
	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
		if (s[i].name[0] == '\0' || s[i].name[0] == ' ') {
			memset(s[i].name, 0, sizeof(s[i].name));
			strlcpy(s[i].name, name, IFNAMSIZ);
			memcpy(&s[i].map, map, sizeof(s[i].map));
			break;
		}
	}

	return i >= NETDEV_BOOT_SETUP_MAX ? 0 : 1;
}

/**
 *	netdev_boot_setup_check	- check boot time settings
 *	@dev: the netdevice
 *
 *	Check boot time settings for the device.
 *	The found settings are set for the device to be used
 *	later in the device probing.
 *	Returns 0 if no settings found, 1 if they are.
 */
int netdev_boot_setup_check(struct net_device *dev)
{
	struct netdev_boot_setup *s = dev_boot_setup;
	int i;

	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
		if (s[i].name[0] != '\0' && s[i].name[0] != ' ' &&
		    !strcmp(dev->name, s[i].name)) {
			dev->irq	= s[i].map.irq;
			dev->base_addr	= s[i].map.base_addr;
			dev->mem_start	= s[i].map.mem_start;
			dev->mem_end	= s[i].map.mem_end;
			return 1;
		}
	}
	return 0;
}
EXPORT_SYMBOL(netdev_boot_setup_check);


/**
 *	netdev_boot_base	- get address from boot time settings
 *	@prefix: prefix for network device
 *	@unit: id for network device
 *
 *	Check boot time settings for the base address of device.
 *	The found settings are set for the device to be used
 *	later in the device probing.
 *	Returns 0 if no settings found.
 */
unsigned long netdev_boot_base(const char *prefix, int unit)
{
	const struct netdev_boot_setup *s = dev_boot_setup;
	char name[IFNAMSIZ];
	int i;

	sprintf(name, "%s%d", prefix, unit);

	/*
	 * If device already registered then return base of 1
	 * to indicate not to probe for this interface
	 */
	if (__dev_get_by_name(&init_net, name))
		return 1;

	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++)
		if (!strcmp(name, s[i].name))
			return s[i].map.base_addr;
	return 0;
}

/*
 * Saves at boot time configured settings for any netdevice.
 */
int __init netdev_boot_setup(char *str)
{
	int ints[5];
	struct ifmap map;

	str = get_options(str, ARRAY_SIZE(ints), ints);
	if (!str || !*str)
		return 0;

	/* Save settings */
	memset(&map, 0, sizeof(map));
	if (ints[0] > 0)
		map.irq = ints[1];
	if (ints[0] > 1)
		map.base_addr = ints[2];
	if (ints[0] > 2)
		map.mem_start = ints[3];
	if (ints[0] > 3)
		map.mem_end = ints[4];

	/* Add new entry to the list */
	return netdev_boot_setup_add(str, &map);
}

__setup("netdev=", netdev_boot_setup);
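
/*
 * Example (an illustrative sketch of the format parsed above): up to four
 * comma-separated integers (irq, base I/O address, memory start, memory
 * end) followed by the interface name on the kernel command line:
 *
 *	netdev=5,0x340,0,0,eth0
 */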

/*******************************************************************************

			    Device Interface Subroutines

*******************************************************************************/

/**
 *	__dev_get_by_name	- find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name. Must be called under RTNL semaphore
 *	or @dev_base_lock. If the name is found a pointer to the device
 *	is returned. If the name is not found then %NULL is returned. The
 *	reference counters are not incremented so the caller must be
 *	careful with locks.
 */

struct net_device *__dev_get_by_name(struct net *net, const char *name)
{
	struct hlist_node *p;
	struct net_device *dev;
	struct hlist_head *head = dev_name_hash(net, name);

	hlist_for_each_entry(dev, p, head, name_hlist)
		if (!strncmp(dev->name, name, IFNAMSIZ))
			return dev;

	return NULL;
}
EXPORT_SYMBOL(__dev_get_by_name);

/**
 *	dev_get_by_name_rcu	- find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name.
 *	If the name is found a pointer to the device is returned.
 *	If the name is not found then %NULL is returned.
 *	The reference counters are not incremented so the caller must be
 *	careful with locks. The caller must hold RCU lock.
 */

struct net_device *dev_get_by_name_rcu(struct net *net, const char *name)
{
	struct hlist_node *p;
	struct net_device *dev;
	struct hlist_head *head = dev_name_hash(net, name);

	hlist_for_each_entry_rcu(dev, p, head, name_hlist)
		if (!strncmp(dev->name, name, IFNAMSIZ))
			return dev;

	return NULL;
}
EXPORT_SYMBOL(dev_get_by_name_rcu);

/**
 *	dev_get_by_name		- find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name. This can be called from any
 *	context and does its own locking. The returned handle has
 *	the usage count incremented and the caller must use dev_put() to
 *	release it when it is no longer needed. %NULL is returned if no
 *	matching device is found.
 */

struct net_device *dev_get_by_name(struct net *net, const char *name)
{
	struct net_device *dev;

	rcu_read_lock();
	dev = dev_get_by_name_rcu(net, name);
	if (dev)
		dev_hold(dev);
	rcu_read_unlock();
	return dev;
}
EXPORT_SYMBOL(dev_get_by_name);
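
/*
 * Example (an illustrative sketch): looking up a device by name from
 * process context and dropping the reference when done.
 *
 *	struct net_device *dev = dev_get_by_name(&init_net, "eth0");
 *
 *	if (dev) {
 *		pr_info("%s is ifindex %d\n", dev->name, dev->ifindex);
 *		dev_put(dev);
 *	}
 */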

/**
 *	__dev_get_by_index - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns %NULL if the device
 *	is not found or a pointer to the device. The device has not
 *	had its reference counter increased so the caller must be careful
 *	about locking. The caller must hold either the RTNL semaphore
 *	or @dev_base_lock.
 */

struct net_device *__dev_get_by_index(struct net *net, int ifindex)
{
	struct hlist_node *p;
	struct net_device *dev;
	struct hlist_head *head = dev_index_hash(net, ifindex);

	hlist_for_each_entry(dev, p, head, index_hlist)
		if (dev->ifindex == ifindex)
			return dev;

	return NULL;
}
EXPORT_SYMBOL(__dev_get_by_index);

/**
 *	dev_get_by_index_rcu - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns %NULL if the device
 *	is not found or a pointer to the device. The device has not
 *	had its reference counter increased so the caller must be careful
 *	about locking. The caller must hold RCU lock.
 */

struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex)
{
	struct hlist_node *p;
	struct net_device *dev;
	struct hlist_head *head = dev_index_hash(net, ifindex);

	hlist_for_each_entry_rcu(dev, p, head, index_hlist)
		if (dev->ifindex == ifindex)
			return dev;

	return NULL;
}
EXPORT_SYMBOL(dev_get_by_index_rcu);
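
/*
 * Example (an illustrative sketch): a lockless lookup by ifindex. The
 * returned pointer is only valid inside the RCU read-side critical
 * section unless a reference is taken with dev_hold().
 *
 *	rcu_read_lock();
 *	dev = dev_get_by_index_rcu(&init_net, ifindex);
 *	if (dev)
 *		pr_info("found %s\n", dev->name);
 *	rcu_read_unlock();
 */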

/**
 *	dev_get_by_index - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns NULL if the device
 *	is not found or a pointer to the device. The device returned has
 *	had a reference added and the pointer is safe until the user calls
 *	dev_put to indicate they have finished with it.
 */

struct net_device *dev_get_by_index(struct net *net, int ifindex)
{
	struct net_device *dev;

	rcu_read_lock();
	dev = dev_get_by_index_rcu(net, ifindex);
	if (dev)
		dev_hold(dev);
	rcu_read_unlock();
	return dev;
}
EXPORT_SYMBOL(dev_get_by_index);

/**
 *	dev_getbyhwaddr_rcu - find a device by its hardware address
 *	@net: the applicable net namespace
 *	@type: media type of device
 *	@ha: hardware address
 *
 *	Search for an interface by MAC address. Returns NULL if the device
 *	is not found or a pointer to the device.
 *	The caller must hold RCU or RTNL.
 *	The returned device has not had its ref count increased
 *	and the caller must therefore be careful about locking.
 */

struct net_device *dev_getbyhwaddr_rcu(struct net *net, unsigned short type,
				       const char *ha)
{
	struct net_device *dev;

	for_each_netdev_rcu(net, dev)
		if (dev->type == type &&
		    !memcmp(dev->dev_addr, ha, dev->addr_len))
			return dev;

	return NULL;
}
EXPORT_SYMBOL(dev_getbyhwaddr_rcu);

struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type)
{
	struct net_device *dev;

	ASSERT_RTNL();
	for_each_netdev(net, dev)
		if (dev->type == type)
			return dev;

	return NULL;
}
EXPORT_SYMBOL(__dev_getfirstbyhwtype);

struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type)
{
	struct net_device *dev, *ret = NULL;

	rcu_read_lock();
	for_each_netdev_rcu(net, dev)
		if (dev->type == type) {
			dev_hold(dev);
			ret = dev;
			break;
		}
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL(dev_getfirstbyhwtype);

/**
 *	dev_get_by_flags_rcu - find any device with given flags
 *	@net: the applicable net namespace
 *	@if_flags: IFF_* values
 *	@mask: bitmask of bits in if_flags to check
 *
 *	Search for any interface with the given flags. Returns NULL if a device
 *	is not found or a pointer to the device. Must be called inside
 *	rcu_read_lock(), and result refcount is unchanged.
 */

struct net_device *dev_get_by_flags_rcu(struct net *net, unsigned short if_flags,
					unsigned short mask)
{
	struct net_device *dev, *ret;

	ret = NULL;
	for_each_netdev_rcu(net, dev) {
		if (((dev->flags ^ if_flags) & mask) == 0) {
			ret = dev;
			break;
		}
	}
	return ret;
}
EXPORT_SYMBOL(dev_get_by_flags_rcu);

/**
 *	dev_valid_name - check if name is okay for network device
 *	@name: name string
 *
 *	Network device names need to be valid file names to
 *	allow sysfs to work.  We also disallow any kind of
 *	whitespace.
 */
int dev_valid_name(const char *name)
{
	if (*name == '\0')
		return 0;
	if (strlen(name) >= IFNAMSIZ)
		return 0;
	if (!strcmp(name, ".") || !strcmp(name, ".."))
		return 0;

	while (*name) {
		if (*name == '/' || isspace(*name))
			return 0;
		name++;
	}
	return 1;
}
EXPORT_SYMBOL(dev_valid_name);

/**
 *	__dev_alloc_name - allocate a name for a device
 *	@net: network namespace to allocate the device name in
 *	@name: name format string
 *	@buf:  scratch buffer and result name string
 *
 *	Passed a format string - eg "lt%d" it will try and find a suitable
 *	id. It scans list of devices to build up a free map, then chooses
 *	the first empty slot. The caller must hold the dev_base or rtnl lock
 *	while allocating the name and adding the device in order to avoid
 *	duplicates.
 *	Limited to bits_per_byte * page size devices (ie 32K on most platforms).
 *	Returns the number of the unit assigned or a negative errno code.
 */

static int __dev_alloc_name(struct net *net, const char *name, char *buf)
{
	int i = 0;
	const char *p;
	const int max_netdevices = 8*PAGE_SIZE;
	unsigned long *inuse;
	struct net_device *d;

	p = strnchr(name, IFNAMSIZ-1, '%');
	if (p) {
		/*
		 * Verify the string as this thing may have come from
		 * the user.  There must be either one "%d" and no other "%"
		 * characters.
		 */
		if (p[1] != 'd' || strchr(p + 2, '%'))
			return -EINVAL;

		/* Use one page as a bit array of possible slots */
		inuse = (unsigned long *) get_zeroed_page(GFP_ATOMIC);
		if (!inuse)
			return -ENOMEM;

		for_each_netdev(net, d) {
			if (!sscanf(d->name, name, &i))
				continue;
			if (i < 0 || i >= max_netdevices)
				continue;

			/* avoid cases where sscanf is not exact inverse of printf */
			snprintf(buf, IFNAMSIZ, name, i);
			if (!strncmp(buf, d->name, IFNAMSIZ))
				set_bit(i, inuse);
		}

		i = find_first_zero_bit(inuse, max_netdevices);
		free_page((unsigned long) inuse);
	}

	if (buf != name)
		snprintf(buf, IFNAMSIZ, name, i);
	if (!__dev_get_by_name(net, buf))
		return i;

	/* It is possible to run out of possible slots
	 * when the name is long and there isn't enough space left
	 * for the digits, or if all bits are used.
	 */
	return -ENFILE;
}

/**
 *	dev_alloc_name - allocate a name for a device
 *	@dev: device
 *	@name: name format string
 *
 *	Passed a format string - eg "lt%d" it will try and find a suitable
 *	id. It scans list of devices to build up a free map, then chooses
 *	the first empty slot. The caller must hold the dev_base or rtnl lock
 *	while allocating the name and adding the device in order to avoid
 *	duplicates.
 *	Limited to bits_per_byte * page size devices (ie 32K on most platforms).
 *	Returns the number of the unit assigned or a negative errno code.
 */

int dev_alloc_name(struct net_device *dev, const char *name)
{
	char buf[IFNAMSIZ];
	struct net *net;
	int ret;

	BUG_ON(!dev_net(dev));
	net = dev_net(dev);
	ret = __dev_alloc_name(net, name, buf);
	if (ret >= 0)
		strlcpy(dev->name, buf, IFNAMSIZ);
	return ret;
}
EXPORT_SYMBOL(dev_alloc_name);
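
/*
 * Example (an illustrative sketch): asking the core for the next free unit
 * number in a driver's naming scheme; "foo%d" becomes "foo0", "foo1", ...
 * On failure a negative errno (e.g. -EINVAL or -ENFILE) is returned.
 *
 *	err = dev_alloc_name(dev, "foo%d");
 *	if (err < 0)
 *		goto out;
 */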

static int dev_get_valid_name(struct net_device *dev, const char *name)
{
	struct net *net;

	BUG_ON(!dev_net(dev));
	net = dev_net(dev);

	if (!dev_valid_name(name))
		return -EINVAL;

	if (strchr(name, '%'))
		return dev_alloc_name(dev, name);
	else if (__dev_get_by_name(net, name))
		return -EEXIST;
	else if (dev->name != name)
		strlcpy(dev->name, name, IFNAMSIZ);

	return 0;
}

/**
 *	dev_change_name - change name of a device
 *	@dev: device
 *	@newname: name (or format string) must be at least IFNAMSIZ
 *
 *	Change name of a device, can pass format strings "eth%d"
 *	for wildcarding.
 */
int dev_change_name(struct net_device *dev, const char *newname)
{
	char oldname[IFNAMSIZ];
	int err = 0;
	int ret;
	struct net *net;

	ASSERT_RTNL();
	BUG_ON(!dev_net(dev));

	net = dev_net(dev);
	if (dev->flags & IFF_UP)
		return -EBUSY;

	if (strncmp(newname, dev->name, IFNAMSIZ) == 0)
		return 0;

	memcpy(oldname, dev->name, IFNAMSIZ);

	err = dev_get_valid_name(dev, newname);
	if (err < 0)
		return err;

rollback:
	ret = device_rename(&dev->dev, dev->name);
	if (ret) {
		memcpy(dev->name, oldname, IFNAMSIZ);
		return ret;
	}

	write_lock_bh(&dev_base_lock);
	hlist_del_rcu(&dev->name_hlist);
	write_unlock_bh(&dev_base_lock);

	synchronize_rcu();

	write_lock_bh(&dev_base_lock);
	hlist_add_head_rcu(&dev->name_hlist, dev_name_hash(net, dev->name));
	write_unlock_bh(&dev_base_lock);

	ret = call_netdevice_notifiers(NETDEV_CHANGENAME, dev);
	ret = notifier_to_errno(ret);

	if (ret) {
		/* err >= 0 after dev_alloc_name() or stores the first errno */
		if (err >= 0) {
			err = ret;
			memcpy(dev->name, oldname, IFNAMSIZ);
			goto rollback;
		} else {
			pr_err("%s: name change rollback failed: %d\n",
			       dev->name, ret);
		}
	}

	return err;
}

/**
 *	dev_set_alias - change ifalias of a device
 *	@dev: device
 *	@alias: name up to IFALIASZ
 *	@len: limit of bytes to copy from info
 *
 *	Set ifalias for a device.
 */
int dev_set_alias(struct net_device *dev, const char *alias, size_t len)
{
	ASSERT_RTNL();

	if (len >= IFALIASZ)
		return -EINVAL;

	if (!len) {
		if (dev->ifalias) {
			kfree(dev->ifalias);
			dev->ifalias = NULL;
		}
		return 0;
	}

	dev->ifalias = krealloc(dev->ifalias, len + 1, GFP_KERNEL);
	if (!dev->ifalias)
		return -ENOMEM;

	strlcpy(dev->ifalias, alias, len+1);
	return len;
}


/**
 *	netdev_features_change - device changes features
 *	@dev: device to cause notification
 *
 *	Called to indicate a device has changed features.
 */
void netdev_features_change(struct net_device *dev)
{
	call_netdevice_notifiers(NETDEV_FEAT_CHANGE, dev);
}
EXPORT_SYMBOL(netdev_features_change);

/**
 *	netdev_state_change - device changes state
 *	@dev: device to cause notification
 *
 *	Called to indicate a device has changed state. This function calls
 *	the notifier chains for netdev_chain and sends a NEWLINK message
 *	to the routing socket.
 */
void netdev_state_change(struct net_device *dev)
{
	if (dev->flags & IFF_UP) {
		call_netdevice_notifiers(NETDEV_CHANGE, dev);
		rtmsg_ifinfo(RTM_NEWLINK, dev, 0);
	}
}
EXPORT_SYMBOL(netdev_state_change);

int netdev_bonding_change(struct net_device *dev, unsigned long event)
{
	return call_netdevice_notifiers(event, dev);
}
EXPORT_SYMBOL(netdev_bonding_change);

/**
 *	dev_load 	- load a network module
 *	@net: the applicable net namespace
 *	@name: name of interface
 *
 *	If a network interface is not present and the process has suitable
 *	privileges this function loads the module. If module loading is not
 *	available in this kernel then it becomes a nop.
 */

void dev_load(struct net *net, const char *name)
{
	struct net_device *dev;
	int no_module;

	rcu_read_lock();
	dev = dev_get_by_name_rcu(net, name);
	rcu_read_unlock();

	no_module = !dev;
	if (no_module && capable(CAP_NET_ADMIN))
		no_module = request_module("netdev-%s", name);
	if (no_module && capable(CAP_SYS_MODULE)) {
		if (!request_module("%s", name))
			pr_err("Loading kernel module for a network device with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%s instead.\n",
			       name);
	}
}
EXPORT_SYMBOL(dev_load);

static int __dev_open(struct net_device *dev)
{
	const struct net_device_ops *ops = dev->netdev_ops;
	int ret;

	ASSERT_RTNL();

	if (!netif_device_present(dev))
		return -ENODEV;

	ret = call_netdevice_notifiers(NETDEV_PRE_UP, dev);
	ret = notifier_to_errno(ret);
	if (ret)
		return ret;

	set_bit(__LINK_STATE_START, &dev->state);

	if (ops->ndo_validate_addr)
		ret = ops->ndo_validate_addr(dev);

	if (!ret && ops->ndo_open)
		ret = ops->ndo_open(dev);

	if (ret)
		clear_bit(__LINK_STATE_START, &dev->state);
	else {
		dev->flags |= IFF_UP;
		net_dmaengine_get();
		dev_set_rx_mode(dev);
		dev_activate(dev);
	}

	return ret;
}

/**
 *	dev_open	- prepare an interface for use.
 *	@dev:	device to open
 *
 *	Takes a device from down to up state. The device's private open
 *	function is invoked and then the multicast lists are loaded. Finally
 *	the device is moved into the up state and a %NETDEV_UP message is
 *	sent to the netdev notifier chain.
 *
 *	Calling this function on an active interface is a nop. On a failure
 *	a negative errno code is returned.
 */
int dev_open(struct net_device *dev)
{
	int ret;

	if (dev->flags & IFF_UP)
		return 0;

	ret = __dev_open(dev);
	if (ret < 0)
		return ret;

	rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING);
	call_netdevice_notifiers(NETDEV_UP, dev);

	return ret;
}
EXPORT_SYMBOL(dev_open);
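
/*
 * Example (an illustrative sketch): bringing an interface up from other
 * kernel code. dev_open() must be called under the RTNL lock.
 *
 *	rtnl_lock();
 *	err = dev_open(dev);
 *	rtnl_unlock();
 */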

static int __dev_close_many(struct list_head *head)
{
	struct net_device *dev;

	ASSERT_RTNL();
	might_sleep();

	list_for_each_entry(dev, head, unreg_list) {
		call_netdevice_notifiers(NETDEV_GOING_DOWN, dev);

		clear_bit(__LINK_STATE_START, &dev->state);

		/* Synchronize to scheduled poll. We cannot touch poll list, it
		 * can be even on different cpu. So just clear netif_running().
		 *
		 * dev->stop() will invoke napi_disable() on all of its
		 * napi_struct instances on this device.
		 */
		smp_mb__after_clear_bit(); /* Commit netif_running(). */
	}

	dev_deactivate_many(head);

	list_for_each_entry(dev, head, unreg_list) {
		const struct net_device_ops *ops = dev->netdev_ops;

		/*
		 *	Call the device specific close. This cannot fail.
		 *	Only if device is UP
		 *
		 *	We allow it to be called even after a DETACH hot-plug
		 *	event.
		 */
		if (ops->ndo_stop)
			ops->ndo_stop(dev);

		dev->flags &= ~IFF_UP;
		net_dmaengine_put();
	}

	return 0;
}

static int __dev_close(struct net_device *dev)
{
	int retval;
	LIST_HEAD(single);

	list_add(&dev->unreg_list, &single);
	retval = __dev_close_many(&single);
	list_del(&single);
	return retval;
}

static int dev_close_many(struct list_head *head)
{
	struct net_device *dev, *tmp;
	LIST_HEAD(tmp_list);

	list_for_each_entry_safe(dev, tmp, head, unreg_list)
		if (!(dev->flags & IFF_UP))
			list_move(&dev->unreg_list, &tmp_list);

	__dev_close_many(head);

	list_for_each_entry(dev, head, unreg_list) {
		rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING);
		call_netdevice_notifiers(NETDEV_DOWN, dev);
	}

	/* rollback_registered_many needs the complete original list */
	list_splice(&tmp_list, head);
	return 0;
}

/**
 *	dev_close - shutdown an interface.
 *	@dev: device to shutdown
 *
 *	This function moves an active device into down state. A
 *	%NETDEV_GOING_DOWN is sent to the netdev notifier chain. The device
 *	is then deactivated and finally a %NETDEV_DOWN is sent to the notifier
 *	chain.
 */
int dev_close(struct net_device *dev)
{
	if (dev->flags & IFF_UP) {
		LIST_HEAD(single);

		list_add(&dev->unreg_list, &single);
		dev_close_many(&single);
		list_del(&single);
	}
	return 0;
}
EXPORT_SYMBOL(dev_close);


/**
 *	dev_disable_lro - disable Large Receive Offload on a device
 *	@dev: device
 *
 *	Disable Large Receive Offload (LRO) on a net device.  Must be
 *	called under RTNL.  This is needed if received packets may be
 *	forwarded to another interface.
 */
void dev_disable_lro(struct net_device *dev)
{
	/*
	 * If we're trying to disable lro on a vlan device
	 * use the underlying physical device instead
	 */
	if (is_vlan_dev(dev))
		dev = vlan_dev_real_dev(dev);

	dev->wanted_features &= ~NETIF_F_LRO;
	netdev_update_features(dev);

	if (unlikely(dev->features & NETIF_F_LRO))
		netdev_WARN(dev, "failed to disable LRO!\n");
}
EXPORT_SYMBOL(dev_disable_lro);


static int dev_boot_phase = 1;

/**
 *	register_netdevice_notifier - register a network notifier block
 *	@nb: notifier
 *
 *	Register a notifier to be called when network device events occur.
 *	The notifier passed is linked into the kernel structures and must
 *	not be reused until it has been unregistered. A negative errno code
 *	is returned on a failure.
 *
 *	When registered all registration and up events are replayed
 *	to the new notifier to allow the device to have a race-free
 *	view of the network device list.
 */

int register_netdevice_notifier(struct notifier_block *nb)
{
	struct net_device *dev;
	struct net_device *last;
	struct net *net;
	int err;

	rtnl_lock();
	err = raw_notifier_chain_register(&netdev_chain, nb);
	if (err)
		goto unlock;
	if (dev_boot_phase)
		goto unlock;
	for_each_net(net) {
		for_each_netdev(net, dev) {
			err = nb->notifier_call(nb, NETDEV_REGISTER, dev);
			err = notifier_to_errno(err);
			if (err)
				goto rollback;

			if (!(dev->flags & IFF_UP))
				continue;

			nb->notifier_call(nb, NETDEV_UP, dev);
		}
	}

unlock:
	rtnl_unlock();
	return err;

rollback:
	last = dev;
	for_each_net(net) {
		for_each_netdev(net, dev) {
			if (dev == last)
				goto outroll;

			if (dev->flags & IFF_UP) {
				nb->notifier_call(nb, NETDEV_GOING_DOWN, dev);
				nb->notifier_call(nb, NETDEV_DOWN, dev);
			}
			nb->notifier_call(nb, NETDEV_UNREGISTER, dev);
			nb->notifier_call(nb, NETDEV_UNREGISTER_BATCH, dev);
		}
	}

outroll:
	raw_notifier_chain_unregister(&netdev_chain, nb);
	goto unlock;
}
EXPORT_SYMBOL(register_netdevice_notifier);
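
/*
 * Example (hypothetical module code, a sketch only): a minimal notifier
 * that logs device registrations. my_netdev_event() and my_notifier are
 * assumptions for illustration; at this point in the kernel's history ptr
 * is the net_device itself.
 *
 *	static int my_netdev_event(struct notifier_block *nb,
 *				   unsigned long event, void *ptr)
 *	{
 *		struct net_device *dev = ptr;
 *
 *		if (event == NETDEV_REGISTER)
 *			pr_info("registered %s\n", dev->name);
 *		return NOTIFY_DONE;
 *	}
 *
 *	static struct notifier_block my_notifier = {
 *		.notifier_call = my_netdev_event,
 *	};
 *
 *	register_netdevice_notifier(&my_notifier);
 */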

/**
 *	unregister_netdevice_notifier - unregister a network notifier block
 *	@nb: notifier
 *
 *	Unregister a notifier previously registered by
 *	register_netdevice_notifier(). The notifier is unlinked from the
 *	kernel structures and may then be reused. A negative errno code
 *	is returned on a failure.
 */

int unregister_netdevice_notifier(struct notifier_block *nb)
{
	int err;

	rtnl_lock();
	err = raw_notifier_chain_unregister(&netdev_chain, nb);
	rtnl_unlock();
	return err;
}
EXPORT_SYMBOL(unregister_netdevice_notifier);

/**
 *	call_netdevice_notifiers - call all network notifier blocks
 *	@val: value passed unmodified to notifier function
 *	@dev: net_device pointer passed unmodified to notifier function
 *
 *	Call all network notifier blocks.  Parameters and return value
 *	are as for raw_notifier_call_chain().
 */

int call_netdevice_notifiers(unsigned long val, struct net_device *dev)
{
	ASSERT_RTNL();
	return raw_notifier_call_chain(&netdev_chain, val, dev);
}
EXPORT_SYMBOL(call_netdevice_notifiers);

static struct jump_label_key netstamp_needed __read_mostly;
#ifdef HAVE_JUMP_LABEL
/* We are not allowed to call jump_label_dec() from irq context
 * If net_disable_timestamp() is called from irq context, defer the
 * jump_label_dec() calls.
 */
static atomic_t netstamp_needed_deferred;
#endif

void net_enable_timestamp(void)
{
#ifdef HAVE_JUMP_LABEL
	int deferred = atomic_xchg(&netstamp_needed_deferred, 0);

	if (deferred) {
		while (--deferred)
			jump_label_dec(&netstamp_needed);
		return;
	}
#endif
	WARN_ON(in_interrupt());
	jump_label_inc(&netstamp_needed);
}
EXPORT_SYMBOL(net_enable_timestamp);

void net_disable_timestamp(void)
{
#ifdef HAVE_JUMP_LABEL
	if (in_interrupt()) {
		atomic_inc(&netstamp_needed_deferred);
		return;
	}
#endif
	jump_label_dec(&netstamp_needed);
}
EXPORT_SYMBOL(net_disable_timestamp);

static inline void net_timestamp_set(struct sk_buff *skb)
{
	skb->tstamp.tv64 = 0;
	if (static_branch(&netstamp_needed))
		__net_timestamp(skb);
}

#define net_timestamp_check(COND, SKB)			\
	if (static_branch(&netstamp_needed)) {		\
		if ((COND) && !(SKB)->tstamp.tv64)	\
			__net_timestamp(SKB);		\
	}						\

static int net_hwtstamp_validate(struct ifreq *ifr)
{
	struct hwtstamp_config cfg;
	enum hwtstamp_tx_types tx_type;
	enum hwtstamp_rx_filters rx_filter;
	int tx_type_valid = 0;
	int rx_filter_valid = 0;

	if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
		return -EFAULT;

	if (cfg.flags) /* reserved for future extensions */
		return -EINVAL;

	tx_type = cfg.tx_type;
	rx_filter = cfg.rx_filter;

	switch (tx_type) {
	case HWTSTAMP_TX_OFF:
	case HWTSTAMP_TX_ON:
	case HWTSTAMP_TX_ONESTEP_SYNC:
		tx_type_valid = 1;
		break;
	}

	switch (rx_filter) {
	case HWTSTAMP_FILTER_NONE:
	case HWTSTAMP_FILTER_ALL:
	case HWTSTAMP_FILTER_SOME:
	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
		rx_filter_valid = 1;
		break;
	}

	if (!tx_type_valid || !rx_filter_valid)
		return -ERANGE;

	return 0;
}

static inline bool is_skb_forwardable(struct net_device *dev,
				      struct sk_buff *skb)
{
	unsigned int len;

	if (!(dev->flags & IFF_UP))
		return false;

	len = dev->mtu + dev->hard_header_len + VLAN_HLEN;
	if (skb->len <= len)
		return true;

	/* if TSO is enabled, we don't care about the length as the packet
	 * could be forwarded without being segmented before
	 */
	if (skb_is_gso(skb))
		return true;

	return false;
}

/**
 * dev_forward_skb - loopback an skb to another netif
 *
 * @dev: destination network device
 * @skb: buffer to forward
 *
 * return values:
 *	NET_RX_SUCCESS	(no congestion)
 *	NET_RX_DROP	(packet was dropped, but freed)
 *
 * dev_forward_skb can be used for injecting an skb from the
 * start_xmit function of one device into the receive queue
 * of another device.
 *
 * The receiving device may be in another namespace, so
 * we have to clear all information in the skb that could
 * impact namespace isolation.
 */
int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
{
	if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
		if (skb_copy_ubufs(skb, GFP_ATOMIC)) {
			atomic_long_inc(&dev->rx_dropped);
			kfree_skb(skb);
			return NET_RX_DROP;
		}
	}

	skb_orphan(skb);
	nf_reset(skb);

	if (unlikely(!is_skb_forwardable(dev, skb))) {
		atomic_long_inc(&dev->rx_dropped);
		kfree_skb(skb);
		return NET_RX_DROP;
	}
	skb_set_dev(skb, dev);
	skb->tstamp.tv64 = 0;
	skb->pkt_type = PACKET_HOST;
	skb->protocol = eth_type_trans(skb, dev);
	return netif_rx(skb);
}
EXPORT_SYMBOL_GPL(dev_forward_skb);
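
/*
 * Example (hypothetical driver code, a sketch only): a virtual device pair
 * handing frames from one device's transmit path to its peer's receive
 * path. struct my_priv and its peer field are assumptions.
 *
 *	static netdev_tx_t my_start_xmit(struct sk_buff *skb,
 *					 struct net_device *dev)
 *	{
 *		struct my_priv *priv = netdev_priv(dev);
 *
 *		dev_forward_skb(priv->peer, skb);
 *		return NETDEV_TX_OK;
 *	}
 */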
1607
71d9dec2
CG
1608static inline int deliver_skb(struct sk_buff *skb,
1609 struct packet_type *pt_prev,
1610 struct net_device *orig_dev)
1611{
1612 atomic_inc(&skb->users);
1613 return pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
1614}
1615
1da177e4
LT
1616/*
1617 * Support routine. Sends outgoing frames to any network
1618 * taps currently in use.
1619 */
1620
f6a78bfc 1621static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
1da177e4
LT
1622{
1623 struct packet_type *ptype;
71d9dec2
CG
1624 struct sk_buff *skb2 = NULL;
1625 struct packet_type *pt_prev = NULL;
a61bbcf2 1626
1da177e4
LT
1627 rcu_read_lock();
1628 list_for_each_entry_rcu(ptype, &ptype_all, list) {
1629 /* Never send packets back to the socket
1630 * they originated from - MvS (miquels@drinkel.ow.org)
1631 */
1632 if ((ptype->dev == dev || !ptype->dev) &&
1633 (ptype->af_packet_priv == NULL ||
1634 (struct sock *)ptype->af_packet_priv != skb->sk)) {
71d9dec2
CG
1635 if (pt_prev) {
1636 deliver_skb(skb2, pt_prev, skb->dev);
1637 pt_prev = ptype;
1638 continue;
1639 }
1640
1641 skb2 = skb_clone(skb, GFP_ATOMIC);
1da177e4
LT
1642 if (!skb2)
1643 break;
1644
70978182
ED
1645 net_timestamp_set(skb2);
1646
1da177e4
LT
1647 /* skb->nh should be correctly
1648 set by sender, so that the second statement is
1649 just protection against buggy protocols.
1650 */
459a98ed 1651 skb_reset_mac_header(skb2);
1da177e4 1652
d56f90a7 1653 if (skb_network_header(skb2) < skb2->data ||
27a884dc 1654 skb2->network_header > skb2->tail) {
1da177e4 1655 if (net_ratelimit())
7b6cd1ce
JP
1656 pr_crit("protocol %04x is buggy, dev %s\n",
1657 ntohs(skb2->protocol),
1658 dev->name);
c1d2bbe1 1659 skb_reset_network_header(skb2);
1da177e4
LT
1660 }
1661
b0e380b1 1662 skb2->transport_header = skb2->network_header;
1da177e4 1663 skb2->pkt_type = PACKET_OUTGOING;
71d9dec2 1664 pt_prev = ptype;
1da177e4
LT
1665 }
1666 }
71d9dec2
CG
1667 if (pt_prev)
1668 pt_prev->func(skb2, skb->dev, pt_prev, skb->dev);
1da177e4
LT
1669 rcu_read_unlock();
1670}
1671
4f57c087
JF
1672/* netif_setup_tc - Handle tc mappings on real_num_tx_queues change
1673 * @dev: Network device
1674 * @txq: number of queues available
1675 *
1676 * If real_num_tx_queues is changed the tc mappings may no longer be
1677 * valid. To resolve this verify the tc mapping remains valid and if
1678 * not NULL the mapping. With no priorities mapping to this
1679 * offset/count pair it will no longer be used. In the worst case TC0
1680 * is invalid nothing can be done so disable priority mappings. If is
1681 * expected that drivers will fix this mapping if they can before
1682 * calling netif_set_real_num_tx_queues.
1683 */
bb134d22 1684static void netif_setup_tc(struct net_device *dev, unsigned int txq)
4f57c087
JF
1685{
1686 int i;
1687 struct netdev_tc_txq *tc = &dev->tc_to_txq[0];
1688
1689 /* If TC0 is invalidated disable TC mapping */
1690 if (tc->offset + tc->count > txq) {
7b6cd1ce 1691 pr_warn("Number of in use tx queues changed invalidating tc mappings. Priority traffic classification disabled!\n");
4f57c087
JF
1692 dev->num_tc = 0;
1693 return;
1694 }
1695
1696 /* Invalidated prio to tc mappings set to TC0 */
1697 for (i = 1; i < TC_BITMASK + 1; i++) {
1698 int q = netdev_get_prio_tc_map(dev, i);
1699
1700 tc = &dev->tc_to_txq[q];
1701 if (tc->offset + tc->count > txq) {
7b6cd1ce
JP
1702 pr_warn("Number of in use tx queues changed. Priority %i to tc mapping %i is no longer valid. Setting map to 0\n",
1703 i, q);
4f57c087
JF
1704 netdev_set_prio_tc_map(dev, i, 0);
1705 }
1706 }
1707}
1708
f0796d5c
JF
1709/*
1710 * Routine to help set real_num_tx_queues. To avoid skbs mapped to queues
1711 * greater then real_num_tx_queues stale skbs on the qdisc must be flushed.
1712 */
e6484930 1713int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq)
f0796d5c 1714{
1d24eb48
TH
1715 int rc;
1716
e6484930
TH
1717 if (txq < 1 || txq > dev->num_tx_queues)
1718 return -EINVAL;
f0796d5c 1719
5c56580b
BH
1720 if (dev->reg_state == NETREG_REGISTERED ||
1721 dev->reg_state == NETREG_UNREGISTERING) {
e6484930
TH
1722 ASSERT_RTNL();
1723
1d24eb48
TH
1724 rc = netdev_queue_update_kobjects(dev, dev->real_num_tx_queues,
1725 txq);
1726 if (rc)
1727 return rc;
1728
1729 if (dev->num_tc)
1730 netif_setup_tc(dev, txq);
1731
1732 if (txq < dev->real_num_tx_queues)
1733 qdisc_reset_all_tx_gt(dev, txq);
1734 }
1735
1736 dev->real_num_tx_queues = txq;
1737 return 0;
1738}
1739EXPORT_SYMBOL(netif_set_real_num_tx_queues);
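/*
 * Editor's example (not part of dev.c): how a hypothetical multiqueue
 * driver might shrink its active TX queue count after discovering a
 * hardware limit. The function and "max_hw_queues" are invented for
 * illustration; the rtnl lock is required once the device is registered.
 */
static int example_shrink_tx_queues(struct net_device *dev,
				    unsigned int max_hw_queues)
{
	int err;

	rtnl_lock();
	err = netif_set_real_num_tx_queues(dev, max_hw_queues);
	rtnl_unlock();
	return err;	/* 0 on success, -EINVAL if out of range */
}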
1740
1741#ifdef CONFIG_RPS
1742/**
1743 * netif_set_real_num_rx_queues - set actual number of RX queues used
1744 * @dev: Network device
1745 * @rxq: Actual number of RX queues
1746 *
1747 * This must be called either with the rtnl_lock held or before
1748 * registration of the net device. Returns 0 on success, or a
1749 * negative error code. If called before registration, it always
1750 * succeeds.
1751 */
1752int netif_set_real_num_rx_queues(struct net_device *dev, unsigned int rxq)
1753{
1754 int rc;
1755
1756 if (rxq < 1 || rxq > dev->num_rx_queues)
1757 return -EINVAL;
1758
1759 if (dev->reg_state == NETREG_REGISTERED) {
1760 ASSERT_RTNL();
1761
1762 rc = net_rx_queue_update_kobjects(dev, dev->real_num_rx_queues,
1763 rxq);
1764 if (rc)
1765 return rc;
1766 }
1767
1768 dev->real_num_rx_queues = rxq;
1769 return 0;
1770}
1771EXPORT_SYMBOL(netif_set_real_num_rx_queues);
1772#endif
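/*
 * Editor's example (hypothetical): calling the RX-side twin from a probe
 * routine, before register_netdev(). In that window the rtnl lock is not
 * needed and only the range check can fail; without CONFIG_RPS the call
 * is expected to compile to a trivial stub from the header.
 */
static int example_probe_set_rx(struct net_device *dev, unsigned int want_rx)
{
	return netif_set_real_num_rx_queues(dev, want_rx);
}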
1773
1774 static inline void __netif_reschedule(struct Qdisc *q)
1775 {
1776 struct softnet_data *sd;
1777 unsigned long flags;
1778
1779 local_irq_save(flags);
1780 sd = &__get_cpu_var(softnet_data);
1781 q->next_sched = NULL;
1782 *sd->output_queue_tailp = q;
1783 sd->output_queue_tailp = &q->next_sched;
1784 raise_softirq_irqoff(NET_TX_SOFTIRQ);
1785 local_irq_restore(flags);
1786}
1787
1788void __netif_schedule(struct Qdisc *q)
1789{
1790 if (!test_and_set_bit(__QDISC_STATE_SCHED, &q->state))
1791 __netif_reschedule(q);
1792}
1793EXPORT_SYMBOL(__netif_schedule);
1794
1795 void dev_kfree_skb_irq(struct sk_buff *skb)
1796 {
1797 if (atomic_dec_and_test(&skb->users)) {
1798 struct softnet_data *sd;
1799 unsigned long flags;
1800
1801 local_irq_save(flags);
1802 sd = &__get_cpu_var(softnet_data);
1803 skb->next = sd->completion_queue;
1804 sd->completion_queue = skb;
1805 raise_softirq_irqoff(NET_TX_SOFTIRQ);
1806 local_irq_restore(flags);
1807 }
1808 }
1809 EXPORT_SYMBOL(dev_kfree_skb_irq);
1810
1811void dev_kfree_skb_any(struct sk_buff *skb)
1812{
1813 if (in_irq() || irqs_disabled())
1814 dev_kfree_skb_irq(skb);
1815 else
1816 dev_kfree_skb(skb);
1817}
1818EXPORT_SYMBOL(dev_kfree_skb_any);
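/*
 * Editor's example (hypothetical ring structure): a TX-completion sweep
 * that can run from hard-IRQ or process context, which is exactly the
 * situation dev_kfree_skb_any() resolves by picking the right free path.
 */
struct example_tx_ring {
	struct sk_buff *queued[64];
	unsigned int head, tail;
};

static void example_clean_tx_ring(struct example_tx_ring *ring)
{
	while (ring->head != ring->tail) {
		/* safe in IRQ context: defers to the completion queue */
		dev_kfree_skb_any(ring->queued[ring->head]);
		ring->queued[ring->head] = NULL;
		ring->head = (ring->head + 1) % 64;
	}
}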
1819
1820
1821/**
1822 * netif_device_detach - mark device as removed
1823 * @dev: network device
1824 *
1825 * Mark device as removed from system and therefore no longer available.
1826 */
1827void netif_device_detach(struct net_device *dev)
1828{
1829 if (test_and_clear_bit(__LINK_STATE_PRESENT, &dev->state) &&
1830 netif_running(dev)) {
1831 netif_tx_stop_all_queues(dev);
1832 }
1833}
1834EXPORT_SYMBOL(netif_device_detach);
1835
1836/**
1837 * netif_device_attach - mark device as attached
1838 * @dev: network device
1839 *
1840 * Mark device as attached to the system and restart if needed.
1841 */
1842void netif_device_attach(struct net_device *dev)
1843{
1844 if (!test_and_set_bit(__LINK_STATE_PRESENT, &dev->state) &&
1845 netif_running(dev)) {
1846 netif_tx_wake_all_queues(dev);
1847 __netdev_watchdog_up(dev);
1848 }
1849}
1850EXPORT_SYMBOL(netif_device_attach);
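/*
 * Editor's example (hypothetical PCI driver): the usual pairing of these
 * two helpers in suspend/resume; only the netif_device_* calls are the
 * API under discussion, the rest is driver boilerplate.
 */
static int example_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	netif_device_detach(dev);	/* stop all TX queues, mark absent */
	/* ... power the hardware down ... */
	return 0;
}

static int example_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	/* ... power the hardware up ... */
	netif_device_attach(dev);	/* wake queues, restart watchdog */
	return 0;
}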
1851
1852/**
1853 * skb_dev_set -- assign a new device to a buffer
1854 * @skb: buffer for the new device
1855 * @dev: network device
1856 *
1857 * If an skb is owned by a device already, we have to reset
1858 * all data private to the namespace a device belongs to
1859 * before assigning it a new device.
1860 */
1861#ifdef CONFIG_NET_NS
1862void skb_set_dev(struct sk_buff *skb, struct net_device *dev)
1863{
1864 skb_dst_drop(skb);
1865 if (skb->dev && !net_eq(dev_net(skb->dev), dev_net(dev))) {
1866 secpath_reset(skb);
1867 nf_reset(skb);
1868 skb_init_secmark(skb);
1869 skb->mark = 0;
1870 skb->priority = 0;
1871 skb->nf_trace = 0;
1872 skb->ipvs_property = 0;
1873#ifdef CONFIG_NET_SCHED
1874 skb->tc_index = 0;
1875#endif
1876 }
1877 skb->dev = dev;
1878}
1879EXPORT_SYMBOL(skb_set_dev);
1880#endif /* CONFIG_NET_NS */
1881
1882static void skb_warn_bad_offload(const struct sk_buff *skb)
1883{
1884 static const netdev_features_t null_features = 0;
1885 struct net_device *dev = skb->dev;
1886 const char *driver = "";
1887
1888 if (dev && dev->dev.parent)
1889 driver = dev_driver_string(dev->dev.parent);
1890
1891 WARN(1, "%s: caps=(%pNF, %pNF) len=%d data_len=%d gso_size=%d "
1892 "gso_type=%d ip_summed=%d\n",
1893 driver, dev ? &dev->features : &null_features,
1894 skb->sk ? &skb->sk->sk_route_caps : &null_features,
1895 skb->len, skb->data_len, skb_shinfo(skb)->gso_size,
1896 skb_shinfo(skb)->gso_type, skb->ip_summed);
1897}
1898
1899/*
1900 * Invalidate hardware checksum when packet is to be mangled, and
1901 * complete checksum manually on outgoing path.
1902 */
1903 int skb_checksum_help(struct sk_buff *skb)
1904 {
1905 __wsum csum;
1906 int ret = 0, offset;
1907
1908 if (skb->ip_summed == CHECKSUM_COMPLETE)
1909 goto out_set_summed;
1910
1911 if (unlikely(skb_shinfo(skb)->gso_size)) {
1912 skb_warn_bad_offload(skb);
1913 return -EINVAL;
1914 }
1915
1916 offset = skb_checksum_start_offset(skb);
1917 BUG_ON(offset >= skb_headlen(skb));
1918 csum = skb_checksum(skb, offset, skb->len - offset, 0);
1919
1920 offset += skb->csum_offset;
1921 BUG_ON(offset + sizeof(__sum16) > skb_headlen(skb));
1922
1923 if (skb_cloned(skb) &&
1924 !skb_clone_writable(skb, offset + sizeof(__sum16))) {
1925 ret = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
1926 if (ret)
1927 goto out;
1928 }
1929
1930 *(__sum16 *)(skb->data + offset) = csum_fold(csum);
1931 out_set_summed:
1932 skb->ip_summed = CHECKSUM_NONE;
1933 out:
1934 return ret;
1935}
1936 EXPORT_SYMBOL(skb_checksum_help);
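/*
 * Editor's example (hypothetical caller): code that is about to modify
 * payload bytes of a CHECKSUM_PARTIAL skb must first fold the checksum
 * in software, which is the service skb_checksum_help() provides.
 */
static int example_mangle_payload(struct sk_buff *skb)
{
	if (skb->ip_summed == CHECKSUM_PARTIAL && skb_checksum_help(skb))
		return -EINVAL;	/* could not complete the checksum */

	/* payload edits are now safe; ip_summed is CHECKSUM_NONE */
	return 0;
}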
1937
1938/**
1939 * skb_gso_segment - Perform segmentation on skb.
1940 * @skb: buffer to segment
1941 * @features: features for the output path (see dev->features)
1942 *
1943 * This function segments the given skb and returns a list of segments.
1944 *
1945 * It may return NULL if the skb requires no segmentation. This is
1946 * only possible when GSO is used for verifying header integrity.
1947 */
1948struct sk_buff *skb_gso_segment(struct sk_buff *skb,
1949 netdev_features_t features)
1950{
1951 struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
1952 struct packet_type *ptype;
1953 __be16 type = skb->protocol;
1954 int vlan_depth = ETH_HLEN;
1955 int err;
1956
1957 while (type == htons(ETH_P_8021Q)) {
1958 struct vlan_hdr *vh;
1959
1960 if (unlikely(!pskb_may_pull(skb, vlan_depth + VLAN_HLEN)))
1961 return ERR_PTR(-EINVAL);
1962
1963 vh = (struct vlan_hdr *)(skb->data + vlan_depth);
1964 type = vh->h_vlan_encapsulated_proto;
1965 vlan_depth += VLAN_HLEN;
1966 }
1967
1968 skb_reset_mac_header(skb);
1969 skb->mac_len = skb->network_header - skb->mac_header;
1970 __skb_pull(skb, skb->mac_len);
1971
1972 if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
1973 skb_warn_bad_offload(skb);
1974
1975 if (skb_header_cloned(skb) &&
1976 (err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))
1977 return ERR_PTR(err);
1978 }
1979
1980 rcu_read_lock();
1981 list_for_each_entry_rcu(ptype,
1982 &ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) {
1983 if (ptype->type == type && !ptype->dev && ptype->gso_segment) {
1984 if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
1985 err = ptype->gso_send_check(skb);
1986 segs = ERR_PTR(err);
1987 if (err || skb_gso_ok(skb, features))
1988 break;
1989 __skb_push(skb, (skb->data -
1990 skb_network_header(skb)));
1991 }
1992 segs = ptype->gso_segment(skb, features);
1993 break;
1994 }
1995 }
1996 rcu_read_unlock();
1997
1998 __skb_push(skb, skb->data - skb_mac_header(skb));
1999
2000 return segs;
2001}
2002EXPORT_SYMBOL(skb_gso_segment);
2003
2004/* Take action when hardware reception checksum errors are detected. */
2005#ifdef CONFIG_BUG
2006void netdev_rx_csum_fault(struct net_device *dev)
2007{
2008 if (net_ratelimit()) {
2009 pr_err("%s: hw csum failure\n", dev ? dev->name : "<unknown>");
2010 dump_stack();
2011 }
2012}
2013EXPORT_SYMBOL(netdev_rx_csum_fault);
2014#endif
2015
2016 /* Actually, we should eliminate this check as soon as we know that:
2017 * 1. an IOMMU is present and can map all of the memory.
2018 * 2. No high memory really exists on this machine.
2019 */
2020
2021 static int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
2022 {
2023 #ifdef CONFIG_HIGHMEM
2024 int i;
2025 if (!(dev->features & NETIF_F_HIGHDMA)) {
2026 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2027 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2028 if (PageHighMem(skb_frag_page(frag)))
2029 return 1;
2030 }
2031 }
2032
2033 if (PCI_DMA_BUS_IS_PHYS) {
2034 struct device *pdev = dev->dev.parent;
2035
2036 if (!pdev)
2037 return 0;
2038 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2039 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2040 dma_addr_t addr = page_to_phys(skb_frag_page(frag));
2041 if (!pdev->dma_mask || addr + PAGE_SIZE - 1 > *pdev->dma_mask)
2042 return 1;
2043 }
2044 }
2045 #endif
2046 return 0;
2047}
2048
2049struct dev_gso_cb {
2050 void (*destructor)(struct sk_buff *skb);
2051};
2052
2053#define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)
2054
2055static void dev_gso_skb_destructor(struct sk_buff *skb)
2056{
2057 struct dev_gso_cb *cb;
2058
2059 do {
2060 struct sk_buff *nskb = skb->next;
2061
2062 skb->next = nskb->next;
2063 nskb->next = NULL;
2064 kfree_skb(nskb);
2065 } while (skb->next);
2066
2067 cb = DEV_GSO_CB(skb);
2068 if (cb->destructor)
2069 cb->destructor(skb);
2070}
2071
2072/**
2073 * dev_gso_segment - Perform emulated hardware segmentation on skb.
2074 * @skb: buffer to segment
2075 * @features: device features as applicable to this skb
2076 *
2077 * This function segments the given skb and stores the list of segments
2078 * in skb->next.
2079 */
2080 static int dev_gso_segment(struct sk_buff *skb, netdev_features_t features)
2081 {
2082 struct sk_buff *segs;
2083
2084 segs = skb_gso_segment(skb, features);
2085
2086 /* Verifying header integrity only. */
2087 if (!segs)
2088 return 0;
2089
2090 if (IS_ERR(segs))
2091 return PTR_ERR(segs);
2092
2093 skb->next = segs;
2094 DEV_GSO_CB(skb)->destructor = skb->destructor;
2095 skb->destructor = dev_gso_skb_destructor;
2096
2097 return 0;
2098}
2099
2100/*
2101 * Try to orphan skb early, right before transmission by the device.
2102 * We cannot orphan skb if tx timestamp is requested or the sk-reference
2103 * is needed on driver level for other reasons, e.g. see net/can/raw.c
2104 */
2105static inline void skb_orphan_try(struct sk_buff *skb)
2106{
2107 struct sock *sk = skb->sk;
2108
2109 if (sk && !skb_shinfo(skb)->tx_flags) {
2110 /* skb_tx_hash() won't be able to get sk.
2111 * We copy sk_hash into skb->rxhash
2112 */
2113 if (!skb->rxhash)
2114 skb->rxhash = sk->sk_hash;
2115 skb_orphan(skb);
2116 }
2117}
2118
2119 static bool can_checksum_protocol(netdev_features_t features, __be16 protocol)
2120{
2121 return ((features & NETIF_F_GEN_CSUM) ||
2122 ((features & NETIF_F_V4_CSUM) &&
2123 protocol == htons(ETH_P_IP)) ||
2124 ((features & NETIF_F_V6_CSUM) &&
2125 protocol == htons(ETH_P_IPV6)) ||
2126 ((features & NETIF_F_FCOE_CRC) &&
2127 protocol == htons(ETH_P_FCOE)));
2128}
2129
2130static netdev_features_t harmonize_features(struct sk_buff *skb,
2131 __be16 protocol, netdev_features_t features)
2132 {
2133 if (!can_checksum_protocol(features, protocol)) {
2134 features &= ~NETIF_F_ALL_CSUM;
2135 features &= ~NETIF_F_SG;
2136 } else if (illegal_highdma(skb->dev, skb)) {
2137 features &= ~NETIF_F_SG;
2138 }
2139
2140 return features;
2141}
2142
2143 netdev_features_t netif_skb_features(struct sk_buff *skb)
2144{
2145 __be16 protocol = skb->protocol;
2146 netdev_features_t features = skb->dev->features;
2147
2148 if (protocol == htons(ETH_P_8021Q)) {
2149 struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
2150 protocol = veh->h_vlan_encapsulated_proto;
2151 } else if (!vlan_tx_tag_present(skb)) {
2152 return harmonize_features(skb, protocol, features);
2153 }
2154
2155 features &= (skb->dev->vlan_features | NETIF_F_HW_VLAN_TX);
2156
2157 if (protocol != htons(ETH_P_8021Q)) {
2158 return harmonize_features(skb, protocol, features);
2159 } else {
2160 features &= NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST |
2161 NETIF_F_GEN_CSUM | NETIF_F_HW_VLAN_TX;
2162 return harmonize_features(skb, protocol, features);
2163 }
2164 }
2165 EXPORT_SYMBOL(netif_skb_features);
2166
2167/*
2168 * Returns true if either:
2169 * 1. skb has frag_list and the device doesn't support FRAGLIST, or
2170 * 2. skb is fragmented and the device does not support SG, or if
2171 * at least one of fragments is in highmem and device does not
2172 * support DMA from it.
2173 */
2174static inline int skb_needs_linearize(struct sk_buff *skb,
2175 int features)
2176 {
2177 return skb_is_nonlinear(skb) &&
2178 ((skb_has_frag_list(skb) &&
2179 !(features & NETIF_F_FRAGLIST)) ||
2180 (skb_shinfo(skb)->nr_frags &&
2181 !(features & NETIF_F_SG)));
2182}
2183
2184int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
2185 struct netdev_queue *txq)
2186 {
2187 const struct net_device_ops *ops = dev->netdev_ops;
2188 int rc = NETDEV_TX_OK;
2189 unsigned int skb_len;
2190
2191 if (likely(!skb->next)) {
2192 netdev_features_t features;
2193
2194 /*
2195 * If the device doesn't need skb->dst, release it right now while
2196 * it's hot in this cpu cache
2197 */
2198 if (dev->priv_flags & IFF_XMIT_DST_RELEASE)
2199 skb_dst_drop(skb);
2200
2201 if (!list_empty(&ptype_all))
2202 dev_queue_xmit_nit(skb, dev);
2203
2204 skb_orphan_try(skb);
2205
2206 features = netif_skb_features(skb);
2207
2208 if (vlan_tx_tag_present(skb) &&
2209 !(features & NETIF_F_HW_VLAN_TX)) {
2210 skb = __vlan_put_tag(skb, vlan_tx_tag_get(skb));
2211 if (unlikely(!skb))
2212 goto out;
2213
2214 skb->vlan_tci = 0;
2215 }
2216
2217 if (netif_needs_gso(skb, features)) {
2218 if (unlikely(dev_gso_segment(skb, features)))
2219 goto out_kfree_skb;
2220 if (skb->next)
2221 goto gso;
2222 } else {
2223 if (skb_needs_linearize(skb, features) &&
2224 __skb_linearize(skb))
2225 goto out_kfree_skb;
2226
2227 /* If packet is not checksummed and device does not
2228 * support checksumming for this protocol, complete
2229 * checksumming here.
2230 */
2231 if (skb->ip_summed == CHECKSUM_PARTIAL) {
2232 skb_set_transport_header(skb,
2233 skb_checksum_start_offset(skb));
2234 if (!(features & NETIF_F_ALL_CSUM) &&
2235 skb_checksum_help(skb))
2236 goto out_kfree_skb;
2237 }
2238 }
2239
2240 skb_len = skb->len;
2241 rc = ops->ndo_start_xmit(skb, dev);
2242 trace_net_dev_xmit(skb, rc, dev, skb_len);
2243 if (rc == NETDEV_TX_OK)
2244 txq_trans_update(txq);
2245 return rc;
2246 }
2247
2248 gso:
2249 do {
2250 struct sk_buff *nskb = skb->next;
2251
2252 skb->next = nskb->next;
2253 nskb->next = NULL;
2254
2255 /*
2256 * If the device doesn't need nskb->dst, release it right now while
2257 * it's hot in this cpu cache
2258 */
2259 if (dev->priv_flags & IFF_XMIT_DST_RELEASE)
2260 skb_dst_drop(nskb);
2261
2262 skb_len = nskb->len;
2263 rc = ops->ndo_start_xmit(nskb, dev);
2264 trace_net_dev_xmit(nskb, rc, dev, skb_len);
2265 if (unlikely(rc != NETDEV_TX_OK)) {
2266 if (rc & ~NETDEV_TX_MASK)
2267 goto out_kfree_gso_skb;
2268 nskb->next = skb->next;
2269 skb->next = nskb;
2270 return rc;
2271 }
2272 txq_trans_update(txq);
2273 if (unlikely(netif_xmit_stopped(txq) && skb->next))
2274 return NETDEV_TX_BUSY;
2275 } while (skb->next);
2276
2277out_kfree_gso_skb:
2278 if (likely(skb->next == NULL))
2279 skb->destructor = DEV_GSO_CB(skb)->destructor;
2280out_kfree_skb:
2281 kfree_skb(skb);
2282 out:
2283 return rc;
2284}
2285
2286 static u32 hashrnd __read_mostly;
2287
2288/*
2289 * Returns a Tx hash based on the given packet descriptor and a Tx queue
2290 * count to be used as a distribution range.
2291 */
2292u16 __skb_tx_hash(const struct net_device *dev, const struct sk_buff *skb,
2293 unsigned int num_tx_queues)
2294 {
2295 u32 hash;
2296 u16 qoffset = 0;
2297 u16 qcount = num_tx_queues;
2298
2299 if (skb_rx_queue_recorded(skb)) {
2300 hash = skb_get_rx_queue(skb);
2301 while (unlikely(hash >= num_tx_queues))
2302 hash -= num_tx_queues;
2303 return hash;
2304 }
2305
2306 if (dev->num_tc) {
2307 u8 tc = netdev_get_prio_tc_map(dev, skb->priority);
2308 qoffset = dev->tc_to_txq[tc].offset;
2309 qcount = dev->tc_to_txq[tc].count;
2310 }
2311
2312 if (skb->sk && skb->sk->sk_hash)
2313 hash = skb->sk->sk_hash;
2314 else
2315 hash = (__force u16) skb->protocol ^ skb->rxhash;
2316 hash = jhash_1word(hash, hashrnd);
2317
2318 return (u16) (((u64) hash * qcount) >> 32) + qoffset;
2319 }
2320 EXPORT_SYMBOL(__skb_tx_hash);
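/*
 * Editor's note on the scaling step above: ((u64)hash * qcount) >> 32
 * maps a 32-bit hash uniformly onto [0, qcount) without a modulo. For
 * example, with qcount = 8 and hash = 0xC0000000 (three quarters of the
 * 32-bit range), the product is 0x600000000 and the shift yields queue 6.
 */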
2321
2322static inline u16 dev_cap_txqueue(struct net_device *dev, u16 queue_index)
2323{
2324 if (unlikely(queue_index >= dev->real_num_tx_queues)) {
2325 if (net_ratelimit()) {
7b6cd1ce
JP
2326 pr_warn("%s selects TX queue %d, but real number of TX queues is %d\n",
2327 dev->name, queue_index,
2328 dev->real_num_tx_queues);
2329 }
2330 return 0;
2331 }
2332 return queue_index;
2333}
2334
2335static inline int get_xps_queue(struct net_device *dev, struct sk_buff *skb)
2336{
2337 #ifdef CONFIG_XPS
2338 struct xps_dev_maps *dev_maps;
2339 struct xps_map *map;
2340 int queue_index = -1;
2341
2342 rcu_read_lock();
2343 dev_maps = rcu_dereference(dev->xps_maps);
2344 if (dev_maps) {
2345 map = rcu_dereference(
2346 dev_maps->cpu_map[raw_smp_processor_id()]);
2347 if (map) {
2348 if (map->len == 1)
2349 queue_index = map->queues[0];
2350 else {
2351 u32 hash;
2352 if (skb->sk && skb->sk->sk_hash)
2353 hash = skb->sk->sk_hash;
2354 else
2355 hash = (__force u16) skb->protocol ^
2356 skb->rxhash;
2357 hash = jhash_1word(hash, hashrnd);
2358 queue_index = map->queues[
2359 ((u64)hash * map->len) >> 32];
2360 }
2361 if (unlikely(queue_index >= dev->real_num_tx_queues))
2362 queue_index = -1;
2363 }
2364 }
2365 rcu_read_unlock();
2366
2367 return queue_index;
2368#else
2369 return -1;
2370#endif
2371}
2372
2373static struct netdev_queue *dev_pick_tx(struct net_device *dev,
2374 struct sk_buff *skb)
2375{
2376 int queue_index;
2377 const struct net_device_ops *ops = dev->netdev_ops;
2378
2379 if (dev->real_num_tx_queues == 1)
2380 queue_index = 0;
2381 else if (ops->ndo_select_queue) {
2382 queue_index = ops->ndo_select_queue(dev, skb);
2383 queue_index = dev_cap_txqueue(dev, queue_index);
2384 } else {
2385 struct sock *sk = skb->sk;
2386 queue_index = sk_tx_queue_get(sk);
2387
2388 if (queue_index < 0 || skb->ooo_okay ||
2389 queue_index >= dev->real_num_tx_queues) {
2390 int old_index = queue_index;
2391
2392 queue_index = get_xps_queue(dev, skb);
2393 if (queue_index < 0)
2394 queue_index = skb_tx_hash(dev, skb);
2395
2396 if (queue_index != old_index && sk) {
2397 struct dst_entry *dst =
2398 rcu_dereference_check(sk->sk_dst_cache, 1);
2399
2400 if (dst && skb_dst(skb) == dst)
2401 sk_tx_queue_set(sk, queue_index);
2402 }
2403 }
2404 }
2405
2406 skb_set_queue_mapping(skb, queue_index);
2407 return netdev_get_tx_queue(dev, queue_index);
2408}
2409
2410static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
2411 struct net_device *dev,
2412 struct netdev_queue *txq)
2413{
2414 spinlock_t *root_lock = qdisc_lock(q);
2415 bool contended;
2416 int rc;
2417
2418 qdisc_skb_cb(skb)->pkt_len = skb->len;
2419 qdisc_calculate_pkt_len(skb, q);
2420 /*
2421 * Heuristic to force contended enqueues to serialize on a
2422 * separate lock before trying to get qdisc main lock.
2423 * This permits __QDISC_STATE_RUNNING owner to get the lock more often
2424 * and dequeue packets faster.
2425 */
2426 contended = qdisc_is_running(q);
2427 if (unlikely(contended))
2428 spin_lock(&q->busylock);
2429
2430 spin_lock(root_lock);
2431 if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) {
2432 kfree_skb(skb);
2433 rc = NET_XMIT_DROP;
2434 } else if ((q->flags & TCQ_F_CAN_BYPASS) && !qdisc_qlen(q) &&
2435 qdisc_run_begin(q)) {
2436 /*
2437 * This is a work-conserving queue; there are no old skbs
2438 * waiting to be sent out; and the qdisc is not running -
2439 * xmit the skb directly.
2440 */
2441 if (!(dev->priv_flags & IFF_XMIT_DST_RELEASE))
2442 skb_dst_force(skb);
2443
2444 qdisc_bstats_update(q, skb);
2445
2446 if (sch_direct_xmit(skb, q, dev, txq, root_lock)) {
2447 if (unlikely(contended)) {
2448 spin_unlock(&q->busylock);
2449 contended = false;
2450 }
2451 __qdisc_run(q);
2452 } else
2453 qdisc_run_end(q);
2454
2455 rc = NET_XMIT_SUCCESS;
2456 } else {
2457 skb_dst_force(skb);
2458 rc = q->enqueue(skb, q) & NET_XMIT_MASK;
2459 if (qdisc_run_begin(q)) {
2460 if (unlikely(contended)) {
2461 spin_unlock(&q->busylock);
2462 contended = false;
2463 }
2464 __qdisc_run(q);
2465 }
2466 }
2467 spin_unlock(root_lock);
2468 if (unlikely(contended))
2469 spin_unlock(&q->busylock);
2470 return rc;
2471}
2472
2473#if IS_ENABLED(CONFIG_NETPRIO_CGROUP)
2474static void skb_update_prio(struct sk_buff *skb)
2475{
2476 struct netprio_map *map = rcu_dereference_bh(skb->dev->priomap);
2477
2478 if ((!skb->priority) && (skb->sk) && map)
2479 skb->priority = map->priomap[skb->sk->sk_cgrp_prioidx];
2480}
2481#else
2482#define skb_update_prio(skb)
2483#endif
2484
2485 static DEFINE_PER_CPU(int, xmit_recursion);
2486 #define RECURSION_LIMIT 10
2487
2488/**
2489 * dev_queue_xmit - transmit a buffer
2490 * @skb: buffer to transmit
2491 *
2492 * Queue a buffer for transmission to a network device. The caller must
2493 * have set the device and priority and built the buffer before calling
2494 * this function. The function can be called from an interrupt.
2495 *
2496 * A negative errno code is returned on a failure. A success does not
2497 * guarantee the frame will be transmitted as it may be dropped due
2498 * to congestion or traffic shaping.
2499 *
2500 * -----------------------------------------------------------------------------------
2501 * I notice this method can also return errors from the queue disciplines,
2502 * including NET_XMIT_DROP, which is a positive value. So, errors can also
2503 * be positive.
2504 *
2505 * Regardless of the return value, the skb is consumed, so it is currently
2506 * difficult to retry a send to this method. (You can bump the ref count
2507 * before sending to hold a reference for retry if you are careful.)
2508 *
2509 * When calling this method, interrupts MUST be enabled. This is because
2510 * the BH enable code must have IRQs enabled so that it will not deadlock.
2511 * --BLG
2512 */
2513int dev_queue_xmit(struct sk_buff *skb)
2514{
2515 struct net_device *dev = skb->dev;
dc2b4847 2516 struct netdev_queue *txq;
1da177e4
LT
2517 struct Qdisc *q;
2518 int rc = -ENOMEM;
2519
2520 /* Disable soft irqs for various locks below. Also
2521 * stops preemption for RCU.
2522 */
2523 rcu_read_lock_bh();
2524
2525 skb_update_prio(skb);
2526
2527 txq = dev_pick_tx(dev, skb);
2528 q = rcu_dereference_bh(txq->qdisc);
2529
2530 #ifdef CONFIG_NET_CLS_ACT
2531 skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_EGRESS);
2532 #endif
2533 trace_net_dev_queue(skb);
2534 if (q->enqueue) {
2535 rc = __dev_xmit_skb(skb, q, dev, txq);
2536 goto out;
2537 }
2538
2539 /* The device has no queue. Common case for software devices:
2540 loopback, all the sorts of tunnels...
2541
2542 Really, it is unlikely that netif_tx_lock protection is necessary
2543 here. (f.e. loopback and IP tunnels are clean ignoring statistics
2544 counters.)
2545 However, it is possible that they rely on the protection
2546 made by us here.
2547
2548 Check this and shoot the lock. It is not prone to deadlocks.
2549 Either shoot the noqueue qdisc, it is even simpler 8)
2550 */
2551 if (dev->flags & IFF_UP) {
2552 int cpu = smp_processor_id(); /* ok because BHs are off */
2553
2554 if (txq->xmit_lock_owner != cpu) {
2555
2556 if (__this_cpu_read(xmit_recursion) > RECURSION_LIMIT)
2557 goto recursion_alert;
2558
2559 HARD_TX_LOCK(dev, txq, cpu);
2560
2561 if (!netif_xmit_stopped(txq)) {
2562 __this_cpu_inc(xmit_recursion);
2563 rc = dev_hard_start_xmit(skb, dev, txq);
2564 __this_cpu_dec(xmit_recursion);
2565 if (dev_xmit_complete(rc)) {
2566 HARD_TX_UNLOCK(dev, txq);
2567 goto out;
2568 }
2569 }
2570 HARD_TX_UNLOCK(dev, txq);
2571 if (net_ratelimit())
2572 pr_crit("Virtual device %s asks to queue packet!\n",
2573 dev->name);
2574 } else {
2575 /* Recursion is detected! It is possible,
2576 * unfortunately
2577 */
2578recursion_alert:
2579 if (net_ratelimit())
2580 pr_crit("Dead loop on virtual device %s, fix it urgently!\n",
2581 dev->name);
2582 }
2583 }
2584
2585 rc = -ENETDOWN;
2586 rcu_read_unlock_bh();
2587
2588 kfree_skb(skb);
2589 return rc;
2590out:
2591 rcu_read_unlock_bh();
2592 return rc;
2593}
2594 EXPORT_SYMBOL(dev_queue_xmit);
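/*
 * Editor's example (hypothetical caller): transmitting a preassembled
 * layer-2 frame the way a tunnel or protocol module might. "frame" and
 * "len" are assumed to describe a complete Ethernet packet; remember
 * that dev_queue_xmit() consumes the skb whatever it returns.
 */
static int example_xmit_frame(struct net_device *dev,
			      const void *frame, unsigned int len)
{
	struct sk_buff *skb;

	skb = alloc_skb(LL_RESERVED_SPACE(dev) + len, GFP_ATOMIC);
	if (!skb)
		return -ENOMEM;

	skb_reserve(skb, LL_RESERVED_SPACE(dev));
	memcpy(skb_put(skb, len), frame, len);
	skb->dev = dev;
	skb_reset_network_header(skb);

	return dev_queue_xmit(skb);	/* may also return NET_XMIT_* codes */
}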
2595
2596
2597/*=======================================================================
2598 Receiver routines
2599 =======================================================================*/
2600
2601 int netdev_max_backlog __read_mostly = 1000;
2602 int netdev_tstamp_prequeue __read_mostly = 1;
2603int netdev_budget __read_mostly = 300;
2604int weight_p __read_mostly = 64; /* old backlog weight */
2605
2606/* Called with irq disabled */
2607static inline void ____napi_schedule(struct softnet_data *sd,
2608 struct napi_struct *napi)
2609{
2610 list_add_tail(&napi->poll_list, &sd->poll_list);
2611 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
2612}
2613
2614 /*
2615 * __skb_get_rxhash: calculate a flow hash based on src/dst addresses
2616 * and src/dst port numbers. Sets rxhash in skb to non-zero hash value
2617 * on success, zero indicates no valid hash. Also, sets l4_rxhash in skb
2618 * if hash is a canonical 4-tuple hash over transport ports.
0a9627f2 2619 */
bdeab991 2620void __skb_get_rxhash(struct sk_buff *skb)
0a9627f2 2621{
4504b861
ED
2622 struct flow_keys keys;
2623 u32 hash;
c6865cb3 2624
4504b861
ED
2625 if (!skb_flow_dissect(skb, &keys))
2626 return;
e971b722 2627
4504b861
ED
2628 if (keys.ports) {
2629 if ((__force u16)keys.port16[1] < (__force u16)keys.port16[0])
2630 swap(keys.port16[0], keys.port16[1]);
2631 skb->l4_rxhash = 1;
2632 }
2633
2634 /* get a consistent hash (same value on both flow directions) */
2635 if ((__force u32)keys.dst < (__force u32)keys.src)
2636 swap(keys.dst, keys.src);
2637
2638 hash = jhash_3words((__force u32)keys.dst,
2639 (__force u32)keys.src,
2640 (__force u32)keys.ports, hashrnd);
2641 if (!hash)
2642 hash = 1;
2643
2644 skb->rxhash = hash;
2645}
2646EXPORT_SYMBOL(__skb_get_rxhash);
2647
2648#ifdef CONFIG_RPS
2649
2650/* One global table that all flow-based protocols share. */
2651 struct rps_sock_flow_table __rcu *rps_sock_flow_table __read_mostly;
2652EXPORT_SYMBOL(rps_sock_flow_table);
2653
2654struct jump_label_key rps_needed __read_mostly;
2655
2656static struct rps_dev_flow *
2657set_rps_cpu(struct net_device *dev, struct sk_buff *skb,
2658 struct rps_dev_flow *rflow, u16 next_cpu)
2659{
2660 if (next_cpu != RPS_NO_CPU) {
2661#ifdef CONFIG_RFS_ACCEL
2662 struct netdev_rx_queue *rxqueue;
2663 struct rps_dev_flow_table *flow_table;
2664 struct rps_dev_flow *old_rflow;
2665 u32 flow_id;
2666 u16 rxq_index;
2667 int rc;
2668
2669 /* Should we steer this flow to a different hardware queue? */
2670 if (!skb_rx_queue_recorded(skb) || !dev->rx_cpu_rmap ||
2671 !(dev->features & NETIF_F_NTUPLE))
2672 goto out;
2673 rxq_index = cpu_rmap_lookup_index(dev->rx_cpu_rmap, next_cpu);
2674 if (rxq_index == skb_get_rx_queue(skb))
2675 goto out;
2676
2677 rxqueue = dev->_rx + rxq_index;
2678 flow_table = rcu_dereference(rxqueue->rps_flow_table);
2679 if (!flow_table)
2680 goto out;
2681 flow_id = skb->rxhash & flow_table->mask;
2682 rc = dev->netdev_ops->ndo_rx_flow_steer(dev, skb,
2683 rxq_index, flow_id);
2684 if (rc < 0)
2685 goto out;
2686 old_rflow = rflow;
2687 rflow = &flow_table->flows[flow_id];
2688 rflow->filter = rc;
2689 if (old_rflow->filter == rflow->filter)
2690 old_rflow->filter = RPS_NO_FILTER;
2691 out:
2692#endif
2693 rflow->last_qtail =
2694 per_cpu(softnet_data, next_cpu).input_queue_head;
2695 }
2696
2697 rflow->cpu = next_cpu;
2698 return rflow;
2699}
2700
2701/*
2702 * get_rps_cpu is called from netif_receive_skb and returns the target
2703 * CPU from the RPS map of the receiving queue for a given skb.
2704 * rcu_read_lock must be held on entry.
2705 */
2706static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
2707 struct rps_dev_flow **rflowp)
2708{
2709 struct netdev_rx_queue *rxqueue;
2710 struct rps_map *map;
2711 struct rps_dev_flow_table *flow_table;
2712 struct rps_sock_flow_table *sock_flow_table;
2713 int cpu = -1;
2714 u16 tcpu;
2715
2716 if (skb_rx_queue_recorded(skb)) {
2717 u16 index = skb_get_rx_queue(skb);
2718 if (unlikely(index >= dev->real_num_rx_queues)) {
2719 WARN_ONCE(dev->real_num_rx_queues > 1,
2720 "%s received packet on queue %u, but number "
2721 "of RX queues is %u\n",
2722 dev->name, index, dev->real_num_rx_queues);
2723 goto done;
2724 }
2725 rxqueue = dev->_rx + index;
2726 } else
2727 rxqueue = dev->_rx;
2728
2729 map = rcu_dereference(rxqueue->rps_map);
2730 if (map) {
2731 if (map->len == 1 &&
2732 !rcu_access_pointer(rxqueue->rps_flow_table)) {
2733 tcpu = map->cpus[0];
2734 if (cpu_online(tcpu))
2735 cpu = tcpu;
2736 goto done;
2737 }
2738 } else if (!rcu_access_pointer(rxqueue->rps_flow_table)) {
2739 goto done;
2740 }
2741
2742 skb_reset_network_header(skb);
2743 if (!skb_get_rxhash(skb))
2744 goto done;
2745
2746 flow_table = rcu_dereference(rxqueue->rps_flow_table);
2747 sock_flow_table = rcu_dereference(rps_sock_flow_table);
2748 if (flow_table && sock_flow_table) {
2749 u16 next_cpu;
2750 struct rps_dev_flow *rflow;
2751
2752 rflow = &flow_table->flows[skb->rxhash & flow_table->mask];
2753 tcpu = rflow->cpu;
2754
2755 next_cpu = sock_flow_table->ents[skb->rxhash &
2756 sock_flow_table->mask];
2757
2758 /*
2759 * If the desired CPU (where last recvmsg was done) is
2760 * different from current CPU (one in the rx-queue flow
2761 * table entry), switch if one of the following holds:
2762 * - Current CPU is unset (equal to RPS_NO_CPU).
2763 * - Current CPU is offline.
2764 * - The current CPU's queue tail has advanced beyond the
2765 * last packet that was enqueued using this table entry.
2766 * This guarantees that all previous packets for the flow
2767 * have been dequeued, thus preserving in order delivery.
2768 */
2769 if (unlikely(tcpu != next_cpu) &&
2770 (tcpu == RPS_NO_CPU || !cpu_online(tcpu) ||
2771 ((int)(per_cpu(softnet_data, tcpu).input_queue_head -
2772 rflow->last_qtail)) >= 0))
2773 rflow = set_rps_cpu(dev, skb, rflow, next_cpu);
2774
2775 if (tcpu != RPS_NO_CPU && cpu_online(tcpu)) {
2776 *rflowp = rflow;
2777 cpu = tcpu;
2778 goto done;
2779 }
2780 }
2781
2782 if (map) {
2783 tcpu = map->cpus[((u64) skb->rxhash * map->len) >> 32];
2784
2785 if (cpu_online(tcpu)) {
2786 cpu = tcpu;
2787 goto done;
2788 }
2789 }
2790
2791done:
2792 return cpu;
2793}
2794
2795#ifdef CONFIG_RFS_ACCEL
2796
2797/**
2798 * rps_may_expire_flow - check whether an RFS hardware filter may be removed
2799 * @dev: Device on which the filter was set
2800 * @rxq_index: RX queue index
2801 * @flow_id: Flow ID passed to ndo_rx_flow_steer()
2802 * @filter_id: Filter ID returned by ndo_rx_flow_steer()
2803 *
2804 * Drivers that implement ndo_rx_flow_steer() should periodically call
2805 * this function for each installed filter and remove the filters for
2806 * which it returns %true.
2807 */
2808bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index,
2809 u32 flow_id, u16 filter_id)
2810{
2811 struct netdev_rx_queue *rxqueue = dev->_rx + rxq_index;
2812 struct rps_dev_flow_table *flow_table;
2813 struct rps_dev_flow *rflow;
2814 bool expire = true;
2815 int cpu;
2816
2817 rcu_read_lock();
2818 flow_table = rcu_dereference(rxqueue->rps_flow_table);
2819 if (flow_table && flow_id <= flow_table->mask) {
2820 rflow = &flow_table->flows[flow_id];
2821 cpu = ACCESS_ONCE(rflow->cpu);
2822 if (rflow->filter == filter_id && cpu != RPS_NO_CPU &&
2823 ((int)(per_cpu(softnet_data, cpu).input_queue_head -
2824 rflow->last_qtail) <
2825 (int)(10 * flow_table->mask)))
2826 expire = false;
2827 }
2828 rcu_read_unlock();
2829 return expire;
2830}
2831EXPORT_SYMBOL(rps_may_expire_flow);
2832
2833#endif /* CONFIG_RFS_ACCEL */
2834
2835 /* Called from hardirq (IPI) context */
2836 static void rps_trigger_softirq(void *data)
2837 {
2838 struct softnet_data *sd = data;
2839
2840 ____napi_schedule(sd, &sd->backlog);
2841 sd->received_rps++;
2842 }
2843
2844 #endif /* CONFIG_RPS */
2845
2846/*
2847 * Check if this softnet_data structure belongs to another cpu
2848 * If yes, queue it to our IPI list and return 1
2849 * If no, return 0
2850 */
2851static int rps_ipi_queued(struct softnet_data *sd)
2852{
2853#ifdef CONFIG_RPS
2854 struct softnet_data *mysd = &__get_cpu_var(softnet_data);
2855
2856 if (sd != mysd) {
2857 sd->rps_ipi_next = mysd->rps_ipi_list;
2858 mysd->rps_ipi_list = sd;
2859
2860 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
2861 return 1;
2862 }
2863#endif /* CONFIG_RPS */
2864 return 0;
2865}
2866
2867/*
2868 * enqueue_to_backlog is called to queue an skb to a per CPU backlog
2869 * queue (may be a remote CPU queue).
2870 */
2871static int enqueue_to_backlog(struct sk_buff *skb, int cpu,
2872 unsigned int *qtail)
0a9627f2 2873{
e36fa2f7 2874 struct softnet_data *sd;
0a9627f2
TH
2875 unsigned long flags;
2876
2877 sd = &per_cpu(softnet_data, cpu);
2878
2879 local_irq_save(flags);
2880
2881 rps_lock(sd);
2882 if (skb_queue_len(&sd->input_pkt_queue) <= netdev_max_backlog) {
2883 if (skb_queue_len(&sd->input_pkt_queue)) {
0a9627f2 2884enqueue:
2885 __skb_queue_tail(&sd->input_pkt_queue, skb);
2886 input_queue_tail_incr_save(sd, qtail);
2887 rps_unlock(sd);
2888 local_irq_restore(flags);
2889 return NET_RX_SUCCESS;
2890 }
2891
2892 /* Schedule NAPI for backlog device
2893 * We can use non atomic operation since we own the queue lock
2894 */
2895 if (!__test_and_set_bit(NAPI_STATE_SCHED, &sd->backlog.state)) {
2896 if (!rps_ipi_queued(sd))
2897 ____napi_schedule(sd, &sd->backlog);
2898 }
2899 goto enqueue;
2900 }
2901
2902 sd->dropped++;
2903 rps_unlock(sd);
2904
2905 local_irq_restore(flags);
2906
2907 atomic_long_inc(&skb->dev->rx_dropped);
2908 kfree_skb(skb);
2909 return NET_RX_DROP;
2910}
2911
2912/**
2913 * netif_rx - post buffer to the network code
2914 * @skb: buffer to post
2915 *
2916 * This function receives a packet from a device driver and queues it for
2917 * the upper (protocol) levels to process. It always succeeds. The buffer
2918 * may be dropped during processing for congestion control or by the
2919 * protocol layers.
2920 *
2921 * return values:
2922 * NET_RX_SUCCESS (no congestion)
2923 * NET_RX_DROP (packet was dropped)
2924 *
2925 */
2926
2927int netif_rx(struct sk_buff *skb)
2928{
2929 int ret;
2930
2931 /* if netpoll wants it, pretend we never saw it */
2932 if (netpoll_rx(skb))
2933 return NET_RX_DROP;
2934
2935 net_timestamp_check(netdev_tstamp_prequeue, skb);
2936
2937 trace_netif_rx(skb);
2938 #ifdef CONFIG_RPS
2939 if (static_branch(&rps_needed)) {
2940 struct rps_dev_flow voidflow, *rflow = &voidflow;
2941 int cpu;
2942
2943 preempt_disable();
2944 rcu_read_lock();
2945
2946 cpu = get_rps_cpu(skb->dev, skb, &rflow);
2947 if (cpu < 0)
2948 cpu = smp_processor_id();
2949
2950 ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
2951
2952 rcu_read_unlock();
2953 preempt_enable();
2954 } else
2955#endif
2956 {
2957 unsigned int qtail;
2958 ret = enqueue_to_backlog(skb, get_cpu(), &qtail);
2959 put_cpu();
2960 }
2961 return ret;
2962 }
2963 EXPORT_SYMBOL(netif_rx);
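/*
 * Editor's example (hypothetical non-NAPI driver): handing a received
 * frame to the stack from an RX interrupt. eth_type_trans() fills in
 * skb->protocol and pkt_type before netif_rx() queues to the backlog.
 */
static void example_rx_interrupt(struct net_device *dev,
				 const void *data, unsigned int len)
{
	struct sk_buff *skb = netdev_alloc_skb_ip_align(dev, len);

	if (!skb) {
		dev->stats.rx_dropped++;
		return;
	}
	memcpy(skb_put(skb, len), data, len);
	skb->protocol = eth_type_trans(skb, dev);
	netif_rx(skb);	/* always "succeeds"; may drop under load */
}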
2964
2965int netif_rx_ni(struct sk_buff *skb)
2966{
2967 int err;
2968
2969 preempt_disable();
2970 err = netif_rx(skb);
2971 if (local_softirq_pending())
2972 do_softirq();
2973 preempt_enable();
2974
2975 return err;
2976}
2977EXPORT_SYMBOL(netif_rx_ni);
2978
2979static void net_tx_action(struct softirq_action *h)
2980{
2981 struct softnet_data *sd = &__get_cpu_var(softnet_data);
2982
2983 if (sd->completion_queue) {
2984 struct sk_buff *clist;
2985
2986 local_irq_disable();
2987 clist = sd->completion_queue;
2988 sd->completion_queue = NULL;
2989 local_irq_enable();
2990
2991 while (clist) {
2992 struct sk_buff *skb = clist;
2993 clist = clist->next;
2994
2995 WARN_ON(atomic_read(&skb->users));
2996 trace_kfree_skb(skb, net_tx_action);
2997 __kfree_skb(skb);
2998 }
2999 }
3000
3001 if (sd->output_queue) {
3002 struct Qdisc *head;
3003
3004 local_irq_disable();
3005 head = sd->output_queue;
3006 sd->output_queue = NULL;
3007 sd->output_queue_tailp = &sd->output_queue;
3008 local_irq_enable();
3009
3010 while (head) {
3011 struct Qdisc *q = head;
3012 spinlock_t *root_lock;
3013
3014 head = head->next_sched;
3015
3016 root_lock = qdisc_lock(q);
3017 if (spin_trylock(root_lock)) {
3018 smp_mb__before_clear_bit();
3019 clear_bit(__QDISC_STATE_SCHED,
3020 &q->state);
3021 qdisc_run(q);
3022 spin_unlock(root_lock);
3023 } else {
3024 if (!test_bit(__QDISC_STATE_DEACTIVATED,
3025 &q->state)) {
3026 __netif_reschedule(q);
3027 } else {
3028 smp_mb__before_clear_bit();
3029 clear_bit(__QDISC_STATE_SCHED,
3030 &q->state);
3031 }
3032 }
3033 }
3034 }
3035}
3036
3037#if (defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)) && \
3038 (defined(CONFIG_ATM_LANE) || defined(CONFIG_ATM_LANE_MODULE))
3039/* This hook is defined here for ATM LANE */
3040int (*br_fdb_test_addr_hook)(struct net_device *dev,
3041 unsigned char *addr) __read_mostly;
3042 EXPORT_SYMBOL_GPL(br_fdb_test_addr_hook);
3043 #endif
3044
3045#ifdef CONFIG_NET_CLS_ACT
3046 /* TODO: Maybe we should just force sch_ingress to be compiled in
3047 * when CONFIG_NET_CLS_ACT is? Otherwise we pay for some useless
3048 * instructions (a compare and two extra stores) right now if we
3049 * don't have it on but do have CONFIG_NET_CLS_ACT.
3050 * NOTE: This doesn't stop any functionality; if you don't have
3051 * the ingress scheduler, you just can't add policies on ingress.
3052 *
3053 */
3054 static int ing_filter(struct sk_buff *skb, struct netdev_queue *rxq)
3055 {
3056 struct net_device *dev = skb->dev;
3057 u32 ttl = G_TC_RTTL(skb->tc_verd);
3058 int result = TC_ACT_OK;
3059 struct Qdisc *q;
3060
3061 if (unlikely(MAX_RED_LOOP < ttl++)) {
3062 if (net_ratelimit())
3063 pr_warn("Redir loop detected Dropping packet (%d->%d)\n",
3064 skb->skb_iif, dev->ifindex);
3065 return TC_ACT_SHOT;
3066 }
3067
3068 skb->tc_verd = SET_TC_RTTL(skb->tc_verd, ttl);
3069 skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_INGRESS);
3070
3071 q = rxq->qdisc;
3072 if (q != &noop_qdisc) {
3073 spin_lock(qdisc_lock(q));
3074 if (likely(!test_bit(__QDISC_STATE_DEACTIVATED, &q->state)))
3075 result = qdisc_enqueue_root(skb, q);
3076 spin_unlock(qdisc_lock(q));
3077 }
3078
3079 return result;
3080}
3081
3082static inline struct sk_buff *handle_ing(struct sk_buff *skb,
3083 struct packet_type **pt_prev,
3084 int *ret, struct net_device *orig_dev)
3085{
3086 struct netdev_queue *rxq = rcu_dereference(skb->dev->ingress_queue);
3087
3088 if (!rxq || rxq->qdisc == &noop_qdisc)
3089 goto out;
3090
3091 if (*pt_prev) {
3092 *ret = deliver_skb(skb, *pt_prev, orig_dev);
3093 *pt_prev = NULL;
3094 }
3095
3096 switch (ing_filter(skb, rxq)) {
3097 case TC_ACT_SHOT:
3098 case TC_ACT_STOLEN:
3099 kfree_skb(skb);
3100 return NULL;
3101 }
3102
3103out:
3104 skb->tc_verd = 0;
3105 return skb;
3106}
3107#endif
3108
3109/**
3110 * netdev_rx_handler_register - register receive handler
3111 * @dev: device to register a handler for
3112 * @rx_handler: receive handler to register
3113 * @rx_handler_data: data pointer that is used by rx handler
3114 *
3115 * Register a receive handler for a device. This handler will then be
3116 * called from __netif_receive_skb. A negative errno code is returned
3117 * on a failure.
3118 *
3119 * The caller must hold the rtnl_mutex.
3120 *
3121 * For a general description of rx_handler, see enum rx_handler_result.
3122 */
3123int netdev_rx_handler_register(struct net_device *dev,
3124 rx_handler_func_t *rx_handler,
3125 void *rx_handler_data)
3126{
3127 ASSERT_RTNL();
3128
3129 if (dev->rx_handler)
3130 return -EBUSY;
3131
3132 rcu_assign_pointer(dev->rx_handler_data, rx_handler_data);
3133 rcu_assign_pointer(dev->rx_handler, rx_handler);
3134
3135 return 0;
3136}
3137EXPORT_SYMBOL_GPL(netdev_rx_handler_register);
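/*
 * Editor's example (hypothetical handler): the registration pattern used
 * by bridge- or bonding-like code. The handler body is invented; the
 * return values follow enum rx_handler_result as referenced above.
 */
static rx_handler_result_t example_handle_frame(struct sk_buff **pskb)
{
	/* inspect or redirect *pskb here */
	return RX_HANDLER_PASS;	/* let normal delivery continue */
}

static int example_attach(struct net_device *dev, void *priv)
{
	int err;

	rtnl_lock();
	err = netdev_rx_handler_register(dev, example_handle_frame, priv);
	rtnl_unlock();
	return err;	/* -EBUSY if a handler is already installed */
}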
3138
3139/**
3140 * netdev_rx_handler_unregister - unregister receive handler
3141 * @dev: device to unregister a handler from
3142 *
3143 * Unregister a receive handler from a device.
3144 *
3145 * The caller must hold the rtnl_mutex.
3146 */
3147void netdev_rx_handler_unregister(struct net_device *dev)
3148{
3149
3150 ASSERT_RTNL();
3151 RCU_INIT_POINTER(dev->rx_handler, NULL);
3152 RCU_INIT_POINTER(dev->rx_handler_data, NULL);
3153}
3154EXPORT_SYMBOL_GPL(netdev_rx_handler_unregister);
3155
3156 static int __netif_receive_skb(struct sk_buff *skb)
3157{
3158 struct packet_type *ptype, *pt_prev;
3159 rx_handler_func_t *rx_handler;
3160 struct net_device *orig_dev;
3161 struct net_device *null_or_dev;
3162 bool deliver_exact = false;
3163 int ret = NET_RX_DROP;
3164 __be16 type;
3165
3166 net_timestamp_check(!netdev_tstamp_prequeue, skb);
3167
3168 trace_netif_receive_skb(skb);
3169
3170 /* if we've gotten here through NAPI, check netpoll */
3171 if (netpoll_receive_skb(skb))
3172 return NET_RX_DROP;
3173
3174 if (!skb->skb_iif)
3175 skb->skb_iif = skb->dev->ifindex;
3176 orig_dev = skb->dev;
3177
3178 skb_reset_network_header(skb);
3179 skb_reset_transport_header(skb);
3180 skb_reset_mac_len(skb);
3181
3182 pt_prev = NULL;
3183
3184 rcu_read_lock();
3185
3186another_round:
3187
3188 __this_cpu_inc(softnet_data.processed);
3189
3190 if (skb->protocol == cpu_to_be16(ETH_P_8021Q)) {
3191 skb = vlan_untag(skb);
3192 if (unlikely(!skb))
3193 goto out;
3194 }
3195
3196#ifdef CONFIG_NET_CLS_ACT
3197 if (skb->tc_verd & TC_NCLS) {
3198 skb->tc_verd = CLR_TC_NCLS(skb->tc_verd);
3199 goto ncls;
3200 }
3201#endif
3202
3203 list_for_each_entry_rcu(ptype, &ptype_all, list) {
3204 if (!ptype->dev || ptype->dev == skb->dev) {
3205 if (pt_prev)
3206 ret = deliver_skb(skb, pt_prev, orig_dev);
3207 pt_prev = ptype;
3208 }
3209 }
3210
3211#ifdef CONFIG_NET_CLS_ACT
3212 skb = handle_ing(skb, &pt_prev, &ret, orig_dev);
3213 if (!skb)
3214 goto out;
3215ncls:
3216#endif
3217
3218 rx_handler = rcu_dereference(skb->dev->rx_handler);
3219 if (vlan_tx_tag_present(skb)) {
3220 if (pt_prev) {
3221 ret = deliver_skb(skb, pt_prev, orig_dev);
3222 pt_prev = NULL;
3223 }
3224 if (vlan_do_receive(&skb, !rx_handler))
3225 goto another_round;
3226 else if (unlikely(!skb))
3227 goto out;
3228 }
3229
3230 if (rx_handler) {
3231 if (pt_prev) {
3232 ret = deliver_skb(skb, pt_prev, orig_dev);
3233 pt_prev = NULL;
3234 }
3235 switch (rx_handler(&skb)) {
3236 case RX_HANDLER_CONSUMED:
3237 goto out;
3238 case RX_HANDLER_ANOTHER:
3239 goto another_round;
3240 case RX_HANDLER_EXACT:
3241 deliver_exact = true;
3242 case RX_HANDLER_PASS:
3243 break;
3244 default:
3245 BUG();
3246 }
3247 }
3248
3249 /* deliver only exact match when indicated */
3250 null_or_dev = deliver_exact ? skb->dev : NULL;
3251
3252 type = skb->protocol;
3253 list_for_each_entry_rcu(ptype,
3254 &ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) {
3255 if (ptype->type == type &&
3256 (ptype->dev == null_or_dev || ptype->dev == skb->dev ||
3257 ptype->dev == orig_dev)) {
3258 if (pt_prev)
3259 ret = deliver_skb(skb, pt_prev, orig_dev);
3260 pt_prev = ptype;
3261 }
3262 }
3263
3264 if (pt_prev) {
3265 ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
3266 } else {
3267 atomic_long_inc(&skb->dev->rx_dropped);
3268 kfree_skb(skb);
3269 /* Jamal, now you will not be able to escape explaining
3270 * me how you were going to use this. :-)
3271 */
3272 ret = NET_RX_DROP;
3273 }
3274
3275out:
3276 rcu_read_unlock();
3277 return ret;
3278}
3279
3280/**
3281 * netif_receive_skb - process receive buffer from network
3282 * @skb: buffer to process
3283 *
3284 * netif_receive_skb() is the main receive data processing function.
3285 * It always succeeds. The buffer may be dropped during processing
3286 * for congestion control or by the protocol layers.
3287 *
3288 * This function may only be called from softirq context and interrupts
3289 * should be enabled.
3290 *
3291 * Return values (usually ignored):
3292 * NET_RX_SUCCESS: no congestion
3293 * NET_RX_DROP: packet was dropped
3294 */
3295int netif_receive_skb(struct sk_buff *skb)
3296{
3297 net_timestamp_check(netdev_tstamp_prequeue, skb);
3298
3299 if (skb_defer_rx_timestamp(skb))
3300 return NET_RX_SUCCESS;
3301
3302 #ifdef CONFIG_RPS
3303 if (static_branch(&rps_needed)) {
3304 struct rps_dev_flow voidflow, *rflow = &voidflow;
3305 int cpu, ret;
3306
3307 rcu_read_lock();
3308
3309 cpu = get_rps_cpu(skb->dev, skb, &rflow);
3310
3311 if (cpu >= 0) {
3312 ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
3313 rcu_read_unlock();
3314 return ret;
3315 }
3316 rcu_read_unlock();
3317 }
3318 #endif
3319 return __netif_receive_skb(skb);
3320 }
3321 EXPORT_SYMBOL(netif_receive_skb);
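/*
 * Editor's example (hypothetical driver): a NAPI poll routine is the
 * typical caller of netif_receive_skb(). example_next_rx_skb() stands in
 * for a driver's ring-buffer accessor and is not a real kernel function.
 */
static struct sk_buff *example_next_rx_skb(struct napi_struct *napi);

static int example_poll(struct napi_struct *napi, int budget)
{
	int work = 0;
	struct sk_buff *skb;

	while (work < budget && (skb = example_next_rx_skb(napi)) != NULL) {
		netif_receive_skb(skb);	/* softirq context, IRQs enabled */
		work++;
	}
	if (work < budget)
		napi_complete(napi);
	return work;
}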
3322
3323/* Network device is going away, flush any packets still pending
3324 * Called with irqs disabled.
3325 */
3326 static void flush_backlog(void *arg)
3327 {
3328 struct net_device *dev = arg;
3329 struct softnet_data *sd = &__get_cpu_var(softnet_data);
3330 struct sk_buff *skb, *tmp;
3331
3332 rps_lock(sd);
3333 skb_queue_walk_safe(&sd->input_pkt_queue, skb, tmp) {
3334 if (skb->dev == dev) {
3335 __skb_unlink(skb, &sd->input_pkt_queue);
3336 kfree_skb(skb);
3337 input_queue_head_incr(sd);
3338 }
3339 }
3340 rps_unlock(sd);
3341
3342 skb_queue_walk_safe(&sd->process_queue, skb, tmp) {
3343 if (skb->dev == dev) {
3344 __skb_unlink(skb, &sd->process_queue);
3345 kfree_skb(skb);
3346 input_queue_head_incr(sd);
3347 }
3348 }
3349}
3350
3351static int napi_gro_complete(struct sk_buff *skb)
3352{
3353 struct packet_type *ptype;
3354 __be16 type = skb->protocol;
3355 struct list_head *head = &ptype_base[ntohs(type) & PTYPE_HASH_MASK];
3356 int err = -ENOENT;
3357
3358 if (NAPI_GRO_CB(skb)->count == 1) {
3359 skb_shinfo(skb)->gso_size = 0;
3360 goto out;
3361 }
3362
3363 rcu_read_lock();
3364 list_for_each_entry_rcu(ptype, head, list) {
3365 if (ptype->type != type || ptype->dev || !ptype->gro_complete)
3366 continue;
3367
3368 err = ptype->gro_complete(skb);
3369 break;
3370 }
3371 rcu_read_unlock();
3372
3373 if (err) {
3374 WARN_ON(&ptype->list == head);
3375 kfree_skb(skb);
3376 return NET_RX_SUCCESS;
3377 }
3378
3379out:
3380 return netif_receive_skb(skb);
3381}
3382
3383 inline void napi_gro_flush(struct napi_struct *napi)
3384{
3385 struct sk_buff *skb, *next;
3386
3387 for (skb = napi->gro_list; skb; skb = next) {
3388 next = skb->next;
3389 skb->next = NULL;
3390 napi_gro_complete(skb);
3391 }
3392
3393 napi->gro_count = 0;
3394 napi->gro_list = NULL;
3395}
3396 EXPORT_SYMBOL(napi_gro_flush);
3397
3398 enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
3399{
3400 struct sk_buff **pp = NULL;
3401 struct packet_type *ptype;
3402 __be16 type = skb->protocol;
3403 struct list_head *head = &ptype_base[ntohs(type) & PTYPE_HASH_MASK];
3404 int same_flow;
3405 int mac_len;
3406 enum gro_result ret;
3407
3408 if (!(skb->dev->features & NETIF_F_GRO) || netpoll_rx_on(skb))
3409 goto normal;
3410
3411 if (skb_is_gso(skb) || skb_has_frag_list(skb))
3412 goto normal;
3413
3414 rcu_read_lock();
3415 list_for_each_entry_rcu(ptype, head, list) {
3416 if (ptype->type != type || ptype->dev || !ptype->gro_receive)
3417 continue;
3418
3419 skb_set_network_header(skb, skb_gro_offset(skb));
3420 mac_len = skb->network_header - skb->mac_header;
3421 skb->mac_len = mac_len;
3422 NAPI_GRO_CB(skb)->same_flow = 0;
3423 NAPI_GRO_CB(skb)->flush = 0;
3424 NAPI_GRO_CB(skb)->free = 0;
3425
3426 pp = ptype->gro_receive(&napi->gro_list, skb);
3427 break;
3428 }
3429 rcu_read_unlock();
3430
3431 if (&ptype->list == head)
3432 goto normal;
3433
3434 same_flow = NAPI_GRO_CB(skb)->same_flow;
3435 ret = NAPI_GRO_CB(skb)->free ? GRO_MERGED_FREE : GRO_MERGED;
3436
3437 if (pp) {
3438 struct sk_buff *nskb = *pp;
3439
3440 *pp = nskb->next;
3441 nskb->next = NULL;
3442 napi_gro_complete(nskb);
3443 napi->gro_count--;
3444 }
3445
3446 if (same_flow)
3447 goto ok;
3448
3449 if (NAPI_GRO_CB(skb)->flush || napi->gro_count >= MAX_GRO_SKBS)
3450 goto normal;
3451
3452 napi->gro_count++;
3453 NAPI_GRO_CB(skb)->count = 1;
3454 skb_shinfo(skb)->gso_size = skb_gro_len(skb);
3455 skb->next = napi->gro_list;
3456 napi->gro_list = skb;
3457 ret = GRO_HELD;
3458
3459 pull:
3460 if (skb_headlen(skb) < skb_gro_offset(skb)) {
3461 int grow = skb_gro_offset(skb) - skb_headlen(skb);
3462
3463 BUG_ON(skb->end - skb->tail < grow);
3464
3465 memcpy(skb_tail_pointer(skb), NAPI_GRO_CB(skb)->frag0, grow);
3466
3467 skb->tail += grow;
3468 skb->data_len -= grow;
3469
3470 skb_shinfo(skb)->frags[0].page_offset += grow;
3471 skb_frag_size_sub(&skb_shinfo(skb)->frags[0], grow);
3472
3473 if (unlikely(!skb_frag_size(&skb_shinfo(skb)->frags[0]))) {
3474 skb_frag_unref(skb, 0);
3475 memmove(skb_shinfo(skb)->frags,
3476 skb_shinfo(skb)->frags + 1,
3477 --skb_shinfo(skb)->nr_frags * sizeof(skb_frag_t));
3478 }
3479 }
3480
3481 ok:
3482 return ret;
3483
3484normal:
3485 ret = GRO_NORMAL;
3486 goto pull;
3487 }
3488EXPORT_SYMBOL(dev_gro_receive);
3489
3490 static inline gro_result_t
3491 __napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
3492{
3493 struct sk_buff *p;
3494 unsigned int maclen = skb->dev->hard_header_len;
3495
3496 for (p = napi->gro_list; p; p = p->next) {
3497 unsigned long diffs;
3498
3499 diffs = (unsigned long)p->dev ^ (unsigned long)skb->dev;
3500 diffs |= p->vlan_tci ^ skb->vlan_tci;
3501 if (maclen == ETH_HLEN)
3502 diffs |= compare_ether_header(skb_mac_header(p),
3503 skb_gro_mac_header(skb));
3504 else if (!diffs)
3505 diffs = memcmp(skb_mac_header(p),
3506 skb_gro_mac_header(skb),
3507 maclen);
40d0802b 3508 NAPI_GRO_CB(p)->same_flow = !diffs;
96e93eab
HX
3509 NAPI_GRO_CB(p)->flush = 0;
3510 }
3511
3512 return dev_gro_receive(napi, skb);
3513}
5d38a079 3514
c7c4b3b6 3515gro_result_t napi_skb_finish(gro_result_t ret, struct sk_buff *skb)
5d38a079 3516{
5d0d9be8
HX
3517 switch (ret) {
3518 case GRO_NORMAL:
c7c4b3b6
BH
3519 if (netif_receive_skb(skb))
3520 ret = GRO_DROP;
3521 break;
5d38a079 3522
5d0d9be8 3523 case GRO_DROP:
5d0d9be8 3524 case GRO_MERGED_FREE:
5d38a079
HX
3525 kfree_skb(skb);
3526 break;
5b252f0c
BH
3527
3528 case GRO_HELD:
3529 case GRO_MERGED:
3530 break;
5d38a079
HX
3531 }
3532
c7c4b3b6 3533 return ret;
5d0d9be8
HX
3534}
3535EXPORT_SYMBOL(napi_skb_finish);
3536
78a478d0
HX
3537void skb_gro_reset_offset(struct sk_buff *skb)
3538{
3539 NAPI_GRO_CB(skb)->data_offset = 0;
3540 NAPI_GRO_CB(skb)->frag0 = NULL;
7489594c 3541 NAPI_GRO_CB(skb)->frag0_len = 0;
78a478d0 3542
78d3fd0b 3543 if (skb->mac_header == skb->tail &&
ea2ab693 3544 !PageHighMem(skb_frag_page(&skb_shinfo(skb)->frags[0]))) {
78a478d0 3545 NAPI_GRO_CB(skb)->frag0 =
ea2ab693 3546 skb_frag_address(&skb_shinfo(skb)->frags[0]);
9e903e08 3547 NAPI_GRO_CB(skb)->frag0_len = skb_frag_size(&skb_shinfo(skb)->frags[0]);
7489594c 3548 }
78a478d0
HX
3549}
3550EXPORT_SYMBOL(skb_gro_reset_offset);
3551
c7c4b3b6 3552gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
5d0d9be8 3553{
86911732
HX
3554 skb_gro_reset_offset(skb);
3555
5d0d9be8 3556 return napi_skb_finish(__napi_gro_receive(napi, skb), skb);
d565b0a1
HX
3557}
3558EXPORT_SYMBOL(napi_gro_receive);
3559
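/*
 * Illustrative sketch, not part of this file: a NAPI driver's poll
 * routine feeds received skbs into GRO with napi_gro_receive() instead
 * of netif_receive_skb(). The example_* names are hypothetical; only
 * the calls into the core are real APIs.
 */
#if 0
static int example_poll(struct napi_struct *napi, int budget)
{
	struct example_adapter *adap =
		container_of(napi, struct example_adapter, napi);
	struct sk_buff *skb;
	int done = 0;

	while (done < budget && (skb = example_rx_next(adap)) != NULL) {
		skb->protocol = eth_type_trans(skb, adap->netdev);
		napi_gro_receive(napi, skb);	/* merge into gro_list or deliver */
		done++;
	}
	if (done < budget)
		napi_complete(napi);	/* flushes gro_list via napi_gro_flush() */
	return done;
}
#endif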
d0c2b0d2 3560static void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb)
96e93eab 3561{
96e93eab
HX
3562 __skb_pull(skb, skb_headlen(skb));
3563 skb_reserve(skb, NET_IP_ALIGN - skb_headroom(skb));
3701e513 3564 skb->vlan_tci = 0;
66c46d74 3565 skb->dev = napi->dev;
6d152e23 3566 skb->skb_iif = 0;
96e93eab
HX
3567
3568 napi->skb = skb;
3569}
96e93eab 3570
76620aaf 3571struct sk_buff *napi_get_frags(struct napi_struct *napi)
5d38a079 3572{
5d38a079 3573 struct sk_buff *skb = napi->skb;
5d38a079
HX
3574
3575 if (!skb) {
89d71a66
ED
3576 skb = netdev_alloc_skb_ip_align(napi->dev, GRO_MAX_HEAD);
3577 if (skb)
3578 napi->skb = skb;
80595d59 3579 }
96e93eab
HX
3580 return skb;
3581}
76620aaf 3582EXPORT_SYMBOL(napi_get_frags);
96e93eab 3583
c7c4b3b6
BH
3584gro_result_t napi_frags_finish(struct napi_struct *napi, struct sk_buff *skb,
3585 gro_result_t ret)
96e93eab 3586{
5d0d9be8
HX
3587 switch (ret) {
3588 case GRO_NORMAL:
86911732 3589 case GRO_HELD:
e76b69cc 3590 skb->protocol = eth_type_trans(skb, skb->dev);
86911732 3591
c7c4b3b6
BH
3592 if (ret == GRO_HELD)
3593 skb_gro_pull(skb, -ETH_HLEN);
3594 else if (netif_receive_skb(skb))
3595 ret = GRO_DROP;
86911732 3596 break;
5d38a079 3597
5d0d9be8 3598 case GRO_DROP:
5d0d9be8
HX
3599 case GRO_MERGED_FREE:
3600 napi_reuse_skb(napi, skb);
3601 break;
5b252f0c
BH
3602
3603 case GRO_MERGED:
3604 break;
5d0d9be8 3605 }
5d38a079 3606
c7c4b3b6 3607 return ret;
5d38a079 3608}
5d0d9be8
HX
3609EXPORT_SYMBOL(napi_frags_finish);
3610
76620aaf
HX
3611struct sk_buff *napi_frags_skb(struct napi_struct *napi)
3612{
3613 struct sk_buff *skb = napi->skb;
3614 struct ethhdr *eth;
a5b1cf28
HX
3615 unsigned int hlen;
3616 unsigned int off;
76620aaf
HX
3617
3618 napi->skb = NULL;
3619
3620 skb_reset_mac_header(skb);
3621 skb_gro_reset_offset(skb);
3622
a5b1cf28
HX
3623 off = skb_gro_offset(skb);
3624 hlen = off + sizeof(*eth);
3625 eth = skb_gro_header_fast(skb, off);
3626 if (skb_gro_header_hard(skb, hlen)) {
3627 eth = skb_gro_header_slow(skb, hlen, off);
3628 if (unlikely(!eth)) {
3629 napi_reuse_skb(napi, skb);
3630 skb = NULL;
3631 goto out;
3632 }
76620aaf
HX
3633 }
3634
3635 skb_gro_pull(skb, sizeof(*eth));
3636
3637 /*
3638 * This works because the only protocols we care about don't require
3639 * special handling. We'll fix it up properly at the end.
3640 */
3641 skb->protocol = eth->h_proto;
3642
3643out:
3644 return skb;
3645}
3646EXPORT_SYMBOL(napi_frags_skb);
3647
c7c4b3b6 3648gro_result_t napi_gro_frags(struct napi_struct *napi)
5d0d9be8 3649{
76620aaf 3650 struct sk_buff *skb = napi_frags_skb(napi);
5d0d9be8
HX
3651
3652 if (!skb)
c7c4b3b6 3653 return GRO_DROP;
5d0d9be8
HX
3654
3655 return napi_frags_finish(napi, skb, __napi_gro_receive(napi, skb));
3656}
5d38a079
HX
3657EXPORT_SYMBOL(napi_gro_frags);
3658
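/*
 * Illustrative sketch, not part of this file: drivers that receive
 * directly into page fragments use the napi_get_frags()/napi_gro_frags()
 * pair instead of building linear skbs. example_* names and the truesize
 * accounting are hypothetical simplifications.
 */
#if 0
static void example_rx_page(struct napi_struct *napi, struct page *page,
			    unsigned int off, unsigned int len)
{
	struct sk_buff *skb = napi_get_frags(napi);

	if (!skb)
		return;			/* allocation failed, drop the frame */

	skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags, page, off, len);
	skb->len += len;
	skb->data_len += len;
	skb->truesize += len;

	/* napi_frags_skb() will pull the ethernet header from frag0 */
	napi_gro_frags(napi);
}
#endif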
e326bed2
ED
3659/*
3660 * net_rps_action sends any pending IPIs for RPS.
3661 * Note: called with local irq disabled, but exits with local irq enabled.
3662 */
3663static void net_rps_action_and_irq_enable(struct softnet_data *sd)
3664{
3665#ifdef CONFIG_RPS
3666 struct softnet_data *remsd = sd->rps_ipi_list;
3667
3668 if (remsd) {
3669 sd->rps_ipi_list = NULL;
3670
3671 local_irq_enable();
3672
3673 /* Send pending IPIs to kick RPS processing on remote CPUs. */
3674 while (remsd) {
3675 struct softnet_data *next = remsd->rps_ipi_next;
3676
3677 if (cpu_online(remsd->cpu))
3678 __smp_call_function_single(remsd->cpu,
3679 &remsd->csd, 0);
3680 remsd = next;
3681 }
3682 } else
3683#endif
3684 local_irq_enable();
3685}
3686
bea3348e 3687static int process_backlog(struct napi_struct *napi, int quota)
1da177e4
LT
3688{
3689 int work = 0;
eecfd7c4 3690 struct softnet_data *sd = container_of(napi, struct softnet_data, backlog);
1da177e4 3691
e326bed2
ED
3692#ifdef CONFIG_RPS
3693 /* Check if we have pending IPIs; it's better to send them now
3694 * than to wait for net_rx_action() to end.
3695 */
3696 if (sd->rps_ipi_list) {
3697 local_irq_disable();
3698 net_rps_action_and_irq_enable(sd);
3699 }
3700#endif
bea3348e 3701 napi->weight = weight_p;
6e7676c1
CG
3702 local_irq_disable();
3703 while (work < quota) {
1da177e4 3704 struct sk_buff *skb;
6e7676c1
CG
3705 unsigned int qlen;
3706
3707 while ((skb = __skb_dequeue(&sd->process_queue))) {
3708 local_irq_enable();
3709 __netif_receive_skb(skb);
6e7676c1 3710 local_irq_disable();
76cc8b13
TH
3711 input_queue_head_incr(sd);
3712 if (++work >= quota) {
3713 local_irq_enable();
3714 return work;
3715 }
6e7676c1 3716 }
1da177e4 3717
e36fa2f7 3718 rps_lock(sd);
6e7676c1 3719 qlen = skb_queue_len(&sd->input_pkt_queue);
76cc8b13 3720 if (qlen)
6e7676c1
CG
3721 skb_queue_splice_tail_init(&sd->input_pkt_queue,
3722 &sd->process_queue);
76cc8b13 3723
6e7676c1 3724 if (qlen < quota - work) {
eecfd7c4
ED
3725 /*
3726 * Inline a custom version of __napi_complete().
3727 * Only the current cpu owns and manipulates this napi,
3728 * and NAPI_STATE_SCHED is the only possible flag set on backlog,
3729 * so we can use a plain write instead of clear_bit(),
3730 * and we don't need an smp_mb() memory barrier.
3731 */
3732 list_del(&napi->poll_list);
3733 napi->state = 0;
3734
6e7676c1 3735 quota = work + qlen;
bea3348e 3736 }
e36fa2f7 3737 rps_unlock(sd);
6e7676c1
CG
3738 }
3739 local_irq_enable();
1da177e4 3740
bea3348e
SH
3741 return work;
3742}
1da177e4 3743
bea3348e
SH
3744/**
3745 * __napi_schedule - schedule for receive
c4ea43c5 3746 * @n: entry to schedule
bea3348e
SH
3747 *
3748 * The entry's receive function will be scheduled to run
3749 */
b5606c2d 3750void __napi_schedule(struct napi_struct *n)
bea3348e
SH
3751{
3752 unsigned long flags;
1da177e4 3753
bea3348e 3754 local_irq_save(flags);
eecfd7c4 3755 ____napi_schedule(&__get_cpu_var(softnet_data), n);
bea3348e 3756 local_irq_restore(flags);
1da177e4 3757}
bea3348e
SH
3758EXPORT_SYMBOL(__napi_schedule);
3759
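/*
 * Illustrative sketch, not part of this file: a device interrupt handler
 * masks further interrupts and defers the work to NAPI; napi_schedule()
 * funnels into __napi_schedule() above. example_* names are hypothetical.
 */
#if 0
static irqreturn_t example_intr(int irq, void *dev_id)
{
	struct example_adapter *adap = dev_id;

	example_mask_irqs(adap);	/* hypothetical hardware helper */
	napi_schedule(&adap->napi);	/* puts us on this cpu's poll_list */
	return IRQ_HANDLED;
}
#endif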
d565b0a1
HX
3760void __napi_complete(struct napi_struct *n)
3761{
3762 BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state));
3763 BUG_ON(n->gro_list);
3764
3765 list_del(&n->poll_list);
3766 smp_mb__before_clear_bit();
3767 clear_bit(NAPI_STATE_SCHED, &n->state);
3768}
3769EXPORT_SYMBOL(__napi_complete);
3770
3771void napi_complete(struct napi_struct *n)
3772{
3773 unsigned long flags;
3774
3775 /*
3776 * Don't let napi dequeue from the cpu poll list,
3777 * just in case it's running on a different cpu.
3778 */
3779 if (unlikely(test_bit(NAPI_STATE_NPSVC, &n->state)))
3780 return;
3781
3782 napi_gro_flush(n);
3783 local_irq_save(flags);
3784 __napi_complete(n);
3785 local_irq_restore(flags);
3786}
3787EXPORT_SYMBOL(napi_complete);
3788
3789void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
3790 int (*poll)(struct napi_struct *, int), int weight)
3791{
3792 INIT_LIST_HEAD(&napi->poll_list);
4ae5544f 3793 napi->gro_count = 0;
d565b0a1 3794 napi->gro_list = NULL;
5d38a079 3795 napi->skb = NULL;
d565b0a1
HX
3796 napi->poll = poll;
3797 napi->weight = weight;
3798 list_add(&napi->dev_list, &dev->napi_list);
d565b0a1 3799 napi->dev = dev;
5d38a079 3800#ifdef CONFIG_NETPOLL
d565b0a1
HX
3801 spin_lock_init(&napi->poll_lock);
3802 napi->poll_owner = -1;
3803#endif
3804 set_bit(NAPI_STATE_SCHED, &napi->state);
3805}
3806EXPORT_SYMBOL(netif_napi_add);
3807
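/*
 * Illustrative sketch, not part of this file: netif_napi_add() is called
 * once at probe time, before register_netdev(); 64 is the weight used by
 * most ethernet drivers. Since NAPI_STATE_SCHED is set above, the driver
 * must napi_enable() (typically from ndo_open) before scheduling.
 * example_* names are hypothetical.
 */
#if 0
static int example_probe(struct example_adapter *adap)
{
	netif_napi_add(adap->netdev, &adap->napi, example_poll, 64);
	return register_netdev(adap->netdev);
}

static int example_open(struct net_device *netdev)
{
	struct example_adapter *adap = netdev_priv(netdev);

	napi_enable(&adap->napi);	/* clears NAPI_STATE_SCHED */
	return 0;
}
#endif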
3808void netif_napi_del(struct napi_struct *napi)
3809{
3810 struct sk_buff *skb, *next;
3811
d7b06636 3812 list_del_init(&napi->dev_list);
76620aaf 3813 napi_free_frags(napi);
d565b0a1
HX
3814
3815 for (skb = napi->gro_list; skb; skb = next) {
3816 next = skb->next;
3817 skb->next = NULL;
3818 kfree_skb(skb);
3819 }
3820
3821 napi->gro_list = NULL;
4ae5544f 3822 napi->gro_count = 0;
d565b0a1
HX
3823}
3824EXPORT_SYMBOL(netif_napi_del);
3825
1da177e4
LT
3826static void net_rx_action(struct softirq_action *h)
3827{
e326bed2 3828 struct softnet_data *sd = &__get_cpu_var(softnet_data);
24f8b238 3829 unsigned long time_limit = jiffies + 2;
51b0bded 3830 int budget = netdev_budget;
53fb95d3
MM
3831 void *have;
3832
1da177e4
LT
3833 local_irq_disable();
3834
e326bed2 3835 while (!list_empty(&sd->poll_list)) {
bea3348e
SH
3836 struct napi_struct *n;
3837 int work, weight;
1da177e4 3838
bea3348e 3839 /* If the softirq window is exhausted then punt.
24f8b238
SH
3840 * Allow this to run for 2 jiffies, which allows
3841 * an average latency of 1.5/HZ.
bea3348e 3842 */
24f8b238 3843 if (unlikely(budget <= 0 || time_after(jiffies, time_limit)))
1da177e4
LT
3844 goto softnet_break;
3845
3846 local_irq_enable();
3847
bea3348e
SH
3848 /* Even though interrupts have been re-enabled, this
3849 * access is safe because interrupts can only add new
3850 * entries to the tail of this list, and only ->poll()
3851 * calls can remove this head entry from the list.
3852 */
e326bed2 3853 n = list_first_entry(&sd->poll_list, struct napi_struct, poll_list);
1da177e4 3854
bea3348e
SH
3855 have = netpoll_poll_lock(n);
3856
3857 weight = n->weight;
3858
0a7606c1
DM
3859 /* This NAPI_STATE_SCHED test is for avoiding a race
3860 * with netpoll's poll_napi(). Only the entity which
3861 * obtains the lock and sees NAPI_STATE_SCHED set will
3862 * actually make the ->poll() call. Therefore we avoid
25985edc 3863 * accidentally calling ->poll() when NAPI is not scheduled.
0a7606c1
DM
3864 */
3865 work = 0;
4ea7e386 3866 if (test_bit(NAPI_STATE_SCHED, &n->state)) {
0a7606c1 3867 work = n->poll(n, weight);
4ea7e386
NH
3868 trace_napi_poll(n);
3869 }
bea3348e
SH
3870
3871 WARN_ON_ONCE(work > weight);
3872
3873 budget -= work;
3874
3875 local_irq_disable();
3876
3877 /* Drivers must not modify the NAPI state if they
3878 * consume the entire weight. In such cases this code
3879 * still "owns" the NAPI instance and therefore can
3880 * move the instance around on the list at-will.
3881 */
fed17f30 3882 if (unlikely(work == weight)) {
ff780cd8
HX
3883 if (unlikely(napi_disable_pending(n))) {
3884 local_irq_enable();
3885 napi_complete(n);
3886 local_irq_disable();
3887 } else
e326bed2 3888 list_move_tail(&n->poll_list, &sd->poll_list);
fed17f30 3889 }
bea3348e
SH
3890
3891 netpoll_poll_unlock(have);
1da177e4
LT
3892 }
3893out:
e326bed2 3894 net_rps_action_and_irq_enable(sd);
0a9627f2 3895
db217334
CL
3896#ifdef CONFIG_NET_DMA
3897 /*
3898 * There may not be any more sk_buffs coming right now, so push
3899 * any pending DMA copies to hardware
3900 */
2ba05622 3901 dma_issue_pending_all();
db217334 3902#endif
bea3348e 3903
1da177e4
LT
3904 return;
3905
3906softnet_break:
dee42870 3907 sd->time_squeeze++;
1da177e4
LT
3908 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
3909 goto out;
3910}
3911
d1b19dff 3912static gifconf_func_t *gifconf_list[NPROTO];
1da177e4
LT
3913
3914/**
3915 * register_gifconf - register a SIOCGIF handler
3916 * @family: Address family
3917 * @gifconf: Function handler
3918 *
3919 * Register protocol dependent address dumping routines. The handler
3920 * that is passed must not be freed or reused until it has been replaced
3921 * by another handler.
3922 */
d1b19dff 3923int register_gifconf(unsigned int family, gifconf_func_t *gifconf)
1da177e4
LT
3924{
3925 if (family >= NPROTO)
3926 return -EINVAL;
3927 gifconf_list[family] = gifconf;
3928 return 0;
3929}
d1b19dff 3930EXPORT_SYMBOL(register_gifconf);
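/*
 * Illustrative sketch, not part of this file: address families install
 * their SIOCGIFCONF helper at init time; net/ipv4/devinet.c does the
 * equivalent of this from devinet_init().
 */
#if 0
	register_gifconf(PF_INET, inet_gifconf);
#endif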
1da177e4
LT
3931
3932
3933/*
3934 * Map an interface index to its name (SIOCGIFNAME)
3935 */
3936
3937/*
3938 * We need this ioctl for efficient implementation of the
3939 * if_indextoname() function required by the IPv6 API. Without
3940 * it, we would have to search all the interfaces to find a
3941 * match. --pb
3942 */
3943
881d966b 3944static int dev_ifname(struct net *net, struct ifreq __user *arg)
1da177e4
LT
3945{
3946 struct net_device *dev;
3947 struct ifreq ifr;
3948
3949 /*
3950 * Fetch the caller's info block.
3951 */
3952
3953 if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
3954 return -EFAULT;
3955
fb699dfd
ED
3956 rcu_read_lock();
3957 dev = dev_get_by_index_rcu(net, ifr.ifr_ifindex);
1da177e4 3958 if (!dev) {
fb699dfd 3959 rcu_read_unlock();
1da177e4
LT
3960 return -ENODEV;
3961 }
3962
3963 strcpy(ifr.ifr_name, dev->name);
fb699dfd 3964 rcu_read_unlock();
1da177e4
LT
3965
3966 if (copy_to_user(arg, &ifr, sizeof(struct ifreq)))
3967 return -EFAULT;
3968 return 0;
3969}
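/*
 * Illustrative sketch, not part of this file: the userspace side of the
 * SIOCGIFNAME ioctl handled above, which is what libc's if_indextoname()
 * boils down to. Standalone program; error handling abbreviated.
 */
#if 0
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <unistd.h>

int main(void)
{
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	memset(&ifr, 0, sizeof(ifr));
	ifr.ifr_ifindex = 1;		/* ifindex 1 is usually "lo" */
	if (ioctl(fd, SIOCGIFNAME, &ifr) == 0)
		printf("ifindex 1 is %s\n", ifr.ifr_name);
	close(fd);
	return 0;
}
#endif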
3970
3971/*
3972 * Perform a SIOCGIFCONF call. This structure will change
3973 * size eventually, and there is nothing I can do about it.
3974 * Thus we will need a 'compatibility mode'.
3975 */
3976
881d966b 3977static int dev_ifconf(struct net *net, char __user *arg)
1da177e4
LT
3978{
3979 struct ifconf ifc;
3980 struct net_device *dev;
3981 char __user *pos;
3982 int len;
3983 int total;
3984 int i;
3985
3986 /*
3987 * Fetch the caller's info block.
3988 */
3989
3990 if (copy_from_user(&ifc, arg, sizeof(struct ifconf)))
3991 return -EFAULT;
3992
3993 pos = ifc.ifc_buf;
3994 len = ifc.ifc_len;
3995
3996 /*
3997 * Loop over the interfaces, and write an info block for each.
3998 */
3999
4000 total = 0;
881d966b 4001 for_each_netdev(net, dev) {
1da177e4
LT
4002 for (i = 0; i < NPROTO; i++) {
4003 if (gifconf_list[i]) {
4004 int done;
4005 if (!pos)
4006 done = gifconf_list[i](dev, NULL, 0);
4007 else
4008 done = gifconf_list[i](dev, pos + total,
4009 len - total);
4010 if (done < 0)
4011 return -EFAULT;
4012 total += done;
4013 }
4014 }
4ec93edb 4015 }
1da177e4
LT
4016
4017 /*
4018 * All done. Write the updated control block back to the caller.
4019 */
4020 ifc.ifc_len = total;
4021
4022 /*
4023 * Both BSD and Solaris return 0 here, so we do too.
4024 */
4025 return copy_to_user(arg, &ifc, sizeof(struct ifconf)) ? -EFAULT : 0;
4026}
4027
4028#ifdef CONFIG_PROC_FS
f04565dd
MM
4029
4030#define BUCKET_SPACE (32 - NETDEV_HASHBITS)
4031
4032struct dev_iter_state {
4033 struct seq_net_private p;
4034 unsigned int pos; /* bucket << BUCKET_SPACE + offset */
4035};
4036
4037#define get_bucket(x) ((x) >> BUCKET_SPACE)
4038#define get_offset(x) ((x) & ((1 << BUCKET_SPACE) - 1))
4039#define set_bucket_offset(b, o) ((b) << BUCKET_SPACE | (o))
4040
4041static inline struct net_device *dev_from_same_bucket(struct seq_file *seq)
4042{
4043 struct dev_iter_state *state = seq->private;
4044 struct net *net = seq_file_net(seq);
4045 struct net_device *dev;
4046 struct hlist_node *p;
4047 struct hlist_head *h;
4048 unsigned int count, bucket, offset;
4049
4050 bucket = get_bucket(state->pos);
4051 offset = get_offset(state->pos);
4052 h = &net->dev_name_head[bucket];
4053 count = 0;
4054 hlist_for_each_entry_rcu(dev, p, h, name_hlist) {
4055 if (count++ == offset) {
4056 state->pos = set_bucket_offset(bucket, count);
4057 return dev;
4058 }
4059 }
4060
4061 return NULL;
4062}
4063
4064static inline struct net_device *dev_from_new_bucket(struct seq_file *seq)
4065{
4066 struct dev_iter_state *state = seq->private;
4067 struct net_device *dev;
4068 unsigned int bucket;
4069
4070 bucket = get_bucket(state->pos);
4071 do {
4072 dev = dev_from_same_bucket(seq);
4073 if (dev)
4074 return dev;
4075
4076 bucket++;
4077 state->pos = set_bucket_offset(bucket, 0);
4078 } while (bucket < NETDEV_HASHENTRIES);
4079
4080 return NULL;
4081}
4082
1da177e4
LT
4083/*
4084 * This is invoked by the /proc filesystem handler to display a device
4085 * in detail.
4086 */
7562f876 4087void *dev_seq_start(struct seq_file *seq, loff_t *pos)
c6d14c84 4088 __acquires(RCU)
1da177e4 4089{
f04565dd 4090 struct dev_iter_state *state = seq->private;
1da177e4 4091
c6d14c84 4092 rcu_read_lock();
7562f876
PE
4093 if (!*pos)
4094 return SEQ_START_TOKEN;
1da177e4 4095
f04565dd
MM
4096 /* check for end of the hash */
4097 if (state->pos == 0 && *pos > 1)
4098 return NULL;
1da177e4 4099
f04565dd 4100 return dev_from_new_bucket(seq);
1da177e4
LT
4101}
4102
4103void *dev_seq_next(struct seq_file *seq, void *v, loff_t *pos)
4104{
f04565dd
MM
4105 struct net_device *dev;
4106
4107 ++*pos;
ccf43438
ED
4108
4109 if (v == SEQ_START_TOKEN)
f04565dd 4110 return dev_from_new_bucket(seq);
c6d14c84 4111
f04565dd
MM
4112 dev = dev_from_same_bucket(seq);
4113 if (dev)
4114 return dev;
4115
4116 return dev_from_new_bucket(seq);
1da177e4
LT
4117}
4118
4119void dev_seq_stop(struct seq_file *seq, void *v)
c6d14c84 4120 __releases(RCU)
1da177e4 4121{
c6d14c84 4122 rcu_read_unlock();
1da177e4
LT
4123}
4124
4125static void dev_seq_printf_stats(struct seq_file *seq, struct net_device *dev)
4126{
28172739
ED
4127 struct rtnl_link_stats64 temp;
4128 const struct rtnl_link_stats64 *stats = dev_get_stats(dev, &temp);
1da177e4 4129
be1f3c2c
BH
4130 seq_printf(seq, "%6s: %7llu %7llu %4llu %4llu %4llu %5llu %10llu %9llu "
4131 "%8llu %7llu %4llu %4llu %4llu %5llu %7llu %10llu\n",
5a1b5898
RR
4132 dev->name, stats->rx_bytes, stats->rx_packets,
4133 stats->rx_errors,
4134 stats->rx_dropped + stats->rx_missed_errors,
4135 stats->rx_fifo_errors,
4136 stats->rx_length_errors + stats->rx_over_errors +
4137 stats->rx_crc_errors + stats->rx_frame_errors,
4138 stats->rx_compressed, stats->multicast,
4139 stats->tx_bytes, stats->tx_packets,
4140 stats->tx_errors, stats->tx_dropped,
4141 stats->tx_fifo_errors, stats->collisions,
4142 stats->tx_carrier_errors +
4143 stats->tx_aborted_errors +
4144 stats->tx_window_errors +
4145 stats->tx_heartbeat_errors,
4146 stats->tx_compressed);
1da177e4
LT
4147}
4148
4149/*
4150 * Called from the PROCfs module. This now uses the new arbitrary sized
4151 * /proc/net interface to create /proc/net/dev
4152 */
4153static int dev_seq_show(struct seq_file *seq, void *v)
4154{
4155 if (v == SEQ_START_TOKEN)
4156 seq_puts(seq, "Inter-| Receive "
4157 " | Transmit\n"
4158 " face |bytes packets errs drop fifo frame "
4159 "compressed multicast|bytes packets errs "
4160 "drop fifo colls carrier compressed\n");
4161 else
4162 dev_seq_printf_stats(seq, v);
4163 return 0;
4164}
4165
dee42870 4166static struct softnet_data *softnet_get_online(loff_t *pos)
1da177e4 4167{
dee42870 4168 struct softnet_data *sd = NULL;
1da177e4 4169
0c0b0aca 4170 while (*pos < nr_cpu_ids)
4ec93edb 4171 if (cpu_online(*pos)) {
dee42870 4172 sd = &per_cpu(softnet_data, *pos);
1da177e4
LT
4173 break;
4174 } else
4175 ++*pos;
dee42870 4176 return sd;
1da177e4
LT
4177}
4178
4179static void *softnet_seq_start(struct seq_file *seq, loff_t *pos)
4180{
4181 return softnet_get_online(pos);
4182}
4183
4184static void *softnet_seq_next(struct seq_file *seq, void *v, loff_t *pos)
4185{
4186 ++*pos;
4187 return softnet_get_online(pos);
4188}
4189
4190static void softnet_seq_stop(struct seq_file *seq, void *v)
4191{
4192}
4193
4194static int softnet_seq_show(struct seq_file *seq, void *v)
4195{
dee42870 4196 struct softnet_data *sd = v;
1da177e4 4197
0a9627f2 4198 seq_printf(seq, "%08x %08x %08x %08x %08x %08x %08x %08x %08x %08x\n",
dee42870 4199 sd->processed, sd->dropped, sd->time_squeeze, 0,
c1ebcdb8 4200 0, 0, 0, 0, /* was fastroute */
dee42870 4201 sd->cpu_collision, sd->received_rps);
1da177e4
LT
4202 return 0;
4203}
4204
f690808e 4205static const struct seq_operations dev_seq_ops = {
1da177e4
LT
4206 .start = dev_seq_start,
4207 .next = dev_seq_next,
4208 .stop = dev_seq_stop,
4209 .show = dev_seq_show,
4210};
4211
4212static int dev_seq_open(struct inode *inode, struct file *file)
4213{
e372c414 4214 return seq_open_net(inode, file, &dev_seq_ops,
f04565dd 4215 sizeof(struct dev_iter_state));
1da177e4
LT
4216}
4217
5cac98dd
AB
4218int dev_seq_open_ops(struct inode *inode, struct file *file,
4219 const struct seq_operations *ops)
4220{
4221 return seq_open_net(inode, file, ops, sizeof(struct dev_iter_state));
4222}
4223
9a32144e 4224static const struct file_operations dev_seq_fops = {
1da177e4
LT
4225 .owner = THIS_MODULE,
4226 .open = dev_seq_open,
4227 .read = seq_read,
4228 .llseek = seq_lseek,
e372c414 4229 .release = seq_release_net,
1da177e4
LT
4230};
4231
f690808e 4232static const struct seq_operations softnet_seq_ops = {
1da177e4
LT
4233 .start = softnet_seq_start,
4234 .next = softnet_seq_next,
4235 .stop = softnet_seq_stop,
4236 .show = softnet_seq_show,
4237};
4238
4239static int softnet_seq_open(struct inode *inode, struct file *file)
4240{
4241 return seq_open(file, &softnet_seq_ops);
4242}
4243
9a32144e 4244static const struct file_operations softnet_seq_fops = {
1da177e4
LT
4245 .owner = THIS_MODULE,
4246 .open = softnet_seq_open,
4247 .read = seq_read,
4248 .llseek = seq_lseek,
4249 .release = seq_release,
4250};
4251
0e1256ff
SH
4252static void *ptype_get_idx(loff_t pos)
4253{
4254 struct packet_type *pt = NULL;
4255 loff_t i = 0;
4256 int t;
4257
4258 list_for_each_entry_rcu(pt, &ptype_all, list) {
4259 if (i == pos)
4260 return pt;
4261 ++i;
4262 }
4263
82d8a867 4264 for (t = 0; t < PTYPE_HASH_SIZE; t++) {
0e1256ff
SH
4265 list_for_each_entry_rcu(pt, &ptype_base[t], list) {
4266 if (i == pos)
4267 return pt;
4268 ++i;
4269 }
4270 }
4271 return NULL;
4272}
4273
4274static void *ptype_seq_start(struct seq_file *seq, loff_t *pos)
72348a42 4275 __acquires(RCU)
0e1256ff
SH
4276{
4277 rcu_read_lock();
4278 return *pos ? ptype_get_idx(*pos - 1) : SEQ_START_TOKEN;
4279}
4280
4281static void *ptype_seq_next(struct seq_file *seq, void *v, loff_t *pos)
4282{
4283 struct packet_type *pt;
4284 struct list_head *nxt;
4285 int hash;
4286
4287 ++*pos;
4288 if (v == SEQ_START_TOKEN)
4289 return ptype_get_idx(0);
4290
4291 pt = v;
4292 nxt = pt->list.next;
4293 if (pt->type == htons(ETH_P_ALL)) {
4294 if (nxt != &ptype_all)
4295 goto found;
4296 hash = 0;
4297 nxt = ptype_base[0].next;
4298 } else
82d8a867 4299 hash = ntohs(pt->type) & PTYPE_HASH_MASK;
0e1256ff
SH
4300
4301 while (nxt == &ptype_base[hash]) {
82d8a867 4302 if (++hash >= PTYPE_HASH_SIZE)
0e1256ff
SH
4303 return NULL;
4304 nxt = ptype_base[hash].next;
4305 }
4306found:
4307 return list_entry(nxt, struct packet_type, list);
4308}
4309
4310static void ptype_seq_stop(struct seq_file *seq, void *v)
72348a42 4311 __releases(RCU)
0e1256ff
SH
4312{
4313 rcu_read_unlock();
4314}
4315
0e1256ff
SH
4316static int ptype_seq_show(struct seq_file *seq, void *v)
4317{
4318 struct packet_type *pt = v;
4319
4320 if (v == SEQ_START_TOKEN)
4321 seq_puts(seq, "Type Device Function\n");
c346dca1 4322 else if (pt->dev == NULL || dev_net(pt->dev) == seq_file_net(seq)) {
0e1256ff
SH
4323 if (pt->type == htons(ETH_P_ALL))
4324 seq_puts(seq, "ALL ");
4325 else
4326 seq_printf(seq, "%04x", ntohs(pt->type));
4327
908cd2da
AD
4328 seq_printf(seq, " %-8s %pF\n",
4329 pt->dev ? pt->dev->name : "", pt->func);
0e1256ff
SH
4330 }
4331
4332 return 0;
4333}
4334
4335static const struct seq_operations ptype_seq_ops = {
4336 .start = ptype_seq_start,
4337 .next = ptype_seq_next,
4338 .stop = ptype_seq_stop,
4339 .show = ptype_seq_show,
4340};
4341
4342static int ptype_seq_open(struct inode *inode, struct file *file)
4343{
2feb27db
PE
4344 return seq_open_net(inode, file, &ptype_seq_ops,
4345 sizeof(struct seq_net_private));
0e1256ff
SH
4346}
4347
4348static const struct file_operations ptype_seq_fops = {
4349 .owner = THIS_MODULE,
4350 .open = ptype_seq_open,
4351 .read = seq_read,
4352 .llseek = seq_lseek,
2feb27db 4353 .release = seq_release_net,
0e1256ff
SH
4354};
4355
4356
4665079c 4357static int __net_init dev_proc_net_init(struct net *net)
1da177e4
LT
4358{
4359 int rc = -ENOMEM;
4360
881d966b 4361 if (!proc_net_fops_create(net, "dev", S_IRUGO, &dev_seq_fops))
1da177e4 4362 goto out;
881d966b 4363 if (!proc_net_fops_create(net, "softnet_stat", S_IRUGO, &softnet_seq_fops))
1da177e4 4364 goto out_dev;
881d966b 4365 if (!proc_net_fops_create(net, "ptype", S_IRUGO, &ptype_seq_fops))
457c4cbc 4366 goto out_softnet;
0e1256ff 4367
881d966b 4368 if (wext_proc_init(net))
457c4cbc 4369 goto out_ptype;
1da177e4
LT
4370 rc = 0;
4371out:
4372 return rc;
457c4cbc 4373out_ptype:
881d966b 4374 proc_net_remove(net, "ptype");
1da177e4 4375out_softnet:
881d966b 4376 proc_net_remove(net, "softnet_stat");
1da177e4 4377out_dev:
881d966b 4378 proc_net_remove(net, "dev");
1da177e4
LT
4379 goto out;
4380}
881d966b 4381
4665079c 4382static void __net_exit dev_proc_net_exit(struct net *net)
881d966b
EB
4383{
4384 wext_proc_exit(net);
4385
4386 proc_net_remove(net, "ptype");
4387 proc_net_remove(net, "softnet_stat");
4388 proc_net_remove(net, "dev");
4389}
4390
022cbae6 4391static struct pernet_operations __net_initdata dev_proc_ops = {
881d966b
EB
4392 .init = dev_proc_net_init,
4393 .exit = dev_proc_net_exit,
4394};
4395
4396static int __init dev_proc_init(void)
4397{
4398 return register_pernet_subsys(&dev_proc_ops);
4399}
1da177e4
LT
4400#else
4401#define dev_proc_init() 0
4402#endif /* CONFIG_PROC_FS */
4403
4404
4405/**
1765a575 4406 * netdev_set_master - set up master pointer
1da177e4
LT
4407 * @slave: slave device
4408 * @master: new master device
4409 *
4410 * Changes the master device of the slave. Pass %NULL to break the
4411 * bonding. The caller must hold the RTNL semaphore. On a failure
4412 * a negative errno code is returned. On success the reference counts
1765a575 4413 * are adjusted and the function returns zero.
1da177e4
LT
4414 */
4415int netdev_set_master(struct net_device *slave, struct net_device *master)
4416{
4417 struct net_device *old = slave->master;
4418
4419 ASSERT_RTNL();
4420
4421 if (master) {
4422 if (old)
4423 return -EBUSY;
4424 dev_hold(master);
4425 }
4426
4427 slave->master = master;
4ec93edb 4428
6df427fe 4429 if (old)
1da177e4 4430 dev_put(old);
1765a575
JP
4431 return 0;
4432}
4433EXPORT_SYMBOL(netdev_set_master);
4434
4435/**
4436 * netdev_set_bond_master - set up bonding master/slave pair
4437 * @slave: slave device
4438 * @master: new master device
4439 *
4440 * Changes the master device of the slave. Pass %NULL to break the
4441 * bonding. The caller must hold the RTNL semaphore. On a failure
4442 * a negative errno code is returned. On success %RTM_NEWLINK is sent
4443 * to the routing socket and the function returns zero.
4444 */
4445int netdev_set_bond_master(struct net_device *slave, struct net_device *master)
4446{
4447 int err;
4448
4449 ASSERT_RTNL();
4450
4451 err = netdev_set_master(slave, master);
4452 if (err)
4453 return err;
1da177e4
LT
4454 if (master)
4455 slave->flags |= IFF_SLAVE;
4456 else
4457 slave->flags &= ~IFF_SLAVE;
4458
4459 rtmsg_ifinfo(RTM_NEWLINK, slave, IFF_SLAVE);
4460 return 0;
4461}
1765a575 4462EXPORT_SYMBOL(netdev_set_bond_master);
1da177e4 4463
b6c40d68
PM
4464static void dev_change_rx_flags(struct net_device *dev, int flags)
4465{
d314774c
SH
4466 const struct net_device_ops *ops = dev->netdev_ops;
4467
4468 if ((dev->flags & IFF_UP) && ops->ndo_change_rx_flags)
4469 ops->ndo_change_rx_flags(dev, flags);
b6c40d68
PM
4470}
4471
dad9b335 4472static int __dev_set_promiscuity(struct net_device *dev, int inc)
1da177e4 4473{
b536db93 4474 unsigned int old_flags = dev->flags;
8192b0c4
DH
4475 uid_t uid;
4476 gid_t gid;
1da177e4 4477
24023451
PM
4478 ASSERT_RTNL();
4479
dad9b335
WC
4480 dev->flags |= IFF_PROMISC;
4481 dev->promiscuity += inc;
4482 if (dev->promiscuity == 0) {
4483 /*
4484 * Avoid overflow.
4485 * If inc caused a wraparound, undo the change and return an error.
4486 */
4487 if (inc < 0)
4488 dev->flags &= ~IFF_PROMISC;
4489 else {
4490 dev->promiscuity -= inc;
7b6cd1ce
JP
4491 pr_warn("%s: promiscuity touches roof, set promiscuity failed. promiscuity feature of device might be broken.\n",
4492 dev->name);
dad9b335
WC
4493 return -EOVERFLOW;
4494 }
4495 }
52609c0b 4496 if (dev->flags != old_flags) {
7b6cd1ce
JP
4497 pr_info("device %s %s promiscuous mode\n",
4498 dev->name,
4499 dev->flags & IFF_PROMISC ? "entered" : "left");
8192b0c4
DH
4500 if (audit_enabled) {
4501 current_uid_gid(&uid, &gid);
7759db82
KHK
4502 audit_log(current->audit_context, GFP_ATOMIC,
4503 AUDIT_ANOM_PROMISCUOUS,
4504 "dev=%s prom=%d old_prom=%d auid=%u uid=%u gid=%u ses=%u",
4505 dev->name, (dev->flags & IFF_PROMISC),
4506 (old_flags & IFF_PROMISC),
4507 audit_get_loginuid(current),
8192b0c4 4508 uid, gid,
7759db82 4509 audit_get_sessionid(current));
8192b0c4 4510 }
24023451 4511
b6c40d68 4512 dev_change_rx_flags(dev, IFF_PROMISC);
1da177e4 4513 }
dad9b335 4514 return 0;
1da177e4
LT
4515}
4516
4417da66
PM
4517/**
4518 * dev_set_promiscuity - update promiscuity count on a device
4519 * @dev: device
4520 * @inc: modifier
4521 *
4522 * Add or remove promiscuity from a device. While the count in the device
4523 * remains above zero the interface remains promiscuous. Once it hits zero
4524 * the device reverts back to normal filtering operation. A negative inc
4525 * value is used to drop promiscuity on the device.
dad9b335 4526 * Return 0 if successful or a negative errno code on error.
4417da66 4527 */
dad9b335 4528int dev_set_promiscuity(struct net_device *dev, int inc)
4417da66 4529{
b536db93 4530 unsigned int old_flags = dev->flags;
dad9b335 4531 int err;
4417da66 4532
dad9b335 4533 err = __dev_set_promiscuity(dev, inc);
4b5a698e 4534 if (err < 0)
dad9b335 4535 return err;
4417da66
PM
4536 if (dev->flags != old_flags)
4537 dev_set_rx_mode(dev);
dad9b335 4538 return err;
4417da66 4539}
d1b19dff 4540EXPORT_SYMBOL(dev_set_promiscuity);
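/*
 * Illustrative sketch, not part of this file: a packet-capture user takes
 * one promiscuity reference while active and drops it on close, so
 * concurrent users nest correctly. Both calls require the RTNL lock;
 * example_* names are hypothetical.
 */
#if 0
static int example_capture_start(struct net_device *dev)
{
	int err;

	rtnl_lock();
	err = dev_set_promiscuity(dev, 1);	/* take a reference */
	rtnl_unlock();
	return err;
}

static void example_capture_stop(struct net_device *dev)
{
	rtnl_lock();
	dev_set_promiscuity(dev, -1);		/* drop it again */
	rtnl_unlock();
}
#endif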
4417da66 4541
1da177e4
LT
4542/**
4543 * dev_set_allmulti - update allmulti count on a device
4544 * @dev: device
4545 * @inc: modifier
4546 *
4547 * Add or remove reception of all multicast frames to a device. While the
4548 * count in the device remains above zero the interface remains listening
4549 * to all interfaces. Once it hits zero the device reverts back to normal
4550 * filtering operation. A negative @inc value is used to drop the counter
4551 * when releasing a resource needing all multicasts.
dad9b335 4552 * Return 0 if successful or a negative errno code on error.
1da177e4
LT
4553 */
4554
dad9b335 4555int dev_set_allmulti(struct net_device *dev, int inc)
1da177e4 4556{
b536db93 4557 unsigned int old_flags = dev->flags;
1da177e4 4558
24023451
PM
4559 ASSERT_RTNL();
4560
1da177e4 4561 dev->flags |= IFF_ALLMULTI;
dad9b335
WC
4562 dev->allmulti += inc;
4563 if (dev->allmulti == 0) {
4564 /*
4565 * Avoid overflow.
4566 * If inc caused a wraparound, undo the change and return an error.
4567 */
4568 if (inc < 0)
4569 dev->flags &= ~IFF_ALLMULTI;
4570 else {
4571 dev->allmulti -= inc;
7b6cd1ce
JP
4572 pr_warn("%s: allmulti touches roof, set allmulti failed. allmulti feature of device might be broken.\n",
4573 dev->name);
dad9b335
WC
4574 return -EOVERFLOW;
4575 }
4576 }
24023451 4577 if (dev->flags ^ old_flags) {
b6c40d68 4578 dev_change_rx_flags(dev, IFF_ALLMULTI);
4417da66 4579 dev_set_rx_mode(dev);
24023451 4580 }
dad9b335 4581 return 0;
4417da66 4582}
d1b19dff 4583EXPORT_SYMBOL(dev_set_allmulti);
4417da66
PM
4584
4585/*
4586 * Upload unicast and multicast address lists to device and
4587 * configure RX filtering. When the device doesn't support unicast
53ccaae1 4588 * filtering, it is put in promiscuous mode while unicast addresses
4417da66
PM
4589 * are present.
4590 */
4591void __dev_set_rx_mode(struct net_device *dev)
4592{
d314774c
SH
4593 const struct net_device_ops *ops = dev->netdev_ops;
4594
4417da66
PM
4595 /* dev_open will call this function so the list will stay sane. */
4596 if (!(dev->flags&IFF_UP))
4597 return;
4598
4599 if (!netif_device_present(dev))
40b77c94 4600 return;
4417da66 4601
01789349 4602 if (!(dev->priv_flags & IFF_UNICAST_FLT)) {
4417da66
PM
4603 /* Unicast addresses changes may only happen under the rtnl,
4604 * therefore calling __dev_set_promiscuity here is safe.
4605 */
32e7bfc4 4606 if (!netdev_uc_empty(dev) && !dev->uc_promisc) {
4417da66 4607 __dev_set_promiscuity(dev, 1);
2d348d1f 4608 dev->uc_promisc = true;
32e7bfc4 4609 } else if (netdev_uc_empty(dev) && dev->uc_promisc) {
4417da66 4610 __dev_set_promiscuity(dev, -1);
2d348d1f 4611 dev->uc_promisc = false;
4417da66 4612 }
4417da66 4613 }
01789349
JP
4614
4615 if (ops->ndo_set_rx_mode)
4616 ops->ndo_set_rx_mode(dev);
4417da66
PM
4617}
4618
4619void dev_set_rx_mode(struct net_device *dev)
4620{
b9e40857 4621 netif_addr_lock_bh(dev);
4417da66 4622 __dev_set_rx_mode(dev);
b9e40857 4623 netif_addr_unlock_bh(dev);
1da177e4
LT
4624}
4625
f0db275a
SH
4626/**
4627 * dev_get_flags - get flags reported to userspace
4628 * @dev: device
4629 *
4630 * Get the combination of flag bits exported through APIs to userspace.
4631 */
1da177e4
LT
4632unsigned dev_get_flags(const struct net_device *dev)
4633{
4634 unsigned flags;
4635
4636 flags = (dev->flags & ~(IFF_PROMISC |
4637 IFF_ALLMULTI |
b00055aa
SR
4638 IFF_RUNNING |
4639 IFF_LOWER_UP |
4640 IFF_DORMANT)) |
1da177e4
LT
4641 (dev->gflags & (IFF_PROMISC |
4642 IFF_ALLMULTI));
4643
b00055aa
SR
4644 if (netif_running(dev)) {
4645 if (netif_oper_up(dev))
4646 flags |= IFF_RUNNING;
4647 if (netif_carrier_ok(dev))
4648 flags |= IFF_LOWER_UP;
4649 if (netif_dormant(dev))
4650 flags |= IFF_DORMANT;
4651 }
1da177e4
LT
4652
4653 return flags;
4654}
d1b19dff 4655EXPORT_SYMBOL(dev_get_flags);
1da177e4 4656
bd380811 4657int __dev_change_flags(struct net_device *dev, unsigned int flags)
1da177e4 4658{
b536db93 4659 unsigned int old_flags = dev->flags;
bd380811 4660 int ret;
1da177e4 4661
24023451
PM
4662 ASSERT_RTNL();
4663
1da177e4
LT
4664 /*
4665 * Set the flags on our device.
4666 */
4667
4668 dev->flags = (flags & (IFF_DEBUG | IFF_NOTRAILERS | IFF_NOARP |
4669 IFF_DYNAMIC | IFF_MULTICAST | IFF_PORTSEL |
4670 IFF_AUTOMEDIA)) |
4671 (dev->flags & (IFF_UP | IFF_VOLATILE | IFF_PROMISC |
4672 IFF_ALLMULTI));
4673
4674 /*
4675 * Load in the correct multicast list now the flags have changed.
4676 */
4677
b6c40d68
PM
4678 if ((old_flags ^ flags) & IFF_MULTICAST)
4679 dev_change_rx_flags(dev, IFF_MULTICAST);
24023451 4680
4417da66 4681 dev_set_rx_mode(dev);
1da177e4
LT
4682
4683 /*
4684 * Have we downed the interface? We handle IFF_UP ourselves,
4685 * according to user attempts to set it, rather than blindly
4686 * setting it.
4687 */
4688
4689 ret = 0;
4690 if ((old_flags ^ flags) & IFF_UP) { /* Bit is different? */
bd380811 4691 ret = ((old_flags & IFF_UP) ? __dev_close : __dev_open)(dev);
1da177e4
LT
4692
4693 if (!ret)
4417da66 4694 dev_set_rx_mode(dev);
1da177e4
LT
4695 }
4696
1da177e4 4697 if ((flags ^ dev->gflags) & IFF_PROMISC) {
d1b19dff
ED
4698 int inc = (flags & IFF_PROMISC) ? 1 : -1;
4699
1da177e4
LT
4700 dev->gflags ^= IFF_PROMISC;
4701 dev_set_promiscuity(dev, inc);
4702 }
4703
4704 /* NOTE: the order of synchronization of IFF_PROMISC and IFF_ALLMULTI
4705 is important. Some (broken) drivers set IFF_PROMISC when
4706 IFF_ALLMULTI is requested, without asking us and without reporting it.
4707 */
4708 if ((flags ^ dev->gflags) & IFF_ALLMULTI) {
d1b19dff
ED
4709 int inc = (flags & IFF_ALLMULTI) ? 1 : -1;
4710
1da177e4
LT
4711 dev->gflags ^= IFF_ALLMULTI;
4712 dev_set_allmulti(dev, inc);
4713 }
4714
bd380811
PM
4715 return ret;
4716}
4717
4718void __dev_notify_flags(struct net_device *dev, unsigned int old_flags)
4719{
4720 unsigned int changes = dev->flags ^ old_flags;
4721
4722 if (changes & IFF_UP) {
4723 if (dev->flags & IFF_UP)
4724 call_netdevice_notifiers(NETDEV_UP, dev);
4725 else
4726 call_netdevice_notifiers(NETDEV_DOWN, dev);
4727 }
4728
4729 if (dev->flags & IFF_UP &&
4730 (changes & ~(IFF_UP | IFF_PROMISC | IFF_ALLMULTI | IFF_VOLATILE)))
4731 call_netdevice_notifiers(NETDEV_CHANGE, dev);
4732}
4733
4734/**
4735 * dev_change_flags - change device settings
4736 * @dev: device
4737 * @flags: device state flags
4738 *
4739 * Change settings on device based state flags. The flags are
4740 * in the userspace exported format.
4741 */
b536db93 4742int dev_change_flags(struct net_device *dev, unsigned int flags)
bd380811 4743{
b536db93
ED
4744 int ret;
4745 unsigned int changes, old_flags = dev->flags;
bd380811
PM
4746
4747 ret = __dev_change_flags(dev, flags);
4748 if (ret < 0)
4749 return ret;
4750
4751 changes = old_flags ^ dev->flags;
7c355f53
TG
4752 if (changes)
4753 rtmsg_ifinfo(RTM_NEWLINK, dev, changes);
1da177e4 4754
bd380811 4755 __dev_notify_flags(dev, old_flags);
1da177e4
LT
4756 return ret;
4757}
d1b19dff 4758EXPORT_SYMBOL(dev_change_flags);
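/*
 * Illustrative sketch, not part of this file: bringing an interface up
 * from kernel code is just a flags change under RTNL; this mirrors what
 * SIOCSIFFLAGS (see dev_ifsioc() below) does on behalf of userspace.
 * example_* names are hypothetical.
 */
#if 0
static int example_bring_up(struct net_device *dev)
{
	int err;

	rtnl_lock();
	err = dev_change_flags(dev, dev->flags | IFF_UP);
	rtnl_unlock();
	return err;
}
#endif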
1da177e4 4759
f0db275a
SH
4760/**
4761 * dev_set_mtu - Change maximum transfer unit
4762 * @dev: device
4763 * @new_mtu: new transfer unit
4764 *
4765 * Change the maximum transfer size of the network device.
4766 */
1da177e4
LT
4767int dev_set_mtu(struct net_device *dev, int new_mtu)
4768{
d314774c 4769 const struct net_device_ops *ops = dev->netdev_ops;
1da177e4
LT
4770 int err;
4771
4772 if (new_mtu == dev->mtu)
4773 return 0;
4774
4775 /* MTU must be positive. */
4776 if (new_mtu < 0)
4777 return -EINVAL;
4778
4779 if (!netif_device_present(dev))
4780 return -ENODEV;
4781
4782 err = 0;
d314774c
SH
4783 if (ops->ndo_change_mtu)
4784 err = ops->ndo_change_mtu(dev, new_mtu);
1da177e4
LT
4785 else
4786 dev->mtu = new_mtu;
d314774c 4787
1da177e4 4788 if (!err && dev->flags & IFF_UP)
056925ab 4789 call_netdevice_notifiers(NETDEV_CHANGEMTU, dev);
1da177e4
LT
4790 return err;
4791}
d1b19dff 4792EXPORT_SYMBOL(dev_set_mtu);
1da177e4 4793
cbda10fa
VD
4794/**
4795 * dev_set_group - Change group this device belongs to
4796 * @dev: device
4797 * @new_group: group this device should belong to
4798 */
4799void dev_set_group(struct net_device *dev, int new_group)
4800{
4801 dev->group = new_group;
4802}
4803EXPORT_SYMBOL(dev_set_group);
4804
f0db275a
SH
4805/**
4806 * dev_set_mac_address - Change Media Access Control Address
4807 * @dev: device
4808 * @sa: new address
4809 *
4810 * Change the hardware (MAC) address of the device
4811 */
1da177e4
LT
4812int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa)
4813{
d314774c 4814 const struct net_device_ops *ops = dev->netdev_ops;
1da177e4
LT
4815 int err;
4816
d314774c 4817 if (!ops->ndo_set_mac_address)
1da177e4
LT
4818 return -EOPNOTSUPP;
4819 if (sa->sa_family != dev->type)
4820 return -EINVAL;
4821 if (!netif_device_present(dev))
4822 return -ENODEV;
d314774c 4823 err = ops->ndo_set_mac_address(dev, sa);
1da177e4 4824 if (!err)
056925ab 4825 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
1da177e4
LT
4826 return err;
4827}
d1b19dff 4828EXPORT_SYMBOL(dev_set_mac_address);
1da177e4
LT
4829
4830/*
3710becf 4831 * Perform the SIOCxIFxxx calls, inside rcu_read_lock()
1da177e4 4832 */
14e3e079 4833static int dev_ifsioc_locked(struct net *net, struct ifreq *ifr, unsigned int cmd)
1da177e4
LT
4834{
4835 int err;
3710becf 4836 struct net_device *dev = dev_get_by_name_rcu(net, ifr->ifr_name);
1da177e4
LT
4837
4838 if (!dev)
4839 return -ENODEV;
4840
4841 switch (cmd) {
d1b19dff
ED
4842 case SIOCGIFFLAGS: /* Get interface flags */
4843 ifr->ifr_flags = (short) dev_get_flags(dev);
4844 return 0;
1da177e4 4845
d1b19dff
ED
4846 case SIOCGIFMETRIC: /* Get the metric on the interface
4847 (currently unused) */
4848 ifr->ifr_metric = 0;
4849 return 0;
1da177e4 4850
d1b19dff
ED
4851 case SIOCGIFMTU: /* Get the MTU of a device */
4852 ifr->ifr_mtu = dev->mtu;
4853 return 0;
1da177e4 4854
d1b19dff
ED
4855 case SIOCGIFHWADDR:
4856 if (!dev->addr_len)
4857 memset(ifr->ifr_hwaddr.sa_data, 0, sizeof ifr->ifr_hwaddr.sa_data);
4858 else
4859 memcpy(ifr->ifr_hwaddr.sa_data, dev->dev_addr,
4860 min(sizeof ifr->ifr_hwaddr.sa_data, (size_t) dev->addr_len));
4861 ifr->ifr_hwaddr.sa_family = dev->type;
4862 return 0;
1da177e4 4863
d1b19dff
ED
4864 case SIOCGIFSLAVE:
4865 err = -EINVAL;
4866 break;
14e3e079 4867
d1b19dff
ED
4868 case SIOCGIFMAP:
4869 ifr->ifr_map.mem_start = dev->mem_start;
4870 ifr->ifr_map.mem_end = dev->mem_end;
4871 ifr->ifr_map.base_addr = dev->base_addr;
4872 ifr->ifr_map.irq = dev->irq;
4873 ifr->ifr_map.dma = dev->dma;
4874 ifr->ifr_map.port = dev->if_port;
4875 return 0;
14e3e079 4876
d1b19dff
ED
4877 case SIOCGIFINDEX:
4878 ifr->ifr_ifindex = dev->ifindex;
4879 return 0;
14e3e079 4880
d1b19dff
ED
4881 case SIOCGIFTXQLEN:
4882 ifr->ifr_qlen = dev->tx_queue_len;
4883 return 0;
14e3e079 4884
d1b19dff
ED
4885 default:
4886 /* dev_ioctl() should ensure this case
4887 * is never reached
4888 */
4889 WARN_ON(1);
41c31f31 4890 err = -ENOTTY;
d1b19dff 4891 break;
14e3e079
JG
4892
4893 }
4894 return err;
4895}
4896
4897/*
4898 * Perform the SIOCxIFxxx calls, inside rtnl_lock()
4899 */
4900static int dev_ifsioc(struct net *net, struct ifreq *ifr, unsigned int cmd)
4901{
4902 int err;
4903 struct net_device *dev = __dev_get_by_name(net, ifr->ifr_name);
5f2f6da7 4904 const struct net_device_ops *ops;
14e3e079
JG
4905
4906 if (!dev)
4907 return -ENODEV;
4908
5f2f6da7
JP
4909 ops = dev->netdev_ops;
4910
14e3e079 4911 switch (cmd) {
d1b19dff
ED
4912 case SIOCSIFFLAGS: /* Set interface flags */
4913 return dev_change_flags(dev, ifr->ifr_flags);
14e3e079 4914
d1b19dff
ED
4915 case SIOCSIFMETRIC: /* Set the metric on the interface
4916 (currently unused) */
4917 return -EOPNOTSUPP;
14e3e079 4918
d1b19dff
ED
4919 case SIOCSIFMTU: /* Set the MTU of a device */
4920 return dev_set_mtu(dev, ifr->ifr_mtu);
1da177e4 4921
d1b19dff
ED
4922 case SIOCSIFHWADDR:
4923 return dev_set_mac_address(dev, &ifr->ifr_hwaddr);
1da177e4 4924
d1b19dff
ED
4925 case SIOCSIFHWBROADCAST:
4926 if (ifr->ifr_hwaddr.sa_family != dev->type)
4927 return -EINVAL;
4928 memcpy(dev->broadcast, ifr->ifr_hwaddr.sa_data,
4929 min(sizeof ifr->ifr_hwaddr.sa_data, (size_t) dev->addr_len));
4930 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
4931 return 0;
1da177e4 4932
d1b19dff
ED
4933 case SIOCSIFMAP:
4934 if (ops->ndo_set_config) {
1da177e4
LT
4935 if (!netif_device_present(dev))
4936 return -ENODEV;
d1b19dff
ED
4937 return ops->ndo_set_config(dev, &ifr->ifr_map);
4938 }
4939 return -EOPNOTSUPP;
1da177e4 4940
d1b19dff 4941 case SIOCADDMULTI:
b81693d9 4942 if (!ops->ndo_set_rx_mode ||
d1b19dff
ED
4943 ifr->ifr_hwaddr.sa_family != AF_UNSPEC)
4944 return -EINVAL;
4945 if (!netif_device_present(dev))
4946 return -ENODEV;
22bedad3 4947 return dev_mc_add_global(dev, ifr->ifr_hwaddr.sa_data);
d1b19dff
ED
4948
4949 case SIOCDELMULTI:
b81693d9 4950 if (!ops->ndo_set_rx_mode ||
d1b19dff
ED
4951 ifr->ifr_hwaddr.sa_family != AF_UNSPEC)
4952 return -EINVAL;
4953 if (!netif_device_present(dev))
4954 return -ENODEV;
22bedad3 4955 return dev_mc_del_global(dev, ifr->ifr_hwaddr.sa_data);
1da177e4 4956
d1b19dff
ED
4957 case SIOCSIFTXQLEN:
4958 if (ifr->ifr_qlen < 0)
4959 return -EINVAL;
4960 dev->tx_queue_len = ifr->ifr_qlen;
4961 return 0;
1da177e4 4962
d1b19dff
ED
4963 case SIOCSIFNAME:
4964 ifr->ifr_newname[IFNAMSIZ-1] = '\0';
4965 return dev_change_name(dev, ifr->ifr_newname);
1da177e4 4966
4dc360c5
RC
4967 case SIOCSHWTSTAMP:
4968 err = net_hwtstamp_validate(ifr);
4969 if (err)
4970 return err;
4971 /* fall through */
4972
d1b19dff
ED
4973 /*
4974 * Unknown or private ioctl
4975 */
4976 default:
4977 if ((cmd >= SIOCDEVPRIVATE &&
4978 cmd <= SIOCDEVPRIVATE + 15) ||
4979 cmd == SIOCBONDENSLAVE ||
4980 cmd == SIOCBONDRELEASE ||
4981 cmd == SIOCBONDSETHWADDR ||
4982 cmd == SIOCBONDSLAVEINFOQUERY ||
4983 cmd == SIOCBONDINFOQUERY ||
4984 cmd == SIOCBONDCHANGEACTIVE ||
4985 cmd == SIOCGMIIPHY ||
4986 cmd == SIOCGMIIREG ||
4987 cmd == SIOCSMIIREG ||
4988 cmd == SIOCBRADDIF ||
4989 cmd == SIOCBRDELIF ||
4990 cmd == SIOCSHWTSTAMP ||
4991 cmd == SIOCWANDEV) {
4992 err = -EOPNOTSUPP;
4993 if (ops->ndo_do_ioctl) {
4994 if (netif_device_present(dev))
4995 err = ops->ndo_do_ioctl(dev, ifr, cmd);
4996 else
4997 err = -ENODEV;
4998 }
4999 } else
5000 err = -EINVAL;
1da177e4
LT
5001
5002 }
5003 return err;
5004}
5005
5006/*
5007 * This function handles all "interface"-type I/O control requests. The actual
5008 * 'doing' part of this is dev_ifsioc above.
5009 */
5010
5011/**
5012 * dev_ioctl - network device ioctl
c4ea43c5 5013 * @net: the applicable net namespace
1da177e4
LT
5014 * @cmd: command to issue
5015 * @arg: pointer to a struct ifreq in user space
5016 *
5017 * Issue ioctl functions to devices. This is normally called by the
5018 * user space syscall interfaces but can sometimes be useful for
5019 * other purposes. The return value is the return from the syscall if
5020 * positive or a negative errno code on error.
5021 */
5022
881d966b 5023int dev_ioctl(struct net *net, unsigned int cmd, void __user *arg)
1da177e4
LT
5024{
5025 struct ifreq ifr;
5026 int ret;
5027 char *colon;
5028
5029 /* One special case: SIOCGIFCONF takes ifconf argument
5030 and requires shared lock, because it sleeps writing
5031 to user space.
5032 */
5033
5034 if (cmd == SIOCGIFCONF) {
6756ae4b 5035 rtnl_lock();
881d966b 5036 ret = dev_ifconf(net, (char __user *) arg);
6756ae4b 5037 rtnl_unlock();
1da177e4
LT
5038 return ret;
5039 }
5040 if (cmd == SIOCGIFNAME)
881d966b 5041 return dev_ifname(net, (struct ifreq __user *)arg);
1da177e4
LT
5042
5043 if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
5044 return -EFAULT;
5045
5046 ifr.ifr_name[IFNAMSIZ-1] = 0;
5047
5048 colon = strchr(ifr.ifr_name, ':');
5049 if (colon)
5050 *colon = 0;
5051
5052 /*
5053 * See which interface the caller is talking about.
5054 */
5055
5056 switch (cmd) {
d1b19dff
ED
5057 /*
5058 * These ioctl calls:
5059 * - can be done by all.
5060 * - atomic and do not require locking.
5061 * - return a value
5062 */
5063 case SIOCGIFFLAGS:
5064 case SIOCGIFMETRIC:
5065 case SIOCGIFMTU:
5066 case SIOCGIFHWADDR:
5067 case SIOCGIFSLAVE:
5068 case SIOCGIFMAP:
5069 case SIOCGIFINDEX:
5070 case SIOCGIFTXQLEN:
5071 dev_load(net, ifr.ifr_name);
3710becf 5072 rcu_read_lock();
d1b19dff 5073 ret = dev_ifsioc_locked(net, &ifr, cmd);
3710becf 5074 rcu_read_unlock();
d1b19dff
ED
5075 if (!ret) {
5076 if (colon)
5077 *colon = ':';
5078 if (copy_to_user(arg, &ifr,
5079 sizeof(struct ifreq)))
5080 ret = -EFAULT;
5081 }
5082 return ret;
1da177e4 5083
d1b19dff
ED
5084 case SIOCETHTOOL:
5085 dev_load(net, ifr.ifr_name);
5086 rtnl_lock();
5087 ret = dev_ethtool(net, &ifr);
5088 rtnl_unlock();
5089 if (!ret) {
5090 if (colon)
5091 *colon = ':';
5092 if (copy_to_user(arg, &ifr,
5093 sizeof(struct ifreq)))
5094 ret = -EFAULT;
5095 }
5096 return ret;
1da177e4 5097
d1b19dff
ED
5098 /*
5099 * These ioctl calls:
5100 * - require superuser power.
5101 * - require strict serialization.
5102 * - return a value
5103 */
5104 case SIOCGMIIPHY:
5105 case SIOCGMIIREG:
5106 case SIOCSIFNAME:
5107 if (!capable(CAP_NET_ADMIN))
5108 return -EPERM;
5109 dev_load(net, ifr.ifr_name);
5110 rtnl_lock();
5111 ret = dev_ifsioc(net, &ifr, cmd);
5112 rtnl_unlock();
5113 if (!ret) {
5114 if (colon)
5115 *colon = ':';
5116 if (copy_to_user(arg, &ifr,
5117 sizeof(struct ifreq)))
5118 ret = -EFAULT;
5119 }
5120 return ret;
1da177e4 5121
d1b19dff
ED
5122 /*
5123 * These ioctl calls:
5124 * - require superuser power.
5125 * - require strict serialization.
5126 * - do not return a value
5127 */
5128 case SIOCSIFFLAGS:
5129 case SIOCSIFMETRIC:
5130 case SIOCSIFMTU:
5131 case SIOCSIFMAP:
5132 case SIOCSIFHWADDR:
5133 case SIOCSIFSLAVE:
5134 case SIOCADDMULTI:
5135 case SIOCDELMULTI:
5136 case SIOCSIFHWBROADCAST:
5137 case SIOCSIFTXQLEN:
5138 case SIOCSMIIREG:
5139 case SIOCBONDENSLAVE:
5140 case SIOCBONDRELEASE:
5141 case SIOCBONDSETHWADDR:
5142 case SIOCBONDCHANGEACTIVE:
5143 case SIOCBRADDIF:
5144 case SIOCBRDELIF:
5145 case SIOCSHWTSTAMP:
5146 if (!capable(CAP_NET_ADMIN))
5147 return -EPERM;
5148 /* fall through */
5149 case SIOCBONDSLAVEINFOQUERY:
5150 case SIOCBONDINFOQUERY:
5151 dev_load(net, ifr.ifr_name);
5152 rtnl_lock();
5153 ret = dev_ifsioc(net, &ifr, cmd);
5154 rtnl_unlock();
5155 return ret;
5156
5157 case SIOCGIFMEM:
5158 /* Get the per device memory space. We can add this but
5159 * currently do not support it */
5160 case SIOCSIFMEM:
5161 /* Set the per device memory buffer space.
5162 * Not applicable in our case */
5163 case SIOCSIFLINK:
41c31f31 5164 return -ENOTTY;
d1b19dff
ED
5165
5166 /*
5167 * Unknown or private ioctl.
5168 */
5169 default:
5170 if (cmd == SIOCWANDEV ||
5171 (cmd >= SIOCDEVPRIVATE &&
5172 cmd <= SIOCDEVPRIVATE + 15)) {
881d966b 5173 dev_load(net, ifr.ifr_name);
1da177e4 5174 rtnl_lock();
881d966b 5175 ret = dev_ifsioc(net, &ifr, cmd);
1da177e4 5176 rtnl_unlock();
d1b19dff
ED
5177 if (!ret && copy_to_user(arg, &ifr,
5178 sizeof(struct ifreq)))
5179 ret = -EFAULT;
1da177e4 5180 return ret;
d1b19dff
ED
5181 }
5182 /* Take care of Wireless Extensions */
5183 if (cmd >= SIOCIWFIRST && cmd <= SIOCIWLAST)
5184 return wext_handle_ioctl(net, &ifr, cmd, arg);
41c31f31 5185 return -ENOTTY;
1da177e4
LT
5186 }
5187}
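/*
 * Illustrative sketch, not part of this file: a userspace caller of the
 * read-only ioctl path above (dev_load() + dev_ifsioc_locked() under
 * RCU). Standalone program; the interface name is an assumption and
 * error handling is abbreviated.
 */
#if 0
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <unistd.h>

int main(void)
{
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
	if (ioctl(fd, SIOCGIFMTU, &ifr) == 0)
		printf("%s mtu %d\n", ifr.ifr_name, ifr.ifr_mtu);
	close(fd);
	return 0;
}
#endif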
5188
5189
5190/**
5191 * dev_new_index - allocate an ifindex
c4ea43c5 5192 * @net: the applicable net namespace
1da177e4
LT
5193 *
5194 * Returns a suitable unique value for a new device interface
5195 * number. The caller must hold the rtnl semaphore or the
5196 * dev_base_lock to be sure it remains unique.
5197 */
881d966b 5198static int dev_new_index(struct net *net)
1da177e4
LT
5199{
5200 static int ifindex;
5201 for (;;) {
5202 if (++ifindex <= 0)
5203 ifindex = 1;
881d966b 5204 if (!__dev_get_by_index(net, ifindex))
1da177e4
LT
5205 return ifindex;
5206 }
5207}
5208
1da177e4 5209/* Delayed registration/unregistration */
3b5b34fd 5210static LIST_HEAD(net_todo_list);
1da177e4 5211
6f05f629 5212static void net_set_todo(struct net_device *dev)
1da177e4 5213{
1da177e4 5214 list_add_tail(&dev->todo_list, &net_todo_list);
1da177e4
LT
5215}
5216
9b5e383c 5217static void rollback_registered_many(struct list_head *head)
93ee31f1 5218{
e93737b0 5219 struct net_device *dev, *tmp;
9b5e383c 5220
93ee31f1
DL
5221 BUG_ON(dev_boot_phase);
5222 ASSERT_RTNL();
5223
e93737b0 5224 list_for_each_entry_safe(dev, tmp, head, unreg_list) {
9b5e383c 5225 /* Some devices call without registering
e93737b0
KK
5226 * for initialization unwind. Remove those
5227 * devices and proceed with the remaining.
9b5e383c
ED
5228 */
5229 if (dev->reg_state == NETREG_UNINITIALIZED) {
7b6cd1ce
JP
5230 pr_debug("unregister_netdevice: device %s/%p never was registered\n",
5231 dev->name, dev);
93ee31f1 5232
9b5e383c 5233 WARN_ON(1);
e93737b0
KK
5234 list_del(&dev->unreg_list);
5235 continue;
9b5e383c 5236 }
449f4544 5237 dev->dismantle = true;
9b5e383c 5238 BUG_ON(dev->reg_state != NETREG_REGISTERED);
44345724 5239 }
93ee31f1 5240
44345724
OP
5241 /* If device is running, close it first. */
5242 dev_close_many(head);
93ee31f1 5243
44345724 5244 list_for_each_entry(dev, head, unreg_list) {
9b5e383c
ED
5245 /* And unlink it from device chain. */
5246 unlist_netdevice(dev);
93ee31f1 5247
9b5e383c
ED
5248 dev->reg_state = NETREG_UNREGISTERING;
5249 }
93ee31f1
DL
5250
5251 synchronize_net();
5252
9b5e383c
ED
5253 list_for_each_entry(dev, head, unreg_list) {
5254 /* Shutdown queueing discipline. */
5255 dev_shutdown(dev);
93ee31f1
DL
5256
5257
9b5e383c
ED
5258 /* Notify protocols that we are about to destroy
5259 this device. They should clean up all their state.
5260 */
5261 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
93ee31f1 5262
a2835763
PM
5263 if (!dev->rtnl_link_ops ||
5264 dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
5265 rtmsg_ifinfo(RTM_DELLINK, dev, ~0U);
5266
9b5e383c
ED
5267 /*
5268 * Flush the unicast and multicast chains
5269 */
a748ee24 5270 dev_uc_flush(dev);
22bedad3 5271 dev_mc_flush(dev);
93ee31f1 5272
9b5e383c
ED
5273 if (dev->netdev_ops->ndo_uninit)
5274 dev->netdev_ops->ndo_uninit(dev);
93ee31f1 5275
9b5e383c
ED
5276 /* Notifier chain MUST detach us from master device. */
5277 WARN_ON(dev->master);
93ee31f1 5278
9b5e383c
ED
5279 /* Remove entries from kobject tree */
5280 netdev_unregister_kobject(dev);
5281 }
93ee31f1 5282
a5ee1551 5283 /* Process any work delayed until the end of the batch */
e5e26d75 5284 dev = list_first_entry(head, struct net_device, unreg_list);
a5ee1551 5285 call_netdevice_notifiers(NETDEV_UNREGISTER_BATCH, dev);
93ee31f1 5286
850a545b 5287 synchronize_net();
395264d5 5288
a5ee1551 5289 list_for_each_entry(dev, head, unreg_list)
9b5e383c
ED
5290 dev_put(dev);
5291}
5292
5293static void rollback_registered(struct net_device *dev)
5294{
5295 LIST_HEAD(single);
5296
5297 list_add(&dev->unreg_list, &single);
5298 rollback_registered_many(&single);
ceaaec98 5299 list_del(&single);
93ee31f1
DL
5300}
5301
c8f44aff
MM
5302static netdev_features_t netdev_fix_features(struct net_device *dev,
5303 netdev_features_t features)
b63365a2 5304{
57422dc5
MM
5305 /* Fix illegal checksum combinations */
5306 if ((features & NETIF_F_HW_CSUM) &&
5307 (features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
6f404e44 5308 netdev_warn(dev, "mixed HW and IP checksum settings.\n");
57422dc5
MM
5309 features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM);
5310 }
5311
b63365a2
HX
5312 /* Fix illegal SG+CSUM combinations. */
5313 if ((features & NETIF_F_SG) &&
5314 !(features & NETIF_F_ALL_CSUM)) {
6f404e44
MM
5315 netdev_dbg(dev,
5316 "Dropping NETIF_F_SG since no checksum feature.\n");
b63365a2
HX
5317 features &= ~NETIF_F_SG;
5318 }
5319
5320 /* TSO requires that SG is present as well. */
ea2d3688 5321 if ((features & NETIF_F_ALL_TSO) && !(features & NETIF_F_SG)) {
6f404e44 5322 netdev_dbg(dev, "Dropping TSO features since no SG feature.\n");
ea2d3688 5323 features &= ~NETIF_F_ALL_TSO;
b63365a2
HX
5324 }
5325
31d8b9e0
BH
5326 /* TSO ECN requires that TSO is present as well. */
5327 if ((features & NETIF_F_ALL_TSO) == NETIF_F_TSO_ECN)
5328 features &= ~NETIF_F_TSO_ECN;
5329
212b573f
MM
5330 /* Software GSO depends on SG. */
5331 if ((features & NETIF_F_GSO) && !(features & NETIF_F_SG)) {
6f404e44 5332 netdev_dbg(dev, "Dropping NETIF_F_GSO since no SG feature.\n");
212b573f
MM
5333 features &= ~NETIF_F_GSO;
5334 }
5335
acd1130e 5336 /* UFO needs SG and checksumming */
b63365a2 5337 if (features & NETIF_F_UFO) {
79032644
MM
5338 /* maybe split UFO into V4 and V6? */
5339 if (!((features & NETIF_F_GEN_CSUM) ||
5340 (features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))
5341 == (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
6f404e44 5342 netdev_dbg(dev,
acd1130e 5343 "Dropping NETIF_F_UFO since no checksum offload features.\n");
b63365a2
HX
5344 features &= ~NETIF_F_UFO;
5345 }
5346
5347 if (!(features & NETIF_F_SG)) {
6f404e44 5348 netdev_dbg(dev,
acd1130e 5349 "Dropping NETIF_F_UFO since no NETIF_F_SG feature.\n");
b63365a2
HX
5350 features &= ~NETIF_F_UFO;
5351 }
5352 }
5353
5354 return features;
5355}
b63365a2 5356
6cb6a27c 5357int __netdev_update_features(struct net_device *dev)
5455c699 5358{
c8f44aff 5359 netdev_features_t features;
5455c699
MM
5360 int err = 0;
5361
87267485
MM
5362 ASSERT_RTNL();
5363
5455c699
MM
5364 features = netdev_get_wanted_features(dev);
5365
5366 if (dev->netdev_ops->ndo_fix_features)
5367 features = dev->netdev_ops->ndo_fix_features(dev, features);
5368
5369 /* driver might be less strict about feature dependencies */
5370 features = netdev_fix_features(dev, features);
5371
5372 if (dev->features == features)
6cb6a27c 5373 return 0;
5455c699 5374
c8f44aff
MM
5375 netdev_dbg(dev, "Features changed: %pNF -> %pNF\n",
5376 &dev->features, &features);
5455c699
MM
5377
5378 if (dev->netdev_ops->ndo_set_features)
5379 err = dev->netdev_ops->ndo_set_features(dev, features);
5380
6cb6a27c 5381 if (unlikely(err < 0)) {
5455c699 5382 netdev_err(dev,
c8f44aff
MM
5383 "set_features() failed (%d); wanted %pNF, left %pNF\n",
5384 err, &features, &dev->features);
6cb6a27c
MM
5385 return -1;
5386 }
5387
5388 if (!err)
5389 dev->features = features;
5390
5391 return 1;
5392}
5393
/**
 * netdev_update_features - recalculate device features
 * @dev: the device to check
 *
 * Recalculate dev->features set and send notifications if it
 * has changed. Should be called after driver or hardware dependent
 * conditions might have changed that influence the features.
 */
void netdev_update_features(struct net_device *dev)
{
	if (__netdev_update_features(dev))
		netdev_features_change(dev);
}
EXPORT_SYMBOL(netdev_update_features);

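/*
 * Example (illustrative sketch, not part of dev.c): after a driver has
 * changed something the feature computation depends on, e.g. cleared a
 * bit in dev->hw_features from hardware-dependent code, it asks the
 * core to recompute and propagate the result. "mydrv_disable_tso" is a
 * hypothetical helper; the caller is assumed to hold RTNL, which
 * __netdev_update_features() asserts.
 */
static void mydrv_disable_tso(struct net_device *dev)
{
	dev->hw_features &= ~NETIF_F_ALL_TSO;	/* HW lost TSO capability */
	netdev_update_features(dev);	/* recompute; notify only on change */
}
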
/**
 * netdev_change_features - recalculate device features
 * @dev: the device to check
 *
 * Recalculate dev->features set and send notifications even
 * if they have not changed. Should be called instead of
 * netdev_update_features() if dev->vlan_features might also
 * have changed, to allow the changes to be propagated to stacked
 * VLAN devices.
 */
void netdev_change_features(struct net_device *dev)
{
	__netdev_update_features(dev);
	netdev_features_change(dev);
}
EXPORT_SYMBOL(netdev_change_features);

/**
 * netif_stacked_transfer_operstate - transfer operstate
 * @rootdev: the root or lower level device to transfer state from
 * @dev: the device to transfer operstate to
 *
 * Transfer operational state from root to device. This is normally
 * called when a stacking relationship exists between the root
 * device and the device (a leaf device).
 */
void netif_stacked_transfer_operstate(const struct net_device *rootdev,
				      struct net_device *dev)
{
	if (rootdev->operstate == IF_OPER_DORMANT)
		netif_dormant_on(dev);
	else
		netif_dormant_off(dev);

	if (netif_carrier_ok(rootdev)) {
		if (!netif_carrier_ok(dev))
			netif_carrier_on(dev);
	} else {
		if (netif_carrier_ok(dev))
			netif_carrier_off(dev);
	}
}
EXPORT_SYMBOL(netif_stacked_transfer_operstate);

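/*
 * Example (illustrative sketch): a stacked driver typically calls this
 * from its netdevice notifier when the lower device changes state, so
 * the upper device mirrors carrier and dormant state. The "myvlan"
 * names and the upper-device lookup are hypothetical.
 */
static struct net_device *myvlan_find_upper(struct net_device *lower);

static int myvlan_device_event(struct notifier_block *unused,
			       unsigned long event, void *ptr)
{
	struct net_device *lower = ptr;
	struct net_device *upper = myvlan_find_upper(lower);

	if (upper && event == NETDEV_CHANGE)
		netif_stacked_transfer_operstate(lower, upper);
	return NOTIFY_DONE;
}
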
#ifdef CONFIG_RPS
static int netif_alloc_rx_queues(struct net_device *dev)
{
	unsigned int i, count = dev->num_rx_queues;
	struct netdev_rx_queue *rx;

	BUG_ON(count < 1);

	rx = kcalloc(count, sizeof(struct netdev_rx_queue), GFP_KERNEL);
	if (!rx) {
		pr_err("netdev: Unable to allocate %u rx queues\n", count);
		return -ENOMEM;
	}
	dev->_rx = rx;

	for (i = 0; i < count; i++)
		rx[i].dev = dev;
	return 0;
}
#endif

static void netdev_init_one_queue(struct net_device *dev,
				  struct netdev_queue *queue, void *_unused)
{
	/* Initialize queue lock */
	spin_lock_init(&queue->_xmit_lock);
	netdev_set_xmit_lockdep_class(&queue->_xmit_lock, dev->type);
	queue->xmit_lock_owner = -1;
	netdev_queue_numa_node_write(queue, NUMA_NO_NODE);
	queue->dev = dev;
#ifdef CONFIG_BQL
	dql_init(&queue->dql, HZ);
#endif
}

static int netif_alloc_netdev_queues(struct net_device *dev)
{
	unsigned int count = dev->num_tx_queues;
	struct netdev_queue *tx;

	BUG_ON(count < 1);

	tx = kcalloc(count, sizeof(struct netdev_queue), GFP_KERNEL);
	if (!tx) {
		pr_err("netdev: Unable to allocate %u tx queues\n", count);
		return -ENOMEM;
	}
	dev->_tx = tx;

	netdev_for_each_tx_queue(dev, netdev_init_one_queue, NULL);
	spin_lock_init(&dev->tx_global_lock);

	return 0;
}

/**
 * register_netdevice - register a network device
 * @dev: device to register
 *
 * Take a completed network device structure and add it to the kernel
 * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
 * chain. 0 is returned on success. A negative errno code is returned
 * on a failure to set up the device, or if the name is a duplicate.
 *
 * Callers must hold the rtnl semaphore. You may want
 * register_netdev() instead of this.
 *
 * BUGS:
 * The locking appears insufficient to guarantee two parallel registers
 * will not get the same name.
 */

int register_netdevice(struct net_device *dev)
{
	int ret;
	struct net *net = dev_net(dev);

	BUG_ON(dev_boot_phase);
	ASSERT_RTNL();

	might_sleep();

	/* When net_device's are persistent, this will be fatal. */
	BUG_ON(dev->reg_state != NETREG_UNINITIALIZED);
	BUG_ON(!net);

	spin_lock_init(&dev->addr_list_lock);
	netdev_set_addr_lockdep_class(dev);

	dev->iflink = -1;

	ret = dev_get_valid_name(dev, dev->name);
	if (ret < 0)
		goto out;

	/* Init, if this function is available */
	if (dev->netdev_ops->ndo_init) {
		ret = dev->netdev_ops->ndo_init(dev);
		if (ret) {
			if (ret > 0)
				ret = -EIO;
			goto out;
		}
	}

	dev->ifindex = dev_new_index(net);
	if (dev->iflink == -1)
		dev->iflink = dev->ifindex;

	/* Transfer changeable features to wanted_features and enable
	 * software offloads (GSO and GRO).
	 */
	dev->hw_features |= NETIF_F_SOFT_FEATURES;
	dev->features |= NETIF_F_SOFT_FEATURES;
	dev->wanted_features = dev->features & dev->hw_features;

	/* Turn on no cache copy if HW is doing checksum */
	if (!(dev->flags & IFF_LOOPBACK)) {
		dev->hw_features |= NETIF_F_NOCACHE_COPY;
		if (dev->features & NETIF_F_ALL_CSUM) {
			dev->wanted_features |= NETIF_F_NOCACHE_COPY;
			dev->features |= NETIF_F_NOCACHE_COPY;
		}
	}

	/* Make NETIF_F_HIGHDMA inheritable to VLAN devices. */
	dev->vlan_features |= NETIF_F_HIGHDMA;

	ret = call_netdevice_notifiers(NETDEV_POST_INIT, dev);
	ret = notifier_to_errno(ret);
	if (ret)
		goto err_uninit;

	ret = netdev_register_kobject(dev);
	if (ret)
		goto err_uninit;
	dev->reg_state = NETREG_REGISTERED;

	__netdev_update_features(dev);

	/*
	 * Default initial state at registry is that the
	 * device is present.
	 */

	set_bit(__LINK_STATE_PRESENT, &dev->state);

	dev_init_scheduler(dev);
	dev_hold(dev);
	list_netdevice(dev);

	/* Notify protocols, that a new device appeared. */
	ret = call_netdevice_notifiers(NETDEV_REGISTER, dev);
	ret = notifier_to_errno(ret);
	if (ret) {
		rollback_registered(dev);
		dev->reg_state = NETREG_UNREGISTERED;
	}
	/*
	 * Prevent userspace races by waiting until the network
	 * device is fully setup before sending notifications.
	 */
	if (!dev->rtnl_link_ops ||
	    dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
		rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U);

out:
	return ret;

err_uninit:
	if (dev->netdev_ops->ndo_uninit)
		dev->netdev_ops->ndo_uninit(dev);
	goto out;
}
EXPORT_SYMBOL(register_netdevice);

/**
 * init_dummy_netdev - init a dummy network device for NAPI
 * @dev: device to init
 *
 * This takes a network device structure and initializes the minimum
 * number of fields so it can be used to schedule NAPI polls without
 * registering a full blown interface. This is to be used by drivers
 * that need to tie several hardware interfaces to a single NAPI
 * poll scheduler due to HW limitations.
 */
int init_dummy_netdev(struct net_device *dev)
{
	/* Clear everything. Note we don't initialize spinlocks
	 * as they aren't supposed to be taken by any of the
	 * NAPI code and this dummy netdev is supposed to be
	 * only ever used for NAPI polls
	 */
	memset(dev, 0, sizeof(struct net_device));

	/* make sure we BUG if trying to hit standard
	 * register/unregister code path
	 */
	dev->reg_state = NETREG_DUMMY;

	/* NAPI wants this */
	INIT_LIST_HEAD(&dev->napi_list);

	/* a dummy interface is started by default */
	set_bit(__LINK_STATE_PRESENT, &dev->state);
	set_bit(__LINK_STATE_START, &dev->state);

	/* Note : We don't allocate pcpu_refcnt for dummy devices,
	 * because users of this 'device' don't need to change
	 * its refcount.
	 */

	return 0;
}
EXPORT_SYMBOL_GPL(init_dummy_netdev);

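/*
 * Example (illustrative sketch): a driver whose hardware funnels
 * several ports through one interrupt/DMA engine can attach a single
 * NAPI context to a dummy netdev that is never registered. All
 * "mydrv" names are hypothetical.
 */
static int mydrv_poll(struct napi_struct *napi, int budget);	/* hypothetical */

struct mydrv_card {
	struct net_device napi_dev;	/* dummy device, never registered */
	struct napi_struct napi;
};

static void mydrv_setup_napi(struct mydrv_card *card)
{
	init_dummy_netdev(&card->napi_dev);
	netif_napi_add(&card->napi_dev, &card->napi, mydrv_poll, 64);
	napi_enable(&card->napi);
}
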
/**
 * register_netdev - register a network device
 * @dev: device to register
 *
 * Take a completed network device structure and add it to the kernel
 * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
 * chain. 0 is returned on success. A negative errno code is returned
 * on a failure to set up the device, or if the name is a duplicate.
 *
 * This is a wrapper around register_netdevice that takes the rtnl semaphore
 * and expands the device name if you passed a format string to
 * alloc_netdev.
 */
int register_netdev(struct net_device *dev)
{
	int err;

	rtnl_lock();
	err = register_netdevice(dev);
	rtnl_unlock();
	return err;
}
EXPORT_SYMBOL(register_netdev);

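/*
 * Example (illustrative sketch): the typical probe-time pattern around
 * register_netdev(). "struct mydrv_priv" is a hypothetical private
 * area; alloc_etherdev() supplies Ethernet defaults and an "eth%d"
 * name that register_netdev() expands.
 */
struct mydrv_priv {
	unsigned long flags;	/* hypothetical driver state */
};

static int mydrv_create(void)
{
	struct net_device *dev;
	int err;

	dev = alloc_etherdev(sizeof(struct mydrv_priv));
	if (!dev)
		return -ENOMEM;

	err = register_netdev(dev);
	if (err) {
		free_netdev(dev);	/* safe before registration succeeds */
		return err;
	}
	return 0;
}
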
int netdev_refcnt_read(const struct net_device *dev)
{
	int i, refcnt = 0;

	for_each_possible_cpu(i)
		refcnt += *per_cpu_ptr(dev->pcpu_refcnt, i);
	return refcnt;
}
EXPORT_SYMBOL(netdev_refcnt_read);

/*
 * netdev_wait_allrefs - wait until all references are gone.
 *
 * This is called when unregistering network devices.
 *
 * Any protocol or device that holds a reference should register
 * for netdevice notification, and cleanup and put back the
 * reference if they receive an UNREGISTER event.
 * We can get stuck here if buggy protocols don't correctly
 * call dev_put.
 */
static void netdev_wait_allrefs(struct net_device *dev)
{
	unsigned long rebroadcast_time, warning_time;
	int refcnt;

	linkwatch_forget_dev(dev);

	rebroadcast_time = warning_time = jiffies;
	refcnt = netdev_refcnt_read(dev);

	while (refcnt != 0) {
		if (time_after(jiffies, rebroadcast_time + 1 * HZ)) {
			rtnl_lock();

			/* Rebroadcast unregister notification */
			call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
			/* don't resend NETDEV_UNREGISTER_BATCH, _BATCH users
			 * should have already handled it the first time */

			if (test_bit(__LINK_STATE_LINKWATCH_PENDING,
				     &dev->state)) {
				/* We must not have linkwatch events
				 * pending on unregister. If this
				 * happens, we simply run the queue
				 * unscheduled, resulting in a noop
				 * for this device.
				 */
				linkwatch_run_queue();
			}

			__rtnl_unlock();

			rebroadcast_time = jiffies;
		}

		msleep(250);

		refcnt = netdev_refcnt_read(dev);

		if (time_after(jiffies, warning_time + 10 * HZ)) {
			pr_emerg("unregister_netdevice: waiting for %s to become free. Usage count = %d\n",
				 dev->name, refcnt);
			warning_time = jiffies;
		}
	}
}

/* The sequence is:
 *
 *	rtnl_lock();
 *	...
 *	register_netdevice(x1);
 *	register_netdevice(x2);
 *	...
 *	unregister_netdevice(y1);
 *	unregister_netdevice(y2);
 *	...
 *	rtnl_unlock();
 *	free_netdev(y1);
 *	free_netdev(y2);
 *
 * We are invoked by rtnl_unlock().
 * This allows us to deal with problems:
 * 1) We can delete sysfs objects which invoke hotplug
 *    without deadlocking with linkwatch via keventd.
 * 2) Since we run with the RTNL semaphore not held, we can sleep
 *    safely in order to wait for the netdev refcnt to drop to zero.
 *
 * We must not return until all unregister events added during
 * the interval the lock was held have been completed.
 */
void netdev_run_todo(void)
{
	struct list_head list;

	/* Snapshot list, allow later requests */
	list_replace_init(&net_todo_list, &list);

	__rtnl_unlock();

	/* Wait for rcu callbacks to finish before attempting to drain
	 * the device list. This usually avoids a 250ms wait.
	 */
	if (!list_empty(&list))
		rcu_barrier();

	while (!list_empty(&list)) {
		struct net_device *dev
			= list_first_entry(&list, struct net_device, todo_list);
		list_del(&dev->todo_list);

		if (unlikely(dev->reg_state != NETREG_UNREGISTERING)) {
			pr_err("network todo '%s' but state %d\n",
			       dev->name, dev->reg_state);
			dump_stack();
			continue;
		}

		dev->reg_state = NETREG_UNREGISTERED;

		on_each_cpu(flush_backlog, dev, 1);

		netdev_wait_allrefs(dev);

		/* paranoia */
		BUG_ON(netdev_refcnt_read(dev));
		WARN_ON(rcu_access_pointer(dev->ip_ptr));
		WARN_ON(rcu_access_pointer(dev->ip6_ptr));
		WARN_ON(dev->dn_ptr);

		if (dev->destructor)
			dev->destructor(dev);

		/* Free network device */
		kobject_put(&dev->dev.kobj);
	}
}

/* Convert net_device_stats to rtnl_link_stats64. They have the same
 * fields in the same order, with only the type differing.
 */
static void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64,
				    const struct net_device_stats *netdev_stats)
{
#if BITS_PER_LONG == 64
	BUILD_BUG_ON(sizeof(*stats64) != sizeof(*netdev_stats));
	memcpy(stats64, netdev_stats, sizeof(*stats64));
#else
	size_t i, n = sizeof(*stats64) / sizeof(u64);
	const unsigned long *src = (const unsigned long *)netdev_stats;
	u64 *dst = (u64 *)stats64;

	BUILD_BUG_ON(sizeof(*netdev_stats) / sizeof(unsigned long) !=
		     sizeof(*stats64) / sizeof(u64));
	for (i = 0; i < n; i++)
		dst[i] = src[i];
#endif
}

/**
 * dev_get_stats - get network device statistics
 * @dev: device to get statistics from
 * @storage: place to store stats
 *
 * Get network statistics from device. Return @storage.
 * The device driver may provide its own method by setting
 * dev->netdev_ops->get_stats64 or dev->netdev_ops->get_stats;
 * otherwise the internal statistics structure is used.
 */
struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
					struct rtnl_link_stats64 *storage)
{
	const struct net_device_ops *ops = dev->netdev_ops;

	if (ops->ndo_get_stats64) {
		memset(storage, 0, sizeof(*storage));
		ops->ndo_get_stats64(dev, storage);
	} else if (ops->ndo_get_stats) {
		netdev_stats_to_stats64(storage, ops->ndo_get_stats(dev));
	} else {
		netdev_stats_to_stats64(storage, &dev->stats);
	}
	storage->rx_dropped += atomic_long_read(&dev->rx_dropped);
	return storage;
}
EXPORT_SYMBOL(dev_get_stats);

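/*
 * Example (illustrative sketch): callers pass their own storage, so no
 * locking of a shared stats structure is needed. "my_dump_stats" is a
 * hypothetical helper.
 */
static void my_dump_stats(struct net_device *dev)
{
	struct rtnl_link_stats64 stats;

	dev_get_stats(dev, &stats);
	netdev_info(dev, "rx %llu / tx %llu packets\n",
		    (unsigned long long)stats.rx_packets,
		    (unsigned long long)stats.tx_packets);
}
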
struct netdev_queue *dev_ingress_queue_create(struct net_device *dev)
{
	struct netdev_queue *queue = dev_ingress_queue(dev);

#ifdef CONFIG_NET_CLS_ACT
	if (queue)
		return queue;
	queue = kzalloc(sizeof(*queue), GFP_KERNEL);
	if (!queue)
		return NULL;
	netdev_init_one_queue(dev, queue, NULL);
	queue->qdisc = &noop_qdisc;
	queue->qdisc_sleeping = &noop_qdisc;
	rcu_assign_pointer(dev->ingress_queue, queue);
#endif
	return queue;
}

/**
 * alloc_netdev_mqs - allocate network device
 * @sizeof_priv: size of private data to allocate space for
 * @name: device name format string
 * @setup: callback to initialize device
 * @txqs: the number of TX subqueues to allocate
 * @rxqs: the number of RX subqueues to allocate
 *
 * Allocates a struct net_device with private data area for driver use
 * and performs basic initialization. Also allocates subqueue structs
 * for each queue on the device.
 */
struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
		void (*setup)(struct net_device *),
		unsigned int txqs, unsigned int rxqs)
{
	struct net_device *dev;
	size_t alloc_size;
	struct net_device *p;

	BUG_ON(strlen(name) >= sizeof(dev->name));

	if (txqs < 1) {
		pr_err("alloc_netdev: Unable to allocate device with zero queues\n");
		return NULL;
	}

#ifdef CONFIG_RPS
	if (rxqs < 1) {
		pr_err("alloc_netdev: Unable to allocate device with zero RX queues\n");
		return NULL;
	}
#endif

	alloc_size = sizeof(struct net_device);
	if (sizeof_priv) {
		/* ensure 32-byte alignment of private area */
		alloc_size = ALIGN(alloc_size, NETDEV_ALIGN);
		alloc_size += sizeof_priv;
	}
	/* ensure 32-byte alignment of whole construct */
	alloc_size += NETDEV_ALIGN - 1;

	p = kzalloc(alloc_size, GFP_KERNEL);
	if (!p) {
		pr_err("alloc_netdev: Unable to allocate device\n");
		return NULL;
	}

	dev = PTR_ALIGN(p, NETDEV_ALIGN);
	dev->padded = (char *)dev - (char *)p;

	dev->pcpu_refcnt = alloc_percpu(int);
	if (!dev->pcpu_refcnt)
		goto free_p;

	if (dev_addr_init(dev))
		goto free_pcpu;

	dev_mc_init(dev);
	dev_uc_init(dev);

	dev_net_set(dev, &init_net);

	dev->gso_max_size = GSO_MAX_SIZE;

	INIT_LIST_HEAD(&dev->napi_list);
	INIT_LIST_HEAD(&dev->unreg_list);
	INIT_LIST_HEAD(&dev->link_watch_list);
	dev->priv_flags = IFF_XMIT_DST_RELEASE;
	setup(dev);

	dev->num_tx_queues = txqs;
	dev->real_num_tx_queues = txqs;
	if (netif_alloc_netdev_queues(dev))
		goto free_all;

#ifdef CONFIG_RPS
	dev->num_rx_queues = rxqs;
	dev->real_num_rx_queues = rxqs;
	if (netif_alloc_rx_queues(dev))
		goto free_all;
#endif

	strcpy(dev->name, name);
	dev->group = INIT_NETDEV_GROUP;
	return dev;

free_all:
	free_netdev(dev);
	return NULL;

free_pcpu:
	free_percpu(dev->pcpu_refcnt);
	kfree(dev->_tx);
#ifdef CONFIG_RPS
	kfree(dev->_rx);
#endif

free_p:
	kfree(p);
	return NULL;
}
EXPORT_SYMBOL(alloc_netdev_mqs);

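/*
 * Example (illustrative sketch): allocating a multiqueue device with a
 * private area. "myq_setup"/"struct myq_priv" are hypothetical; the
 * "%d" in the name is expanded when the device is registered.
 */
struct myq_priv {
	int dummy;	/* hypothetical private state */
};

static void myq_setup(struct net_device *dev)
{
	ether_setup(dev);	/* usual Ethernet defaults */
}

static struct net_device *myq_alloc(void)
{
	return alloc_netdev_mqs(sizeof(struct myq_priv), "myeth%d",
				myq_setup, 4, 4);	/* 4 TX, 4 RX queues */
}
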
/**
 * free_netdev - free network device
 * @dev: device
 *
 * This function does the last stage of destroying an allocated device
 * interface. The reference to the device object is released.
 * If this is the last reference then it will be freed.
 */
void free_netdev(struct net_device *dev)
{
	struct napi_struct *p, *n;

	release_net(dev_net(dev));

	kfree(dev->_tx);
#ifdef CONFIG_RPS
	kfree(dev->_rx);
#endif

	kfree(rcu_dereference_protected(dev->ingress_queue, 1));

	/* Flush device addresses */
	dev_addr_flush(dev);

	list_for_each_entry_safe(p, n, &dev->napi_list, dev_list)
		netif_napi_del(p);

	free_percpu(dev->pcpu_refcnt);
	dev->pcpu_refcnt = NULL;

	/* Compatibility with error handling in drivers */
	if (dev->reg_state == NETREG_UNINITIALIZED) {
		kfree((char *)dev - dev->padded);
		return;
	}

	BUG_ON(dev->reg_state != NETREG_UNREGISTERED);
	dev->reg_state = NETREG_RELEASED;

	/* will free via device release */
	put_device(&dev->dev);
}
EXPORT_SYMBOL(free_netdev);

/**
 * synchronize_net - Synchronize with packet receive processing
 *
 * Wait for packets currently being received to be done.
 * Does not block later packets from starting.
 */
void synchronize_net(void)
{
	might_sleep();
	if (rtnl_is_locked())
		synchronize_rcu_expedited();
	else
		synchronize_rcu();
}
EXPORT_SYMBOL(synchronize_net);

/**
 * unregister_netdevice_queue - remove device from the kernel
 * @dev: device
 * @head: list
 *
 * This function shuts down a device interface and removes it
 * from the kernel tables.
 * If @head is not NULL, the device is queued to be unregistered later.
 *
 * Callers must hold the rtnl semaphore. You may want
 * unregister_netdev() instead of this.
 */

void unregister_netdevice_queue(struct net_device *dev, struct list_head *head)
{
	ASSERT_RTNL();

	if (head) {
		list_move_tail(&dev->unreg_list, head);
	} else {
		rollback_registered(dev);
		/* Finish processing unregister after unlock */
		net_set_todo(dev);
	}
}
EXPORT_SYMBOL(unregister_netdevice_queue);

/**
 * unregister_netdevice_many - unregister many devices
 * @head: list of devices
 */
void unregister_netdevice_many(struct list_head *head)
{
	struct net_device *dev;

	if (!list_empty(head)) {
		rollback_registered_many(head);
		list_for_each_entry(dev, head, unreg_list)
			net_set_todo(dev);
	}
}
EXPORT_SYMBOL(unregister_netdevice_many);

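/*
 * Example (illustrative sketch): queueing several devices and tearing
 * them down in one batch amortizes the expensive rollback
 * synchronization. "my_kill_pair" is a hypothetical helper; the caller
 * is assumed to hold RTNL.
 */
static void my_kill_pair(struct net_device *a, struct net_device *b)
{
	LIST_HEAD(kill_list);

	unregister_netdevice_queue(a, &kill_list);
	unregister_netdevice_queue(b, &kill_list);
	unregister_netdevice_many(&kill_list);
}
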
/**
 * unregister_netdev - remove device from the kernel
 * @dev: device
 *
 * This function shuts down a device interface and removes it
 * from the kernel tables.
 *
 * This is just a wrapper for unregister_netdevice that takes
 * the rtnl semaphore. In general you want to use this and not
 * unregister_netdevice.
 */
void unregister_netdev(struct net_device *dev)
{
	rtnl_lock();
	unregister_netdevice(dev);
	rtnl_unlock();
}
EXPORT_SYMBOL(unregister_netdev);

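/*
 * Example (illustrative sketch): the usual driver remove path, the
 * mirror image of the probe pattern shown after register_netdev()
 * above. "mydrv_destroy" is a hypothetical helper.
 */
static void mydrv_destroy(struct net_device *dev)
{
	unregister_netdev(dev);	/* takes rtnl; todo list waits for refs */
	free_netdev(dev);
}
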
/**
 * dev_change_net_namespace - move device to a different network namespace
 * @dev: device
 * @net: network namespace
 * @pat: If not NULL name pattern to try if the current device name
 *       is already taken in the destination network namespace.
 *
 * This function shuts down a device interface and moves it
 * to a new network namespace. On success 0 is returned, on
 * a failure a negative errno code is returned.
 *
 * Callers must hold the rtnl semaphore.
 */

int dev_change_net_namespace(struct net_device *dev, struct net *net, const char *pat)
{
	int err;

	ASSERT_RTNL();

	/* Don't allow namespace local devices to be moved. */
	err = -EINVAL;
	if (dev->features & NETIF_F_NETNS_LOCAL)
		goto out;

	/* Ensure the device has been registered */
	err = -EINVAL;
	if (dev->reg_state != NETREG_REGISTERED)
		goto out;

	/* Get out if there is nothing to do */
	err = 0;
	if (net_eq(dev_net(dev), net))
		goto out;

	/* Pick the destination device name, and ensure
	 * we can use it in the destination network namespace.
	 */
	err = -EEXIST;
	if (__dev_get_by_name(net, dev->name)) {
		/* We get here if we can't use the current device name */
		if (!pat)
			goto out;
		if (dev_get_valid_name(dev, pat) < 0)
			goto out;
	}

	/*
	 * And now a mini version of register_netdevice/unregister_netdevice.
	 */

	/* If device is running close it first. */
	dev_close(dev);

	/* And unlink it from device chain */
	err = -ENODEV;
	unlist_netdevice(dev);

	synchronize_net();

	/* Shutdown queueing discipline. */
	dev_shutdown(dev);

	/* Notify protocols that we are about to destroy this device.
	 * They should clean all the things.
	 *
	 * Note that dev->reg_state stays at NETREG_REGISTERED.
	 * This is wanted because this way 8021q and macvlan know
	 * the device is just moving and can keep their slaves up.
	 */
	call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
	call_netdevice_notifiers(NETDEV_UNREGISTER_BATCH, dev);
	rtmsg_ifinfo(RTM_DELLINK, dev, ~0U);

	/*
	 * Flush the unicast and multicast chains
	 */
	dev_uc_flush(dev);
	dev_mc_flush(dev);

	/* Actually switch the network namespace */
	dev_net_set(dev, net);

	/* If there is an ifindex conflict assign a new one */
	if (__dev_get_by_index(net, dev->ifindex)) {
		int iflink = (dev->iflink == dev->ifindex);
		dev->ifindex = dev_new_index(net);
		if (iflink)
			dev->iflink = dev->ifindex;
	}

	/* Fixup kobjects */
	err = device_rename(&dev->dev, dev->name);
	WARN_ON(err);

	/* Add the device back in the hashes */
	list_netdevice(dev);

	/* Notify protocols, that a new device appeared. */
	call_netdevice_notifiers(NETDEV_REGISTER, dev);

	/*
	 * Prevent userspace races by waiting until the network
	 * device is fully setup before sending notifications.
	 */
	rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U);

	synchronize_net();
	err = 0;
out:
	return err;
}
EXPORT_SYMBOL_GPL(dev_change_net_namespace);

static int dev_cpu_callback(struct notifier_block *nfb,
			    unsigned long action,
			    void *ocpu)
{
	struct sk_buff **list_skb;
	struct sk_buff *skb;
	unsigned int cpu, oldcpu = (unsigned long)ocpu;
	struct softnet_data *sd, *oldsd;

	if (action != CPU_DEAD && action != CPU_DEAD_FROZEN)
		return NOTIFY_OK;

	local_irq_disable();
	cpu = smp_processor_id();
	sd = &per_cpu(softnet_data, cpu);
	oldsd = &per_cpu(softnet_data, oldcpu);

	/* Find end of our completion_queue. */
	list_skb = &sd->completion_queue;
	while (*list_skb)
		list_skb = &(*list_skb)->next;
	/* Append completion queue from offline CPU. */
	*list_skb = oldsd->completion_queue;
	oldsd->completion_queue = NULL;

	/* Append output queue from offline CPU. */
	if (oldsd->output_queue) {
		*sd->output_queue_tailp = oldsd->output_queue;
		sd->output_queue_tailp = oldsd->output_queue_tailp;
		oldsd->output_queue = NULL;
		oldsd->output_queue_tailp = &oldsd->output_queue;
	}
	/* Append NAPI poll list from offline CPU. */
	if (!list_empty(&oldsd->poll_list)) {
		list_splice_init(&oldsd->poll_list, &sd->poll_list);
		raise_softirq_irqoff(NET_RX_SOFTIRQ);
	}

	raise_softirq_irqoff(NET_TX_SOFTIRQ);
	local_irq_enable();

	/* Process offline CPU's input_pkt_queue */
	while ((skb = __skb_dequeue(&oldsd->process_queue))) {
		netif_rx(skb);
		input_queue_head_incr(oldsd);
	}
	while ((skb = __skb_dequeue(&oldsd->input_pkt_queue))) {
		netif_rx(skb);
		input_queue_head_incr(oldsd);
	}

	return NOTIFY_OK;
}


/**
 * netdev_increment_features - increment feature set by one
 * @all: current feature set
 * @one: new feature set
 * @mask: mask feature set
 *
 * Computes a new feature set after adding a device with feature set
 * @one to the master device with current feature set @all. Will not
 * enable anything that is off in @mask. Returns the new feature set.
 */
netdev_features_t netdev_increment_features(netdev_features_t all,
	netdev_features_t one, netdev_features_t mask)
{
	if (mask & NETIF_F_GEN_CSUM)
		mask |= NETIF_F_ALL_CSUM;
	mask |= NETIF_F_VLAN_CHALLENGED;

	all |= one & (NETIF_F_ONE_FOR_ALL|NETIF_F_ALL_CSUM) & mask;
	all &= one | ~NETIF_F_ALL_FOR_ALL;

	/* If one device supports hw checksumming, set for all. */
	if (all & NETIF_F_GEN_CSUM)
		all &= ~(NETIF_F_ALL_CSUM & ~NETIF_F_GEN_CSUM);

	return all;
}
EXPORT_SYMBOL(netdev_increment_features);

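/*
 * Example (illustrative sketch): a bonding-style master folding its
 * slaves' feature sets together, in the spirit of how the bonding
 * driver uses this helper. "struct my_slave", the list layout, and the
 * seed value are hypothetical choices for this sketch.
 */
struct my_slave {
	struct list_head list;
	struct net_device *dev;
};

static netdev_features_t my_master_features(struct list_head *slaves,
					    netdev_features_t mask)
{
	/* start with the all-for-all bits set so the AND semantics work */
	netdev_features_t features = mask & NETIF_F_ONE_FOR_ALL;
	struct my_slave *s;

	list_for_each_entry(s, slaves, list)
		features = netdev_increment_features(features,
						     s->dev->features, mask);
	return features;
}
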
static struct hlist_head *netdev_create_hash(void)
{
	int i;
	struct hlist_head *hash;

	hash = kmalloc(sizeof(*hash) * NETDEV_HASHENTRIES, GFP_KERNEL);
	if (hash != NULL)
		for (i = 0; i < NETDEV_HASHENTRIES; i++)
			INIT_HLIST_HEAD(&hash[i]);

	return hash;
}

/* Initialize per network namespace state */
static int __net_init netdev_init(struct net *net)
{
	INIT_LIST_HEAD(&net->dev_base_head);

	net->dev_name_head = netdev_create_hash();
	if (net->dev_name_head == NULL)
		goto err_name;

	net->dev_index_head = netdev_create_hash();
	if (net->dev_index_head == NULL)
		goto err_idx;

	return 0;

err_idx:
	kfree(net->dev_name_head);
err_name:
	return -ENOMEM;
}

/**
 * netdev_drivername - network driver for the device
 * @dev: network device
 *
 * Determine network driver for device.
 */
const char *netdev_drivername(const struct net_device *dev)
{
	const struct device_driver *driver;
	const struct device *parent;
	const char *empty = "";

	parent = dev->dev.parent;
	if (!parent)
		return empty;

	driver = parent->driver;
	if (driver && driver->name)
		return driver->name;
	return empty;
}

int __netdev_printk(const char *level, const struct net_device *dev,
		    struct va_format *vaf)
{
	int r;

	if (dev && dev->dev.parent)
		r = dev_printk(level, dev->dev.parent, "%s: %pV",
			       netdev_name(dev), vaf);
	else if (dev)
		r = printk("%s%s: %pV", level, netdev_name(dev), vaf);
	else
		r = printk("%s(NULL net_device): %pV", level, vaf);

	return r;
}
EXPORT_SYMBOL(__netdev_printk);

int netdev_printk(const char *level, const struct net_device *dev,
		  const char *format, ...)
{
	struct va_format vaf;
	va_list args;
	int r;

	va_start(args, format);

	vaf.fmt = format;
	vaf.va = &args;

	r = __netdev_printk(level, dev, &vaf);
	va_end(args);

	return r;
}
EXPORT_SYMBOL(netdev_printk);

#define define_netdev_printk_level(func, level)			\
int func(const struct net_device *dev, const char *fmt, ...)	\
{								\
	int r;							\
	struct va_format vaf;					\
	va_list args;						\
								\
	va_start(args, fmt);					\
								\
	vaf.fmt = fmt;						\
	vaf.va = &args;						\
								\
	r = __netdev_printk(level, dev, &vaf);			\
	va_end(args);						\
								\
	return r;						\
}								\
EXPORT_SYMBOL(func);

define_netdev_printk_level(netdev_emerg, KERN_EMERG);
define_netdev_printk_level(netdev_alert, KERN_ALERT);
define_netdev_printk_level(netdev_crit, KERN_CRIT);
define_netdev_printk_level(netdev_err, KERN_ERR);
define_netdev_printk_level(netdev_warn, KERN_WARNING);
define_netdev_printk_level(netdev_notice, KERN_NOTICE);
define_netdev_printk_level(netdev_info, KERN_INFO);

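/*
 * Example (illustrative): the generated helpers are used like printk()
 * but prefix the message with bus/driver/device information when a
 * parent device is available. "my_link_report" is hypothetical.
 */
static void my_link_report(struct net_device *dev, bool up)
{
	if (up)
		netdev_info(dev, "link up\n");
	else
		netdev_warn(dev, "link down\n");
}
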
static void __net_exit netdev_exit(struct net *net)
{
	kfree(net->dev_name_head);
	kfree(net->dev_index_head);
}

static struct pernet_operations __net_initdata netdev_net_ops = {
	.init = netdev_init,
	.exit = netdev_exit,
};

static void __net_exit default_device_exit(struct net *net)
{
	struct net_device *dev, *aux;
	/*
	 * Push all migratable network devices back to the
	 * initial network namespace
	 */
	rtnl_lock();
	for_each_netdev_safe(net, dev, aux) {
		int err;
		char fb_name[IFNAMSIZ];

		/* Ignore unmoveable devices (i.e. loopback) */
		if (dev->features & NETIF_F_NETNS_LOCAL)
			continue;

		/* Leave virtual devices for the generic cleanup */
		if (dev->rtnl_link_ops)
			continue;

		/* Push remaining network devices to init_net */
		snprintf(fb_name, IFNAMSIZ, "dev%d", dev->ifindex);
		err = dev_change_net_namespace(dev, &init_net, fb_name);
		if (err) {
			pr_emerg("%s: failed to move %s to init_net: %d\n",
				 __func__, dev->name, err);
			BUG();
		}
	}
	rtnl_unlock();
}

static void __net_exit default_device_exit_batch(struct list_head *net_list)
{
	/* At exit all network devices must be removed from a network
	 * namespace. Do this in the reverse order of registration.
	 * Do this across as many network namespaces as possible to
	 * improve batching efficiency.
	 */
	struct net_device *dev;
	struct net *net;
	LIST_HEAD(dev_kill_list);

	rtnl_lock();
	list_for_each_entry(net, net_list, exit_list) {
		for_each_netdev_reverse(net, dev) {
			if (dev->rtnl_link_ops)
				dev->rtnl_link_ops->dellink(dev, &dev_kill_list);
			else
				unregister_netdevice_queue(dev, &dev_kill_list);
		}
	}
	unregister_netdevice_many(&dev_kill_list);
	list_del(&dev_kill_list);
	rtnl_unlock();
}

static struct pernet_operations __net_initdata default_device_ops = {
	.exit = default_device_exit,
	.exit_batch = default_device_exit_batch,
};

/*
 * Initialize the DEV module. At boot time this walks the device list and
 * unhooks any devices that fail to initialise (normally hardware not
 * present) and leaves us with a valid list of present and active devices.
 *
 */

/*
 * This is called single threaded during boot, so no need
 * to take the rtnl semaphore.
 */
static int __init net_dev_init(void)
{
	int i, rc = -ENOMEM;

	BUG_ON(!dev_boot_phase);

	if (dev_proc_init())
		goto out;

	if (netdev_kobject_init())
		goto out;

	INIT_LIST_HEAD(&ptype_all);
	for (i = 0; i < PTYPE_HASH_SIZE; i++)
		INIT_LIST_HEAD(&ptype_base[i]);

	if (register_pernet_subsys(&netdev_net_ops))
		goto out;

	/*
	 * Initialise the packet receive queues.
	 */

	for_each_possible_cpu(i) {
		struct softnet_data *sd = &per_cpu(softnet_data, i);

		memset(sd, 0, sizeof(*sd));
		skb_queue_head_init(&sd->input_pkt_queue);
		skb_queue_head_init(&sd->process_queue);
		sd->completion_queue = NULL;
		INIT_LIST_HEAD(&sd->poll_list);
		sd->output_queue = NULL;
		sd->output_queue_tailp = &sd->output_queue;
#ifdef CONFIG_RPS
		sd->csd.func = rps_trigger_softirq;
		sd->csd.info = sd;
		sd->csd.flags = 0;
		sd->cpu = i;
#endif

		sd->backlog.poll = process_backlog;
		sd->backlog.weight = weight_p;
		sd->backlog.gro_list = NULL;
		sd->backlog.gro_count = 0;
	}

	dev_boot_phase = 0;

	/* The loopback device is special: if any other network device
	 * is present in a network namespace, the loopback device must
	 * be present too. Since we now dynamically allocate and free
	 * the loopback device, maintain this invariant by keeping the
	 * loopback device as the first device on the list of network
	 * devices. Ensure the loopback device is the first device that
	 * appears and the last network device that disappears.
	 */
	if (register_pernet_device(&loopback_net_ops))
		goto out;

	if (register_pernet_device(&default_device_ops))
		goto out;

	open_softirq(NET_TX_SOFTIRQ, net_tx_action);
	open_softirq(NET_RX_SOFTIRQ, net_rx_action);

	hotcpu_notifier(dev_cpu_callback, 0);
	dst_init();
	dev_mcast_init();
	rc = 0;
out:
	return rc;
}

subsys_initcall(net_dev_init);

static int __init initialize_hashrnd(void)
{
	get_random_bytes(&hashrnd, sizeof(hashrnd));
	return 0;
}

late_initcall_sync(initialize_hashrnd);

6611