/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Definitions for the Interfaces handler.
 *
 * Version:	@(#)dev.h	1.0.10	08/12/93
 *
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Corey Minyard <wf-rch!minyard@relay.EU.net>
 *		Donald J. Becker, <becker@cesdis.gsfc.nasa.gov>
 *		Alan Cox, <Alan.Cox@linux.org>
 *		Bjorn Ekwall, <bj0rn@blox.se>
 *		Pekka Riikonen <priikone@poseidon.pspt.fi>
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 *		Moved to /usr/include/linux for NET3
 */

#ifndef _LINUX_NETDEVICE_H
#define _LINUX_NETDEVICE_H

#include <linux/if.h>
#include <linux/if_ether.h>
#include <linux/if_packet.h>

#ifdef __KERNEL__
#include <linux/timer.h>
#include <linux/delay.h>
#include <asm/atomic.h>
#include <asm/cache.h>
#include <asm/byteorder.h>

#include <linux/device.h>
#include <linux/percpu.h>
#include <linux/dmaengine.h>
#include <linux/workqueue.h>

struct vlan_group;
struct ethtool_ops;
struct macvlan_port;
					/* source back-compat hooks */
#define SET_ETHTOOL_OPS(netdev, ops) \
	((netdev)->ethtool_ops = (ops))

#define HAVE_ALLOC_NETDEV		/* feature macro: alloc_xxxdev
					   functions are available. */
#define HAVE_FREE_NETDEV		/* free_netdev() */
#define HAVE_NETDEV_PRIV		/* netdev_priv() */

#define NET_XMIT_SUCCESS	0
#define NET_XMIT_DROP		1	/* skb dropped			*/
#define NET_XMIT_CN		2	/* congestion notification	*/
#define NET_XMIT_POLICED	3	/* skb is shot by police	*/
#define NET_XMIT_BYPASS		4	/* packet does not leave via dequeue;
					   (TC use only - dev_queue_xmit
					   returns this as NET_XMIT_SUCCESS) */

/* Backlog congestion levels */
#define NET_RX_SUCCESS		0	/* keep 'em coming, baby */
#define NET_RX_DROP		1	/* packet dropped */
#define NET_RX_CN_LOW		2	/* storm alert, just in case */
#define NET_RX_CN_MOD		3	/* Storm on its way! */
#define NET_RX_CN_HIGH		4	/* The storm is here */
#define NET_RX_BAD		5	/* packet dropped due to kernel error */

/* NET_XMIT_CN is special. It does not guarantee that this packet is lost. It
 * indicates that the device will soon be dropping packets, or already drops
 * some packets of the same priority; prompting us to send less aggressively.
 */
#define net_xmit_eval(e)	((e) == NET_XMIT_CN ? 0 : (e))
#define net_xmit_errno(e)	((e) != NET_XMIT_CN ? -ENOBUFS : 0)
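
/*
 * Example (editor's sketch, not part of the original header): dev_queue_xmit()
 * (declared further down) returns one of the NET_XMIT_* codes.  A caller that
 * wants a plain 0/-errno result can fold that code with the helpers above,
 * treating NET_XMIT_CN as success:
 *
 *	int rc = dev_queue_xmit(skb);	// consumes the skb
 *	return net_xmit_eval(rc);	// 0 for SUCCESS/CN, error code otherwise
 */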

#define MAX_ADDR_LEN	32		/* Largest hardware address length */

/* Driver transmit return codes */
#define NETDEV_TX_OK		0	/* driver took care of packet */
#define NETDEV_TX_BUSY		1	/* driver tx path was busy */
#define NETDEV_TX_LOCKED	-1	/* driver tx lock was already taken */

/*
 *	Compute the worst case header length according to the protocols
 *	used.
 */

#if !defined(CONFIG_AX25) && !defined(CONFIG_AX25_MODULE) && !defined(CONFIG_TR)
#define LL_MAX_HEADER	32
#else
#if defined(CONFIG_AX25) || defined(CONFIG_AX25_MODULE)
#define LL_MAX_HEADER	96
#else
#define LL_MAX_HEADER	48
#endif
#endif

#if !defined(CONFIG_NET_IPIP) && !defined(CONFIG_NET_IPIP_MODULE) && \
    !defined(CONFIG_NET_IPGRE) && !defined(CONFIG_NET_IPGRE_MODULE) && \
    !defined(CONFIG_IPV6_SIT) && !defined(CONFIG_IPV6_SIT_MODULE) && \
    !defined(CONFIG_IPV6_TUNNEL) && !defined(CONFIG_IPV6_TUNNEL_MODULE)
#define MAX_HEADER LL_MAX_HEADER
#else
#define MAX_HEADER (LL_MAX_HEADER + 48)
#endif

struct net_device_subqueue
{
	/* Give a control state for each queue.  This struct may contain
	 * per-queue locks in the future.
	 */
	unsigned long	state;
};

/*
 *	Network device statistics. Akin to the 2.0 ether stats but
 *	with byte counters.
 */

struct net_device_stats
{
	unsigned long	rx_packets;		/* total packets received	*/
	unsigned long	tx_packets;		/* total packets transmitted	*/
	unsigned long	rx_bytes;		/* total bytes received		*/
	unsigned long	tx_bytes;		/* total bytes transmitted	*/
	unsigned long	rx_errors;		/* bad packets received		*/
	unsigned long	tx_errors;		/* packet transmit problems	*/
	unsigned long	rx_dropped;		/* no space in linux buffers	*/
	unsigned long	tx_dropped;		/* no space available in linux	*/
	unsigned long	multicast;		/* multicast packets received	*/
	unsigned long	collisions;

	/* detailed rx_errors: */
	unsigned long	rx_length_errors;
	unsigned long	rx_over_errors;		/* receiver ring buff overflow	*/
	unsigned long	rx_crc_errors;		/* received packet with crc error */
	unsigned long	rx_frame_errors;	/* received frame alignment error */
	unsigned long	rx_fifo_errors;		/* receiver fifo overrun	*/
	unsigned long	rx_missed_errors;	/* receiver missed packet	*/

	/* detailed tx_errors */
	unsigned long	tx_aborted_errors;
	unsigned long	tx_carrier_errors;
	unsigned long	tx_fifo_errors;
	unsigned long	tx_heartbeat_errors;
	unsigned long	tx_window_errors;

	/* for cslip etc */
	unsigned long	rx_compressed;
	unsigned long	tx_compressed;
};

/* Media selection options. */
enum {
	IF_PORT_UNKNOWN = 0,
	IF_PORT_10BASE2,
	IF_PORT_10BASET,
	IF_PORT_AUI,
	IF_PORT_100BASET,
	IF_PORT_100BASETX,
	IF_PORT_100BASEFX
};

#include <linux/cache.h>
#include <linux/skbuff.h>

struct netif_rx_stats
{
	unsigned total;
	unsigned dropped;
	unsigned time_squeeze;
	unsigned cpu_collision;
};

DECLARE_PER_CPU(struct netif_rx_stats, netdev_rx_stat);

struct dev_addr_list
{
	struct dev_addr_list	*next;
	u8			da_addr[MAX_ADDR_LEN];
	u8			da_addrlen;
	u8			da_synced;
	int			da_users;
	int			da_gusers;
};

/*
 *	We tag multicasts with these structures.
 */

#define dev_mc_list	dev_addr_list
#define dmi_addr	da_addr
#define dmi_addrlen	da_addrlen
#define dmi_users	da_users
#define dmi_gusers	da_gusers

struct hh_cache
{
	struct hh_cache *hh_next;	/* Next entry			*/
	atomic_t	hh_refcnt;	/* number of users		*/
/*
 * We want hh_output, hh_len, hh_lock and hh_data to be in a separate
 * cache line on SMP.
 * They are mostly read, but hh_refcnt may be changed quite frequently,
 * incurring cache line ping pongs.
 */
	__be16		hh_type ____cacheline_aligned_in_smp;
					/* protocol identifier, f.e ETH_P_IP
					 * NOTE: For VLANs, this will be the
					 * encapsulated type. --BLG
					 */
	u16		hh_len;		/* length of header */
	int		(*hh_output)(struct sk_buff *skb);
	seqlock_t	hh_lock;

	/* cached hardware header; allow for machine alignment needs. */
#define HH_DATA_MOD	16
#define HH_DATA_OFF(__len) \
	(HH_DATA_MOD - (((__len - 1) & (HH_DATA_MOD - 1)) + 1))
#define HH_DATA_ALIGN(__len) \
	(((__len) + (HH_DATA_MOD - 1)) & ~(HH_DATA_MOD - 1))
	unsigned long	hh_data[HH_DATA_ALIGN(LL_MAX_HEADER) / sizeof(long)];
};

/* Reserve HH_DATA_MOD byte-aligned hard_header_len, but at least that much.
 * Alternative is:
 *   dev->hard_header_len ? (dev->hard_header_len +
 *                           (HH_DATA_MOD - 1)) & ~(HH_DATA_MOD - 1) : 0
 *
 * We could use other alignment values, but we must maintain the
 * relationship HH alignment <= LL alignment.
 */
#define LL_RESERVED_SPACE(dev) \
	(((dev)->hard_header_len & ~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
#define LL_RESERVED_SPACE_EXTRA(dev, extra) \
	((((dev)->hard_header_len + (extra)) & ~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
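
/*
 * Example (editor's sketch): a protocol allocating an output skb typically
 * reserves LL_RESERVED_SPACE(dev) of headroom so the link-layer header can
 * later be pushed without a reallocation.  In protocol or driver code this
 * looks roughly like the following (my_build_skb() is hypothetical):
 *
 *	struct sk_buff *my_build_skb(struct net_device *dev, int payload_len)
 *	{
 *		struct sk_buff *skb;
 *
 *		skb = alloc_skb(payload_len + LL_RESERVED_SPACE(dev), GFP_ATOMIC);
 *		if (!skb)
 *			return NULL;
 *		skb_reserve(skb, LL_RESERVED_SPACE(dev));  // headroom for L2 header
 *		return skb;
 *	}
 */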

/* These flag bits are private to the generic network queueing
 * layer, they may not be explicitly referenced by any other
 * code.
 */

enum netdev_state_t
{
	__LINK_STATE_XOFF = 0,
	__LINK_STATE_START,
	__LINK_STATE_PRESENT,
	__LINK_STATE_SCHED,
	__LINK_STATE_NOCARRIER,
	__LINK_STATE_LINKWATCH_PENDING,
	__LINK_STATE_DORMANT,
	__LINK_STATE_QDISC_RUNNING,
};

/*
 * This structure holds the boot-time configured netdevice settings. They
 * are then used in the device probing.
 */
struct netdev_boot_setup {
	char name[IFNAMSIZ];
	struct ifmap map;
};
#define NETDEV_BOOT_SETUP_MAX 8

extern int __init netdev_boot_setup(char *str);

/*
 * Structure for NAPI scheduling similar to tasklet but with weighting
 */
struct napi_struct {
	/* The poll_list must only be managed by the entity which
	 * changes the state of the NAPI_STATE_SCHED bit.  This means
	 * whoever atomically sets that bit can add this napi_struct
	 * to the per-cpu poll_list, and whoever clears that bit
	 * can remove from the list right before clearing the bit.
	 */
	struct list_head	poll_list;

	unsigned long		state;
	int			weight;
	int			(*poll)(struct napi_struct *, int);
#ifdef CONFIG_NETPOLL
	spinlock_t		poll_lock;
	int			poll_owner;
	struct net_device	*dev;
	struct list_head	dev_list;
#endif
};

enum
{
	NAPI_STATE_SCHED,	/* Poll is scheduled */
};

extern void FASTCALL(__napi_schedule(struct napi_struct *n));

/**
 *	napi_schedule_prep - check if napi can be scheduled
 *	@n: napi context
 *
 * Test if NAPI routine is already running, and if not mark
 * it as running.  This is used as a condition variable to
 * ensure only one NAPI poll instance runs.
 */
static inline int napi_schedule_prep(struct napi_struct *n)
{
	return !test_and_set_bit(NAPI_STATE_SCHED, &n->state);
}

/**
 *	napi_schedule - schedule NAPI poll
 *	@n: napi context
 *
 * Schedule NAPI poll routine to be called if it is not already
 * running.
 */
static inline void napi_schedule(struct napi_struct *n)
{
	if (napi_schedule_prep(n))
		__napi_schedule(n);
}

/**
 *	napi_complete - NAPI processing complete
 *	@n: napi context
 *
 * Mark NAPI processing as complete.
 */
static inline void __napi_complete(struct napi_struct *n)
{
	BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state));
	list_del(&n->poll_list);
	smp_mb__before_clear_bit();
	clear_bit(NAPI_STATE_SCHED, &n->state);
}

static inline void napi_complete(struct napi_struct *n)
{
	local_irq_disable();
	__napi_complete(n);
	local_irq_enable();
}

/**
 *	napi_disable - prevent NAPI from scheduling
 *	@n: napi context
 *
 * Stop NAPI from being scheduled on this context.
 * Waits till any outstanding processing completes.
 */
static inline void napi_disable(struct napi_struct *n)
{
	while (test_and_set_bit(NAPI_STATE_SCHED, &n->state))
		msleep_interruptible(1);
}

/**
 *	napi_enable - enable NAPI scheduling
 *	@n: napi context
 *
 * Resume NAPI from being scheduled on this context.
 * Must be paired with napi_disable.
 */
static inline void napi_enable(struct napi_struct *n)
{
	BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state));
	smp_mb__before_clear_bit();
	clear_bit(NAPI_STATE_SCHED, &n->state);
}
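
/*
 * Example (editor's sketch): the usual NAPI lifecycle in a driver.  The
 * interrupt handler masks device interrupts and calls napi_schedule(); the
 * poll routine does bounded work and calls napi_complete() when the budget
 * is not exhausted.  my_dev_rx() and my_dev_unmask_irqs() are hypothetical.
 *
 *	static int my_poll(struct napi_struct *napi, int budget)
 *	{
 *		int done = my_dev_rx(napi->dev, budget);  // process <= budget packets
 *
 *		if (done < budget) {
 *			napi_complete(napi);              // no more work: stop polling
 *			my_dev_unmask_irqs(napi->dev);    // re-enable rx interrupts
 *		}
 *		return done;
 *	}
 */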

/*
 *	The DEVICE structure.
 *	Actually, this whole structure is a big mistake.  It mixes I/O
 *	data with strictly "high-level" data, and it has to know about
 *	almost every data structure used in the INET module.
 *
 * FIXME: cleanup struct net_device such that network protocol info
 * moves out.
 */

struct net_device
{
	/*
	 * This is the first field of the "visible" part of this structure
	 * (i.e. as seen by users in the "Space.c" file).  It is the name
	 * of the interface.
	 */
	char			name[IFNAMSIZ];
	/* device name hash chain */
	struct hlist_node	name_hlist;

	/*
	 *	I/O specific fields
	 *	FIXME: Merge these and struct ifmap into one
	 */
	unsigned long		mem_end;	/* shared mem end	*/
	unsigned long		mem_start;	/* shared mem start	*/
	unsigned long		base_addr;	/* device I/O address	*/
	unsigned int		irq;		/* device IRQ number	*/

	/*
	 *	Some hardware also needs these fields, but they are not
	 *	part of the usual set specified in Space.c.
	 */

	unsigned char		if_port;	/* Selectable AUI, TP,..*/
	unsigned char		dma;		/* DMA channel		*/

	unsigned long		state;

	struct list_head	dev_list;
#ifdef CONFIG_NETPOLL
	struct list_head	napi_list;
#endif

	/* The device initialization function. Called only once. */
	int			(*init)(struct net_device *dev);

	/* ------- Fields preinitialized in Space.c finish here ------- */

	/* Net device features */
	unsigned long		features;
#define NETIF_F_SG		1	/* Scatter/gather IO. */
#define NETIF_F_IP_CSUM		2	/* Can checksum TCP/UDP over IPv4. */
#define NETIF_F_NO_CSUM		4	/* Does not require checksum. F.e. loopback. */
#define NETIF_F_HW_CSUM		8	/* Can checksum all the packets. */
#define NETIF_F_IPV6_CSUM	16	/* Can checksum TCP/UDP over IPV6 */
#define NETIF_F_HIGHDMA		32	/* Can DMA to high memory. */
#define NETIF_F_FRAGLIST	64	/* Scatter/gather IO. */
#define NETIF_F_HW_VLAN_TX	128	/* Transmit VLAN hw acceleration */
#define NETIF_F_HW_VLAN_RX	256	/* Receive VLAN hw acceleration */
#define NETIF_F_HW_VLAN_FILTER	512	/* Receive filtering on VLAN */
#define NETIF_F_VLAN_CHALLENGED	1024	/* Device cannot handle VLAN packets */
#define NETIF_F_GSO		2048	/* Enable software GSO. */
#define NETIF_F_LLTX		4096	/* LockLess TX */
#define NETIF_F_MULTI_QUEUE	16384	/* Has multiple TX/RX queues */

	/* Segmentation offload features */
#define NETIF_F_GSO_SHIFT	16
#define NETIF_F_GSO_MASK	0xffff0000
#define NETIF_F_TSO		(SKB_GSO_TCPV4 << NETIF_F_GSO_SHIFT)
#define NETIF_F_UFO		(SKB_GSO_UDP << NETIF_F_GSO_SHIFT)
#define NETIF_F_GSO_ROBUST	(SKB_GSO_DODGY << NETIF_F_GSO_SHIFT)
#define NETIF_F_TSO_ECN		(SKB_GSO_TCP_ECN << NETIF_F_GSO_SHIFT)
#define NETIF_F_TSO6		(SKB_GSO_TCPV6 << NETIF_F_GSO_SHIFT)

	/* List of features with software fallbacks. */
#define NETIF_F_GSO_SOFTWARE	(NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6)

#define NETIF_F_GEN_CSUM	(NETIF_F_NO_CSUM | NETIF_F_HW_CSUM)
#define NETIF_F_V4_CSUM		(NETIF_F_GEN_CSUM | NETIF_F_IP_CSUM)
#define NETIF_F_V6_CSUM		(NETIF_F_GEN_CSUM | NETIF_F_IPV6_CSUM)
#define NETIF_F_ALL_CSUM	(NETIF_F_V4_CSUM | NETIF_F_V6_CSUM)

	struct net_device	*next_sched;

	/* Interface index. Unique device identifier	*/
	int			ifindex;
	int			iflink;

	struct net_device_stats* (*get_stats)(struct net_device *dev);
	struct net_device_stats	stats;

#ifdef CONFIG_WIRELESS_EXT
	/* List of functions to handle Wireless Extensions (instead of ioctl).
	 * See <net/iw_handler.h> for details. Jean II */
	const struct iw_handler_def *	wireless_handlers;
	/* Instance data managed by the core of Wireless Extensions. */
	struct iw_public_data *	wireless_data;
#endif
	const struct ethtool_ops *ethtool_ops;

	/*
	 * This marks the end of the "visible" part of the structure. All
	 * fields hereafter are internal to the system, and may change at
	 * will (read: may be cleaned up at will).
	 */

	unsigned int		flags;	/* interface flags (a la BSD)	*/
	unsigned short		gflags;
	unsigned short		priv_flags; /* Like 'flags' but invisible to userspace. */
	unsigned short		padded;	/* How much padding added by alloc_netdev() */

	unsigned char		operstate; /* RFC2863 operstate */
	unsigned char		link_mode; /* mapping policy to operstate */

	unsigned		mtu;	/* interface MTU value		*/
	unsigned short		type;	/* interface hardware type	*/
	unsigned short		hard_header_len;	/* hardware hdr length	*/

	struct net_device	*master; /* Pointer to master device of a group,
					  * which this device is member of.
					  */

	/* Interface address info. */
	unsigned char		perm_addr[MAX_ADDR_LEN]; /* permanent hw address */
	unsigned char		addr_len;	/* hardware address length	*/
	unsigned short		dev_id;		/* for shared network cards */

	struct dev_addr_list	*uc_list;	/* Secondary unicast mac addresses */
	int			uc_count;	/* Number of installed ucasts	*/

	struct dev_addr_list	*mc_list;	/* Multicast mac addresses	*/
	int			mc_count;	/* Number of installed mcasts	*/
	int			promiscuity;
	int			allmulti;

	/* Protocol specific pointers */

	void			*atalk_ptr;	/* AppleTalk link	*/
	void			*ip_ptr;	/* IPv4 specific data	*/
	void			*dn_ptr;	/* DECnet specific data	*/
	void			*ip6_ptr;	/* IPv6 specific data	*/
	void			*ec_ptr;	/* Econet specific data	*/
	void			*ax25_ptr;	/* AX.25 specific data	*/
	struct wireless_dev	*ieee80211_ptr;	/* IEEE 802.11 specific data,
						   assign before registering */

/*
 * Cache line mostly used on receive path (including eth_type_trans())
 */
	unsigned long		last_rx;	/* Time of last Rx	*/
	/* Interface address info used in eth_type_trans() */
	unsigned char		dev_addr[MAX_ADDR_LEN];	/* hw address, (before bcast
							   because most packets are unicast) */

	unsigned char		broadcast[MAX_ADDR_LEN];	/* hw bcast address	*/

/*
 * Cache line mostly used on queue transmit path (qdisc)
 */
	/* device queue lock */
	spinlock_t		queue_lock ____cacheline_aligned_in_smp;
	struct Qdisc		*qdisc;
	struct Qdisc		*qdisc_sleeping;
	struct list_head	qdisc_list;
	unsigned long		tx_queue_len;	/* Max frames per queue allowed */

	/* Partially transmitted GSO packet. */
	struct sk_buff		*gso_skb;

	/* ingress path synchronizer */
	spinlock_t		ingress_lock;
	struct Qdisc		*qdisc_ingress;

/*
 * One part is mostly used on xmit path (device)
 */
	/* hard_start_xmit synchronizer */
	spinlock_t		_xmit_lock ____cacheline_aligned_in_smp;
	/* cpu id of the processor currently inside hard_start_xmit,
	 * or -1 if nobody has entered it.
	 */
	int			xmit_lock_owner;
	void			*priv;	/* pointer to private data	*/
	int			(*hard_start_xmit) (struct sk_buff *skb,
						    struct net_device *dev);
	/* These may be needed for future network-power-down code. */
	unsigned long		trans_start;	/* Time (in jiffies) of last Tx	*/

	int			watchdog_timeo; /* used by dev_watchdog() */
	struct timer_list	watchdog_timer;

/*
 * refcnt is a very hot point, so align it on SMP
 */
	/* Number of references to this device */
	atomic_t		refcnt ____cacheline_aligned_in_smp;

	/* delayed register/unregister */
	struct list_head	todo_list;
	/* device index hash chain */
	struct hlist_node	index_hlist;

	struct net_device	*link_watch_next;

	/* register/unregister state machine */
	enum { NETREG_UNINITIALIZED=0,
	       NETREG_REGISTERED,	/* completed register_netdevice */
	       NETREG_UNREGISTERING,	/* called unregister_netdevice */
	       NETREG_UNREGISTERED,	/* completed unregister todo */
	       NETREG_RELEASED,		/* called free_netdev */
	} reg_state;

	/* Called after device is detached from network. */
	void			(*uninit)(struct net_device *dev);
	/* Called after last user reference disappears. */
	void			(*destructor)(struct net_device *dev);

	/* Pointers to interface service routines.	*/
	int			(*open)(struct net_device *dev);
	int			(*stop)(struct net_device *dev);
#define HAVE_NETDEV_POLL
	int			(*hard_header) (struct sk_buff *skb,
						struct net_device *dev,
						unsigned short type,
						void *daddr,
						void *saddr,
						unsigned len);
	int			(*rebuild_header)(struct sk_buff *skb);
#define HAVE_CHANGE_RX_FLAGS
	void			(*change_rx_flags)(struct net_device *dev,
						   int flags);
#define HAVE_SET_RX_MODE
	void			(*set_rx_mode)(struct net_device *dev);
#define HAVE_MULTICAST
	void			(*set_multicast_list)(struct net_device *dev);
#define HAVE_SET_MAC_ADDR
	int			(*set_mac_address)(struct net_device *dev,
						   void *addr);
#define HAVE_PRIVATE_IOCTL
	int			(*do_ioctl)(struct net_device *dev,
					    struct ifreq *ifr, int cmd);
#define HAVE_SET_CONFIG
	int			(*set_config)(struct net_device *dev,
					      struct ifmap *map);
#define HAVE_HEADER_CACHE
	int			(*hard_header_cache)(struct neighbour *neigh,
						     struct hh_cache *hh);
	void			(*header_cache_update)(struct hh_cache *hh,
						       struct net_device *dev,
						       unsigned char *haddr);
#define HAVE_CHANGE_MTU
	int			(*change_mtu)(struct net_device *dev, int new_mtu);

#define HAVE_TX_TIMEOUT
	void			(*tx_timeout) (struct net_device *dev);

	void			(*vlan_rx_register)(struct net_device *dev,
						    struct vlan_group *grp);
	void			(*vlan_rx_add_vid)(struct net_device *dev,
						   unsigned short vid);
	void			(*vlan_rx_kill_vid)(struct net_device *dev,
						    unsigned short vid);

	int			(*hard_header_parse)(struct sk_buff *skb,
						     unsigned char *haddr);
	int			(*neigh_setup)(struct net_device *dev, struct neigh_parms *);
#ifdef CONFIG_NETPOLL
	struct netpoll_info	*npinfo;
#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
	void			(*poll_controller)(struct net_device *dev);
#endif

	/* bridge stuff */
	struct net_bridge_port	*br_port;
	/* macvlan */
	struct macvlan_port	*macvlan_port;

	/* class/net/name entry */
	struct device		dev;
	/* space for optional statistics and wireless sysfs groups */
	struct attribute_group	*sysfs_groups[3];

	/* rtnetlink link ops */
	const struct rtnl_link_ops *rtnl_link_ops;

	/* The TX queue control structures */
	unsigned int			egress_subqueue_count;
	struct net_device_subqueue	egress_subqueue[1];
};

#define to_net_dev(d) container_of(d, struct net_device, dev)

#define NETDEV_ALIGN		32
#define NETDEV_ALIGN_CONST	(NETDEV_ALIGN - 1)

/**
 *	netdev_priv - access network device private data
 *	@dev: network device
 *
 * Get network device private data
 */
static inline void *netdev_priv(const struct net_device *dev)
{
	return dev->priv;
}

#define SET_MODULE_OWNER(dev) do { } while (0)
/* Set the sysfs physical device reference for the network logical device
 * if set prior to registration will cause a symlink during initialization.
 */
#define SET_NETDEV_DEV(net, pdev)	((net)->dev.parent = (pdev))

static inline void netif_napi_add(struct net_device *dev,
				  struct napi_struct *napi,
				  int (*poll)(struct napi_struct *, int),
				  int weight)
{
	INIT_LIST_HEAD(&napi->poll_list);
	napi->poll = poll;
	napi->weight = weight;
#ifdef CONFIG_NETPOLL
	napi->dev = dev;
	list_add(&napi->dev_list, &dev->napi_list);
	spin_lock_init(&napi->poll_lock);
	napi->poll_owner = -1;
#endif
	set_bit(NAPI_STATE_SCHED, &napi->state);
}
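
/*
 * Example (editor's sketch): a driver registers its poll routine once at
 * probe time and then enables/disables it from ->open()/->stop().  my_poll()
 * is the sketch shown earlier; a weight of 64 is the conventional default.
 * Note that netif_napi_add() leaves NAPI_STATE_SCHED set, so napi_enable()
 * must be called before the first poll can be scheduled.
 *
 *	netif_napi_add(dev, &priv->napi, my_poll, 64);	// in probe
 *	...
 *	napi_enable(&priv->napi);	// in ->open()
 *	napi_disable(&priv->napi);	// in ->stop()
 */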

struct packet_type {
	__be16			type;	/* This is really htons(ether_type). */
	struct net_device	*dev;	/* NULL is wildcarded here	     */
	int			(*func) (struct sk_buff *,
					 struct net_device *,
					 struct packet_type *,
					 struct net_device *);
	struct sk_buff		*(*gso_segment)(struct sk_buff *skb,
						int features);
	int			(*gso_send_check)(struct sk_buff *skb);
	void			*af_packet_priv;
	struct list_head	list;
};

#include <linux/interrupt.h>
#include <linux/notifier.h>

extern struct net_device	loopback_dev;	/* The loopback */
extern struct list_head		dev_base_head;	/* All devices */
extern rwlock_t			dev_base_lock;	/* Device list lock */

#define for_each_netdev(d)		\
		list_for_each_entry(d, &dev_base_head, dev_list)
#define for_each_netdev_safe(d, n)	\
		list_for_each_entry_safe(d, n, &dev_base_head, dev_list)
#define for_each_netdev_continue(d)	\
		list_for_each_entry_continue(d, &dev_base_head, dev_list)
#define net_device_entry(lh)	list_entry(lh, struct net_device, dev_list)

static inline struct net_device *next_net_device(struct net_device *dev)
{
	struct list_head *lh;

	lh = dev->dev_list.next;
	return lh == &dev_base_head ? NULL : net_device_entry(lh);
}

static inline struct net_device *first_net_device(void)
{
	return list_empty(&dev_base_head) ? NULL :
		net_device_entry(dev_base_head.next);
}
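
/*
 * Example (editor's sketch): walking the global device list.  Readers must
 * hold dev_base_lock (or the RTNL) for the duration of the walk:
 *
 *	struct net_device *d;
 *
 *	read_lock(&dev_base_lock);
 *	for_each_netdev(d)
 *		printk(KERN_DEBUG "found %s\n", d->name);
 *	read_unlock(&dev_base_lock);
 */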

extern int		netdev_boot_setup_check(struct net_device *dev);
extern unsigned long	netdev_boot_base(const char *prefix, int unit);
extern struct net_device *dev_getbyhwaddr(unsigned short type, char *hwaddr);
extern struct net_device *dev_getfirstbyhwtype(unsigned short type);
extern struct net_device *__dev_getfirstbyhwtype(unsigned short type);
extern void		dev_add_pack(struct packet_type *pt);
extern void		dev_remove_pack(struct packet_type *pt);
extern void		__dev_remove_pack(struct packet_type *pt);

extern struct net_device	*dev_get_by_flags(unsigned short flags,
						  unsigned short mask);
extern struct net_device	*dev_get_by_name(const char *name);
extern struct net_device	*__dev_get_by_name(const char *name);
extern int		dev_alloc_name(struct net_device *dev, const char *name);
extern int		dev_open(struct net_device *dev);
extern int		dev_close(struct net_device *dev);
extern int		dev_queue_xmit(struct sk_buff *skb);
extern int		register_netdevice(struct net_device *dev);
extern void		unregister_netdevice(struct net_device *dev);
extern void		free_netdev(struct net_device *dev);
extern void		synchronize_net(void);
extern int		register_netdevice_notifier(struct notifier_block *nb);
extern int		unregister_netdevice_notifier(struct notifier_block *nb);
extern int		call_netdevice_notifiers(unsigned long val, void *v);
extern struct net_device	*dev_get_by_index(int ifindex);
extern struct net_device	*__dev_get_by_index(int ifindex);
extern int		dev_restart(struct net_device *dev);
#ifdef CONFIG_NETPOLL_TRAP
extern int		netpoll_trap(void);
#endif

typedef int gifconf_func_t(struct net_device *dev, char __user *bufptr, int len);
extern int		register_gifconf(unsigned int family, gifconf_func_t *gifconf);
static inline int unregister_gifconf(unsigned int family)
{
	return register_gifconf(family, NULL);
}

/*
 * Incoming packets are placed on per-cpu queues so that
 * no locking is needed.
 */
struct softnet_data
{
	struct net_device	*output_queue;
	struct sk_buff_head	input_pkt_queue;
	struct list_head	poll_list;
	struct sk_buff		*completion_queue;

	struct napi_struct	backlog;
#ifdef CONFIG_NET_DMA
	struct dma_chan		*net_dma;
#endif
};

DECLARE_PER_CPU(struct softnet_data, softnet_data);

#define HAVE_NETIF_QUEUE

extern void __netif_schedule(struct net_device *dev);

static inline void netif_schedule(struct net_device *dev)
{
	if (!test_bit(__LINK_STATE_XOFF, &dev->state))
		__netif_schedule(dev);
}

/**
 *	netif_start_queue - allow transmit
 *	@dev: network device
 *
 *	Allow upper layers to call the device hard_start_xmit routine.
 */
static inline void netif_start_queue(struct net_device *dev)
{
	clear_bit(__LINK_STATE_XOFF, &dev->state);
}

/**
 *	netif_wake_queue - restart transmit
 *	@dev: network device
 *
 *	Allow upper layers to call the device hard_start_xmit routine.
 *	Used for flow control when transmit resources are available.
 */
static inline void netif_wake_queue(struct net_device *dev)
{
#ifdef CONFIG_NETPOLL_TRAP
	if (netpoll_trap()) {
		clear_bit(__LINK_STATE_XOFF, &dev->state);
		return;
	}
#endif
	if (test_and_clear_bit(__LINK_STATE_XOFF, &dev->state))
		__netif_schedule(dev);
}

/**
 *	netif_stop_queue - stop the transmit queue
 *	@dev: network device
 *
 *	Stop upper layers calling the device hard_start_xmit routine.
 *	Used for flow control when transmit resources are unavailable.
 */
static inline void netif_stop_queue(struct net_device *dev)
{
	set_bit(__LINK_STATE_XOFF, &dev->state);
}

/**
 *	netif_queue_stopped - test if transmit queue is flow-blocked
 *	@dev: network device
 *
 *	Test if transmit queue on device is currently unable to send.
 */
static inline int netif_queue_stopped(const struct net_device *dev)
{
	return test_bit(__LINK_STATE_XOFF, &dev->state);
}
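
/*
 * Example (editor's sketch): typical flow control in a driver's
 * hard_start_xmit.  When the hardware ring fills up, the driver stops the
 * queue and later wakes it from its tx-completion interrupt.  The helpers
 * my_tx_ring_full() and my_hw_queue_frame() are hypothetical.
 *
 *	static int my_start_xmit(struct sk_buff *skb, struct net_device *dev)
 *	{
 *		if (my_tx_ring_full(dev)) {
 *			netif_stop_queue(dev);
 *			return NETDEV_TX_BUSY;	// core will requeue the skb
 *		}
 *		my_hw_queue_frame(dev, skb);
 *		if (my_tx_ring_full(dev))
 *			netif_stop_queue(dev);
 *		return NETDEV_TX_OK;
 *	}
 *
 * The tx-completion path then calls netif_wake_queue(dev) once descriptors
 * are free again.
 */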

/**
 *	netif_running - test if up
 *	@dev: network device
 *
 *	Test if the device has been brought up.
 */
static inline int netif_running(const struct net_device *dev)
{
	return test_bit(__LINK_STATE_START, &dev->state);
}

/*
 * Routines to manage the subqueues on a device.  We only need start,
 * stop, and a check if it's stopped.  All other device management is
 * done at the overall netdevice level.
 * Also test the device if we're multiqueue.
 */

/**
 *	netif_start_subqueue - allow sending packets on subqueue
 *	@dev: network device
 *	@queue_index: sub queue index
 *
 * Start individual transmit queue of a device with multiple transmit queues.
 */
static inline void netif_start_subqueue(struct net_device *dev, u16 queue_index)
{
#ifdef CONFIG_NETDEVICES_MULTIQUEUE
	clear_bit(__LINK_STATE_XOFF, &dev->egress_subqueue[queue_index].state);
#endif
}

/**
 *	netif_stop_subqueue - stop sending packets on subqueue
 *	@dev: network device
 *	@queue_index: sub queue index
 *
 * Stop individual transmit queue of a device with multiple transmit queues.
 */
static inline void netif_stop_subqueue(struct net_device *dev, u16 queue_index)
{
#ifdef CONFIG_NETDEVICES_MULTIQUEUE
#ifdef CONFIG_NETPOLL_TRAP
	if (netpoll_trap())
		return;
#endif
	set_bit(__LINK_STATE_XOFF, &dev->egress_subqueue[queue_index].state);
#endif
}

/**
 *	netif_subqueue_stopped - test status of subqueue
 *	@dev: network device
 *	@queue_index: sub queue index
 *
 * Check individual transmit queue of a device with multiple transmit queues.
 */
static inline int netif_subqueue_stopped(const struct net_device *dev,
					 u16 queue_index)
{
#ifdef CONFIG_NETDEVICES_MULTIQUEUE
	return test_bit(__LINK_STATE_XOFF,
			&dev->egress_subqueue[queue_index].state);
#else
	return 0;
#endif
}

/**
 *	netif_wake_subqueue - allow sending packets on subqueue
 *	@dev: network device
 *	@queue_index: sub queue index
 *
 * Resume individual transmit queue of a device with multiple transmit queues.
 */
static inline void netif_wake_subqueue(struct net_device *dev, u16 queue_index)
{
#ifdef CONFIG_NETDEVICES_MULTIQUEUE
#ifdef CONFIG_NETPOLL_TRAP
	if (netpoll_trap())
		return;
#endif
	if (test_and_clear_bit(__LINK_STATE_XOFF,
			       &dev->egress_subqueue[queue_index].state))
		__netif_schedule(dev);
#endif
}

/**
 *	netif_is_multiqueue - test if device has multiple transmit queues
 *	@dev: network device
 *
 * Check if device has multiple transmit queues.
 * Always returns 0 if CONFIG_NETDEVICES_MULTIQUEUE is not configured.
 */
static inline int netif_is_multiqueue(const struct net_device *dev)
{
#ifdef CONFIG_NETDEVICES_MULTIQUEUE
	return !!(NETIF_F_MULTI_QUEUE & dev->features);
#else
	return 0;
#endif
}

/* Use this variant when it is known for sure that it
 * is executing from interrupt context.
 */
extern void dev_kfree_skb_irq(struct sk_buff *skb);

/* Use this variant in places where it could be invoked
 * either from interrupt or non-interrupt context.
 */
extern void dev_kfree_skb_any(struct sk_buff *skb);

#define HAVE_NETIF_RX 1
extern int		netif_rx(struct sk_buff *skb);
extern int		netif_rx_ni(struct sk_buff *skb);
#define HAVE_NETIF_RECEIVE_SKB 1
extern int		netif_receive_skb(struct sk_buff *skb);
extern int		dev_valid_name(const char *name);
extern int		dev_ioctl(unsigned int cmd, void __user *);
extern int		dev_ethtool(struct ifreq *);
extern unsigned		dev_get_flags(const struct net_device *);
extern int		dev_change_flags(struct net_device *, unsigned);
extern int		dev_change_name(struct net_device *, char *);
extern int		dev_set_mtu(struct net_device *, int);
extern int		dev_set_mac_address(struct net_device *,
					    struct sockaddr *);
extern int		dev_hard_start_xmit(struct sk_buff *skb,
					    struct net_device *dev);

extern int		netdev_budget;

/* Called by rtnetlink.c:rtnl_unlock() */
extern void netdev_run_todo(void);

/**
 *	dev_put - release reference to device
 *	@dev: network device
 *
 * Release reference to device to allow it to be freed.
 */
static inline void dev_put(struct net_device *dev)
{
	atomic_dec(&dev->refcnt);
}

/**
 *	dev_hold - get reference to device
 *	@dev: network device
 *
 * Hold reference to device to keep it from being freed.
 */
static inline void dev_hold(struct net_device *dev)
{
	atomic_inc(&dev->refcnt);
}
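
/*
 * Example (editor's sketch): any code that stashes a struct net_device
 * pointer beyond the current locked section must hold a reference.  The
 * dev_get_by_*() lookups already return with one held:
 *
 *	struct net_device *dev = dev_get_by_name("eth0");  // reference taken
 *	if (dev) {
 *		...				// use the device
 *		dev_put(dev);			// drop the reference when done
 *	}
 */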

/* Carrier loss detection, dial on demand. The functions netif_carrier_on
 * and _off may be called from IRQ context, but it is the caller
 * who is responsible for serialization of these calls.
 *
 * The name carrier is inappropriate, these functions should really be
 * called netif_lowerlayer_*() because they represent the state of any
 * kind of lower layer not just hardware media.
 */

extern void linkwatch_fire_event(struct net_device *dev);

/**
 *	netif_carrier_ok - test if carrier present
 *	@dev: network device
 *
 * Check if carrier is present on device
 */
static inline int netif_carrier_ok(const struct net_device *dev)
{
	return !test_bit(__LINK_STATE_NOCARRIER, &dev->state);
}

extern void __netdev_watchdog_up(struct net_device *dev);

extern void netif_carrier_on(struct net_device *dev);

extern void netif_carrier_off(struct net_device *dev);

/**
 *	netif_dormant_on - mark device as dormant.
 *	@dev: network device
 *
 * Mark device as dormant (as per RFC2863).
 *
 * The dormant state indicates that the relevant interface is not
 * actually in a condition to pass packets (i.e., it is not 'up') but is
 * in a "pending" state, waiting for some external event.  For "on-
 * demand" interfaces, this new state identifies the situation where the
 * interface is waiting for events to place it in the up state.
 */
static inline void netif_dormant_on(struct net_device *dev)
{
	if (!test_and_set_bit(__LINK_STATE_DORMANT, &dev->state))
		linkwatch_fire_event(dev);
}

/**
 *	netif_dormant_off - set device as not dormant.
 *	@dev: network device
 *
 * Device is not in dormant state.
 */
static inline void netif_dormant_off(struct net_device *dev)
{
	if (test_and_clear_bit(__LINK_STATE_DORMANT, &dev->state))
		linkwatch_fire_event(dev);
}

/**
 *	netif_dormant - test if device is dormant
 *	@dev: network device
 *
 * Check if the device is dormant.
 */
static inline int netif_dormant(const struct net_device *dev)
{
	return test_bit(__LINK_STATE_DORMANT, &dev->state);
}

/**
 *	netif_oper_up - test if device is operational
 *	@dev: network device
 *
 * Check if carrier is operational
 */
static inline int netif_oper_up(const struct net_device *dev) {
	return (dev->operstate == IF_OPER_UP ||
		dev->operstate == IF_OPER_UNKNOWN /* backward compat */);
}

/**
 *	netif_device_present - is device available or removed
 *	@dev: network device
 *
 * Check if device has not been removed from system.
 */
static inline int netif_device_present(struct net_device *dev)
{
	return test_bit(__LINK_STATE_PRESENT, &dev->state);
}

extern void netif_device_detach(struct net_device *dev);

extern void netif_device_attach(struct net_device *dev);

/*
 * Network interface message level settings
 */
#define HAVE_NETIF_MSG 1

enum {
	NETIF_MSG_DRV		= 0x0001,
	NETIF_MSG_PROBE		= 0x0002,
	NETIF_MSG_LINK		= 0x0004,
	NETIF_MSG_TIMER		= 0x0008,
	NETIF_MSG_IFDOWN	= 0x0010,
	NETIF_MSG_IFUP		= 0x0020,
	NETIF_MSG_RX_ERR	= 0x0040,
	NETIF_MSG_TX_ERR	= 0x0080,
	NETIF_MSG_TX_QUEUED	= 0x0100,
	NETIF_MSG_INTR		= 0x0200,
	NETIF_MSG_TX_DONE	= 0x0400,
	NETIF_MSG_RX_STATUS	= 0x0800,
	NETIF_MSG_PKTDATA	= 0x1000,
	NETIF_MSG_HW		= 0x2000,
	NETIF_MSG_WOL		= 0x4000,
};

#define netif_msg_drv(p)	((p)->msg_enable & NETIF_MSG_DRV)
#define netif_msg_probe(p)	((p)->msg_enable & NETIF_MSG_PROBE)
#define netif_msg_link(p)	((p)->msg_enable & NETIF_MSG_LINK)
#define netif_msg_timer(p)	((p)->msg_enable & NETIF_MSG_TIMER)
#define netif_msg_ifdown(p)	((p)->msg_enable & NETIF_MSG_IFDOWN)
#define netif_msg_ifup(p)	((p)->msg_enable & NETIF_MSG_IFUP)
#define netif_msg_rx_err(p)	((p)->msg_enable & NETIF_MSG_RX_ERR)
#define netif_msg_tx_err(p)	((p)->msg_enable & NETIF_MSG_TX_ERR)
#define netif_msg_tx_queued(p)	((p)->msg_enable & NETIF_MSG_TX_QUEUED)
#define netif_msg_intr(p)	((p)->msg_enable & NETIF_MSG_INTR)
#define netif_msg_tx_done(p)	((p)->msg_enable & NETIF_MSG_TX_DONE)
#define netif_msg_rx_status(p)	((p)->msg_enable & NETIF_MSG_RX_STATUS)
#define netif_msg_pktdata(p)	((p)->msg_enable & NETIF_MSG_PKTDATA)
#define netif_msg_hw(p)		((p)->msg_enable & NETIF_MSG_HW)
#define netif_msg_wol(p)	((p)->msg_enable & NETIF_MSG_WOL)

static inline u32 netif_msg_init(int debug_value, int default_msg_enable_bits)
{
	/* use default */
	if (debug_value < 0 || debug_value >= (sizeof(u32) * 8))
		return default_msg_enable_bits;
	if (debug_value == 0)	/* no output */
		return 0;
	/* set low N bits */
	return (1 << debug_value) - 1;
}
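
/*
 * Example (editor's sketch): drivers usually take a module parameter named
 * "debug" and convert it to a msg_enable bitmap at probe time:
 *
 *	static int debug = -1;			// -1 means "use defaults"
 *	module_param(debug, int, 0);
 *
 *	priv->msg_enable = netif_msg_init(debug,
 *					  NETIF_MSG_DRV | NETIF_MSG_PROBE |
 *					  NETIF_MSG_LINK);
 *	if (netif_msg_probe(priv))
 *		printk(KERN_INFO "probing...\n");
 */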

/* Test if receive needs to be scheduled but only if up */
static inline int netif_rx_schedule_prep(struct net_device *dev,
					 struct napi_struct *napi)
{
	return netif_running(dev) && napi_schedule_prep(napi);
}

/* Add interface to tail of rx poll list. This assumes that _prep has
 * already been called and returned 1.
 */
static inline void __netif_rx_schedule(struct net_device *dev,
				       struct napi_struct *napi)
{
	dev_hold(dev);
	__napi_schedule(napi);
}

/* Try to reschedule poll. Called by irq handler. */

static inline void netif_rx_schedule(struct net_device *dev,
				     struct napi_struct *napi)
{
	if (netif_rx_schedule_prep(dev, napi))
		__netif_rx_schedule(dev, napi);
}

/* Try to reschedule poll. Called by dev->poll() after netif_rx_complete(). */
static inline int netif_rx_reschedule(struct net_device *dev,
				      struct napi_struct *napi)
{
	if (napi_schedule_prep(napi)) {
		__netif_rx_schedule(dev, napi);
		return 1;
	}
	return 0;
}

/* same as netif_rx_complete, except that local_irq_save(flags)
 * has already been issued
 */
static inline void __netif_rx_complete(struct net_device *dev,
				       struct napi_struct *napi)
{
	__napi_complete(napi);
	dev_put(dev);
}

/* Remove interface from poll list: it must be in the poll list
 * on current cpu. This primitive is called by dev->poll(), when
 * it completes the work. The device cannot be out of poll list at this
 * moment, it is BUG().
 */
static inline void netif_rx_complete(struct net_device *dev,
				     struct napi_struct *napi)
{
	unsigned long flags;

	local_irq_save(flags);
	__netif_rx_complete(dev, napi);
	local_irq_restore(flags);
}
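
/*
 * Example (editor's sketch): an interrupt handler using the dev+napi
 * wrappers above.  my_dev_mask_irqs() is hypothetical.
 *
 *	static irqreturn_t my_interrupt(int irq, void *dev_id)
 *	{
 *		struct net_device *dev = dev_id;
 *		struct my_priv *priv = netdev_priv(dev);
 *
 *		if (netif_rx_schedule_prep(dev, &priv->napi)) {
 *			my_dev_mask_irqs(dev);		// quiesce rx interrupts
 *			__netif_rx_schedule(dev, &priv->napi);
 *		}
 *		return IRQ_HANDLED;
 *	}
 *
 * The matching poll routine ends with netif_rx_complete(dev, napi) before
 * re-enabling interrupts.
 */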

/**
 *	netif_tx_lock - grab network device transmit lock
 *	@dev: network device
 *
 * Get network device transmit lock
 */
static inline void netif_tx_lock(struct net_device *dev)
{
	spin_lock(&dev->_xmit_lock);
	dev->xmit_lock_owner = smp_processor_id();
}

static inline void netif_tx_lock_bh(struct net_device *dev)
{
	spin_lock_bh(&dev->_xmit_lock);
	dev->xmit_lock_owner = smp_processor_id();
}

static inline int netif_tx_trylock(struct net_device *dev)
{
	int ok = spin_trylock(&dev->_xmit_lock);
	if (likely(ok))
		dev->xmit_lock_owner = smp_processor_id();
	return ok;
}

static inline void netif_tx_unlock(struct net_device *dev)
{
	dev->xmit_lock_owner = -1;
	spin_unlock(&dev->_xmit_lock);
}

static inline void netif_tx_unlock_bh(struct net_device *dev)
{
	dev->xmit_lock_owner = -1;
	spin_unlock_bh(&dev->_xmit_lock);
}

static inline void netif_tx_disable(struct net_device *dev)
{
	netif_tx_lock_bh(dev);
	netif_stop_queue(dev);
	netif_tx_unlock_bh(dev);
}

/* These functions live elsewhere (drivers/net/net_init.c, but related) */

extern void		ether_setup(struct net_device *dev);

/* Support for loadable net-drivers */
extern struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name,
					  void (*setup)(struct net_device *),
					  unsigned int queue_count);
#define alloc_netdev(sizeof_priv, name, setup) \
	alloc_netdev_mq(sizeof_priv, name, setup, 1)
extern int		register_netdev(struct net_device *dev);
extern void		unregister_netdev(struct net_device *dev);
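
/*
 * Example (editor's sketch): the usual load/unload sequence for an Ethernet
 * driver.  struct my_priv and my_setup_hw() are hypothetical; ether_setup()
 * is the standard setup callback for Ethernet devices.
 *
 *	struct net_device *dev;
 *	struct my_priv *priv;
 *	int err;
 *
 *	dev = alloc_netdev(sizeof(struct my_priv), "eth%d", ether_setup);
 *	if (!dev)
 *		return -ENOMEM;
 *	priv = netdev_priv(dev);	// private area allocated with the device
 *	my_setup_hw(priv);
 *
 *	err = register_netdev(dev);	// takes rtnl_lock internally
 *	if (err)
 *		free_netdev(dev);
 *
 *	// teardown: unregister_netdev(dev); free_netdev(dev);
 */
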
/* Functions used for secondary unicast and multicast support */
extern void		dev_set_rx_mode(struct net_device *dev);
extern void		__dev_set_rx_mode(struct net_device *dev);
extern int		dev_unicast_delete(struct net_device *dev, void *addr, int alen);
extern int		dev_unicast_add(struct net_device *dev, void *addr, int alen);
extern int		dev_mc_delete(struct net_device *dev, void *addr, int alen, int all);
extern int		dev_mc_add(struct net_device *dev, void *addr, int alen, int newonly);
extern int		dev_mc_sync(struct net_device *to, struct net_device *from);
extern void		dev_mc_unsync(struct net_device *to, struct net_device *from);
extern int		__dev_addr_delete(struct dev_addr_list **list, int *count, void *addr, int alen, int all);
extern int		__dev_addr_add(struct dev_addr_list **list, int *count, void *addr, int alen, int newonly);
extern void		dev_set_promiscuity(struct net_device *dev, int inc);
extern void		dev_set_allmulti(struct net_device *dev, int inc);
extern void		netdev_state_change(struct net_device *dev);
extern void		netdev_features_change(struct net_device *dev);
/* Load a device via the kmod */
extern void		dev_load(const char *name);
extern void		dev_mcast_init(void);
extern int		netdev_max_backlog;
extern int		weight_p;
extern int		netdev_set_master(struct net_device *dev, struct net_device *master);
extern int		skb_checksum_help(struct sk_buff *skb);
extern struct sk_buff	*skb_gso_segment(struct sk_buff *skb, int features);
#ifdef CONFIG_BUG
extern void netdev_rx_csum_fault(struct net_device *dev);
#else
static inline void netdev_rx_csum_fault(struct net_device *dev)
{
}
#endif

/* rx skb timestamps */
extern void		net_enable_timestamp(void);
extern void		net_disable_timestamp(void);

#ifdef CONFIG_PROC_FS
extern void *dev_seq_start(struct seq_file *seq, loff_t *pos);
extern void *dev_seq_next(struct seq_file *seq, void *v, loff_t *pos);
extern void dev_seq_stop(struct seq_file *seq, void *v);
#endif

extern void linkwatch_run_queue(void);

extern int netdev_compute_features(unsigned long all, unsigned long one);

static inline int net_gso_ok(int features, int gso_type)
{
	int feature = gso_type << NETIF_F_GSO_SHIFT;
	return (features & feature) == feature;
}

static inline int skb_gso_ok(struct sk_buff *skb, int features)
{
	return net_gso_ok(features, skb_shinfo(skb)->gso_type);
}

static inline int netif_needs_gso(struct net_device *dev, struct sk_buff *skb)
{
	return skb_is_gso(skb) &&
	       (!skb_gso_ok(skb, dev->features) ||
		unlikely(skb->ip_summed != CHECKSUM_PARTIAL));
}

/* On bonding slaves other than the currently active slave, suppress
 * duplicates except for 802.3ad ETH_P_SLOW, alb non-mcast/bcast, and
 * ARP on active-backup slaves with arp_validate enabled.
 */
static inline int skb_bond_should_drop(struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;
	struct net_device *master = dev->master;

	if (master &&
	    (dev->priv_flags & IFF_SLAVE_INACTIVE)) {
		if ((dev->priv_flags & IFF_SLAVE_NEEDARP) &&
		    skb->protocol == __constant_htons(ETH_P_ARP))
			return 0;

		if (master->priv_flags & IFF_MASTER_ALB) {
			if (skb->pkt_type != PACKET_BROADCAST &&
			    skb->pkt_type != PACKET_MULTICAST)
				return 0;
		}
		if (master->priv_flags & IFF_MASTER_8023AD &&
		    skb->protocol == __constant_htons(ETH_P_SLOW))
			return 0;

		return 1;
	}
	return 0;
}

#endif /* __KERNEL__ */

#endif /* _LINUX_NETDEVICE_H */