Commit | Line | Data |
---|---|---|
2874c5fd | 1 | /* SPDX-License-Identifier: GPL-2.0-or-later */ |
1da177e4 LT |
2 | /* |
3 | * INET An implementation of the TCP/IP protocol suite for the LINUX | |
4 | * operating system. INET is implemented using the BSD Socket | |
5 | * interface as the means of communication with the user level. | |
6 | * | |
7 | * Definitions for the Interfaces handler. | |
8 | * | |
9 | * Version: @(#)dev.h 1.0.10 08/12/93 | |
10 | * | |
02c30a84 | 11 | * Authors: Ross Biro |
1da177e4 LT |
12 | * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG> |
13 | * Corey Minyard <wf-rch!minyard@relay.EU.net> | |
14 | * Donald J. Becker, <becker@cesdis.gsfc.nasa.gov> | |
113aa838 | 15 | * Alan Cox, <alan@lxorguk.ukuu.org.uk> |
1da177e4 LT |
16 | * Bjorn Ekwall. <bj0rn@blox.se> |
17 | * Pekka Riikonen <priikone@poseidon.pspt.fi> | |
18 | * | |
1da177e4 LT |
19 | * Moved to /usr/include/linux for NET3 |
20 | */ | |
21 | #ifndef _LINUX_NETDEVICE_H | |
22 | #define _LINUX_NETDEVICE_H | |
23 | ||
d7fe0f24 | 24 | #include <linux/timer.h> |
187f1882 | 25 | #include <linux/bug.h> |
bea3348e | 26 | #include <linux/delay.h> |
60063497 | 27 | #include <linux/atomic.h> |
53511453 | 28 | #include <linux/prefetch.h> |
1da177e4 LT |
29 | #include <asm/cache.h> |
30 | #include <asm/byteorder.h> | |
31 | ||
1da177e4 | 32 | #include <linux/percpu.h> |
4d5b78c0 | 33 | #include <linux/rculist.h> |
bea3348e | 34 | #include <linux/workqueue.h> |
114cf580 | 35 | #include <linux/dynamic_queue_limits.h> |
1da177e4 | 36 | |
a050c33f | 37 | #include <net/net_namespace.h> |
7a6b6f51 | 38 | #ifdef CONFIG_DCB |
2f90b865 AD |
39 | #include <net/dcbnl.h> |
40 | #endif | |
5bc1421e | 41 | #include <net/netprio_cgroup.h> |
e817f856 | 42 | #include <net/xdp.h> |
a050c33f | 43 | |
a59e2ecb | 44 | #include <linux/netdev_features.h> |
77162022 | 45 | #include <linux/neighbour.h> |
607ca46e | 46 | #include <uapi/linux/netdevice.h> |
61bd3857 | 47 | #include <uapi/linux/if_bonding.h> |
e4c6734e | 48 | #include <uapi/linux/pkt_cls.h> |
59cc1f61 | 49 | #include <linux/hashtable.h> |
a59e2ecb | 50 | |
115c1d6e | 51 | struct netpoll_info; |
313162d0 | 52 | struct device; |
cc69837f | 53 | struct ethtool_ops; |
c1f19b51 | 54 | struct phy_device; |
2f657a60 | 55 | struct dsa_port; |
607259a6 | 56 | struct ip_tunnel_parm; |
30e9bb84 AT |
57 | struct macsec_context; |
58 | struct macsec_ops; | |
c6e970a0 | 59 | |
e679c9c1 | 60 | struct sfp_bus; |
704232c2 JB |
61 | /* 802.11 specific */ |
62 | struct wireless_dev; | |
98a18b6f AA |
63 | /* 802.15.4 specific */ |
64 | struct wpan_dev; | |
03c57747 | 65 | struct mpls_dev; |
7c46a640 AD |
66 | /* UDP Tunnel offloads */ |
67 | struct udp_tunnel_info; | |
cc4e3835 JK |
68 | struct udp_tunnel_nic_info; |
69 | struct udp_tunnel_nic; | |
a7862b45 | 70 | struct bpf_prog; |
814abfab | 71 | struct xdp_buff; |
1da177e4 | 72 | |
5198d545 | 73 | void synchronize_net(void); |
f629d208 JP |
74 | void netdev_set_default_ethtool_ops(struct net_device *dev, |
75 | const struct ethtool_ops *ops); | |
d07d7507 | 76 | |
9a1654ba JP |
77 | /* Backlog congestion levels */ |
78 | #define NET_RX_SUCCESS 0 /* keep 'em coming, baby */ | |
79 | #define NET_RX_DROP 1 /* packet dropped */ | |
80 | ||
7151affe TY |
81 | #define MAX_NEST_DEV 8 |
82 | ||
572a9d7b PM |
83 | /* |
84 | * Transmit return codes: transmit return codes originate from three different | |
85 | * namespaces: | |
86 | * | |
87 | * - qdisc return codes | |
88 | * - driver transmit return codes | |
89 | * - errno values | |
90 | * | |
91 | * Drivers are allowed to return any one of those in their hard_start_xmit() | |
92 | * function. Real network devices commonly used with qdiscs should only return | |
93 | * the driver transmit return codes though - when qdiscs are used, the actual | |
94 | * transmission happens asynchronously, so the value is not propagated to | |
5e82b4b2 BH |
95 | * higher layers. Virtual network devices transmit synchronously; in this case |
96 | * the driver transmit return codes are consumed by dev_queue_xmit(), and all | |
572a9d7b PM |
97 | * others are propagated to higher layers. |
98 | */ | |
99 | ||
100 | /* qdisc ->enqueue() return codes. */ | |
101 | #define NET_XMIT_SUCCESS 0x00 | |
9a1654ba JP |
102 | #define NET_XMIT_DROP 0x01 /* skb dropped */ |
103 | #define NET_XMIT_CN 0x02 /* congestion notification */ | |
9a1654ba | 104 | #define NET_XMIT_MASK 0x0f /* qdisc flags in net/sch_generic.h */ |
1da177e4 | 105 | |
b9df3cb8 GR |
106 | /* NET_XMIT_CN is special. It does not guarantee that this packet is lost. It |
107 | * indicates that the device will soon be dropping packets, or already drops | |
108 | * some packets of the same priority; prompting us to send less aggressively. */ | |
572a9d7b | 109 | #define net_xmit_eval(e) ((e) == NET_XMIT_CN ? 0 : (e)) |
1da177e4 LT |
110 | #define net_xmit_errno(e) ((e) != NET_XMIT_CN ? -ENOBUFS : 0) |
111 | ||
dc1f8bf6 | 112 | /* Driver transmit return codes */ |
9a1654ba | 113 | #define NETDEV_TX_MASK 0xf0 |
572a9d7b | 114 | |
dc1f8bf6 | 115 | enum netdev_tx { |
572a9d7b | 116 | __NETDEV_TX_MIN = INT_MIN, /* make sure enum is signed */ |
9a1654ba JP |
117 | NETDEV_TX_OK = 0x00, /* driver took care of packet */ |
118 | NETDEV_TX_BUSY = 0x10, /* driver tx path was busy*/ | |
dc1f8bf6 SH |
119 | }; |
120 | typedef enum netdev_tx netdev_tx_t; | |
121 | ||
9a1654ba JP |
122 | /* |
123 | * Current order: NETDEV_TX_MASK > NET_XMIT_MASK >= 0 is significant; | |
124 | * hard_start_xmit() return < NET_XMIT_MASK means skb was consumed. | |
125 | */ | |
126 | static inline bool dev_xmit_complete(int rc) | |
127 | { | |
128 | /* | |
129 | * Positive cases with an skb consumed by a driver: | |
130 | * - successful transmission (rc == NETDEV_TX_OK) | |
131 | * - error while transmitting (rc < 0) | |
132 | * - error while queueing to a different device (rc & NET_XMIT_MASK) | |
133 | */ | |
134 | if (likely(rc < NET_XMIT_MASK)) | |
135 | return true; | |
136 | ||
137 | return false; | |
138 | } | |
139 | ||
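A hedged sketch (not part of this header) of how a caller that invokes a driver's transmit routine can use dev_xmit_complete() to decide whether it still owns the skb; the xmit callback parameter is an illustrative stand-in for ndo_start_xmit().

```c
/* Hedged sketch: skb ownership after a transmit attempt.
 * The xmit callback stands in for a driver's ndo_start_xmit().
 */
static void example_try_xmit(struct sk_buff *skb, struct net_device *dev,
			     netdev_tx_t (*xmit)(struct sk_buff *skb,
						 struct net_device *dev))
{
	int rc = xmit(skb, dev);

	if (dev_xmit_complete(rc))
		return;			/* skb consumed: success or tx error */

	/* NETDEV_TX_BUSY: the skb is still ours; requeue or free it. */
	kfree_skb(skb);
}
```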
1da177e4 | 140 | /* |
5e82b4b2 | 141 | * Compute the worst-case header length according to the protocols |
1da177e4 LT |
142 | * used. |
143 | */ | |
fe2918b0 | 144 | |
c0eb4540 KS |
145 | #if defined(CONFIG_HYPERV_NET) |
146 | # define LL_MAX_HEADER 128 | |
147 | #elif defined(CONFIG_WLAN) || IS_ENABLED(CONFIG_AX25) | |
8388e3da DM |
148 | # if defined(CONFIG_MAC80211_MESH) |
149 | # define LL_MAX_HEADER 128 | |
150 | # else | |
151 | # define LL_MAX_HEADER 96 | |
152 | # endif | |
1da177e4 | 153 | #else |
8388e3da | 154 | # define LL_MAX_HEADER 32 |
1da177e4 LT |
155 | #endif |
156 | ||
d11ead75 BH |
157 | #if !IS_ENABLED(CONFIG_NET_IPIP) && !IS_ENABLED(CONFIG_NET_IPGRE) && \ |
158 | !IS_ENABLED(CONFIG_IPV6_SIT) && !IS_ENABLED(CONFIG_IPV6_TUNNEL) | |
1da177e4 LT |
159 | #define MAX_HEADER LL_MAX_HEADER |
160 | #else | |
161 | #define MAX_HEADER (LL_MAX_HEADER + 48) | |
162 | #endif | |
163 | ||
164 | /* | |
be1f3c2c BH |
165 | * Old network device statistics. Fields are native words |
166 | * (unsigned long) so they can be read and written atomically. | |
1da177e4 | 167 | */ |
fe2918b0 | 168 | |
d94d9fee | 169 | struct net_device_stats { |
3cfde79c BH |
170 | unsigned long rx_packets; |
171 | unsigned long tx_packets; | |
172 | unsigned long rx_bytes; | |
173 | unsigned long tx_bytes; | |
174 | unsigned long rx_errors; | |
175 | unsigned long tx_errors; | |
176 | unsigned long rx_dropped; | |
177 | unsigned long tx_dropped; | |
178 | unsigned long multicast; | |
1da177e4 | 179 | unsigned long collisions; |
1da177e4 | 180 | unsigned long rx_length_errors; |
3cfde79c BH |
181 | unsigned long rx_over_errors; |
182 | unsigned long rx_crc_errors; | |
183 | unsigned long rx_frame_errors; | |
184 | unsigned long rx_fifo_errors; | |
185 | unsigned long rx_missed_errors; | |
1da177e4 LT |
186 | unsigned long tx_aborted_errors; |
187 | unsigned long tx_carrier_errors; | |
188 | unsigned long tx_fifo_errors; | |
189 | unsigned long tx_heartbeat_errors; | |
190 | unsigned long tx_window_errors; | |
1da177e4 LT |
191 | unsigned long rx_compressed; |
192 | unsigned long tx_compressed; | |
193 | }; | |
194 | ||
1da177e4 LT |
195 | |
196 | #include <linux/cache.h> | |
197 | #include <linux/skbuff.h> | |
198 | ||
adc9300e | 199 | #ifdef CONFIG_RPS |
c5905afb | 200 | #include <linux/static_key.h> |
dc05360f ED |
201 | extern struct static_key_false rps_needed; |
202 | extern struct static_key_false rfs_needed; | |
adc9300e ED |
203 | #endif |
204 | ||
1da177e4 LT |
205 | struct neighbour; |
206 | struct neigh_parms; | |
207 | struct sk_buff; | |
208 | ||
f001fde5 JP |
209 | struct netdev_hw_addr { |
210 | struct list_head list; | |
211 | unsigned char addr[MAX_ADDR_LEN]; | |
212 | unsigned char type; | |
ccffad25 JP |
213 | #define NETDEV_HW_ADDR_T_LAN 1 |
214 | #define NETDEV_HW_ADDR_T_SAN 2 | |
8e1b3884 TY |
215 | #define NETDEV_HW_ADDR_T_UNICAST 3 |
216 | #define NETDEV_HW_ADDR_T_MULTICAST 4 | |
22bedad3 | 217 | bool global_use; |
4cd729b0 | 218 | int sync_cnt; |
8f8f103d | 219 | int refcount; |
4543fbef | 220 | int synced; |
f001fde5 JP |
221 | struct rcu_head rcu_head; |
222 | }; | |
223 | ||
31278e71 JP |
224 | struct netdev_hw_addr_list { |
225 | struct list_head list; | |
226 | int count; | |
227 | }; | |
228 | ||
22bedad3 JP |
229 | #define netdev_hw_addr_list_count(l) ((l)->count) |
230 | #define netdev_hw_addr_list_empty(l) (netdev_hw_addr_list_count(l) == 0) | |
231 | #define netdev_hw_addr_list_for_each(ha, l) \ | |
232 | list_for_each_entry(ha, &(l)->list, list) | |
32e7bfc4 | 233 | |
22bedad3 JP |
234 | #define netdev_uc_count(dev) netdev_hw_addr_list_count(&(dev)->uc) |
235 | #define netdev_uc_empty(dev) netdev_hw_addr_list_empty(&(dev)->uc) | |
236 | #define netdev_for_each_uc_addr(ha, dev) \ | |
237 | netdev_hw_addr_list_for_each(ha, &(dev)->uc) | |
6683ece3 | 238 | |
22bedad3 JP |
239 | #define netdev_mc_count(dev) netdev_hw_addr_list_count(&(dev)->mc) |
240 | #define netdev_mc_empty(dev) netdev_hw_addr_list_empty(&(dev)->mc) | |
18e225f2 | 241 | #define netdev_for_each_mc_addr(ha, dev) \ |
22bedad3 | 242 | netdev_hw_addr_list_for_each(ha, &(dev)->mc) |
6683ece3 | 243 | |
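A minimal sketch of how a driver's ndo_set_rx_mode might walk the multicast list with the helpers above; example_write_mc_filter() is a hypothetical placeholder for hardware-specific filter programming.

```c
/* Hedged sketch: iterating the device multicast list. */
static void example_write_mc_filter(struct net_device *dev,
				    const unsigned char *addr)
{
	/* program one hardware multicast filter entry here */
}

static void example_set_rx_mode(struct net_device *dev)
{
	struct netdev_hw_addr *ha;

	if (netdev_mc_empty(dev))
		return;

	netdev_for_each_mc_addr(ha, dev)
		example_write_mc_filter(dev, ha->addr);
}
```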
d94d9fee | 244 | struct hh_cache { |
5b3dc2f3 | 245 | unsigned int hh_len; |
3644f0ce | 246 | seqlock_t hh_lock; |
1da177e4 LT |
247 | |
248 | /* cached hardware header; allow for machine alignment needs. */ | |
249 | #define HH_DATA_MOD 16 | |
250 | #define HH_DATA_OFF(__len) \ | |
5ba0eac6 | 251 | (HH_DATA_MOD - (((__len - 1) & (HH_DATA_MOD - 1)) + 1)) |
1da177e4 LT |
252 | #define HH_DATA_ALIGN(__len) \ |
253 | (((__len)+(HH_DATA_MOD-1))&~(HH_DATA_MOD - 1)) | |
254 | unsigned long hh_data[HH_DATA_ALIGN(LL_MAX_HEADER) / sizeof(long)]; | |
255 | }; | |
256 | ||
5e82b4b2 | 257 | /* Reserve HH_DATA_MOD byte-aligned hard_header_len, but at least that much. |
1da177e4 LT |
258 | * Alternative is: |
259 | * dev->hard_header_len ? (dev->hard_header_len + | |
260 | * (HH_DATA_MOD - 1)) & ~(HH_DATA_MOD - 1) : 0 | |
261 | * | |
262 | * We could use other alignment values, but we must maintain the | |
263 | * relationship HH alignment <= LL alignment. | |
264 | */ | |
265 | #define LL_RESERVED_SPACE(dev) \ | |
f5184d26 | 266 | ((((dev)->hard_header_len+(dev)->needed_headroom)&~(HH_DATA_MOD - 1)) + HH_DATA_MOD) |
1da177e4 | 267 | #define LL_RESERVED_SPACE_EXTRA(dev,extra) \ |
f5184d26 | 268 | ((((dev)->hard_header_len+(dev)->needed_headroom+(extra))&~(HH_DATA_MOD - 1)) + HH_DATA_MOD) |
1da177e4 | 269 | |
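LL_RESERVED_SPACE() is what allocation paths use to leave headroom for the link-layer header. A small sketch, assuming an atomic allocation context (names hypothetical, not from this file):

```c
/* Hedged sketch: reserve link-layer headroom before filling in payload. */
static struct sk_buff *example_alloc_with_headroom(struct net_device *dev,
						   unsigned int payload_len)
{
	struct sk_buff *skb;

	skb = alloc_skb(LL_RESERVED_SPACE(dev) + payload_len, GFP_ATOMIC);
	if (!skb)
		return NULL;

	skb_reserve(skb, LL_RESERVED_SPACE(dev));	/* room for hard header */
	skb->dev = dev;
	return skb;
}
```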
3b04ddde SH |
270 | struct header_ops { |
271 | int (*create) (struct sk_buff *skb, struct net_device *dev, | |
272 | unsigned short type, const void *daddr, | |
95c96174 | 273 | const void *saddr, unsigned int len); |
3b04ddde | 274 | int (*parse)(const struct sk_buff *skb, unsigned char *haddr); |
e69dd336 | 275 | int (*cache)(const struct neighbour *neigh, struct hh_cache *hh, __be16 type); |
3b04ddde SH |
276 | void (*cache_update)(struct hh_cache *hh, |
277 | const struct net_device *dev, | |
278 | const unsigned char *haddr); | |
2793a23a | 279 | bool (*validate)(const char *ll_header, unsigned int len); |
e78b2915 | 280 | __be16 (*parse_protocol)(const struct sk_buff *skb); |
3b04ddde SH |
281 | }; |
282 | ||
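As a hedged illustration of how header_ops is wired up, an Ethernet-framed device would normally reuse the generic Ethernet helpers (eth_header/eth_header_parse, declared via linux/etherdevice.h) rather than provide its own callbacks:

```c
/* Hedged sketch: header_ops populated with the generic Ethernet helpers.
 * Most drivers get this for free via ether_setup()/eth_header_ops.
 */
static const struct header_ops example_header_ops = {
	.create	= eth_header,
	.parse	= eth_header_parse,
};
```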
1da177e4 | 283 | /* These flag bits are private to the generic network queueing |
5e82b4b2 | 284 | * layer; they may not be explicitly referenced by any other |
1da177e4 LT |
285 | * code. |
286 | */ | |
287 | ||
d94d9fee | 288 | enum netdev_state_t { |
1da177e4 LT |
289 | __LINK_STATE_START, |
290 | __LINK_STATE_PRESENT, | |
1da177e4 | 291 | __LINK_STATE_NOCARRIER, |
b00055aa SR |
292 | __LINK_STATE_LINKWATCH_PENDING, |
293 | __LINK_STATE_DORMANT, | |
eec517cd | 294 | __LINK_STATE_TESTING, |
1da177e4 LT |
295 | }; |
296 | ||
297 | ||
298 | /* | |
5e82b4b2 | 299 | * This structure holds boot-time configured netdevice settings. They |
fe2918b0 | 300 | * are then used in the device probing. |
1da177e4 LT |
301 | */ |
302 | struct netdev_boot_setup { | |
303 | char name[IFNAMSIZ]; | |
304 | struct ifmap map; | |
305 | }; | |
306 | #define NETDEV_BOOT_SETUP_MAX 8 | |
307 | ||
f629d208 | 308 | int __init netdev_boot_setup(char *str); |
1da177e4 | 309 | |
6312fe77 LR |
310 | struct gro_list { |
311 | struct list_head list; | |
312 | int count; | |
313 | }; | |
314 | ||
bea3348e | 315 | /* |
d9f37d01 LR |
316 | * size of gro hash buckets, must be less than the number of bits in
317 | * napi_struct::gro_bitmask | |
bea3348e | 318 | */ |
07d78363 | 319 | #define GRO_HASH_BUCKETS 8 |
d9f37d01 LR |
320 | |
321 | /* | |
322 | * Structure for NAPI scheduling similar to tasklet but with weighting | |
323 | */ | |
bea3348e SH |
324 | struct napi_struct { |
325 | /* The poll_list must only be managed by the entity which | |
326 | * changes the state of the NAPI_STATE_SCHED bit. This means | |
327 | * whoever atomically sets that bit can add this napi_struct | |
5e82b4b2 | 328 | * to the per-CPU poll_list, and whoever clears that bit |
bea3348e SH |
329 | * can remove from the list right before clearing the bit. |
330 | */ | |
331 | struct list_head poll_list; | |
332 | ||
333 | unsigned long state; | |
334 | int weight; | |
6f8b12d6 | 335 | int defer_hard_irqs_count; |
d9f37d01 | 336 | unsigned long gro_bitmask; |
bea3348e SH |
337 | int (*poll)(struct napi_struct *, int); |
338 | #ifdef CONFIG_NETPOLL | |
bea3348e | 339 | int poll_owner; |
bea3348e | 340 | #endif |
5d38a079 | 341 | struct net_device *dev; |
6312fe77 | 342 | struct gro_list gro_hash[GRO_HASH_BUCKETS]; |
5d38a079 | 343 | struct sk_buff *skb; |
323ebb61 EC |
344 | struct list_head rx_list; /* Pending GRO_NORMAL skbs */ |
345 | int rx_count; /* length of rx_list */ | |
3b47d303 | 346 | struct hrtimer timer; |
404f7c9e | 347 | struct list_head dev_list; |
af12fa6e ET |
348 | struct hlist_node napi_hash_node; |
349 | unsigned int napi_id; | |
29863d41 | 350 | struct task_struct *thread; |
bea3348e SH |
351 | }; |
352 | ||
d94d9fee | 353 | enum { |
7fd3253a BT |
354 | NAPI_STATE_SCHED, /* Poll is scheduled */ |
355 | NAPI_STATE_MISSED, /* reschedule a napi */ | |
356 | NAPI_STATE_DISABLE, /* Disable pending */ | |
357 | NAPI_STATE_NPSVC, /* Netpoll - don't dequeue from poll_list */ | |
358 | NAPI_STATE_LISTED, /* NAPI added to system lists */ | |
359 | NAPI_STATE_NO_BUSY_POLL, /* Do not add in napi_hash, no busy polling */ | |
360 | NAPI_STATE_IN_BUSY_POLL, /* sk_busy_loop() owns this NAPI */ | |
361 | NAPI_STATE_PREFER_BUSY_POLL, /* prefer busy-polling over softirq processing*/ | |
29863d41 | 362 | NAPI_STATE_THREADED, /* The poll is performed inside its own thread*/ |
217f6974 ED |
363 | }; |
364 | ||
365 | enum { | |
7fd3253a BT |
366 | NAPIF_STATE_SCHED = BIT(NAPI_STATE_SCHED), |
367 | NAPIF_STATE_MISSED = BIT(NAPI_STATE_MISSED), | |
368 | NAPIF_STATE_DISABLE = BIT(NAPI_STATE_DISABLE), | |
369 | NAPIF_STATE_NPSVC = BIT(NAPI_STATE_NPSVC), | |
370 | NAPIF_STATE_LISTED = BIT(NAPI_STATE_LISTED), | |
371 | NAPIF_STATE_NO_BUSY_POLL = BIT(NAPI_STATE_NO_BUSY_POLL), | |
372 | NAPIF_STATE_IN_BUSY_POLL = BIT(NAPI_STATE_IN_BUSY_POLL), | |
373 | NAPIF_STATE_PREFER_BUSY_POLL = BIT(NAPI_STATE_PREFER_BUSY_POLL), | |
29863d41 | 374 | NAPIF_STATE_THREADED = BIT(NAPI_STATE_THREADED), |
bea3348e SH |
375 | }; |
376 | ||
5b252f0c | 377 | enum gro_result { |
d1c76af9 HX |
378 | GRO_MERGED, |
379 | GRO_MERGED_FREE, | |
380 | GRO_HELD, | |
381 | GRO_NORMAL, | |
25393d3f | 382 | GRO_CONSUMED, |
d1c76af9 | 383 | }; |
5b252f0c | 384 | typedef enum gro_result gro_result_t; |
d1c76af9 | 385 | |
8a4eb573 JP |
386 | /* |
387 | * enum rx_handler_result - Possible return values for rx_handlers. | |
388 | * @RX_HANDLER_CONSUMED: skb was consumed by rx_handler, do not process it | |
389 | * further. | |
390 | * @RX_HANDLER_ANOTHER: Do another round in receive path. This is indicated in | |
391 | * case skb->dev was changed by rx_handler. | |
392 | * @RX_HANDLER_EXACT: Force exact delivery, no wildcard. | |
5e82b4b2 | 393 | * @RX_HANDLER_PASS: Do nothing, pass the skb as if no rx_handler was called. |
8a4eb573 JP |
394 | * |
395 | * rx_handlers are functions called from inside __netif_receive_skb(), to do | |
396 | * special processing of the skb, prior to delivery to protocol handlers. | |
397 | * | |
398 | * Currently, a net_device can only have a single rx_handler registered. Trying | |
399 | * to register a second rx_handler will return -EBUSY. | |
400 | * | |
401 | * To register a rx_handler on a net_device, use netdev_rx_handler_register(). | |
402 | * To unregister a rx_handler on a net_device, use | |
403 | * netdev_rx_handler_unregister(). | |
404 | * | |
405 | * Upon return, rx_handler is expected to tell __netif_receive_skb() what to | |
406 | * do with the skb. | |
407 | * | |
5e82b4b2 | 408 | * If the rx_handler consumed the skb in some way, it should return |
8a4eb573 | 409 | * RX_HANDLER_CONSUMED. This is appropriate when the rx_handler arranged for |
5e82b4b2 | 410 | * the skb to be delivered in some other way. |
8a4eb573 JP |
411 | * |
412 | * If the rx_handler changed skb->dev, to divert the skb to another | |
413 | * net_device, it should return RX_HANDLER_ANOTHER. The rx_handler for the | |
414 | * new device will be called if it exists. | |
415 | * | |
5e82b4b2 | 416 | * If the rx_handler decides the skb should be ignored, it should return |
8a4eb573 | 417 | * RX_HANDLER_EXACT. The skb will only be delivered to protocol handlers that |
d93cf068 | 418 | * are registered on exact device (ptype->dev == skb->dev). |
8a4eb573 | 419 | * |
5e82b4b2 | 420 | * If the rx_handler didn't change skb->dev, but wants the skb to be normally |
8a4eb573 JP |
421 | * delivered, it should return RX_HANDLER_PASS. |
422 | * | |
423 | * A device without a registered rx_handler will behave as if rx_handler | |
424 | * returned RX_HANDLER_PASS. | |
425 | */ | |
426 | ||
427 | enum rx_handler_result { | |
428 | RX_HANDLER_CONSUMED, | |
429 | RX_HANDLER_ANOTHER, | |
430 | RX_HANDLER_EXACT, | |
431 | RX_HANDLER_PASS, | |
432 | }; | |
433 | typedef enum rx_handler_result rx_handler_result_t; | |
434 | typedef rx_handler_result_t rx_handler_func_t(struct sk_buff **pskb); | |
ab95bfe0 | 435 | |
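A hedged sketch of a pass-through rx_handler obeying the contract documented above; it would be attached with netdev_rx_handler_register() under RTNL, and the handler name is hypothetical.

```c
/* Hedged sketch: minimal rx_handler following the rules described above. */
static rx_handler_result_t example_rx_handler(struct sk_buff **pskb)
{
	struct sk_buff *skb = *pskb;

	if (!pskb_may_pull(skb, ETH_HLEN))
		return RX_HANDLER_PASS;		/* let normal delivery continue */

	/* Inspect or rewrite the packet here. If skb->dev is changed to
	 * divert it, return RX_HANDLER_ANOTHER; if the skb was consumed
	 * (e.g. queued elsewhere), return RX_HANDLER_CONSUMED.
	 */
	return RX_HANDLER_PASS;
}
```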
f629d208 | 436 | void __napi_schedule(struct napi_struct *n); |
bc9ad166 | 437 | void __napi_schedule_irqoff(struct napi_struct *n); |
bea3348e | 438 | |
4d29515f | 439 | static inline bool napi_disable_pending(struct napi_struct *n) |
a0a46196 DM |
440 | { |
441 | return test_bit(NAPI_STATE_DISABLE, &n->state); | |
442 | } | |
443 | ||
7fd3253a BT |
444 | static inline bool napi_prefer_busy_poll(struct napi_struct *n) |
445 | { | |
446 | return test_bit(NAPI_STATE_PREFER_BUSY_POLL, &n->state); | |
447 | } | |
448 | ||
39e6c820 | 449 | bool napi_schedule_prep(struct napi_struct *n); |
bea3348e SH |
450 | |
451 | /** | |
452 | * napi_schedule - schedule NAPI poll | |
5e82b4b2 | 453 | * @n: NAPI context |
bea3348e SH |
454 | * |
455 | * Schedule NAPI poll routine to be called if it is not already | |
456 | * running. | |
457 | */ | |
458 | static inline void napi_schedule(struct napi_struct *n) | |
459 | { | |
460 | if (napi_schedule_prep(n)) | |
461 | __napi_schedule(n); | |
462 | } | |
463 | ||
bc9ad166 ED |
464 | /** |
465 | * napi_schedule_irqoff - schedule NAPI poll | |
5e82b4b2 | 466 | * @n: NAPI context |
bc9ad166 ED |
467 | * |
468 | * Variant of napi_schedule(), assuming hard irqs are masked. | |
469 | */ | |
470 | static inline void napi_schedule_irqoff(struct napi_struct *n) | |
471 | { | |
472 | if (napi_schedule_prep(n)) | |
473 | __napi_schedule_irqoff(n); | |
474 | } | |
475 | ||
bfe13f54 | 476 | /* Try to reschedule poll. Called by dev->poll() after napi_complete(). */ |
4d29515f | 477 | static inline bool napi_reschedule(struct napi_struct *napi) |
bfe13f54 RD |
478 | { |
479 | if (napi_schedule_prep(napi)) { | |
480 | __napi_schedule(napi); | |
4d29515f | 481 | return true; |
bfe13f54 | 482 | } |
4d29515f | 483 | return false; |
bfe13f54 RD |
484 | } |
485 | ||
364b6055 | 486 | bool napi_complete_done(struct napi_struct *n, int work_done); |
bea3348e SH |
487 | /** |
488 | * napi_complete - NAPI processing complete | |
5e82b4b2 | 489 | * @n: NAPI context |
bea3348e SH |
490 | * |
491 | * Mark NAPI processing as complete. | |
3b47d303 | 492 | * Consider using napi_complete_done() instead. |
364b6055 | 493 | * Return false if device should avoid rearming interrupts. |
bea3348e | 494 | */ |
364b6055 | 495 | static inline bool napi_complete(struct napi_struct *n) |
3b47d303 ED |
496 | { |
497 | return napi_complete_done(n, 0); | |
498 | } | |
bea3348e | 499 | |
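The canonical NAPI pattern built on these helpers: the hard interrupt masks device interrupts and calls napi_schedule(); the poll callback processes up to @budget packets and, when it finishes early, calls napi_complete_done() and re-enables interrupts. A hedged sketch, with the two example_* helpers as placeholders for device-specific code:

```c
/* Hedged sketch of a NAPI poll callback; example_* helpers are placeholders
 * for device-specific RX processing and interrupt unmasking.
 */
static int example_process_rx(struct net_device *dev, int budget)
{
	return 0;	/* would return number of packets handled, <= budget */
}

static void example_enable_irq(struct net_device *dev)
{
	/* device-specific interrupt unmasking would go here */
}

static int example_poll(struct napi_struct *napi, int budget)
{
	int work_done = example_process_rx(napi->dev, budget);

	/* Re-arm interrupts only if all pending work was done and
	 * napi_complete_done() agrees this NAPI context can go idle.
	 */
	if (work_done < budget && napi_complete_done(napi, work_done))
		example_enable_irq(napi->dev);

	return work_done;
}
```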
5fdd2f0e WW |
500 | int dev_set_threaded(struct net_device *dev, bool threaded); |
501 | ||
bea3348e SH |
502 | /** |
503 | * napi_disable - prevent NAPI from scheduling | |
5e82b4b2 | 504 | * @n: NAPI context |
bea3348e SH |
505 | * |
506 | * Stop NAPI from being scheduled on this context. | |
507 | * Waits till any outstanding processing completes. | |
508 | */ | |
3b47d303 | 509 | void napi_disable(struct napi_struct *n); |
bea3348e | 510 | |
29863d41 | 511 | void napi_enable(struct napi_struct *n); |
bea3348e | 512 | |
c264c3de SH |
513 | /** |
514 | * napi_synchronize - wait until NAPI is not running | |
5e82b4b2 | 515 | * @n: NAPI context |
c264c3de SH |
516 | * |
517 | * Wait until NAPI is done being scheduled on this context. | |
518 | * Waits till any outstanding processing completes but | |
519 | * does not disable future activations. | |
520 | */ | |
521 | static inline void napi_synchronize(const struct napi_struct *n) | |
522 | { | |
facc432f AB |
523 | if (IS_ENABLED(CONFIG_SMP)) |
524 | while (test_bit(NAPI_STATE_SCHED, &n->state)) | |
525 | msleep(1); | |
526 | else | |
527 | barrier(); | |
c264c3de | 528 | } |
c264c3de | 529 | |
6c5c9581 MK |
530 | /** |
531 | * napi_if_scheduled_mark_missed - if napi is running, set the | |
532 | * NAPIF_STATE_MISSED | |
533 | * @n: NAPI context | |
534 | * | |
535 | * If napi is running, set the NAPIF_STATE_MISSED, and return true if | |
536 | * NAPI is scheduled. | |
537 | **/ | |
538 | static inline bool napi_if_scheduled_mark_missed(struct napi_struct *n) | |
539 | { | |
540 | unsigned long val, new; | |
541 | ||
542 | do { | |
543 | val = READ_ONCE(n->state); | |
544 | if (val & NAPIF_STATE_DISABLE) | |
545 | return true; | |
546 | ||
547 | if (!(val & NAPIF_STATE_SCHED)) | |
548 | return false; | |
549 | ||
550 | new = val | NAPIF_STATE_MISSED; | |
551 | } while (cmpxchg(&n->state, val, new) != val); | |
552 | ||
553 | return true; | |
554 | } | |
555 | ||
d94d9fee | 556 | enum netdev_queue_state_t { |
73466498 TH |
557 | __QUEUE_STATE_DRV_XOFF, |
558 | __QUEUE_STATE_STACK_XOFF, | |
c3f26a26 | 559 | __QUEUE_STATE_FROZEN, |
79d16385 | 560 | }; |
8e2f1a63 DB |
561 | |
562 | #define QUEUE_STATE_DRV_XOFF (1 << __QUEUE_STATE_DRV_XOFF) | |
563 | #define QUEUE_STATE_STACK_XOFF (1 << __QUEUE_STATE_STACK_XOFF) | |
564 | #define QUEUE_STATE_FROZEN (1 << __QUEUE_STATE_FROZEN) | |
565 | ||
566 | #define QUEUE_STATE_ANY_XOFF (QUEUE_STATE_DRV_XOFF | QUEUE_STATE_STACK_XOFF) | |
567 | #define QUEUE_STATE_ANY_XOFF_OR_FROZEN (QUEUE_STATE_ANY_XOFF | \ | |
568 | QUEUE_STATE_FROZEN) | |
569 | #define QUEUE_STATE_DRV_XOFF_OR_FROZEN (QUEUE_STATE_DRV_XOFF | \ | |
570 | QUEUE_STATE_FROZEN) | |
571 | ||
73466498 TH |
572 | /* |
573 | * __QUEUE_STATE_DRV_XOFF is used by drivers to stop the transmit queue. The | |
574 | * netif_tx_* functions below are used to manipulate this flag. The | |
575 | * __QUEUE_STATE_STACK_XOFF flag is used by the stack to stop the transmit | |
576 | * queue independently. The netif_xmit_*stopped functions below are called | |
577 | * to check if the queue has been stopped by the driver or stack (either | |
578 | * of the XOFF bits are set in the state). Drivers should not need to call | |
579 | * netif_xmit*stopped functions, they should only be using netif_tx_*. | |
580 | */ | |
79d16385 | 581 | |
bb949fbd | 582 | struct netdev_queue { |
6a321cb3 | 583 | /* |
5e82b4b2 | 584 | * read-mostly part |
6a321cb3 | 585 | */ |
bb949fbd | 586 | struct net_device *dev; |
46e5da40 | 587 | struct Qdisc __rcu *qdisc; |
b0e1e646 | 588 | struct Qdisc *qdisc_sleeping; |
ccf5ff69 | 589 | #ifdef CONFIG_SYSFS |
1d24eb48 TH |
590 | struct kobject kobj; |
591 | #endif | |
f2cd2d3e ED |
592 | #if defined(CONFIG_XPS) && defined(CONFIG_NUMA) |
593 | int numa_node; | |
594 | #endif | |
c0ef079c FW |
595 | unsigned long tx_maxrate; |
596 | /* | |
597 | * Number of TX timeouts for this queue | |
598 | * (/sys/class/net/DEV/Q/trans_timeout) | |
599 | */ | |
600 | unsigned long trans_timeout; | |
ffcfe25b AD |
601 | |
602 | /* Subordinate device that the queue has been assigned to */ | |
603 | struct net_device *sb_dev; | |
661b8d1b | 604 | #ifdef CONFIG_XDP_SOCKETS |
1742b3d5 | 605 | struct xsk_buff_pool *pool; |
661b8d1b | 606 | #endif |
6a321cb3 | 607 | /* |
5e82b4b2 | 608 | * write-mostly part |
6a321cb3 ED |
609 | */ |
610 | spinlock_t _xmit_lock ____cacheline_aligned_in_smp; | |
611 | int xmit_lock_owner; | |
9d21493b | 612 | /* |
9b36627a | 613 | * Time (in jiffies) of last Tx |
9d21493b ED |
614 | */ |
615 | unsigned long trans_start; | |
ccf5ff69 | 616 | |
114cf580 TH |
617 | unsigned long state; |
618 | ||
619 | #ifdef CONFIG_BQL | |
620 | struct dql dql; | |
621 | #endif | |
e8a0464c | 622 | } ____cacheline_aligned_in_smp; |
bb949fbd | 623 | |
79134e6c | 624 | extern int sysctl_fb_tunnels_only_for_init_net; |
856c395c | 625 | extern int sysctl_devconf_inherit_init_net; |
79134e6c | 626 | |
316cdaa1 MB |
627 | /* |
628 | * sysctl_fb_tunnels_only_for_init_net == 0 : For all netns | |
629 | * == 1 : For initns only | |
630 | * == 2 : For none. | |
631 | */ | |
79134e6c ED |
632 | static inline bool net_has_fallback_tunnels(const struct net *net) |
633 | { | |
3753d977 MB |
634 | return !IS_ENABLED(CONFIG_SYSCTL) || |
635 | !sysctl_fb_tunnels_only_for_init_net || | |
636 | (net == &init_net && sysctl_fb_tunnels_only_for_init_net == 1); | |
79134e6c ED |
637 | } |
638 | ||
f2cd2d3e ED |
639 | static inline int netdev_queue_numa_node_read(const struct netdev_queue *q) |
640 | { | |
641 | #if defined(CONFIG_XPS) && defined(CONFIG_NUMA) | |
642 | return q->numa_node; | |
643 | #else | |
b236da69 | 644 | return NUMA_NO_NODE; |
f2cd2d3e ED |
645 | #endif |
646 | } | |
647 | ||
648 | static inline void netdev_queue_numa_node_write(struct netdev_queue *q, int node) | |
649 | { | |
650 | #if defined(CONFIG_XPS) && defined(CONFIG_NUMA) | |
651 | q->numa_node = node; | |
652 | #endif | |
653 | } | |
654 | ||
df334545 | 655 | #ifdef CONFIG_RPS |
0a9627f2 TH |
656 | /* |
657 | * This structure holds an RPS map which can be of variable length. The | |
658 | * map is an array of CPUs. | |
659 | */ | |
660 | struct rps_map { | |
661 | unsigned int len; | |
662 | struct rcu_head rcu; | |
bb4cf02d | 663 | u16 cpus[]; |
0a9627f2 | 664 | }; |
60b778ce | 665 | #define RPS_MAP_SIZE(_num) (sizeof(struct rps_map) + ((_num) * sizeof(u16))) |
0a9627f2 | 666 | |
fec5e652 | 667 | /* |
c445477d BH |
668 | * The rps_dev_flow structure contains the mapping of a flow to a CPU, the |
669 | * tail pointer for that CPU's input queue at the time of last enqueue, and | |
670 | * a hardware filter index. | |
fec5e652 TH |
671 | */ |
672 | struct rps_dev_flow { | |
673 | u16 cpu; | |
c445477d | 674 | u16 filter; |
fec5e652 TH |
675 | unsigned int last_qtail; |
676 | }; | |
c445477d | 677 | #define RPS_NO_FILTER 0xffff |
fec5e652 TH |
678 | |
679 | /* | |
680 | * The rps_dev_flow_table structure contains a table of flow mappings. | |
681 | */ | |
682 | struct rps_dev_flow_table { | |
683 | unsigned int mask; | |
684 | struct rcu_head rcu; | |
bb4cf02d | 685 | struct rps_dev_flow flows[]; |
fec5e652 TH |
686 | }; |
687 | #define RPS_DEV_FLOW_TABLE_SIZE(_num) (sizeof(struct rps_dev_flow_table) + \ | |
60b778ce | 688 | ((_num) * sizeof(struct rps_dev_flow))) |
fec5e652 TH |
689 | |
690 | /* | |
691 | * The rps_sock_flow_table contains mappings of flows to the last CPU | |
692 | * on which they were processed by the application (set in recvmsg). | |
5e82b4b2 BH |
693 | * Each entry is a 32bit value. Upper part is the high-order bits |
694 | * of flow hash, lower part is CPU number. | |
567e4b79 | 695 | * rps_cpu_mask is used to partition the space, depending on number of |
5e82b4b2 BH |
696 | * possible CPUs : rps_cpu_mask = roundup_pow_of_two(nr_cpu_ids) - 1 |
697 | * For example, if 64 CPUs are possible, rps_cpu_mask = 0x3f, | |
567e4b79 | 698 | * meaning we use 32-6=26 bits for the hash. |
fec5e652 TH |
699 | */ |
700 | struct rps_sock_flow_table { | |
567e4b79 | 701 | u32 mask; |
93c1af6c | 702 | |
bb4cf02d | 703 | u32 ents[] ____cacheline_aligned_in_smp; |
fec5e652 | 704 | }; |
567e4b79 | 705 | #define RPS_SOCK_FLOW_TABLE_SIZE(_num) (offsetof(struct rps_sock_flow_table, ents[_num])) |
fec5e652 TH |
706 | |
707 | #define RPS_NO_CPU 0xffff | |
708 | ||
567e4b79 ED |
709 | extern u32 rps_cpu_mask; |
710 | extern struct rps_sock_flow_table __rcu *rps_sock_flow_table; | |
711 | ||
fec5e652 TH |
712 | static inline void rps_record_sock_flow(struct rps_sock_flow_table *table, |
713 | u32 hash) | |
714 | { | |
715 | if (table && hash) { | |
567e4b79 ED |
716 | unsigned int index = hash & table->mask; |
717 | u32 val = hash & ~rps_cpu_mask; | |
fec5e652 | 718 | |
5e82b4b2 | 719 | /* We only give a hint, preemption can change CPU under us */ |
567e4b79 | 720 | val |= raw_smp_processor_id(); |
fec5e652 | 721 | |
567e4b79 ED |
722 | if (table->ents[index] != val) |
723 | table->ents[index] = val; | |
fec5e652 TH |
724 | } |
725 | } | |
726 | ||
c445477d | 727 | #ifdef CONFIG_RFS_ACCEL |
f629d208 JP |
728 | bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index, u32 flow_id, |
729 | u16 filter_id); | |
c445477d | 730 | #endif |
a953be53 | 731 | #endif /* CONFIG_RPS */ |
c445477d | 732 | |
0a9627f2 TH |
733 | /* This structure contains an instance of an RX queue. */ |
734 | struct netdev_rx_queue { | |
a953be53 | 735 | #ifdef CONFIG_RPS |
6e3f7faf ED |
736 | struct rps_map __rcu *rps_map; |
737 | struct rps_dev_flow_table __rcu *rps_flow_table; | |
a953be53 | 738 | #endif |
6e3f7faf | 739 | struct kobject kobj; |
fe822240 | 740 | struct net_device *dev; |
e817f856 | 741 | struct xdp_rxq_info xdp_rxq; |
661b8d1b | 742 | #ifdef CONFIG_XDP_SOCKETS |
1742b3d5 | 743 | struct xsk_buff_pool *pool; |
661b8d1b | 744 | #endif |
0a9627f2 | 745 | } ____cacheline_aligned_in_smp; |
a953be53 MD |
746 | |
747 | /* | |
748 | * RX queue sysfs structures and functions. | |
749 | */ | |
750 | struct rx_queue_attribute { | |
751 | struct attribute attr; | |
718ad681 | 752 | ssize_t (*show)(struct netdev_rx_queue *queue, char *buf); |
a953be53 | 753 | ssize_t (*store)(struct netdev_rx_queue *queue, |
718ad681 | 754 | const char *buf, size_t len); |
a953be53 | 755 | }; |
d314774c | 756 | |
044ab86d AT |
757 | /* XPS map type and offset of the xps map within net_device->xps_maps[]. */ |
758 | enum xps_map_type { | |
759 | XPS_CPUS = 0, | |
760 | XPS_RXQS, | |
761 | XPS_MAPS_MAX, | |
762 | }; | |
763 | ||
bf264145 TH |
764 | #ifdef CONFIG_XPS |
765 | /* | |
766 | * This structure holds an XPS map which can be of variable length. The | |
767 | * map is an array of queues. | |
768 | */ | |
769 | struct xps_map { | |
770 | unsigned int len; | |
771 | unsigned int alloc_len; | |
772 | struct rcu_head rcu; | |
bb4cf02d | 773 | u16 queues[]; |
bf264145 | 774 | }; |
60b778ce | 775 | #define XPS_MAP_SIZE(_num) (sizeof(struct xps_map) + ((_num) * sizeof(u16))) |
c59f419b HD |
776 | #define XPS_MIN_MAP_ALLOC ((L1_CACHE_ALIGN(offsetof(struct xps_map, queues[1])) \ |
777 | - sizeof(struct xps_map)) / sizeof(u16)) | |
bf264145 TH |
778 | |
779 | /* | |
780 | * This structure holds all XPS maps for device. Maps are indexed by CPU. | |
255c04a8 | 781 | * |
5478fcd0 AT |
782 | * We keep track of the number of cpus/rxqs used when the struct is allocated, |
783 | * in nr_ids. This will help not accessing out-of-bound memory. | |
784 | * | |
255c04a8 AT |
785 | * We keep track of the number of traffic classes used when the struct is |
786 | * allocated, in num_tc. This will be used to navigate the maps, to ensure we're | |
787 | * not crossing its upper bound, as the original dev->num_tc can be updated in | |
788 | * the meantime. | |
bf264145 TH |
789 | */ |
790 | struct xps_dev_maps { | |
791 | struct rcu_head rcu; | |
5478fcd0 | 792 | unsigned int nr_ids; |
255c04a8 | 793 | s16 num_tc; |
bb4cf02d | 794 | struct xps_map __rcu *attr_map[]; /* Either CPUs map or RXQs map */ |
bf264145 | 795 | }; |
80d19669 AN |
796 | |
797 | #define XPS_CPU_DEV_MAPS_SIZE(_tcs) (sizeof(struct xps_dev_maps) + \ | |
184c449f | 798 | (nr_cpu_ids * (_tcs) * sizeof(struct xps_map *))) |
80d19669 AN |
799 | |
800 | #define XPS_RXQ_DEV_MAPS_SIZE(_tcs, _rxqs) (sizeof(struct xps_dev_maps) +\ | |
801 | (_rxqs * (_tcs) * sizeof(struct xps_map *))) | |
802 | ||
bf264145 TH |
803 | #endif /* CONFIG_XPS */ |
804 | ||
4f57c087 JF |
805 | #define TC_MAX_QUEUE 16 |
806 | #define TC_BITMASK 15 | |
807 | /* HW offloaded queuing disciplines txq count and offset maps */ | |
808 | struct netdev_tc_txq { | |
809 | u16 count; | |
810 | u16 offset; | |
811 | }; | |
812 | ||
68bad94e NP |
813 | #if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE) |
814 | /* | |
815 | * This structure is to hold information about the device | |
816 | * configured to run FCoE protocol stack. | |
817 | */ | |
818 | struct netdev_fcoe_hbainfo { | |
819 | char manufacturer[64]; | |
820 | char serial_number[64]; | |
821 | char hardware_version[64]; | |
822 | char driver_version[64]; | |
823 | char optionrom_version[64]; | |
824 | char firmware_version[64]; | |
825 | char model[256]; | |
826 | char model_description[256]; | |
827 | }; | |
828 | #endif | |
829 | ||
02637fce | 830 | #define MAX_PHYS_ITEM_ID_LEN 32 |
66b52b0d | 831 | |
02637fce JP |
832 | /* This structure holds a unique identifier to identify some |
833 | * physical item (port for example) used by a netdevice. | |
66b52b0d | 834 | */ |
02637fce JP |
835 | struct netdev_phys_item_id { |
836 | unsigned char id[MAX_PHYS_ITEM_ID_LEN]; | |
66b52b0d JP |
837 | unsigned char id_len; |
838 | }; | |
839 | ||
d754f98b SF |
840 | static inline bool netdev_phys_item_id_same(struct netdev_phys_item_id *a, |
841 | struct netdev_phys_item_id *b) | |
842 | { | |
843 | return a->id_len == b->id_len && | |
844 | memcmp(a->id, b->id, a->id_len) == 0; | |
845 | } | |
846 | ||
99932d4f | 847 | typedef u16 (*select_queue_fallback_t)(struct net_device *dev, |
8ec56fc3 AD |
848 | struct sk_buff *skb, |
849 | struct net_device *sb_dev); | |
99932d4f | 850 | |
ddb94eaf PNA |
851 | enum net_device_path_type { |
852 | DEV_PATH_ETHERNET = 0, | |
e4417d69 | 853 | DEV_PATH_VLAN, |
ec9d16ba | 854 | DEV_PATH_BRIDGE, |
f6efc675 | 855 | DEV_PATH_PPPOE, |
0994d492 | 856 | DEV_PATH_DSA, |
ddb94eaf PNA |
857 | }; |
858 | ||
859 | struct net_device_path { | |
860 | enum net_device_path_type type; | |
861 | const struct net_device *dev; | |
e4417d69 PNA |
862 | union { |
863 | struct { | |
864 | u16 id; | |
865 | __be16 proto; | |
f6efc675 | 866 | u8 h_dest[ETH_ALEN]; |
e4417d69 | 867 | } encap; |
bcf2766b FF |
868 | struct { |
869 | enum { | |
870 | DEV_PATH_BR_VLAN_KEEP, | |
871 | DEV_PATH_BR_VLAN_TAG, | |
872 | DEV_PATH_BR_VLAN_UNTAG, | |
873 | } vlan_mode; | |
874 | u16 vlan_id; | |
875 | __be16 vlan_proto; | |
876 | } bridge; | |
0994d492 FF |
877 | struct { |
878 | int port; | |
879 | u16 proto; | |
880 | } dsa; | |
e4417d69 | 881 | }; |
ddb94eaf PNA |
882 | }; |
883 | ||
884 | #define NET_DEVICE_PATH_STACK_MAX 5 | |
bcf2766b | 885 | #define NET_DEVICE_PATH_VLAN_MAX 2 |
ddb94eaf PNA |
886 | |
887 | struct net_device_path_stack { | |
888 | int num_paths; | |
889 | struct net_device_path path[NET_DEVICE_PATH_STACK_MAX]; | |
890 | }; | |
891 | ||
892 | struct net_device_path_ctx { | |
893 | const struct net_device *dev; | |
894 | const u8 *daddr; | |
bcf2766b FF |
895 | |
896 | int num_vlans; | |
897 | struct { | |
898 | u16 id; | |
899 | __be16 proto; | |
900 | } vlan[NET_DEVICE_PATH_VLAN_MAX]; | |
ddb94eaf PNA |
901 | }; |
902 | ||
2572ac53 | 903 | enum tc_setup_type { |
575ed7d3 | 904 | TC_SETUP_QDISC_MQPRIO, |
a1b7c5fd | 905 | TC_SETUP_CLSU32, |
5b33f488 | 906 | TC_SETUP_CLSFLOWER, |
ade9b658 | 907 | TC_SETUP_CLSMATCHALL, |
332ae8e2 | 908 | TC_SETUP_CLSBPF, |
8c4083b3 | 909 | TC_SETUP_BLOCK, |
8521db4c | 910 | TC_SETUP_QDISC_CBS, |
602f3baf | 911 | TC_SETUP_QDISC_RED, |
7fdb61b4 | 912 | TC_SETUP_QDISC_PRIO, |
f971b132 | 913 | TC_SETUP_QDISC_MQ, |
25db26a9 | 914 | TC_SETUP_QDISC_ETF, |
98b0e5f6 | 915 | TC_SETUP_ROOT_QDISC, |
890d8d23 | 916 | TC_SETUP_QDISC_GRED, |
9c66d156 | 917 | TC_SETUP_QDISC_TAPRIO, |
c29f74e0 | 918 | TC_SETUP_FT, |
d35eb52b | 919 | TC_SETUP_QDISC_ETS, |
ef6aadcc | 920 | TC_SETUP_QDISC_TBF, |
aaca9408 | 921 | TC_SETUP_QDISC_FIFO, |
d03b195b | 922 | TC_SETUP_QDISC_HTB, |
16e5cc64 JF |
923 | }; |
924 | ||
f4e63525 JK |
925 | /* These structures hold the attributes of bpf state that are being passed |
926 | * to the netdevice through the bpf op. | |
a7862b45 | 927 | */ |
f4e63525 | 928 | enum bpf_netdev_command { |
a7862b45 BB |
929 | /* Set or clear a bpf program used in the earliest stages of packet |
930 | * rx. The prog will have been loaded as BPF_PROG_TYPE_XDP. The callee | |
931 | * is responsible for calling bpf_prog_put on any old progs that are | |
932 | * stored. In case of error, the callee need not release the new prog | |
933 | * reference, but on success it takes ownership and must bpf_prog_put | |
934 | * when it is no longer used. | |
935 | */ | |
936 | XDP_SETUP_PROG, | |
ee5d032f | 937 | XDP_SETUP_PROG_HW, |
ab3f0063 | 938 | /* BPF program for offload callbacks, invoked at program load time. */ |
a3884572 JK |
939 | BPF_OFFLOAD_MAP_ALLOC, |
940 | BPF_OFFLOAD_MAP_FREE, | |
1742b3d5 | 941 | XDP_SETUP_XSK_POOL, |
a7862b45 BB |
942 | }; |
943 | ||
cae1927c | 944 | struct bpf_prog_offload_ops; |
ddf9f970 | 945 | struct netlink_ext_ack; |
74515c57 | 946 | struct xdp_umem; |
75ccae62 | 947 | struct xdp_dev_bulk_queue; |
aa8d3a71 | 948 | struct bpf_xdp_link; |
ddf9f970 | 949 | |
7f0a8382 AN |
950 | enum bpf_xdp_mode { |
951 | XDP_MODE_SKB = 0, | |
952 | XDP_MODE_DRV = 1, | |
953 | XDP_MODE_HW = 2, | |
954 | __MAX_XDP_MODE | |
955 | }; | |
956 | ||
957 | struct bpf_xdp_entity { | |
958 | struct bpf_prog *prog; | |
aa8d3a71 | 959 | struct bpf_xdp_link *link; |
7f0a8382 | 960 | }; |
ddf9f970 | 961 | |
f4e63525 JK |
962 | struct netdev_bpf { |
963 | enum bpf_netdev_command command; | |
a7862b45 BB |
964 | union { |
965 | /* XDP_SETUP_PROG */ | |
ddf9f970 | 966 | struct { |
32d60277 | 967 | u32 flags; |
ddf9f970 JK |
968 | struct bpf_prog *prog; |
969 | struct netlink_ext_ack *extack; | |
970 | }; | |
a3884572 JK |
971 | /* BPF_OFFLOAD_MAP_ALLOC, BPF_OFFLOAD_MAP_FREE */ |
972 | struct { | |
973 | struct bpf_offloaded_map *offmap; | |
974 | }; | |
1742b3d5 | 975 | /* XDP_SETUP_XSK_POOL */ |
74515c57 | 976 | struct { |
1742b3d5 | 977 | struct xsk_buff_pool *pool; |
f8ebfaf6 | 978 | u16 queue_id; |
74515c57 | 979 | } xsk; |
a7862b45 BB |
980 | }; |
981 | }; | |
16e5cc64 | 982 | |
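A hedged sketch of how a driver's ndo_bpf implementation typically dispatches on bpf_netdev_command, handling XDP_SETUP_PROG as described in the comment above; example_xdp_set() is a placeholder for the driver code that publishes the new program to the RX path and releases the old one with bpf_prog_put().

```c
/* Hedged sketch: ndo_bpf dispatcher handling only XDP_SETUP_PROG. */
static int example_xdp_set(struct net_device *dev, struct bpf_prog *prog,
			   struct netlink_ext_ack *extack)
{
	return 0;	/* placeholder for driver-specific program install */
}

static int example_ndo_bpf(struct net_device *dev, struct netdev_bpf *bpf)
{
	switch (bpf->command) {
	case XDP_SETUP_PROG:
		return example_xdp_set(dev, bpf->prog, bpf->extack);
	default:
		return -EINVAL;
	}
}
```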
9116e5e2 MK |
983 | /* Flags for ndo_xsk_wakeup. */ |
984 | #define XDP_WAKEUP_RX (1 << 0) | |
985 | #define XDP_WAKEUP_TX (1 << 1) | |
986 | ||
d77e38e6 SK |
987 | #ifdef CONFIG_XFRM_OFFLOAD |
988 | struct xfrmdev_ops { | |
989 | int (*xdo_dev_state_add) (struct xfrm_state *x); | |
990 | void (*xdo_dev_state_delete) (struct xfrm_state *x); | |
991 | void (*xdo_dev_state_free) (struct xfrm_state *x); | |
992 | bool (*xdo_dev_offload_ok) (struct sk_buff *skb, | |
993 | struct xfrm_state *x); | |
50bd870a | 994 | void (*xdo_dev_state_advance_esn) (struct xfrm_state *x); |
d77e38e6 SK |
995 | }; |
996 | #endif | |
997 | ||
6c557001 FW |
998 | struct dev_ifalias { |
999 | struct rcu_head rcuhead; | |
1000 | char ifalias[]; | |
1001 | }; | |
1002 | ||
b473b0d2 | 1003 | struct devlink; |
da68b4ad | 1004 | struct tlsdev_ops; |
b473b0d2 | 1005 | |
ff927412 JP |
1006 | struct netdev_name_node { |
1007 | struct hlist_node hlist; | |
36fbf1e5 | 1008 | struct list_head list; |
ff927412 JP |
1009 | struct net_device *dev; |
1010 | const char *name; | |
1011 | }; | |
1012 | ||
36fbf1e5 JP |
1013 | int netdev_name_node_alt_create(struct net_device *dev, const char *name); |
1014 | int netdev_name_node_alt_destroy(struct net_device *dev, const char *name); | |
1015 | ||
93642e14 JP |
1016 | struct netdev_net_notifier { |
1017 | struct list_head list; | |
1018 | struct notifier_block *nb; | |
1019 | }; | |
1020 | ||
d314774c SH |
1021 | /* |
1022 | * This structure defines the management hooks for network devices. | |
00829823 SH |
1023 | * The following hooks can be defined; unless noted otherwise, they are |
1024 | * optional and can be filled with a null pointer. | |
d314774c SH |
1025 | * |
1026 | * int (*ndo_init)(struct net_device *dev); | |
5e82b4b2 BH |
1027 | * This function is called once when a network device is registered. |
1028 | * The network device can use this for any late stage initialization | |
1029 | * or semantic validation. It can fail with an error code which will | |
1030 | * be propagated back to register_netdev. | |
d314774c SH |
1031 | * |
1032 | * void (*ndo_uninit)(struct net_device *dev); | |
1033 | * This function is called when device is unregistered or when registration | |
1034 | * fails. It is not called if init fails. | |
1035 | * | |
1036 | * int (*ndo_open)(struct net_device *dev); | |
5e82b4b2 | 1037 | * This function is called when a network device transitions to the up |
d314774c SH |
1038 | * state. |
1039 | * | |
1040 | * int (*ndo_stop)(struct net_device *dev); | |
5e82b4b2 | 1041 | * This function is called when a network device transitions to the down |
d314774c SH |
1042 | * state. |
1043 | * | |
dc1f8bf6 SH |
1044 | * netdev_tx_t (*ndo_start_xmit)(struct sk_buff *skb, |
1045 | * struct net_device *dev); | |
00829823 | 1046 | * Called when a packet needs to be transmitted. |
e79d8429 RR |
1047 | * Returns NETDEV_TX_OK. Can return NETDEV_TX_BUSY, but you should stop |
1048 | * the queue before that can happen; it's for obsolete devices and weird | |
1049 | * corner cases, but the stack really does a non-trivial amount | |
1050 | * of useless work if you return NETDEV_TX_BUSY. | |
5e82b4b2 | 1051 | * Required; cannot be NULL. |
00829823 | 1052 | * |
1a2a1444 DM |
1053 | * netdev_features_t (*ndo_features_check)(struct sk_buff *skb, |
1054 | * struct net_device *dev | |
1055 | * netdev_features_t features); | |
1056 | * Called by core transmit path to determine if device is capable of | |
1057 | * performing offload operations on a given packet. This is to give | |
1058 | * the device an opportunity to implement any restrictions that cannot | |
1059 | * be otherwise expressed by feature flags. The check is called with | |
1060 | * the set of features that the stack has calculated and it returns | |
1061 | * those the driver believes to be appropriate. | |
cdba756f | 1062 | * |
f663dd9a | 1063 | * u16 (*ndo_select_queue)(struct net_device *dev, struct sk_buff *skb, |
a350ecce | 1064 | * struct net_device *sb_dev); |
5e82b4b2 | 1065 | * Called to decide which queue to use when device supports multiple |
00829823 SH |
1066 | * transmit queues. |
1067 | * | |
d314774c SH |
1068 | * void (*ndo_change_rx_flags)(struct net_device *dev, int flags); |
1069 | * This function is called to allow device receiver to make | |
5e82b4b2 | 1070 | * changes to configuration when multicast or promiscuous is enabled. |
d314774c SH |
1071 | * |
1072 | * void (*ndo_set_rx_mode)(struct net_device *dev); | |
1073 | * This function is called when the device changes address list filtering.
01789349 | 1074 | * If driver handles unicast address filtering, it should set |
5e82b4b2 | 1075 | * IFF_UNICAST_FLT in its priv_flags. |
d314774c SH |
1076 | * |
1077 | * int (*ndo_set_mac_address)(struct net_device *dev, void *addr); | |
1078 | * This function is called when the Media Access Control address | |
37b607c5 | 1079 | * needs to be changed. If this interface is not defined, the |
5e82b4b2 | 1080 | * MAC address can not be changed. |
d314774c SH |
1081 | * |
1082 | * int (*ndo_validate_addr)(struct net_device *dev); | |
1083 | * Test if Media Access Control address is valid for the device. | |
1084 | * | |
1085 | * int (*ndo_do_ioctl)(struct net_device *dev, struct ifreq *ifr, int cmd); | |
5e82b4b2 BH |
1086 | * Called when a user requests an ioctl which can't be handled by |
1087 | * the generic interface code. If not defined ioctls return | |
d314774c SH |
1088 | * not supported error code. |
1089 | * | |
1090 | * int (*ndo_set_config)(struct net_device *dev, struct ifmap *map); | |
1091 | * Used to set network devices bus interface parameters. This interface | |
5e82b4b2 | 1092 | * is retained for legacy reasons; new devices should use the bus |
d314774c SH |
1093 | * interface (PCI) for low level management. |
1094 | * | |
1095 | * int (*ndo_change_mtu)(struct net_device *dev, int new_mtu); | |
1096 | * Called when a user wants to change the Maximum Transfer Unit | |
db46a0e1 | 1097 | * of a device. |
d314774c | 1098 | * |
0290bd29 | 1099 | * void (*ndo_tx_timeout)(struct net_device *dev, unsigned int txqueue); |
5e82b4b2 | 1100 | * Callback used when the transmitter has not made any progress |
d314774c SH |
1101 | * for dev->watchdog ticks. |
1102 | * | |
bc1f4470 | 1103 | * void (*ndo_get_stats64)(struct net_device *dev, |
1104 | * struct rtnl_link_stats64 *storage); | |
d308e38f | 1105 | * struct net_device_stats* (*ndo_get_stats)(struct net_device *dev); |
d314774c | 1106 | * Called when a user wants to get the network device usage |
be1f3c2c | 1107 | * statistics. Drivers must do one of the following: |
3cfde79c BH |
1108 | * 1. Define @ndo_get_stats64 to fill in a zero-initialised |
1109 | * rtnl_link_stats64 structure passed by the caller. | |
82695d9b | 1110 | * 2. Define @ndo_get_stats to update a net_device_stats structure |
be1f3c2c BH |
1111 | * (which should normally be dev->stats) and return a pointer to |
1112 | * it. The structure may be changed asynchronously only if each | |
1113 | * field is written atomically. | |
1114 | * 3. Update dev->stats asynchronously and atomically, and define | |
1115 | * neither operation. | |
d314774c | 1116 | * |
3df5b3c6 | 1117 | * bool (*ndo_has_offload_stats)(const struct net_device *dev, int attr_id) |
2c9d85d4 NF |
1118 | * Return true if this device supports offload stats of this attr_id. |
1119 | * | |
1120 | * int (*ndo_get_offload_stats)(int attr_id, const struct net_device *dev, | |
1121 | * void *attr_data) | |
1122 | * Get statistics for offload operations by attr_id. Write it into the | |
1123 | * attr_data pointer. | |
1124 | * | |
5d632cb7 | 1125 | * int (*ndo_vlan_rx_add_vid)(struct net_device *dev, __be16 proto, u16 vid); |
5e82b4b2 | 1126 | * If device supports VLAN filtering this function is called when a |
80d5c368 | 1127 | * VLAN id is registered. |
d314774c | 1128 | * |
5d632cb7 | 1129 | * int (*ndo_vlan_rx_kill_vid)(struct net_device *dev, __be16 proto, u16 vid); |
5e82b4b2 | 1130 | * If device supports VLAN filtering this function is called when a |
80d5c368 | 1131 | * VLAN id is unregistered. |
d314774c SH |
1132 | * |
1133 | * void (*ndo_poll_controller)(struct net_device *dev); | |
95c26df8 WM |
1134 | * |
1135 | * SR-IOV management functions. | |
1136 | * int (*ndo_set_vf_mac)(struct net_device *dev, int vf, u8* mac); | |
79aab093 MS |
1137 | * int (*ndo_set_vf_vlan)(struct net_device *dev, int vf, u16 vlan, |
1138 | * u8 qos, __be16 proto); | |
ed616689 SC |
1139 | * int (*ndo_set_vf_rate)(struct net_device *dev, int vf, int min_tx_rate, |
1140 | * int max_tx_rate); | |
5f8444a3 | 1141 | * int (*ndo_set_vf_spoofchk)(struct net_device *dev, int vf, bool setting); |
dd461d6a | 1142 | * int (*ndo_set_vf_trust)(struct net_device *dev, int vf, bool setting); |
95c26df8 WM |
1143 | * int (*ndo_get_vf_config)(struct net_device *dev, |
1144 | * int vf, struct ifla_vf_info *ivf); | |
1d8faf48 | 1145 | * int (*ndo_set_vf_link_state)(struct net_device *dev, int vf, int link_state); |
57b61080 SF |
1146 | * int (*ndo_set_vf_port)(struct net_device *dev, int vf, |
1147 | * struct nlattr *port[]); | |
01a3d796 VZ |
1148 | * |
1149 | * Enable or disable the VF ability to query its RSS Redirection Table and | |
1150 | * Hash Key. This is needed since on some devices VF share this information | |
5e82b4b2 | 1151 | * with PF and querying it may introduce a theoretical security risk. |
01a3d796 | 1152 | * int (*ndo_set_vf_rss_query_en)(struct net_device *dev, int vf, bool setting); |
57b61080 | 1153 | * int (*ndo_get_vf_port)(struct net_device *dev, int vf, struct sk_buff *skb); |
2572ac53 | 1154 | * int (*ndo_setup_tc)(struct net_device *dev, enum tc_setup_type type, |
de4784ca | 1155 | * void *type_data); |
6a4bc2b4 FF |
1156 | * Called to setup any 'tc' scheduler, classifier or action on @dev. |
1157 | * This is always called from the stack with the rtnl lock held and netif | |
1158 | * tx queues stopped. This allows the netdevice to perform queue | |
1159 | * management safely. | |
c445477d | 1160 | * |
e9bce845 YZ |
1161 | * Fiber Channel over Ethernet (FCoE) offload functions. |
1162 | * int (*ndo_fcoe_enable)(struct net_device *dev); | |
1163 | * Called when the FCoE protocol stack wants to start using LLD for FCoE | |
1164 | * so the underlying device can perform whatever needed configuration or | |
1165 | * initialization to support acceleration of FCoE traffic. | |
1166 | * | |
1167 | * int (*ndo_fcoe_disable)(struct net_device *dev); | |
1168 | * Called when the FCoE protocol stack wants to stop using LLD for FCoE | |
1169 | * so the underlying device can perform whatever needed clean-ups to | |
1170 | * stop supporting acceleration of FCoE traffic. | |
1171 | * | |
1172 | * int (*ndo_fcoe_ddp_setup)(struct net_device *dev, u16 xid, | |
1173 | * struct scatterlist *sgl, unsigned int sgc); | |
1174 | * Called when the FCoE Initiator wants to initialize an I/O that | |
1175 | * is a possible candidate for Direct Data Placement (DDP). The LLD can | |
1176 | * perform necessary setup and returns 1 to indicate the device is set up | |
1177 | * successfully to perform DDP on this I/O, otherwise this returns 0. | |
1178 | * | |
1179 | * int (*ndo_fcoe_ddp_done)(struct net_device *dev, u16 xid); | |
1180 | * Called when the FCoE Initiator/Target is done with the DDPed I/O as | |
1181 | * indicated by the FC exchange id 'xid', so the underlying device can | |
1182 | * clean up and reuse resources for later DDP requests. | |
1183 | * | |
1184 | * int (*ndo_fcoe_ddp_target)(struct net_device *dev, u16 xid, | |
1185 | * struct scatterlist *sgl, unsigned int sgc); | |
1186 | * Called when the FCoE Target wants to initialize an I/O that | |
1187 | * is a possible candidate for Direct Data Placement (DDP). The LLD can | |
1188 | * perform necessary setup and returns 1 to indicate the device is set up | |
1189 | * successfully to perform DDP on this I/O, otherwise this returns 0. | |
1190 | * | |
68bad94e NP |
1191 | * int (*ndo_fcoe_get_hbainfo)(struct net_device *dev, |
1192 | * struct netdev_fcoe_hbainfo *hbainfo); | |
1193 | * Called when the FCoE Protocol stack wants information on the underlying | |
1194 | * device. This information is utilized by the FCoE protocol stack to | |
1195 | * register attributes with Fiber Channel management service as per the | |
1196 | * FC-GS Fabric Device Management Information(FDMI) specification. | |
1197 | * | |
e9bce845 YZ |
1198 | * int (*ndo_fcoe_get_wwn)(struct net_device *dev, u64 *wwn, int type); |
1199 | * Called when the underlying device wants to override default World Wide | |
1200 | * Name (WWN) generation mechanism in FCoE protocol stack to pass its own | |
1201 | * World Wide Port Name (WWPN) or World Wide Node Name (WWNN) to the FCoE | |
1202 | * protocol stack to use. | |
1203 | * | |
c445477d BH |
1204 | * RFS acceleration. |
1205 | * int (*ndo_rx_flow_steer)(struct net_device *dev, const struct sk_buff *skb, | |
1206 | * u16 rxq_index, u32 flow_id); | |
1207 | * Set hardware filter for RFS. rxq_index is the target queue index; | |
1208 | * flow_id is a flow ID to be passed to rps_may_expire_flow() later. | |
1209 | * Return the filter ID on success, or a negative error code. | |
fbaec0ea | 1210 | * |
8b98a70c | 1211 | * Slave management functions (for bridge, bonding, etc). |
fbaec0ea JP |
1212 | * int (*ndo_add_slave)(struct net_device *dev, struct net_device *slave_dev); |
1213 | * Called to make another netdev an underling. | |
1214 | * | |
1215 | * int (*ndo_del_slave)(struct net_device *dev, struct net_device *slave_dev); | |
1216 | * Called to release previously enslaved netdev. | |
5455c699 | 1217 | * |
cff9f12b MG |
1218 | * struct net_device *(*ndo_get_xmit_slave)(struct net_device *dev, |
1219 | * struct sk_buff *skb, | |
1220 | * bool all_slaves); | |
1221 | * Get the xmit slave of master device. If all_slaves is true, function | |
1222 | * assumes all the slaves can transmit.
1223 | * | |
5455c699 | 1224 | * Feature/offload setting functions. |
1a2a1444 DM |
1225 | * netdev_features_t (*ndo_fix_features)(struct net_device *dev, |
1226 | * netdev_features_t features); | |
1227 | * Adjusts the requested feature flags according to device-specific | |
1228 | * constraints, and returns the resulting flags. Must not modify | |
1229 | * the device state. | |
1230 | * | |
c8f44aff | 1231 | * int (*ndo_set_features)(struct net_device *dev, netdev_features_t features); |
5455c699 MM |
1232 | * Called to update device configuration to new features. Passed |
1233 | * feature set might be less than what was returned by ndo_fix_features().
1234 | * Must return >0 or -errno if it changed dev->features itself. | |
1235 | * | |
edc7d573 | 1236 | * int (*ndo_fdb_add)(struct ndmsg *ndm, struct nlattr *tb[], |
1237 | * struct net_device *dev, | |
87b0984e PM |
1238 | * const unsigned char *addr, u16 vid, u16 flags, |
1239 | * struct netlink_ext_ack *extack); | |
77162022 | 1240 | * Adds an FDB entry to dev for addr. |
1690be63 VY |
1241 | * int (*ndo_fdb_del)(struct ndmsg *ndm, struct nlattr *tb[], |
1242 | * struct net_device *dev, | |
f6f6424b | 1243 | * const unsigned char *addr, u16 vid) |
77162022 JF |
1244 | * Deletes the FDB entry from dev corresponding to addr.
1245 | * int (*ndo_fdb_dump)(struct sk_buff *skb, struct netlink_callback *cb, | |
5d5eacb3 | 1246 | * struct net_device *dev, struct net_device *filter_dev, |
d297653d | 1247 | * int *idx) |
77162022 JF |
1248 | * Used to add FDB entries to dump requests. Implementers should add |
1249 | * entries to skb and update idx with the number of entries. | |
e5a55a89 | 1250 | * |
ad41faa8 | 1251 | * int (*ndo_bridge_setlink)(struct net_device *dev, struct nlmsghdr *nlh, |
2fd527b7 | 1252 | * u16 flags, struct netlink_ext_ack *extack) |
e5a55a89 | 1253 | * int (*ndo_bridge_getlink)(struct sk_buff *skb, u32 pid, u32 seq, |
46c264da ND |
1254 | * struct net_device *dev, u32 filter_mask, |
1255 | * int nlflags) | |
ad41faa8 ND |
1256 | * int (*ndo_bridge_dellink)(struct net_device *dev, struct nlmsghdr *nlh, |
1257 | * u16 flags); | |
4bf84c35 JP |
1258 | * |
1259 | * int (*ndo_change_carrier)(struct net_device *dev, bool new_carrier); | |
1260 | * Called to change device carrier. Soft-devices (like dummy, team, etc) | |
1261 | * which do not represent real hardware may define this to allow their | |
1262 | * userspace components to manage their virtual carrier state. Devices | |
1263 | * that determine carrier state from physical hardware properties (eg | |
1264 | * network cables) or protocol-dependent mechanisms (eg | |
1265 | * USB_CDC_NOTIFY_NETWORK_CONNECTION) should NOT implement this function. | |
66b52b0d JP |
1266 | * |
1267 | * int (*ndo_get_phys_port_id)(struct net_device *dev, | |
02637fce | 1268 | * struct netdev_phys_item_id *ppid); |
66b52b0d JP |
1269 | * Called to get the ID of the physical port of this device. If the |
1270 | * driver does not implement this, it is assumed that the hw is not able | |
1271 | * to have multiple net devices on a single physical port. | |
53cf5275 | 1272 | * |
d6abc596 FF |
1273 | * int (*ndo_get_port_parent_id)(struct net_device *dev, |
1274 | * struct netdev_phys_item_id *ppid) | |
1275 | * Called to get the parent ID of the physical port of this device. | |
1276 | * | |
a6cc0cfa JF |
1277 | * void* (*ndo_dfwd_add_station)(struct net_device *pdev, |
1278 | * struct net_device *dev) | |
1279 | * Called by upper layer devices to accelerate switching or other | |
1280 | * station functionality into hardware. 'pdev' is the lowerdev | |
1281 | * to use for the offload and 'dev' is the net device that will | |
1282 | * back the offload. Returns a pointer to the private structure | |
1283 | * the upper layer will maintain. | |
1284 | * void (*ndo_dfwd_del_station)(struct net_device *pdev, void *priv) | |
1285 | * Called by upper layer device to delete the station created | |
1286 | * by 'ndo_dfwd_add_station'. 'pdev' is the net device backing | |
1287 | * the station and priv is the structure returned by the add | |
1288 | * operation. | |
822b3b2e JF |
1289 | * int (*ndo_set_tx_maxrate)(struct net_device *dev, |
1290 | * int queue_index, u32 maxrate); | |
1291 | * Called when a user wants to set a max-rate limitation on a specific | |
1292 | * TX queue. | |
a54acb3a ND |
1293 | * int (*ndo_get_iflink)(const struct net_device *dev); |
1294 | * Called to get the iflink value of this device. | |
d746d707 | 1295 | * int (*ndo_change_proto_down)(struct net_device *dev, |
5e82b4b2 | 1296 | * bool proto_down); |
d746d707 AK |
1297 | * This function is used to pass protocol port error state information |
1298 | * to the switch driver. The switch driver can react to the proto_down | |
1299 | * by doing a phys down on the associated switch port. | |
fc4099f1 PS |
1300 | * int (*ndo_fill_metadata_dst)(struct net_device *dev, struct sk_buff *skb); |
1301 | * This function is used to get egress tunnel information for a given skb. | |
1302 | * This is useful for retrieving outer tunnel header parameters while | |
1303 | * sampling a packet. | |
871b642a PA |
1304 | * void (*ndo_set_rx_headroom)(struct net_device *dev, int needed_headroom); |
1305 | * This function is used to specify the headroom that the skb must | |
1306 | * consider when allocating an skb during packet reception. Setting | |
1307 | * appropriate rx headroom value allows avoiding skb head copy on | |
5e82b4b2 | 1308 | * forward. Setting a negative value resets the rx headroom to the |
871b642a | 1309 | * default value. |
f4e63525 | 1310 | * int (*ndo_bpf)(struct net_device *dev, struct netdev_bpf *bpf); |
a7862b45 | 1311 | * This function is used to set or query state related to XDP on the |
f4e63525 JK |
1312 | * netdevice and manage BPF offload. See definition of |
1313 | * enum bpf_netdev_command for details. | |
42b33468 JDB |
1314 | * int (*ndo_xdp_xmit)(struct net_device *dev, int n, struct xdp_frame **xdp, |
1315 | * u32 flags); | |
735fc405 JDB |
1316 | * This function is used to submit @n XDP packets for transmit on a |
1317 | * netdevice. Returns the number of frames successfully transmitted; frames | |
1318 | * that got dropped are freed/returned via xdp_return_frame(). | |
1319 | * A negative return value means a general error invoking the ndo: | |
1320 | * no frames were transmitted and the core caller will free all frames. | |
9116e5e2 MK |
1321 | * int (*ndo_xsk_wakeup)(struct net_device *dev, u32 queue_id, u32 flags); |
1322 | * This function is used to wake up the softirq, ksoftirqd or kthread | |
1323 | * responsible for sending and/or receiving packets on a specific | |
1324 | * queue id bound to an AF_XDP socket. The flags field specifies if | |
1325 | * only RX, only Tx, or both should be woken up using the flags | |
1326 | * XDP_WAKEUP_RX and XDP_WAKEUP_TX. | |
5dc37bb9 JP |
1327 | * struct devlink_port *(*ndo_get_devlink_port)(struct net_device *dev); |
1328 | * Get devlink port instance associated with a given netdev. | |
b473b0d2 JK |
1329 | * Called with a reference on the netdevice and devlink locks only, |
1330 | * rtnl_lock is not held. | |
607259a6 CH |
1331 | * int (*ndo_tunnel_ctl)(struct net_device *dev, struct ip_tunnel_parm *p, |
1332 | * int cmd); | |
1333 | * Add, change, delete or get information on an IPv4 tunnel. | |
9aa1206e DB |
1334 | * struct net_device *(*ndo_get_peer_dev)(struct net_device *dev); |
1335 | * If a device is paired with a peer device, return the peer instance. | |
1336 | * The caller must be under RCU read context. | |
ddb94eaf PNA |
1337 | * int (*ndo_fill_forward_path)(struct net_device_path_ctx *ctx, struct net_device_path *path); |
1338 | * Get the forwarding path to reach the real device from the HW destination address | |
d314774c SH |
1339 | */ |
1340 | struct net_device_ops { | |
1341 | int (*ndo_init)(struct net_device *dev); | |
1342 | void (*ndo_uninit)(struct net_device *dev); | |
1343 | int (*ndo_open)(struct net_device *dev); | |
1344 | int (*ndo_stop)(struct net_device *dev); | |
cdba756f ED |
1345 | netdev_tx_t (*ndo_start_xmit)(struct sk_buff *skb, |
1346 | struct net_device *dev); | |
1347 | netdev_features_t (*ndo_features_check)(struct sk_buff *skb, | |
1348 | struct net_device *dev, | |
1349 | netdev_features_t features); | |
00829823 | 1350 | u16 (*ndo_select_queue)(struct net_device *dev, |
f663dd9a | 1351 | struct sk_buff *skb, |
a350ecce | 1352 | struct net_device *sb_dev); |
d314774c SH |
1353 | void (*ndo_change_rx_flags)(struct net_device *dev, |
1354 | int flags); | |
d314774c | 1355 | void (*ndo_set_rx_mode)(struct net_device *dev); |
d314774c SH |
1356 | int (*ndo_set_mac_address)(struct net_device *dev, |
1357 | void *addr); | |
d314774c | 1358 | int (*ndo_validate_addr)(struct net_device *dev); |
d314774c SH |
1359 | int (*ndo_do_ioctl)(struct net_device *dev, |
1360 | struct ifreq *ifr, int cmd); | |
d314774c SH |
1361 | int (*ndo_set_config)(struct net_device *dev, |
1362 | struct ifmap *map); | |
00829823 SH |
1363 | int (*ndo_change_mtu)(struct net_device *dev, |
1364 | int new_mtu); | |
1365 | int (*ndo_neigh_setup)(struct net_device *dev, | |
1366 | struct neigh_parms *); | |
0290bd29 MT |
1367 | void (*ndo_tx_timeout) (struct net_device *dev, |
1368 | unsigned int txqueue); | |
d314774c | 1369 | |
bc1f4470 | 1370 | void (*ndo_get_stats64)(struct net_device *dev, |
1371 | struct rtnl_link_stats64 *storage); | |
3df5b3c6 | 1372 | bool (*ndo_has_offload_stats)(const struct net_device *dev, int attr_id); |
2c9d85d4 NF |
1373 | int (*ndo_get_offload_stats)(int attr_id, |
1374 | const struct net_device *dev, | |
1375 | void *attr_data); | |
d314774c SH |
1376 | struct net_device_stats* (*ndo_get_stats)(struct net_device *dev); |
1377 | ||
8e586137 | 1378 | int (*ndo_vlan_rx_add_vid)(struct net_device *dev, |
80d5c368 | 1379 | __be16 proto, u16 vid); |
8e586137 | 1380 | int (*ndo_vlan_rx_kill_vid)(struct net_device *dev, |
80d5c368 | 1381 | __be16 proto, u16 vid); |
d314774c | 1382 | #ifdef CONFIG_NET_POLL_CONTROLLER |
d314774c | 1383 | void (*ndo_poll_controller)(struct net_device *dev); |
4247e161 | 1384 | int (*ndo_netpoll_setup)(struct net_device *dev, |
a8779ec1 | 1385 | struct netpoll_info *info); |
0e34e931 | 1386 | void (*ndo_netpoll_cleanup)(struct net_device *dev); |
d314774c | 1387 | #endif |
95c26df8 WM |
1388 | int (*ndo_set_vf_mac)(struct net_device *dev, |
1389 | int queue, u8 *mac); | |
1390 | int (*ndo_set_vf_vlan)(struct net_device *dev, | |
79aab093 MS |
1391 | int queue, u16 vlan, |
1392 | u8 qos, __be16 proto); | |
ed616689 SC |
1393 | int (*ndo_set_vf_rate)(struct net_device *dev, |
1394 | int vf, int min_tx_rate, | |
1395 | int max_tx_rate); | |
5f8444a3 GR |
1396 | int (*ndo_set_vf_spoofchk)(struct net_device *dev, |
1397 | int vf, bool setting); | |
dd461d6a HS |
1398 | int (*ndo_set_vf_trust)(struct net_device *dev, |
1399 | int vf, bool setting); | |
95c26df8 WM |
1400 | int (*ndo_get_vf_config)(struct net_device *dev, |
1401 | int vf, | |
1402 | struct ifla_vf_info *ivf); | |
1d8faf48 RE |
1403 | int (*ndo_set_vf_link_state)(struct net_device *dev, |
1404 | int vf, int link_state); | |
3b766cd8 EBE |
1405 | int (*ndo_get_vf_stats)(struct net_device *dev, |
1406 | int vf, | |
1407 | struct ifla_vf_stats | |
1408 | *vf_stats); | |
57b61080 SF |
1409 | int (*ndo_set_vf_port)(struct net_device *dev, |
1410 | int vf, | |
1411 | struct nlattr *port[]); | |
1412 | int (*ndo_get_vf_port)(struct net_device *dev, | |
1413 | int vf, struct sk_buff *skb); | |
30aad417 DG |
1414 | int (*ndo_get_vf_guid)(struct net_device *dev, |
1415 | int vf, | |
1416 | struct ifla_vf_guid *node_guid, | |
1417 | struct ifla_vf_guid *port_guid); | |
cc8e27cc EC |
1418 | int (*ndo_set_vf_guid)(struct net_device *dev, |
1419 | int vf, u64 guid, | |
1420 | int guid_type); | |
01a3d796 VZ |
1421 | int (*ndo_set_vf_rss_query_en)( |
1422 | struct net_device *dev, | |
1423 | int vf, bool setting); | |
16e5cc64 | 1424 | int (*ndo_setup_tc)(struct net_device *dev, |
2572ac53 | 1425 | enum tc_setup_type type, |
de4784ca | 1426 | void *type_data); |
d11ead75 | 1427 | #if IS_ENABLED(CONFIG_FCOE) |
cb454399 YZ |
1428 | int (*ndo_fcoe_enable)(struct net_device *dev); |
1429 | int (*ndo_fcoe_disable)(struct net_device *dev); | |
4d288d57 YZ |
1430 | int (*ndo_fcoe_ddp_setup)(struct net_device *dev, |
1431 | u16 xid, | |
1432 | struct scatterlist *sgl, | |
1433 | unsigned int sgc); | |
1434 | int (*ndo_fcoe_ddp_done)(struct net_device *dev, | |
1435 | u16 xid); | |
6247e086 YZ |
1436 | int (*ndo_fcoe_ddp_target)(struct net_device *dev, |
1437 | u16 xid, | |
1438 | struct scatterlist *sgl, | |
1439 | unsigned int sgc); | |
68bad94e NP |
1440 | int (*ndo_fcoe_get_hbainfo)(struct net_device *dev, |
1441 | struct netdev_fcoe_hbainfo *hbainfo); | |
3c9c36bc BPG |
1442 | #endif |
1443 | ||
d11ead75 | 1444 | #if IS_ENABLED(CONFIG_LIBFCOE) |
df5c7945 YZ |
1445 | #define NETDEV_FCOE_WWNN 0 |
1446 | #define NETDEV_FCOE_WWPN 1 | |
1447 | int (*ndo_fcoe_get_wwn)(struct net_device *dev, | |
1448 | u64 *wwn, int type); | |
4d288d57 | 1449 | #endif |
3c9c36bc | 1450 | |
c445477d BH |
1451 | #ifdef CONFIG_RFS_ACCEL |
1452 | int (*ndo_rx_flow_steer)(struct net_device *dev, | |
1453 | const struct sk_buff *skb, | |
1454 | u16 rxq_index, | |
1455 | u32 flow_id); | |
1456 | #endif | |
fbaec0ea | 1457 | int (*ndo_add_slave)(struct net_device *dev, |
33eaf2a6 DA |
1458 | struct net_device *slave_dev, |
1459 | struct netlink_ext_ack *extack); | |
fbaec0ea JP |
1460 | int (*ndo_del_slave)(struct net_device *dev, |
1461 | struct net_device *slave_dev); | |
cff9f12b MG |
1462 | struct net_device* (*ndo_get_xmit_slave)(struct net_device *dev, |
1463 | struct sk_buff *skb, | |
1464 | bool all_slaves); | |
719a402c TT |
1465 | struct net_device* (*ndo_sk_get_lower_dev)(struct net_device *dev, |
1466 | struct sock *sk); | |
c8f44aff MM |
1467 | netdev_features_t (*ndo_fix_features)(struct net_device *dev, |
1468 | netdev_features_t features); | |
5455c699 | 1469 | int (*ndo_set_features)(struct net_device *dev, |
c8f44aff | 1470 | netdev_features_t features); |
503eebc2 JP |
1471 | int (*ndo_neigh_construct)(struct net_device *dev, |
1472 | struct neighbour *n); | |
1473 | void (*ndo_neigh_destroy)(struct net_device *dev, | |
1474 | struct neighbour *n); | |
77162022 JF |
1475 | |
1476 | int (*ndo_fdb_add)(struct ndmsg *ndm, | |
edc7d573 | 1477 | struct nlattr *tb[], |
77162022 | 1478 | struct net_device *dev, |
6b6e2725 | 1479 | const unsigned char *addr, |
f6f6424b | 1480 | u16 vid, |
87b0984e PM |
1481 | u16 flags, |
1482 | struct netlink_ext_ack *extack); | |
77162022 | 1483 | int (*ndo_fdb_del)(struct ndmsg *ndm, |
1690be63 | 1484 | struct nlattr *tb[], |
77162022 | 1485 | struct net_device *dev, |
f6f6424b JP |
1486 | const unsigned char *addr, |
1487 | u16 vid); | |
77162022 JF |
1488 | int (*ndo_fdb_dump)(struct sk_buff *skb, |
1489 | struct netlink_callback *cb, | |
1490 | struct net_device *dev, | |
5d5eacb3 | 1491 | struct net_device *filter_dev, |
d297653d | 1492 | int *idx); |
5b2f94b2 RP |
1493 | int (*ndo_fdb_get)(struct sk_buff *skb, |
1494 | struct nlattr *tb[], | |
1495 | struct net_device *dev, | |
1496 | const unsigned char *addr, | |
1497 | u16 vid, u32 portid, u32 seq, | |
1498 | struct netlink_ext_ack *extack); | |
e5a55a89 | 1499 | int (*ndo_bridge_setlink)(struct net_device *dev, |
add511b3 | 1500 | struct nlmsghdr *nlh, |
2fd527b7 PM |
1501 | u16 flags, |
1502 | struct netlink_ext_ack *extack); | |
e5a55a89 JF |
1503 | int (*ndo_bridge_getlink)(struct sk_buff *skb, |
1504 | u32 pid, u32 seq, | |
6cbdceeb | 1505 | struct net_device *dev, |
46c264da ND |
1506 | u32 filter_mask, |
1507 | int nlflags); | |
407af329 | 1508 | int (*ndo_bridge_dellink)(struct net_device *dev, |
add511b3 RP |
1509 | struct nlmsghdr *nlh, |
1510 | u16 flags); | |
4bf84c35 JP |
1511 | int (*ndo_change_carrier)(struct net_device *dev, |
1512 | bool new_carrier); | |
66b52b0d | 1513 | int (*ndo_get_phys_port_id)(struct net_device *dev, |
02637fce | 1514 | struct netdev_phys_item_id *ppid); |
d6abc596 FF |
1515 | int (*ndo_get_port_parent_id)(struct net_device *dev, |
1516 | struct netdev_phys_item_id *ppid); | |
db24a904 DA |
1517 | int (*ndo_get_phys_port_name)(struct net_device *dev, |
1518 | char *name, size_t len); | |
a6cc0cfa JF |
1519 | void* (*ndo_dfwd_add_station)(struct net_device *pdev, |
1520 | struct net_device *dev); | |
1521 | void (*ndo_dfwd_del_station)(struct net_device *pdev, | |
1522 | void *priv); | |
1523 | ||
822b3b2e JF |
1524 | int (*ndo_set_tx_maxrate)(struct net_device *dev, |
1525 | int queue_index, | |
1526 | u32 maxrate); | |
a54acb3a | 1527 | int (*ndo_get_iflink)(const struct net_device *dev); |
d746d707 AK |
1528 | int (*ndo_change_proto_down)(struct net_device *dev, |
1529 | bool proto_down); | |
fc4099f1 PS |
1530 | int (*ndo_fill_metadata_dst)(struct net_device *dev, |
1531 | struct sk_buff *skb); | |
871b642a PA |
1532 | void (*ndo_set_rx_headroom)(struct net_device *dev, |
1533 | int needed_headroom); | |
f4e63525 JK |
1534 | int (*ndo_bpf)(struct net_device *dev, |
1535 | struct netdev_bpf *bpf); | |
735fc405 | 1536 | int (*ndo_xdp_xmit)(struct net_device *dev, int n, |
42b33468 JDB |
1537 | struct xdp_frame **xdp, |
1538 | u32 flags); | |
9116e5e2 MK |
1539 | int (*ndo_xsk_wakeup)(struct net_device *dev, |
1540 | u32 queue_id, u32 flags); | |
5dc37bb9 | 1541 | struct devlink_port * (*ndo_get_devlink_port)(struct net_device *dev); |
607259a6 CH |
1542 | int (*ndo_tunnel_ctl)(struct net_device *dev, |
1543 | struct ip_tunnel_parm *p, int cmd); | |
9aa1206e | 1544 | struct net_device * (*ndo_get_peer_dev)(struct net_device *dev); |
ddb94eaf PNA |
1545 | int (*ndo_fill_forward_path)(struct net_device_path_ctx *ctx, |
1546 | struct net_device_path *path); | |
d314774c SH |
1547 | }; |
1548 | ||
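/*
 * Illustrative sketch (not part of the original header): a minimal way a
 * hypothetical "foo" driver might wire up this structure. foo_open,
 * foo_stop, foo_hw_queue_skb and struct foo_priv are made-up names used
 * only to show the shape of the basic open/stop/xmit hooks.
 *
 *	static netdev_tx_t foo_start_xmit(struct sk_buff *skb,
 *					  struct net_device *dev)
 *	{
 *		struct foo_priv *priv = netdev_priv(dev);
 *
 *		foo_hw_queue_skb(priv, skb);
 *		return NETDEV_TX_OK;
 *	}
 *
 *	static const struct net_device_ops foo_netdev_ops = {
 *		.ndo_open	= foo_open,
 *		.ndo_stop	= foo_stop,
 *		.ndo_start_xmit	= foo_start_xmit,
 *	};
 */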
7aa98047 | 1549 | /** |
270f3385 | 1550 | * enum netdev_priv_flags - &struct net_device priv_flags |
7aa98047 LR |
1551 | * |
1552 | * These are the &struct net_device priv_flags; they are only set | |
1553 | * internally by drivers and used in the kernel. These flags are invisible to | |
5e82b4b2 | 1554 | * userspace; this means that the order of these flags can change |
7aa98047 LR |
1555 | * during any kernel release. |
1556 | * | |
1557 | * You should have a pretty good reason to be extending these flags. | |
1558 | * | |
1559 | * @IFF_802_1Q_VLAN: 802.1Q VLAN device | |
1560 | * @IFF_EBRIDGE: Ethernet bridging device | |
7aa98047 | 1561 | * @IFF_BONDING: bonding master or slave |
7aa98047 | 1562 | * @IFF_ISATAP: ISATAP interface (RFC4214) |
7aa98047 LR |
1563 | * @IFF_WAN_HDLC: WAN HDLC device |
1564 | * @IFF_XMIT_DST_RELEASE: dev_hard_start_xmit() is allowed to | |
1565 | * release skb->dst | |
1566 | * @IFF_DONT_BRIDGE: disallow bridging this ether dev | |
1567 | * @IFF_DISABLE_NETPOLL: disable netpoll at run-time | |
1568 | * @IFF_MACVLAN_PORT: device used as macvlan port | |
1569 | * @IFF_BRIDGE_PORT: device used as bridge port | |
1570 | * @IFF_OVS_DATAPATH: device used as Open vSwitch datapath port | |
1571 | * @IFF_TX_SKB_SHARING: The interface supports sharing skbs on transmit | |
1572 | * @IFF_UNICAST_FLT: Supports unicast filtering | |
1573 | * @IFF_TEAM_PORT: device used as team port | |
1574 | * @IFF_SUPP_NOFCS: device supports sending custom FCS | |
1575 | * @IFF_LIVE_ADDR_CHANGE: device supports hardware address | |
1576 | * change when it's running | |
1577 | * @IFF_MACVLAN: Macvlan device | |
6d0e24cd LB |
1578 | * @IFF_XMIT_DST_RELEASE_PERM: IFF_XMIT_DST_RELEASE not taking into account |
1579 | * underlying stacked devices | |
007979ea | 1580 | * @IFF_L3MDEV_MASTER: device is an L3 master device |
fa8187c9 | 1581 | * @IFF_NO_QUEUE: device can run without qdisc attached |
35d4e172 | 1582 | * @IFF_OPENVSWITCH: device is an Open vSwitch master |
fee6d4c7 | 1583 | * @IFF_L3MDEV_SLAVE: device is enslaved to an L3 master device |
c981e421 | 1584 | * @IFF_TEAM: device is a team device |
d4ab4286 | 1585 | * @IFF_RXFH_CONFIGURED: device has had Rx Flow indirection table configured |
871b642a PA |
1586 | * @IFF_PHONY_HEADROOM: the headroom value is controlled by an external |
1587 | * entity (i.e. the master device for bridged veth) | |
3c175784 | 1588 | * @IFF_MACSEC: device is a MACsec device |
f5426250 | 1589 | * @IFF_NO_RX_HANDLER: device doesn't support the rx_handler hook |
30c8bd5a SS |
1590 | * @IFF_FAILOVER: device is a failover master device |
1591 | * @IFF_FAILOVER_SLAVE: device is lower dev of a failover master device | |
d5256083 | 1592 | * @IFF_L3MDEV_RX_HANDLER: only invoke the rx handler of L3 master device |
8065a779 | 1593 | * @IFF_LIVE_RENAME_OK: rename is allowed while device is up and running |
c2ff53d8 XZ |
1594 | * @IFF_TX_SKB_NO_LINEAR: device/driver is capable of xmitting frames with |
1595 | * skb_headlen(skb) == 0 (data starts from frag0) | |
7aa98047 LR |
1596 | */ |
1597 | enum netdev_priv_flags { | |
1598 | IFF_802_1Q_VLAN = 1<<0, | |
1599 | IFF_EBRIDGE = 1<<1, | |
0dc1549b JP |
1600 | IFF_BONDING = 1<<2, |
1601 | IFF_ISATAP = 1<<3, | |
1602 | IFF_WAN_HDLC = 1<<4, | |
1603 | IFF_XMIT_DST_RELEASE = 1<<5, | |
1604 | IFF_DONT_BRIDGE = 1<<6, | |
1605 | IFF_DISABLE_NETPOLL = 1<<7, | |
1606 | IFF_MACVLAN_PORT = 1<<8, | |
1607 | IFF_BRIDGE_PORT = 1<<9, | |
1608 | IFF_OVS_DATAPATH = 1<<10, | |
1609 | IFF_TX_SKB_SHARING = 1<<11, | |
1610 | IFF_UNICAST_FLT = 1<<12, | |
1611 | IFF_TEAM_PORT = 1<<13, | |
1612 | IFF_SUPP_NOFCS = 1<<14, | |
1613 | IFF_LIVE_ADDR_CHANGE = 1<<15, | |
1614 | IFF_MACVLAN = 1<<16, | |
1615 | IFF_XMIT_DST_RELEASE_PERM = 1<<17, | |
1ec54cb4 PA |
1616 | IFF_L3MDEV_MASTER = 1<<18, |
1617 | IFF_NO_QUEUE = 1<<19, | |
1618 | IFF_OPENVSWITCH = 1<<20, | |
1619 | IFF_L3MDEV_SLAVE = 1<<21, | |
1620 | IFF_TEAM = 1<<22, | |
1621 | IFF_RXFH_CONFIGURED = 1<<23, | |
1622 | IFF_PHONY_HEADROOM = 1<<24, | |
1623 | IFF_MACSEC = 1<<25, | |
f5426250 | 1624 | IFF_NO_RX_HANDLER = 1<<26, |
30c8bd5a SS |
1625 | IFF_FAILOVER = 1<<27, |
1626 | IFF_FAILOVER_SLAVE = 1<<28, | |
d5256083 | 1627 | IFF_L3MDEV_RX_HANDLER = 1<<29, |
8065a779 | 1628 | IFF_LIVE_RENAME_OK = 1<<30, |
c2ff53d8 | 1629 | IFF_TX_SKB_NO_LINEAR = 1<<31, |
7aa98047 LR |
1630 | }; |
1631 | ||
1632 | #define IFF_802_1Q_VLAN IFF_802_1Q_VLAN | |
1633 | #define IFF_EBRIDGE IFF_EBRIDGE | |
7aa98047 | 1634 | #define IFF_BONDING IFF_BONDING |
7aa98047 | 1635 | #define IFF_ISATAP IFF_ISATAP |
7aa98047 LR |
1636 | #define IFF_WAN_HDLC IFF_WAN_HDLC |
1637 | #define IFF_XMIT_DST_RELEASE IFF_XMIT_DST_RELEASE | |
1638 | #define IFF_DONT_BRIDGE IFF_DONT_BRIDGE | |
1639 | #define IFF_DISABLE_NETPOLL IFF_DISABLE_NETPOLL | |
1640 | #define IFF_MACVLAN_PORT IFF_MACVLAN_PORT | |
1641 | #define IFF_BRIDGE_PORT IFF_BRIDGE_PORT | |
1642 | #define IFF_OVS_DATAPATH IFF_OVS_DATAPATH | |
1643 | #define IFF_TX_SKB_SHARING IFF_TX_SKB_SHARING | |
1644 | #define IFF_UNICAST_FLT IFF_UNICAST_FLT | |
1645 | #define IFF_TEAM_PORT IFF_TEAM_PORT | |
1646 | #define IFF_SUPP_NOFCS IFF_SUPP_NOFCS | |
1647 | #define IFF_LIVE_ADDR_CHANGE IFF_LIVE_ADDR_CHANGE | |
1648 | #define IFF_MACVLAN IFF_MACVLAN | |
02875878 | 1649 | #define IFF_XMIT_DST_RELEASE_PERM IFF_XMIT_DST_RELEASE_PERM |
007979ea | 1650 | #define IFF_L3MDEV_MASTER IFF_L3MDEV_MASTER |
fa8187c9 | 1651 | #define IFF_NO_QUEUE IFF_NO_QUEUE |
35d4e172 | 1652 | #define IFF_OPENVSWITCH IFF_OPENVSWITCH |
8f25348b | 1653 | #define IFF_L3MDEV_SLAVE IFF_L3MDEV_SLAVE |
c981e421 | 1654 | #define IFF_TEAM IFF_TEAM |
d4ab4286 | 1655 | #define IFF_RXFH_CONFIGURED IFF_RXFH_CONFIGURED |
2463e073 | 1656 | #define IFF_PHONY_HEADROOM IFF_PHONY_HEADROOM |
3c175784 | 1657 | #define IFF_MACSEC IFF_MACSEC |
f5426250 | 1658 | #define IFF_NO_RX_HANDLER IFF_NO_RX_HANDLER |
30c8bd5a SS |
1659 | #define IFF_FAILOVER IFF_FAILOVER |
1660 | #define IFF_FAILOVER_SLAVE IFF_FAILOVER_SLAVE | |
d5256083 | 1661 | #define IFF_L3MDEV_RX_HANDLER IFF_L3MDEV_RX_HANDLER |
8065a779 | 1662 | #define IFF_LIVE_RENAME_OK IFF_LIVE_RENAME_OK |
c2ff53d8 | 1663 | #define IFF_TX_SKB_NO_LINEAR IFF_TX_SKB_NO_LINEAR |
7aa98047 | 1664 | |
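/*
 * Usage sketch (illustrative only): these bits are tested against
 * dev->priv_flags; the in-tree netif_is_*() predicates follow the same
 * pattern as the hypothetical helper below.
 *
 *	static inline bool foo_dev_is_bridge(const struct net_device *dev)
 *	{
 *		return !!(dev->priv_flags & IFF_EBRIDGE);
 *	}
 */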
4e096a18 OR |
1665 | /* Specifies the type of the struct net_device::ml_priv pointer */ |
1666 | enum netdev_ml_priv_type { | |
1667 | ML_PRIV_NONE, | |
1668 | ML_PRIV_CAN, | |
1669 | }; | |
1670 | ||
536721b1 KK |
1671 | /** |
1672 | * struct net_device - The DEVICE structure. | |
d651983d MCC |
1673 | * |
1674 | * Actually, this whole structure is a big mistake. It mixes I/O | |
1675 | * data with strictly "high-level" data, and it has to know about | |
1676 | * almost every data structure used in the INET module. | |
536721b1 KK |
1677 | * |
1678 | * @name: This is the first field of the "visible" part of this structure | |
1679 | * (i.e. as seen by users in the "Space.c" file). It is the name | |
d651983d | 1680 | * of the interface. |
536721b1 | 1681 | * |
ff927412 | 1682 | * @name_node: Name hashlist node |
536721b1 KK |
1683 | * @ifalias: SNMP alias |
1684 | * @mem_end: Shared memory end | |
1685 | * @mem_start: Shared memory start | |
1686 | * @base_addr: Device I/O address | |
1687 | * @irq: Device IRQ number | |
1688 | * | |
1689 | * @state: Generic network queuing layer state, see netdev_state_t | |
1690 | * @dev_list: The global list of network devices | |
5e82b4b2 BH |
1691 | * @napi_list: List entry used for polling NAPI devices |
1692 | * @unreg_list: List entry when we are unregistering the | |
1693 | * device; see the function unregister_netdev | |
1694 | * @close_list: List entry used when we are closing the device | |
62d885fe BP |
1695 | * @ptype_all: Device-specific packet handlers for all protocols |
1696 | * @ptype_specific: Device-specific, protocol-specific packet handlers | |
536721b1 KK |
1697 | * |
1698 | * @adj_list: Directly linked devices, like slaves for bonding | |
536721b1 KK |
1699 | * @features: Currently active device features |
1700 | * @hw_features: User-changeable features | |
1701 | * | |
1702 | * @wanted_features: User-requested features | |
1703 | * @vlan_features: Mask of features inheritable by VLAN devices | |
1704 | * | |
1705 | * @hw_enc_features: Mask of features inherited by encapsulating devices | |
1706 | * This field indicates what encapsulation | |
1707 | * offloads the hardware is capable of doing, | |
1708 | * and drivers will need to set them appropriately. | |
1709 | * | |
1710 | * @mpls_features: Mask of features inheritable by MPLS | |
a1fa83bd | 1711 | * @gso_partial_features: value(s) from NETIF_F_GSO\* |
536721b1 KK |
1712 | * |
1713 | * @ifindex: interface index | |
5e82b4b2 | 1714 | * @group: The group the device belongs to |
536721b1 KK |
1715 | * |
1716 | * @stats: Statistics struct, which was left as a legacy, use | |
1717 | * rtnl_link_stats64 instead | |
1718 | * | |
1719 | * @rx_dropped: Packets dropped by the core network; | |
1720 | * do not use this in drivers | |
1721 | * @tx_dropped: Packets dropped by the core network; | |
1722 | * do not use this in drivers | |
6e7333d3 JW |
1723 | * @rx_nohandler: nohandler dropped packets by core network on |
1724 | * inactive devices, do not use this in drivers | |
9e55e5d3 FF |
1725 | * @carrier_up_count: Number of times the carrier has been up |
1726 | * @carrier_down_count: Number of times the carrier has been down | |
536721b1 | 1727 | * |
536721b1 KK |
1728 | * @wireless_handlers: List of functions to handle Wireless Extensions, |
1729 | * instead of ioctl, | |
1730 | * see <net/iw_handler.h> for details. | |
1731 | * @wireless_data: Instance data managed by the core of wireless extensions | |
1732 | * | |
1733 | * @netdev_ops: Includes several pointers to callbacks, | |
1734 | * if one wants to override the ndo_*() functions | |
1735 | * @ethtool_ops: Management operations | |
a1fa83bd | 1736 | * @l3mdev_ops: Layer 3 master device operations |
f997c55c AA |
1737 | * @ndisc_ops: Includes callbacks for different IPv6 neighbour |
1738 | * discovery handling. Necessary for e.g. 6LoWPAN. | |
a1fa83bd RD |
1739 | * @xfrmdev_ops: Transformation offload operations |
1740 | * @tlsdev_ops: Transport Layer Security offload operations | |
d476059e | 1741 | * @header_ops: Includes callbacks for creating, parsing, caching, etc. |
536721b1 KK |
1742 | * of Layer 2 headers. |
1743 | * | |
1744 | * @flags: Interface flags (a la BSD) | |
1745 | * @priv_flags: Like 'flags' but invisible to userspace, | |
1746 | * see if.h for the definitions | |
1747 | * @gflags: Global flags (kept as legacy) | |
1748 | * @padded: How much padding added by alloc_netdev() | |
1749 | * @operstate: RFC2863 operstate | |
1750 | * @link_mode: Mapping policy to operstate | |
1751 | * @if_port: Selectable AUI, TP, ... | |
1752 | * @dma: DMA channel | |
1753 | * @mtu: Interface MTU value | |
61e84623 JW |
1754 | * @min_mtu: Interface Minimum MTU value |
1755 | * @max_mtu: Interface Maximum MTU value | |
536721b1 | 1756 | * @type: Interface hardware type |
2793a23a | 1757 | * @hard_header_len: Maximum hardware header length. |
217e6fa2 | 1758 | * @min_header_len: Minimum hardware header length |
536721b1 KK |
1759 | * |
1760 | * @needed_headroom: Extra headroom the hardware may need, but not in all | |
1761 | * cases can this be guaranteed | |
1762 | * @needed_tailroom: Extra tailroom the hardware may need, but not in all | |
1763 | * cases can this be guaranteed. Some cases also use | |
1764 | * LL_MAX_HEADER instead to allocate the skb | |
1765 | * | |
1766 | * interface address info: | |
1767 | * | |
1768 | * @perm_addr: Permanent hw address | |
1769 | * @addr_assign_type: Hw address assignment type | |
1770 | * @addr_len: Hardware address length | |
5343da4c TY |
1771 | * @upper_level: Maximum depth level of upper devices. |
1772 | * @lower_level: Maximum depth level of lower devices. | |
8626a0c8 | 1773 | * @neigh_priv_len: Used in neigh_alloc() |
536721b1 KK |
1774 | * @dev_id: Used to differentiate devices that share |
1775 | * the same link layer address | |
1776 | * @dev_port: Used to differentiate devices that share | |
1777 | * the same function | |
1778 | * @addr_list_lock: XXX: need comments on this one | |
a1fa83bd | 1779 | * @name_assign_type: network interface name assignment type |
5e82b4b2 | 1780 | * @uc_promisc: Counter that indicates promiscuous mode |
536721b1 KK |
1781 | * has been enabled due to the need to listen to |
1782 | * additional unicast addresses in a device that | |
1783 | * does not implement ndo_set_rx_mode() | |
14ffbbb8 TG |
1784 | * @uc: unicast mac addresses |
1785 | * @mc: multicast mac addresses | |
1786 | * @dev_addrs: list of device hw addresses | |
1787 | * @queues_kset: Group of all Kobjects in the Tx and RX queues | |
5e82b4b2 BH |
1788 | * @promiscuity: Number of times the NIC is told to work in |
1789 | * promiscuous mode; if it becomes 0 the NIC will | |
1790 | * exit promiscuous mode | |
536721b1 KK |
1791 | * @allmulti: Counter, enables or disables allmulticast mode |
1792 | * | |
1793 | * @vlan_info: VLAN info | |
1794 | * @dsa_ptr: dsa specific data | |
1795 | * @tipc_ptr: TIPC specific data | |
1796 | * @atalk_ptr: AppleTalk link | |
1797 | * @ip_ptr: IPv4 specific data | |
1798 | * @dn_ptr: DECnet specific data | |
1799 | * @ip6_ptr: IPv6 specific data | |
1800 | * @ax25_ptr: AX.25 specific data | |
1801 | * @ieee80211_ptr: IEEE 802.11 specific data, assign before registering | |
a1fa83bd RD |
1802 | * @ieee802154_ptr: IEEE 802.15.4 low-rate Wireless Personal Area Network |
1803 | * device struct | |
1804 | * @mpls_ptr: mpls_dev struct pointer | |
536721b1 | 1805 | * |
536721b1 KK |
1806 | * @dev_addr: Hw address (before bcast, |
1807 | * because most packets are unicast) | |
1808 | * | |
1809 | * @_rx: Array of RX queues | |
1810 | * @num_rx_queues: Number of RX queues | |
1811 | * allocated at register_netdev() time | |
1812 | * @real_num_rx_queues: Number of RX queues currently active in device | |
a1fa83bd RD |
1813 | * @xdp_prog: XDP sockets filter program pointer |
1814 | * @gro_flush_timeout: timeout for GRO layer in NAPI | |
5c45a918 MCC |
1815 | * @napi_defer_hard_irqs: If not zero, provides a counter that allows | |
1816 | * avoiding NIC hard IRQs on busy queues. | |
536721b1 KK |
1817 | * |
1818 | * @rx_handler: handler for received packets | |
1819 | * @rx_handler_data: XXX: need comments on this one | |
46209401 JP |
1820 | * @miniq_ingress: ingress/clsact qdisc specific data for |
1821 | * ingress processing | |
536721b1 | 1822 | * @ingress_queue: XXX: need comments on this one |
2f5e70c8 | 1823 | * @nf_hooks_ingress: netfilter hooks executed for ingress packets |
536721b1 KK |
1824 | * @broadcast: hw bcast address |
1825 | * | |
14ffbbb8 TG |
1826 | * @rx_cpu_rmap: CPU reverse-mapping for RX completion interrupts, |
1827 | * indexed by RX queue number. Assigned by driver. | |
1828 | * This must only be set if the ndo_rx_flow_steer | |
1829 | * operation is defined | |
1830 | * @index_hlist: Device index hash chain | |
1831 | * | |
536721b1 KK |
1832 | * @_tx: Array of TX queues |
1833 | * @num_tx_queues: Number of TX queues allocated at alloc_netdev_mq() time | |
1834 | * @real_num_tx_queues: Number of TX queues currently active in device | |
1835 | * @qdisc: Root qdisc from userspace point of view | |
1836 | * @tx_queue_len: Max frames per queue allowed | |
1837 | * @tx_global_lock: XXX: need comments on this one | |
a1fa83bd | 1838 | * @xdp_bulkq: XDP device bulk queue |
044ab86d | 1839 | * @xps_maps: all CPUs/RXQs maps for XPS device |
536721b1 KK |
1840 | * |
46209401 JP |
1842 | * @miniq_egress: clsact qdisc specific data for |
1843 | * egress processing | |
a1fa83bd | 1844 | * @qdisc_hash: qdisc hash table |
536721b1 | 1845 | * @watchdog_timeo: Represents the timeout that is used by |
5e82b4b2 | 1846 | * the watchdog (see dev_watchdog()) |
536721b1 KK |
1847 | * @watchdog_timer: List of timers |
1848 | * | |
eb02d39a | 1849 | * @proto_down_reason: reason a netdev interface is held down |
536721b1 | 1850 | * @pcpu_refcnt: Number of references to this device |
add2d736 | 1851 | * @dev_refcnt: Number of references to this device |
536721b1 | 1852 | * @todo_list: Delayed register/unregister |
536721b1 KK |
1853 | * @link_watch_list: XXX: need comments on this one |
1854 | * | |
1855 | * @reg_state: Register/unregister state machine | |
1856 | * @dismantle: Device is going to be freed | |
1857 | * @rtnl_link_state: This enum represents the phases of creating | |
1858 | * a new link | |
1859 | * | |
cf124db5 DM |
1860 | * @needs_free_netdev: Should unregister perform free_netdev? |
1861 | * @priv_destructor: Called from unregister | |
536721b1 KK |
1862 | * @npinfo: XXX: need comments on this one |
1863 | * @nd_net: Network namespace this network device is inside | |
1864 | * | |
1865 | * @ml_priv: Mid-layer private | |
4e096a18 | 1866 | * @ml_priv_type: Mid-layer private type |
536721b1 KK |
1867 | * @lstats: Loopback statistics |
1868 | * @tstats: Tunnel statistics | |
1869 | * @dstats: Dummy statistics | |
1870 | * @vstats: Virtual ethernet statistics | |
1871 | * | |
1872 | * @garp_port: GARP | |
1873 | * @mrp_port: MRP | |
1874 | * | |
1875 | * @dev: Class/net/name entry | |
1876 | * @sysfs_groups: Space for optional device, statistics and wireless | |
1877 | * sysfs groups | |
1878 | * | |
1879 | * @sysfs_rx_queue_group: Space for optional per-rx queue attributes | |
1880 | * @rtnl_link_ops: Rtnl_link_ops | |
1881 | * | |
1882 | * @gso_max_size: Maximum size of generic segmentation offload | |
1883 | * @gso_max_segs: Maximum number of segments that can be passed to the | |
1884 | * NIC for GSO | |
1885 | * | |
1886 | * @dcbnl_ops: Data Center Bridging netlink ops | |
1887 | * @num_tc: Number of traffic classes in the net device | |
1888 | * @tc_to_txq: XXX: need comments on this one | |
920c1cd3 | 1889 | * @prio_tc_map: XXX: need comments on this one |
536721b1 KK |
1890 | * |
1891 | * @fcoe_ddp_xid: Max exchange id for FCoE LRO by ddp | |
1892 | * | |
1893 | * @priomap: XXX: need comments on this one | |
1894 | * @phydev: Physical device may attach itself | |
1895 | * for hardware timestamping | |
e679c9c1 | 1896 | * @sfp_bus: attached &struct sfp_bus structure. |
1a33e10e | 1897 | * |
1a33e10e CW |
1898 | * @qdisc_tx_busylock: lockdep class annotating Qdisc->busylock spinlock |
1899 | * @qdisc_running_key: lockdep class annotating Qdisc->running seqcount | |
536721b1 | 1900 | * |
d746d707 AK |
1901 | * @proto_down: protocol port state information can be sent to the |
1902 | * switch driver and used to set the phys state of the | |
1903 | * switch port. | |
1904 | * | |
61941143 HK |
1905 | * @wol_enabled: Wake-on-LAN is enabled |
1906 | * | |
29863d41 WW |
1907 | * @threaded: napi threaded mode is enabled |
1908 | * | |
93642e14 JP |
1909 | * @net_notifier_list: List of per-net netdev notifier block |
1910 | * that follow this device when it is moved | |
1911 | * to another network namespace. | |
1912 | * | |
30e9bb84 AT |
1913 | * @macsec_ops: MACsec offloading ops |
1914 | * | |
cc4e3835 JK |
1915 | * @udp_tunnel_nic_info: static structure describing the UDP tunnel |
1916 | * offload capabilities of the device | |
1917 | * @udp_tunnel_nic: UDP tunnel offload state | |
ffa59b0b | 1918 | * @xdp_state: stores info on attached XDP BPF programs |
cc4e3835 | 1919 | * |
a93bdcb9 MCC |
1920 | * @nested_level: Used as a parameter of spin_lock_nested() of |
1921 | * dev->addr_list_lock. | |
1922 | * @unlink_list: As netif_addr_lock() can be called recursively, | |
1923 | * keep a list of interfaces to be deleted. | |
1924 | * | |
1da177e4 LT |
1925 | * FIXME: cleanup struct net_device such that network protocol info |
1926 | * moves out. | |
1927 | */ | |
1928 | ||
d94d9fee | 1929 | struct net_device { |
1da177e4 | 1930 | char name[IFNAMSIZ]; |
ff927412 | 1931 | struct netdev_name_node *name_node; |
6c557001 | 1932 | struct dev_ifalias __rcu *ifalias; |
1da177e4 LT |
1933 | /* |
1934 | * I/O specific fields | |
1935 | * FIXME: Merge these and struct ifmap into one | |
1936 | */ | |
536721b1 KK |
1937 | unsigned long mem_end; |
1938 | unsigned long mem_start; | |
1939 | unsigned long base_addr; | |
1da177e4 LT |
1940 | |
1941 | /* | |
536721b1 KK |
1942 | * Some hardware also needs these fields (state,dev_list, |
1943 | * napi_list,unreg_list,close_list) but they are not | |
1da177e4 LT |
1944 | * part of the usual set specified in Space.c. |
1945 | */ | |
1946 | ||
1da177e4 LT |
1947 | unsigned long state; |
1948 | ||
7562f876 | 1949 | struct list_head dev_list; |
bea3348e | 1950 | struct list_head napi_list; |
44a0873d | 1951 | struct list_head unreg_list; |
5cde2829 | 1952 | struct list_head close_list; |
7866a621 SN |
1953 | struct list_head ptype_all; |
1954 | struct list_head ptype_specific; | |
2f268f12 | 1955 | |
2f268f12 VF |
1956 | struct { |
1957 | struct list_head upper; | |
1958 | struct list_head lower; | |
1959 | } adj_list; | |
1960 | ||
28af22c6 JDB |
1961 | /* Read-mostly cache-line for fast-path access */ |
1962 | unsigned int flags; | |
1963 | unsigned int priv_flags; | |
1964 | const struct net_device_ops *netdev_ops; | |
1965 | int ifindex; | |
1966 | unsigned short gflags; | |
1967 | unsigned short hard_header_len; | |
1968 | ||
1969 | /* Note : dev->mtu is often read without holding a lock. | |
1970 | * Writers usually hold RTNL. | |
1971 | * It is recommended to use READ_ONCE() to annotate the reads, | |
1972 | * and to use WRITE_ONCE() to annotate the writes. | |
1973 | */ | |
1974 | unsigned int mtu; | |
1975 | unsigned short needed_headroom; | |
1976 | unsigned short needed_tailroom; | |
1977 | ||
c8f44aff | 1978 | netdev_features_t features; |
c8f44aff | 1979 | netdev_features_t hw_features; |
c8f44aff | 1980 | netdev_features_t wanted_features; |
c8f44aff | 1981 | netdev_features_t vlan_features; |
6a674e9c | 1982 | netdev_features_t hw_enc_features; |
0d89d203 | 1983 | netdev_features_t mpls_features; |
802ab55a | 1984 | netdev_features_t gso_partial_features; |
04ed3e74 | 1985 | |
28af22c6 JDB |
1986 | unsigned int min_mtu; |
1987 | unsigned int max_mtu; | |
1988 | unsigned short type; | |
1989 | unsigned char min_header_len; | |
1990 | unsigned char name_assign_type; | |
1991 | ||
7a66bbc9 | 1992 | int group; |
1da177e4 | 1993 | |
28af22c6 | 1994 | struct net_device_stats stats; /* not used by modern drivers */ |
015f0688 | 1995 | |
015f0688 ED |
1996 | atomic_long_t rx_dropped; |
1997 | atomic_long_t tx_dropped; | |
6e7333d3 | 1998 | atomic_long_t rx_nohandler; |
1da177e4 | 1999 | |
b2d3bcfa DD |
2000 | /* Stats to monitor link on/off, flapping */ |
2001 | atomic_t carrier_up_count; | |
2002 | atomic_t carrier_down_count; | |
2003 | ||
b86e0280 | 2004 | #ifdef CONFIG_WIRELESS_EXT |
5e82b4b2 BH |
2005 | const struct iw_handler_def *wireless_handlers; |
2006 | struct iw_public_data *wireless_data; | |
b86e0280 | 2007 | #endif |
76fd8593 | 2008 | const struct ethtool_ops *ethtool_ops; |
1b69c6d0 DA |
2009 | #ifdef CONFIG_NET_L3_MASTER_DEV |
2010 | const struct l3mdev_ops *l3mdev_ops; | |
2011 | #endif | |
f997c55c AA |
2012 | #if IS_ENABLED(CONFIG_IPV6) |
2013 | const struct ndisc_ops *ndisc_ops; | |
2014 | #endif | |
1da177e4 | 2015 | |
9cb0d21d | 2016 | #ifdef CONFIG_XFRM_OFFLOAD |
d77e38e6 SK |
2017 | const struct xfrmdev_ops *xfrmdev_ops; |
2018 | #endif | |
2019 | ||
a5c37c63 IL |
2020 | #if IS_ENABLED(CONFIG_TLS_DEVICE) |
2021 | const struct tlsdev_ops *tlsdev_ops; | |
2022 | #endif | |
2023 | ||
3b04ddde SH |
2024 | const struct header_ops *header_ops; |
2025 | ||
536721b1 KK |
2026 | unsigned char operstate; |
2027 | unsigned char link_mode; | |
b00055aa | 2028 | |
536721b1 KK |
2029 | unsigned char if_port; |
2030 | unsigned char dma; | |
bdc220da | 2031 | |
1da177e4 | 2032 | /* Interface address info. */ |
536721b1 KK |
2033 | unsigned char perm_addr[MAX_ADDR_LEN]; |
2034 | unsigned char addr_assign_type; | |
2035 | unsigned char addr_len; | |
5343da4c TY |
2036 | unsigned char upper_level; |
2037 | unsigned char lower_level; | |
1fc70edb | 2038 | |
a0a9663d | 2039 | unsigned short neigh_priv_len; |
536721b1 KK |
2040 | unsigned short dev_id; |
2041 | unsigned short dev_port; | |
28af22c6 JDB |
2042 | unsigned short padded; |
2043 | ||
ccffad25 | 2044 | spinlock_t addr_list_lock; |
28af22c6 | 2045 | int irq; |
1fc70edb | 2046 | |
536721b1 KK |
2047 | struct netdev_hw_addr_list uc; |
2048 | struct netdev_hw_addr_list mc; | |
2049 | struct netdev_hw_addr_list dev_addrs; | |
2050 | ||
4c3d5e7b ED |
2051 | #ifdef CONFIG_SYSFS |
2052 | struct kset *queues_kset; | |
1fc70edb TY |
2053 | #endif |
2054 | #ifdef CONFIG_LOCKDEP | |
2055 | struct list_head unlink_list; | |
4c3d5e7b | 2056 | #endif |
9d45abe1 WC |
2057 | unsigned int promiscuity; |
2058 | unsigned int allmulti; | |
1fc70edb TY |
2059 | bool uc_promisc; |
2060 | #ifdef CONFIG_LOCKDEP | |
2061 | unsigned char nested_level; | |
2062 | #endif | |
1da177e4 | 2063 | |
1da177e4 | 2064 | |
5e82b4b2 | 2065 | /* Protocol-specific pointers */ |
65ac6a5f | 2066 | |
d11ead75 | 2067 | #if IS_ENABLED(CONFIG_VLAN_8021Q) |
536721b1 | 2068 | struct vlan_info __rcu *vlan_info; |
65ac6a5f | 2069 | #endif |
34a430d7 | 2070 | #if IS_ENABLED(CONFIG_NET_DSA) |
2f657a60 | 2071 | struct dsa_port *dsa_ptr; |
37cb0620 YX |
2072 | #endif |
2073 | #if IS_ENABLED(CONFIG_TIPC) | |
536721b1 | 2074 | struct tipc_bearer __rcu *tipc_ptr; |
91da11f8 | 2075 | #endif |
89e58148 | 2076 | #if IS_ENABLED(CONFIG_IRDA) || IS_ENABLED(CONFIG_ATALK) |
536721b1 | 2077 | void *atalk_ptr; |
89e58148 | 2078 | #endif |
536721b1 | 2079 | struct in_device __rcu *ip_ptr; |
330c7272 | 2080 | #if IS_ENABLED(CONFIG_DECNET) |
536721b1 | 2081 | struct dn_dev __rcu *dn_ptr; |
330c7272 | 2082 | #endif |
536721b1 | 2083 | struct inet6_dev __rcu *ip6_ptr; |
19ff13f2 | 2084 | #if IS_ENABLED(CONFIG_AX25) |
536721b1 | 2085 | void *ax25_ptr; |
19ff13f2 | 2086 | #endif |
536721b1 | 2087 | struct wireless_dev *ieee80211_ptr; |
98a18b6f | 2088 | struct wpan_dev *ieee802154_ptr; |
03c57747 RS |
2089 | #if IS_ENABLED(CONFIG_MPLS_ROUTING) |
2090 | struct mpls_dev __rcu *mpls_ptr; | |
2091 | #endif | |
1da177e4 | 2092 | |
9356b8fc | 2093 | /* |
cd13539b | 2094 | * Cache lines mostly used on receive path (including eth_type_trans()) |
9356b8fc | 2095 | */ |
9356b8fc | 2096 | /* Interface address info used in eth_type_trans() */ |
536721b1 | 2097 | unsigned char *dev_addr; |
f001fde5 | 2098 | |
0a9627f2 | 2099 | struct netdev_rx_queue *_rx; |
0a9627f2 | 2100 | unsigned int num_rx_queues; |
62fe0b40 | 2101 | unsigned int real_num_rx_queues; |
0a9627f2 | 2102 | |
7acedaf5 | 2103 | struct bpf_prog __rcu *xdp_prog; |
3b47d303 | 2104 | unsigned long gro_flush_timeout; |
6f8b12d6 | 2105 | int napi_defer_hard_irqs; |
61391cde | 2106 | rx_handler_func_t __rcu *rx_handler; |
2107 | void __rcu *rx_handler_data; | |
e8a0464c | 2108 | |
4cda01e8 | 2109 | #ifdef CONFIG_NET_CLS_ACT |
46209401 | 2110 | struct mini_Qdisc __rcu *miniq_ingress; |
d2788d34 | 2111 | #endif |
24824a09 | 2112 | struct netdev_queue __rcu *ingress_queue; |
e687ad60 | 2113 | #ifdef CONFIG_NETFILTER_INGRESS |
960632ec | 2114 | struct nf_hook_entries __rcu *nf_hooks_ingress; |
e687ad60 | 2115 | #endif |
d2788d34 | 2116 | |
536721b1 | 2117 | unsigned char broadcast[MAX_ADDR_LEN]; |
14ffbbb8 TG |
2118 | #ifdef CONFIG_RFS_ACCEL |
2119 | struct cpu_rmap *rx_cpu_rmap; | |
2120 | #endif | |
2121 | struct hlist_node index_hlist; | |
cd13539b ED |
2122 | |
2123 | /* | |
2124 | * Cache lines mostly used on transmit path | |
2125 | */ | |
e8a0464c DM |
2126 | struct netdev_queue *_tx ____cacheline_aligned_in_smp; |
2127 | unsigned int num_tx_queues; | |
fd2ea0a7 | 2128 | unsigned int real_num_tx_queues; |
af356afa | 2129 | struct Qdisc *qdisc; |
0cd29503 | 2130 | unsigned int tx_queue_len; |
c3f26a26 | 2131 | spinlock_t tx_global_lock; |
75ccae62 THJ |
2132 | |
2133 | struct xdp_dev_bulk_queue __percpu *xdp_bulkq; | |
cd13539b | 2134 | |
bf264145 | 2135 | #ifdef CONFIG_XPS |
044ab86d | 2136 | struct xps_dev_maps __rcu *xps_maps[XPS_MAPS_MAX]; |
bf264145 | 2137 | #endif |
1f211a1b | 2138 | #ifdef CONFIG_NET_CLS_ACT |
46209401 | 2139 | struct mini_Qdisc __rcu *miniq_egress; |
1f211a1b | 2140 | #endif |
0c4f691f | 2141 | |
75ccae62 THJ |
2142 | #ifdef CONFIG_NET_SCHED |
2143 | DECLARE_HASHTABLE (qdisc_hash, 4); | |
2144 | #endif | |
9356b8fc | 2145 | /* These may be needed for future network-power-down code. */ |
9356b8fc | 2146 | struct timer_list watchdog_timer; |
75ccae62 | 2147 | int watchdog_timeo; |
9356b8fc | 2148 | |
829eb208 RP |
2149 | u32 proto_down_reason; |
2150 | ||
1da177e4 | 2151 | struct list_head todo_list; |
919067cc ED |
2152 | |
2153 | #ifdef CONFIG_PCPU_DEV_REFCNT | |
75ccae62 | 2154 | int __percpu *pcpu_refcnt; |
919067cc ED |
2155 | #else |
2156 | refcount_t dev_refcnt; | |
2157 | #endif | |
1da177e4 | 2158 | |
e014debe | 2159 | struct list_head link_watch_list; |
572a103d | 2160 | |
1da177e4 | 2161 | enum { NETREG_UNINITIALIZED=0, |
b17a7c17 | 2162 | NETREG_REGISTERED, /* completed register_netdevice */ |
1da177e4 LT |
2163 | NETREG_UNREGISTERING, /* called unregister_netdevice */ |
2164 | NETREG_UNREGISTERED, /* completed unregister todo */ | |
2165 | NETREG_RELEASED, /* called free_netdev */ | |
937f1ba5 | 2166 | NETREG_DUMMY, /* dummy device for NAPI poll */ |
449f4544 ED |
2167 | } reg_state:8; |
2168 | ||
536721b1 | 2169 | bool dismantle; |
a2835763 PM |
2170 | |
2171 | enum { | |
2172 | RTNL_LINK_INITIALIZED, | |
2173 | RTNL_LINK_INITIALIZING, | |
2174 | } rtnl_link_state:16; | |
1da177e4 | 2175 | |
cf124db5 DM |
2176 | bool needs_free_netdev; |
2177 | void (*priv_destructor)(struct net_device *dev); | |
1da177e4 | 2178 | |
1da177e4 | 2179 | #ifdef CONFIG_NETPOLL |
5fbee843 | 2180 | struct netpoll_info __rcu *npinfo; |
1da177e4 | 2181 | #endif |
eae792b7 | 2182 | |
0c5c9fb5 | 2183 | possible_net_t nd_net; |
4a1c5371 | 2184 | |
4951704b | 2185 | /* mid-layer private */ |
4e096a18 OR |
2186 | void *ml_priv; |
2187 | enum netdev_ml_priv_type ml_priv_type; | |
2188 | ||
a7855c78 | 2189 | union { |
536721b1 | 2190 | struct pcpu_lstats __percpu *lstats; |
8f84985f | 2191 | struct pcpu_sw_netstats __percpu *tstats; |
536721b1 | 2192 | struct pcpu_dstats __percpu *dstats; |
a7855c78 | 2193 | }; |
536721b1 | 2194 | |
fb585b44 | 2195 | #if IS_ENABLED(CONFIG_GARP) |
3cc77ec7 | 2196 | struct garp_port __rcu *garp_port; |
fb585b44 TK |
2197 | #endif |
2198 | #if IS_ENABLED(CONFIG_MRP) | |
febf018d | 2199 | struct mrp_port __rcu *mrp_port; |
fb585b44 | 2200 | #endif |
1da177e4 | 2201 | |
5e82b4b2 | 2202 | struct device dev; |
0c509a6c | 2203 | const struct attribute_group *sysfs_groups[4]; |
a953be53 | 2204 | const struct attribute_group *sysfs_rx_queue_group; |
38f7b870 | 2205 | |
38f7b870 | 2206 | const struct rtnl_link_ops *rtnl_link_ops; |
f25f4e44 | 2207 | |
82cc1a7a PWJ |
2208 | /* for setting kernel sock attribute on TCP connection setup */ |
2209 | #define GSO_MAX_SIZE 65536 | |
2210 | unsigned int gso_max_size; | |
30b678d8 BH |
2211 | #define GSO_MAX_SEGS 65535 |
2212 | u16 gso_max_segs; | |
743b03a8 | 2213 | |
7a6b6f51 | 2214 | #ifdef CONFIG_DCB |
32953543 | 2215 | const struct dcbnl_rtnl_ops *dcbnl_ops; |
2f90b865 | 2216 | #endif |
ffcfe25b | 2217 | s16 num_tc; |
5e82b4b2 BH |
2218 | struct netdev_tc_txq tc_to_txq[TC_MAX_QUEUE]; |
2219 | u8 prio_tc_map[TC_BITMASK + 1]; | |
2f90b865 | 2220 | |
d11ead75 | 2221 | #if IS_ENABLED(CONFIG_FCOE) |
4d288d57 | 2222 | unsigned int fcoe_ddp_xid; |
5bc1421e | 2223 | #endif |
86f8515f | 2224 | #if IS_ENABLED(CONFIG_CGROUP_NET_PRIO) |
5bc1421e | 2225 | struct netprio_map __rcu *priomap; |
4d288d57 | 2226 | #endif |
5e82b4b2 | 2227 | struct phy_device *phydev; |
e679c9c1 | 2228 | struct sfp_bus *sfp_bus; |
1a33e10e CW |
2229 | struct lock_class_key *qdisc_tx_busylock; |
2230 | struct lock_class_key *qdisc_running_key; | |
5e82b4b2 | 2231 | bool proto_down; |
61941143 | 2232 | unsigned wol_enabled:1; |
29863d41 | 2233 | unsigned threaded:1; |
93642e14 JP |
2234 | |
2235 | struct list_head net_notifier_list; | |
30e9bb84 AT |
2236 | |
2237 | #if IS_ENABLED(CONFIG_MACSEC) | |
2238 | /* MACsec management functions */ | |
2239 | const struct macsec_ops *macsec_ops; | |
2240 | #endif | |
cc4e3835 JK |
2241 | const struct udp_tunnel_nic_info *udp_tunnel_nic_info; |
2242 | struct udp_tunnel_nic *udp_tunnel_nic; | |
7f0a8382 AN |
2243 | |
2244 | /* protected by rtnl_lock */ | |
2245 | struct bpf_xdp_entity xdp_state[__MAX_XDP_MODE]; | |
1da177e4 | 2246 | }; |
43cb76d9 | 2247 | #define to_net_dev(d) container_of(d, struct net_device, dev) |
1da177e4 | 2248 | |
b5cdae32 DM |
2249 | static inline bool netif_elide_gro(const struct net_device *dev) |
2250 | { | |
2251 | if (!(dev->features & NETIF_F_GRO) || dev->xdp_prog) | |
2252 | return true; | |
2253 | return false; | |
2254 | } | |
2255 | ||
1da177e4 | 2256 | #define NETDEV_ALIGN 32 |
1da177e4 | 2257 | |
4f57c087 JF |
2258 | static inline |
2259 | int netdev_get_prio_tc_map(const struct net_device *dev, u32 prio) | |
2260 | { | |
2261 | return dev->prio_tc_map[prio & TC_BITMASK]; | |
2262 | } | |
2263 | ||
2264 | static inline | |
2265 | int netdev_set_prio_tc_map(struct net_device *dev, u8 prio, u8 tc) | |
2266 | { | |
2267 | if (tc >= dev->num_tc) | |
2268 | return -EINVAL; | |
2269 | ||
2270 | dev->prio_tc_map[prio & TC_BITMASK] = tc & TC_BITMASK; | |
2271 | return 0; | |
2272 | } | |
2273 | ||
8d059b0f | 2274 | int netdev_txq_to_tc(struct net_device *dev, unsigned int txq); |
9cf1f6a8 AD |
2275 | void netdev_reset_tc(struct net_device *dev); |
2276 | int netdev_set_tc_queue(struct net_device *dev, u8 tc, u16 count, u16 offset); | |
2277 | int netdev_set_num_tc(struct net_device *dev, u8 num_tc); | |
4f57c087 JF |
2278 | |
2279 | static inline | |
2280 | int netdev_get_num_tc(struct net_device *dev) | |
2281 | { | |
2282 | return dev->num_tc; | |
2283 | } | |
2284 | ||
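/*
 * Sketch (illustrative values): a driver exposing two traffic classes
 * might map priorities 0-3 to TC 0 and 4-7 to TC 1 with the helpers
 * above and netdev_set_num_tc() declared above.
 *
 *	u8 prio;
 *
 *	netdev_set_num_tc(dev, 2);
 *	for (prio = 0; prio < 8; prio++)
 *		netdev_set_prio_tc_map(dev, prio, prio < 4 ? 0 : 1);
 */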
f468f21b TT |
2285 | static inline void net_prefetch(void *p) |
2286 | { | |
2287 | prefetch(p); | |
2288 | #if L1_CACHE_BYTES < 128 | |
2289 | prefetch((u8 *)p + L1_CACHE_BYTES); | |
2290 | #endif | |
2291 | } | |
2292 | ||
2293 | static inline void net_prefetchw(void *p) | |
2294 | { | |
2295 | prefetchw(p); | |
2296 | #if L1_CACHE_BYTES < 128 | |
2297 | prefetchw((u8 *)p + L1_CACHE_BYTES); | |
2298 | #endif | |
2299 | } | |
2300 | ||
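/*
 * Sketch: RX fast paths typically prefetch the start of packet data
 * before parsing headers, for example:
 *
 *	net_prefetch(skb->data);
 */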
ffcfe25b AD |
2301 | void netdev_unbind_sb_channel(struct net_device *dev, |
2302 | struct net_device *sb_dev); | |
2303 | int netdev_bind_sb_channel_queue(struct net_device *dev, | |
2304 | struct net_device *sb_dev, | |
2305 | u8 tc, u16 count, u16 offset); | |
2306 | int netdev_set_sb_channel(struct net_device *dev, u16 channel); | |
2307 | static inline int netdev_get_sb_channel(struct net_device *dev) | |
2308 | { | |
2309 | return max_t(int, -dev->num_tc, 0); | |
2310 | } | |
2311 | ||
e8a0464c DM |
2312 | static inline |
2313 | struct netdev_queue *netdev_get_tx_queue(const struct net_device *dev, | |
2314 | unsigned int index) | |
2315 | { | |
2316 | return &dev->_tx[index]; | |
2317 | } | |
2318 | ||
10c51b56 DB |
2319 | static inline struct netdev_queue *skb_get_tx_queue(const struct net_device *dev, |
2320 | const struct sk_buff *skb) | |
2321 | { | |
2322 | return netdev_get_tx_queue(dev, skb_get_queue_mapping(skb)); | |
2323 | } | |
2324 | ||
e8a0464c DM |
2325 | static inline void netdev_for_each_tx_queue(struct net_device *dev, |
2326 | void (*f)(struct net_device *, | |
2327 | struct netdev_queue *, | |
2328 | void *), | |
2329 | void *arg) | |
2330 | { | |
2331 | unsigned int i; | |
2332 | ||
2333 | for (i = 0; i < dev->num_tx_queues; i++) | |
2334 | f(dev, &dev->_tx[i], arg); | |
2335 | } | |
2336 | ||
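/*
 * Sketch: walk every TX queue, e.g. to (re)initialize per-queue driver
 * state. foo_init_txq is a hypothetical callback matching the expected
 * (dev, queue, arg) signature.
 *
 *	static void foo_init_txq(struct net_device *dev,
 *				 struct netdev_queue *txq, void *arg)
 *	{
 *		netdev_tx_reset_queue(txq);
 *	}
 *
 *	netdev_for_each_tx_queue(dev, foo_init_txq, NULL);
 */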
1a33e10e CW |
2337 | #define netdev_lockdep_set_classes(dev) \ |
2338 | { \ | |
2339 | static struct lock_class_key qdisc_tx_busylock_key; \ | |
2340 | static struct lock_class_key qdisc_running_key; \ | |
2341 | static struct lock_class_key qdisc_xmit_lock_key; \ | |
845e0ebb | 2342 | static struct lock_class_key dev_addr_list_lock_key; \ |
1a33e10e CW |
2343 | unsigned int i; \ |
2344 | \ | |
2345 | (dev)->qdisc_tx_busylock = &qdisc_tx_busylock_key; \ | |
2346 | (dev)->qdisc_running_key = &qdisc_running_key; \ | |
845e0ebb CW |
2347 | lockdep_set_class(&(dev)->addr_list_lock, \ |
2348 | &dev_addr_list_lock_key); \ | |
1a33e10e CW |
2349 | for (i = 0; i < (dev)->num_tx_queues; i++) \ |
2350 | lockdep_set_class(&(dev)->_tx[i]._xmit_lock, \ | |
2351 | &qdisc_xmit_lock_key); \ | |
2352 | } | |
2353 | ||
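/*
 * Sketch: stacked devices (bonding, team, vlan and the like) typically
 * invoke this once per device instance from their init/setup path so
 * lockdep can tell their locks apart from those of lower devices.
 *
 *	static int foo_init(struct net_device *dev)
 *	{
 *		netdev_lockdep_set_classes(dev);
 *		return 0;
 *	}
 */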
b71b5837 PA |
2354 | u16 netdev_pick_tx(struct net_device *dev, struct sk_buff *skb, |
2355 | struct net_device *sb_dev); | |
4bd97d51 PA |
2356 | struct netdev_queue *netdev_core_pick_tx(struct net_device *dev, |
2357 | struct sk_buff *skb, | |
2358 | struct net_device *sb_dev); | |
8c4c49df | 2359 | |
871b642a PA |
2360 | /* returns the headroom that the master device needs to take into account |
2361 | * when forwarding to this dev | |
2362 | */ | |
2363 | static inline unsigned netdev_get_fwd_headroom(struct net_device *dev) | |
2364 | { | |
2365 | return dev->priv_flags & IFF_PHONY_HEADROOM ? 0 : dev->needed_headroom; | |
2366 | } | |
2367 | ||
2368 | static inline void netdev_set_rx_headroom(struct net_device *dev, int new_hr) | |
2369 | { | |
2370 | if (dev->netdev_ops->ndo_set_rx_headroom) | |
2371 | dev->netdev_ops->ndo_set_rx_headroom(dev, new_hr); | |
2372 | } | |
2373 | ||
2374 | /* set the device rx headroom to the dev's default */ | |
2375 | static inline void netdev_reset_rx_headroom(struct net_device *dev) | |
2376 | { | |
2377 | netdev_set_rx_headroom(dev, -1); | |
2378 | } | |
2379 | ||
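/*
 * Sketch: an upper device needing extra headroom on received skbs can
 * propagate the requirement to a lower device and later restore the
 * default; both calls are no-ops if the lower driver does not implement
 * ndo_set_rx_headroom.
 *
 *	netdev_set_rx_headroom(lower_dev, upper_dev->needed_headroom);
 *	...
 *	netdev_reset_rx_headroom(lower_dev);
 */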
4e096a18 OR |
2380 | static inline void *netdev_get_ml_priv(struct net_device *dev, |
2381 | enum netdev_ml_priv_type type) | |
2382 | { | |
2383 | if (dev->ml_priv_type != type) | |
2384 | return NULL; | |
2385 | ||
2386 | return dev->ml_priv; | |
2387 | } | |
2388 | ||
2389 | static inline void netdev_set_ml_priv(struct net_device *dev, | |
2390 | void *ml_priv, | |
2391 | enum netdev_ml_priv_type type) | |
2392 | { | |
2393 | WARN(dev->ml_priv_type && dev->ml_priv_type != type, | |
2394 | "Overwriting already set ml_priv_type (%u) with different ml_priv_type (%u)!\n", | |
2395 | dev->ml_priv_type, type); | |
2396 | WARN(!dev->ml_priv_type && dev->ml_priv, | |
2397 | "Overwriting already set ml_priv and ml_priv_type is ML_PRIV_NONE!\n"); | |
2398 | ||
2399 | dev->ml_priv = ml_priv; | |
2400 | dev->ml_priv_type = type; | |
2401 | } | |
2402 | ||
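/*
 * Sketch: a CAN driver, for instance, stores and retrieves its mid-layer
 * data with a matching type tag, so a caller asking for the wrong type
 * gets NULL instead of a mistyped pointer.
 *
 *	netdev_set_ml_priv(dev, priv, ML_PRIV_CAN);
 *	...
 *	struct can_ml_priv *ml = netdev_get_ml_priv(dev, ML_PRIV_CAN);
 */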
c346dca1 YH |
2403 | /* |
2404 | * Net namespace inlines | |
2405 | */ | |
2406 | static inline | |
2407 | struct net *dev_net(const struct net_device *dev) | |
2408 | { | |
c2d9ba9b | 2409 | return read_pnet(&dev->nd_net); |
c346dca1 YH |
2410 | } |
2411 | ||
2412 | static inline | |
f5aa23fd | 2413 | void dev_net_set(struct net_device *dev, struct net *net) |
c346dca1 | 2414 | { |
0c5c9fb5 | 2415 | write_pnet(&dev->nd_net, net); |
c346dca1 YH |
2416 | } |
2417 | ||
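/*
 * Sketch: code that must not cross namespaces usually compares with
 * net_eq() from net/net_namespace.h; sock_net(sk) is only an example of
 * a second namespace to compare against.
 *
 *	if (!net_eq(dev_net(dev), sock_net(sk)))
 *		return -ENODEV;
 */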
bea3348e SH |
2418 | /** |
2419 | * netdev_priv - access network device private data | |
2420 | * @dev: network device | |
2421 | * | |
2422 | * Get network device private data | |
2423 | */ | |
6472ce60 | 2424 | static inline void *netdev_priv(const struct net_device *dev) |
1da177e4 | 2425 | { |
1ce8e7b5 | 2426 | return (char *)dev + ALIGN(sizeof(struct net_device), NETDEV_ALIGN); |
1da177e4 LT |
2427 | } |
2428 | ||
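/*
 * Sketch: the private area is co-allocated directly after struct
 * net_device by alloc_netdev()/alloc_etherdev(), so drivers recover it
 * via netdev_priv(). struct foo_priv is a hypothetical driver structure.
 *
 *	struct net_device *dev = alloc_etherdev(sizeof(struct foo_priv));
 *	struct foo_priv *priv = netdev_priv(dev);
 */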
1da177e4 LT |
2429 | /* Set the sysfs physical device reference for the network logical device. |
2430 | * If set prior to registration, a symlink will be created during initialization. | |
2431 | */ | |
43cb76d9 | 2432 | #define SET_NETDEV_DEV(net, pdev) ((net)->dev.parent = (pdev)) |
1da177e4 | 2433 | |
384912ed | 2434 | /* Set the sysfs device type for the network logical device to allow |
3f79410c | 2435 | * fine-grained identification of different network device types. For |
5e82b4b2 | 2436 | * example Ethernet, Wireless LAN, Bluetooth, WiMAX etc. |
384912ed MH |
2437 | */ |
2438 | #define SET_NETDEV_DEVTYPE(net, devtype) ((net)->dev.type = (devtype)) | |
2439 | ||
82dc3c63 ED |
2440 | /* Default NAPI poll() weight. |
2441 | * Device drivers are strongly advised not to use a bigger value. | |
2442 | */ | |
2443 | #define NAPI_POLL_WEIGHT 64 | |
2444 | ||
3b582cc1 | 2445 | /** |
5e82b4b2 | 2446 | * netif_napi_add - initialize a NAPI context |
3b582cc1 | 2447 | * @dev: network device |
5e82b4b2 | 2448 | * @napi: NAPI context |
3b582cc1 SH |
2449 | * @poll: polling function |
2450 | * @weight: default weight | |
2451 | * | |
5e82b4b2 BH |
2452 | * netif_napi_add() must be used to initialize a NAPI context prior to calling |
2453 | * *any* of the other NAPI-related functions. | |
3b582cc1 | 2454 | */ |
d565b0a1 HX |
2455 | void netif_napi_add(struct net_device *dev, struct napi_struct *napi, |
2456 | int (*poll)(struct napi_struct *, int), int weight); | |
bea3348e | 2457 | |
d64b5e85 | 2458 | /** |
5e82b4b2 | 2459 | * netif_tx_napi_add - initialize a NAPI context |
d64b5e85 | 2460 | * @dev: network device |
5e82b4b2 | 2461 | * @napi: NAPI context |
d64b5e85 ED |
2462 | * @poll: polling function |
2463 | * @weight: default weight | |
2464 | * | |
2465 | * This variant of netif_napi_add() should be used from drivers using NAPI | |
2466 | * to exclusively poll a TX queue. | |
2467 | * This avoids adding it into napi_hash[] and thus polluting that hash table. | |
2468 | */ | |
2469 | static inline void netif_tx_napi_add(struct net_device *dev, | |
2470 | struct napi_struct *napi, | |
2471 | int (*poll)(struct napi_struct *, int), | |
2472 | int weight) | |
2473 | { | |
2474 | set_bit(NAPI_STATE_NO_BUSY_POLL, &napi->state); | |
2475 | netif_napi_add(dev, napi, poll, weight); | |
2476 | } | |
2477 | ||
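/*
 * Sketch: a driver typically registers its poll function while probing
 * or opening the device and enables NAPI afterwards; foo_poll() and
 * priv are hypothetical.
 *
 *	netif_napi_add(dev, &priv->napi, foo_poll, NAPI_POLL_WEIGHT);
 *	napi_enable(&priv->napi);
 */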
5198d545 JK |
2478 | /** |
2479 | * __netif_napi_del - remove a NAPI context | |
2480 | * @napi: NAPI context | |
2481 | * | |
2482 | * Warning: caller must observe RCU grace period before freeing memory | |
2483 | * containing @napi. Drivers might want to call this helper to combine | |
2484 | * all the needed RCU grace periods into a single one. | |
2485 | */ | |
2486 | void __netif_napi_del(struct napi_struct *napi); | |
2487 | ||
d8156534 | 2488 | /** |
5e82b4b2 BH |
2489 | * netif_napi_del - remove a NAPI context |
2490 | * @napi: NAPI context | |
d8156534 | 2491 | * |
5e82b4b2 | 2492 | * netif_napi_del() removes a NAPI context from the network device NAPI list |
d8156534 | 2493 | */ |
5198d545 JK |
2494 | static inline void netif_napi_del(struct napi_struct *napi) |
2495 | { | |
2496 | __netif_napi_del(napi); | |
2497 | synchronize_net(); | |
2498 | } | |
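A hedged teardown sketch building on the warning above: a driver with several NAPI contexts can call __netif_napi_del() for each and pay a single RCU grace period. The rx_napi/tx_napi fields and my_teardown are illustrative names.

static void my_teardown(struct my_priv *priv)
{
	__netif_napi_del(&priv->rx_napi);
	__netif_napi_del(&priv->tx_napi);
	synchronize_net();	/* one grace period covers both contexts */
	/* only now is it safe to free the memory holding the napi structs */
}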
d565b0a1 HX |
2499 | |
2500 | struct napi_gro_cb { | |
78a478d0 | 2501 | /* Virtual address of skb_shinfo(skb)->frags[0].page + offset. */ |
5e82b4b2 | 2502 | void *frag0; |
78a478d0 | 2503 | |
7489594c HX |
2504 | /* Length of frag0. */ |
2505 | unsigned int frag0_len; | |
2506 | ||
86911732 | 2507 | /* This indicates where we are processing relative to skb->data. */ |
5e82b4b2 | 2508 | int data_offset; |
86911732 | 2509 | |
d565b0a1 | 2510 | /* This is non-zero if the packet cannot be merged with the new skb. */ |
bf5a755f JC |
2511 | u16 flush; |
2512 | ||
2513 | /* Save the IP ID here and check when we get to the transport layer */ | |
2514 | u16 flush_id; | |
d565b0a1 HX |
2515 | |
2516 | /* Number of segments aggregated. */ | |
2e71a6f8 ED |
2517 | u16 count; |
2518 | ||
15e2396d TH |
2519 | /* Start offset for remote checksum offload */ |
2520 | u16 gro_remcsum_start; | |
2521 | ||
2e71a6f8 ED |
2522 | /* jiffies when first packet was created/queued */ |
2523 | unsigned long age; | |
86347245 | 2524 | |
afe93325 | 2525 | /* Used in ipv6_gro_receive() and foo-over-udp */ |
b582ef09 OG |
2526 | u16 proto; |
2527 | ||
baa32ff4 TH |
2528 | /* This is non-zero if the packet may be of the same flow. */ |
2529 | u8 same_flow:1; | |
2530 | ||
fac8e0f5 JG |
2531 | /* Used in tunnel GRO receive */ |
2532 | u8 encap_mark:1; | |
573e8fca TH |
2533 | |
2534 | /* GRO checksum is valid */ | |
2535 | u8 csum_valid:1; | |
2536 | ||
662880f4 TH |
2537 | /* Number of checksums via CHECKSUM_UNNECESSARY */ |
2538 | u8 csum_cnt:3; | |
c3c7c254 | 2539 | |
baa32ff4 TH |
2540 | /* Free the skb? */ |
2541 | u8 free:2; | |
2542 | #define NAPI_GRO_FREE 1 | |
2543 | #define NAPI_GRO_FREE_STOLEN_HEAD 2 | |
2544 | ||
efc98d08 TH |
2545 | /* Used in foo-over-udp, set in udp[46]_gro_receive */ |
2546 | u8 is_ipv6:1; | |
2547 | ||
a0ca153f AD |
2548 | /* Used in GRE, set in fou/gue_gro_receive */ |
2549 | u8 is_fou:1; | |
2550 | ||
1530545e AD |
2551 | /* Used to determine if flush_id can be ignored */ |
2552 | u8 is_atomic:1; | |
2553 | ||
fcd91dd4 SD |
2554 | /* Number of gro_receive callbacks this packet already went through */ |
2555 | u8 recursion_counter:4; | |
2556 | ||
3a1296a3 SK |
2557 | /* GRO is done by frag_list pointer chaining. */ |
2558 | u8 is_flist:1; | |
baa32ff4 | 2559 | |
bf5a755f JC |
2560 | /* used to support CHECKSUM_COMPLETE for tunneling protocols */ |
2561 | __wsum csum; | |
2562 | ||
c3c7c254 ED |
2563 | /* used in skb_gro_receive() slow path */ |
2564 | struct sk_buff *last; | |
d565b0a1 HX |
2565 | }; |
2566 | ||
2567 | #define NAPI_GRO_CB(skb) ((struct napi_gro_cb *)(skb)->cb) | |
d8156534 | 2568 | |
fcd91dd4 SD |
2569 | #define GRO_RECURSION_LIMIT 15 |
2570 | static inline int gro_recursion_inc_test(struct sk_buff *skb) | |
2571 | { | |
2572 | return ++NAPI_GRO_CB(skb)->recursion_counter == GRO_RECURSION_LIMIT; | |
2573 | } | |
2574 | ||
d4546c25 DM |
2575 | typedef struct sk_buff *(*gro_receive_t)(struct list_head *, struct sk_buff *); |
2576 | static inline struct sk_buff *call_gro_receive(gro_receive_t cb, | |
2577 | struct list_head *head, | |
2578 | struct sk_buff *skb) | |
fcd91dd4 SD |
2579 | { |
2580 | if (unlikely(gro_recursion_inc_test(skb))) { | |
2581 | NAPI_GRO_CB(skb)->flush |= 1; | |
2582 | return NULL; | |
2583 | } | |
2584 | ||
2585 | return cb(head, skb); | |
2586 | } | |
2587 | ||
d4546c25 DM |
2588 | typedef struct sk_buff *(*gro_receive_sk_t)(struct sock *, struct list_head *, |
2589 | struct sk_buff *); | |
2590 | static inline struct sk_buff *call_gro_receive_sk(gro_receive_sk_t cb, | |
2591 | struct sock *sk, | |
2592 | struct list_head *head, | |
2593 | struct sk_buff *skb) | |
fcd91dd4 SD |
2594 | { |
2595 | if (unlikely(gro_recursion_inc_test(skb))) { | |
2596 | NAPI_GRO_CB(skb)->flush |= 1; | |
2597 | return NULL; | |
2598 | } | |
2599 | ||
2600 | return cb(sk, head, skb); | |
2601 | } | |
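An illustrative sketch of how an encapsulation's ->gro_receive callback might chain to an inner handler through call_gro_receive(), letting the recursion counter bound nesting; my_encap_gro_receive and inner_gro_receive are assumed names, not kernel symbols.

static struct sk_buff *inner_gro_receive(struct list_head *head,
					 struct sk_buff *skb);	/* assumed inner handler */

static struct sk_buff *my_encap_gro_receive(struct list_head *head,
					    struct sk_buff *skb)
{
	/* ... pull and validate the encapsulation header first ... */

	/* returns NULL and marks the skb for flush once
	 * GRO_RECURSION_LIMIT is reached
	 */
	return call_gro_receive(inner_gro_receive, head, skb);
}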
2602 | ||
1da177e4 | 2603 | struct packet_type { |
f2ccd8fa | 2604 | __be16 type; /* This is really htons(ether_type). */ |
fa788d98 | 2605 | bool ignore_outgoing; |
f2ccd8fa DM |
2606 | struct net_device *dev; /* NULL is wildcarded here */ |
2607 | int (*func) (struct sk_buff *, | |
2608 | struct net_device *, | |
2609 | struct packet_type *, | |
2610 | struct net_device *); | |
17266ee9 EC |
2611 | void (*list_func) (struct list_head *, |
2612 | struct packet_type *, | |
2613 | struct net_device *); | |
c0de08d0 EL |
2614 | bool (*id_match)(struct packet_type *ptype, |
2615 | struct sock *sk); | |
1da177e4 LT |
2616 | void *af_packet_priv; |
2617 | struct list_head list; | |
2618 | }; | |
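A hedged sketch of registering a handler built around this struct; ETH_P_ALL is used only as an example ethertype, and my_rcv/my_ptype are illustrative. dev_add_pack()/dev_remove_pack() are declared further down in this header.

static int my_rcv(struct sk_buff *skb, struct net_device *dev,
		  struct packet_type *pt, struct net_device *orig_dev)
{
	/* inspect the packet; the handler owns the skb from here on */
	consume_skb(skb);
	return NET_RX_SUCCESS;
}

static struct packet_type my_ptype __read_mostly = {
	.type	= htons(ETH_P_ALL),	/* or a specific ethertype */
	.func	= my_rcv,
};

/* dev_add_pack(&my_ptype) at init time, dev_remove_pack(&my_ptype) on exit */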
2619 | ||
f191a1d1 | 2620 | struct offload_callbacks { |
576a30eb | 2621 | struct sk_buff *(*gso_segment)(struct sk_buff *skb, |
c8f44aff | 2622 | netdev_features_t features); |
d4546c25 DM |
2623 | struct sk_buff *(*gro_receive)(struct list_head *head, |
2624 | struct sk_buff *skb); | |
299603e8 | 2625 | int (*gro_complete)(struct sk_buff *skb, int nhoff); |
f191a1d1 VY |
2626 | }; |
2627 | ||
2628 | struct packet_offload { | |
2629 | __be16 type; /* This is really htons(ether_type). */ | |
bdef7de4 | 2630 | u16 priority; |
f191a1d1 VY |
2631 | struct offload_callbacks callbacks; |
2632 | struct list_head list; | |
1da177e4 LT |
2633 | }; |
2634 | ||
5e82b4b2 | 2635 | /* often modified stats are per-CPU, other are shared (netdev->stats) */ |
8f84985f LR |
2636 | struct pcpu_sw_netstats { |
2637 | u64 rx_packets; | |
2638 | u64 rx_bytes; | |
2639 | u64 tx_packets; | |
2640 | u64 tx_bytes; | |
2641 | struct u64_stats_sync syncp; | |
9a5ee462 | 2642 | } __aligned(4 * sizeof(u64)); |
52bb6677 LR |
2643 | |
2644 | struct pcpu_lstats { | |
fd2f4737 ED |
2645 | u64_stats_t packets; |
2646 | u64_stats_t bytes; | |
52bb6677 | 2647 | struct u64_stats_sync syncp; |
9a5ee462 | 2648 | } __aligned(2 * sizeof(u64)); |
8f84985f | 2649 | |
de7d5084 ED |
2650 | void dev_lstats_read(struct net_device *dev, u64 *packets, u64 *bytes); |
2651 | ||
451b05f4 FF |
2652 | static inline void dev_sw_netstats_rx_add(struct net_device *dev, unsigned int len) |
2653 | { | |
2654 | struct pcpu_sw_netstats *tstats = this_cpu_ptr(dev->tstats); | |
2655 | ||
2656 | u64_stats_update_begin(&tstats->syncp); | |
2657 | tstats->rx_bytes += len; | |
2658 | tstats->rx_packets++; | |
2659 | u64_stats_update_end(&tstats->syncp); | |
2660 | } | |
2661 | ||
d3fd6548 HK |
2662 | static inline void dev_sw_netstats_tx_add(struct net_device *dev, |
2663 | unsigned int packets, | |
2664 | unsigned int len) | |
2665 | { | |
2666 | struct pcpu_sw_netstats *tstats = this_cpu_ptr(dev->tstats); | |
2667 | ||
2668 | u64_stats_update_begin(&tstats->syncp); | |
2669 | tstats->tx_bytes += len; | |
2670 | tstats->tx_packets += packets; | |
2671 | u64_stats_update_end(&tstats->syncp); | |
2672 | } | |
2673 | ||
dd5382a0 ED |
2674 | static inline void dev_lstats_add(struct net_device *dev, unsigned int len) |
2675 | { | |
2676 | struct pcpu_lstats *lstats = this_cpu_ptr(dev->lstats); | |
2677 | ||
2678 | u64_stats_update_begin(&lstats->syncp); | |
fd2f4737 ED |
2679 | u64_stats_add(&lstats->bytes, len); |
2680 | u64_stats_inc(&lstats->packets); | |
dd5382a0 ED |
2681 | u64_stats_update_end(&lstats->syncp); |
2682 | } | |
2683 | ||
aabc92bb PNA |
2684 | #define __netdev_alloc_pcpu_stats(type, gfp) \ |
2685 | ({ \ | |
2686 | typeof(type) __percpu *pcpu_stats = alloc_percpu_gfp(type, gfp);\ | |
2687 | if (pcpu_stats) { \ | |
2688 | int __cpu; \ | |
2689 | for_each_possible_cpu(__cpu) { \ | |
2690 | typeof(type) *stat; \ | |
2691 | stat = per_cpu_ptr(pcpu_stats, __cpu); \ | |
2692 | u64_stats_init(&stat->syncp); \ | |
2693 | } \ | |
2694 | } \ | |
2695 | pcpu_stats; \ | |
1c213bd2 WC |
2696 | }) |
2697 | ||
aabc92bb | 2698 | #define netdev_alloc_pcpu_stats(type) \ |
326fcfa5 | 2699 | __netdev_alloc_pcpu_stats(type, GFP_KERNEL) |
aabc92bb | 2700 | |
81b01894 HK |
2701 | #define devm_netdev_alloc_pcpu_stats(dev, type) \ |
2702 | ({ \ | |
2703 | typeof(type) __percpu *pcpu_stats = devm_alloc_percpu(dev, type);\ | |
2704 | if (pcpu_stats) { \ | |
2705 | int __cpu; \ | |
2706 | for_each_possible_cpu(__cpu) { \ | |
2707 | typeof(type) *stat; \ | |
2708 | stat = per_cpu_ptr(pcpu_stats, __cpu); \ | |
2709 | u64_stats_init(&stat->syncp); \ | |
2710 | } \ | |
2711 | } \ | |
2712 | pcpu_stats; \ | |
2713 | }) | |
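A short sketch, assuming a driver that owns dev->tstats, showing how the allocation macros above pair with the dev_sw_netstats_*_add() helpers; my_ndo_init/my_rx are hypothetical and the matching free_percpu() on teardown is omitted.

static int my_ndo_init(struct net_device *dev)
{
	dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
	return dev->tstats ? 0 : -ENOMEM;
}

static void my_rx(struct net_device *dev, struct sk_buff *skb)
{
	dev_sw_netstats_rx_add(dev, skb->len);	/* lockless per-CPU update */
}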
2714 | ||
764f5e54 JP |
2715 | enum netdev_lag_tx_type { |
2716 | NETDEV_LAG_TX_TYPE_UNKNOWN, | |
2717 | NETDEV_LAG_TX_TYPE_RANDOM, | |
2718 | NETDEV_LAG_TX_TYPE_BROADCAST, | |
2719 | NETDEV_LAG_TX_TYPE_ROUNDROBIN, | |
2720 | NETDEV_LAG_TX_TYPE_ACTIVEBACKUP, | |
2721 | NETDEV_LAG_TX_TYPE_HASH, | |
2722 | }; | |
2723 | ||
f44aa9ef JH |
2724 | enum netdev_lag_hash { |
2725 | NETDEV_LAG_HASH_NONE, | |
2726 | NETDEV_LAG_HASH_L2, | |
2727 | NETDEV_LAG_HASH_L34, | |
2728 | NETDEV_LAG_HASH_L23, | |
2729 | NETDEV_LAG_HASH_E23, | |
2730 | NETDEV_LAG_HASH_E34, | |
7b8fc010 | 2731 | NETDEV_LAG_HASH_VLAN_SRCMAC, |
f44aa9ef JH |
2732 | NETDEV_LAG_HASH_UNKNOWN, |
2733 | }; | |
2734 | ||
764f5e54 JP |
2735 | struct netdev_lag_upper_info { |
2736 | enum netdev_lag_tx_type tx_type; | |
f44aa9ef | 2737 | enum netdev_lag_hash hash_type; |
764f5e54 JP |
2738 | }; |
2739 | ||
fb1b2e3c JP |
2740 | struct netdev_lag_lower_state_info { |
2741 | u8 link_up : 1, | |
2742 | tx_enabled : 1; | |
2743 | }; | |
2744 | ||
1da177e4 LT |
2745 | #include <linux/notifier.h> |
2746 | ||
ede2762d KT |
2747 | /* netdevice notifier chain. Please remember to update netdev_cmd_to_name() |
2748 | * and the rtnetlink notification exclusion list in rtnetlink_event() when | |
2749 | * adding new types. | |
dcfe1421 | 2750 | */ |
ede2762d KT |
2751 | enum netdev_cmd { |
2752 | NETDEV_UP = 1, /* For now you can't veto a device up/down */ | |
2753 | NETDEV_DOWN, | |
2754 | NETDEV_REBOOT, /* Tell a protocol stack a network interface | |
dcfe1421 AW |
2755 | detected a hardware crash and restarted |
2756 | - we can use this eg to kick tcp sessions | |
2757 | once done */ | |
ede2762d KT |
2758 | NETDEV_CHANGE, /* Notify device state change */ |
2759 | NETDEV_REGISTER, | |
2760 | NETDEV_UNREGISTER, | |
2761 | NETDEV_CHANGEMTU, /* notify after mtu change happened */ | |
1570415f PM |
2762 | NETDEV_CHANGEADDR, /* notify after the address change */ |
2763 | NETDEV_PRE_CHANGEADDR, /* notify before the address change */ | |
ede2762d KT |
2764 | NETDEV_GOING_DOWN, |
2765 | NETDEV_CHANGENAME, | |
2766 | NETDEV_FEAT_CHANGE, | |
2767 | NETDEV_BONDING_FAILOVER, | |
2768 | NETDEV_PRE_UP, | |
2769 | NETDEV_PRE_TYPE_CHANGE, | |
2770 | NETDEV_POST_TYPE_CHANGE, | |
2771 | NETDEV_POST_INIT, | |
ede2762d KT |
2772 | NETDEV_RELEASE, |
2773 | NETDEV_NOTIFY_PEERS, | |
2774 | NETDEV_JOIN, | |
2775 | NETDEV_CHANGEUPPER, | |
2776 | NETDEV_RESEND_IGMP, | |
2777 | NETDEV_PRECHANGEMTU, /* notify before mtu change happened */ | |
2778 | NETDEV_CHANGEINFODATA, | |
2779 | NETDEV_BONDING_INFO, | |
2780 | NETDEV_PRECHANGEUPPER, | |
2781 | NETDEV_CHANGELOWERSTATE, | |
2782 | NETDEV_UDP_TUNNEL_PUSH_INFO, | |
2783 | NETDEV_UDP_TUNNEL_DROP_INFO, | |
2784 | NETDEV_CHANGE_TX_QUEUE_LEN, | |
9daae9bd GP |
2785 | NETDEV_CVLAN_FILTER_PUSH_INFO, |
2786 | NETDEV_CVLAN_FILTER_DROP_INFO, | |
2787 | NETDEV_SVLAN_FILTER_PUSH_INFO, | |
2788 | NETDEV_SVLAN_FILTER_DROP_INFO, | |
ede2762d KT |
2789 | }; |
2790 | const char *netdev_cmd_to_name(enum netdev_cmd cmd); | |
dcfe1421 | 2791 | |
f629d208 JP |
2792 | int register_netdevice_notifier(struct notifier_block *nb); |
2793 | int unregister_netdevice_notifier(struct notifier_block *nb); | |
a30c7b42 JP |
2794 | int register_netdevice_notifier_net(struct net *net, struct notifier_block *nb); |
2795 | int unregister_netdevice_notifier_net(struct net *net, | |
2796 | struct notifier_block *nb); | |
93642e14 JP |
2797 | int register_netdevice_notifier_dev_net(struct net_device *dev, |
2798 | struct notifier_block *nb, | |
2799 | struct netdev_net_notifier *nn); | |
2800 | int unregister_netdevice_notifier_dev_net(struct net_device *dev, | |
2801 | struct notifier_block *nb, | |
2802 | struct netdev_net_notifier *nn); | |
351638e7 JP |
2803 | |
2804 | struct netdev_notifier_info { | |
51d0c047 DA |
2805 | struct net_device *dev; |
2806 | struct netlink_ext_ack *extack; | |
351638e7 JP |
2807 | }; |
2808 | ||
af7d6cce SD |
2809 | struct netdev_notifier_info_ext { |
2810 | struct netdev_notifier_info info; /* must be first */ | |
2811 | union { | |
2812 | u32 mtu; | |
2813 | } ext; | |
2814 | }; | |
2815 | ||
be9efd36 JP |
2816 | struct netdev_notifier_change_info { |
2817 | struct netdev_notifier_info info; /* must be first */ | |
2818 | unsigned int flags_changed; | |
2819 | }; | |
2820 | ||
0e4ead9d JP |
2821 | struct netdev_notifier_changeupper_info { |
2822 | struct netdev_notifier_info info; /* must be first */ | |
2823 | struct net_device *upper_dev; /* new upper dev */ | |
2824 | bool master; /* is upper dev master */ | |
5e82b4b2 | 2825 | bool linking; /* is the notification for link or unlink */ |
29bf24af | 2826 | void *upper_info; /* upper dev info */ |
0e4ead9d JP |
2827 | }; |
2828 | ||
04d48266 JP |
2829 | struct netdev_notifier_changelowerstate_info { |
2830 | struct netdev_notifier_info info; /* must be first */ | |
2831 | void *lower_state_info; /* is lower dev state */ | |
2832 | }; | |
2833 | ||
1570415f PM |
2834 | struct netdev_notifier_pre_changeaddr_info { |
2835 | struct netdev_notifier_info info; /* must be first */ | |
2836 | const unsigned char *dev_addr; | |
2837 | }; | |
2838 | ||
75538c2b CW |
2839 | static inline void netdev_notifier_info_init(struct netdev_notifier_info *info, |
2840 | struct net_device *dev) | |
2841 | { | |
2842 | info->dev = dev; | |
51d0c047 | 2843 | info->extack = NULL; |
75538c2b CW |
2844 | } |
2845 | ||
351638e7 JP |
2846 | static inline struct net_device * |
2847 | netdev_notifier_info_to_dev(const struct netdev_notifier_info *info) | |
2848 | { | |
2849 | return info->dev; | |
2850 | } | |
2851 | ||
51d0c047 DA |
2852 | static inline struct netlink_ext_ack * |
2853 | netdev_notifier_info_to_extack(const struct netdev_notifier_info *info) | |
2854 | { | |
2855 | return info->extack; | |
2856 | } | |
2857 | ||
f629d208 | 2858 | int call_netdevice_notifiers(unsigned long val, struct net_device *dev); |
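A minimal sketch of a netdevice notifier using the helpers above; my_netdev_event and my_nb are illustrative names, and the block would normally be registered from module init with register_netdevice_notifier().

static int my_netdev_event(struct notifier_block *nb,
			   unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);

	switch (event) {
	case NETDEV_UP:
		netdev_info(dev, "interface is up\n");
		break;
	case NETDEV_GOING_DOWN:
		netdev_info(dev, "interface is going down\n");
		break;
	}
	return NOTIFY_DONE;
}

static struct notifier_block my_nb = {
	.notifier_call = my_netdev_event,
};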
dcfe1421 AW |
2859 | |
2860 | ||
1da177e4 LT |
2861 | extern rwlock_t dev_base_lock; /* Device list lock */ |
2862 | ||
881d966b EB |
2863 | #define for_each_netdev(net, d) \ |
2864 | list_for_each_entry(d, &(net)->dev_base_head, dev_list) | |
dcbccbd4 EB |
2865 | #define for_each_netdev_reverse(net, d) \ |
2866 | list_for_each_entry_reverse(d, &(net)->dev_base_head, dev_list) | |
c6d14c84 ED |
2867 | #define for_each_netdev_rcu(net, d) \ |
2868 | list_for_each_entry_rcu(d, &(net)->dev_base_head, dev_list) | |
881d966b EB |
2869 | #define for_each_netdev_safe(net, d, n) \ |
2870 | list_for_each_entry_safe(d, n, &(net)->dev_base_head, dev_list) | |
2871 | #define for_each_netdev_continue(net, d) \ | |
2872 | list_for_each_entry_continue(d, &(net)->dev_base_head, dev_list) | |
afa0df59 JP |
2873 | #define for_each_netdev_continue_reverse(net, d) \ |
2874 | list_for_each_entry_continue_reverse(d, &(net)->dev_base_head, \ | |
2875 | dev_list) | |
254245d2 | 2876 | #define for_each_netdev_continue_rcu(net, d) \ |
2877 | list_for_each_entry_continue_rcu(d, &(net)->dev_base_head, dev_list) | |
8a7fbfab | 2878 | #define for_each_netdev_in_bond_rcu(bond, slave) \ |
2879 | for_each_netdev_rcu(&init_net, slave) \ | |
4ccce02e | 2880 | if (netdev_master_upper_dev_get_rcu(slave) == (bond)) |
881d966b | 2881 | #define net_device_entry(lh) list_entry(lh, struct net_device, dev_list) |
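A brief sketch of walking the device list of a namespace with the RCU iterator above; the printed message is purely illustrative.

static void my_dump_devices(struct net *net)
{
	struct net_device *dev;

	rcu_read_lock();
	for_each_netdev_rcu(net, dev)
		pr_info("%s: ifindex %d\n", dev->name, dev->ifindex);
	rcu_read_unlock();
}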
7562f876 | 2882 | |
a050c33f DL |
2883 | static inline struct net_device *next_net_device(struct net_device *dev) |
2884 | { | |
2885 | struct list_head *lh; | |
2886 | struct net *net; | |
2887 | ||
c346dca1 | 2888 | net = dev_net(dev); |
a050c33f DL |
2889 | lh = dev->dev_list.next; |
2890 | return lh == &net->dev_base_head ? NULL : net_device_entry(lh); | |
2891 | } | |
2892 | ||
ce81b76a ED |
2893 | static inline struct net_device *next_net_device_rcu(struct net_device *dev) |
2894 | { | |
2895 | struct list_head *lh; | |
2896 | struct net *net; | |
2897 | ||
2898 | net = dev_net(dev); | |
ccf43438 | 2899 | lh = rcu_dereference(list_next_rcu(&dev->dev_list)); |
ce81b76a ED |
2900 | return lh == &net->dev_base_head ? NULL : net_device_entry(lh); |
2901 | } | |
2902 | ||
a050c33f DL |
2903 | static inline struct net_device *first_net_device(struct net *net) |
2904 | { | |
2905 | return list_empty(&net->dev_base_head) ? NULL : | |
2906 | net_device_entry(net->dev_base_head.next); | |
2907 | } | |
7562f876 | 2908 | |
ccf43438 ED |
2909 | static inline struct net_device *first_net_device_rcu(struct net *net) |
2910 | { | |
2911 | struct list_head *lh = rcu_dereference(list_next_rcu(&net->dev_base_head)); | |
2912 | ||
2913 | return lh == &net->dev_base_head ? NULL : net_device_entry(lh); | |
2914 | } | |
2915 | ||
f629d208 JP |
2916 | int netdev_boot_setup_check(struct net_device *dev); |
2917 | unsigned long netdev_boot_base(const char *prefix, int unit); | |
2918 | struct net_device *dev_getbyhwaddr_rcu(struct net *net, unsigned short type, | |
2919 | const char *hwaddr); | |
2920 | struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type); | |
f629d208 JP |
2921 | void dev_add_pack(struct packet_type *pt); |
2922 | void dev_remove_pack(struct packet_type *pt); | |
2923 | void __dev_remove_pack(struct packet_type *pt); | |
2924 | void dev_add_offload(struct packet_offload *po); | |
2925 | void dev_remove_offload(struct packet_offload *po); | |
f629d208 | 2926 | |
a54acb3a | 2927 | int dev_get_iflink(const struct net_device *dev); |
fc4099f1 | 2928 | int dev_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb); |
ddb94eaf PNA |
2929 | int dev_fill_forward_path(const struct net_device *dev, const u8 *daddr, |
2930 | struct net_device_path_stack *stack); | |
6c555490 WC |
2931 | struct net_device *__dev_get_by_flags(struct net *net, unsigned short flags, |
2932 | unsigned short mask); | |
f629d208 JP |
2933 | struct net_device *dev_get_by_name(struct net *net, const char *name); |
2934 | struct net_device *dev_get_by_name_rcu(struct net *net, const char *name); | |
2935 | struct net_device *__dev_get_by_name(struct net *net, const char *name); | |
2936 | int dev_alloc_name(struct net_device *dev, const char *name); | |
00f54e68 | 2937 | int dev_open(struct net_device *dev, struct netlink_ext_ack *extack); |
7051b88a | 2938 | void dev_close(struct net_device *dev); |
2939 | void dev_close_many(struct list_head *head, bool unlink); | |
f629d208 | 2940 | void dev_disable_lro(struct net_device *dev); |
0c4b51f0 | 2941 | int dev_loopback_xmit(struct net *net, struct sock *sk, struct sk_buff *newskb); |
a4ea8a3d | 2942 | u16 dev_pick_tx_zero(struct net_device *dev, struct sk_buff *skb, |
a350ecce | 2943 | struct net_device *sb_dev); |
a4ea8a3d | 2944 | u16 dev_pick_tx_cpu_id(struct net_device *dev, struct sk_buff *skb, |
a350ecce | 2945 | struct net_device *sb_dev); |
36ccdf85 | 2946 | |
2b4aa3ce | 2947 | int dev_queue_xmit(struct sk_buff *skb); |
eadec877 | 2948 | int dev_queue_xmit_accel(struct sk_buff *skb, struct net_device *sb_dev); |
36ccdf85 BT |
2949 | int __dev_direct_xmit(struct sk_buff *skb, u16 queue_id); |
2950 | ||
2951 | static inline int dev_direct_xmit(struct sk_buff *skb, u16 queue_id) | |
2952 | { | |
2953 | int ret; | |
2954 | ||
2955 | ret = __dev_direct_xmit(skb, queue_id); | |
2956 | if (!dev_xmit_complete(ret)) | |
2957 | kfree_skb(skb); | |
2958 | return ret; | |
2959 | } | |
2960 | ||
f629d208 JP |
2961 | int register_netdevice(struct net_device *dev); |
2962 | void unregister_netdevice_queue(struct net_device *dev, struct list_head *head); | |
2963 | void unregister_netdevice_many(struct list_head *head); | |
44a0873d ED |
2964 | static inline void unregister_netdevice(struct net_device *dev) |
2965 | { | |
2966 | unregister_netdevice_queue(dev, NULL); | |
2967 | } | |
2968 | ||
f629d208 JP |
2969 | int netdev_refcnt_read(const struct net_device *dev); |
2970 | void free_netdev(struct net_device *dev); | |
74d332c1 | 2971 | void netdev_freemem(struct net_device *dev); |
f629d208 | 2972 | int init_dummy_netdev(struct net_device *dev); |
937f1ba5 | 2973 | |
cff9f12b MG |
2974 | struct net_device *netdev_get_xmit_slave(struct net_device *dev, |
2975 | struct sk_buff *skb, | |
2976 | bool all_slaves); | |
719a402c TT |
2977 | struct net_device *netdev_sk_get_lowest_dev(struct net_device *dev, |
2978 | struct sock *sk); | |
f629d208 JP |
2979 | struct net_device *dev_get_by_index(struct net *net, int ifindex); |
2980 | struct net_device *__dev_get_by_index(struct net *net, int ifindex); | |
2981 | struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex); | |
90b602f8 | 2982 | struct net_device *dev_get_by_napi_id(unsigned int napi_id); |
f629d208 JP |
2983 | int netdev_get_name(struct net *net, char *name, int ifindex); |
2984 | int dev_restart(struct net_device *dev); | |
d4546c25 | 2985 | int skb_gro_receive(struct sk_buff *p, struct sk_buff *skb); |
3a1296a3 | 2986 | int skb_gro_receive_list(struct sk_buff *p, struct sk_buff *skb); |
86911732 HX |
2987 | |
2988 | static inline unsigned int skb_gro_offset(const struct sk_buff *skb) | |
2989 | { | |
2990 | return NAPI_GRO_CB(skb)->data_offset; | |
2991 | } | |
2992 | ||
2993 | static inline unsigned int skb_gro_len(const struct sk_buff *skb) | |
2994 | { | |
2995 | return skb->len - NAPI_GRO_CB(skb)->data_offset; | |
2996 | } | |
2997 | ||
2998 | static inline void skb_gro_pull(struct sk_buff *skb, unsigned int len) | |
2999 | { | |
3000 | NAPI_GRO_CB(skb)->data_offset += len; | |
3001 | } | |
3002 | ||
a5b1cf28 HX |
3003 | static inline void *skb_gro_header_fast(struct sk_buff *skb, |
3004 | unsigned int offset) | |
86911732 | 3005 | { |
a5b1cf28 HX |
3006 | return NAPI_GRO_CB(skb)->frag0 + offset; |
3007 | } | |
78a478d0 | 3008 | |
a5b1cf28 HX |
3009 | static inline int skb_gro_header_hard(struct sk_buff *skb, unsigned int hlen) |
3010 | { | |
3011 | return NAPI_GRO_CB(skb)->frag0_len < hlen; | |
3012 | } | |
78a478d0 | 3013 | |
57ea52a8 HX |
3014 | static inline void skb_gro_frag0_invalidate(struct sk_buff *skb) |
3015 | { | |
3016 | NAPI_GRO_CB(skb)->frag0 = NULL; | |
3017 | NAPI_GRO_CB(skb)->frag0_len = 0; | |
3018 | } | |
3019 | ||
a5b1cf28 HX |
3020 | static inline void *skb_gro_header_slow(struct sk_buff *skb, unsigned int hlen, |
3021 | unsigned int offset) | |
3022 | { | |
17dd759c HX |
3023 | if (!pskb_may_pull(skb, hlen)) |
3024 | return NULL; | |
3025 | ||
57ea52a8 | 3026 | skb_gro_frag0_invalidate(skb); |
17dd759c | 3027 | return skb->data + offset; |
86911732 | 3028 | } |
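A hedged sketch of the usual fast/slow header access pattern in a ->gro_receive callback; struct my_hdr and my_gro_receive are assumed names, not real kernel types.

struct my_hdr {
	__be16 flags;
	__be16 proto;
};

static struct sk_buff *my_gro_receive(struct list_head *head,
				      struct sk_buff *skb)
{
	unsigned int off = skb_gro_offset(skb);
	unsigned int hlen = off + sizeof(struct my_hdr);
	struct my_hdr *hdr;

	hdr = skb_gro_header_fast(skb, off);
	if (skb_gro_header_hard(skb, hlen)) {
		hdr = skb_gro_header_slow(skb, hlen, off);
		if (unlikely(!hdr)) {
			NAPI_GRO_CB(skb)->flush = 1;	/* give up on GRO */
			return NULL;
		}
	}

	/* ... inspect hdr, set same_flow/flush, chain to an inner handler ... */
	return NULL;
}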
1da177e4 | 3029 | |
36e7b1b8 HX |
3030 | static inline void *skb_gro_network_header(struct sk_buff *skb) |
3031 | { | |
78d3fd0b HX |
3032 | return (NAPI_GRO_CB(skb)->frag0 ?: skb->data) + |
3033 | skb_network_offset(skb); | |
36e7b1b8 HX |
3034 | } |
3035 | ||
bf5a755f JC |
3036 | static inline void skb_gro_postpull_rcsum(struct sk_buff *skb, |
3037 | const void *start, unsigned int len) | |
3038 | { | |
573e8fca | 3039 | if (NAPI_GRO_CB(skb)->csum_valid) |
bf5a755f JC |
3040 | NAPI_GRO_CB(skb)->csum = csum_sub(NAPI_GRO_CB(skb)->csum, |
3041 | csum_partial(start, len, 0)); | |
3042 | } | |
3043 | ||
573e8fca TH |
3044 | /* GRO checksum functions. These are logical equivalents of the normal |
3045 | * checksum functions (in skbuff.h) except that they operate on the GRO | |
3046 | * offsets and fields in sk_buff. | |
3047 | */ | |
3048 | ||
3049 | __sum16 __skb_gro_checksum_complete(struct sk_buff *skb); | |
3050 | ||
15e2396d TH |
3051 | static inline bool skb_at_gro_remcsum_start(struct sk_buff *skb) |
3052 | { | |
b7fe10e5 | 3053 | return (NAPI_GRO_CB(skb)->gro_remcsum_start == skb_gro_offset(skb)); |
15e2396d TH |
3054 | } |
3055 | ||
573e8fca TH |
3056 | static inline bool __skb_gro_checksum_validate_needed(struct sk_buff *skb, |
3057 | bool zero_okay, | |
3058 | __sum16 check) | |
3059 | { | |
6edec0e6 TH |
3060 | return ((skb->ip_summed != CHECKSUM_PARTIAL || |
3061 | skb_checksum_start_offset(skb) < | |
3062 | skb_gro_offset(skb)) && | |
15e2396d | 3063 | !skb_at_gro_remcsum_start(skb) && |
662880f4 | 3064 | NAPI_GRO_CB(skb)->csum_cnt == 0 && |
573e8fca TH |
3065 | (!zero_okay || check)); |
3066 | } | |
3067 | ||
3068 | static inline __sum16 __skb_gro_checksum_validate_complete(struct sk_buff *skb, | |
3069 | __wsum psum) | |
3070 | { | |
3071 | if (NAPI_GRO_CB(skb)->csum_valid && | |
3072 | !csum_fold(csum_add(psum, NAPI_GRO_CB(skb)->csum))) | |
3073 | return 0; | |
3074 | ||
3075 | NAPI_GRO_CB(skb)->csum = psum; | |
3076 | ||
3077 | return __skb_gro_checksum_complete(skb); | |
3078 | } | |
3079 | ||
573e8fca TH |
3080 | static inline void skb_gro_incr_csum_unnecessary(struct sk_buff *skb) |
3081 | { | |
662880f4 TH |
3082 | if (NAPI_GRO_CB(skb)->csum_cnt > 0) { |
3083 | /* Consume a checksum from CHECKSUM_UNNECESSARY */ | |
3084 | NAPI_GRO_CB(skb)->csum_cnt--; | |
3085 | } else { | |
3086 | /* Update skb for CHECKSUM_UNNECESSARY and csum_level when we | |
3087 | * verified a new top level checksum or an encapsulated one | |
3088 | * during GRO. This saves work if we fall back to the normal path. | |
3089 | */ | |
3090 | __skb_incr_checksum_unnecessary(skb); | |
573e8fca TH |
3091 | } |
3092 | } | |
3093 | ||
3094 | #define __skb_gro_checksum_validate(skb, proto, zero_okay, check, \ | |
3095 | compute_pseudo) \ | |
3096 | ({ \ | |
3097 | __sum16 __ret = 0; \ | |
3098 | if (__skb_gro_checksum_validate_needed(skb, zero_okay, check)) \ | |
3099 | __ret = __skb_gro_checksum_validate_complete(skb, \ | |
3100 | compute_pseudo(skb, proto)); \ | |
219f1d79 | 3101 | if (!__ret) \ |
573e8fca TH |
3102 | skb_gro_incr_csum_unnecessary(skb); \ |
3103 | __ret; \ | |
3104 | }) | |
3105 | ||
3106 | #define skb_gro_checksum_validate(skb, proto, compute_pseudo) \ | |
3107 | __skb_gro_checksum_validate(skb, proto, false, 0, compute_pseudo) | |
3108 | ||
3109 | #define skb_gro_checksum_validate_zero_check(skb, proto, check, \ | |
3110 | compute_pseudo) \ | |
3111 | __skb_gro_checksum_validate(skb, proto, true, check, compute_pseudo) | |
3112 | ||
3113 | #define skb_gro_checksum_simple_validate(skb) \ | |
3114 | __skb_gro_checksum_validate(skb, 0, false, 0, null_compute_pseudo) | |
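A small sketch of how a transport ->gro_receive might use the simple validation macro above before trusting its header; my_l4_gro_receive is an illustrative name.

static struct sk_buff *my_l4_gro_receive(struct list_head *head,
					 struct sk_buff *skb)
{
	/* a non-zero return means the checksum could not be validated */
	if (skb_gro_checksum_simple_validate(skb)) {
		NAPI_GRO_CB(skb)->flush = 1;
		return NULL;
	}

	/* ... proceed with flow matching against @head ... */
	return NULL;
}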
3115 | ||
d96535a1 TH |
3116 | static inline bool __skb_gro_checksum_convert_check(struct sk_buff *skb) |
3117 | { | |
3118 | return (NAPI_GRO_CB(skb)->csum_cnt == 0 && | |
3119 | !NAPI_GRO_CB(skb)->csum_valid); | |
3120 | } | |
3121 | ||
3122 | static inline void __skb_gro_checksum_convert(struct sk_buff *skb, | |
b39c78b2 | 3123 | __wsum pseudo) |
d96535a1 TH |
3124 | { |
3125 | NAPI_GRO_CB(skb)->csum = ~pseudo; | |
3126 | NAPI_GRO_CB(skb)->csum_valid = 1; | |
3127 | } | |
3128 | ||
b39c78b2 | 3129 | #define skb_gro_checksum_try_convert(skb, proto, compute_pseudo) \ |
d96535a1 TH |
3130 | do { \ |
3131 | if (__skb_gro_checksum_convert_check(skb)) \ | |
b39c78b2 | 3132 | __skb_gro_checksum_convert(skb, \ |
d96535a1 TH |
3133 | compute_pseudo(skb, proto)); \ |
3134 | } while (0) | |
3135 | ||
26c4f7da TH |
3136 | struct gro_remcsum { |
3137 | int offset; | |
3138 | __wsum delta; | |
3139 | }; | |
3140 | ||
3141 | static inline void skb_gro_remcsum_init(struct gro_remcsum *grc) | |
3142 | { | |
846cd667 | 3143 | grc->offset = 0; |
26c4f7da TH |
3144 | grc->delta = 0; |
3145 | } | |
3146 | ||
b7fe10e5 TH |
3147 | static inline void *skb_gro_remcsum_process(struct sk_buff *skb, void *ptr, |
3148 | unsigned int off, size_t hdrlen, | |
3149 | int start, int offset, | |
3150 | struct gro_remcsum *grc, | |
3151 | bool nopartial) | |
dcdc8994 TH |
3152 | { |
3153 | __wsum delta; | |
b7fe10e5 | 3154 | size_t plen = hdrlen + max_t(size_t, offset + sizeof(u16), start); |
dcdc8994 TH |
3155 | |
3156 | BUG_ON(!NAPI_GRO_CB(skb)->csum_valid); | |
3157 | ||
15e2396d | 3158 | if (!nopartial) { |
b7fe10e5 TH |
3159 | NAPI_GRO_CB(skb)->gro_remcsum_start = off + hdrlen + start; |
3160 | return ptr; | |
3161 | } | |
3162 | ||
3163 | ptr = skb_gro_header_fast(skb, off); | |
3164 | if (skb_gro_header_hard(skb, off + plen)) { | |
3165 | ptr = skb_gro_header_slow(skb, off + plen, off); | |
3166 | if (!ptr) | |
3167 | return NULL; | |
15e2396d TH |
3168 | } |
3169 | ||
b7fe10e5 TH |
3170 | delta = remcsum_adjust(ptr + hdrlen, NAPI_GRO_CB(skb)->csum, |
3171 | start, offset); | |
dcdc8994 TH |
3172 | |
3173 | /* Adjust skb->csum since we changed the packet */ | |
dcdc8994 | 3174 | NAPI_GRO_CB(skb)->csum = csum_add(NAPI_GRO_CB(skb)->csum, delta); |
26c4f7da | 3175 | |
b7fe10e5 | 3176 | grc->offset = off + hdrlen + offset; |
26c4f7da | 3177 | grc->delta = delta; |
b7fe10e5 TH |
3178 | |
3179 | return ptr; | |
dcdc8994 TH |
3180 | } |
3181 | ||
26c4f7da TH |
3182 | static inline void skb_gro_remcsum_cleanup(struct sk_buff *skb, |
3183 | struct gro_remcsum *grc) | |
3184 | { | |
b7fe10e5 TH |
3185 | void *ptr; |
3186 | size_t plen = grc->offset + sizeof(u16); | |
3187 | ||
26c4f7da TH |
3188 | if (!grc->delta) |
3189 | return; | |
3190 | ||
b7fe10e5 TH |
3191 | ptr = skb_gro_header_fast(skb, grc->offset); |
3192 | if (skb_gro_header_hard(skb, grc->offset + sizeof(u16))) { | |
3193 | ptr = skb_gro_header_slow(skb, plen, grc->offset); | |
3194 | if (!ptr) | |
3195 | return; | |
3196 | } | |
3197 | ||
3198 | remcsum_unadjust((__sum16 *)ptr, grc->delta); | |
26c4f7da | 3199 | } |
dcdc8994 | 3200 | |
25393d3f | 3201 | #ifdef CONFIG_XFRM_OFFLOAD |
d4546c25 | 3202 | static inline void skb_gro_flush_final(struct sk_buff *skb, struct sk_buff *pp, int flush) |
25393d3f SK |
3203 | { |
3204 | if (PTR_ERR(pp) != -EINPROGRESS) | |
3205 | NAPI_GRO_CB(skb)->flush |= flush; | |
3206 | } | |
603d4cf8 | 3207 | static inline void skb_gro_flush_final_remcsum(struct sk_buff *skb, |
5cd3da4b | 3208 | struct sk_buff *pp, |
603d4cf8 SD |
3209 | int flush, |
3210 | struct gro_remcsum *grc) | |
3211 | { | |
3212 | if (PTR_ERR(pp) != -EINPROGRESS) { | |
3213 | NAPI_GRO_CB(skb)->flush |= flush; | |
3214 | skb_gro_remcsum_cleanup(skb, grc); | |
3215 | skb->remcsum_offload = 0; | |
3216 | } | |
3217 | } | |
25393d3f | 3218 | #else |
d4546c25 | 3219 | static inline void skb_gro_flush_final(struct sk_buff *skb, struct sk_buff *pp, int flush) |
5f114163 SK |
3220 | { |
3221 | NAPI_GRO_CB(skb)->flush |= flush; | |
3222 | } | |
603d4cf8 | 3223 | static inline void skb_gro_flush_final_remcsum(struct sk_buff *skb, |
5cd3da4b | 3224 | struct sk_buff *pp, |
603d4cf8 SD |
3225 | int flush, |
3226 | struct gro_remcsum *grc) | |
3227 | { | |
3228 | NAPI_GRO_CB(skb)->flush |= flush; | |
3229 | skb_gro_remcsum_cleanup(skb, grc); | |
3230 | skb->remcsum_offload = 0; | |
3231 | } | |
25393d3f | 3232 | #endif |
5f114163 | 3233 | |
0c4e8581 SH |
3234 | static inline int dev_hard_header(struct sk_buff *skb, struct net_device *dev, |
3235 | unsigned short type, | |
3b04ddde | 3236 | const void *daddr, const void *saddr, |
95c96174 | 3237 | unsigned int len) |
0c4e8581 | 3238 | { |
f1ecfd5d | 3239 | if (!dev->header_ops || !dev->header_ops->create) |
0c4e8581 | 3240 | return 0; |
3b04ddde SH |
3241 | |
3242 | return dev->header_ops->create(skb, dev, type, daddr, saddr, len); | |
0c4e8581 SH |
3243 | } |
3244 | ||
b95cce35 SH |
3245 | static inline int dev_parse_header(const struct sk_buff *skb, |
3246 | unsigned char *haddr) | |
3247 | { | |
3248 | const struct net_device *dev = skb->dev; | |
3249 | ||
1b83336b | 3250 | if (!dev->header_ops || !dev->header_ops->parse) |
b95cce35 | 3251 | return 0; |
3b04ddde | 3252 | return dev->header_ops->parse(skb, haddr); |
b95cce35 SH |
3253 | } |
3254 | ||
e78b2915 MM |
3255 | static inline __be16 dev_parse_header_protocol(const struct sk_buff *skb) |
3256 | { | |
3257 | const struct net_device *dev = skb->dev; | |
3258 | ||
3259 | if (!dev->header_ops || !dev->header_ops->parse_protocol) | |
3260 | return 0; | |
3261 | return dev->header_ops->parse_protocol(skb); | |
3262 | } | |
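An illustrative sketch combining dev_hard_header() with dev_queue_xmit(); ETH_P_IP and my_build_and_send are assumptions made for the example, and headroom reservation is not shown.

static int my_build_and_send(struct net_device *dev, struct sk_buff *skb,
			     const unsigned char *dest)
{
	if (dev_hard_header(skb, dev, ETH_P_IP, dest, dev->dev_addr,
			    skb->len) < 0) {
		kfree_skb(skb);
		return -EINVAL;
	}
	skb->dev = dev;
	return dev_queue_xmit(skb);
}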
3263 | ||
2793a23a WB |
3264 | /* ll_header must have at least hard_header_len allocated */ |
3265 | static inline bool dev_validate_header(const struct net_device *dev, | |
3266 | char *ll_header, int len) | |
3267 | { | |
3268 | if (likely(len >= dev->hard_header_len)) | |
3269 | return true; | |
217e6fa2 WB |
3270 | if (len < dev->min_header_len) |
3271 | return false; | |
2793a23a WB |
3272 | |
3273 | if (capable(CAP_SYS_RAWIO)) { | |
3274 | memset(ll_header + len, 0, dev->hard_header_len - len); | |
3275 | return true; | |
3276 | } | |
3277 | ||
3278 | if (dev->header_ops && dev->header_ops->validate) | |
3279 | return dev->header_ops->validate(ll_header, len); | |
3280 | ||
3281 | return false; | |
3282 | } | |
3283 | ||
d5496990 EB |
3284 | static inline bool dev_has_header(const struct net_device *dev) |
3285 | { | |
3286 | return dev->header_ops && dev->header_ops->create; | |
3287 | } | |
3288 | ||
36fd633e AV |
3289 | typedef int gifconf_func_t(struct net_device * dev, char __user * bufptr, |
3290 | int len, int size); | |
f629d208 | 3291 | int register_gifconf(unsigned int family, gifconf_func_t *gifconf); |
1da177e4 LT |
3292 | static inline int unregister_gifconf(unsigned int family) |
3293 | { | |
3294 | return register_gifconf(family, NULL); | |
3295 | } | |
3296 | ||
99bbc707 | 3297 | #ifdef CONFIG_NET_FLOW_LIMIT |
5f121b9a | 3298 | #define FLOW_LIMIT_HISTORY (1 << 7) /* must be ^2 and !overflow buckets */ |
99bbc707 WB |
3299 | struct sd_flow_limit { |
3300 | u64 count; | |
3301 | unsigned int num_buckets; | |
3302 | unsigned int history_head; | |
3303 | u16 history[FLOW_LIMIT_HISTORY]; | |
3304 | u8 buckets[]; | |
3305 | }; | |
3306 | ||
3307 | extern int netdev_flow_limit_table_len; | |
3308 | #endif /* CONFIG_NET_FLOW_LIMIT */ | |
3309 | ||
1da177e4 | 3310 | /* |
5e82b4b2 | 3311 | * Incoming packets are placed on per-CPU queues |
1da177e4 | 3312 | */ |
d94d9fee | 3313 | struct softnet_data { |
1da177e4 | 3314 | struct list_head poll_list; |
6e7676c1 | 3315 | struct sk_buff_head process_queue; |
1da177e4 | 3316 | |
dee42870 | 3317 | /* stats */ |
cd7b5396 DM |
3318 | unsigned int processed; |
3319 | unsigned int time_squeeze; | |
cd7b5396 | 3320 | unsigned int received_rps; |
fd793d89 | 3321 | #ifdef CONFIG_RPS |
88751275 | 3322 | struct softnet_data *rps_ipi_list; |
4cdb1e2e ED |
3323 | #endif |
3324 | #ifdef CONFIG_NET_FLOW_LIMIT | |
3325 | struct sd_flow_limit __rcu *flow_limit; | |
3326 | #endif | |
3327 | struct Qdisc *output_queue; | |
3328 | struct Qdisc **output_queue_tailp; | |
3329 | struct sk_buff *completion_queue; | |
f53c7239 SK |
3330 | #ifdef CONFIG_XFRM_OFFLOAD |
3331 | struct sk_buff_head xfrm_backlog; | |
3332 | #endif | |
97cdcf37 FW |
3333 | /* written and read only by owning cpu: */ |
3334 | struct { | |
3335 | u16 recursion; | |
3336 | u8 more; | |
3337 | } xmit; | |
4cdb1e2e | 3338 | #ifdef CONFIG_RPS |
501e7ef5 ED |
3339 | /* input_queue_head should be written by cpu owning this struct, |
3340 | * and only read by other cpus. Worth using a cache line. | |
3341 | */ | |
3342 | unsigned int input_queue_head ____cacheline_aligned_in_smp; | |
3343 | ||
3344 | /* Elements below can be accessed between CPUs for RPS/RFS */ | |
966a9671 | 3345 | call_single_data_t csd ____cacheline_aligned_in_smp; |
88751275 ED |
3346 | struct softnet_data *rps_ipi_next; |
3347 | unsigned int cpu; | |
76cc8b13 | 3348 | unsigned int input_queue_tail; |
1e94d72f | 3349 | #endif |
95c96174 | 3350 | unsigned int dropped; |
0a9627f2 | 3351 | struct sk_buff_head input_pkt_queue; |
bea3348e | 3352 | struct napi_struct backlog; |
99bbc707 | 3353 | |
1da177e4 LT |
3354 | }; |
3355 | ||
76cc8b13 | 3356 | static inline void input_queue_head_incr(struct softnet_data *sd) |
fec5e652 TH |
3357 | { |
3358 | #ifdef CONFIG_RPS | |
76cc8b13 TH |
3359 | sd->input_queue_head++; |
3360 | #endif | |
3361 | } | |
3362 | ||
3363 | static inline void input_queue_tail_incr_save(struct softnet_data *sd, | |
3364 | unsigned int *qtail) | |
3365 | { | |
3366 | #ifdef CONFIG_RPS | |
3367 | *qtail = ++sd->input_queue_tail; | |
fec5e652 TH |
3368 | #endif |
3369 | } | |
3370 | ||
0a9627f2 | 3371 | DECLARE_PER_CPU_ALIGNED(struct softnet_data, softnet_data); |
1da177e4 | 3372 | |
97cdcf37 FW |
3373 | static inline int dev_recursion_level(void) |
3374 | { | |
28b05b92 | 3375 | return this_cpu_read(softnet_data.xmit.recursion); |
97cdcf37 FW |
3376 | } |
3377 | ||
fb7861d1 | 3378 | #define XMIT_RECURSION_LIMIT 8 |
97cdcf37 FW |
3379 | static inline bool dev_xmit_recursion(void) |
3380 | { | |
3381 | return unlikely(__this_cpu_read(softnet_data.xmit.recursion) > | |
3382 | XMIT_RECURSION_LIMIT); | |
3383 | } | |
3384 | ||
3385 | static inline void dev_xmit_recursion_inc(void) | |
3386 | { | |
3387 | __this_cpu_inc(softnet_data.xmit.recursion); | |
3388 | } | |
3389 | ||
3390 | static inline void dev_xmit_recursion_dec(void) | |
3391 | { | |
3392 | __this_cpu_dec(softnet_data.xmit.recursion); | |
3393 | } | |
3394 | ||
f629d208 | 3395 | void __netif_schedule(struct Qdisc *q); |
46e5da40 | 3396 | void netif_schedule_queue(struct netdev_queue *txq); |
86d804e1 | 3397 | |
fd2ea0a7 DM |
3398 | static inline void netif_tx_schedule_all(struct net_device *dev) |
3399 | { | |
3400 | unsigned int i; | |
3401 | ||
3402 | for (i = 0; i < dev->num_tx_queues; i++) | |
3403 | netif_schedule_queue(netdev_get_tx_queue(dev, i)); | |
3404 | } | |
3405 | ||
f9a7cbbf | 3406 | static __always_inline void netif_tx_start_queue(struct netdev_queue *dev_queue) |
d29f749e | 3407 | { |
73466498 | 3408 | clear_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state); |
d29f749e DJ |
3409 | } |
3410 | ||
bea3348e SH |
3411 | /** |
3412 | * netif_start_queue - allow transmit | |
3413 | * @dev: network device | |
3414 | * | |
3415 | * Allow upper layers to call the device hard_start_xmit routine. | |
3416 | */ | |
1da177e4 LT |
3417 | static inline void netif_start_queue(struct net_device *dev) |
3418 | { | |
e8a0464c | 3419 | netif_tx_start_queue(netdev_get_tx_queue(dev, 0)); |
1da177e4 LT |
3420 | } |
3421 | ||
fd2ea0a7 DM |
3422 | static inline void netif_tx_start_all_queues(struct net_device *dev) |
3423 | { | |
3424 | unsigned int i; | |
3425 | ||
3426 | for (i = 0; i < dev->num_tx_queues; i++) { | |
3427 | struct netdev_queue *txq = netdev_get_tx_queue(dev, i); | |
3428 | netif_tx_start_queue(txq); | |
3429 | } | |
3430 | } | |
3431 | ||
46e5da40 | 3432 | void netif_tx_wake_queue(struct netdev_queue *dev_queue); |
79d16385 | 3433 | |
d29f749e DJ |
3434 | /** |
3435 | * netif_wake_queue - restart transmit | |
3436 | * @dev: network device | |
3437 | * | |
3438 | * Allow upper layers to call the device hard_start_xmit routine. | |
3439 | * Used for flow control when transmit resources are available. | |
3440 | */ | |
79d16385 DM |
3441 | static inline void netif_wake_queue(struct net_device *dev) |
3442 | { | |
e8a0464c | 3443 | netif_tx_wake_queue(netdev_get_tx_queue(dev, 0)); |
1da177e4 LT |
3444 | } |
3445 | ||
fd2ea0a7 DM |
3446 | static inline void netif_tx_wake_all_queues(struct net_device *dev) |
3447 | { | |
3448 | unsigned int i; | |
3449 | ||
3450 | for (i = 0; i < dev->num_tx_queues; i++) { | |
3451 | struct netdev_queue *txq = netdev_get_tx_queue(dev, i); | |
3452 | netif_tx_wake_queue(txq); | |
3453 | } | |
3454 | } | |
3455 | ||
f9a7cbbf | 3456 | static __always_inline void netif_tx_stop_queue(struct netdev_queue *dev_queue) |
d29f749e | 3457 | { |
73466498 | 3458 | set_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state); |
d29f749e DJ |
3459 | } |
3460 | ||
bea3348e SH |
3461 | /** |
3462 | * netif_stop_queue - stop the transmit queue | |
3463 | * @dev: network device | |
3464 | * | |
3465 | * Stop upper layers calling the device hard_start_xmit routine. | |
3466 | * Used for flow control when transmit resources are unavailable. | |
3467 | */ | |
1da177e4 LT |
3468 | static inline void netif_stop_queue(struct net_device *dev) |
3469 | { | |
e8a0464c | 3470 | netif_tx_stop_queue(netdev_get_tx_queue(dev, 0)); |
1da177e4 LT |
3471 | } |
3472 | ||
a2029240 | 3473 | void netif_tx_stop_all_queues(struct net_device *dev); |
fd2ea0a7 | 3474 | |
4d29515f | 3475 | static inline bool netif_tx_queue_stopped(const struct netdev_queue *dev_queue) |
d29f749e | 3476 | { |
73466498 | 3477 | return test_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state); |
d29f749e DJ |
3478 | } |
3479 | ||
bea3348e SH |
3480 | /** |
3481 | * netif_queue_stopped - test if transmit queue is flowblocked | |
3482 | * @dev: network device | |
3483 | * | |
3484 | * Test if transmit queue on device is currently unable to send. | |
3485 | */ | |
4d29515f | 3486 | static inline bool netif_queue_stopped(const struct net_device *dev) |
1da177e4 | 3487 | { |
e8a0464c | 3488 | return netif_tx_queue_stopped(netdev_get_tx_queue(dev, 0)); |
1da177e4 LT |
3489 | } |
3490 | ||
4d29515f | 3491 | static inline bool netif_xmit_stopped(const struct netdev_queue *dev_queue) |
c3f26a26 | 3492 | { |
73466498 TH |
3493 | return dev_queue->state & QUEUE_STATE_ANY_XOFF; |
3494 | } | |
3495 | ||
8e2f1a63 DB |
3496 | static inline bool |
3497 | netif_xmit_frozen_or_stopped(const struct netdev_queue *dev_queue) | |
73466498 TH |
3498 | { |
3499 | return dev_queue->state & QUEUE_STATE_ANY_XOFF_OR_FROZEN; | |
3500 | } | |
3501 | ||
8e2f1a63 DB |
3502 | static inline bool |
3503 | netif_xmit_frozen_or_drv_stopped(const struct netdev_queue *dev_queue) | |
3504 | { | |
3505 | return dev_queue->state & QUEUE_STATE_DRV_XOFF_OR_FROZEN; | |
3506 | } | |
3507 | ||
f57bac3c VM |
3508 | /** |
3509 | * netdev_queue_set_dql_min_limit - set dql minimum limit | |
3510 | * @dev_queue: pointer to transmit queue | |
3511 | * @min_limit: dql minimum limit | |
3512 | * | |
3513 | * Forces xmit_more() to return true until the minimum threshold | |
3514 | * defined by @min_limit is reached (or until the tx queue is | |
3515 | * empty). Warning: to be used with care; misuse will impact | |
3516 | * latency. | |
3517 | */ | |
3518 | static inline void netdev_queue_set_dql_min_limit(struct netdev_queue *dev_queue, | |
3519 | unsigned int min_limit) | |
3520 | { | |
3521 | #ifdef CONFIG_BQL | |
3522 | dev_queue->dql.min_limit = min_limit; | |
3523 | #endif | |
3524 | } | |
3525 | ||
53511453 ED |
3526 | /** |
3527 | * netdev_txq_bql_enqueue_prefetchw - prefetch bql data for write | |
3528 | * @dev_queue: pointer to transmit queue | |
3529 | * | |
3530 | * BQL enabled drivers might use this helper in their ndo_start_xmit(), | |
5e82b4b2 | 3531 | * to give appropriate hint to the CPU. |
53511453 ED |
3532 | */ |
3533 | static inline void netdev_txq_bql_enqueue_prefetchw(struct netdev_queue *dev_queue) | |
3534 | { | |
3535 | #ifdef CONFIG_BQL | |
3536 | prefetchw(&dev_queue->dql.num_queued); | |
3537 | #endif | |
3538 | } | |
3539 | ||
3540 | /** | |
3541 | * netdev_txq_bql_complete_prefetchw - prefetch bql data for write | |
3542 | * @dev_queue: pointer to transmit queue | |
3543 | * | |
3544 | * BQL enabled drivers might use this helper in their TX completion path, | |
5e82b4b2 | 3545 | * to give appropriate hint to the CPU. |
53511453 ED |
3546 | */ |
3547 | static inline void netdev_txq_bql_complete_prefetchw(struct netdev_queue *dev_queue) | |
3548 | { | |
3549 | #ifdef CONFIG_BQL | |
3550 | prefetchw(&dev_queue->dql.limit); | |
3551 | #endif | |
3552 | } | |
3553 | ||
c5d67bd7 TH |
3554 | static inline void netdev_tx_sent_queue(struct netdev_queue *dev_queue, |
3555 | unsigned int bytes) | |
3556 | { | |
114cf580 TH |
3557 | #ifdef CONFIG_BQL |
3558 | dql_queued(&dev_queue->dql, bytes); | |
b37c0fbe AD |
3559 | |
3560 | if (likely(dql_avail(&dev_queue->dql) >= 0)) | |
3561 | return; | |
3562 | ||
3563 | set_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state); | |
3564 | ||
3565 | /* | |
3566 | * The XOFF flag must be set before checking the dql_avail below, | |
3567 | * because in netdev_tx_completed_queue we update the dql_completed | |
3568 | * before checking the XOFF flag. | |
3569 | */ | |
3570 | smp_mb(); | |
3571 | ||
3572 | /* check again in case another CPU has just made room available */ | |
3573 | if (unlikely(dql_avail(&dev_queue->dql) >= 0)) | |
3574 | clear_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state); | |
114cf580 | 3575 | #endif |
c5d67bd7 TH |
3576 | } |
3577 | ||
3e59020a ED |
3578 | /* Variant of netdev_tx_sent_queue() for drivers that are aware |
3579 | * that they should not test BQL status themselves. | |
3580 | * We do want to change __QUEUE_STATE_STACK_XOFF only for the last | |
3581 | * skb of a batch. | |
3582 | * Returns true if the doorbell must be used to kick the NIC. | |
3583 | */ | |
3584 | static inline bool __netdev_tx_sent_queue(struct netdev_queue *dev_queue, | |
3585 | unsigned int bytes, | |
3586 | bool xmit_more) | |
3587 | { | |
3588 | if (xmit_more) { | |
3589 | #ifdef CONFIG_BQL | |
3590 | dql_queued(&dev_queue->dql, bytes); | |
3591 | #endif | |
3592 | return netif_tx_queue_stopped(dev_queue); | |
3593 | } | |
3594 | netdev_tx_sent_queue(dev_queue, bytes); | |
3595 | return true; | |
3596 | } | |
3597 | ||
0042d0c8 FF |
3598 | /** |
3599 | * netdev_sent_queue - report the number of bytes queued to hardware | |
3600 | * @dev: network device | |
3601 | * @bytes: number of bytes queued to the hardware device queue | |
3602 | * | |
3603 | * Report the number of bytes queued for sending/completion to the network | |
3604 | * device hardware queue. @bytes should be a good approximation and should | |
3605 | * exactly match netdev_completed_queue() @bytes | |
3606 | */ | |
c5d67bd7 TH |
3607 | static inline void netdev_sent_queue(struct net_device *dev, unsigned int bytes) |
3608 | { | |
3609 | netdev_tx_sent_queue(netdev_get_tx_queue(dev, 0), bytes); | |
3610 | } | |
3611 | ||
620344c4 HK |
3612 | static inline bool __netdev_sent_queue(struct net_device *dev, |
3613 | unsigned int bytes, | |
3614 | bool xmit_more) | |
3615 | { | |
3616 | return __netdev_tx_sent_queue(netdev_get_tx_queue(dev, 0), bytes, | |
3617 | xmit_more); | |
3618 | } | |
3619 | ||
c5d67bd7 | 3620 | static inline void netdev_tx_completed_queue(struct netdev_queue *dev_queue, |
95c96174 | 3621 | unsigned int pkts, unsigned int bytes) |
c5d67bd7 | 3622 | { |
114cf580 | 3623 | #ifdef CONFIG_BQL |
b37c0fbe AD |
3624 | if (unlikely(!bytes)) |
3625 | return; | |
3626 | ||
3627 | dql_completed(&dev_queue->dql, bytes); | |
3628 | ||
3629 | /* | |
3630 | * Without the memory barrier there is a small possibility that | |
3631 | * netdev_tx_sent_queue will miss the update and cause the queue to | |
3632 | * be stopped forever | |
3633 | */ | |
3634 | smp_mb(); | |
3635 | ||
f3acd33d | 3636 | if (unlikely(dql_avail(&dev_queue->dql) < 0)) |
b37c0fbe AD |
3637 | return; |
3638 | ||
3639 | if (test_and_clear_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state)) | |
3640 | netif_schedule_queue(dev_queue); | |
114cf580 | 3641 | #endif |
c5d67bd7 TH |
3642 | } |
3643 | ||
0042d0c8 FF |
3644 | /** |
3645 | * netdev_completed_queue - report bytes and packets completed by device | |
3646 | * @dev: network device | |
3647 | * @pkts: actual number of packets sent over the medium | |
3648 | * @bytes: actual number of bytes sent over the medium | |
3649 | * | |
3650 | * Report the number of bytes and packets transmitted by the network device | |
3651 | * hardware queue over the physical medium, @bytes must exactly match the | |
3652 | * @bytes amount passed to netdev_sent_queue() | |
3653 | */ | |
c5d67bd7 | 3654 | static inline void netdev_completed_queue(struct net_device *dev, |
95c96174 | 3655 | unsigned int pkts, unsigned int bytes) |
c5d67bd7 TH |
3656 | { |
3657 | netdev_tx_completed_queue(netdev_get_tx_queue(dev, 0), pkts, bytes); | |
3658 | } | |
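A hedged sketch of how the BQL helpers above pair up across a driver's transmit and completion paths; my_xmit/my_tx_complete and the ring handling are illustrative. The key invariant is that bytes reported via netdev_tx_sent_queue() are later reported back via netdev_tx_completed_queue().

static netdev_tx_t my_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct netdev_queue *txq;

	txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));

	/* ... post the skb to the hardware ring here ... */

	netdev_tx_sent_queue(txq, skb->len);	/* account queued bytes */

	/* a real driver stops the queue (netif_tx_stop_queue) when the ring fills */
	return NETDEV_TX_OK;
}

static void my_tx_complete(struct net_device *dev, unsigned int queue,
			   unsigned int pkts, unsigned int bytes)
{
	struct netdev_queue *txq = netdev_get_tx_queue(dev, queue);

	netdev_tx_completed_queue(txq, pkts, bytes);

	/* a real driver wakes the queue (netif_tx_wake_queue) once ring space frees up */
}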
3659 | ||
3660 | static inline void netdev_tx_reset_queue(struct netdev_queue *q) | |
3661 | { | |
114cf580 | 3662 | #ifdef CONFIG_BQL |
5c490354 | 3663 | clear_bit(__QUEUE_STATE_STACK_XOFF, &q->state); |
114cf580 TH |
3664 | dql_reset(&q->dql); |
3665 | #endif | |
c5d67bd7 TH |
3666 | } |
3667 | ||
0042d0c8 FF |
3668 | /** |
3669 | * netdev_reset_queue - reset the packets and bytes count of a network device | |
3670 | * @dev_queue: network device | |
3671 | * | |
3672 | * Reset the bytes and packet count of a network device and clear the | |
3673 | * software flow control OFF bit for this network device | |
3674 | */ | |
c5d67bd7 TH |
3675 | static inline void netdev_reset_queue(struct net_device *dev_queue) |
3676 | { | |
3677 | netdev_tx_reset_queue(netdev_get_tx_queue(dev_queue, 0)); | |
c3f26a26 DM |
3678 | } |
3679 | ||
b9507bda DB |
3680 | /** |
3681 | * netdev_cap_txqueue - check if selected tx queue exceeds device queues | |
3682 | * @dev: network device | |
3683 | * @queue_index: given tx queue index | |
3684 | * | |
3685 | * Returns 0 if given tx queue index >= number of device tx queues, | |
3686 | * otherwise returns the originally passed tx queue index. | |
3687 | */ | |
3688 | static inline u16 netdev_cap_txqueue(struct net_device *dev, u16 queue_index) | |
3689 | { | |
3690 | if (unlikely(queue_index >= dev->real_num_tx_queues)) { | |
3691 | net_warn_ratelimited("%s selects TX queue %d, but real number of TX queues is %d\n", | |
3692 | dev->name, queue_index, | |
3693 | dev->real_num_tx_queues); | |
3694 | return 0; | |
3695 | } | |
3696 | ||
3697 | return queue_index; | |
3698 | } | |
3699 | ||
bea3348e SH |
3700 | /** |
3701 | * netif_running - test if up | |
3702 | * @dev: network device | |
3703 | * | |
3704 | * Test if the device has been brought up. | |
3705 | */ | |
4d29515f | 3706 | static inline bool netif_running(const struct net_device *dev) |
1da177e4 LT |
3707 | { |
3708 | return test_bit(__LINK_STATE_START, &dev->state); | |
3709 | } | |
3710 | ||
f25f4e44 | 3711 | /* |
5e82b4b2 | 3712 | * Routines to manage the subqueues on a device. We only need start, |
f25f4e44 PWJ |
3713 | * stop, and a check if it's stopped. All other device management is |
3714 | * done at the overall netdevice level. | |
3715 | * Also test the device if we're multiqueue. | |
3716 | */ | |
bea3348e SH |
3717 | |
3718 | /** | |
3719 | * netif_start_subqueue - allow sending packets on subqueue | |
3720 | * @dev: network device | |
3721 | * @queue_index: sub queue index | |
3722 | * | |
3723 | * Start individual transmit queue of a device with multiple transmit queues. | |
3724 | */ | |
f25f4e44 PWJ |
3725 | static inline void netif_start_subqueue(struct net_device *dev, u16 queue_index) |
3726 | { | |
fd2ea0a7 | 3727 | struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index); |
7b3d3e4f KK |
3728 | |
3729 | netif_tx_start_queue(txq); | |
f25f4e44 PWJ |
3730 | } |
3731 | ||
bea3348e SH |
3732 | /** |
3733 | * netif_stop_subqueue - stop sending packets on subqueue | |
3734 | * @dev: network device | |
3735 | * @queue_index: sub queue index | |
3736 | * | |
3737 | * Stop individual transmit queue of a device with multiple transmit queues. | |
3738 | */ | |
f25f4e44 PWJ |
3739 | static inline void netif_stop_subqueue(struct net_device *dev, u16 queue_index) |
3740 | { | |
fd2ea0a7 | 3741 | struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index); |
7b3d3e4f | 3742 | netif_tx_stop_queue(txq); |
f25f4e44 PWJ |
3743 | } |
3744 | ||
bea3348e | 3745 | /** |
270f3385 | 3746 | * __netif_subqueue_stopped - test status of subqueue |
bea3348e SH |
3747 | * @dev: network device |
3748 | * @queue_index: sub queue index | |
3749 | * | |
3750 | * Check individual transmit queue of a device with multiple transmit queues. | |
3751 | */ | |
4d29515f DM |
3752 | static inline bool __netif_subqueue_stopped(const struct net_device *dev, |
3753 | u16 queue_index) | |
f25f4e44 | 3754 | { |
fd2ea0a7 | 3755 | struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index); |
7b3d3e4f KK |
3756 | |
3757 | return netif_tx_queue_stopped(txq); | |
f25f4e44 PWJ |
3758 | } |
3759 | ||
270f3385 MCC |
3760 | /** |
3761 | * netif_subqueue_stopped - test status of subqueue | |
3762 | * @dev: network device | |
3763 | * @skb: sub queue buffer pointer | |
3764 | * | |
3765 | * Check individual transmit queue of a device with multiple transmit queues. | |
3766 | */ | |
4d29515f DM |
3767 | static inline bool netif_subqueue_stopped(const struct net_device *dev, |
3768 | struct sk_buff *skb) | |
668f895a PE |
3769 | { |
3770 | return __netif_subqueue_stopped(dev, skb_get_queue_mapping(skb)); | |
3771 | } | |
bea3348e | 3772 | |
738b35cc FF |
3773 | /** |
3774 | * netif_wake_subqueue - allow sending packets on subqueue | |
3775 | * @dev: network device | |
3776 | * @queue_index: sub queue index | |
3777 | * | |
3778 | * Resume individual transmit queue of a device with multiple transmit queues. | |
3779 | */ | |
3780 | static inline void netif_wake_subqueue(struct net_device *dev, u16 queue_index) | |
3781 | { | |
3782 | struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index); | |
3783 | ||
3784 | netif_tx_wake_queue(txq); | |
3785 | } | |
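A short, assumed example of per-subqueue flow control with the helpers above; the ring index and the omitted ring-occupancy check are illustrative.

static void my_ring_tx_done(struct net_device *dev, u16 ring)
{
	/* wake the subqueue once the hardware ring has room again */
	if (__netif_subqueue_stopped(dev, ring))
		netif_wake_subqueue(dev, ring);
}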
f25f4e44 | 3786 | |
537c00de | 3787 | #ifdef CONFIG_XPS |
53af53ae | 3788 | int netif_set_xps_queue(struct net_device *dev, const struct cpumask *mask, |
f629d208 | 3789 | u16 index); |
80d19669 | 3790 | int __netif_set_xps_queue(struct net_device *dev, const unsigned long *mask, |
044ab86d | 3791 | u16 index, enum xps_map_type type); |
80d19669 AN |
3792 | |
3793 | /** | |
3794 | * netif_attr_test_mask - Test a CPU or Rx queue set in a mask | |
3795 | * @j: CPU/Rx queue index | |
3796 | * @mask: bitmask of all cpus/rx queues | |
3797 | * @nr_bits: number of bits in the bitmask | |
3798 | * | |
3799 | * Test if a CPU or Rx queue index is set in a mask of all CPU/Rx queues. | |
3800 | */ | |
3801 | static inline bool netif_attr_test_mask(unsigned long j, | |
3802 | const unsigned long *mask, | |
3803 | unsigned int nr_bits) | |
3804 | { | |
3805 | cpu_max_bits_warn(j, nr_bits); | |
3806 | return test_bit(j, mask); | |
3807 | } | |
3808 | ||
3809 | /** | |
3810 | * netif_attr_test_online - Test for online CPU/Rx queue | |
3811 | * @j: CPU/Rx queue index | |
3812 | * @online_mask: bitmask for CPUs/Rx queues that are online | |
3813 | * @nr_bits: number of bits in the bitmask | |
3814 | * | |
3815 | * Returns true if a CPU/Rx queue is online. | |
3816 | */ | |
3817 | static inline bool netif_attr_test_online(unsigned long j, | |
3818 | const unsigned long *online_mask, | |
3819 | unsigned int nr_bits) | |
3820 | { | |
3821 | cpu_max_bits_warn(j, nr_bits); | |
3822 | ||
3823 | if (online_mask) | |
3824 | return test_bit(j, online_mask); | |
3825 | ||
3826 | return (j < nr_bits); | |
3827 | } | |
3828 | ||
3829 | /** | |
3830 | * netif_attrmask_next - get the next CPU/Rx queue in a cpu/Rx queues mask | |
3831 | * @n: CPU/Rx queue index | |
3832 | * @srcp: the cpumask/Rx queue mask pointer | |
3833 | * @nr_bits: number of bits in the bitmask | |
3834 | * | |
3835 | * Returns >= nr_bits if no further CPUs/Rx queues set. | |
3836 | */ | |
3837 | static inline unsigned int netif_attrmask_next(int n, const unsigned long *srcp, | |
3838 | unsigned int nr_bits) | |
3839 | { | |
3840 | /* -1 is a legal arg here. */ | |
3841 | if (n != -1) | |
3842 | cpu_max_bits_warn(n, nr_bits); | |
3843 | ||
3844 | if (srcp) | |
3845 | return find_next_bit(srcp, nr_bits, n + 1); | |
3846 | ||
3847 | return n + 1; | |
3848 | } | |
3849 | ||
3850 | /** | |
a1fa83bd | 3851 | * netif_attrmask_next_and - get the next CPU/Rx queue in \*src1p & \*src2p |
80d19669 AN |
3852 | * @n: CPU/Rx queue index |
3853 | * @src1p: the first CPUs/Rx queues mask pointer | |
3854 | * @src2p: the second CPUs/Rx queues mask pointer | |
3855 | * @nr_bits: number of bits in the bitmask | |
3856 | * | |
3857 | * Returns >= nr_bits if no further CPUs/Rx queues set in both. | |
3858 | */ | |
3859 | static inline int netif_attrmask_next_and(int n, const unsigned long *src1p, | |
3860 | const unsigned long *src2p, | |
3861 | unsigned int nr_bits) | |
3862 | { | |
3863 | /* -1 is a legal arg here. */ | |
3864 | if (n != -1) | |
3865 | cpu_max_bits_warn(n, nr_bits); | |
3866 | ||
3867 | if (src1p && src2p) | |
3868 | return find_next_and_bit(src1p, src2p, nr_bits, n + 1); | |
3869 | else if (src1p) | |
3870 | return find_next_bit(src1p, nr_bits, n + 1); | |
3871 | else if (src2p) | |
3872 | return find_next_bit(src2p, nr_bits, n + 1); | |
3873 | ||
3874 | return n + 1; | |
3875 | } | |
537c00de AD |
3876 | #else |
3877 | static inline int netif_set_xps_queue(struct net_device *dev, | |
3573540c | 3878 | const struct cpumask *mask, |
537c00de AD |
3879 | u16 index) |
3880 | { | |
3881 | return 0; | |
3882 | } | |
c9fbb2d2 KK |
3883 | |
3884 | static inline int __netif_set_xps_queue(struct net_device *dev, | |
3885 | const unsigned long *mask, | |
044ab86d | 3886 | u16 index, enum xps_map_type type) |
c9fbb2d2 KK |
3887 | { |
3888 | return 0; | |
3889 | } | |
537c00de AD |
3890 | #endif |
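As a usage sketch (not taken from this header), a driver or setup code can pin each transmit queue to a CPU with netif_set_xps_queue(); the one-queue-per-CPU policy shown here is an assumption:

/* Hypothetical probe-time policy: TX queue i is served by CPU i. */
static void mydrv_setup_xps(struct net_device *dev)
{
	int i;

	for (i = 0; i < dev->real_num_tx_queues; i++) {
		if (!cpu_online(i))
			break;
		/* Ignore the return value: XPS is only an optimization. */
		netif_set_xps_queue(dev, cpumask_of(i), i);
	}
}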
3891 | ||
bea3348e SH |
3892 | /** |
3893 | * netif_is_multiqueue - test if device has multiple transmit queues | |
3894 | * @dev: network device | |
3895 | * | |
3896 | * Check if device has multiple transmit queues | |
bea3348e | 3897 | */ |
4d29515f | 3898 | static inline bool netif_is_multiqueue(const struct net_device *dev) |
f25f4e44 | 3899 | { |
a02cec21 | 3900 | return dev->num_tx_queues > 1; |
f25f4e44 | 3901 | } |
1da177e4 | 3902 | |
f629d208 | 3903 | int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq); |
f0796d5c | 3904 | |
a953be53 | 3905 | #ifdef CONFIG_SYSFS |
f629d208 | 3906 | int netif_set_real_num_rx_queues(struct net_device *dev, unsigned int rxq); |
62fe0b40 BH |
3907 | #else |
3908 | static inline int netif_set_real_num_rx_queues(struct net_device *dev, | |
c29c2ebd | 3909 | unsigned int rxqs) |
62fe0b40 | 3910 | { |
c29c2ebd | 3911 | dev->real_num_rx_queues = rxqs; |
62fe0b40 BH |
3912 | return 0; |
3913 | } | |
3914 | #endif | |
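A short, hypothetical sketch of how a driver typically narrows the visible queue counts after allocating the maximum at alloc_netdev_mqs() time (the symmetric TX/RX count is an assumption):

/* Expose only the queues the driver actually configured in hardware. */
static int mydrv_set_channels(struct net_device *dev, unsigned int count)
{
	int err;

	err = netif_set_real_num_tx_queues(dev, count);
	if (err)
		return err;

	return netif_set_real_num_rx_queues(dev, count);
}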
3915 | ||
65073a67 DB |
3916 | static inline struct netdev_rx_queue * |
3917 | __netif_get_rx_queue(struct net_device *dev, unsigned int rxq) | |
3918 | { | |
3919 | return dev->_rx + rxq; | |
3920 | } | |
3921 | ||
a953be53 MD |
3922 | #ifdef CONFIG_SYSFS |
3923 | static inline unsigned int get_netdev_rx_queue_index( | |
3924 | struct netdev_rx_queue *queue) | |
3925 | { | |
3926 | struct net_device *dev = queue->dev; | |
3927 | int index = queue - dev->_rx; | |
3928 | ||
3929 | BUG_ON(index >= dev->num_rx_queues); | |
3930 | return index; | |
3931 | } | |
3932 | #endif | |
3933 | ||
16917b87 | 3934 | #define DEFAULT_MAX_NUM_RSS_QUEUES (8) |
f629d208 | 3935 | int netif_get_num_default_rss_queues(void); |
16917b87 | 3936 | |
e6247027 ED |
3937 | enum skb_free_reason { |
3938 | SKB_REASON_CONSUMED, | |
3939 | SKB_REASON_DROPPED, | |
3940 | }; | |
3941 | ||
3942 | void __dev_kfree_skb_irq(struct sk_buff *skb, enum skb_free_reason reason); | |
3943 | void __dev_kfree_skb_any(struct sk_buff *skb, enum skb_free_reason reason); | |
1da177e4 | 3944 | |
e6247027 ED |
3945 | /* |
3946 | * It is not allowed to call kfree_skb() or consume_skb() from hardware | |
3947 | * interrupt context or with hardware interrupts being disabled. | |
3948 | * (in_irq() || irqs_disabled()) | |
3949 | * | |
3950 | * We provide four helpers that can be used in following contexts : | |
3951 | * | |
3952 | * dev_kfree_skb_irq(skb) when caller drops a packet from irq context, | |
3953 | * replacing kfree_skb(skb) | |
3954 | * | |
3955 | * dev_consume_skb_irq(skb) when caller consumes a packet from irq context. | |
3956 | * Typically used in place of consume_skb(skb) in TX completion path | |
3957 | * | |
3958 | * dev_kfree_skb_any(skb) when caller doesn't know its current irq context, | |
3959 | * replacing kfree_skb(skb) | |
3960 | * | |
3961 | * dev_consume_skb_any(skb) when caller doesn't know its current irq context, | |
3962 | * and consumed a packet. Used in place of consume_skb(skb) | |
1da177e4 | 3963 | */ |
e6247027 ED |
3964 | static inline void dev_kfree_skb_irq(struct sk_buff *skb) |
3965 | { | |
3966 | __dev_kfree_skb_irq(skb, SKB_REASON_DROPPED); | |
3967 | } | |
3968 | ||
3969 | static inline void dev_consume_skb_irq(struct sk_buff *skb) | |
3970 | { | |
3971 | __dev_kfree_skb_irq(skb, SKB_REASON_CONSUMED); | |
3972 | } | |
3973 | ||
3974 | static inline void dev_kfree_skb_any(struct sk_buff *skb) | |
3975 | { | |
3976 | __dev_kfree_skb_any(skb, SKB_REASON_DROPPED); | |
3977 | } | |
3978 | ||
3979 | static inline void dev_consume_skb_any(struct sk_buff *skb) | |
3980 | { | |
3981 | __dev_kfree_skb_any(skb, SKB_REASON_CONSUMED); | |
3982 | } | |
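As an illustrative, hypothetical use of the four helpers described above: a TX-completion hardirq handler consumes buffers that were sent successfully and frees aborted ones as drops, so drop monitors only see real losses. The mydrv_* helpers are assumptions:

/* Hypothetical hardirq TX-completion path. */
static void mydrv_tx_irq(struct mydrv_ring *ring)
{
	struct sk_buff *skb;

	while ((skb = mydrv_next_completed_skb(ring))) {	/* assumed helper */
		if (mydrv_desc_ok(ring))			/* assumed helper */
			dev_consume_skb_irq(skb);	/* normal completion */
		else
			dev_kfree_skb_irq(skb);		/* counted as a drop */
	}
}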
1da177e4 | 3983 | |
7c497478 JW |
3984 | void generic_xdp_tx(struct sk_buff *skb, struct bpf_prog *xdp_prog); |
3985 | int do_xdp_generic(struct bpf_prog *xdp_prog, struct sk_buff *skb); | |
f629d208 JP |
3986 | int netif_rx(struct sk_buff *skb); |
3987 | int netif_rx_ni(struct sk_buff *skb); | |
c11171a4 | 3988 | int netif_rx_any_context(struct sk_buff *skb); |
04eb4489 | 3989 | int netif_receive_skb(struct sk_buff *skb); |
1c601d82 | 3990 | int netif_receive_skb_core(struct sk_buff *skb); |
f6ad8c1b | 3991 | void netif_receive_skb_list(struct list_head *head); |
f629d208 JP |
3992 | gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb); |
3993 | void napi_gro_flush(struct napi_struct *napi, bool flush_old); | |
3994 | struct sk_buff *napi_get_frags(struct napi_struct *napi); | |
3995 | gro_result_t napi_gro_frags(struct napi_struct *napi); | |
bf5a755f JC |
3996 | struct packet_offload *gro_find_receive_by_type(__be16 type); |
3997 | struct packet_offload *gro_find_complete_by_type(__be16 type); | |
76620aaf HX |
3998 | |
3999 | static inline void napi_free_frags(struct napi_struct *napi) | |
4000 | { | |
4001 | kfree_skb(napi->skb); | |
4002 | napi->skb = NULL; | |
4003 | } | |
4004 | ||
24b27fc4 | 4005 | bool netdev_is_rx_handler_busy(struct net_device *dev); |
f629d208 JP |
4006 | int netdev_rx_handler_register(struct net_device *dev, |
4007 | rx_handler_func_t *rx_handler, | |
4008 | void *rx_handler_data); | |
4009 | void netdev_rx_handler_unregister(struct net_device *dev); | |
4010 | ||
4011 | bool dev_valid_name(const char *name); | |
44c02a2c AV |
4012 | int dev_ioctl(struct net *net, unsigned int cmd, struct ifreq *ifr, |
4013 | bool *need_copyout); | |
36fd633e | 4014 | int dev_ifconf(struct net *net, struct ifconf *, int); |
f629d208 JP |
4015 | int dev_ethtool(struct net *net, struct ifreq *); |
4016 | unsigned int dev_get_flags(const struct net_device *); | |
6d040321 PM |
4017 | int __dev_change_flags(struct net_device *dev, unsigned int flags, |
4018 | struct netlink_ext_ack *extack); | |
567c5e13 PM |
4019 | int dev_change_flags(struct net_device *dev, unsigned int flags, |
4020 | struct netlink_ext_ack *extack); | |
cb178190 DM |
4021 | void __dev_notify_flags(struct net_device *, unsigned int old_flags, |
4022 | unsigned int gchanges); | |
f629d208 JP |
4023 | int dev_change_name(struct net_device *, const char *); |
4024 | int dev_set_alias(struct net_device *, const char *, size_t); | |
6c557001 | 4025 | int dev_get_alias(const struct net_device *, char *, size_t); |
f629d208 | 4026 | int dev_change_net_namespace(struct net_device *, struct net *, const char *); |
f51048c3 | 4027 | int __dev_set_mtu(struct net_device *, int); |
d836f5c6 ED |
4028 | int dev_validate_mtu(struct net_device *dev, int mtu, |
4029 | struct netlink_ext_ack *extack); | |
7a4c53be SH |
4030 | int dev_set_mtu_ext(struct net_device *dev, int mtu, |
4031 | struct netlink_ext_ack *extack); | |
f629d208 | 4032 | int dev_set_mtu(struct net_device *, int); |
6a643ddb | 4033 | int dev_change_tx_queue_len(struct net_device *, unsigned long); |
f629d208 | 4034 | void dev_set_group(struct net_device *, int); |
d59cdf94 PM |
4035 | int dev_pre_changeaddr_notify(struct net_device *dev, const char *addr, |
4036 | struct netlink_ext_ack *extack); | |
3a37a963 PM |
4037 | int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa, |
4038 | struct netlink_ext_ack *extack); | |
3b23a32a CW |
4039 | int dev_set_mac_address_user(struct net_device *dev, struct sockaddr *sa, |
4040 | struct netlink_ext_ack *extack); | |
4041 | int dev_get_mac_address(struct sockaddr *sa, struct net *net, char *dev_name); | |
f629d208 JP |
4042 | int dev_change_carrier(struct net_device *, bool new_carrier); |
4043 | int dev_get_phys_port_id(struct net_device *dev, | |
02637fce | 4044 | struct netdev_phys_item_id *ppid); |
db24a904 DA |
4045 | int dev_get_phys_port_name(struct net_device *dev, |
4046 | char *name, size_t len); | |
d6abc596 FF |
4047 | int dev_get_port_parent_id(struct net_device *dev, |
4048 | struct netdev_phys_item_id *ppid, bool recurse); | |
4049 | bool netdev_port_same_parent_id(struct net_device *a, struct net_device *b); | |
d746d707 | 4050 | int dev_change_proto_down(struct net_device *dev, bool proto_down); |
b5899679 | 4051 | int dev_change_proto_down_generic(struct net_device *dev, bool proto_down); |
829eb208 RP |
4052 | void dev_change_proto_down_reason(struct net_device *dev, unsigned long mask, |
4053 | u32 value); | |
f53c7239 | 4054 | struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev, bool *again); |
ce93718f DM |
4055 | struct sk_buff *dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev, |
4056 | struct netdev_queue *txq, int *ret); | |
d67b9cd2 | 4057 | |
f4e63525 | 4058 | typedef int (*bpf_op_t)(struct net_device *dev, struct netdev_bpf *bpf); |
d67b9cd2 | 4059 | int dev_change_xdp_fd(struct net_device *dev, struct netlink_ext_ack *extack, |
92234c8f | 4060 | int fd, int expected_fd, u32 flags); |
aa8d3a71 | 4061 | int bpf_xdp_link_attach(const union bpf_attr *attr, struct bpf_prog *prog); |
7f0a8382 | 4062 | u32 dev_xdp_prog_id(struct net_device *dev, enum bpf_xdp_mode mode); |
aa8d3a71 | 4063 | |
a0265d28 | 4064 | int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb); |
f629d208 | 4065 | int dev_forward_skb(struct net_device *dev, struct sk_buff *skb); |
5f7d5728 | 4066 | int dev_forward_skb_nomtu(struct net_device *dev, struct sk_buff *skb); |
f4b05d27 NA |
4067 | bool is_skb_forwardable(const struct net_device *dev, |
4068 | const struct sk_buff *skb); | |
1da177e4 | 4069 | |
5f7d5728 JDB |
4070 | static __always_inline bool __is_skb_forwardable(const struct net_device *dev, |
4071 | const struct sk_buff *skb, | |
4072 | const bool check_mtu) | |
4073 | { | |
4074 | const u32 vlan_hdr_len = 4; /* VLAN_HLEN */ | |
4075 | unsigned int len; | |
4076 | ||
4077 | if (!(dev->flags & IFF_UP)) | |
4078 | return false; | |
4079 | ||
4080 | if (!check_mtu) | |
4081 | return true; | |
4082 | ||
4083 | len = dev->mtu + dev->hard_header_len + vlan_hdr_len; | |
4084 | if (skb->len <= len) | |
4085 | return true; | |
4086 | ||
4087 | /* if TSO is enabled, we don't care about the length as the packet | |
 4088 | * could be forwarded as-is and segmented later on the egress path | |
4089 | */ | |
4090 | if (skb_is_gso(skb)) | |
4091 | return true; | |
4092 | ||
4093 | return false; | |
4094 | } | |
4095 | ||
4e3264d2 | 4096 | static __always_inline int ____dev_forward_skb(struct net_device *dev, |
5f7d5728 JDB |
4097 | struct sk_buff *skb, |
4098 | const bool check_mtu) | |
4e3264d2 MKL |
4099 | { |
4100 | if (skb_orphan_frags(skb, GFP_ATOMIC) || | |
5f7d5728 | 4101 | unlikely(!__is_skb_forwardable(dev, skb, check_mtu))) { |
4e3264d2 MKL |
4102 | atomic_long_inc(&dev->rx_dropped); |
4103 | kfree_skb(skb); | |
4104 | return NET_RX_DROP; | |
4105 | } | |
4106 | ||
4107 | skb_scrub_packet(skb, true); | |
4108 | skb->priority = 0; | |
4109 | return 0; | |
4110 | } | |
4111 | ||
9f9a742d | 4112 | bool dev_nit_active(struct net_device *dev); |
74b20582 DA |
4113 | void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev); |
4114 | ||
20380731 | 4115 | extern int netdev_budget; |
7acf8a1e | 4116 | extern unsigned int netdev_budget_usecs; |
1da177e4 LT |
4117 | |
4118 | /* Called by rtnetlink.c:rtnl_unlock() */ | |
f629d208 | 4119 | void netdev_run_todo(void); |
1da177e4 | 4120 | |
bea3348e SH |
4121 | /** |
4122 | * dev_put - release reference to device | |
4123 | * @dev: network device | |
4124 | * | |
9ef4429b | 4125 | * Release reference to device to allow it to be freed. |
bea3348e | 4126 | */ |
1da177e4 LT |
4127 | static inline void dev_put(struct net_device *dev) |
4128 | { | |
919067cc | 4129 | #ifdef CONFIG_PCPU_DEV_REFCNT |
933393f5 | 4130 | this_cpu_dec(*dev->pcpu_refcnt); |
919067cc ED |
4131 | #else |
4132 | refcount_dec(&dev->dev_refcnt); | |
4133 | #endif | |
1da177e4 LT |
4134 | } |
4135 | ||
bea3348e SH |
4136 | /** |
4137 | * dev_hold - get reference to device | |
4138 | * @dev: network device | |
4139 | * | |
9ef4429b | 4140 | * Hold reference to device to keep it from being freed. |
bea3348e | 4141 | */ |
15333061 SH |
4142 | static inline void dev_hold(struct net_device *dev) |
4143 | { | |
919067cc | 4144 | #ifdef CONFIG_PCPU_DEV_REFCNT |
933393f5 | 4145 | this_cpu_inc(*dev->pcpu_refcnt); |
919067cc ED |
4146 | #else |
4147 | refcount_inc(&dev->dev_refcnt); | |
4148 | #endif | |
15333061 | 4149 | } |
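A minimal, hypothetical sketch of the reference-counting contract: take a reference before handing the device to deferred work, and drop it once the work is done. The work item and its container are assumptions:

struct mydrv_work {
	struct work_struct work;
	struct net_device *dev;
};

static void mydrv_defer(struct net_device *dev, struct mydrv_work *w)
{
	dev_hold(dev);		/* keep dev alive until the work runs */
	w->dev = dev;
	schedule_work(&w->work);
}

static void mydrv_work_fn(struct work_struct *work)
{
	struct mydrv_work *w = container_of(work, struct mydrv_work, work);

	/* ... use w->dev ... */
	dev_put(w->dev);	/* release the reference taken in mydrv_defer() */
}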
1da177e4 LT |
4150 | |
4151 | /* Carrier loss detection, dial on demand. The functions netif_carrier_on | |
 4152 | * and _off may be called from IRQ context, but it is the caller | |
4153 | * who is responsible for serialization of these calls. | |
b00055aa SR |
4154 | * |
 4155 | * The name 'carrier' is inappropriate; these functions should really be | |
4156 | * called netif_lowerlayer_*() because they represent the state of any | |
4157 | * kind of lower layer not just hardware media. | |
1da177e4 LT |
4158 | */ |
4159 | ||
f629d208 JP |
4160 | void linkwatch_init_dev(struct net_device *dev); |
4161 | void linkwatch_fire_event(struct net_device *dev); | |
4162 | void linkwatch_forget_dev(struct net_device *dev); | |
1da177e4 | 4163 | |
bea3348e SH |
4164 | /** |
4165 | * netif_carrier_ok - test if carrier present | |
4166 | * @dev: network device | |
4167 | * | |
4168 | * Check if carrier is present on device | |
4169 | */ | |
4d29515f | 4170 | static inline bool netif_carrier_ok(const struct net_device *dev) |
1da177e4 LT |
4171 | { |
4172 | return !test_bit(__LINK_STATE_NOCARRIER, &dev->state); | |
4173 | } | |
4174 | ||
f629d208 | 4175 | unsigned long dev_trans_start(struct net_device *dev); |
9d21493b | 4176 | |
f629d208 | 4177 | void __netdev_watchdog_up(struct net_device *dev); |
1da177e4 | 4178 | |
f629d208 | 4179 | void netif_carrier_on(struct net_device *dev); |
1da177e4 | 4180 | |
f629d208 | 4181 | void netif_carrier_off(struct net_device *dev); |
1da177e4 | 4182 | |
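For context, a hypothetical link-status handler (polled from a PHY or driven by an interrupt) reports link transitions with these helpers; mydrv_link_up() is an assumed hardware query:

/* Hypothetical link-change notification from the MAC/PHY layer. */
static void mydrv_link_change(struct net_device *dev)
{
	if (mydrv_link_up(dev)) {		/* assumed hardware query */
		netif_carrier_on(dev);
		netif_tx_wake_all_queues(dev);
	} else {
		netif_carrier_off(dev);
		netif_tx_stop_all_queues(dev);
	}
}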
bea3348e SH |
4183 | /** |
4184 | * netif_dormant_on - mark device as dormant. | |
4185 | * @dev: network device | |
4186 | * | |
4187 | * Mark device as dormant (as per RFC2863). | |
4188 | * | |
4189 | * The dormant state indicates that the relevant interface is not | |
4190 | * actually in a condition to pass packets (i.e., it is not 'up') but is | |
4191 | * in a "pending" state, waiting for some external event. For "on- | |
4192 | * demand" interfaces, this new state identifies the situation where the | |
4193 | * interface is waiting for events to place it in the up state. | |
bea3348e | 4194 | */ |
b00055aa SR |
4195 | static inline void netif_dormant_on(struct net_device *dev) |
4196 | { | |
4197 | if (!test_and_set_bit(__LINK_STATE_DORMANT, &dev->state)) | |
4198 | linkwatch_fire_event(dev); | |
4199 | } | |
4200 | ||
bea3348e SH |
4201 | /** |
4202 | * netif_dormant_off - set device as not dormant. | |
4203 | * @dev: network device | |
4204 | * | |
4205 | * Device is not in dormant state. | |
4206 | */ | |
b00055aa SR |
4207 | static inline void netif_dormant_off(struct net_device *dev) |
4208 | { | |
4209 | if (test_and_clear_bit(__LINK_STATE_DORMANT, &dev->state)) | |
4210 | linkwatch_fire_event(dev); | |
4211 | } | |
4212 | ||
bea3348e | 4213 | /** |
8ecbc40a | 4214 | * netif_dormant - test if device is dormant |
bea3348e SH |
4215 | * @dev: network device |
4216 | * | |
8ecbc40a | 4217 | * Check if device is dormant. |
bea3348e | 4218 | */ |
4d29515f | 4219 | static inline bool netif_dormant(const struct net_device *dev) |
b00055aa SR |
4220 | { |
4221 | return test_bit(__LINK_STATE_DORMANT, &dev->state); | |
4222 | } | |
4223 | ||
4224 | ||
eec517cd AL |
4225 | /** |
4226 | * netif_testing_on - mark device as under test. | |
4227 | * @dev: network device | |
4228 | * | |
4229 | * Mark device as under test (as per RFC2863). | |
4230 | * | |
4231 | * The testing state indicates that some test(s) must be performed on | |
 4232 | * the interface. After completion of the test, the interface state | |
4233 | * will change to up, dormant, or down, as appropriate. | |
4234 | */ | |
4235 | static inline void netif_testing_on(struct net_device *dev) | |
4236 | { | |
4237 | if (!test_and_set_bit(__LINK_STATE_TESTING, &dev->state)) | |
4238 | linkwatch_fire_event(dev); | |
4239 | } | |
4240 | ||
4241 | /** | |
4242 | * netif_testing_off - set device as not under test. | |
4243 | * @dev: network device | |
4244 | * | |
4245 | * Device is not in testing state. | |
4246 | */ | |
4247 | static inline void netif_testing_off(struct net_device *dev) | |
4248 | { | |
4249 | if (test_and_clear_bit(__LINK_STATE_TESTING, &dev->state)) | |
4250 | linkwatch_fire_event(dev); | |
4251 | } | |
4252 | ||
4253 | /** | |
4254 | * netif_testing - test if device is under test | |
4255 | * @dev: network device | |
4256 | * | |
4257 | * Check if device is under test | |
4258 | */ | |
4259 | static inline bool netif_testing(const struct net_device *dev) | |
4260 | { | |
4261 | return test_bit(__LINK_STATE_TESTING, &dev->state); | |
4262 | } | |
4263 | ||
4264 | ||
bea3348e SH |
4265 | /** |
4266 | * netif_oper_up - test if device is operational | |
4267 | * @dev: network device | |
4268 | * | |
4269 | * Check if carrier is operational | |
4270 | */ | |
4d29515f | 4271 | static inline bool netif_oper_up(const struct net_device *dev) |
d94d9fee | 4272 | { |
b00055aa SR |
4273 | return (dev->operstate == IF_OPER_UP || |
4274 | dev->operstate == IF_OPER_UNKNOWN /* backward compat */); | |
4275 | } | |
4276 | ||
bea3348e SH |
4277 | /** |
4278 | * netif_device_present - is device available or removed | |
4279 | * @dev: network device | |
4280 | * | |
4281 | * Check if device has not been removed from system. | |
4282 | */ | |
7a126a43 | 4283 | static inline bool netif_device_present(const struct net_device *dev) |
1da177e4 LT |
4284 | { |
4285 | return test_bit(__LINK_STATE_PRESENT, &dev->state); | |
4286 | } | |
4287 | ||
f629d208 | 4288 | void netif_device_detach(struct net_device *dev); |
1da177e4 | 4289 | |
f629d208 | 4290 | void netif_device_attach(struct net_device *dev); |
1da177e4 LT |
4291 | |
4292 | /* | |
4293 | * Network interface message level settings | |
4294 | */ | |
1da177e4 LT |
4295 | |
4296 | enum { | |
6a94b8cc MK |
4297 | NETIF_MSG_DRV_BIT, |
4298 | NETIF_MSG_PROBE_BIT, | |
4299 | NETIF_MSG_LINK_BIT, | |
4300 | NETIF_MSG_TIMER_BIT, | |
4301 | NETIF_MSG_IFDOWN_BIT, | |
4302 | NETIF_MSG_IFUP_BIT, | |
4303 | NETIF_MSG_RX_ERR_BIT, | |
4304 | NETIF_MSG_TX_ERR_BIT, | |
4305 | NETIF_MSG_TX_QUEUED_BIT, | |
4306 | NETIF_MSG_INTR_BIT, | |
4307 | NETIF_MSG_TX_DONE_BIT, | |
4308 | NETIF_MSG_RX_STATUS_BIT, | |
4309 | NETIF_MSG_PKTDATA_BIT, | |
4310 | NETIF_MSG_HW_BIT, | |
4311 | NETIF_MSG_WOL_BIT, | |
4312 | ||
4313 | /* When you add a new bit above, update netif_msg_class_names array | |
4314 | * in net/ethtool/common.c | |
4315 | */ | |
4316 | NETIF_MSG_CLASS_COUNT, | |
1da177e4 | 4317 | }; |
6a94b8cc MK |
4318 | /* Both ethtool_ops interface and internal driver implementation use u32 */ |
4319 | static_assert(NETIF_MSG_CLASS_COUNT <= 32); | |
4320 | ||
4321 | #define __NETIF_MSG_BIT(bit) ((u32)1 << (bit)) | |
4322 | #define __NETIF_MSG(name) __NETIF_MSG_BIT(NETIF_MSG_ ## name ## _BIT) | |
4323 | ||
4324 | #define NETIF_MSG_DRV __NETIF_MSG(DRV) | |
4325 | #define NETIF_MSG_PROBE __NETIF_MSG(PROBE) | |
4326 | #define NETIF_MSG_LINK __NETIF_MSG(LINK) | |
4327 | #define NETIF_MSG_TIMER __NETIF_MSG(TIMER) | |
4328 | #define NETIF_MSG_IFDOWN __NETIF_MSG(IFDOWN) | |
4329 | #define NETIF_MSG_IFUP __NETIF_MSG(IFUP) | |
4330 | #define NETIF_MSG_RX_ERR __NETIF_MSG(RX_ERR) | |
4331 | #define NETIF_MSG_TX_ERR __NETIF_MSG(TX_ERR) | |
4332 | #define NETIF_MSG_TX_QUEUED __NETIF_MSG(TX_QUEUED) | |
4333 | #define NETIF_MSG_INTR __NETIF_MSG(INTR) | |
4334 | #define NETIF_MSG_TX_DONE __NETIF_MSG(TX_DONE) | |
4335 | #define NETIF_MSG_RX_STATUS __NETIF_MSG(RX_STATUS) | |
4336 | #define NETIF_MSG_PKTDATA __NETIF_MSG(PKTDATA) | |
4337 | #define NETIF_MSG_HW __NETIF_MSG(HW) | |
4338 | #define NETIF_MSG_WOL __NETIF_MSG(WOL) | |
1da177e4 LT |
4339 | |
4340 | #define netif_msg_drv(p) ((p)->msg_enable & NETIF_MSG_DRV) | |
4341 | #define netif_msg_probe(p) ((p)->msg_enable & NETIF_MSG_PROBE) | |
4342 | #define netif_msg_link(p) ((p)->msg_enable & NETIF_MSG_LINK) | |
4343 | #define netif_msg_timer(p) ((p)->msg_enable & NETIF_MSG_TIMER) | |
4344 | #define netif_msg_ifdown(p) ((p)->msg_enable & NETIF_MSG_IFDOWN) | |
4345 | #define netif_msg_ifup(p) ((p)->msg_enable & NETIF_MSG_IFUP) | |
4346 | #define netif_msg_rx_err(p) ((p)->msg_enable & NETIF_MSG_RX_ERR) | |
4347 | #define netif_msg_tx_err(p) ((p)->msg_enable & NETIF_MSG_TX_ERR) | |
4348 | #define netif_msg_tx_queued(p) ((p)->msg_enable & NETIF_MSG_TX_QUEUED) | |
4349 | #define netif_msg_intr(p) ((p)->msg_enable & NETIF_MSG_INTR) | |
4350 | #define netif_msg_tx_done(p) ((p)->msg_enable & NETIF_MSG_TX_DONE) | |
4351 | #define netif_msg_rx_status(p) ((p)->msg_enable & NETIF_MSG_RX_STATUS) | |
4352 | #define netif_msg_pktdata(p) ((p)->msg_enable & NETIF_MSG_PKTDATA) | |
4353 | #define netif_msg_hw(p) ((p)->msg_enable & NETIF_MSG_HW) | |
4354 | #define netif_msg_wol(p) ((p)->msg_enable & NETIF_MSG_WOL) | |
4355 | ||
4356 | static inline u32 netif_msg_init(int debug_value, int default_msg_enable_bits) | |
4357 | { | |
4358 | /* use default */ | |
4359 | if (debug_value < 0 || debug_value >= (sizeof(u32) * 8)) | |
4360 | return default_msg_enable_bits; | |
4361 | if (debug_value == 0) /* no output */ | |
4362 | return 0; | |
4363 | /* set low N bits */ | |
f4d7b3e2 | 4364 | return (1U << debug_value) - 1; |
1da177e4 LT |
4365 | } |
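A hedged usage sketch for the message-level machinery above: a driver seeds priv->msg_enable from a module parameter via netif_msg_init() and then gates its output with the netif_msg_*() macros. The debug parameter and mydrv_priv layout are assumptions:

static int debug = -1;				/* -1 == use the defaults */
module_param(debug, int, 0644);

struct mydrv_priv {
	u32 msg_enable;
	/* ... other driver state ... */
};

static void mydrv_init_msg(struct net_device *dev)
{
	struct mydrv_priv *priv = netdev_priv(dev);

	priv->msg_enable = netif_msg_init(debug, NETIF_MSG_DRV |
						 NETIF_MSG_PROBE |
						 NETIF_MSG_LINK);
	if (netif_msg_probe(priv))
		netdev_info(dev, "message level 0x%x\n", priv->msg_enable);
}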
4366 | ||
c773e847 | 4367 | static inline void __netif_tx_lock(struct netdev_queue *txq, int cpu) |
932ff279 | 4368 | { |
c773e847 DM |
4369 | spin_lock(&txq->_xmit_lock); |
4370 | txq->xmit_lock_owner = cpu; | |
22dd7495 JHS |
4371 | } |
4372 | ||
5a717f4f MT |
4373 | static inline bool __netif_tx_acquire(struct netdev_queue *txq) |
4374 | { | |
4375 | __acquire(&txq->_xmit_lock); | |
4376 | return true; | |
4377 | } | |
4378 | ||
4379 | static inline void __netif_tx_release(struct netdev_queue *txq) | |
4380 | { | |
4381 | __release(&txq->_xmit_lock); | |
4382 | } | |
4383 | ||
fd2ea0a7 DM |
4384 | static inline void __netif_tx_lock_bh(struct netdev_queue *txq) |
4385 | { | |
4386 | spin_lock_bh(&txq->_xmit_lock); | |
4387 | txq->xmit_lock_owner = smp_processor_id(); | |
4388 | } | |
4389 | ||
4d29515f | 4390 | static inline bool __netif_tx_trylock(struct netdev_queue *txq) |
c3f26a26 | 4391 | { |
4d29515f | 4392 | bool ok = spin_trylock(&txq->_xmit_lock); |
c3f26a26 DM |
4393 | if (likely(ok)) |
4394 | txq->xmit_lock_owner = smp_processor_id(); | |
4395 | return ok; | |
4396 | } | |
4397 | ||
4398 | static inline void __netif_tx_unlock(struct netdev_queue *txq) | |
4399 | { | |
4400 | txq->xmit_lock_owner = -1; | |
4401 | spin_unlock(&txq->_xmit_lock); | |
4402 | } | |
4403 | ||
4404 | static inline void __netif_tx_unlock_bh(struct netdev_queue *txq) | |
4405 | { | |
4406 | txq->xmit_lock_owner = -1; | |
4407 | spin_unlock_bh(&txq->_xmit_lock); | |
4408 | } | |
4409 | ||
08baf561 ED |
4410 | static inline void txq_trans_update(struct netdev_queue *txq) |
4411 | { | |
4412 | if (txq->xmit_lock_owner != -1) | |
4413 | txq->trans_start = jiffies; | |
4414 | } | |
4415 | ||
ba162f8e FW |
4416 | /* legacy drivers only, netdev_start_xmit() sets txq->trans_start */ |
4417 | static inline void netif_trans_update(struct net_device *dev) | |
4418 | { | |
9b36627a FW |
4419 | struct netdev_queue *txq = netdev_get_tx_queue(dev, 0); |
4420 | ||
4421 | if (txq->trans_start != jiffies) | |
4422 | txq->trans_start = jiffies; | |
ba162f8e FW |
4423 | } |
4424 | ||
d29f749e DJ |
4425 | /** |
4426 | * netif_tx_lock - grab network device transmit lock | |
4427 | * @dev: network device | |
d29f749e DJ |
4428 | * |
4429 | * Get network device transmit lock | |
4430 | */ | |
22dd7495 JHS |
4431 | static inline void netif_tx_lock(struct net_device *dev) |
4432 | { | |
e8a0464c | 4433 | unsigned int i; |
c3f26a26 | 4434 | int cpu; |
c773e847 | 4435 | |
c3f26a26 DM |
4436 | spin_lock(&dev->tx_global_lock); |
4437 | cpu = smp_processor_id(); | |
e8a0464c DM |
4438 | for (i = 0; i < dev->num_tx_queues; i++) { |
4439 | struct netdev_queue *txq = netdev_get_tx_queue(dev, i); | |
c3f26a26 DM |
4440 | |
4441 | /* We are the only thread of execution doing a | |
4442 | * freeze, but we have to grab the _xmit_lock in | |
4443 | * order to synchronize with threads which are in | |
4444 | * the ->hard_start_xmit() handler and already | |
4445 | * checked the frozen bit. | |
4446 | */ | |
e8a0464c | 4447 | __netif_tx_lock(txq, cpu); |
c3f26a26 DM |
4448 | set_bit(__QUEUE_STATE_FROZEN, &txq->state); |
4449 | __netif_tx_unlock(txq); | |
e8a0464c | 4450 | } |
932ff279 HX |
4451 | } |
4452 | ||
4453 | static inline void netif_tx_lock_bh(struct net_device *dev) | |
4454 | { | |
e8a0464c DM |
4455 | local_bh_disable(); |
4456 | netif_tx_lock(dev); | |
932ff279 HX |
4457 | } |
4458 | ||
932ff279 HX |
4459 | static inline void netif_tx_unlock(struct net_device *dev) |
4460 | { | |
e8a0464c DM |
4461 | unsigned int i; |
4462 | ||
4463 | for (i = 0; i < dev->num_tx_queues; i++) { | |
4464 | struct netdev_queue *txq = netdev_get_tx_queue(dev, i); | |
c773e847 | 4465 | |
c3f26a26 DM |
4466 | /* No need to grab the _xmit_lock here. If the |
4467 | * queue is not stopped for another reason, we | |
4468 | * force a schedule. | |
4469 | */ | |
4470 | clear_bit(__QUEUE_STATE_FROZEN, &txq->state); | |
7b3d3e4f | 4471 | netif_schedule_queue(txq); |
c3f26a26 DM |
4472 | } |
4473 | spin_unlock(&dev->tx_global_lock); | |
932ff279 HX |
4474 | } |
4475 | ||
4476 | static inline void netif_tx_unlock_bh(struct net_device *dev) | |
4477 | { | |
e8a0464c DM |
4478 | netif_tx_unlock(dev); |
4479 | local_bh_enable(); | |
932ff279 HX |
4480 | } |
4481 | ||
c773e847 | 4482 | #define HARD_TX_LOCK(dev, txq, cpu) { \ |
22dd7495 | 4483 | if ((dev->features & NETIF_F_LLTX) == 0) { \ |
c773e847 | 4484 | __netif_tx_lock(txq, cpu); \ |
5a717f4f MT |
4485 | } else { \ |
4486 | __netif_tx_acquire(txq); \ | |
22dd7495 JHS |
4487 | } \ |
4488 | } | |
4489 | ||
5efeac44 EB |
4490 | #define HARD_TX_TRYLOCK(dev, txq) \ |
4491 | (((dev->features & NETIF_F_LLTX) == 0) ? \ | |
4492 | __netif_tx_trylock(txq) : \ | |
5a717f4f | 4493 | __netif_tx_acquire(txq)) |
5efeac44 | 4494 | |
c773e847 | 4495 | #define HARD_TX_UNLOCK(dev, txq) { \ |
22dd7495 | 4496 | if ((dev->features & NETIF_F_LLTX) == 0) { \ |
c773e847 | 4497 | __netif_tx_unlock(txq); \ |
5a717f4f MT |
4498 | } else { \ |
4499 | __netif_tx_release(txq); \ | |
22dd7495 JHS |
4500 | } \ |
4501 | } | |
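Roughly how the core transmit path uses these macros (a simplified sketch, not the exact dev_hard_start_xmit() code): the per-queue lock is only taken for drivers that do not declare NETIF_F_LLTX:

/* Simplified transmit step, modelled on the core xmit path. */
static netdev_tx_t xmit_one_sketch(struct sk_buff *skb, struct net_device *dev,
				   struct netdev_queue *txq)
{
	netdev_tx_t rc;

	HARD_TX_LOCK(dev, txq, smp_processor_id());
	if (!netif_xmit_frozen_or_stopped(txq))
		rc = netdev_start_xmit(skb, dev, txq, false);
	else
		rc = NETDEV_TX_BUSY;
	HARD_TX_UNLOCK(dev, txq);

	return rc;
}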
4502 | ||
1da177e4 LT |
4503 | static inline void netif_tx_disable(struct net_device *dev) |
4504 | { | |
fd2ea0a7 | 4505 | unsigned int i; |
c3f26a26 | 4506 | int cpu; |
fd2ea0a7 | 4507 | |
c3f26a26 DM |
4508 | local_bh_disable(); |
4509 | cpu = smp_processor_id(); | |
3aa6bce9 | 4510 | spin_lock(&dev->tx_global_lock); |
fd2ea0a7 DM |
4511 | for (i = 0; i < dev->num_tx_queues; i++) { |
4512 | struct netdev_queue *txq = netdev_get_tx_queue(dev, i); | |
c3f26a26 DM |
4513 | |
4514 | __netif_tx_lock(txq, cpu); | |
fd2ea0a7 | 4515 | netif_tx_stop_queue(txq); |
c3f26a26 | 4516 | __netif_tx_unlock(txq); |
fd2ea0a7 | 4517 | } |
3aa6bce9 | 4518 | spin_unlock(&dev->tx_global_lock); |
c3f26a26 | 4519 | local_bh_enable(); |
1da177e4 LT |
4520 | } |
4521 | ||
e308a5d8 DM |
4522 | static inline void netif_addr_lock(struct net_device *dev) |
4523 | { | |
1fc70edb | 4524 | unsigned char nest_level = 0; |
e308a5d8 | 4525 | |
1fc70edb TY |
4526 | #ifdef CONFIG_LOCKDEP |
4527 | nest_level = dev->nested_level; | |
4528 | #endif | |
4529 | spin_lock_nested(&dev->addr_list_lock, nest_level); | |
845e0ebb CW |
4530 | } |
4531 | ||
e308a5d8 DM |
4532 | static inline void netif_addr_lock_bh(struct net_device *dev) |
4533 | { | |
1fc70edb TY |
4534 | unsigned char nest_level = 0; |
4535 | ||
4536 | #ifdef CONFIG_LOCKDEP | |
4537 | nest_level = dev->nested_level; | |
4538 | #endif | |
4539 | local_bh_disable(); | |
4540 | spin_lock_nested(&dev->addr_list_lock, nest_level); | |
e308a5d8 DM |
4541 | } |
4542 | ||
4543 | static inline void netif_addr_unlock(struct net_device *dev) | |
4544 | { | |
4545 | spin_unlock(&dev->addr_list_lock); | |
4546 | } | |
4547 | ||
4548 | static inline void netif_addr_unlock_bh(struct net_device *dev) | |
4549 | { | |
4550 | spin_unlock_bh(&dev->addr_list_lock); | |
4551 | } | |
4552 | ||
f001fde5 | 4553 | /* |
31278e71 | 4554 | * dev_addrs walker. Should be used only for read access. Call with |
f001fde5 JP |
4555 | * rcu_read_lock held. |
4556 | */ | |
4557 | #define for_each_dev_addr(dev, ha) \ | |
31278e71 | 4558 | list_for_each_entry_rcu(ha, &dev->dev_addrs.list, list) |
f001fde5 | 4559 | |
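A small, hypothetical example of the walker above; note the RCU read lock, which is the caller's responsibility:

static void mydrv_dump_dev_addrs(struct net_device *dev)
{
	struct netdev_hw_addr *ha;

	rcu_read_lock();
	for_each_dev_addr(dev, ha)
		netdev_dbg(dev, "device address %pM\n", ha->addr);
	rcu_read_unlock();
}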
1da177e4 LT |
4560 | /* These functions live elsewhere (drivers/net/net_init.c, but related) */ |
4561 | ||
f629d208 | 4562 | void ether_setup(struct net_device *dev); |
1da177e4 LT |
4563 | |
4564 | /* Support for loadable net-drivers */ | |
f629d208 | 4565 | struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name, |
c835a677 | 4566 | unsigned char name_assign_type, |
f629d208 JP |
4567 | void (*setup)(struct net_device *), |
4568 | unsigned int txqs, unsigned int rxqs); | |
c835a677 TG |
4569 | #define alloc_netdev(sizeof_priv, name, name_assign_type, setup) \ |
4570 | alloc_netdev_mqs(sizeof_priv, name, name_assign_type, setup, 1, 1) | |
36909ea4 | 4571 | |
c835a677 TG |
4572 | #define alloc_netdev_mq(sizeof_priv, name, name_assign_type, setup, count) \ |
4573 | alloc_netdev_mqs(sizeof_priv, name, name_assign_type, setup, count, \ | |
4574 | count) | |
36909ea4 | 4575 | |
f629d208 JP |
4576 | int register_netdev(struct net_device *dev); |
4577 | void unregister_netdev(struct net_device *dev); | |
f001fde5 | 4578 | |
cd16627f BG |
4579 | int devm_register_netdev(struct device *dev, struct net_device *ndev); |
4580 | ||
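A minimal, hypothetical driver skeleton using the allocation and registration API above; the interface name template, queue counts and private-data struct are assumptions:

static int mydrv_create(void)
{
	struct net_device *dev;
	int err;

	/* 4 TX and 4 RX queues, Ethernet defaults via ether_setup(). */
	dev = alloc_netdev_mqs(sizeof(struct mydrv_priv), "myeth%d",
			       NET_NAME_ENUM, ether_setup, 4, 4);
	if (!dev)
		return -ENOMEM;

	err = register_netdev(dev);
	if (err) {
		free_netdev(dev);
		return err;
	}
	return 0;
}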
22bedad3 | 4581 | /* General hardware address lists handling functions */ |
f629d208 JP |
4582 | int __hw_addr_sync(struct netdev_hw_addr_list *to_list, |
4583 | struct netdev_hw_addr_list *from_list, int addr_len); | |
4584 | void __hw_addr_unsync(struct netdev_hw_addr_list *to_list, | |
4585 | struct netdev_hw_addr_list *from_list, int addr_len); | |
670e5b8e AD |
4586 | int __hw_addr_sync_dev(struct netdev_hw_addr_list *list, |
4587 | struct net_device *dev, | |
4588 | int (*sync)(struct net_device *, const unsigned char *), | |
4589 | int (*unsync)(struct net_device *, | |
4590 | const unsigned char *)); | |
e7946760 IK |
4591 | int __hw_addr_ref_sync_dev(struct netdev_hw_addr_list *list, |
4592 | struct net_device *dev, | |
4593 | int (*sync)(struct net_device *, | |
4594 | const unsigned char *, int), | |
4595 | int (*unsync)(struct net_device *, | |
4596 | const unsigned char *, int)); | |
4597 | void __hw_addr_ref_unsync_dev(struct netdev_hw_addr_list *list, | |
4598 | struct net_device *dev, | |
4599 | int (*unsync)(struct net_device *, | |
4600 | const unsigned char *, int)); | |
670e5b8e AD |
4601 | void __hw_addr_unsync_dev(struct netdev_hw_addr_list *list, |
4602 | struct net_device *dev, | |
4603 | int (*unsync)(struct net_device *, | |
4604 | const unsigned char *)); | |
f629d208 | 4605 | void __hw_addr_init(struct netdev_hw_addr_list *list); |
22bedad3 | 4606 | |
f001fde5 | 4607 | /* Functions used for device addresses handling */ |
f629d208 JP |
4608 | int dev_addr_add(struct net_device *dev, const unsigned char *addr, |
4609 | unsigned char addr_type); | |
4610 | int dev_addr_del(struct net_device *dev, const unsigned char *addr, | |
4611 | unsigned char addr_type); | |
f629d208 JP |
4612 | void dev_addr_flush(struct net_device *dev); |
4613 | int dev_addr_init(struct net_device *dev); | |
a748ee24 JP |
4614 | |
4615 | /* Functions used for unicast addresses handling */ | |
f629d208 JP |
4616 | int dev_uc_add(struct net_device *dev, const unsigned char *addr); |
4617 | int dev_uc_add_excl(struct net_device *dev, const unsigned char *addr); | |
4618 | int dev_uc_del(struct net_device *dev, const unsigned char *addr); | |
4619 | int dev_uc_sync(struct net_device *to, struct net_device *from); | |
4620 | int dev_uc_sync_multiple(struct net_device *to, struct net_device *from); | |
4621 | void dev_uc_unsync(struct net_device *to, struct net_device *from); | |
4622 | void dev_uc_flush(struct net_device *dev); | |
4623 | void dev_uc_init(struct net_device *dev); | |
f001fde5 | 4624 | |
670e5b8e AD |
4625 | /** |
 4626 | * __dev_uc_sync - Synchronize device's unicast list | |
4627 | * @dev: device to sync | |
4628 | * @sync: function to call if address should be added | |
4629 | * @unsync: function to call if address should be removed | |
4630 | * | |
4631 | * Add newly added addresses to the interface, and release | |
4632 | * addresses that have been deleted. | |
5e82b4b2 | 4633 | */ |
670e5b8e AD |
4634 | static inline int __dev_uc_sync(struct net_device *dev, |
4635 | int (*sync)(struct net_device *, | |
4636 | const unsigned char *), | |
4637 | int (*unsync)(struct net_device *, | |
4638 | const unsigned char *)) | |
4639 | { | |
4640 | return __hw_addr_sync_dev(&dev->uc, dev, sync, unsync); | |
4641 | } | |
4642 | ||
4643 | /** | |
e793c0f7 | 4644 | * __dev_uc_unsync - Remove synchronized addresses from device |
670e5b8e AD |
4645 | * @dev: device to sync |
4646 | * @unsync: function to call if address should be removed | |
4647 | * | |
4648 | * Remove all addresses that were added to the device by dev_uc_sync(). | |
5e82b4b2 | 4649 | */ |
670e5b8e AD |
4650 | static inline void __dev_uc_unsync(struct net_device *dev, |
4651 | int (*unsync)(struct net_device *, | |
4652 | const unsigned char *)) | |
4653 | { | |
4654 | __hw_addr_unsync_dev(&dev->uc, dev, unsync); | |
4655 | } | |
4656 | ||
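An illustrative, hypothetical ndo_set_rx_mode implementation built on __dev_uc_sync()/__dev_uc_unsync(); the sync/unsync callbacks program a hardware filter table that this header knows nothing about, and the multicast helpers further below are used the same way:

static int mydrv_uc_sync(struct net_device *dev, const unsigned char *addr)
{
	return mydrv_add_filter(netdev_priv(dev), addr);	/* assumed helper */
}

static int mydrv_uc_unsync(struct net_device *dev, const unsigned char *addr)
{
	return mydrv_del_filter(netdev_priv(dev), addr);	/* assumed helper */
}

static void mydrv_set_rx_mode(struct net_device *dev)
{
	/* Push only the delta since the last call into the hardware. */
	__dev_uc_sync(dev, mydrv_uc_sync, mydrv_uc_unsync);
}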
22bedad3 | 4657 | /* Functions used for multicast addresses handling */ |
f629d208 JP |
4658 | int dev_mc_add(struct net_device *dev, const unsigned char *addr); |
4659 | int dev_mc_add_global(struct net_device *dev, const unsigned char *addr); | |
4660 | int dev_mc_add_excl(struct net_device *dev, const unsigned char *addr); | |
4661 | int dev_mc_del(struct net_device *dev, const unsigned char *addr); | |
4662 | int dev_mc_del_global(struct net_device *dev, const unsigned char *addr); | |
4663 | int dev_mc_sync(struct net_device *to, struct net_device *from); | |
4664 | int dev_mc_sync_multiple(struct net_device *to, struct net_device *from); | |
4665 | void dev_mc_unsync(struct net_device *to, struct net_device *from); | |
4666 | void dev_mc_flush(struct net_device *dev); | |
4667 | void dev_mc_init(struct net_device *dev); | |
f001fde5 | 4668 | |
670e5b8e AD |
4669 | /** |
 4670 | * __dev_mc_sync - Synchronize device's multicast list | |
4671 | * @dev: device to sync | |
4672 | * @sync: function to call if address should be added | |
4673 | * @unsync: function to call if address should be removed | |
4674 | * | |
4675 | * Add newly added addresses to the interface, and release | |
4676 | * addresses that have been deleted. | |
5e82b4b2 | 4677 | */ |
670e5b8e AD |
4678 | static inline int __dev_mc_sync(struct net_device *dev, |
4679 | int (*sync)(struct net_device *, | |
4680 | const unsigned char *), | |
4681 | int (*unsync)(struct net_device *, | |
4682 | const unsigned char *)) | |
4683 | { | |
4684 | return __hw_addr_sync_dev(&dev->mc, dev, sync, unsync); | |
4685 | } | |
4686 | ||
4687 | /** | |
e793c0f7 | 4688 | * __dev_mc_unsync - Remove synchronized addresses from device |
670e5b8e AD |
4689 | * @dev: device to sync |
4690 | * @unsync: function to call if address should be removed | |
4691 | * | |
4692 | * Remove all addresses that were added to the device by dev_mc_sync(). | |
5e82b4b2 | 4693 | */ |
670e5b8e AD |
4694 | static inline void __dev_mc_unsync(struct net_device *dev, |
4695 | int (*unsync)(struct net_device *, | |
4696 | const unsigned char *)) | |
4697 | { | |
4698 | __hw_addr_unsync_dev(&dev->mc, dev, unsync); | |
4699 | } | |
4700 | ||
4417da66 | 4701 | /* Functions used for secondary unicast and multicast support */ |
f629d208 JP |
4702 | void dev_set_rx_mode(struct net_device *dev); |
4703 | void __dev_set_rx_mode(struct net_device *dev); | |
4704 | int dev_set_promiscuity(struct net_device *dev, int inc); | |
4705 | int dev_set_allmulti(struct net_device *dev, int inc); | |
4706 | void netdev_state_change(struct net_device *dev); | |
7061eb8c | 4707 | void __netdev_notify_peers(struct net_device *dev); |
f629d208 JP |
4708 | void netdev_notify_peers(struct net_device *dev); |
4709 | void netdev_features_change(struct net_device *dev); | |
1da177e4 | 4710 | /* Load a device via the kmod */ |
f629d208 JP |
4711 | void dev_load(struct net *net, const char *name); |
4712 | struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev, | |
4713 | struct rtnl_link_stats64 *storage); | |
4714 | void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64, | |
4715 | const struct net_device_stats *netdev_stats); | |
44fa32f0 HK |
4716 | void dev_fetch_sw_netstats(struct rtnl_link_stats64 *s, |
4717 | const struct pcpu_sw_netstats __percpu *netstats); | |
a1839426 | 4718 | void dev_get_tstats64(struct net_device *dev, struct rtnl_link_stats64 *s); |
eeda3fd6 | 4719 | |
1da177e4 | 4720 | extern int netdev_max_backlog; |
3b098e2d | 4721 | extern int netdev_tstamp_prequeue; |
5aa3afe1 | 4722 | extern int netdev_unregister_timeout_secs; |
1da177e4 | 4723 | extern int weight_p; |
3d48b53f MT |
4724 | extern int dev_weight_rx_bias; |
4725 | extern int dev_weight_tx_bias; | |
4726 | extern int dev_rx_weight; | |
4727 | extern int dev_tx_weight; | |
323ebb61 | 4728 | extern int gro_normal_batch; |
9ff162a8 | 4729 | |
1fc70edb TY |
4730 | enum { |
4731 | NESTED_SYNC_IMM_BIT, | |
4732 | NESTED_SYNC_TODO_BIT, | |
4733 | }; | |
4734 | ||
4735 | #define __NESTED_SYNC_BIT(bit) ((u32)1 << (bit)) | |
4736 | #define __NESTED_SYNC(name) __NESTED_SYNC_BIT(NESTED_SYNC_ ## name ## _BIT) | |
4737 | ||
4738 | #define NESTED_SYNC_IMM __NESTED_SYNC(IMM) | |
4739 | #define NESTED_SYNC_TODO __NESTED_SYNC(TODO) | |
4740 | ||
eff74233 | 4741 | struct netdev_nested_priv { |
1fc70edb | 4742 | unsigned char flags; |
eff74233 TY |
4743 | void *data; |
4744 | }; | |
4745 | ||
f629d208 | 4746 | bool netdev_has_upper_dev(struct net_device *dev, struct net_device *upper_dev); |
44a40855 VY |
4747 | struct net_device *netdev_upper_get_next_dev_rcu(struct net_device *dev, |
4748 | struct list_head **iter); | |
f629d208 JP |
4749 | struct net_device *netdev_all_upper_get_next_dev_rcu(struct net_device *dev, |
4750 | struct list_head **iter); | |
8b5be856 | 4751 | |
1fc70edb TY |
4752 | #ifdef CONFIG_LOCKDEP |
4753 | static LIST_HEAD(net_unlink_list); | |
4754 | ||
4755 | static inline void net_unlink_todo(struct net_device *dev) | |
4756 | { | |
4757 | if (list_empty(&dev->unlink_list)) | |
4758 | list_add_tail(&dev->unlink_list, &net_unlink_list); | |
4759 | } | |
4760 | #endif | |
4761 | ||
44a40855 VY |
4762 | /* iterate through upper list, must be called under RCU read lock */ |
4763 | #define netdev_for_each_upper_dev_rcu(dev, updev, iter) \ | |
4764 | for (iter = &(dev)->adj_list.upper, \ | |
4765 | updev = netdev_upper_get_next_dev_rcu(dev, &(iter)); \ | |
4766 | updev; \ | |
4767 | updev = netdev_upper_get_next_dev_rcu(dev, &(iter))) | |
4768 | ||
1a3f060c DA |
4769 | int netdev_walk_all_upper_dev_rcu(struct net_device *dev, |
4770 | int (*fn)(struct net_device *upper_dev, | |
eff74233 TY |
4771 | struct netdev_nested_priv *priv), |
4772 | struct netdev_nested_priv *priv); | |
1a3f060c DA |
4773 | |
4774 | bool netdev_has_upper_dev_all_rcu(struct net_device *dev, | |
4775 | struct net_device *upper_dev); | |
4776 | ||
25cc72a3 IS |
4777 | bool netdev_has_any_upper_dev(struct net_device *dev); |
4778 | ||
f629d208 JP |
4779 | void *netdev_lower_get_next_private(struct net_device *dev, |
4780 | struct list_head **iter); | |
4781 | void *netdev_lower_get_next_private_rcu(struct net_device *dev, | |
4782 | struct list_head **iter); | |
31088a11 VF |
4783 | |
4784 | #define netdev_for_each_lower_private(dev, priv, iter) \ | |
4785 | for (iter = (dev)->adj_list.lower.next, \ | |
4786 | priv = netdev_lower_get_next_private(dev, &(iter)); \ | |
4787 | priv; \ | |
4788 | priv = netdev_lower_get_next_private(dev, &(iter))) | |
4789 | ||
4790 | #define netdev_for_each_lower_private_rcu(dev, priv, iter) \ | |
4791 | for (iter = &(dev)->adj_list.lower, \ | |
4792 | priv = netdev_lower_get_next_private_rcu(dev, &(iter)); \ | |
4793 | priv; \ | |
4794 | priv = netdev_lower_get_next_private_rcu(dev, &(iter))) | |
4795 | ||
4085ebe8 VY |
4796 | void *netdev_lower_get_next(struct net_device *dev, |
4797 | struct list_head **iter); | |
7ce856aa | 4798 | |
4085ebe8 | 4799 | #define netdev_for_each_lower_dev(dev, ldev, iter) \ |
cfdd28be | 4800 | for (iter = (dev)->adj_list.lower.next, \ |
4085ebe8 VY |
4801 | ldev = netdev_lower_get_next(dev, &(iter)); \ |
4802 | ldev; \ | |
4803 | ldev = netdev_lower_get_next(dev, &(iter))) | |
4804 | ||
7151affe | 4805 | struct net_device *netdev_next_lower_dev_rcu(struct net_device *dev, |
7ce856aa | 4806 | struct list_head **iter); |
1a3f060c DA |
4807 | int netdev_walk_all_lower_dev(struct net_device *dev, |
4808 | int (*fn)(struct net_device *lower_dev, | |
eff74233 TY |
4809 | struct netdev_nested_priv *priv), |
4810 | struct netdev_nested_priv *priv); | |
1a3f060c DA |
4811 | int netdev_walk_all_lower_dev_rcu(struct net_device *dev, |
4812 | int (*fn)(struct net_device *lower_dev, | |
eff74233 TY |
4813 | struct netdev_nested_priv *priv), |
4814 | struct netdev_nested_priv *priv); | |
1a3f060c | 4815 | |
f629d208 | 4816 | void *netdev_adjacent_get_private(struct list_head *adj_list); |
e001bfad | 4817 | void *netdev_lower_get_first_private_rcu(struct net_device *dev); |
f629d208 JP |
4818 | struct net_device *netdev_master_upper_dev_get(struct net_device *dev); |
4819 | struct net_device *netdev_master_upper_dev_get_rcu(struct net_device *dev); | |
42ab19ee DA |
4820 | int netdev_upper_dev_link(struct net_device *dev, struct net_device *upper_dev, |
4821 | struct netlink_ext_ack *extack); | |
f629d208 | 4822 | int netdev_master_upper_dev_link(struct net_device *dev, |
6dffb044 | 4823 | struct net_device *upper_dev, |
42ab19ee DA |
4824 | void *upper_priv, void *upper_info, |
4825 | struct netlink_ext_ack *extack); | |
f629d208 JP |
4826 | void netdev_upper_dev_unlink(struct net_device *dev, |
4827 | struct net_device *upper_dev); | |
32b6d34f TY |
4828 | int netdev_adjacent_change_prepare(struct net_device *old_dev, |
4829 | struct net_device *new_dev, | |
4830 | struct net_device *dev, | |
4831 | struct netlink_ext_ack *extack); | |
4832 | void netdev_adjacent_change_commit(struct net_device *old_dev, | |
4833 | struct net_device *new_dev, | |
4834 | struct net_device *dev); | |
4835 | void netdev_adjacent_change_abort(struct net_device *old_dev, | |
4836 | struct net_device *new_dev, | |
4837 | struct net_device *dev); | |
5bb025fa | 4838 | void netdev_adjacent_rename_links(struct net_device *dev, char *oldname); |
f629d208 JP |
4839 | void *netdev_lower_dev_get_private(struct net_device *dev, |
4840 | struct net_device *lower_dev); | |
04d48266 JP |
4841 | void netdev_lower_state_changed(struct net_device *lower_dev, |
4842 | void *lower_state_info); | |
960fb622 ED |
4843 | |
4844 | /* RSS keys are 40 or 52 bytes long */ | |
4845 | #define NETDEV_RSS_KEY_LEN 52 | |
ba905f5e | 4846 | extern u8 netdev_rss_key[NETDEV_RSS_KEY_LEN] __read_mostly; |
960fb622 ED |
4847 | void netdev_rss_key_fill(void *buffer, size_t len); |
4848 | ||
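As a usage note (hypothetical driver code): netdev_rss_key_fill() hands out a host-wide random key so that all devices hash consistently; a 40-byte Toeplitz key is a common hardware size, and mydrv_write_rss_key() is an assumed helper:

static void mydrv_init_rss(struct mydrv_priv *priv)
{
	u8 key[40];	/* Toeplitz key size used by many NICs */

	netdev_rss_key_fill(key, sizeof(key));
	mydrv_write_rss_key(priv, key, sizeof(key));	/* assumed helper */
}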
f629d208 | 4849 | int skb_checksum_help(struct sk_buff *skb); |
b72b5bf6 | 4850 | int skb_crc32c_csum_help(struct sk_buff *skb); |
43c26a1a DC |
4851 | int skb_csum_hwoffload_help(struct sk_buff *skb, |
4852 | const netdev_features_t features); | |
4853 | ||
f629d208 JP |
4854 | struct sk_buff *__skb_gso_segment(struct sk_buff *skb, |
4855 | netdev_features_t features, bool tx_path); | |
4856 | struct sk_buff *skb_mac_gso_segment(struct sk_buff *skb, | |
4857 | netdev_features_t features); | |
12b0004d | 4858 | |
61bd3857 MS |
4859 | struct netdev_bonding_info { |
4860 | ifslave slave; | |
4861 | ifbond master; | |
4862 | }; | |
4863 | ||
4864 | struct netdev_notifier_bonding_info { | |
4865 | struct netdev_notifier_info info; /* must be first */ | |
4866 | struct netdev_bonding_info bonding_info; | |
4867 | }; | |
4868 | ||
4869 | void netdev_bonding_info_change(struct net_device *dev, | |
4870 | struct netdev_bonding_info *bonding_info); | |
4871 | ||
6b08d6c1 MK |
4872 | #if IS_ENABLED(CONFIG_ETHTOOL_NETLINK) |
4873 | void ethtool_notify(struct net_device *dev, unsigned int cmd, const void *data); | |
4874 | #else | |
4875 | static inline void ethtool_notify(struct net_device *dev, unsigned int cmd, | |
4876 | const void *data) | |
4877 | { | |
4878 | } | |
4879 | #endif | |
4880 | ||
12b0004d CW |
4881 | static inline |
4882 | struct sk_buff *skb_gso_segment(struct sk_buff *skb, netdev_features_t features) | |
4883 | { | |
4884 | return __skb_gso_segment(skb, features, true); | |
4885 | } | |
53d6471c | 4886 | __be16 skb_network_protocol(struct sk_buff *skb, int *depth); |
ec5f0615 PS |
4887 | |
4888 | static inline bool can_checksum_protocol(netdev_features_t features, | |
4889 | __be16 protocol) | |
4890 | { | |
c8cd0989 TH |
4891 | if (protocol == htons(ETH_P_FCOE)) |
4892 | return !!(features & NETIF_F_FCOE_CRC); | |
4893 | ||
4894 | /* Assume this is an IP checksum (not SCTP CRC) */ | |
4895 | ||
4896 | if (features & NETIF_F_HW_CSUM) { | |
4897 | /* Can checksum everything */ | |
4898 | return true; | |
4899 | } | |
4900 | ||
4901 | switch (protocol) { | |
4902 | case htons(ETH_P_IP): | |
4903 | return !!(features & NETIF_F_IP_CSUM); | |
4904 | case htons(ETH_P_IPV6): | |
4905 | return !!(features & NETIF_F_IPV6_CSUM); | |
4906 | default: | |
4907 | return false; | |
4908 | } | |
ec5f0615 | 4909 | } |
12b0004d | 4910 | |
fb286bb2 | 4911 | #ifdef CONFIG_BUG |
7fe50ac8 | 4912 | void netdev_rx_csum_fault(struct net_device *dev, struct sk_buff *skb); |
fb286bb2 | 4913 | #else |
7fe50ac8 CW |
4914 | static inline void netdev_rx_csum_fault(struct net_device *dev, |
4915 | struct sk_buff *skb) | |
fb286bb2 HX |
4916 | { |
4917 | } | |
4918 | #endif | |
1da177e4 | 4919 | /* rx skb timestamps */ |
f629d208 JP |
4920 | void net_enable_timestamp(void); |
4921 | void net_disable_timestamp(void); | |
1da177e4 | 4922 | |
20380731 | 4923 | #ifdef CONFIG_PROC_FS |
f629d208 | 4924 | int __init dev_proc_init(void); |
900ff8c6 CW |
4925 | #else |
4926 | #define dev_proc_init() 0 | |
20380731 ACM |
4927 | #endif |
4928 | ||
4798248e | 4929 | static inline netdev_tx_t __netdev_start_xmit(const struct net_device_ops *ops, |
fa2dbdc2 DM |
4930 | struct sk_buff *skb, struct net_device *dev, |
4931 | bool more) | |
4798248e | 4932 | { |
6b16f9ee | 4933 | __this_cpu_write(softnet_data.xmit.more, more); |
0b725a2c | 4934 | return ops->ndo_start_xmit(skb, dev); |
4798248e DM |
4935 | } |
4936 | ||
97cdcf37 FW |
4937 | static inline bool netdev_xmit_more(void) |
4938 | { | |
4939 | return __this_cpu_read(softnet_data.xmit.more); | |
4940 | } | |
4941 | ||
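A hedged sketch of the usual netdev_xmit_more() pattern in a driver's ndo_start_xmit(): the doorbell (tail-pointer write) is deferred while the stack promises more frames, as long as the queue has not been stopped. The mydrv_* helpers are assumptions:

static netdev_tx_t mydrv_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct mydrv_ring *ring = mydrv_pick_ring(dev, skb);	/* assumed */

	mydrv_post_descriptor(ring, skb);			/* assumed */

	/* Only ring the doorbell when the batch ends or the queue fills. */
	if (!netdev_xmit_more() || netif_xmit_stopped(ring->txq))
		mydrv_ring_doorbell(ring);			/* assumed */

	return NETDEV_TX_OK;
}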
10b3ad8c | 4942 | static inline netdev_tx_t netdev_start_xmit(struct sk_buff *skb, struct net_device *dev, |
fa2dbdc2 | 4943 | struct netdev_queue *txq, bool more) |
4798248e DM |
4944 | { |
4945 | const struct net_device_ops *ops = dev->netdev_ops; | |
2183435c | 4946 | netdev_tx_t rc; |
4798248e | 4947 | |
fa2dbdc2 | 4948 | rc = __netdev_start_xmit(ops, skb, dev, more); |
10b3ad8c DM |
4949 | if (rc == NETDEV_TX_OK) |
4950 | txq_trans_update(txq); | |
4951 | ||
4952 | return rc; | |
4798248e DM |
4953 | } |
4954 | ||
b793dc5c | 4955 | int netdev_class_create_file_ns(const struct class_attribute *class_attr, |
42a2d923 | 4956 | const void *ns); |
b793dc5c | 4957 | void netdev_class_remove_file_ns(const struct class_attribute *class_attr, |
42a2d923 | 4958 | const void *ns); |
58292cbe | 4959 | |
737aec57 | 4960 | extern const struct kobj_ns_type_operations net_ns_type_operations; |
04600794 | 4961 | |
f629d208 | 4962 | const char *netdev_drivername(const struct net_device *dev); |
6579e57b | 4963 | |
f629d208 | 4964 | void linkwatch_run_queue(void); |
20380731 | 4965 | |
da08143b MK |
4966 | static inline netdev_features_t netdev_intersect_features(netdev_features_t f1, |
4967 | netdev_features_t f2) | |
4968 | { | |
c8cd0989 TH |
4969 | if ((f1 ^ f2) & NETIF_F_HW_CSUM) { |
4970 | if (f1 & NETIF_F_HW_CSUM) | |
b6a0e72a | 4971 | f1 |= (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM); |
c8cd0989 | 4972 | else |
b6a0e72a | 4973 | f2 |= (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM); |
c8cd0989 | 4974 | } |
da08143b | 4975 | |
c8cd0989 | 4976 | return f1 & f2; |
da08143b MK |
4977 | } |
4978 | ||
c8f44aff MM |
4979 | static inline netdev_features_t netdev_get_wanted_features( |
4980 | struct net_device *dev) | |
5455c699 MM |
4981 | { |
4982 | return (dev->features & ~dev->hw_features) | dev->wanted_features; | |
4983 | } | |
c8f44aff MM |
4984 | netdev_features_t netdev_increment_features(netdev_features_t all, |
4985 | netdev_features_t one, netdev_features_t mask); | |
b0ce3508 ED |
4986 | |
 4987 | /* Allow TSO to be used on stacked devices: | |
 4988 | * performing the GSO segmentation before the last device | |
 4989 | * is a performance improvement. | |
4990 | */ | |
4991 | static inline netdev_features_t netdev_add_tso_features(netdev_features_t features, | |
4992 | netdev_features_t mask) | |
4993 | { | |
4994 | return netdev_increment_features(features, NETIF_F_ALL_TSO, mask); | |
4995 | } | |
4996 | ||
6cb6a27c | 4997 | int __netdev_update_features(struct net_device *dev); |
5455c699 | 4998 | void netdev_update_features(struct net_device *dev); |
afe12cc8 | 4999 | void netdev_change_features(struct net_device *dev); |
7f353bf2 | 5000 | |
fc4a7489 PM |
5001 | void netif_stacked_transfer_operstate(const struct net_device *rootdev, |
5002 | struct net_device *dev); | |
5003 | ||
e38f3025 TM |
5004 | netdev_features_t passthru_features_check(struct sk_buff *skb, |
5005 | struct net_device *dev, | |
5006 | netdev_features_t features); | |
c1e756bf | 5007 | netdev_features_t netif_skb_features(struct sk_buff *skb); |
58e998c6 | 5008 | |
4d29515f | 5009 | static inline bool net_gso_ok(netdev_features_t features, int gso_type) |
576a30eb | 5010 | { |
7b748340 | 5011 | netdev_features_t feature = (netdev_features_t)gso_type << NETIF_F_GSO_SHIFT; |
0345e186 MM |
5012 | |
5013 | /* check flags correspondence */ | |
5014 | BUILD_BUG_ON(SKB_GSO_TCPV4 != (NETIF_F_TSO >> NETIF_F_GSO_SHIFT)); | |
0345e186 MM |
5015 | BUILD_BUG_ON(SKB_GSO_DODGY != (NETIF_F_GSO_ROBUST >> NETIF_F_GSO_SHIFT)); |
5016 | BUILD_BUG_ON(SKB_GSO_TCP_ECN != (NETIF_F_TSO_ECN >> NETIF_F_GSO_SHIFT)); | |
cbc53e08 | 5017 | BUILD_BUG_ON(SKB_GSO_TCP_FIXEDID != (NETIF_F_TSO_MANGLEID >> NETIF_F_GSO_SHIFT)); |
0345e186 MM |
5018 | BUILD_BUG_ON(SKB_GSO_TCPV6 != (NETIF_F_TSO6 >> NETIF_F_GSO_SHIFT)); |
5019 | BUILD_BUG_ON(SKB_GSO_FCOE != (NETIF_F_FSO >> NETIF_F_GSO_SHIFT)); | |
4b28252c TH |
5020 | BUILD_BUG_ON(SKB_GSO_GRE != (NETIF_F_GSO_GRE >> NETIF_F_GSO_SHIFT)); |
5021 | BUILD_BUG_ON(SKB_GSO_GRE_CSUM != (NETIF_F_GSO_GRE_CSUM >> NETIF_F_GSO_SHIFT)); | |
7e13318d TH |
5022 | BUILD_BUG_ON(SKB_GSO_IPXIP4 != (NETIF_F_GSO_IPXIP4 >> NETIF_F_GSO_SHIFT)); |
5023 | BUILD_BUG_ON(SKB_GSO_IPXIP6 != (NETIF_F_GSO_IPXIP6 >> NETIF_F_GSO_SHIFT)); | |
4b28252c TH |
5024 | BUILD_BUG_ON(SKB_GSO_UDP_TUNNEL != (NETIF_F_GSO_UDP_TUNNEL >> NETIF_F_GSO_SHIFT)); |
5025 | BUILD_BUG_ON(SKB_GSO_UDP_TUNNEL_CSUM != (NETIF_F_GSO_UDP_TUNNEL_CSUM >> NETIF_F_GSO_SHIFT)); | |
802ab55a | 5026 | BUILD_BUG_ON(SKB_GSO_PARTIAL != (NETIF_F_GSO_PARTIAL >> NETIF_F_GSO_SHIFT)); |
e585f236 | 5027 | BUILD_BUG_ON(SKB_GSO_TUNNEL_REMCSUM != (NETIF_F_GSO_TUNNEL_REMCSUM >> NETIF_F_GSO_SHIFT)); |
90017acc | 5028 | BUILD_BUG_ON(SKB_GSO_SCTP != (NETIF_F_GSO_SCTP >> NETIF_F_GSO_SHIFT)); |
c7ef8f0c | 5029 | BUILD_BUG_ON(SKB_GSO_ESP != (NETIF_F_GSO_ESP >> NETIF_F_GSO_SHIFT)); |
0c19f846 | 5030 | BUILD_BUG_ON(SKB_GSO_UDP != (NETIF_F_GSO_UDP >> NETIF_F_GSO_SHIFT)); |
83aa025f | 5031 | BUILD_BUG_ON(SKB_GSO_UDP_L4 != (NETIF_F_GSO_UDP_L4 >> NETIF_F_GSO_SHIFT)); |
3b335832 | 5032 | BUILD_BUG_ON(SKB_GSO_FRAGLIST != (NETIF_F_GSO_FRAGLIST >> NETIF_F_GSO_SHIFT)); |
0345e186 | 5033 | |
d6b4991a | 5034 | return (features & feature) == feature; |
576a30eb HX |
5035 | } |
5036 | ||
4d29515f | 5037 | static inline bool skb_gso_ok(struct sk_buff *skb, netdev_features_t features) |
bcd76111 | 5038 | { |
278b2513 | 5039 | return net_gso_ok(features, skb_shinfo(skb)->gso_type) && |
21dc3301 | 5040 | (!skb_has_frag_list(skb) || (features & NETIF_F_FRAGLIST)); |
bcd76111 HX |
5041 | } |
5042 | ||
8b86a61d | 5043 | static inline bool netif_needs_gso(struct sk_buff *skb, |
4d29515f | 5044 | netdev_features_t features) |
7967168c | 5045 | { |
fc741216 | 5046 | return skb_is_gso(skb) && (!skb_gso_ok(skb, features) || |
cdbee74c YZ |
5047 | unlikely((skb->ip_summed != CHECKSUM_PARTIAL) && |
5048 | (skb->ip_summed != CHECKSUM_UNNECESSARY))); | |
7967168c HX |
5049 | } |
5050 | ||
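To illustrate how these predicates compose (a simplified sketch of what the core validate-xmit path does, not a verbatim copy): a GSO skb that the device cannot handle is segmented in software before transmission:

static struct sk_buff *segment_if_needed(struct sk_buff *skb,
					 struct net_device *dev)
{
	netdev_features_t features = netif_skb_features(skb);

	if (netif_needs_gso(skb, features)) {
		struct sk_buff *segs;

		segs = skb_gso_segment(skb, features);
		if (IS_ERR(segs))
			return NULL;		/* caller drops the skb */
		if (segs)
			return segs;		/* list of MTU-sized skbs */
	}
	return skb;				/* device can take it as-is */
}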
82cc1a7a PWJ |
5051 | static inline void netif_set_gso_max_size(struct net_device *dev, |
5052 | unsigned int size) | |
5053 | { | |
5054 | dev->gso_max_size = size; | |
5055 | } | |
5056 | ||
7a7ffbab WCC |
5057 | static inline void skb_gso_error_unwind(struct sk_buff *skb, __be16 protocol, |
5058 | int pulled_hlen, u16 mac_offset, | |
5059 | int mac_len) | |
5060 | { | |
5061 | skb->protocol = protocol; | |
5062 | skb->encapsulation = 1; | |
5063 | skb_push(skb, pulled_hlen); | |
5064 | skb_reset_transport_header(skb); | |
5065 | skb->mac_header = mac_offset; | |
5066 | skb->network_header = skb->mac_header + mac_len; | |
5067 | skb->mac_len = mac_len; | |
5068 | } | |
5069 | ||
3c175784 SD |
5070 | static inline bool netif_is_macsec(const struct net_device *dev) |
5071 | { | |
5072 | return dev->priv_flags & IFF_MACSEC; | |
5073 | } | |
5074 | ||
b618aaa9 | 5075 | static inline bool netif_is_macvlan(const struct net_device *dev) |
a6cc0cfa JF |
5076 | { |
5077 | return dev->priv_flags & IFF_MACVLAN; | |
5078 | } | |
5079 | ||
b618aaa9 | 5080 | static inline bool netif_is_macvlan_port(const struct net_device *dev) |
2f33e7d5 MB |
5081 | { |
5082 | return dev->priv_flags & IFF_MACVLAN_PORT; | |
5083 | } | |
5084 | ||
b618aaa9 | 5085 | static inline bool netif_is_bond_master(const struct net_device *dev) |
8a7fbfab | 5086 | { |
5087 | return dev->flags & IFF_MASTER && dev->priv_flags & IFF_BONDING; | |
5088 | } | |
5089 | ||
b618aaa9 | 5090 | static inline bool netif_is_bond_slave(const struct net_device *dev) |
1765a575 JP |
5091 | { |
5092 | return dev->flags & IFF_SLAVE && dev->priv_flags & IFF_BONDING; | |
5093 | } | |
5094 | ||
3bdc0eba BG |
5095 | static inline bool netif_supports_nofcs(struct net_device *dev) |
5096 | { | |
5097 | return dev->priv_flags & IFF_SUPP_NOFCS; | |
5098 | } | |
5099 | ||
d5256083 DB |
5100 | static inline bool netif_has_l3_rx_handler(const struct net_device *dev) |
5101 | { | |
5102 | return dev->priv_flags & IFF_L3MDEV_RX_HANDLER; | |
5103 | } | |
5104 | ||
007979ea | 5105 | static inline bool netif_is_l3_master(const struct net_device *dev) |
4e3c8992 | 5106 | { |
007979ea | 5107 | return dev->priv_flags & IFF_L3MDEV_MASTER; |
4e3c8992 DA |
5108 | } |
5109 | ||
fee6d4c7 DA |
5110 | static inline bool netif_is_l3_slave(const struct net_device *dev) |
5111 | { | |
5112 | return dev->priv_flags & IFF_L3MDEV_SLAVE; | |
5113 | } | |
5114 | ||
0894ae3f JP |
5115 | static inline bool netif_is_bridge_master(const struct net_device *dev) |
5116 | { | |
5117 | return dev->priv_flags & IFF_EBRIDGE; | |
5118 | } | |
5119 | ||
28f9ee22 VY |
5120 | static inline bool netif_is_bridge_port(const struct net_device *dev) |
5121 | { | |
5122 | return dev->priv_flags & IFF_BRIDGE_PORT; | |
5123 | } | |
5124 | ||
35d4e172 JP |
5125 | static inline bool netif_is_ovs_master(const struct net_device *dev) |
5126 | { | |
5127 | return dev->priv_flags & IFF_OPENVSWITCH; | |
5128 | } | |
5129 | ||
5be66141 JP |
5130 | static inline bool netif_is_ovs_port(const struct net_device *dev) |
5131 | { | |
5132 | return dev->priv_flags & IFF_OVS_DATAPATH; | |
5133 | } | |
5134 | ||
df23bb18 SB |
5135 | static inline bool netif_is_any_bridge_port(const struct net_device *dev) |
5136 | { | |
5137 | return netif_is_bridge_port(dev) || netif_is_ovs_port(dev); | |
5138 | } | |
5139 | ||
b618aaa9 | 5140 | static inline bool netif_is_team_master(const struct net_device *dev) |
c981e421 JP |
5141 | { |
5142 | return dev->priv_flags & IFF_TEAM; | |
5143 | } | |
5144 | ||
b618aaa9 | 5145 | static inline bool netif_is_team_port(const struct net_device *dev) |
f7f019ee JP |
5146 | { |
5147 | return dev->priv_flags & IFF_TEAM_PORT; | |
5148 | } | |
5149 | ||
b618aaa9 | 5150 | static inline bool netif_is_lag_master(const struct net_device *dev) |
7be61833 JP |
5151 | { |
5152 | return netif_is_bond_master(dev) || netif_is_team_master(dev); | |
5153 | } | |
5154 | ||
b618aaa9 | 5155 | static inline bool netif_is_lag_port(const struct net_device *dev) |
e0ba1414 JP |
5156 | { |
5157 | return netif_is_bond_slave(dev) || netif_is_team_port(dev); | |
5158 | } | |
5159 | ||
d4ab4286 KJ |
5160 | static inline bool netif_is_rxfh_configured(const struct net_device *dev) |
5161 | { | |
5162 | return dev->priv_flags & IFF_RXFH_CONFIGURED; | |
5163 | } | |
5164 | ||
30c8bd5a SS |
5165 | static inline bool netif_is_failover(const struct net_device *dev) |
5166 | { | |
5167 | return dev->priv_flags & IFF_FAILOVER; | |
5168 | } | |
5169 | ||
5170 | static inline bool netif_is_failover_slave(const struct net_device *dev) | |
5171 | { | |
5172 | return dev->priv_flags & IFF_FAILOVER_SLAVE; | |
5173 | } | |
5174 | ||
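The netif_is_*() predicates above are typically used to filter netdevice notifier events or to walk device stacks. A minimal, hedged sketch of a notifier callback (the callback itself is hypothetical):

static int my_netdev_event(struct notifier_block *nb,
			   unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);

	/* Only LAG (bond/team) masters and bridges are interesting here. */
	if (!netif_is_lag_master(dev) && !netif_is_bridge_master(dev))
		return NOTIFY_DONE;

	switch (event) {
	case NETDEV_UP:
		/* ... react to the aggregate device coming up ... */
		break;
	}
	return NOTIFY_DONE;
}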
02875878 ED |
5175 | /* This device needs to keep skb dst for qdisc enqueue or ndo_start_xmit() */ |
5176 | static inline void netif_keep_dst(struct net_device *dev) | |
5177 | { | |
5178 | dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_XMIT_DST_RELEASE_PERM); | |
5179 | } | |
5180 | ||
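Hedged example of netif_keep_dst(): a virtual device whose transmit path still dereferences skb_dst(), such as a tunnel making routing decisions, opts out of early dst release in its setup routine; the setup function name is illustrative.

static void my_tunnel_setup(struct net_device *dev)
{
	/* ndo_start_xmit() will look at skb_dst(), so ask the core to keep it. */
	netif_keep_dst(dev);
}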
18d3df3e PA |
5181 | /* Return true if dev can't cope with MTU-sized frames that need VLAN tag insertion */
5182 | static inline bool netif_reduces_vlan_mtu(struct net_device *dev) | |
5183 | { | |
5184 | /* TODO: reserve and use an additional IFF bit, if we get more users */ | |
5185 | return dev->priv_flags & IFF_MACSEC; | |
5186 | } | |
5187 | ||
505d4f73 | 5188 | extern struct pernet_operations __net_initdata loopback_net_ops; |
b1b67dd4 | 5189 | |
571ba423 JP |
5190 | /* Logging, debugging and troubleshooting/diagnostic helpers. */ |
5191 | ||
5192 | /* netdev_printk helpers, similar to dev_printk */ | |
5193 | ||
5194 | static inline const char *netdev_name(const struct net_device *dev) | |
5195 | { | |
c6f854d5 VF |
5196 | if (!dev->name[0] || strchr(dev->name, '%')) |
5197 | return "(unnamed net_device)"; | |
571ba423 JP |
5198 | return dev->name; |
5199 | } | |
5200 | ||
8397ed36 DA |
5201 | static inline bool netdev_unregistering(const struct net_device *dev) |
5202 | { | |
5203 | return dev->reg_state == NETREG_UNREGISTERING; | |
5204 | } | |
5205 | ||
ccc7f496 VF |
5206 | static inline const char *netdev_reg_state(const struct net_device *dev) |
5207 | { | |
5208 | switch (dev->reg_state) { | |
5209 | case NETREG_UNINITIALIZED: return " (uninitialized)"; | |
5210 | case NETREG_REGISTERED: return ""; | |
5211 | case NETREG_UNREGISTERING: return " (unregistering)"; | |
5212 | case NETREG_UNREGISTERED: return " (unregistered)"; | |
5213 | case NETREG_RELEASED: return " (released)"; | |
5214 | case NETREG_DUMMY: return " (dummy)"; | |
5215 | } | |
5216 | ||
5217 | WARN_ONCE(1, "%s: unknown reg_state %d\n", dev->name, dev->reg_state); | |
5218 | return " (unknown)"; | |
5219 | } | |
5220 | ||
ce3fdb69 | 5221 | __printf(3, 4) __cold |
6ea754eb JP |
5222 | void netdev_printk(const char *level, const struct net_device *dev, |
5223 | const char *format, ...); | |
ce3fdb69 | 5224 | __printf(2, 3) __cold |
6ea754eb | 5225 | void netdev_emerg(const struct net_device *dev, const char *format, ...); |
ce3fdb69 | 5226 | __printf(2, 3) __cold |
6ea754eb | 5227 | void netdev_alert(const struct net_device *dev, const char *format, ...); |
ce3fdb69 | 5228 | __printf(2, 3) __cold |
6ea754eb | 5229 | void netdev_crit(const struct net_device *dev, const char *format, ...); |
ce3fdb69 | 5230 | __printf(2, 3) __cold |
6ea754eb | 5231 | void netdev_err(const struct net_device *dev, const char *format, ...); |
ce3fdb69 | 5232 | __printf(2, 3) __cold |
6ea754eb | 5233 | void netdev_warn(const struct net_device *dev, const char *format, ...); |
ce3fdb69 | 5234 | __printf(2, 3) __cold |
6ea754eb | 5235 | void netdev_notice(const struct net_device *dev, const char *format, ...); |
ce3fdb69 | 5236 | __printf(2, 3) __cold |
6ea754eb | 5237 | void netdev_info(const struct net_device *dev, const char *format, ...); |
571ba423 | 5238 | |
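These helpers prefix each message with the device name (plus its registration state when it is not fully registered). A hedged usage sketch; my_open() and my_hw_start() are illustrative names, not part of this header.

static int my_open(struct net_device *dev)
{
	int err = my_hw_start(dev);	/* hypothetical hardware bring-up helper */

	if (err) {
		netdev_err(dev, "failed to start hardware: %d\n", err);
		return err;
	}

	netdev_info(dev, "interface up\n");
	return 0;
}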
375ef2b1 GP |
5239 | #define netdev_level_once(level, dev, fmt, ...) \ |
5240 | do { \ | |
5241 | static bool __print_once __read_mostly; \ | |
5242 | \ | |
5243 | if (!__print_once) { \ | |
5244 | __print_once = true; \ | |
5245 | netdev_printk(level, dev, fmt, ##__VA_ARGS__); \ | |
5246 | } \ | |
5247 | } while (0) | |
5248 | ||
5249 | #define netdev_emerg_once(dev, fmt, ...) \ | |
5250 | netdev_level_once(KERN_EMERG, dev, fmt, ##__VA_ARGS__) | |
5251 | #define netdev_alert_once(dev, fmt, ...) \ | |
5252 | netdev_level_once(KERN_ALERT, dev, fmt, ##__VA_ARGS__) | |
5253 | #define netdev_crit_once(dev, fmt, ...) \ | |
5254 | netdev_level_once(KERN_CRIT, dev, fmt, ##__VA_ARGS__) | |
5255 | #define netdev_err_once(dev, fmt, ...) \ | |
5256 | netdev_level_once(KERN_ERR, dev, fmt, ##__VA_ARGS__) | |
5257 | #define netdev_warn_once(dev, fmt, ...) \ | |
5258 | netdev_level_once(KERN_WARNING, dev, fmt, ##__VA_ARGS__) | |
5259 | #define netdev_notice_once(dev, fmt, ...) \ | |
5260 | netdev_level_once(KERN_NOTICE, dev, fmt, ##__VA_ARGS__) | |
5261 | #define netdev_info_once(dev, fmt, ...) \ | |
5262 | netdev_level_once(KERN_INFO, dev, fmt, ##__VA_ARGS__) | |
5263 | ||
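The *_once variants log only the first time a given call site is hit, which suits recurring but non-fatal conditions. A hedged example; the MTU check is illustrative.

static void my_validate_mtu(struct net_device *dev, int new_mtu)
{
	if (new_mtu < 68)
		netdev_warn_once(dev, "MTU %d is below the IPv4 minimum\n",
				 new_mtu);
}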
8909c9ad VK |
5264 | #define MODULE_ALIAS_NETDEV(device) \ |
5265 | MODULE_ALIAS("netdev-" device) | |
5266 | ||
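MODULE_ALIAS_NETDEV() lets a module be demand-loaded when the core issues request_module("netdev-<name>") for a device that does not exist yet. An illustrative declaration for a hypothetical driver that creates a device named "mynet0":

MODULE_ALIAS_NETDEV("mynet0");	/* satisfied by request_module("netdev-mynet0") */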
ceabef7d OZ |
5267 | #if defined(CONFIG_DYNAMIC_DEBUG) || \ |
5268 | (defined(CONFIG_DYNAMIC_DEBUG_CORE) && defined(DYNAMIC_DEBUG_MODULE)) | |
571ba423 JP |
5269 | #define netdev_dbg(__dev, format, args...) \ |
5270 | do { \ | |
ffa10cb4 | 5271 | dynamic_netdev_dbg(__dev, format, ##args); \ |
571ba423 | 5272 | } while (0) |
b558c96f JC |
5273 | #elif defined(DEBUG) |
5274 | #define netdev_dbg(__dev, format, args...) \ | |
5275 | netdev_printk(KERN_DEBUG, __dev, format, ##args) | |
571ba423 JP |
5276 | #else |
5277 | #define netdev_dbg(__dev, format, args...) \ | |
5278 | ({ \ | |
5279 | if (0) \ | |
5280 | netdev_printk(KERN_DEBUG, __dev, format, ##args); \ | |
571ba423 JP |
5281 | }) |
5282 | #endif | |
5283 | ||
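netdev_dbg() becomes a dynamic-debug call site when CONFIG_DYNAMIC_DEBUG is enabled (toggled at run time via the dynamic_debug control file), a plain KERN_DEBUG message when DEBUG is defined, and a no-op that still type-checks its arguments otherwise. A hedged usage sketch with illustrative names:

static void my_handle_irq(struct net_device *dev, u32 status)
{
	/* Silent unless this call site (or the whole file) is enabled. */
	netdev_dbg(dev, "irq status %#x\n", status);
}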
5284 | #if defined(VERBOSE_DEBUG) | |
5285 | #define netdev_vdbg netdev_dbg | |
5286 | #else | |
5287 | ||
5288 | #define netdev_vdbg(dev, format, args...) \ | |
5289 | ({ \ | |
5290 | if (0) \ | |
5291 | netdev_printk(KERN_DEBUG, dev, format, ##args); \ | |
5292 | 0; \ | |
5293 | }) | |
5294 | #endif | |
5295 | ||
5296 | /* | |
5297 | * netdev_WARN() acts like netdev_printk(), but with the key difference
5298 | * of using a WARN/WARN_ON to get the message out, including the | |
5299 | * file/line information and a backtrace. | |
5300 | */ | |
5301 | #define netdev_WARN(dev, format, args...) \ | |
e1cfe3d0 | 5302 | WARN(1, "netdevice: %s%s: " format, netdev_name(dev), \ |
ccc7f496 | 5303 | netdev_reg_state(dev), ##args) |
571ba423 | 5304 | |
72dd831e | 5305 | #define netdev_WARN_ONCE(dev, format, args...) \ |
e1cfe3d0 | 5306 | WARN_ONCE(1, "netdevice: %s%s: " format, netdev_name(dev), \ |
375ef2b1 GP |
5307 | netdev_reg_state(dev), ##args) |
5308 | ||
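Hedged example: netdev_WARN_ONCE() suits driver-internal invariants, where one backtrace is more useful than a stream of repeated messages; the accounting check is illustrative.

static void my_tx_done(struct net_device *dev, unsigned int completed,
		       unsigned int in_flight)
{
	if (completed > in_flight)
		netdev_WARN_ONCE(dev, "completed %u exceeds %u in flight\n",
				 completed, in_flight);
}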
b3d95c5c JP |
5309 | /* netif printk helpers, similar to netdev_printk */ |
5310 | ||
5311 | #define netif_printk(priv, type, level, dev, fmt, args...) \ | |
5312 | do { \ | |
5313 | if (netif_msg_##type(priv)) \ | |
5314 | netdev_printk(level, (dev), fmt, ##args); \ | |
5315 | } while (0) | |
5316 | ||
f45f4321 JP |
5317 | #define netif_level(level, priv, type, dev, fmt, args...) \ |
5318 | do { \ | |
5319 | if (netif_msg_##type(priv)) \ | |
5320 | netdev_##level(dev, fmt, ##args); \ | |
5321 | } while (0) | |
5322 | ||
b3d95c5c | 5323 | #define netif_emerg(priv, type, dev, fmt, args...) \ |
f45f4321 | 5324 | netif_level(emerg, priv, type, dev, fmt, ##args) |
b3d95c5c | 5325 | #define netif_alert(priv, type, dev, fmt, args...) \ |
f45f4321 | 5326 | netif_level(alert, priv, type, dev, fmt, ##args) |
b3d95c5c | 5327 | #define netif_crit(priv, type, dev, fmt, args...) \ |
f45f4321 | 5328 | netif_level(crit, priv, type, dev, fmt, ##args) |
b3d95c5c | 5329 | #define netif_err(priv, type, dev, fmt, args...) \ |
f45f4321 | 5330 | netif_level(err, priv, type, dev, fmt, ##args) |
b3d95c5c | 5331 | #define netif_warn(priv, type, dev, fmt, args...) \ |
f45f4321 | 5332 | netif_level(warn, priv, type, dev, fmt, ##args) |
b3d95c5c | 5333 | #define netif_notice(priv, type, dev, fmt, args...) \ |
f45f4321 | 5334 | netif_level(notice, priv, type, dev, fmt, ##args) |
b3d95c5c | 5335 | #define netif_info(priv, type, dev, fmt, args...) \ |
f45f4321 | 5336 | netif_level(info, priv, type, dev, fmt, ##args) |
b3d95c5c | 5337 | |
ceabef7d OZ |
5338 | #if defined(CONFIG_DYNAMIC_DEBUG) || \ |
5339 | (defined(CONFIG_DYNAMIC_DEBUG_CORE) && defined(DYNAMIC_DEBUG_MODULE)) | |
b3d95c5c JP |
5340 | #define netif_dbg(priv, type, netdev, format, args...) \ |
5341 | do { \ | |
5342 | if (netif_msg_##type(priv)) \ | |
b5fb0a03 | 5343 | dynamic_netdev_dbg(netdev, format, ##args); \ |
b3d95c5c | 5344 | } while (0) |
0053ea9c JP |
5345 | #elif defined(DEBUG) |
5346 | #define netif_dbg(priv, type, dev, format, args...) \ | |
5347 | netif_printk(priv, type, KERN_DEBUG, dev, format, ##args) | |
b3d95c5c JP |
5348 | #else |
5349 | #define netif_dbg(priv, type, dev, format, args...) \ | |
5350 | ({ \ | |
5351 | if (0) \ | |
5352 | netif_printk(priv, type, KERN_DEBUG, dev, format, ##args); \ | |
5353 | 0; \ | |
5354 | }) | |
5355 | #endif | |
5356 | ||
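The netif_*() variants additionally gate each message on a per-driver msg_enable bitmap of NETIF_MSG_* classes (commonly exposed through ethtool). A hedged sketch with an illustrative private structure:

struct my_priv {
	struct net_device *dev;
	u32 msg_enable;			/* bitmap of NETIF_MSG_* classes */
};

static void my_link_change(struct my_priv *priv, bool up)
{
	/* Printed only if NETIF_MSG_LINK is set in priv->msg_enable. */
	netif_info(priv, link, priv->dev, "link %s\n", up ? "up" : "down");
}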
f617f276 EC |
5357 | /* if @cond then downgrade to debug, else print at @level */ |
5358 | #define netif_cond_dbg(priv, type, netdev, cond, level, fmt, args...) \ | |
5359 | do { \ | |
5360 | if (cond) \ | |
5361 | netif_dbg(priv, type, netdev, fmt, ##args); \ | |
5362 | else \ | |
5363 | netif_ ## level(priv, type, netdev, fmt, ##args); \ | |
5364 | } while (0) | |
5365 | ||
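Hedged example of netif_cond_dbg(), reusing the illustrative my_priv above: an expected failure (transient memory pressure) is demoted to debug, while anything else is reported at err level.

static void my_refill_failed(struct my_priv *priv, int err)
{
	netif_cond_dbg(priv, rx_err, priv->dev, err == -ENOMEM, err,
		       "RX ring refill failed: %d\n", err);
}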
b3d95c5c | 5366 | #if defined(VERBOSE_DEBUG) |
bcfcc450 | 5367 | #define netif_vdbg netif_dbg |
b3d95c5c JP |
5368 | #else |
5369 | #define netif_vdbg(priv, type, dev, format, args...) \ | |
5370 | ({ \ | |
5371 | if (0) \ | |
a4ed89cb | 5372 | netif_printk(priv, type, KERN_DEBUG, dev, format, ##args); \ |
b3d95c5c JP |
5373 | 0; \ |
5374 | }) | |
5375 | #endif | |
571ba423 | 5376 | |
900ff8c6 CW |
5377 | /* |
5378 | * The list of packet types we will receive (as opposed to discard) | |
5379 | * and the routines to invoke. | |
5380 | * | |
5381 | * Why 16? Because with 16, the only overlap we get on a hash of the
5382 | * low nibble of the protocol value is RARP/SNAP/X.25. | |
5383 | * | |
900ff8c6 | 5384 | * 0800 IP |
900ff8c6 CW |
5385 | * 0001 802.3 |
5386 | * 0002 AX.25 | |
5387 | * 0004 802.2 | |
5388 | * 8035 RARP | |
5389 | * 0005 SNAP | |
5390 | * 0805 X.25 | |
5391 | * 0806 ARP | |
5392 | * 8137 IPX | |
5393 | * 0009 Localtalk | |
5394 | * 86DD IPv6 | |
5395 | */ | |
5396 | #define PTYPE_HASH_SIZE (16) | |
5397 | #define PTYPE_HASH_MASK (PTYPE_HASH_SIZE - 1) | |
5398 | ||
744b8376 VO |
5399 | extern struct list_head ptype_all __read_mostly; |
5400 | extern struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly; | |
5401 | ||
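A hedged sketch of how the core indexes ptype_base with the mask defined above (compare ptype_head() in net/core/dev.c); the wrapper name is illustrative.

static inline struct list_head *my_ptype_bucket(__be16 type)
{
	/* Hash on the low nibble of the EtherType: 0x0806 (ARP) lands in bucket 6. */
	return &ptype_base[ntohs(type) & PTYPE_HASH_MASK];
}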
4de83b88 MB |
5402 | extern struct net_device *blackhole_netdev; |
5403 | ||
385a154c | 5404 | #endif /* _LINUX_NETDEVICE_H */ |