Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | /* |
2 | * INET An implementation of the TCP/IP protocol suite for the LINUX | |
3 | * operating system. INET is implemented using the BSD Socket | |
4 | * interface as the means of communication with the user level. | |
5 | * | |
6 | * Definitions for the Interfaces handler. | |
7 | * | |
8 | * Version: @(#)dev.h 1.0.10 08/12/93 | |
9 | * | |
02c30a84 | 10 | * Authors: Ross Biro |
1da177e4 LT |
11 | * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG> |
12 | * Corey Minyard <wf-rch!minyard@relay.EU.net> | |
13 | * Donald J. Becker, <becker@cesdis.gsfc.nasa.gov> | |
113aa838 | 14 | * Alan Cox, <alan@lxorguk.ukuu.org.uk> |
1da177e4 LT |
15 | * Bjorn Ekwall. <bj0rn@blox.se> |
16 | * Pekka Riikonen <priikone@poseidon.pspt.fi> | |
17 | * | |
18 | * This program is free software; you can redistribute it and/or | |
19 | * modify it under the terms of the GNU General Public License | |
20 | * as published by the Free Software Foundation; either version | |
21 | * 2 of the License, or (at your option) any later version. | |
22 | * | |
23 | * Moved to /usr/include/linux for NET3 | |
24 | */ | |
25 | #ifndef _LINUX_NETDEVICE_H | |
26 | #define _LINUX_NETDEVICE_H | |
27 | ||
d7fe0f24 | 28 | #include <linux/timer.h> |
187f1882 | 29 | #include <linux/bug.h> |
bea3348e | 30 | #include <linux/delay.h> |
60063497 | 31 | #include <linux/atomic.h> |
53511453 | 32 | #include <linux/prefetch.h> |
1da177e4 LT |
33 | #include <asm/cache.h> |
34 | #include <asm/byteorder.h> | |
35 | ||
1da177e4 | 36 | #include <linux/percpu.h> |
4d5b78c0 | 37 | #include <linux/rculist.h> |
bea3348e | 38 | #include <linux/workqueue.h> |
114cf580 | 39 | #include <linux/dynamic_queue_limits.h> |
1da177e4 | 40 | |
b1b67dd4 | 41 | #include <linux/ethtool.h> |
a050c33f | 42 | #include <net/net_namespace.h> |
7a6b6f51 | 43 | #ifdef CONFIG_DCB |
2f90b865 AD |
44 | #include <net/dcbnl.h> |
45 | #endif | |
5bc1421e | 46 | #include <net/netprio_cgroup.h> |
e817f856 | 47 | #include <net/xdp.h> |
a050c33f | 48 | |
a59e2ecb | 49 | #include <linux/netdev_features.h> |
77162022 | 50 | #include <linux/neighbour.h> |
607ca46e | 51 | #include <uapi/linux/netdevice.h> |
61bd3857 | 52 | #include <uapi/linux/if_bonding.h> |
e4c6734e | 53 | #include <uapi/linux/pkt_cls.h> |
59cc1f61 | 54 | #include <linux/hashtable.h> |
a59e2ecb | 55 | |
115c1d6e | 56 | struct netpoll_info; |
313162d0 | 57 | struct device; |
c1f19b51 | 58 | struct phy_device; |
2f657a60 | 59 | struct dsa_port; |
c6e970a0 | 60 | |
e679c9c1 | 61 | struct sfp_bus; |
704232c2 JB |
62 | /* 802.11 specific */ |
63 | struct wireless_dev; | |
98a18b6f AA |
64 | /* 802.15.4 specific */ |
65 | struct wpan_dev; | |
03c57747 | 66 | struct mpls_dev; |
7c46a640 AD |
67 | /* UDP Tunnel offloads */ |
68 | struct udp_tunnel_info; | |
a7862b45 | 69 | struct bpf_prog; |
814abfab | 70 | struct xdp_buff; |
1da177e4 | 71 | |
f629d208 JP |
72 | void netdev_set_default_ethtool_ops(struct net_device *dev, |
73 | const struct ethtool_ops *ops); | |
d07d7507 | 74 | |
9a1654ba JP |
75 | /* Backlog congestion levels */ |
76 | #define NET_RX_SUCCESS 0 /* keep 'em coming, baby */ | |
77 | #define NET_RX_DROP 1 /* packet dropped */ | |
78 | ||
572a9d7b PM |
79 | /* |
80 | * Transmit return codes: transmit return codes originate from three different | |
81 | * namespaces: | |
82 | * | |
83 | * - qdisc return codes | |
84 | * - driver transmit return codes | |
85 | * - errno values | |
86 | * | |
87 | * Drivers are allowed to return any one of those in their hard_start_xmit() | |
88 | * function. Real network devices commonly used with qdiscs should only return | |
89 | * the driver transmit return codes though - when qdiscs are used, the actual | |
90 | * transmission happens asynchronously, so the value is not propagated to | |
5e82b4b2 BH |
91 | * higher layers. Virtual network devices transmit synchronously; in this case |
92 | * the driver transmit return codes are consumed by dev_queue_xmit(), and all | |
572a9d7b PM |
93 | * others are propagated to higher layers. |
94 | */ | |
95 | ||
96 | /* qdisc ->enqueue() return codes. */ | |
97 | #define NET_XMIT_SUCCESS 0x00 | |
9a1654ba JP |
98 | #define NET_XMIT_DROP 0x01 /* skb dropped */ |
99 | #define NET_XMIT_CN 0x02 /* congestion notification */ | |
9a1654ba | 100 | #define NET_XMIT_MASK 0x0f /* qdisc flags in net/sch_generic.h */ |
1da177e4 | 101 | |
b9df3cb8 GR |
102 | /* NET_XMIT_CN is special. It does not guarantee that this packet is lost. It |
103 | * indicates that the device will soon be dropping packets, or already drops | |
104 | * some packets of the same priority; prompting us to send less aggressively. */ | |
572a9d7b | 105 | #define net_xmit_eval(e) ((e) == NET_XMIT_CN ? 0 : (e)) |
1da177e4 LT |
106 | #define net_xmit_errno(e) ((e) != NET_XMIT_CN ? -ENOBUFS : 0) |
107 | ||
dc1f8bf6 | 108 | /* Driver transmit return codes */ |
9a1654ba | 109 | #define NETDEV_TX_MASK 0xf0 |
572a9d7b | 110 | |
dc1f8bf6 | 111 | enum netdev_tx { |
572a9d7b | 112 | __NETDEV_TX_MIN = INT_MIN, /* make sure enum is signed */ |
9a1654ba JP |
113 | NETDEV_TX_OK = 0x00, /* driver took care of packet */ |
114 | NETDEV_TX_BUSY = 0x10, /* driver tx path was busy*/ | |
dc1f8bf6 SH |
115 | }; |
116 | typedef enum netdev_tx netdev_tx_t; | |
117 | ||
9a1654ba JP |
118 | /* |
119 | * Current order: NETDEV_TX_MASK > NET_XMIT_MASK >= 0 is significant; | |
120 | * hard_start_xmit() return < NET_XMIT_MASK means skb was consumed. | |
121 | */ | |
122 | static inline bool dev_xmit_complete(int rc) | |
123 | { | |
124 | /* | |
125 | * Positive cases with an skb consumed by a driver: | |
126 | * - successful transmission (rc == NETDEV_TX_OK) | |
127 | * - error while transmitting (rc < 0) | |
128 | * - error while queueing to a different device (rc & NET_XMIT_MASK) | |
129 | */ | |
130 | if (likely(rc < NET_XMIT_MASK)) | |
131 | return true; | |
132 | ||
133 | return false; | |
134 | } | |
135 | ||
1da177e4 | 136 | /* |
5e82b4b2 | 137 | * Compute the worst-case header length according to the protocols |
1da177e4 LT |
138 | * used. |
139 | */ | |
fe2918b0 | 140 | |
c0eb4540 KS |
141 | #if defined(CONFIG_HYPERV_NET) |
142 | # define LL_MAX_HEADER 128 | |
143 | #elif defined(CONFIG_WLAN) || IS_ENABLED(CONFIG_AX25) | |
8388e3da DM |
144 | # if defined(CONFIG_MAC80211_MESH) |
145 | # define LL_MAX_HEADER 128 | |
146 | # else | |
147 | # define LL_MAX_HEADER 96 | |
148 | # endif | |
1da177e4 | 149 | #else |
8388e3da | 150 | # define LL_MAX_HEADER 32 |
1da177e4 LT |
151 | #endif |
152 | ||
d11ead75 BH |
153 | #if !IS_ENABLED(CONFIG_NET_IPIP) && !IS_ENABLED(CONFIG_NET_IPGRE) && \ |
154 | !IS_ENABLED(CONFIG_IPV6_SIT) && !IS_ENABLED(CONFIG_IPV6_TUNNEL) | |
1da177e4 LT |
155 | #define MAX_HEADER LL_MAX_HEADER |
156 | #else | |
157 | #define MAX_HEADER (LL_MAX_HEADER + 48) | |
158 | #endif | |
159 | ||
160 | /* | |
be1f3c2c BH |
161 | * Old network device statistics. Fields are native words |
162 | * (unsigned long) so they can be read and written atomically. | |
1da177e4 | 163 | */ |
fe2918b0 | 164 | |
d94d9fee | 165 | struct net_device_stats { |
3cfde79c BH |
166 | unsigned long rx_packets; |
167 | unsigned long tx_packets; | |
168 | unsigned long rx_bytes; | |
169 | unsigned long tx_bytes; | |
170 | unsigned long rx_errors; | |
171 | unsigned long tx_errors; | |
172 | unsigned long rx_dropped; | |
173 | unsigned long tx_dropped; | |
174 | unsigned long multicast; | |
1da177e4 | 175 | unsigned long collisions; |
1da177e4 | 176 | unsigned long rx_length_errors; |
3cfde79c BH |
177 | unsigned long rx_over_errors; |
178 | unsigned long rx_crc_errors; | |
179 | unsigned long rx_frame_errors; | |
180 | unsigned long rx_fifo_errors; | |
181 | unsigned long rx_missed_errors; | |
1da177e4 LT |
182 | unsigned long tx_aborted_errors; |
183 | unsigned long tx_carrier_errors; | |
184 | unsigned long tx_fifo_errors; | |
185 | unsigned long tx_heartbeat_errors; | |
186 | unsigned long tx_window_errors; | |
1da177e4 LT |
187 | unsigned long rx_compressed; |
188 | unsigned long tx_compressed; | |
189 | }; | |
190 | ||
1da177e4 LT |
191 | |
192 | #include <linux/cache.h> | |
193 | #include <linux/skbuff.h> | |
194 | ||
adc9300e | 195 | #ifdef CONFIG_RPS |
c5905afb IM |
196 | #include <linux/static_key.h> |
197 | extern struct static_key rps_needed; | |
13bfff25 | 198 | extern struct static_key rfs_needed; |
adc9300e ED |
199 | #endif |
200 | ||
1da177e4 LT |
201 | struct neighbour; |
202 | struct neigh_parms; | |
203 | struct sk_buff; | |
204 | ||
f001fde5 JP |
205 | struct netdev_hw_addr { |
206 | struct list_head list; | |
207 | unsigned char addr[MAX_ADDR_LEN]; | |
208 | unsigned char type; | |
ccffad25 JP |
209 | #define NETDEV_HW_ADDR_T_LAN 1 |
210 | #define NETDEV_HW_ADDR_T_SAN 2 | |
211 | #define NETDEV_HW_ADDR_T_SLAVE 3 | |
212 | #define NETDEV_HW_ADDR_T_UNICAST 4 | |
22bedad3 | 213 | #define NETDEV_HW_ADDR_T_MULTICAST 5 |
22bedad3 | 214 | bool global_use; |
4cd729b0 | 215 | int sync_cnt; |
8f8f103d | 216 | int refcount; |
4543fbef | 217 | int synced; |
f001fde5 JP |
218 | struct rcu_head rcu_head; |
219 | }; | |
220 | ||
31278e71 JP |
221 | struct netdev_hw_addr_list { |
222 | struct list_head list; | |
223 | int count; | |
224 | }; | |
225 | ||
22bedad3 JP |
226 | #define netdev_hw_addr_list_count(l) ((l)->count) |
227 | #define netdev_hw_addr_list_empty(l) (netdev_hw_addr_list_count(l) == 0) | |
228 | #define netdev_hw_addr_list_for_each(ha, l) \ | |
229 | list_for_each_entry(ha, &(l)->list, list) | |
32e7bfc4 | 230 | |
22bedad3 JP |
231 | #define netdev_uc_count(dev) netdev_hw_addr_list_count(&(dev)->uc) |
232 | #define netdev_uc_empty(dev) netdev_hw_addr_list_empty(&(dev)->uc) | |
233 | #define netdev_for_each_uc_addr(ha, dev) \ | |
234 | netdev_hw_addr_list_for_each(ha, &(dev)->uc) | |
6683ece3 | 235 | |
22bedad3 JP |
236 | #define netdev_mc_count(dev) netdev_hw_addr_list_count(&(dev)->mc) |
237 | #define netdev_mc_empty(dev) netdev_hw_addr_list_empty(&(dev)->mc) | |
18e225f2 | 238 | #define netdev_for_each_mc_addr(ha, dev) \ |
22bedad3 | 239 | netdev_hw_addr_list_for_each(ha, &(dev)->mc) |
6683ece3 | 240 | |
d94d9fee | 241 | struct hh_cache { |
5b3dc2f3 | 242 | unsigned int hh_len; |
3644f0ce | 243 | seqlock_t hh_lock; |
1da177e4 LT |
244 | |
245 | /* cached hardware header; allow for machine alignment needs. */ | |
246 | #define HH_DATA_MOD 16 | |
247 | #define HH_DATA_OFF(__len) \ | |
5ba0eac6 | 248 | (HH_DATA_MOD - (((__len - 1) & (HH_DATA_MOD - 1)) + 1)) |
1da177e4 LT |
249 | #define HH_DATA_ALIGN(__len) \ |
250 | (((__len)+(HH_DATA_MOD-1))&~(HH_DATA_MOD - 1)) | |
251 | unsigned long hh_data[HH_DATA_ALIGN(LL_MAX_HEADER) / sizeof(long)]; | |
252 | }; | |
253 | ||
5e82b4b2 | 254 | /* Reserve HH_DATA_MOD byte-aligned hard_header_len, but at least that much. |
1da177e4 LT |
255 | * Alternative is: |
256 | * dev->hard_header_len ? (dev->hard_header_len + | |
257 | * (HH_DATA_MOD - 1)) & ~(HH_DATA_MOD - 1) : 0 | |
258 | * | |
259 | * We could use other alignment values, but we must maintain the | |
260 | * relationship HH alignment <= LL alignment. | |
261 | */ | |
262 | #define LL_RESERVED_SPACE(dev) \ | |
f5184d26 | 263 | ((((dev)->hard_header_len+(dev)->needed_headroom)&~(HH_DATA_MOD - 1)) + HH_DATA_MOD) |
1da177e4 | 264 | #define LL_RESERVED_SPACE_EXTRA(dev,extra) \ |
f5184d26 | 265 | ((((dev)->hard_header_len+(dev)->needed_headroom+(extra))&~(HH_DATA_MOD - 1)) + HH_DATA_MOD) |
1da177e4 | 266 | |
3b04ddde SH |
267 | struct header_ops { |
268 | int (*create) (struct sk_buff *skb, struct net_device *dev, | |
269 | unsigned short type, const void *daddr, | |
95c96174 | 270 | const void *saddr, unsigned int len); |
3b04ddde | 271 | int (*parse)(const struct sk_buff *skb, unsigned char *haddr); |
e69dd336 | 272 | int (*cache)(const struct neighbour *neigh, struct hh_cache *hh, __be16 type); |
3b04ddde SH |
273 | void (*cache_update)(struct hh_cache *hh, |
274 | const struct net_device *dev, | |
275 | const unsigned char *haddr); | |
2793a23a | 276 | bool (*validate)(const char *ll_header, unsigned int len); |
3b04ddde SH |
277 | }; |
278 | ||
1da177e4 | 279 | /* These flag bits are private to the generic network queueing |
5e82b4b2 | 280 | * layer; they may not be explicitly referenced by any other |
1da177e4 LT |
281 | * code. |
282 | */ | |
283 | ||
d94d9fee | 284 | enum netdev_state_t { |
1da177e4 LT |
285 | __LINK_STATE_START, |
286 | __LINK_STATE_PRESENT, | |
1da177e4 | 287 | __LINK_STATE_NOCARRIER, |
b00055aa SR |
288 | __LINK_STATE_LINKWATCH_PENDING, |
289 | __LINK_STATE_DORMANT, | |
1da177e4 LT |
290 | }; |
291 | ||
292 | ||
293 | /* | |
5e82b4b2 | 294 | * This structure holds boot-time configured netdevice settings. They |
fe2918b0 | 295 | * are then used in the device probing. |
1da177e4 LT |
296 | */ |
297 | struct netdev_boot_setup { | |
298 | char name[IFNAMSIZ]; | |
299 | struct ifmap map; | |
300 | }; | |
301 | #define NETDEV_BOOT_SETUP_MAX 8 | |
302 | ||
f629d208 | 303 | int __init netdev_boot_setup(char *str); |
1da177e4 | 304 | |
bea3348e SH |
305 | /* |
306 | * Structure for NAPI scheduling similar to tasklet but with weighting | |
307 | */ | |
07d78363 | 308 | #define GRO_HASH_BUCKETS 8 |
bea3348e SH |
309 | struct napi_struct { |
310 | /* The poll_list must only be managed by the entity which | |
311 | * changes the state of the NAPI_STATE_SCHED bit. This means | |
312 | * whoever atomically sets that bit can add this napi_struct | |
5e82b4b2 | 313 | * to the per-CPU poll_list, and whoever clears that bit |
bea3348e SH |
314 | * can remove from the list right before clearing the bit. |
315 | */ | |
316 | struct list_head poll_list; | |
317 | ||
318 | unsigned long state; | |
319 | int weight; | |
404f7c9e | 320 | unsigned int gro_count; |
bea3348e SH |
321 | int (*poll)(struct napi_struct *, int); |
322 | #ifdef CONFIG_NETPOLL | |
bea3348e | 323 | int poll_owner; |
bea3348e | 324 | #endif |
5d38a079 | 325 | struct net_device *dev; |
07d78363 | 326 | struct list_head gro_hash[GRO_HASH_BUCKETS]; |
5d38a079 | 327 | struct sk_buff *skb; |
3b47d303 | 328 | struct hrtimer timer; |
404f7c9e | 329 | struct list_head dev_list; |
af12fa6e ET |
330 | struct hlist_node napi_hash_node; |
331 | unsigned int napi_id; | |
bea3348e SH |
332 | }; |
333 | ||
d94d9fee | 334 | enum { |
bea3348e | 335 | NAPI_STATE_SCHED, /* Poll is scheduled */ |
39e6c820 | 336 | NAPI_STATE_MISSED, /* reschedule a napi */ |
a0a46196 | 337 | NAPI_STATE_DISABLE, /* Disable pending */ |
7b363e44 | 338 | NAPI_STATE_NPSVC, /* Netpoll - don't dequeue from poll_list */ |
d64b5e85 ED |
339 | NAPI_STATE_HASHED, /* In NAPI hash (busy polling possible) */ |
340 | NAPI_STATE_NO_BUSY_POLL,/* Do not add in napi_hash, no busy polling */ | |
217f6974 ED |
341 | NAPI_STATE_IN_BUSY_POLL,/* sk_busy_loop() owns this NAPI */ |
342 | }; | |
343 | ||
344 | enum { | |
39e6c820 ED |
345 | NAPIF_STATE_SCHED = BIT(NAPI_STATE_SCHED), |
346 | NAPIF_STATE_MISSED = BIT(NAPI_STATE_MISSED), | |
347 | NAPIF_STATE_DISABLE = BIT(NAPI_STATE_DISABLE), | |
348 | NAPIF_STATE_NPSVC = BIT(NAPI_STATE_NPSVC), | |
349 | NAPIF_STATE_HASHED = BIT(NAPI_STATE_HASHED), | |
350 | NAPIF_STATE_NO_BUSY_POLL = BIT(NAPI_STATE_NO_BUSY_POLL), | |
351 | NAPIF_STATE_IN_BUSY_POLL = BIT(NAPI_STATE_IN_BUSY_POLL), | |
bea3348e SH |
352 | }; |
353 | ||
5b252f0c | 354 | enum gro_result { |
d1c76af9 HX |
355 | GRO_MERGED, |
356 | GRO_MERGED_FREE, | |
357 | GRO_HELD, | |
358 | GRO_NORMAL, | |
359 | GRO_DROP, | |
25393d3f | 360 | GRO_CONSUMED, |
d1c76af9 | 361 | }; |
5b252f0c | 362 | typedef enum gro_result gro_result_t; |
d1c76af9 | 363 | |
8a4eb573 JP |
364 | /* |
365 | * enum rx_handler_result - Possible return values for rx_handlers. | |
366 | * @RX_HANDLER_CONSUMED: skb was consumed by rx_handler, do not process it | |
367 | * further. | |
368 | * @RX_HANDLER_ANOTHER: Do another round in receive path. This is indicated in | |
369 | * case skb->dev was changed by rx_handler. | |
370 | * @RX_HANDLER_EXACT: Force exact delivery, no wildcard. | |
5e82b4b2 | 371 | * @RX_HANDLER_PASS: Do nothing, pass the skb as if no rx_handler was called. |
8a4eb573 JP |
372 | * |
373 | * rx_handlers are functions called from inside __netif_receive_skb(), to do | |
374 | * special processing of the skb, prior to delivery to protocol handlers. | |
375 | * | |
376 | * Currently, a net_device can only have a single rx_handler registered. Trying | |
377 | * to register a second rx_handler will return -EBUSY. | |
378 | * | |
379 | * To register a rx_handler on a net_device, use netdev_rx_handler_register(). | |
380 | * To unregister a rx_handler on a net_device, use | |
381 | * netdev_rx_handler_unregister(). | |
382 | * | |
383 | * Upon return, rx_handler is expected to tell __netif_receive_skb() what to | |
384 | * do with the skb. | |
385 | * | |
5e82b4b2 | 386 | * If the rx_handler consumed the skb in some way, it should return |
8a4eb573 | 387 | * RX_HANDLER_CONSUMED. This is appropriate when the rx_handler arranged for |
5e82b4b2 | 388 | * the skb to be delivered in some other way. |
8a4eb573 JP |
389 | * |
390 | * If the rx_handler changed skb->dev, to divert the skb to another | |
391 | * net_device, it should return RX_HANDLER_ANOTHER. The rx_handler for the | |
392 | * new device will be called if it exists. | |
393 | * | |
5e82b4b2 | 394 | * If the rx_handler decides the skb should be ignored, it should return |
8a4eb573 | 395 | * RX_HANDLER_EXACT. The skb will only be delivered to protocol handlers that |
d93cf068 | 396 | * are registered on exact device (ptype->dev == skb->dev). |
8a4eb573 | 397 | * |
5e82b4b2 | 398 | * If the rx_handler didn't change skb->dev, but wants the skb to be normally |
8a4eb573 JP |
399 | * delivered, it should return RX_HANDLER_PASS. |
400 | * | |
401 | * A device without a registered rx_handler will behave as if rx_handler | |
402 | * returned RX_HANDLER_PASS. | |
403 | */ | |
404 | ||
405 | enum rx_handler_result { | |
406 | RX_HANDLER_CONSUMED, | |
407 | RX_HANDLER_ANOTHER, | |
408 | RX_HANDLER_EXACT, | |
409 | RX_HANDLER_PASS, | |
410 | }; | |
411 | typedef enum rx_handler_result rx_handler_result_t; | |
412 | typedef rx_handler_result_t rx_handler_func_t(struct sk_buff **pskb); | |
ab95bfe0 | 413 | |
f629d208 | 414 | void __napi_schedule(struct napi_struct *n); |
bc9ad166 | 415 | void __napi_schedule_irqoff(struct napi_struct *n); |
bea3348e | 416 | |
4d29515f | 417 | static inline bool napi_disable_pending(struct napi_struct *n) |
a0a46196 DM |
418 | { |
419 | return test_bit(NAPI_STATE_DISABLE, &n->state); | |
420 | } | |
421 | ||
39e6c820 | 422 | bool napi_schedule_prep(struct napi_struct *n); |
bea3348e SH |
423 | |
424 | /** | |
425 | * napi_schedule - schedule NAPI poll | |
5e82b4b2 | 426 | * @n: NAPI context |
bea3348e SH |
427 | * |
428 | * Schedule NAPI poll routine to be called if it is not already | |
429 | * running. | |
430 | */ | |
431 | static inline void napi_schedule(struct napi_struct *n) | |
432 | { | |
433 | if (napi_schedule_prep(n)) | |
434 | __napi_schedule(n); | |
435 | } | |
436 | ||
bc9ad166 ED |
437 | /** |
438 | * napi_schedule_irqoff - schedule NAPI poll | |
5e82b4b2 | 439 | * @n: NAPI context |
bc9ad166 ED |
440 | * |
441 | * Variant of napi_schedule(), assuming hard irqs are masked. | |
442 | */ | |
443 | static inline void napi_schedule_irqoff(struct napi_struct *n) | |
444 | { | |
445 | if (napi_schedule_prep(n)) | |
446 | __napi_schedule_irqoff(n); | |
447 | } | |
448 | ||
bfe13f54 | 449 | /* Try to reschedule poll. Called by dev->poll() after napi_complete(). */ |
4d29515f | 450 | static inline bool napi_reschedule(struct napi_struct *napi) |
bfe13f54 RD |
451 | { |
452 | if (napi_schedule_prep(napi)) { | |
453 | __napi_schedule(napi); | |
4d29515f | 454 | return true; |
bfe13f54 | 455 | } |
4d29515f | 456 | return false; |
bfe13f54 RD |
457 | } |
458 | ||
364b6055 | 459 | bool napi_complete_done(struct napi_struct *n, int work_done); |
bea3348e SH |
460 | /** |
461 | * napi_complete - NAPI processing complete | |
5e82b4b2 | 462 | * @n: NAPI context |
bea3348e SH |
463 | * |
464 | * Mark NAPI processing as complete. | |
3b47d303 | 465 | * Consider using napi_complete_done() instead. |
364b6055 | 466 | * Return false if device should avoid rearming interrupts. |
bea3348e | 467 | */ |
364b6055 | 468 | static inline bool napi_complete(struct napi_struct *n) |
3b47d303 ED |
469 | { |
470 | return napi_complete_done(n, 0); | |
471 | } | |
bea3348e | 472 | |
af12fa6e ET |
473 | /** |
474 | * napi_hash_del - remove a NAPI from global table | |
5e82b4b2 | 475 | * @napi: NAPI context |
af12fa6e | 476 | * |
5e82b4b2 | 477 | * Warning: caller must observe RCU grace period |
34cbe27e ED |
478 | * before freeing memory containing @napi, if |
479 | * this function returns true. | |
93d05d4a | 480 | * Note: core networking stack automatically calls it |
5e82b4b2 | 481 | * from netif_napi_del(). |
93d05d4a | 482 | * Drivers might want to call this helper to combine all |
5e82b4b2 | 483 | * the needed RCU grace periods into a single one. |
af12fa6e | 484 | */ |
34cbe27e | 485 | bool napi_hash_del(struct napi_struct *napi); |
af12fa6e | 486 | |
bea3348e SH |
487 | /** |
488 | * napi_disable - prevent NAPI from scheduling | |
5e82b4b2 | 489 | * @n: NAPI context |
bea3348e SH |
490 | * |
491 | * Stop NAPI from being scheduled on this context. | |
492 | * Waits till any outstanding processing completes. | |
493 | */ | |
3b47d303 | 494 | void napi_disable(struct napi_struct *n); |
bea3348e SH |
495 | |
496 | /** | |
497 | * napi_enable - enable NAPI scheduling | |
5e82b4b2 | 498 | * @n: NAPI context |
bea3348e SH |
499 | * |
500 | * Resume NAPI from being scheduled on this context. | |
501 | * Must be paired with napi_disable. | |
502 | */ | |
503 | static inline void napi_enable(struct napi_struct *n) | |
504 | { | |
505 | BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state)); | |
4e857c58 | 506 | smp_mb__before_atomic(); |
bea3348e | 507 | clear_bit(NAPI_STATE_SCHED, &n->state); |
2d8bff12 | 508 | clear_bit(NAPI_STATE_NPSVC, &n->state); |
bea3348e SH |
509 | } |
510 | ||
c264c3de SH |
511 | /** |
512 | * napi_synchronize - wait until NAPI is not running | |
5e82b4b2 | 513 | * @n: NAPI context |
c264c3de SH |
514 | * |
515 | * Wait until NAPI is done being scheduled on this context. | |
516 | * Waits till any outstanding processing completes but | |
517 | * does not disable future activations. | |
518 | */ | |
519 | static inline void napi_synchronize(const struct napi_struct *n) | |
520 | { | |
facc432f AB |
521 | if (IS_ENABLED(CONFIG_SMP)) |
522 | while (test_bit(NAPI_STATE_SCHED, &n->state)) | |
523 | msleep(1); | |
524 | else | |
525 | barrier(); | |
c264c3de | 526 | } |
c264c3de | 527 | |
d94d9fee | 528 | enum netdev_queue_state_t { |
73466498 TH |
529 | __QUEUE_STATE_DRV_XOFF, |
530 | __QUEUE_STATE_STACK_XOFF, | |
c3f26a26 | 531 | __QUEUE_STATE_FROZEN, |
79d16385 | 532 | }; |
8e2f1a63 DB |
533 | |
534 | #define QUEUE_STATE_DRV_XOFF (1 << __QUEUE_STATE_DRV_XOFF) | |
535 | #define QUEUE_STATE_STACK_XOFF (1 << __QUEUE_STATE_STACK_XOFF) | |
536 | #define QUEUE_STATE_FROZEN (1 << __QUEUE_STATE_FROZEN) | |
537 | ||
538 | #define QUEUE_STATE_ANY_XOFF (QUEUE_STATE_DRV_XOFF | QUEUE_STATE_STACK_XOFF) | |
539 | #define QUEUE_STATE_ANY_XOFF_OR_FROZEN (QUEUE_STATE_ANY_XOFF | \ | |
540 | QUEUE_STATE_FROZEN) | |
541 | #define QUEUE_STATE_DRV_XOFF_OR_FROZEN (QUEUE_STATE_DRV_XOFF | \ | |
542 | QUEUE_STATE_FROZEN) | |
543 | ||
73466498 TH |
544 | /* |
545 | * __QUEUE_STATE_DRV_XOFF is used by drivers to stop the transmit queue. The | |
546 | * netif_tx_* functions below are used to manipulate this flag. The | |
547 | * __QUEUE_STATE_STACK_XOFF flag is used by the stack to stop the transmit | |
548 | * queue independently. The netif_xmit_*stopped functions below are called | |
549 | * to check if the queue has been stopped by the driver or stack (either | |
550 | * of the XOFF bits are set in the state). Drivers should not need to call | |
551 | * netif_xmit*stopped functions, they should only be using netif_tx_*. | |
552 | */ | |
79d16385 | 553 | |
bb949fbd | 554 | struct netdev_queue { |
6a321cb3 | 555 | /* |
5e82b4b2 | 556 | * read-mostly part |
6a321cb3 | 557 | */ |
bb949fbd | 558 | struct net_device *dev; |
46e5da40 | 559 | struct Qdisc __rcu *qdisc; |
b0e1e646 | 560 | struct Qdisc *qdisc_sleeping; |
ccf5ff69 | 561 | #ifdef CONFIG_SYSFS |
1d24eb48 TH |
562 | struct kobject kobj; |
563 | #endif | |
f2cd2d3e ED |
564 | #if defined(CONFIG_XPS) && defined(CONFIG_NUMA) |
565 | int numa_node; | |
566 | #endif | |
c0ef079c FW |
567 | unsigned long tx_maxrate; |
568 | /* | |
569 | * Number of TX timeouts for this queue | |
570 | * (/sys/class/net/DEV/Q/trans_timeout) | |
571 | */ | |
572 | unsigned long trans_timeout; | |
6a321cb3 | 573 | /* |
5e82b4b2 | 574 | * write-mostly part |
6a321cb3 ED |
575 | */ |
576 | spinlock_t _xmit_lock ____cacheline_aligned_in_smp; | |
577 | int xmit_lock_owner; | |
9d21493b | 578 | /* |
9b36627a | 579 | * Time (in jiffies) of last Tx |
9d21493b ED |
580 | */ |
581 | unsigned long trans_start; | |
ccf5ff69 | 582 | |
114cf580 TH |
583 | unsigned long state; |
584 | ||
585 | #ifdef CONFIG_BQL | |
586 | struct dql dql; | |
587 | #endif | |
e8a0464c | 588 | } ____cacheline_aligned_in_smp; |
bb949fbd | 589 | |
79134e6c ED |
590 | extern int sysctl_fb_tunnels_only_for_init_net; |
591 | ||
592 | static inline bool net_has_fallback_tunnels(const struct net *net) | |
593 | { | |
be9fc097 AB |
594 | return net == &init_net || |
595 | !IS_ENABLED(CONFIG_SYSCTL) || | |
596 | !sysctl_fb_tunnels_only_for_init_net; | |
79134e6c ED |
597 | } |
598 | ||
f2cd2d3e ED |
599 | static inline int netdev_queue_numa_node_read(const struct netdev_queue *q) |
600 | { | |
601 | #if defined(CONFIG_XPS) && defined(CONFIG_NUMA) | |
602 | return q->numa_node; | |
603 | #else | |
b236da69 | 604 | return NUMA_NO_NODE; |
f2cd2d3e ED |
605 | #endif |
606 | } | |
607 | ||
608 | static inline void netdev_queue_numa_node_write(struct netdev_queue *q, int node) | |
609 | { | |
610 | #if defined(CONFIG_XPS) && defined(CONFIG_NUMA) | |
611 | q->numa_node = node; | |
612 | #endif | |
613 | } | |
614 | ||
df334545 | 615 | #ifdef CONFIG_RPS |
0a9627f2 TH |
616 | /* |
617 | * This structure holds an RPS map which can be of variable length. The | |
618 | * map is an array of CPUs. | |
619 | */ | |
620 | struct rps_map { | |
621 | unsigned int len; | |
622 | struct rcu_head rcu; | |
623 | u16 cpus[0]; | |
624 | }; | |
60b778ce | 625 | #define RPS_MAP_SIZE(_num) (sizeof(struct rps_map) + ((_num) * sizeof(u16))) |
0a9627f2 | 626 | |
fec5e652 | 627 | /* |
c445477d BH |
628 | * The rps_dev_flow structure contains the mapping of a flow to a CPU, the |
629 | * tail pointer for that CPU's input queue at the time of last enqueue, and | |
630 | * a hardware filter index. | |
fec5e652 TH |
631 | */ |
632 | struct rps_dev_flow { | |
633 | u16 cpu; | |
c445477d | 634 | u16 filter; |
fec5e652 TH |
635 | unsigned int last_qtail; |
636 | }; | |
c445477d | 637 | #define RPS_NO_FILTER 0xffff |
fec5e652 TH |
638 | |
639 | /* | |
640 | * The rps_dev_flow_table structure contains a table of flow mappings. | |
641 | */ | |
642 | struct rps_dev_flow_table { | |
643 | unsigned int mask; | |
644 | struct rcu_head rcu; | |
fec5e652 TH |
645 | struct rps_dev_flow flows[0]; |
646 | }; | |
647 | #define RPS_DEV_FLOW_TABLE_SIZE(_num) (sizeof(struct rps_dev_flow_table) + \ | |
60b778ce | 648 | ((_num) * sizeof(struct rps_dev_flow))) |
fec5e652 TH |
649 | |
650 | /* | |
651 | * The rps_sock_flow_table contains mappings of flows to the last CPU | |
652 | * on which they were processed by the application (set in recvmsg). | |
5e82b4b2 BH |
653 | * Each entry is a 32bit value. Upper part is the high-order bits |
654 | * of flow hash, lower part is CPU number. | |
567e4b79 | 655 | * rps_cpu_mask is used to partition the space, depending on number of |
5e82b4b2 BH |
656 | * possible CPUs : rps_cpu_mask = roundup_pow_of_two(nr_cpu_ids) - 1 |
657 | * For example, if 64 CPUs are possible, rps_cpu_mask = 0x3f, | |
567e4b79 | 658 | * meaning we use 32-6=26 bits for the hash. |
fec5e652 TH |
659 | */ |
660 | struct rps_sock_flow_table { | |
567e4b79 | 661 | u32 mask; |
93c1af6c ED |
662 | |
663 | u32 ents[0] ____cacheline_aligned_in_smp; | |
fec5e652 | 664 | }; |
567e4b79 | 665 | #define RPS_SOCK_FLOW_TABLE_SIZE(_num) (offsetof(struct rps_sock_flow_table, ents[_num])) |
fec5e652 TH |
666 | |
667 | #define RPS_NO_CPU 0xffff | |
668 | ||
567e4b79 ED |
669 | extern u32 rps_cpu_mask; |
670 | extern struct rps_sock_flow_table __rcu *rps_sock_flow_table; | |
671 | ||
fec5e652 TH |
672 | static inline void rps_record_sock_flow(struct rps_sock_flow_table *table, |
673 | u32 hash) | |
674 | { | |
675 | if (table && hash) { | |
567e4b79 ED |
676 | unsigned int index = hash & table->mask; |
677 | u32 val = hash & ~rps_cpu_mask; | |
fec5e652 | 678 | |
5e82b4b2 | 679 | /* We only give a hint, preemption can change CPU under us */ |
567e4b79 | 680 | val |= raw_smp_processor_id(); |
fec5e652 | 681 | |
567e4b79 ED |
682 | if (table->ents[index] != val) |
683 | table->ents[index] = val; | |
fec5e652 TH |
684 | } |
685 | } | |
686 | ||
c445477d | 687 | #ifdef CONFIG_RFS_ACCEL |
f629d208 JP |
688 | bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index, u32 flow_id, |
689 | u16 filter_id); | |
c445477d | 690 | #endif |
a953be53 | 691 | #endif /* CONFIG_RPS */ |
c445477d | 692 | |
0a9627f2 TH |
693 | /* This structure contains an instance of an RX queue. */ |
694 | struct netdev_rx_queue { | |
a953be53 | 695 | #ifdef CONFIG_RPS |
6e3f7faf ED |
696 | struct rps_map __rcu *rps_map; |
697 | struct rps_dev_flow_table __rcu *rps_flow_table; | |
a953be53 | 698 | #endif |
6e3f7faf | 699 | struct kobject kobj; |
fe822240 | 700 | struct net_device *dev; |
e817f856 | 701 | struct xdp_rxq_info xdp_rxq; |
0a9627f2 | 702 | } ____cacheline_aligned_in_smp; |
a953be53 MD |
703 | |
704 | /* | |
705 | * RX queue sysfs structures and functions. | |
706 | */ | |
707 | struct rx_queue_attribute { | |
708 | struct attribute attr; | |
718ad681 | 709 | ssize_t (*show)(struct netdev_rx_queue *queue, char *buf); |
a953be53 | 710 | ssize_t (*store)(struct netdev_rx_queue *queue, |
718ad681 | 711 | const char *buf, size_t len); |
a953be53 | 712 | }; |
d314774c | 713 | |
bf264145 TH |
714 | #ifdef CONFIG_XPS |
715 | /* | |
716 | * This structure holds an XPS map which can be of variable length. The | |
717 | * map is an array of queues. | |
718 | */ | |
719 | struct xps_map { | |
720 | unsigned int len; | |
721 | unsigned int alloc_len; | |
722 | struct rcu_head rcu; | |
723 | u16 queues[0]; | |
724 | }; | |
60b778ce | 725 | #define XPS_MAP_SIZE(_num) (sizeof(struct xps_map) + ((_num) * sizeof(u16))) |
c59f419b HD |
726 | #define XPS_MIN_MAP_ALLOC ((L1_CACHE_ALIGN(offsetof(struct xps_map, queues[1])) \ |
727 | - sizeof(struct xps_map)) / sizeof(u16)) | |
bf264145 TH |
728 | |
729 | /* | |
730 | * This structure holds all XPS maps for device. Maps are indexed by CPU. | |
731 | */ | |
732 | struct xps_dev_maps { | |
733 | struct rcu_head rcu; | |
80d19669 | 734 | struct xps_map __rcu *attr_map[0]; /* Either CPUs map or RXQs map */ |
bf264145 | 735 | }; |
80d19669 AN |
736 | |
737 | #define XPS_CPU_DEV_MAPS_SIZE(_tcs) (sizeof(struct xps_dev_maps) + \ | |
184c449f | 738 | (nr_cpu_ids * (_tcs) * sizeof(struct xps_map *))) |
80d19669 AN |
739 | |
740 | #define XPS_RXQ_DEV_MAPS_SIZE(_tcs, _rxqs) (sizeof(struct xps_dev_maps) +\ | |
741 | (_rxqs * (_tcs) * sizeof(struct xps_map *))) | |
742 | ||
bf264145 TH |
743 | #endif /* CONFIG_XPS */ |
744 | ||
4f57c087 JF |
745 | #define TC_MAX_QUEUE 16 |
746 | #define TC_BITMASK 15 | |
747 | /* HW offloaded queuing disciplines txq count and offset maps */ | |
748 | struct netdev_tc_txq { | |
749 | u16 count; | |
750 | u16 offset; | |
751 | }; | |
752 | ||
68bad94e NP |
753 | #if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE) |
754 | /* | |
755 | * This structure is to hold information about the device | |
756 | * configured to run FCoE protocol stack. | |
757 | */ | |
758 | struct netdev_fcoe_hbainfo { | |
759 | char manufacturer[64]; | |
760 | char serial_number[64]; | |
761 | char hardware_version[64]; | |
762 | char driver_version[64]; | |
763 | char optionrom_version[64]; | |
764 | char firmware_version[64]; | |
765 | char model[256]; | |
766 | char model_description[256]; | |
767 | }; | |
768 | #endif | |
769 | ||
02637fce | 770 | #define MAX_PHYS_ITEM_ID_LEN 32 |
66b52b0d | 771 | |
02637fce JP |
772 | /* This structure holds a unique identifier to identify some |
773 | * physical item (port for example) used by a netdevice. | |
66b52b0d | 774 | */ |
02637fce JP |
775 | struct netdev_phys_item_id { |
776 | unsigned char id[MAX_PHYS_ITEM_ID_LEN]; | |
66b52b0d JP |
777 | unsigned char id_len; |
778 | }; | |
779 | ||
d754f98b SF |
780 | static inline bool netdev_phys_item_id_same(struct netdev_phys_item_id *a, |
781 | struct netdev_phys_item_id *b) | |
782 | { | |
783 | return a->id_len == b->id_len && | |
784 | memcmp(a->id, b->id, a->id_len) == 0; | |
785 | } | |
786 | ||
99932d4f DB |
787 | typedef u16 (*select_queue_fallback_t)(struct net_device *dev, |
788 | struct sk_buff *skb); | |
789 | ||
2572ac53 | 790 | enum tc_setup_type { |
575ed7d3 | 791 | TC_SETUP_QDISC_MQPRIO, |
a1b7c5fd | 792 | TC_SETUP_CLSU32, |
5b33f488 | 793 | TC_SETUP_CLSFLOWER, |
ade9b658 | 794 | TC_SETUP_CLSMATCHALL, |
332ae8e2 | 795 | TC_SETUP_CLSBPF, |
8c4083b3 | 796 | TC_SETUP_BLOCK, |
8521db4c | 797 | TC_SETUP_QDISC_CBS, |
602f3baf | 798 | TC_SETUP_QDISC_RED, |
7fdb61b4 | 799 | TC_SETUP_QDISC_PRIO, |
f971b132 | 800 | TC_SETUP_QDISC_MQ, |
16e5cc64 JF |
801 | }; |
802 | ||
f4e63525 JK |
803 | /* These structures hold the attributes of bpf state that are being passed |
804 | * to the netdevice through the bpf op. | |
a7862b45 | 805 | */ |
f4e63525 | 806 | enum bpf_netdev_command { |
a7862b45 BB |
807 | /* Set or clear a bpf program used in the earliest stages of packet |
808 | * rx. The prog will have been loaded as BPF_PROG_TYPE_XDP. The callee | |
809 | * is responsible for calling bpf_prog_put on any old progs that are | |
810 | * stored. In case of error, the callee need not release the new prog | |
811 | * reference, but on success it takes ownership and must bpf_prog_put | |
812 | * when it is no longer used. | |
813 | */ | |
814 | XDP_SETUP_PROG, | |
ee5d032f | 815 | XDP_SETUP_PROG_HW, |
a7862b45 | 816 | /* Check if a bpf program is set on the device. The callee should |
ce158e58 JK |
817 | * set @prog_attached to one of XDP_ATTACHED_* values, note that "true" |
818 | * is equivalent to XDP_ATTACHED_DRV. | |
a7862b45 BB |
819 | */ |
820 | XDP_QUERY_PROG, | |
ab3f0063 JK |
821 | /* BPF program for offload callbacks, invoked at program load time. */ |
822 | BPF_OFFLOAD_VERIFIER_PREP, | |
823 | BPF_OFFLOAD_TRANSLATE, | |
824 | BPF_OFFLOAD_DESTROY, | |
a3884572 JK |
825 | BPF_OFFLOAD_MAP_ALLOC, |
826 | BPF_OFFLOAD_MAP_FREE, | |
74515c57 BT |
827 | XDP_QUERY_XSK_UMEM, |
828 | XDP_SETUP_XSK_UMEM, | |
a7862b45 BB |
829 | }; |
830 | ||
cae1927c | 831 | struct bpf_prog_offload_ops; |
ddf9f970 | 832 | struct netlink_ext_ack; |
74515c57 | 833 | struct xdp_umem; |
ddf9f970 | 834 | |
f4e63525 JK |
835 | struct netdev_bpf { |
836 | enum bpf_netdev_command command; | |
a7862b45 BB |
837 | union { |
838 | /* XDP_SETUP_PROG */ | |
ddf9f970 | 839 | struct { |
32d60277 | 840 | u32 flags; |
ddf9f970 JK |
841 | struct bpf_prog *prog; |
842 | struct netlink_ext_ack *extack; | |
843 | }; | |
a7862b45 | 844 | /* XDP_QUERY_PROG */ |
58038695 | 845 | struct { |
ce158e58 | 846 | u8 prog_attached; |
58038695 | 847 | u32 prog_id; |
92f0292b JK |
848 | /* flags with which program was installed */ |
849 | u32 prog_flags; | |
58038695 | 850 | }; |
ab3f0063 JK |
851 | /* BPF_OFFLOAD_VERIFIER_PREP */ |
852 | struct { | |
853 | struct bpf_prog *prog; | |
cae1927c | 854 | const struct bpf_prog_offload_ops *ops; /* callee set */ |
ab3f0063 JK |
855 | } verifier; |
856 | /* BPF_OFFLOAD_TRANSLATE, BPF_OFFLOAD_DESTROY */ | |
857 | struct { | |
858 | struct bpf_prog *prog; | |
859 | } offload; | |
a3884572 JK |
860 | /* BPF_OFFLOAD_MAP_ALLOC, BPF_OFFLOAD_MAP_FREE */ |
861 | struct { | |
862 | struct bpf_offloaded_map *offmap; | |
863 | }; | |
74515c57 BT |
864 | /* XDP_SETUP_XSK_UMEM */ |
865 | struct { | |
866 | struct xdp_umem *umem; | |
867 | u16 queue_id; | |
868 | } xsk; | |
a7862b45 BB |
869 | }; |
870 | }; | |
16e5cc64 | 871 | |
d77e38e6 SK |
872 | #ifdef CONFIG_XFRM_OFFLOAD |
873 | struct xfrmdev_ops { | |
874 | int (*xdo_dev_state_add) (struct xfrm_state *x); | |
875 | void (*xdo_dev_state_delete) (struct xfrm_state *x); | |
876 | void (*xdo_dev_state_free) (struct xfrm_state *x); | |
877 | bool (*xdo_dev_offload_ok) (struct sk_buff *skb, | |
878 | struct xfrm_state *x); | |
50bd870a | 879 | void (*xdo_dev_state_advance_esn) (struct xfrm_state *x); |
d77e38e6 SK |
880 | }; |
881 | #endif | |
882 | ||
a5c37c63 IL |
883 | #if IS_ENABLED(CONFIG_TLS_DEVICE) |
884 | enum tls_offload_ctx_dir { | |
885 | TLS_OFFLOAD_CTX_DIR_RX, | |
886 | TLS_OFFLOAD_CTX_DIR_TX, | |
887 | }; | |
888 | ||
889 | struct tls_crypto_info; | |
890 | struct tls_context; | |
891 | ||
892 | struct tlsdev_ops { | |
893 | int (*tls_dev_add)(struct net_device *netdev, struct sock *sk, | |
894 | enum tls_offload_ctx_dir direction, | |
895 | struct tls_crypto_info *crypto_info, | |
896 | u32 start_offload_tcp_sn); | |
897 | void (*tls_dev_del)(struct net_device *netdev, | |
898 | struct tls_context *ctx, | |
899 | enum tls_offload_ctx_dir direction); | |
900 | }; | |
901 | #endif | |
902 | ||
6c557001 FW |
903 | struct dev_ifalias { |
904 | struct rcu_head rcuhead; | |
905 | char ifalias[]; | |
906 | }; | |
907 | ||
d314774c SH |
908 | /* |
909 | * This structure defines the management hooks for network devices. | |
00829823 SH |
910 | * The following hooks can be defined; unless noted otherwise, they are |
911 | * optional and can be filled with a null pointer. | |
d314774c SH |
912 | * |
913 | * int (*ndo_init)(struct net_device *dev); | |
5e82b4b2 BH |
914 | * This function is called once when a network device is registered. |
915 | * The network device can use this for any late stage initialization | |
916 | * or semantic validation. It can fail with an error code which will | |
917 | * be propagated back to register_netdev. | |
d314774c SH |
918 | * |
919 | * void (*ndo_uninit)(struct net_device *dev); | |
920 | * This function is called when device is unregistered or when registration | |
921 | * fails. It is not called if init fails. | |
922 | * | |
923 | * int (*ndo_open)(struct net_device *dev); | |
5e82b4b2 | 924 | * This function is called when a network device transitions to the up |
d314774c SH |
925 | * state. |
926 | * | |
927 | * int (*ndo_stop)(struct net_device *dev); | |
5e82b4b2 | 928 | * This function is called when a network device transitions to the down |
d314774c SH |
929 | * state. |
930 | * | |
dc1f8bf6 SH |
931 | * netdev_tx_t (*ndo_start_xmit)(struct sk_buff *skb, |
932 | * struct net_device *dev); | |
00829823 | 933 | * Called when a packet needs to be transmitted. |
e79d8429 RR |
934 | * Returns NETDEV_TX_OK. Can return NETDEV_TX_BUSY, but you should stop |
935 | * the queue before that can happen; it's for obsolete devices and weird | |
936 | * corner cases, but the stack really does a non-trivial amount | |
937 | * of useless work if you return NETDEV_TX_BUSY. | |
5e82b4b2 | 938 | * Required; cannot be NULL. |
00829823 | 939 | * |
1a2a1444 DM |
940 | * netdev_features_t (*ndo_features_check)(struct sk_buff *skb, |
941 | * struct net_device *dev | |
942 | * netdev_features_t features); | |
943 | * Called by core transmit path to determine if device is capable of | |
944 | * performing offload operations on a given packet. This is to give | |
945 | * the device an opportunity to implement any restrictions that cannot | |
946 | * be otherwise expressed by feature flags. The check is called with | |
947 | * the set of features that the stack has calculated and it returns | |
948 | * those the driver believes to be appropriate. | |
cdba756f | 949 | * |
f663dd9a | 950 | * u16 (*ndo_select_queue)(struct net_device *dev, struct sk_buff *skb, |
99932d4f | 951 | * void *accel_priv, select_queue_fallback_t fallback); |
5e82b4b2 | 952 | * Called to decide which queue to use when device supports multiple |
00829823 SH |
953 | * transmit queues. |
954 | * | |
d314774c SH |
955 | * void (*ndo_change_rx_flags)(struct net_device *dev, int flags); |
956 | * This function is called to allow device receiver to make | |
5e82b4b2 | 957 | * changes to configuration when multicast or promiscuous is enabled. |
d314774c SH |
958 | * |
959 | * void (*ndo_set_rx_mode)(struct net_device *dev); | |
960 | * This function is called device changes address list filtering. | |
01789349 | 961 | * If driver handles unicast address filtering, it should set |
5e82b4b2 | 962 | * IFF_UNICAST_FLT in its priv_flags. |
d314774c SH |
963 | * |
964 | * int (*ndo_set_mac_address)(struct net_device *dev, void *addr); | |
965 | * This function is called when the Media Access Control address | |
37b607c5 | 966 | * needs to be changed. If this interface is not defined, the |
5e82b4b2 | 967 | * MAC address can not be changed. |
d314774c SH |
968 | * |
969 | * int (*ndo_validate_addr)(struct net_device *dev); | |
970 | * Test if Media Access Control address is valid for the device. | |
971 | * | |
972 | * int (*ndo_do_ioctl)(struct net_device *dev, struct ifreq *ifr, int cmd); | |
5e82b4b2 BH |
973 | * Called when a user requests an ioctl which can't be handled by |
974 | * the generic interface code. If not defined ioctls return | |
d314774c SH |
975 | * not supported error code. |
976 | * | |
977 | * int (*ndo_set_config)(struct net_device *dev, struct ifmap *map); | |
978 | * Used to set network devices bus interface parameters. This interface | |
5e82b4b2 | 979 | * is retained for legacy reasons; new devices should use the bus |
d314774c SH |
980 | * interface (PCI) for low level management. |
981 | * | |
982 | * int (*ndo_change_mtu)(struct net_device *dev, int new_mtu); | |
983 | * Called when a user wants to change the Maximum Transfer Unit | |
db46a0e1 | 984 | * of a device. |
d314774c | 985 | * |
00829823 | 986 | * void (*ndo_tx_timeout)(struct net_device *dev); |
5e82b4b2 | 987 | * Callback used when the transmitter has not made any progress |
d314774c SH |
988 | * for dev->watchdog ticks. |
989 | * | |
bc1f4470 | 990 | * void (*ndo_get_stats64)(struct net_device *dev, |
991 | * struct rtnl_link_stats64 *storage); | |
d308e38f | 992 | * struct net_device_stats* (*ndo_get_stats)(struct net_device *dev); |
d314774c | 993 | * Called when a user wants to get the network device usage |
be1f3c2c | 994 | * statistics. Drivers must do one of the following: |
3cfde79c BH |
995 | * 1. Define @ndo_get_stats64 to fill in a zero-initialised |
996 | * rtnl_link_stats64 structure passed by the caller. | |
82695d9b | 997 | * 2. Define @ndo_get_stats to update a net_device_stats structure |
be1f3c2c BH |
998 | * (which should normally be dev->stats) and return a pointer to |
999 | * it. The structure may be changed asynchronously only if each | |
1000 | * field is written atomically. | |
1001 | * 3. Update dev->stats asynchronously and atomically, and define | |
1002 | * neither operation. | |
d314774c | 1003 | * |
3df5b3c6 | 1004 | * bool (*ndo_has_offload_stats)(const struct net_device *dev, int attr_id) |
2c9d85d4 NF |
1005 | * Return true if this device supports offload stats of this attr_id. |
1006 | * | |
1007 | * int (*ndo_get_offload_stats)(int attr_id, const struct net_device *dev, | |
1008 | * void *attr_data) | |
1009 | * Get statistics for offload operations by attr_id. Write it into the | |
1010 | * attr_data pointer. | |
1011 | * | |
5d632cb7 | 1012 | * int (*ndo_vlan_rx_add_vid)(struct net_device *dev, __be16 proto, u16 vid); |
5e82b4b2 | 1013 | * If device supports VLAN filtering this function is called when a |
80d5c368 | 1014 | * VLAN id is registered. |
d314774c | 1015 | * |
5d632cb7 | 1016 | * int (*ndo_vlan_rx_kill_vid)(struct net_device *dev, __be16 proto, u16 vid); |
5e82b4b2 | 1017 | * If device supports VLAN filtering this function is called when a |
80d5c368 | 1018 | * VLAN id is unregistered. |
d314774c SH |
1019 | * |
1020 | * void (*ndo_poll_controller)(struct net_device *dev); | |
95c26df8 WM |
1021 | * |
1022 | * SR-IOV management functions. | |
1023 | * int (*ndo_set_vf_mac)(struct net_device *dev, int vf, u8* mac); | |
79aab093 MS |
1024 | * int (*ndo_set_vf_vlan)(struct net_device *dev, int vf, u16 vlan, |
1025 | * u8 qos, __be16 proto); | |
ed616689 SC |
1026 | * int (*ndo_set_vf_rate)(struct net_device *dev, int vf, int min_tx_rate, |
1027 | * int max_tx_rate); | |
5f8444a3 | 1028 | * int (*ndo_set_vf_spoofchk)(struct net_device *dev, int vf, bool setting); |
dd461d6a | 1029 | * int (*ndo_set_vf_trust)(struct net_device *dev, int vf, bool setting); |
95c26df8 WM |
1030 | * int (*ndo_get_vf_config)(struct net_device *dev, |
1031 | * int vf, struct ifla_vf_info *ivf); | |
1d8faf48 | 1032 | * int (*ndo_set_vf_link_state)(struct net_device *dev, int vf, int link_state); |
57b61080 SF |
1033 | * int (*ndo_set_vf_port)(struct net_device *dev, int vf, |
1034 | * struct nlattr *port[]); | |
01a3d796 VZ |
1035 | * |
1036 | * Enable or disable the VF ability to query its RSS Redirection Table and | |
1037 | * Hash Key. This is needed since on some devices VF share this information | |
5e82b4b2 | 1038 | * with PF and querying it may introduce a theoretical security risk. |
01a3d796 | 1039 | * int (*ndo_set_vf_rss_query_en)(struct net_device *dev, int vf, bool setting); |
57b61080 | 1040 | * int (*ndo_get_vf_port)(struct net_device *dev, int vf, struct sk_buff *skb); |
2572ac53 | 1041 | * int (*ndo_setup_tc)(struct net_device *dev, enum tc_setup_type type, |
de4784ca | 1042 | * void *type_data); |
6a4bc2b4 FF |
1043 | * Called to setup any 'tc' scheduler, classifier or action on @dev. |
1044 | * This is always called from the stack with the rtnl lock held and netif | |
1045 | * tx queues stopped. This allows the netdevice to perform queue | |
1046 | * management safely. | |
c445477d | 1047 | * |
e9bce845 YZ |
1048 | * Fiber Channel over Ethernet (FCoE) offload functions. |
1049 | * int (*ndo_fcoe_enable)(struct net_device *dev); | |
1050 | * Called when the FCoE protocol stack wants to start using LLD for FCoE | |
1051 | * so the underlying device can perform whatever needed configuration or | |
1052 | * initialization to support acceleration of FCoE traffic. | |
1053 | * | |
1054 | * int (*ndo_fcoe_disable)(struct net_device *dev); | |
1055 | * Called when the FCoE protocol stack wants to stop using LLD for FCoE | |
1056 | * so the underlying device can perform whatever needed clean-ups to | |
1057 | * stop supporting acceleration of FCoE traffic. | |
1058 | * | |
1059 | * int (*ndo_fcoe_ddp_setup)(struct net_device *dev, u16 xid, | |
1060 | * struct scatterlist *sgl, unsigned int sgc); | |
1061 | * Called when the FCoE Initiator wants to initialize an I/O that | |
1062 | * is a possible candidate for Direct Data Placement (DDP). The LLD can | |
1063 | * perform necessary setup and returns 1 to indicate the device is set up | |
1064 | * successfully to perform DDP on this I/O, otherwise this returns 0. | |
1065 | * | |
1066 | * int (*ndo_fcoe_ddp_done)(struct net_device *dev, u16 xid); | |
1067 | * Called when the FCoE Initiator/Target is done with the DDPed I/O as | |
1068 | * indicated by the FC exchange id 'xid', so the underlying device can | |
1069 | * clean up and reuse resources for later DDP requests. | |
1070 | * | |
1071 | * int (*ndo_fcoe_ddp_target)(struct net_device *dev, u16 xid, | |
1072 | * struct scatterlist *sgl, unsigned int sgc); | |
1073 | * Called when the FCoE Target wants to initialize an I/O that | |
1074 | * is a possible candidate for Direct Data Placement (DDP). The LLD can | |
1075 | * perform necessary setup and returns 1 to indicate the device is set up | |
1076 | * successfully to perform DDP on this I/O, otherwise this returns 0. | |
1077 | * | |
68bad94e NP |
1078 | * int (*ndo_fcoe_get_hbainfo)(struct net_device *dev, |
1079 | * struct netdev_fcoe_hbainfo *hbainfo); | |
1080 | * Called when the FCoE Protocol stack wants information on the underlying | |
1081 | * device. This information is utilized by the FCoE protocol stack to | |
1082 | * register attributes with Fiber Channel management service as per the | |
1083 | * FC-GS Fabric Device Management Information(FDMI) specification. | |
1084 | * | |
e9bce845 YZ |
1085 | * int (*ndo_fcoe_get_wwn)(struct net_device *dev, u64 *wwn, int type); |
1086 | * Called when the underlying device wants to override default World Wide | |
1087 | * Name (WWN) generation mechanism in FCoE protocol stack to pass its own | |
1088 | * World Wide Port Name (WWPN) or World Wide Node Name (WWNN) to the FCoE | |
1089 | * protocol stack to use. | |
1090 | * | |
c445477d BH |
1091 | * RFS acceleration. |
1092 | * int (*ndo_rx_flow_steer)(struct net_device *dev, const struct sk_buff *skb, | |
1093 | * u16 rxq_index, u32 flow_id); | |
1094 | * Set hardware filter for RFS. rxq_index is the target queue index; | |
1095 | * flow_id is a flow ID to be passed to rps_may_expire_flow() later. | |
1096 | * Return the filter ID on success, or a negative error code. | |
fbaec0ea | 1097 | * |
8b98a70c | 1098 | * Slave management functions (for bridge, bonding, etc). |
fbaec0ea JP |
1099 | * int (*ndo_add_slave)(struct net_device *dev, struct net_device *slave_dev); |
1100 | * Called to make another netdev an underling. | |
1101 | * | |
1102 | * int (*ndo_del_slave)(struct net_device *dev, struct net_device *slave_dev); | |
1103 | * Called to release previously enslaved netdev. | |
5455c699 MM |
1104 | * |
1105 | * Feature/offload setting functions. | |
1a2a1444 DM |
1106 | * netdev_features_t (*ndo_fix_features)(struct net_device *dev, |
1107 | * netdev_features_t features); | |
1108 | * Adjusts the requested feature flags according to device-specific | |
1109 | * constraints, and returns the resulting flags. Must not modify | |
1110 | * the device state. | |
1111 | * | |
c8f44aff | 1112 | * int (*ndo_set_features)(struct net_device *dev, netdev_features_t features); |
5455c699 MM |
1113 | * Called to update device configuration to new features. Passed |
1114 | * feature set might be less than what was returned by ndo_fix_features()). | |
1115 | * Must return >0 or -errno if it changed dev->features itself. | |
1116 | * | |
edc7d573 | 1117 | * int (*ndo_fdb_add)(struct ndmsg *ndm, struct nlattr *tb[], |
1118 | * struct net_device *dev, | |
f6f6424b | 1119 | * const unsigned char *addr, u16 vid, u16 flags) |
77162022 | 1120 | * Adds an FDB entry to dev for addr. |
1690be63 VY |
1121 | * int (*ndo_fdb_del)(struct ndmsg *ndm, struct nlattr *tb[], |
1122 | * struct net_device *dev, | |
f6f6424b | 1123 | * const unsigned char *addr, u16 vid) |
77162022 JF |
1124 | * Deletes the FDB entry from dev coresponding to addr. |
1125 | * int (*ndo_fdb_dump)(struct sk_buff *skb, struct netlink_callback *cb, | |
5d5eacb3 | 1126 | * struct net_device *dev, struct net_device *filter_dev, |
d297653d | 1127 | * int *idx) |
77162022 JF |
1128 | * Used to add FDB entries to dump requests. Implementers should add |
1129 | * entries to skb and update idx with the number of entries. | |
e5a55a89 | 1130 | * |
ad41faa8 ND |
1131 | * int (*ndo_bridge_setlink)(struct net_device *dev, struct nlmsghdr *nlh, |
1132 | * u16 flags) | |
e5a55a89 | 1133 | * int (*ndo_bridge_getlink)(struct sk_buff *skb, u32 pid, u32 seq, |
46c264da ND |
1134 | * struct net_device *dev, u32 filter_mask, |
1135 | * int nlflags) | |
ad41faa8 ND |
1136 | * int (*ndo_bridge_dellink)(struct net_device *dev, struct nlmsghdr *nlh, |
1137 | * u16 flags); | |
4bf84c35 JP |
1138 | * |
1139 | * int (*ndo_change_carrier)(struct net_device *dev, bool new_carrier); | |
1140 | * Called to change device carrier. Soft-devices (like dummy, team, etc) | |
1141 | * which do not represent real hardware may define this to allow their | |
1142 | * userspace components to manage their virtual carrier state. Devices | |
1143 | * that determine carrier state from physical hardware properties (eg | |
1144 | * network cables) or protocol-dependent mechanisms (eg | |
1145 | * USB_CDC_NOTIFY_NETWORK_CONNECTION) should NOT implement this function. | |
66b52b0d JP |
1146 | * |
1147 | * int (*ndo_get_phys_port_id)(struct net_device *dev, | |
02637fce | 1148 | * struct netdev_phys_item_id *ppid); |
66b52b0d JP |
1149 | * Called to get ID of physical port of this device. If driver does |
1150 | * not implement this, it is assumed that the hw is not able to have | |
1151 | * multiple net devices on single physical port. | |
53cf5275 | 1152 | * |
7c46a640 AD |
1153 | * void (*ndo_udp_tunnel_add)(struct net_device *dev, |
1154 | * struct udp_tunnel_info *ti); | |
1155 | * Called by UDP tunnel to notify a driver about the UDP port and socket | |
1156 | * address family that a UDP tunnel is listnening to. It is called only | |
1157 | * when a new port starts listening. The operation is protected by the | |
1158 | * RTNL. | |
1159 | * | |
1160 | * void (*ndo_udp_tunnel_del)(struct net_device *dev, | |
1161 | * struct udp_tunnel_info *ti); | |
1162 | * Called by UDP tunnel to notify the driver about a UDP port and socket | |
1163 | * address family that the UDP tunnel is not listening to anymore. The | |
1164 | * operation is protected by the RTNL. | |
1165 | * | |
a6cc0cfa JF |
1166 | * void* (*ndo_dfwd_add_station)(struct net_device *pdev, |
1167 | * struct net_device *dev) | |
1168 | * Called by upper layer devices to accelerate switching or other | |
1169 | * station functionality into hardware. 'pdev is the lowerdev | |
1170 | * to use for the offload and 'dev' is the net device that will | |
1171 | * back the offload. Returns a pointer to the private structure | |
1172 | * the upper layer will maintain. | |
1173 | * void (*ndo_dfwd_del_station)(struct net_device *pdev, void *priv) | |
1174 | * Called by upper layer device to delete the station created | |
1175 | * by 'ndo_dfwd_add_station'. 'pdev' is the net device backing | |
1176 | * the station and priv is the structure returned by the add | |
1177 | * operation. | |
822b3b2e JF |
1178 | * int (*ndo_set_tx_maxrate)(struct net_device *dev, |
1179 | * int queue_index, u32 maxrate); | |
1180 | * Called when a user wants to set a max-rate limitation of specific | |
1181 | * TX queue. | |
a54acb3a ND |
1182 | * int (*ndo_get_iflink)(const struct net_device *dev); |
1183 | * Called to get the iflink value of this device. | |
d746d707 | 1184 | * void (*ndo_change_proto_down)(struct net_device *dev, |
5e82b4b2 | 1185 | * bool proto_down); |
d746d707 AK |
1186 | * This function is used to pass protocol port error state information |
1187 | * to the switch driver. The switch driver can react to the proto_down | |
1188 | * by doing a phys down on the associated switch port. | |
fc4099f1 PS |
1189 | * int (*ndo_fill_metadata_dst)(struct net_device *dev, struct sk_buff *skb); |
1190 | * This function is used to get egress tunnel information for given skb. | |
1191 | * This is useful for retrieving outer tunnel header parameters while | |
1192 | * sampling packet. | |
871b642a PA |
1193 | * void (*ndo_set_rx_headroom)(struct net_device *dev, int needed_headroom); |
1194 | * This function is used to specify the headroom that the skb must | |
1195 | * consider when allocation skb during packet reception. Setting | |
1196 | * appropriate rx headroom value allows avoiding skb head copy on | |
5e82b4b2 | 1197 | * forward. Setting a negative value resets the rx headroom to the |
871b642a | 1198 | * default value. |
f4e63525 | 1199 | * int (*ndo_bpf)(struct net_device *dev, struct netdev_bpf *bpf); |
a7862b45 | 1200 | * This function is used to set or query state related to XDP on the |
f4e63525 JK |
1201 | * netdevice and manage BPF offload. See definition of |
1202 | * enum bpf_netdev_command for details. | |
42b33468 JDB |
1203 | * int (*ndo_xdp_xmit)(struct net_device *dev, int n, struct xdp_frame **xdp, |
1204 | * u32 flags); | |
735fc405 JDB |
1205 | * This function is used to submit @n XDP packets for transmit on a |
1206 | * netdevice. Returns number of frames successfully transmitted, frames | |
1207 | * that got dropped are freed/returned via xdp_return_frame(). | |
1208 | * Returns negative number, means general error invoking ndo, meaning | |
1209 | * no frames were xmit'ed and core-caller will free all frames. | |
d314774c SH |
1210 | */ |
1211 | struct net_device_ops { | |
1212 | int (*ndo_init)(struct net_device *dev); | |
1213 | void (*ndo_uninit)(struct net_device *dev); | |
1214 | int (*ndo_open)(struct net_device *dev); | |
1215 | int (*ndo_stop)(struct net_device *dev); | |
cdba756f ED |
1216 | netdev_tx_t (*ndo_start_xmit)(struct sk_buff *skb, |
1217 | struct net_device *dev); | |
1218 | netdev_features_t (*ndo_features_check)(struct sk_buff *skb, | |
1219 | struct net_device *dev, | |
1220 | netdev_features_t features); | |
00829823 | 1221 | u16 (*ndo_select_queue)(struct net_device *dev, |
f663dd9a | 1222 | struct sk_buff *skb, |
99932d4f DB |
1223 | void *accel_priv, |
1224 | select_queue_fallback_t fallback); | |
d314774c SH |
1225 | void (*ndo_change_rx_flags)(struct net_device *dev, |
1226 | int flags); | |
d314774c | 1227 | void (*ndo_set_rx_mode)(struct net_device *dev); |
d314774c SH |
1228 | int (*ndo_set_mac_address)(struct net_device *dev, |
1229 | void *addr); | |
d314774c | 1230 | int (*ndo_validate_addr)(struct net_device *dev); |
d314774c SH |
1231 | int (*ndo_do_ioctl)(struct net_device *dev, |
1232 | struct ifreq *ifr, int cmd); | |
d314774c SH |
1233 | int (*ndo_set_config)(struct net_device *dev, |
1234 | struct ifmap *map); | |
00829823 SH |
1235 | int (*ndo_change_mtu)(struct net_device *dev, |
1236 | int new_mtu); | |
1237 | int (*ndo_neigh_setup)(struct net_device *dev, | |
1238 | struct neigh_parms *); | |
d314774c SH |
1239 | void (*ndo_tx_timeout) (struct net_device *dev); |
1240 | ||
bc1f4470 | 1241 | void (*ndo_get_stats64)(struct net_device *dev, |
1242 | struct rtnl_link_stats64 *storage); | |
3df5b3c6 | 1243 | bool (*ndo_has_offload_stats)(const struct net_device *dev, int attr_id); |
2c9d85d4 NF |
1244 | int (*ndo_get_offload_stats)(int attr_id, |
1245 | const struct net_device *dev, | |
1246 | void *attr_data); | |
d314774c SH |
1247 | struct net_device_stats* (*ndo_get_stats)(struct net_device *dev); |
1248 | ||
8e586137 | 1249 | int (*ndo_vlan_rx_add_vid)(struct net_device *dev, |
80d5c368 | 1250 | __be16 proto, u16 vid); |
8e586137 | 1251 | int (*ndo_vlan_rx_kill_vid)(struct net_device *dev, |
80d5c368 | 1252 | __be16 proto, u16 vid); |
d314774c | 1253 | #ifdef CONFIG_NET_POLL_CONTROLLER |
d314774c | 1254 | void (*ndo_poll_controller)(struct net_device *dev); |
4247e161 | 1255 | int (*ndo_netpoll_setup)(struct net_device *dev, |
a8779ec1 | 1256 | struct netpoll_info *info); |
0e34e931 | 1257 | void (*ndo_netpoll_cleanup)(struct net_device *dev); |
d314774c | 1258 | #endif |
95c26df8 WM |
1259 | int (*ndo_set_vf_mac)(struct net_device *dev, |
1260 | int queue, u8 *mac); | |
1261 | int (*ndo_set_vf_vlan)(struct net_device *dev, | |
79aab093 MS |
1262 | int queue, u16 vlan, |
1263 | u8 qos, __be16 proto); | |
ed616689 SC |
1264 | int (*ndo_set_vf_rate)(struct net_device *dev, |
1265 | int vf, int min_tx_rate, | |
1266 | int max_tx_rate); | |
5f8444a3 GR |
1267 | int (*ndo_set_vf_spoofchk)(struct net_device *dev, |
1268 | int vf, bool setting); | |
dd461d6a HS |
1269 | int (*ndo_set_vf_trust)(struct net_device *dev, |
1270 | int vf, bool setting); | |
95c26df8 WM |
1271 | int (*ndo_get_vf_config)(struct net_device *dev, |
1272 | int vf, | |
1273 | struct ifla_vf_info *ivf); | |
1d8faf48 RE |
1274 | int (*ndo_set_vf_link_state)(struct net_device *dev, |
1275 | int vf, int link_state); | |
3b766cd8 EBE |
1276 | int (*ndo_get_vf_stats)(struct net_device *dev, |
1277 | int vf, | |
1278 | struct ifla_vf_stats | |
1279 | *vf_stats); | |
57b61080 SF |
1280 | int (*ndo_set_vf_port)(struct net_device *dev, |
1281 | int vf, | |
1282 | struct nlattr *port[]); | |
1283 | int (*ndo_get_vf_port)(struct net_device *dev, | |
1284 | int vf, struct sk_buff *skb); | |
cc8e27cc EC |
1285 | int (*ndo_set_vf_guid)(struct net_device *dev, |
1286 | int vf, u64 guid, | |
1287 | int guid_type); | |
01a3d796 VZ |
1288 | int (*ndo_set_vf_rss_query_en)( |
1289 | struct net_device *dev, | |
1290 | int vf, bool setting); | |
16e5cc64 | 1291 | int (*ndo_setup_tc)(struct net_device *dev, |
2572ac53 | 1292 | enum tc_setup_type type, |
de4784ca | 1293 | void *type_data); |
d11ead75 | 1294 | #if IS_ENABLED(CONFIG_FCOE) |
cb454399 YZ |
1295 | int (*ndo_fcoe_enable)(struct net_device *dev); |
1296 | int (*ndo_fcoe_disable)(struct net_device *dev); | |
4d288d57 YZ |
1297 | int (*ndo_fcoe_ddp_setup)(struct net_device *dev, |
1298 | u16 xid, | |
1299 | struct scatterlist *sgl, | |
1300 | unsigned int sgc); | |
1301 | int (*ndo_fcoe_ddp_done)(struct net_device *dev, | |
1302 | u16 xid); | |
6247e086 YZ |
1303 | int (*ndo_fcoe_ddp_target)(struct net_device *dev, |
1304 | u16 xid, | |
1305 | struct scatterlist *sgl, | |
1306 | unsigned int sgc); | |
68bad94e NP |
1307 | int (*ndo_fcoe_get_hbainfo)(struct net_device *dev, |
1308 | struct netdev_fcoe_hbainfo *hbainfo); | |
3c9c36bc BPG |
1309 | #endif |
1310 | ||
d11ead75 | 1311 | #if IS_ENABLED(CONFIG_LIBFCOE) |
df5c7945 YZ |
1312 | #define NETDEV_FCOE_WWNN 0 |
1313 | #define NETDEV_FCOE_WWPN 1 | |
1314 | int (*ndo_fcoe_get_wwn)(struct net_device *dev, | |
1315 | u64 *wwn, int type); | |
4d288d57 | 1316 | #endif |
3c9c36bc | 1317 | |
c445477d BH |
1318 | #ifdef CONFIG_RFS_ACCEL |
1319 | int (*ndo_rx_flow_steer)(struct net_device *dev, | |
1320 | const struct sk_buff *skb, | |
1321 | u16 rxq_index, | |
1322 | u32 flow_id); | |
1323 | #endif | |
fbaec0ea | 1324 | int (*ndo_add_slave)(struct net_device *dev, |
33eaf2a6 DA |
1325 | struct net_device *slave_dev, |
1326 | struct netlink_ext_ack *extack); | |
fbaec0ea JP |
1327 | int (*ndo_del_slave)(struct net_device *dev, |
1328 | struct net_device *slave_dev); | |
c8f44aff MM |
1329 | netdev_features_t (*ndo_fix_features)(struct net_device *dev, |
1330 | netdev_features_t features); | |
5455c699 | 1331 | int (*ndo_set_features)(struct net_device *dev, |
c8f44aff | 1332 | netdev_features_t features); |
503eebc2 JP |
1333 | int (*ndo_neigh_construct)(struct net_device *dev, |
1334 | struct neighbour *n); | |
1335 | void (*ndo_neigh_destroy)(struct net_device *dev, | |
1336 | struct neighbour *n); | |
77162022 JF |
1337 | |
1338 | int (*ndo_fdb_add)(struct ndmsg *ndm, | |
edc7d573 | 1339 | struct nlattr *tb[], |
77162022 | 1340 | struct net_device *dev, |
6b6e2725 | 1341 | const unsigned char *addr, |
f6f6424b | 1342 | u16 vid, |
77162022 JF |
1343 | u16 flags); |
1344 | int (*ndo_fdb_del)(struct ndmsg *ndm, | |
1690be63 | 1345 | struct nlattr *tb[], |
77162022 | 1346 | struct net_device *dev, |
f6f6424b JP |
1347 | const unsigned char *addr, |
1348 | u16 vid); | |
77162022 JF |
1349 | int (*ndo_fdb_dump)(struct sk_buff *skb, |
1350 | struct netlink_callback *cb, | |
1351 | struct net_device *dev, | |
5d5eacb3 | 1352 | struct net_device *filter_dev, |
d297653d | 1353 | int *idx); |
e5a55a89 JF |
1354 | |
1355 | int (*ndo_bridge_setlink)(struct net_device *dev, | |
add511b3 RP |
1356 | struct nlmsghdr *nlh, |
1357 | u16 flags); | |
e5a55a89 JF |
1358 | int (*ndo_bridge_getlink)(struct sk_buff *skb, |
1359 | u32 pid, u32 seq, | |
6cbdceeb | 1360 | struct net_device *dev, |
46c264da ND |
1361 | u32 filter_mask, |
1362 | int nlflags); | |
407af329 | 1363 | int (*ndo_bridge_dellink)(struct net_device *dev, |
add511b3 RP |
1364 | struct nlmsghdr *nlh, |
1365 | u16 flags); | |
4bf84c35 JP |
1366 | int (*ndo_change_carrier)(struct net_device *dev, |
1367 | bool new_carrier); | |
66b52b0d | 1368 | int (*ndo_get_phys_port_id)(struct net_device *dev, |
02637fce | 1369 | struct netdev_phys_item_id *ppid); |
db24a904 DA |
1370 | int (*ndo_get_phys_port_name)(struct net_device *dev, |
1371 | char *name, size_t len); | |
7c46a640 AD |
1372 | void (*ndo_udp_tunnel_add)(struct net_device *dev, |
1373 | struct udp_tunnel_info *ti); | |
1374 | void (*ndo_udp_tunnel_del)(struct net_device *dev, | |
1375 | struct udp_tunnel_info *ti); | |
a6cc0cfa JF |
1376 | void* (*ndo_dfwd_add_station)(struct net_device *pdev, |
1377 | struct net_device *dev); | |
1378 | void (*ndo_dfwd_del_station)(struct net_device *pdev, | |
1379 | void *priv); | |
1380 | ||
25175ba5 | 1381 | int (*ndo_get_lock_subclass)(struct net_device *dev); |
822b3b2e JF |
1382 | int (*ndo_set_tx_maxrate)(struct net_device *dev, |
1383 | int queue_index, | |
1384 | u32 maxrate); | |
a54acb3a | 1385 | int (*ndo_get_iflink)(const struct net_device *dev); |
d746d707 AK |
1386 | int (*ndo_change_proto_down)(struct net_device *dev, |
1387 | bool proto_down); | |
fc4099f1 PS |
1388 | int (*ndo_fill_metadata_dst)(struct net_device *dev, |
1389 | struct sk_buff *skb); | |
871b642a PA |
1390 | void (*ndo_set_rx_headroom)(struct net_device *dev, |
1391 | int needed_headroom); | |
f4e63525 JK |
1392 | int (*ndo_bpf)(struct net_device *dev, |
1393 | struct netdev_bpf *bpf); | |
735fc405 | 1394 | int (*ndo_xdp_xmit)(struct net_device *dev, int n, |
42b33468 JDB |
1395 | struct xdp_frame **xdp, |
1396 | u32 flags); | |
e3760c7e MK |
1397 | int (*ndo_xsk_async_xmit)(struct net_device *dev, |
1398 | u32 queue_id); | |
d314774c SH |
1399 | }; |
1400 | ||
7aa98047 LR |
1401 | /** |
1402 | * enum net_device_priv_flags - &struct net_device priv_flags | |
1403 | * | |
1404 | * These are the &struct net_device, they are only set internally | |
1405 | * by drivers and used in the kernel. These flags are invisible to | |
5e82b4b2 | 1406 | * userspace; this means that the order of these flags can change |
7aa98047 LR |
1407 | * during any kernel release. |
1408 | * | |
1409 | * You should have a pretty good reason to be extending these flags. | |
1410 | * | |
1411 | * @IFF_802_1Q_VLAN: 802.1Q VLAN device | |
1412 | * @IFF_EBRIDGE: Ethernet bridging device | |
7aa98047 | 1413 | * @IFF_BONDING: bonding master or slave |
7aa98047 | 1414 | * @IFF_ISATAP: ISATAP interface (RFC4214) |
7aa98047 LR |
1415 | * @IFF_WAN_HDLC: WAN HDLC device |
1416 | * @IFF_XMIT_DST_RELEASE: dev_hard_start_xmit() is allowed to | |
1417 | * release skb->dst | |
1418 | * @IFF_DONT_BRIDGE: disallow bridging this ether dev | |
1419 | * @IFF_DISABLE_NETPOLL: disable netpoll at run-time | |
1420 | * @IFF_MACVLAN_PORT: device used as macvlan port | |
1421 | * @IFF_BRIDGE_PORT: device used as bridge port | |
1422 | * @IFF_OVS_DATAPATH: device used as Open vSwitch datapath port | |
1423 | * @IFF_TX_SKB_SHARING: The interface supports sharing skbs on transmit | |
1424 | * @IFF_UNICAST_FLT: Supports unicast filtering | |
1425 | * @IFF_TEAM_PORT: device used as team port | |
1426 | * @IFF_SUPP_NOFCS: device supports sending custom FCS | |
1427 | * @IFF_LIVE_ADDR_CHANGE: device supports hardware address | |
1428 | * change when it's running | |
1429 | * @IFF_MACVLAN: Macvlan device | |
6d0e24cd LB |
1430 | * @IFF_XMIT_DST_RELEASE_PERM: IFF_XMIT_DST_RELEASE not taking into account |
1431 | * underlying stacked devices | |
007979ea | 1432 | * @IFF_L3MDEV_MASTER: device is an L3 master device |
fa8187c9 | 1433 | * @IFF_NO_QUEUE: device can run without qdisc attached |
35d4e172 | 1434 | * @IFF_OPENVSWITCH: device is a Open vSwitch master |
fee6d4c7 | 1435 | * @IFF_L3MDEV_SLAVE: device is enslaved to an L3 master device |
c981e421 | 1436 | * @IFF_TEAM: device is a team device |
d4ab4286 | 1437 | * @IFF_RXFH_CONFIGURED: device has had Rx Flow indirection table configured |
871b642a PA |
1438 | * @IFF_PHONY_HEADROOM: the headroom value is controlled by an external |
1439 | * entity (i.e. the master device for bridged veth) | |
3c175784 | 1440 | * @IFF_MACSEC: device is a MACsec device |
f5426250 | 1441 | * @IFF_NO_RX_HANDLER: device doesn't support the rx_handler hook |
30c8bd5a SS |
1442 | * @IFF_FAILOVER: device is a failover master device |
1443 | * @IFF_FAILOVER_SLAVE: device is lower dev of a failover master device | |
7aa98047 LR |
1444 | */ |
1445 | enum netdev_priv_flags { | |
1446 | IFF_802_1Q_VLAN = 1<<0, | |
1447 | IFF_EBRIDGE = 1<<1, | |
0dc1549b JP |
1448 | IFF_BONDING = 1<<2, |
1449 | IFF_ISATAP = 1<<3, | |
1450 | IFF_WAN_HDLC = 1<<4, | |
1451 | IFF_XMIT_DST_RELEASE = 1<<5, | |
1452 | IFF_DONT_BRIDGE = 1<<6, | |
1453 | IFF_DISABLE_NETPOLL = 1<<7, | |
1454 | IFF_MACVLAN_PORT = 1<<8, | |
1455 | IFF_BRIDGE_PORT = 1<<9, | |
1456 | IFF_OVS_DATAPATH = 1<<10, | |
1457 | IFF_TX_SKB_SHARING = 1<<11, | |
1458 | IFF_UNICAST_FLT = 1<<12, | |
1459 | IFF_TEAM_PORT = 1<<13, | |
1460 | IFF_SUPP_NOFCS = 1<<14, | |
1461 | IFF_LIVE_ADDR_CHANGE = 1<<15, | |
1462 | IFF_MACVLAN = 1<<16, | |
1463 | IFF_XMIT_DST_RELEASE_PERM = 1<<17, | |
1ec54cb4 PA |
1464 | IFF_L3MDEV_MASTER = 1<<18, |
1465 | IFF_NO_QUEUE = 1<<19, | |
1466 | IFF_OPENVSWITCH = 1<<20, | |
1467 | IFF_L3MDEV_SLAVE = 1<<21, | |
1468 | IFF_TEAM = 1<<22, | |
1469 | IFF_RXFH_CONFIGURED = 1<<23, | |
1470 | IFF_PHONY_HEADROOM = 1<<24, | |
1471 | IFF_MACSEC = 1<<25, | |
f5426250 | 1472 | IFF_NO_RX_HANDLER = 1<<26, |
30c8bd5a SS |
1473 | IFF_FAILOVER = 1<<27, |
1474 | IFF_FAILOVER_SLAVE = 1<<28, | |
7aa98047 LR |
1475 | }; |
1476 | ||
1477 | #define IFF_802_1Q_VLAN IFF_802_1Q_VLAN | |
1478 | #define IFF_EBRIDGE IFF_EBRIDGE | |
7aa98047 | 1479 | #define IFF_BONDING IFF_BONDING |
7aa98047 | 1480 | #define IFF_ISATAP IFF_ISATAP |
7aa98047 LR |
1481 | #define IFF_WAN_HDLC IFF_WAN_HDLC |
1482 | #define IFF_XMIT_DST_RELEASE IFF_XMIT_DST_RELEASE | |
1483 | #define IFF_DONT_BRIDGE IFF_DONT_BRIDGE | |
1484 | #define IFF_DISABLE_NETPOLL IFF_DISABLE_NETPOLL | |
1485 | #define IFF_MACVLAN_PORT IFF_MACVLAN_PORT | |
1486 | #define IFF_BRIDGE_PORT IFF_BRIDGE_PORT | |
1487 | #define IFF_OVS_DATAPATH IFF_OVS_DATAPATH | |
1488 | #define IFF_TX_SKB_SHARING IFF_TX_SKB_SHARING | |
1489 | #define IFF_UNICAST_FLT IFF_UNICAST_FLT | |
1490 | #define IFF_TEAM_PORT IFF_TEAM_PORT | |
1491 | #define IFF_SUPP_NOFCS IFF_SUPP_NOFCS | |
1492 | #define IFF_LIVE_ADDR_CHANGE IFF_LIVE_ADDR_CHANGE | |
1493 | #define IFF_MACVLAN IFF_MACVLAN | |
02875878 | 1494 | #define IFF_XMIT_DST_RELEASE_PERM IFF_XMIT_DST_RELEASE_PERM |
007979ea | 1495 | #define IFF_L3MDEV_MASTER IFF_L3MDEV_MASTER |
fa8187c9 | 1496 | #define IFF_NO_QUEUE IFF_NO_QUEUE |
35d4e172 | 1497 | #define IFF_OPENVSWITCH IFF_OPENVSWITCH |
8f25348b | 1498 | #define IFF_L3MDEV_SLAVE IFF_L3MDEV_SLAVE |
c981e421 | 1499 | #define IFF_TEAM IFF_TEAM |
d4ab4286 | 1500 | #define IFF_RXFH_CONFIGURED IFF_RXFH_CONFIGURED |
3c175784 | 1501 | #define IFF_MACSEC IFF_MACSEC |
f5426250 | 1502 | #define IFF_NO_RX_HANDLER IFF_NO_RX_HANDLER |
30c8bd5a SS |
1503 | #define IFF_FAILOVER IFF_FAILOVER |
1504 | #define IFF_FAILOVER_SLAVE IFF_FAILOVER_SLAVE | |
7aa98047 | 1505 | |
536721b1 KK |
1506 | /** |
1507 | * struct net_device - The DEVICE structure. | |
d651983d MCC |
1508 | * |
1509 | * Actually, this whole structure is a big mistake. It mixes I/O | |
1510 | * data with strictly "high-level" data, and it has to know about | |
1511 | * almost every data structure used in the INET module. | |
536721b1 KK |
1512 | * |
1513 | * @name: This is the first field of the "visible" part of this structure | |
1514 | * (i.e. as seen by users in the "Space.c" file). It is the name | |
d651983d | 1515 | * of the interface. |
536721b1 KK |
1516 | * |
1517 | * @name_hlist: Device name hash chain, please keep it close to name[] | |
1518 | * @ifalias: SNMP alias | |
1519 | * @mem_end: Shared memory end | |
1520 | * @mem_start: Shared memory start | |
1521 | * @base_addr: Device I/O address | |
1522 | * @irq: Device IRQ number | |
1523 | * | |
1524 | * @state: Generic network queuing layer state, see netdev_state_t | |
1525 | * @dev_list: The global list of network devices | |
5e82b4b2 BH |
1526 | * @napi_list: List entry used for polling NAPI devices |
1527 | * @unreg_list: List entry when we are unregistering the | |
1528 | * device; see the function unregister_netdev | |
1529 | * @close_list: List entry used when we are closing the device | |
62d885fe BP |
1530 | * @ptype_all: Device-specific packet handlers for all protocols |
1531 | * @ptype_specific: Device-specific, protocol-specific packet handlers | |
536721b1 KK |
1532 | * |
1533 | * @adj_list: Directly linked devices, like slaves for bonding | |
536721b1 KK |
1534 | * @features: Currently active device features |
1535 | * @hw_features: User-changeable features | |
1536 | * | |
1537 | * @wanted_features: User-requested features | |
1538 | * @vlan_features: Mask of features inheritable by VLAN devices | |
1539 | * | |
1540 | * @hw_enc_features: Mask of features inherited by encapsulating devices | |
1541 | * This field indicates what encapsulation | |
1542 | * offloads the hardware is capable of doing, | |
1543 | * and drivers will need to set them appropriately. | |
1544 | * | |
1545 | * @mpls_features: Mask of features inheritable by MPLS | |
1546 | * | |
1547 | * @ifindex: interface index | |
5e82b4b2 | 1548 | * @group: The group the device belongs to |
536721b1 KK |
1549 | * |
1550 | * @stats: Statistics struct, which was left as a legacy, use | |
1551 | * rtnl_link_stats64 instead | |
1552 | * | |
1553 | * @rx_dropped: Dropped packets by core network, | |
1554 | * do not use this in drivers | |
1555 | * @tx_dropped: Dropped packets by core network, | |
1556 | * do not use this in drivers | |
6e7333d3 JW |
1557 | * @rx_nohandler: nohandler dropped packets by core network on |
1558 | * inactive devices, do not use this in drivers | |
9e55e5d3 FF |
1559 | * @carrier_up_count: Number of times the carrier has been up |
1560 | * @carrier_down_count: Number of times the carrier has been down | |
536721b1 | 1561 | * |
536721b1 KK |
1562 | * @wireless_handlers: List of functions to handle Wireless Extensions, |
1563 | * instead of ioctl, | |
1564 | * see <net/iw_handler.h> for details. | |
1565 | * @wireless_data: Instance data managed by the core of wireless extensions | |
1566 | * | |
1567 | * @netdev_ops: Includes several pointers to callbacks, | |
1568 | * if one wants to override the ndo_*() functions | |
1569 | * @ethtool_ops: Management operations | |
f997c55c AA |
1570 | * @ndisc_ops: Includes callbacks for different IPv6 neighbour |
1571 | * discovery handling. Necessary for e.g. 6LoWPAN. | |
d476059e | 1572 | * @header_ops: Includes callbacks for creating,parsing,caching,etc |
536721b1 KK |
1573 | * of Layer 2 headers. |
1574 | * | |
1575 | * @flags: Interface flags (a la BSD) | |
1576 | * @priv_flags: Like 'flags' but invisible to userspace, | |
1577 | * see if.h for the definitions | |
1578 | * @gflags: Global flags ( kept as legacy ) | |
1579 | * @padded: How much padding added by alloc_netdev() | |
1580 | * @operstate: RFC2863 operstate | |
1581 | * @link_mode: Mapping policy to operstate | |
1582 | * @if_port: Selectable AUI, TP, ... | |
1583 | * @dma: DMA channel | |
1584 | * @mtu: Interface MTU value | |
61e84623 JW |
1585 | * @min_mtu: Interface Minimum MTU value |
1586 | * @max_mtu: Interface Maximum MTU value | |
536721b1 | 1587 | * @type: Interface hardware type |
2793a23a | 1588 | * @hard_header_len: Maximum hardware header length. |
217e6fa2 | 1589 | * @min_header_len: Minimum hardware header length |
536721b1 KK |
1590 | * |
1591 | * @needed_headroom: Extra headroom the hardware may need, but not in all | |
1592 | * cases can this be guaranteed | |
1593 | * @needed_tailroom: Extra tailroom the hardware may need, but not in all | |
1594 | * cases can this be guaranteed. Some cases also use | |
1595 | * LL_MAX_HEADER instead to allocate the skb | |
1596 | * | |
1597 | * interface address info: | |
1598 | * | |
1599 | * @perm_addr: Permanent hw address | |
1600 | * @addr_assign_type: Hw address assignment type | |
1601 | * @addr_len: Hardware address length | |
8626a0c8 | 1602 | * @neigh_priv_len: Used in neigh_alloc() |
536721b1 KK |
1603 | * @dev_id: Used to differentiate devices that share |
1604 | * the same link layer address | |
1605 | * @dev_port: Used to differentiate devices that share | |
1606 | * the same function | |
1607 | * @addr_list_lock: XXX: need comments on this one | |
5e82b4b2 | 1608 | * @uc_promisc: Counter that indicates promiscuous mode |
536721b1 KK |
1609 | * has been enabled due to the need to listen to |
1610 | * additional unicast addresses in a device that | |
1611 | * does not implement ndo_set_rx_mode() | |
14ffbbb8 TG |
1612 | * @uc: unicast mac addresses |
1613 | * @mc: multicast mac addresses | |
1614 | * @dev_addrs: list of device hw addresses | |
1615 | * @queues_kset: Group of all Kobjects in the Tx and RX queues | |
5e82b4b2 BH |
1616 | * @promiscuity: Number of times the NIC is told to work in |
1617 | * promiscuous mode; if it becomes 0 the NIC will | |
1618 | * exit promiscuous mode | |
536721b1 KK |
1619 | * @allmulti: Counter, enables or disables allmulticast mode |
1620 | * | |
1621 | * @vlan_info: VLAN info | |
1622 | * @dsa_ptr: dsa specific data | |
1623 | * @tipc_ptr: TIPC specific data | |
1624 | * @atalk_ptr: AppleTalk link | |
1625 | * @ip_ptr: IPv4 specific data | |
1626 | * @dn_ptr: DECnet specific data | |
1627 | * @ip6_ptr: IPv6 specific data | |
1628 | * @ax25_ptr: AX.25 specific data | |
1629 | * @ieee80211_ptr: IEEE 802.11 specific data, assign before registering | |
1630 | * | |
536721b1 KK |
1631 | * @dev_addr: Hw address (before bcast, |
1632 | * because most packets are unicast) | |
1633 | * | |
1634 | * @_rx: Array of RX queues | |
1635 | * @num_rx_queues: Number of RX queues | |
1636 | * allocated at register_netdev() time | |
1637 | * @real_num_rx_queues: Number of RX queues currently active in device | |
1638 | * | |
1639 | * @rx_handler: handler for received packets | |
1640 | * @rx_handler_data: XXX: need comments on this one | |
46209401 JP |
1641 | * @miniq_ingress: ingress/clsact qdisc specific data for |
1642 | * ingress processing | |
536721b1 KK |
1643 | * @ingress_queue: XXX: need comments on this one |
1644 | * @broadcast: hw bcast address | |
1645 | * | |
14ffbbb8 TG |
1646 | * @rx_cpu_rmap: CPU reverse-mapping for RX completion interrupts, |
1647 | * indexed by RX queue number. Assigned by driver. | |
1648 | * This must only be set if the ndo_rx_flow_steer | |
1649 | * operation is defined | |
1650 | * @index_hlist: Device index hash chain | |
1651 | * | |
536721b1 KK |
1652 | * @_tx: Array of TX queues |
1653 | * @num_tx_queues: Number of TX queues allocated at alloc_netdev_mq() time | |
1654 | * @real_num_tx_queues: Number of TX queues currently active in device | |
1655 | * @qdisc: Root qdisc from userspace point of view | |
1656 | * @tx_queue_len: Max frames per queue allowed | |
1657 | * @tx_global_lock: XXX: need comments on this one | |
1658 | * | |
1659 | * @xps_maps: XXX: need comments on this one | |
46209401 JP |
1660 | * @miniq_egress: clsact qdisc specific data for |
1661 | * egress processing | |
536721b1 | 1662 | * @watchdog_timeo: Represents the timeout that is used by |
5e82b4b2 | 1663 | * the watchdog (see dev_watchdog()) |
536721b1 KK |
1664 | * @watchdog_timer: List of timers |
1665 | * | |
1666 | * @pcpu_refcnt: Number of references to this device | |
1667 | * @todo_list: Delayed register/unregister | |
536721b1 KK |
1668 | * @link_watch_list: XXX: need comments on this one |
1669 | * | |
1670 | * @reg_state: Register/unregister state machine | |
1671 | * @dismantle: Device is going to be freed | |
1672 | * @rtnl_link_state: This enum represents the phases of creating | |
1673 | * a new link | |
1674 | * | |
cf124db5 DM |
1675 | * @needs_free_netdev: Should unregister perform free_netdev? |
1676 | * @priv_destructor: Called from unregister | |
536721b1 KK |
1677 | * @npinfo: XXX: need comments on this one |
1678 | * @nd_net: Network namespace this network device is inside | |
1679 | * | |
1680 | * @ml_priv: Mid-layer private | |
1681 | * @lstats: Loopback statistics | |
1682 | * @tstats: Tunnel statistics | |
1683 | * @dstats: Dummy statistics | |
1684 | * @vstats: Virtual ethernet statistics | |
1685 | * | |
1686 | * @garp_port: GARP | |
1687 | * @mrp_port: MRP | |
1688 | * | |
1689 | * @dev: Class/net/name entry | |
1690 | * @sysfs_groups: Space for optional device, statistics and wireless | |
1691 | * sysfs groups | |
1692 | * | |
1693 | * @sysfs_rx_queue_group: Space for optional per-rx queue attributes | |
1694 | * @rtnl_link_ops: Rtnl_link_ops | |
1695 | * | |
1696 | * @gso_max_size: Maximum size of generic segmentation offload | |
1697 | * @gso_max_segs: Maximum number of segments that can be passed to the | |
1698 | * NIC for GSO | |
1699 | * | |
1700 | * @dcbnl_ops: Data Center Bridging netlink ops | |
1701 | * @num_tc: Number of traffic classes in the net device | |
1702 | * @tc_to_txq: XXX: need comments on this one | |
920c1cd3 | 1703 | * @prio_tc_map: XXX: need comments on this one |
536721b1 KK |
1704 | * |
1705 | * @fcoe_ddp_xid: Max exchange id for FCoE LRO by ddp | |
1706 | * | |
1707 | * @priomap: XXX: need comments on this one | |
1708 | * @phydev: Physical device may attach itself | |
1709 | * for hardware timestamping | |
e679c9c1 | 1710 | * @sfp_bus: attached &struct sfp_bus structure. |
536721b1 | 1711 | * |
123b3652 ED |
1712 | * @qdisc_tx_busylock: lockdep class annotating Qdisc->busylock spinlock |
1713 | * @qdisc_running_key: lockdep class annotating Qdisc->running seqcount | |
536721b1 | 1714 | * |
d746d707 AK |
1715 | * @proto_down: protocol port state information can be sent to the |
1716 | * switch driver and used to set the phys state of the | |
1717 | * switch port. | |
1718 | * | |
1da177e4 LT |
1719 | * FIXME: cleanup struct net_device such that network protocol info |
1720 | * moves out. | |
1721 | */ | |
1722 | ||
d94d9fee | 1723 | struct net_device { |
1da177e4 | 1724 | char name[IFNAMSIZ]; |
9356b8fc | 1725 | struct hlist_node name_hlist; |
6c557001 | 1726 | struct dev_ifalias __rcu *ifalias; |
1da177e4 LT |
1727 | /* |
1728 | * I/O specific fields | |
1729 | * FIXME: Merge these and struct ifmap into one | |
1730 | */ | |
536721b1 KK |
1731 | unsigned long mem_end; |
1732 | unsigned long mem_start; | |
1733 | unsigned long base_addr; | |
1734 | int irq; | |
1da177e4 LT |
1735 | |
1736 | /* | |
536721b1 KK |
1737 | * Some hardware also needs these fields (state,dev_list, |
1738 | * napi_list,unreg_list,close_list) but they are not | |
1da177e4 LT |
1739 | * part of the usual set specified in Space.c. |
1740 | */ | |
1741 | ||
1da177e4 LT |
1742 | unsigned long state; |
1743 | ||
7562f876 | 1744 | struct list_head dev_list; |
bea3348e | 1745 | struct list_head napi_list; |
44a0873d | 1746 | struct list_head unreg_list; |
5cde2829 | 1747 | struct list_head close_list; |
7866a621 SN |
1748 | struct list_head ptype_all; |
1749 | struct list_head ptype_specific; | |
2f268f12 | 1750 | |
2f268f12 VF |
1751 | struct { |
1752 | struct list_head upper; | |
1753 | struct list_head lower; | |
1754 | } adj_list; | |
1755 | ||
c8f44aff | 1756 | netdev_features_t features; |
c8f44aff | 1757 | netdev_features_t hw_features; |
c8f44aff | 1758 | netdev_features_t wanted_features; |
c8f44aff | 1759 | netdev_features_t vlan_features; |
6a674e9c | 1760 | netdev_features_t hw_enc_features; |
0d89d203 | 1761 | netdev_features_t mpls_features; |
802ab55a | 1762 | netdev_features_t gso_partial_features; |
04ed3e74 | 1763 | |
1da177e4 | 1764 | int ifindex; |
7a66bbc9 | 1765 | int group; |
1da177e4 | 1766 | |
c45d286e | 1767 | struct net_device_stats stats; |
015f0688 | 1768 | |
015f0688 ED |
1769 | atomic_long_t rx_dropped; |
1770 | atomic_long_t tx_dropped; | |
6e7333d3 | 1771 | atomic_long_t rx_nohandler; |
1da177e4 | 1772 | |
b2d3bcfa DD |
1773 | /* Stats to monitor link on/off, flapping */ |
1774 | atomic_t carrier_up_count; | |
1775 | atomic_t carrier_down_count; | |
1776 | ||
b86e0280 | 1777 | #ifdef CONFIG_WIRELESS_EXT |
5e82b4b2 BH |
1778 | const struct iw_handler_def *wireless_handlers; |
1779 | struct iw_public_data *wireless_data; | |
b86e0280 | 1780 | #endif |
d314774c | 1781 | const struct net_device_ops *netdev_ops; |
76fd8593 | 1782 | const struct ethtool_ops *ethtool_ops; |
4170604f | 1783 | #ifdef CONFIG_NET_SWITCHDEV |
9d47c0a2 | 1784 | const struct switchdev_ops *switchdev_ops; |
4170604f | 1785 | #endif |
1b69c6d0 DA |
1786 | #ifdef CONFIG_NET_L3_MASTER_DEV |
1787 | const struct l3mdev_ops *l3mdev_ops; | |
1788 | #endif | |
f997c55c AA |
1789 | #if IS_ENABLED(CONFIG_IPV6) |
1790 | const struct ndisc_ops *ndisc_ops; | |
1791 | #endif | |
1da177e4 | 1792 | |
9cb0d21d | 1793 | #ifdef CONFIG_XFRM_OFFLOAD |
d77e38e6 SK |
1794 | const struct xfrmdev_ops *xfrmdev_ops; |
1795 | #endif | |
1796 | ||
a5c37c63 IL |
1797 | #if IS_ENABLED(CONFIG_TLS_DEVICE) |
1798 | const struct tlsdev_ops *tlsdev_ops; | |
1799 | #endif | |
1800 | ||
3b04ddde SH |
1801 | const struct header_ops *header_ops; |
1802 | ||
536721b1 KK |
1803 | unsigned int flags; |
1804 | unsigned int priv_flags; | |
1805 | ||
1da177e4 | 1806 | unsigned short gflags; |
536721b1 | 1807 | unsigned short padded; |
1da177e4 | 1808 | |
536721b1 KK |
1809 | unsigned char operstate; |
1810 | unsigned char link_mode; | |
b00055aa | 1811 | |
536721b1 KK |
1812 | unsigned char if_port; |
1813 | unsigned char dma; | |
bdc220da | 1814 | |
536721b1 | 1815 | unsigned int mtu; |
61e84623 JW |
1816 | unsigned int min_mtu; |
1817 | unsigned int max_mtu; | |
536721b1 KK |
1818 | unsigned short type; |
1819 | unsigned short hard_header_len; | |
d92be7a4 | 1820 | unsigned char min_header_len; |
1da177e4 | 1821 | |
f5184d26 JB |
1822 | unsigned short needed_headroom; |
1823 | unsigned short needed_tailroom; | |
1824 | ||
1da177e4 | 1825 | /* Interface address info. */ |
536721b1 KK |
1826 | unsigned char perm_addr[MAX_ADDR_LEN]; |
1827 | unsigned char addr_assign_type; | |
1828 | unsigned char addr_len; | |
a0a9663d | 1829 | unsigned short neigh_priv_len; |
536721b1 KK |
1830 | unsigned short dev_id; |
1831 | unsigned short dev_port; | |
ccffad25 | 1832 | spinlock_t addr_list_lock; |
14ffbbb8 TG |
1833 | unsigned char name_assign_type; |
1834 | bool uc_promisc; | |
536721b1 KK |
1835 | struct netdev_hw_addr_list uc; |
1836 | struct netdev_hw_addr_list mc; | |
1837 | struct netdev_hw_addr_list dev_addrs; | |
1838 | ||
4c3d5e7b ED |
1839 | #ifdef CONFIG_SYSFS |
1840 | struct kset *queues_kset; | |
1841 | #endif | |
9d45abe1 WC |
1842 | unsigned int promiscuity; |
1843 | unsigned int allmulti; | |
1da177e4 | 1844 | |
1da177e4 | 1845 | |
5e82b4b2 | 1846 | /* Protocol-specific pointers */ |
65ac6a5f | 1847 | |
d11ead75 | 1848 | #if IS_ENABLED(CONFIG_VLAN_8021Q) |
536721b1 | 1849 | struct vlan_info __rcu *vlan_info; |
65ac6a5f | 1850 | #endif |
34a430d7 | 1851 | #if IS_ENABLED(CONFIG_NET_DSA) |
2f657a60 | 1852 | struct dsa_port *dsa_ptr; |
37cb0620 YX |
1853 | #endif |
1854 | #if IS_ENABLED(CONFIG_TIPC) | |
536721b1 | 1855 | struct tipc_bearer __rcu *tipc_ptr; |
91da11f8 | 1856 | #endif |
89e58148 | 1857 | #if IS_ENABLED(CONFIG_IRDA) || IS_ENABLED(CONFIG_ATALK) |
536721b1 | 1858 | void *atalk_ptr; |
89e58148 | 1859 | #endif |
536721b1 | 1860 | struct in_device __rcu *ip_ptr; |
330c7272 | 1861 | #if IS_ENABLED(CONFIG_DECNET) |
536721b1 | 1862 | struct dn_dev __rcu *dn_ptr; |
330c7272 | 1863 | #endif |
536721b1 | 1864 | struct inet6_dev __rcu *ip6_ptr; |
19ff13f2 | 1865 | #if IS_ENABLED(CONFIG_AX25) |
536721b1 | 1866 | void *ax25_ptr; |
19ff13f2 | 1867 | #endif |
536721b1 | 1868 | struct wireless_dev *ieee80211_ptr; |
98a18b6f | 1869 | struct wpan_dev *ieee802154_ptr; |
03c57747 RS |
1870 | #if IS_ENABLED(CONFIG_MPLS_ROUTING) |
1871 | struct mpls_dev __rcu *mpls_ptr; | |
1872 | #endif | |
1da177e4 | 1873 | |
9356b8fc | 1874 | /* |
cd13539b | 1875 | * Cache lines mostly used on receive path (including eth_type_trans()) |
9356b8fc | 1876 | */ |
9356b8fc | 1877 | /* Interface address info used in eth_type_trans() */ |
536721b1 | 1878 | unsigned char *dev_addr; |
f001fde5 | 1879 | |
0a9627f2 | 1880 | struct netdev_rx_queue *_rx; |
0a9627f2 | 1881 | unsigned int num_rx_queues; |
62fe0b40 | 1882 | unsigned int real_num_rx_queues; |
0a9627f2 | 1883 | |
7acedaf5 | 1884 | struct bpf_prog __rcu *xdp_prog; |
3b47d303 | 1885 | unsigned long gro_flush_timeout; |
61391cde | 1886 | rx_handler_func_t __rcu *rx_handler; |
1887 | void __rcu *rx_handler_data; | |
e8a0464c | 1888 | |
4cda01e8 | 1889 | #ifdef CONFIG_NET_CLS_ACT |
46209401 | 1890 | struct mini_Qdisc __rcu *miniq_ingress; |
d2788d34 | 1891 | #endif |
24824a09 | 1892 | struct netdev_queue __rcu *ingress_queue; |
e687ad60 | 1893 | #ifdef CONFIG_NETFILTER_INGRESS |
960632ec | 1894 | struct nf_hook_entries __rcu *nf_hooks_ingress; |
e687ad60 | 1895 | #endif |
d2788d34 | 1896 | |
536721b1 | 1897 | unsigned char broadcast[MAX_ADDR_LEN]; |
14ffbbb8 TG |
1898 | #ifdef CONFIG_RFS_ACCEL |
1899 | struct cpu_rmap *rx_cpu_rmap; | |
1900 | #endif | |
1901 | struct hlist_node index_hlist; | |
cd13539b ED |
1902 | |
1903 | /* | |
1904 | * Cache lines mostly used on transmit path | |
1905 | */ | |
e8a0464c DM |
1906 | struct netdev_queue *_tx ____cacheline_aligned_in_smp; |
1907 | unsigned int num_tx_queues; | |
fd2ea0a7 | 1908 | unsigned int real_num_tx_queues; |
af356afa | 1909 | struct Qdisc *qdisc; |
59cc1f61 JK |
1910 | #ifdef CONFIG_NET_SCHED |
1911 | DECLARE_HASHTABLE (qdisc_hash, 4); | |
1912 | #endif | |
0cd29503 | 1913 | unsigned int tx_queue_len; |
c3f26a26 | 1914 | spinlock_t tx_global_lock; |
14ffbbb8 | 1915 | int watchdog_timeo; |
cd13539b | 1916 | |
bf264145 | 1917 | #ifdef CONFIG_XPS |
80d19669 AN |
1918 | struct xps_dev_maps __rcu *xps_cpus_map; |
1919 | struct xps_dev_maps __rcu *xps_rxqs_map; | |
bf264145 | 1920 | #endif |
1f211a1b | 1921 | #ifdef CONFIG_NET_CLS_ACT |
46209401 | 1922 | struct mini_Qdisc __rcu *miniq_egress; |
1f211a1b | 1923 | #endif |
0c4f691f | 1924 | |
9356b8fc | 1925 | /* These may be needed for future network-power-down code. */ |
9356b8fc ED |
1926 | struct timer_list watchdog_timer; |
1927 | ||
29b4433d | 1928 | int __percpu *pcpu_refcnt; |
1da177e4 | 1929 | struct list_head todo_list; |
1da177e4 | 1930 | |
e014debe | 1931 | struct list_head link_watch_list; |
572a103d | 1932 | |
1da177e4 | 1933 | enum { NETREG_UNINITIALIZED=0, |
b17a7c17 | 1934 | NETREG_REGISTERED, /* completed register_netdevice */ |
1da177e4 LT |
1935 | NETREG_UNREGISTERING, /* called unregister_netdevice */ |
1936 | NETREG_UNREGISTERED, /* completed unregister todo */ | |
1937 | NETREG_RELEASED, /* called free_netdev */ | |
937f1ba5 | 1938 | NETREG_DUMMY, /* dummy device for NAPI poll */ |
449f4544 ED |
1939 | } reg_state:8; |
1940 | ||
536721b1 | 1941 | bool dismantle; |
a2835763 PM |
1942 | |
1943 | enum { | |
1944 | RTNL_LINK_INITIALIZED, | |
1945 | RTNL_LINK_INITIALIZING, | |
1946 | } rtnl_link_state:16; | |
1da177e4 | 1947 | |
cf124db5 DM |
1948 | bool needs_free_netdev; |
1949 | void (*priv_destructor)(struct net_device *dev); | |
1da177e4 | 1950 | |
1da177e4 | 1951 | #ifdef CONFIG_NETPOLL |
5fbee843 | 1952 | struct netpoll_info __rcu *npinfo; |
1da177e4 | 1953 | #endif |
eae792b7 | 1954 | |
0c5c9fb5 | 1955 | possible_net_t nd_net; |
4a1c5371 | 1956 | |
4951704b | 1957 | /* mid-layer private */ |
a7855c78 | 1958 | union { |
536721b1 KK |
1959 | void *ml_priv; |
1960 | struct pcpu_lstats __percpu *lstats; | |
8f84985f | 1961 | struct pcpu_sw_netstats __percpu *tstats; |
536721b1 KK |
1962 | struct pcpu_dstats __percpu *dstats; |
1963 | struct pcpu_vstats __percpu *vstats; | |
a7855c78 | 1964 | }; |
536721b1 | 1965 | |
fb585b44 | 1966 | #if IS_ENABLED(CONFIG_GARP) |
3cc77ec7 | 1967 | struct garp_port __rcu *garp_port; |
fb585b44 TK |
1968 | #endif |
1969 | #if IS_ENABLED(CONFIG_MRP) | |
febf018d | 1970 | struct mrp_port __rcu *mrp_port; |
fb585b44 | 1971 | #endif |
1da177e4 | 1972 | |
5e82b4b2 | 1973 | struct device dev; |
0c509a6c | 1974 | const struct attribute_group *sysfs_groups[4]; |
a953be53 | 1975 | const struct attribute_group *sysfs_rx_queue_group; |
38f7b870 | 1976 | |
38f7b870 | 1977 | const struct rtnl_link_ops *rtnl_link_ops; |
f25f4e44 | 1978 | |
82cc1a7a PWJ |
1979 | /* for setting kernel sock attribute on TCP connection setup */ |
1980 | #define GSO_MAX_SIZE 65536 | |
1981 | unsigned int gso_max_size; | |
30b678d8 BH |
1982 | #define GSO_MAX_SEGS 65535 |
1983 | u16 gso_max_segs; | |
743b03a8 | 1984 | |
7a6b6f51 | 1985 | #ifdef CONFIG_DCB |
32953543 | 1986 | const struct dcbnl_rtnl_ops *dcbnl_ops; |
2f90b865 | 1987 | #endif |
5e82b4b2 BH |
1988 | u8 num_tc; |
1989 | struct netdev_tc_txq tc_to_txq[TC_MAX_QUEUE]; | |
1990 | u8 prio_tc_map[TC_BITMASK + 1]; | |
2f90b865 | 1991 | |
d11ead75 | 1992 | #if IS_ENABLED(CONFIG_FCOE) |
4d288d57 | 1993 | unsigned int fcoe_ddp_xid; |
5bc1421e | 1994 | #endif |
86f8515f | 1995 | #if IS_ENABLED(CONFIG_CGROUP_NET_PRIO) |
5bc1421e | 1996 | struct netprio_map __rcu *priomap; |
4d288d57 | 1997 | #endif |
5e82b4b2 | 1998 | struct phy_device *phydev; |
e679c9c1 | 1999 | struct sfp_bus *sfp_bus; |
5e82b4b2 | 2000 | struct lock_class_key *qdisc_tx_busylock; |
f9eb8aea | 2001 | struct lock_class_key *qdisc_running_key; |
5e82b4b2 | 2002 | bool proto_down; |
1da177e4 | 2003 | }; |
43cb76d9 | 2004 | #define to_net_dev(d) container_of(d, struct net_device, dev) |
1da177e4 | 2005 | |
b5cdae32 DM |
2006 | static inline bool netif_elide_gro(const struct net_device *dev) |
2007 | { | |
2008 | if (!(dev->features & NETIF_F_GRO) || dev->xdp_prog) | |
2009 | return true; | |
2010 | return false; | |
2011 | } | |
2012 | ||
1da177e4 | 2013 | #define NETDEV_ALIGN 32 |
1da177e4 | 2014 | |
4f57c087 JF |
2015 | static inline |
2016 | int netdev_get_prio_tc_map(const struct net_device *dev, u32 prio) | |
2017 | { | |
2018 | return dev->prio_tc_map[prio & TC_BITMASK]; | |
2019 | } | |
2020 | ||
2021 | static inline | |
2022 | int netdev_set_prio_tc_map(struct net_device *dev, u8 prio, u8 tc) | |
2023 | { | |
2024 | if (tc >= dev->num_tc) | |
2025 | return -EINVAL; | |
2026 | ||
2027 | dev->prio_tc_map[prio & TC_BITMASK] = tc & TC_BITMASK; | |
2028 | return 0; | |
2029 | } | |
2030 | ||
8d059b0f | 2031 | int netdev_txq_to_tc(struct net_device *dev, unsigned int txq); |
9cf1f6a8 AD |
2032 | void netdev_reset_tc(struct net_device *dev); |
2033 | int netdev_set_tc_queue(struct net_device *dev, u8 tc, u16 count, u16 offset); | |
2034 | int netdev_set_num_tc(struct net_device *dev, u8 num_tc); | |
4f57c087 JF |
2035 | |
2036 | static inline | |
2037 | int netdev_get_num_tc(struct net_device *dev) | |
2038 | { | |
2039 | return dev->num_tc; | |
2040 | } | |
2041 | ||
e8a0464c DM |
2042 | static inline |
2043 | struct netdev_queue *netdev_get_tx_queue(const struct net_device *dev, | |
2044 | unsigned int index) | |
2045 | { | |
2046 | return &dev->_tx[index]; | |
2047 | } | |
2048 | ||
10c51b56 DB |
2049 | static inline struct netdev_queue *skb_get_tx_queue(const struct net_device *dev, |
2050 | const struct sk_buff *skb) | |
2051 | { | |
2052 | return netdev_get_tx_queue(dev, skb_get_queue_mapping(skb)); | |
2053 | } | |
2054 | ||
e8a0464c DM |
2055 | static inline void netdev_for_each_tx_queue(struct net_device *dev, |
2056 | void (*f)(struct net_device *, | |
2057 | struct netdev_queue *, | |
2058 | void *), | |
2059 | void *arg) | |
2060 | { | |
2061 | unsigned int i; | |
2062 | ||
2063 | for (i = 0; i < dev->num_tx_queues; i++) | |
2064 | f(dev, &dev->_tx[i], arg); | |
2065 | } | |
2066 | ||
d3fff6c4 ED |
2067 | #define netdev_lockdep_set_classes(dev) \ |
2068 | { \ | |
2069 | static struct lock_class_key qdisc_tx_busylock_key; \ | |
2070 | static struct lock_class_key qdisc_running_key; \ | |
2071 | static struct lock_class_key qdisc_xmit_lock_key; \ | |
2072 | static struct lock_class_key dev_addr_list_lock_key; \ | |
2073 | unsigned int i; \ | |
2074 | \ | |
2075 | (dev)->qdisc_tx_busylock = &qdisc_tx_busylock_key; \ | |
2076 | (dev)->qdisc_running_key = &qdisc_running_key; \ | |
2077 | lockdep_set_class(&(dev)->addr_list_lock, \ | |
2078 | &dev_addr_list_lock_key); \ | |
2079 | for (i = 0; i < (dev)->num_tx_queues; i++) \ | |
2080 | lockdep_set_class(&(dev)->_tx[i]._xmit_lock, \ | |
2081 | &qdisc_xmit_lock_key); \ | |
2082 | } | |
2083 | ||
f629d208 | 2084 | struct netdev_queue *netdev_pick_tx(struct net_device *dev, |
f663dd9a JW |
2085 | struct sk_buff *skb, |
2086 | void *accel_priv); | |
8c4c49df | 2087 | |
871b642a PA |
2088 | /* returns the headroom that the master device needs to take in account |
2089 | * when forwarding to this dev | |
2090 | */ | |
2091 | static inline unsigned netdev_get_fwd_headroom(struct net_device *dev) | |
2092 | { | |
2093 | return dev->priv_flags & IFF_PHONY_HEADROOM ? 0 : dev->needed_headroom; | |
2094 | } | |
2095 | ||
2096 | static inline void netdev_set_rx_headroom(struct net_device *dev, int new_hr) | |
2097 | { | |
2098 | if (dev->netdev_ops->ndo_set_rx_headroom) | |
2099 | dev->netdev_ops->ndo_set_rx_headroom(dev, new_hr); | |
2100 | } | |
2101 | ||
2102 | /* set the device rx headroom to the dev's default */ | |
2103 | static inline void netdev_reset_rx_headroom(struct net_device *dev) | |
2104 | { | |
2105 | netdev_set_rx_headroom(dev, -1); | |
2106 | } | |
2107 | ||
c346dca1 YH |
2108 | /* |
2109 | * Net namespace inlines | |
2110 | */ | |
2111 | static inline | |
2112 | struct net *dev_net(const struct net_device *dev) | |
2113 | { | |
c2d9ba9b | 2114 | return read_pnet(&dev->nd_net); |
c346dca1 YH |
2115 | } |
2116 | ||
2117 | static inline | |
f5aa23fd | 2118 | void dev_net_set(struct net_device *dev, struct net *net) |
c346dca1 | 2119 | { |
0c5c9fb5 | 2120 | write_pnet(&dev->nd_net, net); |
c346dca1 YH |
2121 | } |
2122 | ||
bea3348e SH |
2123 | /** |
2124 | * netdev_priv - access network device private data | |
2125 | * @dev: network device | |
2126 | * | |
2127 | * Get network device private data | |
2128 | */ | |
6472ce60 | 2129 | static inline void *netdev_priv(const struct net_device *dev) |
1da177e4 | 2130 | { |
1ce8e7b5 | 2131 | return (char *)dev + ALIGN(sizeof(struct net_device), NETDEV_ALIGN); |
1da177e4 LT |
2132 | } |
2133 | ||
1da177e4 LT |
2134 | /* Set the sysfs physical device reference for the network logical device |
2135 | * if set prior to registration will cause a symlink during initialization. | |
2136 | */ | |
43cb76d9 | 2137 | #define SET_NETDEV_DEV(net, pdev) ((net)->dev.parent = (pdev)) |
1da177e4 | 2138 | |
384912ed | 2139 | /* Set the sysfs device type for the network logical device to allow |
3f79410c | 2140 | * fine-grained identification of different network device types. For |
5e82b4b2 | 2141 | * example Ethernet, Wireless LAN, Bluetooth, WiMAX etc. |
384912ed MH |
2142 | */ |
2143 | #define SET_NETDEV_DEVTYPE(net, devtype) ((net)->dev.type = (devtype)) | |
2144 | ||
82dc3c63 ED |
2145 | /* Default NAPI poll() weight |
2146 | * Device drivers are strongly advised to not use bigger value | |
2147 | */ | |
2148 | #define NAPI_POLL_WEIGHT 64 | |
2149 | ||
3b582cc1 | 2150 | /** |
5e82b4b2 | 2151 | * netif_napi_add - initialize a NAPI context |
3b582cc1 | 2152 | * @dev: network device |
5e82b4b2 | 2153 | * @napi: NAPI context |
3b582cc1 SH |
2154 | * @poll: polling function |
2155 | * @weight: default weight | |
2156 | * | |
5e82b4b2 BH |
2157 | * netif_napi_add() must be used to initialize a NAPI context prior to calling |
2158 | * *any* of the other NAPI-related functions. | |
3b582cc1 | 2159 | */ |
d565b0a1 HX |
2160 | void netif_napi_add(struct net_device *dev, struct napi_struct *napi, |
2161 | int (*poll)(struct napi_struct *, int), int weight); | |
bea3348e | 2162 | |
d64b5e85 | 2163 | /** |
5e82b4b2 | 2164 | * netif_tx_napi_add - initialize a NAPI context |
d64b5e85 | 2165 | * @dev: network device |
5e82b4b2 | 2166 | * @napi: NAPI context |
d64b5e85 ED |
2167 | * @poll: polling function |
2168 | * @weight: default weight | |
2169 | * | |
2170 | * This variant of netif_napi_add() should be used from drivers using NAPI | |
2171 | * to exclusively poll a TX queue. | |
2172 | * This will avoid we add it into napi_hash[], thus polluting this hash table. | |
2173 | */ | |
2174 | static inline void netif_tx_napi_add(struct net_device *dev, | |
2175 | struct napi_struct *napi, | |
2176 | int (*poll)(struct napi_struct *, int), | |
2177 | int weight) | |
2178 | { | |
2179 | set_bit(NAPI_STATE_NO_BUSY_POLL, &napi->state); | |
2180 | netif_napi_add(dev, napi, poll, weight); | |
2181 | } | |
2182 | ||
d8156534 | 2183 | /** |
5e82b4b2 BH |
2184 | * netif_napi_del - remove a NAPI context |
2185 | * @napi: NAPI context | |
d8156534 | 2186 | * |
5e82b4b2 | 2187 | * netif_napi_del() removes a NAPI context from the network device NAPI list |
d8156534 | 2188 | */ |
d565b0a1 HX |
2189 | void netif_napi_del(struct napi_struct *napi); |
2190 | ||
2191 | struct napi_gro_cb { | |
78a478d0 | 2192 | /* Virtual address of skb_shinfo(skb)->frags[0].page + offset. */ |
5e82b4b2 | 2193 | void *frag0; |
78a478d0 | 2194 | |
7489594c HX |
2195 | /* Length of frag0. */ |
2196 | unsigned int frag0_len; | |
2197 | ||
86911732 | 2198 | /* This indicates where we are processing relative to skb->data. */ |
5e82b4b2 | 2199 | int data_offset; |
86911732 | 2200 | |
d565b0a1 | 2201 | /* This is non-zero if the packet cannot be merged with the new skb. */ |
bf5a755f JC |
2202 | u16 flush; |
2203 | ||
2204 | /* Save the IP ID here and check when we get to the transport layer */ | |
2205 | u16 flush_id; | |
d565b0a1 HX |
2206 | |
2207 | /* Number of segments aggregated. */ | |
2e71a6f8 ED |
2208 | u16 count; |
2209 | ||
15e2396d TH |
2210 | /* Start offset for remote checksum offload */ |
2211 | u16 gro_remcsum_start; | |
2212 | ||
2e71a6f8 ED |
2213 | /* jiffies when first packet was created/queued */ |
2214 | unsigned long age; | |
86347245 | 2215 | |
afe93325 | 2216 | /* Used in ipv6_gro_receive() and foo-over-udp */ |
b582ef09 OG |
2217 | u16 proto; |
2218 | ||
baa32ff4 TH |
2219 | /* This is non-zero if the packet may be of the same flow. */ |
2220 | u8 same_flow:1; | |
2221 | ||
fac8e0f5 JG |
2222 | /* Used in tunnel GRO receive */ |
2223 | u8 encap_mark:1; | |
573e8fca TH |
2224 | |
2225 | /* GRO checksum is valid */ | |
2226 | u8 csum_valid:1; | |
2227 | ||
662880f4 TH |
2228 | /* Number of checksums via CHECKSUM_UNNECESSARY */ |
2229 | u8 csum_cnt:3; | |
c3c7c254 | 2230 | |
baa32ff4 TH |
2231 | /* Free the skb? */ |
2232 | u8 free:2; | |
2233 | #define NAPI_GRO_FREE 1 | |
2234 | #define NAPI_GRO_FREE_STOLEN_HEAD 2 | |
2235 | ||
efc98d08 TH |
2236 | /* Used in foo-over-udp, set in udp[46]_gro_receive */ |
2237 | u8 is_ipv6:1; | |
2238 | ||
a0ca153f AD |
2239 | /* Used in GRE, set in fou/gue_gro_receive */ |
2240 | u8 is_fou:1; | |
2241 | ||
1530545e AD |
2242 | /* Used to determine if flush_id can be ignored */ |
2243 | u8 is_atomic:1; | |
2244 | ||
fcd91dd4 SD |
2245 | /* Number of gro_receive callbacks this packet already went through */ |
2246 | u8 recursion_counter:4; | |
2247 | ||
2248 | /* 1 bit hole */ | |
baa32ff4 | 2249 | |
bf5a755f JC |
2250 | /* used to support CHECKSUM_COMPLETE for tunneling protocols */ |
2251 | __wsum csum; | |
2252 | ||
c3c7c254 ED |
2253 | /* used in skb_gro_receive() slow path */ |
2254 | struct sk_buff *last; | |
d565b0a1 HX |
2255 | }; |
2256 | ||
2257 | #define NAPI_GRO_CB(skb) ((struct napi_gro_cb *)(skb)->cb) | |
d8156534 | 2258 | |
fcd91dd4 SD |
2259 | #define GRO_RECURSION_LIMIT 15 |
2260 | static inline int gro_recursion_inc_test(struct sk_buff *skb) | |
2261 | { | |
2262 | return ++NAPI_GRO_CB(skb)->recursion_counter == GRO_RECURSION_LIMIT; | |
2263 | } | |
2264 | ||
d4546c25 DM |
2265 | typedef struct sk_buff *(*gro_receive_t)(struct list_head *, struct sk_buff *); |
2266 | static inline struct sk_buff *call_gro_receive(gro_receive_t cb, | |
2267 | struct list_head *head, | |
2268 | struct sk_buff *skb) | |
fcd91dd4 SD |
2269 | { |
2270 | if (unlikely(gro_recursion_inc_test(skb))) { | |
2271 | NAPI_GRO_CB(skb)->flush |= 1; | |
2272 | return NULL; | |
2273 | } | |
2274 | ||
2275 | return cb(head, skb); | |
2276 | } | |
2277 | ||
d4546c25 DM |
2278 | typedef struct sk_buff *(*gro_receive_sk_t)(struct sock *, struct list_head *, |
2279 | struct sk_buff *); | |
2280 | static inline struct sk_buff *call_gro_receive_sk(gro_receive_sk_t cb, | |
2281 | struct sock *sk, | |
2282 | struct list_head *head, | |
2283 | struct sk_buff *skb) | |
fcd91dd4 SD |
2284 | { |
2285 | if (unlikely(gro_recursion_inc_test(skb))) { | |
2286 | NAPI_GRO_CB(skb)->flush |= 1; | |
2287 | return NULL; | |
2288 | } | |
2289 | ||
2290 | return cb(sk, head, skb); | |
2291 | } | |
2292 | ||
1da177e4 | 2293 | struct packet_type { |
f2ccd8fa DM |
2294 | __be16 type; /* This is really htons(ether_type). */ |
2295 | struct net_device *dev; /* NULL is wildcarded here */ | |
2296 | int (*func) (struct sk_buff *, | |
2297 | struct net_device *, | |
2298 | struct packet_type *, | |
2299 | struct net_device *); | |
c0de08d0 EL |
2300 | bool (*id_match)(struct packet_type *ptype, |
2301 | struct sock *sk); | |
1da177e4 LT |
2302 | void *af_packet_priv; |
2303 | struct list_head list; | |
2304 | }; | |
2305 | ||
f191a1d1 | 2306 | struct offload_callbacks { |
576a30eb | 2307 | struct sk_buff *(*gso_segment)(struct sk_buff *skb, |
c8f44aff | 2308 | netdev_features_t features); |
d4546c25 DM |
2309 | struct sk_buff *(*gro_receive)(struct list_head *head, |
2310 | struct sk_buff *skb); | |
299603e8 | 2311 | int (*gro_complete)(struct sk_buff *skb, int nhoff); |
f191a1d1 VY |
2312 | }; |
2313 | ||
2314 | struct packet_offload { | |
2315 | __be16 type; /* This is really htons(ether_type). */ | |
bdef7de4 | 2316 | u16 priority; |
f191a1d1 VY |
2317 | struct offload_callbacks callbacks; |
2318 | struct list_head list; | |
1da177e4 LT |
2319 | }; |
2320 | ||
5e82b4b2 | 2321 | /* often modified stats are per-CPU, other are shared (netdev->stats) */ |
8f84985f LR |
2322 | struct pcpu_sw_netstats { |
2323 | u64 rx_packets; | |
2324 | u64 rx_bytes; | |
2325 | u64 tx_packets; | |
2326 | u64 tx_bytes; | |
2327 | struct u64_stats_sync syncp; | |
2328 | }; | |
2329 | ||
aabc92bb PNA |
2330 | #define __netdev_alloc_pcpu_stats(type, gfp) \ |
2331 | ({ \ | |
2332 | typeof(type) __percpu *pcpu_stats = alloc_percpu_gfp(type, gfp);\ | |
2333 | if (pcpu_stats) { \ | |
2334 | int __cpu; \ | |
2335 | for_each_possible_cpu(__cpu) { \ | |
2336 | typeof(type) *stat; \ | |
2337 | stat = per_cpu_ptr(pcpu_stats, __cpu); \ | |
2338 | u64_stats_init(&stat->syncp); \ | |
2339 | } \ | |
2340 | } \ | |
2341 | pcpu_stats; \ | |
1c213bd2 WC |
2342 | }) |
2343 | ||
aabc92bb | 2344 | #define netdev_alloc_pcpu_stats(type) \ |
326fcfa5 | 2345 | __netdev_alloc_pcpu_stats(type, GFP_KERNEL) |
aabc92bb | 2346 | |
764f5e54 JP |
2347 | enum netdev_lag_tx_type { |
2348 | NETDEV_LAG_TX_TYPE_UNKNOWN, | |
2349 | NETDEV_LAG_TX_TYPE_RANDOM, | |
2350 | NETDEV_LAG_TX_TYPE_BROADCAST, | |
2351 | NETDEV_LAG_TX_TYPE_ROUNDROBIN, | |
2352 | NETDEV_LAG_TX_TYPE_ACTIVEBACKUP, | |
2353 | NETDEV_LAG_TX_TYPE_HASH, | |
2354 | }; | |
2355 | ||
f44aa9ef JH |
2356 | enum netdev_lag_hash { |
2357 | NETDEV_LAG_HASH_NONE, | |
2358 | NETDEV_LAG_HASH_L2, | |
2359 | NETDEV_LAG_HASH_L34, | |
2360 | NETDEV_LAG_HASH_L23, | |
2361 | NETDEV_LAG_HASH_E23, | |
2362 | NETDEV_LAG_HASH_E34, | |
2363 | NETDEV_LAG_HASH_UNKNOWN, | |
2364 | }; | |
2365 | ||
764f5e54 JP |
2366 | struct netdev_lag_upper_info { |
2367 | enum netdev_lag_tx_type tx_type; | |
f44aa9ef | 2368 | enum netdev_lag_hash hash_type; |
764f5e54 JP |
2369 | }; |
2370 | ||
fb1b2e3c JP |
2371 | struct netdev_lag_lower_state_info { |
2372 | u8 link_up : 1, | |
2373 | tx_enabled : 1; | |
2374 | }; | |
2375 | ||
1da177e4 LT |
2376 | #include <linux/notifier.h> |
2377 | ||
ede2762d KT |
2378 | /* netdevice notifier chain. Please remember to update netdev_cmd_to_name() |
2379 | * and the rtnetlink notification exclusion list in rtnetlink_event() when | |
2380 | * adding new types. | |
dcfe1421 | 2381 | */ |
ede2762d KT |
2382 | enum netdev_cmd { |
2383 | NETDEV_UP = 1, /* For now you can't veto a device up/down */ | |
2384 | NETDEV_DOWN, | |
2385 | NETDEV_REBOOT, /* Tell a protocol stack a network interface | |
dcfe1421 AW |
2386 | detected a hardware crash and restarted |
2387 | - we can use this eg to kick tcp sessions | |
2388 | once done */ | |
ede2762d KT |
2389 | NETDEV_CHANGE, /* Notify device state change */ |
2390 | NETDEV_REGISTER, | |
2391 | NETDEV_UNREGISTER, | |
2392 | NETDEV_CHANGEMTU, /* notify after mtu change happened */ | |
2393 | NETDEV_CHANGEADDR, | |
2394 | NETDEV_GOING_DOWN, | |
2395 | NETDEV_CHANGENAME, | |
2396 | NETDEV_FEAT_CHANGE, | |
2397 | NETDEV_BONDING_FAILOVER, | |
2398 | NETDEV_PRE_UP, | |
2399 | NETDEV_PRE_TYPE_CHANGE, | |
2400 | NETDEV_POST_TYPE_CHANGE, | |
2401 | NETDEV_POST_INIT, | |
ede2762d KT |
2402 | NETDEV_RELEASE, |
2403 | NETDEV_NOTIFY_PEERS, | |
2404 | NETDEV_JOIN, | |
2405 | NETDEV_CHANGEUPPER, | |
2406 | NETDEV_RESEND_IGMP, | |
2407 | NETDEV_PRECHANGEMTU, /* notify before mtu change happened */ | |
2408 | NETDEV_CHANGEINFODATA, | |
2409 | NETDEV_BONDING_INFO, | |
2410 | NETDEV_PRECHANGEUPPER, | |
2411 | NETDEV_CHANGELOWERSTATE, | |
2412 | NETDEV_UDP_TUNNEL_PUSH_INFO, | |
2413 | NETDEV_UDP_TUNNEL_DROP_INFO, | |
2414 | NETDEV_CHANGE_TX_QUEUE_LEN, | |
9daae9bd GP |
2415 | NETDEV_CVLAN_FILTER_PUSH_INFO, |
2416 | NETDEV_CVLAN_FILTER_DROP_INFO, | |
2417 | NETDEV_SVLAN_FILTER_PUSH_INFO, | |
2418 | NETDEV_SVLAN_FILTER_DROP_INFO, | |
ede2762d KT |
2419 | }; |
2420 | const char *netdev_cmd_to_name(enum netdev_cmd cmd); | |
dcfe1421 | 2421 | |
f629d208 JP |
2422 | int register_netdevice_notifier(struct notifier_block *nb); |
2423 | int unregister_netdevice_notifier(struct notifier_block *nb); | |
351638e7 JP |
2424 | |
2425 | struct netdev_notifier_info { | |
51d0c047 DA |
2426 | struct net_device *dev; |
2427 | struct netlink_ext_ack *extack; | |
351638e7 JP |
2428 | }; |
2429 | ||
be9efd36 JP |
2430 | struct netdev_notifier_change_info { |
2431 | struct netdev_notifier_info info; /* must be first */ | |
2432 | unsigned int flags_changed; | |
2433 | }; | |
2434 | ||
0e4ead9d JP |
2435 | struct netdev_notifier_changeupper_info { |
2436 | struct netdev_notifier_info info; /* must be first */ | |
2437 | struct net_device *upper_dev; /* new upper dev */ | |
2438 | bool master; /* is upper dev master */ | |
5e82b4b2 | 2439 | bool linking; /* is the notification for link or unlink */ |
29bf24af | 2440 | void *upper_info; /* upper dev info */ |
0e4ead9d JP |
2441 | }; |
2442 | ||
04d48266 JP |
2443 | struct netdev_notifier_changelowerstate_info { |
2444 | struct netdev_notifier_info info; /* must be first */ | |
2445 | void *lower_state_info; /* is lower dev state */ | |
2446 | }; | |
2447 | ||
75538c2b CW |
2448 | static inline void netdev_notifier_info_init(struct netdev_notifier_info *info, |
2449 | struct net_device *dev) | |
2450 | { | |
2451 | info->dev = dev; | |
51d0c047 | 2452 | info->extack = NULL; |
75538c2b CW |
2453 | } |
2454 | ||
351638e7 JP |
2455 | static inline struct net_device * |
2456 | netdev_notifier_info_to_dev(const struct netdev_notifier_info *info) | |
2457 | { | |
2458 | return info->dev; | |
2459 | } | |
2460 | ||
51d0c047 DA |
2461 | static inline struct netlink_ext_ack * |
2462 | netdev_notifier_info_to_extack(const struct netdev_notifier_info *info) | |
2463 | { | |
2464 | return info->extack; | |
2465 | } | |
2466 | ||
f629d208 | 2467 | int call_netdevice_notifiers(unsigned long val, struct net_device *dev); |
dcfe1421 AW |
2468 | |
2469 | ||
1da177e4 LT |
2470 | extern rwlock_t dev_base_lock; /* Device list lock */ |
2471 | ||
881d966b EB |
2472 | #define for_each_netdev(net, d) \ |
2473 | list_for_each_entry(d, &(net)->dev_base_head, dev_list) | |
dcbccbd4 EB |
2474 | #define for_each_netdev_reverse(net, d) \ |
2475 | list_for_each_entry_reverse(d, &(net)->dev_base_head, dev_list) | |
c6d14c84 ED |
2476 | #define for_each_netdev_rcu(net, d) \ |
2477 | list_for_each_entry_rcu(d, &(net)->dev_base_head, dev_list) | |
881d966b EB |
2478 | #define for_each_netdev_safe(net, d, n) \ |
2479 | list_for_each_entry_safe(d, n, &(net)->dev_base_head, dev_list) | |
2480 | #define for_each_netdev_continue(net, d) \ | |
2481 | list_for_each_entry_continue(d, &(net)->dev_base_head, dev_list) | |
254245d2 | 2482 | #define for_each_netdev_continue_rcu(net, d) \ |
2483 | list_for_each_entry_continue_rcu(d, &(net)->dev_base_head, dev_list) | |
8a7fbfab | 2484 | #define for_each_netdev_in_bond_rcu(bond, slave) \ |
2485 | for_each_netdev_rcu(&init_net, slave) \ | |
4ccce02e | 2486 | if (netdev_master_upper_dev_get_rcu(slave) == (bond)) |
881d966b | 2487 | #define net_device_entry(lh) list_entry(lh, struct net_device, dev_list) |
7562f876 | 2488 | |
a050c33f DL |
2489 | static inline struct net_device *next_net_device(struct net_device *dev) |
2490 | { | |
2491 | struct list_head *lh; | |
2492 | struct net *net; | |
2493 | ||
c346dca1 | 2494 | net = dev_net(dev); |
a050c33f DL |
2495 | lh = dev->dev_list.next; |
2496 | return lh == &net->dev_base_head ? NULL : net_device_entry(lh); | |
2497 | } | |
2498 | ||
ce81b76a ED |
2499 | static inline struct net_device *next_net_device_rcu(struct net_device *dev) |
2500 | { | |
2501 | struct list_head *lh; | |
2502 | struct net *net; | |
2503 | ||
2504 | net = dev_net(dev); | |
ccf43438 | 2505 | lh = rcu_dereference(list_next_rcu(&dev->dev_list)); |
ce81b76a ED |
2506 | return lh == &net->dev_base_head ? NULL : net_device_entry(lh); |
2507 | } | |
2508 | ||
a050c33f DL |
2509 | static inline struct net_device *first_net_device(struct net *net) |
2510 | { | |
2511 | return list_empty(&net->dev_base_head) ? NULL : | |
2512 | net_device_entry(net->dev_base_head.next); | |
2513 | } | |
7562f876 | 2514 | |
ccf43438 ED |
2515 | static inline struct net_device *first_net_device_rcu(struct net *net) |
2516 | { | |
2517 | struct list_head *lh = rcu_dereference(list_next_rcu(&net->dev_base_head)); | |
2518 | ||
2519 | return lh == &net->dev_base_head ? NULL : net_device_entry(lh); | |
2520 | } | |
2521 | ||
f629d208 JP |
2522 | int netdev_boot_setup_check(struct net_device *dev); |
2523 | unsigned long netdev_boot_base(const char *prefix, int unit); | |
2524 | struct net_device *dev_getbyhwaddr_rcu(struct net *net, unsigned short type, | |
2525 | const char *hwaddr); | |
2526 | struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type); | |
2527 | struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type); | |
2528 | void dev_add_pack(struct packet_type *pt); | |
2529 | void dev_remove_pack(struct packet_type *pt); | |
2530 | void __dev_remove_pack(struct packet_type *pt); | |
2531 | void dev_add_offload(struct packet_offload *po); | |
2532 | void dev_remove_offload(struct packet_offload *po); | |
f629d208 | 2533 | |
a54acb3a | 2534 | int dev_get_iflink(const struct net_device *dev); |
fc4099f1 | 2535 | int dev_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb); |
6c555490 WC |
2536 | struct net_device *__dev_get_by_flags(struct net *net, unsigned short flags, |
2537 | unsigned short mask); | |
f629d208 JP |
2538 | struct net_device *dev_get_by_name(struct net *net, const char *name); |
2539 | struct net_device *dev_get_by_name_rcu(struct net *net, const char *name); | |
2540 | struct net_device *__dev_get_by_name(struct net *net, const char *name); | |
2541 | int dev_alloc_name(struct net_device *dev, const char *name); | |
2542 | int dev_open(struct net_device *dev); | |
7051b88a | 2543 | void dev_close(struct net_device *dev); |
2544 | void dev_close_many(struct list_head *head, bool unlink); | |
f629d208 | 2545 | void dev_disable_lro(struct net_device *dev); |
0c4b51f0 | 2546 | int dev_loopback_xmit(struct net *net, struct sock *sk, struct sk_buff *newskb); |
2b4aa3ce | 2547 | int dev_queue_xmit(struct sk_buff *skb); |
f663dd9a | 2548 | int dev_queue_xmit_accel(struct sk_buff *skb, void *accel_priv); |
865b03f2 | 2549 | int dev_direct_xmit(struct sk_buff *skb, u16 queue_id); |
f629d208 JP |
2550 | int register_netdevice(struct net_device *dev); |
2551 | void unregister_netdevice_queue(struct net_device *dev, struct list_head *head); | |
2552 | void unregister_netdevice_many(struct list_head *head); | |
44a0873d ED |
2553 | static inline void unregister_netdevice(struct net_device *dev) |
2554 | { | |
2555 | unregister_netdevice_queue(dev, NULL); | |
2556 | } | |
2557 | ||
f629d208 JP |
2558 | int netdev_refcnt_read(const struct net_device *dev); |
2559 | void free_netdev(struct net_device *dev); | |
74d332c1 | 2560 | void netdev_freemem(struct net_device *dev); |
f629d208 JP |
2561 | void synchronize_net(void); |
2562 | int init_dummy_netdev(struct net_device *dev); | |
937f1ba5 | 2563 | |
f60e5990 | 2564 | DECLARE_PER_CPU(int, xmit_recursion); |
a70b506e DB |
2565 | #define XMIT_RECURSION_LIMIT 10 |
2566 | ||
f60e5990 | 2567 | static inline int dev_recursion_level(void) |
2568 | { | |
2569 | return this_cpu_read(xmit_recursion); | |
2570 | } | |
2571 | ||
f629d208 JP |
2572 | struct net_device *dev_get_by_index(struct net *net, int ifindex); |
2573 | struct net_device *__dev_get_by_index(struct net *net, int ifindex); | |
2574 | struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex); | |
90b602f8 | 2575 | struct net_device *dev_get_by_napi_id(unsigned int napi_id); |
f629d208 JP |
2576 | int netdev_get_name(struct net *net, char *name, int ifindex); |
2577 | int dev_restart(struct net_device *dev); | |
d4546c25 | 2578 | int skb_gro_receive(struct sk_buff *p, struct sk_buff *skb); |
86911732 HX |
2579 | |
2580 | static inline unsigned int skb_gro_offset(const struct sk_buff *skb) | |
2581 | { | |
2582 | return NAPI_GRO_CB(skb)->data_offset; | |
2583 | } | |
2584 | ||
2585 | static inline unsigned int skb_gro_len(const struct sk_buff *skb) | |
2586 | { | |
2587 | return skb->len - NAPI_GRO_CB(skb)->data_offset; | |
2588 | } | |
2589 | ||
2590 | static inline void skb_gro_pull(struct sk_buff *skb, unsigned int len) | |
2591 | { | |
2592 | NAPI_GRO_CB(skb)->data_offset += len; | |
2593 | } | |
2594 | ||
a5b1cf28 HX |
2595 | static inline void *skb_gro_header_fast(struct sk_buff *skb, |
2596 | unsigned int offset) | |
86911732 | 2597 | { |
a5b1cf28 HX |
2598 | return NAPI_GRO_CB(skb)->frag0 + offset; |
2599 | } | |
78a478d0 | 2600 | |
a5b1cf28 HX |
2601 | static inline int skb_gro_header_hard(struct sk_buff *skb, unsigned int hlen) |
2602 | { | |
2603 | return NAPI_GRO_CB(skb)->frag0_len < hlen; | |
2604 | } | |
78a478d0 | 2605 | |
57ea52a8 HX |
2606 | static inline void skb_gro_frag0_invalidate(struct sk_buff *skb) |
2607 | { | |
2608 | NAPI_GRO_CB(skb)->frag0 = NULL; | |
2609 | NAPI_GRO_CB(skb)->frag0_len = 0; | |
2610 | } | |
2611 | ||
a5b1cf28 HX |
2612 | static inline void *skb_gro_header_slow(struct sk_buff *skb, unsigned int hlen, |
2613 | unsigned int offset) | |
2614 | { | |
17dd759c HX |
2615 | if (!pskb_may_pull(skb, hlen)) |
2616 | return NULL; | |
2617 | ||
57ea52a8 | 2618 | skb_gro_frag0_invalidate(skb); |
17dd759c | 2619 | return skb->data + offset; |
86911732 | 2620 | } |
1da177e4 | 2621 | |
36e7b1b8 HX |
2622 | static inline void *skb_gro_network_header(struct sk_buff *skb) |
2623 | { | |
78d3fd0b HX |
2624 | return (NAPI_GRO_CB(skb)->frag0 ?: skb->data) + |
2625 | skb_network_offset(skb); | |
36e7b1b8 HX |
2626 | } |
2627 | ||
bf5a755f JC |
2628 | static inline void skb_gro_postpull_rcsum(struct sk_buff *skb, |
2629 | const void *start, unsigned int len) | |
2630 | { | |
573e8fca | 2631 | if (NAPI_GRO_CB(skb)->csum_valid) |
bf5a755f JC |
2632 | NAPI_GRO_CB(skb)->csum = csum_sub(NAPI_GRO_CB(skb)->csum, |
2633 | csum_partial(start, len, 0)); | |
2634 | } | |
2635 | ||
573e8fca TH |
2636 | /* GRO checksum functions. These are logical equivalents of the normal |
2637 | * checksum functions (in skbuff.h) except that they operate on the GRO | |
2638 | * offsets and fields in sk_buff. | |
2639 | */ | |
2640 | ||
2641 | __sum16 __skb_gro_checksum_complete(struct sk_buff *skb); | |
2642 | ||
15e2396d TH |
2643 | static inline bool skb_at_gro_remcsum_start(struct sk_buff *skb) |
2644 | { | |
b7fe10e5 | 2645 | return (NAPI_GRO_CB(skb)->gro_remcsum_start == skb_gro_offset(skb)); |
15e2396d TH |
2646 | } |
2647 | ||
573e8fca TH |
2648 | static inline bool __skb_gro_checksum_validate_needed(struct sk_buff *skb, |
2649 | bool zero_okay, | |
2650 | __sum16 check) | |
2651 | { | |
6edec0e6 TH |
2652 | return ((skb->ip_summed != CHECKSUM_PARTIAL || |
2653 | skb_checksum_start_offset(skb) < | |
2654 | skb_gro_offset(skb)) && | |
15e2396d | 2655 | !skb_at_gro_remcsum_start(skb) && |
662880f4 | 2656 | NAPI_GRO_CB(skb)->csum_cnt == 0 && |
573e8fca TH |
2657 | (!zero_okay || check)); |
2658 | } | |
2659 | ||
2660 | static inline __sum16 __skb_gro_checksum_validate_complete(struct sk_buff *skb, | |
2661 | __wsum psum) | |
2662 | { | |
2663 | if (NAPI_GRO_CB(skb)->csum_valid && | |
2664 | !csum_fold(csum_add(psum, NAPI_GRO_CB(skb)->csum))) | |
2665 | return 0; | |
2666 | ||
2667 | NAPI_GRO_CB(skb)->csum = psum; | |
2668 | ||
2669 | return __skb_gro_checksum_complete(skb); | |
2670 | } | |
2671 | ||
573e8fca TH |
2672 | static inline void skb_gro_incr_csum_unnecessary(struct sk_buff *skb) |
2673 | { | |
662880f4 TH |
2674 | if (NAPI_GRO_CB(skb)->csum_cnt > 0) { |
2675 | /* Consume a checksum from CHECKSUM_UNNECESSARY */ | |
2676 | NAPI_GRO_CB(skb)->csum_cnt--; | |
2677 | } else { | |
2678 | /* Update skb for CHECKSUM_UNNECESSARY and csum_level when we | |
2679 | * verified a new top level checksum or an encapsulated one | |
2680 | * during GRO. This saves work if we fallback to normal path. | |
2681 | */ | |
2682 | __skb_incr_checksum_unnecessary(skb); | |
573e8fca TH |
2683 | } |
2684 | } | |
2685 | ||
2686 | #define __skb_gro_checksum_validate(skb, proto, zero_okay, check, \ | |
2687 | compute_pseudo) \ | |
2688 | ({ \ | |
2689 | __sum16 __ret = 0; \ | |
2690 | if (__skb_gro_checksum_validate_needed(skb, zero_okay, check)) \ | |
2691 | __ret = __skb_gro_checksum_validate_complete(skb, \ | |
2692 | compute_pseudo(skb, proto)); \ | |
219f1d79 | 2693 | if (!__ret) \ |
573e8fca TH |
2694 | skb_gro_incr_csum_unnecessary(skb); \ |
2695 | __ret; \ | |
2696 | }) | |
2697 | ||
2698 | #define skb_gro_checksum_validate(skb, proto, compute_pseudo) \ | |
2699 | __skb_gro_checksum_validate(skb, proto, false, 0, compute_pseudo) | |
2700 | ||
2701 | #define skb_gro_checksum_validate_zero_check(skb, proto, check, \ | |
2702 | compute_pseudo) \ | |
2703 | __skb_gro_checksum_validate(skb, proto, true, check, compute_pseudo) | |
2704 | ||
2705 | #define skb_gro_checksum_simple_validate(skb) \ | |
2706 | __skb_gro_checksum_validate(skb, 0, false, 0, null_compute_pseudo) | |
2707 | ||
d96535a1 TH |
2708 | static inline bool __skb_gro_checksum_convert_check(struct sk_buff *skb) |
2709 | { | |
2710 | return (NAPI_GRO_CB(skb)->csum_cnt == 0 && | |
2711 | !NAPI_GRO_CB(skb)->csum_valid); | |
2712 | } | |
2713 | ||
2714 | static inline void __skb_gro_checksum_convert(struct sk_buff *skb, | |
2715 | __sum16 check, __wsum pseudo) | |
2716 | { | |
2717 | NAPI_GRO_CB(skb)->csum = ~pseudo; | |
2718 | NAPI_GRO_CB(skb)->csum_valid = 1; | |
2719 | } | |
2720 | ||
2721 | #define skb_gro_checksum_try_convert(skb, proto, check, compute_pseudo) \ | |
2722 | do { \ | |
2723 | if (__skb_gro_checksum_convert_check(skb)) \ | |
2724 | __skb_gro_checksum_convert(skb, check, \ | |
2725 | compute_pseudo(skb, proto)); \ | |
2726 | } while (0) | |
2727 | ||
26c4f7da TH |
2728 | struct gro_remcsum { |
2729 | int offset; | |
2730 | __wsum delta; | |
2731 | }; | |
2732 | ||
2733 | static inline void skb_gro_remcsum_init(struct gro_remcsum *grc) | |
2734 | { | |
846cd667 | 2735 | grc->offset = 0; |
26c4f7da TH |
2736 | grc->delta = 0; |
2737 | } | |
2738 | ||
b7fe10e5 TH |
2739 | static inline void *skb_gro_remcsum_process(struct sk_buff *skb, void *ptr, |
2740 | unsigned int off, size_t hdrlen, | |
2741 | int start, int offset, | |
2742 | struct gro_remcsum *grc, | |
2743 | bool nopartial) | |
dcdc8994 TH |
2744 | { |
2745 | __wsum delta; | |
b7fe10e5 | 2746 | size_t plen = hdrlen + max_t(size_t, offset + sizeof(u16), start); |
dcdc8994 TH |
2747 | |
2748 | BUG_ON(!NAPI_GRO_CB(skb)->csum_valid); | |
2749 | ||
15e2396d | 2750 | if (!nopartial) { |
b7fe10e5 TH |
2751 | NAPI_GRO_CB(skb)->gro_remcsum_start = off + hdrlen + start; |
2752 | return ptr; | |
2753 | } | |
2754 | ||
2755 | ptr = skb_gro_header_fast(skb, off); | |
2756 | if (skb_gro_header_hard(skb, off + plen)) { | |
2757 | ptr = skb_gro_header_slow(skb, off + plen, off); | |
2758 | if (!ptr) | |
2759 | return NULL; | |
15e2396d TH |
2760 | } |
2761 | ||
b7fe10e5 TH |
2762 | delta = remcsum_adjust(ptr + hdrlen, NAPI_GRO_CB(skb)->csum, |
2763 | start, offset); | |
dcdc8994 TH |
2764 | |
2765 | /* Adjust skb->csum since we changed the packet */ | |
dcdc8994 | 2766 | NAPI_GRO_CB(skb)->csum = csum_add(NAPI_GRO_CB(skb)->csum, delta); |
26c4f7da | 2767 | |
b7fe10e5 | 2768 | grc->offset = off + hdrlen + offset; |
26c4f7da | 2769 | grc->delta = delta; |
b7fe10e5 TH |
2770 | |
2771 | return ptr; | |
dcdc8994 TH |
2772 | } |
2773 | ||
26c4f7da TH |
2774 | static inline void skb_gro_remcsum_cleanup(struct sk_buff *skb, |
2775 | struct gro_remcsum *grc) | |
2776 | { | |
b7fe10e5 TH |
2777 | void *ptr; |
2778 | size_t plen = grc->offset + sizeof(u16); | |
2779 | ||
26c4f7da TH |
2780 | if (!grc->delta) |
2781 | return; | |
2782 | ||
b7fe10e5 TH |
2783 | ptr = skb_gro_header_fast(skb, grc->offset); |
2784 | if (skb_gro_header_hard(skb, grc->offset + sizeof(u16))) { | |
2785 | ptr = skb_gro_header_slow(skb, plen, grc->offset); | |
2786 | if (!ptr) | |
2787 | return; | |
2788 | } | |
2789 | ||
2790 | remcsum_unadjust((__sum16 *)ptr, grc->delta); | |
26c4f7da | 2791 | } |
dcdc8994 | 2792 | |
25393d3f | 2793 | #ifdef CONFIG_XFRM_OFFLOAD |
d4546c25 | 2794 | static inline void skb_gro_flush_final(struct sk_buff *skb, struct sk_buff *pp, int flush) |
25393d3f SK |
2795 | { |
2796 | if (PTR_ERR(pp) != -EINPROGRESS) | |
2797 | NAPI_GRO_CB(skb)->flush |= flush; | |
2798 | } | |
603d4cf8 | 2799 | static inline void skb_gro_flush_final_remcsum(struct sk_buff *skb, |
5cd3da4b | 2800 | struct sk_buff *pp, |
603d4cf8 SD |
2801 | int flush, |
2802 | struct gro_remcsum *grc) | |
2803 | { | |
2804 | if (PTR_ERR(pp) != -EINPROGRESS) { | |
2805 | NAPI_GRO_CB(skb)->flush |= flush; | |
2806 | skb_gro_remcsum_cleanup(skb, grc); | |
2807 | skb->remcsum_offload = 0; | |
2808 | } | |
2809 | } | |
25393d3f | 2810 | #else |
d4546c25 | 2811 | static inline void skb_gro_flush_final(struct sk_buff *skb, struct sk_buff *pp, int flush) |
5f114163 SK |
2812 | { |
2813 | NAPI_GRO_CB(skb)->flush |= flush; | |
2814 | } | |
603d4cf8 | 2815 | static inline void skb_gro_flush_final_remcsum(struct sk_buff *skb, |
5cd3da4b | 2816 | struct sk_buff *pp, |
603d4cf8 SD |
2817 | int flush, |
2818 | struct gro_remcsum *grc) | |
2819 | { | |
2820 | NAPI_GRO_CB(skb)->flush |= flush; | |
2821 | skb_gro_remcsum_cleanup(skb, grc); | |
2822 | skb->remcsum_offload = 0; | |
2823 | } | |
25393d3f | 2824 | #endif |
5f114163 | 2825 | |
0c4e8581 SH |
2826 | static inline int dev_hard_header(struct sk_buff *skb, struct net_device *dev, |
2827 | unsigned short type, | |
3b04ddde | 2828 | const void *daddr, const void *saddr, |
95c96174 | 2829 | unsigned int len) |
0c4e8581 | 2830 | { |
f1ecfd5d | 2831 | if (!dev->header_ops || !dev->header_ops->create) |
0c4e8581 | 2832 | return 0; |
3b04ddde SH |
2833 | |
2834 | return dev->header_ops->create(skb, dev, type, daddr, saddr, len); | |
0c4e8581 SH |
2835 | } |
2836 | ||
b95cce35 SH |
2837 | static inline int dev_parse_header(const struct sk_buff *skb, |
2838 | unsigned char *haddr) | |
2839 | { | |
2840 | const struct net_device *dev = skb->dev; | |
2841 | ||
1b83336b | 2842 | if (!dev->header_ops || !dev->header_ops->parse) |
b95cce35 | 2843 | return 0; |
3b04ddde | 2844 | return dev->header_ops->parse(skb, haddr); |
b95cce35 SH |
2845 | } |
2846 | ||
2793a23a WB |
2847 | /* ll_header must have at least hard_header_len allocated */ |
2848 | static inline bool dev_validate_header(const struct net_device *dev, | |
2849 | char *ll_header, int len) | |
2850 | { | |
2851 | if (likely(len >= dev->hard_header_len)) | |
2852 | return true; | |
217e6fa2 WB |
2853 | if (len < dev->min_header_len) |
2854 | return false; | |
2793a23a WB |
2855 | |
2856 | if (capable(CAP_SYS_RAWIO)) { | |
2857 | memset(ll_header + len, 0, dev->hard_header_len - len); | |
2858 | return true; | |
2859 | } | |
2860 | ||
2861 | if (dev->header_ops && dev->header_ops->validate) | |
2862 | return dev->header_ops->validate(ll_header, len); | |
2863 | ||
2864 | return false; | |
2865 | } | |
2866 | ||
36fd633e AV |
2867 | typedef int gifconf_func_t(struct net_device * dev, char __user * bufptr, |
2868 | int len, int size); | |
f629d208 | 2869 | int register_gifconf(unsigned int family, gifconf_func_t *gifconf); |
1da177e4 LT |
2870 | static inline int unregister_gifconf(unsigned int family) |
2871 | { | |
2872 | return register_gifconf(family, NULL); | |
2873 | } | |
2874 | ||
99bbc707 | 2875 | #ifdef CONFIG_NET_FLOW_LIMIT |
5f121b9a | 2876 | #define FLOW_LIMIT_HISTORY (1 << 7) /* must be ^2 and !overflow buckets */ |
99bbc707 WB |
2877 | struct sd_flow_limit { |
2878 | u64 count; | |
2879 | unsigned int num_buckets; | |
2880 | unsigned int history_head; | |
2881 | u16 history[FLOW_LIMIT_HISTORY]; | |
2882 | u8 buckets[]; | |
2883 | }; | |
2884 | ||
2885 | extern int netdev_flow_limit_table_len; | |
2886 | #endif /* CONFIG_NET_FLOW_LIMIT */ | |
2887 | ||
1da177e4 | 2888 | /* |
5e82b4b2 | 2889 | * Incoming packets are placed on per-CPU queues |
1da177e4 | 2890 | */ |
d94d9fee | 2891 | struct softnet_data { |
1da177e4 | 2892 | struct list_head poll_list; |
6e7676c1 | 2893 | struct sk_buff_head process_queue; |
1da177e4 | 2894 | |
dee42870 | 2895 | /* stats */ |
cd7b5396 DM |
2896 | unsigned int processed; |
2897 | unsigned int time_squeeze; | |
cd7b5396 | 2898 | unsigned int received_rps; |
fd793d89 | 2899 | #ifdef CONFIG_RPS |
88751275 | 2900 | struct softnet_data *rps_ipi_list; |
4cdb1e2e ED |
2901 | #endif |
2902 | #ifdef CONFIG_NET_FLOW_LIMIT | |
2903 | struct sd_flow_limit __rcu *flow_limit; | |
2904 | #endif | |
2905 | struct Qdisc *output_queue; | |
2906 | struct Qdisc **output_queue_tailp; | |
2907 | struct sk_buff *completion_queue; | |
f53c7239 SK |
2908 | #ifdef CONFIG_XFRM_OFFLOAD |
2909 | struct sk_buff_head xfrm_backlog; | |
2910 | #endif | |
4cdb1e2e | 2911 | #ifdef CONFIG_RPS |
501e7ef5 ED |
2912 | /* input_queue_head should be written by cpu owning this struct, |
2913 | * and only read by other cpus. Worth using a cache line. | |
2914 | */ | |
2915 | unsigned int input_queue_head ____cacheline_aligned_in_smp; | |
2916 | ||
2917 | /* Elements below can be accessed between CPUs for RPS/RFS */ | |
966a9671 | 2918 | call_single_data_t csd ____cacheline_aligned_in_smp; |
88751275 ED |
2919 | struct softnet_data *rps_ipi_next; |
2920 | unsigned int cpu; | |
76cc8b13 | 2921 | unsigned int input_queue_tail; |
1e94d72f | 2922 | #endif |
95c96174 | 2923 | unsigned int dropped; |
0a9627f2 | 2924 | struct sk_buff_head input_pkt_queue; |
bea3348e | 2925 | struct napi_struct backlog; |
99bbc707 | 2926 | |
1da177e4 LT |
2927 | }; |
2928 | ||
76cc8b13 | 2929 | static inline void input_queue_head_incr(struct softnet_data *sd) |
fec5e652 TH |
2930 | { |
2931 | #ifdef CONFIG_RPS | |
76cc8b13 TH |
2932 | sd->input_queue_head++; |
2933 | #endif | |
2934 | } | |
2935 | ||
2936 | static inline void input_queue_tail_incr_save(struct softnet_data *sd, | |
2937 | unsigned int *qtail) | |
2938 | { | |
2939 | #ifdef CONFIG_RPS | |
2940 | *qtail = ++sd->input_queue_tail; | |
fec5e652 TH |
2941 | #endif |
2942 | } | |
2943 | ||
0a9627f2 | 2944 | DECLARE_PER_CPU_ALIGNED(struct softnet_data, softnet_data); |
1da177e4 | 2945 | |
f629d208 | 2946 | void __netif_schedule(struct Qdisc *q); |
46e5da40 | 2947 | void netif_schedule_queue(struct netdev_queue *txq); |
86d804e1 | 2948 | |
fd2ea0a7 DM |
2949 | static inline void netif_tx_schedule_all(struct net_device *dev) |
2950 | { | |
2951 | unsigned int i; | |
2952 | ||
2953 | for (i = 0; i < dev->num_tx_queues; i++) | |
2954 | netif_schedule_queue(netdev_get_tx_queue(dev, i)); | |
2955 | } | |
2956 | ||
f9a7cbbf | 2957 | static __always_inline void netif_tx_start_queue(struct netdev_queue *dev_queue) |
d29f749e | 2958 | { |
73466498 | 2959 | clear_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state); |
d29f749e DJ |
2960 | } |
2961 | ||
bea3348e SH |
2962 | /** |
2963 | * netif_start_queue - allow transmit | |
2964 | * @dev: network device | |
2965 | * | |
2966 | * Allow upper layers to call the device hard_start_xmit routine. | |
2967 | */ | |
1da177e4 LT |
2968 | static inline void netif_start_queue(struct net_device *dev) |
2969 | { | |
e8a0464c | 2970 | netif_tx_start_queue(netdev_get_tx_queue(dev, 0)); |
1da177e4 LT |
2971 | } |
2972 | ||
fd2ea0a7 DM |
2973 | static inline void netif_tx_start_all_queues(struct net_device *dev) |
2974 | { | |
2975 | unsigned int i; | |
2976 | ||
2977 | for (i = 0; i < dev->num_tx_queues; i++) { | |
2978 | struct netdev_queue *txq = netdev_get_tx_queue(dev, i); | |
2979 | netif_tx_start_queue(txq); | |
2980 | } | |
2981 | } | |
2982 | ||
46e5da40 | 2983 | void netif_tx_wake_queue(struct netdev_queue *dev_queue); |
79d16385 | 2984 | |
d29f749e DJ |
2985 | /** |
2986 | * netif_wake_queue - restart transmit | |
2987 | * @dev: network device | |
2988 | * | |
2989 | * Allow upper layers to call the device hard_start_xmit routine. | |
2990 | * Used for flow control when transmit resources are available. | |
2991 | */ | |
79d16385 DM |
2992 | static inline void netif_wake_queue(struct net_device *dev) |
2993 | { | |
e8a0464c | 2994 | netif_tx_wake_queue(netdev_get_tx_queue(dev, 0)); |
1da177e4 LT |
2995 | } |
2996 | ||
fd2ea0a7 DM |
2997 | static inline void netif_tx_wake_all_queues(struct net_device *dev) |
2998 | { | |
2999 | unsigned int i; | |
3000 | ||
3001 | for (i = 0; i < dev->num_tx_queues; i++) { | |
3002 | struct netdev_queue *txq = netdev_get_tx_queue(dev, i); | |
3003 | netif_tx_wake_queue(txq); | |
3004 | } | |
3005 | } | |
3006 | ||
f9a7cbbf | 3007 | static __always_inline void netif_tx_stop_queue(struct netdev_queue *dev_queue) |
d29f749e | 3008 | { |
73466498 | 3009 | set_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state); |
d29f749e DJ |
3010 | } |
3011 | ||
bea3348e SH |
3012 | /** |
3013 | * netif_stop_queue - stop transmitted packets | |
3014 | * @dev: network device | |
3015 | * | |
3016 | * Stop upper layers calling the device hard_start_xmit routine. | |
3017 | * Used for flow control when transmit resources are unavailable. | |
3018 | */ | |
1da177e4 LT |
3019 | static inline void netif_stop_queue(struct net_device *dev) |
3020 | { | |
e8a0464c | 3021 | netif_tx_stop_queue(netdev_get_tx_queue(dev, 0)); |
1da177e4 LT |
3022 | } |
3023 | ||
a2029240 | 3024 | void netif_tx_stop_all_queues(struct net_device *dev); |
fd2ea0a7 | 3025 | |
4d29515f | 3026 | static inline bool netif_tx_queue_stopped(const struct netdev_queue *dev_queue) |
d29f749e | 3027 | { |
73466498 | 3028 | return test_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state); |
d29f749e DJ |
3029 | } |
3030 | ||
bea3348e SH |
3031 | /** |
3032 | * netif_queue_stopped - test if transmit queue is flowblocked | |
3033 | * @dev: network device | |
3034 | * | |
3035 | * Test if transmit queue on device is currently unable to send. | |
3036 | */ | |
4d29515f | 3037 | static inline bool netif_queue_stopped(const struct net_device *dev) |
1da177e4 | 3038 | { |
e8a0464c | 3039 | return netif_tx_queue_stopped(netdev_get_tx_queue(dev, 0)); |
1da177e4 LT |
3040 | } |
3041 | ||
4d29515f | 3042 | static inline bool netif_xmit_stopped(const struct netdev_queue *dev_queue) |
c3f26a26 | 3043 | { |
73466498 TH |
3044 | return dev_queue->state & QUEUE_STATE_ANY_XOFF; |
3045 | } | |
3046 | ||
8e2f1a63 DB |
3047 | static inline bool |
3048 | netif_xmit_frozen_or_stopped(const struct netdev_queue *dev_queue) | |
73466498 TH |
3049 | { |
3050 | return dev_queue->state & QUEUE_STATE_ANY_XOFF_OR_FROZEN; | |
3051 | } | |
3052 | ||
8e2f1a63 DB |
3053 | static inline bool |
3054 | netif_xmit_frozen_or_drv_stopped(const struct netdev_queue *dev_queue) | |
3055 | { | |
3056 | return dev_queue->state & QUEUE_STATE_DRV_XOFF_OR_FROZEN; | |
3057 | } | |
3058 | ||
53511453 ED |
3059 | /** |
3060 | * netdev_txq_bql_enqueue_prefetchw - prefetch bql data for write | |
3061 | * @dev_queue: pointer to transmit queue | |
3062 | * | |
3063 | * BQL enabled drivers might use this helper in their ndo_start_xmit(), | |
5e82b4b2 | 3064 | * to give appropriate hint to the CPU. |
53511453 ED |
3065 | */ |
3066 | static inline void netdev_txq_bql_enqueue_prefetchw(struct netdev_queue *dev_queue) | |
3067 | { | |
3068 | #ifdef CONFIG_BQL | |
3069 | prefetchw(&dev_queue->dql.num_queued); | |
3070 | #endif | |
3071 | } | |
3072 | ||
3073 | /** | |
3074 | * netdev_txq_bql_complete_prefetchw - prefetch bql data for write | |
3075 | * @dev_queue: pointer to transmit queue | |
3076 | * | |
3077 | * BQL enabled drivers might use this helper in their TX completion path, | |
5e82b4b2 | 3078 | * to give appropriate hint to the CPU. |
53511453 ED |
3079 | */ |
3080 | static inline void netdev_txq_bql_complete_prefetchw(struct netdev_queue *dev_queue) | |
3081 | { | |
3082 | #ifdef CONFIG_BQL | |
3083 | prefetchw(&dev_queue->dql.limit); | |
3084 | #endif | |
3085 | } | |
3086 | ||
c5d67bd7 TH |
3087 | static inline void netdev_tx_sent_queue(struct netdev_queue *dev_queue, |
3088 | unsigned int bytes) | |
3089 | { | |
114cf580 TH |
3090 | #ifdef CONFIG_BQL |
3091 | dql_queued(&dev_queue->dql, bytes); | |
b37c0fbe AD |
3092 | |
3093 | if (likely(dql_avail(&dev_queue->dql) >= 0)) | |
3094 | return; | |
3095 | ||
3096 | set_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state); | |
3097 | ||
3098 | /* | |
3099 | * The XOFF flag must be set before checking the dql_avail below, | |
3100 | * because in netdev_tx_completed_queue we update the dql_completed | |
3101 | * before checking the XOFF flag. | |
3102 | */ | |
3103 | smp_mb(); | |
3104 | ||
3105 | /* check again in case another CPU has just made room avail */ | |
3106 | if (unlikely(dql_avail(&dev_queue->dql) >= 0)) | |
3107 | clear_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state); | |
114cf580 | 3108 | #endif |
c5d67bd7 TH |
3109 | } |
3110 | ||
0042d0c8 FF |
3111 | /** |
3112 | * netdev_sent_queue - report the number of bytes queued to hardware | |
3113 | * @dev: network device | |
3114 | * @bytes: number of bytes queued to the hardware device queue | |
3115 | * | |
3116 | * Report the number of bytes queued for sending/completion to the network | |
3117 | * device hardware queue. @bytes should be a good approximation and should | |
3118 | * exactly match netdev_completed_queue() @bytes | |
3119 | */ | |
c5d67bd7 TH |
3120 | static inline void netdev_sent_queue(struct net_device *dev, unsigned int bytes) |
3121 | { | |
3122 | netdev_tx_sent_queue(netdev_get_tx_queue(dev, 0), bytes); | |
3123 | } | |
3124 | ||
3125 | static inline void netdev_tx_completed_queue(struct netdev_queue *dev_queue, | |
95c96174 | 3126 | unsigned int pkts, unsigned int bytes) |
c5d67bd7 | 3127 | { |
114cf580 | 3128 | #ifdef CONFIG_BQL |
b37c0fbe AD |
3129 | if (unlikely(!bytes)) |
3130 | return; | |
3131 | ||
3132 | dql_completed(&dev_queue->dql, bytes); | |
3133 | ||
3134 | /* | |
3135 | * Without the memory barrier there is a small possiblity that | |
3136 | * netdev_tx_sent_queue will miss the update and cause the queue to | |
3137 | * be stopped forever | |
3138 | */ | |
3139 | smp_mb(); | |
3140 | ||
3141 | if (dql_avail(&dev_queue->dql) < 0) | |
3142 | return; | |
3143 | ||
3144 | if (test_and_clear_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state)) | |
3145 | netif_schedule_queue(dev_queue); | |
114cf580 | 3146 | #endif |
c5d67bd7 TH |
3147 | } |
3148 | ||
0042d0c8 FF |
3149 | /** |
3150 | * netdev_completed_queue - report bytes and packets completed by device | |
3151 | * @dev: network device | |
3152 | * @pkts: actual number of packets sent over the medium | |
3153 | * @bytes: actual number of bytes sent over the medium | |
3154 | * | |
3155 | * Report the number of bytes and packets transmitted by the network device | |
3156 | * hardware queue over the physical medium, @bytes must exactly match the | |
3157 | * @bytes amount passed to netdev_sent_queue() | |
3158 | */ | |
c5d67bd7 | 3159 | static inline void netdev_completed_queue(struct net_device *dev, |
95c96174 | 3160 | unsigned int pkts, unsigned int bytes) |
c5d67bd7 TH |
3161 | { |
3162 | netdev_tx_completed_queue(netdev_get_tx_queue(dev, 0), pkts, bytes); | |
3163 | } | |
3164 | ||
3165 | static inline void netdev_tx_reset_queue(struct netdev_queue *q) | |
3166 | { | |
114cf580 | 3167 | #ifdef CONFIG_BQL |
5c490354 | 3168 | clear_bit(__QUEUE_STATE_STACK_XOFF, &q->state); |
114cf580 TH |
3169 | dql_reset(&q->dql); |
3170 | #endif | |
c5d67bd7 TH |
3171 | } |
3172 | ||
0042d0c8 FF |
3173 | /** |
3174 | * netdev_reset_queue - reset the packets and bytes count of a network device | |
3175 | * @dev_queue: network device | |
3176 | * | |
3177 | * Reset the bytes and packet count of a network device and clear the | |
3178 | * software flow control OFF bit for this network device | |
3179 | */ | |
c5d67bd7 TH |
3180 | static inline void netdev_reset_queue(struct net_device *dev_queue) |
3181 | { | |
3182 | netdev_tx_reset_queue(netdev_get_tx_queue(dev_queue, 0)); | |
c3f26a26 DM |
3183 | } |
3184 | ||
b9507bda DB |
3185 | /** |
3186 | * netdev_cap_txqueue - check if selected tx queue exceeds device queues | |
3187 | * @dev: network device | |
3188 | * @queue_index: given tx queue index | |
3189 | * | |
3190 | * Returns 0 if given tx queue index >= number of device tx queues, | |
3191 | * otherwise returns the originally passed tx queue index. | |
3192 | */ | |
3193 | static inline u16 netdev_cap_txqueue(struct net_device *dev, u16 queue_index) | |
3194 | { | |
3195 | if (unlikely(queue_index >= dev->real_num_tx_queues)) { | |
3196 | net_warn_ratelimited("%s selects TX queue %d, but real number of TX queues is %d\n", | |
3197 | dev->name, queue_index, | |
3198 | dev->real_num_tx_queues); | |
3199 | return 0; | |
3200 | } | |
3201 | ||
3202 | return queue_index; | |
3203 | } | |
3204 | ||
bea3348e SH |
3205 | /** |
3206 | * netif_running - test if up | |
3207 | * @dev: network device | |
3208 | * | |
3209 | * Test if the device has been brought up. | |
3210 | */ | |
4d29515f | 3211 | static inline bool netif_running(const struct net_device *dev) |
1da177e4 LT |
3212 | { |
3213 | return test_bit(__LINK_STATE_START, &dev->state); | |
3214 | } | |
3215 | ||
f25f4e44 | 3216 | /* |
5e82b4b2 | 3217 | * Routines to manage the subqueues on a device. We only need start, |
f25f4e44 PWJ |
3218 | * stop, and a check if it's stopped. All other device management is |
3219 | * done at the overall netdevice level. | |
3220 | * Also test the device if we're multiqueue. | |
3221 | */ | |
bea3348e SH |
3222 | |
3223 | /** | |
3224 | * netif_start_subqueue - allow sending packets on subqueue | |
3225 | * @dev: network device | |
3226 | * @queue_index: sub queue index | |
3227 | * | |
3228 | * Start individual transmit queue of a device with multiple transmit queues. | |
3229 | */ | |
f25f4e44 PWJ |
3230 | static inline void netif_start_subqueue(struct net_device *dev, u16 queue_index) |
3231 | { | |
fd2ea0a7 | 3232 | struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index); |
7b3d3e4f KK |
3233 | |
3234 | netif_tx_start_queue(txq); | |
f25f4e44 PWJ |
3235 | } |
3236 | ||
bea3348e SH |
3237 | /** |
3238 | * netif_stop_subqueue - stop sending packets on subqueue | |
3239 | * @dev: network device | |
3240 | * @queue_index: sub queue index | |
3241 | * | |
3242 | * Stop individual transmit queue of a device with multiple transmit queues. | |
3243 | */ | |
f25f4e44 PWJ |
3244 | static inline void netif_stop_subqueue(struct net_device *dev, u16 queue_index) |
3245 | { | |
fd2ea0a7 | 3246 | struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index); |
7b3d3e4f | 3247 | netif_tx_stop_queue(txq); |
f25f4e44 PWJ |
3248 | } |
3249 | ||
bea3348e SH |
3250 | /** |
3251 | * netif_subqueue_stopped - test status of subqueue | |
3252 | * @dev: network device | |
3253 | * @queue_index: sub queue index | |
3254 | * | |
3255 | * Check individual transmit queue of a device with multiple transmit queues. | |
3256 | */ | |
4d29515f DM |
3257 | static inline bool __netif_subqueue_stopped(const struct net_device *dev, |
3258 | u16 queue_index) | |
f25f4e44 | 3259 | { |
fd2ea0a7 | 3260 | struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index); |
7b3d3e4f KK |
3261 | |
3262 | return netif_tx_queue_stopped(txq); | |
f25f4e44 PWJ |
3263 | } |
3264 | ||
4d29515f DM |
3265 | static inline bool netif_subqueue_stopped(const struct net_device *dev, |
3266 | struct sk_buff *skb) | |
668f895a PE |
3267 | { |
3268 | return __netif_subqueue_stopped(dev, skb_get_queue_mapping(skb)); | |
3269 | } | |
bea3348e | 3270 | |
738b35cc FF |
3271 | /** |
3272 | * netif_wake_subqueue - allow sending packets on subqueue | |
3273 | * @dev: network device | |
3274 | * @queue_index: sub queue index | |
3275 | * | |
3276 | * Resume individual transmit queue of a device with multiple transmit queues. | |
3277 | */ | |
3278 | static inline void netif_wake_subqueue(struct net_device *dev, u16 queue_index) | |
3279 | { | |
3280 | struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index); | |
3281 | ||
3282 | netif_tx_wake_queue(txq); | |
3283 | } | |
f25f4e44 | 3284 | |
537c00de | 3285 | #ifdef CONFIG_XPS |
53af53ae | 3286 | int netif_set_xps_queue(struct net_device *dev, const struct cpumask *mask, |
f629d208 | 3287 | u16 index); |
80d19669 AN |
3288 | int __netif_set_xps_queue(struct net_device *dev, const unsigned long *mask, |
3289 | u16 index, bool is_rxqs_map); | |
3290 | ||
3291 | /** | |
3292 | * netif_attr_test_mask - Test a CPU or Rx queue set in a mask | |
3293 | * @j: CPU/Rx queue index | |
3294 | * @mask: bitmask of all cpus/rx queues | |
3295 | * @nr_bits: number of bits in the bitmask | |
3296 | * | |
3297 | * Test if a CPU or Rx queue index is set in a mask of all CPU/Rx queues. | |
3298 | */ | |
3299 | static inline bool netif_attr_test_mask(unsigned long j, | |
3300 | const unsigned long *mask, | |
3301 | unsigned int nr_bits) | |
3302 | { | |
3303 | cpu_max_bits_warn(j, nr_bits); | |
3304 | return test_bit(j, mask); | |
3305 | } | |
3306 | ||
3307 | /** | |
3308 | * netif_attr_test_online - Test for online CPU/Rx queue | |
3309 | * @j: CPU/Rx queue index | |
3310 | * @online_mask: bitmask for CPUs/Rx queues that are online | |
3311 | * @nr_bits: number of bits in the bitmask | |
3312 | * | |
3313 | * Returns true if a CPU/Rx queue is online. | |
3314 | */ | |
3315 | static inline bool netif_attr_test_online(unsigned long j, | |
3316 | const unsigned long *online_mask, | |
3317 | unsigned int nr_bits) | |
3318 | { | |
3319 | cpu_max_bits_warn(j, nr_bits); | |
3320 | ||
3321 | if (online_mask) | |
3322 | return test_bit(j, online_mask); | |
3323 | ||
3324 | return (j < nr_bits); | |
3325 | } | |
3326 | ||
3327 | /** | |
3328 | * netif_attrmask_next - get the next CPU/Rx queue in a cpu/Rx queues mask | |
3329 | * @n: CPU/Rx queue index | |
3330 | * @srcp: the cpumask/Rx queue mask pointer | |
3331 | * @nr_bits: number of bits in the bitmask | |
3332 | * | |
3333 | * Returns >= nr_bits if no further CPUs/Rx queues set. | |
3334 | */ | |
3335 | static inline unsigned int netif_attrmask_next(int n, const unsigned long *srcp, | |
3336 | unsigned int nr_bits) | |
3337 | { | |
3338 | /* -1 is a legal arg here. */ | |
3339 | if (n != -1) | |
3340 | cpu_max_bits_warn(n, nr_bits); | |
3341 | ||
3342 | if (srcp) | |
3343 | return find_next_bit(srcp, nr_bits, n + 1); | |
3344 | ||
3345 | return n + 1; | |
3346 | } | |
3347 | ||
3348 | /** | |
3349 | * netif_attrmask_next_and - get the next CPU/Rx queue in *src1p & *src2p | |
3350 | * @n: CPU/Rx queue index | |
3351 | * @src1p: the first CPUs/Rx queues mask pointer | |
3352 | * @src2p: the second CPUs/Rx queues mask pointer | |
3353 | * @nr_bits: number of bits in the bitmask | |
3354 | * | |
3355 | * Returns >= nr_bits if no further CPUs/Rx queues set in both. | |
3356 | */ | |
3357 | static inline int netif_attrmask_next_and(int n, const unsigned long *src1p, | |
3358 | const unsigned long *src2p, | |
3359 | unsigned int nr_bits) | |
3360 | { | |
3361 | /* -1 is a legal arg here. */ | |
3362 | if (n != -1) | |
3363 | cpu_max_bits_warn(n, nr_bits); | |
3364 | ||
3365 | if (src1p && src2p) | |
3366 | return find_next_and_bit(src1p, src2p, nr_bits, n + 1); | |
3367 | else if (src1p) | |
3368 | return find_next_bit(src1p, nr_bits, n + 1); | |
3369 | else if (src2p) | |
3370 | return find_next_bit(src2p, nr_bits, n + 1); | |
3371 | ||
3372 | return n + 1; | |
3373 | } | |
537c00de AD |
3374 | #else |
3375 | static inline int netif_set_xps_queue(struct net_device *dev, | |
3573540c | 3376 | const struct cpumask *mask, |
537c00de AD |
3377 | u16 index) |
3378 | { | |
3379 | return 0; | |
3380 | } | |
3381 | #endif | |
3382 | ||
bea3348e SH |
3383 | /** |
3384 | * netif_is_multiqueue - test if device has multiple transmit queues | |
3385 | * @dev: network device | |
3386 | * | |
3387 | * Check if device has multiple transmit queues | |
bea3348e | 3388 | */ |
4d29515f | 3389 | static inline bool netif_is_multiqueue(const struct net_device *dev) |
f25f4e44 | 3390 | { |
a02cec21 | 3391 | return dev->num_tx_queues > 1; |
f25f4e44 | 3392 | } |
1da177e4 | 3393 | |
f629d208 | 3394 | int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq); |
f0796d5c | 3395 | |
a953be53 | 3396 | #ifdef CONFIG_SYSFS |
f629d208 | 3397 | int netif_set_real_num_rx_queues(struct net_device *dev, unsigned int rxq); |
62fe0b40 BH |
3398 | #else |
3399 | static inline int netif_set_real_num_rx_queues(struct net_device *dev, | |
3400 | unsigned int rxq) | |
3401 | { | |
3402 | return 0; | |
3403 | } | |
3404 | #endif | |
3405 | ||
65073a67 DB |
3406 | static inline struct netdev_rx_queue * |
3407 | __netif_get_rx_queue(struct net_device *dev, unsigned int rxq) | |
3408 | { | |
3409 | return dev->_rx + rxq; | |
3410 | } | |
3411 | ||
a953be53 MD |
3412 | #ifdef CONFIG_SYSFS |
3413 | static inline unsigned int get_netdev_rx_queue_index( | |
3414 | struct netdev_rx_queue *queue) | |
3415 | { | |
3416 | struct net_device *dev = queue->dev; | |
3417 | int index = queue - dev->_rx; | |
3418 | ||
3419 | BUG_ON(index >= dev->num_rx_queues); | |
3420 | return index; | |
3421 | } | |
3422 | #endif | |
3423 | ||
16917b87 | 3424 | #define DEFAULT_MAX_NUM_RSS_QUEUES (8) |
f629d208 | 3425 | int netif_get_num_default_rss_queues(void); |
16917b87 | 3426 | |
e6247027 ED |
3427 | enum skb_free_reason { |
3428 | SKB_REASON_CONSUMED, | |
3429 | SKB_REASON_DROPPED, | |
3430 | }; | |
3431 | ||
3432 | void __dev_kfree_skb_irq(struct sk_buff *skb, enum skb_free_reason reason); | |
3433 | void __dev_kfree_skb_any(struct sk_buff *skb, enum skb_free_reason reason); | |
1da177e4 | 3434 | |
e6247027 ED |
3435 | /* |
3436 | * It is not allowed to call kfree_skb() or consume_skb() from hardware | |
3437 | * interrupt context or with hardware interrupts being disabled. | |
3438 | * (in_irq() || irqs_disabled()) | |
3439 | * | |
3440 | * We provide four helpers that can be used in following contexts : | |
3441 | * | |
3442 | * dev_kfree_skb_irq(skb) when caller drops a packet from irq context, | |
3443 | * replacing kfree_skb(skb) | |
3444 | * | |
3445 | * dev_consume_skb_irq(skb) when caller consumes a packet from irq context. | |
3446 | * Typically used in place of consume_skb(skb) in TX completion path | |
3447 | * | |
3448 | * dev_kfree_skb_any(skb) when caller doesn't know its current irq context, | |
3449 | * replacing kfree_skb(skb) | |
3450 | * | |
3451 | * dev_consume_skb_any(skb) when caller doesn't know its current irq context, | |
3452 | * and consumed a packet. Used in place of consume_skb(skb) | |
1da177e4 | 3453 | */ |
e6247027 ED |
3454 | static inline void dev_kfree_skb_irq(struct sk_buff *skb) |
3455 | { | |
3456 | __dev_kfree_skb_irq(skb, SKB_REASON_DROPPED); | |
3457 | } | |
3458 | ||
3459 | static inline void dev_consume_skb_irq(struct sk_buff *skb) | |
3460 | { | |
3461 | __dev_kfree_skb_irq(skb, SKB_REASON_CONSUMED); | |
3462 | } | |
3463 | ||
3464 | static inline void dev_kfree_skb_any(struct sk_buff *skb) | |
3465 | { | |
3466 | __dev_kfree_skb_any(skb, SKB_REASON_DROPPED); | |
3467 | } | |
3468 | ||
3469 | static inline void dev_consume_skb_any(struct sk_buff *skb) | |
3470 | { | |
3471 | __dev_kfree_skb_any(skb, SKB_REASON_CONSUMED); | |
3472 | } | |
1da177e4 | 3473 | |
7c497478 JW |
3474 | void generic_xdp_tx(struct sk_buff *skb, struct bpf_prog *xdp_prog); |
3475 | int do_xdp_generic(struct bpf_prog *xdp_prog, struct sk_buff *skb); | |
f629d208 JP |
3476 | int netif_rx(struct sk_buff *skb); |
3477 | int netif_rx_ni(struct sk_buff *skb); | |
04eb4489 | 3478 | int netif_receive_skb(struct sk_buff *skb); |
1c601d82 | 3479 | int netif_receive_skb_core(struct sk_buff *skb); |
f629d208 JP |
3480 | gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb); |
3481 | void napi_gro_flush(struct napi_struct *napi, bool flush_old); | |
3482 | struct sk_buff *napi_get_frags(struct napi_struct *napi); | |
3483 | gro_result_t napi_gro_frags(struct napi_struct *napi); | |
bf5a755f JC |
3484 | struct packet_offload *gro_find_receive_by_type(__be16 type); |
3485 | struct packet_offload *gro_find_complete_by_type(__be16 type); | |
76620aaf HX |
3486 | |
3487 | static inline void napi_free_frags(struct napi_struct *napi) | |
3488 | { | |
3489 | kfree_skb(napi->skb); | |
3490 | napi->skb = NULL; | |
3491 | } | |
3492 | ||
24b27fc4 | 3493 | bool netdev_is_rx_handler_busy(struct net_device *dev); |
f629d208 JP |
3494 | int netdev_rx_handler_register(struct net_device *dev, |
3495 | rx_handler_func_t *rx_handler, | |
3496 | void *rx_handler_data); | |
3497 | void netdev_rx_handler_unregister(struct net_device *dev); | |
3498 | ||
3499 | bool dev_valid_name(const char *name); | |
44c02a2c AV |
3500 | int dev_ioctl(struct net *net, unsigned int cmd, struct ifreq *ifr, |
3501 | bool *need_copyout); | |
36fd633e | 3502 | int dev_ifconf(struct net *net, struct ifconf *, int); |
f629d208 JP |
3503 | int dev_ethtool(struct net *net, struct ifreq *); |
3504 | unsigned int dev_get_flags(const struct net_device *); | |
3505 | int __dev_change_flags(struct net_device *, unsigned int flags); | |
3506 | int dev_change_flags(struct net_device *, unsigned int); | |
cb178190 DM |
3507 | void __dev_notify_flags(struct net_device *, unsigned int old_flags, |
3508 | unsigned int gchanges); | |
f629d208 JP |
3509 | int dev_change_name(struct net_device *, const char *); |
3510 | int dev_set_alias(struct net_device *, const char *, size_t); | |
6c557001 | 3511 | int dev_get_alias(const struct net_device *, char *, size_t); |
f629d208 | 3512 | int dev_change_net_namespace(struct net_device *, struct net *, const char *); |
f51048c3 | 3513 | int __dev_set_mtu(struct net_device *, int); |
f629d208 | 3514 | int dev_set_mtu(struct net_device *, int); |
6a643ddb | 3515 | int dev_change_tx_queue_len(struct net_device *, unsigned long); |
f629d208 JP |
3516 | void dev_set_group(struct net_device *, int); |
3517 | int dev_set_mac_address(struct net_device *, struct sockaddr *); | |
3518 | int dev_change_carrier(struct net_device *, bool new_carrier); | |
3519 | int dev_get_phys_port_id(struct net_device *dev, | |
02637fce | 3520 | struct netdev_phys_item_id *ppid); |
db24a904 DA |
3521 | int dev_get_phys_port_name(struct net_device *dev, |
3522 | char *name, size_t len); | |
d746d707 | 3523 | int dev_change_proto_down(struct net_device *dev, bool proto_down); |
f53c7239 | 3524 | struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev, bool *again); |
ce93718f DM |
3525 | struct sk_buff *dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev, |
3526 | struct netdev_queue *txq, int *ret); | |
d67b9cd2 | 3527 | |
f4e63525 | 3528 | typedef int (*bpf_op_t)(struct net_device *dev, struct netdev_bpf *bpf); |
d67b9cd2 DB |
3529 | int dev_change_xdp_fd(struct net_device *dev, struct netlink_ext_ack *extack, |
3530 | int fd, u32 flags); | |
118b4aa2 JK |
3531 | void __dev_xdp_query(struct net_device *dev, bpf_op_t xdp_op, |
3532 | struct netdev_bpf *xdp); | |
d67b9cd2 | 3533 | |
a0265d28 | 3534 | int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb); |
f629d208 | 3535 | int dev_forward_skb(struct net_device *dev, struct sk_buff *skb); |
f4b05d27 NA |
3536 | bool is_skb_forwardable(const struct net_device *dev, |
3537 | const struct sk_buff *skb); | |
1da177e4 | 3538 | |
4e3264d2 MKL |
3539 | static __always_inline int ____dev_forward_skb(struct net_device *dev, |
3540 | struct sk_buff *skb) | |
3541 | { | |
3542 | if (skb_orphan_frags(skb, GFP_ATOMIC) || | |
3543 | unlikely(!is_skb_forwardable(dev, skb))) { | |
3544 | atomic_long_inc(&dev->rx_dropped); | |
3545 | kfree_skb(skb); | |
3546 | return NET_RX_DROP; | |
3547 | } | |
3548 | ||
3549 | skb_scrub_packet(skb, true); | |
3550 | skb->priority = 0; | |
3551 | return 0; | |
3552 | } | |
3553 | ||
74b20582 DA |
3554 | void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev); |
3555 | ||
20380731 | 3556 | extern int netdev_budget; |
7acf8a1e | 3557 | extern unsigned int netdev_budget_usecs; |
1da177e4 LT |
3558 | |
3559 | /* Called by rtnetlink.c:rtnl_unlock() */ | |
f629d208 | 3560 | void netdev_run_todo(void); |
1da177e4 | 3561 | |
bea3348e SH |
3562 | /** |
3563 | * dev_put - release reference to device | |
3564 | * @dev: network device | |
3565 | * | |
9ef4429b | 3566 | * Release reference to device to allow it to be freed. |
bea3348e | 3567 | */ |
1da177e4 LT |
3568 | static inline void dev_put(struct net_device *dev) |
3569 | { | |
933393f5 | 3570 | this_cpu_dec(*dev->pcpu_refcnt); |
1da177e4 LT |
3571 | } |
3572 | ||
bea3348e SH |
3573 | /** |
3574 | * dev_hold - get reference to device | |
3575 | * @dev: network device | |
3576 | * | |
9ef4429b | 3577 | * Hold reference to device to keep it from being freed. |
bea3348e | 3578 | */ |
15333061 SH |
3579 | static inline void dev_hold(struct net_device *dev) |
3580 | { | |
933393f5 | 3581 | this_cpu_inc(*dev->pcpu_refcnt); |
15333061 | 3582 | } |
1da177e4 LT |
3583 | |
3584 | /* Carrier loss detection, dial on demand. The functions netif_carrier_on | |
3585 | * and _off may be called from IRQ context, but it is caller | |
3586 | * who is responsible for serialization of these calls. | |
b00055aa SR |
3587 | * |
3588 | * The name carrier is inappropriate, these functions should really be | |
3589 | * called netif_lowerlayer_*() because they represent the state of any | |
3590 | * kind of lower layer not just hardware media. | |
1da177e4 LT |
3591 | */ |
3592 | ||
f629d208 JP |
3593 | void linkwatch_init_dev(struct net_device *dev); |
3594 | void linkwatch_fire_event(struct net_device *dev); | |
3595 | void linkwatch_forget_dev(struct net_device *dev); | |
1da177e4 | 3596 | |
bea3348e SH |
3597 | /** |
3598 | * netif_carrier_ok - test if carrier present | |
3599 | * @dev: network device | |
3600 | * | |
3601 | * Check if carrier is present on device | |
3602 | */ | |
4d29515f | 3603 | static inline bool netif_carrier_ok(const struct net_device *dev) |
1da177e4 LT |
3604 | { |
3605 | return !test_bit(__LINK_STATE_NOCARRIER, &dev->state); | |
3606 | } | |
3607 | ||
f629d208 | 3608 | unsigned long dev_trans_start(struct net_device *dev); |
9d21493b | 3609 | |
f629d208 | 3610 | void __netdev_watchdog_up(struct net_device *dev); |
1da177e4 | 3611 | |
f629d208 | 3612 | void netif_carrier_on(struct net_device *dev); |
1da177e4 | 3613 | |
f629d208 | 3614 | void netif_carrier_off(struct net_device *dev); |
1da177e4 | 3615 | |
bea3348e SH |
3616 | /** |
3617 | * netif_dormant_on - mark device as dormant. | |
3618 | * @dev: network device | |
3619 | * | |
3620 | * Mark device as dormant (as per RFC2863). | |
3621 | * | |
3622 | * The dormant state indicates that the relevant interface is not | |
3623 | * actually in a condition to pass packets (i.e., it is not 'up') but is | |
3624 | * in a "pending" state, waiting for some external event. For "on- | |
3625 | * demand" interfaces, this new state identifies the situation where the | |
3626 | * interface is waiting for events to place it in the up state. | |
bea3348e | 3627 | */ |
b00055aa SR |
3628 | static inline void netif_dormant_on(struct net_device *dev) |
3629 | { | |
3630 | if (!test_and_set_bit(__LINK_STATE_DORMANT, &dev->state)) | |
3631 | linkwatch_fire_event(dev); | |
3632 | } | |
3633 | ||
bea3348e SH |
3634 | /** |
3635 | * netif_dormant_off - set device as not dormant. | |
3636 | * @dev: network device | |
3637 | * | |
3638 | * Device is not in dormant state. | |
3639 | */ | |
b00055aa SR |
3640 | static inline void netif_dormant_off(struct net_device *dev) |
3641 | { | |
3642 | if (test_and_clear_bit(__LINK_STATE_DORMANT, &dev->state)) | |
3643 | linkwatch_fire_event(dev); | |
3644 | } | |
3645 | ||
bea3348e | 3646 | /** |
8ecbc40a | 3647 | * netif_dormant - test if device is dormant |
bea3348e SH |
3648 | * @dev: network device |
3649 | * | |
8ecbc40a | 3650 | * Check if device is dormant. |
bea3348e | 3651 | */ |
4d29515f | 3652 | static inline bool netif_dormant(const struct net_device *dev) |
b00055aa SR |
3653 | { |
3654 | return test_bit(__LINK_STATE_DORMANT, &dev->state); | |
3655 | } | |
3656 | ||
3657 | ||
bea3348e SH |
3658 | /** |
3659 | * netif_oper_up - test if device is operational | |
3660 | * @dev: network device | |
3661 | * | |
3662 | * Check if carrier is operational | |
3663 | */ | |
4d29515f | 3664 | static inline bool netif_oper_up(const struct net_device *dev) |
d94d9fee | 3665 | { |
b00055aa SR |
3666 | return (dev->operstate == IF_OPER_UP || |
3667 | dev->operstate == IF_OPER_UNKNOWN /* backward compat */); | |
3668 | } | |
3669 | ||
bea3348e SH |
3670 | /** |
3671 | * netif_device_present - is device available or removed | |
3672 | * @dev: network device | |
3673 | * | |
3674 | * Check if device has not been removed from system. | |
3675 | */ | |
4d29515f | 3676 | static inline bool netif_device_present(struct net_device *dev) |
1da177e4 LT |
3677 | { |
3678 | return test_bit(__LINK_STATE_PRESENT, &dev->state); | |
3679 | } | |
3680 | ||
f629d208 | 3681 | void netif_device_detach(struct net_device *dev); |
1da177e4 | 3682 | |
f629d208 | 3683 | void netif_device_attach(struct net_device *dev); |
1da177e4 LT |
3684 | |
3685 | /* | |
3686 | * Network interface message level settings | |
3687 | */ | |
1da177e4 LT |
3688 | |
3689 | enum { | |
3690 | NETIF_MSG_DRV = 0x0001, | |
3691 | NETIF_MSG_PROBE = 0x0002, | |
3692 | NETIF_MSG_LINK = 0x0004, | |
3693 | NETIF_MSG_TIMER = 0x0008, | |
3694 | NETIF_MSG_IFDOWN = 0x0010, | |
3695 | NETIF_MSG_IFUP = 0x0020, | |
3696 | NETIF_MSG_RX_ERR = 0x0040, | |
3697 | NETIF_MSG_TX_ERR = 0x0080, | |
3698 | NETIF_MSG_TX_QUEUED = 0x0100, | |
3699 | NETIF_MSG_INTR = 0x0200, | |
3700 | NETIF_MSG_TX_DONE = 0x0400, | |
3701 | NETIF_MSG_RX_STATUS = 0x0800, | |
3702 | NETIF_MSG_PKTDATA = 0x1000, | |
3703 | NETIF_MSG_HW = 0x2000, | |
3704 | NETIF_MSG_WOL = 0x4000, | |
3705 | }; | |
3706 | ||
3707 | #define netif_msg_drv(p) ((p)->msg_enable & NETIF_MSG_DRV) | |
3708 | #define netif_msg_probe(p) ((p)->msg_enable & NETIF_MSG_PROBE) | |
3709 | #define netif_msg_link(p) ((p)->msg_enable & NETIF_MSG_LINK) | |
3710 | #define netif_msg_timer(p) ((p)->msg_enable & NETIF_MSG_TIMER) | |
3711 | #define netif_msg_ifdown(p) ((p)->msg_enable & NETIF_MSG_IFDOWN) | |
3712 | #define netif_msg_ifup(p) ((p)->msg_enable & NETIF_MSG_IFUP) | |
3713 | #define netif_msg_rx_err(p) ((p)->msg_enable & NETIF_MSG_RX_ERR) | |
3714 | #define netif_msg_tx_err(p) ((p)->msg_enable & NETIF_MSG_TX_ERR) | |
3715 | #define netif_msg_tx_queued(p) ((p)->msg_enable & NETIF_MSG_TX_QUEUED) | |
3716 | #define netif_msg_intr(p) ((p)->msg_enable & NETIF_MSG_INTR) | |
3717 | #define netif_msg_tx_done(p) ((p)->msg_enable & NETIF_MSG_TX_DONE) | |
3718 | #define netif_msg_rx_status(p) ((p)->msg_enable & NETIF_MSG_RX_STATUS) | |
3719 | #define netif_msg_pktdata(p) ((p)->msg_enable & NETIF_MSG_PKTDATA) | |
3720 | #define netif_msg_hw(p) ((p)->msg_enable & NETIF_MSG_HW) | |
3721 | #define netif_msg_wol(p) ((p)->msg_enable & NETIF_MSG_WOL) | |
3722 | ||
3723 | static inline u32 netif_msg_init(int debug_value, int default_msg_enable_bits) | |
3724 | { | |
3725 | /* use default */ | |
3726 | if (debug_value < 0 || debug_value >= (sizeof(u32) * 8)) | |
3727 | return default_msg_enable_bits; | |
3728 | if (debug_value == 0) /* no output */ | |
3729 | return 0; | |
3730 | /* set low N bits */ | |
3731 | return (1 << debug_value) - 1; | |
3732 | } | |
3733 | ||
c773e847 | 3734 | static inline void __netif_tx_lock(struct netdev_queue *txq, int cpu) |
932ff279 | 3735 | { |
c773e847 DM |
3736 | spin_lock(&txq->_xmit_lock); |
3737 | txq->xmit_lock_owner = cpu; | |
22dd7495 JHS |
3738 | } |
3739 | ||
5a717f4f MT |
3740 | static inline bool __netif_tx_acquire(struct netdev_queue *txq) |
3741 | { | |
3742 | __acquire(&txq->_xmit_lock); | |
3743 | return true; | |
3744 | } | |
3745 | ||
3746 | static inline void __netif_tx_release(struct netdev_queue *txq) | |
3747 | { | |
3748 | __release(&txq->_xmit_lock); | |
3749 | } | |
3750 | ||
fd2ea0a7 DM |
3751 | static inline void __netif_tx_lock_bh(struct netdev_queue *txq) |
3752 | { | |
3753 | spin_lock_bh(&txq->_xmit_lock); | |
3754 | txq->xmit_lock_owner = smp_processor_id(); | |
3755 | } | |
3756 | ||
4d29515f | 3757 | static inline bool __netif_tx_trylock(struct netdev_queue *txq) |
c3f26a26 | 3758 | { |
4d29515f | 3759 | bool ok = spin_trylock(&txq->_xmit_lock); |
c3f26a26 DM |
3760 | if (likely(ok)) |
3761 | txq->xmit_lock_owner = smp_processor_id(); | |
3762 | return ok; | |
3763 | } | |
3764 | ||
3765 | static inline void __netif_tx_unlock(struct netdev_queue *txq) | |
3766 | { | |
3767 | txq->xmit_lock_owner = -1; | |
3768 | spin_unlock(&txq->_xmit_lock); | |
3769 | } | |
3770 | ||
3771 | static inline void __netif_tx_unlock_bh(struct netdev_queue *txq) | |
3772 | { | |
3773 | txq->xmit_lock_owner = -1; | |
3774 | spin_unlock_bh(&txq->_xmit_lock); | |
3775 | } | |
3776 | ||
08baf561 ED |
3777 | static inline void txq_trans_update(struct netdev_queue *txq) |
3778 | { | |
3779 | if (txq->xmit_lock_owner != -1) | |
3780 | txq->trans_start = jiffies; | |
3781 | } | |
3782 | ||
ba162f8e FW |
3783 | /* legacy drivers only, netdev_start_xmit() sets txq->trans_start */ |
3784 | static inline void netif_trans_update(struct net_device *dev) | |
3785 | { | |
9b36627a FW |
3786 | struct netdev_queue *txq = netdev_get_tx_queue(dev, 0); |
3787 | ||
3788 | if (txq->trans_start != jiffies) | |
3789 | txq->trans_start = jiffies; | |
ba162f8e FW |
3790 | } |
3791 | ||
d29f749e DJ |
3792 | /** |
3793 | * netif_tx_lock - grab network device transmit lock | |
3794 | * @dev: network device | |
d29f749e DJ |
3795 | * |
3796 | * Get network device transmit lock | |
3797 | */ | |
22dd7495 JHS |
3798 | static inline void netif_tx_lock(struct net_device *dev) |
3799 | { | |
e8a0464c | 3800 | unsigned int i; |
c3f26a26 | 3801 | int cpu; |
c773e847 | 3802 | |
c3f26a26 DM |
3803 | spin_lock(&dev->tx_global_lock); |
3804 | cpu = smp_processor_id(); | |
e8a0464c DM |
3805 | for (i = 0; i < dev->num_tx_queues; i++) { |
3806 | struct netdev_queue *txq = netdev_get_tx_queue(dev, i); | |
c3f26a26 DM |
3807 | |
3808 | /* We are the only thread of execution doing a | |
3809 | * freeze, but we have to grab the _xmit_lock in | |
3810 | * order to synchronize with threads which are in | |
3811 | * the ->hard_start_xmit() handler and already | |
3812 | * checked the frozen bit. | |
3813 | */ | |
e8a0464c | 3814 | __netif_tx_lock(txq, cpu); |
c3f26a26 DM |
3815 | set_bit(__QUEUE_STATE_FROZEN, &txq->state); |
3816 | __netif_tx_unlock(txq); | |
e8a0464c | 3817 | } |
932ff279 HX |
3818 | } |
3819 | ||
3820 | static inline void netif_tx_lock_bh(struct net_device *dev) | |
3821 | { | |
e8a0464c DM |
3822 | local_bh_disable(); |
3823 | netif_tx_lock(dev); | |
932ff279 HX |
3824 | } |
3825 | ||
932ff279 HX |
3826 | static inline void netif_tx_unlock(struct net_device *dev) |
3827 | { | |
e8a0464c DM |
3828 | unsigned int i; |
3829 | ||
3830 | for (i = 0; i < dev->num_tx_queues; i++) { | |
3831 | struct netdev_queue *txq = netdev_get_tx_queue(dev, i); | |
c773e847 | 3832 | |
c3f26a26 DM |
3833 | /* No need to grab the _xmit_lock here. If the |
3834 | * queue is not stopped for another reason, we | |
3835 | * force a schedule. | |
3836 | */ | |
3837 | clear_bit(__QUEUE_STATE_FROZEN, &txq->state); | |
7b3d3e4f | 3838 | netif_schedule_queue(txq); |
c3f26a26 DM |
3839 | } |
3840 | spin_unlock(&dev->tx_global_lock); | |
932ff279 HX |
3841 | } |
3842 | ||
3843 | static inline void netif_tx_unlock_bh(struct net_device *dev) | |
3844 | { | |
e8a0464c DM |
3845 | netif_tx_unlock(dev); |
3846 | local_bh_enable(); | |
932ff279 HX |
3847 | } |
3848 | ||
c773e847 | 3849 | #define HARD_TX_LOCK(dev, txq, cpu) { \ |
22dd7495 | 3850 | if ((dev->features & NETIF_F_LLTX) == 0) { \ |
c773e847 | 3851 | __netif_tx_lock(txq, cpu); \ |
5a717f4f MT |
3852 | } else { \ |
3853 | __netif_tx_acquire(txq); \ | |
22dd7495 JHS |
3854 | } \ |
3855 | } | |
3856 | ||
5efeac44 EB |
3857 | #define HARD_TX_TRYLOCK(dev, txq) \ |
3858 | (((dev->features & NETIF_F_LLTX) == 0) ? \ | |
3859 | __netif_tx_trylock(txq) : \ | |
5a717f4f | 3860 | __netif_tx_acquire(txq)) |
5efeac44 | 3861 | |
c773e847 | 3862 | #define HARD_TX_UNLOCK(dev, txq) { \ |
22dd7495 | 3863 | if ((dev->features & NETIF_F_LLTX) == 0) { \ |
c773e847 | 3864 | __netif_tx_unlock(txq); \ |
5a717f4f MT |
3865 | } else { \ |
3866 | __netif_tx_release(txq); \ | |
22dd7495 JHS |
3867 | } \ |
3868 | } | |
3869 | ||
1da177e4 LT |
3870 | static inline void netif_tx_disable(struct net_device *dev) |
3871 | { | |
fd2ea0a7 | 3872 | unsigned int i; |
c3f26a26 | 3873 | int cpu; |
fd2ea0a7 | 3874 | |
c3f26a26 DM |
3875 | local_bh_disable(); |
3876 | cpu = smp_processor_id(); | |
fd2ea0a7 DM |
3877 | for (i = 0; i < dev->num_tx_queues; i++) { |
3878 | struct netdev_queue *txq = netdev_get_tx_queue(dev, i); | |
c3f26a26 DM |
3879 | |
3880 | __netif_tx_lock(txq, cpu); | |
fd2ea0a7 | 3881 | netif_tx_stop_queue(txq); |
c3f26a26 | 3882 | __netif_tx_unlock(txq); |
fd2ea0a7 | 3883 | } |
c3f26a26 | 3884 | local_bh_enable(); |
1da177e4 LT |
3885 | } |
3886 | ||
e308a5d8 DM |
3887 | static inline void netif_addr_lock(struct net_device *dev) |
3888 | { | |
3889 | spin_lock(&dev->addr_list_lock); | |
3890 | } | |
3891 | ||
2429f7ac JP |
3892 | static inline void netif_addr_lock_nested(struct net_device *dev) |
3893 | { | |
25175ba5 VY |
3894 | int subclass = SINGLE_DEPTH_NESTING; |
3895 | ||
3896 | if (dev->netdev_ops->ndo_get_lock_subclass) | |
3897 | subclass = dev->netdev_ops->ndo_get_lock_subclass(dev); | |
3898 | ||
3899 | spin_lock_nested(&dev->addr_list_lock, subclass); | |
2429f7ac JP |
3900 | } |
3901 | ||
e308a5d8 DM |
3902 | static inline void netif_addr_lock_bh(struct net_device *dev) |
3903 | { | |
3904 | spin_lock_bh(&dev->addr_list_lock); | |
3905 | } | |
3906 | ||
3907 | static inline void netif_addr_unlock(struct net_device *dev) | |
3908 | { | |
3909 | spin_unlock(&dev->addr_list_lock); | |
3910 | } | |
3911 | ||
3912 | static inline void netif_addr_unlock_bh(struct net_device *dev) | |
3913 | { | |
3914 | spin_unlock_bh(&dev->addr_list_lock); | |
3915 | } | |
3916 | ||
f001fde5 | 3917 | /* |
31278e71 | 3918 | * dev_addrs walker. Should be used only for read access. Call with |
f001fde5 JP |
3919 | * rcu_read_lock held. |
3920 | */ | |
3921 | #define for_each_dev_addr(dev, ha) \ | |
31278e71 | 3922 | list_for_each_entry_rcu(ha, &dev->dev_addrs.list, list) |
f001fde5 | 3923 | |
1da177e4 LT |
3924 | /* These functions live elsewhere (drivers/net/net_init.c, but related) */ |
3925 | ||
f629d208 | 3926 | void ether_setup(struct net_device *dev); |
1da177e4 LT |
3927 | |
3928 | /* Support for loadable net-drivers */ | |
f629d208 | 3929 | struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name, |
c835a677 | 3930 | unsigned char name_assign_type, |
f629d208 JP |
3931 | void (*setup)(struct net_device *), |
3932 | unsigned int txqs, unsigned int rxqs); | |
0ad646c8 CW |
3933 | int dev_get_valid_name(struct net *net, struct net_device *dev, |
3934 | const char *name); | |
3935 | ||
c835a677 TG |
3936 | #define alloc_netdev(sizeof_priv, name, name_assign_type, setup) \ |
3937 | alloc_netdev_mqs(sizeof_priv, name, name_assign_type, setup, 1, 1) | |
36909ea4 | 3938 | |
c835a677 TG |
3939 | #define alloc_netdev_mq(sizeof_priv, name, name_assign_type, setup, count) \ |
3940 | alloc_netdev_mqs(sizeof_priv, name, name_assign_type, setup, count, \ | |
3941 | count) | |
36909ea4 | 3942 | |
f629d208 JP |
3943 | int register_netdev(struct net_device *dev); |
3944 | void unregister_netdev(struct net_device *dev); | |
f001fde5 | 3945 | |
22bedad3 | 3946 | /* General hardware address lists handling functions */ |
f629d208 JP |
3947 | int __hw_addr_sync(struct netdev_hw_addr_list *to_list, |
3948 | struct netdev_hw_addr_list *from_list, int addr_len); | |
3949 | void __hw_addr_unsync(struct netdev_hw_addr_list *to_list, | |
3950 | struct netdev_hw_addr_list *from_list, int addr_len); | |
670e5b8e AD |
3951 | int __hw_addr_sync_dev(struct netdev_hw_addr_list *list, |
3952 | struct net_device *dev, | |
3953 | int (*sync)(struct net_device *, const unsigned char *), | |
3954 | int (*unsync)(struct net_device *, | |
3955 | const unsigned char *)); | |
3956 | void __hw_addr_unsync_dev(struct netdev_hw_addr_list *list, | |
3957 | struct net_device *dev, | |
3958 | int (*unsync)(struct net_device *, | |
3959 | const unsigned char *)); | |
f629d208 | 3960 | void __hw_addr_init(struct netdev_hw_addr_list *list); |
22bedad3 | 3961 | |
f001fde5 | 3962 | /* Functions used for device addresses handling */ |
f629d208 JP |
3963 | int dev_addr_add(struct net_device *dev, const unsigned char *addr, |
3964 | unsigned char addr_type); | |
3965 | int dev_addr_del(struct net_device *dev, const unsigned char *addr, | |
3966 | unsigned char addr_type); | |
f629d208 JP |
3967 | void dev_addr_flush(struct net_device *dev); |
3968 | int dev_addr_init(struct net_device *dev); | |
a748ee24 JP |
3969 | |
3970 | /* Functions used for unicast addresses handling */ | |
f629d208 JP |
3971 | int dev_uc_add(struct net_device *dev, const unsigned char *addr); |
3972 | int dev_uc_add_excl(struct net_device *dev, const unsigned char *addr); | |
3973 | int dev_uc_del(struct net_device *dev, const unsigned char *addr); | |
3974 | int dev_uc_sync(struct net_device *to, struct net_device *from); | |
3975 | int dev_uc_sync_multiple(struct net_device *to, struct net_device *from); | |
3976 | void dev_uc_unsync(struct net_device *to, struct net_device *from); | |
3977 | void dev_uc_flush(struct net_device *dev); | |
3978 | void dev_uc_init(struct net_device *dev); | |
f001fde5 | 3979 | |
670e5b8e AD |
3980 | /** |
3981 | * __dev_uc_sync - Synchonize device's unicast list | |
3982 | * @dev: device to sync | |
3983 | * @sync: function to call if address should be added | |
3984 | * @unsync: function to call if address should be removed | |
3985 | * | |
3986 | * Add newly added addresses to the interface, and release | |
3987 | * addresses that have been deleted. | |
5e82b4b2 | 3988 | */ |
670e5b8e AD |
3989 | static inline int __dev_uc_sync(struct net_device *dev, |
3990 | int (*sync)(struct net_device *, | |
3991 | const unsigned char *), | |
3992 | int (*unsync)(struct net_device *, | |
3993 | const unsigned char *)) | |
3994 | { | |
3995 | return __hw_addr_sync_dev(&dev->uc, dev, sync, unsync); | |
3996 | } | |
3997 | ||
3998 | /** | |
e793c0f7 | 3999 | * __dev_uc_unsync - Remove synchronized addresses from device |
670e5b8e AD |
4000 | * @dev: device to sync |
4001 | * @unsync: function to call if address should be removed | |
4002 | * | |
4003 | * Remove all addresses that were added to the device by dev_uc_sync(). | |
5e82b4b2 | 4004 | */ |
670e5b8e AD |
4005 | static inline void __dev_uc_unsync(struct net_device *dev, |
4006 | int (*unsync)(struct net_device *, | |
4007 | const unsigned char *)) | |
4008 | { | |
4009 | __hw_addr_unsync_dev(&dev->uc, dev, unsync); | |
4010 | } | |
4011 | ||
22bedad3 | 4012 | /* Functions used for multicast addresses handling */ |
f629d208 JP |
4013 | int dev_mc_add(struct net_device *dev, const unsigned char *addr); |
4014 | int dev_mc_add_global(struct net_device *dev, const unsigned char *addr); | |
4015 | int dev_mc_add_excl(struct net_device *dev, const unsigned char *addr); | |
4016 | int dev_mc_del(struct net_device *dev, const unsigned char *addr); | |
4017 | int dev_mc_del_global(struct net_device *dev, const unsigned char *addr); | |
4018 | int dev_mc_sync(struct net_device *to, struct net_device *from); | |
4019 | int dev_mc_sync_multiple(struct net_device *to, struct net_device *from); | |
4020 | void dev_mc_unsync(struct net_device *to, struct net_device *from); | |
4021 | void dev_mc_flush(struct net_device *dev); | |
4022 | void dev_mc_init(struct net_device *dev); | |
f001fde5 | 4023 | |
670e5b8e AD |
4024 | /** |
4025 | * __dev_mc_sync - Synchonize device's multicast list | |
4026 | * @dev: device to sync | |
4027 | * @sync: function to call if address should be added | |
4028 | * @unsync: function to call if address should be removed | |
4029 | * | |
4030 | * Add newly added addresses to the interface, and release | |
4031 | * addresses that have been deleted. | |
5e82b4b2 | 4032 | */ |
670e5b8e AD |
4033 | static inline int __dev_mc_sync(struct net_device *dev, |
4034 | int (*sync)(struct net_device *, | |
4035 | const unsigned char *), | |
4036 | int (*unsync)(struct net_device *, | |
4037 | const unsigned char *)) | |
4038 | { | |
4039 | return __hw_addr_sync_dev(&dev->mc, dev, sync, unsync); | |
4040 | } | |
4041 | ||
4042 | /** | |
e793c0f7 | 4043 | * __dev_mc_unsync - Remove synchronized addresses from device |
670e5b8e AD |
4044 | * @dev: device to sync |
4045 | * @unsync: function to call if address should be removed | |
4046 | * | |
4047 | * Remove all addresses that were added to the device by dev_mc_sync(). | |
5e82b4b2 | 4048 | */ |
670e5b8e AD |
4049 | static inline void __dev_mc_unsync(struct net_device *dev, |
4050 | int (*unsync)(struct net_device *, | |
4051 | const unsigned char *)) | |
4052 | { | |
4053 | __hw_addr_unsync_dev(&dev->mc, dev, unsync); | |
4054 | } | |
4055 | ||
4417da66 | 4056 | /* Functions used for secondary unicast and multicast support */ |
f629d208 JP |
4057 | void dev_set_rx_mode(struct net_device *dev); |
4058 | void __dev_set_rx_mode(struct net_device *dev); | |
4059 | int dev_set_promiscuity(struct net_device *dev, int inc); | |
4060 | int dev_set_allmulti(struct net_device *dev, int inc); | |
4061 | void netdev_state_change(struct net_device *dev); | |
4062 | void netdev_notify_peers(struct net_device *dev); | |
4063 | void netdev_features_change(struct net_device *dev); | |
1da177e4 | 4064 | /* Load a device via the kmod */ |
f629d208 JP |
4065 | void dev_load(struct net *net, const char *name); |
4066 | struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev, | |
4067 | struct rtnl_link_stats64 *storage); | |
4068 | void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64, | |
4069 | const struct net_device_stats *netdev_stats); | |
eeda3fd6 | 4070 | |
1da177e4 | 4071 | extern int netdev_max_backlog; |
3b098e2d | 4072 | extern int netdev_tstamp_prequeue; |
1da177e4 | 4073 | extern int weight_p; |
3d48b53f MT |
4074 | extern int dev_weight_rx_bias; |
4075 | extern int dev_weight_tx_bias; | |
4076 | extern int dev_rx_weight; | |
4077 | extern int dev_tx_weight; | |
9ff162a8 | 4078 | |
f629d208 | 4079 | bool netdev_has_upper_dev(struct net_device *dev, struct net_device *upper_dev); |
44a40855 VY |
4080 | struct net_device *netdev_upper_get_next_dev_rcu(struct net_device *dev, |
4081 | struct list_head **iter); | |
f629d208 JP |
4082 | struct net_device *netdev_all_upper_get_next_dev_rcu(struct net_device *dev, |
4083 | struct list_head **iter); | |
8b5be856 | 4084 | |
44a40855 VY |
4085 | /* iterate through upper list, must be called under RCU read lock */ |
4086 | #define netdev_for_each_upper_dev_rcu(dev, updev, iter) \ | |
4087 | for (iter = &(dev)->adj_list.upper, \ | |
4088 | updev = netdev_upper_get_next_dev_rcu(dev, &(iter)); \ | |
4089 | updev; \ | |
4090 | updev = netdev_upper_get_next_dev_rcu(dev, &(iter))) | |
4091 | ||
1a3f060c DA |
4092 | int netdev_walk_all_upper_dev_rcu(struct net_device *dev, |
4093 | int (*fn)(struct net_device *upper_dev, | |
4094 | void *data), | |
4095 | void *data); | |
4096 | ||
4097 | bool netdev_has_upper_dev_all_rcu(struct net_device *dev, | |
4098 | struct net_device *upper_dev); | |
4099 | ||
25cc72a3 IS |
4100 | bool netdev_has_any_upper_dev(struct net_device *dev); |
4101 | ||
f629d208 JP |
4102 | void *netdev_lower_get_next_private(struct net_device *dev, |
4103 | struct list_head **iter); | |
4104 | void *netdev_lower_get_next_private_rcu(struct net_device *dev, | |
4105 | struct list_head **iter); | |
31088a11 VF |
4106 | |
4107 | #define netdev_for_each_lower_private(dev, priv, iter) \ | |
4108 | for (iter = (dev)->adj_list.lower.next, \ | |
4109 | priv = netdev_lower_get_next_private(dev, &(iter)); \ | |
4110 | priv; \ | |
4111 | priv = netdev_lower_get_next_private(dev, &(iter))) | |
4112 | ||
4113 | #define netdev_for_each_lower_private_rcu(dev, priv, iter) \ | |
4114 | for (iter = &(dev)->adj_list.lower, \ | |
4115 | priv = netdev_lower_get_next_private_rcu(dev, &(iter)); \ | |
4116 | priv; \ | |
4117 | priv = netdev_lower_get_next_private_rcu(dev, &(iter))) | |
4118 | ||
4085ebe8 VY |
4119 | void *netdev_lower_get_next(struct net_device *dev, |
4120 | struct list_head **iter); | |
7ce856aa | 4121 | |
4085ebe8 | 4122 | #define netdev_for_each_lower_dev(dev, ldev, iter) \ |
cfdd28be | 4123 | for (iter = (dev)->adj_list.lower.next, \ |
4085ebe8 VY |
4124 | ldev = netdev_lower_get_next(dev, &(iter)); \ |
4125 | ldev; \ | |
4126 | ldev = netdev_lower_get_next(dev, &(iter))) | |
4127 | ||
7ce856aa JP |
4128 | struct net_device *netdev_all_lower_get_next(struct net_device *dev, |
4129 | struct list_head **iter); | |
4130 | struct net_device *netdev_all_lower_get_next_rcu(struct net_device *dev, | |
4131 | struct list_head **iter); | |
4132 | ||
1a3f060c DA |
4133 | int netdev_walk_all_lower_dev(struct net_device *dev, |
4134 | int (*fn)(struct net_device *lower_dev, | |
4135 | void *data), | |
4136 | void *data); | |
4137 | int netdev_walk_all_lower_dev_rcu(struct net_device *dev, | |
4138 | int (*fn)(struct net_device *lower_dev, | |
4139 | void *data), | |
4140 | void *data); | |
4141 | ||
f629d208 | 4142 | void *netdev_adjacent_get_private(struct list_head *adj_list); |
e001bfad | 4143 | void *netdev_lower_get_first_private_rcu(struct net_device *dev); |
f629d208 JP |
4144 | struct net_device *netdev_master_upper_dev_get(struct net_device *dev); |
4145 | struct net_device *netdev_master_upper_dev_get_rcu(struct net_device *dev); | |
42ab19ee DA |
4146 | int netdev_upper_dev_link(struct net_device *dev, struct net_device *upper_dev, |
4147 | struct netlink_ext_ack *extack); | |
f629d208 | 4148 | int netdev_master_upper_dev_link(struct net_device *dev, |
6dffb044 | 4149 | struct net_device *upper_dev, |
42ab19ee DA |
4150 | void *upper_priv, void *upper_info, |
4151 | struct netlink_ext_ack *extack); | |
f629d208 JP |
4152 | void netdev_upper_dev_unlink(struct net_device *dev, |
4153 | struct net_device *upper_dev); | |
5bb025fa | 4154 | void netdev_adjacent_rename_links(struct net_device *dev, char *oldname); |
f629d208 JP |
4155 | void *netdev_lower_dev_get_private(struct net_device *dev, |
4156 | struct net_device *lower_dev); | |
04d48266 JP |
4157 | void netdev_lower_state_changed(struct net_device *lower_dev, |
4158 | void *lower_state_info); | |
960fb622 ED |
4159 | |
4160 | /* RSS keys are 40 or 52 bytes long */ | |
4161 | #define NETDEV_RSS_KEY_LEN 52 | |
ba905f5e | 4162 | extern u8 netdev_rss_key[NETDEV_RSS_KEY_LEN] __read_mostly; |
960fb622 ED |
4163 | void netdev_rss_key_fill(void *buffer, size_t len); |
4164 | ||
952fcfd0 | 4165 | int dev_get_nest_level(struct net_device *dev); |
f629d208 | 4166 | int skb_checksum_help(struct sk_buff *skb); |
b72b5bf6 | 4167 | int skb_crc32c_csum_help(struct sk_buff *skb); |
43c26a1a DC |
4168 | int skb_csum_hwoffload_help(struct sk_buff *skb, |
4169 | const netdev_features_t features); | |
4170 | ||
f629d208 JP |
4171 | struct sk_buff *__skb_gso_segment(struct sk_buff *skb, |
4172 | netdev_features_t features, bool tx_path); | |
4173 | struct sk_buff *skb_mac_gso_segment(struct sk_buff *skb, | |
4174 | netdev_features_t features); | |
12b0004d | 4175 | |
61bd3857 MS |
4176 | struct netdev_bonding_info { |
4177 | ifslave slave; | |
4178 | ifbond master; | |
4179 | }; | |
4180 | ||
4181 | struct netdev_notifier_bonding_info { | |
4182 | struct netdev_notifier_info info; /* must be first */ | |
4183 | struct netdev_bonding_info bonding_info; | |
4184 | }; | |
4185 | ||
4186 | void netdev_bonding_info_change(struct net_device *dev, | |
4187 | struct netdev_bonding_info *bonding_info); | |
4188 | ||
12b0004d CW |
4189 | static inline |
4190 | struct sk_buff *skb_gso_segment(struct sk_buff *skb, netdev_features_t features) | |
4191 | { | |
4192 | return __skb_gso_segment(skb, features, true); | |
4193 | } | |
53d6471c | 4194 | __be16 skb_network_protocol(struct sk_buff *skb, int *depth); |
ec5f0615 PS |
4195 | |
4196 | static inline bool can_checksum_protocol(netdev_features_t features, | |
4197 | __be16 protocol) | |
4198 | { | |
c8cd0989 TH |
4199 | if (protocol == htons(ETH_P_FCOE)) |
4200 | return !!(features & NETIF_F_FCOE_CRC); | |
4201 | ||
4202 | /* Assume this is an IP checksum (not SCTP CRC) */ | |
4203 | ||
4204 | if (features & NETIF_F_HW_CSUM) { | |
4205 | /* Can checksum everything */ | |
4206 | return true; | |
4207 | } | |
4208 | ||
4209 | switch (protocol) { | |
4210 | case htons(ETH_P_IP): | |
4211 | return !!(features & NETIF_F_IP_CSUM); | |
4212 | case htons(ETH_P_IPV6): | |
4213 | return !!(features & NETIF_F_IPV6_CSUM); | |
4214 | default: | |
4215 | return false; | |
4216 | } | |
ec5f0615 | 4217 | } |
12b0004d | 4218 | |
fb286bb2 | 4219 | #ifdef CONFIG_BUG |
f629d208 | 4220 | void netdev_rx_csum_fault(struct net_device *dev); |
fb286bb2 HX |
4221 | #else |
4222 | static inline void netdev_rx_csum_fault(struct net_device *dev) | |
4223 | { | |
4224 | } | |
4225 | #endif | |
1da177e4 | 4226 | /* rx skb timestamps */ |
f629d208 JP |
4227 | void net_enable_timestamp(void); |
4228 | void net_disable_timestamp(void); | |
1da177e4 | 4229 | |
20380731 | 4230 | #ifdef CONFIG_PROC_FS |
f629d208 | 4231 | int __init dev_proc_init(void); |
900ff8c6 CW |
4232 | #else |
4233 | #define dev_proc_init() 0 | |
20380731 ACM |
4234 | #endif |
4235 | ||
4798248e | 4236 | static inline netdev_tx_t __netdev_start_xmit(const struct net_device_ops *ops, |
fa2dbdc2 DM |
4237 | struct sk_buff *skb, struct net_device *dev, |
4238 | bool more) | |
4798248e | 4239 | { |
fa2dbdc2 | 4240 | skb->xmit_more = more ? 1 : 0; |
0b725a2c | 4241 | return ops->ndo_start_xmit(skb, dev); |
4798248e DM |
4242 | } |
4243 | ||
10b3ad8c | 4244 | static inline netdev_tx_t netdev_start_xmit(struct sk_buff *skb, struct net_device *dev, |
fa2dbdc2 | 4245 | struct netdev_queue *txq, bool more) |
4798248e DM |
4246 | { |
4247 | const struct net_device_ops *ops = dev->netdev_ops; | |
10b3ad8c | 4248 | int rc; |
4798248e | 4249 | |
fa2dbdc2 | 4250 | rc = __netdev_start_xmit(ops, skb, dev, more); |
10b3ad8c DM |
4251 | if (rc == NETDEV_TX_OK) |
4252 | txq_trans_update(txq); | |
4253 | ||
4254 | return rc; | |
4798248e DM |
4255 | } |
4256 | ||
b793dc5c | 4257 | int netdev_class_create_file_ns(const struct class_attribute *class_attr, |
42a2d923 | 4258 | const void *ns); |
b793dc5c | 4259 | void netdev_class_remove_file_ns(const struct class_attribute *class_attr, |
42a2d923 | 4260 | const void *ns); |
58292cbe | 4261 | |
b793dc5c | 4262 | static inline int netdev_class_create_file(const struct class_attribute *class_attr) |
58292cbe TH |
4263 | { |
4264 | return netdev_class_create_file_ns(class_attr, NULL); | |
4265 | } | |
4266 | ||
b793dc5c | 4267 | static inline void netdev_class_remove_file(const struct class_attribute *class_attr) |
58292cbe TH |
4268 | { |
4269 | netdev_class_remove_file_ns(class_attr, NULL); | |
4270 | } | |
b8a9787e | 4271 | |
737aec57 | 4272 | extern const struct kobj_ns_type_operations net_ns_type_operations; |
04600794 | 4273 | |
f629d208 | 4274 | const char *netdev_drivername(const struct net_device *dev); |
6579e57b | 4275 | |
f629d208 | 4276 | void linkwatch_run_queue(void); |
20380731 | 4277 | |
da08143b MK |
4278 | static inline netdev_features_t netdev_intersect_features(netdev_features_t f1, |
4279 | netdev_features_t f2) | |
4280 | { | |
c8cd0989 TH |
4281 | if ((f1 ^ f2) & NETIF_F_HW_CSUM) { |
4282 | if (f1 & NETIF_F_HW_CSUM) | |
b6a0e72a | 4283 | f1 |= (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM); |
c8cd0989 | 4284 | else |
b6a0e72a | 4285 | f2 |= (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM); |
c8cd0989 | 4286 | } |
da08143b | 4287 | |
c8cd0989 | 4288 | return f1 & f2; |
da08143b MK |
4289 | } |
4290 | ||
c8f44aff MM |
4291 | static inline netdev_features_t netdev_get_wanted_features( |
4292 | struct net_device *dev) | |
5455c699 MM |
4293 | { |
4294 | return (dev->features & ~dev->hw_features) | dev->wanted_features; | |
4295 | } | |
c8f44aff MM |
4296 | netdev_features_t netdev_increment_features(netdev_features_t all, |
4297 | netdev_features_t one, netdev_features_t mask); | |
b0ce3508 ED |
4298 | |
4299 | /* Allow TSO being used on stacked device : | |
4300 | * Performing the GSO segmentation before last device | |
4301 | * is a performance improvement. | |
4302 | */ | |
4303 | static inline netdev_features_t netdev_add_tso_features(netdev_features_t features, | |
4304 | netdev_features_t mask) | |
4305 | { | |
4306 | return netdev_increment_features(features, NETIF_F_ALL_TSO, mask); | |
4307 | } | |
4308 | ||
6cb6a27c | 4309 | int __netdev_update_features(struct net_device *dev); |
5455c699 | 4310 | void netdev_update_features(struct net_device *dev); |
afe12cc8 | 4311 | void netdev_change_features(struct net_device *dev); |
7f353bf2 | 4312 | |
fc4a7489 PM |
4313 | void netif_stacked_transfer_operstate(const struct net_device *rootdev, |
4314 | struct net_device *dev); | |
4315 | ||
e38f3025 TM |
4316 | netdev_features_t passthru_features_check(struct sk_buff *skb, |
4317 | struct net_device *dev, | |
4318 | netdev_features_t features); | |
c1e756bf | 4319 | netdev_features_t netif_skb_features(struct sk_buff *skb); |
58e998c6 | 4320 | |
4d29515f | 4321 | static inline bool net_gso_ok(netdev_features_t features, int gso_type) |
576a30eb | 4322 | { |
7b748340 | 4323 | netdev_features_t feature = (netdev_features_t)gso_type << NETIF_F_GSO_SHIFT; |
0345e186 MM |
4324 | |
4325 | /* check flags correspondence */ | |
4326 | BUILD_BUG_ON(SKB_GSO_TCPV4 != (NETIF_F_TSO >> NETIF_F_GSO_SHIFT)); | |
0345e186 MM |
4327 | BUILD_BUG_ON(SKB_GSO_DODGY != (NETIF_F_GSO_ROBUST >> NETIF_F_GSO_SHIFT)); |
4328 | BUILD_BUG_ON(SKB_GSO_TCP_ECN != (NETIF_F_TSO_ECN >> NETIF_F_GSO_SHIFT)); | |
cbc53e08 | 4329 | BUILD_BUG_ON(SKB_GSO_TCP_FIXEDID != (NETIF_F_TSO_MANGLEID >> NETIF_F_GSO_SHIFT)); |
0345e186 MM |
4330 | BUILD_BUG_ON(SKB_GSO_TCPV6 != (NETIF_F_TSO6 >> NETIF_F_GSO_SHIFT)); |
4331 | BUILD_BUG_ON(SKB_GSO_FCOE != (NETIF_F_FSO >> NETIF_F_GSO_SHIFT)); | |
4b28252c TH |
4332 | BUILD_BUG_ON(SKB_GSO_GRE != (NETIF_F_GSO_GRE >> NETIF_F_GSO_SHIFT)); |
4333 | BUILD_BUG_ON(SKB_GSO_GRE_CSUM != (NETIF_F_GSO_GRE_CSUM >> NETIF_F_GSO_SHIFT)); | |
7e13318d TH |
4334 | BUILD_BUG_ON(SKB_GSO_IPXIP4 != (NETIF_F_GSO_IPXIP4 >> NETIF_F_GSO_SHIFT)); |
4335 | BUILD_BUG_ON(SKB_GSO_IPXIP6 != (NETIF_F_GSO_IPXIP6 >> NETIF_F_GSO_SHIFT)); | |
4b28252c TH |
4336 | BUILD_BUG_ON(SKB_GSO_UDP_TUNNEL != (NETIF_F_GSO_UDP_TUNNEL >> NETIF_F_GSO_SHIFT)); |
4337 | BUILD_BUG_ON(SKB_GSO_UDP_TUNNEL_CSUM != (NETIF_F_GSO_UDP_TUNNEL_CSUM >> NETIF_F_GSO_SHIFT)); | |
802ab55a | 4338 | BUILD_BUG_ON(SKB_GSO_PARTIAL != (NETIF_F_GSO_PARTIAL >> NETIF_F_GSO_SHIFT)); |
e585f236 | 4339 | BUILD_BUG_ON(SKB_GSO_TUNNEL_REMCSUM != (NETIF_F_GSO_TUNNEL_REMCSUM >> NETIF_F_GSO_SHIFT)); |
90017acc | 4340 | BUILD_BUG_ON(SKB_GSO_SCTP != (NETIF_F_GSO_SCTP >> NETIF_F_GSO_SHIFT)); |
c7ef8f0c | 4341 | BUILD_BUG_ON(SKB_GSO_ESP != (NETIF_F_GSO_ESP >> NETIF_F_GSO_SHIFT)); |
0c19f846 | 4342 | BUILD_BUG_ON(SKB_GSO_UDP != (NETIF_F_GSO_UDP >> NETIF_F_GSO_SHIFT)); |
83aa025f | 4343 | BUILD_BUG_ON(SKB_GSO_UDP_L4 != (NETIF_F_GSO_UDP_L4 >> NETIF_F_GSO_SHIFT)); |
0345e186 | 4344 | |
d6b4991a | 4345 | return (features & feature) == feature; |
576a30eb HX |
4346 | } |
4347 | ||
4d29515f | 4348 | static inline bool skb_gso_ok(struct sk_buff *skb, netdev_features_t features) |
bcd76111 | 4349 | { |
278b2513 | 4350 | return net_gso_ok(features, skb_shinfo(skb)->gso_type) && |
21dc3301 | 4351 | (!skb_has_frag_list(skb) || (features & NETIF_F_FRAGLIST)); |
bcd76111 HX |
4352 | } |
4353 | ||
8b86a61d | 4354 | static inline bool netif_needs_gso(struct sk_buff *skb, |
4d29515f | 4355 | netdev_features_t features) |
7967168c | 4356 | { |
fc741216 | 4357 | return skb_is_gso(skb) && (!skb_gso_ok(skb, features) || |
cdbee74c YZ |
4358 | unlikely((skb->ip_summed != CHECKSUM_PARTIAL) && |
4359 | (skb->ip_summed != CHECKSUM_UNNECESSARY))); | |
7967168c HX |
4360 | } |
4361 | ||
82cc1a7a PWJ |
4362 | static inline void netif_set_gso_max_size(struct net_device *dev, |
4363 | unsigned int size) | |
4364 | { | |
4365 | dev->gso_max_size = size; | |
4366 | } | |
4367 | ||
7a7ffbab WCC |
4368 | static inline void skb_gso_error_unwind(struct sk_buff *skb, __be16 protocol, |
4369 | int pulled_hlen, u16 mac_offset, | |
4370 | int mac_len) | |
4371 | { | |
4372 | skb->protocol = protocol; | |
4373 | skb->encapsulation = 1; | |
4374 | skb_push(skb, pulled_hlen); | |
4375 | skb_reset_transport_header(skb); | |
4376 | skb->mac_header = mac_offset; | |
4377 | skb->network_header = skb->mac_header + mac_len; | |
4378 | skb->mac_len = mac_len; | |
4379 | } | |
4380 | ||
3c175784 SD |
4381 | static inline bool netif_is_macsec(const struct net_device *dev) |
4382 | { | |
4383 | return dev->priv_flags & IFF_MACSEC; | |
4384 | } | |
4385 | ||
b618aaa9 | 4386 | static inline bool netif_is_macvlan(const struct net_device *dev) |
a6cc0cfa JF |
4387 | { |
4388 | return dev->priv_flags & IFF_MACVLAN; | |
4389 | } | |
4390 | ||
b618aaa9 | 4391 | static inline bool netif_is_macvlan_port(const struct net_device *dev) |
2f33e7d5 MB |
4392 | { |
4393 | return dev->priv_flags & IFF_MACVLAN_PORT; | |
4394 | } | |
4395 | ||
b618aaa9 | 4396 | static inline bool netif_is_bond_master(const struct net_device *dev) |
8a7fbfab | 4397 | { |
4398 | return dev->flags & IFF_MASTER && dev->priv_flags & IFF_BONDING; | |
4399 | } | |
4400 | ||
b618aaa9 | 4401 | static inline bool netif_is_bond_slave(const struct net_device *dev) |
1765a575 JP |
4402 | { |
4403 | return dev->flags & IFF_SLAVE && dev->priv_flags & IFF_BONDING; | |
4404 | } | |
4405 | ||
3bdc0eba BG |
4406 | static inline bool netif_supports_nofcs(struct net_device *dev) |
4407 | { | |
4408 | return dev->priv_flags & IFF_SUPP_NOFCS; | |
4409 | } | |
4410 | ||
007979ea | 4411 | static inline bool netif_is_l3_master(const struct net_device *dev) |
4e3c8992 | 4412 | { |
007979ea | 4413 | return dev->priv_flags & IFF_L3MDEV_MASTER; |
4e3c8992 DA |
4414 | } |
4415 | ||
fee6d4c7 DA |
4416 | static inline bool netif_is_l3_slave(const struct net_device *dev) |
4417 | { | |
4418 | return dev->priv_flags & IFF_L3MDEV_SLAVE; | |
4419 | } | |
4420 | ||
0894ae3f JP |
4421 | static inline bool netif_is_bridge_master(const struct net_device *dev) |
4422 | { | |
4423 | return dev->priv_flags & IFF_EBRIDGE; | |
4424 | } | |
4425 | ||
28f9ee22 VY |
4426 | static inline bool netif_is_bridge_port(const struct net_device *dev) |
4427 | { | |
4428 | return dev->priv_flags & IFF_BRIDGE_PORT; | |
4429 | } | |
4430 | ||
35d4e172 JP |
4431 | static inline bool netif_is_ovs_master(const struct net_device *dev) |
4432 | { | |
4433 | return dev->priv_flags & IFF_OPENVSWITCH; | |
4434 | } | |
4435 | ||
5be66141 JP |
4436 | static inline bool netif_is_ovs_port(const struct net_device *dev) |
4437 | { | |
4438 | return dev->priv_flags & IFF_OVS_DATAPATH; | |
4439 | } | |
4440 | ||
b618aaa9 | 4441 | static inline bool netif_is_team_master(const struct net_device *dev) |
c981e421 JP |
4442 | { |
4443 | return dev->priv_flags & IFF_TEAM; | |
4444 | } | |
4445 | ||
b618aaa9 | 4446 | static inline bool netif_is_team_port(const struct net_device *dev) |
f7f019ee JP |
4447 | { |
4448 | return dev->priv_flags & IFF_TEAM_PORT; | |
4449 | } | |
4450 | ||
b618aaa9 | 4451 | static inline bool netif_is_lag_master(const struct net_device *dev) |
7be61833 JP |
4452 | { |
4453 | return netif_is_bond_master(dev) || netif_is_team_master(dev); | |
4454 | } | |
4455 | ||
b618aaa9 | 4456 | static inline bool netif_is_lag_port(const struct net_device *dev) |
e0ba1414 JP |
4457 | { |
4458 | return netif_is_bond_slave(dev) || netif_is_team_port(dev); | |
4459 | } | |
4460 | ||
d4ab4286 KJ |
4461 | static inline bool netif_is_rxfh_configured(const struct net_device *dev) |
4462 | { | |
4463 | return dev->priv_flags & IFF_RXFH_CONFIGURED; | |
4464 | } | |
4465 | ||
30c8bd5a SS |
4466 | static inline bool netif_is_failover(const struct net_device *dev) |
4467 | { | |
4468 | return dev->priv_flags & IFF_FAILOVER; | |
4469 | } | |
4470 | ||
4471 | static inline bool netif_is_failover_slave(const struct net_device *dev) | |
4472 | { | |
4473 | return dev->priv_flags & IFF_FAILOVER_SLAVE; | |
4474 | } | |
4475 | ||
02875878 ED |
4476 | /* This device needs to keep skb dst for qdisc enqueue or ndo_start_xmit() */ |
4477 | static inline void netif_keep_dst(struct net_device *dev) | |
4478 | { | |
4479 | dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_XMIT_DST_RELEASE_PERM); | |
4480 | } | |
4481 | ||
18d3df3e PA |
4482 | /* return true if dev can't cope with mtu frames that need vlan tag insertion */ |
4483 | static inline bool netif_reduces_vlan_mtu(struct net_device *dev) | |
4484 | { | |
4485 | /* TODO: reserve and use an additional IFF bit, if we get more users */ | |
4486 | return dev->priv_flags & IFF_MACSEC; | |
4487 | } | |
4488 | ||
505d4f73 | 4489 | extern struct pernet_operations __net_initdata loopback_net_ops; |
b1b67dd4 | 4490 | |
571ba423 JP |
4491 | /* Logging, debugging and troubleshooting/diagnostic helpers. */ |
4492 | ||
4493 | /* netdev_printk helpers, similar to dev_printk */ | |
4494 | ||
4495 | static inline const char *netdev_name(const struct net_device *dev) | |
4496 | { | |
c6f854d5 VF |
4497 | if (!dev->name[0] || strchr(dev->name, '%')) |
4498 | return "(unnamed net_device)"; | |
571ba423 JP |
4499 | return dev->name; |
4500 | } | |
4501 | ||
8397ed36 DA |
4502 | static inline bool netdev_unregistering(const struct net_device *dev) |
4503 | { | |
4504 | return dev->reg_state == NETREG_UNREGISTERING; | |
4505 | } | |
4506 | ||
ccc7f496 VF |
4507 | static inline const char *netdev_reg_state(const struct net_device *dev) |
4508 | { | |
4509 | switch (dev->reg_state) { | |
4510 | case NETREG_UNINITIALIZED: return " (uninitialized)"; | |
4511 | case NETREG_REGISTERED: return ""; | |
4512 | case NETREG_UNREGISTERING: return " (unregistering)"; | |
4513 | case NETREG_UNREGISTERED: return " (unregistered)"; | |
4514 | case NETREG_RELEASED: return " (released)"; | |
4515 | case NETREG_DUMMY: return " (dummy)"; | |
4516 | } | |
4517 | ||
4518 | WARN_ONCE(1, "%s: unknown reg_state %d\n", dev->name, dev->reg_state); | |
4519 | return " (unknown)"; | |
4520 | } | |
4521 | ||
f629d208 | 4522 | __printf(3, 4) |
6ea754eb JP |
4523 | void netdev_printk(const char *level, const struct net_device *dev, |
4524 | const char *format, ...); | |
f629d208 | 4525 | __printf(2, 3) |
6ea754eb | 4526 | void netdev_emerg(const struct net_device *dev, const char *format, ...); |
f629d208 | 4527 | __printf(2, 3) |
6ea754eb | 4528 | void netdev_alert(const struct net_device *dev, const char *format, ...); |
f629d208 | 4529 | __printf(2, 3) |
6ea754eb | 4530 | void netdev_crit(const struct net_device *dev, const char *format, ...); |
f629d208 | 4531 | __printf(2, 3) |
6ea754eb | 4532 | void netdev_err(const struct net_device *dev, const char *format, ...); |
f629d208 | 4533 | __printf(2, 3) |
6ea754eb | 4534 | void netdev_warn(const struct net_device *dev, const char *format, ...); |
f629d208 | 4535 | __printf(2, 3) |
6ea754eb | 4536 | void netdev_notice(const struct net_device *dev, const char *format, ...); |
f629d208 | 4537 | __printf(2, 3) |
6ea754eb | 4538 | void netdev_info(const struct net_device *dev, const char *format, ...); |
571ba423 | 4539 | |
375ef2b1 GP |
4540 | #define netdev_level_once(level, dev, fmt, ...) \ |
4541 | do { \ | |
4542 | static bool __print_once __read_mostly; \ | |
4543 | \ | |
4544 | if (!__print_once) { \ | |
4545 | __print_once = true; \ | |
4546 | netdev_printk(level, dev, fmt, ##__VA_ARGS__); \ | |
4547 | } \ | |
4548 | } while (0) | |
4549 | ||
4550 | #define netdev_emerg_once(dev, fmt, ...) \ | |
4551 | netdev_level_once(KERN_EMERG, dev, fmt, ##__VA_ARGS__) | |
4552 | #define netdev_alert_once(dev, fmt, ...) \ | |
4553 | netdev_level_once(KERN_ALERT, dev, fmt, ##__VA_ARGS__) | |
4554 | #define netdev_crit_once(dev, fmt, ...) \ | |
4555 | netdev_level_once(KERN_CRIT, dev, fmt, ##__VA_ARGS__) | |
4556 | #define netdev_err_once(dev, fmt, ...) \ | |
4557 | netdev_level_once(KERN_ERR, dev, fmt, ##__VA_ARGS__) | |
4558 | #define netdev_warn_once(dev, fmt, ...) \ | |
4559 | netdev_level_once(KERN_WARNING, dev, fmt, ##__VA_ARGS__) | |
4560 | #define netdev_notice_once(dev, fmt, ...) \ | |
4561 | netdev_level_once(KERN_NOTICE, dev, fmt, ##__VA_ARGS__) | |
4562 | #define netdev_info_once(dev, fmt, ...) \ | |
4563 | netdev_level_once(KERN_INFO, dev, fmt, ##__VA_ARGS__) | |
4564 | ||
8909c9ad VK |
4565 | #define MODULE_ALIAS_NETDEV(device) \ |
4566 | MODULE_ALIAS("netdev-" device) | |
4567 | ||
b558c96f | 4568 | #if defined(CONFIG_DYNAMIC_DEBUG) |
571ba423 JP |
4569 | #define netdev_dbg(__dev, format, args...) \ |
4570 | do { \ | |
ffa10cb4 | 4571 | dynamic_netdev_dbg(__dev, format, ##args); \ |
571ba423 | 4572 | } while (0) |
b558c96f JC |
4573 | #elif defined(DEBUG) |
4574 | #define netdev_dbg(__dev, format, args...) \ | |
4575 | netdev_printk(KERN_DEBUG, __dev, format, ##args) | |
571ba423 JP |
4576 | #else |
4577 | #define netdev_dbg(__dev, format, args...) \ | |
4578 | ({ \ | |
4579 | if (0) \ | |
4580 | netdev_printk(KERN_DEBUG, __dev, format, ##args); \ | |
571ba423 JP |
4581 | }) |
4582 | #endif | |
4583 | ||
4584 | #if defined(VERBOSE_DEBUG) | |
4585 | #define netdev_vdbg netdev_dbg | |
4586 | #else | |
4587 | ||
4588 | #define netdev_vdbg(dev, format, args...) \ | |
4589 | ({ \ | |
4590 | if (0) \ | |
4591 | netdev_printk(KERN_DEBUG, dev, format, ##args); \ | |
4592 | 0; \ | |
4593 | }) | |
4594 | #endif | |
4595 | ||
4596 | /* | |
4597 | * netdev_WARN() acts like dev_printk(), but with the key difference | |
4598 | * of using a WARN/WARN_ON to get the message out, including the | |
4599 | * file/line information and a backtrace. | |
4600 | */ | |
4601 | #define netdev_WARN(dev, format, args...) \ | |
e1cfe3d0 | 4602 | WARN(1, "netdevice: %s%s: " format, netdev_name(dev), \ |
ccc7f496 | 4603 | netdev_reg_state(dev), ##args) |
571ba423 | 4604 | |
72dd831e | 4605 | #define netdev_WARN_ONCE(dev, format, args...) \ |
e1cfe3d0 | 4606 | WARN_ONCE(1, "netdevice: %s%s: " format, netdev_name(dev), \ |
375ef2b1 GP |
4607 | netdev_reg_state(dev), ##args) |
4608 | ||
b3d95c5c JP |
4609 | /* netif printk helpers, similar to netdev_printk */ |
4610 | ||
4611 | #define netif_printk(priv, type, level, dev, fmt, args...) \ | |
4612 | do { \ | |
4613 | if (netif_msg_##type(priv)) \ | |
4614 | netdev_printk(level, (dev), fmt, ##args); \ | |
4615 | } while (0) | |
4616 | ||
f45f4321 JP |
4617 | #define netif_level(level, priv, type, dev, fmt, args...) \ |
4618 | do { \ | |
4619 | if (netif_msg_##type(priv)) \ | |
4620 | netdev_##level(dev, fmt, ##args); \ | |
4621 | } while (0) | |
4622 | ||
b3d95c5c | 4623 | #define netif_emerg(priv, type, dev, fmt, args...) \ |
f45f4321 | 4624 | netif_level(emerg, priv, type, dev, fmt, ##args) |
b3d95c5c | 4625 | #define netif_alert(priv, type, dev, fmt, args...) \ |
f45f4321 | 4626 | netif_level(alert, priv, type, dev, fmt, ##args) |
b3d95c5c | 4627 | #define netif_crit(priv, type, dev, fmt, args...) \ |
f45f4321 | 4628 | netif_level(crit, priv, type, dev, fmt, ##args) |
b3d95c5c | 4629 | #define netif_err(priv, type, dev, fmt, args...) \ |
f45f4321 | 4630 | netif_level(err, priv, type, dev, fmt, ##args) |
b3d95c5c | 4631 | #define netif_warn(priv, type, dev, fmt, args...) \ |
f45f4321 | 4632 | netif_level(warn, priv, type, dev, fmt, ##args) |
b3d95c5c | 4633 | #define netif_notice(priv, type, dev, fmt, args...) \ |
f45f4321 | 4634 | netif_level(notice, priv, type, dev, fmt, ##args) |
b3d95c5c | 4635 | #define netif_info(priv, type, dev, fmt, args...) \ |
f45f4321 | 4636 | netif_level(info, priv, type, dev, fmt, ##args) |
b3d95c5c | 4637 | |
0053ea9c | 4638 | #if defined(CONFIG_DYNAMIC_DEBUG) |
b3d95c5c JP |
4639 | #define netif_dbg(priv, type, netdev, format, args...) \ |
4640 | do { \ | |
4641 | if (netif_msg_##type(priv)) \ | |
b5fb0a03 | 4642 | dynamic_netdev_dbg(netdev, format, ##args); \ |
b3d95c5c | 4643 | } while (0) |
0053ea9c JP |
4644 | #elif defined(DEBUG) |
4645 | #define netif_dbg(priv, type, dev, format, args...) \ | |
4646 | netif_printk(priv, type, KERN_DEBUG, dev, format, ##args) | |
b3d95c5c JP |
4647 | #else |
4648 | #define netif_dbg(priv, type, dev, format, args...) \ | |
4649 | ({ \ | |
4650 | if (0) \ | |
4651 | netif_printk(priv, type, KERN_DEBUG, dev, format, ##args); \ | |
4652 | 0; \ | |
4653 | }) | |
4654 | #endif | |
4655 | ||
f617f276 EC |
4656 | /* if @cond then downgrade to debug, else print at @level */ |
4657 | #define netif_cond_dbg(priv, type, netdev, cond, level, fmt, args...) \ | |
4658 | do { \ | |
4659 | if (cond) \ | |
4660 | netif_dbg(priv, type, netdev, fmt, ##args); \ | |
4661 | else \ | |
4662 | netif_ ## level(priv, type, netdev, fmt, ##args); \ | |
4663 | } while (0) | |
4664 | ||
b3d95c5c | 4665 | #if defined(VERBOSE_DEBUG) |
bcfcc450 | 4666 | #define netif_vdbg netif_dbg |
b3d95c5c JP |
4667 | #else |
4668 | #define netif_vdbg(priv, type, dev, format, args...) \ | |
4669 | ({ \ | |
4670 | if (0) \ | |
a4ed89cb | 4671 | netif_printk(priv, type, KERN_DEBUG, dev, format, ##args); \ |
b3d95c5c JP |
4672 | 0; \ |
4673 | }) | |
4674 | #endif | |
571ba423 | 4675 | |
900ff8c6 CW |
4676 | /* |
4677 | * The list of packet types we will receive (as opposed to discard) | |
4678 | * and the routines to invoke. | |
4679 | * | |
4680 | * Why 16. Because with 16 the only overlap we get on a hash of the | |
4681 | * low nibble of the protocol value is RARP/SNAP/X.25. | |
4682 | * | |
900ff8c6 | 4683 | * 0800 IP |
900ff8c6 CW |
4684 | * 0001 802.3 |
4685 | * 0002 AX.25 | |
4686 | * 0004 802.2 | |
4687 | * 8035 RARP | |
4688 | * 0005 SNAP | |
4689 | * 0805 X.25 | |
4690 | * 0806 ARP | |
4691 | * 8137 IPX | |
4692 | * 0009 Localtalk | |
4693 | * 86DD IPv6 | |
4694 | */ | |
4695 | #define PTYPE_HASH_SIZE (16) | |
4696 | #define PTYPE_HASH_MASK (PTYPE_HASH_SIZE - 1) | |
4697 | ||
385a154c | 4698 | #endif /* _LINUX_NETDEVICE_H */ |