/*
 *	Definitions for the 'struct sk_buff' memory handlers.
 *
 *	Authors:
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *		Florian La Roche, <rzsfl@rz.uni-sb.de>
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

#ifndef _LINUX_SKBUFF_H
#define _LINUX_SKBUFF_H
#include <linux/kernel.h>
#include <linux/kmemcheck.h>
#include <linux/compiler.h>
#include <linux/time.h>
#include <linux/bug.h>
#include <linux/cache.h>

#include <linux/atomic.h>
#include <asm/types.h>
#include <linux/spinlock.h>
#include <linux/net.h>
#include <linux/textsearch.h>
#include <net/checksum.h>
#include <linux/rcupdate.h>
#include <linux/dmaengine.h>
#include <linux/hrtimer.h>
#include <linux/dma-mapping.h>
#include <linux/netdev_features.h>
#include <net/flow_keys.h>

/* Don't change this without changing skb_csum_unnecessary! */
#define CHECKSUM_NONE		0
#define CHECKSUM_UNNECESSARY	1
#define CHECKSUM_COMPLETE	2
#define CHECKSUM_PARTIAL	3

#define SKB_DATA_ALIGN(X)	(((X) + (SMP_CACHE_BYTES - 1)) & \
				 ~(SMP_CACHE_BYTES - 1))
#define SKB_WITH_OVERHEAD(X)	\
	((X) - SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
#define SKB_MAX_ORDER(X, ORDER) \
	SKB_WITH_OVERHEAD((PAGE_SIZE << (ORDER)) - (X))
#define SKB_MAX_HEAD(X)		(SKB_MAX_ORDER((X), 0))
#define SKB_MAX_ALLOC		(SKB_MAX_ORDER(0, 2))

/* return minimum truesize of one skb containing X bytes of data */
#define SKB_TRUESIZE(X) ((X) +						\
			 SKB_DATA_ALIGN(sizeof(struct sk_buff)) +	\
			 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))

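/*
 * Worked example (editor's note): with 64-byte cache lines,
 * SKB_TRUESIZE(1500) is 1500 plus the cache-aligned sizes of struct
 * sk_buff and struct skb_shared_info. The exact total depends on the
 * kernel configuration, which is why truesize is always computed from
 * the structure sizes rather than hard-coded.
 */
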
/* A. Checksumming of received packets by device.
 *
 *	NONE: device failed to checksum this packet.
 *		skb->csum is undefined.
 *
 *	UNNECESSARY: device parsed the packet and verified the checksum.
 *		skb->csum is undefined.
 *		It is a bad option, but, unfortunately, many vendors do this.
 *		Apparently with the secret goal of selling you a new device
 *		when you add a new protocol to your host, e.g. IPv6. 8)
 *
 *	COMPLETE: the most generic way. Device supplied checksum of _all_
 *	    the packet as seen by netif_rx in skb->csum.
 *	    NOTE: Even if the device supports only some protocols but
 *	    is able to produce some skb->csum, it MUST use COMPLETE,
 *	    not UNNECESSARY.
 *
 *	PARTIAL: identical to the case for output below. This may occur
 *	    on a packet received directly from another Linux OS, e.g.,
 *	    a virtualised Linux kernel on the same host. The packet can
 *	    be treated in the same way as UNNECESSARY, except that on
 *	    output (i.e., forwarding) the checksum must be filled in
 *	    by the OS or the hardware.
 *
 * B. Checksumming on output.
 *
 *	NONE: skb is checksummed by protocol, or csum is not required.
 *
 *	PARTIAL: device is required to csum the packet as seen by
 *	    hard_start_xmit from skb->csum_start to the end, and to record
 *	    the checksum at skb->csum_start + skb->csum_offset.
 *
 *	Device must show its capabilities in dev->features, set
 *	at device setup time.
 *	NETIF_F_HW_CSUM	- it is a clever device, able to checksum
 *			  everything.
 *	NETIF_F_IP_CSUM	- device is dumb, able to csum only
 *			  TCP/UDP over IPv4. Sigh. Vendors like this
 *			  way for some unknown reason. Though, see the
 *			  comment above about CHECKSUM_UNNECESSARY. 8)
 *	NETIF_F_IPV6_CSUM - about as dumb as the last one but does IPv6 instead.
 *
 *	UNNECESSARY: device will do per-protocol-specific csum. Protocol drivers
 *	that do not want the net core to perform the checksum calculation should
 *	use this flag in their outgoing skbs.
 *	NETIF_F_FCOE_CRC - this indicates the device can do FCoE FC CRC
 *			   offload. Correspondingly, the FCoE protocol driver
 *			   stack should use CHECKSUM_UNNECESSARY.
 *
 *	Any questions? No questions, good.		--ANK
 */
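
/*
 * Editor's sketch (not part of the original header): how a receive path
 * might choose between these values. example_rx_set_csum() and the
 * hw_csum_ok flag are hypothetical; guarded out, documentation only.
 */
#if 0
static void example_rx_set_csum(struct sk_buff *skb, __wsum hw_csum,
				bool hw_csum_ok)
{
	if (hw_csum_ok) {
		/* Hardware already verified the checksum. */
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	} else if (hw_csum) {
		/* Hardware summed the whole packet; the stack verifies it. */
		skb->ip_summed = CHECKSUM_COMPLETE;
		skb->csum = hw_csum;
	} else {
		/* No help from hardware; the stack must checksum. */
		skb->ip_summed = CHECKSUM_NONE;
	}
}
#endif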

struct net_device;
struct scatterlist;
struct pipe_inode_info;

#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
struct nf_conntrack {
	atomic_t use;
};
#endif

#ifdef CONFIG_BRIDGE_NETFILTER
struct nf_bridge_info {
	atomic_t		use;
	unsigned int		mask;
	struct net_device	*physindev;
	struct net_device	*physoutdev;
	unsigned long		data[32 / sizeof(unsigned long)];
};
#endif

struct sk_buff_head {
	/* These two members must be first. */
	struct sk_buff	*next;
	struct sk_buff	*prev;

	__u32		qlen;
	spinlock_t	lock;
};

struct sk_buff;

/* To allow a 64K frame to be packed as a single skb without frag_list we
 * require 64K/PAGE_SIZE pages plus 1 additional page to allow for
 * buffers which do not start on a page boundary.
 *
 * Since GRO uses frags we allocate at least 16 regardless of page
 * size.
 */
#if (65536/PAGE_SIZE + 1) < 16
#define MAX_SKB_FRAGS 16UL
#else
#define MAX_SKB_FRAGS (65536/PAGE_SIZE + 1)
#endif
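
/*
 * Worked example (editor's note): with 4 KiB pages this evaluates to
 * 65536/4096 + 1 = 17 fragments, so a full 64K frame plus an unaligned
 * start fits without a frag_list. With 64 KiB pages the formula would
 * give only 2, so the 16-fragment floor above is what keeps GRO working.
 */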

typedef struct skb_frag_struct skb_frag_t;

struct skb_frag_struct {
	struct {
		struct page *p;
	} page;
#if (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536)
	__u32 page_offset;
	__u32 size;
#else
	__u16 page_offset;
	__u16 size;
#endif
};

static inline unsigned int skb_frag_size(const skb_frag_t *frag)
{
	return frag->size;
}

static inline void skb_frag_size_set(skb_frag_t *frag, unsigned int size)
{
	frag->size = size;
}

static inline void skb_frag_size_add(skb_frag_t *frag, int delta)
{
	frag->size += delta;
}

static inline void skb_frag_size_sub(skb_frag_t *frag, int delta)
{
	frag->size -= delta;
}

#define HAVE_HW_TIME_STAMP

/**
 * struct skb_shared_hwtstamps - hardware time stamps
 * @hwtstamp:	hardware time stamp transformed into duration
 *		since arbitrary point in time
 * @syststamp:	hwtstamp transformed to system time base
 *
 * Software time stamps generated by ktime_get_real() are stored in
 * skb->tstamp. The relation between the different kinds of time
 * stamps is as follows:
 *
 * syststamp and tstamp can be compared against each other in
 * arbitrary combinations. The accuracy of a
 * syststamp/tstamp/"syststamp from other device" comparison is
 * limited by the accuracy of the transformation into system time
 * base. This depends on the device driver and its underlying
 * hardware.
 *
 * hwtstamps can only be compared against other hwtstamps from
 * the same device.
 *
 * This structure is attached to packets as part of the
 * &skb_shared_info. Use skb_hwtstamps() to get a pointer.
 */
struct skb_shared_hwtstamps {
	ktime_t	hwtstamp;
	ktime_t	syststamp;
};

/* Definitions for tx_flags in struct skb_shared_info */
enum {
	/* generate hardware time stamp */
	SKBTX_HW_TSTAMP = 1 << 0,

	/* generate software time stamp */
	SKBTX_SW_TSTAMP = 1 << 1,

	/* device driver is going to provide hardware time stamp */
	SKBTX_IN_PROGRESS = 1 << 2,

	/* device driver supports TX zero-copy buffers */
	SKBTX_DEV_ZEROCOPY = 1 << 3,

	/* generate wifi status information (where possible) */
	SKBTX_WIFI_STATUS = 1 << 4,

	/* This indicates at least one fragment might be overwritten
	 * (as in vmsplice(), sendfile() ...)
	 * If we need to compute a TX checksum, we'll need to copy
	 * all frags to avoid possible bad checksum
	 */
	SKBTX_SHARED_FRAG = 1 << 5,
};

/*
 * The callback notifies userspace to release buffers when skb DMA is done
 * in the lower device; the skb's last reference should be zero when this
 * is called.
 * The zerocopy_success argument is true if zero copy transmit occurred,
 * false on a data copy or on an out of memory error caused by a data copy
 * attempt.
 * The ctx field is used to track device context.
 * The desc field is used to track the userspace buffer index.
 */
struct ubuf_info {
	void (*callback)(struct ubuf_info *, bool zerocopy_success);
	void *ctx;
	unsigned long desc;
};

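/*
 * Editor's sketch (not in the original header): a minimal zerocopy
 * completion callback. struct example_zc_pool and example_zc_release()
 * are hypothetical; guarded out, documentation only.
 */
#if 0
static void example_zc_callback(struct ubuf_info *ubuf, bool zerocopy_success)
{
	struct example_zc_pool *pool = ubuf->ctx;

	/* desc tells us which userspace buffer just completed. */
	example_zc_release(pool, ubuf->desc, zerocopy_success);
}
#endif
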
/* This data is invariant across clones and lives at
 * the end of the header data, i.e. at skb->end.
 */
struct skb_shared_info {
	unsigned char	nr_frags;
	__u8		tx_flags;
	unsigned short	gso_size;
	/* Warning: this field is not always filled in (UFO)! */
	unsigned short	gso_segs;
	unsigned short	gso_type;
	struct sk_buff	*frag_list;
	struct skb_shared_hwtstamps hwtstamps;
	__be32		ip6_frag_id;

	/*
	 * Warning: all fields before dataref are cleared in __alloc_skb()
	 */
	atomic_t	dataref;

	/* Intermediate layers must ensure that destructor_arg
	 * remains valid until skb destructor */
	void		*destructor_arg;

	/* must be last field, see pskb_expand_head() */
	skb_frag_t	frags[MAX_SKB_FRAGS];
};

/* We divide dataref into two halves. The higher 16 bits hold references
 * to the payload part of skb->data. The lower 16 bits hold references to
 * the entire skb->data. A clone of a headerless skb holds the length of
 * the header in skb->hdr_len.
 *
 * All users must obey the rule that the skb->data reference count must be
 * greater than or equal to the payload reference count.
 *
 * Holding a reference to the payload part means that the user does not
 * care about modifications to the header part of skb->data.
 */
#define SKB_DATAREF_SHIFT 16
#define SKB_DATAREF_MASK ((1 << SKB_DATAREF_SHIFT) - 1)
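
/*
 * Worked example (editor's note): dataref == 0x00010002 means two
 * references to the entire skb->data (low half), of which one is
 * payload-only (high half), so exactly one holder may still modify the
 * header. skb_header_cloned() below decodes exactly this split.
 */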

enum {
	SKB_FCLONE_UNAVAILABLE,
	SKB_FCLONE_ORIG,
	SKB_FCLONE_CLONE,
};

enum {
	SKB_GSO_TCPV4 = 1 << 0,
	SKB_GSO_UDP = 1 << 1,

	/* This indicates the skb is from an untrusted source. */
	SKB_GSO_DODGY = 1 << 2,

	/* This indicates the tcp segment has CWR set. */
	SKB_GSO_TCP_ECN = 1 << 3,

	SKB_GSO_TCPV6 = 1 << 4,

	SKB_GSO_FCOE = 1 << 5,

	SKB_GSO_GRE = 1 << 6,

	SKB_GSO_IPIP = 1 << 7,

	SKB_GSO_SIT = 1 << 8,

	SKB_GSO_UDP_TUNNEL = 1 << 9,

	SKB_GSO_MPLS = 1 << 10,
};

#if BITS_PER_LONG > 32
#define NET_SKBUFF_DATA_USES_OFFSET 1
#endif

#ifdef NET_SKBUFF_DATA_USES_OFFSET
typedef unsigned int sk_buff_data_t;
#else
typedef unsigned char *sk_buff_data_t;
#endif

/**
 *	struct sk_buff - socket buffer
 *	@next: Next buffer in list
 *	@prev: Previous buffer in list
 *	@tstamp: Time we arrived
 *	@sk: Socket we are owned by
 *	@dev: Device we arrived on/are leaving by
 *	@cb: Control buffer. Free for use by every layer. Put private vars here
 *	@_skb_refdst: destination entry (with norefcount bit)
 *	@sp: the security path, used for xfrm
 *	@len: Length of actual data
 *	@data_len: Data length
 *	@mac_len: Length of link layer header
 *	@hdr_len: writable header length of cloned skb
 *	@csum: Checksum (must include start/offset pair)
 *	@csum_start: Offset from skb->head where checksumming should start
 *	@csum_offset: Offset from csum_start where checksum should be stored
 *	@priority: Packet queueing priority
 *	@local_df: allow local fragmentation
 *	@cloned: Head may be cloned (check refcnt to be sure)
 *	@ip_summed: Driver fed us an IP checksum
 *	@nohdr: Payload reference only, must not modify header
 *	@nfctinfo: Relationship of this skb to the connection
 *	@pkt_type: Packet class
 *	@fclone: skbuff clone status
 *	@ipvs_property: skbuff is owned by ipvs
 *	@peeked: this packet has been seen already, so stats have been
 *		done for it, don't do them again
 *	@nf_trace: netfilter packet trace flag
 *	@protocol: Packet protocol from driver
 *	@destructor: Destruct function
 *	@nfct: Associated connection, if any
 *	@nf_bridge: Saved data about a bridged frame - see br_netfilter.c
 *	@skb_iif: ifindex of device we arrived on
 *	@tc_index: Traffic control index
 *	@tc_verd: traffic control verdict
 *	@rxhash: the packet hash computed on receive
 *	@queue_mapping: Queue mapping for multiqueue devices
 *	@ndisc_nodetype: router type (from link layer)
 *	@ooo_okay: allow the mapping of a socket to a queue to be changed
 *	@l4_rxhash: indicate rxhash is a canonical 4-tuple hash over transport
 *		ports.
 *	@wifi_acked_valid: wifi_acked was set
 *	@wifi_acked: whether frame was acked on wifi or not
 *	@no_fcs: Request NIC to treat last 4 bytes as Ethernet FCS
 *	@dma_cookie: a cookie to one of several possible DMA operations
 *		done by skb DMA functions
 *	@napi_id: id of the NAPI struct this skb came from
 *	@secmark: security marking
 *	@mark: Generic packet mark
 *	@dropcount: total number of sk_receive_queue overflows
 *	@vlan_proto: vlan encapsulation protocol
 *	@vlan_tci: vlan tag control information
 *	@inner_protocol: Protocol (encapsulation)
 *	@inner_transport_header: Inner transport layer header (encapsulation)
 *	@inner_network_header: Network layer header (encapsulation)
 *	@inner_mac_header: Link layer header (encapsulation)
 *	@transport_header: Transport layer header
 *	@network_header: Network layer header
 *	@mac_header: Link layer header
 *	@tail: Tail pointer
 *	@end: End pointer
 *	@head: Head of buffer
 *	@data: Data head pointer
 *	@truesize: Buffer size
 *	@users: User count - see {datagram,tcp}.c
 */

struct sk_buff {
	/* These two members must be first. */
	struct sk_buff		*next;
	struct sk_buff		*prev;

	ktime_t			tstamp;

	struct sock		*sk;
	struct net_device	*dev;

	/*
	 * This is the control buffer. It is free to use for every
	 * layer. Please put your private variables there. If you
	 * want to keep them across layers you have to do a skb_clone()
	 * first. This is owned by whoever has the skb queued ATM.
	 */
	char			cb[48] __aligned(8);

	unsigned long		_skb_refdst;
#ifdef CONFIG_XFRM
	struct sec_path		*sp;
#endif
	unsigned int		len,
				data_len;
	__u16			mac_len,
				hdr_len;
	union {
		__wsum		csum;
		struct {
			__u16	csum_start;
			__u16	csum_offset;
		};
	};
	__u32			priority;
	kmemcheck_bitfield_begin(flags1);
	__u8			local_df:1,
				cloned:1,
				ip_summed:2,
				nohdr:1,
				nfctinfo:3;
	__u8			pkt_type:3,
				fclone:2,
				ipvs_property:1,
				peeked:1,
				nf_trace:1;
	kmemcheck_bitfield_end(flags1);
	__be16			protocol;

	void			(*destructor)(struct sk_buff *skb);
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
	struct nf_conntrack	*nfct;
#endif
#ifdef CONFIG_BRIDGE_NETFILTER
	struct nf_bridge_info	*nf_bridge;
#endif

	int			skb_iif;

	__u32			rxhash;

	__be16			vlan_proto;
	__u16			vlan_tci;

#ifdef CONFIG_NET_SCHED
	__u16			tc_index;	/* traffic control index */
#ifdef CONFIG_NET_CLS_ACT
	__u16			tc_verd;	/* traffic control verdict */
#endif
#endif

	__u16			queue_mapping;
	kmemcheck_bitfield_begin(flags2);
#ifdef CONFIG_IPV6_NDISC_NODETYPE
	__u8			ndisc_nodetype:2;
#endif
	__u8			pfmemalloc:1;
	__u8			ooo_okay:1;
	__u8			l4_rxhash:1;
	__u8			wifi_acked_valid:1;
	__u8			wifi_acked:1;
	__u8			no_fcs:1;
	__u8			head_frag:1;
	/* Encapsulation protocol and NIC drivers should use
	 * this flag to indicate to each other if the skb contains
	 * an encapsulated packet or not, and maybe use the inner packet
	 * headers if needed
	 */
	__u8			encapsulation:1;
	/* 6/8 bit hole (depending on ndisc_nodetype presence) */
	kmemcheck_bitfield_end(flags2);

#if defined CONFIG_NET_DMA || defined CONFIG_NET_RX_BUSY_POLL
	union {
		unsigned int	napi_id;
		dma_cookie_t	dma_cookie;
	};
#endif
#ifdef CONFIG_NETWORK_SECMARK
	__u32			secmark;
#endif
	union {
		__u32		mark;
		__u32		dropcount;
		__u32		reserved_tailroom;
	};

	__be16			inner_protocol;
	__u16			inner_transport_header;
	__u16			inner_network_header;
	__u16			inner_mac_header;
	__u16			transport_header;
	__u16			network_header;
	__u16			mac_header;
	/* These elements must be at the end, see alloc_skb() for details. */
	sk_buff_data_t		tail;
	sk_buff_data_t		end;
	unsigned char		*head,
				*data;
	unsigned int		truesize;
	atomic_t		users;
};

#ifdef __KERNEL__
/*
 *	Handling routines are only of interest to the kernel
 */
#include <linux/slab.h>


#define SKB_ALLOC_FCLONE	0x01
#define SKB_ALLOC_RX		0x02

/* Returns true if the skb was allocated from PFMEMALLOC reserves */
static inline bool skb_pfmemalloc(const struct sk_buff *skb)
{
	return unlikely(skb->pfmemalloc);
}

/*
 * skb might have a dst pointer attached, refcounted or not.
 * _skb_refdst low order bit is set if refcount was _not_ taken
 */
#define SKB_DST_NOREF	1UL
#define SKB_DST_PTRMASK	~(SKB_DST_NOREF)

/**
 * skb_dst - returns skb dst_entry
 * @skb: buffer
 *
 * Returns skb dst_entry, regardless of reference taken or not.
 */
static inline struct dst_entry *skb_dst(const struct sk_buff *skb)
{
	/* If refdst was not refcounted, check we still are in a
	 * rcu_read_lock section
	 */
	WARN_ON((skb->_skb_refdst & SKB_DST_NOREF) &&
		!rcu_read_lock_held() &&
		!rcu_read_lock_bh_held());
	return (struct dst_entry *)(skb->_skb_refdst & SKB_DST_PTRMASK);
}

/**
 * skb_dst_set - sets skb dst
 * @skb: buffer
 * @dst: dst entry
 *
 * Sets skb dst, assuming a reference was taken on dst and should
 * be released by skb_dst_drop()
 */
static inline void skb_dst_set(struct sk_buff *skb, struct dst_entry *dst)
{
	skb->_skb_refdst = (unsigned long)dst;
}
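
/*
 * Editor's sketch (not in the original header): attaching a refcounted
 * dst. dst_hold() comes from net/dst.h; guarded out, documentation only.
 */
#if 0
	dst_hold(dst);		/* reference owned by the skb from now on */
	skb_dst_set(skb, dst);	/* released later via skb_dst_drop() */
#endif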

void __skb_dst_set_noref(struct sk_buff *skb, struct dst_entry *dst,
			 bool force);

/**
 * skb_dst_set_noref - sets skb dst, hopefully, without taking reference
 * @skb: buffer
 * @dst: dst entry
 *
 * Sets skb dst, assuming a reference was not taken on dst.
 * If dst entry is cached, we do not take reference and dst_release
 * will be avoided by refdst_drop. If dst entry is not cached, we take
 * reference, so that last dst_release can destroy the dst immediately.
 */
static inline void skb_dst_set_noref(struct sk_buff *skb, struct dst_entry *dst)
{
	__skb_dst_set_noref(skb, dst, false);
}

/**
 * skb_dst_set_noref_force - sets skb dst, without taking reference
 * @skb: buffer
 * @dst: dst entry
 *
 * Sets skb dst, assuming a reference was not taken on dst.
 * No reference is taken and no dst_release will be called. While for
 * cached dsts deferred reclaim is a basic feature, for entries that are
 * not cached it is the caller's job to guarantee that the last dst_release
 * for the provided dst happens when nobody uses it, e.g. after an RCU
 * grace period.
 */
static inline void skb_dst_set_noref_force(struct sk_buff *skb,
					   struct dst_entry *dst)
{
	__skb_dst_set_noref(skb, dst, true);
}

/**
 * skb_dst_is_noref - Test if skb dst isn't refcounted
 * @skb: buffer
 */
static inline bool skb_dst_is_noref(const struct sk_buff *skb)
{
	return (skb->_skb_refdst & SKB_DST_NOREF) && skb_dst(skb);
}

static inline struct rtable *skb_rtable(const struct sk_buff *skb)
{
	return (struct rtable *)skb_dst(skb);
}

void kfree_skb(struct sk_buff *skb);
void kfree_skb_list(struct sk_buff *segs);
void skb_tx_error(struct sk_buff *skb);
void consume_skb(struct sk_buff *skb);
void __kfree_skb(struct sk_buff *skb);
extern struct kmem_cache *skbuff_head_cache;

void kfree_skb_partial(struct sk_buff *skb, bool head_stolen);
bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from,
		      bool *fragstolen, int *delta_truesize);

struct sk_buff *__alloc_skb(unsigned int size, gfp_t priority, int flags,
			    int node);
struct sk_buff *build_skb(void *data, unsigned int frag_size);
static inline struct sk_buff *alloc_skb(unsigned int size,
					gfp_t priority)
{
	return __alloc_skb(size, priority, 0, NUMA_NO_NODE);
}

static inline struct sk_buff *alloc_skb_fclone(unsigned int size,
					       gfp_t priority)
{
	return __alloc_skb(size, priority, SKB_ALLOC_FCLONE, NUMA_NO_NODE);
}

struct sk_buff *__alloc_skb_head(gfp_t priority, int node);
static inline struct sk_buff *alloc_skb_head(gfp_t priority)
{
	return __alloc_skb_head(priority, -1);
}

struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src);
int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask);
struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t priority);
struct sk_buff *skb_copy(const struct sk_buff *skb, gfp_t priority);
struct sk_buff *__pskb_copy(struct sk_buff *skb, int headroom, gfp_t gfp_mask);

int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail, gfp_t gfp_mask);
struct sk_buff *skb_realloc_headroom(struct sk_buff *skb,
				     unsigned int headroom);
struct sk_buff *skb_copy_expand(const struct sk_buff *skb, int newheadroom,
				int newtailroom, gfp_t priority);
int skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset,
		 int len);
int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer);
int skb_pad(struct sk_buff *skb, int pad);
#define dev_kfree_skb(a)	consume_skb(a)

int skb_append_datato_frags(struct sock *sk, struct sk_buff *skb,
			    int getfrag(void *from, char *to, int offset,
					int len, int odd, struct sk_buff *skb),
			    void *from, int length);

struct skb_seq_state {
	__u32		lower_offset;
	__u32		upper_offset;
	__u32		frag_idx;
	__u32		stepped_offset;
	struct sk_buff	*root_skb;
	struct sk_buff	*cur_skb;
	__u8		*frag_data;
};

void skb_prepare_seq_read(struct sk_buff *skb, unsigned int from,
			  unsigned int to, struct skb_seq_state *st);
unsigned int skb_seq_read(unsigned int consumed, const u8 **data,
			  struct skb_seq_state *st);
void skb_abort_seq_read(struct skb_seq_state *st);

unsigned int skb_find_text(struct sk_buff *skb, unsigned int from,
			   unsigned int to, struct ts_config *config,
			   struct ts_state *state);

/*
 * Packet hash types specify the type of hash in skb_set_hash.
 *
 * Hash types refer to the protocol layer addresses which are used to
 * construct a packet's hash. The hashes are used to differentiate or identify
 * flows of the protocol layer for the hash type. Hash types are either
 * layer-2 (L2), layer-3 (L3), or layer-4 (L4).
 *
 * Properties of hashes:
 *
 * 1) Two packets in different flows have different hash values
 * 2) Two packets in the same flow should have the same hash value
 *
 * A hash at a higher layer is considered to be more specific. A driver should
 * set the most specific hash possible.
 *
 * A driver cannot indicate a more specific hash than the layer at which a hash
 * was computed. For instance an L3 hash cannot be set as an L4 hash.
 *
 * A driver may indicate a hash level which is less specific than the
 * actual layer the hash was computed on. For instance, a hash computed
 * at L4 may be considered an L3 hash. This should only be done if the
 * driver can't unambiguously determine that the HW computed the hash at
 * the higher layer. Note that the "should" in the second property above
 * permits this.
 */
enum pkt_hash_types {
	PKT_HASH_TYPE_NONE,	/* Undefined type */
	PKT_HASH_TYPE_L2,	/* Input: src_MAC, dest_MAC */
	PKT_HASH_TYPE_L3,	/* Input: src_IP, dst_IP */
	PKT_HASH_TYPE_L4,	/* Input: src_IP, dst_IP, src_port, dst_port */
};

static inline void
skb_set_hash(struct sk_buff *skb, __u32 hash, enum pkt_hash_types type)
{
	skb->l4_rxhash = (type == PKT_HASH_TYPE_L4);
	skb->rxhash = hash;
}
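
/*
 * Editor's sketch (not in the original header): a driver reporting a
 * hardware-computed 4-tuple hash. The rx_desc layout is hypothetical;
 * guarded out, documentation only.
 */
#if 0
	/* HW hashed over the full 4-tuple, so the most specific type applies. */
	skb_set_hash(skb, le32_to_cpu(rx_desc->rss_hash), PKT_HASH_TYPE_L4);
#endif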

void __skb_get_hash(struct sk_buff *skb);
static inline __u32 skb_get_hash(struct sk_buff *skb)
{
	if (!skb->l4_rxhash)
		__skb_get_hash(skb);

	return skb->rxhash;
}

static inline void skb_clear_hash(struct sk_buff *skb)
{
	skb->rxhash = 0;
	skb->l4_rxhash = 0;
}

static inline void skb_clear_hash_if_not_l4(struct sk_buff *skb)
{
	if (!skb->l4_rxhash)
		skb_clear_hash(skb);
}

static inline void skb_copy_hash(struct sk_buff *to, const struct sk_buff *from)
{
	to->rxhash = from->rxhash;
	to->l4_rxhash = from->l4_rxhash;
}

#ifdef NET_SKBUFF_DATA_USES_OFFSET
static inline unsigned char *skb_end_pointer(const struct sk_buff *skb)
{
	return skb->head + skb->end;
}

static inline unsigned int skb_end_offset(const struct sk_buff *skb)
{
	return skb->end;
}
#else
static inline unsigned char *skb_end_pointer(const struct sk_buff *skb)
{
	return skb->end;
}

static inline unsigned int skb_end_offset(const struct sk_buff *skb)
{
	return skb->end - skb->head;
}
#endif

/* Internal */
#define skb_shinfo(SKB)	((struct skb_shared_info *)(skb_end_pointer(SKB)))

static inline struct skb_shared_hwtstamps *skb_hwtstamps(struct sk_buff *skb)
{
	return &skb_shinfo(skb)->hwtstamps;
}

/**
 *	skb_queue_empty - check if a queue is empty
 *	@list: queue head
 *
 *	Returns true if the queue is empty, false otherwise.
 */
static inline int skb_queue_empty(const struct sk_buff_head *list)
{
	return list->next == (struct sk_buff *)list;
}

/**
 *	skb_queue_is_last - check if skb is the last entry in the queue
 *	@list: queue head
 *	@skb: buffer
 *
 *	Returns true if @skb is the last buffer on the list.
 */
static inline bool skb_queue_is_last(const struct sk_buff_head *list,
				     const struct sk_buff *skb)
{
	return skb->next == (struct sk_buff *)list;
}

/**
 *	skb_queue_is_first - check if skb is the first entry in the queue
 *	@list: queue head
 *	@skb: buffer
 *
 *	Returns true if @skb is the first buffer on the list.
 */
static inline bool skb_queue_is_first(const struct sk_buff_head *list,
				      const struct sk_buff *skb)
{
	return skb->prev == (struct sk_buff *)list;
}

/**
 *	skb_queue_next - return the next packet in the queue
 *	@list: queue head
 *	@skb: current buffer
 *
 *	Return the next packet in @list after @skb. It is only valid to
 *	call this if skb_queue_is_last() evaluates to false.
 */
static inline struct sk_buff *skb_queue_next(const struct sk_buff_head *list,
					     const struct sk_buff *skb)
{
	/* This BUG_ON may seem severe, but if we just return then we
	 * are going to dereference garbage.
	 */
	BUG_ON(skb_queue_is_last(list, skb));
	return skb->next;
}

/**
 *	skb_queue_prev - return the prev packet in the queue
 *	@list: queue head
 *	@skb: current buffer
 *
 *	Return the prev packet in @list before @skb. It is only valid to
 *	call this if skb_queue_is_first() evaluates to false.
 */
static inline struct sk_buff *skb_queue_prev(const struct sk_buff_head *list,
					     const struct sk_buff *skb)
{
	/* This BUG_ON may seem severe, but if we just return then we
	 * are going to dereference garbage.
	 */
	BUG_ON(skb_queue_is_first(list, skb));
	return skb->prev;
}

/**
 *	skb_get - reference buffer
 *	@skb: buffer to reference
 *
 *	Makes another reference to a socket buffer and returns a pointer
 *	to the buffer.
 */
static inline struct sk_buff *skb_get(struct sk_buff *skb)
{
	atomic_inc(&skb->users);
	return skb;
}

/*
 * If users == 1, we are the only owner and can avoid a redundant
 * atomic change.
 */

/**
 *	skb_cloned - is the buffer a clone
 *	@skb: buffer to check
 *
 *	Returns true if the buffer was generated with skb_clone() and is
 *	one of multiple shared copies of the buffer. Cloned buffers are
 *	shared data so must not be written to under normal circumstances.
 */
static inline int skb_cloned(const struct sk_buff *skb)
{
	return skb->cloned &&
	       (atomic_read(&skb_shinfo(skb)->dataref) & SKB_DATAREF_MASK) != 1;
}

static inline int skb_unclone(struct sk_buff *skb, gfp_t pri)
{
	might_sleep_if(pri & __GFP_WAIT);

	if (skb_cloned(skb))
		return pskb_expand_head(skb, 0, 0, pri);

	return 0;
}

/**
 *	skb_header_cloned - is the header a clone
 *	@skb: buffer to check
 *
 *	Returns true if modifying the header part of the buffer requires
 *	the data to be copied.
 */
static inline int skb_header_cloned(const struct sk_buff *skb)
{
	int dataref;

	if (!skb->cloned)
		return 0;

	dataref = atomic_read(&skb_shinfo(skb)->dataref);
	dataref = (dataref & SKB_DATAREF_MASK) - (dataref >> SKB_DATAREF_SHIFT);
	return dataref != 1;
}

/**
 *	skb_header_release - release reference to header
 *	@skb: buffer to operate on
 *
 *	Drop a reference to the header part of the buffer. This is done
 *	by acquiring a payload reference. You must not read from the header
 *	part of skb->data after this.
 */
static inline void skb_header_release(struct sk_buff *skb)
{
	BUG_ON(skb->nohdr);
	skb->nohdr = 1;
	atomic_add(1 << SKB_DATAREF_SHIFT, &skb_shinfo(skb)->dataref);
}

/**
 *	skb_shared - is the buffer shared
 *	@skb: buffer to check
 *
 *	Returns true if more than one person has a reference to this
 *	buffer.
 */
static inline int skb_shared(const struct sk_buff *skb)
{
	return atomic_read(&skb->users) != 1;
}

/**
 *	skb_share_check - check if buffer is shared and if so clone it
 *	@skb: buffer to check
 *	@pri: priority for memory allocation
 *
 *	If the buffer is shared the buffer is cloned and the old copy
 *	drops a reference. A new clone with a single reference is returned.
 *	If the buffer is not shared the original buffer is returned. When
 *	called from interrupt state or with spinlocks held, @pri must
 *	be %GFP_ATOMIC.
 *
 *	NULL is returned on a memory allocation failure.
 */
static inline struct sk_buff *skb_share_check(struct sk_buff *skb, gfp_t pri)
{
	might_sleep_if(pri & __GFP_WAIT);
	if (skb_shared(skb)) {
		struct sk_buff *nskb = skb_clone(skb, pri);

		if (likely(nskb))
			consume_skb(skb);
		else
			kfree_skb(skb);
		skb = nskb;
	}
	return skb;
}
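
/*
 * Editor's sketch (not in the original header): the usual calling
 * pattern before modifying a buffer that may be shared. Guarded out,
 * documentation only.
 */
#if 0
	skb = skb_share_check(skb, GFP_ATOMIC);
	if (!skb)
		return NET_RX_DROP;	/* original was freed on failure */
	/* skb now has a single owner and may be modified */
#endif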

/*
 *	Copy shared buffers into a new sk_buff. We effectively do COW on
 *	packets to handle cases where we have a local reader and forward
 *	and a couple of other messy ones. The normal one is tcpdumping
 *	a packet that's being forwarded.
 */

/**
 *	skb_unshare - make a copy of a shared buffer
 *	@skb: buffer to check
 *	@pri: priority for memory allocation
 *
 *	If the socket buffer is a clone then this function creates a new
 *	copy of the data, drops a reference count on the old copy and returns
 *	the new copy with the reference count at 1. If the buffer is not a clone
 *	the original buffer is returned. When called with a spinlock held or
 *	from interrupt state @pri must be %GFP_ATOMIC
 *
 *	%NULL is returned on a memory allocation failure.
 */
static inline struct sk_buff *skb_unshare(struct sk_buff *skb,
					  gfp_t pri)
{
	might_sleep_if(pri & __GFP_WAIT);
	if (skb_cloned(skb)) {
		struct sk_buff *nskb = skb_copy(skb, pri);
		kfree_skb(skb);	/* Free our shared copy */
		skb = nskb;
	}
	return skb;
}

/**
 *	skb_peek - peek at the head of an &sk_buff_head
 *	@list_: list to peek at
 *
 *	Peek an &sk_buff. Unlike most other operations you _MUST_
 *	be careful with this one. A peek leaves the buffer on the
 *	list and someone else may run off with it. You must hold
 *	the appropriate locks or have a private queue to do this.
 *
 *	Returns %NULL for an empty list or a pointer to the head element.
 *	The reference count is not incremented and the reference is therefore
 *	volatile. Use with caution.
 */
static inline struct sk_buff *skb_peek(const struct sk_buff_head *list_)
{
	struct sk_buff *skb = list_->next;

	if (skb == (struct sk_buff *)list_)
		skb = NULL;
	return skb;
}
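
/*
 * Editor's sketch (not in the original header): peeking safely under
 * the queue's own lock. examine() is hypothetical; guarded out,
 * documentation only.
 */
#if 0
	/* with struct sk_buff_head *q obtained elsewhere */
	unsigned long flags;
	struct sk_buff *skb;

	spin_lock_irqsave(&q->lock, flags);
	skb = skb_peek(q);
	if (skb)
		examine(skb);	/* skb stays queued; do not free it here */
	spin_unlock_irqrestore(&q->lock, flags);
#endif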

/**
 *	skb_peek_next - peek skb following the given one from a queue
 *	@skb: skb to start from
 *	@list_: list to peek at
 *
 *	Returns %NULL when the end of the list is met or a pointer to the
 *	next element. The reference count is not incremented and the
 *	reference is therefore volatile. Use with caution.
 */
static inline struct sk_buff *skb_peek_next(struct sk_buff *skb,
					    const struct sk_buff_head *list_)
{
	struct sk_buff *next = skb->next;

	if (next == (struct sk_buff *)list_)
		next = NULL;
	return next;
}

/**
 *	skb_peek_tail - peek at the tail of an &sk_buff_head
 *	@list_: list to peek at
 *
 *	Peek an &sk_buff. Unlike most other operations you _MUST_
 *	be careful with this one. A peek leaves the buffer on the
 *	list and someone else may run off with it. You must hold
 *	the appropriate locks or have a private queue to do this.
 *
 *	Returns %NULL for an empty list or a pointer to the tail element.
 *	The reference count is not incremented and the reference is therefore
 *	volatile. Use with caution.
 */
static inline struct sk_buff *skb_peek_tail(const struct sk_buff_head *list_)
{
	struct sk_buff *skb = list_->prev;

	if (skb == (struct sk_buff *)list_)
		skb = NULL;
	return skb;
}

/**
 *	skb_queue_len	- get queue length
 *	@list_: list to measure
 *
 *	Return the length of an &sk_buff queue.
 */
static inline __u32 skb_queue_len(const struct sk_buff_head *list_)
{
	return list_->qlen;
}

/**
 *	__skb_queue_head_init - initialize non-spinlock portions of sk_buff_head
 *	@list: queue to initialize
 *
 *	This initializes only the list and queue length aspects of
 *	an sk_buff_head object. This allows initializing the list
 *	aspects of an sk_buff_head without reinitializing things like
 *	the spinlock. It can also be used for on-stack sk_buff_head
 *	objects where the spinlock is known to not be used.
 */
static inline void __skb_queue_head_init(struct sk_buff_head *list)
{
	list->prev = list->next = (struct sk_buff *)list;
	list->qlen = 0;
}

/*
 * This function creates a split out lock class for each invocation;
 * this is needed for now since a whole lot of users of the skb-queue
 * infrastructure in drivers have different locking usage (in hardirq)
 * than the networking core (in softirq only). In the long run either the
 * network layer or drivers should need annotation to consolidate the
 * main types of usage into 3 classes.
 */
static inline void skb_queue_head_init(struct sk_buff_head *list)
{
	spin_lock_init(&list->lock);
	__skb_queue_head_init(list);
}

static inline void skb_queue_head_init_class(struct sk_buff_head *list,
					     struct lock_class_key *class)
{
	skb_queue_head_init(list);
	lockdep_set_class(&list->lock, class);
}

/*
 *	Insert an sk_buff on a list.
 *
 *	The "__skb_xxxx()" functions are the non-atomic ones that
 *	can only be called with interrupts disabled.
 */
void skb_insert(struct sk_buff *old, struct sk_buff *newsk,
		struct sk_buff_head *list);
static inline void __skb_insert(struct sk_buff *newsk,
				struct sk_buff *prev, struct sk_buff *next,
				struct sk_buff_head *list)
{
	newsk->next = next;
	newsk->prev = prev;
	next->prev  = prev->next = newsk;
	list->qlen++;
}

static inline void __skb_queue_splice(const struct sk_buff_head *list,
				      struct sk_buff *prev,
				      struct sk_buff *next)
{
	struct sk_buff *first = list->next;
	struct sk_buff *last = list->prev;

	first->prev = prev;
	prev->next = first;

	last->next = next;
	next->prev = last;
}

/**
 *	skb_queue_splice - join two skb lists; this is designed for stacks
 *	@list: the new list to add
 *	@head: the place to add it in the first list
 */
static inline void skb_queue_splice(const struct sk_buff_head *list,
				    struct sk_buff_head *head)
{
	if (!skb_queue_empty(list)) {
		__skb_queue_splice(list, (struct sk_buff *) head, head->next);
		head->qlen += list->qlen;
	}
}

/**
 *	skb_queue_splice_init - join two skb lists and reinitialise the emptied list
 *	@list: the new list to add
 *	@head: the place to add it in the first list
 *
 *	The list at @list is reinitialised
 */
static inline void skb_queue_splice_init(struct sk_buff_head *list,
					 struct sk_buff_head *head)
{
	if (!skb_queue_empty(list)) {
		__skb_queue_splice(list, (struct sk_buff *) head, head->next);
		head->qlen += list->qlen;
		__skb_queue_head_init(list);
	}
}

/**
 *	skb_queue_splice_tail - join two skb lists, each list being a queue
 *	@list: the new list to add
 *	@head: the place to add it in the first list
 */
static inline void skb_queue_splice_tail(const struct sk_buff_head *list,
					 struct sk_buff_head *head)
{
	if (!skb_queue_empty(list)) {
		__skb_queue_splice(list, head->prev, (struct sk_buff *) head);
		head->qlen += list->qlen;
	}
}

/**
 *	skb_queue_splice_tail_init - join two skb lists and reinitialise the emptied list
 *	@list: the new list to add
 *	@head: the place to add it in the first list
 *
 *	Each of the lists is a queue.
 *	The list at @list is reinitialised
 */
static inline void skb_queue_splice_tail_init(struct sk_buff_head *list,
					      struct sk_buff_head *head)
{
	if (!skb_queue_empty(list)) {
		__skb_queue_splice(list, head->prev, (struct sk_buff *) head);
		head->qlen += list->qlen;
		__skb_queue_head_init(list);
	}
}

/**
 *	__skb_queue_after - queue a buffer after a given buffer
 *	@list: list to use
 *	@prev: place after this buffer
 *	@newsk: buffer to queue
 *
 *	Queue a buffer in the middle of a list. This function takes no locks
 *	and you must therefore hold required locks before calling it.
 *
 *	A buffer cannot be placed on two lists at the same time.
 */
static inline void __skb_queue_after(struct sk_buff_head *list,
				     struct sk_buff *prev,
				     struct sk_buff *newsk)
{
	__skb_insert(newsk, prev, prev->next, list);
}

void skb_append(struct sk_buff *old, struct sk_buff *newsk,
		struct sk_buff_head *list);

static inline void __skb_queue_before(struct sk_buff_head *list,
				      struct sk_buff *next,
				      struct sk_buff *newsk)
{
	__skb_insert(newsk, next->prev, next, list);
}

/**
 *	__skb_queue_head - queue a buffer at the list head
 *	@list: list to use
 *	@newsk: buffer to queue
 *
 *	Queue a buffer at the start of a list. This function takes no locks
 *	and you must therefore hold required locks before calling it.
 *
 *	A buffer cannot be placed on two lists at the same time.
 */
void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk);
static inline void __skb_queue_head(struct sk_buff_head *list,
				    struct sk_buff *newsk)
{
	__skb_queue_after(list, (struct sk_buff *)list, newsk);
}

/**
 *	__skb_queue_tail - queue a buffer at the list tail
 *	@list: list to use
 *	@newsk: buffer to queue
 *
 *	Queue a buffer at the end of a list. This function takes no locks
 *	and you must therefore hold required locks before calling it.
 *
 *	A buffer cannot be placed on two lists at the same time.
 */
void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk);
static inline void __skb_queue_tail(struct sk_buff_head *list,
				    struct sk_buff *newsk)
{
	__skb_queue_before(list, (struct sk_buff *)list, newsk);
}

/*
 * remove sk_buff from list. _Must_ be called atomically, and with
 * the list known.
 */
void skb_unlink(struct sk_buff *skb, struct sk_buff_head *list);
static inline void __skb_unlink(struct sk_buff *skb, struct sk_buff_head *list)
{
	struct sk_buff *next, *prev;

	list->qlen--;
	next	   = skb->next;
	prev	   = skb->prev;
	skb->next  = skb->prev = NULL;
	next->prev = prev;
	prev->next = next;
}

/**
 *	__skb_dequeue - remove from the head of the queue
 *	@list: list to dequeue from
 *
 *	Remove the head of the list. This function does not take any locks
 *	so must be used with appropriate locks held only. The head item is
 *	returned or %NULL if the list is empty.
 */
struct sk_buff *skb_dequeue(struct sk_buff_head *list);
static inline struct sk_buff *__skb_dequeue(struct sk_buff_head *list)
{
	struct sk_buff *skb = skb_peek(list);
	if (skb)
		__skb_unlink(skb, list);
	return skb;
}

/**
 *	__skb_dequeue_tail - remove from the tail of the queue
 *	@list: list to dequeue from
 *
 *	Remove the tail of the list. This function does not take any locks
 *	so must be used with appropriate locks held only. The tail item is
 *	returned or %NULL if the list is empty.
 */
struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list);
static inline struct sk_buff *__skb_dequeue_tail(struct sk_buff_head *list)
{
	struct sk_buff *skb = skb_peek_tail(list);
	if (skb)
		__skb_unlink(skb, list);
	return skb;
}


static inline bool skb_is_nonlinear(const struct sk_buff *skb)
{
	return skb->data_len;
}

static inline unsigned int skb_headlen(const struct sk_buff *skb)
{
	return skb->len - skb->data_len;
}

static inline int skb_pagelen(const struct sk_buff *skb)
{
	int i, len = 0;

	for (i = (int)skb_shinfo(skb)->nr_frags - 1; i >= 0; i--)
		len += skb_frag_size(&skb_shinfo(skb)->frags[i]);
	return len + skb_headlen(skb);
}

/**
 * __skb_fill_page_desc - initialise a paged fragment in an skb
 * @skb: buffer containing fragment to be initialised
 * @i: paged fragment index to initialise
 * @page: the page to use for this fragment
 * @off: the offset to the data within @page
 * @size: the length of the data
 *
 * Initialises the @i'th fragment of @skb to point to @size bytes at
 * offset @off within @page.
 *
 * Does not take any additional reference on the fragment.
 */
static inline void __skb_fill_page_desc(struct sk_buff *skb, int i,
					struct page *page, int off, int size)
{
	skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

	/*
	 * Propagate page->pfmemalloc to the skb if we can. The problem is
	 * that not all callers have unique ownership of the page. If
	 * pfmemalloc is set, we check the mapping as a mapping implies
	 * page->index is set (index and pfmemalloc share space).
	 * If it's a valid mapping, we cannot use page->pfmemalloc but we
	 * do not lose pfmemalloc information as the pages would not be
	 * allocated using __GFP_MEMALLOC.
	 */
	frag->page.p	  = page;
	frag->page_offset = off;
	skb_frag_size_set(frag, size);

	page = compound_head(page);
	if (page->pfmemalloc && !page->mapping)
		skb->pfmemalloc	= true;
}

/**
 * skb_fill_page_desc - initialise a paged fragment in an skb
 * @skb: buffer containing fragment to be initialised
 * @i: paged fragment index to initialise
 * @page: the page to use for this fragment
 * @off: the offset to the data within @page
 * @size: the length of the data
 *
 * As per __skb_fill_page_desc() -- initialises the @i'th fragment of
 * @skb to point to @size bytes at offset @off within @page. In
 * addition updates @skb such that @i is the last fragment.
 *
 * Does not take any additional reference on the fragment.
 */
static inline void skb_fill_page_desc(struct sk_buff *skb, int i,
				      struct page *page, int off, int size)
{
	__skb_fill_page_desc(skb, i, page, off, size);
	skb_shinfo(skb)->nr_frags = i + 1;
}
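
/*
 * Editor's sketch (not in the original header): attaching one page of
 * payload as fragment 0, with the length/truesize bookkeeping callers
 * are expected to do themselves. Guarded out, documentation only.
 */
#if 0
	skb_fill_page_desc(skb, 0, page, 0, size);
	skb->len	+= size;
	skb->data_len	+= size;
	skb->truesize	+= PAGE_SIZE;
#endif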

void skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page, int off,
		     int size, unsigned int truesize);

void skb_coalesce_rx_frag(struct sk_buff *skb, int i, int size,
			  unsigned int truesize);

#define SKB_PAGE_ASSERT(skb)	BUG_ON(skb_shinfo(skb)->nr_frags)
#define SKB_FRAG_ASSERT(skb)	BUG_ON(skb_has_frag_list(skb))
#define SKB_LINEAR_ASSERT(skb)	BUG_ON(skb_is_nonlinear(skb))

#ifdef NET_SKBUFF_DATA_USES_OFFSET
static inline unsigned char *skb_tail_pointer(const struct sk_buff *skb)
{
	return skb->head + skb->tail;
}

static inline void skb_reset_tail_pointer(struct sk_buff *skb)
{
	skb->tail = skb->data - skb->head;
}

static inline void skb_set_tail_pointer(struct sk_buff *skb, const int offset)
{
	skb_reset_tail_pointer(skb);
	skb->tail += offset;
}

#else /* NET_SKBUFF_DATA_USES_OFFSET */
static inline unsigned char *skb_tail_pointer(const struct sk_buff *skb)
{
	return skb->tail;
}

static inline void skb_reset_tail_pointer(struct sk_buff *skb)
{
	skb->tail = skb->data;
}

static inline void skb_set_tail_pointer(struct sk_buff *skb, const int offset)
{
	skb->tail = skb->data + offset;
}

#endif /* NET_SKBUFF_DATA_USES_OFFSET */

/*
 *	Add data to an sk_buff
 */
unsigned char *pskb_put(struct sk_buff *skb, struct sk_buff *tail, int len);
unsigned char *skb_put(struct sk_buff *skb, unsigned int len);
static inline unsigned char *__skb_put(struct sk_buff *skb, unsigned int len)
{
	unsigned char *tmp = skb_tail_pointer(skb);
	SKB_LINEAR_ASSERT(skb);
	skb->tail += len;
	skb->len  += len;
	return tmp;
}

unsigned char *skb_push(struct sk_buff *skb, unsigned int len);
static inline unsigned char *__skb_push(struct sk_buff *skb, unsigned int len)
{
	skb->data -= len;
	skb->len  += len;
	return skb->data;
}

unsigned char *skb_pull(struct sk_buff *skb, unsigned int len);
static inline unsigned char *__skb_pull(struct sk_buff *skb, unsigned int len)
{
	skb->len -= len;
	BUG_ON(skb->len < skb->data_len);
	return skb->data += len;
}

static inline unsigned char *skb_pull_inline(struct sk_buff *skb, unsigned int len)
{
	return unlikely(len > skb->len) ? NULL : __skb_pull(skb, len);
}

unsigned char *__pskb_pull_tail(struct sk_buff *skb, int delta);

static inline unsigned char *__pskb_pull(struct sk_buff *skb, unsigned int len)
{
	if (len > skb_headlen(skb) &&
	    !__pskb_pull_tail(skb, len - skb_headlen(skb)))
		return NULL;
	skb->len -= len;
	return skb->data += len;
}

static inline unsigned char *pskb_pull(struct sk_buff *skb, unsigned int len)
{
	return unlikely(len > skb->len) ? NULL : __pskb_pull(skb, len);
}

static inline int pskb_may_pull(struct sk_buff *skb, unsigned int len)
{
	if (likely(len <= skb_headlen(skb)))
		return 1;
	if (unlikely(len > skb->len))
		return 0;
	return __pskb_pull_tail(skb, len - skb_headlen(skb)) != NULL;
}
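
/*
 * Editor's sketch (not in the original header): ensuring a header is in
 * the linear area before dereferencing it. struct iphdr and ip_hdr()
 * come from other headers; guarded out, documentation only.
 */
#if 0
	const struct iphdr *iph;

	if (!pskb_may_pull(skb, sizeof(*iph)))
		goto drop;	/* header unavailable even after pulling */
	iph = ip_hdr(skb);	/* safe: first sizeof(*iph) bytes are linear */
#endif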

/**
 *	skb_headroom - bytes at buffer head
 *	@skb: buffer to check
 *
 *	Return the number of bytes of free space at the head of an &sk_buff.
 */
static inline unsigned int skb_headroom(const struct sk_buff *skb)
{
	return skb->data - skb->head;
}

/**
 *	skb_tailroom - bytes at buffer end
 *	@skb: buffer to check
 *
 *	Return the number of bytes of free space at the tail of an sk_buff
 */
static inline int skb_tailroom(const struct sk_buff *skb)
{
	return skb_is_nonlinear(skb) ? 0 : skb->end - skb->tail;
}

/**
 *	skb_availroom - bytes at buffer end
 *	@skb: buffer to check
 *
 *	Return the number of bytes of free space at the tail of an sk_buff
 *	allocated by sk_stream_alloc()
 */
static inline int skb_availroom(const struct sk_buff *skb)
{
	if (skb_is_nonlinear(skb))
		return 0;

	return skb->end - skb->tail - skb->reserved_tailroom;
}

/**
 *	skb_reserve - adjust headroom
 *	@skb: buffer to alter
 *	@len: bytes to move
 *
 *	Increase the headroom of an empty &sk_buff by reducing the tail
 *	room. This is only allowed for an empty buffer.
 */
static inline void skb_reserve(struct sk_buff *skb, int len)
{
	skb->data += len;
	skb->tail += len;
}
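
/*
 * Editor's sketch (not in the original header): the classic allocate /
 * reserve / put sequence for building an outgoing packet. hlen, dlen and
 * data are assumptions; guarded out, documentation only.
 */
#if 0
	skb = alloc_skb(hlen + dlen, GFP_ATOMIC);
	if (!skb)
		return -ENOMEM;
	skb_reserve(skb, hlen);			/* headroom for lower layers */
	memcpy(skb_put(skb, dlen), data, dlen);	/* append dlen bytes of payload */
#endif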
1576
static inline void skb_reset_inner_headers(struct sk_buff *skb)
{
	skb->inner_mac_header = skb->mac_header;
	skb->inner_network_header = skb->network_header;
	skb->inner_transport_header = skb->transport_header;
}

static inline void skb_reset_mac_len(struct sk_buff *skb)
{
	skb->mac_len = skb->network_header - skb->mac_header;
}

static inline unsigned char *skb_inner_transport_header(const struct sk_buff
							*skb)
{
	return skb->head + skb->inner_transport_header;
}

static inline void skb_reset_inner_transport_header(struct sk_buff *skb)
{
	skb->inner_transport_header = skb->data - skb->head;
}

static inline void skb_set_inner_transport_header(struct sk_buff *skb,
						  const int offset)
{
	skb_reset_inner_transport_header(skb);
	skb->inner_transport_header += offset;
}

static inline unsigned char *skb_inner_network_header(const struct sk_buff *skb)
{
	return skb->head + skb->inner_network_header;
}

static inline void skb_reset_inner_network_header(struct sk_buff *skb)
{
	skb->inner_network_header = skb->data - skb->head;
}

static inline void skb_set_inner_network_header(struct sk_buff *skb,
						const int offset)
{
	skb_reset_inner_network_header(skb);
	skb->inner_network_header += offset;
}

static inline unsigned char *skb_inner_mac_header(const struct sk_buff *skb)
{
	return skb->head + skb->inner_mac_header;
}

static inline void skb_reset_inner_mac_header(struct sk_buff *skb)
{
	skb->inner_mac_header = skb->data - skb->head;
}

static inline void skb_set_inner_mac_header(struct sk_buff *skb,
					    const int offset)
{
	skb_reset_inner_mac_header(skb);
	skb->inner_mac_header += offset;
}

static inline bool skb_transport_header_was_set(const struct sk_buff *skb)
{
	return skb->transport_header != (typeof(skb->transport_header))~0U;
}

static inline unsigned char *skb_transport_header(const struct sk_buff *skb)
{
	return skb->head + skb->transport_header;
}

static inline void skb_reset_transport_header(struct sk_buff *skb)
{
	skb->transport_header = skb->data - skb->head;
}

static inline void skb_set_transport_header(struct sk_buff *skb,
					    const int offset)
{
	skb_reset_transport_header(skb);
	skb->transport_header += offset;
}

static inline unsigned char *skb_network_header(const struct sk_buff *skb)
{
	return skb->head + skb->network_header;
}

static inline void skb_reset_network_header(struct sk_buff *skb)
{
	skb->network_header = skb->data - skb->head;
}

static inline void skb_set_network_header(struct sk_buff *skb, const int offset)
{
	skb_reset_network_header(skb);
	skb->network_header += offset;
}

static inline unsigned char *skb_mac_header(const struct sk_buff *skb)
{
	return skb->head + skb->mac_header;
}

static inline int skb_mac_header_was_set(const struct sk_buff *skb)
{
	return skb->mac_header != (typeof(skb->mac_header))~0U;
}

static inline void skb_reset_mac_header(struct sk_buff *skb)
{
	skb->mac_header = skb->data - skb->head;
}

static inline void skb_set_mac_header(struct sk_buff *skb, const int offset)
{
	skb_reset_mac_header(skb);
	skb->mac_header += offset;
}

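/* Example (illustrative sketch, not part of the original header): how a
 * receive path typically records header offsets while parsing. Offsets are
 * stored relative to skb->head, so they stay valid across reallocations.
 *
 *	skb_reset_mac_header(skb);
 *	skb_pull(skb, ETH_HLEN);
 *	skb_reset_network_header(skb);
 *	skb_pull(skb, ip_hdr(skb)->ihl * 4);
 *	skb_reset_transport_header(skb);
 *
 * Each reset records the current skb->data position; the skb_*_header()
 * accessors then recompute the pointer from skb->head. The IPv4 parsing
 * step assumes the header was already made available with pskb_may_pull().
 */
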
static inline void skb_probe_transport_header(struct sk_buff *skb,
					      const int offset_hint)
{
	struct flow_keys keys;

	if (skb_transport_header_was_set(skb))
		return;
	else if (skb_flow_dissect(skb, &keys))
		skb_set_transport_header(skb, keys.thoff);
	else
		skb_set_transport_header(skb, offset_hint);
}

static inline void skb_mac_header_rebuild(struct sk_buff *skb)
{
	if (skb_mac_header_was_set(skb)) {
		const unsigned char *old_mac = skb_mac_header(skb);

		skb_set_mac_header(skb, -skb->mac_len);
		memmove(skb_mac_header(skb), old_mac, skb->mac_len);
	}
}

static inline int skb_checksum_start_offset(const struct sk_buff *skb)
{
	return skb->csum_start - skb_headroom(skb);
}

static inline int skb_transport_offset(const struct sk_buff *skb)
{
	return skb_transport_header(skb) - skb->data;
}

static inline u32 skb_network_header_len(const struct sk_buff *skb)
{
	return skb->transport_header - skb->network_header;
}

static inline u32 skb_inner_network_header_len(const struct sk_buff *skb)
{
	return skb->inner_transport_header - skb->inner_network_header;
}

static inline int skb_network_offset(const struct sk_buff *skb)
{
	return skb_network_header(skb) - skb->data;
}

static inline int skb_inner_network_offset(const struct sk_buff *skb)
{
	return skb_inner_network_header(skb) - skb->data;
}

static inline int pskb_network_may_pull(struct sk_buff *skb, unsigned int len)
{
	return pskb_may_pull(skb, skb_network_offset(skb) + len);
}

/*
 * CPUs often take a performance hit when accessing unaligned memory
 * locations. The actual performance hit varies, it can be small if the
 * hardware handles it or large if we have to take an exception and fix it
 * in software.
 *
 * Since an ethernet header is 14 bytes network drivers often end up with
 * the IP header at an unaligned offset. The IP header can be aligned by
 * shifting the start of the packet by 2 bytes. Drivers should do this
 * with:
 *
 * skb_reserve(skb, NET_IP_ALIGN);
 *
 * The downside to this alignment of the IP header is that the DMA is now
 * unaligned. On some architectures the cost of an unaligned DMA is high
 * and this cost outweighs the gains made by aligning the IP header.
 *
 * Since this trade off varies between architectures, we allow NET_IP_ALIGN
 * to be overridden.
 */
#ifndef NET_IP_ALIGN
#define NET_IP_ALIGN	2
#endif

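/* Example (illustrative sketch, not part of the original header): an RX
 * buffer allocation following the advice above; this is essentially what
 * __netdev_alloc_skb_ip_align() below does for the caller. buf_len is a
 * hypothetical driver-chosen receive buffer size.
 *
 *	skb = netdev_alloc_skb(dev, buf_len + NET_IP_ALIGN);
 *	if (skb)
 *		skb_reserve(skb, NET_IP_ALIGN);
 *
 * After the reserve, a 14-byte ethernet header leaves the IP header on a
 * 4-byte boundary.
 */
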
/*
 * The networking layer reserves some headroom in skb data (via
 * dev_alloc_skb). This is used to avoid having to reallocate skb data when
 * the header has to grow. In the default case, if the header has to grow
 * 32 bytes or less we avoid the reallocation.
 *
 * Unfortunately this headroom changes the DMA alignment of the resulting
 * network packet. As for NET_IP_ALIGN, this unaligned DMA is expensive
 * on some architectures. An architecture can override this value,
 * perhaps setting it to a cacheline in size (since that will maintain
 * cacheline alignment of the DMA). It must be a power of 2.
 *
 * Various parts of the networking layer expect at least 32 bytes of
 * headroom, you should not reduce this.
 *
 * Using max(32, L1_CACHE_BYTES) makes sense (especially with RPS)
 * to reduce the average number of cache lines per packet.
 * get_rps_cpus() for example only accesses one 64-byte aligned block:
 * NET_IP_ALIGN(2) + ethernet_header(14) + IP_header(20/40) + ports(8)
 */
#ifndef NET_SKB_PAD
#define NET_SKB_PAD	max(32, L1_CACHE_BYTES)
#endif

int ___pskb_trim(struct sk_buff *skb, unsigned int len);

static inline void __skb_trim(struct sk_buff *skb, unsigned int len)
{
	if (unlikely(skb_is_nonlinear(skb))) {
		WARN_ON(1);
		return;
	}
	skb->len = len;
	skb_set_tail_pointer(skb, len);
}

void skb_trim(struct sk_buff *skb, unsigned int len);

static inline int __pskb_trim(struct sk_buff *skb, unsigned int len)
{
	if (skb->data_len)
		return ___pskb_trim(skb, len);
	__skb_trim(skb, len);
	return 0;
}

static inline int pskb_trim(struct sk_buff *skb, unsigned int len)
{
	return (len < skb->len) ? __pskb_trim(skb, len) : 0;
}

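/* Example (illustrative sketch, not part of the original header): trimming
 * trailing padding once the true datagram length is known, e.g. stripping
 * ethernet padding after reading the IP total length. ip_len is a
 * hypothetical value taken from the parsed header.
 *
 *	if (pskb_trim(skb, ip_len))
 *		goto drop;
 *
 * pskb_trim() handles both linear and paged skbs; __skb_trim() may only be
 * used when the skb is known to be linear.
 */
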
/**
 * pskb_trim_unique - remove end from a paged unique (not cloned) buffer
 * @skb: buffer to alter
 * @len: new length
 *
 * This is identical to pskb_trim except that the caller knows that
 * the skb is not cloned so we should never get an error due to
 * out-of-memory.
 */
static inline void pskb_trim_unique(struct sk_buff *skb, unsigned int len)
{
	int err = pskb_trim(skb, len);
	BUG_ON(err);
}

/**
 * skb_orphan - orphan a buffer
 * @skb: buffer to orphan
 *
 * If a buffer currently has an owner then we call the owner's
 * destructor function and make the @skb unowned. The buffer continues
 * to exist but is no longer charged to its former owner.
 */
static inline void skb_orphan(struct sk_buff *skb)
{
	if (skb->destructor) {
		skb->destructor(skb);
		skb->destructor = NULL;
		skb->sk = NULL;
	} else {
		BUG_ON(skb->sk);
	}
}

/**
 * skb_orphan_frags - orphan the frags contained in a buffer
 * @skb: buffer to orphan frags from
 * @gfp_mask: allocation mask for replacement pages
 *
 * For each frag in the SKB which needs a destructor (i.e. has an
 * owner) create a copy of that frag and release the original
 * page by calling the destructor.
 */
static inline int skb_orphan_frags(struct sk_buff *skb, gfp_t gfp_mask)
{
	if (likely(!(skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY)))
		return 0;
	return skb_copy_ubufs(skb, gfp_mask);
}

/**
 * __skb_queue_purge - empty a list
 * @list: list to empty
 *
 * Delete all buffers on an &sk_buff list. Each buffer is removed from
 * the list and one reference dropped. This function does not take the
 * list lock and the caller must hold the relevant locks to use it.
 */
void skb_queue_purge(struct sk_buff_head *list);
static inline void __skb_queue_purge(struct sk_buff_head *list)
{
	struct sk_buff *skb;
	while ((skb = __skb_dequeue(list)) != NULL)
		kfree_skb(skb);
}

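/* Example (illustrative sketch, not part of the original header): purging a
 * private queue. The locked form below shows why __skb_queue_purge()
 * documents its locking requirement; "q" is a hypothetical driver queue.
 *
 *	spin_lock_bh(&q->lock);
 *	__skb_queue_purge(q);
 *	spin_unlock_bh(&q->lock);
 *
 * skb_queue_purge() performs the same operation but takes the queue lock
 * itself.
 */
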
#define NETDEV_FRAG_PAGE_MAX_ORDER get_order(32768)
#define NETDEV_FRAG_PAGE_MAX_SIZE  (PAGE_SIZE << NETDEV_FRAG_PAGE_MAX_ORDER)
#define NETDEV_PAGECNT_MAX_BIAS	   NETDEV_FRAG_PAGE_MAX_SIZE

void *netdev_alloc_frag(unsigned int fragsz);

struct sk_buff *__netdev_alloc_skb(struct net_device *dev, unsigned int length,
				   gfp_t gfp_mask);

/**
 * netdev_alloc_skb - allocate an skbuff for rx on a specific device
 * @dev: network device to receive on
 * @length: length to allocate
 *
 * Allocate a new &sk_buff and assign it a usage count of one. The
 * buffer has unspecified headroom built in. Users should allocate
 * the headroom they think they need without accounting for the
 * built in space. The built in space is used for optimisations.
 *
 * %NULL is returned if there is no free memory. Although this function
 * allocates memory it can be called from an interrupt.
 */
static inline struct sk_buff *netdev_alloc_skb(struct net_device *dev,
					       unsigned int length)
{
	return __netdev_alloc_skb(dev, length, GFP_ATOMIC);
}

/* legacy helper around __netdev_alloc_skb() */
static inline struct sk_buff *__dev_alloc_skb(unsigned int length,
					      gfp_t gfp_mask)
{
	return __netdev_alloc_skb(NULL, length, gfp_mask);
}

/* legacy helper around netdev_alloc_skb() */
static inline struct sk_buff *dev_alloc_skb(unsigned int length)
{
	return netdev_alloc_skb(NULL, length);
}


static inline struct sk_buff *__netdev_alloc_skb_ip_align(struct net_device *dev,
							   unsigned int length,
							   gfp_t gfp)
{
	struct sk_buff *skb = __netdev_alloc_skb(dev, length + NET_IP_ALIGN, gfp);

	if (NET_IP_ALIGN && skb)
		skb_reserve(skb, NET_IP_ALIGN);
	return skb;
}

static inline struct sk_buff *netdev_alloc_skb_ip_align(struct net_device *dev,
							unsigned int length)
{
	return __netdev_alloc_skb_ip_align(dev, length, GFP_ATOMIC);
}

/**
 * __skb_alloc_pages - allocate pages for ps-rx on a skb and preserve pfmemalloc data
 * @gfp_mask: alloc_pages_node mask. Set __GFP_NOMEMALLOC if not for network packet RX
 * @skb: skb to set pfmemalloc on if __GFP_MEMALLOC is used
 * @order: size of the allocation
 *
 * Allocate a new page.
 *
 * %NULL is returned if there is no free memory.
 */
static inline struct page *__skb_alloc_pages(gfp_t gfp_mask,
					     struct sk_buff *skb,
					     unsigned int order)
{
	struct page *page;

	gfp_mask |= __GFP_COLD;

	if (!(gfp_mask & __GFP_NOMEMALLOC))
		gfp_mask |= __GFP_MEMALLOC;

	page = alloc_pages_node(NUMA_NO_NODE, gfp_mask, order);
	if (skb && page && page->pfmemalloc)
		skb->pfmemalloc = true;

	return page;
}

/**
 * __skb_alloc_page - allocate a page for ps-rx for a given skb and preserve pfmemalloc data
 * @gfp_mask: alloc_pages_node mask. Set __GFP_NOMEMALLOC if not for network packet RX
 * @skb: skb to set pfmemalloc on if __GFP_MEMALLOC is used
 *
 * Allocate a new page.
 *
 * %NULL is returned if there is no free memory.
 */
static inline struct page *__skb_alloc_page(gfp_t gfp_mask,
					    struct sk_buff *skb)
{
	return __skb_alloc_pages(gfp_mask, skb, 0);
}

/**
 * skb_propagate_pfmemalloc - Propagate pfmemalloc if skb is allocated after RX page
 * @page: The page that was allocated from skb_alloc_page
 * @skb: The skb that may need pfmemalloc set
 */
static inline void skb_propagate_pfmemalloc(struct page *page,
					    struct sk_buff *skb)
{
	if (page && page->pfmemalloc)
		skb->pfmemalloc = true;
}

/**
 * skb_frag_page - retrieve the page referred to by a paged fragment
 * @frag: the paged fragment
 *
 * Returns the &struct page associated with @frag.
 */
static inline struct page *skb_frag_page(const skb_frag_t *frag)
{
	return frag->page.p;
}

/**
 * __skb_frag_ref - take an additional reference on a paged fragment.
 * @frag: the paged fragment
 *
 * Takes an additional reference on the paged fragment @frag.
 */
static inline void __skb_frag_ref(skb_frag_t *frag)
{
	get_page(skb_frag_page(frag));
}

/**
 * skb_frag_ref - take an additional reference on a paged fragment of an skb.
 * @skb: the buffer
 * @f: the fragment offset.
 *
 * Takes an additional reference on the @f'th paged fragment of @skb.
 */
static inline void skb_frag_ref(struct sk_buff *skb, int f)
{
	__skb_frag_ref(&skb_shinfo(skb)->frags[f]);
}

/**
 * __skb_frag_unref - release a reference on a paged fragment.
 * @frag: the paged fragment
 *
 * Releases a reference on the paged fragment @frag.
 */
static inline void __skb_frag_unref(skb_frag_t *frag)
{
	put_page(skb_frag_page(frag));
}

/**
 * skb_frag_unref - release a reference on a paged fragment of an skb.
 * @skb: the buffer
 * @f: the fragment offset
 *
 * Releases a reference on the @f'th paged fragment of @skb.
 */
static inline void skb_frag_unref(struct sk_buff *skb, int f)
{
	__skb_frag_unref(&skb_shinfo(skb)->frags[f]);
}

/**
 * skb_frag_address - gets the address of the data contained in a paged fragment
 * @frag: the paged fragment buffer
 *
 * Returns the address of the data within @frag. The page must already
 * be mapped.
 */
static inline void *skb_frag_address(const skb_frag_t *frag)
{
	return page_address(skb_frag_page(frag)) + frag->page_offset;
}

/**
 * skb_frag_address_safe - gets the address of the data contained in a paged fragment
 * @frag: the paged fragment buffer
 *
 * Returns the address of the data within @frag. Checks that the page
 * is mapped and returns %NULL otherwise.
 */
static inline void *skb_frag_address_safe(const skb_frag_t *frag)
{
	void *ptr = page_address(skb_frag_page(frag));
	if (unlikely(!ptr))
		return NULL;

	return ptr + frag->page_offset;
}

/**
 * __skb_frag_set_page - sets the page contained in a paged fragment
 * @frag: the paged fragment
 * @page: the page to set
 *
 * Sets the fragment @frag to contain @page.
 */
static inline void __skb_frag_set_page(skb_frag_t *frag, struct page *page)
{
	frag->page.p = page;
}

/**
 * skb_frag_set_page - sets the page contained in a paged fragment of an skb
 * @skb: the buffer
 * @f: the fragment offset
 * @page: the page to set
 *
 * Sets the @f'th fragment of @skb to contain @page.
 */
static inline void skb_frag_set_page(struct sk_buff *skb, int f,
				     struct page *page)
{
	__skb_frag_set_page(&skb_shinfo(skb)->frags[f], page);
}

bool skb_page_frag_refill(unsigned int sz, struct page_frag *pfrag, gfp_t prio);

/**
 * skb_frag_dma_map - maps a paged fragment via the DMA API
 * @dev: the device to map the fragment to
 * @frag: the paged fragment to map
 * @offset: the offset within the fragment (starting at the
 *          fragment's own offset)
 * @size: the number of bytes to map
 * @dir: the direction of the mapping (%PCI_DMA_*)
 *
 * Maps the page associated with @frag to @dev.
 */
static inline dma_addr_t skb_frag_dma_map(struct device *dev,
					  const skb_frag_t *frag,
					  size_t offset, size_t size,
					  enum dma_data_direction dir)
{
	return dma_map_page(dev, skb_frag_page(frag),
			    frag->page_offset + offset, size, dir);
}

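/* Example (illustrative sketch, not part of the original header): a TX path
 * mapping every paged fragment of an skb for DMA; "dev" is the struct
 * device of a hypothetical NIC and error unwinding is elided.
 *
 *	int i;
 *
 *	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
 *		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
 *		dma_addr_t addr = skb_frag_dma_map(dev, frag, 0,
 *						   skb_frag_size(frag),
 *						   DMA_TO_DEVICE);
 *		if (dma_mapping_error(dev, addr))
 *			goto unmap;
 *	}
 *
 * The linear part (skb->data, skb_headlen(skb)) is not covered by the
 * fragment array and must be mapped separately, typically with
 * dma_map_single().
 */
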
static inline struct sk_buff *pskb_copy(struct sk_buff *skb,
					gfp_t gfp_mask)
{
	return __pskb_copy(skb, skb_headroom(skb), gfp_mask);
}

/**
 * skb_clone_writable - is the header of a clone writable
 * @skb: buffer to check
 * @len: length up to which to write
 *
 * Returns true if modifying the header part of the cloned buffer
 * does not require the data to be copied.
 */
static inline int skb_clone_writable(const struct sk_buff *skb, unsigned int len)
{
	return !skb_header_cloned(skb) &&
	       skb_headroom(skb) + len <= skb->hdr_len;
}

static inline int __skb_cow(struct sk_buff *skb, unsigned int headroom,
			    int cloned)
{
	int delta = 0;

	if (headroom > skb_headroom(skb))
		delta = headroom - skb_headroom(skb);

	if (delta || cloned)
		return pskb_expand_head(skb, ALIGN(delta, NET_SKB_PAD), 0,
					GFP_ATOMIC);
	return 0;
}

/**
 * skb_cow - copy header of skb when it is required
 * @skb: buffer to cow
 * @headroom: needed headroom
 *
 * If the skb passed lacks sufficient headroom or its data part
 * is shared, data is reallocated. If reallocation fails, an error
 * is returned and the original skb is not changed.
 *
 * The result is skb with writable area skb->head...skb->tail
 * and at least @headroom of space at head.
 */
static inline int skb_cow(struct sk_buff *skb, unsigned int headroom)
{
	return __skb_cow(skb, headroom, skb_cloned(skb));
}

/**
 * skb_cow_head - skb_cow but only making the head writable
 * @skb: buffer to cow
 * @headroom: needed headroom
 *
 * This function is identical to skb_cow except that we replace the
 * skb_cloned check by skb_header_cloned. It should be used when
 * you only need to push on some header and do not need to modify
 * the data.
 */
static inline int skb_cow_head(struct sk_buff *skb, unsigned int headroom)
{
	return __skb_cow(skb, headroom, skb_header_cloned(skb));
}

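/* Example (illustrative sketch, not part of the original header): ensuring
 * writable headroom before prepending an encapsulation header, as tunnel
 * transmit paths commonly do. HDR_LEN and struct my_hdr are hypothetical.
 *
 *	if (skb_cow_head(skb, HDR_LEN))
 *		goto drop;
 *	hdr = (struct my_hdr *)skb_push(skb, HDR_LEN);
 *
 * skb_cow_head() reallocates only when the headroom is short or the header
 * area is shared with a clone, so the fast path is a cheap check.
 */
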
/**
 * skb_padto - pad an skbuff up to a minimal size
 * @skb: buffer to pad
 * @len: minimal length
 *
 * Pads up a buffer to ensure the trailing bytes exist and are
 * blanked. If the buffer already contains sufficient data it
 * is untouched. Otherwise it is extended. Returns zero on
 * success. The skb is freed on error.
 */
static inline int skb_padto(struct sk_buff *skb, unsigned int len)
{
	unsigned int size = skb->len;
	if (likely(size >= len))
		return 0;
	return skb_pad(skb, len - size);
}

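/* Example (illustrative sketch, not part of the original header): padding a
 * short frame to the minimum ethernet payload in a driver transmit routine.
 * my_ndo_start_xmit() is a hypothetical .ndo_start_xmit implementation.
 *
 *	static netdev_tx_t my_ndo_start_xmit(struct sk_buff *skb,
 *					     struct net_device *dev)
 *	{
 *		if (skb_padto(skb, ETH_ZLEN))
 *			return NETDEV_TX_OK;
 *		...
 *	}
 *
 * On padding failure the skb has already been freed, so the driver must
 * report NETDEV_TX_OK and must not touch the skb again.
 */
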
static inline int skb_add_data(struct sk_buff *skb,
			       char __user *from, int copy)
{
	const int off = skb->len;

	if (skb->ip_summed == CHECKSUM_NONE) {
		int err = 0;
		__wsum csum = csum_and_copy_from_user(from, skb_put(skb, copy),
						      copy, 0, &err);
		if (!err) {
			skb->csum = csum_block_add(skb->csum, csum, off);
			return 0;
		}
	} else if (!copy_from_user(skb_put(skb, copy), from, copy))
		return 0;

	__skb_trim(skb, off);
	return -EFAULT;
}

static inline bool skb_can_coalesce(struct sk_buff *skb, int i,
				    const struct page *page, int off)
{
	if (i) {
		const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i - 1];

		return page == skb_frag_page(frag) &&
		       off == frag->page_offset + skb_frag_size(frag);
	}
	return false;
}

static inline int __skb_linearize(struct sk_buff *skb)
{
	return __pskb_pull_tail(skb, skb->data_len) ? 0 : -ENOMEM;
}

/**
 * skb_linearize - convert paged skb to linear one
 * @skb: buffer to linearize
 *
 * If there is no free memory -ENOMEM is returned, otherwise zero
 * is returned and the old skb data released.
 */
static inline int skb_linearize(struct sk_buff *skb)
{
	return skb_is_nonlinear(skb) ? __skb_linearize(skb) : 0;
}

/**
 * skb_has_shared_frag - can any frag be overwritten
 * @skb: buffer to test
 *
 * Return true if the skb has at least one frag that might be modified
 * by an external entity (as in vmsplice()/sendfile())
 */
static inline bool skb_has_shared_frag(const struct sk_buff *skb)
{
	return skb_is_nonlinear(skb) &&
	       skb_shinfo(skb)->tx_flags & SKBTX_SHARED_FRAG;
}

/**
 * skb_linearize_cow - make sure skb is linear and writable
 * @skb: buffer to process
 *
 * If there is no free memory -ENOMEM is returned, otherwise zero
 * is returned and the old skb data released.
 */
static inline int skb_linearize_cow(struct sk_buff *skb)
{
	return skb_is_nonlinear(skb) || skb_cloned(skb) ?
	       __skb_linearize(skb) : 0;
}

/**
 * skb_postpull_rcsum - update checksum for received skb after pull
 * @skb: buffer to update
 * @start: start of data before pull
 * @len: length of data pulled
 *
 * After doing a pull on a received packet, you need to call this to
 * update the CHECKSUM_COMPLETE checksum, or set ip_summed to
 * CHECKSUM_NONE so that it can be recomputed from scratch.
 */
static inline void skb_postpull_rcsum(struct sk_buff *skb,
				      const void *start, unsigned int len)
{
	if (skb->ip_summed == CHECKSUM_COMPLETE)
		skb->csum = csum_sub(skb->csum, csum_partial(start, len, 0));
}

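/* Example (illustrative sketch, not part of the original header): keeping a
 * CHECKSUM_COMPLETE value consistent while stripping an outer header of
 * hlen bytes (hlen is a hypothetical header length).
 *
 *	const void *start = skb->data;
 *
 *	skb_pull(skb, hlen);
 *	skb_postpull_rcsum(skb, start, hlen);
 *
 * Without the postpull fixup, skb->csum would still cover the bytes that
 * were just pulled and later checksum verification would fail.
 */
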
unsigned char *skb_pull_rcsum(struct sk_buff *skb, unsigned int len);

/**
 * pskb_trim_rcsum - trim received skb and update checksum
 * @skb: buffer to trim
 * @len: new length
 *
 * This is exactly the same as pskb_trim except that it ensures the
 * checksum of received packets is still valid after the operation.
 */
static inline int pskb_trim_rcsum(struct sk_buff *skb, unsigned int len)
{
	if (likely(len >= skb->len))
		return 0;
	if (skb->ip_summed == CHECKSUM_COMPLETE)
		skb->ip_summed = CHECKSUM_NONE;
	return __pskb_trim(skb, len);
}

#define skb_queue_walk(queue, skb) \
		for (skb = (queue)->next;				\
		     skb != (struct sk_buff *)(queue);			\
		     skb = skb->next)

#define skb_queue_walk_safe(queue, skb, tmp)				\
		for (skb = (queue)->next, tmp = skb->next;		\
		     skb != (struct sk_buff *)(queue);			\
		     skb = tmp, tmp = skb->next)

#define skb_queue_walk_from(queue, skb)					\
		for (; skb != (struct sk_buff *)(queue);		\
		     skb = skb->next)

#define skb_queue_walk_from_safe(queue, skb, tmp)			\
		for (tmp = skb->next;					\
		     skb != (struct sk_buff *)(queue);			\
		     skb = tmp, tmp = skb->next)

#define skb_queue_reverse_walk(queue, skb) \
		for (skb = (queue)->prev;				\
		     skb != (struct sk_buff *)(queue);			\
		     skb = skb->prev)

#define skb_queue_reverse_walk_safe(queue, skb, tmp)			\
		for (skb = (queue)->prev, tmp = skb->prev;		\
		     skb != (struct sk_buff *)(queue);			\
		     skb = tmp, tmp = skb->prev)

#define skb_queue_reverse_walk_from_safe(queue, skb, tmp)		\
		for (tmp = skb->prev;					\
		     skb != (struct sk_buff *)(queue);			\
		     skb = tmp, tmp = skb->prev)

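/* Example (illustrative sketch, not part of the original header): iterating
 * a queue with the _safe variant because entries are unlinked inside the
 * loop; "q" is a hypothetical &sk_buff_head locked by the caller and
 * should_drop() a placeholder predicate.
 *
 *	struct sk_buff *skb, *tmp;
 *
 *	skb_queue_walk_safe(q, skb, tmp) {
 *		if (should_drop(skb)) {
 *			__skb_unlink(skb, q);
 *			kfree_skb(skb);
 *		}
 *	}
 *
 * The plain skb_queue_walk() must not be used when the loop body removes
 * the current entry.
 */
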
static inline bool skb_has_frag_list(const struct sk_buff *skb)
{
	return skb_shinfo(skb)->frag_list != NULL;
}

static inline void skb_frag_list_init(struct sk_buff *skb)
{
	skb_shinfo(skb)->frag_list = NULL;
}

static inline void skb_frag_add_head(struct sk_buff *skb, struct sk_buff *frag)
{
	frag->next = skb_shinfo(skb)->frag_list;
	skb_shinfo(skb)->frag_list = frag;
}

#define skb_walk_frags(skb, iter)	\
	for (iter = skb_shinfo(skb)->frag_list; iter; iter = iter->next)

struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned flags,
				    int *peeked, int *off, int *err);
struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags, int noblock,
				  int *err);
unsigned int datagram_poll(struct file *file, struct socket *sock,
			   struct poll_table_struct *wait);
int skb_copy_datagram_iovec(const struct sk_buff *from, int offset,
			    struct iovec *to, int size);
int skb_copy_and_csum_datagram_iovec(struct sk_buff *skb, int hlen,
				     struct iovec *iov);
int skb_copy_datagram_from_iovec(struct sk_buff *skb, int offset,
				 const struct iovec *from, int from_offset,
				 int len);
int zerocopy_sg_from_iovec(struct sk_buff *skb, const struct iovec *frm,
			   int offset, size_t count);
int skb_copy_datagram_const_iovec(const struct sk_buff *from, int offset,
				  const struct iovec *to, int to_offset,
				  int size);
void skb_free_datagram(struct sock *sk, struct sk_buff *skb);
void skb_free_datagram_locked(struct sock *sk, struct sk_buff *skb);
int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags);
int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len);
int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len);
__wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset, u8 *to,
			      int len, __wsum csum);
int skb_splice_bits(struct sk_buff *skb, unsigned int offset,
		    struct pipe_inode_info *pipe, unsigned int len,
		    unsigned int flags);
void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to);
void skb_split(struct sk_buff *skb, struct sk_buff *skb1, const u32 len);
int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen);
void skb_scrub_packet(struct sk_buff *skb, bool xnet);
struct sk_buff *skb_segment(struct sk_buff *skb, netdev_features_t features);

struct skb_checksum_ops {
	__wsum (*update)(const void *mem, int len, __wsum wsum);
	__wsum (*combine)(__wsum csum, __wsum csum2, int offset, int len);
};

__wsum __skb_checksum(const struct sk_buff *skb, int offset, int len,
		      __wsum csum, const struct skb_checksum_ops *ops);
__wsum skb_checksum(const struct sk_buff *skb, int offset, int len,
		    __wsum csum);

static inline void *skb_header_pointer(const struct sk_buff *skb, int offset,
				       int len, void *buffer)
{
	int hlen = skb_headlen(skb);

	if (hlen - offset >= len)
		return skb->data + offset;

	if (skb_copy_bits(skb, offset, buffer, len) < 0)
		return NULL;

	return buffer;
}

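/* Example (illustrative sketch, not part of the original header): safely
 * reading a TCP header that may be split across fragments. The stack
 * buffer is only used when the header is not in the linear area.
 *
 *	struct tcphdr _tcph;
 *	const struct tcphdr *th;
 *
 *	th = skb_header_pointer(skb, skb_transport_offset(skb),
 *				sizeof(_tcph), &_tcph);
 *	if (!th)
 *		goto drop;
 *
 * This is the usual netfilter/classifier pattern: the returned pointer is
 * either into skb->data or into _tcph, so it must not be written through.
 */
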
/**
 * skb_needs_linearize - check if we need to linearize a given skb
 *	depending on the given device features.
 * @skb: socket buffer to check
 * @features: net device features
 *
 * Returns true if either:
 * 1. skb has frag_list and the device doesn't support FRAGLIST, or
 * 2. skb is fragmented and the device does not support SG.
 */
static inline bool skb_needs_linearize(struct sk_buff *skb,
				       netdev_features_t features)
{
	return skb_is_nonlinear(skb) &&
	       ((skb_has_frag_list(skb) && !(features & NETIF_F_FRAGLIST)) ||
		(skb_shinfo(skb)->nr_frags && !(features & NETIF_F_SG)));
}

static inline void skb_copy_from_linear_data(const struct sk_buff *skb,
					     void *to,
					     const unsigned int len)
{
	memcpy(to, skb->data, len);
}

static inline void skb_copy_from_linear_data_offset(const struct sk_buff *skb,
						    const int offset, void *to,
						    const unsigned int len)
{
	memcpy(to, skb->data + offset, len);
}

static inline void skb_copy_to_linear_data(struct sk_buff *skb,
					   const void *from,
					   const unsigned int len)
{
	memcpy(skb->data, from, len);
}

static inline void skb_copy_to_linear_data_offset(struct sk_buff *skb,
						  const int offset,
						  const void *from,
						  const unsigned int len)
{
	memcpy(skb->data + offset, from, len);
}

void skb_init(void);

static inline ktime_t skb_get_ktime(const struct sk_buff *skb)
{
	return skb->tstamp;
}

/**
 * skb_get_timestamp - get timestamp from a skb
 * @skb: skb to get stamp from
 * @stamp: pointer to struct timeval to store stamp in
 *
 * Timestamps are stored in the skb as offsets to a base timestamp.
 * This function converts the offset back to a struct timeval and stores
 * it in stamp.
 */
static inline void skb_get_timestamp(const struct sk_buff *skb,
				     struct timeval *stamp)
{
	*stamp = ktime_to_timeval(skb->tstamp);
}

static inline void skb_get_timestampns(const struct sk_buff *skb,
				       struct timespec *stamp)
{
	*stamp = ktime_to_timespec(skb->tstamp);
}

static inline void __net_timestamp(struct sk_buff *skb)
{
	skb->tstamp = ktime_get_real();
}

static inline ktime_t net_timedelta(ktime_t t)
{
	return ktime_sub(ktime_get_real(), t);
}

static inline ktime_t net_invalid_timestamp(void)
{
	return ktime_set(0, 0);
}

void skb_timestamping_init(void);

#ifdef CONFIG_NETWORK_PHY_TIMESTAMPING

void skb_clone_tx_timestamp(struct sk_buff *skb);
bool skb_defer_rx_timestamp(struct sk_buff *skb);

#else /* CONFIG_NETWORK_PHY_TIMESTAMPING */

static inline void skb_clone_tx_timestamp(struct sk_buff *skb)
{
}

static inline bool skb_defer_rx_timestamp(struct sk_buff *skb)
{
	return false;
}

#endif /* !CONFIG_NETWORK_PHY_TIMESTAMPING */

/**
 * skb_complete_tx_timestamp() - deliver cloned skb with tx timestamps
 *
 * PHY drivers may accept clones of transmitted packets for
 * timestamping via their phy_driver.txtstamp method. These drivers
 * must call this function to return the skb back to the stack, with
 * or without a timestamp.
 *
 * @skb: clone of the original outgoing packet
 * @hwtstamps: hardware time stamps, may be NULL if not available
 *
 */
void skb_complete_tx_timestamp(struct sk_buff *skb,
			       struct skb_shared_hwtstamps *hwtstamps);

/**
 * skb_tstamp_tx - queue clone of skb with send time stamps
 * @orig_skb: the original outgoing packet
 * @hwtstamps: hardware time stamps, may be NULL if not available
 *
 * If the skb has a socket associated, then this function clones the
 * skb (thus sharing the actual data and optional structures), stores
 * the optional hardware time stamping information (if non NULL) or
 * generates a software time stamp (otherwise), then queues the clone
 * to the error queue of the socket. Errors are silently ignored.
 */
void skb_tstamp_tx(struct sk_buff *orig_skb,
		   struct skb_shared_hwtstamps *hwtstamps);

static inline void sw_tx_timestamp(struct sk_buff *skb)
{
	if (skb_shinfo(skb)->tx_flags & SKBTX_SW_TSTAMP &&
	    !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS))
		skb_tstamp_tx(skb, NULL);
}

/**
 * skb_tx_timestamp() - Driver hook for transmit timestamping
 *
 * Ethernet MAC Drivers should call this function in their hard_xmit()
 * function immediately before giving the sk_buff to the MAC hardware.
 *
 * @skb: A socket buffer.
 */
static inline void skb_tx_timestamp(struct sk_buff *skb)
{
	skb_clone_tx_timestamp(skb);
	sw_tx_timestamp(skb);
}

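/* Example (illustrative sketch, not part of the original header): where the
 * timestamp hook sits in a hypothetical driver transmit routine.
 *
 *	static netdev_tx_t my_ndo_start_xmit(struct sk_buff *skb,
 *					     struct net_device *dev)
 *	{
 *		...
 *		skb_tx_timestamp(skb);
 *		my_hw_post_descriptor(dev, skb);
 *		return NETDEV_TX_OK;
 *	}
 *
 * my_hw_post_descriptor() stands in for the device-specific doorbell; the
 * point is that skb_tx_timestamp() runs after the skb is final and just
 * before the hardware can transmit it.
 */
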
/**
 * skb_complete_wifi_ack - deliver skb with wifi status
 *
 * @skb: the original outgoing packet
 * @acked: ack status
 *
 */
void skb_complete_wifi_ack(struct sk_buff *skb, bool acked);

__sum16 __skb_checksum_complete_head(struct sk_buff *skb, int len);
__sum16 __skb_checksum_complete(struct sk_buff *skb);

static inline int skb_csum_unnecessary(const struct sk_buff *skb)
{
	return skb->ip_summed & CHECKSUM_UNNECESSARY;
}

/**
 * skb_checksum_complete - Calculate checksum of an entire packet
 * @skb: packet to process
 *
 * This function calculates the checksum over the entire packet plus
 * the value of skb->csum. The latter can be used to supply the
 * checksum of a pseudo header as used by TCP/UDP. It returns the
 * checksum.
 *
 * For protocols that contain complete checksums such as ICMP/TCP/UDP,
 * this function can be used to verify the checksum on received
 * packets. In that case the function should return zero if the
 * checksum is correct. In particular, this function will return zero
 * if skb->ip_summed is CHECKSUM_UNNECESSARY which indicates that the
 * hardware has already verified the correctness of the checksum.
 */
static inline __sum16 skb_checksum_complete(struct sk_buff *skb)
{
	return skb_csum_unnecessary(skb) ?
	       0 : __skb_checksum_complete(skb);
}

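/* Example (illustrative sketch, not part of the original header): verifying
 * a UDP checksum in a receive handler. The pseudo-header sum is folded
 * into skb->csum first so skb_checksum_complete() can finish the job;
 * saddr/daddr are hypothetical addresses taken from the IP header.
 *
 *	skb->csum = csum_tcpudp_nofold(saddr, daddr, skb->len,
 *				       IPPROTO_UDP, 0);
 *	if (skb_checksum_complete(skb))
 *		goto csum_error;
 *
 * A zero return means either the checksum verified or the hardware had
 * already vouched for it (CHECKSUM_UNNECESSARY).
 */
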
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
void nf_conntrack_destroy(struct nf_conntrack *nfct);
static inline void nf_conntrack_put(struct nf_conntrack *nfct)
{
	if (nfct && atomic_dec_and_test(&nfct->use))
		nf_conntrack_destroy(nfct);
}
static inline void nf_conntrack_get(struct nf_conntrack *nfct)
{
	if (nfct)
		atomic_inc(&nfct->use);
}
#endif
#ifdef CONFIG_BRIDGE_NETFILTER
static inline void nf_bridge_put(struct nf_bridge_info *nf_bridge)
{
	if (nf_bridge && atomic_dec_and_test(&nf_bridge->use))
		kfree(nf_bridge);
}
static inline void nf_bridge_get(struct nf_bridge_info *nf_bridge)
{
	if (nf_bridge)
		atomic_inc(&nf_bridge->use);
}
#endif /* CONFIG_BRIDGE_NETFILTER */
static inline void nf_reset(struct sk_buff *skb)
{
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
	nf_conntrack_put(skb->nfct);
	skb->nfct = NULL;
#endif
#ifdef CONFIG_BRIDGE_NETFILTER
	nf_bridge_put(skb->nf_bridge);
	skb->nf_bridge = NULL;
#endif
}

static inline void nf_reset_trace(struct sk_buff *skb)
{
#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE)
	skb->nf_trace = 0;
#endif
}

/* Note: This doesn't put any conntrack and bridge info in dst. */
static inline void __nf_copy(struct sk_buff *dst, const struct sk_buff *src)
{
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
	dst->nfct = src->nfct;
	nf_conntrack_get(src->nfct);
	dst->nfctinfo = src->nfctinfo;
#endif
#ifdef CONFIG_BRIDGE_NETFILTER
	dst->nf_bridge = src->nf_bridge;
	nf_bridge_get(src->nf_bridge);
#endif
}

static inline void nf_copy(struct sk_buff *dst, const struct sk_buff *src)
{
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
	nf_conntrack_put(dst->nfct);
#endif
#ifdef CONFIG_BRIDGE_NETFILTER
	nf_bridge_put(dst->nf_bridge);
#endif
	__nf_copy(dst, src);
}

#ifdef CONFIG_NETWORK_SECMARK
static inline void skb_copy_secmark(struct sk_buff *to, const struct sk_buff *from)
{
	to->secmark = from->secmark;
}

static inline void skb_init_secmark(struct sk_buff *skb)
{
	skb->secmark = 0;
}
#else
static inline void skb_copy_secmark(struct sk_buff *to, const struct sk_buff *from)
{ }

static inline void skb_init_secmark(struct sk_buff *skb)
{ }
#endif

static inline void skb_set_queue_mapping(struct sk_buff *skb, u16 queue_mapping)
{
	skb->queue_mapping = queue_mapping;
}

static inline u16 skb_get_queue_mapping(const struct sk_buff *skb)
{
	return skb->queue_mapping;
}

static inline void skb_copy_queue_mapping(struct sk_buff *to, const struct sk_buff *from)
{
	to->queue_mapping = from->queue_mapping;
}

static inline void skb_record_rx_queue(struct sk_buff *skb, u16 rx_queue)
{
	skb->queue_mapping = rx_queue + 1;
}

static inline u16 skb_get_rx_queue(const struct sk_buff *skb)
{
	return skb->queue_mapping - 1;
}

static inline bool skb_rx_queue_recorded(const struct sk_buff *skb)
{
	return skb->queue_mapping != 0;
}

u16 __skb_tx_hash(const struct net_device *dev, const struct sk_buff *skb,
		  unsigned int num_tx_queues);

static inline struct sec_path *skb_sec_path(struct sk_buff *skb)
{
#ifdef CONFIG_XFRM
	return skb->sp;
#else
	return NULL;
#endif
}

/* Keeps track of mac header offset relative to skb->head.
 * It is useful for TSO of tunneling protocols, e.g. GRE.
 * For a non-tunnel skb it points to skb_mac_header() and for a
 * tunnel skb it points to the outer mac header.
 * Also keeps track of the level of encapsulation of network headers.
 */
struct skb_gso_cb {
	int	mac_offset;
	int	encap_level;
};
#define SKB_GSO_CB(skb) ((struct skb_gso_cb *)(skb)->cb)

static inline int skb_tnl_header_len(const struct sk_buff *inner_skb)
{
	return (skb_mac_header(inner_skb) - inner_skb->head) -
	       SKB_GSO_CB(inner_skb)->mac_offset;
}

static inline int gso_pskb_expand_head(struct sk_buff *skb, int extra)
{
	int new_headroom, headroom;
	int ret;

	headroom = skb_headroom(skb);
	ret = pskb_expand_head(skb, extra, 0, GFP_ATOMIC);
	if (ret)
		return ret;

	new_headroom = skb_headroom(skb);
	SKB_GSO_CB(skb)->mac_offset += (new_headroom - headroom);
	return 0;
}

static inline bool skb_is_gso(const struct sk_buff *skb)
{
	return skb_shinfo(skb)->gso_size;
}

/* Note: Should be called only if skb_is_gso(skb) is true */
static inline bool skb_is_gso_v6(const struct sk_buff *skb)
{
	return skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6;
}

void __skb_warn_lro_forwarding(const struct sk_buff *skb);

static inline bool skb_warn_if_lro(const struct sk_buff *skb)
{
	/* LRO sets gso_size but not gso_type, whereas if GSO is really
	 * wanted then gso_type will be set. */
	const struct skb_shared_info *shinfo = skb_shinfo(skb);

	if (skb_is_nonlinear(skb) && shinfo->gso_size != 0 &&
	    unlikely(shinfo->gso_type == 0)) {
		__skb_warn_lro_forwarding(skb);
		return true;
	}
	return false;
}

static inline void skb_forward_csum(struct sk_buff *skb)
{
	/* Unfortunately we don't support this one. Any brave souls? */
	if (skb->ip_summed == CHECKSUM_COMPLETE)
		skb->ip_summed = CHECKSUM_NONE;
}

/**
 * skb_checksum_none_assert - make sure skb ip_summed is CHECKSUM_NONE
 * @skb: skb to check
 *
 * Fresh skbs have their ip_summed set to CHECKSUM_NONE.
 * Instead of forcing ip_summed to CHECKSUM_NONE, we can
 * use this helper, to document places where we make this assertion.
 */
static inline void skb_checksum_none_assert(const struct sk_buff *skb)
{
#ifdef DEBUG
	BUG_ON(skb->ip_summed != CHECKSUM_NONE);
#endif
}

bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off);

u32 __skb_get_poff(const struct sk_buff *skb);

/**
 * skb_head_is_locked - Determine if the skb->head is locked down
 * @skb: skb to check
 *
 * The head on skbs built around a head frag can be removed if they are
 * not cloned. This function returns true if the skb head is locked down
 * due to either being allocated via kmalloc, or by being a clone with
 * multiple references to the head.
 */
static inline bool skb_head_is_locked(const struct sk_buff *skb)
{
	return !skb->head_frag || skb_cloned(skb);
}
#endif /* __KERNEL__ */
#endif /* _LINUX_SKBUFF_H */