/*
 *	Routines having to do with the 'struct sk_buff' memory handlers.
 *
 *	Authors:	Alan Cox <alan@lxorguk.ukuu.org.uk>
 *			Florian La Roche <rzsfl@rz.uni-sb.de>
 *
 *	Fixes:
 *		Alan Cox	:	Fixed the worst of the load
 *					balancer bugs.
 *		Dave Platt	:	Interrupt stacking fix.
 *		Richard Kooijman:	Timestamp fixes.
 *		Alan Cox	:	Changed buffer format.
 *		Alan Cox	:	destructor hook for AF_UNIX etc.
 *		Linus Torvalds	:	Better skb_clone.
 *		Alan Cox	:	Added skb_copy.
 *		Alan Cox	:	Added all the changed routines Linus
 *					only put in the headers
 *		Ray VanTassle	:	Fixed --skb->lock in free
 *		Alan Cox	:	skb_copy copy arp field
 *		Andi Kleen	:	slabified it.
 *		Robert Olsson	:	Removed skb_head_pool
 *
 *	NOTE:
 *		The __skb_ routines should be called with interrupts
 *	disabled, or you better be *real* sure that the operation is atomic
 *	with respect to whatever list is being frobbed (e.g. via lock_sock()
 *	or via disabling bottom half handlers, etc).
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

/*
 *	The functions in this file will not compile correctly with gcc 2.4.x
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/slab.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/sctp.h>
#include <linux/netdevice.h>
#ifdef CONFIG_NET_CLS_ACT
#include <net/pkt_sched.h>
#endif
#include <linux/string.h>
#include <linux/skbuff.h>
#include <linux/splice.h>
#include <linux/cache.h>
#include <linux/rtnetlink.h>
#include <linux/init.h>
#include <linux/scatterlist.h>
#include <linux/errqueue.h>
#include <linux/prefetch.h>
#include <linux/if_vlan.h>

#include <net/protocol.h>
#include <net/dst.h>
#include <net/sock.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <net/xfrm.h>

#include <linux/uaccess.h>
#include <trace/events/skb.h>
#include <linux/highmem.h>
#include <linux/capability.h>
#include <linux/user_namespace.h>

struct kmem_cache *skbuff_head_cache __ro_after_init;
static struct kmem_cache *skbuff_fclone_cache __ro_after_init;
#ifdef CONFIG_SKB_EXTENSIONS
static struct kmem_cache *skbuff_ext_cache __ro_after_init;
#endif
int sysctl_max_skb_frags __read_mostly = MAX_SKB_FRAGS;
EXPORT_SYMBOL(sysctl_max_skb_frags);
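/*
 * Illustrative note (not part of the original file): sysctl_max_skb_frags
 * is exposed as /proc/sys/net/core/max_skb_frags and caps how many page
 * fragments TCP will pack into a single skb before starting a new one.
 */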

/**
 *	skb_panic - private function for out-of-line support
 *	@skb:	buffer
 *	@sz:	size
 *	@addr:	address
 *	@msg:	skb_over_panic or skb_under_panic
 *
 *	Out-of-line support for skb_put() and skb_push().
 *	Called via the wrapper skb_over_panic() or skb_under_panic().
 *	Keep out of line to prevent kernel bloat.
 *	__builtin_return_address is not used because it is not always reliable.
 */
static void skb_panic(struct sk_buff *skb, unsigned int sz, void *addr,
		      const char msg[])
{
	pr_emerg("%s: text:%p len:%d put:%d head:%p data:%p tail:%#lx end:%#lx dev:%s\n",
		 msg, addr, skb->len, sz, skb->head, skb->data,
		 (unsigned long)skb->tail, (unsigned long)skb->end,
		 skb->dev ? skb->dev->name : "<NULL>");
	BUG();
}

static void skb_over_panic(struct sk_buff *skb, unsigned int sz, void *addr)
{
	skb_panic(skb, sz, addr, __func__);
}

static void skb_under_panic(struct sk_buff *skb, unsigned int sz, void *addr)
{
	skb_panic(skb, sz, addr, __func__);
}

/*
 * kmalloc_reserve is a wrapper around kmalloc_node_track_caller that tells
 * the caller if emergency pfmemalloc reserves are being used. If it is and
 * the socket is later found to be SOCK_MEMALLOC then PFMEMALLOC reserves
 * may be used. Otherwise, the packet data may be discarded until enough
 * memory is free.
 */
#define kmalloc_reserve(size, gfp, node, pfmemalloc) \
	__kmalloc_reserve(size, gfp, node, _RET_IP_, pfmemalloc)

static void *__kmalloc_reserve(size_t size, gfp_t flags, int node,
			       unsigned long ip, bool *pfmemalloc)
{
	void *obj;
	bool ret_pfmemalloc = false;

	/*
	 * Try a regular allocation, when that fails and we're not entitled
	 * to the reserves, fail.
	 */
	obj = kmalloc_node_track_caller(size,
					flags | __GFP_NOMEMALLOC | __GFP_NOWARN,
					node);
	if (obj || !(gfp_pfmemalloc_allowed(flags)))
		goto out;

	/* Try again but now we are using pfmemalloc reserves */
	ret_pfmemalloc = true;
	obj = kmalloc_node_track_caller(size, flags, node);

out:
	if (pfmemalloc)
		*pfmemalloc = ret_pfmemalloc;

	return obj;
}
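/*
 * Illustrative note (not part of the original file): __alloc_skb() below
 * is the main consumer; it records the returned pfmemalloc flag in
 * skb->pfmemalloc, which sk_filter() later uses to drop the packet if the
 * receiving socket turns out not to be SOCK_MEMALLOC.
 */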

/* Allocate a new skbuff. We do this ourselves so we can fill in a few
 * 'private' fields and also do memory statistics to find all the
 * [BEEP] leaks.
 *
 */

/**
 *	__alloc_skb - allocate a network buffer
 *	@size: size to allocate
 *	@gfp_mask: allocation mask
 *	@flags: If SKB_ALLOC_FCLONE is set, allocate from fclone cache
 *		instead of head cache and allocate a cloned (child) skb.
 *		If SKB_ALLOC_RX is set, __GFP_MEMALLOC will be used for
 *		allocations in case the data is required for writeback
 *	@node: numa node to allocate memory on
 *
 *	Allocate a new &sk_buff. The returned buffer has no headroom and a
 *	tail room of at least size bytes. The object has a reference count
 *	of one. The return is the buffer. On a failure the return is %NULL.
 *
 *	Buffers may only be allocated from interrupts using a @gfp_mask of
 *	%GFP_ATOMIC.
 */
struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
			    int flags, int node)
{
	struct kmem_cache *cache;
	struct skb_shared_info *shinfo;
	struct sk_buff *skb;
	u8 *data;
	bool pfmemalloc;

	cache = (flags & SKB_ALLOC_FCLONE)
		? skbuff_fclone_cache : skbuff_head_cache;

	if (sk_memalloc_socks() && (flags & SKB_ALLOC_RX))
		gfp_mask |= __GFP_MEMALLOC;

	/* Get the HEAD */
	skb = kmem_cache_alloc_node(cache, gfp_mask & ~__GFP_DMA, node);
	if (!skb)
		goto out;
	prefetchw(skb);

	/* We do our best to align skb_shared_info on a separate cache
	 * line. It usually works because kmalloc(X > SMP_CACHE_BYTES) gives
	 * aligned memory blocks, unless SLUB/SLAB debug is enabled.
	 * Both skb->head and skb_shared_info are cache line aligned.
	 */
	size = SKB_DATA_ALIGN(size);
	size += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	data = kmalloc_reserve(size, gfp_mask, node, &pfmemalloc);
	if (!data)
		goto nodata;
	/* kmalloc(size) might give us more room than requested.
	 * Put skb_shared_info exactly at the end of allocated zone,
	 * to allow max possible filling before reallocation.
	 */
	size = SKB_WITH_OVERHEAD(ksize(data));
	prefetchw(data + size);

	/*
	 * Only clear those fields we need to clear, not those that we will
	 * actually initialise below. Hence, don't put any more fields after
	 * the tail pointer in struct sk_buff!
	 */
	memset(skb, 0, offsetof(struct sk_buff, tail));
	/* Account for allocated memory : skb + skb->head */
	skb->truesize = SKB_TRUESIZE(size);
	skb->pfmemalloc = pfmemalloc;
	refcount_set(&skb->users, 1);
	skb->head = data;
	skb->data = data;
	skb_reset_tail_pointer(skb);
	skb->end = skb->tail + size;
	skb->mac_header = (typeof(skb->mac_header))~0U;
	skb->transport_header = (typeof(skb->transport_header))~0U;

	/* make sure we initialize shinfo sequentially */
	shinfo = skb_shinfo(skb);
	memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
	atomic_set(&shinfo->dataref, 1);

	if (flags & SKB_ALLOC_FCLONE) {
		struct sk_buff_fclones *fclones;

		fclones = container_of(skb, struct sk_buff_fclones, skb1);

		skb->fclone = SKB_FCLONE_ORIG;
		refcount_set(&fclones->fclone_ref, 1);

		fclones->skb2.fclone = SKB_FCLONE_CLONE;
	}
out:
	return skb;
nodata:
	kmem_cache_free(cache, skb);
	skb = NULL;
	goto out;
}
EXPORT_SYMBOL(__alloc_skb);
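/*
 * Example (illustrative, not part of the original file): callers normally
 * use the alloc_skb() wrapper from <linux/skbuff.h> rather than calling
 * __alloc_skb() directly. With hdr_len and payload_len standing in for
 * caller-chosen sizes:
 *
 *	struct sk_buff *skb = alloc_skb(hdr_len + payload_len, GFP_ATOMIC);
 *
 *	if (skb) {
 *		skb_reserve(skb, hdr_len);
 *		skb_put(skb, payload_len);
 *	}
 */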

/**
 * __build_skb - build a network buffer
 * @data: data buffer provided by caller
 * @frag_size: size of data, or 0 if head was kmalloced
 *
 * Allocate a new &sk_buff. Caller provides space holding head and
 * skb_shared_info. @data must have been allocated by kmalloc() only if
 * @frag_size is 0, otherwise data should come from the page allocator
 * or vmalloc().
 * The return is the new skb buffer.
 * On a failure the return is %NULL, and @data is not freed.
 * Notes :
 *  Before IO, driver allocates only data buffer where NIC put incoming frame
 *  Driver should add room at head (NET_SKB_PAD) and
 *  MUST add room at tail (SKB_DATA_ALIGN(skb_shared_info))
 *  After IO, driver calls build_skb(), to allocate sk_buff and populate it
 *  before giving packet to stack.
 *  RX rings only contain data buffers, not full skbs.
 */
struct sk_buff *__build_skb(void *data, unsigned int frag_size)
{
	struct skb_shared_info *shinfo;
	struct sk_buff *skb;
	unsigned int size = frag_size ? : ksize(data);

	skb = kmem_cache_alloc(skbuff_head_cache, GFP_ATOMIC);
	if (!skb)
		return NULL;

	size -= SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

	memset(skb, 0, offsetof(struct sk_buff, tail));
	skb->truesize = SKB_TRUESIZE(size);
	refcount_set(&skb->users, 1);
	skb->head = data;
	skb->data = data;
	skb_reset_tail_pointer(skb);
	skb->end = skb->tail + size;
	skb->mac_header = (typeof(skb->mac_header))~0U;
	skb->transport_header = (typeof(skb->transport_header))~0U;

	/* make sure we initialize shinfo sequentially */
	shinfo = skb_shinfo(skb);
	memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
	atomic_set(&shinfo->dataref, 1);

	return skb;
}

/* build_skb() is a wrapper over __build_skb(), that specifically
 * takes care of skb->head_frag and skb->pfmemalloc.
 * This means that if @frag_size is not zero, then @data must be backed
 * by a page fragment, not kmalloc() or vmalloc().
 */
struct sk_buff *build_skb(void *data, unsigned int frag_size)
{
	struct sk_buff *skb = __build_skb(data, frag_size);

	if (skb && frag_size) {
		skb->head_frag = 1;
		if (page_is_pfmemalloc(virt_to_head_page(data)))
			skb->pfmemalloc = 1;
	}
	return skb;
}
EXPORT_SYMBOL(build_skb);
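/*
 * Example (illustrative sketch, not part of the original file): a driver
 * RX path using a page fragment, with buflen and frame_len standing in
 * for hypothetical driver-chosen sizes:
 *
 *	unsigned int truesize = SKB_DATA_ALIGN(NET_SKB_PAD + buflen) +
 *				SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
 *	void *buf = netdev_alloc_frag(truesize);
 *	struct sk_buff *skb;
 *
 *	The NIC DMAs the frame to buf + NET_SKB_PAD, then:
 *
 *	skb = build_skb(buf, truesize);
 *	if (skb) {
 *		skb_reserve(skb, NET_SKB_PAD);
 *		skb_put(skb, frame_len);
 *	}
 */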

#define NAPI_SKB_CACHE_SIZE	64

struct napi_alloc_cache {
	struct page_frag_cache page;
	unsigned int skb_count;
	void *skb_cache[NAPI_SKB_CACHE_SIZE];
};

static DEFINE_PER_CPU(struct page_frag_cache, netdev_alloc_cache);
static DEFINE_PER_CPU(struct napi_alloc_cache, napi_alloc_cache);

static void *__netdev_alloc_frag(unsigned int fragsz, gfp_t gfp_mask)
{
	struct page_frag_cache *nc;
	unsigned long flags;
	void *data;

	local_irq_save(flags);
	nc = this_cpu_ptr(&netdev_alloc_cache);
	data = page_frag_alloc(nc, fragsz, gfp_mask);
	local_irq_restore(flags);
	return data;
}

/**
 * netdev_alloc_frag - allocate a page fragment
 * @fragsz: fragment size
 *
 * Allocates a frag from a page for receive buffer.
 * Uses GFP_ATOMIC allocations.
 */
void *netdev_alloc_frag(unsigned int fragsz)
{
	return __netdev_alloc_frag(fragsz, GFP_ATOMIC);
}
EXPORT_SYMBOL(netdev_alloc_frag);

static void *__napi_alloc_frag(unsigned int fragsz, gfp_t gfp_mask)
{
	struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);

	return page_frag_alloc(&nc->page, fragsz, gfp_mask);
}

void *napi_alloc_frag(unsigned int fragsz)
{
	return __napi_alloc_frag(fragsz, GFP_ATOMIC);
}
EXPORT_SYMBOL(napi_alloc_frag);
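/*
 * Illustrative note (not part of the original file): netdev_alloc_frag()
 * is usable from any context because it disables hard IRQs around the
 * per-CPU cache, while napi_alloc_frag() may only be called from
 * softirq/NAPI context. A hypothetical poll handler refilling its RX ring
 * might do:
 *
 *	void *buf = napi_alloc_frag(truesize);
 *
 * where truesize already includes
 * SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) so the fragment can
 * later be handed to build_skb().
 */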

/**
 *	__netdev_alloc_skb - allocate an skbuff for rx on a specific device
 *	@dev: network device to receive on
 *	@len: length to allocate
 *	@gfp_mask: get_free_pages mask, passed to alloc_skb
 *
 *	Allocate a new &sk_buff and assign it a usage count of one. The
 *	buffer has NET_SKB_PAD headroom built in. Users should allocate
 *	the headroom they think they need without accounting for the
 *	built in space. The built in space is used for optimisations.
 *
 *	%NULL is returned if there is no free memory.
 */
struct sk_buff *__netdev_alloc_skb(struct net_device *dev, unsigned int len,
				   gfp_t gfp_mask)
{
	struct page_frag_cache *nc;
	unsigned long flags;
	struct sk_buff *skb;
	bool pfmemalloc;
	void *data;

	len += NET_SKB_PAD;

	if ((len > SKB_WITH_OVERHEAD(PAGE_SIZE)) ||
	    (gfp_mask & (__GFP_DIRECT_RECLAIM | GFP_DMA))) {
		skb = __alloc_skb(len, gfp_mask, SKB_ALLOC_RX, NUMA_NO_NODE);
		if (!skb)
			goto skb_fail;
		goto skb_success;
	}

	len += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	len = SKB_DATA_ALIGN(len);

	if (sk_memalloc_socks())
		gfp_mask |= __GFP_MEMALLOC;

	local_irq_save(flags);

	nc = this_cpu_ptr(&netdev_alloc_cache);
	data = page_frag_alloc(nc, len, gfp_mask);
	pfmemalloc = nc->pfmemalloc;

	local_irq_restore(flags);

	if (unlikely(!data))
		return NULL;

	skb = __build_skb(data, len);
	if (unlikely(!skb)) {
		skb_free_frag(data);
		return NULL;
	}

	/* use OR instead of assignment to avoid clearing of bits in mask */
	if (pfmemalloc)
		skb->pfmemalloc = 1;
	skb->head_frag = 1;

skb_success:
	skb_reserve(skb, NET_SKB_PAD);
	skb->dev = dev;

skb_fail:
	return skb;
}
EXPORT_SYMBOL(__netdev_alloc_skb);
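/*
 * Example (illustrative, not part of the original file): drivers normally
 * use the netdev_alloc_skb() wrapper from <linux/skbuff.h>, which supplies
 * GFP_ATOMIC here. With pkt_len standing in for a received frame length:
 *
 *	skb = netdev_alloc_skb(dev, pkt_len);
 *	if (!skb)
 *		goto drop;
 *
 * The NET_SKB_PAD headroom is already reserved on the returned skb.
 */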

/**
 *	__napi_alloc_skb - allocate skbuff for rx in a specific NAPI instance
 *	@napi: napi instance this buffer was allocated for
 *	@len: length to allocate
 *	@gfp_mask: get_free_pages mask, passed to alloc_skb and alloc_pages
 *
 *	Allocate a new sk_buff for use in NAPI receive. This buffer will
 *	attempt to allocate the head from a special reserved region used
 *	only for NAPI Rx allocation. By doing this we can save several
 *	CPU cycles by avoiding having to disable and re-enable IRQs.
 *
 *	%NULL is returned if there is no free memory.
 */
struct sk_buff *__napi_alloc_skb(struct napi_struct *napi, unsigned int len,
				 gfp_t gfp_mask)
{
	struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);
	struct sk_buff *skb;
	void *data;

	len += NET_SKB_PAD + NET_IP_ALIGN;

	if ((len > SKB_WITH_OVERHEAD(PAGE_SIZE)) ||
	    (gfp_mask & (__GFP_DIRECT_RECLAIM | GFP_DMA))) {
		skb = __alloc_skb(len, gfp_mask, SKB_ALLOC_RX, NUMA_NO_NODE);
		if (!skb)
			goto skb_fail;
		goto skb_success;
	}

	len += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	len = SKB_DATA_ALIGN(len);

	if (sk_memalloc_socks())
		gfp_mask |= __GFP_MEMALLOC;

	data = page_frag_alloc(&nc->page, len, gfp_mask);
	if (unlikely(!data))
		return NULL;

	skb = __build_skb(data, len);
	if (unlikely(!skb)) {
		skb_free_frag(data);
		return NULL;
	}

	/* use OR instead of assignment to avoid clearing of bits in mask */
	if (nc->page.pfmemalloc)
		skb->pfmemalloc = 1;
	skb->head_frag = 1;

skb_success:
	skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
	skb->dev = napi->dev;

skb_fail:
	return skb;
}
EXPORT_SYMBOL(__napi_alloc_skb);
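/*
 * Illustrative sketch (not part of the original file): from a NAPI poll
 * handler, the napi_alloc_skb() wrapper supplies GFP_ATOMIC and is the
 * preferred way to allocate small RX skbs:
 *
 *	skb = napi_alloc_skb(napi, RX_COPYBREAK);
 *
 * RX_COPYBREAK stands in for a driver-specific copy-break length; the
 * NET_SKB_PAD + NET_IP_ALIGN headroom is reserved by this function.
 */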

void skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page, int off,
		     int size, unsigned int truesize)
{
	skb_fill_page_desc(skb, i, page, off, size);
	skb->len += size;
	skb->data_len += size;
	skb->truesize += truesize;
}
EXPORT_SYMBOL(skb_add_rx_frag);

void skb_coalesce_rx_frag(struct sk_buff *skb, int i, int size,
			  unsigned int truesize)
{
	skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

	skb_frag_size_add(frag, size);
	skb->len += size;
	skb->data_len += size;
	skb->truesize += truesize;
}
EXPORT_SYMBOL(skb_coalesce_rx_frag);
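/*
 * Illustrative note (not part of the original file): both helpers assume
 * the caller already holds a page reference for the fragment. A
 * hypothetical multi-buffer RX path might append a page to the next free
 * frag slot with:
 *
 *	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, offset,
 *			frag_len, PAGE_SIZE);
 *
 * passing PAGE_SIZE as @truesize when each fragment consumes a full page.
 */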

static void skb_drop_list(struct sk_buff **listp)
{
	kfree_skb_list(*listp);
	*listp = NULL;
}

static inline void skb_drop_fraglist(struct sk_buff *skb)
{
	skb_drop_list(&skb_shinfo(skb)->frag_list);
}

static void skb_clone_fraglist(struct sk_buff *skb)
{
	struct sk_buff *list;

	skb_walk_frags(skb, list)
		skb_get(list);
}

static void skb_free_head(struct sk_buff *skb)
{
	unsigned char *head = skb->head;

	if (skb->head_frag)
		skb_free_frag(head);
	else
		kfree(head);
}

static void skb_release_data(struct sk_buff *skb)
{
	struct skb_shared_info *shinfo = skb_shinfo(skb);
	int i;

	if (skb->cloned &&
	    atomic_sub_return(skb->nohdr ? (1 << SKB_DATAREF_SHIFT) + 1 : 1,
			      &shinfo->dataref))
		return;

	for (i = 0; i < shinfo->nr_frags; i++)
		__skb_frag_unref(&shinfo->frags[i]);

	if (shinfo->frag_list)
		kfree_skb_list(shinfo->frag_list);

	skb_zcopy_clear(skb, true);
	skb_free_head(skb);
}
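/*
 * Illustrative note (not part of the original file): shinfo->dataref is
 * split in two halves by SKB_DATAREF_SHIFT; the low bits count all
 * references to the data, the high bits count payload-only references.
 * The atomic_sub_return() above therefore drops both a header and a
 * payload reference in one operation when skb->nohdr is set.
 */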

/*
 *	Free an skbuff by memory without cleaning the state.
 */
static void kfree_skbmem(struct sk_buff *skb)
{
	struct sk_buff_fclones *fclones;

	switch (skb->fclone) {
	case SKB_FCLONE_UNAVAILABLE:
		kmem_cache_free(skbuff_head_cache, skb);
		return;

	case SKB_FCLONE_ORIG:
		fclones = container_of(skb, struct sk_buff_fclones, skb1);

		/* We usually free the clone (TX completion) before original skb
		 * This test would have no chance to be true for the clone,
		 * while here, branch prediction will be good.
		 */
		if (refcount_read(&fclones->fclone_ref) == 1)
			goto fastpath;
		break;

	default: /* SKB_FCLONE_CLONE */
		fclones = container_of(skb, struct sk_buff_fclones, skb2);
		break;
	}
	if (!refcount_dec_and_test(&fclones->fclone_ref))
		return;
fastpath:
	kmem_cache_free(skbuff_fclone_cache, fclones);
}
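/*
 * Illustrative note (not part of the original file): struct
 * sk_buff_fclones bundles skb1 (the original), skb2 (the pre-allocated
 * clone) and fclone_ref in a single skbuff_fclone_cache object, so
 * container_of() can recover the whole object from either skb and the
 * slab entry is only returned once fclone_ref drops to zero.
 */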

void skb_release_head_state(struct sk_buff *skb)
{
	skb_dst_drop(skb);
	if (skb->destructor) {
		WARN_ON(in_irq());
		skb->destructor(skb);
	}
#if IS_ENABLED(CONFIG_NF_CONNTRACK)
	nf_conntrack_put(skb_nfct(skb));
#endif
	skb_ext_put(skb);
}

/* Free everything but the sk_buff shell. */
static void skb_release_all(struct sk_buff *skb)
{
	skb_release_head_state(skb);
	if (likely(skb->head))
		skb_release_data(skb);
}

/**
 *	__kfree_skb - private function
 *	@skb: buffer
 *
 *	Free an sk_buff. Release anything attached to the buffer.
 *	Clean the state. This is an internal helper function. Users should
 *	always call kfree_skb
 */

void __kfree_skb(struct sk_buff *skb)
{
	skb_release_all(skb);
	kfree_skbmem(skb);
}
EXPORT_SYMBOL(__kfree_skb);
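/*
 * Illustrative note (not part of the original file): ordinary code should
 * not call __kfree_skb() directly but kfree_skb(), which drops a reference
 * and only frees once skb->users reaches zero, or consume_skb() for frames
 * that were delivered successfully, which keeps drop tracing meaningful.
 */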