// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *	Routines having to do with the 'struct sk_buff' memory handlers.
 *
 *	Authors:	Alan Cox <alan@lxorguk.ukuu.org.uk>
 *			Florian La Roche <rzsfl@rz.uni-sb.de>
 *
 *	Fixes:
 *		Alan Cox	:	Fixed the worst of the load
 *					balancer bugs.
 *		Dave Platt	:	Interrupt stacking fix.
 *	Richard Kooijman	:	Timestamp fixes.
 *		Alan Cox	:	Changed buffer format.
 *		Alan Cox	:	destructor hook for AF_UNIX etc.
 *		Linus Torvalds	:	Better skb_clone.
 *		Alan Cox	:	Added skb_copy.
 *		Alan Cox	:	Added all the changed routines Linus
 *					only put in the headers
 *		Ray VanTassle	:	Fixed --skb->lock in free
 *		Alan Cox	:	skb_copy copy arp field
 *		Andi Kleen	:	slabified it.
 *		Robert Olsson	:	Removed skb_head_pool
 *
 *	NOTE:
 *		The __skb_ routines should be called with interrupts
 *	disabled, or you better be *real* sure that the operation is atomic
 *	with respect to whatever list is being frobbed (e.g. via lock_sock()
 *	or via disabling bottom half handlers, etc).
 */
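
/*
 * A minimal sketch of the locking rule above (hypothetical caller, not
 * part of this file): the lockless __skb_queue_head() expects the caller
 * to provide atomicity, which is exactly what the locked wrapper
 * skb_queue_head() does internally:
 *
 *	unsigned long flags;
 *
 *	spin_lock_irqsave(&list->lock, flags);
 *	__skb_queue_head(list, skb);	// caller holds the queue lock
 *	spin_unlock_irqrestore(&list->lock, flags);
 */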

/*
 *	The functions in this file will not compile correctly with gcc 2.4.x
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/slab.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/sctp.h>
#include <linux/netdevice.h>
#ifdef CONFIG_NET_CLS_ACT
#include <net/pkt_sched.h>
#endif
#include <linux/string.h>
#include <linux/skbuff.h>
#include <linux/splice.h>
#include <linux/cache.h>
#include <linux/rtnetlink.h>
#include <linux/init.h>
#include <linux/scatterlist.h>
#include <linux/errqueue.h>
#include <linux/prefetch.h>
#include <linux/if_vlan.h>
#include <linux/mpls.h>

#include <net/protocol.h>
#include <net/dst.h>
#include <net/sock.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <net/xfrm.h>
#include <net/mpls.h>
#include <net/mptcp.h>

#include <linux/uaccess.h>
#include <trace/events/skb.h>
#include <linux/highmem.h>
#include <linux/capability.h>
#include <linux/user_namespace.h>
#include <linux/indirect_call_wrapper.h>

#include "datagram.h"

struct kmem_cache *skbuff_head_cache __ro_after_init;
static struct kmem_cache *skbuff_fclone_cache __ro_after_init;
#ifdef CONFIG_SKB_EXTENSIONS
static struct kmem_cache *skbuff_ext_cache __ro_after_init;
#endif
int sysctl_max_skb_frags __read_mostly = MAX_SKB_FRAGS;
EXPORT_SYMBOL(sysctl_max_skb_frags);

/**
 *	skb_panic - private function for out-of-line support
 *	@skb:	buffer
 *	@sz:	size
 *	@addr:	address
 *	@msg:	skb_over_panic or skb_under_panic
 *
 *	Out-of-line support for skb_put() and skb_push().
 *	Called via the wrapper skb_over_panic() or skb_under_panic().
 *	Keep out of line to prevent kernel bloat.
 *	__builtin_return_address is not used because it is not always reliable.
 */
static void skb_panic(struct sk_buff *skb, unsigned int sz, void *addr,
		      const char msg[])
{
	pr_emerg("%s: text:%px len:%d put:%d head:%px data:%px tail:%#lx end:%#lx dev:%s\n",
		 msg, addr, skb->len, sz, skb->head, skb->data,
		 (unsigned long)skb->tail, (unsigned long)skb->end,
		 skb->dev ? skb->dev->name : "<NULL>");
	BUG();
}

static void skb_over_panic(struct sk_buff *skb, unsigned int sz, void *addr)
{
	skb_panic(skb, sz, addr, __func__);
}

static void skb_under_panic(struct sk_buff *skb, unsigned int sz, void *addr)
{
	skb_panic(skb, sz, addr, __func__);
}

/* Caller must provide SKB that is memset cleared */
static void __build_skb_around(struct sk_buff *skb, void *data,
			       unsigned int frag_size)
{
	struct skb_shared_info *shinfo;
	unsigned int size = frag_size ? : ksize(data);

	size -= SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

	/* Assumes caller memset cleared SKB */
	skb->truesize = SKB_TRUESIZE(size);
	refcount_set(&skb->users, 1);
	skb->head = data;
	skb->data = data;
	skb_reset_tail_pointer(skb);
	skb->end = skb->tail + size;
	skb->mac_header = (typeof(skb->mac_header))~0U;
	skb->transport_header = (typeof(skb->transport_header))~0U;

	/* make sure we initialize shinfo sequentially */
	shinfo = skb_shinfo(skb);
	memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
	atomic_set(&shinfo->dataref, 1);

	skb_set_kcov_handle(skb, kcov_common_handle());
}

/**
 * __build_skb - build a network buffer
 * @data: data buffer provided by caller
 * @frag_size: size of data, or 0 if head was kmalloced
 *
 * Allocate a new &sk_buff. Caller provides space holding head and
 * skb_shared_info. @data must have been allocated by kmalloc() only if
 * @frag_size is 0, otherwise data should come from the page allocator
 * or vmalloc().
 * Returns the new skb buffer; on failure the return is %NULL and @data
 * is not freed.
 * Notes :
 *  Before IO, the driver allocates only the data buffer where the NIC
 *  puts the incoming frame.
 *  The driver should add room at head (NET_SKB_PAD) and
 *  MUST add room at tail (SKB_DATA_ALIGN(skb_shared_info)).
 *  After IO, the driver calls build_skb() to allocate the sk_buff and
 *  populate it before giving the packet to the stack.
 *  RX rings only contain data buffers, not full skbs.
 */
struct sk_buff *__build_skb(void *data, unsigned int frag_size)
{
	struct sk_buff *skb;

	skb = kmem_cache_alloc(skbuff_head_cache, GFP_ATOMIC);
	if (unlikely(!skb))
		return NULL;

	memset(skb, 0, offsetof(struct sk_buff, tail));
	__build_skb_around(skb, data, frag_size);

	return skb;
}

/* build_skb() is a wrapper over __build_skb() that specifically
 * takes care of skb->head and skb->pfmemalloc.
 * This means that if @frag_size is not zero, then @data must be backed
 * by a page fragment, not kmalloc() or vmalloc().
 */
struct sk_buff *build_skb(void *data, unsigned int frag_size)
{
	struct sk_buff *skb = __build_skb(data, frag_size);

	if (skb && frag_size) {
		skb->head_frag = 1;
		if (page_is_pfmemalloc(virt_to_head_page(data)))
			skb->pfmemalloc = 1;
	}
	return skb;
}
EXPORT_SYMBOL(build_skb);
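
/*
 * A minimal usage sketch for build_skb() (hypothetical driver code, not
 * part of this file): size the receive buffer as the kernel-doc above
 * describes, let the NIC DMA the frame in, then wrap the buffer.
 * buf_len and frame_len are assumed names:
 *
 *	unsigned int buf_len = SKB_DATA_ALIGN(NET_SKB_PAD + 1536) +
 *			       SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
 *	void *buf = netdev_alloc_frag(buf_len);
 *
 *	// ... NIC writes frame_len bytes at buf + NET_SKB_PAD ...
 *
 *	struct sk_buff *skb = build_skb(buf, buf_len);
 *
 *	if (skb) {
 *		skb_reserve(skb, NET_SKB_PAD);	// skip the built-in headroom
 *		skb_put(skb, frame_len);	// mark received bytes as data
 *	}
 *
 * On failure build_skb() does not free @data, so the caller must.
 */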

/**
 * build_skb_around - build a network buffer around provided skb
 * @skb: sk_buff provided by caller, must be memset cleared
 * @data: data buffer provided by caller
 * @frag_size: size of data, or 0 if head was kmalloced
 */
struct sk_buff *build_skb_around(struct sk_buff *skb,
				 void *data, unsigned int frag_size)
{
	if (unlikely(!skb))
		return NULL;

	__build_skb_around(skb, data, frag_size);

	if (frag_size) {
		skb->head_frag = 1;
		if (page_is_pfmemalloc(virt_to_head_page(data)))
			skb->pfmemalloc = 1;
	}
	return skb;
}
EXPORT_SYMBOL(build_skb_around);

#define NAPI_SKB_CACHE_SIZE	64

struct napi_alloc_cache {
	struct page_frag_cache page;
	unsigned int skb_count;
	void *skb_cache[NAPI_SKB_CACHE_SIZE];
};

static DEFINE_PER_CPU(struct page_frag_cache, netdev_alloc_cache);
static DEFINE_PER_CPU(struct napi_alloc_cache, napi_alloc_cache);

static void *__alloc_frag_align(unsigned int fragsz, gfp_t gfp_mask,
				unsigned int align_mask)
{
	struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);

	return page_frag_alloc_align(&nc->page, fragsz, gfp_mask, align_mask);
}

void *__napi_alloc_frag_align(unsigned int fragsz, unsigned int align_mask)
{
	fragsz = SKB_DATA_ALIGN(fragsz);

	return __alloc_frag_align(fragsz, GFP_ATOMIC, align_mask);
}
EXPORT_SYMBOL(__napi_alloc_frag_align);

void *__netdev_alloc_frag_align(unsigned int fragsz, unsigned int align_mask)
{
	struct page_frag_cache *nc;
	void *data;

	fragsz = SKB_DATA_ALIGN(fragsz);
	if (in_irq() || irqs_disabled()) {
		nc = this_cpu_ptr(&netdev_alloc_cache);
		data = page_frag_alloc_align(nc, fragsz, GFP_ATOMIC, align_mask);
	} else {
		local_bh_disable();
		data = __alloc_frag_align(fragsz, GFP_ATOMIC, align_mask);
		local_bh_enable();
	}
	return data;
}
EXPORT_SYMBOL(__netdev_alloc_frag_align);
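
/*
 * A minimal sketch (hypothetical caller): drivers normally reach the
 * helpers above through netdev_alloc_frag()/napi_alloc_frag(), which hand
 * out a slice of a per-CPU page; skb_free_frag() drops the reference:
 *
 *	void *data = netdev_alloc_frag(2048);
 *
 *	if (data)
 *		skb_free_frag(data);	// releases the page-fragment ref
 */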

/*
 * kmalloc_reserve is a wrapper around kmalloc_node_track_caller that tells
 * the caller if emergency pfmemalloc reserves are being used. If it is and
 * the socket is later found to be SOCK_MEMALLOC then PFMEMALLOC reserves
 * may be used. Otherwise, the packet data may be discarded until enough
 * memory is free.
 */
static void *kmalloc_reserve(size_t size, gfp_t flags, int node,
			     bool *pfmemalloc)
{
	void *obj;
	bool ret_pfmemalloc = false;

	/*
	 * Try a regular allocation; when that fails and we're not entitled
	 * to the reserves, fail.
	 */
	obj = kmalloc_node_track_caller(size,
					flags | __GFP_NOMEMALLOC | __GFP_NOWARN,
					node);
	if (obj || !(gfp_pfmemalloc_allowed(flags)))
		goto out;

	/* Try again but now we are using pfmemalloc reserves */
	ret_pfmemalloc = true;
	obj = kmalloc_node_track_caller(size, flags, node);

out:
	if (pfmemalloc)
		*pfmemalloc = ret_pfmemalloc;

	return obj;
}

/* Allocate a new skbuff. We do this ourselves so we can fill in a few
 * 'private' fields and also do memory statistics to find all the
 * [BEEP] leaks.
 */

/**
 *	__alloc_skb	-	allocate a network buffer
 *	@size: size to allocate
 *	@gfp_mask: allocation mask
 *	@flags: If SKB_ALLOC_FCLONE is set, allocate from fclone cache
 *		instead of head cache and allocate a cloned (child) skb.
 *		If SKB_ALLOC_RX is set, __GFP_MEMALLOC will be used for
 *		allocations in case the data is required for writeback
 *	@node: numa node to allocate memory on
 *
 *	Allocate a new &sk_buff. The returned buffer has no headroom and a
 *	tail room of at least size bytes. The object has a reference count
 *	of one. The return is the buffer. On a failure the return is %NULL.
 *
 *	Buffers may only be allocated from interrupts using a @gfp_mask of
 *	%GFP_ATOMIC.
 */
struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
			    int flags, int node)
{
	struct kmem_cache *cache;
	struct sk_buff *skb;
	u8 *data;
	bool pfmemalloc;

	cache = (flags & SKB_ALLOC_FCLONE)
		? skbuff_fclone_cache : skbuff_head_cache;

	if (sk_memalloc_socks() && (flags & SKB_ALLOC_RX))
		gfp_mask |= __GFP_MEMALLOC;

	/* Get the HEAD */
	skb = kmem_cache_alloc_node(cache, gfp_mask & ~__GFP_DMA, node);
	if (unlikely(!skb))
		return NULL;
	prefetchw(skb);

	/* We do our best to align skb_shared_info on a separate cache
	 * line. It usually works because kmalloc(X > SMP_CACHE_BYTES) gives
	 * aligned memory blocks, unless SLUB/SLAB debug is enabled.
	 * Both skb->head and skb_shared_info are cache line aligned.
	 */
	size = SKB_DATA_ALIGN(size);
	size += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	data = kmalloc_reserve(size, gfp_mask, node, &pfmemalloc);
	if (unlikely(!data))
		goto nodata;
	/* kmalloc(size) might give us more room than requested.
	 * Put skb_shared_info exactly at the end of the allocated zone,
	 * to allow max possible filling before reallocation.
	 */
	size = SKB_WITH_OVERHEAD(ksize(data));
	prefetchw(data + size);

	/*
	 * Only clear those fields we need to clear, not those that we will
	 * actually initialise below. Hence, don't put any more fields after
	 * the tail pointer in struct sk_buff!
	 */
	memset(skb, 0, offsetof(struct sk_buff, tail));
	__build_skb_around(skb, data, 0);
	skb->pfmemalloc = pfmemalloc;

	if (flags & SKB_ALLOC_FCLONE) {
		struct sk_buff_fclones *fclones;

		fclones = container_of(skb, struct sk_buff_fclones, skb1);

		skb->fclone = SKB_FCLONE_ORIG;
		refcount_set(&fclones->fclone_ref, 1);

		fclones->skb2.fclone = SKB_FCLONE_CLONE;
	}

	return skb;

nodata:
	kmem_cache_free(cache, skb);
	return NULL;
}
EXPORT_SYMBOL(__alloc_skb);
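
/*
 * A minimal usage sketch for the allocator above (hypothetical caller):
 * most code goes through the alloc_skb() wrapper, reserves headroom for
 * the protocol headers it will push later, and appends payload at the
 * tail. hlen, dlen and payload are assumed names:
 *
 *	struct sk_buff *skb = alloc_skb(hlen + dlen, GFP_ATOMIC);
 *
 *	if (skb) {
 *		skb_reserve(skb, hlen);			// headroom for headers
 *		skb_put_data(skb, payload, dlen);	// payload in tail room
 *	}
 */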

/**
 *	__netdev_alloc_skb - allocate an skbuff for rx on a specific device
 *	@dev: network device to receive on
 *	@len: length to allocate
 *	@gfp_mask: get_free_pages mask, passed to alloc_skb
 *
 *	Allocate a new &sk_buff and assign it a usage count of one. The
 *	buffer has NET_SKB_PAD headroom built in. Users should allocate
 *	the headroom they think they need without accounting for the
 *	built in space. The built in space is used for optimisations.
 *
 *	%NULL is returned if there is no free memory.
 */
struct sk_buff *__netdev_alloc_skb(struct net_device *dev, unsigned int len,
				   gfp_t gfp_mask)
{
	struct page_frag_cache *nc;
	struct sk_buff *skb;
	bool pfmemalloc;
	void *data;

	len += NET_SKB_PAD;

	/* If requested length is either too small or too big,
	 * we use kmalloc() for skb->head allocation.
	 */
	if (len <= SKB_WITH_OVERHEAD(1024) ||
	    len > SKB_WITH_OVERHEAD(PAGE_SIZE) ||
	    (gfp_mask & (__GFP_DIRECT_RECLAIM | GFP_DMA))) {
		skb = __alloc_skb(len, gfp_mask, SKB_ALLOC_RX, NUMA_NO_NODE);
		if (!skb)
			goto skb_fail;
		goto skb_success;
	}

	len += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	len = SKB_DATA_ALIGN(len);

	if (sk_memalloc_socks())
		gfp_mask |= __GFP_MEMALLOC;

	if (in_irq() || irqs_disabled()) {
		nc = this_cpu_ptr(&netdev_alloc_cache);
		data = page_frag_alloc(nc, len, gfp_mask);
		pfmemalloc = nc->pfmemalloc;
	} else {
		local_bh_disable();
		nc = this_cpu_ptr(&napi_alloc_cache.page);
		data = page_frag_alloc(nc, len, gfp_mask);
		pfmemalloc = nc->pfmemalloc;
		local_bh_enable();
	}

	if (unlikely(!data))
		return NULL;

	skb = __build_skb(data, len);
	if (unlikely(!skb)) {
		skb_free_frag(data);
		return NULL;
	}

	if (pfmemalloc)
		skb->pfmemalloc = 1;
	skb->head_frag = 1;

skb_success:
	skb_reserve(skb, NET_SKB_PAD);
	skb->dev = dev;

skb_fail:
	return skb;
}
EXPORT_SYMBOL(__netdev_alloc_skb);
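
/*
 * A minimal sketch (hypothetical driver): netdev_alloc_skb() wraps the
 * function above with %GFP_ATOMIC, and the NET_SKB_PAD headroom is already
 * reserved on return. pkt_len and rx_buf are assumed names:
 *
 *	struct sk_buff *skb = netdev_alloc_skb(dev, pkt_len);
 *
 *	if (skb)
 *		skb_put_data(skb, rx_buf, pkt_len);	// copy received frame
 */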

/**
 *	__napi_alloc_skb - allocate skbuff for rx in a specific NAPI instance
 *	@napi: napi instance this buffer was allocated for
 *	@len: length to allocate
 *	@gfp_mask: get_free_pages mask, passed to alloc_skb and alloc_pages
 *
 *	Allocate a new sk_buff for use in NAPI receive. This buffer will
 *	attempt to allocate the head from a special reserved region used
 *	only for NAPI Rx allocation. By doing this we can save several
 *	CPU cycles by avoiding having to disable and re-enable IRQs.
 *
 *	%NULL is returned if there is no free memory.
 */
struct sk_buff *__napi_alloc_skb(struct napi_struct *napi, unsigned int len,
				 gfp_t gfp_mask)
{
	struct napi_alloc_cache *nc;
	struct sk_buff *skb;
	void *data;

	len += NET_SKB_PAD + NET_IP_ALIGN;

	/* If requested length is either too small or too big,
	 * we use kmalloc() for skb->head allocation.
	 */
	if (len <= SKB_WITH_OVERHEAD(1024) ||
	    len > SKB_WITH_OVERHEAD(PAGE_SIZE) ||
	    (gfp_mask & (__GFP_DIRECT_RECLAIM | GFP_DMA))) {
		skb = __alloc_skb(len, gfp_mask, SKB_ALLOC_RX, NUMA_NO_NODE);
		if (!skb)
			goto skb_fail;
		goto skb_success;
	}

	nc = this_cpu_ptr(&napi_alloc_cache);
	len += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	len = SKB_DATA_ALIGN(len);

	if (sk_memalloc_socks())
		gfp_mask |= __GFP_MEMALLOC;

	data = page_frag_alloc(&nc->page, len, gfp_mask);
	if (unlikely(!data))
		return NULL;

	skb = __build_skb(data, len);
	if (unlikely(!skb)) {
		skb_free_frag(data);
		return NULL;
	}

	if (nc->page.pfmemalloc)
		skb->pfmemalloc = 1;
	skb->head_frag = 1;

skb_success:
	skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
	skb->dev = napi->dev;

skb_fail:
	return skb;
}
EXPORT_SYMBOL(__napi_alloc_skb);
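
/*
 * A minimal sketch (hypothetical driver poll routine): napi_alloc_skb()
 * wraps the function above with %GFP_ATOMIC and is intended for the
 * driver's NAPI poll callback, i.e. softirq context. pkt_len and rx_buf
 * are assumed names:
 *
 *	struct sk_buff *skb = napi_alloc_skb(napi, pkt_len);
 *
 *	if (skb) {
 *		skb_put_data(skb, rx_buf, pkt_len);
 *		napi_gro_receive(napi, skb);	// hand frame to the stack
 *	}
 */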

void skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page, int off,
		     int size, unsigned int truesize)
{
	skb_fill_page_desc(skb, i, page, off, size);
	skb->len += size;
	skb->data_len += size;
	skb->truesize += truesize;
}
EXPORT_SYMBOL(skb_add_rx_frag);

void skb_coalesce_rx_frag(struct sk_buff *skb, int i, int size,
			  unsigned int truesize)
{
	skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

	skb_frag_size_add(frag, size);
	skb->len += size;
	skb->data_len += size;
	skb->truesize += truesize;
}
EXPORT_SYMBOL(skb_coalesce_rx_frag);
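
/*
 * A minimal sketch (hypothetical driver): skb_add_rx_frag() appends a page
 * fragment to an skb and, as above, grows len, data_len and truesize.
 * page, offset and frag_len are assumed names; truesize is the full buffer
 * space the fragment consumes (a whole page here):
 *
 *	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
 *			offset, frag_len, PAGE_SIZE);
 */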

static void skb_drop_list(struct sk_buff **listp)
{
	kfree_skb_list(*listp);
	*listp = NULL;
}

static inline void skb_drop_fraglist(struct sk_buff *skb)
{
	skb_drop_list(&skb_shinfo(skb)->frag_list);
}

static void skb_clone_fraglist(struct sk_buff *skb)
{
	struct sk_buff *list;

	skb_walk_frags(skb, list)
		skb_get(list);
}

static void skb_free_head(struct sk_buff *skb)
{
	unsigned char *head = skb->head;

	if (skb->head_frag)
		skb_free_frag(head);
	else
		kfree(head);
}

static void skb_release_data(struct sk_buff *skb)
{
	struct skb_shared_info *shinfo = skb_shinfo(skb);
	int i;

	if (skb->cloned &&
	    atomic_sub_return(skb->nohdr ? (1 << SKB_DATAREF_SHIFT) + 1 : 1,
			      &shinfo->dataref))
		return;

	skb_zcopy_clear(skb, true);

	for (i = 0; i < shinfo->nr_frags; i++)
		__skb_frag_unref(&shinfo->frags[i]);

	if (shinfo->frag_list)
		kfree_skb_list(shinfo->frag_list);

	skb_free_head(skb);
}

/*
 * Free the skbuff's memory without cleaning the state.
 */
static void kfree_skbmem(struct sk_buff *skb)
{
	struct sk_buff_fclones *fclones;

	switch (skb->fclone) {
	case SKB_FCLONE_UNAVAILABLE:
		kmem_cache_free(skbuff_head_cache, skb);
		return;

	case SKB_FCLONE_ORIG:
		fclones = container_of(skb, struct sk_buff_fclones, skb1);

		/* We usually free the clone (TX completion) before the
		 * original skb. This test would have no chance to be true
		 * for the clone, while here, branch prediction will be good.
		 */
		if (refcount_read(&fclones->fclone_ref) == 1)
			goto fastpath;
		break;

	default: /* SKB_FCLONE_CLONE */
		fclones = container_of(skb, struct sk_buff_fclones, skb2);
		break;
	}
	if (!refcount_dec_and_test(&fclones->fclone_ref))
		return;
fastpath:
	kmem_cache_free(skbuff_fclone_cache, fclones);
}

void skb_release_head_state(struct sk_buff *skb)
{
	skb_dst_drop(skb);
	if (skb->destructor) {
		WARN_ON(in_irq());
		skb->destructor(skb);
	}
#if IS_ENABLED(CONFIG_NF_CONNTRACK)
	nf_conntrack_put(skb_nfct(skb));
#endif
	skb_ext_put(skb);
}

/* Free everything but the sk_buff shell. */
static void skb_release_all(struct sk_buff *skb)
{
	skb_release_head_state(skb);
	if (likely(skb->head))
		skb_release_data(skb);
}

/**
 *	__kfree_skb - private function
 *	@skb: buffer
 *
 *	Free an sk_buff. Release anything attached to the buffer.
 *	Clean the state. This is an internal helper function. Users should
 *	always call kfree_skb().
 */
void __kfree_skb(struct sk_buff *skb)
{
	skb_release_all(skb);
	kfree_skbmem(skb);
}
EXPORT_SYMBOL(__kfree_skb);