// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *	Routines having to do with the 'struct sk_buff' memory handlers.
 *
 *	Authors:	Alan Cox <alan@lxorguk.ukuu.org.uk>
 *			Florian La Roche <rzsfl@rz.uni-sb.de>
 *
 *	Fixes:
 *		Alan Cox	:	Fixed the worst of the load
 *					balancer bugs.
 *		Dave Platt	:	Interrupt stacking fix.
 *		Richard Kooijman:	Timestamp fixes.
 *		Alan Cox	:	Changed buffer format.
 *		Alan Cox	:	destructor hook for AF_UNIX etc.
 *		Linus Torvalds	:	Better skb_clone.
 *		Alan Cox	:	Added skb_copy.
 *		Alan Cox	:	Added all the changed routines Linus
 *					only put in the headers
 *		Ray VanTassle	:	Fixed --skb->lock in free
 *		Alan Cox	:	skb_copy copy arp field
 *		Andi Kleen	:	slabified it.
 *		Robert Olsson	:	Removed skb_head_pool
 *
 *	NOTE:
 *		The __skb_ routines should be called with interrupts
 *	disabled, or you better be *real* sure that the operation is atomic
 *	with respect to whatever list is being frobbed (e.g. via lock_sock()
 *	or via disabling bottom half handlers, etc).
 */
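
/* Example (illustrative, not taken from this file): making a list
 * operation atomic via the socket lock, as the NOTE above suggests:
 *
 *	lock_sock(sk);
 *	__skb_queue_tail(&sk->sk_receive_queue, skb);
 *	release_sock(sk);
 */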

/*
 *	The functions in this file will not compile correctly with gcc 2.4.x
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/slab.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/sctp.h>
#include <linux/netdevice.h>
#ifdef CONFIG_NET_CLS_ACT
#include <net/pkt_sched.h>
#endif
#include <linux/string.h>
#include <linux/skbuff.h>
#include <linux/splice.h>
#include <linux/cache.h>
#include <linux/rtnetlink.h>
#include <linux/init.h>
#include <linux/scatterlist.h>
#include <linux/errqueue.h>
#include <linux/prefetch.h>
#include <linux/bitfield.h>
#include <linux/if_vlan.h>
#include <linux/mpls.h>
#include <linux/kcov.h>
#include <linux/iov_iter.h>

#include <net/protocol.h>
#include <net/dst.h>
#include <net/sock.h>
#include <net/checksum.h>
#include <net/gso.h>
#include <net/hotdata.h>
#include <net/ip6_checksum.h>
#include <net/xfrm.h>
#include <net/mpls.h>
#include <net/mptcp.h>
#include <net/mctp.h>
#include <net/page_pool/helpers.h>
#include <net/dropreason.h>

#include <linux/uaccess.h>
#include <trace/events/skb.h>
#include <linux/highmem.h>
#include <linux/capability.h>
#include <linux/user_namespace.h>
#include <linux/indirect_call_wrapper.h>
#include <linux/textsearch.h>

#include "dev.h"
#include "sock_destructor.h"

#ifdef CONFIG_SKB_EXTENSIONS
static struct kmem_cache *skbuff_ext_cache __ro_after_init;
#endif

#define SKB_SMALL_HEAD_SIZE SKB_HEAD_ALIGN(MAX_TCP_HEADER)

/* We want SKB_SMALL_HEAD_CACHE_SIZE to not be a power of two.
 * This should ensure that SKB_SMALL_HEAD_HEADROOM is a unique
 * size, and we can differentiate heads from skb_small_head_cache
 * vs system slabs by looking at their size (skb_end_offset()).
 */
#define SKB_SMALL_HEAD_CACHE_SIZE				\
	(is_power_of_2(SKB_SMALL_HEAD_SIZE) ?			\
		(SKB_SMALL_HEAD_SIZE + L1_CACHE_BYTES) :	\
		SKB_SMALL_HEAD_SIZE)

#define SKB_SMALL_HEAD_HEADROOM					\
	SKB_WITH_OVERHEAD(SKB_SMALL_HEAD_CACHE_SIZE)
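
/* Illustrative consequence, mirroring the test in skb_kfree_head()
 * further down: because SKB_SMALL_HEAD_HEADROOM is a unique size, a
 * head from skb_small_head_cache can be told apart from a kmalloc()ed
 * one by size alone:
 *
 *	if (skb_end_offset(skb) == SKB_SMALL_HEAD_HEADROOM)
 *		...head came from skb_small_head_cache...
 */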

int sysctl_max_skb_frags __read_mostly = MAX_SKB_FRAGS;
EXPORT_SYMBOL(sysctl_max_skb_frags);

/* kcm_write_msgs() relies on casting paged frags to bio_vec to use
 * iov_iter_bvec(). These static asserts ensure the cast is valid as
 * long as the netmem is a page.
 */
static_assert(offsetof(struct bio_vec, bv_page) ==
	      offsetof(skb_frag_t, netmem));
static_assert(sizeof_field(struct bio_vec, bv_page) ==
	      sizeof_field(skb_frag_t, netmem));

static_assert(offsetof(struct bio_vec, bv_len) == offsetof(skb_frag_t, len));
static_assert(sizeof_field(struct bio_vec, bv_len) ==
	      sizeof_field(skb_frag_t, len));

static_assert(offsetof(struct bio_vec, bv_offset) ==
	      offsetof(skb_frag_t, offset));
static_assert(sizeof_field(struct bio_vec, bv_offset) ==
	      sizeof_field(skb_frag_t, offset));

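/* Sketch of the cast these asserts make safe (illustrative; modeled
 * on kcm_write_msgs(), variable names are hypothetical):
 *
 *	const struct bio_vec *bvec =
 *		(const struct bio_vec *)skb_shinfo(skb)->frags;
 *	iov_iter_bvec(&iter, ITER_SOURCE, bvec,
 *		      skb_shinfo(skb)->nr_frags, skb->data_len);
 */
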
#undef FN
#define FN(reason) [SKB_DROP_REASON_##reason] = #reason,
static const char * const drop_reasons[] = {
	[SKB_CONSUMED] = "CONSUMED",
	DEFINE_DROP_REASON(FN, FN)
};

static const struct drop_reason_list drop_reasons_core = {
	.reasons = drop_reasons,
	.n_reasons = ARRAY_SIZE(drop_reasons),
};

const struct drop_reason_list __rcu *
drop_reasons_by_subsys[SKB_DROP_REASON_SUBSYS_NUM] = {
	[SKB_DROP_REASON_SUBSYS_CORE] = RCU_INITIALIZER(&drop_reasons_core),
};
EXPORT_SYMBOL(drop_reasons_by_subsys);

/**
 * drop_reasons_register_subsys - register another drop reason subsystem
 * @subsys: the subsystem to register, must not be the core
 * @list: the list of drop reasons within the subsystem, must point to
 *	a statically initialized list
 */
void drop_reasons_register_subsys(enum skb_drop_reason_subsys subsys,
				  const struct drop_reason_list *list)
{
	if (WARN(subsys <= SKB_DROP_REASON_SUBSYS_CORE ||
		 subsys >= ARRAY_SIZE(drop_reasons_by_subsys),
		 "invalid subsystem %d\n", subsys))
		return;

	/* must point to statically allocated memory, so INIT is OK */
	RCU_INIT_POINTER(drop_reasons_by_subsys[subsys], list);
}
EXPORT_SYMBOL_GPL(drop_reasons_register_subsys);
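
/* Example (illustrative, names hypothetical): a subsystem supplies a
 * statically initialized list and registers it once at init time:
 *
 *	static const char * const my_drop_reasons[] = { "FOO", "BAR" };
 *	static const struct drop_reason_list my_drop_reason_list = {
 *		.reasons = my_drop_reasons,
 *		.n_reasons = ARRAY_SIZE(my_drop_reasons),
 *	};
 *
 *	drop_reasons_register_subsys(MY_SUBSYS, &my_drop_reason_list);
 */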

/**
 * drop_reasons_unregister_subsys - unregister a drop reason subsystem
 * @subsys: the subsystem to remove, must not be the core
 *
 * Note: This will synchronize_rcu() to ensure no users when it returns.
 */
void drop_reasons_unregister_subsys(enum skb_drop_reason_subsys subsys)
{
	if (WARN(subsys <= SKB_DROP_REASON_SUBSYS_CORE ||
		 subsys >= ARRAY_SIZE(drop_reasons_by_subsys),
		 "invalid subsystem %d\n", subsys))
		return;

	RCU_INIT_POINTER(drop_reasons_by_subsys[subsys], NULL);

	synchronize_rcu();
}
EXPORT_SYMBOL_GPL(drop_reasons_unregister_subsys);

/**
 *	skb_panic - private function for out-of-line support
 *	@skb:	buffer
 *	@sz:	size
 *	@addr:	address
 *	@msg:	skb_over_panic or skb_under_panic
 *
 *	Out-of-line support for skb_put() and skb_push().
 *	Called via the wrapper skb_over_panic() or skb_under_panic().
 *	Keep out of line to prevent kernel bloat.
 *	__builtin_return_address is not used because it is not always reliable.
 */
static void skb_panic(struct sk_buff *skb, unsigned int sz, void *addr,
		      const char msg[])
{
	pr_emerg("%s: text:%px len:%d put:%d head:%px data:%px tail:%#lx end:%#lx dev:%s\n",
		 msg, addr, skb->len, sz, skb->head, skb->data,
		 (unsigned long)skb->tail, (unsigned long)skb->end,
		 skb->dev ? skb->dev->name : "<NULL>");
	BUG();
}

static void skb_over_panic(struct sk_buff *skb, unsigned int sz, void *addr)
{
	skb_panic(skb, sz, addr, __func__);
}

static void skb_under_panic(struct sk_buff *skb, unsigned int sz, void *addr)
{
	skb_panic(skb, sz, addr, __func__);
}

#define NAPI_SKB_CACHE_SIZE	64
#define NAPI_SKB_CACHE_BULK	16
#define NAPI_SKB_CACHE_HALF	(NAPI_SKB_CACHE_SIZE / 2)

#if PAGE_SIZE == SZ_4K

#define NAPI_HAS_SMALL_PAGE_FRAG	1
#define NAPI_SMALL_PAGE_PFMEMALLOC(nc)	((nc).pfmemalloc)

/* specialized page frag allocator using a single order 0 page
 * and slicing it into 1K sized fragments. Constrained to systems
 * with a very limited amount of 1K fragments fitting in a single
 * page - to avoid excessive truesize underestimation
 */

struct page_frag_1k {
	void *va;
	u16 offset;
	bool pfmemalloc;
};

static void *page_frag_alloc_1k(struct page_frag_1k *nc, gfp_t gfp)
{
	struct page *page;
	int offset;

	offset = nc->offset - SZ_1K;
	if (likely(offset >= 0))
		goto use_frag;

	page = alloc_pages_node(NUMA_NO_NODE, gfp, 0);
	if (!page)
		return NULL;

	nc->va = page_address(page);
	nc->pfmemalloc = page_is_pfmemalloc(page);
	offset = PAGE_SIZE - SZ_1K;
	page_ref_add(page, offset / SZ_1K);

use_frag:
	nc->offset = offset;
	return nc->va + offset;
}
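
/* Reference counting sketch for page_frag_alloc_1k() above: a fresh
 * page starts with one reference and is handed out from its top 1K
 * slice downwards; page_ref_add() pre-charges one extra reference per
 * remaining slice, so every fragment owner holds exactly one
 * reference and the page is freed when the last slice is released.
 */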
#else

/* the small page is actually unused in this build; add dummy helpers
 * to please the compiler and avoid later preprocessor conditionals
 */
#define NAPI_HAS_SMALL_PAGE_FRAG	0
#define NAPI_SMALL_PAGE_PFMEMALLOC(nc)	false

struct page_frag_1k {
};

static void *page_frag_alloc_1k(struct page_frag_1k *nc, gfp_t gfp_mask)
{
	return NULL;
}

#endif

struct napi_alloc_cache {
	struct page_frag_cache page;
	struct page_frag_1k page_small;
	unsigned int skb_count;
	void *skb_cache[NAPI_SKB_CACHE_SIZE];
};

static DEFINE_PER_CPU(struct page_frag_cache, netdev_alloc_cache);
static DEFINE_PER_CPU(struct napi_alloc_cache, napi_alloc_cache);

/* Double check that napi_get_frags() allocates skbs with
 * skb->head being backed by slab, not a page fragment.
 * This is to make sure the bug fixed in 3226b158e67c
 * ("net: avoid 32 x truesize under-estimation for tiny skbs")
 * does not accidentally come back.
 */
void napi_get_frags_check(struct napi_struct *napi)
{
	struct sk_buff *skb;

	local_bh_disable();
	skb = napi_get_frags(napi);
	WARN_ON_ONCE(!NAPI_HAS_SMALL_PAGE_FRAG && skb && skb->head_frag);
	napi_free_frags(napi);
	local_bh_enable();
}

void *__napi_alloc_frag_align(unsigned int fragsz, unsigned int align_mask)
{
	struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);

	fragsz = SKB_DATA_ALIGN(fragsz);

	return __page_frag_alloc_align(&nc->page, fragsz, GFP_ATOMIC,
				       align_mask);
}
EXPORT_SYMBOL(__napi_alloc_frag_align);

void *__netdev_alloc_frag_align(unsigned int fragsz, unsigned int align_mask)
{
	void *data;

	fragsz = SKB_DATA_ALIGN(fragsz);
	if (in_hardirq() || irqs_disabled()) {
		struct page_frag_cache *nc = this_cpu_ptr(&netdev_alloc_cache);

		data = __page_frag_alloc_align(nc, fragsz, GFP_ATOMIC,
					       align_mask);
	} else {
		struct napi_alloc_cache *nc;

		local_bh_disable();
		nc = this_cpu_ptr(&napi_alloc_cache);
		data = __page_frag_alloc_align(&nc->page, fragsz, GFP_ATOMIC,
					       align_mask);
		local_bh_enable();
	}
	return data;
}
EXPORT_SYMBOL(__netdev_alloc_frag_align);

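/* Pop one sk_buff from the per-CPU NAPI cache, bulk-refilling it with
 * NAPI_SKB_CACHE_BULK objects from net_hotdata.skbuff_cache when it
 * runs empty. Called from bottom-half (NAPI) paths, which is what
 * makes the plain this_cpu_ptr() access safe.
 */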
static struct sk_buff *napi_skb_cache_get(void)
{
	struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);
	struct sk_buff *skb;

	if (unlikely(!nc->skb_count)) {
		nc->skb_count = kmem_cache_alloc_bulk(net_hotdata.skbuff_cache,
						      GFP_ATOMIC,
						      NAPI_SKB_CACHE_BULK,
						      nc->skb_cache);
		if (unlikely(!nc->skb_count))
			return NULL;
	}

	skb = nc->skb_cache[--nc->skb_count];
	kasan_mempool_unpoison_object(skb, kmem_cache_size(net_hotdata.skbuff_cache));

	return skb;
}

static inline void __finalize_skb_around(struct sk_buff *skb, void *data,
					 unsigned int size)
{
	struct skb_shared_info *shinfo;

	size -= SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

	/* Assumes caller memset cleared SKB */
	skb->truesize = SKB_TRUESIZE(size);
	refcount_set(&skb->users, 1);
	skb->head = data;
	skb->data = data;
	skb_reset_tail_pointer(skb);
	skb_set_end_offset(skb, size);
	skb->mac_header = (typeof(skb->mac_header))~0U;
	skb->transport_header = (typeof(skb->transport_header))~0U;
	skb->alloc_cpu = raw_smp_processor_id();
	/* make sure we initialize shinfo sequentially */
	shinfo = skb_shinfo(skb);
	memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
	atomic_set(&shinfo->dataref, 1);

	skb_set_kcov_handle(skb, kcov_common_handle());
}

static inline void *__slab_build_skb(struct sk_buff *skb, void *data,
				     unsigned int *size)
{
	void *resized;

	/* Must find the allocation size (and grow it to match). */
	*size = ksize(data);
	/* krealloc() will immediately return "data" when
	 * "ksize(data)" is requested: it is the existing upper
	 * bound. As a result, GFP_ATOMIC will be ignored. Note
	 * that this "new" pointer needs to be passed back to the
	 * caller for use so the __alloc_size hinting will be
	 * tracked correctly.
	 */
	resized = krealloc(data, *size, GFP_ATOMIC);
	WARN_ON_ONCE(resized != data);
	return resized;
}

/* build_skb() variant which can operate on slab buffers.
 * Note that this should be used sparingly as slab buffers
 * cannot be combined efficiently by GRO!
 */
struct sk_buff *slab_build_skb(void *data)
{
	struct sk_buff *skb;
	unsigned int size;

	skb = kmem_cache_alloc(net_hotdata.skbuff_cache, GFP_ATOMIC);
	if (unlikely(!skb))
		return NULL;

	memset(skb, 0, offsetof(struct sk_buff, tail));
	data = __slab_build_skb(skb, data, &size);
	__finalize_skb_around(skb, data, size);

	return skb;
}
EXPORT_SYMBOL(slab_build_skb);

/* Caller must provide SKB that is memset cleared */
static void __build_skb_around(struct sk_buff *skb, void *data,
			       unsigned int frag_size)
{
	unsigned int size = frag_size;

	/* frag_size == 0 is considered deprecated now. Callers
	 * using slab buffers should use slab_build_skb() instead.
	 */
	if (WARN_ONCE(size == 0, "Use slab_build_skb() instead"))
		data = __slab_build_skb(skb, data, &size);

	__finalize_skb_around(skb, data, size);
}

/**
 * __build_skb - build a network buffer
 * @data: data buffer provided by caller
 * @frag_size: size of data (must not be 0)
 *
 * Allocate a new &sk_buff. Caller provides space holding head and
 * skb_shared_info. @data must have been allocated from the page
 * allocator or vmalloc(). (A @frag_size of 0 to indicate a kmalloc()
 * allocation is deprecated, and callers should use slab_build_skb()
 * instead.)
 * The return is the new skb buffer.
 * On a failure the return is %NULL, and @data is not freed.
 * Notes :
 *  Before IO, driver allocates only data buffer where NIC puts incoming frame
 *  Driver should add room at head (NET_SKB_PAD) and
 *  MUST add room at tail (SKB_DATA_ALIGN(skb_shared_info))
 *  After IO, driver calls build_skb(), to allocate sk_buff and populate it
 *  before giving packet to stack.
 *  RX rings only contain data buffers, not full skbs.
 */
struct sk_buff *__build_skb(void *data, unsigned int frag_size)
{
	struct sk_buff *skb;

	skb = kmem_cache_alloc(net_hotdata.skbuff_cache, GFP_ATOMIC);
	if (unlikely(!skb))
		return NULL;

	memset(skb, 0, offsetof(struct sk_buff, tail));
	__build_skb_around(skb, data, frag_size);

	return skb;
}

/* build_skb() is a wrapper over __build_skb(), that specifically
 * takes care of skb->head_frag and skb->pfmemalloc
 */
struct sk_buff *build_skb(void *data, unsigned int frag_size)
{
	struct sk_buff *skb = __build_skb(data, frag_size);

	if (likely(skb && frag_size)) {
		skb->head_frag = 1;
		skb_propagate_pfmemalloc(virt_to_head_page(data), skb);
	}
	return skb;
}
EXPORT_SYMBOL(build_skb);
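
/* Example (illustrative driver RX path; rx_page, rx_offset, truesize,
 * pkt_len and napi are hypothetical): wrap a DMA-filled page fragment
 * into an skb without copying the payload:
 *
 *	void *va = page_address(rx_page) + rx_offset;
 *	struct sk_buff *skb = build_skb(va, truesize);
 *
 *	if (skb) {
 *		skb_reserve(skb, NET_SKB_PAD);
 *		skb_put(skb, pkt_len);
 *		napi_gro_receive(napi, skb);
 *	}
 */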

/**
 * build_skb_around - build a network buffer around provided skb
 * @skb: sk_buff provided by caller, must be memset cleared
 * @data: data buffer provided by caller
 * @frag_size: size of data
 */
struct sk_buff *build_skb_around(struct sk_buff *skb,
				 void *data, unsigned int frag_size)
{
	if (unlikely(!skb))
		return NULL;

	__build_skb_around(skb, data, frag_size);

	if (frag_size) {
		skb->head_frag = 1;
		skb_propagate_pfmemalloc(virt_to_head_page(data), skb);
	}
	return skb;
}
EXPORT_SYMBOL(build_skb_around);

/**
 * __napi_build_skb - build a network buffer
 * @data: data buffer provided by caller
 * @frag_size: size of data
 *
 * Version of __build_skb() that uses NAPI percpu caches to obtain
 * the skbuff_head instead of an in-place allocation.
 *
 * Returns a new &sk_buff on success, %NULL on allocation failure.
 */
static struct sk_buff *__napi_build_skb(void *data, unsigned int frag_size)
{
	struct sk_buff *skb;

	skb = napi_skb_cache_get();
	if (unlikely(!skb))
		return NULL;

	memset(skb, 0, offsetof(struct sk_buff, tail));
	__build_skb_around(skb, data, frag_size);

	return skb;
}

/**
 * napi_build_skb - build a network buffer
 * @data: data buffer provided by caller
 * @frag_size: size of data
 *
 * Version of __napi_build_skb() that takes care of skb->head_frag
 * and skb->pfmemalloc when the data is a page or page fragment.
 *
 * Returns a new &sk_buff on success, %NULL on allocation failure.
 */
struct sk_buff *napi_build_skb(void *data, unsigned int frag_size)
{
	struct sk_buff *skb = __napi_build_skb(data, frag_size);

	if (likely(skb) && frag_size) {
		skb->head_frag = 1;
		skb_propagate_pfmemalloc(virt_to_head_page(data), skb);
	}

	return skb;
}
EXPORT_SYMBOL(napi_build_skb);

/*
 * kmalloc_reserve is a wrapper around kmalloc_node_track_caller that tells
 * the caller if emergency pfmemalloc reserves are being used. If it is and
 * the socket is later found to be SOCK_MEMALLOC then PFMEMALLOC reserves
 * may be used. Otherwise, the packet data may be discarded until enough
 * memory is free.
 */
static void *kmalloc_reserve(unsigned int *size, gfp_t flags, int node,
			     bool *pfmemalloc)
{
	bool ret_pfmemalloc = false;
	size_t obj_size;
	void *obj;

	obj_size = SKB_HEAD_ALIGN(*size);
	if (obj_size <= SKB_SMALL_HEAD_CACHE_SIZE &&
	    !(flags & KMALLOC_NOT_NORMAL_BITS)) {
		obj = kmem_cache_alloc_node(net_hotdata.skb_small_head_cache,
					    flags | __GFP_NOMEMALLOC | __GFP_NOWARN,
					    node);
		*size = SKB_SMALL_HEAD_CACHE_SIZE;
		if (obj || !(gfp_pfmemalloc_allowed(flags)))
			goto out;
		/* Try again but now we are using pfmemalloc reserves */
		ret_pfmemalloc = true;
		obj = kmem_cache_alloc_node(net_hotdata.skb_small_head_cache, flags, node);
		goto out;
	}

	obj_size = kmalloc_size_roundup(obj_size);
	/* The following cast might truncate high-order bits of obj_size, this
	 * is harmless because kmalloc(obj_size >= 2^32) will fail anyway.
	 */
	*size = (unsigned int)obj_size;

	/*
	 * Try a regular allocation, when that fails and we're not entitled
	 * to the reserves, fail.
	 */
	obj = kmalloc_node_track_caller(obj_size,
					flags | __GFP_NOMEMALLOC | __GFP_NOWARN,
					node);
	if (obj || !(gfp_pfmemalloc_allowed(flags)))
		goto out;

	/* Try again but now we are using pfmemalloc reserves */
	ret_pfmemalloc = true;
	obj = kmalloc_node_track_caller(obj_size, flags, node);

out:
	if (pfmemalloc)
		*pfmemalloc = ret_pfmemalloc;

	return obj;
}

/* Allocate a new skbuff. We do this ourselves so we can fill in a few
 * 'private' fields and also do memory statistics to find all the
 * [BEEP] leaks.
 *
 */

/**
 *	__alloc_skb	-	allocate a network buffer
 *	@size: size to allocate
 *	@gfp_mask: allocation mask
 *	@flags: If SKB_ALLOC_FCLONE is set, allocate from fclone cache
 *		instead of head cache and allocate a cloned (child) skb.
 *		If SKB_ALLOC_RX is set, __GFP_MEMALLOC will be used for
 *		allocations in case the data is required for writeback
 *	@node: numa node to allocate memory on
 *
 *	Allocate a new &sk_buff. The returned buffer has no headroom and a
 *	tail room of at least size bytes. The object has a reference count
 *	of one. The return is the buffer. On a failure the return is %NULL.
 *
 *	Buffers may only be allocated from interrupts using a @gfp_mask of
 *	%GFP_ATOMIC.
 */
struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
			    int flags, int node)
{
	struct kmem_cache *cache;
	struct sk_buff *skb;
	bool pfmemalloc;
	u8 *data;

	cache = (flags & SKB_ALLOC_FCLONE)
		? net_hotdata.skbuff_fclone_cache : net_hotdata.skbuff_cache;

	if (sk_memalloc_socks() && (flags & SKB_ALLOC_RX))
		gfp_mask |= __GFP_MEMALLOC;

	/* Get the HEAD */
	if ((flags & (SKB_ALLOC_FCLONE | SKB_ALLOC_NAPI)) == SKB_ALLOC_NAPI &&
	    likely(node == NUMA_NO_NODE || node == numa_mem_id()))
		skb = napi_skb_cache_get();
	else
		skb = kmem_cache_alloc_node(cache, gfp_mask & ~GFP_DMA, node);
	if (unlikely(!skb))
		return NULL;
	prefetchw(skb);

	/* We do our best to align skb_shared_info on a separate cache
	 * line. It usually works because kmalloc(X > SMP_CACHE_BYTES) gives
	 * aligned memory blocks, unless SLUB/SLAB debug is enabled.
	 * Both skb->head and skb_shared_info are cache line aligned.
	 */
	data = kmalloc_reserve(&size, gfp_mask, node, &pfmemalloc);
	if (unlikely(!data))
		goto nodata;
	/* kmalloc_size_roundup() might give us more room than requested.
	 * Put skb_shared_info exactly at the end of allocated zone,
	 * to allow max possible filling before reallocation.
	 */
	prefetchw(data + SKB_WITH_OVERHEAD(size));

	/*
	 * Only clear those fields we need to clear, not those that we will
	 * actually initialise below. Hence, don't put any more fields after
	 * the tail pointer in struct sk_buff!
	 */
	memset(skb, 0, offsetof(struct sk_buff, tail));
	__build_skb_around(skb, data, size);
	skb->pfmemalloc = pfmemalloc;

	if (flags & SKB_ALLOC_FCLONE) {
		struct sk_buff_fclones *fclones;

		fclones = container_of(skb, struct sk_buff_fclones, skb1);

		skb->fclone = SKB_FCLONE_ORIG;
		refcount_set(&fclones->fclone_ref, 1);
	}

	return skb;

nodata:
	kmem_cache_free(cache, skb);
	return NULL;
}
EXPORT_SYMBOL(__alloc_skb);

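/* Example (illustrative; hlen, dlen and payload are hypothetical):
 * typical use through the alloc_skb() wrapper, reserving headroom for
 * protocol headers before copying in a payload:
 *
 *	struct sk_buff *skb = alloc_skb(hlen + dlen, GFP_KERNEL);
 *
 *	if (skb) {
 *		skb_reserve(skb, hlen);
 *		skb_put_data(skb, payload, dlen);
 *	}
 */
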
/**
 * __netdev_alloc_skb - allocate an skbuff for rx on a specific device
 * @dev: network device to receive on
 * @len: length to allocate
 * @gfp_mask: get_free_pages mask, passed to alloc_skb
 *
 * Allocate a new &sk_buff and assign it a usage count of one. The
 * buffer has NET_SKB_PAD headroom built in. Users should allocate
 * the headroom they think they need without accounting for the
 * built in space. The built in space is used for optimisations.
 *
 * %NULL is returned if there is no free memory.
 */
struct sk_buff *__netdev_alloc_skb(struct net_device *dev, unsigned int len,
				   gfp_t gfp_mask)
{
	struct page_frag_cache *nc;
	struct sk_buff *skb;
	bool pfmemalloc;
	void *data;

	len += NET_SKB_PAD;

	/* If requested length is either too small or too big,
	 * we use kmalloc() for skb->head allocation.
	 */
	if (len <= SKB_WITH_OVERHEAD(1024) ||
	    len > SKB_WITH_OVERHEAD(PAGE_SIZE) ||
	    (gfp_mask & (__GFP_DIRECT_RECLAIM | GFP_DMA))) {
		skb = __alloc_skb(len, gfp_mask, SKB_ALLOC_RX, NUMA_NO_NODE);
		if (!skb)
			goto skb_fail;
		goto skb_success;
	}

	len = SKB_HEAD_ALIGN(len);

	if (sk_memalloc_socks())
		gfp_mask |= __GFP_MEMALLOC;

	if (in_hardirq() || irqs_disabled()) {
		nc = this_cpu_ptr(&netdev_alloc_cache);
		data = page_frag_alloc(nc, len, gfp_mask);
		pfmemalloc = nc->pfmemalloc;
	} else {
		local_bh_disable();
		nc = this_cpu_ptr(&napi_alloc_cache.page);
		data = page_frag_alloc(nc, len, gfp_mask);
		pfmemalloc = nc->pfmemalloc;
		local_bh_enable();
	}

	if (unlikely(!data))
		return NULL;

	skb = __build_skb(data, len);
	if (unlikely(!skb)) {
		skb_free_frag(data);
		return NULL;
	}

	if (pfmemalloc)
		skb->pfmemalloc = 1;
	skb->head_frag = 1;

skb_success:
	skb_reserve(skb, NET_SKB_PAD);
	skb->dev = dev;

skb_fail:
	return skb;
}
EXPORT_SYMBOL(__netdev_alloc_skb);

/**
 * napi_alloc_skb - allocate skbuff for rx in a specific NAPI instance
 * @napi: napi instance this buffer was allocated for
 * @len: length to allocate
 *
 * Allocate a new sk_buff for use in NAPI receive. This buffer will
 * attempt to allocate the head from a special reserved region used
 * only for NAPI Rx allocation. By doing this we can save several
 * CPU cycles by avoiding having to disable and re-enable IRQs.
 *
 * %NULL is returned if there is no free memory.
 */
struct sk_buff *napi_alloc_skb(struct napi_struct *napi, unsigned int len)
{
	gfp_t gfp_mask = GFP_ATOMIC | __GFP_NOWARN;
	struct napi_alloc_cache *nc;
	struct sk_buff *skb;
	bool pfmemalloc;
	void *data;

	DEBUG_NET_WARN_ON_ONCE(!in_softirq());
	len += NET_SKB_PAD + NET_IP_ALIGN;

	/* If requested length is either too small or too big,
	 * we use kmalloc() for skb->head allocation.
	 * When the small frag allocator is available, prefer it over kmalloc
	 * for small fragments
	 */
	if ((!NAPI_HAS_SMALL_PAGE_FRAG && len <= SKB_WITH_OVERHEAD(1024)) ||
	    len > SKB_WITH_OVERHEAD(PAGE_SIZE) ||
	    (gfp_mask & (__GFP_DIRECT_RECLAIM | GFP_DMA))) {
		skb = __alloc_skb(len, gfp_mask, SKB_ALLOC_RX | SKB_ALLOC_NAPI,
				  NUMA_NO_NODE);
		if (!skb)
			goto skb_fail;
		goto skb_success;
	}

	nc = this_cpu_ptr(&napi_alloc_cache);

	if (sk_memalloc_socks())
		gfp_mask |= __GFP_MEMALLOC;

	if (NAPI_HAS_SMALL_PAGE_FRAG && len <= SKB_WITH_OVERHEAD(1024)) {
		/* we are artificially inflating the allocation size, but
		 * that is not as bad as it may look like, as:
		 * - 'len' less than GRO_MAX_HEAD makes little sense
		 * - On most systems, larger 'len' values lead to fragment
		 *   size above 512 bytes
		 * - kmalloc would use the kmalloc-1k slab for such values
		 * - Builds with smaller GRO_MAX_HEAD will very likely do
		 *   little networking, as that implies no WiFi and no
		 *   tunnels support, and 32 bits arches.
		 */
		len = SZ_1K;

		data = page_frag_alloc_1k(&nc->page_small, gfp_mask);
		pfmemalloc = NAPI_SMALL_PAGE_PFMEMALLOC(nc->page_small);
	} else {
		len = SKB_HEAD_ALIGN(len);

		data = page_frag_alloc(&nc->page, len, gfp_mask);
		pfmemalloc = nc->page.pfmemalloc;
	}

	if (unlikely(!data))
		return NULL;

	skb = __napi_build_skb(data, len);
	if (unlikely(!skb)) {
		skb_free_frag(data);
		return NULL;
	}

	if (pfmemalloc)
		skb->pfmemalloc = 1;
	skb->head_frag = 1;

skb_success:
	skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
	skb->dev = napi->dev;

skb_fail:
	return skb;
}
EXPORT_SYMBOL(napi_alloc_skb);
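
/* Example (illustrative; buf and pkt_len are hypothetical): building
 * and delivering a small frame from a NAPI poll handler:
 *
 *	struct sk_buff *skb = napi_alloc_skb(napi, pkt_len);
 *
 *	if (skb) {
 *		skb_put_data(skb, buf, pkt_len);
 *		skb->protocol = eth_type_trans(skb, napi->dev);
 *		napi_gro_receive(napi, skb);
 *	}
 */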

void skb_add_rx_frag_netmem(struct sk_buff *skb, int i, netmem_ref netmem,
			    int off, int size, unsigned int truesize)
{
	DEBUG_NET_WARN_ON_ONCE(size > truesize);

	skb_fill_netmem_desc(skb, i, netmem, off, size);
	skb->len += size;
	skb->data_len += size;
	skb->truesize += truesize;
}
EXPORT_SYMBOL(skb_add_rx_frag_netmem);

void skb_coalesce_rx_frag(struct sk_buff *skb, int i, int size,
			  unsigned int truesize)
{
	skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

	DEBUG_NET_WARN_ON_ONCE(size > truesize);

	skb_frag_size_add(frag, size);
	skb->len += size;
	skb->data_len += size;
	skb->truesize += truesize;
}
EXPORT_SYMBOL(skb_coalesce_rx_frag);

static void skb_drop_list(struct sk_buff **listp)
{
	kfree_skb_list(*listp);
	*listp = NULL;
}

static inline void skb_drop_fraglist(struct sk_buff *skb)
{
	skb_drop_list(&skb_shinfo(skb)->frag_list);
}

static void skb_clone_fraglist(struct sk_buff *skb)
{
	struct sk_buff *list;

	skb_walk_frags(skb, list)
		skb_get(list);
}

static bool is_pp_page(struct page *page)
{
	return (page->pp_magic & ~0x3UL) == PP_SIGNATURE;
}

int skb_pp_cow_data(struct page_pool *pool, struct sk_buff **pskb,
		    unsigned int headroom)
{
#if IS_ENABLED(CONFIG_PAGE_POOL)
	u32 size, truesize, len, max_head_size, off;
	struct sk_buff *skb = *pskb, *nskb;
	int err, i, head_off;
	void *data;

	/* XDP does not support fraglist so we need to linearize
	 * the skb.
	 */
	if (skb_has_frag_list(skb))
		return -EOPNOTSUPP;

	max_head_size = SKB_WITH_OVERHEAD(PAGE_SIZE - headroom);
	if (skb->len > max_head_size + MAX_SKB_FRAGS * PAGE_SIZE)
		return -ENOMEM;

	size = min_t(u32, skb->len, max_head_size);
	truesize = SKB_HEAD_ALIGN(size) + headroom;
	data = page_pool_dev_alloc_va(pool, &truesize);
	if (!data)
		return -ENOMEM;

	nskb = napi_build_skb(data, truesize);
	if (!nskb) {
		page_pool_free_va(pool, data, true);
		return -ENOMEM;
	}

	skb_reserve(nskb, headroom);
	skb_copy_header(nskb, skb);
	skb_mark_for_recycle(nskb);

	err = skb_copy_bits(skb, 0, nskb->data, size);
	if (err) {
		consume_skb(nskb);
		return err;
	}
	skb_put(nskb, size);

	head_off = skb_headroom(nskb) - skb_headroom(skb);
	skb_headers_offset_update(nskb, head_off);

	off = size;
	len = skb->len - off;
	for (i = 0; i < MAX_SKB_FRAGS && off < skb->len; i++) {
		struct page *page;
		u32 page_off;

		size = min_t(u32, len, PAGE_SIZE);
		truesize = size;

		page = page_pool_dev_alloc(pool, &page_off, &truesize);
		if (!page) {
			consume_skb(nskb);
			return -ENOMEM;
		}

		skb_add_rx_frag(nskb, i, page, page_off, size, truesize);
		err = skb_copy_bits(skb, off, page_address(page) + page_off,
				    size);
		if (err) {
			consume_skb(nskb);
			return err;
		}

		len -= size;
		off += size;
	}

	consume_skb(skb);
	*pskb = nskb;

	return 0;
#else
	return -EOPNOTSUPP;
#endif
}
EXPORT_SYMBOL(skb_pp_cow_data);

int skb_cow_data_for_xdp(struct page_pool *pool, struct sk_buff **pskb,
			 struct bpf_prog *prog)
{
	if (!prog->aux->xdp_has_frags)
		return -EINVAL;

	return skb_pp_cow_data(pool, pskb, XDP_PACKET_HEADROOM);
}
EXPORT_SYMBOL(skb_cow_data_for_xdp);

#if IS_ENABLED(CONFIG_PAGE_POOL)
bool napi_pp_put_page(struct page *page)
{
	page = compound_head(page);

	/* page->pp_magic is OR'ed with PP_SIGNATURE after the allocation
	 * in order to preserve any existing bits, such as bit 0 for the
	 * head page of compound page and bit 1 for pfmemalloc page, so
	 * mask those bits for freeing side when doing below checking,
	 * and page_is_pfmemalloc() is checked in __page_pool_put_page()
	 * to avoid recycling the pfmemalloc page.
	 */
	if (unlikely(!is_pp_page(page)))
		return false;

	page_pool_put_full_page(page->pp, page, false);

	return true;
}
EXPORT_SYMBOL(napi_pp_put_page);
#endif

static bool skb_pp_recycle(struct sk_buff *skb, void *data)
{
	if (!IS_ENABLED(CONFIG_PAGE_POOL) || !skb->pp_recycle)
		return false;
	return napi_pp_put_page(virt_to_page(data));
}

/**
 * skb_pp_frag_ref() - Increase fragment references of a page pool aware skb
 * @skb: page pool aware skb
 *
 * Increase the fragment reference count (pp_ref_count) of an skb. This is
 * intended to gain fragment references only for page pool aware skbs,
 * i.e. when skb->pp_recycle is true, and not for fragments in a
 * non-pp-recycling skb. It has a fallback to increase references on normal
 * pages, as page pool aware skbs may also have normal page fragments.
 */
static int skb_pp_frag_ref(struct sk_buff *skb)
{
	struct skb_shared_info *shinfo;
	struct page *head_page;
	int i;

	if (!skb->pp_recycle)
		return -EINVAL;

	shinfo = skb_shinfo(skb);

	for (i = 0; i < shinfo->nr_frags; i++) {
		head_page = compound_head(skb_frag_page(&shinfo->frags[i]));
		if (likely(is_pp_page(head_page)))
			page_pool_ref_page(head_page);
		else
			page_ref_inc(head_page);
	}
	return 0;
}

static void skb_kfree_head(void *head, unsigned int end_offset)
{
	if (end_offset == SKB_SMALL_HEAD_HEADROOM)
		kmem_cache_free(net_hotdata.skb_small_head_cache, head);
	else
		kfree(head);
}

static void skb_free_head(struct sk_buff *skb)
{
	unsigned char *head = skb->head;

	if (skb->head_frag) {
		if (skb_pp_recycle(skb, head))
			return;
		skb_free_frag(head);
	} else {
		skb_kfree_head(head, skb_end_offset(skb));
	}
}

static void skb_release_data(struct sk_buff *skb, enum skb_drop_reason reason)
{
	struct skb_shared_info *shinfo = skb_shinfo(skb);
	int i;

	if (!skb_data_unref(skb, shinfo))
		goto exit;

	if (skb_zcopy(skb)) {
		bool skip_unref = shinfo->flags & SKBFL_MANAGED_FRAG_REFS;

		skb_zcopy_clear(skb, true);
		if (skip_unref)
			goto free_head;
	}

	for (i = 0; i < shinfo->nr_frags; i++)
		__skb_frag_unref(&shinfo->frags[i], skb->pp_recycle);

free_head:
	if (shinfo->frag_list)
		kfree_skb_list_reason(shinfo->frag_list, reason);

	skb_free_head(skb);
exit:
	/* When we clone an SKB we copy the recycling bit. The pp_recycle
	 * bit is only set on the head though, so in order to avoid races
	 * while trying to recycle fragments on __skb_frag_unref() we need
	 * to make one SKB responsible for triggering the recycle path.
	 * So disable the recycling bit if an SKB is cloned and we have
	 * additional references to the fragmented part of the SKB.
	 * Eventually the last SKB will have the recycling bit set and its
	 * dataref set to 0, which will trigger the recycling.
	 */
	skb->pp_recycle = 0;
}

/*
 *	Free an skbuff by memory without cleaning the state.
 */
static void kfree_skbmem(struct sk_buff *skb)
{
	struct sk_buff_fclones *fclones;

	switch (skb->fclone) {
	case SKB_FCLONE_UNAVAILABLE:
		kmem_cache_free(net_hotdata.skbuff_cache, skb);
		return;

	case SKB_FCLONE_ORIG:
		fclones = container_of(skb, struct sk_buff_fclones, skb1);

		/* We usually free the clone (TX completion) before the
		 * original skb. This test would have no chance to be true
		 * for the clone, while here, branch prediction will be good.
		 */
		if (refcount_read(&fclones->fclone_ref) == 1)
			goto fastpath;
		break;

	default: /* SKB_FCLONE_CLONE */
		fclones = container_of(skb, struct sk_buff_fclones, skb2);
		break;
	}
	if (!refcount_dec_and_test(&fclones->fclone_ref))
		return;
fastpath:
	kmem_cache_free(net_hotdata.skbuff_fclone_cache, fclones);
}

void skb_release_head_state(struct sk_buff *skb)
{
	skb_dst_drop(skb);
	if (skb->destructor) {
		DEBUG_NET_WARN_ON_ONCE(in_hardirq());
		skb->destructor(skb);
	}
#if IS_ENABLED(CONFIG_NF_CONNTRACK)
	nf_conntrack_put(skb_nfct(skb));
#endif
	skb_ext_put(skb);
}

/* Free everything but the sk_buff shell. */
static void skb_release_all(struct sk_buff *skb, enum skb_drop_reason reason)
{
	skb_release_head_state(skb);
	if (likely(skb->head))
		skb_release_data(skb, reason);
}

/**
 *	__kfree_skb - private function
 *	@skb: buffer
 *
 *	Free an sk_buff. Release anything attached to the buffer.
 *	Clean the state. This is an internal helper function. Users should
 *	always call kfree_skb().
 */

void __kfree_skb(struct sk_buff *skb)
{
	skb_release_all(skb, SKB_DROP_REASON_NOT_SPECIFIED);
	kfree_skbmem(skb);
}
EXPORT_SYMBOL(__kfree_skb);

static __always_inline
bool __kfree_skb_reason(struct sk_buff *skb, enum skb_drop_reason reason)
{
	if (unlikely(!skb_unref(skb)))
		return false;

	DEBUG_NET_WARN_ON_ONCE(reason == SKB_NOT_DROPPED_YET ||
			       u32_get_bits(reason,
					    SKB_DROP_REASON_SUBSYS_MASK) >=
				SKB_DROP_REASON_SUBSYS_NUM);

	if (reason == SKB_CONSUMED)
		trace_consume_skb(skb, __builtin_return_address(0));
	else
		trace_kfree_skb(skb, __builtin_return_address(0), reason);
	return true;
}