net: do not deplete pfmemalloc reserve
net/core/skbuff.c
1/*
2 * Routines having to do with the 'struct sk_buff' memory handlers.
3 *
4 * Authors: Alan Cox <alan@lxorguk.ukuu.org.uk>
5 * Florian La Roche <rzsfl@rz.uni-sb.de>
6 *
7 * Fixes:
8 * Alan Cox : Fixed the worst of the load
9 * balancer bugs.
10 * Dave Platt : Interrupt stacking fix.
11 * Richard Kooijman : Timestamp fixes.
12 * Alan Cox : Changed buffer format.
13 * Alan Cox : destructor hook for AF_UNIX etc.
14 * Linus Torvalds : Better skb_clone.
15 * Alan Cox : Added skb_copy.
16 * Alan Cox : Added all the changed routines Linus
17 * only put in the headers
18 * Ray VanTassle : Fixed --skb->lock in free
19 * Alan Cox : skb_copy copy arp field
20 * Andi Kleen : slabified it.
21 * Robert Olsson : Removed skb_head_pool
22 *
23 * NOTE:
24 * The __skb_ routines should be called with interrupts
25 * disabled, or you better be *real* sure that the operation is atomic
26 * with respect to whatever list is being frobbed (e.g. via lock_sock()
27 * or via disabling bottom half handlers, etc).
28 *
29 * This program is free software; you can redistribute it and/or
30 * modify it under the terms of the GNU General Public License
31 * as published by the Free Software Foundation; either version
32 * 2 of the License, or (at your option) any later version.
33 */
34
35/*
36 * The functions in this file will not compile correctly with gcc 2.4.x
37 */
38
39#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
40
41#include <linux/module.h>
42#include <linux/types.h>
43#include <linux/kernel.h>
44#include <linux/kmemcheck.h>
45#include <linux/mm.h>
46#include <linux/interrupt.h>
47#include <linux/in.h>
48#include <linux/inet.h>
49#include <linux/slab.h>
50#include <linux/tcp.h>
51#include <linux/udp.h>
52#include <linux/netdevice.h>
53#ifdef CONFIG_NET_CLS_ACT
54#include <net/pkt_sched.h>
55#endif
56#include <linux/string.h>
57#include <linux/skbuff.h>
58#include <linux/splice.h>
59#include <linux/cache.h>
60#include <linux/rtnetlink.h>
61#include <linux/init.h>
62#include <linux/scatterlist.h>
63#include <linux/errqueue.h>
64#include <linux/prefetch.h>
65#include <linux/if_vlan.h>
66
67#include <net/protocol.h>
68#include <net/dst.h>
69#include <net/sock.h>
70#include <net/checksum.h>
71#include <net/ip6_checksum.h>
72#include <net/xfrm.h>
73
74#include <asm/uaccess.h>
75#include <trace/events/skb.h>
76#include <linux/highmem.h>
77#include <linux/capability.h>
78#include <linux/user_namespace.h>
79
80struct kmem_cache *skbuff_head_cache __read_mostly;
81static struct kmem_cache *skbuff_fclone_cache __read_mostly;
82
83/**
84 * skb_panic - private function for out-of-line support
85 * @skb: buffer
86 * @sz: size
87 * @addr: address
88 * @msg: skb_over_panic or skb_under_panic
89 *
90 * Out-of-line support for skb_put() and skb_push().
91 * Called via the wrapper skb_over_panic() or skb_under_panic().
92 * Keep out of line to prevent kernel bloat.
93 * __builtin_return_address is not used because it is not always reliable.
94 */
95static void skb_panic(struct sk_buff *skb, unsigned int sz, void *addr,
96 const char msg[])
97{
98 pr_emerg("%s: text:%p len:%d put:%d head:%p data:%p tail:%#lx end:%#lx dev:%s\n",
99 msg, addr, skb->len, sz, skb->head, skb->data,
100 (unsigned long)skb->tail, (unsigned long)skb->end,
101 skb->dev ? skb->dev->name : "<NULL>");
102 BUG();
103}
104
105static void skb_over_panic(struct sk_buff *skb, unsigned int sz, void *addr)
106{
107 skb_panic(skb, sz, addr, __func__);
108}
109
110static void skb_under_panic(struct sk_buff *skb, unsigned int sz, void *addr)
111{
112 skb_panic(skb, sz, addr, __func__);
113}
114
115/*
 116 * kmalloc_reserve is a wrapper around kmalloc_node_track_caller that tells
 117 * the caller whether emergency pfmemalloc reserves were used. If they were,
 118 * and the socket is later found to be SOCK_MEMALLOC, then the PFMEMALLOC
 119 * reserves may be used. Otherwise, the packet data may be discarded until
 120 * enough memory is free.
121 */
122#define kmalloc_reserve(size, gfp, node, pfmemalloc) \
123 __kmalloc_reserve(size, gfp, node, _RET_IP_, pfmemalloc)
124
125static void *__kmalloc_reserve(size_t size, gfp_t flags, int node,
126 unsigned long ip, bool *pfmemalloc)
127{
128 void *obj;
129 bool ret_pfmemalloc = false;
130
131 /*
132 * Try a regular allocation, when that fails and we're not entitled
133 * to the reserves, fail.
134 */
135 obj = kmalloc_node_track_caller(size,
136 flags | __GFP_NOMEMALLOC | __GFP_NOWARN,
137 node);
138 if (obj || !(gfp_pfmemalloc_allowed(flags)))
139 goto out;
140
141 /* Try again but now we are using pfmemalloc reserves */
142 ret_pfmemalloc = true;
143 obj = kmalloc_node_track_caller(size, flags, node);
144
145out:
146 if (pfmemalloc)
147 *pfmemalloc = ret_pfmemalloc;
148
149 return obj;
150}
151
152/* Allocate a new skbuff. We do this ourselves so we can fill in a few
153 * 'private' fields and also do memory statistics to find all the
154 * [BEEP] leaks.
155 *
156 */
157
158struct sk_buff *__alloc_skb_head(gfp_t gfp_mask, int node)
159{
160 struct sk_buff *skb;
161
162 /* Get the HEAD */
163 skb = kmem_cache_alloc_node(skbuff_head_cache,
164 gfp_mask & ~__GFP_DMA, node);
165 if (!skb)
166 goto out;
167
168 /*
169 * Only clear those fields we need to clear, not those that we will
170 * actually initialise below. Hence, don't put any more fields after
171 * the tail pointer in struct sk_buff!
172 */
173 memset(skb, 0, offsetof(struct sk_buff, tail));
174 skb->head = NULL;
175 skb->truesize = sizeof(struct sk_buff);
176 atomic_set(&skb->users, 1);
177
178 skb->mac_header = (typeof(skb->mac_header))~0U;
179out:
180 return skb;
181}
182
183/**
184 * __alloc_skb - allocate a network buffer
185 * @size: size to allocate
186 * @gfp_mask: allocation mask
187 * @flags: If SKB_ALLOC_FCLONE is set, allocate from fclone cache
188 * instead of head cache and allocate a cloned (child) skb.
189 * If SKB_ALLOC_RX is set, __GFP_MEMALLOC will be used for
190 * allocations in case the data is required for writeback
191 * @node: numa node to allocate memory on
192 *
193 * Allocate a new &sk_buff. The returned buffer has no headroom and a
194 * tail room of at least size bytes. The object has a reference count
195 * of one. The return is the buffer. On a failure the return is %NULL.
196 *
197 * Buffers may only be allocated from interrupts using a @gfp_mask of
198 * %GFP_ATOMIC.
199 */
200struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
201 int flags, int node)
202{
203 struct kmem_cache *cache;
204 struct skb_shared_info *shinfo;
205 struct sk_buff *skb;
206 u8 *data;
207 bool pfmemalloc;
208
209 cache = (flags & SKB_ALLOC_FCLONE)
210 ? skbuff_fclone_cache : skbuff_head_cache;
211
212 if (sk_memalloc_socks() && (flags & SKB_ALLOC_RX))
213 gfp_mask |= __GFP_MEMALLOC;
214
215 /* Get the HEAD */
216 skb = kmem_cache_alloc_node(cache, gfp_mask & ~__GFP_DMA, node);
217 if (!skb)
218 goto out;
219 prefetchw(skb);
220
221 /* We do our best to align skb_shared_info on a separate cache
222 * line. It usually works because kmalloc(X > SMP_CACHE_BYTES) gives
223 * aligned memory blocks, unless SLUB/SLAB debug is enabled.
224 * Both skb->head and skb_shared_info are cache line aligned.
225 */
226 size = SKB_DATA_ALIGN(size);
227 size += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
228 data = kmalloc_reserve(size, gfp_mask, node, &pfmemalloc);
229 if (!data)
230 goto nodata;
231 /* kmalloc(size) might give us more room than requested.
232 * Put skb_shared_info exactly at the end of allocated zone,
233 * to allow max possible filling before reallocation.
234 */
235 size = SKB_WITH_OVERHEAD(ksize(data));
236 prefetchw(data + size);
237
238 /*
239 * Only clear those fields we need to clear, not those that we will
240 * actually initialise below. Hence, don't put any more fields after
241 * the tail pointer in struct sk_buff!
242 */
243 memset(skb, 0, offsetof(struct sk_buff, tail));
244 /* Account for allocated memory : skb + skb->head */
245 skb->truesize = SKB_TRUESIZE(size);
246 skb->pfmemalloc = pfmemalloc;
247 atomic_set(&skb->users, 1);
248 skb->head = data;
249 skb->data = data;
250 skb_reset_tail_pointer(skb);
251 skb->end = skb->tail + size;
252 skb->mac_header = (typeof(skb->mac_header))~0U;
253 skb->transport_header = (typeof(skb->transport_header))~0U;
254
255 /* make sure we initialize shinfo sequentially */
256 shinfo = skb_shinfo(skb);
257 memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
258 atomic_set(&shinfo->dataref, 1);
259 kmemcheck_annotate_variable(shinfo->destructor_arg);
260
261 if (flags & SKB_ALLOC_FCLONE) {
262 struct sk_buff_fclones *fclones;
263
264 fclones = container_of(skb, struct sk_buff_fclones, skb1);
265
266 kmemcheck_annotate_bitfield(&fclones->skb2, flags1);
267 skb->fclone = SKB_FCLONE_ORIG;
268 atomic_set(&fclones->fclone_ref, 1);
269
270 fclones->skb2.fclone = SKB_FCLONE_CLONE;
271 fclones->skb2.pfmemalloc = pfmemalloc;
272 }
273out:
274 return skb;
275nodata:
276 kmem_cache_free(cache, skb);
277 skb = NULL;
278 goto out;
279}
280EXPORT_SYMBOL(__alloc_skb);
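/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * most callers go through the alloc_skb() wrapper rather than calling
 * __alloc_skb() directly, then reserve headroom before filling in data.
 * "hdr_len" and "payload_len" are hypothetical sizes.
 *
 *	struct sk_buff *skb;
 *
 *	skb = alloc_skb(hdr_len + payload_len, GFP_ATOMIC);
 *	if (!skb)
 *		return -ENOMEM;
 *	skb_reserve(skb, hdr_len);	// headroom for a later skb_push()
 */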
281
282/**
283 * build_skb - build a network buffer
284 * @data: data buffer provided by caller
285 * @frag_size: size of fragment, or 0 if head was kmalloced
286 *
287 * Allocate a new &sk_buff. Caller provides space holding head and
288 * skb_shared_info. @data must have been allocated by kmalloc() only if
289 * @frag_size is 0, otherwise data should come from the page allocator.
290 * The return is the new skb buffer.
291 * On a failure the return is %NULL, and @data is not freed.
292 * Notes :
 293 * Before IO, the driver allocates only the data buffer where the NIC puts the incoming frame.
294 * Driver should add room at head (NET_SKB_PAD) and
295 * MUST add room at tail (SKB_DATA_ALIGN(skb_shared_info))
296 * After IO, driver calls build_skb(), to allocate sk_buff and populate it
297 * before giving packet to stack.
 298 * RX rings only contain data buffers, not full skbs.
299 */
300struct sk_buff *build_skb(void *data, unsigned int frag_size)
301{
302 struct skb_shared_info *shinfo;
303 struct sk_buff *skb;
304 unsigned int size = frag_size ? : ksize(data);
305
306 skb = kmem_cache_alloc(skbuff_head_cache, GFP_ATOMIC);
307 if (!skb)
308 return NULL;
309
310 size -= SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
311
312 memset(skb, 0, offsetof(struct sk_buff, tail));
313 skb->truesize = SKB_TRUESIZE(size);
314 if (frag_size) {
315 skb->head_frag = 1;
316 if (virt_to_head_page(data)->pfmemalloc)
317 skb->pfmemalloc = 1;
318 }
319 atomic_set(&skb->users, 1);
320 skb->head = data;
321 skb->data = data;
322 skb_reset_tail_pointer(skb);
323 skb->end = skb->tail + size;
324 skb->mac_header = (typeof(skb->mac_header))~0U;
325 skb->transport_header = (typeof(skb->transport_header))~0U;
326
327 /* make sure we initialize shinfo sequentially */
328 shinfo = skb_shinfo(skb);
329 memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
330 atomic_set(&shinfo->dataref, 1);
331 kmemcheck_annotate_variable(shinfo->destructor_arg);
332
333 return skb;
334}
335EXPORT_SYMBOL(build_skb);
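/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * the usual RX pairing of a page fragment with build_skb(), mirroring
 * __alloc_rx_skb() below. The hypothetical "fragsz" must already include
 * SKB_DATA_ALIGN(sizeof(struct skb_shared_info)).
 *
 *	void *data = netdev_alloc_frag(fragsz);
 *	struct sk_buff *skb;
 *
 *	if (!data)
 *		return NULL;
 *	skb = build_skb(data, fragsz);
 *	if (unlikely(!skb)) {
 *		put_page(virt_to_head_page(data));	// build_skb() never frees data
 *		return NULL;
 *	}
 */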
336
337struct netdev_alloc_cache {
338 struct page_frag frag;
339 /* we maintain a pagecount bias, so that we dont dirty cache line
340 * containing page->_count every time we allocate a fragment.
341 */
342 unsigned int pagecnt_bias;
343};
344static DEFINE_PER_CPU(struct netdev_alloc_cache, netdev_alloc_cache);
345static DEFINE_PER_CPU(struct netdev_alloc_cache, napi_alloc_cache);
346
347static struct page *__page_frag_refill(struct netdev_alloc_cache *nc,
348 gfp_t gfp_mask)
349{
350 const unsigned int order = NETDEV_FRAG_PAGE_MAX_ORDER;
351 struct page *page = NULL;
352 gfp_t gfp = gfp_mask;
353
354 if (order) {
355 gfp_mask |= __GFP_COMP | __GFP_NOWARN | __GFP_NORETRY |
356 __GFP_NOMEMALLOC;
357 page = alloc_pages_node(NUMA_NO_NODE, gfp_mask, order);
358 nc->frag.size = PAGE_SIZE << (page ? order : 0);
359 }
360
361 if (unlikely(!page))
362 page = alloc_pages_node(NUMA_NO_NODE, gfp, 0);
363
364 nc->frag.page = page;
365
366 return page;
367}
368
369static void *__alloc_page_frag(struct netdev_alloc_cache __percpu *cache,
370 unsigned int fragsz, gfp_t gfp_mask)
371{
372 struct netdev_alloc_cache *nc = this_cpu_ptr(cache);
373 struct page *page = nc->frag.page;
374 unsigned int size;
375 int offset;
376
377 if (unlikely(!page)) {
378refill:
379 page = __page_frag_refill(nc, gfp_mask);
380 if (!page)
381 return NULL;
382
383 /* if size can vary use frag.size else just use PAGE_SIZE */
384 size = NETDEV_FRAG_PAGE_MAX_ORDER ? nc->frag.size : PAGE_SIZE;
385
386 /* Even if we own the page, we do not use atomic_set().
387 * This would break get_page_unless_zero() users.
388 */
389 atomic_add(size - 1, &page->_count);
390
391 /* reset page count bias and offset to start of new frag */
392 nc->pagecnt_bias = size;
393 nc->frag.offset = size;
394 }
395
396 offset = nc->frag.offset - fragsz;
397 if (unlikely(offset < 0)) {
398 if (!atomic_sub_and_test(nc->pagecnt_bias, &page->_count))
399 goto refill;
400
401 /* if size can vary use frag.size else just use PAGE_SIZE */
402 size = NETDEV_FRAG_PAGE_MAX_ORDER ? nc->frag.size : PAGE_SIZE;
403
404 /* OK, page count is 0, we can safely set it */
405 atomic_set(&page->_count, size);
406
407 /* reset page count bias and offset to start of new frag */
408 nc->pagecnt_bias = size;
409 offset = size - fragsz;
410 }
411
412 nc->pagecnt_bias--;
413 nc->frag.offset = offset;
414
415 return page_address(page) + offset;
416}
417
418static void *__netdev_alloc_frag(unsigned int fragsz, gfp_t gfp_mask)
419{
420 unsigned long flags;
421 void *data;
422
423 local_irq_save(flags);
424 data = __alloc_page_frag(&netdev_alloc_cache, fragsz, gfp_mask);
425 local_irq_restore(flags);
426 return data;
427}
428
429/**
430 * netdev_alloc_frag - allocate a page fragment
431 * @fragsz: fragment size
432 *
433 * Allocates a frag from a page for receive buffer.
434 * Uses GFP_ATOMIC allocations.
435 */
436void *netdev_alloc_frag(unsigned int fragsz)
437{
438 return __netdev_alloc_frag(fragsz, GFP_ATOMIC | __GFP_COLD);
439}
440EXPORT_SYMBOL(netdev_alloc_frag);
441
442static void *__napi_alloc_frag(unsigned int fragsz, gfp_t gfp_mask)
443{
444 return __alloc_page_frag(&napi_alloc_cache, fragsz, gfp_mask);
445}
446
447void *napi_alloc_frag(unsigned int fragsz)
448{
449 return __napi_alloc_frag(fragsz, GFP_ATOMIC | __GFP_COLD);
450}
451EXPORT_SYMBOL(napi_alloc_frag);
452
453/**
454 * __alloc_rx_skb - allocate an skbuff for rx
455 * @length: length to allocate
456 * @gfp_mask: get_free_pages mask, passed to alloc_skb
457 * @flags: If SKB_ALLOC_RX is set, __GFP_MEMALLOC will be used for
458 * allocations in case we have to fallback to __alloc_skb()
459 * If SKB_ALLOC_NAPI is set, page fragment will be allocated
460 * from napi_cache instead of netdev_cache.
461 *
462 * Allocate a new &sk_buff and assign it a usage count of one. The
463 * buffer has unspecified headroom built in. Users should allocate
464 * the headroom they think they need without accounting for the
465 * built in space. The built in space is used for optimisations.
466 *
467 * %NULL is returned if there is no free memory.
468 */
469static struct sk_buff *__alloc_rx_skb(unsigned int length, gfp_t gfp_mask,
470 int flags)
471{
472 struct sk_buff *skb = NULL;
473 unsigned int fragsz = SKB_DATA_ALIGN(length) +
474 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
475
476 if (fragsz <= PAGE_SIZE && !(gfp_mask & (__GFP_WAIT | GFP_DMA))) {
477 void *data;
478
479 if (sk_memalloc_socks())
480 gfp_mask |= __GFP_MEMALLOC;
481
482 data = (flags & SKB_ALLOC_NAPI) ?
483 __napi_alloc_frag(fragsz, gfp_mask) :
484 __netdev_alloc_frag(fragsz, gfp_mask);
485
486 if (likely(data)) {
487 skb = build_skb(data, fragsz);
488 if (unlikely(!skb))
489 put_page(virt_to_head_page(data));
490 }
491 } else {
492 skb = __alloc_skb(length, gfp_mask,
493 SKB_ALLOC_RX, NUMA_NO_NODE);
494 }
495 return skb;
496}
497
498/**
499 * __netdev_alloc_skb - allocate an skbuff for rx on a specific device
500 * @dev: network device to receive on
501 * @length: length to allocate
502 * @gfp_mask: get_free_pages mask, passed to alloc_skb
503 *
504 * Allocate a new &sk_buff and assign it a usage count of one. The
505 * buffer has NET_SKB_PAD headroom built in. Users should allocate
506 * the headroom they think they need without accounting for the
507 * built in space. The built in space is used for optimisations.
508 *
509 * %NULL is returned if there is no free memory.
510 */
511struct sk_buff *__netdev_alloc_skb(struct net_device *dev,
512 unsigned int length, gfp_t gfp_mask)
513{
514 struct sk_buff *skb;
515
516 length += NET_SKB_PAD;
517 skb = __alloc_rx_skb(length, gfp_mask, 0);
518
519 if (likely(skb)) {
520 skb_reserve(skb, NET_SKB_PAD);
521 skb->dev = dev;
522 }
523
524 return skb;
525}
526EXPORT_SYMBOL(__netdev_alloc_skb);
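/*
 * Illustrative sketch (editor's addition): drivers normally use the
 * netdev_alloc_skb() wrapper, which supplies GFP_ATOMIC, and only reserve
 * alignment on top of the built-in NET_SKB_PAD headroom. "netdev" and
 * "rx_len" are hypothetical.
 *
 *	struct sk_buff *skb;
 *
 *	skb = netdev_alloc_skb(netdev, rx_len + NET_IP_ALIGN);
 *	if (!skb)
 *		return -ENOMEM;
 *	skb_reserve(skb, NET_IP_ALIGN);	// keep the IP header aligned
 */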
527
528/**
529 * __napi_alloc_skb - allocate skbuff for rx in a specific NAPI instance
530 * @napi: napi instance this buffer was allocated for
531 * @length: length to allocate
532 * @gfp_mask: get_free_pages mask, passed to alloc_skb and alloc_pages
533 *
534 * Allocate a new sk_buff for use in NAPI receive. This buffer will
535 * attempt to allocate the head from a special reserved region used
536 * only for NAPI Rx allocation. By doing this we can save several
537 * CPU cycles by avoiding having to disable and re-enable IRQs.
538 *
539 * %NULL is returned if there is no free memory.
540 */
541struct sk_buff *__napi_alloc_skb(struct napi_struct *napi,
542 unsigned int length, gfp_t gfp_mask)
543{
544 struct sk_buff *skb;
545
546 length += NET_SKB_PAD + NET_IP_ALIGN;
547 skb = __alloc_rx_skb(length, gfp_mask, SKB_ALLOC_NAPI);
548
549 if (likely(skb)) {
550 skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
551 skb->dev = napi->dev;
552 }
553
554 return skb;
555}
556EXPORT_SYMBOL(__napi_alloc_skb);
557
558void skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page, int off,
559 int size, unsigned int truesize)
560{
561 skb_fill_page_desc(skb, i, page, off, size);
562 skb->len += size;
563 skb->data_len += size;
564 skb->truesize += truesize;
565}
566EXPORT_SYMBOL(skb_add_rx_frag);
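/*
 * Illustrative sketch (editor's addition): a receive path attaching page
 * data as a fragment instead of copying it into the linear area. "page",
 * "offset" and "len" are hypothetical, and the truesize of half a page
 * assumes the buffer was carved from a dedicated half-page chunk.
 *
 *	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, offset, len,
 *			PAGE_SIZE / 2);
 */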
567
568void skb_coalesce_rx_frag(struct sk_buff *skb, int i, int size,
569 unsigned int truesize)
570{
571 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
572
573 skb_frag_size_add(frag, size);
574 skb->len += size;
575 skb->data_len += size;
576 skb->truesize += truesize;
577}
578EXPORT_SYMBOL(skb_coalesce_rx_frag);
579
580static void skb_drop_list(struct sk_buff **listp)
581{
582 kfree_skb_list(*listp);
583 *listp = NULL;
584}
585
586static inline void skb_drop_fraglist(struct sk_buff *skb)
587{
588 skb_drop_list(&skb_shinfo(skb)->frag_list);
589}
590
591static void skb_clone_fraglist(struct sk_buff *skb)
592{
593 struct sk_buff *list;
594
595 skb_walk_frags(skb, list)
596 skb_get(list);
597}
598
599static void skb_free_head(struct sk_buff *skb)
600{
601 if (skb->head_frag)
602 put_page(virt_to_head_page(skb->head));
603 else
604 kfree(skb->head);
605}
606
607static void skb_release_data(struct sk_buff *skb)
608{
609 struct skb_shared_info *shinfo = skb_shinfo(skb);
610 int i;
611
612 if (skb->cloned &&
613 atomic_sub_return(skb->nohdr ? (1 << SKB_DATAREF_SHIFT) + 1 : 1,
614 &shinfo->dataref))
615 return;
616
617 for (i = 0; i < shinfo->nr_frags; i++)
618 __skb_frag_unref(&shinfo->frags[i]);
619
620 /*
621 * If skb buf is from userspace, we need to notify the caller
622 * the lower device DMA has done;
623 */
624 if (shinfo->tx_flags & SKBTX_DEV_ZEROCOPY) {
625 struct ubuf_info *uarg;
626
627 uarg = shinfo->destructor_arg;
628 if (uarg->callback)
629 uarg->callback(uarg, true);
630 }
631
632 if (shinfo->frag_list)
633 kfree_skb_list(shinfo->frag_list);
634
635 skb_free_head(skb);
636}
637
638/*
 639 * Free the memory of an skbuff without cleaning its state.
640 */
641static void kfree_skbmem(struct sk_buff *skb)
642{
643 struct sk_buff_fclones *fclones;
644
645 switch (skb->fclone) {
646 case SKB_FCLONE_UNAVAILABLE:
647 kmem_cache_free(skbuff_head_cache, skb);
648 return;
649
650 case SKB_FCLONE_ORIG:
651 fclones = container_of(skb, struct sk_buff_fclones, skb1);
652
653 /* We usually free the clone (TX completion) before original skb
654 * This test would have no chance to be true for the clone,
655 * while here, branch prediction will be good.
656 */
657 if (atomic_read(&fclones->fclone_ref) == 1)
658 goto fastpath;
659 break;
660
661 default: /* SKB_FCLONE_CLONE */
662 fclones = container_of(skb, struct sk_buff_fclones, skb2);
663 break;
664 }
665 if (!atomic_dec_and_test(&fclones->fclone_ref))
666 return;
667fastpath:
668 kmem_cache_free(skbuff_fclone_cache, fclones);
669}
670
671static void skb_release_head_state(struct sk_buff *skb)
672{
673 skb_dst_drop(skb);
674#ifdef CONFIG_XFRM
675 secpath_put(skb->sp);
676#endif
677 if (skb->destructor) {
678 WARN_ON(in_irq());
679 skb->destructor(skb);
680 }
681#if IS_ENABLED(CONFIG_NF_CONNTRACK)
682 nf_conntrack_put(skb->nfct);
683#endif
684#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
685 nf_bridge_put(skb->nf_bridge);
686#endif
687}
688
689/* Free everything but the sk_buff shell. */
690static void skb_release_all(struct sk_buff *skb)
691{
692 skb_release_head_state(skb);
693 if (likely(skb->head))
694 skb_release_data(skb);
695}
696
697/**
698 * __kfree_skb - private function
699 * @skb: buffer
700 *
701 * Free an sk_buff. Release anything attached to the buffer.
702 * Clean the state. This is an internal helper function. Users should
703 * always call kfree_skb
704 */
705
706void __kfree_skb(struct sk_buff *skb)
707{
708 skb_release_all(skb);
709 kfree_skbmem(skb);
710}
711EXPORT_SYMBOL(__kfree_skb);
712
713/**
714 * kfree_skb - free an sk_buff
715 * @skb: buffer to free
716 *
717 * Drop a reference to the buffer and free it if the usage count has
718 * hit zero.
719 */
720void kfree_skb(struct sk_buff *skb)
721{
722 if (unlikely(!skb))
723 return;
724 if (likely(atomic_read(&skb->users) == 1))
725 smp_rmb();
726 else if (likely(!atomic_dec_and_test(&skb->users)))
727 return;
728 trace_kfree_skb(skb, __builtin_return_address(0));
729 __kfree_skb(skb);
730}
731EXPORT_SYMBOL(kfree_skb);
732
733void kfree_skb_list(struct sk_buff *segs)
734{
735 while (segs) {
736 struct sk_buff *next = segs->next;
737
738 kfree_skb(segs);
739 segs = next;
740 }
741}
742EXPORT_SYMBOL(kfree_skb_list);
743
744/**
745 * skb_tx_error - report an sk_buff xmit error
746 * @skb: buffer that triggered an error
747 *
748 * Report xmit error if a device callback is tracking this skb.
749 * skb must be freed afterwards.
750 */
751void skb_tx_error(struct sk_buff *skb)
752{
753 if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
754 struct ubuf_info *uarg;
755
756 uarg = skb_shinfo(skb)->destructor_arg;
757 if (uarg->callback)
758 uarg->callback(uarg, false);
759 skb_shinfo(skb)->tx_flags &= ~SKBTX_DEV_ZEROCOPY;
760 }
761}
762EXPORT_SYMBOL(skb_tx_error);
763
764/**
765 * consume_skb - free an skbuff
766 * @skb: buffer to free
767 *
 768 * Drop a ref to the buffer and free it if the usage count has hit zero.
 769 * Functions identically to kfree_skb, but kfree_skb assumes that the frame
 770 * is being dropped after a failure and notes that in its tracepoint.
771 */
772void consume_skb(struct sk_buff *skb)
773{
774 if (unlikely(!skb))
775 return;
776 if (likely(atomic_read(&skb->users) == 1))
777 smp_rmb();
778 else if (likely(!atomic_dec_and_test(&skb->users)))
779 return;
780 trace_consume_skb(skb);
781 __kfree_skb(skb);
782}
783EXPORT_SYMBOL(consume_skb);
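/*
 * Illustrative sketch (editor's addition): the drop/consume distinction
 * only affects tracing, so error paths use kfree_skb() while a normal
 * end of life uses consume_skb(). "tx_failed" is hypothetical.
 *
 *	if (unlikely(tx_failed))
 *		kfree_skb(skb);		// recorded by trace_kfree_skb()
 *	else
 *		consume_skb(skb);	// recorded by trace_consume_skb()
 */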
784
785/* Make sure a field is enclosed inside headers_start/headers_end section */
786#define CHECK_SKB_FIELD(field) \
787 BUILD_BUG_ON(offsetof(struct sk_buff, field) < \
788 offsetof(struct sk_buff, headers_start)); \
789 BUILD_BUG_ON(offsetof(struct sk_buff, field) > \
790 offsetof(struct sk_buff, headers_end)); \
791
792static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
793{
794 new->tstamp = old->tstamp;
795 /* We do not copy old->sk */
796 new->dev = old->dev;
797 memcpy(new->cb, old->cb, sizeof(old->cb));
798 skb_dst_copy(new, old);
799#ifdef CONFIG_XFRM
800 new->sp = secpath_get(old->sp);
801#endif
802 __nf_copy(new, old, false);
803
804 /* Note : this field could be in headers_start/headers_end section
805 * It is not yet because we do not want to have a 16 bit hole
806 */
807 new->queue_mapping = old->queue_mapping;
808
809 memcpy(&new->headers_start, &old->headers_start,
810 offsetof(struct sk_buff, headers_end) -
811 offsetof(struct sk_buff, headers_start));
812 CHECK_SKB_FIELD(protocol);
813 CHECK_SKB_FIELD(csum);
814 CHECK_SKB_FIELD(hash);
815 CHECK_SKB_FIELD(priority);
816 CHECK_SKB_FIELD(skb_iif);
817 CHECK_SKB_FIELD(vlan_proto);
818 CHECK_SKB_FIELD(vlan_tci);
819 CHECK_SKB_FIELD(transport_header);
820 CHECK_SKB_FIELD(network_header);
821 CHECK_SKB_FIELD(mac_header);
822 CHECK_SKB_FIELD(inner_protocol);
823 CHECK_SKB_FIELD(inner_transport_header);
824 CHECK_SKB_FIELD(inner_network_header);
825 CHECK_SKB_FIELD(inner_mac_header);
826 CHECK_SKB_FIELD(mark);
827#ifdef CONFIG_NETWORK_SECMARK
828 CHECK_SKB_FIELD(secmark);
829#endif
830#ifdef CONFIG_NET_RX_BUSY_POLL
831 CHECK_SKB_FIELD(napi_id);
832#endif
833#ifdef CONFIG_XPS
834 CHECK_SKB_FIELD(sender_cpu);
835#endif
836#ifdef CONFIG_NET_SCHED
837 CHECK_SKB_FIELD(tc_index);
838#ifdef CONFIG_NET_CLS_ACT
839 CHECK_SKB_FIELD(tc_verd);
840#endif
841#endif
842
843}
844
845/*
846 * You should not add any new code to this function. Add it to
847 * __copy_skb_header above instead.
848 */
849static struct sk_buff *__skb_clone(struct sk_buff *n, struct sk_buff *skb)
850{
851#define C(x) n->x = skb->x
852
853 n->next = n->prev = NULL;
854 n->sk = NULL;
855 __copy_skb_header(n, skb);
856
857 C(len);
858 C(data_len);
859 C(mac_len);
860 n->hdr_len = skb->nohdr ? skb_headroom(skb) : skb->hdr_len;
861 n->cloned = 1;
862 n->nohdr = 0;
863 n->destructor = NULL;
864 C(tail);
865 C(end);
866 C(head);
867 C(head_frag);
868 C(data);
869 C(truesize);
870 atomic_set(&n->users, 1);
871
872 atomic_inc(&(skb_shinfo(skb)->dataref));
873 skb->cloned = 1;
874
875 return n;
876#undef C
877}
878
879/**
880 * skb_morph - morph one skb into another
881 * @dst: the skb to receive the contents
882 * @src: the skb to supply the contents
883 *
884 * This is identical to skb_clone except that the target skb is
885 * supplied by the user.
886 *
887 * The target skb is returned upon exit.
888 */
889struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src)
890{
891 skb_release_all(dst);
892 return __skb_clone(dst, src);
893}
894EXPORT_SYMBOL_GPL(skb_morph);
895
896/**
897 * skb_copy_ubufs - copy userspace skb frags buffers to kernel
898 * @skb: the skb to modify
899 * @gfp_mask: allocation priority
900 *
901 * This must be called on SKBTX_DEV_ZEROCOPY skb.
902 * It will copy all frags into kernel and drop the reference
903 * to userspace pages.
904 *
905 * If this function is called from an interrupt gfp_mask() must be
906 * %GFP_ATOMIC.
907 *
908 * Returns 0 on success or a negative error code on failure
909 * to allocate kernel memory to copy to.
910 */
911int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask)
912{
913 int i;
914 int num_frags = skb_shinfo(skb)->nr_frags;
915 struct page *page, *head = NULL;
916 struct ubuf_info *uarg = skb_shinfo(skb)->destructor_arg;
917
918 for (i = 0; i < num_frags; i++) {
919 u8 *vaddr;
920 skb_frag_t *f = &skb_shinfo(skb)->frags[i];
921
922 page = alloc_page(gfp_mask);
923 if (!page) {
924 while (head) {
925 struct page *next = (struct page *)page_private(head);
926 put_page(head);
927 head = next;
928 }
929 return -ENOMEM;
930 }
931 vaddr = kmap_atomic(skb_frag_page(f));
932 memcpy(page_address(page),
933 vaddr + f->page_offset, skb_frag_size(f));
934 kunmap_atomic(vaddr);
935 set_page_private(page, (unsigned long)head);
936 head = page;
937 }
938
939 /* skb frags release userspace buffers */
940 for (i = 0; i < num_frags; i++)
941 skb_frag_unref(skb, i);
942
943 uarg->callback(uarg, false);
944
945 /* skb frags point to kernel buffers */
946 for (i = num_frags - 1; i >= 0; i--) {
947 __skb_fill_page_desc(skb, i, head, 0,
948 skb_shinfo(skb)->frags[i].size);
949 head = (struct page *)page_private(head);
950 }
951
952 skb_shinfo(skb)->tx_flags &= ~SKBTX_DEV_ZEROCOPY;
953 return 0;
954}
955EXPORT_SYMBOL_GPL(skb_copy_ubufs);
956
957/**
958 * skb_clone - duplicate an sk_buff
959 * @skb: buffer to clone
960 * @gfp_mask: allocation priority
961 *
962 * Duplicate an &sk_buff. The new one is not owned by a socket. Both
963 * copies share the same packet data but not structure. The new
964 * buffer has a reference count of 1. If the allocation fails the
965 * function returns %NULL otherwise the new buffer is returned.
966 *
967 * If this function is called from an interrupt gfp_mask() must be
968 * %GFP_ATOMIC.
969 */
970
971struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t gfp_mask)
972{
973 struct sk_buff_fclones *fclones = container_of(skb,
974 struct sk_buff_fclones,
975 skb1);
976 struct sk_buff *n;
977
978 if (skb_orphan_frags(skb, gfp_mask))
979 return NULL;
980
981 if (skb->fclone == SKB_FCLONE_ORIG &&
982 atomic_read(&fclones->fclone_ref) == 1) {
983 n = &fclones->skb2;
984 atomic_set(&fclones->fclone_ref, 2);
985 } else {
986 if (skb_pfmemalloc(skb))
987 gfp_mask |= __GFP_MEMALLOC;
988
989 n = kmem_cache_alloc(skbuff_head_cache, gfp_mask);
990 if (!n)
991 return NULL;
992
993 kmemcheck_annotate_bitfield(n, flags1);
994 n->fclone = SKB_FCLONE_UNAVAILABLE;
995 }
996
997 return __skb_clone(n, skb);
998}
999EXPORT_SYMBOL(skb_clone);
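/*
 * Illustrative sketch (editor's addition): handing a clone to a second
 * consumer while keeping the original. Both skbs share the payload, so
 * neither side may write to it without copying first. "deliver_one()"
 * is a hypothetical consumer.
 *
 *	struct sk_buff *clone = skb_clone(skb, GFP_ATOMIC);
 *
 *	if (clone)
 *		deliver_one(clone);
 *	// the original skb is still owned by this caller
 */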
1000
1001static void skb_headers_offset_update(struct sk_buff *skb, int off)
1002{
1003 /* Only adjust this if it actually is csum_start rather than csum */
1004 if (skb->ip_summed == CHECKSUM_PARTIAL)
1005 skb->csum_start += off;
1006 /* {transport,network,mac}_header and tail are relative to skb->head */
1007 skb->transport_header += off;
1008 skb->network_header += off;
1009 if (skb_mac_header_was_set(skb))
1010 skb->mac_header += off;
1011 skb->inner_transport_header += off;
1012 skb->inner_network_header += off;
1013 skb->inner_mac_header += off;
1014}
1015
1016static void copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
1017{
1018 __copy_skb_header(new, old);
1019
1020 skb_shinfo(new)->gso_size = skb_shinfo(old)->gso_size;
1021 skb_shinfo(new)->gso_segs = skb_shinfo(old)->gso_segs;
1022 skb_shinfo(new)->gso_type = skb_shinfo(old)->gso_type;
1023}
1024
1025static inline int skb_alloc_rx_flag(const struct sk_buff *skb)
1026{
1027 if (skb_pfmemalloc(skb))
1028 return SKB_ALLOC_RX;
1029 return 0;
1030}
1031
1032/**
1033 * skb_copy - create private copy of an sk_buff
1034 * @skb: buffer to copy
1035 * @gfp_mask: allocation priority
1036 *
1037 * Make a copy of both an &sk_buff and its data. This is used when the
1038 * caller wishes to modify the data and needs a private copy of the
1039 * data to alter. Returns %NULL on failure or the pointer to the buffer
1040 * on success. The returned buffer has a reference count of 1.
1041 *
1042 * As by-product this function converts non-linear &sk_buff to linear
1043 * one, so that &sk_buff becomes completely private and caller is allowed
1044 * to modify all the data of returned buffer. This means that this
1045 * function is not recommended for use in circumstances when only
1046 * header is going to be modified. Use pskb_copy() instead.
1047 */
1048
1049struct sk_buff *skb_copy(const struct sk_buff *skb, gfp_t gfp_mask)
1050{
1051 int headerlen = skb_headroom(skb);
1052 unsigned int size = skb_end_offset(skb) + skb->data_len;
1053 struct sk_buff *n = __alloc_skb(size, gfp_mask,
1054 skb_alloc_rx_flag(skb), NUMA_NO_NODE);
1055
1056 if (!n)
1057 return NULL;
1058
1059 /* Set the data pointer */
1060 skb_reserve(n, headerlen);
1061 /* Set the tail pointer and length */
1062 skb_put(n, skb->len);
1063
1064 if (skb_copy_bits(skb, -headerlen, n->head, headerlen + skb->len))
1065 BUG();
1066
1067 copy_skb_header(n, skb);
1068 return n;
1069}
1070EXPORT_SYMBOL(skb_copy);
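/*
 * Illustrative sketch (editor's addition): taking a fully private, linear
 * copy before rewriting payload bytes. When only the headers will be
 * modified, pskb_copy() is the cheaper choice, as noted above.
 *
 *	struct sk_buff *priv = skb_copy(skb, GFP_ATOMIC);
 *
 *	if (!priv)
 *		return -ENOMEM;
 *	// priv's linear data may now be modified freely
 */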
1071
1072/**
1073 * __pskb_copy_fclone - create copy of an sk_buff with private head.
1074 * @skb: buffer to copy
1075 * @headroom: headroom of new skb
1076 * @gfp_mask: allocation priority
1077 * @fclone: if true allocate the copy of the skb from the fclone
1078 * cache instead of the head cache; it is recommended to set this
1079 * to true for the cases where the copy will likely be cloned
1080 *
1081 * Make a copy of both an &sk_buff and part of its data, located
1082 * in header. Fragmented data remain shared. This is used when
1083 * the caller wishes to modify only header of &sk_buff and needs
1084 * private copy of the header to alter. Returns %NULL on failure
1085 * or the pointer to the buffer on success.
1086 * The returned buffer has a reference count of 1.
1087 */
1088
1089struct sk_buff *__pskb_copy_fclone(struct sk_buff *skb, int headroom,
1090 gfp_t gfp_mask, bool fclone)
1091{
1092 unsigned int size = skb_headlen(skb) + headroom;
1093 int flags = skb_alloc_rx_flag(skb) | (fclone ? SKB_ALLOC_FCLONE : 0);
1094 struct sk_buff *n = __alloc_skb(size, gfp_mask, flags, NUMA_NO_NODE);
1095
1096 if (!n)
1097 goto out;
1098
1099 /* Set the data pointer */
1100 skb_reserve(n, headroom);
1101 /* Set the tail pointer and length */
1102 skb_put(n, skb_headlen(skb));
1103 /* Copy the bytes */
1104 skb_copy_from_linear_data(skb, n->data, n->len);
1105
1106 n->truesize += skb->data_len;
1107 n->data_len = skb->data_len;
1108 n->len = skb->len;
1109
1110 if (skb_shinfo(skb)->nr_frags) {
1111 int i;
1112
1113 if (skb_orphan_frags(skb, gfp_mask)) {
1114 kfree_skb(n);
1115 n = NULL;
1116 goto out;
1117 }
1118 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1119 skb_shinfo(n)->frags[i] = skb_shinfo(skb)->frags[i];
1120 skb_frag_ref(skb, i);
1121 }
1122 skb_shinfo(n)->nr_frags = i;
1123 }
1124
1125 if (skb_has_frag_list(skb)) {
1126 skb_shinfo(n)->frag_list = skb_shinfo(skb)->frag_list;
1127 skb_clone_fraglist(n);
1128 }
1129
1130 copy_skb_header(n, skb);
1131out:
1132 return n;
1133}
1134EXPORT_SYMBOL(__pskb_copy_fclone);
1135
1136/**
1137 * pskb_expand_head - reallocate header of &sk_buff
1138 * @skb: buffer to reallocate
1139 * @nhead: room to add at head
1140 * @ntail: room to add at tail
1141 * @gfp_mask: allocation priority
1142 *
1143 * Expands (or creates identical copy, if @nhead and @ntail are zero)
1144 * header of @skb. &sk_buff itself is not changed. &sk_buff MUST have
1145 * reference count of 1. Returns zero in the case of success or error,
1146 * if expansion failed. In the last case, &sk_buff is not changed.
1147 *
1148 * All the pointers pointing into skb header may change and must be
1149 * reloaded after call to this function.
1150 */
1151
1152int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail,
1153 gfp_t gfp_mask)
1154{
1155 int i;
1156 u8 *data;
1157 int size = nhead + skb_end_offset(skb) + ntail;
1158 long off;
1159
1160 BUG_ON(nhead < 0);
1161
1162 if (skb_shared(skb))
1163 BUG();
1164
1165 size = SKB_DATA_ALIGN(size);
1166
1167 if (skb_pfmemalloc(skb))
1168 gfp_mask |= __GFP_MEMALLOC;
1169 data = kmalloc_reserve(size + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)),
1170 gfp_mask, NUMA_NO_NODE, NULL);
1171 if (!data)
1172 goto nodata;
1173 size = SKB_WITH_OVERHEAD(ksize(data));
1174
1175 /* Copy only real data... and, alas, header. This should be
1176 * optimized for the cases when header is void.
1177 */
1178 memcpy(data + nhead, skb->head, skb_tail_pointer(skb) - skb->head);
1179
1180 memcpy((struct skb_shared_info *)(data + size),
1181 skb_shinfo(skb),
1182 offsetof(struct skb_shared_info, frags[skb_shinfo(skb)->nr_frags]));
1183
1184 /*
1185	 * If shinfo is shared we must drop the old head gracefully, but if it
1186	 * is not, we can just drop the old head and leave the existing refcount
1187	 * alone, since all we did is relocate the values.
1188 */
1189 if (skb_cloned(skb)) {
1190 /* copy this zero copy skb frags */
1191 if (skb_orphan_frags(skb, gfp_mask))
1192 goto nofrags;
1193 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
1194 skb_frag_ref(skb, i);
1195
1196 if (skb_has_frag_list(skb))
1197 skb_clone_fraglist(skb);
1198
1199 skb_release_data(skb);
1200 } else {
1201 skb_free_head(skb);
1202 }
1203 off = (data + nhead) - skb->head;
1204
1205 skb->head = data;
1206 skb->head_frag = 0;
1207 skb->data += off;
1208#ifdef NET_SKBUFF_DATA_USES_OFFSET
1209 skb->end = size;
1210 off = nhead;
1211#else
1212 skb->end = skb->head + size;
1213#endif
1214 skb->tail += off;
1215 skb_headers_offset_update(skb, nhead);
1216 skb->cloned = 0;
1217 skb->hdr_len = 0;
1218 skb->nohdr = 0;
1219 atomic_set(&skb_shinfo(skb)->dataref, 1);
1220 return 0;
1221
1222nofrags:
1223 kfree(data);
1224nodata:
1225 return -ENOMEM;
1226}
1227EXPORT_SYMBOL(pskb_expand_head);
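/*
 * Illustrative sketch (editor's addition): growing headroom before
 * pushing an extra header; skb_cow_head() wraps this same pattern for
 * the common case. "ENCAP_LEN" is a hypothetical header size, and any
 * pointers into the old header must be reloaded after the call.
 *
 *	if (skb_headroom(skb) < ENCAP_LEN &&
 *	    pskb_expand_head(skb, ENCAP_LEN - skb_headroom(skb), 0, GFP_ATOMIC))
 *		goto drop;
 *	memset(skb_push(skb, ENCAP_LEN), 0, ENCAP_LEN);
 */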
1228
1229/* Make private copy of skb with writable head and some headroom */
1230
1231struct sk_buff *skb_realloc_headroom(struct sk_buff *skb, unsigned int headroom)
1232{
1233 struct sk_buff *skb2;
1234 int delta = headroom - skb_headroom(skb);
1235
1236 if (delta <= 0)
1237 skb2 = pskb_copy(skb, GFP_ATOMIC);
1238 else {
1239 skb2 = skb_clone(skb, GFP_ATOMIC);
1240 if (skb2 && pskb_expand_head(skb2, SKB_DATA_ALIGN(delta), 0,
1241 GFP_ATOMIC)) {
1242 kfree_skb(skb2);
1243 skb2 = NULL;
1244 }
1245 }
1246 return skb2;
1247}
1248EXPORT_SYMBOL(skb_realloc_headroom);
1249
1250/**
1251 * skb_copy_expand - copy and expand sk_buff
1252 * @skb: buffer to copy
1253 * @newheadroom: new free bytes at head
1254 * @newtailroom: new free bytes at tail
1255 * @gfp_mask: allocation priority
1256 *
1257 * Make a copy of both an &sk_buff and its data and while doing so
1258 * allocate additional space.
1259 *
1260 * This is used when the caller wishes to modify the data and needs a
1261 * private copy of the data to alter as well as more space for new fields.
1262 * Returns %NULL on failure or the pointer to the buffer
1263 * on success. The returned buffer has a reference count of 1.
1264 *
1265 * You must pass %GFP_ATOMIC as the allocation priority if this function
1266 * is called from an interrupt.
1267 */
1268struct sk_buff *skb_copy_expand(const struct sk_buff *skb,
1269 int newheadroom, int newtailroom,
1270 gfp_t gfp_mask)
1271{
1272 /*
1273 * Allocate the copy buffer
1274 */
1275 struct sk_buff *n = __alloc_skb(newheadroom + skb->len + newtailroom,
1276 gfp_mask, skb_alloc_rx_flag(skb),
1277 NUMA_NO_NODE);
1278 int oldheadroom = skb_headroom(skb);
1279 int head_copy_len, head_copy_off;
1280
1281 if (!n)
1282 return NULL;
1283
1284 skb_reserve(n, newheadroom);
1285
1286 /* Set the tail pointer and length */
1287 skb_put(n, skb->len);
1288
1289 head_copy_len = oldheadroom;
1290 head_copy_off = 0;
1291 if (newheadroom <= head_copy_len)
1292 head_copy_len = newheadroom;
1293 else
1294 head_copy_off = newheadroom - head_copy_len;
1295
1296 /* Copy the linear header and data. */
1297 if (skb_copy_bits(skb, -head_copy_len, n->head + head_copy_off,
1298 skb->len + head_copy_len))
1299 BUG();
1300
1301 copy_skb_header(n, skb);
1302
1303 skb_headers_offset_update(n, newheadroom - oldheadroom);
1304
1305 return n;
1306}
1307EXPORT_SYMBOL(skb_copy_expand);
1308
1309/**
1310 * skb_pad - zero pad the tail of an skb
1311 * @skb: buffer to pad
1312 * @pad: space to pad
1313 *
1314 * Ensure that a buffer is followed by a padding area that is zero
1315 * filled. Used by network drivers which may DMA or transfer data
1316 * beyond the buffer end onto the wire.
1317 *
1318 * May return error in out of memory cases. The skb is freed on error.
1319 */
1320
1321int skb_pad(struct sk_buff *skb, int pad)
1322{
1323 int err;
1324 int ntail;
1325
1326	/* If the skbuff is non-linear, tailroom is always zero. */
1327 if (!skb_cloned(skb) && skb_tailroom(skb) >= pad) {
1328 memset(skb->data+skb->len, 0, pad);
1329 return 0;
1330 }
1331
1332 ntail = skb->data_len + pad - (skb->end - skb->tail);
1333 if (likely(skb_cloned(skb) || ntail > 0)) {
1334 err = pskb_expand_head(skb, 0, ntail, GFP_ATOMIC);
1335 if (unlikely(err))
1336 goto free_skb;
1337 }
1338
1339 /* FIXME: The use of this function with non-linear skb's really needs
1340 * to be audited.
1341 */
1342 err = skb_linearize(skb);
1343 if (unlikely(err))
1344 goto free_skb;
1345
1346 memset(skb->data + skb->len, 0, pad);
1347 return 0;
1348
1349free_skb:
1350 kfree_skb(skb);
1351 return err;
1352}
1353EXPORT_SYMBOL(skb_pad);
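/*
 * Illustrative sketch (editor's addition): a transmit path padding a
 * short Ethernet frame to the minimum length; skb_padto() combines the
 * length check with skb_pad() and, like skb_pad(), frees the skb on error.
 *
 *	if (skb_padto(skb, ETH_ZLEN))
 *		return NETDEV_TX_OK;	// frame dropped, skb already freed
 */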
1354
1355/**
1356 * pskb_put - add data to the tail of a potentially fragmented buffer
1357 * @skb: start of the buffer to use
1358 * @tail: tail fragment of the buffer to use
1359 * @len: amount of data to add
1360 *
1361 * This function extends the used data area of the potentially
1362 * fragmented buffer. @tail must be the last fragment of @skb -- or
1363 * @skb itself. If this would exceed the total buffer size the kernel
1364 * will panic. A pointer to the first byte of the extra data is
1365 * returned.
1366 */
1367
1368unsigned char *pskb_put(struct sk_buff *skb, struct sk_buff *tail, int len)
1369{
1370 if (tail != skb) {
1371 skb->data_len += len;
1372 skb->len += len;
1373 }
1374 return skb_put(tail, len);
1375}
1376EXPORT_SYMBOL_GPL(pskb_put);
1377
1378/**
1379 * skb_put - add data to a buffer
1380 * @skb: buffer to use
1381 * @len: amount of data to add
1382 *
1383 * This function extends the used data area of the buffer. If this would
1384 * exceed the total buffer size the kernel will panic. A pointer to the
1385 * first byte of the extra data is returned.
1386 */
1387unsigned char *skb_put(struct sk_buff *skb, unsigned int len)
1388{
1389 unsigned char *tmp = skb_tail_pointer(skb);
1390 SKB_LINEAR_ASSERT(skb);
1391 skb->tail += len;
1392 skb->len += len;
1393 if (unlikely(skb->tail > skb->end))
1394 skb_over_panic(skb, len, __builtin_return_address(0));
1395 return tmp;
1396}
1397EXPORT_SYMBOL(skb_put);
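/*
 * Illustrative sketch (editor's addition): filling the linear area of a
 * freshly allocated skb. skb_put() only advances the tail and returns
 * the old tail pointer, so the caller copies into the returned pointer.
 * "payload" and "payload_len" are hypothetical.
 *
 *	memcpy(skb_put(skb, payload_len), payload, payload_len);
 */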
1398
1399/**
1400 * skb_push - add data to the start of a buffer
1401 * @skb: buffer to use
1402 * @len: amount of data to add
1403 *
1404 * This function extends the used data area of the buffer at the buffer
1405 * start. If this would exceed the total buffer headroom the kernel will
1406 * panic. A pointer to the first byte of the extra data is returned.
1407 */
1408unsigned char *skb_push(struct sk_buff *skb, unsigned int len)
1409{
1410 skb->data -= len;
1411 skb->len += len;
1412 if (unlikely(skb->data<skb->head))
1413 skb_under_panic(skb, len, __builtin_return_address(0));
1414 return skb->data;
1415}
1416EXPORT_SYMBOL(skb_push);
1417
1418/**
1419 * skb_pull - remove data from the start of a buffer
1420 * @skb: buffer to use
1421 * @len: amount of data to remove
1422 *
1423 * This function removes data from the start of a buffer, returning
1424 * the memory to the headroom. A pointer to the next data in the buffer
1425 * is returned. Once the data has been pulled future pushes will overwrite
1426 * the old data.
1427 */
1428unsigned char *skb_pull(struct sk_buff *skb, unsigned int len)
1429{
1430 return skb_pull_inline(skb, len);
1431}
1432EXPORT_SYMBOL(skb_pull);
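/*
 * Illustrative sketch (editor's addition): the usual header dance.
 * skb_pull() strips a header that has been processed; skb_push()
 * prepends one into previously reserved headroom. "eth_copy" is a
 * hypothetical saved Ethernet header.
 *
 *	skb->protocol = ((struct ethhdr *)skb->data)->h_proto;
 *	skb_pull(skb, ETH_HLEN);	// data now points at the payload
 *
 * and, later, to prepend a header again:
 *
 *	memcpy(skb_push(skb, ETH_HLEN), &eth_copy, ETH_HLEN);
 */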
1433
1434/**
1435 * skb_trim - remove end from a buffer
1436 * @skb: buffer to alter
1437 * @len: new length
1438 *
1439 * Cut the length of a buffer down by removing data from the tail. If
1440 * the buffer is already under the length specified it is not modified.
1441 * The skb must be linear.
1442 */
1443void skb_trim(struct sk_buff *skb, unsigned int len)
1444{
1445 if (skb->len > len)
1446 __skb_trim(skb, len);
1447}
1448EXPORT_SYMBOL(skb_trim);
1449
1450/* Trims skb to length len. It can change skb pointers.
1451 */
1452
1453int ___pskb_trim(struct sk_buff *skb, unsigned int len)
1454{
1455 struct sk_buff **fragp;
1456 struct sk_buff *frag;
1457 int offset = skb_headlen(skb);
1458 int nfrags = skb_shinfo(skb)->nr_frags;
1459 int i;
1460 int err;
1461
1462 if (skb_cloned(skb) &&
1463 unlikely((err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC))))
1464 return err;
1465
1466 i = 0;
1467 if (offset >= len)
1468 goto drop_pages;
1469
1470 for (; i < nfrags; i++) {
1471 int end = offset + skb_frag_size(&skb_shinfo(skb)->frags[i]);
1472
1473 if (end < len) {
1474 offset = end;
1475 continue;
1476 }
1477
1478 skb_frag_size_set(&skb_shinfo(skb)->frags[i++], len - offset);
1479
1480drop_pages:
1481 skb_shinfo(skb)->nr_frags = i;
1482
1483 for (; i < nfrags; i++)
1484 skb_frag_unref(skb, i);
1485
1486 if (skb_has_frag_list(skb))
1487 skb_drop_fraglist(skb);
1488 goto done;
1489 }
1490
1491 for (fragp = &skb_shinfo(skb)->frag_list; (frag = *fragp);
1492 fragp = &frag->next) {
1493 int end = offset + frag->len;
1494
1495 if (skb_shared(frag)) {
1496 struct sk_buff *nfrag;
1497
1498 nfrag = skb_clone(frag, GFP_ATOMIC);
1499 if (unlikely(!nfrag))
1500 return -ENOMEM;
1501
1502 nfrag->next = frag->next;
1503 consume_skb(frag);
1504 frag = nfrag;
1505 *fragp = frag;
1506 }
1507
1508 if (end < len) {
1509 offset = end;
1510 continue;
1511 }
1512
1513 if (end > len &&
1514 unlikely((err = pskb_trim(frag, len - offset))))
1515 return err;
1516
1517 if (frag->next)
1518 skb_drop_list(&frag->next);
1519 break;
1520 }
1521
1522done:
1523 if (len > skb_headlen(skb)) {
1524 skb->data_len -= skb->len - len;
1525 skb->len = len;
1526 } else {
1527 skb->len = len;
1528 skb->data_len = 0;
1529 skb_set_tail_pointer(skb, len);
1530 }
1531
1532 return 0;
1533}
1534EXPORT_SYMBOL(___pskb_trim);
1535
1536/**
1537 * __pskb_pull_tail - advance tail of skb header
1538 * @skb: buffer to reallocate
1539 * @delta: number of bytes to advance tail
1540 *
1541 * The function makes sense only on a fragmented &sk_buff,
1542 * it expands header moving its tail forward and copying necessary
1543 * data from fragmented part.
1544 *
1545 * &sk_buff MUST have reference count of 1.
1546 *
1547 * Returns %NULL (and &sk_buff does not change) if pull failed
1548 * or value of new tail of skb in the case of success.
1549 *
1550 * All the pointers pointing into skb header may change and must be
1551 * reloaded after call to this function.
1552 */
1553
1554/* Moves tail of skb head forward, copying data from fragmented part,
1555 * when it is necessary.
1556 * 1. It may fail due to malloc failure.
1557 * 2. It may change skb pointers.
1558 *
1559 * It is pretty complicated. Luckily, it is called only in exceptional cases.
1560 */
1561unsigned char *__pskb_pull_tail(struct sk_buff *skb, int delta)
1562{
1563	/* If the skb does not have enough free space at the tail, get a new
1564	 * one plus 128 bytes for future expansions. If we have enough
1565	 * room at the tail, reallocate without expansion only if the skb is cloned.
1566 */
1567 int i, k, eat = (skb->tail + delta) - skb->end;
1568
1569 if (eat > 0 || skb_cloned(skb)) {
1570 if (pskb_expand_head(skb, 0, eat > 0 ? eat + 128 : 0,
1571 GFP_ATOMIC))
1572 return NULL;
1573 }
1574
1575 if (skb_copy_bits(skb, skb_headlen(skb), skb_tail_pointer(skb), delta))
1576 BUG();
1577
1578 /* Optimization: no fragments, no reasons to preestimate
1579 * size of pulled pages. Superb.
1580 */
1581 if (!skb_has_frag_list(skb))
1582 goto pull_pages;
1583
1584 /* Estimate size of pulled pages. */
1585 eat = delta;
1586 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1587 int size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
1588
1589 if (size >= eat)
1590 goto pull_pages;
1591 eat -= size;
1592 }
1593
1594	 * If we need to update the frag list, we are in trouble.
1595	 * Certainly, it is possible to add an offset to the skb data,
1596	 * but taking into account that pulling is expected to
1597	 * be a very rare operation, it is worth fighting against
1598	 * further bloating of the skb head and crucifying ourselves here instead.
1599	 * Pure masochism, indeed. 8)8)
1600 */
1601 if (eat) {
1602 struct sk_buff *list = skb_shinfo(skb)->frag_list;
1603 struct sk_buff *clone = NULL;
1604 struct sk_buff *insp = NULL;
1605
1606 do {
1607 BUG_ON(!list);
1608
1609 if (list->len <= eat) {
1610 /* Eaten as whole. */
1611 eat -= list->len;
1612 list = list->next;
1613 insp = list;
1614 } else {
1615 /* Eaten partially. */
1616
1617 if (skb_shared(list)) {
1618 /* Sucks! We need to fork list. :-( */
1619 clone = skb_clone(list, GFP_ATOMIC);
1620 if (!clone)
1621 return NULL;
1622 insp = list->next;
1623 list = clone;
1624 } else {
1625 /* This may be pulled without
1626 * problems. */
1627 insp = list;
1628 }
1629 if (!pskb_pull(list, eat)) {
1630 kfree_skb(clone);
1631 return NULL;
1632 }
1633 break;
1634 }
1635 } while (eat);
1636
1637 /* Free pulled out fragments. */
1638 while ((list = skb_shinfo(skb)->frag_list) != insp) {
1639 skb_shinfo(skb)->frag_list = list->next;
1640 kfree_skb(list);
1641 }
1642 /* And insert new clone at head. */
1643 if (clone) {
1644 clone->next = list;
1645 skb_shinfo(skb)->frag_list = clone;
1646 }
1647 }
1648 /* Success! Now we may commit changes to skb data. */
1649
1650pull_pages:
1651 eat = delta;
1652 k = 0;
1653 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1654 int size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
1655
1656 if (size <= eat) {
1657 skb_frag_unref(skb, i);
1658 eat -= size;
1659 } else {
1660 skb_shinfo(skb)->frags[k] = skb_shinfo(skb)->frags[i];
1661 if (eat) {
1662 skb_shinfo(skb)->frags[k].page_offset += eat;
1663 skb_frag_size_sub(&skb_shinfo(skb)->frags[k], eat);
1664 eat = 0;
1665 }
1666 k++;
1667 }
1668 }
1669 skb_shinfo(skb)->nr_frags = k;
1670
1671 skb->tail += delta;
1672 skb->data_len -= delta;
1673
1674 return skb_tail_pointer(skb);
1675}
1676EXPORT_SYMBOL(__pskb_pull_tail);
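/*
 * Illustrative sketch (editor's addition): callers rarely use
 * __pskb_pull_tail() directly; they go through pskb_may_pull(), which
 * only falls back to it when the requested bytes are not yet in the
 * linear area.
 *
 *	const struct iphdr *iph;
 *
 *	if (!pskb_may_pull(skb, sizeof(*iph)))
 *		goto drop;				// truncated packet
 *	iph = (const struct iphdr *)skb->data;		// safely linear now
 */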
1677
1678/**
1679 * skb_copy_bits - copy bits from skb to kernel buffer
1680 * @skb: source skb
1681 * @offset: offset in source
1682 * @to: destination buffer
1683 * @len: number of bytes to copy
1684 *
1685 * Copy the specified number of bytes from the source skb to the
1686 * destination buffer.
1687 *
1688 * CAUTION ! :
1689 * If its prototype is ever changed,
1690 * check arch/{*}/net/{*}.S files,
1691 * since it is called from BPF assembly code.
1692 */
1693int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len)
1694{
1695 int start = skb_headlen(skb);
1696 struct sk_buff *frag_iter;
1697 int i, copy;
1698
1699 if (offset > (int)skb->len - len)
1700 goto fault;
1701
1702 /* Copy header. */
1703 if ((copy = start - offset) > 0) {
1704 if (copy > len)
1705 copy = len;
1706 skb_copy_from_linear_data_offset(skb, offset, to, copy);
1707 if ((len -= copy) == 0)
1708 return 0;
1709 offset += copy;
1710 to += copy;
1711 }
1712
1713 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1714 int end;
1715 skb_frag_t *f = &skb_shinfo(skb)->frags[i];
1716
1717 WARN_ON(start > offset + len);
1718
1719 end = start + skb_frag_size(f);
1720 if ((copy = end - offset) > 0) {
1721 u8 *vaddr;
1722
1723 if (copy > len)
1724 copy = len;
1725
1726 vaddr = kmap_atomic(skb_frag_page(f));
1727 memcpy(to,
1728 vaddr + f->page_offset + offset - start,
1729 copy);
1730 kunmap_atomic(vaddr);
1731
1732 if ((len -= copy) == 0)
1733 return 0;
1734 offset += copy;
1735 to += copy;
1736 }
1737 start = end;
1738 }
1739
1740 skb_walk_frags(skb, frag_iter) {
1741 int end;
1742
1743 WARN_ON(start > offset + len);
1744
1745 end = start + frag_iter->len;
1746 if ((copy = end - offset) > 0) {
1747 if (copy > len)
1748 copy = len;
1749 if (skb_copy_bits(frag_iter, offset - start, to, copy))
1750 goto fault;
1751 if ((len -= copy) == 0)
1752 return 0;
1753 offset += copy;
1754 to += copy;
1755 }
1756 start = end;
1757 }
1758
1759 if (!len)
1760 return 0;
1761
1762fault:
1763 return -EFAULT;
1764}
1765EXPORT_SYMBOL(skb_copy_bits);
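/*
 * Illustrative sketch (editor's addition): extracting a header from a
 * possibly non-linear skb into a stack buffer without modifying the skb;
 * skb_header_pointer() builds on the same primitive. "offset" is a
 * hypothetical offset into the packet.
 *
 *	struct udphdr uh;
 *
 *	if (skb_copy_bits(skb, offset, &uh, sizeof(uh)) < 0)
 *		goto drop;		// packet shorter than expected
 */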
1766
1767/*
1768 * Callback from splice_to_pipe(), if we need to release some pages
1769 * at the end of the spd in case we error'ed out in filling the pipe.
1770 */
1771static void sock_spd_release(struct splice_pipe_desc *spd, unsigned int i)
1772{
1773 put_page(spd->pages[i]);
1774}
1775
1776static struct page *linear_to_page(struct page *page, unsigned int *len,
1777 unsigned int *offset,
1778 struct sock *sk)
1779{
1780 struct page_frag *pfrag = sk_page_frag(sk);
1781
1782 if (!sk_page_frag_refill(sk, pfrag))
1783 return NULL;
1784
1785 *len = min_t(unsigned int, *len, pfrag->size - pfrag->offset);
1786
1787 memcpy(page_address(pfrag->page) + pfrag->offset,
1788 page_address(page) + *offset, *len);
1789 *offset = pfrag->offset;
1790 pfrag->offset += *len;
1791
1792 return pfrag->page;
1793}
1794
1795static bool spd_can_coalesce(const struct splice_pipe_desc *spd,
1796 struct page *page,
1797 unsigned int offset)
1798{
1799 return spd->nr_pages &&
1800 spd->pages[spd->nr_pages - 1] == page &&
1801 (spd->partial[spd->nr_pages - 1].offset +
1802 spd->partial[spd->nr_pages - 1].len == offset);
1803}
1804
1805/*
1806 * Fill page/offset/length into spd, if it can hold more pages.
1807 */
1808static bool spd_fill_page(struct splice_pipe_desc *spd,
1809 struct pipe_inode_info *pipe, struct page *page,
1810 unsigned int *len, unsigned int offset,
1811 bool linear,
1812 struct sock *sk)
1813{
1814 if (unlikely(spd->nr_pages == MAX_SKB_FRAGS))
1815 return true;
1816
1817 if (linear) {
1818 page = linear_to_page(page, len, &offset, sk);
1819 if (!page)
1820 return true;
1821 }
1822 if (spd_can_coalesce(spd, page, offset)) {
1823 spd->partial[spd->nr_pages - 1].len += *len;
1824 return false;
1825 }
1826 get_page(page);
1827 spd->pages[spd->nr_pages] = page;
1828 spd->partial[spd->nr_pages].len = *len;
1829 spd->partial[spd->nr_pages].offset = offset;
1830 spd->nr_pages++;
1831
1832 return false;
1833}
1834
1835static bool __splice_segment(struct page *page, unsigned int poff,
1836 unsigned int plen, unsigned int *off,
1837 unsigned int *len,
1838 struct splice_pipe_desc *spd, bool linear,
1839 struct sock *sk,
1840 struct pipe_inode_info *pipe)
1841{
1842 if (!*len)
1843 return true;
1844
1845 /* skip this segment if already processed */
1846 if (*off >= plen) {
1847 *off -= plen;
1848 return false;
1849 }
1850
1851 /* ignore any bits we already processed */
1852 poff += *off;
1853 plen -= *off;
1854 *off = 0;
1855
1856 do {
1857 unsigned int flen = min(*len, plen);
1858
1859 if (spd_fill_page(spd, pipe, page, &flen, poff,
1860 linear, sk))
1861 return true;
1862 poff += flen;
1863 plen -= flen;
1864 *len -= flen;
1865 } while (*len && plen);
1866
1867 return false;
1868}
1869
1870/*
1871 * Map linear and fragment data from the skb to spd. It reports true if the
1872 * pipe is full or if we already spliced the requested length.
1873 */
1874static bool __skb_splice_bits(struct sk_buff *skb, struct pipe_inode_info *pipe,
1875 unsigned int *offset, unsigned int *len,
1876 struct splice_pipe_desc *spd, struct sock *sk)
1877{
1878 int seg;
1879
1880 /* map the linear part :
1881 * If skb->head_frag is set, this 'linear' part is backed by a
1882 * fragment, and if the head is not shared with any clones then
1883 * we can avoid a copy since we own the head portion of this page.
1884 */
1885 if (__splice_segment(virt_to_page(skb->data),
1886 (unsigned long) skb->data & (PAGE_SIZE - 1),
1887 skb_headlen(skb),
1888 offset, len, spd,
1889 skb_head_is_locked(skb),
1890 sk, pipe))
1891 return true;
1892
1893 /*
1894 * then map the fragments
1895 */
1896 for (seg = 0; seg < skb_shinfo(skb)->nr_frags; seg++) {
1897 const skb_frag_t *f = &skb_shinfo(skb)->frags[seg];
1898
1899 if (__splice_segment(skb_frag_page(f),
1900 f->page_offset, skb_frag_size(f),
1901 offset, len, spd, false, sk, pipe))
1902 return true;
1903 }
1904
1905 return false;
1906}
1907
1908/*
1909 * Map data from the skb to a pipe. Should handle both the linear part,
1910 * the fragments, and the frag list. It does NOT handle frag lists within
1911 * the frag list, if such a thing exists. We'd probably need to recurse to
1912 * handle that cleanly.
1913 */
1914int skb_splice_bits(struct sk_buff *skb, unsigned int offset,
1915 struct pipe_inode_info *pipe, unsigned int tlen,
1916 unsigned int flags)
1917{
1918 struct partial_page partial[MAX_SKB_FRAGS];
1919 struct page *pages[MAX_SKB_FRAGS];
1920 struct splice_pipe_desc spd = {
1921 .pages = pages,
1922 .partial = partial,
1923 .nr_pages_max = MAX_SKB_FRAGS,
1924 .flags = flags,
1925 .ops = &nosteal_pipe_buf_ops,
1926 .spd_release = sock_spd_release,
1927 };
1928 struct sk_buff *frag_iter;
1929 struct sock *sk = skb->sk;
1930 int ret = 0;
1931
1932 /*
1933 * __skb_splice_bits() only fails if the output has no room left,
1934 * so no point in going over the frag_list for the error case.
1935 */
1936 if (__skb_splice_bits(skb, pipe, &offset, &tlen, &spd, sk))
1937 goto done;
1938 else if (!tlen)
1939 goto done;
1940
1941 /*
1942 * now see if we have a frag_list to map
1943 */
1944 skb_walk_frags(skb, frag_iter) {
1945 if (!tlen)
1946 break;
1947 if (__skb_splice_bits(frag_iter, pipe, &offset, &tlen, &spd, sk))
1948 break;
1949 }
1950
1951done:
1952 if (spd.nr_pages) {
1953 /*
1954 * Drop the socket lock, otherwise we have reverse
1955 * locking dependencies between sk_lock and i_mutex
1956 * here as compared to sendfile(). We enter here
1957 * with the socket lock held, and splice_to_pipe() will
1958 * grab the pipe inode lock. For sendfile() emulation,
1959 * we call into ->sendpage() with the i_mutex lock held
1960 * and networking will grab the socket lock.
1961 */
1962 release_sock(sk);
1963 ret = splice_to_pipe(pipe, &spd);
1964 lock_sock(sk);
1965 }
1966
1967 return ret;
1968}
1969
1970/**
1971 * skb_store_bits - store bits from kernel buffer to skb
1972 * @skb: destination buffer
1973 * @offset: offset in destination
1974 * @from: source buffer
1975 * @len: number of bytes to copy
1976 *
1977 * Copy the specified number of bytes from the source buffer to the
1978 * destination skb. This function handles all the messy bits of
1979 * traversing fragment lists and such.
1980 */
1981
1982int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len)
1983{
1984 int start = skb_headlen(skb);
1985 struct sk_buff *frag_iter;
1986 int i, copy;
1987
1988 if (offset > (int)skb->len - len)
1989 goto fault;
1990
1991 if ((copy = start - offset) > 0) {
1992 if (copy > len)
1993 copy = len;
1994 skb_copy_to_linear_data_offset(skb, offset, from, copy);
1995 if ((len -= copy) == 0)
1996 return 0;
1997 offset += copy;
1998 from += copy;
1999 }
2000
2001 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2002 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2003 int end;
2004
2005 WARN_ON(start > offset + len);
2006
2007 end = start + skb_frag_size(frag);
2008 if ((copy = end - offset) > 0) {
2009 u8 *vaddr;
2010
2011 if (copy > len)
2012 copy = len;
2013
2014 vaddr = kmap_atomic(skb_frag_page(frag));
2015 memcpy(vaddr + frag->page_offset + offset - start,
2016 from, copy);
2017 kunmap_atomic(vaddr);
2018
2019 if ((len -= copy) == 0)
2020 return 0;
2021 offset += copy;
2022 from += copy;
2023 }
2024 start = end;
2025 }
2026
2027 skb_walk_frags(skb, frag_iter) {
2028 int end;
2029
2030 WARN_ON(start > offset + len);
2031
2032 end = start + frag_iter->len;
2033 if ((copy = end - offset) > 0) {
2034 if (copy > len)
2035 copy = len;
2036 if (skb_store_bits(frag_iter, offset - start,
2037 from, copy))
2038 goto fault;
2039 if ((len -= copy) == 0)
2040 return 0;
2041 offset += copy;
2042 from += copy;
2043 }
2044 start = end;
2045 }
2046 if (!len)
2047 return 0;
2048
2049fault:
2050 return -EFAULT;
2051}
2052EXPORT_SYMBOL(skb_store_bits);
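
/*
 * Editor's illustrative sketch (not part of the original file): overwrite
 * four bytes at byte offset 16 of an skb, whether that range lives in the
 * linear area or in page fragments.  The offset and marker bytes are
 * hypothetical; skb_store_bits() returns -EFAULT if the range does not fit.
 */
static int __maybe_unused example_store_marker(struct sk_buff *skb)
{
	static const u8 marker[4] = { 0xde, 0xad, 0xbe, 0xef };

	return skb_store_bits(skb, 16, marker, sizeof(marker));
}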
2053
2054/* Checksum skb data. */
2055__wsum __skb_checksum(const struct sk_buff *skb, int offset, int len,
2056 __wsum csum, const struct skb_checksum_ops *ops)
2057{
2058 int start = skb_headlen(skb);
2059 int i, copy = start - offset;
2060 struct sk_buff *frag_iter;
2061 int pos = 0;
2062
2063 /* Checksum header. */
2064 if (copy > 0) {
2065 if (copy > len)
2066 copy = len;
2067 csum = ops->update(skb->data + offset, copy, csum);
2068 if ((len -= copy) == 0)
2069 return csum;
2070 offset += copy;
2071 pos = copy;
2072 }
2073
2074 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2075 int end;
2076 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2077
2078 WARN_ON(start > offset + len);
2079
2080 end = start + skb_frag_size(frag);
2081 if ((copy = end - offset) > 0) {
2082 __wsum csum2;
2083 u8 *vaddr;
2084
2085 if (copy > len)
2086 copy = len;
2087 vaddr = kmap_atomic(skb_frag_page(frag));
2088 csum2 = ops->update(vaddr + frag->page_offset +
2089 offset - start, copy, 0);
2090 kunmap_atomic(vaddr);
2091 csum = ops->combine(csum, csum2, pos, copy);
2092 if (!(len -= copy))
2093 return csum;
2094 offset += copy;
2095 pos += copy;
2096 }
2097 start = end;
2098 }
2099
2100 skb_walk_frags(skb, frag_iter) {
2101 int end;
2102
2103 WARN_ON(start > offset + len);
2104
2105 end = start + frag_iter->len;
2106 if ((copy = end - offset) > 0) {
2107 __wsum csum2;
2108 if (copy > len)
2109 copy = len;
2110 csum2 = __skb_checksum(frag_iter, offset - start,
2111 copy, 0, ops);
2112 csum = ops->combine(csum, csum2, pos, copy);
2113 if ((len -= copy) == 0)
2114 return csum;
2115 offset += copy;
2116 pos += copy;
2117 }
2118 start = end;
2119 }
2120 BUG_ON(len);
2121
2122 return csum;
2123}
2124EXPORT_SYMBOL(__skb_checksum);
2125
2126__wsum skb_checksum(const struct sk_buff *skb, int offset,
2127 int len, __wsum csum)
2128{
2129 const struct skb_checksum_ops ops = {
2130 .update = csum_partial_ext,
2131 .combine = csum_block_add_ext,
2132 };
2133
2134 return __skb_checksum(skb, offset, len, csum, &ops);
2135}
2136EXPORT_SYMBOL(skb_checksum);
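
/*
 * Editor's illustrative sketch (not part of the original file): checksum
 * everything after the first @hdr_len bytes of a packet, e.g. when
 * verifying a transport checksum by hand.  It assumes hdr_len <= skb->len.
 */
static __wsum __maybe_unused example_payload_csum(const struct sk_buff *skb,
						  int hdr_len)
{
	return skb_checksum(skb, hdr_len, skb->len - hdr_len, 0);
}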
2137
2138/* Both of above in one bottle. */
2139
2140__wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset,
2141 u8 *to, int len, __wsum csum)
2142{
2143 int start = skb_headlen(skb);
2144 int i, copy = start - offset;
2145 struct sk_buff *frag_iter;
2146 int pos = 0;
2147
2148 /* Copy header. */
2149 if (copy > 0) {
2150 if (copy > len)
2151 copy = len;
2152 csum = csum_partial_copy_nocheck(skb->data + offset, to,
2153 copy, csum);
2154 if ((len -= copy) == 0)
2155 return csum;
2156 offset += copy;
2157 to += copy;
2158 pos = copy;
2159 }
2160
2161 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2162 int end;
2163
2164 WARN_ON(start > offset + len);
2165
2166 end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]);
2167 if ((copy = end - offset) > 0) {
2168 __wsum csum2;
2169 u8 *vaddr;
2170 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2171
2172 if (copy > len)
2173 copy = len;
2174 vaddr = kmap_atomic(skb_frag_page(frag));
2175 csum2 = csum_partial_copy_nocheck(vaddr +
2176 frag->page_offset +
2177 offset - start, to,
2178 copy, 0);
2179 kunmap_atomic(vaddr);
2180 csum = csum_block_add(csum, csum2, pos);
2181 if (!(len -= copy))
2182 return csum;
2183 offset += copy;
2184 to += copy;
2185 pos += copy;
2186 }
2187 start = end;
2188 }
2189
2190 skb_walk_frags(skb, frag_iter) {
2191 __wsum csum2;
2192 int end;
2193
2194 WARN_ON(start > offset + len);
2195
2196 end = start + frag_iter->len;
2197 if ((copy = end - offset) > 0) {
2198 if (copy > len)
2199 copy = len;
2200 csum2 = skb_copy_and_csum_bits(frag_iter,
2201 offset - start,
2202 to, copy, 0);
2203 csum = csum_block_add(csum, csum2, pos);
2204 if ((len -= copy) == 0)
2205 return csum;
2206 offset += copy;
2207 to += copy;
2208 pos += copy;
2209 }
2210 start = end;
2211 }
2212 BUG_ON(len);
2213 return csum;
2214}
2215EXPORT_SYMBOL(skb_copy_and_csum_bits);
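
/*
 * Editor's illustrative sketch (not part of the original file): flatten an
 * skb into a caller-provided buffer while accumulating the checksum in the
 * same pass, the way reply paths use this helper.  @buf and @buf_len are
 * hypothetical; only min(buf_len, skb->len) bytes are copied.
 */
static __wsum __maybe_unused example_flatten_and_csum(const struct sk_buff *skb,
						      u8 *buf, int buf_len)
{
	int len = min_t(int, buf_len, skb->len);

	return skb_copy_and_csum_bits(skb, 0, buf, len, 0);
}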
2216
2217/**
2218 * skb_zerocopy_headlen - Calculate headroom needed for skb_zerocopy()
2219 * @from: source buffer
2220 *
2221 * Calculates the amount of linear headroom needed in the 'to' skb passed
2222 * into skb_zerocopy().
2223 */
2224unsigned int
2225skb_zerocopy_headlen(const struct sk_buff *from)
2226{
2227 unsigned int hlen = 0;
2228
2229 if (!from->head_frag ||
2230 skb_headlen(from) < L1_CACHE_BYTES ||
2231 skb_shinfo(from)->nr_frags >= MAX_SKB_FRAGS)
2232 hlen = skb_headlen(from);
2233
2234 if (skb_has_frag_list(from))
2235 hlen = from->len;
2236
2237 return hlen;
2238}
2239EXPORT_SYMBOL_GPL(skb_zerocopy_headlen);
2240
2241/**
2242 * skb_zerocopy - Zero copy skb to skb
2243 * @to: destination buffer
2244 * @from: source buffer
2245 * @len: number of bytes to copy from source buffer
2246 * @hlen: size of linear headroom in destination buffer
2247 *
2248 * Copies up to @len bytes from @from to @to by creating references
2249 * to the frags in the source buffer.
2250 *
2251 * @hlen, as calculated by skb_zerocopy_headlen(), specifies the
2252 * headroom in the @to buffer.
2253 *
2254 * Return value:
2255 * 0: everything is OK
2256 * -ENOMEM: couldn't orphan frags of @from due to lack of memory
2257 * -EFAULT: skb_copy_bits() found some problem with skb geometry
2258 */
2259int
2260skb_zerocopy(struct sk_buff *to, struct sk_buff *from, int len, int hlen)
2261{
2262 int i, j = 0;
2263 int plen = 0; /* length of skb->head fragment */
2264 int ret;
2265 struct page *page;
2266 unsigned int offset;
2267
2268 BUG_ON(!from->head_frag && !hlen);
2269
2270	/* don't bother with small payloads */
2271 if (len <= skb_tailroom(to))
2272 return skb_copy_bits(from, 0, skb_put(to, len), len);
2273
2274 if (hlen) {
2275 ret = skb_copy_bits(from, 0, skb_put(to, hlen), hlen);
2276 if (unlikely(ret))
2277 return ret;
2278 len -= hlen;
2279 } else {
2280 plen = min_t(int, skb_headlen(from), len);
2281 if (plen) {
2282 page = virt_to_head_page(from->head);
2283 offset = from->data - (unsigned char *)page_address(page);
2284 __skb_fill_page_desc(to, 0, page, offset, plen);
2285 get_page(page);
2286 j = 1;
2287 len -= plen;
2288 }
2289 }
2290
2291 to->truesize += len + plen;
2292 to->len += len + plen;
2293 to->data_len += len + plen;
2294
2295 if (unlikely(skb_orphan_frags(from, GFP_ATOMIC))) {
2296 skb_tx_error(from);
2297 return -ENOMEM;
2298 }
2299
2300 for (i = 0; i < skb_shinfo(from)->nr_frags; i++) {
2301 if (!len)
2302 break;
2303 skb_shinfo(to)->frags[j] = skb_shinfo(from)->frags[i];
2304 skb_shinfo(to)->frags[j].size = min_t(int, skb_shinfo(to)->frags[j].size, len);
2305 len -= skb_shinfo(to)->frags[j].size;
2306 skb_frag_ref(to, j);
2307 j++;
2308 }
2309 skb_shinfo(to)->nr_frags = j;
2310
2311 return 0;
2312}
2313EXPORT_SYMBOL_GPL(skb_zerocopy);
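
/*
 * Editor's illustrative sketch (not part of the original file): build a new
 * skb that references @from's pages instead of copying them, using
 * skb_zerocopy_headlen() to size the linear part, roughly as the nfnetlink
 * queue path does.  It assumes @from was built by the normal receive path
 * (head_frag set or a non-empty linear area); otherwise the BUG_ON() in
 * skb_zerocopy() would trigger.
 */
static __maybe_unused struct sk_buff *example_zerocopy_clone(struct sk_buff *from,
							     gfp_t gfp)
{
	unsigned int hlen = skb_zerocopy_headlen(from);
	struct sk_buff *to = alloc_skb(hlen, gfp);

	if (!to)
		return NULL;

	if (skb_zerocopy(to, from, from->len, hlen) < 0) {
		kfree_skb(to);
		return NULL;
	}
	return to;
}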
2314
2315void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to)
2316{
2317 __wsum csum;
2318 long csstart;
2319
2320 if (skb->ip_summed == CHECKSUM_PARTIAL)
2321 csstart = skb_checksum_start_offset(skb);
2322 else
2323 csstart = skb_headlen(skb);
2324
2325 BUG_ON(csstart > skb_headlen(skb));
2326
2327 skb_copy_from_linear_data(skb, to, csstart);
2328
2329 csum = 0;
2330 if (csstart != skb->len)
2331 csum = skb_copy_and_csum_bits(skb, csstart, to + csstart,
2332 skb->len - csstart, 0);
2333
2334 if (skb->ip_summed == CHECKSUM_PARTIAL) {
2335 long csstuff = csstart + skb->csum_offset;
2336
2337 *((__sum16 *)(to + csstuff)) = csum_fold(csum);
2338 }
2339}
2340EXPORT_SYMBOL(skb_copy_and_csum_dev);
2341
2342/**
2343 * skb_dequeue - remove from the head of the queue
2344 * @list: list to dequeue from
2345 *
2346 * Remove the head of the list. The list lock is taken so the function
2347 * may be used safely with other locking list functions. The head item is
2348 * returned or %NULL if the list is empty.
2349 */
2350
2351struct sk_buff *skb_dequeue(struct sk_buff_head *list)
2352{
2353 unsigned long flags;
2354 struct sk_buff *result;
2355
2356 spin_lock_irqsave(&list->lock, flags);
2357 result = __skb_dequeue(list);
2358 spin_unlock_irqrestore(&list->lock, flags);
2359 return result;
2360}
2361EXPORT_SYMBOL(skb_dequeue);
2362
2363/**
2364 * skb_dequeue_tail - remove from the tail of the queue
2365 * @list: list to dequeue from
2366 *
2367 * Remove the tail of the list. The list lock is taken so the function
2368 * may be used safely with other locking list functions. The tail item is
2369 * returned or %NULL if the list is empty.
2370 */
2371struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list)
2372{
2373 unsigned long flags;
2374 struct sk_buff *result;
2375
2376 spin_lock_irqsave(&list->lock, flags);
2377 result = __skb_dequeue_tail(list);
2378 spin_unlock_irqrestore(&list->lock, flags);
2379 return result;
2380}
2381EXPORT_SYMBOL(skb_dequeue_tail);
2382
2383/**
2384 * skb_queue_purge - empty a list
2385 * @list: list to empty
2386 *
2387 * Delete all buffers on an &sk_buff list. Each buffer is removed from
2388 * the list and one reference dropped. This function takes the list
2389 * lock and is atomic with respect to other list locking functions.
2390 */
2391void skb_queue_purge(struct sk_buff_head *list)
2392{
2393 struct sk_buff *skb;
2394 while ((skb = skb_dequeue(list)) != NULL)
2395 kfree_skb(skb);
2396}
2397EXPORT_SYMBOL(skb_queue_purge);
2398
2399/**
2400 * skb_queue_head - queue a buffer at the list head
2401 * @list: list to use
2402 * @newsk: buffer to queue
2403 *
2404 * Queue a buffer at the start of the list. This function takes the
2405 * list lock and can be used safely with other locking &sk_buff
2406 * functions.
2407 *
2408 * A buffer cannot be placed on two lists at the same time.
2409 */
2410void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk)
2411{
2412 unsigned long flags;
2413
2414 spin_lock_irqsave(&list->lock, flags);
2415 __skb_queue_head(list, newsk);
2416 spin_unlock_irqrestore(&list->lock, flags);
2417}
2418EXPORT_SYMBOL(skb_queue_head);
2419
2420/**
2421 * skb_queue_tail - queue a buffer at the list tail
2422 * @list: list to use
2423 * @newsk: buffer to queue
2424 *
2425 * Queue a buffer at the tail of the list. This function takes the
2426 * list lock and can be used safely with other locking &sk_buff
2427 * functions.
2428 *
2429 * A buffer cannot be placed on two lists at the same time.
2430 */
2431void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk)
2432{
2433 unsigned long flags;
2434
2435 spin_lock_irqsave(&list->lock, flags);
2436 __skb_queue_tail(list, newsk);
2437 spin_unlock_irqrestore(&list->lock, flags);
2438}
2439EXPORT_SYMBOL(skb_queue_tail);
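
/*
 * Editor's illustrative sketch (not part of the original file): a trivial
 * producer/consumer FIFO built on the locked queue helpers above.  The
 * queue head is assumed to have been initialised with skb_queue_head_init().
 */
static void __maybe_unused example_fifo_put(struct sk_buff_head *q,
					    struct sk_buff *skb)
{
	skb_queue_tail(q, skb);		/* takes q->lock internally */
}

static __maybe_unused struct sk_buff *example_fifo_get(struct sk_buff_head *q)
{
	return skb_dequeue(q);		/* NULL when the queue is empty */
}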
2440
2441/**
2442 * skb_unlink - remove a buffer from a list
2443 * @skb: buffer to remove
2444 * @list: list to use
2445 *
2446 * Remove a packet from a list. The list locks are taken and this
2447 * function is atomic with respect to other list locked calls.
2448 *
2449 * You must know what list the SKB is on.
2450 */
2451void skb_unlink(struct sk_buff *skb, struct sk_buff_head *list)
2452{
2453 unsigned long flags;
2454
2455 spin_lock_irqsave(&list->lock, flags);
2456 __skb_unlink(skb, list);
2457 spin_unlock_irqrestore(&list->lock, flags);
2458}
2459EXPORT_SYMBOL(skb_unlink);
2460
2461/**
2462 * skb_append - append a buffer
2463 * @old: buffer to insert after
2464 * @newsk: buffer to insert
2465 * @list: list to use
2466 *
2467 * Place a packet after a given packet in a list. The list locks are taken
2468 * and this function is atomic with respect to other list locked calls.
2469 * A buffer cannot be placed on two lists at the same time.
2470 */
2471void skb_append(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list)
2472{
2473 unsigned long flags;
2474
2475 spin_lock_irqsave(&list->lock, flags);
2476 __skb_queue_after(list, old, newsk);
2477 spin_unlock_irqrestore(&list->lock, flags);
2478}
2479EXPORT_SYMBOL(skb_append);
2480
2481/**
2482 * skb_insert - insert a buffer
2483 * @old: buffer to insert before
2484 * @newsk: buffer to insert
2485 * @list: list to use
2486 *
2487 * Place a packet before a given packet in a list. The list locks are
2488 * taken and this function is atomic with respect to other list locked
2489 * calls.
2490 *
2491 * A buffer cannot be placed on two lists at the same time.
2492 */
2493void skb_insert(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list)
2494{
2495 unsigned long flags;
2496
2497 spin_lock_irqsave(&list->lock, flags);
2498 __skb_insert(newsk, old->prev, old, list);
2499 spin_unlock_irqrestore(&list->lock, flags);
2500}
2501EXPORT_SYMBOL(skb_insert);
2502
2503static inline void skb_split_inside_header(struct sk_buff *skb,
2504 struct sk_buff* skb1,
2505 const u32 len, const int pos)
2506{
2507 int i;
2508
2509 skb_copy_from_linear_data_offset(skb, len, skb_put(skb1, pos - len),
2510 pos - len);
2511 /* And move data appendix as is. */
2512 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
2513 skb_shinfo(skb1)->frags[i] = skb_shinfo(skb)->frags[i];
2514
2515 skb_shinfo(skb1)->nr_frags = skb_shinfo(skb)->nr_frags;
2516 skb_shinfo(skb)->nr_frags = 0;
2517 skb1->data_len = skb->data_len;
2518 skb1->len += skb1->data_len;
2519 skb->data_len = 0;
2520 skb->len = len;
2521 skb_set_tail_pointer(skb, len);
2522}
2523
2524static inline void skb_split_no_header(struct sk_buff *skb,
2525 struct sk_buff* skb1,
2526 const u32 len, int pos)
2527{
2528 int i, k = 0;
2529 const int nfrags = skb_shinfo(skb)->nr_frags;
2530
2531 skb_shinfo(skb)->nr_frags = 0;
2532 skb1->len = skb1->data_len = skb->len - len;
2533 skb->len = len;
2534 skb->data_len = len - pos;
2535
2536 for (i = 0; i < nfrags; i++) {
2537 int size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
2538
2539 if (pos + size > len) {
2540 skb_shinfo(skb1)->frags[k] = skb_shinfo(skb)->frags[i];
2541
2542 if (pos < len) {
2543 /* Split frag.
2544 * We have two variants in this case:
2545				 * 1. Move the whole frag to the second
2546				 *    part, if possible. E.g. this
2547				 *    approach is mandatory for TUX,
2548				 *    where splitting is expensive.
2549				 * 2. Split accurately. This is what we do here.
2550 */
2551 skb_frag_ref(skb, i);
2552 skb_shinfo(skb1)->frags[0].page_offset += len - pos;
2553 skb_frag_size_sub(&skb_shinfo(skb1)->frags[0], len - pos);
2554 skb_frag_size_set(&skb_shinfo(skb)->frags[i], len - pos);
2555 skb_shinfo(skb)->nr_frags++;
2556 }
2557 k++;
2558 } else
2559 skb_shinfo(skb)->nr_frags++;
2560 pos += size;
2561 }
2562 skb_shinfo(skb1)->nr_frags = k;
2563}
2564
2565/**
2566 * skb_split - Split fragmented skb to two parts at length len.
2567 * @skb: the buffer to split
2568 * @skb1: the buffer to receive the second part
2569 * @len: new length for skb
2570 */
2571void skb_split(struct sk_buff *skb, struct sk_buff *skb1, const u32 len)
2572{
2573 int pos = skb_headlen(skb);
2574
2575 skb_shinfo(skb1)->tx_flags = skb_shinfo(skb)->tx_flags & SKBTX_SHARED_FRAG;
2576 if (len < pos) /* Split line is inside header. */
2577 skb_split_inside_header(skb, skb1, len, pos);
2578 else /* Second chunk has no header, nothing to copy. */
2579 skb_split_no_header(skb, skb1, len, pos);
2580}
2581EXPORT_SYMBOL(skb_split);
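
/*
 * Editor's illustrative sketch (not part of the original file): cut an skb
 * in two at byte @len, returning a freshly allocated skb holding the tail.
 * It assumes 0 < len < skb->len and that @skb is not cloned (a real caller,
 * e.g. tcp_fragment(), unclones first).  The new skb is sized so the
 * inside-header case has room for the copied linear bytes.
 */
static __maybe_unused struct sk_buff *example_split_at(struct sk_buff *skb,
						       u32 len, gfp_t gfp)
{
	struct sk_buff *rest = alloc_skb(skb_headlen(skb), gfp);

	if (!rest)
		return NULL;

	skb_split(skb, rest, len);	/* @skb keeps the first @len bytes */
	return rest;			/* @rest holds the remainder */
}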
2582
2583/* Shifting from/to a cloned skb is a no-go.
2584 *
2585 * Caller cannot keep skb_shinfo related pointers past calling here!
2586 */
2587static int skb_prepare_for_shift(struct sk_buff *skb)
2588{
2589 return skb_cloned(skb) && pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
2590}
2591
2592/**
2593 * skb_shift - Shifts paged data partially from skb to another
2594 * @tgt: buffer into which tail data gets added
2595 * @skb: buffer from which the paged data comes from
2596 * @shiftlen: shift up to this many bytes
2597 *
2598 * Attempts to shift up to shiftlen worth of bytes, which may be less than
2599 * the length of the skb, from skb to tgt. Returns the number of bytes shifted.
2600 * It's up to caller to free skb if everything was shifted.
2601 *
2602 * If @tgt runs out of frags, the whole operation is aborted.
2603 *
2604 * The skb may contain only paged data, while tgt is allowed
2605 * to contain non-paged data as well.
2606 *
2607 * TODO: full sized shift could be optimized but that would need
2608 * specialized skb free'er to handle frags without up-to-date nr_frags.
2609 */
2610int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen)
2611{
2612 int from, to, merge, todo;
2613 struct skb_frag_struct *fragfrom, *fragto;
2614
2615 BUG_ON(shiftlen > skb->len);
2616 BUG_ON(skb_headlen(skb)); /* Would corrupt stream */
2617
2618 todo = shiftlen;
2619 from = 0;
2620 to = skb_shinfo(tgt)->nr_frags;
2621 fragfrom = &skb_shinfo(skb)->frags[from];
2622
2623 /* Actual merge is delayed until the point when we know we can
2624 * commit all, so that we don't have to undo partial changes
2625 */
2626 if (!to ||
2627 !skb_can_coalesce(tgt, to, skb_frag_page(fragfrom),
2628 fragfrom->page_offset)) {
2629 merge = -1;
2630 } else {
2631 merge = to - 1;
2632
2633 todo -= skb_frag_size(fragfrom);
2634 if (todo < 0) {
2635 if (skb_prepare_for_shift(skb) ||
2636 skb_prepare_for_shift(tgt))
2637 return 0;
2638
2639 /* All previous frag pointers might be stale! */
2640 fragfrom = &skb_shinfo(skb)->frags[from];
2641 fragto = &skb_shinfo(tgt)->frags[merge];
2642
2643 skb_frag_size_add(fragto, shiftlen);
2644 skb_frag_size_sub(fragfrom, shiftlen);
2645 fragfrom->page_offset += shiftlen;
2646
2647 goto onlymerged;
2648 }
2649
2650 from++;
2651 }
2652
2653 /* Skip full, not-fitting skb to avoid expensive operations */
2654 if ((shiftlen == skb->len) &&
2655 (skb_shinfo(skb)->nr_frags - from) > (MAX_SKB_FRAGS - to))
2656 return 0;
2657
2658 if (skb_prepare_for_shift(skb) || skb_prepare_for_shift(tgt))
2659 return 0;
2660
2661 while ((todo > 0) && (from < skb_shinfo(skb)->nr_frags)) {
2662 if (to == MAX_SKB_FRAGS)
2663 return 0;
2664
2665 fragfrom = &skb_shinfo(skb)->frags[from];
2666 fragto = &skb_shinfo(tgt)->frags[to];
2667
2668 if (todo >= skb_frag_size(fragfrom)) {
2669 *fragto = *fragfrom;
2670 todo -= skb_frag_size(fragfrom);
2671 from++;
2672 to++;
2673
2674 } else {
2675 __skb_frag_ref(fragfrom);
2676 fragto->page = fragfrom->page;
2677 fragto->page_offset = fragfrom->page_offset;
2678 skb_frag_size_set(fragto, todo);
2679
2680 fragfrom->page_offset += todo;
2681 skb_frag_size_sub(fragfrom, todo);
2682 todo = 0;
2683
2684 to++;
2685 break;
2686 }
2687 }
2688
2689 /* Ready to "commit" this state change to tgt */
2690 skb_shinfo(tgt)->nr_frags = to;
2691
2692 if (merge >= 0) {
2693 fragfrom = &skb_shinfo(skb)->frags[0];
2694 fragto = &skb_shinfo(tgt)->frags[merge];
2695
2696 skb_frag_size_add(fragto, skb_frag_size(fragfrom));
2697 __skb_frag_unref(fragfrom);
2698 }
2699
2700 /* Reposition in the original skb */
2701 to = 0;
2702 while (from < skb_shinfo(skb)->nr_frags)
2703 skb_shinfo(skb)->frags[to++] = skb_shinfo(skb)->frags[from++];
2704 skb_shinfo(skb)->nr_frags = to;
2705
2706 BUG_ON(todo > 0 && !skb_shinfo(skb)->nr_frags);
2707
2708onlymerged:
2709 /* Most likely the tgt won't ever need its checksum anymore, skb on
2710 * the other hand might need it if it needs to be resent
2711 */
2712 tgt->ip_summed = CHECKSUM_PARTIAL;
2713 skb->ip_summed = CHECKSUM_PARTIAL;
2714
2715 /* Yak, is it really working this way? Some helper please? */
2716 skb->len -= shiftlen;
2717 skb->data_len -= shiftlen;
2718 skb->truesize -= shiftlen;
2719 tgt->len += shiftlen;
2720 tgt->data_len += shiftlen;
2721 tgt->truesize += shiftlen;
2722
2723 return shiftlen;
2724}
2725
2726/**
2727 * skb_prepare_seq_read - Prepare a sequential read of skb data
2728 * @skb: the buffer to read
2729 * @from: lower offset of data to be read
2730 * @to: upper offset of data to be read
2731 * @st: state variable
2732 *
2733 * Initializes the specified state variable. Must be called before
2734 * invoking skb_seq_read() for the first time.
2735 */
2736void skb_prepare_seq_read(struct sk_buff *skb, unsigned int from,
2737 unsigned int to, struct skb_seq_state *st)
2738{
2739 st->lower_offset = from;
2740 st->upper_offset = to;
2741 st->root_skb = st->cur_skb = skb;
2742 st->frag_idx = st->stepped_offset = 0;
2743 st->frag_data = NULL;
2744}
2745EXPORT_SYMBOL(skb_prepare_seq_read);
2746
2747/**
2748 * skb_seq_read - Sequentially read skb data
2749 * @consumed: number of bytes consumed by the caller so far
2750 * @data: destination pointer for data to be returned
2751 * @st: state variable
2752 *
2753 * Reads a block of skb data at @consumed relative to the
2754 * lower offset specified to skb_prepare_seq_read(). Assigns
2755 * the head of the data block to @data and returns the length
2756 * of the block or 0 if the end of the skb data or the upper
2757 * offset has been reached.
2758 *
2759 * The caller is not required to consume all of the data
2760 * returned, i.e. @consumed is typically set to the number
2761 * of bytes already consumed and the next call to
2762 * skb_seq_read() will return the remaining part of the block.
2763 *
2764 * Note 1: The size of each block of data returned can be arbitrary;
2765 * this limitation is the cost of zerocopy sequential
2766 * reads of potentially non-linear data.
2767 *
2768 * Note 2: Fragment lists within fragments are not implemented
2769 * at the moment, state->root_skb could be replaced with
2770 * a stack for this purpose.
2771 */
2772unsigned int skb_seq_read(unsigned int consumed, const u8 **data,
2773 struct skb_seq_state *st)
2774{
2775 unsigned int block_limit, abs_offset = consumed + st->lower_offset;
2776 skb_frag_t *frag;
2777
2778 if (unlikely(abs_offset >= st->upper_offset)) {
2779 if (st->frag_data) {
2780 kunmap_atomic(st->frag_data);
2781 st->frag_data = NULL;
2782 }
2783 return 0;
2784 }
2785
2786next_skb:
2787 block_limit = skb_headlen(st->cur_skb) + st->stepped_offset;
2788
2789 if (abs_offset < block_limit && !st->frag_data) {
2790 *data = st->cur_skb->data + (abs_offset - st->stepped_offset);
2791 return block_limit - abs_offset;
2792 }
2793
2794 if (st->frag_idx == 0 && !st->frag_data)
2795 st->stepped_offset += skb_headlen(st->cur_skb);
2796
2797 while (st->frag_idx < skb_shinfo(st->cur_skb)->nr_frags) {
2798 frag = &skb_shinfo(st->cur_skb)->frags[st->frag_idx];
2799 block_limit = skb_frag_size(frag) + st->stepped_offset;
2800
2801 if (abs_offset < block_limit) {
2802 if (!st->frag_data)
2803 st->frag_data = kmap_atomic(skb_frag_page(frag));
2804
2805 *data = (u8 *) st->frag_data + frag->page_offset +
2806 (abs_offset - st->stepped_offset);
2807
2808 return block_limit - abs_offset;
2809 }
2810
2811 if (st->frag_data) {
2812 kunmap_atomic(st->frag_data);
2813 st->frag_data = NULL;
2814 }
2815
2816 st->frag_idx++;
2817 st->stepped_offset += skb_frag_size(frag);
2818 }
2819
2820 if (st->frag_data) {
2821 kunmap_atomic(st->frag_data);
2822 st->frag_data = NULL;
2823 }
2824
2825 if (st->root_skb == st->cur_skb && skb_has_frag_list(st->root_skb)) {
2826 st->cur_skb = skb_shinfo(st->root_skb)->frag_list;
2827 st->frag_idx = 0;
2828 goto next_skb;
2829 } else if (st->cur_skb->next) {
2830 st->cur_skb = st->cur_skb->next;
2831 st->frag_idx = 0;
2832 goto next_skb;
2833 }
2834
2835 return 0;
2836}
2837EXPORT_SYMBOL(skb_seq_read);
2838
2839/**
2840 * skb_abort_seq_read - Abort a sequential read of skb data
2841 * @st: state variable
2842 *
2843 * Must be called if the sequential read is abandoned before
2844 * skb_seq_read() has returned 0.
2845 */
2846void skb_abort_seq_read(struct skb_seq_state *st)
2847{
2848 if (st->frag_data)
2849 kunmap_atomic(st->frag_data);
2850}
2851EXPORT_SYMBOL(skb_abort_seq_read);
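
/*
 * Editor's illustrative sketch (not part of the original file): the usual
 * prepare/read loop for walking possibly non-linear skb data without
 * copying it.  process() is a hypothetical callback.  Because the loop only
 * stops once skb_seq_read() returns 0, no skb_abort_seq_read() is needed;
 * a caller that breaks out early must call it to drop the kmap.
 */
static void __maybe_unused example_seq_walk(struct sk_buff *skb,
					    unsigned int from, unsigned int to,
					    void (*process)(const u8 *data,
							    unsigned int len))
{
	struct skb_seq_state st;
	const u8 *data;
	unsigned int consumed = 0, len;

	skb_prepare_seq_read(skb, from, to, &st);
	while ((len = skb_seq_read(consumed, &data, &st)) != 0) {
		process(data, len);
		consumed += len;
	}
}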
2852
2853#define TS_SKB_CB(state) ((struct skb_seq_state *) &((state)->cb))
2854
2855static unsigned int skb_ts_get_next_block(unsigned int offset, const u8 **text,
2856 struct ts_config *conf,
2857 struct ts_state *state)
2858{
2859 return skb_seq_read(offset, text, TS_SKB_CB(state));
2860}
2861
2862static void skb_ts_finish(struct ts_config *conf, struct ts_state *state)
2863{
2864 skb_abort_seq_read(TS_SKB_CB(state));
2865}
2866
2867/**
2868 * skb_find_text - Find a text pattern in skb data
2869 * @skb: the buffer to look in
2870 * @from: search offset
2871 * @to: search limit
2872 * @config: textsearch configuration
2873 *
2874 * Finds a pattern in the skb data according to the specified
2875 * textsearch configuration. Use textsearch_next() to retrieve
2876 * subsequent occurrences of the pattern. Returns the offset
2877 * to the first occurrence or UINT_MAX if no match was found.
2878 */
2879unsigned int skb_find_text(struct sk_buff *skb, unsigned int from,
2880 unsigned int to, struct ts_config *config)
2881{
2882 struct ts_state state;
2883 unsigned int ret;
2884
2885 config->get_next_block = skb_ts_get_next_block;
2886 config->finish = skb_ts_finish;
2887
2888 skb_prepare_seq_read(skb, from, to, TS_SKB_CB(&state));
2889
2890 ret = textsearch_find(config, &state);
2891 return (ret <= to - from ? ret : UINT_MAX);
2892}
2893EXPORT_SYMBOL(skb_find_text);
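
/*
 * Editor's illustrative sketch (not part of the original file): search a
 * packet for the literal string "HTTP" using the "kmp" textsearch
 * algorithm.  GFP_KERNEL assumes process context; the pattern and
 * algorithm choice are purely illustrative.
 */
static unsigned int __maybe_unused example_find_http(struct sk_buff *skb)
{
	struct ts_config *conf;
	unsigned int pos;

	conf = textsearch_prepare("kmp", "HTTP", 4, GFP_KERNEL, TS_AUTOLOAD);
	if (IS_ERR(conf))
		return UINT_MAX;

	pos = skb_find_text(skb, 0, skb->len, conf);
	textsearch_destroy(conf);
	return pos;			/* offset of first match, or UINT_MAX */
}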
2894
2895/**
2896 * skb_append_datato_frags - append the user data to a skb
2897 * @sk: sock structure
2898 * @skb: skb structure to be appended with user data.
2899 * @getfrag: callback function to be used for getting the user data
2900 * @from: pointer to user message iov
2901 * @length: length of the iov message
2902 *
2903 * Description: This procedure appends the user data to the fragment part
2904 * of the skb. If any page allocation fails, this procedure returns -ENOMEM.
2905 */
2906int skb_append_datato_frags(struct sock *sk, struct sk_buff *skb,
2907 int (*getfrag)(void *from, char *to, int offset,
2908 int len, int odd, struct sk_buff *skb),
2909 void *from, int length)
2910{
2911 int frg_cnt = skb_shinfo(skb)->nr_frags;
2912 int copy;
2913 int offset = 0;
2914 int ret;
2915 struct page_frag *pfrag = &current->task_frag;
2916
2917 do {
2918 /* Return error if we don't have space for new frag */
2919 if (frg_cnt >= MAX_SKB_FRAGS)
2920 return -EMSGSIZE;
2921
2922 if (!sk_page_frag_refill(sk, pfrag))
2923 return -ENOMEM;
2924
2925 /* copy the user data to page */
2926 copy = min_t(int, length, pfrag->size - pfrag->offset);
2927
2928 ret = getfrag(from, page_address(pfrag->page) + pfrag->offset,
2929 offset, copy, 0, skb);
2930 if (ret < 0)
2931 return -EFAULT;
2932
2933 /* copy was successful so update the size parameters */
2934 skb_fill_page_desc(skb, frg_cnt, pfrag->page, pfrag->offset,
2935 copy);
2936 frg_cnt++;
2937 pfrag->offset += copy;
2938 get_page(pfrag->page);
2939
2940 skb->truesize += copy;
2941 atomic_add(copy, &sk->sk_wmem_alloc);
2942 skb->len += copy;
2943 skb->data_len += copy;
2944 offset += copy;
2945 length -= copy;
2946
2947 } while (length > 0);
2948
2949 return 0;
2950}
2951EXPORT_SYMBOL(skb_append_datato_frags);
2952
2953/**
2954 * skb_pull_rcsum - pull skb and update receive checksum
2955 * @skb: buffer to update
2956 * @len: length of data pulled
2957 *
2958 * This function performs an skb_pull on the packet and updates
2959 * the CHECKSUM_COMPLETE checksum. It should be used on
2960 * receive path processing instead of skb_pull unless you know
2961 * that the checksum difference is zero (e.g., a valid IP header)
2962 * or you are setting ip_summed to CHECKSUM_NONE.
2963 */
2964unsigned char *skb_pull_rcsum(struct sk_buff *skb, unsigned int len)
2965{
2966 BUG_ON(len > skb->len);
2967 skb->len -= len;
2968 BUG_ON(skb->len < skb->data_len);
2969 skb_postpull_rcsum(skb, skb->data, len);
2970 return skb->data += len;
2971}
2972EXPORT_SYMBOL_GPL(skb_pull_rcsum);
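
/*
 * Editor's illustrative sketch (not part of the original file): strip a
 * hypothetical 4-byte tag from the front of a received packet while keeping
 * a CHECKSUM_COMPLETE value consistent, as a tunnel or VLAN-like receive
 * handler would.
 */
static int __maybe_unused example_strip_tag(struct sk_buff *skb)
{
	if (!pskb_may_pull(skb, 4))
		return -EINVAL;

	skb_pull_rcsum(skb, 4);		/* updates skb->csum as needed */
	return 0;
}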
2973
2974/**
2975 * skb_segment - Perform protocol segmentation on skb.
2976 * @head_skb: buffer to segment
2977 * @features: features for the output path (see dev->features)
2978 *
2979 * This function performs segmentation on the given skb. It returns
2980 * a pointer to the first in a list of new skbs for the segments.
2981 * In case of error it returns ERR_PTR(err).
2982 */
2983struct sk_buff *skb_segment(struct sk_buff *head_skb,
2984 netdev_features_t features)
2985{
2986 struct sk_buff *segs = NULL;
2987 struct sk_buff *tail = NULL;
2988 struct sk_buff *list_skb = skb_shinfo(head_skb)->frag_list;
2989 skb_frag_t *frag = skb_shinfo(head_skb)->frags;
2990 unsigned int mss = skb_shinfo(head_skb)->gso_size;
2991 unsigned int doffset = head_skb->data - skb_mac_header(head_skb);
2992 struct sk_buff *frag_skb = head_skb;
2993 unsigned int offset = doffset;
2994 unsigned int tnl_hlen = skb_tnl_header_len(head_skb);
2995 unsigned int headroom;
2996 unsigned int len;
2997 __be16 proto;
2998 bool csum;
2999 int sg = !!(features & NETIF_F_SG);
3000 int nfrags = skb_shinfo(head_skb)->nr_frags;
3001 int err = -ENOMEM;
3002 int i = 0;
3003 int pos;
3004 int dummy;
3005
3006 __skb_push(head_skb, doffset);
3007 proto = skb_network_protocol(head_skb, &dummy);
3008 if (unlikely(!proto))
3009 return ERR_PTR(-EINVAL);
3010
3011 csum = !head_skb->encap_hdr_csum &&
3012 !!can_checksum_protocol(features, proto);
3013
3014 headroom = skb_headroom(head_skb);
3015 pos = skb_headlen(head_skb);
3016
3017 do {
3018 struct sk_buff *nskb;
3019 skb_frag_t *nskb_frag;
3020 int hsize;
3021 int size;
3022
3023 len = head_skb->len - offset;
3024 if (len > mss)
3025 len = mss;
3026
3027 hsize = skb_headlen(head_skb) - offset;
3028 if (hsize < 0)
3029 hsize = 0;
3030 if (hsize > len || !sg)
3031 hsize = len;
3032
3033 if (!hsize && i >= nfrags && skb_headlen(list_skb) &&
3034 (skb_headlen(list_skb) == len || sg)) {
3035 BUG_ON(skb_headlen(list_skb) > len);
3036
3037 i = 0;
3038 nfrags = skb_shinfo(list_skb)->nr_frags;
3039 frag = skb_shinfo(list_skb)->frags;
3040 frag_skb = list_skb;
3041 pos += skb_headlen(list_skb);
3042
3043 while (pos < offset + len) {
3044 BUG_ON(i >= nfrags);
3045
3046 size = skb_frag_size(frag);
3047 if (pos + size > offset + len)
3048 break;
3049
3050 i++;
3051 pos += size;
3052 frag++;
3053 }
3054
3055 nskb = skb_clone(list_skb, GFP_ATOMIC);
3056 list_skb = list_skb->next;
3057
3058 if (unlikely(!nskb))
3059 goto err;
3060
3061 if (unlikely(pskb_trim(nskb, len))) {
3062 kfree_skb(nskb);
3063 goto err;
3064 }
3065
3066 hsize = skb_end_offset(nskb);
3067 if (skb_cow_head(nskb, doffset + headroom)) {
3068 kfree_skb(nskb);
3069 goto err;
3070 }
3071
3072 nskb->truesize += skb_end_offset(nskb) - hsize;
3073 skb_release_head_state(nskb);
3074 __skb_push(nskb, doffset);
3075 } else {
3076 nskb = __alloc_skb(hsize + doffset + headroom,
3077 GFP_ATOMIC, skb_alloc_rx_flag(head_skb),
3078 NUMA_NO_NODE);
3079
3080 if (unlikely(!nskb))
3081 goto err;
3082
3083 skb_reserve(nskb, headroom);
3084 __skb_put(nskb, doffset);
3085 }
3086
3087 if (segs)
3088 tail->next = nskb;
3089 else
3090 segs = nskb;
3091 tail = nskb;
3092
3093 __copy_skb_header(nskb, head_skb);
3094
3095 skb_headers_offset_update(nskb, skb_headroom(nskb) - headroom);
3096 skb_reset_mac_len(nskb);
3097
3098 skb_copy_from_linear_data_offset(head_skb, -tnl_hlen,
3099 nskb->data - tnl_hlen,
3100 doffset + tnl_hlen);
3101
3102 if (nskb->len == len + doffset)
3103 goto perform_csum_check;
3104
3105 if (!sg && !nskb->remcsum_offload) {
3106 nskb->ip_summed = CHECKSUM_NONE;
3107 nskb->csum = skb_copy_and_csum_bits(head_skb, offset,
3108 skb_put(nskb, len),
3109 len, 0);
3110 SKB_GSO_CB(nskb)->csum_start =
3111 skb_headroom(nskb) + doffset;
3112 continue;
3113 }
3114
3115 nskb_frag = skb_shinfo(nskb)->frags;
3116
3117 skb_copy_from_linear_data_offset(head_skb, offset,
3118 skb_put(nskb, hsize), hsize);
3119
3120 skb_shinfo(nskb)->tx_flags = skb_shinfo(head_skb)->tx_flags &
3121 SKBTX_SHARED_FRAG;
3122
3123 while (pos < offset + len) {
3124 if (i >= nfrags) {
3125 BUG_ON(skb_headlen(list_skb));
3126
3127 i = 0;
3128 nfrags = skb_shinfo(list_skb)->nr_frags;
3129 frag = skb_shinfo(list_skb)->frags;
3130 frag_skb = list_skb;
3131
3132 BUG_ON(!nfrags);
3133
3134 list_skb = list_skb->next;
3135 }
3136
3137 if (unlikely(skb_shinfo(nskb)->nr_frags >=
3138 MAX_SKB_FRAGS)) {
3139 net_warn_ratelimited(
3140 "skb_segment: too many frags: %u %u\n",
3141 pos, mss);
3142 goto err;
3143 }
3144
3145 if (unlikely(skb_orphan_frags(frag_skb, GFP_ATOMIC)))
3146 goto err;
3147
3148 *nskb_frag = *frag;
3149 __skb_frag_ref(nskb_frag);
3150 size = skb_frag_size(nskb_frag);
3151
3152 if (pos < offset) {
3153 nskb_frag->page_offset += offset - pos;
3154 skb_frag_size_sub(nskb_frag, offset - pos);
3155 }
3156
3157 skb_shinfo(nskb)->nr_frags++;
3158
3159 if (pos + size <= offset + len) {
3160 i++;
3161 frag++;
3162 pos += size;
3163 } else {
3164 skb_frag_size_sub(nskb_frag, pos + size - (offset + len));
3165 goto skip_fraglist;
3166 }
3167
3168 nskb_frag++;
3169 }
3170
3171skip_fraglist:
3172 nskb->data_len = len - hsize;
3173 nskb->len += nskb->data_len;
3174 nskb->truesize += nskb->data_len;
3175
3176perform_csum_check:
3177 if (!csum && !nskb->remcsum_offload) {
3178 nskb->csum = skb_checksum(nskb, doffset,
3179 nskb->len - doffset, 0);
3180 nskb->ip_summed = CHECKSUM_NONE;
3181 SKB_GSO_CB(nskb)->csum_start =
3182 skb_headroom(nskb) + doffset;
3183 }
3184 } while ((offset += len) < head_skb->len);
3185
3186 /* Some callers want to get the end of the list.
3187 * Put it in segs->prev to avoid walking the list.
3188 * (see validate_xmit_skb_list() for example)
3189 */
3190 segs->prev = tail;
3191
3192	/* The following permits correct backpressure for protocols
3193	 * using skb_set_owner_w().
3194	 * The idea is to transfer ownership from head_skb to the last segment.
3195 */
3196 if (head_skb->destructor == sock_wfree) {
3197 swap(tail->truesize, head_skb->truesize);
3198 swap(tail->destructor, head_skb->destructor);
3199 swap(tail->sk, head_skb->sk);
3200 }
3201 return segs;
3202
3203err:
3204 kfree_skb_list(segs);
3205 return ERR_PTR(err);
3206}
3207EXPORT_SYMBOL_GPL(skb_segment);
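
/*
 * Editor's illustrative sketch (not part of the original file): segment a
 * GSO skb and count the resulting packets.  It assumes @skb was prepared by
 * the stack (gso_size set, MAC header set); a real caller would transmit
 * the segments instead of freeing them.
 */
static int __maybe_unused example_segment_and_count(struct sk_buff *skb,
						    netdev_features_t features)
{
	struct sk_buff *segs, *seg;
	int count = 0;

	segs = skb_segment(skb, features);
	if (IS_ERR(segs))
		return PTR_ERR(segs);

	for (seg = segs; seg; seg = seg->next)
		count++;

	kfree_skb_list(segs);
	return count;
}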
3208
3209int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb)
3210{
3211 struct skb_shared_info *pinfo, *skbinfo = skb_shinfo(skb);
3212 unsigned int offset = skb_gro_offset(skb);
3213 unsigned int headlen = skb_headlen(skb);
3214 unsigned int len = skb_gro_len(skb);
3215 struct sk_buff *lp, *p = *head;
3216 unsigned int delta_truesize;
3217
3218 if (unlikely(p->len + len >= 65536))
3219 return -E2BIG;
3220
3221 lp = NAPI_GRO_CB(p)->last;
3222 pinfo = skb_shinfo(lp);
3223
3224 if (headlen <= offset) {
3225 skb_frag_t *frag;
3226 skb_frag_t *frag2;
3227 int i = skbinfo->nr_frags;
3228 int nr_frags = pinfo->nr_frags + i;
3229
3230 if (nr_frags > MAX_SKB_FRAGS)
3231 goto merge;
3232
3233 offset -= headlen;
3234 pinfo->nr_frags = nr_frags;
3235 skbinfo->nr_frags = 0;
3236
3237 frag = pinfo->frags + nr_frags;
3238 frag2 = skbinfo->frags + i;
3239 do {
3240 *--frag = *--frag2;
3241 } while (--i);
3242
3243 frag->page_offset += offset;
3244 skb_frag_size_sub(frag, offset);
3245
3246 /* all fragments truesize : remove (head size + sk_buff) */
3247 delta_truesize = skb->truesize -
3248 SKB_TRUESIZE(skb_end_offset(skb));
3249
3250 skb->truesize -= skb->data_len;
3251 skb->len -= skb->data_len;
3252 skb->data_len = 0;
3253
3254 NAPI_GRO_CB(skb)->free = NAPI_GRO_FREE;
3255 goto done;
3256 } else if (skb->head_frag) {
3257 int nr_frags = pinfo->nr_frags;
3258 skb_frag_t *frag = pinfo->frags + nr_frags;
3259 struct page *page = virt_to_head_page(skb->head);
3260 unsigned int first_size = headlen - offset;
3261 unsigned int first_offset;
3262
3263 if (nr_frags + 1 + skbinfo->nr_frags > MAX_SKB_FRAGS)
3264 goto merge;
3265
3266 first_offset = skb->data -
3267 (unsigned char *)page_address(page) +
3268 offset;
3269
3270 pinfo->nr_frags = nr_frags + 1 + skbinfo->nr_frags;
3271
3272 frag->page.p = page;
3273 frag->page_offset = first_offset;
3274 skb_frag_size_set(frag, first_size);
3275
3276 memcpy(frag + 1, skbinfo->frags, sizeof(*frag) * skbinfo->nr_frags);
3277		/* We don't need to clear skbinfo->nr_frags here */
3278
3279 delta_truesize = skb->truesize - SKB_DATA_ALIGN(sizeof(struct sk_buff));
3280 NAPI_GRO_CB(skb)->free = NAPI_GRO_FREE_STOLEN_HEAD;
3281 goto done;
3282 }
3283
3284merge:
3285 delta_truesize = skb->truesize;
3286 if (offset > headlen) {
3287 unsigned int eat = offset - headlen;
3288
3289 skbinfo->frags[0].page_offset += eat;
3290 skb_frag_size_sub(&skbinfo->frags[0], eat);
3291 skb->data_len -= eat;
3292 skb->len -= eat;
3293 offset = headlen;
3294 }
3295
3296 __skb_pull(skb, offset);
3297
3298 if (NAPI_GRO_CB(p)->last == p)
3299 skb_shinfo(p)->frag_list = skb;
3300 else
3301 NAPI_GRO_CB(p)->last->next = skb;
3302 NAPI_GRO_CB(p)->last = skb;
3303 __skb_header_release(skb);
3304 lp = p;
3305
3306done:
3307 NAPI_GRO_CB(p)->count++;
3308 p->data_len += len;
3309 p->truesize += delta_truesize;
3310 p->len += len;
3311 if (lp != p) {
3312 lp->data_len += len;
3313 lp->truesize += delta_truesize;
3314 lp->len += len;
3315 }
3316 NAPI_GRO_CB(skb)->same_flow = 1;
3317 return 0;
3318}
3319
3320void __init skb_init(void)
3321{
3322 skbuff_head_cache = kmem_cache_create("skbuff_head_cache",
3323 sizeof(struct sk_buff),
3324 0,
3325 SLAB_HWCACHE_ALIGN|SLAB_PANIC,
3326 NULL);
3327 skbuff_fclone_cache = kmem_cache_create("skbuff_fclone_cache",
3328 sizeof(struct sk_buff_fclones),
3329 0,
3330 SLAB_HWCACHE_ALIGN|SLAB_PANIC,
3331 NULL);
3332}
3333
3334/**
3335 * skb_to_sgvec - Fill a scatter-gather list from a socket buffer
3336 * @skb: Socket buffer containing the buffers to be mapped
3337 * @sg: The scatter-gather list to map into
3338 * @offset: The offset into the buffer's contents to start mapping
3339 * @len: Length of buffer space to be mapped
3340 *
3341 * Fill the specified scatter-gather list with mappings/pointers into a
3342 * region of the buffer space attached to a socket buffer.
3343 */
3344static int
3345__skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len)
3346{
3347 int start = skb_headlen(skb);
3348 int i, copy = start - offset;
3349 struct sk_buff *frag_iter;
3350 int elt = 0;
3351
3352 if (copy > 0) {
3353 if (copy > len)
3354 copy = len;
3355 sg_set_buf(sg, skb->data + offset, copy);
3356 elt++;
3357 if ((len -= copy) == 0)
3358 return elt;
3359 offset += copy;
3360 }
3361
3362 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
3363 int end;
3364
3365 WARN_ON(start > offset + len);
3366
3367 end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]);
3368 if ((copy = end - offset) > 0) {
3369 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3370
3371 if (copy > len)
3372 copy = len;
3373 sg_set_page(&sg[elt], skb_frag_page(frag), copy,
3374 frag->page_offset+offset-start);
3375 elt++;
3376 if (!(len -= copy))
3377 return elt;
3378 offset += copy;
3379 }
3380 start = end;
3381 }
3382
3383 skb_walk_frags(skb, frag_iter) {
3384 int end;
3385
3386 WARN_ON(start > offset + len);
3387
3388 end = start + frag_iter->len;
3389 if ((copy = end - offset) > 0) {
3390 if (copy > len)
3391 copy = len;
3392 elt += __skb_to_sgvec(frag_iter, sg+elt, offset - start,
3393 copy);
3394 if ((len -= copy) == 0)
3395 return elt;
3396 offset += copy;
3397 }
3398 start = end;
3399 }
3400 BUG_ON(len);
3401 return elt;
3402}
3403
3404/* As compared with skb_to_sgvec, skb_to_sgvec_nomark only maps the skb to the
3405 * given sglist without marking the sg entry that contains the last skb data
3406 * as the end. So the caller can manipulate the sg list at will when appending
3407 * new data after the first call, without calling sg_unmark_end to extend the sg list.
3408 *
3409 * Scenario to use skb_to_sgvec_nomark:
3410 * 1. sg_init_table
3411 * 2. skb_to_sgvec_nomark(payload1)
3412 * 3. skb_to_sgvec_nomark(payload2)
3413 *
3414 * This is equivalent to:
3415 * 1. sg_init_table
3416 * 2. skb_to_sgvec(payload1)
3417 * 3. sg_unmark_end
3418 * 4. skb_to_sgvec(payload2)
3419 *
3420 * When mapping multiple payloads conditionally, skb_to_sgvec_nomark
3421 * is preferable.
3422 */
3423int skb_to_sgvec_nomark(struct sk_buff *skb, struct scatterlist *sg,
3424 int offset, int len)
3425{
3426 return __skb_to_sgvec(skb, sg, offset, len);
3427}
3428EXPORT_SYMBOL_GPL(skb_to_sgvec_nomark);
3429
3430int skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len)
3431{
3432 int nsg = __skb_to_sgvec(skb, sg, offset, len);
3433
3434 sg_mark_end(&sg[nsg - 1]);
3435
3436 return nsg;
3437}
3438EXPORT_SYMBOL_GPL(skb_to_sgvec);
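
/*
 * Editor's illustrative sketch (not part of the original file): map an
 * skb's whole payload into an on-stack scatterlist.  One entry for the
 * linear area plus one per fragment is enough as long as there is no frag
 * list, so that case is rejected; the skb_cow_data() example further down
 * shows the more general pattern.
 */
static int __maybe_unused example_map_to_sg(struct sk_buff *skb)
{
	struct scatterlist sg[MAX_SKB_FRAGS + 1];

	if (skb_has_frag_list(skb))
		return -EMSGSIZE;

	sg_init_table(sg, ARRAY_SIZE(sg));
	return skb_to_sgvec(skb, sg, 0, skb->len);
}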
3439
3440/**
3441 * skb_cow_data - Check that a socket buffer's data buffers are writable
3442 * @skb: The socket buffer to check.
3443 * @tailbits: Amount of trailing space to be added
3444 * @trailer: Returned pointer to the skb where the @tailbits space begins
3445 *
3446 * Make sure that the data buffers attached to a socket buffer are
3447 * writable. If they are not, private copies are made of the data buffers
3448 * and the socket buffer is set to use these instead.
3449 *
3450 * If @tailbits is given, make sure that there is space to write @tailbits
3451 * bytes of data beyond current end of socket buffer. @trailer will be
3452 * set to point to the skb in which this space begins.
3453 *
3454 * The number of scatterlist elements required to completely map the
3455 * COW'd and extended socket buffer will be returned.
3456 */
3457int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer)
3458{
3459 int copyflag;
3460 int elt;
3461 struct sk_buff *skb1, **skb_p;
3462
3463 /* If skb is cloned or its head is paged, reallocate
3464 * head pulling out all the pages (pages are considered not writable
3465 * at the moment even if they are anonymous).
3466 */
3467 if ((skb_cloned(skb) || skb_shinfo(skb)->nr_frags) &&
3468 __pskb_pull_tail(skb, skb_pagelen(skb)-skb_headlen(skb)) == NULL)
3469 return -ENOMEM;
3470
3471 /* Easy case. Most of packets will go this way. */
3472 if (!skb_has_frag_list(skb)) {
3473		/* A little trouble: not enough space for the trailer.
3474		 * This should not happen when the stack is tuned to generate
3475		 * good frames. On a miss we reallocate and reserve even more
3476		 * space; 128 bytes is fair. */
3477
3478 if (skb_tailroom(skb) < tailbits &&
3479 pskb_expand_head(skb, 0, tailbits-skb_tailroom(skb)+128, GFP_ATOMIC))
3480 return -ENOMEM;
3481
3482 /* Voila! */
3483 *trailer = skb;
3484 return 1;
3485 }
3486
3487	/* Misery. We are in trouble, going to mince fragments... */
3488
3489 elt = 1;
3490 skb_p = &skb_shinfo(skb)->frag_list;
3491 copyflag = 0;
3492
3493 while ((skb1 = *skb_p) != NULL) {
3494 int ntail = 0;
3495
3496		/* The fragment was partially pulled by someone;
3497		 * this can happen on input. Copy it and everything
3498 * after it. */
3499
3500 if (skb_shared(skb1))
3501 copyflag = 1;
3502
3503 /* If the skb is the last, worry about trailer. */
3504
3505 if (skb1->next == NULL && tailbits) {
3506 if (skb_shinfo(skb1)->nr_frags ||
3507 skb_has_frag_list(skb1) ||
3508 skb_tailroom(skb1) < tailbits)
3509 ntail = tailbits + 128;
3510 }
3511
3512 if (copyflag ||
3513 skb_cloned(skb1) ||
3514 ntail ||
3515 skb_shinfo(skb1)->nr_frags ||
3516 skb_has_frag_list(skb1)) {
3517 struct sk_buff *skb2;
3518
3519 /* Fuck, we are miserable poor guys... */
3520 if (ntail == 0)
3521 skb2 = skb_copy(skb1, GFP_ATOMIC);
3522 else
3523 skb2 = skb_copy_expand(skb1,
3524 skb_headroom(skb1),
3525 ntail,
3526 GFP_ATOMIC);
3527 if (unlikely(skb2 == NULL))
3528 return -ENOMEM;
3529
3530 if (skb1->sk)
3531 skb_set_owner_w(skb2, skb1->sk);
3532
3533 /* Looking around. Are we still alive?
3534 * OK, link new skb, drop old one */
3535
3536 skb2->next = skb1->next;
3537 *skb_p = skb2;
3538 kfree_skb(skb1);
3539 skb1 = skb2;
3540 }
3541 elt++;
3542 *trailer = skb1;
3543 skb_p = &skb1->next;
3544 }
3545
3546 return elt;
3547}
3548EXPORT_SYMBOL_GPL(skb_cow_data);
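
/*
 * Editor's illustrative sketch (not part of the original file): the usual
 * skb_cow_data()/skb_to_sgvec() pairing, roughly what IPsec does before
 * transforming a packet in place.  skb_cow_data() both makes the buffers
 * writable and reports how many scatterlist entries are needed.
 */
static int __maybe_unused example_cow_and_map(struct sk_buff *skb, gfp_t gfp)
{
	struct scatterlist *sg;
	struct sk_buff *trailer;
	int nsg;

	nsg = skb_cow_data(skb, 0, &trailer);
	if (nsg < 0)
		return nsg;

	sg = kmalloc_array(nsg, sizeof(*sg), gfp);
	if (!sg)
		return -ENOMEM;

	sg_init_table(sg, nsg);
	skb_to_sgvec(skb, sg, 0, skb->len);

	/* ... hand @sg to the crypto layer here ... */

	kfree(sg);
	return 0;
}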
3549
3550static void sock_rmem_free(struct sk_buff *skb)
3551{
3552 struct sock *sk = skb->sk;
3553
3554 atomic_sub(skb->truesize, &sk->sk_rmem_alloc);
3555}
3556
3557/*
3558 * Note: We don't mem charge error packets (no sk_forward_alloc changes).
3559 */
3560int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb)
3561{
3562 if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
3563 (unsigned int)sk->sk_rcvbuf)
3564 return -ENOMEM;
3565
3566 skb_orphan(skb);
3567 skb->sk = sk;
3568 skb->destructor = sock_rmem_free;
3569 atomic_add(skb->truesize, &sk->sk_rmem_alloc);
3570
3571 /* before exiting rcu section, make sure dst is refcounted */
3572 skb_dst_force(skb);
3573
3574 skb_queue_tail(&sk->sk_error_queue, skb);
3575 if (!sock_flag(sk, SOCK_DEAD))
3576 sk->sk_data_ready(sk);
3577 return 0;
3578}
3579EXPORT_SYMBOL(sock_queue_err_skb);
3580
3581struct sk_buff *sock_dequeue_err_skb(struct sock *sk)
3582{
3583 struct sk_buff_head *q = &sk->sk_error_queue;
3584 struct sk_buff *skb, *skb_next;
3585 unsigned long flags;
3586 int err = 0;
3587
3588 spin_lock_irqsave(&q->lock, flags);
3589 skb = __skb_dequeue(q);
3590 if (skb && (skb_next = skb_peek(q)))
3591 err = SKB_EXT_ERR(skb_next)->ee.ee_errno;
3592 spin_unlock_irqrestore(&q->lock, flags);
3593
3594 sk->sk_err = err;
3595 if (err)
3596 sk->sk_error_report(sk);
3597
3598 return skb;
3599}
3600EXPORT_SYMBOL(sock_dequeue_err_skb);
3601
3602/**
3603 * skb_clone_sk - create clone of skb, and take reference to socket
3604 * @skb: the skb to clone
3605 *
3606 * This function creates a clone of a buffer that holds a reference on
3607 * sk_refcnt. Buffers created via this function are meant to be
3608 * returned using sock_queue_err_skb, or freed via kfree_skb.
3609 *
3610 * When passing buffers allocated with this function to sock_queue_err_skb
3611 * it is necessary to wrap the call with sock_hold/sock_put in order to
3612 * prevent the socket from being released prior to being enqueued on
3613 * the sk_error_queue.
3614 */
3615struct sk_buff *skb_clone_sk(struct sk_buff *skb)
3616{
3617 struct sock *sk = skb->sk;
3618 struct sk_buff *clone;
3619
3620 if (!sk || !atomic_inc_not_zero(&sk->sk_refcnt))
3621 return NULL;
3622
3623 clone = skb_clone(skb, GFP_ATOMIC);
3624 if (!clone) {
3625 sock_put(sk);
3626 return NULL;
3627 }
3628
3629 clone->sk = sk;
3630 clone->destructor = sock_efree;
3631
3632 return clone;
3633}
3634EXPORT_SYMBOL(skb_clone_sk);
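
/*
 * Editor's illustrative sketch (not part of the original file): how a
 * driver with hardware TX timestamping might pair skb_clone_sk() at
 * transmit time with skb_complete_tx_timestamp() at completion time.
 * @hwts is assumed to be filled from the hardware's completion descriptor.
 */
static void __maybe_unused example_tx_tstamp_complete(struct sk_buff *skb,
						      struct skb_shared_hwtstamps *hwts)
{
	struct sk_buff *clone = skb_clone_sk(skb);

	if (clone)
		skb_complete_tx_timestamp(clone, hwts);
}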
3635
3636static void __skb_complete_tx_timestamp(struct sk_buff *skb,
3637 struct sock *sk,
3638 int tstype)
3639{
3640 struct sock_exterr_skb *serr;
3641 int err;
3642
3643 serr = SKB_EXT_ERR(skb);
3644 memset(serr, 0, sizeof(*serr));
3645 serr->ee.ee_errno = ENOMSG;
3646 serr->ee.ee_origin = SO_EE_ORIGIN_TIMESTAMPING;
3647 serr->ee.ee_info = tstype;
3648 if (sk->sk_tsflags & SOF_TIMESTAMPING_OPT_ID) {
3649 serr->ee.ee_data = skb_shinfo(skb)->tskey;
3650 if (sk->sk_protocol == IPPROTO_TCP)
3651 serr->ee.ee_data -= sk->sk_tskey;
3652 }
3653
3654 err = sock_queue_err_skb(sk, skb);
3655
3656 if (err)
3657 kfree_skb(skb);
3658}
3659
3660static bool skb_may_tx_timestamp(struct sock *sk, bool tsonly)
3661{
3662 bool ret;
3663
3664 if (likely(sysctl_tstamp_allow_data || tsonly))
3665 return true;
3666
3667 read_lock_bh(&sk->sk_callback_lock);
3668 ret = sk->sk_socket && sk->sk_socket->file &&
3669 file_ns_capable(sk->sk_socket->file, &init_user_ns, CAP_NET_RAW);
3670 read_unlock_bh(&sk->sk_callback_lock);
3671 return ret;
3672}
3673
3674void skb_complete_tx_timestamp(struct sk_buff *skb,
3675 struct skb_shared_hwtstamps *hwtstamps)
3676{
3677 struct sock *sk = skb->sk;
3678
3679 if (!skb_may_tx_timestamp(sk, false))
3680 return;
3681
3682 /* take a reference to prevent skb_orphan() from freeing the socket */
3683 sock_hold(sk);
3684
3685 *skb_hwtstamps(skb) = *hwtstamps;
3686 __skb_complete_tx_timestamp(skb, sk, SCM_TSTAMP_SND);
3687
3688 sock_put(sk);
3689}
3690EXPORT_SYMBOL_GPL(skb_complete_tx_timestamp);
3691
3692void __skb_tstamp_tx(struct sk_buff *orig_skb,
3693 struct skb_shared_hwtstamps *hwtstamps,
3694 struct sock *sk, int tstype)
3695{
3696 struct sk_buff *skb;
3697 bool tsonly;
3698
3699 if (!sk)
3700 return;
3701
3702 tsonly = sk->sk_tsflags & SOF_TIMESTAMPING_OPT_TSONLY;
3703 if (!skb_may_tx_timestamp(sk, tsonly))
3704 return;
3705
3706 if (tsonly)
3707 skb = alloc_skb(0, GFP_ATOMIC);
3708 else
3709 skb = skb_clone(orig_skb, GFP_ATOMIC);
3710 if (!skb)
3711 return;
3712
3713 if (tsonly) {
3714 skb_shinfo(skb)->tx_flags = skb_shinfo(orig_skb)->tx_flags;
3715 skb_shinfo(skb)->tskey = skb_shinfo(orig_skb)->tskey;
3716 }
3717
3718 if (hwtstamps)
3719 *skb_hwtstamps(skb) = *hwtstamps;
3720 else
3721 skb->tstamp = ktime_get_real();
3722
3723 __skb_complete_tx_timestamp(skb, sk, tstype);
3724}
3725EXPORT_SYMBOL_GPL(__skb_tstamp_tx);
3726
3727void skb_tstamp_tx(struct sk_buff *orig_skb,
3728 struct skb_shared_hwtstamps *hwtstamps)
3729{
3730 return __skb_tstamp_tx(orig_skb, hwtstamps, orig_skb->sk,
3731 SCM_TSTAMP_SND);
3732}
3733EXPORT_SYMBOL_GPL(skb_tstamp_tx);
3734
3735void skb_complete_wifi_ack(struct sk_buff *skb, bool acked)
3736{
3737 struct sock *sk = skb->sk;
3738 struct sock_exterr_skb *serr;
3739 int err;
3740
3741 skb->wifi_acked_valid = 1;
3742 skb->wifi_acked = acked;
3743
3744 serr = SKB_EXT_ERR(skb);
3745 memset(serr, 0, sizeof(*serr));
3746 serr->ee.ee_errno = ENOMSG;
3747 serr->ee.ee_origin = SO_EE_ORIGIN_TXSTATUS;
3748
3749 /* take a reference to prevent skb_orphan() from freeing the socket */
3750 sock_hold(sk);
3751
3752 err = sock_queue_err_skb(sk, skb);
3753 if (err)
3754 kfree_skb(skb);
3755
3756 sock_put(sk);
3757}
3758EXPORT_SYMBOL_GPL(skb_complete_wifi_ack);
3759
3760/**
3761 * skb_partial_csum_set - set up and verify partial csum values for packet
3762 * @skb: the skb to set
3763 * @start: the number of bytes after skb->data to start checksumming.
3764 * @off: the offset from start to place the checksum.
3765 *
3766 * For untrusted partially-checksummed packets, we need to make sure the values
3767 * for skb->csum_start and skb->csum_offset are valid so we don't oops.
3768 *
3769 * This function checks and sets those values and skb->ip_summed: if this
3770 * returns false you should drop the packet.
3771 */
3772bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off)
3773{
3774 if (unlikely(start > skb_headlen(skb)) ||
3775 unlikely((int)start + off > skb_headlen(skb) - 2)) {
3776 net_warn_ratelimited("bad partial csum: csum=%u/%u len=%u\n",
3777 start, off, skb_headlen(skb));
3778 return false;
3779 }
3780 skb->ip_summed = CHECKSUM_PARTIAL;
3781 skb->csum_start = skb_headroom(skb) + start;
3782 skb->csum_offset = off;
3783 skb_set_transport_header(skb, start);
3784 return true;
3785}
3786EXPORT_SYMBOL_GPL(skb_partial_csum_set);
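
/*
 * Editor's illustrative sketch (not part of the original file): a receive
 * path trusting checksum offsets supplied by an untrusted source (guest or
 * device metadata) only after skb_partial_csum_set() has validated them.
 * @csum_start and @csum_off are hypothetical descriptor fields.
 */
static int __maybe_unused example_rx_partial_csum(struct sk_buff *skb,
						  u16 csum_start, u16 csum_off)
{
	if (!skb_partial_csum_set(skb, csum_start, csum_off))
		return -EINVAL;	/* caller should drop the packet */

	return 0;
}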
3787
3788static int skb_maybe_pull_tail(struct sk_buff *skb, unsigned int len,
3789 unsigned int max)
3790{
3791 if (skb_headlen(skb) >= len)
3792 return 0;
3793
3794 /* If we need to pullup then pullup to the max, so we
3795 * won't need to do it again.
3796 */
3797 if (max > skb->len)
3798 max = skb->len;
3799
3800 if (__pskb_pull_tail(skb, max - skb_headlen(skb)) == NULL)
3801 return -ENOMEM;
3802
3803 if (skb_headlen(skb) < len)
3804 return -EPROTO;
3805
3806 return 0;
3807}
3808
3809#define MAX_TCP_HDR_LEN (15 * 4)
3810
3811static __sum16 *skb_checksum_setup_ip(struct sk_buff *skb,
3812 typeof(IPPROTO_IP) proto,
3813 unsigned int off)
3814{
3815 switch (proto) {
3816 int err;
3817
3818 case IPPROTO_TCP:
3819 err = skb_maybe_pull_tail(skb, off + sizeof(struct tcphdr),
3820 off + MAX_TCP_HDR_LEN);
3821 if (!err && !skb_partial_csum_set(skb, off,
3822 offsetof(struct tcphdr,
3823 check)))
3824 err = -EPROTO;
3825 return err ? ERR_PTR(err) : &tcp_hdr(skb)->check;
3826
3827 case IPPROTO_UDP:
3828 err = skb_maybe_pull_tail(skb, off + sizeof(struct udphdr),
3829 off + sizeof(struct udphdr));
3830 if (!err && !skb_partial_csum_set(skb, off,
3831 offsetof(struct udphdr,
3832 check)))
3833 err = -EPROTO;
3834 return err ? ERR_PTR(err) : &udp_hdr(skb)->check;
3835 }
3836
3837 return ERR_PTR(-EPROTO);
3838}
3839
3840/* This value should be large enough to cover a tagged ethernet header plus
3841 * maximally sized IP and TCP or UDP headers.
3842 */
3843#define MAX_IP_HDR_LEN 128
3844
3845static int skb_checksum_setup_ipv4(struct sk_buff *skb, bool recalculate)
3846{
3847 unsigned int off;
3848 bool fragment;
3849 __sum16 *csum;
3850 int err;
3851
3852 fragment = false;
3853
3854 err = skb_maybe_pull_tail(skb,
3855 sizeof(struct iphdr),
3856 MAX_IP_HDR_LEN);
3857 if (err < 0)
3858 goto out;
3859
3860 if (ip_hdr(skb)->frag_off & htons(IP_OFFSET | IP_MF))
3861 fragment = true;
3862
3863 off = ip_hdrlen(skb);
3864
3865 err = -EPROTO;
3866
3867 if (fragment)
3868 goto out;
3869
3870 csum = skb_checksum_setup_ip(skb, ip_hdr(skb)->protocol, off);
3871 if (IS_ERR(csum))
3872 return PTR_ERR(csum);
3873
3874 if (recalculate)
3875 *csum = ~csum_tcpudp_magic(ip_hdr(skb)->saddr,
3876 ip_hdr(skb)->daddr,
3877 skb->len - off,
3878 ip_hdr(skb)->protocol, 0);
3879 err = 0;
3880
3881out:
3882 return err;
3883}
3884
3885/* This value should be large enough to cover a tagged ethernet header plus
3886 * an IPv6 header, all options, and a maximal TCP or UDP header.
3887 */
3888#define MAX_IPV6_HDR_LEN 256
3889
3890#define OPT_HDR(type, skb, off) \
3891 (type *)(skb_network_header(skb) + (off))
3892
3893static int skb_checksum_setup_ipv6(struct sk_buff *skb, bool recalculate)
3894{
3895 int err;
3896 u8 nexthdr;
3897 unsigned int off;
3898 unsigned int len;
3899 bool fragment;
3900 bool done;
3901 __sum16 *csum;
3902
3903 fragment = false;
3904 done = false;
3905
3906 off = sizeof(struct ipv6hdr);
3907
3908 err = skb_maybe_pull_tail(skb, off, MAX_IPV6_HDR_LEN);
3909 if (err < 0)
3910 goto out;
3911
3912 nexthdr = ipv6_hdr(skb)->nexthdr;
3913
3914 len = sizeof(struct ipv6hdr) + ntohs(ipv6_hdr(skb)->payload_len);
3915 while (off <= len && !done) {
3916 switch (nexthdr) {
3917 case IPPROTO_DSTOPTS:
3918 case IPPROTO_HOPOPTS:
3919 case IPPROTO_ROUTING: {
3920 struct ipv6_opt_hdr *hp;
3921
3922 err = skb_maybe_pull_tail(skb,
3923 off +
3924 sizeof(struct ipv6_opt_hdr),
3925 MAX_IPV6_HDR_LEN);
3926 if (err < 0)
3927 goto out;
3928
3929 hp = OPT_HDR(struct ipv6_opt_hdr, skb, off);
3930 nexthdr = hp->nexthdr;
3931 off += ipv6_optlen(hp);
3932 break;
3933 }
3934 case IPPROTO_AH: {
3935 struct ip_auth_hdr *hp;
3936
3937 err = skb_maybe_pull_tail(skb,
3938 off +
3939 sizeof(struct ip_auth_hdr),
3940 MAX_IPV6_HDR_LEN);
3941 if (err < 0)
3942 goto out;
3943
3944 hp = OPT_HDR(struct ip_auth_hdr, skb, off);
3945 nexthdr = hp->nexthdr;
3946 off += ipv6_authlen(hp);
3947 break;
3948 }
3949 case IPPROTO_FRAGMENT: {
3950 struct frag_hdr *hp;
3951
3952 err = skb_maybe_pull_tail(skb,
3953 off +
3954 sizeof(struct frag_hdr),
3955 MAX_IPV6_HDR_LEN);
3956 if (err < 0)
3957 goto out;
3958
3959 hp = OPT_HDR(struct frag_hdr, skb, off);
3960
3961 if (hp->frag_off & htons(IP6_OFFSET | IP6_MF))
3962 fragment = true;
3963
3964 nexthdr = hp->nexthdr;
3965 off += sizeof(struct frag_hdr);
3966 break;
3967 }
3968 default:
3969 done = true;
3970 break;
3971 }
3972 }
3973
3974 err = -EPROTO;
3975
3976 if (!done || fragment)
3977 goto out;
3978
3979 csum = skb_checksum_setup_ip(skb, nexthdr, off);
3980 if (IS_ERR(csum))
3981 return PTR_ERR(csum);
3982
3983 if (recalculate)
3984 *csum = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
3985 &ipv6_hdr(skb)->daddr,
3986 skb->len - off, nexthdr, 0);
3987 err = 0;
3988
3989out:
3990 return err;
3991}
3992
3993/**
3994 * skb_checksum_setup - set up partial checksum offset
3995 * @skb: the skb to set up
3996 * @recalculate: if true the pseudo-header checksum will be recalculated
3997 */
3998int skb_checksum_setup(struct sk_buff *skb, bool recalculate)
3999{
4000 int err;
4001
4002 switch (skb->protocol) {
4003 case htons(ETH_P_IP):
4004 err = skb_checksum_setup_ipv4(skb, recalculate);
4005 break;
4006
4007 case htons(ETH_P_IPV6):
4008 err = skb_checksum_setup_ipv6(skb, recalculate);
4009 break;
4010
4011 default:
4012 err = -EPROTO;
4013 break;
4014 }
4015
4016 return err;
4017}
4018EXPORT_SYMBOL(skb_checksum_setup);
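
/*
 * Illustrative sketch: how a backend driver that receives frames carrying an
 * offloaded (partial) transport checksum might use skb_checksum_setup().
 * The function name example_complete_csum() and the choice to always
 * recalculate the pseudo-header checksum are assumptions for illustration,
 * not existing kernel code.
 */
#if 0	/* example only, not compiled */
static int example_complete_csum(struct sk_buff *skb)
{
	/* Only CHECKSUM_PARTIAL packets still need csum_start/csum_offset
	 * pointing at the transport checksum field.
	 */
	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return 0;

	return skb_checksum_setup(skb, true);
}
#endif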
4019
4020void __skb_warn_lro_forwarding(const struct sk_buff *skb)
4021{
4022 net_warn_ratelimited("%s: received packets cannot be forwarded while LRO is enabled\n",
4023 skb->dev->name);
4024}
4025EXPORT_SYMBOL(__skb_warn_lro_forwarding);
4026
4027void kfree_skb_partial(struct sk_buff *skb, bool head_stolen)
4028{
4029 if (head_stolen) {
4030 skb_release_head_state(skb);
4031 kmem_cache_free(skbuff_head_cache, skb);
4032 } else {
4033 __kfree_skb(skb);
4034 }
4035}
4036EXPORT_SYMBOL(kfree_skb_partial);
4037
4038/**
4039 * skb_try_coalesce - try to merge skb to prior one
4040 * @to: prior buffer
4041 * @from: buffer to add
4042 * @fragstolen: pointer to boolean, set true if the head of @from was stolen
4043 * @delta_truesize: on success, the amount added to @to's truesize
4044 */
4045bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from,
4046 bool *fragstolen, int *delta_truesize)
4047{
4048 int i, delta, len = from->len;
4049
4050 *fragstolen = false;
4051
4052 if (skb_cloned(to))
4053 return false;
4054
4055 if (len <= skb_tailroom(to)) {
4056 if (len)
4057 BUG_ON(skb_copy_bits(from, 0, skb_put(to, len), len));
4058 *delta_truesize = 0;
4059 return true;
4060 }
4061
4062 if (skb_has_frag_list(to) || skb_has_frag_list(from))
4063 return false;
4064
4065 if (skb_headlen(from) != 0) {
4066 struct page *page;
4067 unsigned int offset;
4068
4069 if (skb_shinfo(to)->nr_frags +
4070 skb_shinfo(from)->nr_frags >= MAX_SKB_FRAGS)
4071 return false;
4072
4073 if (skb_head_is_locked(from))
4074 return false;
4075
4076 delta = from->truesize - SKB_DATA_ALIGN(sizeof(struct sk_buff));
4077
4078 page = virt_to_head_page(from->head);
4079 offset = from->data - (unsigned char *)page_address(page);
4080
4081 skb_fill_page_desc(to, skb_shinfo(to)->nr_frags,
4082 page, offset, skb_headlen(from));
4083 *fragstolen = true;
4084 } else {
4085 if (skb_shinfo(to)->nr_frags +
4086 skb_shinfo(from)->nr_frags > MAX_SKB_FRAGS)
4087 return false;
4088
4089 delta = from->truesize - SKB_TRUESIZE(skb_end_offset(from));
4090 }
4091
4092 WARN_ON_ONCE(delta < len);
4093
4094 memcpy(skb_shinfo(to)->frags + skb_shinfo(to)->nr_frags,
4095 skb_shinfo(from)->frags,
4096 skb_shinfo(from)->nr_frags * sizeof(skb_frag_t));
4097 skb_shinfo(to)->nr_frags += skb_shinfo(from)->nr_frags;
4098
4099 if (!skb_cloned(from))
4100 skb_shinfo(from)->nr_frags = 0;
4101
4102 /* if the skb is not cloned this does nothing
4103 * since we set nr_frags to 0.
4104 */
4105 for (i = 0; i < skb_shinfo(from)->nr_frags; i++)
4106 skb_frag_ref(from, i);
4107
4108 to->truesize += delta;
4109 to->len += len;
4110 to->data_len += len;
4111
4112 *delta_truesize = delta;
4113 return true;
4114}
4115EXPORT_SYMBOL(skb_try_coalesce);
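
/*
 * Illustrative sketch: the calling pattern expected around
 * skb_try_coalesce().  On success the source skb is released with
 * kfree_skb_partial(), so a stolen head is not freed while @to still
 * references its page, and @delta_truesize is charged to the queue's
 * memory accounting.  example_queue_coalesce() and the rmem_used counter
 * are assumptions for illustration.
 */
#if 0	/* example only, not compiled */
static bool example_queue_coalesce(struct sk_buff *tail, struct sk_buff *skb,
				   int *rmem_used)
{
	bool fragstolen;
	int delta;

	if (!skb_try_coalesce(tail, skb, &fragstolen, &delta))
		return false;

	*rmem_used += delta;			/* charge the migrated truesize */
	kfree_skb_partial(skb, fragstolen);	/* frees head only if not stolen */
	return true;
}
#endif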
4116
4117/**
4118 * skb_scrub_packet - scrub an skb
4119 *
4120 * @skb: buffer to clean
4121 * @xnet: packet is crossing netns
4122 *
4123 * skb_scrub_packet can be used after encapsulating or decapsulating a packet
4124 * into/from a tunnel. Some information has to be cleared during these
4125 * operations.
4126 * skb_scrub_packet can also be used to clean a skb before injecting it into
4127 * another namespace (@xnet == true). We have to clear all information in the
4128 * skb that could impact namespace isolation.
4129 */
4130void skb_scrub_packet(struct sk_buff *skb, bool xnet)
4131{
4132 skb->tstamp.tv64 = 0;
4133 skb->pkt_type = PACKET_HOST;
4134 skb->skb_iif = 0;
4135 skb->ignore_df = 0;
4136 skb_dst_drop(skb);
4137 skb_sender_cpu_clear(skb);
4138 secpath_reset(skb);
4139 nf_reset(skb);
4140 nf_reset_trace(skb);
4141
4142 if (!xnet)
4143 return;
4144
4145 skb_orphan(skb);
4146 skb->mark = 0;
4147}
4148EXPORT_SYMBOL_GPL(skb_scrub_packet);
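
/*
 * Illustrative sketch: a tunnel-style transmit path scrubbing an skb before
 * handing it to a device that may sit in a different network namespace,
 * assuming skb->dev is still set to the source device.  The xnet test
 * mirrors what existing callers compute; example_tunnel_xmit() is an
 * assumed name, not a kernel API.
 */
#if 0	/* example only, not compiled */
static void example_tunnel_xmit(struct sk_buff *skb, struct net_device *dst_dev)
{
	bool xnet = !net_eq(dev_net(skb->dev), dev_net(dst_dev));

	/* Clear state (dst, secpath, conntrack, mark when crossing netns)
	 * that must not leak through the encapsulation boundary.
	 */
	skb_scrub_packet(skb, xnet);

	skb->dev = dst_dev;
	dev_queue_xmit(skb);
}
#endif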
4149
4150/**
4151 * skb_gso_transport_seglen - Return length of individual segments of a gso packet
4152 *
4153 * @skb: GSO skb
4154 *
4155 * skb_gso_transport_seglen is used to determine the real size of the
4156 * individual segments, including Layer4 headers (TCP/UDP).
4157 *
4158 * The MAC/L2 or network (IP, IPv6) headers are not accounted for.
4159 */
4160unsigned int skb_gso_transport_seglen(const struct sk_buff *skb)
4161{
4162 const struct skb_shared_info *shinfo = skb_shinfo(skb);
4163 unsigned int thlen = 0;
4164
4165 if (skb->encapsulation) {
4166 thlen = skb_inner_transport_header(skb) -
4167 skb_transport_header(skb);
4168
4169 if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)))
4170 thlen += inner_tcp_hdrlen(skb);
4171 } else if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))) {
4172 thlen = tcp_hdrlen(skb);
4173 }
4174 /* UFO sets gso_size to the size of the fragmentation
4175 * payload, i.e. the size of the L4 (UDP) header is already
4176 * accounted for.
4177 */
4178 return thlen + shinfo->gso_size;
4179}
4180EXPORT_SYMBOL_GPL(skb_gso_transport_seglen);
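
/*
 * Illustrative sketch: using the per-segment transport length for an MTU
 * check on a GSO skb, combining it with the L3 header length in the same
 * way the network-layer seglen helper does.  example_gso_fits_mtu() is an
 * assumed name for illustration.
 */
#if 0	/* example only, not compiled */
static bool example_gso_fits_mtu(const struct sk_buff *skb, unsigned int mtu)
{
	/* L3 header bytes plus (L4 header + gso_size) bytes per segment */
	unsigned int seglen = skb_network_header_len(skb) +
			      skb_gso_transport_seglen(skb);

	return seglen <= mtu;
}
#endif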
4181
4182static struct sk_buff *skb_reorder_vlan_header(struct sk_buff *skb)
4183{
4184 if (skb_cow(skb, skb_headroom(skb)) < 0) {
4185 kfree_skb(skb);
4186 return NULL;
4187 }
4188
4189 memmove(skb->data - ETH_HLEN, skb->data - VLAN_ETH_HLEN, 2 * ETH_ALEN);
4190 skb->mac_header += VLAN_HLEN;
4191 return skb;
4192}
4193
4194struct sk_buff *skb_vlan_untag(struct sk_buff *skb)
4195{
4196 struct vlan_hdr *vhdr;
4197 u16 vlan_tci;
4198
4199 if (unlikely(skb_vlan_tag_present(skb))) {
4200 /* vlan_tci is already set up, so leave this for another time */
4201 return skb;
4202 }
4203
4204 skb = skb_share_check(skb, GFP_ATOMIC);
4205 if (unlikely(!skb))
4206 goto err_free;
4207
4208 if (unlikely(!pskb_may_pull(skb, VLAN_HLEN)))
4209 goto err_free;
4210
4211 vhdr = (struct vlan_hdr *)skb->data;
4212 vlan_tci = ntohs(vhdr->h_vlan_TCI);
4213 __vlan_hwaccel_put_tag(skb, skb->protocol, vlan_tci);
4214
4215 skb_pull_rcsum(skb, VLAN_HLEN);
4216 vlan_set_encap_proto(skb, vhdr);
4217
4218 skb = skb_reorder_vlan_header(skb);
4219 if (unlikely(!skb))
4220 goto err_free;
4221
4222 skb_reset_network_header(skb);
4223 skb_reset_transport_header(skb);
4224 skb_reset_mac_len(skb);
4225
4226 return skb;
4227
4228err_free:
4229 kfree_skb(skb);
4230 return NULL;
4231}
4232EXPORT_SYMBOL(skb_vlan_untag);
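
/*
 * Illustrative sketch: a receive path moving an in-band 802.1Q/802.1AD tag
 * into the hardware-accel fields before demuxing on skb->protocol, as the
 * core receive path does.  example_rx_untag() is an assumed name.
 */
#if 0	/* example only, not compiled */
static struct sk_buff *example_rx_untag(struct sk_buff *skb)
{
	if (skb->protocol == htons(ETH_P_8021Q) ||
	    skb->protocol == htons(ETH_P_8021AD))
		skb = skb_vlan_untag(skb);	/* may free skb and return NULL */

	return skb;
}
#endif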
4233
4234int skb_ensure_writable(struct sk_buff *skb, int write_len)
4235{
4236 if (!pskb_may_pull(skb, write_len))
4237 return -ENOMEM;
4238
4239 if (!skb_cloned(skb) || skb_clone_writable(skb, write_len))
4240 return 0;
4241
4242 return pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
4243}
4244EXPORT_SYMBOL(skb_ensure_writable);
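
/*
 * Illustrative sketch: making a header writable before mangling it, in the
 * style of flow-action code.  The IPv4 source-address rewrite and the
 * helper name example_set_ipv4_saddr() are assumptions for illustration
 * only.
 */
#if 0	/* example only, not compiled */
static int example_set_ipv4_saddr(struct sk_buff *skb, __be32 new_saddr)
{
	struct iphdr *iph;
	int err;

	/* Unclone/pull enough of the packet so the IPv4 header can be
	 * modified in place.
	 */
	err = skb_ensure_writable(skb, skb_network_offset(skb) +
				       sizeof(struct iphdr));
	if (unlikely(err))
		return err;

	iph = ip_hdr(skb);
	csum_replace4(&iph->check, iph->saddr, new_saddr);
	iph->saddr = new_saddr;

	return 0;
}
#endif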
4245
4246/* remove VLAN header from packet and update csum accordingly. */
4247static int __skb_vlan_pop(struct sk_buff *skb, u16 *vlan_tci)
4248{
4249 struct vlan_hdr *vhdr;
4250 unsigned int offset = skb->data - skb_mac_header(skb);
4251 int err;
4252
4253 __skb_push(skb, offset);
4254 err = skb_ensure_writable(skb, VLAN_ETH_HLEN);
4255 if (unlikely(err))
4256 goto pull;
4257
4258 skb_postpull_rcsum(skb, skb->data + (2 * ETH_ALEN), VLAN_HLEN);
4259
4260 vhdr = (struct vlan_hdr *)(skb->data + ETH_HLEN);
4261 *vlan_tci = ntohs(vhdr->h_vlan_TCI);
4262
4263 memmove(skb->data + VLAN_HLEN, skb->data, 2 * ETH_ALEN);
4264 __skb_pull(skb, VLAN_HLEN);
4265
4266 vlan_set_encap_proto(skb, vhdr);
4267 skb->mac_header += VLAN_HLEN;
4268
4269 if (skb_network_offset(skb) < ETH_HLEN)
4270 skb_set_network_header(skb, ETH_HLEN);
4271
4272 skb_reset_mac_len(skb);
4273pull:
4274 __skb_pull(skb, offset);
4275
4276 return err;
4277}
4278
4279int skb_vlan_pop(struct sk_buff *skb)
4280{
4281 u16 vlan_tci;
4282 __be16 vlan_proto;
4283 int err;
4284
4285 if (likely(skb_vlan_tag_present(skb))) {
4286 skb->vlan_tci = 0;
4287 } else {
4288 if (unlikely((skb->protocol != htons(ETH_P_8021Q) &&
4289 skb->protocol != htons(ETH_P_8021AD)) ||
4290 skb->len < VLAN_ETH_HLEN))
4291 return 0;
4292
4293 err = __skb_vlan_pop(skb, &vlan_tci);
4294 if (err)
4295 return err;
4296 }
4297 /* move next vlan tag to hw accel tag */
4298 if (likely((skb->protocol != htons(ETH_P_8021Q) &&
4299 skb->protocol != htons(ETH_P_8021AD)) ||
4300 skb->len < VLAN_ETH_HLEN))
4301 return 0;
4302
4303 vlan_proto = skb->protocol;
4304 err = __skb_vlan_pop(skb, &vlan_tci);
4305 if (unlikely(err))
4306 return err;
4307
4308 __vlan_hwaccel_put_tag(skb, vlan_proto, vlan_tci);
4309 return 0;
4310}
4311EXPORT_SYMBOL(skb_vlan_pop);
4312
4313int skb_vlan_push(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci)
4314{
4315 if (skb_vlan_tag_present(skb)) {
4316 unsigned int offset = skb->data - skb_mac_header(skb);
4317 int err;
4318
4319 /* __vlan_insert_tag expects skb->data to point to the mac header,
4320 * so change skb->data before calling it and change it back to the
4321 * original position afterwards.
4322 */
4323 __skb_push(skb, offset);
4324 err = __vlan_insert_tag(skb, skb->vlan_proto,
4325 skb_vlan_tag_get(skb));
4326 if (err)
4327 return err;
4328 skb->protocol = skb->vlan_proto;
4329 skb->mac_len += VLAN_HLEN;
4330 __skb_pull(skb, offset);
4331
4332 if (skb->ip_summed == CHECKSUM_COMPLETE)
4333 skb->csum = csum_add(skb->csum, csum_partial(skb->data
4334 + (2 * ETH_ALEN), VLAN_HLEN, 0));
4335 }
4336 __vlan_hwaccel_put_tag(skb, vlan_proto, vlan_tci);
4337 return 0;
4338}
4339EXPORT_SYMBOL(skb_vlan_push);
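
/*
 * Illustrative sketch: rewriting the outermost VLAN tag by combining
 * skb_vlan_pop() and skb_vlan_push(), as a flow-action implementation
 * might.  The choice of 802.1Q and the example_vlan_translate() name are
 * assumptions for illustration.
 */
#if 0	/* example only, not compiled */
static int example_vlan_translate(struct sk_buff *skb, u16 new_tci)
{
	int err;

	err = skb_vlan_pop(skb);	/* strip the current tag, if any */
	if (err)
		return err;

	/* No tag is present now, so the new one lands in the hw-accel
	 * fields rather than being inserted into the payload.
	 */
	return skb_vlan_push(skb, htons(ETH_P_8021Q), new_tci);
}
#endif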
4340
4341/**
4342 * alloc_skb_with_frags - allocate skb with page frags
4343 *
4344 * @header_len: size of linear part
4345 * @data_len: needed length in frags
4346 * @max_page_order: max page order desired.
4347 * @errcode: pointer used to return the error code on failure
4348 * @gfp_mask: allocation mask
4349 *
4350 * This can be used to allocate a paged skb, given a maximal order for frags.
4351 */
4352struct sk_buff *alloc_skb_with_frags(unsigned long header_len,
4353 unsigned long data_len,
4354 int max_page_order,
4355 int *errcode,
4356 gfp_t gfp_mask)
4357{
4358 int npages = (data_len + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
4359 unsigned long chunk;
4360 struct sk_buff *skb;
4361 struct page *page;
4362 gfp_t gfp_head;
4363 int i;
4364
4365 *errcode = -EMSGSIZE;
4366 /* Note this test could be relaxed, if we manage to allocate
4367 * high order pages...
4368 */
4369 if (npages > MAX_SKB_FRAGS)
4370 return NULL;
4371
4372 gfp_head = gfp_mask;
4373 if (gfp_head & __GFP_WAIT)
4374 gfp_head |= __GFP_REPEAT;
4375
4376 *errcode = -ENOBUFS;
4377 skb = alloc_skb(header_len, gfp_head);
4378 if (!skb)
4379 return NULL;
4380
4381 skb->truesize += npages << PAGE_SHIFT;
4382
4383 for (i = 0; npages > 0; i++) {
4384 int order = max_page_order;
4385
4386 while (order) {
4387 if (npages >= 1 << order) {
4388 page = alloc_pages(gfp_mask |
4389 __GFP_COMP |
4390 __GFP_NOWARN |
4391 __GFP_NORETRY,
4392 order);
4393 if (page)
4394 goto fill_page;
4395 /* Do not retry other high order allocations */
4396 order = 1;
4397 max_page_order = 0;
4398 }
4399 order--;
4400 }
4401 page = alloc_page(gfp_mask);
4402 if (!page)
4403 goto failure;
4404fill_page:
4405 chunk = min_t(unsigned long, data_len,
4406 PAGE_SIZE << order);
4407 skb_fill_page_desc(skb, i, page, 0, chunk);
4408 data_len -= chunk;
4409 npages -= 1 << order;
4410 }
4411 return skb;
4412
4413failure:
4414 kfree_skb(skb);
4415 return NULL;
4416}
4417EXPORT_SYMBOL(alloc_skb_with_frags);
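
/*
 * Illustrative sketch: building a datagram with a small linear header and
 * the payload in page frags, then filling it from an iov_iter in the style
 * of existing callers.  example_alloc_dgram() and the use of
 * PAGE_ALLOC_COSTLY_ORDER as the maximum frag order are assumptions for
 * illustration.
 */
#if 0	/* example only, not compiled */
static struct sk_buff *example_alloc_dgram(size_t linear, size_t paged,
					   struct iov_iter *from, gfp_t gfp)
{
	struct sk_buff *skb;
	int err;

	skb = alloc_skb_with_frags(linear, paged, PAGE_ALLOC_COSTLY_ORDER,
				   &err, gfp);
	if (!skb)
		return ERR_PTR(err);

	/* The frag descriptors are already filled in; extend the linear
	 * area and account the paged bytes before copying the payload.
	 */
	skb_put(skb, linear);
	skb->data_len = paged;
	skb->len = linear + paged;

	err = skb_copy_datagram_from_iter(skb, 0, from, linear + paged);
	if (err) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	return skb;
}
#endif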