net/core/gro.c

1// SPDX-License-Identifier: GPL-2.0-or-later
2#include <net/gro.h>
3#include <net/dst_metadata.h>
4#include <net/busy_poll.h>
5#include <trace/events/net.h>
6
7#define MAX_GRO_SKBS 8
8
9/* This should be increased if a protocol with a bigger head is added. */
10#define GRO_MAX_HEAD (MAX_HEADER + 128)
11
12static DEFINE_SPINLOCK(offload_lock);
13static struct list_head offload_base __read_mostly = LIST_HEAD_INIT(offload_base);
14/* Maximum number of GRO_NORMAL skbs to batch up for list-RX */
15int gro_normal_batch __read_mostly = 8;
16
17/**
18 * dev_add_offload - register offload handlers
19 * @po: protocol offload declaration
20 *
21 * Add protocol offload handlers to the networking stack. The passed
22 * &packet_offload is linked into kernel lists and may not be freed until
23 * it has been removed from the kernel lists.
24 *
25 * This call does not sleep, therefore it cannot guarantee that
26 * all CPUs that are in the middle of receiving packets
27 * will see the new offload handlers (until the next received packet).
28 */
29void dev_add_offload(struct packet_offload *po)
30{
31 struct packet_offload *elem;
32
33 spin_lock(&offload_lock);
34 list_for_each_entry(elem, &offload_base, list) {
35 if (po->priority < elem->priority)
36 break;
37 }
38 list_add_rcu(&po->list, elem->list.prev);
39 spin_unlock(&offload_lock);
40}
41EXPORT_SYMBOL(dev_add_offload);
42
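/* Illustrative sketch (not part of this file): a protocol module would
 * typically register its offload callbacks roughly like this. ETH_P_FOO and
 * the foo_* callbacks are hypothetical placeholders.
 *
 *	static struct packet_offload foo_offload __read_mostly = {
 *		.type = cpu_to_be16(ETH_P_FOO),
 *		.callbacks = {
 *			.gso_segment = foo_gso_segment,
 *			.gro_receive = foo_gro_receive,
 *			.gro_complete = foo_gro_complete,
 *		},
 *	};
 *
 *	static int __init foo_offload_init(void)
 *	{
 *		dev_add_offload(&foo_offload);
 *		return 0;
 *	}
 *	fs_initcall(foo_offload_init);
 */
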
43/**
44 * __dev_remove_offload - remove offload handler
45 * @po: packet offload declaration
46 *
47 * Remove a protocol offload handler that was previously added to the
48 * kernel offload handlers by dev_add_offload(). The passed &packet_offload
49 * is removed from the kernel lists and can be freed or reused once this
50 * function returns.
51 *
52 * The packet type might still be in use by receivers
53 * and must not be freed until after all the CPUs have gone
54 * through a quiescent state.
55 */
56static void __dev_remove_offload(struct packet_offload *po)
57{
58 struct list_head *head = &offload_base;
59 struct packet_offload *po1;
60
61 spin_lock(&offload_lock);
62
63 list_for_each_entry(po1, head, list) {
64 if (po == po1) {
65 list_del_rcu(&po->list);
66 goto out;
67 }
68 }
69
70 pr_warn("dev_remove_offload: %p not found\n", po);
71out:
72 spin_unlock(&offload_lock);
73}
74
75/**
76 * dev_remove_offload - remove packet offload handler
77 * @po: packet offload declaration
78 *
79 * Remove a packet offload handler that was previously added to the kernel
80 * offload handlers by dev_add_offload(). The passed &packet_offload is
81 * removed from the kernel lists and can be freed or reused once this
82 * function returns.
83 *
84 * This call sleeps to guarantee that no CPU is looking at the packet
85 * type after return.
86 */
87void dev_remove_offload(struct packet_offload *po)
88{
89 __dev_remove_offload(po);
90
91 synchronize_net();
92}
93EXPORT_SYMBOL(dev_remove_offload);
94
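/* Illustrative sketch: the matching teardown unregisters the handler from
 * the sketch above. dev_remove_offload() may sleep (it calls
 * synchronize_net()), so it must not be used from atomic context.
 *
 *	static void __exit foo_offload_exit(void)
 *	{
 *		dev_remove_offload(&foo_offload);
 *	}
 *	module_exit(foo_offload_exit);
 */
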
95/**
96 * skb_eth_gso_segment - segmentation handler for ethernet protocols.
97 * @skb: buffer to segment
98 * @features: features for the output path (see dev->features)
99 * @type: Ethernet Protocol ID
100 */
101struct sk_buff *skb_eth_gso_segment(struct sk_buff *skb,
102 netdev_features_t features, __be16 type)
103{
104 struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
105 struct packet_offload *ptype;
106
107 rcu_read_lock();
108 list_for_each_entry_rcu(ptype, &offload_base, list) {
109 if (ptype->type == type && ptype->callbacks.gso_segment) {
110 segs = ptype->callbacks.gso_segment(skb, features);
111 break;
112 }
113 }
114 rcu_read_unlock();
115
116 return segs;
117}
118EXPORT_SYMBOL(skb_eth_gso_segment);
119
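/* Illustrative sketch: an encapsulation offload that has already adjusted
 * skb->data to point at the inner packet could delegate segmentation to the
 * handler registered for that ethertype. foo_tunnel_gso_segment() is a
 * hypothetical callback.
 *
 *	static struct sk_buff *foo_tunnel_gso_segment(struct sk_buff *skb,
 *						      netdev_features_t features)
 *	{
 *		// assumes skb->data already points at the inner IPv4 packet
 *		return skb_eth_gso_segment(skb, features, htons(ETH_P_IP));
 *	}
 */
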
120/**
121 * skb_mac_gso_segment - mac layer segmentation handler.
122 * @skb: buffer to segment
123 * @features: features for the output path (see dev->features)
124 */
125struct sk_buff *skb_mac_gso_segment(struct sk_buff *skb,
126 netdev_features_t features)
127{
128 struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
129 struct packet_offload *ptype;
130 int vlan_depth = skb->mac_len;
131 __be16 type = skb_network_protocol(skb, &vlan_depth);
132
133 if (unlikely(!type))
134 return ERR_PTR(-EINVAL);
135
136 __skb_pull(skb, vlan_depth);
137
138 rcu_read_lock();
139 list_for_each_entry_rcu(ptype, &offload_base, list) {
140 if (ptype->type == type && ptype->callbacks.gso_segment) {
141 segs = ptype->callbacks.gso_segment(skb, features);
142 break;
143 }
144 }
145 rcu_read_unlock();
146
147 __skb_push(skb, skb->data - skb_mac_header(skb));
148
149 return segs;
150}
151EXPORT_SYMBOL(skb_mac_gso_segment);
152
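/* Illustrative sketch: a caller that must software-segment a GSO skb can walk
 * the list returned here. foo_xmit_one() and the error path are hypothetical;
 * "features" comes from the output device.
 *
 *	struct sk_buff *segs, *seg, *next;
 *
 *	segs = skb_mac_gso_segment(skb, features);
 *	if (IS_ERR_OR_NULL(segs))
 *		goto drop;
 *	consume_skb(skb);
 *	skb_list_walk_safe(segs, seg, next) {
 *		skb_mark_not_on_list(seg);
 *		foo_xmit_one(seg);
 *	}
 */
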
153int skb_gro_receive(struct sk_buff *p, struct sk_buff *skb)
154{
155 struct skb_shared_info *pinfo, *skbinfo = skb_shinfo(skb);
156 unsigned int offset = skb_gro_offset(skb);
157 unsigned int headlen = skb_headlen(skb);
158 unsigned int len = skb_gro_len(skb);
159 unsigned int delta_truesize;
160 unsigned int gro_max_size;
161 unsigned int new_truesize;
162 struct sk_buff *lp;
163 int segs;
164
165 /* Do not splice page pool based packets w/ non-page pool
166 * packets. This can result in reference count issues as page
167 * pool pages will not decrement the reference count and will
168 * instead be immediately returned to the pool or have frag
169 * count decremented.
170 */
171 if (p->pp_recycle != skb->pp_recycle)
172 return -ETOOMANYREFS;
173
174 /* pairs with WRITE_ONCE() in netif_set_gro(_ipv4)_max_size() */
175 gro_max_size = p->protocol == htons(ETH_P_IPV6) ?
176 READ_ONCE(p->dev->gro_max_size) :
177 READ_ONCE(p->dev->gro_ipv4_max_size);
178
179 if (unlikely(p->len + len >= gro_max_size || NAPI_GRO_CB(skb)->flush))
180 return -E2BIG;
181
182 if (unlikely(p->len + len >= GRO_LEGACY_MAX_SIZE)) {
183 if (NAPI_GRO_CB(skb)->proto != IPPROTO_TCP ||
184 (p->protocol == htons(ETH_P_IPV6) &&
185 skb_headroom(p) < sizeof(struct hop_jumbo_hdr)) ||
186 p->encapsulation)
187 return -E2BIG;
188 }
189
190 segs = NAPI_GRO_CB(skb)->count;
191 lp = NAPI_GRO_CB(p)->last;
192 pinfo = skb_shinfo(lp);
193
194 if (headlen <= offset) {
195 skb_frag_t *frag;
196 skb_frag_t *frag2;
197 int i = skbinfo->nr_frags;
198 int nr_frags = pinfo->nr_frags + i;
199
200 if (nr_frags > MAX_SKB_FRAGS)
201 goto merge;
202
203 offset -= headlen;
204 pinfo->nr_frags = nr_frags;
205 skbinfo->nr_frags = 0;
206
207 frag = pinfo->frags + nr_frags;
208 frag2 = skbinfo->frags + i;
209 do {
210 *--frag = *--frag2;
211 } while (--i);
212
213 skb_frag_off_add(frag, offset);
214 skb_frag_size_sub(frag, offset);
215
216 /* all fragments' truesize: remove (head size + sk_buff) */
217 new_truesize = SKB_TRUESIZE(skb_end_offset(skb));
218 delta_truesize = skb->truesize - new_truesize;
219
220 skb->truesize = new_truesize;
221 skb->len -= skb->data_len;
222 skb->data_len = 0;
223
224 NAPI_GRO_CB(skb)->free = NAPI_GRO_FREE;
225 goto done;
226 } else if (skb->head_frag) {
227 int nr_frags = pinfo->nr_frags;
228 skb_frag_t *frag = pinfo->frags + nr_frags;
229 struct page *page = virt_to_head_page(skb->head);
230 unsigned int first_size = headlen - offset;
231 unsigned int first_offset;
232
233 if (nr_frags + 1 + skbinfo->nr_frags > MAX_SKB_FRAGS)
234 goto merge;
235
236 first_offset = skb->data -
237 (unsigned char *)page_address(page) +
238 offset;
239
240 pinfo->nr_frags = nr_frags + 1 + skbinfo->nr_frags;
241
242 skb_frag_fill_page_desc(frag, page, first_offset, first_size);
243
244 memcpy(frag + 1, skbinfo->frags, sizeof(*frag) * skbinfo->nr_frags);
245 /* We don't need to clear skbinfo->nr_frags here */
246
247 new_truesize = SKB_DATA_ALIGN(sizeof(struct sk_buff));
248 delta_truesize = skb->truesize - new_truesize;
249 skb->truesize = new_truesize;
250 NAPI_GRO_CB(skb)->free = NAPI_GRO_FREE_STOLEN_HEAD;
251 goto done;
252 }
253
254merge:
255 /* sk ownership - if any - is completely transferred to the aggregated packet */
256 skb->destructor = NULL;
257 delta_truesize = skb->truesize;
258 if (offset > headlen) {
259 unsigned int eat = offset - headlen;
260
261 skb_frag_off_add(&skbinfo->frags[0], eat);
262 skb_frag_size_sub(&skbinfo->frags[0], eat);
263 skb->data_len -= eat;
264 skb->len -= eat;
265 offset = headlen;
266 }
267
268 __skb_pull(skb, offset);
269
270 if (NAPI_GRO_CB(p)->last == p)
271 skb_shinfo(p)->frag_list = skb;
272 else
273 NAPI_GRO_CB(p)->last->next = skb;
274 NAPI_GRO_CB(p)->last = skb;
275 __skb_header_release(skb);
276 lp = p;
277
278done:
279 NAPI_GRO_CB(p)->count += segs;
280 p->data_len += len;
281 p->truesize += delta_truesize;
282 p->len += len;
283 if (lp != p) {
284 lp->data_len += len;
285 lp->truesize += delta_truesize;
286 lp->len += len;
287 }
288 NAPI_GRO_CB(skb)->same_flow = 1;
289 return 0;
290}
291
292
293static void napi_gro_complete(struct napi_struct *napi, struct sk_buff *skb)
294{
295 struct packet_offload *ptype;
296 __be16 type = skb->protocol;
297 struct list_head *head = &offload_base;
298 int err = -ENOENT;
299
300 BUILD_BUG_ON(sizeof(struct napi_gro_cb) > sizeof(skb->cb));
301
302 if (NAPI_GRO_CB(skb)->count == 1) {
303 skb_shinfo(skb)->gso_size = 0;
304 goto out;
305 }
306
307 rcu_read_lock();
308 list_for_each_entry_rcu(ptype, head, list) {
309 if (ptype->type != type || !ptype->callbacks.gro_complete)
310 continue;
311
312 err = INDIRECT_CALL_INET(ptype->callbacks.gro_complete,
313 ipv6_gro_complete, inet_gro_complete,
314 skb, 0);
315 break;
316 }
317 rcu_read_unlock();
318
319 if (err) {
320 WARN_ON(&ptype->list == head);
321 kfree_skb(skb);
322 return;
323 }
324
325out:
326 gro_normal_one(napi, skb, NAPI_GRO_CB(skb)->count);
327}
328
329static void __napi_gro_flush_chain(struct napi_struct *napi, u32 index,
330 bool flush_old)
331{
332 struct list_head *head = &napi->gro_hash[index].list;
333 struct sk_buff *skb, *p;
334
335 list_for_each_entry_safe_reverse(skb, p, head, list) {
336 if (flush_old && NAPI_GRO_CB(skb)->age == jiffies)
337 return;
338 skb_list_del_init(skb);
339 napi_gro_complete(napi, skb);
340 napi->gro_hash[index].count--;
341 }
342
343 if (!napi->gro_hash[index].count)
344 __clear_bit(index, &napi->gro_bitmask);
345}
346
347/* napi->gro_hash[].list contains packets ordered by age.
348 * The youngest packets are at the head of it.
349 * Complete skbs in reverse order to reduce latencies.
350 */
351void napi_gro_flush(struct napi_struct *napi, bool flush_old)
352{
353 unsigned long bitmask = napi->gro_bitmask;
354 unsigned int i, base = ~0U;
355
356 while ((i = ffs(bitmask)) != 0) {
357 bitmask >>= i;
358 base += i;
359 __napi_gro_flush_chain(napi, base, flush_old);
360 }
361}
362EXPORT_SYMBOL(napi_gro_flush);
363
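/* Illustrative, hedged sketch: the main caller is the NAPI completion path;
 * napi_complete_done() flushes any held packets before the instance goes
 * idle, roughly as below, where "timeout" is the device's gro_flush_timeout.
 *
 *	if (napi->gro_bitmask)
 *		napi_gro_flush(napi, !!timeout);
 */
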
364static void gro_list_prepare(const struct list_head *head,
365 const struct sk_buff *skb)
366{
367 unsigned int maclen = skb->dev->hard_header_len;
368 u32 hash = skb_get_hash_raw(skb);
369 struct sk_buff *p;
370
371 list_for_each_entry(p, head, list) {
372 unsigned long diffs;
373
374 NAPI_GRO_CB(p)->flush = 0;
375
376 if (hash != skb_get_hash_raw(p)) {
377 NAPI_GRO_CB(p)->same_flow = 0;
378 continue;
379 }
380
381 diffs = (unsigned long)p->dev ^ (unsigned long)skb->dev;
382 diffs |= p->vlan_all ^ skb->vlan_all;
383 diffs |= skb_metadata_differs(p, skb);
384 if (maclen == ETH_HLEN)
385 diffs |= compare_ether_header(skb_mac_header(p),
386 skb_mac_header(skb));
387 else if (!diffs)
388 diffs = memcmp(skb_mac_header(p),
389 skb_mac_header(skb),
390 maclen);
391
392 /* In the most common scenarios 'slow_gro' is 0,
393 * otherwise we are already on some slower paths;
394 * either skip all the infrequent tests altogether or
395 * avoid trying too hard to skip each of them individually.
396 */
397 if (!diffs && unlikely(skb->slow_gro | p->slow_gro)) {
398#if IS_ENABLED(CONFIG_SKB_EXTENSIONS) && IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
399 struct tc_skb_ext *skb_ext;
400 struct tc_skb_ext *p_ext;
401#endif
402
403 diffs |= p->sk != skb->sk;
404 diffs |= skb_metadata_dst_cmp(p, skb);
405 diffs |= skb_get_nfct(p) ^ skb_get_nfct(skb);
406
407#if IS_ENABLED(CONFIG_SKB_EXTENSIONS) && IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
408 skb_ext = skb_ext_find(skb, TC_SKB_EXT);
409 p_ext = skb_ext_find(p, TC_SKB_EXT);
410
411 diffs |= (!!p_ext) ^ (!!skb_ext);
412 if (!diffs && unlikely(skb_ext))
413 diffs |= p_ext->chain ^ skb_ext->chain;
414#endif
415 }
416
417 NAPI_GRO_CB(p)->same_flow = !diffs;
418 }
419}
420
421static inline void skb_gro_reset_offset(struct sk_buff *skb, u32 nhoff)
422{
423 const struct skb_shared_info *pinfo = skb_shinfo(skb);
424 const skb_frag_t *frag0 = &pinfo->frags[0];
425
426 NAPI_GRO_CB(skb)->data_offset = 0;
427 NAPI_GRO_CB(skb)->frag0 = NULL;
428 NAPI_GRO_CB(skb)->frag0_len = 0;
429
430 if (!skb_headlen(skb) && pinfo->nr_frags &&
431 !PageHighMem(skb_frag_page(frag0)) &&
432 (!NET_IP_ALIGN || !((skb_frag_off(frag0) + nhoff) & 3))) {
433 NAPI_GRO_CB(skb)->frag0 = skb_frag_address(frag0);
434 NAPI_GRO_CB(skb)->frag0_len = min_t(unsigned int,
435 skb_frag_size(frag0),
436 skb->end - skb->tail);
437 }
438}
439
440static void gro_pull_from_frag0(struct sk_buff *skb, int grow)
441{
442 struct skb_shared_info *pinfo = skb_shinfo(skb);
443
444 BUG_ON(skb->end - skb->tail < grow);
445
446 memcpy(skb_tail_pointer(skb), NAPI_GRO_CB(skb)->frag0, grow);
447
448 skb->data_len -= grow;
449 skb->tail += grow;
450
451 skb_frag_off_add(&pinfo->frags[0], grow);
452 skb_frag_size_sub(&pinfo->frags[0], grow);
453
454 if (unlikely(!skb_frag_size(&pinfo->frags[0]))) {
455 skb_frag_unref(skb, 0);
456 memmove(pinfo->frags, pinfo->frags + 1,
457 --pinfo->nr_frags * sizeof(pinfo->frags[0]));
458 }
459}
460
461static void gro_try_pull_from_frag0(struct sk_buff *skb)
462{
463 int grow = skb_gro_offset(skb) - skb_headlen(skb);
464
465 if (grow > 0)
466 gro_pull_from_frag0(skb, grow);
467}
468
469static void gro_flush_oldest(struct napi_struct *napi, struct list_head *head)
470{
471 struct sk_buff *oldest;
472
473 oldest = list_last_entry(head, struct sk_buff, list);
474
475 /* We are called with head length >= MAX_GRO_SKBS, so this is
476 * impossible.
477 */
478 if (WARN_ON_ONCE(!oldest))
479 return;
480
481 /* Do not adjust napi->gro_hash[].count, caller is adding a new
482 * SKB to the chain.
483 */
484 skb_list_del_init(oldest);
485 napi_gro_complete(napi, oldest);
486}
487
488static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
489{
490 u32 bucket = skb_get_hash_raw(skb) & (GRO_HASH_BUCKETS - 1);
491 struct gro_list *gro_list = &napi->gro_hash[bucket];
492 struct list_head *head = &offload_base;
493 struct packet_offload *ptype;
494 __be16 type = skb->protocol;
495 struct sk_buff *pp = NULL;
496 enum gro_result ret;
497 int same_flow;
498
499 if (netif_elide_gro(skb->dev))
500 goto normal;
501
502 gro_list_prepare(&gro_list->list, skb);
503
504 rcu_read_lock();
505 list_for_each_entry_rcu(ptype, head, list) {
506 if (ptype->type == type && ptype->callbacks.gro_receive)
507 goto found_ptype;
508 }
509 rcu_read_unlock();
510 goto normal;
511
512found_ptype:
513 skb_set_network_header(skb, skb_gro_offset(skb));
514 skb_reset_mac_len(skb);
515 BUILD_BUG_ON(sizeof_field(struct napi_gro_cb, zeroed) != sizeof(u32));
516 BUILD_BUG_ON(!IS_ALIGNED(offsetof(struct napi_gro_cb, zeroed),
517 sizeof(u32))); /* Avoid slow unaligned acc */
518 *(u32 *)&NAPI_GRO_CB(skb)->zeroed = 0;
519 NAPI_GRO_CB(skb)->flush = skb_has_frag_list(skb);
520 NAPI_GRO_CB(skb)->is_atomic = 1;
521 NAPI_GRO_CB(skb)->count = 1;
522 if (unlikely(skb_is_gso(skb))) {
523 NAPI_GRO_CB(skb)->count = skb_shinfo(skb)->gso_segs;
524 /* Only support TCP and non DODGY users. */
525 if (!skb_is_gso_tcp(skb) ||
526 (skb_shinfo(skb)->gso_type & SKB_GSO_DODGY))
527 NAPI_GRO_CB(skb)->flush = 1;
528 }
529
530 /* Setup for GRO checksum validation */
531 switch (skb->ip_summed) {
532 case CHECKSUM_COMPLETE:
533 NAPI_GRO_CB(skb)->csum = skb->csum;
534 NAPI_GRO_CB(skb)->csum_valid = 1;
535 break;
536 case CHECKSUM_UNNECESSARY:
537 NAPI_GRO_CB(skb)->csum_cnt = skb->csum_level + 1;
538 break;
539 }
540
541 pp = INDIRECT_CALL_INET(ptype->callbacks.gro_receive,
542 ipv6_gro_receive, inet_gro_receive,
543 &gro_list->list, skb);
544
545 rcu_read_unlock();
546
547 if (PTR_ERR(pp) == -EINPROGRESS) {
548 ret = GRO_CONSUMED;
549 goto ok;
550 }
551
552 same_flow = NAPI_GRO_CB(skb)->same_flow;
553 ret = NAPI_GRO_CB(skb)->free ? GRO_MERGED_FREE : GRO_MERGED;
554
555 if (pp) {
556 skb_list_del_init(pp);
557 napi_gro_complete(napi, pp);
558 gro_list->count--;
559 }
560
561 if (same_flow)
562 goto ok;
563
564 if (NAPI_GRO_CB(skb)->flush)
565 goto normal;
566
567 if (unlikely(gro_list->count >= MAX_GRO_SKBS))
568 gro_flush_oldest(napi, &gro_list->list);
569 else
570 gro_list->count++;
571
572 /* Must be called before setting NAPI_GRO_CB(skb)->{age|last} */
573 gro_try_pull_from_frag0(skb);
574 NAPI_GRO_CB(skb)->age = jiffies;
575 NAPI_GRO_CB(skb)->last = skb;
576 if (!skb_is_gso(skb))
577 skb_shinfo(skb)->gso_size = skb_gro_len(skb);
578 list_add(&skb->list, &gro_list->list);
579 ret = GRO_HELD;
580ok:
581 if (gro_list->count) {
582 if (!test_bit(bucket, &napi->gro_bitmask))
583 __set_bit(bucket, &napi->gro_bitmask);
584 } else if (test_bit(bucket, &napi->gro_bitmask)) {
585 __clear_bit(bucket, &napi->gro_bitmask);
586 }
587
588 return ret;
589
590normal:
591 ret = GRO_NORMAL;
592 gro_try_pull_from_frag0(skb);
593 goto ok;
594}
595
596struct packet_offload *gro_find_receive_by_type(__be16 type)
597{
598 struct list_head *offload_head = &offload_base;
599 struct packet_offload *ptype;
600
601 list_for_each_entry_rcu(ptype, offload_head, list) {
602 if (ptype->type != type || !ptype->callbacks.gro_receive)
603 continue;
604 return ptype;
605 }
606 return NULL;
607}
608EXPORT_SYMBOL(gro_find_receive_by_type);
609
610struct packet_offload *gro_find_complete_by_type(__be16 type)
611{
612 struct list_head *offload_head = &offload_base;
613 struct packet_offload *ptype;
614
615 list_for_each_entry_rcu(ptype, offload_head, list) {
616 if (ptype->type != type || !ptype->callbacks.gro_complete)
617 continue;
618 return ptype;
619 }
620 return NULL;
621}
622EXPORT_SYMBOL(gro_find_complete_by_type);
623
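/* Illustrative sketch: encapsulation GRO handlers use these lookups to
 * dispatch to the inner protocol. foo_gro_receive(), foo_inner_proto() and
 * struct foohdr are hypothetical; the handler already runs under RCU because
 * dev_gro_receive() invoked it inside rcu_read_lock().
 *
 *	static struct sk_buff *foo_gro_receive(struct list_head *head,
 *					       struct sk_buff *skb)
 *	{
 *		struct packet_offload *ptype;
 *		__be16 type = foo_inner_proto(skb);
 *
 *		ptype = gro_find_receive_by_type(type);
 *		if (!ptype) {
 *			NAPI_GRO_CB(skb)->flush = 1;
 *			return NULL;
 *		}
 *		skb_gro_pull(skb, sizeof(struct foohdr));
 *		return ptype->callbacks.gro_receive(head, skb);
 *	}
 */
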
624static gro_result_t napi_skb_finish(struct napi_struct *napi,
625 struct sk_buff *skb,
626 gro_result_t ret)
627{
628 switch (ret) {
629 case GRO_NORMAL:
630 gro_normal_one(napi, skb, 1);
631 break;
632
633 case GRO_MERGED_FREE:
634 if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD)
635 napi_skb_free_stolen_head(skb);
636 else if (skb->fclone != SKB_FCLONE_UNAVAILABLE)
637 __kfree_skb(skb);
638 else
639 __napi_kfree_skb(skb, SKB_CONSUMED);
640 break;
641
642 case GRO_HELD:
643 case GRO_MERGED:
644 case GRO_CONSUMED:
645 break;
646 }
647
648 return ret;
649}
650
651gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
652{
653 gro_result_t ret;
654
655 skb_mark_napi_id(skb, napi);
656 trace_napi_gro_receive_entry(skb);
657
658 skb_gro_reset_offset(skb, 0);
659
660 ret = napi_skb_finish(napi, skb, dev_gro_receive(napi, skb));
661 trace_napi_gro_receive_exit(ret);
662
663 return ret;
664}
665EXPORT_SYMBOL(napi_gro_receive);
666
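/* Illustrative sketch: a typical driver NAPI poll loop hands each completed
 * receive buffer to GRO. struct foo_ring, foo_build_rx_skb() and
 * foo_enable_rx_irq() are hypothetical.
 *
 *	static int foo_poll(struct napi_struct *napi, int budget)
 *	{
 *		struct foo_ring *ring = container_of(napi, struct foo_ring, napi);
 *		int work_done = 0;
 *
 *		while (work_done < budget) {
 *			struct sk_buff *skb = foo_build_rx_skb(ring);
 *
 *			if (!skb)
 *				break;
 *			skb->protocol = eth_type_trans(skb, ring->netdev);
 *			napi_gro_receive(napi, skb);
 *			work_done++;
 *		}
 *		if (work_done < budget &&
 *		    napi_complete_done(napi, work_done))
 *			foo_enable_rx_irq(ring);
 *		return work_done;
 *	}
 */
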
667static void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb)
668{
669 if (unlikely(skb->pfmemalloc)) {
670 consume_skb(skb);
671 return;
672 }
673 __skb_pull(skb, skb_headlen(skb));
674 /* restore the reserve we had after netdev_alloc_skb_ip_align() */
675 skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN - skb_headroom(skb));
676 __vlan_hwaccel_clear_tag(skb);
677 skb->dev = napi->dev;
678 skb->skb_iif = 0;
679
680 /* eth_type_trans() assumes pkt_type is PACKET_HOST */
681 skb->pkt_type = PACKET_HOST;
682
683 skb->encapsulation = 0;
684 skb_shinfo(skb)->gso_type = 0;
685 skb_shinfo(skb)->gso_size = 0;
686 if (unlikely(skb->slow_gro)) {
687 skb_orphan(skb);
688 skb_ext_reset(skb);
689 nf_reset_ct(skb);
690 skb->slow_gro = 0;
691 }
692
693 napi->skb = skb;
694}
695
696struct sk_buff *napi_get_frags(struct napi_struct *napi)
697{
698 struct sk_buff *skb = napi->skb;
699
700 if (!skb) {
701 skb = napi_alloc_skb(napi, GRO_MAX_HEAD);
702 if (skb) {
703 napi->skb = skb;
704 skb_mark_napi_id(skb, napi);
705 }
706 }
707 return skb;
708}
709EXPORT_SYMBOL(napi_get_frags);
710
711static gro_result_t napi_frags_finish(struct napi_struct *napi,
712 struct sk_buff *skb,
713 gro_result_t ret)
714{
715 switch (ret) {
716 case GRO_NORMAL:
717 case GRO_HELD:
718 __skb_push(skb, ETH_HLEN);
719 skb->protocol = eth_type_trans(skb, skb->dev);
720 if (ret == GRO_NORMAL)
721 gro_normal_one(napi, skb, 1);
722 break;
723
724 case GRO_MERGED_FREE:
725 if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD)
726 napi_skb_free_stolen_head(skb);
727 else
728 napi_reuse_skb(napi, skb);
729 break;
730
731 case GRO_MERGED:
732 case GRO_CONSUMED:
733 break;
734 }
735
736 return ret;
737}
738
739/* The upper GRO stack assumes the network header starts at gro_offset=0.
740 * Drivers could call both napi_gro_frags() and napi_gro_receive().
741 * We copy the ethernet header into skb->data to have a common layout.
742 */
743static struct sk_buff *napi_frags_skb(struct napi_struct *napi)
744{
745 struct sk_buff *skb = napi->skb;
746 const struct ethhdr *eth;
747 unsigned int hlen = sizeof(*eth);
748
749 napi->skb = NULL;
750
751 skb_reset_mac_header(skb);
752 skb_gro_reset_offset(skb, hlen);
753
754 if (unlikely(skb_gro_header_hard(skb, hlen))) {
755 eth = skb_gro_header_slow(skb, hlen, 0);
756 if (unlikely(!eth)) {
757 net_warn_ratelimited("%s: dropping impossible skb from %s\n",
758 __func__, napi->dev->name);
759 napi_reuse_skb(napi, skb);
760 return NULL;
761 }
762 } else {
763 eth = (const struct ethhdr *)skb->data;
764 gro_pull_from_frag0(skb, hlen);
765 NAPI_GRO_CB(skb)->frag0 += hlen;
766 NAPI_GRO_CB(skb)->frag0_len -= hlen;
767 }
768 __skb_pull(skb, hlen);
769
770 /*
771 * This works because the only protocols we care about don't require
772 * special handling.
773 * We'll fix it up properly in napi_frags_finish()
774 */
775 skb->protocol = eth->h_proto;
776
777 return skb;
778}
779
780gro_result_t napi_gro_frags(struct napi_struct *napi)
781{
782 gro_result_t ret;
783 struct sk_buff *skb = napi_frags_skb(napi);
784
785 trace_napi_gro_frags_entry(skb);
786
787 ret = napi_frags_finish(napi, skb, dev_gro_receive(napi, skb));
788 trace_napi_gro_frags_exit(ret);
789
790 return ret;
791}
792EXPORT_SYMBOL(napi_gro_frags);
793
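/* Illustrative sketch: drivers that receive directly into pages can use the
 * frags API instead of building a linear skb; page, page_offset, frame_len
 * and truesize are hypothetical values from the receive descriptor. The
 * ethernet header is pulled later by napi_frags_skb().
 *
 *	struct sk_buff *skb = napi_get_frags(napi);
 *
 *	if (!skb)
 *		goto drop;
 *	skb_fill_page_desc(skb, 0, page, page_offset, frame_len);
 *	skb->len += frame_len;
 *	skb->data_len += frame_len;
 *	skb->truesize += truesize;
 *	napi_gro_frags(napi);	// consumes or recycles napi->skb
 */
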
794/* Compute the checksum from gro_offset and return the folded value
795 * after adding in any pseudo checksum.
796 */
797__sum16 __skb_gro_checksum_complete(struct sk_buff *skb)
798{
799 __wsum wsum;
800 __sum16 sum;
801
802 wsum = skb_checksum(skb, skb_gro_offset(skb), skb_gro_len(skb), 0);
803
804 /* NAPI_GRO_CB(skb)->csum holds pseudo checksum */
805 sum = csum_fold(csum_add(NAPI_GRO_CB(skb)->csum, wsum));
806 /* See comments in __skb_checksum_complete(). */
807 if (likely(!sum)) {
808 if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) &&
809 !skb->csum_complete_sw)
810 netdev_rx_csum_fault(skb->dev, skb);
811 }
812
813 NAPI_GRO_CB(skb)->csum = wsum;
814 NAPI_GRO_CB(skb)->csum_valid = 1;
815
816 return sum;
817}
818EXPORT_SYMBOL(__skb_gro_checksum_complete);
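/* Illustrative, hedged sketch: protocol gro_receive handlers normally reach
 * this function through the skb_gro_checksum_validate*() helpers in
 * include/net/gro.h rather than calling it directly, roughly as the TCP/IPv4
 * handler does:
 *
 *	if (!NAPI_GRO_CB(skb)->flush &&
 *	    skb_gro_checksum_validate(skb, IPPROTO_TCP,
 *				      inet_gro_compute_pseudo))
 *		goto flush;
 */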