net/core/gro.c (Linux 6.10-rc3)
// SPDX-License-Identifier: GPL-2.0-or-later
#include <net/gro.h>
#include <net/dst_metadata.h>
#include <net/busy_poll.h>
#include <trace/events/net.h>

#define MAX_GRO_SKBS 8

/* This should be increased if a protocol with a bigger head is added. */
#define GRO_MAX_HEAD (MAX_HEADER + 128)

static DEFINE_SPINLOCK(offload_lock);

/**
 *      dev_add_offload - register offload handlers
 *      @po: protocol offload declaration
 *
 *      Add protocol offload handlers to the networking stack. The passed
 *      &packet_offload is linked into kernel lists and may not be freed until
 *      it has been removed from the kernel lists.
 *
 *      This call does not sleep, therefore it cannot guarantee that all
 *      CPUs that are in the middle of receiving packets will see the new
 *      offload handlers (until the next received packet).
 */
void dev_add_offload(struct packet_offload *po)
{
        struct packet_offload *elem;

        spin_lock(&offload_lock);
        list_for_each_entry(elem, &net_hotdata.offload_base, list) {
                if (po->priority < elem->priority)
                        break;
        }
        list_add_rcu(&po->list, elem->list.prev);
        spin_unlock(&offload_lock);
}
EXPORT_SYMBOL(dev_add_offload);
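
/*
 * Illustrative sketch (guarded out, not part of the upstream file): how a
 * protocol module might register its GRO callbacks with dev_add_offload().
 * The variable name and init function below are assumptions made for the
 * example; the IPv4 callbacks are used only as familiar placeholders.
 */
#if 0
static struct packet_offload example_packet_offload __read_mostly = {
        .type = cpu_to_be16(ETH_P_IP),
        .priority = 0,  /* lower values end up earlier in offload_base */
        .callbacks = {
                .gro_receive = inet_gro_receive,
                .gro_complete = inet_gro_complete,
        },
};

static int __init example_offload_init(void)
{
        /* Inserted before the first entry with a higher priority value. */
        dev_add_offload(&example_packet_offload);
        return 0;
}
#endif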

/**
 *      __dev_remove_offload - remove offload handler
 *      @po: packet offload declaration
 *
 *      Remove a protocol offload handler that was previously added to the
 *      kernel offload handlers by dev_add_offload(). The passed
 *      &packet_offload is removed from the kernel lists and can be freed or
 *      reused once this function returns.
 *
 *      The packet type might still be in use by receivers
 *      and must not be freed until after all the CPUs have gone
 *      through a quiescent state.
 */
static void __dev_remove_offload(struct packet_offload *po)
{
        struct list_head *head = &net_hotdata.offload_base;
        struct packet_offload *po1;

        spin_lock(&offload_lock);

        list_for_each_entry(po1, head, list) {
                if (po == po1) {
                        list_del_rcu(&po->list);
                        goto out;
                }
        }

        pr_warn("dev_remove_offload: %p not found\n", po);
out:
        spin_unlock(&offload_lock);
}

/**
 *      dev_remove_offload - remove packet offload handler
 *      @po: packet offload declaration
 *
 *      Remove a packet offload handler that was previously added to the kernel
 *      offload handlers by dev_add_offload(). The passed &packet_offload is
 *      removed from the kernel lists and can be freed or reused once this
 *      function returns.
 *
 *      This call sleeps to guarantee that no CPU is looking at the packet
 *      type after return.
 */
void dev_remove_offload(struct packet_offload *po)
{
        __dev_remove_offload(po);

        synchronize_net();
}
EXPORT_SYMBOL(dev_remove_offload);
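
/*
 * Illustrative sketch (guarded out, not part of the upstream file):
 * unregistering the handlers from the example above on module exit.  Since
 * dev_remove_offload() already waits for a grace period via
 * synchronize_net(), the structure may be freed as soon as it returns.
 */
#if 0
static void __exit example_offload_exit(void)
{
        dev_remove_offload(&example_packet_offload);
}
#endif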
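
/*
 * skb_gro_receive() tries to coalesce @skb into the held packet @p using one
 * of three strategies (a summary of the code below, added for readability):
 *
 *   1. If the linear area holds nothing beyond the already-parsed headers
 *      (headlen <= offset), skb's page fragments are moved into p's shared
 *      info and the skb can later be freed (NAPI_GRO_FREE).
 *   2. Otherwise, if skb's linear data sits in a page fragment
 *      (skb->head_frag), the remaining head is turned into one extra
 *      fragment of p and the head is "stolen" (NAPI_GRO_FREE_STOLEN_HEAD).
 *   3. Otherwise skb is chained onto p's frag_list.
 *
 * Returns 0 on success, -E2BIG when a size limit would be exceeded, and
 * -ETOOMANYREFS when page-pool and non-page-pool packets would be mixed.
 */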
int skb_gro_receive(struct sk_buff *p, struct sk_buff *skb)
{
        struct skb_shared_info *pinfo, *skbinfo = skb_shinfo(skb);
        unsigned int offset = skb_gro_offset(skb);
        unsigned int headlen = skb_headlen(skb);
        unsigned int len = skb_gro_len(skb);
        unsigned int delta_truesize;
        unsigned int gro_max_size;
        unsigned int new_truesize;
        struct sk_buff *lp;
        int segs;

        /* Do not splice page pool based packets with non-page pool
         * packets. This can result in reference count issues as page
         * pool pages will not decrement the reference count and will
         * instead be immediately returned to the pool or have frag
         * count decremented.
         */
        if (p->pp_recycle != skb->pp_recycle)
                return -ETOOMANYREFS;

        /* pairs with WRITE_ONCE() in netif_set_gro(_ipv4)_max_size() */
        gro_max_size = p->protocol == htons(ETH_P_IPV6) ?
                        READ_ONCE(p->dev->gro_max_size) :
                        READ_ONCE(p->dev->gro_ipv4_max_size);

        if (unlikely(p->len + len >= gro_max_size || NAPI_GRO_CB(skb)->flush))
                return -E2BIG;

        if (unlikely(p->len + len >= GRO_LEGACY_MAX_SIZE)) {
                if (NAPI_GRO_CB(skb)->proto != IPPROTO_TCP ||
                    (p->protocol == htons(ETH_P_IPV6) &&
                     skb_headroom(p) < sizeof(struct hop_jumbo_hdr)) ||
                    p->encapsulation)
                        return -E2BIG;
        }

        segs = NAPI_GRO_CB(skb)->count;
        lp = NAPI_GRO_CB(p)->last;
        pinfo = skb_shinfo(lp);

        if (headlen <= offset) {
                skb_frag_t *frag;
                skb_frag_t *frag2;
                int i = skbinfo->nr_frags;
                int nr_frags = pinfo->nr_frags + i;

                if (nr_frags > MAX_SKB_FRAGS)
                        goto merge;

                offset -= headlen;
                pinfo->nr_frags = nr_frags;
                skbinfo->nr_frags = 0;

                frag = pinfo->frags + nr_frags;
                frag2 = skbinfo->frags + i;
                do {
                        *--frag = *--frag2;
                } while (--i);

                skb_frag_off_add(frag, offset);
                skb_frag_size_sub(frag, offset);

                /* all fragments' truesize: remove (head size + sk_buff) */
                new_truesize = SKB_TRUESIZE(skb_end_offset(skb));
                delta_truesize = skb->truesize - new_truesize;

                skb->truesize = new_truesize;
                skb->len -= skb->data_len;
                skb->data_len = 0;

                NAPI_GRO_CB(skb)->free = NAPI_GRO_FREE;
                goto done;
        } else if (skb->head_frag) {
                int nr_frags = pinfo->nr_frags;
                skb_frag_t *frag = pinfo->frags + nr_frags;
                struct page *page = virt_to_head_page(skb->head);
                unsigned int first_size = headlen - offset;
                unsigned int first_offset;

                if (nr_frags + 1 + skbinfo->nr_frags > MAX_SKB_FRAGS)
                        goto merge;

                first_offset = skb->data -
                               (unsigned char *)page_address(page) +
                               offset;

                pinfo->nr_frags = nr_frags + 1 + skbinfo->nr_frags;

                skb_frag_fill_page_desc(frag, page, first_offset, first_size);

                memcpy(frag + 1, skbinfo->frags, sizeof(*frag) * skbinfo->nr_frags);
                /* We don't need to clear skbinfo->nr_frags here */

                new_truesize = SKB_DATA_ALIGN(sizeof(struct sk_buff));
                delta_truesize = skb->truesize - new_truesize;
                skb->truesize = new_truesize;
                NAPI_GRO_CB(skb)->free = NAPI_GRO_FREE_STOLEN_HEAD;
                goto done;
        }

merge:
        /* sk ownership - if any - completely transferred to the aggregated packet */
        skb->destructor = NULL;
        skb->sk = NULL;
        delta_truesize = skb->truesize;
        if (offset > headlen) {
                unsigned int eat = offset - headlen;

                skb_frag_off_add(&skbinfo->frags[0], eat);
                skb_frag_size_sub(&skbinfo->frags[0], eat);
                skb->data_len -= eat;
                skb->len -= eat;
                offset = headlen;
        }

        __skb_pull(skb, offset);

        if (NAPI_GRO_CB(p)->last == p)
                skb_shinfo(p)->frag_list = skb;
        else
                NAPI_GRO_CB(p)->last->next = skb;
        NAPI_GRO_CB(p)->last = skb;
        __skb_header_release(skb);
        lp = p;

done:
        NAPI_GRO_CB(p)->count += segs;
        p->data_len += len;
        p->truesize += delta_truesize;
        p->len += len;
        if (lp != p) {
                lp->data_len += len;
                lp->truesize += delta_truesize;
                lp->len += len;
        }
        NAPI_GRO_CB(skb)->same_flow = 1;
        return 0;
}


static void napi_gro_complete(struct napi_struct *napi, struct sk_buff *skb)
{
        struct list_head *head = &net_hotdata.offload_base;
        struct packet_offload *ptype;
        __be16 type = skb->protocol;
        int err = -ENOENT;

        BUILD_BUG_ON(sizeof(struct napi_gro_cb) > sizeof(skb->cb));

        if (NAPI_GRO_CB(skb)->count == 1) {
                skb_shinfo(skb)->gso_size = 0;
                goto out;
        }

        rcu_read_lock();
        list_for_each_entry_rcu(ptype, head, list) {
                if (ptype->type != type || !ptype->callbacks.gro_complete)
                        continue;

                err = INDIRECT_CALL_INET(ptype->callbacks.gro_complete,
                                         ipv6_gro_complete, inet_gro_complete,
                                         skb, 0);
                break;
        }
        rcu_read_unlock();

        if (err) {
                WARN_ON(&ptype->list == head);
                kfree_skb(skb);
                return;
        }

out:
        gro_normal_one(napi, skb, NAPI_GRO_CB(skb)->count);
}

static void __napi_gro_flush_chain(struct napi_struct *napi, u32 index,
                                   bool flush_old)
{
        struct list_head *head = &napi->gro_hash[index].list;
        struct sk_buff *skb, *p;

        list_for_each_entry_safe_reverse(skb, p, head, list) {
                if (flush_old && NAPI_GRO_CB(skb)->age == jiffies)
                        return;
                skb_list_del_init(skb);
                napi_gro_complete(napi, skb);
                napi->gro_hash[index].count--;
        }

        if (!napi->gro_hash[index].count)
                __clear_bit(index, &napi->gro_bitmask);
}

/* napi->gro_hash[].list contains packets ordered by age, with the
 * youngest packets at its head.
 * Complete skbs in reverse order to reduce latencies.
 */
void napi_gro_flush(struct napi_struct *napi, bool flush_old)
{
        unsigned long bitmask = napi->gro_bitmask;
        unsigned int i, base = ~0U;

        while ((i = ffs(bitmask)) != 0) {
                bitmask >>= i;
                base += i;
                __napi_gro_flush_chain(napi, base, flush_old);
        }
}
EXPORT_SYMBOL(napi_gro_flush);

static unsigned long gro_list_prepare_tc_ext(const struct sk_buff *skb,
                                             const struct sk_buff *p,
                                             unsigned long diffs)
{
#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
        struct tc_skb_ext *skb_ext;
        struct tc_skb_ext *p_ext;

        skb_ext = skb_ext_find(skb, TC_SKB_EXT);
        p_ext = skb_ext_find(p, TC_SKB_EXT);

        diffs |= (!!p_ext) ^ (!!skb_ext);
        if (!diffs && unlikely(skb_ext))
                diffs |= p_ext->chain ^ skb_ext->chain;
#endif
        return diffs;
}

static void gro_list_prepare(const struct list_head *head,
                             const struct sk_buff *skb)
{
        unsigned int maclen = skb->dev->hard_header_len;
        u32 hash = skb_get_hash_raw(skb);
        struct sk_buff *p;

        list_for_each_entry(p, head, list) {
                unsigned long diffs;

                NAPI_GRO_CB(p)->flush = 0;

                if (hash != skb_get_hash_raw(p)) {
                        NAPI_GRO_CB(p)->same_flow = 0;
                        continue;
                }

                diffs = (unsigned long)p->dev ^ (unsigned long)skb->dev;
                diffs |= p->vlan_all ^ skb->vlan_all;
                diffs |= skb_metadata_differs(p, skb);
                if (maclen == ETH_HLEN)
                        diffs |= compare_ether_header(skb_mac_header(p),
                                                      skb_mac_header(skb));
                else if (!diffs)
                        diffs = memcmp(skb_mac_header(p),
                                       skb_mac_header(skb),
                                       maclen);

                /* In the most common scenarios 'slow_gro' is 0; otherwise
                 * we are already on some slower paths, so either skip all
                 * the infrequent tests altogether or avoid trying too hard
                 * to skip each of them individually.
                 */
                if (!diffs && unlikely(skb->slow_gro | p->slow_gro)) {
                        diffs |= p->sk != skb->sk;
                        diffs |= skb_metadata_dst_cmp(p, skb);
                        diffs |= skb_get_nfct(p) ^ skb_get_nfct(skb);

                        diffs |= gro_list_prepare_tc_ext(skb, p, diffs);
                }

                NAPI_GRO_CB(p)->same_flow = !diffs;
        }
}

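
/*
 * Set up the "frag0" fast path (summary added for readability): when the skb
 * has no linear data, GRO header accesses can read directly from the first
 * page fragment instead of pulling data into the linear area, provided the
 * fragment is not in highmem and is suitably aligned.
 */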
static inline void skb_gro_reset_offset(struct sk_buff *skb, u32 nhoff)
{
        const struct skb_shared_info *pinfo;
        const skb_frag_t *frag0;
        unsigned int headlen;

        NAPI_GRO_CB(skb)->network_offset = 0;
        NAPI_GRO_CB(skb)->data_offset = 0;
        headlen = skb_headlen(skb);
        NAPI_GRO_CB(skb)->frag0 = skb->data;
        NAPI_GRO_CB(skb)->frag0_len = headlen;
        if (headlen)
                return;

        pinfo = skb_shinfo(skb);
        frag0 = &pinfo->frags[0];

        if (pinfo->nr_frags && !PageHighMem(skb_frag_page(frag0)) &&
            (!NET_IP_ALIGN || !((skb_frag_off(frag0) + nhoff) & 3))) {
                NAPI_GRO_CB(skb)->frag0 = skb_frag_address(frag0);
                NAPI_GRO_CB(skb)->frag0_len = min_t(unsigned int,
                                                    skb_frag_size(frag0),
                                                    skb->end - skb->tail);
        }
}

static void gro_pull_from_frag0(struct sk_buff *skb, int grow)
{
        struct skb_shared_info *pinfo = skb_shinfo(skb);

        BUG_ON(skb->end - skb->tail < grow);

        memcpy(skb_tail_pointer(skb), NAPI_GRO_CB(skb)->frag0, grow);

        skb->data_len -= grow;
        skb->tail += grow;

        skb_frag_off_add(&pinfo->frags[0], grow);
        skb_frag_size_sub(&pinfo->frags[0], grow);

        if (unlikely(!skb_frag_size(&pinfo->frags[0]))) {
                skb_frag_unref(skb, 0);
                memmove(pinfo->frags, pinfo->frags + 1,
                        --pinfo->nr_frags * sizeof(pinfo->frags[0]));
        }
}

static void gro_try_pull_from_frag0(struct sk_buff *skb)
{
        int grow = skb_gro_offset(skb) - skb_headlen(skb);

        if (grow > 0)
                gro_pull_from_frag0(skb, grow);
}

static void gro_flush_oldest(struct napi_struct *napi, struct list_head *head)
{
        struct sk_buff *oldest;

        oldest = list_last_entry(head, struct sk_buff, list);

        /* We are only called when the chain already holds at least
         * MAX_GRO_SKBS entries, so it cannot be empty and oldest cannot
         * be NULL.
         */
        if (WARN_ON_ONCE(!oldest))
                return;

        /* Do not adjust napi->gro_hash[].count, caller is adding a new
         * SKB to the chain.
         */
        skb_list_del_init(oldest);
        napi_gro_complete(napi, oldest);
}

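/*
 * dev_gro_receive() is the core GRO dispatcher (summary added for
 * readability): it matches @skb against the per-bucket list of packets held
 * by this NAPI instance, hands it to the protocol's gro_receive callback,
 * and reports the outcome: GRO_NORMAL (pass up the stack), GRO_HELD (held
 * for possible merging), GRO_MERGED / GRO_MERGED_FREE (coalesced into a held
 * packet), or GRO_CONSUMED (taken over by the callback).
 */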
static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
{
        u32 bucket = skb_get_hash_raw(skb) & (GRO_HASH_BUCKETS - 1);
        struct gro_list *gro_list = &napi->gro_hash[bucket];
        struct list_head *head = &net_hotdata.offload_base;
        struct packet_offload *ptype;
        __be16 type = skb->protocol;
        struct sk_buff *pp = NULL;
        enum gro_result ret;
        int same_flow;

        if (netif_elide_gro(skb->dev))
                goto normal;

        gro_list_prepare(&gro_list->list, skb);

        rcu_read_lock();
        list_for_each_entry_rcu(ptype, head, list) {
                if (ptype->type == type && ptype->callbacks.gro_receive)
                        goto found_ptype;
        }
        rcu_read_unlock();
        goto normal;

found_ptype:
        skb_set_network_header(skb, skb_gro_offset(skb));
        skb_reset_mac_len(skb);
        BUILD_BUG_ON(sizeof_field(struct napi_gro_cb, zeroed) != sizeof(u32));
        BUILD_BUG_ON(!IS_ALIGNED(offsetof(struct napi_gro_cb, zeroed),
                                        sizeof(u32))); /* Avoid slow unaligned access */
        *(u32 *)&NAPI_GRO_CB(skb)->zeroed = 0;
        NAPI_GRO_CB(skb)->flush = skb_has_frag_list(skb);
        NAPI_GRO_CB(skb)->is_atomic = 1;
        NAPI_GRO_CB(skb)->count = 1;
        if (unlikely(skb_is_gso(skb))) {
                NAPI_GRO_CB(skb)->count = skb_shinfo(skb)->gso_segs;
                /* Only support TCP and non-DODGY users. */
                if (!skb_is_gso_tcp(skb) ||
                    (skb_shinfo(skb)->gso_type & SKB_GSO_DODGY))
                        NAPI_GRO_CB(skb)->flush = 1;
        }

        /* Set up for GRO checksum validation */
        switch (skb->ip_summed) {
        case CHECKSUM_COMPLETE:
                NAPI_GRO_CB(skb)->csum = skb->csum;
                NAPI_GRO_CB(skb)->csum_valid = 1;
                break;
        case CHECKSUM_UNNECESSARY:
                NAPI_GRO_CB(skb)->csum_cnt = skb->csum_level + 1;
                break;
        }

        pp = INDIRECT_CALL_INET(ptype->callbacks.gro_receive,
                                ipv6_gro_receive, inet_gro_receive,
                                &gro_list->list, skb);

        rcu_read_unlock();

        if (PTR_ERR(pp) == -EINPROGRESS) {
                ret = GRO_CONSUMED;
                goto ok;
        }

        same_flow = NAPI_GRO_CB(skb)->same_flow;
        ret = NAPI_GRO_CB(skb)->free ? GRO_MERGED_FREE : GRO_MERGED;

        if (pp) {
                skb_list_del_init(pp);
                napi_gro_complete(napi, pp);
                gro_list->count--;
        }

        if (same_flow)
                goto ok;

        if (NAPI_GRO_CB(skb)->flush)
                goto normal;

        if (unlikely(gro_list->count >= MAX_GRO_SKBS))
                gro_flush_oldest(napi, &gro_list->list);
        else
                gro_list->count++;

        /* Must be called before setting NAPI_GRO_CB(skb)->{age|last} */
        gro_try_pull_from_frag0(skb);
        NAPI_GRO_CB(skb)->age = jiffies;
        NAPI_GRO_CB(skb)->last = skb;
        if (!skb_is_gso(skb))
                skb_shinfo(skb)->gso_size = skb_gro_len(skb);
        list_add(&skb->list, &gro_list->list);
        ret = GRO_HELD;
ok:
        if (gro_list->count) {
                if (!test_bit(bucket, &napi->gro_bitmask))
                        __set_bit(bucket, &napi->gro_bitmask);
        } else if (test_bit(bucket, &napi->gro_bitmask)) {
                __clear_bit(bucket, &napi->gro_bitmask);
        }

        return ret;

normal:
        ret = GRO_NORMAL;
        gro_try_pull_from_frag0(skb);
        goto ok;
}

struct packet_offload *gro_find_receive_by_type(__be16 type)
{
        struct list_head *offload_head = &net_hotdata.offload_base;
        struct packet_offload *ptype;

        list_for_each_entry_rcu(ptype, offload_head, list) {
                if (ptype->type != type || !ptype->callbacks.gro_receive)
                        continue;
                return ptype;
        }
        return NULL;
}
EXPORT_SYMBOL(gro_find_receive_by_type);

struct packet_offload *gro_find_complete_by_type(__be16 type)
{
        struct list_head *offload_head = &net_hotdata.offload_base;
        struct packet_offload *ptype;

        list_for_each_entry_rcu(ptype, offload_head, list) {
                if (ptype->type != type || !ptype->callbacks.gro_complete)
                        continue;
                return ptype;
        }
        return NULL;
}
EXPORT_SYMBOL(gro_find_complete_by_type);
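
/*
 * Illustrative sketch (guarded out, not part of the upstream file): how an
 * encapsulation offload might use gro_find_receive_by_type() to hand the
 * inner packet to the next protocol's GRO handler.  The function name and
 * the fixed ETH_P_TEB inner type are assumptions made for the example.
 */
#if 0
static struct sk_buff *example_encap_gro_receive(struct list_head *head,
                                                 struct sk_buff *skb)
{
        struct packet_offload *ptype;
        struct sk_buff *pp = NULL;

        /* Runs under the rcu_read_lock() taken in dev_gro_receive(). */
        ptype = gro_find_receive_by_type(htons(ETH_P_TEB));
        if (ptype)
                pp = ptype->callbacks.gro_receive(head, skb);
        else
                NAPI_GRO_CB(skb)->flush = 1;
        return pp;
}
#endif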

static gro_result_t napi_skb_finish(struct napi_struct *napi,
                                    struct sk_buff *skb,
                                    gro_result_t ret)
{
        switch (ret) {
        case GRO_NORMAL:
                gro_normal_one(napi, skb, 1);
                break;

        case GRO_MERGED_FREE:
                if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD)
                        napi_skb_free_stolen_head(skb);
                else if (skb->fclone != SKB_FCLONE_UNAVAILABLE)
                        __kfree_skb(skb);
                else
                        __napi_kfree_skb(skb, SKB_CONSUMED);
                break;

        case GRO_HELD:
        case GRO_MERGED:
        case GRO_CONSUMED:
                break;
        }

        return ret;
}

gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
{
        gro_result_t ret;

        skb_mark_napi_id(skb, napi);
        trace_napi_gro_receive_entry(skb);

        skb_gro_reset_offset(skb, 0);

        ret = napi_skb_finish(napi, skb, dev_gro_receive(napi, skb));
        trace_napi_gro_receive_exit(ret);

        return ret;
}
EXPORT_SYMBOL(napi_gro_receive);
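
/*
 * Illustrative sketch (guarded out, not part of the upstream file): the usual
 * driver-side call site, a NAPI poll routine feeding received packets into
 * GRO.  example_poll() and example_rx_next() are hypothetical driver helpers
 * assumed for the example.
 */
#if 0
static int example_poll(struct napi_struct *napi, int budget)
{
        int work_done = 0;

        while (work_done < budget) {
                struct sk_buff *skb = example_rx_next(napi);    /* hypothetical */

                if (!skb)
                        break;
                skb->protocol = eth_type_trans(skb, napi->dev);
                napi_gro_receive(napi, skb);
                work_done++;
        }
        if (work_done < budget)
                napi_complete_done(napi, work_done);
        return work_done;
}
#endif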

static void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb)
{
        if (unlikely(skb->pfmemalloc)) {
                consume_skb(skb);
                return;
        }
        __skb_pull(skb, skb_headlen(skb));
        /* restore the reserve we had after netdev_alloc_skb_ip_align() */
        skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN - skb_headroom(skb));
        __vlan_hwaccel_clear_tag(skb);
        skb->dev = napi->dev;
        skb->skb_iif = 0;

        /* eth_type_trans() assumes pkt_type is PACKET_HOST */
        skb->pkt_type = PACKET_HOST;

        skb->encapsulation = 0;
        skb_shinfo(skb)->gso_type = 0;
        skb_shinfo(skb)->gso_size = 0;
        if (unlikely(skb->slow_gro)) {
                skb_orphan(skb);
                skb_ext_reset(skb);
                nf_reset_ct(skb);
                skb->slow_gro = 0;
        }

        napi->skb = skb;
}

struct sk_buff *napi_get_frags(struct napi_struct *napi)
{
        struct sk_buff *skb = napi->skb;

        if (!skb) {
                skb = napi_alloc_skb(napi, GRO_MAX_HEAD);
                if (skb) {
                        napi->skb = skb;
                        skb_mark_napi_id(skb, napi);
                }
        }
        return skb;
}
EXPORT_SYMBOL(napi_get_frags);

static gro_result_t napi_frags_finish(struct napi_struct *napi,
                                      struct sk_buff *skb,
                                      gro_result_t ret)
{
        switch (ret) {
        case GRO_NORMAL:
        case GRO_HELD:
                __skb_push(skb, ETH_HLEN);
                skb->protocol = eth_type_trans(skb, skb->dev);
                if (ret == GRO_NORMAL)
                        gro_normal_one(napi, skb, 1);
                break;

        case GRO_MERGED_FREE:
                if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD)
                        napi_skb_free_stolen_head(skb);
                else
                        napi_reuse_skb(napi, skb);
                break;

        case GRO_MERGED:
        case GRO_CONSUMED:
                break;
        }

        return ret;
}

/* The upper GRO stack assumes the network header starts at gro_offset=0.
 * Drivers could call both napi_gro_frags() and napi_gro_receive(), so we
 * copy the Ethernet header into skb->data to have a common layout.
 */
static struct sk_buff *napi_frags_skb(struct napi_struct *napi)
{
        struct sk_buff *skb = napi->skb;
        const struct ethhdr *eth;
        unsigned int hlen = sizeof(*eth);

        napi->skb = NULL;

        skb_reset_mac_header(skb);
        skb_gro_reset_offset(skb, hlen);

        if (unlikely(!skb_gro_may_pull(skb, hlen))) {
                eth = skb_gro_header_slow(skb, hlen, 0);
                if (unlikely(!eth)) {
                        net_warn_ratelimited("%s: dropping impossible skb from %s\n",
                                             __func__, napi->dev->name);
                        napi_reuse_skb(napi, skb);
                        return NULL;
                }
        } else {
                eth = (const struct ethhdr *)skb->data;

                if (NAPI_GRO_CB(skb)->frag0 != skb->data)
                        gro_pull_from_frag0(skb, hlen);

                NAPI_GRO_CB(skb)->frag0 += hlen;
                NAPI_GRO_CB(skb)->frag0_len -= hlen;
        }
        __skb_pull(skb, hlen);

        /*
         * This works because the only protocols we care about don't require
         * special handling.
         * We'll fix it up properly in napi_frags_finish()
         */
        skb->protocol = eth->h_proto;

        return skb;
}

gro_result_t napi_gro_frags(struct napi_struct *napi)
{
        gro_result_t ret;
        struct sk_buff *skb = napi_frags_skb(napi);

        trace_napi_gro_frags_entry(skb);

        ret = napi_frags_finish(napi, skb, dev_gro_receive(napi, skb));
        trace_napi_gro_frags_exit(ret);

        return ret;
}
EXPORT_SYMBOL(napi_gro_frags);
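
/*
 * Illustrative sketch (guarded out, not part of the upstream file): the
 * napi_get_frags()/napi_gro_frags() pattern used by drivers that receive
 * directly into pages.  The Ethernet header must be part of the first
 * fragment; napi_frags_skb() copies it into the linear area and
 * napi_frags_finish() runs eth_type_trans().  The helper name and its
 * parameters below are assumptions made for the example.
 */
#if 0
static void example_rx_frag(struct napi_struct *napi, struct page *page,
                            unsigned int offset, unsigned int len,
                            unsigned int truesize)
{
        struct sk_buff *skb = napi_get_frags(napi);

        if (unlikely(!skb))
                return;         /* a real driver would drop/recycle the page */

        skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
                        offset, len, truesize);

        /* Hands napi->skb to GRO; a fresh skb is allocated on the next call
         * to napi_get_frags().
         */
        napi_gro_frags(napi);
}
#endif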

/* Compute the checksum from gro_offset and return the folded value
 * after adding in any pseudo checksum.
 */
__sum16 __skb_gro_checksum_complete(struct sk_buff *skb)
{
        __wsum wsum;
        __sum16 sum;

        wsum = skb_checksum(skb, skb_gro_offset(skb), skb_gro_len(skb), 0);

        /* NAPI_GRO_CB(skb)->csum holds pseudo checksum */
        sum = csum_fold(csum_add(NAPI_GRO_CB(skb)->csum, wsum));
        /* See comments in __skb_checksum_complete(). */
        if (likely(!sum)) {
                if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) &&
                    !skb->csum_complete_sw)
                        netdev_rx_csum_fault(skb->dev, skb);
        }

        NAPI_GRO_CB(skb)->csum = wsum;
        NAPI_GRO_CB(skb)->csum_valid = 1;

        return sum;
}
EXPORT_SYMBOL(__skb_gro_checksum_complete);