net/core/gro.c
// SPDX-License-Identifier: GPL-2.0-or-later
#include <net/gro.h>
#include <net/dst_metadata.h>
#include <net/busy_poll.h>
#include <trace/events/net.h>

#define MAX_GRO_SKBS 8

/* This should be increased if a protocol with a bigger head is added. */
#define GRO_MAX_HEAD (MAX_HEADER + 128)

static DEFINE_SPINLOCK(offload_lock);
static struct list_head offload_base __read_mostly = LIST_HEAD_INIT(offload_base);
/* Maximum number of GRO_NORMAL skbs to batch up for list-RX */
int gro_normal_batch __read_mostly = 8;

/**
 *      dev_add_offload - register offload handlers
 *      @po: protocol offload declaration
 *
 *      Add protocol offload handlers to the networking stack. The passed
 *      &packet_offload is linked into kernel lists and may not be freed until
 *      it has been removed from the kernel lists.
 *
 *      This call does not sleep, therefore it cannot guarantee that all
 *      CPUs that are in the middle of receiving packets will see the new
 *      offload handlers (until the next received packet).
 */
void dev_add_offload(struct packet_offload *po)
{
        struct packet_offload *elem;

        spin_lock(&offload_lock);
        list_for_each_entry(elem, &offload_base, list) {
                if (po->priority < elem->priority)
                        break;
        }
        list_add_rcu(&po->list, elem->list.prev);
        spin_unlock(&offload_lock);
}
EXPORT_SYMBOL(dev_add_offload);
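
/* Illustrative sketch (not part of this file): a protocol module registering
 * its offload callbacks at init time. ETH_P_MYPROTO and the my_*() callback
 * names are hypothetical stand-ins for a real protocol's handlers.
 *
 *      static struct packet_offload my_offload __read_mostly = {
 *              .type = cpu_to_be16(ETH_P_MYPROTO),
 *              .priority = 0,
 *              .callbacks = {
 *                      .gso_segment = my_gso_segment,
 *                      .gro_receive = my_gro_receive,
 *                      .gro_complete = my_gro_complete,
 *              },
 *      };
 *
 *      static int __init my_init(void)
 *      {
 *              dev_add_offload(&my_offload);
 *              return 0;
 *      }
 */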

/**
 *      __dev_remove_offload     - remove offload handler
 *      @po: packet offload declaration
 *
 *      Remove a protocol offload handler that was previously added to the
 *      kernel offload handlers by dev_add_offload(). The passed
 *      &packet_offload is removed from the kernel lists and can be freed or
 *      reused once this function returns.
 *
 *      The packet type might still be in use by receivers
 *      and must not be freed until after all the CPUs have gone
 *      through a quiescent state.
 */
static void __dev_remove_offload(struct packet_offload *po)
{
        struct list_head *head = &offload_base;
        struct packet_offload *po1;

        spin_lock(&offload_lock);

        list_for_each_entry(po1, head, list) {
                if (po == po1) {
                        list_del_rcu(&po->list);
                        goto out;
                }
        }

        pr_warn("dev_remove_offload: %p not found\n", po);
out:
        spin_unlock(&offload_lock);
}

/**
 *      dev_remove_offload       - remove packet offload handler
 *      @po: packet offload declaration
 *
 *      Remove a packet offload handler that was previously added to the kernel
 *      offload handlers by dev_add_offload(). The passed &packet_offload is
 *      removed from the kernel lists and can be freed or reused once this
 *      function returns.
 *
 *      This call sleeps to guarantee that no CPU is looking at the packet
 *      type after return.
 */
void dev_remove_offload(struct packet_offload *po)
{
        __dev_remove_offload(po);

        synchronize_net();
}
EXPORT_SYMBOL(dev_remove_offload);
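
/* Illustrative sketch (not part of this file): teardown paired with the
 * registration example above. Because dev_remove_offload() calls
 * synchronize_net() and thus sleeps, it must run in process context,
 * e.g. a module exit handler; my_exit() is hypothetical.
 *
 *      static void __exit my_exit(void)
 *      {
 *              dev_remove_offload(&my_offload);
 *      }
 */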

/**
 *      skb_eth_gso_segment - segmentation handler for ethernet protocols.
 *      @skb: buffer to segment
 *      @features: features for the output path (see dev->features)
 *      @type: Ethernet Protocol ID
 */
struct sk_buff *skb_eth_gso_segment(struct sk_buff *skb,
                                    netdev_features_t features, __be16 type)
{
        struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
        struct packet_offload *ptype;

        rcu_read_lock();
        list_for_each_entry_rcu(ptype, &offload_base, list) {
                if (ptype->type == type && ptype->callbacks.gso_segment) {
                        segs = ptype->callbacks.gso_segment(skb, features);
                        break;
                }
        }
        rcu_read_unlock();

        return segs;
}
EXPORT_SYMBOL(skb_eth_gso_segment);
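
/* Illustrative sketch (not part of this file): a hypothetical encapsulation
 * GSO handler delegating to skb_eth_gso_segment() once it knows the inner
 * protocol; my_tunnel_gso_segment() and inner_proto are assumptions.
 *
 *      static struct sk_buff *my_tunnel_gso_segment(struct sk_buff *skb,
 *                                                   netdev_features_t features)
 *      {
 *              __be16 inner_proto = htons(ETH_P_IP);   // hypothetical
 *
 *              return skb_eth_gso_segment(skb, features, inner_proto);
 *      }
 */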

/**
 *      skb_mac_gso_segment - mac layer segmentation handler.
 *      @skb: buffer to segment
 *      @features: features for the output path (see dev->features)
 */
struct sk_buff *skb_mac_gso_segment(struct sk_buff *skb,
                                    netdev_features_t features)
{
        struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
        struct packet_offload *ptype;
        int vlan_depth = skb->mac_len;
        __be16 type = skb_network_protocol(skb, &vlan_depth);

        if (unlikely(!type))
                return ERR_PTR(-EINVAL);

        __skb_pull(skb, vlan_depth);

        rcu_read_lock();
        list_for_each_entry_rcu(ptype, &offload_base, list) {
                if (ptype->type == type && ptype->callbacks.gso_segment) {
                        segs = ptype->callbacks.gso_segment(skb, features);
                        break;
                }
        }
        rcu_read_unlock();

        __skb_push(skb, skb->data - skb_mac_header(skb));

        return segs;
}
EXPORT_SYMBOL(skb_mac_gso_segment);

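/* skb_gro_receive() merges @skb into the held packet @p using one of three
 * strategies, tried in this order:
 *  - if all the data to merge lives in page fragments, move the fragment
 *    descriptors over to @p and mark @skb's head for freeing (NAPI_GRO_FREE);
 *  - else, if @skb's head is page-backed (head_frag), steal the head page
 *    as an extra fragment of @p (NAPI_GRO_FREE_STOLEN_HEAD);
 *  - otherwise, chain @skb onto @p via the frag_list.
 * Returns 0 on success, or -E2BIG when the merged packet would reach the
 * device's gro_max_size or a flush was requested.
 */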
int skb_gro_receive(struct sk_buff *p, struct sk_buff *skb)
{
        struct skb_shared_info *pinfo, *skbinfo = skb_shinfo(skb);
        unsigned int offset = skb_gro_offset(skb);
        unsigned int headlen = skb_headlen(skb);
        unsigned int len = skb_gro_len(skb);
        unsigned int delta_truesize;
        unsigned int gro_max_size;
        unsigned int new_truesize;
        struct sk_buff *lp;

        /* pairs with WRITE_ONCE() in netif_set_gro_max_size() */
        gro_max_size = READ_ONCE(p->dev->gro_max_size);

        if (unlikely(p->len + len >= gro_max_size || NAPI_GRO_CB(skb)->flush))
                return -E2BIG;

        lp = NAPI_GRO_CB(p)->last;
        pinfo = skb_shinfo(lp);

        if (headlen <= offset) {
                skb_frag_t *frag;
                skb_frag_t *frag2;
                int i = skbinfo->nr_frags;
                int nr_frags = pinfo->nr_frags + i;

                if (nr_frags > MAX_SKB_FRAGS)
                        goto merge;

                offset -= headlen;
                pinfo->nr_frags = nr_frags;
                skbinfo->nr_frags = 0;

                frag = pinfo->frags + nr_frags;
                frag2 = skbinfo->frags + i;
                do {
                        *--frag = *--frag2;
                } while (--i);

                skb_frag_off_add(frag, offset);
                skb_frag_size_sub(frag, offset);

                /* all fragments' truesize: remove (head size + sk_buff) */
                new_truesize = SKB_TRUESIZE(skb_end_offset(skb));
                delta_truesize = skb->truesize - new_truesize;

                skb->truesize = new_truesize;
                skb->len -= skb->data_len;
                skb->data_len = 0;

                NAPI_GRO_CB(skb)->free = NAPI_GRO_FREE;
                goto done;
        } else if (skb->head_frag) {
                int nr_frags = pinfo->nr_frags;
                skb_frag_t *frag = pinfo->frags + nr_frags;
                struct page *page = virt_to_head_page(skb->head);
                unsigned int first_size = headlen - offset;
                unsigned int first_offset;

                if (nr_frags + 1 + skbinfo->nr_frags > MAX_SKB_FRAGS)
                        goto merge;

                first_offset = skb->data -
                               (unsigned char *)page_address(page) +
                               offset;

                pinfo->nr_frags = nr_frags + 1 + skbinfo->nr_frags;

                __skb_frag_set_page(frag, page);
                skb_frag_off_set(frag, first_offset);
                skb_frag_size_set(frag, first_size);

                memcpy(frag + 1, skbinfo->frags, sizeof(*frag) * skbinfo->nr_frags);
                /* We don't need to clear skbinfo->nr_frags here */

                new_truesize = SKB_DATA_ALIGN(sizeof(struct sk_buff));
                delta_truesize = skb->truesize - new_truesize;
                skb->truesize = new_truesize;
                NAPI_GRO_CB(skb)->free = NAPI_GRO_FREE_STOLEN_HEAD;
                goto done;
        }

merge:
        /* sk ownership - if any - completely transferred to the aggregated packet */
        skb->destructor = NULL;
        delta_truesize = skb->truesize;
        if (offset > headlen) {
                unsigned int eat = offset - headlen;

                skb_frag_off_add(&skbinfo->frags[0], eat);
                skb_frag_size_sub(&skbinfo->frags[0], eat);
                skb->data_len -= eat;
                skb->len -= eat;
                offset = headlen;
        }

        __skb_pull(skb, offset);

        if (NAPI_GRO_CB(p)->last == p)
                skb_shinfo(p)->frag_list = skb;
        else
                NAPI_GRO_CB(p)->last->next = skb;
        NAPI_GRO_CB(p)->last = skb;
        __skb_header_release(skb);
        lp = p;

done:
        NAPI_GRO_CB(p)->count++;
        p->data_len += len;
        p->truesize += delta_truesize;
        p->len += len;
        if (lp != p) {
                lp->data_len += len;
                lp->truesize += delta_truesize;
                lp->len += len;
        }
        NAPI_GRO_CB(skb)->same_flow = 1;
        return 0;
}

static void napi_gro_complete(struct napi_struct *napi, struct sk_buff *skb)
{
        struct packet_offload *ptype;
        __be16 type = skb->protocol;
        struct list_head *head = &offload_base;
        int err = -ENOENT;

        BUILD_BUG_ON(sizeof(struct napi_gro_cb) > sizeof(skb->cb));

        if (NAPI_GRO_CB(skb)->count == 1) {
                skb_shinfo(skb)->gso_size = 0;
                goto out;
        }

        rcu_read_lock();
        list_for_each_entry_rcu(ptype, head, list) {
                if (ptype->type != type || !ptype->callbacks.gro_complete)
                        continue;

                err = INDIRECT_CALL_INET(ptype->callbacks.gro_complete,
                                         ipv6_gro_complete, inet_gro_complete,
                                         skb, 0);
                break;
        }
        rcu_read_unlock();

        if (err) {
                WARN_ON(&ptype->list == head);
                kfree_skb(skb);
                return;
        }

out:
        gro_normal_one(napi, skb, NAPI_GRO_CB(skb)->count);
}

static void __napi_gro_flush_chain(struct napi_struct *napi, u32 index,
                                   bool flush_old)
{
        struct list_head *head = &napi->gro_hash[index].list;
        struct sk_buff *skb, *p;

        list_for_each_entry_safe_reverse(skb, p, head, list) {
                if (flush_old && NAPI_GRO_CB(skb)->age == jiffies)
                        return;
                skb_list_del_init(skb);
                napi_gro_complete(napi, skb);
                napi->gro_hash[index].count--;
        }

        if (!napi->gro_hash[index].count)
                __clear_bit(index, &napi->gro_bitmask);
}

/* napi->gro_hash[].list contains packets ordered by age,
 * youngest packets at the head of the list.
 * Complete skbs in reverse order to reduce latencies.
 */
void napi_gro_flush(struct napi_struct *napi, bool flush_old)
{
        unsigned long bitmask = napi->gro_bitmask;
        unsigned int i, base = ~0U;

        while ((i = ffs(bitmask)) != 0) {
                bitmask >>= i;
                base += i;
                __napi_gro_flush_chain(napi, base, flush_old);
        }
}
EXPORT_SYMBOL(napi_gro_flush);
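
/* Illustrative sketch (not part of this file): napi_gro_flush() is normally
 * invoked by the core from napi_complete_done(); a poll routine that wants to
 * push out held GRO packets explicitly could do so before completing NAPI.
 *
 *      // inside a hypothetical NAPI poll routine, once work < budget:
 *      napi_gro_flush(napi, false);    // false: flush even fresh packets
 *      napi_complete(napi);
 */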

static void gro_list_prepare(const struct list_head *head,
                             const struct sk_buff *skb)
{
        unsigned int maclen = skb->dev->hard_header_len;
        u32 hash = skb_get_hash_raw(skb);
        struct sk_buff *p;

        list_for_each_entry(p, head, list) {
                unsigned long diffs;

                NAPI_GRO_CB(p)->flush = 0;

                if (hash != skb_get_hash_raw(p)) {
                        NAPI_GRO_CB(p)->same_flow = 0;
                        continue;
                }

                diffs = (unsigned long)p->dev ^ (unsigned long)skb->dev;
                diffs |= skb_vlan_tag_present(p) ^ skb_vlan_tag_present(skb);
                if (skb_vlan_tag_present(p))
                        diffs |= skb_vlan_tag_get(p) ^ skb_vlan_tag_get(skb);
                diffs |= skb_metadata_differs(p, skb);
                if (maclen == ETH_HLEN)
                        diffs |= compare_ether_header(skb_mac_header(p),
                                                      skb_mac_header(skb));
                else if (!diffs)
                        diffs = memcmp(skb_mac_header(p),
                                       skb_mac_header(skb),
                                       maclen);

                /* in the most common scenarios 'slow_gro' is 0;
                 * otherwise we are already on some slower paths, so
                 * either skip all the infrequent tests altogether or
                 * avoid trying too hard to skip each of them individually
                 */
                if (!diffs && unlikely(skb->slow_gro | p->slow_gro)) {
#if IS_ENABLED(CONFIG_SKB_EXTENSIONS) && IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
                        struct tc_skb_ext *skb_ext;
                        struct tc_skb_ext *p_ext;
#endif

                        diffs |= p->sk != skb->sk;
                        diffs |= skb_metadata_dst_cmp(p, skb);
                        diffs |= skb_get_nfct(p) ^ skb_get_nfct(skb);

#if IS_ENABLED(CONFIG_SKB_EXTENSIONS) && IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
                        skb_ext = skb_ext_find(skb, TC_SKB_EXT);
                        p_ext = skb_ext_find(p, TC_SKB_EXT);

                        diffs |= (!!p_ext) ^ (!!skb_ext);
                        if (!diffs && unlikely(skb_ext))
                                diffs |= p_ext->chain ^ skb_ext->chain;
#endif
                }

                NAPI_GRO_CB(p)->same_flow = !diffs;
        }
}

static inline void skb_gro_reset_offset(struct sk_buff *skb, u32 nhoff)
{
        const struct skb_shared_info *pinfo = skb_shinfo(skb);
        const skb_frag_t *frag0 = &pinfo->frags[0];

        NAPI_GRO_CB(skb)->data_offset = 0;
        NAPI_GRO_CB(skb)->frag0 = NULL;
        NAPI_GRO_CB(skb)->frag0_len = 0;

        if (!skb_headlen(skb) && pinfo->nr_frags &&
            !PageHighMem(skb_frag_page(frag0)) &&
            (!NET_IP_ALIGN || !((skb_frag_off(frag0) + nhoff) & 3))) {
                NAPI_GRO_CB(skb)->frag0 = skb_frag_address(frag0);
                NAPI_GRO_CB(skb)->frag0_len = min_t(unsigned int,
                                                    skb_frag_size(frag0),
                                                    skb->end - skb->tail);
        }
}

static void gro_pull_from_frag0(struct sk_buff *skb, int grow)
{
        struct skb_shared_info *pinfo = skb_shinfo(skb);

        BUG_ON(skb->end - skb->tail < grow);

        memcpy(skb_tail_pointer(skb), NAPI_GRO_CB(skb)->frag0, grow);

        skb->data_len -= grow;
        skb->tail += grow;

        skb_frag_off_add(&pinfo->frags[0], grow);
        skb_frag_size_sub(&pinfo->frags[0], grow);

        if (unlikely(!skb_frag_size(&pinfo->frags[0]))) {
                skb_frag_unref(skb, 0);
                memmove(pinfo->frags, pinfo->frags + 1,
                        --pinfo->nr_frags * sizeof(pinfo->frags[0]));
        }
}

static void gro_flush_oldest(struct napi_struct *napi, struct list_head *head)
{
        struct sk_buff *oldest;

        oldest = list_last_entry(head, struct sk_buff, list);

        /* We are called with head length >= MAX_GRO_SKBS, so this is
         * impossible.
         */
        if (WARN_ON_ONCE(!oldest))
                return;

        /* Do not adjust napi->gro_hash[].count, caller is adding a new
         * SKB to the chain.
         */
        skb_list_del_init(oldest);
        napi_gro_complete(napi, oldest);
}

static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
{
        u32 bucket = skb_get_hash_raw(skb) & (GRO_HASH_BUCKETS - 1);
        struct gro_list *gro_list = &napi->gro_hash[bucket];
        struct list_head *head = &offload_base;
        struct packet_offload *ptype;
        __be16 type = skb->protocol;
        struct sk_buff *pp = NULL;
        enum gro_result ret;
        int same_flow;
        int grow;

        if (netif_elide_gro(skb->dev))
                goto normal;

        gro_list_prepare(&gro_list->list, skb);

        rcu_read_lock();
        list_for_each_entry_rcu(ptype, head, list) {
                if (ptype->type != type || !ptype->callbacks.gro_receive)
                        continue;

                skb_set_network_header(skb, skb_gro_offset(skb));
                skb_reset_mac_len(skb);
                BUILD_BUG_ON(sizeof_field(struct napi_gro_cb, zeroed) != sizeof(u32));
                BUILD_BUG_ON(!IS_ALIGNED(offsetof(struct napi_gro_cb, zeroed),
                                         sizeof(u32))); /* Avoid slow unaligned access */
                *(u32 *)&NAPI_GRO_CB(skb)->zeroed = 0;
                NAPI_GRO_CB(skb)->flush = skb_is_gso(skb) || skb_has_frag_list(skb);
                NAPI_GRO_CB(skb)->is_atomic = 1;

                /* Setup for GRO checksum validation */
                switch (skb->ip_summed) {
                case CHECKSUM_COMPLETE:
                        NAPI_GRO_CB(skb)->csum = skb->csum;
                        NAPI_GRO_CB(skb)->csum_valid = 1;
                        break;
                case CHECKSUM_UNNECESSARY:
                        NAPI_GRO_CB(skb)->csum_cnt = skb->csum_level + 1;
                        break;
                }

                pp = INDIRECT_CALL_INET(ptype->callbacks.gro_receive,
                                        ipv6_gro_receive, inet_gro_receive,
                                        &gro_list->list, skb);
                break;
        }
        rcu_read_unlock();

        if (&ptype->list == head)
                goto normal;

        if (PTR_ERR(pp) == -EINPROGRESS) {
                ret = GRO_CONSUMED;
                goto ok;
        }

        same_flow = NAPI_GRO_CB(skb)->same_flow;
        ret = NAPI_GRO_CB(skb)->free ? GRO_MERGED_FREE : GRO_MERGED;

        if (pp) {
                skb_list_del_init(pp);
                napi_gro_complete(napi, pp);
                gro_list->count--;
        }

        if (same_flow)
                goto ok;

        if (NAPI_GRO_CB(skb)->flush)
                goto normal;

        if (unlikely(gro_list->count >= MAX_GRO_SKBS))
                gro_flush_oldest(napi, &gro_list->list);
        else
                gro_list->count++;

        NAPI_GRO_CB(skb)->count = 1;
        NAPI_GRO_CB(skb)->age = jiffies;
        NAPI_GRO_CB(skb)->last = skb;
        skb_shinfo(skb)->gso_size = skb_gro_len(skb);
        list_add(&skb->list, &gro_list->list);
        ret = GRO_HELD;

pull:
        grow = skb_gro_offset(skb) - skb_headlen(skb);
        if (grow > 0)
                gro_pull_from_frag0(skb, grow);
ok:
        if (gro_list->count) {
                if (!test_bit(bucket, &napi->gro_bitmask))
                        __set_bit(bucket, &napi->gro_bitmask);
        } else if (test_bit(bucket, &napi->gro_bitmask)) {
                __clear_bit(bucket, &napi->gro_bitmask);
        }

        return ret;

normal:
        ret = GRO_NORMAL;
        goto pull;
}

struct packet_offload *gro_find_receive_by_type(__be16 type)
{
        struct list_head *offload_head = &offload_base;
        struct packet_offload *ptype;

        list_for_each_entry_rcu(ptype, offload_head, list) {
                if (ptype->type != type || !ptype->callbacks.gro_receive)
                        continue;
                return ptype;
        }
        return NULL;
}
EXPORT_SYMBOL(gro_find_receive_by_type);

struct packet_offload *gro_find_complete_by_type(__be16 type)
{
        struct list_head *offload_head = &offload_base;
        struct packet_offload *ptype;

        list_for_each_entry_rcu(ptype, offload_head, list) {
                if (ptype->type != type || !ptype->callbacks.gro_complete)
                        continue;
                return ptype;
        }
        return NULL;
}
EXPORT_SYMBOL(gro_find_complete_by_type);
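
/* Illustrative sketch (not part of this file): a hypothetical tunnel
 * gro_complete handler using the lookup above to hand off to the inner
 * protocol. The lookups rely on RCU list traversal, so this runs under
 * rcu_read_lock(); my_tunnel_gro_complete() and MY_HLEN are assumptions.
 *
 *      static int my_tunnel_gro_complete(struct sk_buff *skb, int nhoff)
 *      {
 *              struct packet_offload *ptype;
 *
 *              ptype = gro_find_complete_by_type(htons(ETH_P_IP));
 *              if (!ptype)
 *                      return -ENOENT;
 *              return ptype->callbacks.gro_complete(skb, nhoff + MY_HLEN);
 *      }
 */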

static gro_result_t napi_skb_finish(struct napi_struct *napi,
                                    struct sk_buff *skb,
                                    gro_result_t ret)
{
        switch (ret) {
        case GRO_NORMAL:
                gro_normal_one(napi, skb, 1);
                break;

        case GRO_MERGED_FREE:
                if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD)
                        napi_skb_free_stolen_head(skb);
                else if (skb->fclone != SKB_FCLONE_UNAVAILABLE)
                        __kfree_skb(skb);
                else
                        __kfree_skb_defer(skb);
                break;

        case GRO_HELD:
        case GRO_MERGED:
        case GRO_CONSUMED:
                break;
        }

        return ret;
}

gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
{
        gro_result_t ret;

        skb_mark_napi_id(skb, napi);
        trace_napi_gro_receive_entry(skb);

        skb_gro_reset_offset(skb, 0);

        ret = napi_skb_finish(napi, skb, dev_gro_receive(napi, skb));
        trace_napi_gro_receive_exit(ret);

        return ret;
}
EXPORT_SYMBOL(napi_gro_receive);
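
/* Illustrative sketch (not part of this file): the usual driver entry point.
 * An RX poll loop builds an skb per received frame, sets its protocol, and
 * hands it to GRO instead of netif_receive_skb(). my_poll() and
 * my_build_rx_skb() are hypothetical.
 *
 *      static int my_poll(struct napi_struct *napi, int budget)
 *      {
 *              int work = 0;
 *
 *              while (work < budget) {
 *                      struct sk_buff *skb = my_build_rx_skb();
 *
 *                      if (!skb)
 *                              break;
 *                      skb->protocol = eth_type_trans(skb, skb->dev);
 *                      napi_gro_receive(napi, skb);
 *                      work++;
 *              }
 *              if (work < budget)
 *                      napi_complete_done(napi, work);
 *              return work;
 *      }
 */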

static void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb)
{
        if (unlikely(skb->pfmemalloc)) {
                consume_skb(skb);
                return;
        }
        __skb_pull(skb, skb_headlen(skb));
        /* restore the reserve we had after netdev_alloc_skb_ip_align() */
        skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN - skb_headroom(skb));
        __vlan_hwaccel_clear_tag(skb);
        skb->dev = napi->dev;
        skb->skb_iif = 0;

        /* eth_type_trans() assumes pkt_type is PACKET_HOST */
        skb->pkt_type = PACKET_HOST;

        skb->encapsulation = 0;
        skb_shinfo(skb)->gso_type = 0;
        if (unlikely(skb->slow_gro)) {
                skb_orphan(skb);
                skb_ext_reset(skb);
                nf_reset_ct(skb);
                skb->slow_gro = 0;
        }

        napi->skb = skb;
}

struct sk_buff *napi_get_frags(struct napi_struct *napi)
{
        struct sk_buff *skb = napi->skb;

        if (!skb) {
                skb = napi_alloc_skb(napi, GRO_MAX_HEAD);
                if (skb) {
                        napi->skb = skb;
                        skb_mark_napi_id(skb, napi);
                }
        }
        return skb;
}
EXPORT_SYMBOL(napi_get_frags);

static gro_result_t napi_frags_finish(struct napi_struct *napi,
                                      struct sk_buff *skb,
                                      gro_result_t ret)
{
        switch (ret) {
        case GRO_NORMAL:
        case GRO_HELD:
                __skb_push(skb, ETH_HLEN);
                skb->protocol = eth_type_trans(skb, skb->dev);
                if (ret == GRO_NORMAL)
                        gro_normal_one(napi, skb, 1);
                break;

        case GRO_MERGED_FREE:
                if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD)
                        napi_skb_free_stolen_head(skb);
                else
                        napi_reuse_skb(napi, skb);
                break;

        case GRO_MERGED:
        case GRO_CONSUMED:
                break;
        }

        return ret;
}

/* The upper GRO stack assumes the network header starts at gro_offset=0.
 * Drivers could call both napi_gro_frags() and napi_gro_receive(),
 * so we copy the Ethernet header into skb->data to have a common layout.
 */
static struct sk_buff *napi_frags_skb(struct napi_struct *napi)
{
        struct sk_buff *skb = napi->skb;
        const struct ethhdr *eth;
        unsigned int hlen = sizeof(*eth);

        napi->skb = NULL;

        skb_reset_mac_header(skb);
        skb_gro_reset_offset(skb, hlen);

        if (unlikely(skb_gro_header_hard(skb, hlen))) {
                eth = skb_gro_header_slow(skb, hlen, 0);
                if (unlikely(!eth)) {
                        net_warn_ratelimited("%s: dropping impossible skb from %s\n",
                                             __func__, napi->dev->name);
                        napi_reuse_skb(napi, skb);
                        return NULL;
                }
        } else {
                eth = (const struct ethhdr *)skb->data;
                gro_pull_from_frag0(skb, hlen);
                NAPI_GRO_CB(skb)->frag0 += hlen;
                NAPI_GRO_CB(skb)->frag0_len -= hlen;
        }
        __skb_pull(skb, hlen);

        /*
         * This works because the only protocols we care about don't require
         * special handling.
         * We'll fix it up properly in napi_frags_finish().
         */
        skb->protocol = eth->h_proto;

        return skb;
}

gro_result_t napi_gro_frags(struct napi_struct *napi)
{
        gro_result_t ret;
        struct sk_buff *skb = napi_frags_skb(napi);

        trace_napi_gro_frags_entry(skb);

        ret = napi_frags_finish(napi, skb, dev_gro_receive(napi, skb));
        trace_napi_gro_frags_exit(ret);

        return ret;
}
EXPORT_SYMBOL(napi_gro_frags);
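
/* Illustrative sketch (not part of this file): the napi_get_frags() /
 * napi_gro_frags() pattern used by drivers that receive directly into pages.
 * my_rx_page(), my_rx_offset and my_rx_len are hypothetical.
 *
 *      struct sk_buff *skb = napi_get_frags(napi);
 *
 *      if (!skb)
 *              return;         // allocation failure, drop the frame
 *      skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, my_rx_page(),
 *                      my_rx_offset, my_rx_len, PAGE_SIZE);
 *      napi_gro_frags(napi);   // consumes napi->skb
 */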

/* Compute the checksum from gro_offset and return the folded value
 * after adding in any pseudo checksum.
 */
__sum16 __skb_gro_checksum_complete(struct sk_buff *skb)
{
        __wsum wsum;
        __sum16 sum;

        wsum = skb_checksum(skb, skb_gro_offset(skb), skb_gro_len(skb), 0);

        /* NAPI_GRO_CB(skb)->csum holds pseudo checksum */
        sum = csum_fold(csum_add(NAPI_GRO_CB(skb)->csum, wsum));
        /* See comments in __skb_checksum_complete(). */
        if (likely(!sum)) {
                if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) &&
                    !skb->csum_complete_sw)
                        netdev_rx_csum_fault(skb->dev, skb);
        }

        NAPI_GRO_CB(skb)->csum = wsum;
        NAPI_GRO_CB(skb)->csum_valid = 1;

        return sum;
}
EXPORT_SYMBOL(__skb_gro_checksum_complete);
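
/* Illustrative sketch (not part of this file): protocol gro_receive handlers
 * normally reach __skb_gro_checksum_complete() through the
 * skb_gro_checksum_validate() helpers from <net/gro.h>, e.g. for TCP/IPv4:
 *
 *      if (skb_gro_checksum_validate(skb, IPPROTO_TCP,
 *                                    inet_gro_compute_pseudo)) {
 *              NAPI_GRO_CB(skb)->flush = 1;    // bad checksum, don't merge
 *              return NULL;
 *      }
 */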