// SPDX-License-Identifier: GPL-2.0-or-later
#include <net/gro.h>
#include <net/dst_metadata.h>
#include <net/busy_poll.h>
#include <trace/events/net.h>

#define MAX_GRO_SKBS 8

/* This should be increased if a protocol with a bigger head is added. */
#define GRO_MAX_HEAD (MAX_HEADER + 128)

static DEFINE_SPINLOCK(offload_lock);

/**
 *	dev_add_offload - register offload handlers
 *	@po: protocol offload declaration
 *
 *	Add protocol offload handlers to the networking stack. The passed
 *	&proto_offload is linked into kernel lists and may not be freed until
 *	it has been removed from the kernel lists.
 *
 *	This call does not sleep, therefore it can not guarantee that all
 *	CPUs that are in the middle of receiving packets will see the new
 *	offload handlers (until the next received packet).
 */
void dev_add_offload(struct packet_offload *po)
{
	struct packet_offload *elem;

	spin_lock(&offload_lock);
	list_for_each_entry(elem, &net_hotdata.offload_base, list) {
		if (po->priority < elem->priority)
			break;
	}
	list_add_rcu(&po->list, elem->list.prev);
	spin_unlock(&offload_lock);
}
EXPORT_SYMBOL(dev_add_offload);
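
/* Illustrative usage (not part of this file): a protocol typically registers
 * its GRO callbacks once at init time with a static packet_offload, e.g.
 *
 *	static struct packet_offload foo_offload __read_mostly = {
 *		.type = cpu_to_be16(ETH_P_FOO),	// hypothetical ethertype
 *		.callbacks = {
 *			.gro_receive  = foo_gro_receive,
 *			.gro_complete = foo_gro_complete,
 *		},
 *	};
 *
 *	dev_add_offload(&foo_offload);
 *
 * ETH_P_FOO and the foo_* callbacks are placeholders; the real registrations
 * live in the IPv4/IPv6 offload init code.
 */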

/**
 *	__dev_remove_offload	 - remove offload handler
 *	@po: packet offload declaration
 *
 *	Remove a protocol offload handler that was previously added to the
 *	kernel offload handlers by dev_add_offload(). The passed &offload_type
 *	is removed from the kernel lists and can be freed or reused once this
 *	function returns.
 *
 *	The packet type might still be in use by receivers
 *	and must not be freed until after all CPUs have gone
 *	through a quiescent state.
 */
static void __dev_remove_offload(struct packet_offload *po)
{
	struct list_head *head = &net_hotdata.offload_base;
	struct packet_offload *po1;

	spin_lock(&offload_lock);

	list_for_each_entry(po1, head, list) {
		if (po == po1) {
			list_del_rcu(&po->list);
			goto out;
		}
	}

	pr_warn("dev_remove_offload: %p not found\n", po);
out:
	spin_unlock(&offload_lock);
}

/**
 *	dev_remove_offload	 - remove packet offload handler
 *	@po: packet offload declaration
 *
 *	Remove a packet offload handler that was previously added to the kernel
 *	offload handlers by dev_add_offload(). The passed &offload_type is
 *	removed from the kernel lists and can be freed or reused once this
 *	function returns.
 *
 *	This call sleeps to guarantee that no CPU is looking at the packet
 *	type after return.
 */
void dev_remove_offload(struct packet_offload *po)
{
	__dev_remove_offload(po);
	synchronize_net();
}
EXPORT_SYMBOL(dev_remove_offload);
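
/* skb_gro_receive() merges @skb into the already-held packet @p of the same
 * flow. Depending on the layout of @skb it either appends @skb's page frags
 * to @p, steals @skb's head as an extra page frag, or chains the whole @skb
 * onto @p's frag_list. It returns 0 on success or a negative errno when the
 * two packets cannot be coalesced.
 */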

int skb_gro_receive(struct sk_buff *p, struct sk_buff *skb)
{
	struct skb_shared_info *pinfo, *skbinfo = skb_shinfo(skb);
	unsigned int offset = skb_gro_offset(skb);
	unsigned int headlen = skb_headlen(skb);
	unsigned int len = skb_gro_len(skb);
	unsigned int delta_truesize;
	unsigned int gro_max_size;
	unsigned int new_truesize;
	struct sk_buff *lp;
	int segs;

	/* Do not splice page pool based packets w/ non-page pool
	 * packets. This can result in reference count issues as page
	 * pool pages will not decrement the reference count and will
	 * instead be immediately returned to the pool or have frag
	 * count decremented.
	 */
	if (p->pp_recycle != skb->pp_recycle)
		return -ETOOMANYREFS;

	/* pairs with WRITE_ONCE() in netif_set_gro(_ipv4)_max_size() */
	gro_max_size = p->protocol == htons(ETH_P_IPV6) ?
			READ_ONCE(p->dev->gro_max_size) :
			READ_ONCE(p->dev->gro_ipv4_max_size);

	if (unlikely(p->len + len >= gro_max_size || NAPI_GRO_CB(skb)->flush))
		return -E2BIG;

	if (unlikely(p->len + len >= GRO_LEGACY_MAX_SIZE)) {
		if (NAPI_GRO_CB(skb)->proto != IPPROTO_TCP ||
		    (p->protocol == htons(ETH_P_IPV6) &&
		     skb_headroom(p) < sizeof(struct hop_jumbo_hdr)) ||
		    p->encapsulation)
			return -E2BIG;
	}

	segs = NAPI_GRO_CB(skb)->count;
	lp = NAPI_GRO_CB(p)->last;
	pinfo = skb_shinfo(lp);

	if (headlen <= offset) {
		skb_frag_t *frag;
		skb_frag_t *frag2;
		int i = skbinfo->nr_frags;
		int nr_frags = pinfo->nr_frags + i;

		if (nr_frags > MAX_SKB_FRAGS)
			goto merge;

		offset -= headlen;
		pinfo->nr_frags = nr_frags;
		skbinfo->nr_frags = 0;

		frag = pinfo->frags + nr_frags;
		frag2 = skbinfo->frags + i;
		do {
			*--frag = *--frag2;
		} while (--i);

		skb_frag_off_add(frag, offset);
		skb_frag_size_sub(frag, offset);

		/* all fragments truesize : remove (head size + sk_buff) */
		new_truesize = SKB_TRUESIZE(skb_end_offset(skb));
		delta_truesize = skb->truesize - new_truesize;

		skb->truesize = new_truesize;
		skb->len -= skb->data_len;
		skb->data_len = 0;

		NAPI_GRO_CB(skb)->free = NAPI_GRO_FREE;
		goto done;
	} else if (skb->head_frag) {
		int nr_frags = pinfo->nr_frags;
		skb_frag_t *frag = pinfo->frags + nr_frags;
		struct page *page = virt_to_head_page(skb->head);
		unsigned int first_size = headlen - offset;
		unsigned int first_offset;

		if (nr_frags + 1 + skbinfo->nr_frags > MAX_SKB_FRAGS)
			goto merge;

		first_offset = skb->data -
			       (unsigned char *)page_address(page) +
			       offset;

		pinfo->nr_frags = nr_frags + 1 + skbinfo->nr_frags;

		skb_frag_fill_page_desc(frag, page, first_offset, first_size);

		memcpy(frag + 1, skbinfo->frags, sizeof(*frag) * skbinfo->nr_frags);
		/* We don't need to clear skbinfo->nr_frags here */

		new_truesize = SKB_DATA_ALIGN(sizeof(struct sk_buff));
		delta_truesize = skb->truesize - new_truesize;
		skb->truesize = new_truesize;
		NAPI_GRO_CB(skb)->free = NAPI_GRO_FREE_STOLEN_HEAD;
		goto done;
	}

merge:
	/* sk ownership - if any - completely transferred to the aggregated packet */
	skb->destructor = NULL;
	delta_truesize = skb->truesize;
	if (offset > headlen) {
		unsigned int eat = offset - headlen;

		skb_frag_off_add(&skbinfo->frags[0], eat);
		skb_frag_size_sub(&skbinfo->frags[0], eat);
		skb->data_len -= eat;
		skb->len -= eat;
		offset = headlen;
	}

	__skb_pull(skb, offset);

	if (NAPI_GRO_CB(p)->last == p)
		skb_shinfo(p)->frag_list = skb;
	else
		NAPI_GRO_CB(p)->last->next = skb;
	NAPI_GRO_CB(p)->last = skb;
	__skb_header_release(skb);
	lp = p;

done:
	NAPI_GRO_CB(p)->count += segs;
	p->data_len += len;
	p->truesize += delta_truesize;
	p->len += len;
	if (lp != p) {
		lp->data_len += len;
		lp->truesize += delta_truesize;
		lp->len += len;
	}
	NAPI_GRO_CB(skb)->same_flow = 1;
	return 0;
}
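
/* napi_gro_complete() hands a held GRO packet to the upper stack: it invokes
 * the matching ->gro_complete() callback, which finalizes the aggregated
 * headers, and then queues the skb for delivery via gro_normal_one(). A
 * packet that was never merged (count == 1) skips the callback and is
 * delivered as-is.
 */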

static void napi_gro_complete(struct napi_struct *napi, struct sk_buff *skb)
{
	struct list_head *head = &net_hotdata.offload_base;
	struct packet_offload *ptype;
	__be16 type = skb->protocol;
	int err = -ENOENT;

	BUILD_BUG_ON(sizeof(struct napi_gro_cb) > sizeof(skb->cb));

	if (NAPI_GRO_CB(skb)->count == 1) {
		skb_shinfo(skb)->gso_size = 0;
		goto out;
	}

	rcu_read_lock();
	list_for_each_entry_rcu(ptype, head, list) {
		if (ptype->type != type || !ptype->callbacks.gro_complete)
			continue;

		err = INDIRECT_CALL_INET(ptype->callbacks.gro_complete,
					 ipv6_gro_complete, inet_gro_complete,
					 skb, 0);
		break;
	}
	rcu_read_unlock();

	if (err) {
		WARN_ON(&ptype->list == head);
		kfree_skb(skb);
		return;
	}

out:
	gro_normal_one(napi, skb, NAPI_GRO_CB(skb)->count);
}

static void __napi_gro_flush_chain(struct napi_struct *napi, u32 index,
				   bool flush_old)
{
	struct list_head *head = &napi->gro_hash[index].list;
	struct sk_buff *skb, *p;

	list_for_each_entry_safe_reverse(skb, p, head, list) {
		if (flush_old && NAPI_GRO_CB(skb)->age == jiffies)
			return;
		skb_list_del_init(skb);
		napi_gro_complete(napi, skb);
		napi->gro_hash[index].count--;
	}

	if (!napi->gro_hash[index].count)
		__clear_bit(index, &napi->gro_bitmask);
}

/* napi->gro_hash[].list contains packets ordered by age,
 * with the youngest packets at its head.
 * Complete skbs in reverse order to reduce latencies.
 */
void napi_gro_flush(struct napi_struct *napi, bool flush_old)
{
	unsigned long bitmask = napi->gro_bitmask;
	unsigned int i, base = ~0U;

	while ((i = ffs(bitmask)) != 0) {
		bitmask >>= i;
		base += i;
		__napi_gro_flush_chain(napi, base, flush_old);
	}
}
EXPORT_SYMBOL(napi_gro_flush);
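
/* gro_list_prepare() walks one GRO hash bucket and marks each held packet as
 * same_flow (a merge candidate for the new skb) or not. Packets must share
 * the rx hash, the device, the VLAN tag and the MAC header; less common
 * state (socket, dst metadata, conntrack, tc extensions) is only compared
 * when either packet is flagged slow_gro.
 */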

static unsigned long gro_list_prepare_tc_ext(const struct sk_buff *skb,
					     const struct sk_buff *p,
					     unsigned long diffs)
{
#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
	struct tc_skb_ext *skb_ext;
	struct tc_skb_ext *p_ext;

	skb_ext = skb_ext_find(skb, TC_SKB_EXT);
	p_ext = skb_ext_find(p, TC_SKB_EXT);

	diffs |= (!!p_ext) ^ (!!skb_ext);
	if (!diffs && unlikely(skb_ext))
		diffs |= p_ext->chain ^ skb_ext->chain;
#endif

	return diffs;
}

static void gro_list_prepare(const struct list_head *head,
			     const struct sk_buff *skb)
{
	unsigned int maclen = skb->dev->hard_header_len;
	u32 hash = skb_get_hash_raw(skb);
	struct sk_buff *p;

	list_for_each_entry(p, head, list) {
		unsigned long diffs;

		NAPI_GRO_CB(p)->flush = 0;

		if (hash != skb_get_hash_raw(p)) {
			NAPI_GRO_CB(p)->same_flow = 0;
			continue;
		}

		diffs = (unsigned long)p->dev ^ (unsigned long)skb->dev;
		diffs |= p->vlan_all ^ skb->vlan_all;
		diffs |= skb_metadata_differs(p, skb);
		if (maclen == ETH_HLEN)
			diffs |= compare_ether_header(skb_mac_header(p),
						      skb_mac_header(skb));
		else if (!diffs)
			diffs = memcmp(skb_mac_header(p),
				       skb_mac_header(skb),
				       maclen);

		/* in most common scenarios 'slow_gro' is 0
		 * otherwise we are already on some slower paths
		 * either skip all the infrequent tests altogether or
		 * avoid trying too hard to skip each of them individually
		 */
		if (!diffs && unlikely(skb->slow_gro | p->slow_gro)) {
			diffs |= p->sk != skb->sk;
			diffs |= skb_metadata_dst_cmp(p, skb);
			diffs |= skb_get_nfct(p) ^ skb_get_nfct(skb);

			diffs |= gro_list_prepare_tc_ext(skb, p, diffs);
		}

		NAPI_GRO_CB(p)->same_flow = !diffs;
	}
}
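
/* When an skb arrives with an empty linear area (typical for the
 * napi_gro_frags() path), GRO reads protocol headers directly from the first
 * page fragment. frag0/frag0_len cache that fragment's address and length so
 * header access does not require pulling data into the linear area first.
 */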

static inline void skb_gro_reset_offset(struct sk_buff *skb, u32 nhoff)
{
	const struct skb_shared_info *pinfo;
	const skb_frag_t *frag0;
	unsigned int headlen;

	NAPI_GRO_CB(skb)->data_offset = 0;
	headlen = skb_headlen(skb);
	NAPI_GRO_CB(skb)->frag0 = skb->data;
	NAPI_GRO_CB(skb)->frag0_len = headlen;
	if (headlen)
		return;

	pinfo = skb_shinfo(skb);
	frag0 = &pinfo->frags[0];

	if (pinfo->nr_frags && !PageHighMem(skb_frag_page(frag0)) &&
	    (!NET_IP_ALIGN || !((skb_frag_off(frag0) + nhoff) & 3))) {
		NAPI_GRO_CB(skb)->frag0 = skb_frag_address(frag0);
		NAPI_GRO_CB(skb)->frag0_len = min_t(unsigned int,
						    skb_frag_size(frag0),
						    skb->end - skb->tail);
	}
}

static void gro_pull_from_frag0(struct sk_buff *skb, int grow)
{
	struct skb_shared_info *pinfo = skb_shinfo(skb);

	BUG_ON(skb->end - skb->tail < grow);

	memcpy(skb_tail_pointer(skb), NAPI_GRO_CB(skb)->frag0, grow);

	skb->data_len -= grow;
	skb->tail += grow;

	skb_frag_off_add(&pinfo->frags[0], grow);
	skb_frag_size_sub(&pinfo->frags[0], grow);

	if (unlikely(!skb_frag_size(&pinfo->frags[0]))) {
		skb_frag_unref(skb, 0);
		memmove(pinfo->frags, pinfo->frags + 1,
			--pinfo->nr_frags * sizeof(pinfo->frags[0]));
	}
}

static void gro_try_pull_from_frag0(struct sk_buff *skb)
{
	int grow = skb_gro_offset(skb) - skb_headlen(skb);

	if (grow > 0)
		gro_pull_from_frag0(skb, grow);
}

static void gro_flush_oldest(struct napi_struct *napi, struct list_head *head)
{
	struct sk_buff *oldest;

	oldest = list_last_entry(head, struct sk_buff, list);

	/* We are called with head length >= MAX_GRO_SKBS, so this is
	 * impossible.
	 */
	if (WARN_ON_ONCE(!oldest))
		return;

	/* Do not adjust napi->gro_hash[].count, caller is adding a new
	 * SKB to the chain.
	 */
	skb_list_del_init(oldest);
	napi_gro_complete(napi, oldest);
}
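
/* dev_gro_receive() is the core GRO entry point for one skb: it selects the
 * hash bucket, lets the matching protocol ->gro_receive() callback try to
 * merge the skb with a held packet of the same flow, and returns a gro_result
 * (GRO_MERGED, GRO_MERGED_FREE, GRO_HELD, GRO_NORMAL or GRO_CONSUMED) that
 * napi_skb_finish()/napi_frags_finish() then act upon.
 */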

static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
{
	u32 bucket = skb_get_hash_raw(skb) & (GRO_HASH_BUCKETS - 1);
	struct gro_list *gro_list = &napi->gro_hash[bucket];
	struct list_head *head = &net_hotdata.offload_base;
	struct packet_offload *ptype;
	__be16 type = skb->protocol;
	struct sk_buff *pp = NULL;
	enum gro_result ret;
	int same_flow;

	if (netif_elide_gro(skb->dev))
		goto normal;

	gro_list_prepare(&gro_list->list, skb);

	rcu_read_lock();
	list_for_each_entry_rcu(ptype, head, list) {
		if (ptype->type == type && ptype->callbacks.gro_receive)
			goto found_ptype;
	}
	rcu_read_unlock();
	goto normal;

found_ptype:
	skb_set_network_header(skb, skb_gro_offset(skb));
	skb_reset_mac_len(skb);
	BUILD_BUG_ON(sizeof_field(struct napi_gro_cb, zeroed) != sizeof(u32));
	BUILD_BUG_ON(!IS_ALIGNED(offsetof(struct napi_gro_cb, zeroed),
				 sizeof(u32))); /* Avoid slow unaligned acc */
	*(u32 *)&NAPI_GRO_CB(skb)->zeroed = 0;
	NAPI_GRO_CB(skb)->flush = skb_has_frag_list(skb);
	NAPI_GRO_CB(skb)->is_atomic = 1;
	NAPI_GRO_CB(skb)->count = 1;
	if (unlikely(skb_is_gso(skb))) {
		NAPI_GRO_CB(skb)->count = skb_shinfo(skb)->gso_segs;
		/* Only support TCP and non DODGY users. */
		if (!skb_is_gso_tcp(skb) ||
		    (skb_shinfo(skb)->gso_type & SKB_GSO_DODGY))
			NAPI_GRO_CB(skb)->flush = 1;
	}

	/* Setup for GRO checksum validation */
	switch (skb->ip_summed) {
	case CHECKSUM_COMPLETE:
		NAPI_GRO_CB(skb)->csum = skb->csum;
		NAPI_GRO_CB(skb)->csum_valid = 1;
		break;
	case CHECKSUM_UNNECESSARY:
		NAPI_GRO_CB(skb)->csum_cnt = skb->csum_level + 1;
		break;
	}

	pp = INDIRECT_CALL_INET(ptype->callbacks.gro_receive,
				ipv6_gro_receive, inet_gro_receive,
				&gro_list->list, skb);

	rcu_read_unlock();

	if (PTR_ERR(pp) == -EINPROGRESS) {
		ret = GRO_CONSUMED;
		goto ok;
	}

	same_flow = NAPI_GRO_CB(skb)->same_flow;
	ret = NAPI_GRO_CB(skb)->free ? GRO_MERGED_FREE : GRO_MERGED;

	if (pp) {
		skb_list_del_init(pp);
		napi_gro_complete(napi, pp);
		gro_list->count--;
	}

	if (same_flow)
		goto ok;

	if (NAPI_GRO_CB(skb)->flush)
		goto normal;

	if (unlikely(gro_list->count >= MAX_GRO_SKBS))
		gro_flush_oldest(napi, &gro_list->list);
	else
		gro_list->count++;

	/* Must be called before setting NAPI_GRO_CB(skb)->{age|last} */
	gro_try_pull_from_frag0(skb);
	NAPI_GRO_CB(skb)->age = jiffies;
	NAPI_GRO_CB(skb)->last = skb;
	if (!skb_is_gso(skb))
		skb_shinfo(skb)->gso_size = skb_gro_len(skb);
	list_add(&skb->list, &gro_list->list);
	ret = GRO_HELD;

ok:
	if (gro_list->count) {
		if (!test_bit(bucket, &napi->gro_bitmask))
			__set_bit(bucket, &napi->gro_bitmask);
	} else if (test_bit(bucket, &napi->gro_bitmask)) {
		__clear_bit(bucket, &napi->gro_bitmask);
	}

	return ret;

normal:
	ret = GRO_NORMAL;
	gro_try_pull_from_frag0(skb);
	goto ok;
}
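
/* The lookup helpers below are used by encapsulating protocols (e.g. the GRE
 * and UDP tunnel offloads) to find the inner protocol's GRO callbacks. They
 * walk an RCU-protected list, so callers are expected to be in an RCU
 * read-side section, which is already the case in the GRO receive path.
 */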

struct packet_offload *gro_find_receive_by_type(__be16 type)
{
	struct list_head *offload_head = &net_hotdata.offload_base;
	struct packet_offload *ptype;

	list_for_each_entry_rcu(ptype, offload_head, list) {
		if (ptype->type != type || !ptype->callbacks.gro_receive)
			continue;
		return ptype;
	}
	return NULL;
}
EXPORT_SYMBOL(gro_find_receive_by_type);

struct packet_offload *gro_find_complete_by_type(__be16 type)
{
	struct list_head *offload_head = &net_hotdata.offload_base;
	struct packet_offload *ptype;

	list_for_each_entry_rcu(ptype, offload_head, list) {
		if (ptype->type != type || !ptype->callbacks.gro_complete)
			continue;
		return ptype;
	}
	return NULL;
}
EXPORT_SYMBOL(gro_find_complete_by_type);
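
/* napi_skb_finish() translates the gro_result of dev_gro_receive() into an
 * action on the skb: GRO_NORMAL packets are passed up the stack right away,
 * GRO_MERGED_FREE packets have had their data absorbed into another skb so
 * only the skb metadata is freed, and GRO_HELD/GRO_MERGED/GRO_CONSUMED
 * packets are left alone because the GRO layer still owns them.
 */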

static gro_result_t napi_skb_finish(struct napi_struct *napi,
				    struct sk_buff *skb,
				    gro_result_t ret)
{
	switch (ret) {
	case GRO_NORMAL:
		gro_normal_one(napi, skb, 1);
		break;

	case GRO_MERGED_FREE:
		if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD)
			napi_skb_free_stolen_head(skb);
		else if (skb->fclone != SKB_FCLONE_UNAVAILABLE)
			__kfree_skb(skb);
		else
			__napi_kfree_skb(skb, SKB_CONSUMED);
		break;

	case GRO_HELD:
	case GRO_MERGED:
	case GRO_CONSUMED:
		break;
	}

	return ret;
}

gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
{
	gro_result_t ret;

	skb_mark_napi_id(skb, napi);
	trace_napi_gro_receive_entry(skb);

	skb_gro_reset_offset(skb, 0);

	ret = napi_skb_finish(napi, skb, dev_gro_receive(napi, skb));
	trace_napi_gro_receive_exit(ret);

	return ret;
}
EXPORT_SYMBOL(napi_gro_receive);
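
/* The rest of this file implements the frag-based receive path: a driver
 * obtains a pre-allocated skb with napi_get_frags(), attaches its receive
 * buffers as page fragments and hands the skb back via napi_gro_frags().
 * napi_reuse_skb() resets an skb that was not consumed so the next
 * napi_get_frags() call can hand it out again instead of allocating.
 */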

static void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb)
{
	if (unlikely(skb->pfmemalloc)) {
		consume_skb(skb);
		return;
	}
	__skb_pull(skb, skb_headlen(skb));
	/* restore the reserve we had after netdev_alloc_skb_ip_align() */
	skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN - skb_headroom(skb));
	__vlan_hwaccel_clear_tag(skb);
	skb->dev = napi->dev;
	skb->skb_iif = 0;

	/* eth_type_trans() assumes pkt_type is PACKET_HOST */
	skb->pkt_type = PACKET_HOST;

	skb->encapsulation = 0;
	skb_shinfo(skb)->gso_type = 0;
	skb_shinfo(skb)->gso_size = 0;
	if (unlikely(skb->slow_gro)) {
		skb_orphan(skb);
		skb_ext_reset(skb);
		nf_reset_ct(skb);
		skb->slow_gro = 0;
	}
	napi->skb = skb;
}

struct sk_buff *napi_get_frags(struct napi_struct *napi)
{
	struct sk_buff *skb = napi->skb;

	if (!skb) {
		skb = napi_alloc_skb(napi, GRO_MAX_HEAD);
		if (skb) {
			napi->skb = skb;
			skb_mark_napi_id(skb, napi);
		}
	}
	return skb;
}
EXPORT_SYMBOL(napi_get_frags);
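
/* Illustrative driver receive step for this path (the buffer variables below
 * are hypothetical, not part of this file):
 *
 *	struct sk_buff *skb = napi_get_frags(napi);
 *
 *	if (!skb)
 *		return;		// allocation failed, drop this frame
 *	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
 *			offset, frame_len, truesize);
 *	napi_gro_frags(napi);	// consumes napi->skb or recycles it
 *
 * Real users are drivers that build skbs purely from pages, e.g. mlx4.
 */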

static gro_result_t napi_frags_finish(struct napi_struct *napi,
				      struct sk_buff *skb,
				      gro_result_t ret)
{
	switch (ret) {
	case GRO_NORMAL:
	case GRO_HELD:
		__skb_push(skb, ETH_HLEN);
		skb->protocol = eth_type_trans(skb, skb->dev);
		if (ret == GRO_NORMAL)
			gro_normal_one(napi, skb, 1);
		break;

	case GRO_MERGED_FREE:
		if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD)
			napi_skb_free_stolen_head(skb);
		else
			napi_reuse_skb(napi, skb);
		break;

	case GRO_MERGED:
	case GRO_CONSUMED:
		break;
	}

	return ret;
}

/* Upper GRO stack assumes network header starts at gro_offset=0
 * Drivers could call both napi_gro_frags() and napi_gro_receive()
 * We copy ethernet header into skb->data to have a common layout.
 */
static struct sk_buff *napi_frags_skb(struct napi_struct *napi)
{
	struct sk_buff *skb = napi->skb;
	const struct ethhdr *eth;
	unsigned int hlen = sizeof(*eth);

	napi->skb = NULL;

	skb_reset_mac_header(skb);
	skb_gro_reset_offset(skb, hlen);

	if (unlikely(!skb_gro_may_pull(skb, hlen))) {
		eth = skb_gro_header_slow(skb, hlen, 0);
		if (unlikely(!eth)) {
			net_warn_ratelimited("%s: dropping impossible skb from %s\n",
					     __func__, napi->dev->name);
			napi_reuse_skb(napi, skb);
			return NULL;
		}
	} else {
		eth = (const struct ethhdr *)skb->data;

		if (NAPI_GRO_CB(skb)->frag0 != skb->data)
			gro_pull_from_frag0(skb, hlen);

		NAPI_GRO_CB(skb)->frag0 += hlen;
		NAPI_GRO_CB(skb)->frag0_len -= hlen;
	}
	__skb_pull(skb, hlen);

	/*
	 * This works because the only protocols we care about don't require
	 * special handling.
	 * We'll fix it up properly in napi_frags_finish()
	 */
	skb->protocol = eth->h_proto;

	return skb;
}
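
/* A note on the two branches in napi_frags_skb(): the common case reads the
 * ethernet header straight from frag0; the slow path is only taken when the
 * header is not available there (for example a highmem or misaligned first
 * fragment left frag0_len at 0), in which case skb_gro_header_slow() pulls
 * the header into the linear area before it is examined.
 */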

gro_result_t napi_gro_frags(struct napi_struct *napi)
{
	gro_result_t ret;
	struct sk_buff *skb = napi_frags_skb(napi);

	if (unlikely(!skb))
		return GRO_NORMAL;

	trace_napi_gro_frags_entry(skb);

	ret = napi_frags_finish(napi, skb, dev_gro_receive(napi, skb));
	trace_napi_gro_frags_exit(ret);

	return ret;
}
EXPORT_SYMBOL(napi_gro_frags);

/* Compute the checksum from gro_offset and return the folded value
 * after adding in any pseudo checksum.
 */
__sum16 __skb_gro_checksum_complete(struct sk_buff *skb)
{
	__wsum wsum;
	__sum16 sum;

	wsum = skb_checksum(skb, skb_gro_offset(skb), skb_gro_len(skb), 0);

	/* NAPI_GRO_CB(skb)->csum holds pseudo checksum */
	sum = csum_fold(csum_add(NAPI_GRO_CB(skb)->csum, wsum));
	/* See comments in __skb_checksum_complete(). */
	if (likely(!sum)) {
		if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) &&
		    !skb->csum_complete_sw)
			netdev_rx_csum_fault(skb->dev, skb);
	}

	NAPI_GRO_CB(skb)->csum = wsum;
	NAPI_GRO_CB(skb)->csum_valid = 1;

	return sum;
}
EXPORT_SYMBOL(__skb_gro_checksum_complete);
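
/* Protocol gro_receive handlers normally reach this function through the
 * skb_gro_checksum_validate*() helpers in <net/gro.h>, which first try to
 * use checksum information already provided by the NIC (CHECKSUM_UNNECESSARY
 * or CHECKSUM_COMPLETE) and only fall back to this full software checksum
 * when that information is missing.
 */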