// SPDX-License-Identifier: GPL-2.0-or-later
#include <net/gro.h>
#include <net/dst_metadata.h>
#include <net/busy_poll.h>
#include <trace/events/net.h>

#define MAX_GRO_SKBS 8

/* This should be increased if a protocol with a bigger head is added. */
#define GRO_MAX_HEAD (MAX_HEADER + 128)

static DEFINE_SPINLOCK(offload_lock);
static struct list_head offload_base __read_mostly = LIST_HEAD_INIT(offload_base);
/* Maximum number of GRO_NORMAL skbs to batch up for list-RX */
int gro_normal_batch __read_mostly = 8;

/**
 * dev_add_offload - register offload handlers
 * @po: protocol offload declaration
 *
 * Add protocol offload handlers to the networking stack. The passed
 * &proto_offload is linked into kernel lists and may not be freed until
 * it has been removed from the kernel lists.
 *
 * This call does not sleep, therefore it cannot
 * guarantee that all CPUs that are in the middle of receiving packets
 * will see the new offload handlers (until the next received packet).
 */
void dev_add_offload(struct packet_offload *po)
{
	struct packet_offload *elem;

	spin_lock(&offload_lock);
	list_for_each_entry(elem, &offload_base, list) {
		if (po->priority < elem->priority)
			break;
	}
	list_add_rcu(&po->list, elem->list.prev);
	spin_unlock(&offload_lock);
}
EXPORT_SYMBOL(dev_add_offload);
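
/* Example (illustrative sketch, not part of this file): a protocol module
 * typically registers its offload at init time and removes it again on
 * exit. ETH_P_FOO and foo_gso_segment() below are hypothetical
 * placeholders, not real kernel symbols.
 *
 *	static struct packet_offload foo_offload __read_mostly = {
 *		.type = cpu_to_be16(ETH_P_FOO),
 *		.callbacks = {
 *			.gso_segment = foo_gso_segment,
 *		},
 *	};
 *
 *	static int __init foo_init(void)
 *	{
 *		dev_add_offload(&foo_offload);
 *		return 0;
 *	}
 *
 *	static void __exit foo_exit(void)
 *	{
 *		dev_remove_offload(&foo_offload);
 *	}
 */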

/**
 * __dev_remove_offload - remove offload handler
 * @po: packet offload declaration
 *
 * Remove a protocol offload handler that was previously added to the
 * kernel offload handlers by dev_add_offload(). The passed &offload_type
 * is removed from the kernel lists and can be freed or reused once this
 * function returns.
 *
 * The packet type might still be in use by receivers
 * and must not be freed until after all the CPUs have gone
 * through a quiescent state.
 */
static void __dev_remove_offload(struct packet_offload *po)
{
	struct list_head *head = &offload_base;
	struct packet_offload *po1;

	spin_lock(&offload_lock);

	list_for_each_entry(po1, head, list) {
		if (po == po1) {
			list_del_rcu(&po->list);
			goto out;
		}
	}

	pr_warn("dev_remove_offload: %p not found\n", po);
out:
	spin_unlock(&offload_lock);
}

/**
 * dev_remove_offload - remove packet offload handler
 * @po: packet offload declaration
 *
 * Remove a packet offload handler that was previously added to the kernel
 * offload handlers by dev_add_offload(). The passed &offload_type is
 * removed from the kernel lists and can be freed or reused once this
 * function returns.
 *
 * This call sleeps to guarantee that no CPU is looking at the packet
 * type after return.
 */
void dev_remove_offload(struct packet_offload *po)
{
	__dev_remove_offload(po);

	synchronize_net();
}
EXPORT_SYMBOL(dev_remove_offload);

/**
 * skb_eth_gso_segment - segmentation handler for ethernet protocols.
 * @skb: buffer to segment
 * @features: features for the output path (see dev->features)
 * @type: Ethernet Protocol ID
 */
struct sk_buff *skb_eth_gso_segment(struct sk_buff *skb,
				    netdev_features_t features, __be16 type)
{
	struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
	struct packet_offload *ptype;

	rcu_read_lock();
	list_for_each_entry_rcu(ptype, &offload_base, list) {
		if (ptype->type == type && ptype->callbacks.gso_segment) {
			segs = ptype->callbacks.gso_segment(skb, features);
			break;
		}
	}
	rcu_read_unlock();

	return segs;
}
EXPORT_SYMBOL(skb_eth_gso_segment);
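
/* Example (sketch, modeled on how encapsulation offloads such as ESP use
 * this helper): a gso_segment callback that already knows the inner
 * protocol, here assumed to be plain IPv4, delegates to the registered
 * handler and passes any error back to its own caller.
 *
 *	struct sk_buff *segs;
 *
 *	segs = skb_eth_gso_segment(skb, features, htons(ETH_P_IP));
 *	if (IS_ERR(segs))
 *		return segs;
 */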

/**
 * skb_mac_gso_segment - mac layer segmentation handler.
 * @skb: buffer to segment
 * @features: features for the output path (see dev->features)
 */
struct sk_buff *skb_mac_gso_segment(struct sk_buff *skb,
				    netdev_features_t features)
{
	struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
	struct packet_offload *ptype;
	int vlan_depth = skb->mac_len;
	__be16 type = skb_network_protocol(skb, &vlan_depth);

	if (unlikely(!type))
		return ERR_PTR(-EINVAL);

	__skb_pull(skb, vlan_depth);

	rcu_read_lock();
	list_for_each_entry_rcu(ptype, &offload_base, list) {
		if (ptype->type == type && ptype->callbacks.gso_segment) {
			segs = ptype->callbacks.gso_segment(skb, features);
			break;
		}
	}
	rcu_read_unlock();

	__skb_push(skb, skb->data - skb_mac_header(skb));

	return segs;
}
EXPORT_SYMBOL(skb_mac_gso_segment);
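/* Merge skb into the tail of an existing held packet p. Three strategies,
 * tried in order: append skb's page fragments to p when skb's linear head
 * holds nothing but already-consumed GRO headers; turn skb's linear head
 * into one more page fragment when the head is page-backed (head_frag);
 * otherwise chain the whole skb onto p's frag_list. In all cases skb's
 * payload bytes and truesize are accounted to p (and to the frag_list
 * tail, if different).
 */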
int skb_gro_receive(struct sk_buff *p, struct sk_buff *skb)
{
	struct skb_shared_info *pinfo, *skbinfo = skb_shinfo(skb);
	unsigned int offset = skb_gro_offset(skb);
	unsigned int headlen = skb_headlen(skb);
	unsigned int len = skb_gro_len(skb);
	unsigned int delta_truesize;
	unsigned int gro_max_size;
	unsigned int new_truesize;
	struct sk_buff *lp;
	int segs;

	/* Do not splice page pool based packets w/ non-page pool
	 * packets. This can result in reference count issues as page
	 * pool pages will not decrement the reference count and will
	 * instead be immediately returned to the pool or have frag
	 * count decremented.
	 */
	if (p->pp_recycle != skb->pp_recycle)
		return -ETOOMANYREFS;

	/* pairs with WRITE_ONCE() in netif_set_gro_max_size() */
	gro_max_size = READ_ONCE(p->dev->gro_max_size);

	if (unlikely(p->len + len >= gro_max_size || NAPI_GRO_CB(skb)->flush))
		return -E2BIG;

	if (unlikely(p->len + len >= GRO_LEGACY_MAX_SIZE)) {
		if (p->protocol != htons(ETH_P_IPV6) ||
		    skb_headroom(p) < sizeof(struct hop_jumbo_hdr) ||
		    ipv6_hdr(p)->nexthdr != IPPROTO_TCP ||
		    p->encapsulation)
			return -E2BIG;
	}

	segs = NAPI_GRO_CB(skb)->count;
	lp = NAPI_GRO_CB(p)->last;
	pinfo = skb_shinfo(lp);

	if (headlen <= offset) {
		skb_frag_t *frag;
		skb_frag_t *frag2;
		int i = skbinfo->nr_frags;
		int nr_frags = pinfo->nr_frags + i;

		if (nr_frags > MAX_SKB_FRAGS)
			goto merge;

		offset -= headlen;
		pinfo->nr_frags = nr_frags;
		skbinfo->nr_frags = 0;

		frag = pinfo->frags + nr_frags;
		frag2 = skbinfo->frags + i;
		do {
			*--frag = *--frag2;
		} while (--i);

		skb_frag_off_add(frag, offset);
		skb_frag_size_sub(frag, offset);

		/* all fragments' truesize: remove (head size + sk_buff) */
		new_truesize = SKB_TRUESIZE(skb_end_offset(skb));
		delta_truesize = skb->truesize - new_truesize;

		skb->truesize = new_truesize;
		skb->len -= skb->data_len;
		skb->data_len = 0;

		NAPI_GRO_CB(skb)->free = NAPI_GRO_FREE;
		goto done;
	} else if (skb->head_frag) {
		int nr_frags = pinfo->nr_frags;
		skb_frag_t *frag = pinfo->frags + nr_frags;
		struct page *page = virt_to_head_page(skb->head);
		unsigned int first_size = headlen - offset;
		unsigned int first_offset;

		if (nr_frags + 1 + skbinfo->nr_frags > MAX_SKB_FRAGS)
			goto merge;

		first_offset = skb->data -
			       (unsigned char *)page_address(page) +
			       offset;

		pinfo->nr_frags = nr_frags + 1 + skbinfo->nr_frags;

		__skb_frag_set_page(frag, page);
		skb_frag_off_set(frag, first_offset);
		skb_frag_size_set(frag, first_size);

		memcpy(frag + 1, skbinfo->frags, sizeof(*frag) * skbinfo->nr_frags);
		/* We don't need to clear skbinfo->nr_frags here */

		new_truesize = SKB_DATA_ALIGN(sizeof(struct sk_buff));
		delta_truesize = skb->truesize - new_truesize;
		skb->truesize = new_truesize;
		NAPI_GRO_CB(skb)->free = NAPI_GRO_FREE_STOLEN_HEAD;
		goto done;
	}

merge:
	/* sk ownership - if any - completely transferred to the aggregated packet */
	skb->destructor = NULL;
	delta_truesize = skb->truesize;
	if (offset > headlen) {
		unsigned int eat = offset - headlen;

		skb_frag_off_add(&skbinfo->frags[0], eat);
		skb_frag_size_sub(&skbinfo->frags[0], eat);
		skb->data_len -= eat;
		skb->len -= eat;
		offset = headlen;
	}

	__skb_pull(skb, offset);

	if (NAPI_GRO_CB(p)->last == p)
		skb_shinfo(p)->frag_list = skb;
	else
		NAPI_GRO_CB(p)->last->next = skb;
	NAPI_GRO_CB(p)->last = skb;
	__skb_header_release(skb);
	lp = p;

done:
	NAPI_GRO_CB(p)->count += segs;
	p->data_len += len;
	p->truesize += delta_truesize;
	p->len += len;
	if (lp != p) {
		lp->data_len += len;
		lp->truesize += delta_truesize;
		lp->len += len;
	}
	NAPI_GRO_CB(skb)->same_flow = 1;
	return 0;
}

static void napi_gro_complete(struct napi_struct *napi, struct sk_buff *skb)
{
	struct packet_offload *ptype;
	__be16 type = skb->protocol;
	struct list_head *head = &offload_base;
	int err = -ENOENT;

	BUILD_BUG_ON(sizeof(struct napi_gro_cb) > sizeof(skb->cb));

	if (NAPI_GRO_CB(skb)->count == 1) {
		skb_shinfo(skb)->gso_size = 0;
		goto out;
	}

	rcu_read_lock();
	list_for_each_entry_rcu(ptype, head, list) {
		if (ptype->type != type || !ptype->callbacks.gro_complete)
			continue;

		err = INDIRECT_CALL_INET(ptype->callbacks.gro_complete,
					 ipv6_gro_complete, inet_gro_complete,
					 skb, 0);
		break;
	}
	rcu_read_unlock();

	if (err) {
		WARN_ON(&ptype->list == head);
		kfree_skb(skb);
		return;
	}

out:
	gro_normal_one(napi, skb, NAPI_GRO_CB(skb)->count);
}

static void __napi_gro_flush_chain(struct napi_struct *napi, u32 index,
				   bool flush_old)
{
	struct list_head *head = &napi->gro_hash[index].list;
	struct sk_buff *skb, *p;

	list_for_each_entry_safe_reverse(skb, p, head, list) {
		if (flush_old && NAPI_GRO_CB(skb)->age == jiffies)
			return;
		skb_list_del_init(skb);
		napi_gro_complete(napi, skb);
		napi->gro_hash[index].count--;
	}

	if (!napi->gro_hash[index].count)
		__clear_bit(index, &napi->gro_bitmask);
}

/* napi->gro_hash[].list contains packets ordered by age.
 * youngest packets at the head of it.
 * Complete skbs in reverse order to reduce latencies.
 */
void napi_gro_flush(struct napi_struct *napi, bool flush_old)
{
	unsigned long bitmask = napi->gro_bitmask;
	unsigned int i, base = ~0U;

	while ((i = ffs(bitmask)) != 0) {
		bitmask >>= i;
		base += i;
		__napi_gro_flush_chain(napi, base, flush_old);
	}
}
EXPORT_SYMBOL(napi_gro_flush);
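
/* Worked example of the ffs() walk above: with gro_bitmask == 0x12
 * (buckets 1 and 4 busy), the first ffs() returns 2, so base wraps from
 * ~0U to 1 and bucket 1 is flushed; the shifted mask is now 0x4, ffs()
 * returns 3, base becomes 4 and bucket 4 is flushed; the mask is then 0
 * and the loop ends. Each set bit costs one ffs() instead of a linear
 * scan over all GRO_HASH_BUCKETS.
 */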

static void gro_list_prepare(const struct list_head *head,
			     const struct sk_buff *skb)
{
	unsigned int maclen = skb->dev->hard_header_len;
	u32 hash = skb_get_hash_raw(skb);
	struct sk_buff *p;

	list_for_each_entry(p, head, list) {
		unsigned long diffs;

		NAPI_GRO_CB(p)->flush = 0;

		if (hash != skb_get_hash_raw(p)) {
			NAPI_GRO_CB(p)->same_flow = 0;
			continue;
		}

		diffs = (unsigned long)p->dev ^ (unsigned long)skb->dev;
		diffs |= p->vlan_all ^ skb->vlan_all;
		diffs |= skb_metadata_differs(p, skb);
		if (maclen == ETH_HLEN)
			diffs |= compare_ether_header(skb_mac_header(p),
						      skb_mac_header(skb));
		else if (!diffs)
			diffs = memcmp(skb_mac_header(p),
				       skb_mac_header(skb),
				       maclen);

		/* In the most common scenarios 'slow_gro' is 0, otherwise
		 * we are already on some slower paths; either skip all the
		 * infrequent tests altogether or avoid trying too hard to
		 * skip each of them individually.
		 */
		if (!diffs && unlikely(skb->slow_gro | p->slow_gro)) {
#if IS_ENABLED(CONFIG_SKB_EXTENSIONS) && IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
			struct tc_skb_ext *skb_ext;
			struct tc_skb_ext *p_ext;
#endif

			diffs |= p->sk != skb->sk;
			diffs |= skb_metadata_dst_cmp(p, skb);
			diffs |= skb_get_nfct(p) ^ skb_get_nfct(skb);

#if IS_ENABLED(CONFIG_SKB_EXTENSIONS) && IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
			skb_ext = skb_ext_find(skb, TC_SKB_EXT);
			p_ext = skb_ext_find(p, TC_SKB_EXT);

			diffs |= (!!p_ext) ^ (!!skb_ext);
			if (!diffs && unlikely(skb_ext))
				diffs |= p_ext->chain ^ skb_ext->chain;
#endif
		}

		NAPI_GRO_CB(p)->same_flow = !diffs;
	}
}

static inline void skb_gro_reset_offset(struct sk_buff *skb, u32 nhoff)
{
	const struct skb_shared_info *pinfo = skb_shinfo(skb);
	const skb_frag_t *frag0 = &pinfo->frags[0];

	NAPI_GRO_CB(skb)->data_offset = 0;
	NAPI_GRO_CB(skb)->frag0 = NULL;
	NAPI_GRO_CB(skb)->frag0_len = 0;

	if (!skb_headlen(skb) && pinfo->nr_frags &&
	    !PageHighMem(skb_frag_page(frag0)) &&
	    (!NET_IP_ALIGN || !((skb_frag_off(frag0) + nhoff) & 3))) {
		NAPI_GRO_CB(skb)->frag0 = skb_frag_address(frag0);
		NAPI_GRO_CB(skb)->frag0_len = min_t(unsigned int,
						    skb_frag_size(frag0),
						    skb->end - skb->tail);
	}
}

static void gro_pull_from_frag0(struct sk_buff *skb, int grow)
{
	struct skb_shared_info *pinfo = skb_shinfo(skb);

	BUG_ON(skb->end - skb->tail < grow);

	memcpy(skb_tail_pointer(skb), NAPI_GRO_CB(skb)->frag0, grow);

	skb->data_len -= grow;
	skb->tail += grow;

	skb_frag_off_add(&pinfo->frags[0], grow);
	skb_frag_size_sub(&pinfo->frags[0], grow);

	if (unlikely(!skb_frag_size(&pinfo->frags[0]))) {
		skb_frag_unref(skb, 0);
		memmove(pinfo->frags, pinfo->frags + 1,
			--pinfo->nr_frags * sizeof(pinfo->frags[0]));
	}
}

static void gro_flush_oldest(struct napi_struct *napi, struct list_head *head)
{
	struct sk_buff *oldest;

	oldest = list_last_entry(head, struct sk_buff, list);

	/* We are called with head length >= MAX_GRO_SKBS, so this is
	 * impossible.
	 */
	if (WARN_ON_ONCE(!oldest))
		return;

	/* Do not adjust napi->gro_hash[].count, caller is adding a new
	 * SKB to the chain.
	 */
	skb_list_del_init(oldest);
	napi_gro_complete(napi, oldest);
}

static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
{
	u32 bucket = skb_get_hash_raw(skb) & (GRO_HASH_BUCKETS - 1);
	struct gro_list *gro_list = &napi->gro_hash[bucket];
	struct list_head *head = &offload_base;
	struct packet_offload *ptype;
	__be16 type = skb->protocol;
	struct sk_buff *pp = NULL;
	enum gro_result ret;
	int same_flow;
	int grow;

	if (netif_elide_gro(skb->dev))
		goto normal;

	gro_list_prepare(&gro_list->list, skb);

	rcu_read_lock();
	list_for_each_entry_rcu(ptype, head, list) {
		if (ptype->type == type && ptype->callbacks.gro_receive)
			goto found_ptype;
	}
	rcu_read_unlock();
	goto normal;

found_ptype:
	skb_set_network_header(skb, skb_gro_offset(skb));
	skb_reset_mac_len(skb);
	BUILD_BUG_ON(sizeof_field(struct napi_gro_cb, zeroed) != sizeof(u32));
	BUILD_BUG_ON(!IS_ALIGNED(offsetof(struct napi_gro_cb, zeroed),
				 sizeof(u32))); /* Avoid slow unaligned acc */
	*(u32 *)&NAPI_GRO_CB(skb)->zeroed = 0;
	NAPI_GRO_CB(skb)->flush = skb_has_frag_list(skb);
	NAPI_GRO_CB(skb)->is_atomic = 1;
	NAPI_GRO_CB(skb)->count = 1;
	if (unlikely(skb_is_gso(skb))) {
		NAPI_GRO_CB(skb)->count = skb_shinfo(skb)->gso_segs;
		/* Only support TCP and non DODGY users. */
		if (!skb_is_gso_tcp(skb) ||
		    (skb_shinfo(skb)->gso_type & SKB_GSO_DODGY))
			NAPI_GRO_CB(skb)->flush = 1;
	}

	/* Setup for GRO checksum validation */
	switch (skb->ip_summed) {
	case CHECKSUM_COMPLETE:
		NAPI_GRO_CB(skb)->csum = skb->csum;
		NAPI_GRO_CB(skb)->csum_valid = 1;
		break;
	case CHECKSUM_UNNECESSARY:
		NAPI_GRO_CB(skb)->csum_cnt = skb->csum_level + 1;
		break;
	}

	pp = INDIRECT_CALL_INET(ptype->callbacks.gro_receive,
				ipv6_gro_receive, inet_gro_receive,
				&gro_list->list, skb);

	rcu_read_unlock();

	if (PTR_ERR(pp) == -EINPROGRESS) {
		ret = GRO_CONSUMED;
		goto ok;
	}

	same_flow = NAPI_GRO_CB(skb)->same_flow;
	ret = NAPI_GRO_CB(skb)->free ? GRO_MERGED_FREE : GRO_MERGED;

	if (pp) {
		skb_list_del_init(pp);
		napi_gro_complete(napi, pp);
		gro_list->count--;
	}

	if (same_flow)
		goto ok;

	if (NAPI_GRO_CB(skb)->flush)
		goto normal;

	if (unlikely(gro_list->count >= MAX_GRO_SKBS))
		gro_flush_oldest(napi, &gro_list->list);
	else
		gro_list->count++;

	NAPI_GRO_CB(skb)->age = jiffies;
	NAPI_GRO_CB(skb)->last = skb;
	if (!skb_is_gso(skb))
		skb_shinfo(skb)->gso_size = skb_gro_len(skb);
	list_add(&skb->list, &gro_list->list);
	ret = GRO_HELD;

pull:
	grow = skb_gro_offset(skb) - skb_headlen(skb);
	if (grow > 0)
		gro_pull_from_frag0(skb, grow);
ok:
	if (gro_list->count) {
		if (!test_bit(bucket, &napi->gro_bitmask))
			__set_bit(bucket, &napi->gro_bitmask);
	} else if (test_bit(bucket, &napi->gro_bitmask)) {
		__clear_bit(bucket, &napi->gro_bitmask);
	}

	return ret;

normal:
	ret = GRO_NORMAL;
	goto pull;
}
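
/* How the gro_result values computed above are handled later:
 * GRO_NORMAL - GRO passed; napi_skb_finish()/napi_frags_finish() feed the
 * skb to the regular receive path via gro_normal_one().
 * GRO_HELD - the skb was stored on a gro_hash list, waiting for more
 * segments of the same flow.
 * GRO_MERGED - the whole skb was chained onto a held packet's frag_list.
 * GRO_MERGED_FREE - only the skb's data was absorbed; the leftover shell
 * can be freed or recycled.
 * GRO_CONSUMED - a gro_receive callback took ownership (the -EINPROGRESS
 * case above).
 */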

struct packet_offload *gro_find_receive_by_type(__be16 type)
{
	struct list_head *offload_head = &offload_base;
	struct packet_offload *ptype;

	list_for_each_entry_rcu(ptype, offload_head, list) {
		if (ptype->type != type || !ptype->callbacks.gro_receive)
			continue;
		return ptype;
	}
	return NULL;
}
EXPORT_SYMBOL(gro_find_receive_by_type);

struct packet_offload *gro_find_complete_by_type(__be16 type)
{
	struct list_head *offload_head = &offload_base;
	struct packet_offload *ptype;

	list_for_each_entry_rcu(ptype, offload_head, list) {
		if (ptype->type != type || !ptype->callbacks.gro_complete)
			continue;
		return ptype;
	}
	return NULL;
}
EXPORT_SYMBOL(gro_find_complete_by_type);
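
/* Example (sketch, modeled on encapsulation GRO handlers such as the UDP
 * tunnel ones): after stripping its own header, a tunnel's gro_receive
 * callback looks up the inner protocol and delegates. 'type' is assumed
 * to have been read from the tunnel header by the caller.
 *
 *	struct packet_offload *ptype;
 *	struct sk_buff *pp = NULL;
 *
 *	ptype = gro_find_receive_by_type(type);
 *	if (!ptype)
 *		goto out;
 *	pp = call_gro_receive(ptype->callbacks.gro_receive, head, skb);
 */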

static gro_result_t napi_skb_finish(struct napi_struct *napi,
				    struct sk_buff *skb,
				    gro_result_t ret)
{
	switch (ret) {
	case GRO_NORMAL:
		gro_normal_one(napi, skb, 1);
		break;

	case GRO_MERGED_FREE:
		if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD)
			napi_skb_free_stolen_head(skb);
		else if (skb->fclone != SKB_FCLONE_UNAVAILABLE)
			__kfree_skb(skb);
		else
			__kfree_skb_defer(skb);
		break;

	case GRO_HELD:
	case GRO_MERGED:
	case GRO_CONSUMED:
		break;
	}

	return ret;
}

gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
{
	gro_result_t ret;

	skb_mark_napi_id(skb, napi);
	trace_napi_gro_receive_entry(skb);

	skb_gro_reset_offset(skb, 0);

	ret = napi_skb_finish(napi, skb, dev_gro_receive(napi, skb));
	trace_napi_gro_receive_exit(ret);

	return ret;
}
EXPORT_SYMBOL(napi_gro_receive);
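
/* Example (sketch): the usual driver-side call site. In a NAPI poll
 * routine, each completed rx buffer is handed to GRO instead of
 * netif_receive_skb(). foo_fetch_rx_skb() is a hypothetical helper that
 * dequeues one built skb from the hardware ring.
 *
 *	static int foo_poll(struct napi_struct *napi, int budget)
 *	{
 *		struct sk_buff *skb;
 *		int work_done = 0;
 *
 *		while (work_done < budget &&
 *		       (skb = foo_fetch_rx_skb(napi)) != NULL) {
 *			skb->protocol = eth_type_trans(skb, skb->dev);
 *			napi_gro_receive(napi, skb);
 *			work_done++;
 *		}
 *		if (work_done < budget)
 *			napi_complete_done(napi, work_done);
 *		return work_done;
 *	}
 */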

static void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb)
{
	if (unlikely(skb->pfmemalloc)) {
		consume_skb(skb);
		return;
	}
	__skb_pull(skb, skb_headlen(skb));
	/* restore the reserve we had after netdev_alloc_skb_ip_align() */
	skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN - skb_headroom(skb));
	__vlan_hwaccel_clear_tag(skb);
	skb->dev = napi->dev;
	skb->skb_iif = 0;

	/* eth_type_trans() assumes pkt_type is PACKET_HOST */
	skb->pkt_type = PACKET_HOST;

	skb->encapsulation = 0;
	skb_shinfo(skb)->gso_type = 0;
	skb_shinfo(skb)->gso_size = 0;
	if (unlikely(skb->slow_gro)) {
		skb_orphan(skb);
		skb_ext_reset(skb);
		nf_reset_ct(skb);
		skb->slow_gro = 0;
	}

	napi->skb = skb;
}

struct sk_buff *napi_get_frags(struct napi_struct *napi)
{
	struct sk_buff *skb = napi->skb;

	if (!skb) {
		skb = napi_alloc_skb(napi, GRO_MAX_HEAD);
		if (skb) {
			napi->skb = skb;
			skb_mark_napi_id(skb, napi);
		}
	}
	return skb;
}
EXPORT_SYMBOL(napi_get_frags);

static gro_result_t napi_frags_finish(struct napi_struct *napi,
				      struct sk_buff *skb,
				      gro_result_t ret)
{
	switch (ret) {
	case GRO_NORMAL:
	case GRO_HELD:
		__skb_push(skb, ETH_HLEN);
		skb->protocol = eth_type_trans(skb, skb->dev);
		if (ret == GRO_NORMAL)
			gro_normal_one(napi, skb, 1);
		break;

	case GRO_MERGED_FREE:
		if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD)
			napi_skb_free_stolen_head(skb);
		else
			napi_reuse_skb(napi, skb);
		break;

	case GRO_MERGED:
	case GRO_CONSUMED:
		break;
	}

	return ret;
}

/* Upper GRO stack assumes network header starts at gro_offset=0
 * Drivers could call both napi_gro_frags() and napi_gro_receive()
 * We copy ethernet header into skb->data to have a common layout.
 */
static struct sk_buff *napi_frags_skb(struct napi_struct *napi)
{
	struct sk_buff *skb = napi->skb;
	const struct ethhdr *eth;
	unsigned int hlen = sizeof(*eth);

	napi->skb = NULL;

	skb_reset_mac_header(skb);
	skb_gro_reset_offset(skb, hlen);

	if (unlikely(skb_gro_header_hard(skb, hlen))) {
		eth = skb_gro_header_slow(skb, hlen, 0);
		if (unlikely(!eth)) {
			net_warn_ratelimited("%s: dropping impossible skb from %s\n",
					     __func__, napi->dev->name);
			napi_reuse_skb(napi, skb);
			return NULL;
		}
	} else {
		eth = (const struct ethhdr *)skb->data;
		gro_pull_from_frag0(skb, hlen);
		NAPI_GRO_CB(skb)->frag0 += hlen;
		NAPI_GRO_CB(skb)->frag0_len -= hlen;
	}
	__skb_pull(skb, hlen);

	/*
	 * This works because the only protocols we care about don't require
	 * special handling.
	 * We'll fix it up properly in napi_frags_finish()
	 */
	skb->protocol = eth->h_proto;

	return skb;
}

gro_result_t napi_gro_frags(struct napi_struct *napi)
{
	gro_result_t ret;
	struct sk_buff *skb = napi_frags_skb(napi);

	trace_napi_gro_frags_entry(skb);

	ret = napi_frags_finish(napi, skb, dev_gro_receive(napi, skb));
	trace_napi_gro_frags_exit(ret);

	return ret;
}
EXPORT_SYMBOL(napi_gro_frags);
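
/* Example (sketch): header-less receive. A driver that only has pages
 * from its rx ring borrows a shell skb from napi_get_frags(), attaches
 * the pages as fragments, and lets napi_gro_frags() parse the Ethernet
 * header out of frag0. page/offset/len/truesize are assumed to come from
 * the driver's rx descriptor.
 *
 *	struct sk_buff *skb = napi_get_frags(napi);
 *
 *	if (unlikely(!skb))
 *		return;
 *	skb_add_rx_frag(skb, 0, page, offset, len, truesize);
 *	napi_gro_frags(napi);
 */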

/* Compute the checksum from gro_offset and return the folded value
 * after adding in any pseudo checksum.
 */
__sum16 __skb_gro_checksum_complete(struct sk_buff *skb)
{
	__wsum wsum;
	__sum16 sum;

	wsum = skb_checksum(skb, skb_gro_offset(skb), skb_gro_len(skb), 0);

	/* NAPI_GRO_CB(skb)->csum holds pseudo checksum */
	sum = csum_fold(csum_add(NAPI_GRO_CB(skb)->csum, wsum));
	/* See comments in __skb_checksum_complete(). */
	if (likely(!sum)) {
		if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) &&
		    !skb->csum_complete_sw)
			netdev_rx_csum_fault(skb->dev, skb);
	}

	NAPI_GRO_CB(skb)->csum = wsum;
	NAPI_GRO_CB(skb)->csum_valid = 1;

	return sum;
}
EXPORT_SYMBOL(__skb_gro_checksum_complete);
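
/* Example (sketch, modeled on the transport-layer GRO paths): callers do
 * not normally invoke this directly; they go through the
 * skb_gro_checksum_validate() helpers in <net/gro.h>, which fall back to
 * a full computation like the one above only when no checksum could be
 * reused from the device. A TCP-over-IPv4 gro_receive handler would do
 * roughly:
 *
 *	if (skb_gro_checksum_validate(skb, IPPROTO_TCP,
 *				      inet_gro_compute_pseudo)) {
 *		NAPI_GRO_CB(skb)->flush = 1;
 *		return NULL;
 *	}
 */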