Commit | Line | Data |
---|---|---|
b2441318 | 1 | // SPDX-License-Identifier: GPL-2.0 |
97e219b7 ED |
2 | #include <linux/skbuff.h> |
3 | #include <linux/slab.h> | |
4 | #include <linux/netdevice.h> | |
5 | #include <net/gro_cells.h> | |
6 | ||
/* Per-CPU GRO context: a private queue of pending skbs plus the
 * NAPI instance that drains it (see gro_cell_poll()).
 */
struct gro_cell {
	struct sk_buff_head	napi_skbs;
	struct napi_struct	napi;
};
11 | ||
/* gro_cells_receive - feed @skb through a per-CPU GRO cell.
 * @gcells: per-device gro_cells (cells may be NULL if init failed/skipped)
 * @skb: packet to deliver; ownership is taken in all cases
 *
 * Queues @skb on this CPU's cell and schedules its NAPI so the packet
 * goes through GRO aggregation, instead of calling netif_rx() directly.
 *
 * Return: NET_RX_SUCCESS or NET_RX_DROP.
 */
int gro_cells_receive(struct gro_cells *gcells, struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;
	struct gro_cell *cell;
	int res;

	rcu_read_lock();
	/* Device going down: drop instead of queueing on a dying device. */
	if (unlikely(!(dev->flags & IFF_UP)))
		goto drop;

	/* No cells allocated, cloned skb, or GRO elided on this device:
	 * fall back to the regular netif_rx() path.
	 */
	if (!gcells->cells || skb_cloned(skb) || netif_elide_gro(dev)) {
		res = netif_rx(skb);
		goto unlock;
	}

	cell = this_cpu_ptr(gcells->cells);

	/* Bound per-cell queue growth with the global backlog limit. */
	if (skb_queue_len(&cell->napi_skbs) > netdev_max_backlog) {
drop:
		dev_core_stats_rx_dropped_inc(dev);
		kfree_skb(skb);
		res = NET_RX_DROP;
		goto unlock;
	}

	/* NOTE(review): lockless __skb_queue_tail() presumably relies on
	 * the queue being strictly per-CPU with producer and gro_cell_poll()
	 * serialized in BH context — confirm against callers.
	 */
	__skb_queue_tail(&cell->napi_skbs, skb);
	/* First packet on an empty queue: kick NAPI to start draining. */
	if (skb_queue_len(&cell->napi_skbs) == 1)
		napi_schedule(&cell->napi);

	res = NET_RX_SUCCESS;

unlock:
	rcu_read_unlock();
	return res;
}
EXPORT_SYMBOL(gro_cells_receive);
48 | ||
49 | /* called under BH context */ | |
50 | static int gro_cell_poll(struct napi_struct *napi, int budget) | |
51 | { | |
52 | struct gro_cell *cell = container_of(napi, struct gro_cell, napi); | |
53 | struct sk_buff *skb; | |
54 | int work_done = 0; | |
55 | ||
56 | while (work_done < budget) { | |
57 | skb = __skb_dequeue(&cell->napi_skbs); | |
58 | if (!skb) | |
59 | break; | |
60 | napi_gro_receive(napi, skb); | |
61 | work_done++; | |
62 | } | |
63 | ||
64 | if (work_done < budget) | |
65 | napi_complete_done(napi, work_done); | |
66 | return work_done; | |
67 | } | |
68 | ||
69 | int gro_cells_init(struct gro_cells *gcells, struct net_device *dev) | |
70 | { | |
71 | int i; | |
72 | ||
73 | gcells->cells = alloc_percpu(struct gro_cell); | |
74 | if (!gcells->cells) | |
75 | return -ENOMEM; | |
76 | ||
77 | for_each_possible_cpu(i) { | |
78 | struct gro_cell *cell = per_cpu_ptr(gcells->cells, i); | |
79 | ||
80 | __skb_queue_head_init(&cell->napi_skbs); | |
81 | ||
82 | set_bit(NAPI_STATE_NO_BUSY_POLL, &cell->napi.state); | |
83 | ||
84 | netif_napi_add(dev, &cell->napi, gro_cell_poll, | |
85 | NAPI_POLL_WEIGHT); | |
86 | napi_enable(&cell->napi); | |
87 | } | |
88 | return 0; | |
89 | } | |
90 | EXPORT_SYMBOL(gro_cells_init); | |
91 | ||
/* Carrier for deferring free_percpu() past an RCU grace period
 * via call_rcu() (see gro_cells_destroy()).
 */
struct percpu_free_defer {
	struct rcu_head rcu;
	void __percpu *ptr;
};
96 | ||
97 | static void percpu_free_defer_callback(struct rcu_head *head) | |
98 | { | |
99 | struct percpu_free_defer *defer; | |
100 | ||
101 | defer = container_of(head, struct percpu_free_defer, rcu); | |
102 | free_percpu(defer->ptr); | |
103 | kfree(defer); | |
104 | } | |
105 | ||
/* gro_cells_destroy - tear down all cells and free the per-CPU area.
 * @gcells: container previously set up by gro_cells_init(); no-op if
 *          gcells->cells is NULL. gcells->cells is NULL on return.
 */
void gro_cells_destroy(struct gro_cells *gcells)
{
	struct percpu_free_defer *defer;
	int i;

	if (!gcells->cells)
		return;
	for_each_possible_cpu(i) {
		struct gro_cell *cell = per_cpu_ptr(gcells->cells, i);

		/* Disable NAPI first so gro_cell_poll() cannot run while
		 * the napi is unlinked and the pending queue is purged.
		 */
		napi_disable(&cell->napi);
		__netif_napi_del(&cell->napi);
		__skb_queue_purge(&cell->napi_skbs);
	}
	/* We need to observe an rcu grace period before freeing ->cells,
	 * because netpoll could access dev->napi_list under rcu protection.
	 * Try hard using call_rcu() instead of synchronize_rcu(),
	 * because we might be called from cleanup_net(), and we
	 * definitely do not want to block this critical task.
	 */
	defer = kmalloc(sizeof(*defer), GFP_KERNEL | __GFP_NOWARN);
	if (likely(defer)) {
		defer->ptr = gcells->cells;
		call_rcu(&defer->rcu, percpu_free_defer_callback);
	} else {
		/* Allocation failed: fall back to blocking for the grace
		 * period before freeing.
		 * We do not hold RTNL at this point, synchronize_net()
		 * would not be able to expedite this sync.
		 */
		synchronize_rcu_expedited();
		free_percpu(gcells->cells);
	}
	gcells->cells = NULL;
}
EXPORT_SYMBOL(gro_cells_destroy);