Commit | Line | Data |
---|---|---|
f942dc25 IC |
1 | /* |
2 | * Network-device interface management. | |
3 | * | |
4 | * Copyright (c) 2004-2005, Keir Fraser | |
5 | * | |
6 | * This program is free software; you can redistribute it and/or | |
7 | * modify it under the terms of the GNU General Public License version 2 | |
8 | * as published by the Free Software Foundation; or, when distributed | |
9 | * separately from the Linux kernel or incorporated into other | |
10 | * software packages, subject to the following license: | |
11 | * | |
12 | * Permission is hereby granted, free of charge, to any person obtaining a copy | |
13 | * of this source file (the "Software"), to deal in the Software without | |
14 | * restriction, including without limitation the rights to use, copy, modify, | |
15 | * merge, publish, distribute, sublicense, and/or sell copies of the Software, | |
16 | * and to permit persons to whom the Software is furnished to do so, subject to | |
17 | * the following conditions: | |
18 | * | |
19 | * The above copyright notice and this permission notice shall be included in | |
20 | * all copies or substantial portions of the Software. | |
21 | * | |
22 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | |
23 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | |
24 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE | |
25 | * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | |
26 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING | |
27 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS | |
28 | * IN THE SOFTWARE. | |
29 | */ | |
30 | ||
31 | #include "common.h" | |
32 | ||
b3f980bd | 33 | #include <linux/kthread.h> |
0881e7bd | 34 | #include <linux/sched/task.h> |
f942dc25 IC |
35 | #include <linux/ethtool.h> |
36 | #include <linux/rtnetlink.h> | |
37 | #include <linux/if_vlan.h> | |
e7b599d7 | 38 | #include <linux/vmalloc.h> |
f942dc25 IC |
39 | |
40 | #include <xen/events.h> | |
41 | #include <asm/xen/hypercall.h> | |
f53c3fe8 | 42 | #include <xen/balloon.h> |
f942dc25 | 43 | |
f48da8b1 DV |
44 | /* Number of bytes allowed on the internal guest Rx queue. */ |
45 | #define XENVIF_RX_QUEUE_BYTES (XEN_NETIF_RX_RING_SIZE/2 * PAGE_SIZE) | |
46 | ||
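For a sense of scale: with the usual 256-entry xen_netif_rx ring and 4 KiB pages, this caps each queue's internal guest Rx backlog at 512 KiB. A minimal stand-alone check of that arithmetic (the 256 and 4096 are assumed typical values, not taken from this file):

```c
#include <stdio.h>

/* Assumed typical values: a 256-entry xen_netif_rx ring and 4 KiB pages. */
#define XEN_NETIF_RX_RING_SIZE 256
#define PAGE_SIZE 4096
#define XENVIF_RX_QUEUE_BYTES (XEN_NETIF_RX_RING_SIZE/2 * PAGE_SIZE)

int main(void)
{
	/* 256 / 2 * 4096 = 524288 bytes = 512 KiB */
	printf("guest Rx queue cap: %d bytes (%d KiB)\n",
	       XENVIF_RX_QUEUE_BYTES, XENVIF_RX_QUEUE_BYTES / 1024);
	return 0;
}
```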
06b4feb3 | 47 | /* This function sets SKBFL_ZEROCOPY_ENABLE and increases the inflight
a64bd934 WL |
48 | * counter. The inflight counter must be increased because the core driver
49 | * calls into xenvif_zerocopy_callback, which in turn calls
50 | * xenvif_skb_zerocopy_complete.
51 | */ |
52 | void xenvif_skb_zerocopy_prepare(struct xenvif_queue *queue, | |
53 | struct sk_buff *skb) | |
54 | { | |
06b4feb3 | 55 | skb_shinfo(skb)->flags |= SKBFL_ZEROCOPY_ENABLE; |
a64bd934 WL |
56 | atomic_inc(&queue->inflight_packets); |
57 | } | |
58 | ||
59 | void xenvif_skb_zerocopy_complete(struct xenvif_queue *queue) | |
60 | { | |
61 | atomic_dec(&queue->inflight_packets); | |
57b22906 RL |
62 | |
63 | /* Wake the dealloc thread _after_ decrementing inflight_packets so | |
64 | * that if kthread_stop() has already been called, the dealloc thread | |
65 | * does not wait forever with nothing to wake it. | |
66 | */ | |
67 | wake_up(&queue->dealloc_wq); | |
a64bd934 WL |
68 | } |
69 | ||
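The two helpers above bracket the life of a zerocopy skb: xenvif_skb_zerocopy_prepare() bumps inflight_packets before the skb is handed to the core, and xenvif_skb_zerocopy_complete() drops it and only then wakes the dealloc thread, so a thread that is being stopped re-checks the counter after the final wakeup instead of sleeping forever. Below is a user-space sketch of that decrement-then-wake ordering, using C11 atomics and a pthread condition variable in place of the kernel waitqueue; all names are stand-ins, not the driver's.

```c
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int inflight_packets;            /* queue->inflight_packets */
static atomic_bool stopping;                   /* kthread_should_stop()   */
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t dealloc_wq = PTHREAD_COND_INITIALIZER;

static void zerocopy_prepare(void)
{
	atomic_fetch_add(&inflight_packets, 1);
}

static void zerocopy_complete(void)
{
	/* Decrement first, then wake: the woken dealloc thread re-checks
	 * the counter, sees the final value and can exit cleanly. */
	atomic_fetch_sub(&inflight_packets, 1);
	pthread_mutex_lock(&lock);
	pthread_cond_broadcast(&dealloc_wq);
	pthread_mutex_unlock(&lock);
}

static void *dealloc_thread(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&lock);
	while (!atomic_load(&stopping) || atomic_load(&inflight_packets) > 0)
		pthread_cond_wait(&dealloc_wq, &lock);
	pthread_mutex_unlock(&lock);
	puts("dealloc thread: no packets in flight, exiting");
	return NULL;
}

int main(void)
{
	pthread_t tid;

	pthread_create(&tid, NULL, dealloc_thread, NULL);
	zerocopy_prepare();               /* skb handed to the core driver  */
	atomic_store(&stopping, true);    /* "kthread_stop()" has been called */
	zerocopy_complete();              /* callback fires: last wakeup    */
	pthread_join(tid, NULL);
	return 0;
}
```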
5834e72e | 70 | static int xenvif_schedulable(struct xenvif *vif) |
f942dc25 | 71 | { |
3d1af1df | 72 | return netif_running(vif->dev) && |
f48da8b1 DV |
73 | test_bit(VIF_STATUS_CONNECTED, &vif->status) && |
74 | !vif->disabled; | |
f942dc25 IC |
75 | } |
76 | ||
23025393 JG |
77 | static bool xenvif_handle_tx_interrupt(struct xenvif_queue *queue) |
78 | { | |
79 | bool rc; | |
80 | ||
81 | rc = RING_HAS_UNCONSUMED_REQUESTS(&queue->tx); | |
82 | if (rc) | |
83 | napi_schedule(&queue->napi); | |
84 | return rc; | |
85 | } | |
86 | ||
e1f00a69 | 87 | static irqreturn_t xenvif_tx_interrupt(int irq, void *dev_id) |
f942dc25 | 88 | { |
e9ce7cb6 | 89 | struct xenvif_queue *queue = dev_id; |
23025393 | 90 | int old; |
f942dc25 | 91 | |
23025393 JG |
92 | old = atomic_fetch_or(NETBK_TX_EOI, &queue->eoi_pending); |
93 | WARN(old & NETBK_TX_EOI, "Interrupt while EOI pending\n"); | |
94 | ||
95 | if (!xenvif_handle_tx_interrupt(queue)) { | |
96 | atomic_andnot(NETBK_TX_EOI, &queue->eoi_pending); | |
97 | xen_irq_lateeoi(irq, XEN_EOI_FLAG_SPURIOUS); | |
98 | } | |
f942dc25 | 99 | |
e1f00a69 WL |
100 | return IRQ_HANDLED; |
101 | } | |
102 | ||
38741d50 | 103 | static int xenvif_poll(struct napi_struct *napi, int budget) |
b3f980bd | 104 | { |
e9ce7cb6 WL |
105 | struct xenvif_queue *queue = |
106 | container_of(napi, struct xenvif_queue, napi); | |
b3f980bd WL |
107 | int work_done; |
108 | ||
e9d8b2c2 WL |
109 | /* This vif is rogue; we pretend there is nothing to do for this
110 | * vif in order to deschedule it from NAPI. The interface will be
111 | * turned off in thread context later.
112 | */ |
2561cc15 | 113 | if (unlikely(queue->vif->disabled)) { |
e9d8b2c2 WL |
114 | napi_complete(napi); |
115 | return 0; | |
116 | } | |
117 | ||
e9ce7cb6 | 118 | work_done = xenvif_tx_action(queue, budget); |
b3f980bd WL |
119 | |
120 | if (work_done < budget) { | |
6ad20165 | 121 | napi_complete_done(napi, work_done); |
dfa523ae WL |
122 | /* If the queue is rate-limited, it shall be |
123 | * rescheduled in the timer callback. | |
124 | */ | |
125 | if (likely(!queue->rate_limited)) | |
126 | xenvif_napi_schedule_or_enable_events(queue); | |
b3f980bd WL |
127 | } |
128 | ||
129 | return work_done; | |
130 | } | |
131 | ||
23025393 JG |
132 | static bool xenvif_handle_rx_interrupt(struct xenvif_queue *queue) |
133 | { | |
134 | bool rc; | |
135 | ||
136 | rc = xenvif_have_rx_work(queue, false); | |
137 | if (rc) | |
138 | xenvif_kick_thread(queue); | |
139 | return rc; | |
140 | } | |
141 | ||
e1f00a69 WL |
142 | static irqreturn_t xenvif_rx_interrupt(int irq, void *dev_id) |
143 | { | |
e9ce7cb6 | 144 | struct xenvif_queue *queue = dev_id; |
23025393 | 145 | int old; |
e1f00a69 | 146 | |
23025393 JG |
147 | old = atomic_fetch_or(NETBK_RX_EOI, &queue->eoi_pending); |
148 | WARN(old & NETBK_RX_EOI, "Interrupt while EOI pending\n"); | |
149 | ||
150 | if (!xenvif_handle_rx_interrupt(queue)) { | |
151 | atomic_andnot(NETBK_RX_EOI, &queue->eoi_pending); | |
152 | xen_irq_lateeoi(irq, XEN_EOI_FLAG_SPURIOUS); | |
153 | } | |
f942dc25 IC |
154 | |
155 | return IRQ_HANDLED; | |
156 | } | |
157 | ||
f51de243 | 158 | irqreturn_t xenvif_interrupt(int irq, void *dev_id) |
e1f00a69 | 159 | { |
23025393 JG |
160 | struct xenvif_queue *queue = dev_id; |
161 | int old; | |
a3daf3d3 | 162 | bool has_rx, has_tx; |
23025393 JG |
163 | |
164 | old = atomic_fetch_or(NETBK_COMMON_EOI, &queue->eoi_pending); | |
165 | WARN(old, "Interrupt while EOI pending\n"); | |
166 | ||
a3daf3d3 JG |
167 | has_tx = xenvif_handle_tx_interrupt(queue); |
168 | has_rx = xenvif_handle_rx_interrupt(queue); | |
169 | ||
170 | if (!has_rx && !has_tx) { | |
23025393 JG |
171 | atomic_andnot(NETBK_COMMON_EOI, &queue->eoi_pending); |
172 | xen_irq_lateeoi(irq, XEN_EOI_FLAG_SPURIOUS); | |
173 | } | |
e1f00a69 WL |
174 | |
175 | return IRQ_HANDLED; | |
176 | } | |
177 | ||
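The three interrupt handlers above share one pattern for Xen's lateeoi interrupt model: atomically set a "pending EOI" flag on entry, and clear it again (acking the event as spurious) only if no TX or RX work was found; otherwise the EOI is sent later, once the work has actually been processed. A stand-alone sketch of that flag handling with C11 atomics follows; the flag values and helper functions are illustrative, not the driver's.

```c
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Illustrative flag values; the driver defines its own NETBK_*_EOI bits. */
#define NETBK_TX_EOI     0x01
#define NETBK_RX_EOI     0x02
#define NETBK_COMMON_EOI 0x04

static atomic_uint eoi_pending;

/* Stand-ins for "was there unconsumed work?" and "ack the event now". */
static bool handle_tx(void) { return false; }
static bool handle_rx(void) { return false; }
static void xen_irq_lateeoi_spurious(void) { puts("EOI sent (spurious)"); }

static void common_interrupt(void)
{
	unsigned int old = atomic_fetch_or(&eoi_pending, NETBK_COMMON_EOI);

	if (old)
		puts("WARN: interrupt while EOI pending");

	/* Ack immediately only when neither direction had work queued;
	 * otherwise the EOI stays pending until the work is done. */
	if (!handle_tx() && !handle_rx()) {
		atomic_fetch_and(&eoi_pending, ~NETBK_COMMON_EOI);
		xen_irq_lateeoi_spurious();
	}
}

int main(void)
{
	common_interrupt();
	return 0;
}
```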
40d8abde | 178 | static u16 xenvif_select_queue(struct net_device *dev, struct sk_buff *skb, |
a350ecce | 179 | struct net_device *sb_dev) |
40d8abde PD |
180 | { |
181 | struct xenvif *vif = netdev_priv(dev); | |
182 | unsigned int size = vif->hash.size; | |
a2288d4e ID |
183 | unsigned int num_queues; |
184 | ||
185 | /* If queues have not been set up internally, always return 0,
186 | * as the packet is going to be dropped anyway. */ |
187 | num_queues = READ_ONCE(vif->num_queues); | |
188 | if (num_queues < 1) | |
189 | return 0; | |
40d8abde | 190 | |
912e27e8 | 191 | if (vif->hash.alg == XEN_NETIF_CTRL_HASH_ALGORITHM_NONE) |
a350ecce PA |
192 | return netdev_pick_tx(dev, skb, NULL) % |
193 | dev->real_num_tx_queues; | |
40d8abde PD |
194 | |
195 | xenvif_set_skb_hash(vif, skb); | |
196 | ||
197 | if (size == 0) | |
198 | return skb_get_hash_raw(skb) % dev->real_num_tx_queues; | |
199 | ||
22f9cde3 JB |
200 | return vif->hash.mapping[vif->hash.mapping_sel] |
201 | [skb_get_hash_raw(skb) % size]; | |
40d8abde PD |
202 | } |
203 | ||
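xenvif_select_queue() boils down to a two-level lookup: hash the skb, reduce the hash modulo the frontend-supplied mapping size, and use the result to index the currently selected mapping table (the driver keeps two tables and flips mapping_sel when the frontend updates the mapping), whose entries name real TX queues. A toy stand-alone version of the same lookup; the table contents and hash value here are made up for illustration.

```c
#include <stdio.h>

#define HASH_SIZE 8	/* vif->hash.size, chosen by the frontend */

/* Toy stand-in for vif->hash.mapping[vif->hash.mapping_sel][...]. */
static const unsigned short mapping[HASH_SIZE] = { 0, 1, 2, 3, 0, 1, 2, 3 };

static unsigned short select_queue(unsigned int skb_hash)
{
	return mapping[skb_hash % HASH_SIZE];
}

int main(void)
{
	unsigned int hash = 0xdeadbeef;	/* would come from skb_get_hash_raw() */

	printf("hash %#x -> queue %u\n", hash, select_queue(hash));
	return 0;
}
```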
a9ca7f17 Y |
204 | static netdev_tx_t |
205 | xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev) | |
f942dc25 IC |
206 | { |
207 | struct xenvif *vif = netdev_priv(dev); | |
e9ce7cb6 | 208 | struct xenvif_queue *queue = NULL; |
b17075d5 | 209 | unsigned int num_queues; |
e9ce7cb6 | 210 | u16 index; |
f48da8b1 | 211 | struct xenvif_rx_cb *cb; |
f942dc25 IC |
212 | |
213 | BUG_ON(skb->dev != dev); | |
214 | ||
b17075d5 ID |
215 | /* Drop the packet if queues are not set up.
216 | * This handler is called inside an RCU read-side critical section,
217 | * so we do not need to enter one explicitly here.
218 | */ | |
219 | num_queues = READ_ONCE(vif->num_queues); | |
e9ce7cb6 WL |
220 | if (num_queues < 1) |
221 | goto drop; | |
222 | ||
223 | /* Obtain the queue to be used to transmit this packet */ | |
224 | index = skb_get_queue_mapping(skb); | |
225 | if (index >= num_queues) { | |
cc10f871 | 226 | pr_warn_ratelimited("Invalid queue %hu for packet on interface %s\n", |
e9ce7cb6 WL |
227 | index, vif->dev->name); |
228 | index %= num_queues; | |
229 | } | |
230 | queue = &vif->queues[index]; | |
231 | ||
232 | /* Drop the packet if queue is not ready */ | |
233 | if (queue->task == NULL || | |
234 | queue->dealloc_task == NULL || | |
f53c3fe8 | 235 | !xenvif_schedulable(vif)) |
f942dc25 IC |
236 | goto drop; |
237 | ||
210c34dc PD |
238 | if (vif->multicast_control && skb->pkt_type == PACKET_MULTICAST) { |
239 | struct ethhdr *eth = (struct ethhdr *)skb->data; | |
240 | ||
241 | if (!xenvif_mcast_match(vif, eth->h_dest)) | |
242 | goto drop; | |
243 | } | |
244 | ||
f48da8b1 | 245 | cb = XENVIF_RX_CB(skb); |
26c0e102 | 246 | cb->expires = jiffies + vif->drain_timeout; |
f942dc25 | 247 | |
912e27e8 PD |
248 | /* If no hash algorithm is configured then make sure there is no
249 | * hash information in the socket buffer; otherwise it would be
250 | * incorrectly forwarded to the frontend.
251 | */ | |
252 | if (vif->hash.alg == XEN_NETIF_CTRL_HASH_ALGORITHM_NONE) | |
253 | skb_clear_hash(skb); | |
254 | ||
0b38d2ec LF |
255 | /* timestamp packet in software */ |
256 | skb_tx_timestamp(skb); | |
257 | ||
74e7e1ef JG |
258 | if (!xenvif_rx_queue_tail(queue, skb)) |
259 | goto drop; | |
260 | ||
e9ce7cb6 | 261 | xenvif_kick_thread(queue); |
f942dc25 IC |
262 | |
263 | return NETDEV_TX_OK; | |
264 | ||
265 | drop: | |
266 | vif->dev->stats.tx_dropped++; | |
74e7e1ef | 267 | dev_kfree_skb_any(skb); |
f942dc25 IC |
268 | return NETDEV_TX_OK; |
269 | } | |
270 | ||
f942dc25 IC |
271 | static struct net_device_stats *xenvif_get_stats(struct net_device *dev) |
272 | { | |
273 | struct xenvif *vif = netdev_priv(dev); | |
e9ce7cb6 | 274 | struct xenvif_queue *queue = NULL; |
b17075d5 | 275 | unsigned int num_queues; |
ebf692f8 MS |
276 | u64 rx_bytes = 0; |
277 | u64 rx_packets = 0; | |
278 | u64 tx_bytes = 0; | |
279 | u64 tx_packets = 0; | |
e9ce7cb6 WL |
280 | unsigned int index; |
281 | ||
b17075d5 ID |
282 | rcu_read_lock(); |
283 | num_queues = READ_ONCE(vif->num_queues); | |
e9ce7cb6 WL |
284 | |
285 | /* Aggregate tx and rx stats from each queue */ | |
b17075d5 | 286 | for (index = 0; index < num_queues; ++index) { |
e9ce7cb6 WL |
287 | queue = &vif->queues[index]; |
288 | rx_bytes += queue->stats.rx_bytes; | |
289 | rx_packets += queue->stats.rx_packets; | |
290 | tx_bytes += queue->stats.tx_bytes; | |
291 | tx_packets += queue->stats.tx_packets; | |
292 | } | |
293 | ||
b17075d5 | 294 | rcu_read_unlock(); |
f16f1df6 | 295 | |
e9ce7cb6 WL |
296 | vif->dev->stats.rx_bytes = rx_bytes; |
297 | vif->dev->stats.rx_packets = rx_packets; | |
298 | vif->dev->stats.tx_bytes = tx_bytes; | |
299 | vif->dev->stats.tx_packets = tx_packets; | |
300 | ||
f942dc25 IC |
301 | return &vif->dev->stats; |
302 | } | |
303 | ||
304 | static void xenvif_up(struct xenvif *vif) | |
305 | { | |
e9ce7cb6 | 306 | struct xenvif_queue *queue = NULL; |
f7b50c4e | 307 | unsigned int num_queues = vif->num_queues; |
e9ce7cb6 WL |
308 | unsigned int queue_index; |
309 | ||
310 | for (queue_index = 0; queue_index < num_queues; ++queue_index) { | |
311 | queue = &vif->queues[queue_index]; | |
312 | napi_enable(&queue->napi); | |
313 | enable_irq(queue->tx_irq); | |
314 | if (queue->tx_irq != queue->rx_irq) | |
315 | enable_irq(queue->rx_irq); | |
316 | xenvif_napi_schedule_or_enable_events(queue); | |
317 | } | |
f942dc25 IC |
318 | } |
319 | ||
320 | static void xenvif_down(struct xenvif *vif) | |
321 | { | |
e9ce7cb6 | 322 | struct xenvif_queue *queue = NULL; |
f7b50c4e | 323 | unsigned int num_queues = vif->num_queues; |
e9ce7cb6 WL |
324 | unsigned int queue_index; |
325 | ||
326 | for (queue_index = 0; queue_index < num_queues; ++queue_index) { | |
327 | queue = &vif->queues[queue_index]; | |
e9ce7cb6 WL |
328 | disable_irq(queue->tx_irq); |
329 | if (queue->tx_irq != queue->rx_irq) | |
330 | disable_irq(queue->rx_irq); | |
8fe78989 | 331 | napi_disable(&queue->napi); |
e9ce7cb6 WL |
332 | del_timer_sync(&queue->credit_timeout); |
333 | } | |
f942dc25 IC |
334 | } |
335 | ||
336 | static int xenvif_open(struct net_device *dev) | |
337 | { | |
338 | struct xenvif *vif = netdev_priv(dev); | |
3d1af1df | 339 | if (test_bit(VIF_STATUS_CONNECTED, &vif->status)) |
f942dc25 | 340 | xenvif_up(vif); |
e9ce7cb6 | 341 | netif_tx_start_all_queues(dev); |
f942dc25 IC |
342 | return 0; |
343 | } | |
344 | ||
345 | static int xenvif_close(struct net_device *dev) | |
346 | { | |
347 | struct xenvif *vif = netdev_priv(dev); | |
3d1af1df | 348 | if (test_bit(VIF_STATUS_CONNECTED, &vif->status)) |
f942dc25 | 349 | xenvif_down(vif); |
e9ce7cb6 | 350 | netif_tx_stop_all_queues(dev); |
f942dc25 IC |
351 | return 0; |
352 | } | |
353 | ||
354 | static int xenvif_change_mtu(struct net_device *dev, int mtu) | |
355 | { | |
356 | struct xenvif *vif = netdev_priv(dev); | |
d0c2c997 | 357 | int max = vif->can_sg ? ETH_MAX_MTU - VLAN_ETH_HLEN : ETH_DATA_LEN; |
f942dc25 IC |
358 | |
359 | if (mtu > max) | |
360 | return -EINVAL; | |
1eb2cded | 361 | WRITE_ONCE(dev->mtu, mtu); |
f942dc25 IC |
362 | return 0; |
363 | } | |
364 | ||
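For reference, with the kernel's usual constants (ETH_MAX_MTU = 65535, VLAN_ETH_HLEN = 18, ETH_DATA_LEN = 1500) the ceiling applied above is 65517 bytes when scatter-gather is available and 1500 otherwise. A quick stand-alone check of that arithmetic:

```c
#include <stdio.h>

/* Values as defined in the kernel's Ethernet/VLAN headers. */
#define ETH_MAX_MTU	65535U
#define ETH_HLEN	14
#define VLAN_HLEN	4
#define VLAN_ETH_HLEN	(ETH_HLEN + VLAN_HLEN)	/* 18 */
#define ETH_DATA_LEN	1500

int main(void)
{
	printf("max MTU with SG:    %u\n", ETH_MAX_MTU - VLAN_ETH_HLEN); /* 65517 */
	printf("max MTU without SG: %d\n", ETH_DATA_LEN);                /* 1500  */
	return 0;
}
```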
c8f44aff MM |
365 | static netdev_features_t xenvif_fix_features(struct net_device *dev, |
366 | netdev_features_t features) | |
f942dc25 IC |
367 | { |
368 | struct xenvif *vif = netdev_priv(dev); | |
f942dc25 | 369 | |
47103041 MM |
370 | if (!vif->can_sg) |
371 | features &= ~NETIF_F_SG; | |
fedbc8c1 | 372 | if (~(vif->gso_mask) & GSO_BIT(TCPV4)) |
47103041 | 373 | features &= ~NETIF_F_TSO; |
fedbc8c1 | 374 | if (~(vif->gso_mask) & GSO_BIT(TCPV6)) |
82cada22 | 375 | features &= ~NETIF_F_TSO6; |
146c8a77 | 376 | if (!vif->ip_csum) |
47103041 | 377 | features &= ~NETIF_F_IP_CSUM; |
146c8a77 PD |
378 | if (!vif->ipv6_csum) |
379 | features &= ~NETIF_F_IPV6_CSUM; | |
f942dc25 | 380 | |
47103041 | 381 | return features; |
f942dc25 IC |
382 | } |
383 | ||
384 | static const struct xenvif_stat { | |
385 | char name[ETH_GSTRING_LEN]; | |
386 | u16 offset; | |
387 | } xenvif_stats[] = { | |
388 | { | |
389 | "rx_gso_checksum_fixup", | |
e9ce7cb6 | 390 | offsetof(struct xenvif_stats, rx_gso_checksum_fixup) |
f942dc25 | 391 | }, |
1bb332af ZK |
392 | /* If (sent != success + fail), there are probably packets never |
393 | * freed up properly! | |
394 | */ | |
395 | { | |
396 | "tx_zerocopy_sent", | |
e9ce7cb6 | 397 | offsetof(struct xenvif_stats, tx_zerocopy_sent), |
1bb332af ZK |
398 | }, |
399 | { | |
400 | "tx_zerocopy_success", | |
e9ce7cb6 | 401 | offsetof(struct xenvif_stats, tx_zerocopy_success), |
1bb332af ZK |
402 | }, |
403 | { | |
404 | "tx_zerocopy_fail", | |
e9ce7cb6 | 405 | offsetof(struct xenvif_stats, tx_zerocopy_fail) |
1bb332af | 406 | }, |
e3377f36 ZK |
407 | /* Number of packets exceeding MAX_SKB_FRAGS slots. You should use
408 | * a guest with the same MAX_SKB_FRAGS value.
409 | */ | |
410 | { | |
411 | "tx_frag_overflow", | |
e9ce7cb6 | 412 | offsetof(struct xenvif_stats, tx_frag_overflow) |
e3377f36 | 413 | }, |
f942dc25 IC |
414 | }; |
415 | ||
416 | static int xenvif_get_sset_count(struct net_device *dev, int string_set) | |
417 | { | |
418 | switch (string_set) { | |
419 | case ETH_SS_STATS: | |
420 | return ARRAY_SIZE(xenvif_stats); | |
421 | default: | |
422 | return -EINVAL; | |
423 | } | |
424 | } | |
425 | ||
426 | static void xenvif_get_ethtool_stats(struct net_device *dev, | |
427 | struct ethtool_stats *stats, u64 * data) | |
428 | { | |
e9ce7cb6 | 429 | struct xenvif *vif = netdev_priv(dev); |
b17075d5 | 430 | unsigned int num_queues; |
f942dc25 | 431 | int i; |
e9ce7cb6 | 432 | unsigned int queue_index; |
e9ce7cb6 | 433 | |
b17075d5 ID |
434 | rcu_read_lock(); |
435 | num_queues = READ_ONCE(vif->num_queues); | |
436 | ||
e9ce7cb6 WL |
437 | for (i = 0; i < ARRAY_SIZE(xenvif_stats); i++) { |
438 | unsigned long accum = 0; | |
439 | for (queue_index = 0; queue_index < num_queues; ++queue_index) { | |
d63951d7 | 440 | void *vif_stats = &vif->queues[queue_index].stats; |
e9ce7cb6 WL |
441 | accum += *(unsigned long *)(vif_stats + xenvif_stats[i].offset); |
442 | } | |
443 | data[i] = accum; | |
444 | } | |
b17075d5 ID |
445 | |
446 | rcu_read_unlock(); | |
f942dc25 IC |
447 | } |
448 | ||
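The stat table above stores byte offsets into struct xenvif_stats, so xenvif_get_ethtool_stats() can sum each counter across the per-queue stats blocks without naming the fields one by one; these are the per-vif counters reported by `ethtool -S` on the backend interface. A condensed stand-alone version of that offsetof-driven aggregation (the struct fields and values are illustrative):

```c
#include <stddef.h>
#include <stdio.h>

/* Illustrative per-queue stats block, standing in for struct xenvif_stats. */
struct queue_stats {
	unsigned long tx_zerocopy_sent;
	unsigned long tx_zerocopy_fail;
};

static const struct {
	const char *name;
	size_t offset;
} stat_table[] = {
	{ "tx_zerocopy_sent", offsetof(struct queue_stats, tx_zerocopy_sent) },
	{ "tx_zerocopy_fail", offsetof(struct queue_stats, tx_zerocopy_fail) },
};

int main(void)
{
	struct queue_stats queues[2] = {
		{ .tx_zerocopy_sent = 10, .tx_zerocopy_fail = 1 },
		{ .tx_zerocopy_sent = 20, .tx_zerocopy_fail = 2 },
	};
	size_t i, q;

	for (i = 0; i < sizeof(stat_table) / sizeof(stat_table[0]); i++) {
		unsigned long accum = 0;

		for (q = 0; q < 2; q++) {
			const void *stats = &queues[q];

			/* Same trick as the driver: a byte offset into the block. */
			accum += *(const unsigned long *)((const char *)stats +
							  stat_table[i].offset);
		}
		printf("%s = %lu\n", stat_table[i].name, accum);
	}
	return 0;
}
```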
449 | static void xenvif_get_strings(struct net_device *dev, u32 stringset, u8 * data) | |
450 | { | |
451 | int i; | |
452 | ||
453 | switch (stringset) { | |
454 | case ETH_SS_STATS: | |
455 | for (i = 0; i < ARRAY_SIZE(xenvif_stats); i++) | |
456 | memcpy(data + i * ETH_GSTRING_LEN, | |
457 | xenvif_stats[i].name, ETH_GSTRING_LEN); | |
458 | break; | |
459 | } | |
460 | } | |
461 | ||
813abbba | 462 | static const struct ethtool_ops xenvif_ethtool_ops = { |
f942dc25 | 463 | .get_link = ethtool_op_get_link, |
0b38d2ec | 464 | .get_ts_info = ethtool_op_get_ts_info, |
f942dc25 IC |
465 | .get_sset_count = xenvif_get_sset_count, |
466 | .get_ethtool_stats = xenvif_get_ethtool_stats, | |
467 | .get_strings = xenvif_get_strings, | |
468 | }; | |
469 | ||
813abbba | 470 | static const struct net_device_ops xenvif_netdev_ops = { |
40d8abde | 471 | .ndo_select_queue = xenvif_select_queue, |
f942dc25 IC |
472 | .ndo_start_xmit = xenvif_start_xmit, |
473 | .ndo_get_stats = xenvif_get_stats, | |
474 | .ndo_open = xenvif_open, | |
475 | .ndo_stop = xenvif_close, | |
476 | .ndo_change_mtu = xenvif_change_mtu, | |
47103041 | 477 | .ndo_fix_features = xenvif_fix_features, |
4a633a60 MW |
478 | .ndo_set_mac_address = eth_mac_addr, |
479 | .ndo_validate_addr = eth_validate_addr, | |
f942dc25 IC |
480 | }; |
481 | ||
482 | struct xenvif *xenvif_alloc(struct device *parent, domid_t domid, | |
483 | unsigned int handle) | |
484 | { | |
93772114 JK |
485 | static const u8 dummy_addr[ETH_ALEN] = { |
486 | 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, | |
487 | }; | |
f942dc25 IC |
488 | int err; |
489 | struct net_device *dev; | |
490 | struct xenvif *vif; | |
491 | char name[IFNAMSIZ] = {}; | |
492 | ||
493 | snprintf(name, IFNAMSIZ - 1, "vif%u.%u", domid, handle); | |
8d3d53b3 AB |
494 | /* Allocate a netdev with the max. supported number of queues. |
495 | * When the guest selects the desired number, it will be updated | |
f7b50c4e | 496 | * via netif_set_real_num_*_queues(). |
8d3d53b3 | 497 | */ |
c835a677 TG |
498 | dev = alloc_netdev_mq(sizeof(struct xenvif), name, NET_NAME_UNKNOWN, |
499 | ether_setup, xenvif_max_queues); | |
f942dc25 | 500 | if (dev == NULL) { |
b3f980bd | 501 | pr_warn("Could not allocate netdev for %s\n", name); |
f942dc25 IC |
502 | return ERR_PTR(-ENOMEM); |
503 | } | |
504 | ||
505 | SET_NETDEV_DEV(dev, parent); | |
506 | ||
507 | vif = netdev_priv(dev); | |
ac3d5ac2 | 508 | |
f942dc25 IC |
509 | vif->domid = domid; |
510 | vif->handle = handle; | |
f942dc25 | 511 | vif->can_sg = 1; |
146c8a77 | 512 | vif->ip_csum = 1; |
f942dc25 | 513 | vif->dev = dev; |
e9d8b2c2 | 514 | vif->disabled = false; |
26c0e102 DV |
515 | vif->drain_timeout = msecs_to_jiffies(rx_drain_timeout_msecs); |
516 | vif->stall_timeout = msecs_to_jiffies(rx_stall_timeout_msecs); | |
e9d8b2c2 | 517 | |
f7b50c4e | 518 | /* Start out with no queues. */ |
e9ce7cb6 | 519 | vif->queues = NULL; |
f7b50c4e | 520 | vif->num_queues = 0; |
09350788 | 521 | |
1c9535c7 DK |
522 | vif->xdp_headroom = 0; |
523 | ||
ecf08d2d | 524 | spin_lock_init(&vif->lock); |
210c34dc | 525 | INIT_LIST_HEAD(&vif->fe_mcast_addr); |
ecf08d2d | 526 | |
f942dc25 | 527 | dev->netdev_ops = &xenvif_netdev_ops; |
146c8a77 PD |
528 | dev->hw_features = NETIF_F_SG | |
529 | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | | |
2167ca02 | 530 | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_FRAGLIST; |
7365bcfa | 531 | dev->features = dev->hw_features | NETIF_F_RXCSUM; |
7ad24ea4 | 532 | dev->ethtool_ops = &xenvif_ethtool_ops; |
f942dc25 | 533 | |
e1043a4b | 534 | dev->min_mtu = ETH_MIN_MTU; |
d0c2c997 JW |
535 | dev->max_mtu = ETH_MAX_MTU - VLAN_ETH_HLEN; |
536 | ||
f942dc25 IC |
537 | /* |
538 | * Initialise a dummy MAC address. We choose the numerically | |
539 | * largest non-broadcast address to prevent the address getting | |
540 | * stolen by an Ethernet bridge for STP purposes. | |
541 | * (FE:FF:FF:FF:FF:FF) | |
542 | */ | |
93772114 | 543 | eth_hw_addr_set(dev, dummy_addr); |
f942dc25 IC |
544 | |
545 | netif_carrier_off(dev); | |
546 | ||
547 | err = register_netdev(dev); | |
548 | if (err) { | |
549 | netdev_warn(dev, "Could not register device: err=%d\n", err); | |
550 | free_netdev(dev); | |
551 | return ERR_PTR(err); | |
552 | } | |
553 | ||
554 | netdev_dbg(dev, "Successfully created xenvif\n"); | |
279f438e PD |
555 | |
556 | __module_get(THIS_MODULE); | |
557 | ||
f942dc25 IC |
558 | return vif; |
559 | } | |
560 | ||
e9ce7cb6 WL |
561 | int xenvif_init_queue(struct xenvif_queue *queue) |
562 | { | |
563 | int err, i; | |
564 | ||
565 | queue->credit_bytes = queue->remaining_credit = ~0UL; | |
566 | queue->credit_usec = 0UL; | |
cac6a8f9 | 567 | timer_setup(&queue->credit_timeout, xenvif_tx_credit_callback, 0); |
e9ce7cb6 WL |
568 | queue->credit_window_start = get_jiffies_64(); |
569 | ||
f48da8b1 DV |
570 | queue->rx_queue_max = XENVIF_RX_QUEUE_BYTES; |
571 | ||
e9ce7cb6 WL |
572 | skb_queue_head_init(&queue->rx_queue); |
573 | skb_queue_head_init(&queue->tx_queue); | |
574 | ||
575 | queue->pending_cons = 0; | |
576 | queue->pending_prod = MAX_PENDING_REQS; | |
577 | for (i = 0; i < MAX_PENDING_REQS; ++i) | |
578 | queue->pending_ring[i] = i; | |
579 | ||
580 | spin_lock_init(&queue->callback_lock); | |
581 | spin_lock_init(&queue->response_lock); | |
582 | ||
583 | /* If ballooning is disabled, this will consume real memory, so it
584 | * had better be enabled. The long-term solution would be to use just
585 | * a bunch of valid page descriptors, without depending on ballooning.
586 | */ | |
ff4b156f DV |
587 | err = gnttab_alloc_pages(MAX_PENDING_REQS, |
588 | queue->mmap_pages); | |
e9ce7cb6 WL |
589 | if (err) { |
590 | netdev_err(queue->vif->dev, "Could not reserve mmap_pages\n"); | |
591 | return -ENOMEM; | |
592 | } | |
593 | ||
594 | for (i = 0; i < MAX_PENDING_REQS; i++) { | |
b63ca3e8 | 595 | queue->pending_tx_info[i].callback_struct = (struct ubuf_info_msgzc) |
7ab4f16f | 596 | { { .ops = &xenvif_ubuf_ops }, |
cc8737a5 WB |
597 | { { .ctx = NULL, |
598 | .desc = i } } }; | |
e9ce7cb6 WL |
599 | queue->grant_tx_handle[i] = NETBACK_INVALID_HANDLE; |
600 | } | |
601 | ||
e9ce7cb6 WL |
602 | return 0; |
603 | } | |
604 | ||
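The pending ring initialised above is a fixed-size free-list of request indices: pending_prod starts MAX_PENDING_REQS ahead of pending_cons, so every slot begins free, and indices are recycled by pushing them back at the producer side. A minimal stand-alone model of that allocator follows; the names mirror the driver's fields, but the details are an illustration (the real ring is sized to the TX ring, and the 8 below is just for the example).

```c
#include <stdio.h>

#define MAX_PENDING_REQS 8	/* small for the example; the driver's matches the TX ring */

static unsigned short pending_ring[MAX_PENDING_REQS];
static unsigned int pending_prod, pending_cons;

static void pending_init(void)
{
	unsigned int i;

	pending_cons = 0;
	pending_prod = MAX_PENDING_REQS;	/* all slots start out free */
	for (i = 0; i < MAX_PENDING_REQS; i++)
		pending_ring[i] = i;
}

/* Number of free slots is simply prod - cons. */
static unsigned int pending_free(void)
{
	return pending_prod - pending_cons;
}

static unsigned short pending_alloc(void)
{
	return pending_ring[pending_cons++ % MAX_PENDING_REQS];
}

static void pending_release(unsigned short idx)
{
	pending_ring[pending_prod++ % MAX_PENDING_REQS] = idx;
}

int main(void)
{
	unsigned short a, b;

	pending_init();
	a = pending_alloc();
	b = pending_alloc();
	printf("allocated %u and %u, %u slots free\n", a, b, pending_free());
	pending_release(a);
	printf("released %u, %u slots free\n", a, pending_free());
	return 0;
}
```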
605 | void xenvif_carrier_on(struct xenvif *vif) | |
606 | { | |
607 | rtnl_lock(); | |
608 | if (!vif->can_sg && vif->dev->mtu > ETH_DATA_LEN) | |
609 | dev_set_mtu(vif->dev, ETH_DATA_LEN); | |
610 | netdev_update_features(vif->dev); | |
3d1af1df | 611 | set_bit(VIF_STATUS_CONNECTED, &vif->status); |
e9ce7cb6 WL |
612 | if (netif_running(vif->dev)) |
613 | xenvif_up(vif); | |
614 | rtnl_unlock(); | |
615 | } | |
616 | ||
4e15ee2c PD |
617 | int xenvif_connect_ctrl(struct xenvif *vif, grant_ref_t ring_ref, |
618 | unsigned int evtchn) | |
619 | { | |
620 | struct net_device *dev = vif->dev; | |
f2fa0e5e | 621 | struct xenbus_device *xendev = xenvif_to_xenbus_device(vif); |
4e15ee2c PD |
622 | void *addr; |
623 | struct xen_netif_ctrl_sring *shared; | |
9476654b | 624 | RING_IDX rsp_prod, req_prod; |
0364a882 | 625 | int err; |
4e15ee2c | 626 | |
f2fa0e5e | 627 | err = xenbus_map_ring_valloc(xendev, &ring_ref, 1, &addr); |
4e15ee2c PD |
628 | if (err) |
629 | goto err; | |
630 | ||
631 | shared = (struct xen_netif_ctrl_sring *)addr; | |
9476654b PD |
632 | rsp_prod = READ_ONCE(shared->rsp_prod); |
633 | req_prod = READ_ONCE(shared->req_prod); | |
634 | ||
635 | BACK_RING_ATTACH(&vif->ctrl, shared, rsp_prod, XEN_PAGE_SIZE); | |
636 | ||
637 | err = -EIO; | |
638 | if (req_prod - rsp_prod > RING_SIZE(&vif->ctrl)) | |
639 | goto err_unmap; | |
4e15ee2c | 640 | |
f2fa0e5e | 641 | err = bind_interdomain_evtchn_to_irq_lateeoi(xendev, evtchn); |
4e15ee2c PD |
642 | if (err < 0) |
643 | goto err_unmap; | |
644 | ||
645 | vif->ctrl_irq = err; | |
646 | ||
40d8abde PD |
647 | xenvif_init_hash(vif); |
648 | ||
0364a882 JG |
649 | err = request_threaded_irq(vif->ctrl_irq, NULL, xenvif_ctrl_irq_fn, |
650 | IRQF_ONESHOT, "xen-netback-ctrl", vif); | |
651 | if (err) { | |
652 | pr_warn("Could not setup irq handler for %s\n", dev->name); | |
4e15ee2c PD |
653 | goto err_deinit; |
654 | } | |
655 | ||
4e15ee2c PD |
656 | return 0; |
657 | ||
658 | err_deinit: | |
40d8abde | 659 | xenvif_deinit_hash(vif); |
4e15ee2c PD |
660 | unbind_from_irqhandler(vif->ctrl_irq, vif); |
661 | vif->ctrl_irq = 0; | |
662 | ||
663 | err_unmap: | |
f2fa0e5e | 664 | xenbus_unmap_ring_vfree(xendev, vif->ctrl.sring); |
4e15ee2c PD |
665 | vif->ctrl.sring = NULL; |
666 | ||
667 | err: | |
668 | return err; | |
669 | } | |
670 | ||
2ac061ce JG |
671 | static void xenvif_disconnect_queue(struct xenvif_queue *queue) |
672 | { | |
2ac061ce | 673 | if (queue->task) { |
6309727e | 674 | kthread_stop_put(queue->task); |
2ac061ce JG |
675 | queue->task = NULL; |
676 | } | |
677 | ||
678 | if (queue->dealloc_task) { | |
679 | kthread_stop(queue->dealloc_task); | |
680 | queue->dealloc_task = NULL; | |
681 | } | |
682 | ||
683 | if (queue->napi.poll) { | |
684 | netif_napi_del(&queue->napi); | |
685 | queue->napi.poll = NULL; | |
686 | } | |
687 | ||
fd42bfd1 PD |
688 | if (queue->tx_irq) { |
689 | unbind_from_irqhandler(queue->tx_irq, queue); | |
690 | if (queue->tx_irq == queue->rx_irq) | |
691 | queue->rx_irq = 0; | |
692 | queue->tx_irq = 0; | |
693 | } | |
694 | ||
695 | if (queue->rx_irq) { | |
696 | unbind_from_irqhandler(queue->rx_irq, queue); | |
697 | queue->rx_irq = 0; | |
698 | } | |
699 | ||
2ac061ce JG |
700 | xenvif_unmap_frontend_data_rings(queue); |
701 | } | |
702 | ||
4e15ee2c PD |
703 | int xenvif_connect_data(struct xenvif_queue *queue, |
704 | unsigned long tx_ring_ref, | |
705 | unsigned long rx_ring_ref, | |
706 | unsigned int tx_evtchn, | |
707 | unsigned int rx_evtchn) | |
f942dc25 | 708 | { |
f2fa0e5e | 709 | struct xenbus_device *dev = xenvif_to_xenbus_device(queue->vif); |
67fa3660 | 710 | struct task_struct *task; |
587a7126 | 711 | int err; |
f942dc25 | 712 | |
e9ce7cb6 WL |
713 | BUG_ON(queue->tx_irq); |
714 | BUG_ON(queue->task); | |
715 | BUG_ON(queue->dealloc_task); | |
f942dc25 | 716 | |
4e15ee2c PD |
717 | err = xenvif_map_frontend_data_rings(queue, tx_ring_ref, |
718 | rx_ring_ref); | |
f942dc25 IC |
719 | if (err < 0) |
720 | goto err; | |
721 | ||
e9ce7cb6 WL |
722 | init_waitqueue_head(&queue->wq); |
723 | init_waitqueue_head(&queue->dealloc_wq); | |
a64bd934 | 724 | atomic_set(&queue->inflight_packets, 0); |
ca2f09f2 | 725 | |
b48b89f9 | 726 | netif_napi_add(queue->vif->dev, &queue->napi, xenvif_poll); |
e24f8191 | 727 | |
2ac061ce JG |
728 | queue->stalled = true; |
729 | ||
730 | task = kthread_run(xenvif_kthread_guest_rx, queue, | |
731 | "%s-guest-rx", queue->name); | |
732 | if (IS_ERR(task)) | |
733 | goto kthread_err; | |
734 | queue->task = task; | |
107866a8 RPM |
735 | /* |
736 | * Take a reference to the task in order to prevent it from being freed | |
737 | * if the thread function returns before kthread_stop is called. | |
738 | */ | |
739 | get_task_struct(task); | |
2ac061ce JG |
740 | |
741 | task = kthread_run(xenvif_dealloc_kthread, queue, | |
742 | "%s-dealloc", queue->name); | |
743 | if (IS_ERR(task)) | |
744 | goto kthread_err; | |
745 | queue->dealloc_task = task; | |
746 | ||
e1f00a69 WL |
747 | if (tx_evtchn == rx_evtchn) { |
748 | /* feature-split-event-channels == 0 */ | |
23025393 | 749 | err = bind_interdomain_evtchn_to_irqhandler_lateeoi( |
f2fa0e5e | 750 | dev, tx_evtchn, xenvif_interrupt, 0, |
e9ce7cb6 | 751 | queue->name, queue); |
e1f00a69 | 752 | if (err < 0) |
2ac061ce | 753 | goto err; |
e9ce7cb6 WL |
754 | queue->tx_irq = queue->rx_irq = err; |
755 | disable_irq(queue->tx_irq); | |
e1f00a69 WL |
756 | } else { |
757 | /* feature-split-event-channels == 1 */ | |
e9ce7cb6 WL |
758 | snprintf(queue->tx_irq_name, sizeof(queue->tx_irq_name), |
759 | "%s-tx", queue->name); | |
23025393 | 760 | err = bind_interdomain_evtchn_to_irqhandler_lateeoi( |
f2fa0e5e | 761 | dev, tx_evtchn, xenvif_tx_interrupt, 0, |
e9ce7cb6 | 762 | queue->tx_irq_name, queue); |
e1f00a69 | 763 | if (err < 0) |
2ac061ce | 764 | goto err; |
e9ce7cb6 WL |
765 | queue->tx_irq = err; |
766 | disable_irq(queue->tx_irq); | |
e1f00a69 | 767 | |
e9ce7cb6 WL |
768 | snprintf(queue->rx_irq_name, sizeof(queue->rx_irq_name), |
769 | "%s-rx", queue->name); | |
23025393 | 770 | err = bind_interdomain_evtchn_to_irqhandler_lateeoi( |
f2fa0e5e | 771 | dev, rx_evtchn, xenvif_rx_interrupt, 0, |
e9ce7cb6 | 772 | queue->rx_irq_name, queue); |
e1f00a69 | 773 | if (err < 0) |
2ac061ce | 774 | goto err; |
e9ce7cb6 WL |
775 | queue->rx_irq = err; |
776 | disable_irq(queue->rx_irq); | |
e1f00a69 | 777 | } |
f942dc25 | 778 | |
f942dc25 | 779 | return 0; |
b3f980bd | 780 | |
2ac061ce JG |
781 | kthread_err: |
782 | pr_warn("Could not allocate kthread for %s\n", queue->name); | |
783 | err = PTR_ERR(task); | |
f942dc25 | 784 | err: |
2ac061ce | 785 | xenvif_disconnect_queue(queue); |
f942dc25 IC |
786 | return err; |
787 | } | |
788 | ||
48856286 | 789 | void xenvif_carrier_off(struct xenvif *vif) |
f942dc25 IC |
790 | { |
791 | struct net_device *dev = vif->dev; | |
48856286 IC |
792 | |
793 | rtnl_lock(); | |
3d1af1df ZK |
794 | if (test_and_clear_bit(VIF_STATUS_CONNECTED, &vif->status)) { |
795 | netif_carrier_off(dev); /* discard queued packets */ | |
796 | if (netif_running(dev)) | |
797 | xenvif_down(vif); | |
798 | } | |
48856286 | 799 | rtnl_unlock(); |
48856286 IC |
800 | } |
801 | ||
4e15ee2c | 802 | void xenvif_disconnect_data(struct xenvif *vif) |
48856286 | 803 | { |
e9ce7cb6 | 804 | struct xenvif_queue *queue = NULL; |
f7b50c4e | 805 | unsigned int num_queues = vif->num_queues; |
e9ce7cb6 WL |
806 | unsigned int queue_index; |
807 | ||
3d1af1df | 808 | xenvif_carrier_off(vif); |
f942dc25 | 809 | |
e9ce7cb6 WL |
810 | for (queue_index = 0; queue_index < num_queues; ++queue_index) { |
811 | queue = &vif->queues[queue_index]; | |
db739ef3 | 812 | |
2ac061ce | 813 | xenvif_disconnect_queue(queue); |
e9ce7cb6 | 814 | } |
210c34dc PD |
815 | |
816 | xenvif_mcast_addr_list_free(vif); | |
279f438e PD |
817 | } |
818 | ||
4e15ee2c PD |
819 | void xenvif_disconnect_ctrl(struct xenvif *vif) |
820 | { | |
4e15ee2c | 821 | if (vif->ctrl_irq) { |
c0fcded2 | 822 | xenvif_deinit_hash(vif); |
4e15ee2c PD |
823 | unbind_from_irqhandler(vif->ctrl_irq, vif); |
824 | vif->ctrl_irq = 0; | |
825 | } | |
826 | ||
827 | if (vif->ctrl.sring) { | |
828 | xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(vif), | |
829 | vif->ctrl.sring); | |
830 | vif->ctrl.sring = NULL; | |
831 | } | |
832 | } | |
833 | ||
8d3d53b3 AB |
834 | /* Reverse the relevant parts of xenvif_init_queue(). |
835 | * Used for queue teardown from xenvif_free(), and on the | |
836 | * error handling paths in xenbus.c:connect(). | |
837 | */ | |
838 | void xenvif_deinit_queue(struct xenvif_queue *queue) | |
839 | { | |
ff4b156f | 840 | gnttab_free_pages(MAX_PENDING_REQS, queue->mmap_pages); |
8d3d53b3 AB |
841 | } |
842 | ||
279f438e PD |
843 | void xenvif_free(struct xenvif *vif) |
844 | { | |
9c6f3ffe | 845 | struct xenvif_queue *queues = vif->queues; |
f7b50c4e | 846 | unsigned int num_queues = vif->num_queues; |
e9ce7cb6 | 847 | unsigned int queue_index; |
f53c3fe8 | 848 | |
e9ce7cb6 | 849 | unregister_netdev(vif->dev); |
f942dc25 | 850 | free_netdev(vif->dev); |
b103f358 | 851 | |
9c6f3ffe DV |
852 | for (queue_index = 0; queue_index < num_queues; ++queue_index) |
853 | xenvif_deinit_queue(&queues[queue_index]); | |
854 | vfree(queues); | |
855 | ||
279f438e | 856 | module_put(THIS_MODULE); |
f942dc25 | 857 | } |