/*
 * Network-device interface management.
 *
 * Copyright (c) 2004-2005, Keir Fraser
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include "common.h"

#include <linux/kthread.h>
#include <linux/sched/task.h>
#include <linux/ethtool.h>
#include <linux/rtnetlink.h>
#include <linux/if_vlan.h>
#include <linux/vmalloc.h>

#include <xen/events.h>
#include <asm/xen/hypercall.h>
#include <xen/balloon.h>
#define XENVIF_QUEUE_LENGTH 32

/* Number of bytes allowed on the internal guest Rx queue. */
#define XENVIF_RX_QUEUE_BYTES (XEN_NETIF_RX_RING_SIZE/2 * PAGE_SIZE)
/* This function is used to set SKBFL_ZEROCOPY_ENABLE as well as
 * increasing the inflight counter. We need to increase the inflight
 * counter because the core driver calls into xenvif_zerocopy_callback,
 * which calls xenvif_skb_zerocopy_complete.
 */
void xenvif_skb_zerocopy_prepare(struct xenvif_queue *queue,
				 struct sk_buff *skb)
{
	skb_shinfo(skb)->flags |= SKBFL_ZEROCOPY_ENABLE;
	atomic_inc(&queue->inflight_packets);
}
void xenvif_skb_zerocopy_complete(struct xenvif_queue *queue)
{
	atomic_dec(&queue->inflight_packets);

	/* Wake the dealloc thread _after_ decrementing inflight_packets so
	 * that if kthread_stop() has already been called, the dealloc thread
	 * does not wait forever with nothing to wake it.
	 */
	wake_up(&queue->dealloc_wq);
}
static int xenvif_schedulable(struct xenvif *vif)
{
	return netif_running(vif->dev) &&
		test_bit(VIF_STATUS_CONNECTED, &vif->status) &&
		!vif->disabled;
}
static bool xenvif_handle_tx_interrupt(struct xenvif_queue *queue)
{
	bool rc;

	rc = RING_HAS_UNCONSUMED_REQUESTS(&queue->tx);
	if (rc)
		napi_schedule(&queue->napi);
	return rc;
}
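/* TX event-channel interrupt. With the lateeoi binding the EOI must be
 * signalled explicitly; if the event turns out to be spurious (no
 * unconsumed TX requests), clear the pending-EOI bit again and issue an
 * immediate EOI flagged as spurious, so the event-channel core can
 * throttle a misbehaving frontend.
 */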
static irqreturn_t xenvif_tx_interrupt(int irq, void *dev_id)
{
	struct xenvif_queue *queue = dev_id;
	int old;

	old = atomic_fetch_or(NETBK_TX_EOI, &queue->eoi_pending);
	WARN(old & NETBK_TX_EOI, "Interrupt while EOI pending\n");

	if (!xenvif_handle_tx_interrupt(queue)) {
		atomic_andnot(NETBK_TX_EOI, &queue->eoi_pending);
		xen_irq_lateeoi(irq, XEN_EOI_FLAG_SPURIOUS);
	}

	return IRQ_HANDLED;
}
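/* NAPI poll handler: process up to @budget worth of transmit requests
 * from the frontend. The guest-RX direction is not handled here; it is
 * driven by the per-queue kthread (xenvif_kthread_guest_rx).
 */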
static int xenvif_poll(struct napi_struct *napi, int budget)
{
	struct xenvif_queue *queue =
		container_of(napi, struct xenvif_queue, napi);
	int work_done;

	/* This vif is rogue, we pretend there is nothing to do
	 * for this vif to deschedule it from NAPI. But this interface
	 * will be turned off in thread context later.
	 */
	if (unlikely(queue->vif->disabled)) {
		napi_complete(napi);
		return 0;
	}

	work_done = xenvif_tx_action(queue, budget);

	if (work_done < budget) {
		napi_complete_done(napi, work_done);
		/* If the queue is rate-limited, it shall be
		 * rescheduled in the timer callback.
		 */
		if (likely(!queue->rate_limited))
			xenvif_napi_schedule_or_enable_events(queue);
	}

	return work_done;
}
static bool xenvif_handle_rx_interrupt(struct xenvif_queue *queue)
{
	bool rc;

	rc = xenvif_have_rx_work(queue, false);
	if (rc)
		xenvif_kick_thread(queue);
	return rc;
}
static irqreturn_t xenvif_rx_interrupt(int irq, void *dev_id)
{
	struct xenvif_queue *queue = dev_id;
	int old;

	old = atomic_fetch_or(NETBK_RX_EOI, &queue->eoi_pending);
	WARN(old & NETBK_RX_EOI, "Interrupt while EOI pending\n");

	if (!xenvif_handle_rx_interrupt(queue)) {
		atomic_andnot(NETBK_RX_EOI, &queue->eoi_pending);
		xen_irq_lateeoi(irq, XEN_EOI_FLAG_SPURIOUS);
	}

	return IRQ_HANDLED;
}
irqreturn_t xenvif_interrupt(int irq, void *dev_id)
{
	struct xenvif_queue *queue = dev_id;
	int old;
	bool has_tx, has_rx;

	old = atomic_fetch_or(NETBK_COMMON_EOI, &queue->eoi_pending);
	WARN(old, "Interrupt while EOI pending\n");

	has_tx = xenvif_handle_tx_interrupt(queue);
	has_rx = xenvif_handle_rx_interrupt(queue);

	if (!has_rx && !has_tx) {
		atomic_andnot(NETBK_COMMON_EOI, &queue->eoi_pending);
		xen_irq_lateeoi(irq, XEN_EOI_FLAG_SPURIOUS);
	}

	return IRQ_HANDLED;
}
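/* Select a TX queue for @skb. If the frontend negotiated a hash
 * algorithm via the control ring, map the packet's hash through the
 * frontend-supplied mapping table; otherwise fall back to the core
 * netdev_pick_tx().
 */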
static u16 xenvif_select_queue(struct net_device *dev, struct sk_buff *skb,
			       struct net_device *sb_dev)
{
	struct xenvif *vif = netdev_priv(dev);
	unsigned int size = vif->hash.size;
	unsigned int num_queues;

	/* If queues are not set up internally - always return 0
	 * as the packet is going to be dropped anyway */
	num_queues = READ_ONCE(vif->num_queues);
	if (num_queues < 1)
		return 0;

	if (vif->hash.alg == XEN_NETIF_CTRL_HASH_ALGORITHM_NONE)
		return netdev_pick_tx(dev, skb, NULL) %
		       dev->real_num_tx_queues;

	xenvif_set_skb_hash(vif, skb);

	if (size == 0)
		return skb_get_hash_raw(skb) % dev->real_num_tx_queues;

	return vif->hash.mapping[vif->hash.mapping_sel]
				[skb_get_hash_raw(skb) % size];
}
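/* Transmit path: packets sent by the backend are received by the
 * frontend, so the skb is appended to the queue's internal guest-RX
 * queue and the RX kthread is kicked to push it onto the shared ring.
 */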
static netdev_tx_t
xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct xenvif *vif = netdev_priv(dev);
	struct xenvif_queue *queue = NULL;
	unsigned int num_queues;
	u16 index;
	struct xenvif_rx_cb *cb;

	BUG_ON(skb->dev != dev);

	/* Drop the packet if queues are not set up.
	 * This handler should be called inside an RCU read section
	 * so we don't need to enter it here explicitly.
	 */
	num_queues = READ_ONCE(vif->num_queues);
	if (num_queues < 1)
		goto drop;

	/* Obtain the queue to be used to transmit this packet */
	index = skb_get_queue_mapping(skb);
	if (index >= num_queues) {
		pr_warn_ratelimited("Invalid queue %hu for packet on interface %s\n",
				    index, vif->dev->name);
		index %= num_queues;
	}
	queue = &vif->queues[index];

	/* Drop the packet if queue is not ready */
	if (queue->task == NULL ||
	    queue->dealloc_task == NULL ||
	    !xenvif_schedulable(vif))
		goto drop;

	if (vif->multicast_control && skb->pkt_type == PACKET_MULTICAST) {
		struct ethhdr *eth = (struct ethhdr *)skb->data;

		if (!xenvif_mcast_match(vif, eth->h_dest))
			goto drop;
	}

	cb = XENVIF_RX_CB(skb);
	cb->expires = jiffies + vif->drain_timeout;

	/* If there is no hash algorithm configured then make sure there
	 * is no hash information in the socket buffer otherwise it
	 * would be incorrectly forwarded to the frontend.
	 */
	if (vif->hash.alg == XEN_NETIF_CTRL_HASH_ALGORITHM_NONE)
		skb_clear_hash(skb);

	if (!xenvif_rx_queue_tail(queue, skb))
		goto drop;

	xenvif_kick_thread(queue);

	return NETDEV_TX_OK;

 drop:
	vif->dev->stats.tx_dropped++;
	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
}
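/* Sum the per-queue counters into the aggregate netdev statistics.
 * The queue array may be reconfigured concurrently, hence the RCU
 * read section and the READ_ONCE() of num_queues.
 */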
static struct net_device_stats *xenvif_get_stats(struct net_device *dev)
{
	struct xenvif *vif = netdev_priv(dev);
	struct xenvif_queue *queue = NULL;
	unsigned int num_queues;
	u64 rx_bytes = 0;
	u64 rx_packets = 0;
	u64 tx_bytes = 0;
	u64 tx_packets = 0;
	unsigned int index;

	rcu_read_lock();
	num_queues = READ_ONCE(vif->num_queues);

	/* Aggregate tx and rx stats from each queue */
	for (index = 0; index < num_queues; ++index) {
		queue = &vif->queues[index];
		rx_bytes += queue->stats.rx_bytes;
		rx_packets += queue->stats.rx_packets;
		tx_bytes += queue->stats.tx_bytes;
		tx_packets += queue->stats.tx_packets;
	}

	rcu_read_unlock();

	vif->dev->stats.rx_bytes = rx_bytes;
	vif->dev->stats.rx_packets = rx_packets;
	vif->dev->stats.tx_bytes = tx_bytes;
	vif->dev->stats.tx_packets = tx_packets;

	return &vif->dev->stats;
}
static void xenvif_up(struct xenvif *vif)
{
	struct xenvif_queue *queue = NULL;
	unsigned int num_queues = vif->num_queues;
	unsigned int queue_index;

	for (queue_index = 0; queue_index < num_queues; ++queue_index) {
		queue = &vif->queues[queue_index];
		napi_enable(&queue->napi);
		enable_irq(queue->tx_irq);
		if (queue->tx_irq != queue->rx_irq)
			enable_irq(queue->rx_irq);
		xenvif_napi_schedule_or_enable_events(queue);
	}
}
static void xenvif_down(struct xenvif *vif)
{
	struct xenvif_queue *queue = NULL;
	unsigned int num_queues = vif->num_queues;
	unsigned int queue_index;

	for (queue_index = 0; queue_index < num_queues; ++queue_index) {
		queue = &vif->queues[queue_index];
		disable_irq(queue->tx_irq);
		if (queue->tx_irq != queue->rx_irq)
			disable_irq(queue->rx_irq);
		napi_disable(&queue->napi);
		del_timer_sync(&queue->credit_timeout);
	}
}
static int xenvif_open(struct net_device *dev)
{
	struct xenvif *vif = netdev_priv(dev);
	if (test_bit(VIF_STATUS_CONNECTED, &vif->status))
		xenvif_up(vif);
	netif_tx_start_all_queues(dev);
	return 0;
}
static int xenvif_close(struct net_device *dev)
{
	struct xenvif *vif = netdev_priv(dev);
	if (test_bit(VIF_STATUS_CONNECTED, &vif->status))
		xenvif_down(vif);
	netif_tx_stop_all_queues(dev);
	return 0;
}
static int xenvif_change_mtu(struct net_device *dev, int mtu)
{
	struct xenvif *vif = netdev_priv(dev);
	int max = vif->can_sg ? ETH_MAX_MTU - VLAN_ETH_HLEN : ETH_DATA_LEN;

	if (mtu > max)
		return -EINVAL;
	dev->mtu = mtu;
	return 0;
}
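/* Mask out features the frontend has not negotiated: scatter-gather,
 * TSO/TSO6 and checksum offloads are kept only if the corresponding
 * vif capability bits are set.
 */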
static netdev_features_t xenvif_fix_features(struct net_device *dev,
					     netdev_features_t features)
{
	struct xenvif *vif = netdev_priv(dev);

	if (!vif->can_sg)
		features &= ~NETIF_F_SG;
	if (~(vif->gso_mask) & GSO_BIT(TCPV4))
		features &= ~NETIF_F_TSO;
	if (~(vif->gso_mask) & GSO_BIT(TCPV6))
		features &= ~NETIF_F_TSO6;
	if (!vif->ip_csum)
		features &= ~NETIF_F_IP_CSUM;
	if (!vif->ipv6_csum)
		features &= ~NETIF_F_IPV6_CSUM;

	return features;
}
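/* ethtool statistics: each entry names one counter together with its
 * offset inside struct xenvif_stats; values are accumulated across all
 * queues when read.
 */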
static const struct xenvif_stat {
	char name[ETH_GSTRING_LEN];
	u16 offset;
} xenvif_stats[] = {
	{
		"rx_gso_checksum_fixup",
		offsetof(struct xenvif_stats, rx_gso_checksum_fixup)
	},
	/* If (sent != success + fail), there are probably packets never
	 * freed up properly!
	 */
	{
		"tx_zerocopy_sent",
		offsetof(struct xenvif_stats, tx_zerocopy_sent),
	},
	{
		"tx_zerocopy_success",
		offsetof(struct xenvif_stats, tx_zerocopy_success),
	},
	{
		"tx_zerocopy_fail",
		offsetof(struct xenvif_stats, tx_zerocopy_fail)
	},
	/* Number of packets exceeding MAX_SKB_FRAGS slots. You should use
	 * a guest with the same MAX_SKB_FRAGS value.
	 */
	{
		"tx_frag_overflow",
		offsetof(struct xenvif_stats, tx_frag_overflow)
	},
};
static int xenvif_get_sset_count(struct net_device *dev, int string_set)
{
	switch (string_set) {
	case ETH_SS_STATS:
		return ARRAY_SIZE(xenvif_stats);
	default:
		return -EINVAL;
	}
}
static void xenvif_get_ethtool_stats(struct net_device *dev,
				     struct ethtool_stats *stats, u64 *data)
{
	struct xenvif *vif = netdev_priv(dev);
	unsigned int num_queues;
	int i;
	unsigned int queue_index;

	rcu_read_lock();
	num_queues = READ_ONCE(vif->num_queues);

	for (i = 0; i < ARRAY_SIZE(xenvif_stats); i++) {
		unsigned long accum = 0;

		for (queue_index = 0; queue_index < num_queues; ++queue_index) {
			void *vif_stats = &vif->queues[queue_index].stats;

			accum += *(unsigned long *)(vif_stats + xenvif_stats[i].offset);
		}
		data[i] = accum;
	}

	rcu_read_unlock();
}
static void xenvif_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
	int i;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < ARRAY_SIZE(xenvif_stats); i++)
			memcpy(data + i * ETH_GSTRING_LEN,
			       xenvif_stats[i].name, ETH_GSTRING_LEN);
		break;
	}
}
static const struct ethtool_ops xenvif_ethtool_ops = {
	.get_link	= ethtool_op_get_link,

	.get_sset_count = xenvif_get_sset_count,
	.get_ethtool_stats = xenvif_get_ethtool_stats,
	.get_strings = xenvif_get_strings,
};
static const struct net_device_ops xenvif_netdev_ops = {
	.ndo_select_queue = xenvif_select_queue,
	.ndo_start_xmit	= xenvif_start_xmit,
	.ndo_get_stats	= xenvif_get_stats,
	.ndo_open	= xenvif_open,
	.ndo_stop	= xenvif_close,
	.ndo_change_mtu	= xenvif_change_mtu,
	.ndo_fix_features = xenvif_fix_features,
	.ndo_set_mac_address = eth_mac_addr,
	.ndo_validate_addr   = eth_validate_addr,
};
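/* Allocate and register the netdev for a new vif. No queues are
 * created here; they are set up later, once the frontend has
 * negotiated how many it wants.
 */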
struct xenvif *xenvif_alloc(struct device *parent, domid_t domid,
			    unsigned int handle)
{
	static const u8 dummy_addr[ETH_ALEN] = {
		0xfe, 0xff, 0xff, 0xff, 0xff, 0xff,
	};
	int err;
	struct net_device *dev;
	struct xenvif *vif;
	char name[IFNAMSIZ] = {};

	snprintf(name, IFNAMSIZ - 1, "vif%u.%u", domid, handle);
	/* Allocate a netdev with the max. supported number of queues.
	 * When the guest selects the desired number, it will be updated
	 * via netif_set_real_num_*_queues().
	 */
	dev = alloc_netdev_mq(sizeof(struct xenvif), name, NET_NAME_UNKNOWN,
			      ether_setup, xenvif_max_queues);
	if (dev == NULL) {
		pr_warn("Could not allocate netdev for %s\n", name);
		return ERR_PTR(-ENOMEM);
	}

	SET_NETDEV_DEV(dev, parent);

	vif = netdev_priv(dev);

	vif->domid  = domid;
	vif->handle = handle;
	vif->can_sg = 1;
	vif->ip_csum = 1;
	vif->disabled = false;
	vif->drain_timeout = msecs_to_jiffies(rx_drain_timeout_msecs);
	vif->stall_timeout = msecs_to_jiffies(rx_stall_timeout_msecs);

	/* Start out with no queues. */
	vif->queues = NULL;
	vif->num_queues = 0;

	vif->xdp_headroom = 0;

	spin_lock_init(&vif->lock);
	INIT_LIST_HEAD(&vif->fe_mcast_addr);

	dev->netdev_ops	= &xenvif_netdev_ops;
	dev->hw_features = NETIF_F_SG |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
		NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_FRAGLIST;
	dev->features = dev->hw_features | NETIF_F_RXCSUM;
	dev->ethtool_ops = &xenvif_ethtool_ops;

	dev->tx_queue_len = XENVIF_QUEUE_LENGTH;

	dev->min_mtu = ETH_MIN_MTU;
	dev->max_mtu = ETH_MAX_MTU - VLAN_ETH_HLEN;

	/*
	 * Initialise a dummy MAC address. We choose the numerically
	 * largest non-broadcast address to prevent the address getting
	 * stolen by an Ethernet bridge for STP purposes.
	 * (FE:FF:FF:FF:FF:FF)
	 */
	eth_hw_addr_set(dev, dummy_addr);

	netif_carrier_off(dev);

	err = register_netdev(dev);
	if (err) {
		netdev_warn(dev, "Could not register device: err=%d\n", err);
		free_netdev(dev);
		return ERR_PTR(err);
	}

	netdev_dbg(dev, "Successfully created xenvif\n");

	__module_get(THIS_MODULE);

	return vif;
}
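/* One-time initialisation of a queue: credit-scheduler state, the
 * internal skb queues, the pending-request ring, and the grant pages
 * backing the TX zerocopy machinery.
 */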
int xenvif_init_queue(struct xenvif_queue *queue)
{
	int err, i;

	queue->credit_bytes = queue->remaining_credit = ~0UL;
	queue->credit_usec  = 0UL;
	timer_setup(&queue->credit_timeout, xenvif_tx_credit_callback, 0);
	queue->credit_window_start = get_jiffies_64();

	queue->rx_queue_max = XENVIF_RX_QUEUE_BYTES;

	skb_queue_head_init(&queue->rx_queue);
	skb_queue_head_init(&queue->tx_queue);

	queue->pending_cons = 0;
	queue->pending_prod = MAX_PENDING_REQS;
	for (i = 0; i < MAX_PENDING_REQS; ++i)
		queue->pending_ring[i] = i;

	spin_lock_init(&queue->callback_lock);
	spin_lock_init(&queue->response_lock);

	/* If ballooning is disabled, this will consume real memory, so you
	 * better enable it. The long term solution would be to use just a
	 * bunch of valid page descriptors, without dependency on ballooning.
	 */
	err = gnttab_alloc_pages(MAX_PENDING_REQS,
				 queue->mmap_pages);
	if (err) {
		netdev_err(queue->vif->dev, "Could not reserve mmap_pages\n");
		return -ENOMEM;
	}

	for (i = 0; i < MAX_PENDING_REQS; i++) {
		queue->pending_tx_info[i].callback_struct = (struct ubuf_info_msgzc)
			{ { .callback = xenvif_zerocopy_callback },
			  { { .ctx = NULL,
			      .desc = i } } };
		queue->grant_tx_handle[i] = NETBACK_INVALID_HANDLE;
	}

	return 0;
}
void xenvif_carrier_on(struct xenvif *vif)
{
	rtnl_lock();
	if (!vif->can_sg && vif->dev->mtu > ETH_DATA_LEN)
		dev_set_mtu(vif->dev, ETH_DATA_LEN);
	netdev_update_features(vif->dev);
	set_bit(VIF_STATUS_CONNECTED, &vif->status);
	if (netif_running(vif->dev))
		xenvif_up(vif);
	rtnl_unlock();
}
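/* Map the control ring granted by the frontend, attach to it, and
 * bind the control event channel to a threaded lateeoi IRQ handler.
 */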
int xenvif_connect_ctrl(struct xenvif *vif, grant_ref_t ring_ref,
			unsigned int evtchn)
{
	struct net_device *dev = vif->dev;
	struct xenbus_device *xendev = xenvif_to_xenbus_device(vif);
	void *addr;
	struct xen_netif_ctrl_sring *shared;
	RING_IDX rsp_prod, req_prod;
	int err;

	err = xenbus_map_ring_valloc(xendev, &ring_ref, 1, &addr);
	if (err)
		goto err;

	shared = (struct xen_netif_ctrl_sring *)addr;
	rsp_prod = READ_ONCE(shared->rsp_prod);
	req_prod = READ_ONCE(shared->req_prod);

	BACK_RING_ATTACH(&vif->ctrl, shared, rsp_prod, XEN_PAGE_SIZE);

	err = -EIO;
	if (req_prod - rsp_prod > RING_SIZE(&vif->ctrl))
		goto err_unmap;

	err = bind_interdomain_evtchn_to_irq_lateeoi(xendev, evtchn);
	if (err < 0)
		goto err_unmap;

	vif->ctrl_irq = err;

	xenvif_init_hash(vif);

	err = request_threaded_irq(vif->ctrl_irq, NULL, xenvif_ctrl_irq_fn,
				   IRQF_ONESHOT, "xen-netback-ctrl", vif);
	if (err) {
		pr_warn("Could not setup irq handler for %s\n", dev->name);
		goto err_deinit;
	}

	return 0;

err_deinit:
	xenvif_deinit_hash(vif);
	unbind_from_irqhandler(vif->ctrl_irq, vif);
	vif->ctrl_irq = 0;

err_unmap:
	xenbus_unmap_ring_vfree(xendev, vif->ctrl.sring);
	vif->ctrl.sring = NULL;

err:
	return err;
}
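/* Tear down everything xenvif_connect_data() set up for one queue.
 * Safe on a partially connected queue: each resource is checked
 * before being released.
 */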
static void xenvif_disconnect_queue(struct xenvif_queue *queue)
{
	if (queue->task) {
		kthread_stop(queue->task);
		put_task_struct(queue->task);
		queue->task = NULL;
	}

	if (queue->dealloc_task) {
		kthread_stop(queue->dealloc_task);
		queue->dealloc_task = NULL;
	}

	if (queue->napi.poll) {
		netif_napi_del(&queue->napi);
		queue->napi.poll = NULL;
	}

	if (queue->tx_irq) {
		unbind_from_irqhandler(queue->tx_irq, queue);
		if (queue->tx_irq == queue->rx_irq)
			queue->rx_irq = 0;
		queue->tx_irq = 0;
	}

	if (queue->rx_irq) {
		unbind_from_irqhandler(queue->rx_irq, queue);
		queue->rx_irq = 0;
	}

	xenvif_unmap_frontend_data_rings(queue);
}
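/* Connect the data path of one queue: map the shared TX/RX rings,
 * start the guest-RX and dealloc kthreads, register NAPI, and bind
 * either a combined or a split pair of event channels. The IRQs stay
 * disabled until xenvif_up() enables them.
 */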
int xenvif_connect_data(struct xenvif_queue *queue,
			unsigned long tx_ring_ref,
			unsigned long rx_ring_ref,
			unsigned int tx_evtchn,
			unsigned int rx_evtchn)
{
	struct xenbus_device *dev = xenvif_to_xenbus_device(queue->vif);
	struct task_struct *task;
	int err;

	BUG_ON(queue->tx_irq);
	BUG_ON(queue->task);
	BUG_ON(queue->dealloc_task);

	err = xenvif_map_frontend_data_rings(queue, tx_ring_ref,
					     rx_ring_ref);
	if (err < 0)
		goto err;

	init_waitqueue_head(&queue->wq);
	init_waitqueue_head(&queue->dealloc_wq);
	atomic_set(&queue->inflight_packets, 0);

	netif_napi_add(queue->vif->dev, &queue->napi, xenvif_poll);

	queue->stalled = true;

	task = kthread_run(xenvif_kthread_guest_rx, queue,
			   "%s-guest-rx", queue->name);
	if (IS_ERR(task))
		goto kthread_err;
	queue->task = task;
	/*
	 * Take a reference to the task in order to prevent it from being freed
	 * if the thread function returns before kthread_stop is called.
	 */
	get_task_struct(task);

	task = kthread_run(xenvif_dealloc_kthread, queue,
			   "%s-dealloc", queue->name);
	if (IS_ERR(task))
		goto kthread_err;
	queue->dealloc_task = task;

	if (tx_evtchn == rx_evtchn) {
		/* feature-split-event-channels == 0 */
		err = bind_interdomain_evtchn_to_irqhandler_lateeoi(
			dev, tx_evtchn, xenvif_interrupt, 0,
			queue->name, queue);
		if (err < 0)
			goto err;
		queue->tx_irq = queue->rx_irq = err;
		disable_irq(queue->tx_irq);
	} else {
		/* feature-split-event-channels == 1 */
		snprintf(queue->tx_irq_name, sizeof(queue->tx_irq_name),
			 "%s-tx", queue->name);
		err = bind_interdomain_evtchn_to_irqhandler_lateeoi(
			dev, tx_evtchn, xenvif_tx_interrupt, 0,
			queue->tx_irq_name, queue);
		if (err < 0)
			goto err;
		queue->tx_irq = err;
		disable_irq(queue->tx_irq);

		snprintf(queue->rx_irq_name, sizeof(queue->rx_irq_name),
			 "%s-rx", queue->name);
		err = bind_interdomain_evtchn_to_irqhandler_lateeoi(
			dev, rx_evtchn, xenvif_rx_interrupt, 0,
			queue->rx_irq_name, queue);
		if (err < 0)
			goto err;
		queue->rx_irq = err;
		disable_irq(queue->rx_irq);
	}

	return 0;

kthread_err:
	pr_warn("Could not allocate kthread for %s\n", queue->name);
	err = PTR_ERR(task);
err:
	xenvif_disconnect_queue(queue);
	return err;
}
void xenvif_carrier_off(struct xenvif *vif)
{
	struct net_device *dev = vif->dev;

	rtnl_lock();
	if (test_and_clear_bit(VIF_STATUS_CONNECTED, &vif->status)) {
		netif_carrier_off(dev); /* discard queued packets */
		if (netif_running(dev))
			xenvif_down(vif);
	}
	rtnl_unlock();
}
void xenvif_disconnect_data(struct xenvif *vif)
{
	struct xenvif_queue *queue = NULL;
	unsigned int num_queues = vif->num_queues;
	unsigned int queue_index;

	xenvif_carrier_off(vif);

	for (queue_index = 0; queue_index < num_queues; ++queue_index) {
		queue = &vif->queues[queue_index];

		xenvif_disconnect_queue(queue);
	}

	xenvif_mcast_addr_list_free(vif);
}
void xenvif_disconnect_ctrl(struct xenvif *vif)
{
	if (vif->ctrl_irq) {
		xenvif_deinit_hash(vif);
		unbind_from_irqhandler(vif->ctrl_irq, vif);
		vif->ctrl_irq = 0;
	}

	if (vif->ctrl.sring) {
		xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(vif),
					vif->ctrl.sring);
		vif->ctrl.sring = NULL;
	}
}
/* Reverse the relevant parts of xenvif_init_queue().
 * Used for queue teardown from xenvif_free(), and on the
 * error handling paths in xenbus.c:connect().
 */
void xenvif_deinit_queue(struct xenvif_queue *queue)
{
	gnttab_free_pages(MAX_PENDING_REQS, queue->mmap_pages);
}
void xenvif_free(struct xenvif *vif)
{
	struct xenvif_queue *queues = vif->queues;
	unsigned int num_queues = vif->num_queues;
	unsigned int queue_index;

	unregister_netdev(vif->dev);
	free_netdev(vif->dev);

	for (queue_index = 0; queue_index < num_queues; ++queue_index)
		xenvif_deinit_queue(&queues[queue_index]);

	vfree(queues);

	module_put(THIS_MODULE);
}