// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Virtio PCI driver - modern (virtio 1.0) device support
 *
 * This module allows virtio devices to be used over a virtual PCI device.
 * This can be used with QEMU based VMMs like KVM or Xen.
 *
 * Copyright IBM Corp. 2007
 * Copyright Red Hat, Inc. 2014
 *
 * Authors:
 *  Anthony Liguori  <aliguori@us.ibm.com>
 *  Rusty Russell <rusty@rustcorp.com.au>
 *  Michael S. Tsirkin <mst@redhat.com>
 */

#include <linux/delay.h>
#define VIRTIO_PCI_NO_LEGACY
#define VIRTIO_RING_NO_LEGACY
#include "virtio_pci_common.h"

#define VIRTIO_AVQ_SGS_MAX	4

static u64 vp_get_features(struct virtio_device *vdev)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);

	return vp_modern_get_features(&vp_dev->mdev);
}

static bool vp_is_avq(struct virtio_device *vdev, unsigned int index)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);

	if (!virtio_has_feature(vdev, VIRTIO_F_ADMIN_VQ))
		return false;

	return index == vp_dev->admin_vq.vq_index;
}

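/*
 * Submit one admin command and busy-poll for its completion; polling keeps
 * this path independent of interrupt setup. The caller holds cmd_lock.
 * Any opcode other than LIST_QUERY/LIST_USE is rejected unless the device
 * has advertised it in supported_cmds.
 */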
static int virtqueue_exec_admin_cmd(struct virtio_pci_admin_vq *admin_vq,
				    u16 opcode, struct scatterlist **sgs,
				    unsigned int out_num, unsigned int in_num,
				    void *data)
{
	struct virtqueue *vq;
	int ret, len;

	vq = admin_vq->info.vq;
	if (!vq)
		return -EIO;

	if (opcode != VIRTIO_ADMIN_CMD_LIST_QUERY &&
	    opcode != VIRTIO_ADMIN_CMD_LIST_USE &&
	    !((1ULL << opcode) & admin_vq->supported_cmds))
		return -EOPNOTSUPP;

	ret = virtqueue_add_sgs(vq, sgs, out_num, in_num, data, GFP_KERNEL);
	if (ret < 0)
		return -EIO;

	if (unlikely(!virtqueue_kick(vq)))
		return -EIO;

	while (!virtqueue_get_buf(vq, &len) &&
	       !virtqueue_is_broken(vq))
		cpu_relax();

	if (virtqueue_is_broken(vq))
		return -EIO;

	return 0;
}

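/*
 * Build the scatterlist layout the admin vq expects -- command header,
 * optional command data, mandatory status buffer, optional result buffer --
 * then execute the command under cmd_lock. On a device-reported error the
 * negated status code is returned.
 */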
int vp_modern_admin_cmd_exec(struct virtio_device *vdev,
			     struct virtio_admin_cmd *cmd)
{
	struct scatterlist *sgs[VIRTIO_AVQ_SGS_MAX], hdr, stat;
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
	struct virtio_admin_cmd_status *va_status;
	unsigned int out_num = 0, in_num = 0;
	struct virtio_admin_cmd_hdr *va_hdr;
	u16 status;
	int ret;

	if (!virtio_has_feature(vdev, VIRTIO_F_ADMIN_VQ))
		return -EOPNOTSUPP;

	va_status = kzalloc(sizeof(*va_status), GFP_KERNEL);
	if (!va_status)
		return -ENOMEM;

	va_hdr = kzalloc(sizeof(*va_hdr), GFP_KERNEL);
	if (!va_hdr) {
		ret = -ENOMEM;
		goto err_alloc;
	}

	va_hdr->opcode = cmd->opcode;
	va_hdr->group_type = cmd->group_type;
	va_hdr->group_member_id = cmd->group_member_id;

	/* Add header */
	sg_init_one(&hdr, va_hdr, sizeof(*va_hdr));
	sgs[out_num] = &hdr;
	out_num++;

	if (cmd->data_sg) {
		sgs[out_num] = cmd->data_sg;
		out_num++;
	}

	/* Add return status */
	sg_init_one(&stat, va_status, sizeof(*va_status));
	sgs[out_num + in_num] = &stat;
	in_num++;

	if (cmd->result_sg) {
		sgs[out_num + in_num] = cmd->result_sg;
		in_num++;
	}

	mutex_lock(&vp_dev->admin_vq.cmd_lock);
	ret = virtqueue_exec_admin_cmd(&vp_dev->admin_vq,
				       le16_to_cpu(cmd->opcode),
				       sgs, out_num, in_num, sgs);
	mutex_unlock(&vp_dev->admin_vq.cmd_lock);

	if (ret) {
		dev_err(&vdev->dev,
			"Failed to execute command on admin vq: %d\n", ret);
		goto err_cmd_exec;
	}

	status = le16_to_cpu(va_status->status);
	if (status != VIRTIO_ADMIN_STATUS_OK) {
		dev_err(&vdev->dev,
			"admin command error: status(%#x) qualifier(%#x)\n",
			status, le16_to_cpu(va_status->status_qualifier));
		ret = -status;
	}

err_cmd_exec:
	kfree(va_hdr);
err_alloc:
	kfree(va_status);
	return ret;
}

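/*
 * Negotiate the admin command set: LIST_QUERY asks the device which opcodes
 * it supports, the result is masked against the commands this driver knows
 * (VIRTIO_ADMIN_CMD_BITMAP), and LIST_USE tells the device which subset will
 * actually be used. On failure, supported_cmds is simply left at zero.
 */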
static void virtio_pci_admin_cmd_list_init(struct virtio_device *virtio_dev)
{
	struct virtio_pci_device *vp_dev = to_vp_device(virtio_dev);
	struct virtio_admin_cmd cmd = {};
	struct scatterlist result_sg;
	struct scatterlist data_sg;
	__le64 *data;
	int ret;

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return;

	sg_init_one(&result_sg, data, sizeof(*data));
	cmd.opcode = cpu_to_le16(VIRTIO_ADMIN_CMD_LIST_QUERY);
	cmd.group_type = cpu_to_le16(VIRTIO_ADMIN_GROUP_TYPE_SRIOV);
	cmd.result_sg = &result_sg;

	ret = vp_modern_admin_cmd_exec(virtio_dev, &cmd);
	if (ret)
		goto end;

	*data &= cpu_to_le64(VIRTIO_ADMIN_CMD_BITMAP);
	sg_init_one(&data_sg, data, sizeof(*data));
	cmd.opcode = cpu_to_le16(VIRTIO_ADMIN_CMD_LIST_USE);
	cmd.data_sg = &data_sg;
	cmd.result_sg = NULL;

	ret = vp_modern_admin_cmd_exec(virtio_dev, &cmd);
	if (ret)
		goto end;

	vp_dev->admin_vq.supported_cmds = le64_to_cpu(*data);
end:
	kfree(data);
}

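/*
 * The admin vq is only usable once the device is DRIVER_OK: activation
 * un-breaks the queue and (re)negotiates the command list, while
 * deactivation breaks it again so that in-flight polling fails fast
 * across a device reset.
 */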
static void vp_modern_avq_activate(struct virtio_device *vdev)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
	struct virtio_pci_admin_vq *admin_vq = &vp_dev->admin_vq;

	if (!virtio_has_feature(vdev, VIRTIO_F_ADMIN_VQ))
		return;

	__virtqueue_unbreak(admin_vq->info.vq);
	virtio_pci_admin_cmd_list_init(vdev);
}

static void vp_modern_avq_deactivate(struct virtio_device *vdev)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
	struct virtio_pci_admin_vq *admin_vq = &vp_dev->admin_vq;

	if (!virtio_has_feature(vdev, VIRTIO_F_ADMIN_VQ))
		return;

	__virtqueue_break(admin_vq->info.vq);
}

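/*
 * Transport features are acked by the transport itself, not by the device
 * driver: accept only the ones this driver can actually honor.
 */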
static void vp_transport_features(struct virtio_device *vdev, u64 features)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
	struct pci_dev *pci_dev = vp_dev->pci_dev;

	if ((features & BIT_ULL(VIRTIO_F_SR_IOV)) &&
			pci_find_ext_capability(pci_dev, PCI_EXT_CAP_ID_SRIOV))
		__virtio_set_bit(vdev, VIRTIO_F_SR_IOV);

	if (features & BIT_ULL(VIRTIO_F_RING_RESET))
		__virtio_set_bit(vdev, VIRTIO_F_RING_RESET);

	if (features & BIT_ULL(VIRTIO_F_ADMIN_VQ))
		__virtio_set_bit(vdev, VIRTIO_F_ADMIN_VQ);
}

static int __vp_check_common_size_one_feature(struct virtio_device *vdev, u32 fbit,
					      u32 offset, const char *fname)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);

	if (!__virtio_test_bit(vdev, fbit))
		return 0;

	if (likely(vp_dev->mdev.common_len >= offset))
		return 0;

	dev_err(&vdev->dev,
		"virtio: common cfg size(%zu) does not match the feature %s\n",
		vp_dev->mdev.common_len, fname);
	return -EINVAL;
}

#define vp_check_common_size_one_feature(vdev, fbit, field) \
	__vp_check_common_size_one_feature(vdev, fbit, \
		offsetofend(struct virtio_pci_modern_common_cfg, field), #fbit)

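/*
 * A device offering one of these features must also expose a common config
 * region large enough to contain the corresponding field; reject devices
 * whose config space is smaller than what their feature bits imply.
 */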
static int vp_check_common_size(struct virtio_device *vdev)
{
	if (vp_check_common_size_one_feature(vdev, VIRTIO_F_NOTIF_CONFIG_DATA, queue_notify_data))
		return -EINVAL;

	if (vp_check_common_size_one_feature(vdev, VIRTIO_F_RING_RESET, queue_reset))
		return -EINVAL;

	if (vp_check_common_size_one_feature(vdev, VIRTIO_F_ADMIN_VQ, admin_queue_num))
		return -EINVAL;

	return 0;
}

/* virtio config->finalize_features() implementation */
static int vp_finalize_features(struct virtio_device *vdev)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
	u64 features = vdev->features;

	/* Give virtio_ring a chance to accept features. */
	vring_transport_features(vdev);

	/* Give virtio_pci a chance to accept features. */
	vp_transport_features(vdev, features);

	if (!__virtio_test_bit(vdev, VIRTIO_F_VERSION_1)) {
		dev_err(&vdev->dev, "virtio: device uses modern interface "
			"but does not have VIRTIO_F_VERSION_1\n");
		return -EINVAL;
	}

	if (vp_check_common_size(vdev))
		return -EINVAL;

	vp_modern_set_features(&vp_dev->mdev, vdev->features);

	return 0;
}

/* virtio config->get() implementation */
static void vp_get(struct virtio_device *vdev, unsigned int offset,
		   void *buf, unsigned int len)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
	struct virtio_pci_modern_device *mdev = &vp_dev->mdev;
	void __iomem *device = mdev->device;
	u8 b;
	__le16 w;
	__le32 l;

	BUG_ON(offset + len > mdev->device_len);

	switch (len) {
	case 1:
		b = ioread8(device + offset);
		memcpy(buf, &b, sizeof b);
		break;
	case 2:
		w = cpu_to_le16(ioread16(device + offset));
		memcpy(buf, &w, sizeof w);
		break;
	case 4:
		l = cpu_to_le32(ioread32(device + offset));
		memcpy(buf, &l, sizeof l);
		break;
	case 8:
		l = cpu_to_le32(ioread32(device + offset));
		memcpy(buf, &l, sizeof l);
		l = cpu_to_le32(ioread32(device + offset + sizeof l));
		memcpy(buf + sizeof l, &l, sizeof l);
		break;
	default:
		BUG();
	}
}

/* the config->set() implementation.  it's symmetric to the config->get()
 * implementation */
static void vp_set(struct virtio_device *vdev, unsigned int offset,
		   const void *buf, unsigned int len)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
	struct virtio_pci_modern_device *mdev = &vp_dev->mdev;
	void __iomem *device = mdev->device;
	u8 b;
	__le16 w;
	__le32 l;

	BUG_ON(offset + len > mdev->device_len);

	switch (len) {
	case 1:
		memcpy(&b, buf, sizeof b);
		iowrite8(b, device + offset);
		break;
	case 2:
		memcpy(&w, buf, sizeof w);
		iowrite16(le16_to_cpu(w), device + offset);
		break;
	case 4:
		memcpy(&l, buf, sizeof l);
		iowrite32(le32_to_cpu(l), device + offset);
		break;
	case 8:
		memcpy(&l, buf, sizeof l);
		iowrite32(le32_to_cpu(l), device + offset);
		memcpy(&l, buf + sizeof l, sizeof l);
		iowrite32(le32_to_cpu(l), device + offset + sizeof l);
		break;
	default:
		BUG();
	}
}

static u32 vp_generation(struct virtio_device *vdev)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);

	return vp_modern_generation(&vp_dev->mdev);
}

/* config->{get,set}_status() implementations */
static u8 vp_get_status(struct virtio_device *vdev)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);

	return vp_modern_get_status(&vp_dev->mdev);
}

static void vp_set_status(struct virtio_device *vdev, u8 status)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);

	/* We should never be setting status to 0. */
	BUG_ON(status == 0);
	vp_modern_set_status(&vp_dev->mdev, status);
	if (status & VIRTIO_CONFIG_S_DRIVER_OK)
		vp_modern_avq_activate(vdev);
}

static void vp_reset(struct virtio_device *vdev)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
	struct virtio_pci_modern_device *mdev = &vp_dev->mdev;

	/* 0 status means a reset. */
	vp_modern_set_status(mdev, 0);
	/* After writing 0 to device_status, the driver MUST wait for a read of
	 * device_status to return 0 before reinitializing the device.
	 * This will flush out the status write, and flush in device writes,
	 * including MSI-X interrupts, if any.
	 */
	while (vp_modern_get_status(mdev))
		msleep(1);

	vp_modern_avq_deactivate(vdev);

	/* Flush pending VQ/configuration callbacks. */
	vp_synchronize_vectors(vdev);
}

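/*
 * Program the ring size, the descriptor/avail/used ring addresses and,
 * if requested, the MSI-X vector for one queue. queue_enable is written
 * separately, once all queues have been set up.
 */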
static int vp_active_vq(struct virtqueue *vq, u16 msix_vec)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev);
	struct virtio_pci_modern_device *mdev = &vp_dev->mdev;
	unsigned long index = vq->index;

	/* activate the queue */
	vp_modern_set_queue_size(mdev, index, virtqueue_get_vring_size(vq));
	vp_modern_queue_address(mdev, index, virtqueue_get_desc_addr(vq),
				virtqueue_get_avail_addr(vq),
				virtqueue_get_used_addr(vq));

	if (msix_vec != VIRTIO_MSI_NO_VECTOR) {
		msix_vec = vp_modern_queue_vector(mdev, index, msix_vec);
		if (msix_vec == VIRTIO_MSI_NO_VECTOR)
			return -EBUSY;
	}

	return 0;
}

static int vp_modern_disable_vq_and_reset(struct virtqueue *vq)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev);
	struct virtio_pci_modern_device *mdev = &vp_dev->mdev;
	struct virtio_pci_vq_info *info;
	unsigned long flags;

	if (!virtio_has_feature(vq->vdev, VIRTIO_F_RING_RESET))
		return -ENOENT;

	vp_modern_set_queue_reset(mdev, vq->index);

	info = vp_dev->vqs[vq->index];

	/* delete vq from irq handler */
	spin_lock_irqsave(&vp_dev->lock, flags);
	list_del(&info->node);
	spin_unlock_irqrestore(&vp_dev->lock, flags);

	INIT_LIST_HEAD(&info->node);

#ifdef CONFIG_VIRTIO_HARDEN_NOTIFICATION
	__virtqueue_break(vq);
#endif

	/* For the case where vq has an exclusive irq, call synchronize_irq() to
	 * wait for completion.
	 *
	 * note: We can't use disable_irq() since it conflicts with the affinity
	 * managed IRQ that is used by some drivers.
	 */
	if (vp_dev->per_vq_vectors && info->msix_vector != VIRTIO_MSI_NO_VECTOR)
		synchronize_irq(pci_irq_vector(vp_dev->pci_dev, info->msix_vector));

	vq->reset = true;

	return 0;
}

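/*
 * Counterpart of vp_modern_disable_vq_and_reset(): once the device reports
 * the queue reset as complete, reprogram the queue, relink it to the irq
 * handler and set queue_enable again.
 */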
static int vp_modern_enable_vq_after_reset(struct virtqueue *vq)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev);
	struct virtio_pci_modern_device *mdev = &vp_dev->mdev;
	struct virtio_pci_vq_info *info;
	unsigned long flags, index;
	int err;

	if (!vq->reset)
		return -EBUSY;

	index = vq->index;
	info = vp_dev->vqs[index];

	if (vp_modern_get_queue_reset(mdev, index))
		return -EBUSY;

	if (vp_modern_get_queue_enable(mdev, index))
		return -EBUSY;

	err = vp_active_vq(vq, info->msix_vector);
	if (err)
		return err;

	if (vq->callback) {
		spin_lock_irqsave(&vp_dev->lock, flags);
		list_add(&info->node, &vp_dev->virtqueues);
		spin_unlock_irqrestore(&vp_dev->lock, flags);
	} else {
		INIT_LIST_HEAD(&info->node);
	}

#ifdef CONFIG_VIRTIO_HARDEN_NOTIFICATION
	__virtqueue_unbreak(vq);
#endif

	vp_modern_set_queue_enable(&vp_dev->mdev, index, true);
	vq->reset = false;

	return 0;
}

static u16 vp_config_vector(struct virtio_pci_device *vp_dev, u16 vector)
{
	return vp_modern_config_vector(&vp_dev->mdev, vector);
}

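/*
 * Notify variant used when VIRTIO_F_NOTIFICATION_DATA has been negotiated:
 * write the extended notification payload instead of just the queue index.
 */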
static bool vp_notify_with_data(struct virtqueue *vq)
{
	u32 data = vring_notification_data(vq);

	iowrite32(data, (void __iomem *)vq->priv);

	return true;
}

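/*
 * Allocate and activate one virtqueue. For the admin vq the ring size is
 * fixed at VIRTIO_AVQ_SGS_MAX; for regular queues it is read from the
 * device. The queue is left disabled here; vp_modern_find_vqs() enables
 * all queues in one pass at the end.
 */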
static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev,
				  struct virtio_pci_vq_info *info,
				  unsigned int index,
				  void (*callback)(struct virtqueue *vq),
				  const char *name,
				  bool ctx,
				  u16 msix_vec)
{
	struct virtio_pci_modern_device *mdev = &vp_dev->mdev;
	bool (*notify)(struct virtqueue *vq);
	struct virtqueue *vq;
	bool is_avq;
	u16 num;
	int err;

	if (__virtio_test_bit(&vp_dev->vdev, VIRTIO_F_NOTIFICATION_DATA))
		notify = vp_notify_with_data;
	else
		notify = vp_notify;

	is_avq = vp_is_avq(&vp_dev->vdev, index);
	if (index >= vp_modern_get_num_queues(mdev) && !is_avq)
		return ERR_PTR(-EINVAL);

	num = is_avq ?
		VIRTIO_AVQ_SGS_MAX : vp_modern_get_queue_size(mdev, index);
	/* Check if queue is either not available or already active. */
	if (!num || vp_modern_get_queue_enable(mdev, index))
		return ERR_PTR(-ENOENT);

	info->msix_vector = msix_vec;

	/* create the vring */
	vq = vring_create_virtqueue(index, num,
				    SMP_CACHE_BYTES, &vp_dev->vdev,
				    true, true, ctx,
				    notify, callback, name);
	if (!vq)
		return ERR_PTR(-ENOMEM);

	vq->num_max = num;

	err = vp_active_vq(vq, msix_vec);
	if (err)
		goto err;

	vq->priv = (void __force *)vp_modern_map_vq_notify(mdev, index, NULL);
	if (!vq->priv) {
		err = -ENOMEM;
		goto err;
	}

	if (is_avq) {
		mutex_lock(&vp_dev->admin_vq.cmd_lock);
		vp_dev->admin_vq.info.vq = vq;
		mutex_unlock(&vp_dev->admin_vq.cmd_lock);
	}

	return vq;

err:
	vring_del_virtqueue(vq);
	return ERR_PTR(err);
}

static int vp_modern_find_vqs(struct virtio_device *vdev, unsigned int nvqs,
			      struct virtqueue *vqs[],
			      vq_callback_t *callbacks[],
			      const char * const names[], const bool *ctx,
			      struct irq_affinity *desc)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
	struct virtqueue *vq;
	int rc = vp_find_vqs(vdev, nvqs, vqs, callbacks, names, ctx, desc);

	if (rc)
		return rc;

	/* Select and activate all queues. Has to be done last: once we do
	 * this, there's no way to go back except reset.
	 */
	list_for_each_entry(vq, &vdev->vqs, list)
		vp_modern_set_queue_enable(&vp_dev->mdev, vq->index, true);

	return 0;
}

static void del_vq(struct virtio_pci_vq_info *info)
{
	struct virtqueue *vq = info->vq;
	struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev);
	struct virtio_pci_modern_device *mdev = &vp_dev->mdev;

	if (vp_is_avq(&vp_dev->vdev, vq->index)) {
		mutex_lock(&vp_dev->admin_vq.cmd_lock);
		vp_dev->admin_vq.info.vq = NULL;
		mutex_unlock(&vp_dev->admin_vq.cmd_lock);
	}

	if (vp_dev->msix_enabled)
		vp_modern_queue_vector(mdev, vq->index,
				       VIRTIO_MSI_NO_VECTOR);

	if (!mdev->notify_base)
		pci_iounmap(mdev->pci_dev, (void __force __iomem *)vq->priv);

	vring_del_virtqueue(vq);
}

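/*
 * Scan the PCI vendor capability list for a shared memory capability with
 * the given ID, assembling the 64-bit offset and length from the two 32-bit
 * halves of the virtio_pci_cap64 layout. Returns the capability position,
 * or 0 if none is found.
 */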
static int virtio_pci_find_shm_cap(struct pci_dev *dev, u8 required_id,
				   u8 *bar, u64 *offset, u64 *len)
{
	int pos;

	for (pos = pci_find_capability(dev, PCI_CAP_ID_VNDR); pos > 0;
	     pos = pci_find_next_capability(dev, pos, PCI_CAP_ID_VNDR)) {
		u8 type, cap_len, id, res_bar;
		u32 tmp32;
		u64 res_offset, res_length;

		pci_read_config_byte(dev, pos + offsetof(struct virtio_pci_cap,
							 cfg_type), &type);
		if (type != VIRTIO_PCI_CAP_SHARED_MEMORY_CFG)
			continue;

		pci_read_config_byte(dev, pos + offsetof(struct virtio_pci_cap,
							 cap_len), &cap_len);
		if (cap_len != sizeof(struct virtio_pci_cap64)) {
			dev_err(&dev->dev, "%s: shm cap with bad size offset:"
				" %d size: %d\n", __func__, pos, cap_len);
			continue;
		}

		pci_read_config_byte(dev, pos + offsetof(struct virtio_pci_cap,
							 id), &id);
		if (id != required_id)
			continue;

		pci_read_config_byte(dev, pos + offsetof(struct virtio_pci_cap,
							 bar), &res_bar);
		if (res_bar >= PCI_STD_NUM_BARS)
			continue;

		/* Type and ID match, and the BAR value isn't reserved.
		 * Looks good.
		 */

		/* Read the lower 32bit of length and offset */
		pci_read_config_dword(dev, pos + offsetof(struct virtio_pci_cap,
							  offset), &tmp32);
		res_offset = tmp32;
		pci_read_config_dword(dev, pos + offsetof(struct virtio_pci_cap,
							  length), &tmp32);
		res_length = tmp32;

		/* and now the top half */
		pci_read_config_dword(dev,
				      pos + offsetof(struct virtio_pci_cap64,
						     offset_hi), &tmp32);
		res_offset |= ((u64)tmp32) << 32;
		pci_read_config_dword(dev,
				      pos + offsetof(struct virtio_pci_cap64,
						     length_hi), &tmp32);
		res_length |= ((u64)tmp32) << 32;

		*bar = res_bar;
		*offset = res_offset;
		*len = res_length;

		return pos;
	}
	return 0;
}

static bool vp_get_shm_region(struct virtio_device *vdev,
			      struct virtio_shm_region *region, u8 id)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
	struct pci_dev *pci_dev = vp_dev->pci_dev;
	u8 bar;
	u64 offset, len;
	phys_addr_t phys_addr;
	size_t bar_len;

	if (!virtio_pci_find_shm_cap(pci_dev, id, &bar, &offset, &len))
		return false;

	phys_addr = pci_resource_start(pci_dev, bar);
	bar_len = pci_resource_len(pci_dev, bar);

	if ((offset + len) < offset) {
		dev_err(&pci_dev->dev, "%s: cap offset+len overflow detected\n",
			__func__);
		return false;
	}

	if (offset + len > bar_len) {
		dev_err(&pci_dev->dev, "%s: bar shorter than cap offset+len\n",
			__func__);
		return false;
	}

	region->len = len;
	region->addr = (u64) phys_addr + offset;

	return true;
}

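/*
 * Create the admin virtqueue at the index advertised by the device. The
 * admin vq has no callback and no MSI-X vector of its own; command
 * completion is polled instead.
 */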
static int vp_modern_create_avq(struct virtio_device *vdev)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
	struct virtio_pci_admin_vq *avq;
	struct virtqueue *vq;
	u16 admin_q_num;

	if (!virtio_has_feature(vdev, VIRTIO_F_ADMIN_VQ))
		return 0;

	admin_q_num = vp_modern_avq_num(&vp_dev->mdev);
	if (!admin_q_num)
		return -EINVAL;

	avq = &vp_dev->admin_vq;
	avq->vq_index = vp_modern_avq_index(&vp_dev->mdev);
	sprintf(avq->name, "avq.%u", avq->vq_index);
	vq = vp_dev->setup_vq(vp_dev, &vp_dev->admin_vq.info, avq->vq_index, NULL,
			      avq->name, NULL, VIRTIO_MSI_NO_VECTOR);
	if (IS_ERR(vq)) {
		dev_err(&vdev->dev, "failed to setup admin virtqueue, err=%ld",
			PTR_ERR(vq));
		return PTR_ERR(vq);
	}

	vp_modern_set_queue_enable(&vp_dev->mdev, avq->info.vq->index, true);
	return 0;
}

static void vp_modern_destroy_avq(struct virtio_device *vdev)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);

	if (!virtio_has_feature(vdev, VIRTIO_F_ADMIN_VQ))
		return;

	vp_dev->del_vq(&vp_dev->admin_vq.info);
}

static const struct virtio_config_ops virtio_pci_config_nodev_ops = {
	.get		= NULL,
	.set		= NULL,
	.generation	= vp_generation,
	.get_status	= vp_get_status,
	.set_status	= vp_set_status,
	.reset		= vp_reset,
	.find_vqs	= vp_modern_find_vqs,
	.del_vqs	= vp_del_vqs,
	.synchronize_cbs = vp_synchronize_vectors,
	.get_features	= vp_get_features,
	.finalize_features = vp_finalize_features,
	.bus_name	= vp_bus_name,
	.set_vq_affinity = vp_set_vq_affinity,
	.get_vq_affinity = vp_get_vq_affinity,
	.get_shm_region  = vp_get_shm_region,
	.disable_vq_and_reset = vp_modern_disable_vq_and_reset,
	.enable_vq_after_reset = vp_modern_enable_vq_after_reset,
	.create_avq = vp_modern_create_avq,
	.destroy_avq = vp_modern_destroy_avq,
};

static const struct virtio_config_ops virtio_pci_config_ops = {
	.get		= vp_get,
	.set		= vp_set,
	.generation	= vp_generation,
	.get_status	= vp_get_status,
	.set_status	= vp_set_status,
	.reset		= vp_reset,
	.find_vqs	= vp_modern_find_vqs,
	.del_vqs	= vp_del_vqs,
	.synchronize_cbs = vp_synchronize_vectors,
	.get_features	= vp_get_features,
	.finalize_features = vp_finalize_features,
	.bus_name	= vp_bus_name,
	.set_vq_affinity = vp_set_vq_affinity,
	.get_vq_affinity = vp_get_vq_affinity,
	.get_shm_region  = vp_get_shm_region,
	.disable_vq_and_reset = vp_modern_disable_vq_and_reset,
	.enable_vq_after_reset = vp_modern_enable_vq_after_reset,
	.create_avq = vp_modern_create_avq,
	.destroy_avq = vp_modern_destroy_avq,
};

/* the PCI probing function */
int virtio_pci_modern_probe(struct virtio_pci_device *vp_dev)
{
	struct virtio_pci_modern_device *mdev = &vp_dev->mdev;
	struct pci_dev *pci_dev = vp_dev->pci_dev;
	int err;

	mdev->pci_dev = pci_dev;

	err = vp_modern_probe(mdev);
	if (err)
		return err;

	if (mdev->device)
		vp_dev->vdev.config = &virtio_pci_config_ops;
	else
		vp_dev->vdev.config = &virtio_pci_config_nodev_ops;

	vp_dev->config_vector = vp_config_vector;
	vp_dev->setup_vq = setup_vq;
	vp_dev->del_vq = del_vq;
	vp_dev->is_avq = vp_is_avq;
	vp_dev->isr = mdev->isr;
	vp_dev->vdev.id = mdev->id;

	mutex_init(&vp_dev->admin_vq.cmd_lock);
	return 0;
}

void virtio_pci_modern_remove(struct virtio_pci_device *vp_dev)
{
	struct virtio_pci_modern_device *mdev = &vp_dev->mdev;

	mutex_destroy(&vp_dev->admin_vq.cmd_lock);
	vp_modern_remove(mdev);
}