// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Virtio PCI driver - common functionality for all device versions
 *
 * This module allows virtio devices to be used over a virtual PCI device.
 * This can be used with QEMU based VMMs like KVM or Xen.
 *
 * Copyright IBM Corp. 2007
 * Copyright Red Hat, Inc. 2014
 *
 * Authors:
 *  Anthony Liguori  <aliguori@us.ibm.com>
 *  Rusty Russell <rusty@rustcorp.com.au>
 *  Michael S. Tsirkin <mst@redhat.com>
 */

#include "virtio_pci_common.h"

static bool force_legacy = false;

#if IS_ENABLED(CONFIG_VIRTIO_PCI_LEGACY)
module_param(force_legacy, bool, 0444);
MODULE_PARM_DESC(force_legacy,
		 "Force legacy mode for transitional virtio 1 devices");
#endif

/* wait for pending irq handlers */
void vp_synchronize_vectors(struct virtio_device *vdev)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
	int i;

	if (vp_dev->intx_enabled)
		synchronize_irq(vp_dev->pci_dev->irq);

	for (i = 0; i < vp_dev->msix_vectors; ++i)
		synchronize_irq(pci_irq_vector(vp_dev->pci_dev, i));
}

/* the notify function used when creating a virt queue */
bool vp_notify(struct virtqueue *vq)
{
	/* we write the queue's selector into the notification register to
	 * signal the other end */
	iowrite16(vq->index, (void __iomem *)vq->priv);
	return true;
}

/* Handle a configuration change: Tell driver if it wants to know. */
static irqreturn_t vp_config_changed(int irq, void *opaque)
{
	struct virtio_pci_device *vp_dev = opaque;

	virtio_config_changed(&vp_dev->vdev);
	return IRQ_HANDLED;
}

/* Notify all virtqueues on an interrupt. */
static irqreturn_t vp_vring_interrupt(int irq, void *opaque)
{
	struct virtio_pci_device *vp_dev = opaque;
	struct virtio_pci_vq_info *info;
	irqreturn_t ret = IRQ_NONE;
	unsigned long flags;

	spin_lock_irqsave(&vp_dev->lock, flags);
	list_for_each_entry(info, &vp_dev->virtqueues, node) {
		if (vring_interrupt(irq, info->vq) == IRQ_HANDLED)
			ret = IRQ_HANDLED;
	}
	spin_unlock_irqrestore(&vp_dev->lock, flags);

	return ret;
}

/* A small wrapper to also acknowledge the interrupt when it's handled.
 * I really need an EIO hook for the vring so I can ack the interrupt once we
 * know that we'll be handling the IRQ but before we invoke the callback since
 * the callback may notify the host which results in the host attempting to
 * raise an interrupt that we would then mask once we acknowledged the
 * interrupt. */
static irqreturn_t vp_interrupt(int irq, void *opaque)
{
	struct virtio_pci_device *vp_dev = opaque;
	u8 isr;

	/* reading the ISR has the effect of also clearing it so it's very
	 * important to save off the value. */
	isr = ioread8(vp_dev->isr);

	/* It's definitely not us if the ISR was not high */
	if (!isr)
		return IRQ_NONE;

	/* Configuration change? Tell driver if it wants to know. */
	if (isr & VIRTIO_PCI_ISR_CONFIG)
		vp_config_changed(irq, opaque);

	return vp_vring_interrupt(irq, opaque);
}
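
/*
 * MSI-X vector layout used below: vector 0 always carries the
 * configuration-change interrupt; virtqueue interrupts then either share
 * one further vector or get one vector each, depending on how many vectors
 * pci_alloc_irq_vectors_affinity() can provide.
 */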

static int vp_request_msix_vectors(struct virtio_device *vdev, int nvectors,
				   bool per_vq_vectors, struct irq_affinity *desc)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
	const char *name = dev_name(&vp_dev->vdev.dev);
	unsigned int flags = PCI_IRQ_MSIX;
	unsigned int i, v;
	int err = -ENOMEM;

	vp_dev->msix_vectors = nvectors;

	vp_dev->msix_names = kmalloc_array(nvectors,
					   sizeof(*vp_dev->msix_names),
					   GFP_KERNEL);
	if (!vp_dev->msix_names)
		goto error;
	vp_dev->msix_affinity_masks
		= kcalloc(nvectors, sizeof(*vp_dev->msix_affinity_masks),
			  GFP_KERNEL);
	if (!vp_dev->msix_affinity_masks)
		goto error;
	for (i = 0; i < nvectors; ++i)
		if (!alloc_cpumask_var(&vp_dev->msix_affinity_masks[i],
				       GFP_KERNEL))
			goto error;

	if (desc) {
		flags |= PCI_IRQ_AFFINITY;
		desc->pre_vectors++; /* virtio config vector */
	}

	err = pci_alloc_irq_vectors_affinity(vp_dev->pci_dev, nvectors,
					     nvectors, flags, desc);
	if (err < 0)
		goto error;
	vp_dev->msix_enabled = 1;

	/* Set the vector used for configuration */
	v = vp_dev->msix_used_vectors;
	snprintf(vp_dev->msix_names[v], sizeof *vp_dev->msix_names,
		 "%s-config", name);
	err = request_irq(pci_irq_vector(vp_dev->pci_dev, v),
			  vp_config_changed, 0, vp_dev->msix_names[v],
			  vp_dev);
	if (err)
		goto error;
	++vp_dev->msix_used_vectors;

	v = vp_dev->config_vector(vp_dev, v);
	/* Verify we had enough resources to assign the vector */
	if (v == VIRTIO_MSI_NO_VECTOR) {
		err = -EBUSY;
		goto error;
	}

	if (!per_vq_vectors) {
		/* Shared vector for all VQs */
		v = vp_dev->msix_used_vectors;
		snprintf(vp_dev->msix_names[v], sizeof *vp_dev->msix_names,
			 "%s-virtqueues", name);
		err = request_irq(pci_irq_vector(vp_dev->pci_dev, v),
				  vp_vring_interrupt, 0, vp_dev->msix_names[v],
				  vp_dev);
		if (err)
			goto error;
		++vp_dev->msix_used_vectors;
	}
	return 0;
error:
	return err;
}

static struct virtqueue *vp_setup_vq(struct virtio_device *vdev, unsigned int index,
				     void (*callback)(struct virtqueue *vq),
				     const char *name,
				     u32 size,
				     bool ctx,
				     u16 msix_vec)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
	struct virtio_pci_vq_info *info = kmalloc(sizeof *info, GFP_KERNEL);
	struct virtqueue *vq;
	unsigned long flags;

	/* fill out our structure that represents an active queue */
	if (!info)
		return ERR_PTR(-ENOMEM);

	vq = vp_dev->setup_vq(vp_dev, info, index, callback, name, size, ctx,
			      msix_vec);
	if (IS_ERR(vq))
		goto out_info;

	info->vq = vq;
	if (callback) {
		spin_lock_irqsave(&vp_dev->lock, flags);
		list_add(&info->node, &vp_dev->virtqueues);
		spin_unlock_irqrestore(&vp_dev->lock, flags);
	} else {
		INIT_LIST_HEAD(&info->node);
	}

	vp_dev->vqs[index] = info;
	return vq;

out_info:
	kfree(info);
	return vq;
}
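
/*
 * Note: vp_setup_vq() only links virtqueues that have a callback into
 * vp_dev->virtqueues, which is why vp_vring_interrupt() above never has to
 * scan callback-less queues.
 */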

static void vp_del_vq(struct virtqueue *vq)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev);
	struct virtio_pci_vq_info *info = vp_dev->vqs[vq->index];
	unsigned long flags;

	/*
	 * If the vq is still in the reset state (e.g. re-enabling it after a
	 * reset failed), info->node has already been unlinked and must not be
	 * removed or rejoined again. This prevents unexpected irqs.
	 */
	if (!vq->reset) {
		spin_lock_irqsave(&vp_dev->lock, flags);
		list_del(&info->node);
		spin_unlock_irqrestore(&vp_dev->lock, flags);
	}

	vp_dev->del_vq(info);
	kfree(info);
}

/* the config->del_vqs() implementation */
void vp_del_vqs(struct virtio_device *vdev)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
	struct virtqueue *vq, *n;
	int i;

	list_for_each_entry_safe(vq, n, &vdev->vqs, list) {
		if (vp_dev->per_vq_vectors) {
			int v = vp_dev->vqs[vq->index]->msix_vector;

			if (v != VIRTIO_MSI_NO_VECTOR) {
				int irq = pci_irq_vector(vp_dev->pci_dev, v);

				irq_set_affinity_hint(irq, NULL);
				free_irq(irq, vq);
			}
		}
		vp_del_vq(vq);
	}
	vp_dev->per_vq_vectors = false;

	if (vp_dev->intx_enabled) {
		free_irq(vp_dev->pci_dev->irq, vp_dev);
		vp_dev->intx_enabled = 0;
	}

	for (i = 0; i < vp_dev->msix_used_vectors; ++i)
		free_irq(pci_irq_vector(vp_dev->pci_dev, i), vp_dev);

	if (vp_dev->msix_affinity_masks) {
		for (i = 0; i < vp_dev->msix_vectors; i++)
			free_cpumask_var(vp_dev->msix_affinity_masks[i]);
	}

	if (vp_dev->msix_enabled) {
		/* Disable the vector used for configuration */
		vp_dev->config_vector(vp_dev, VIRTIO_MSI_NO_VECTOR);

		pci_free_irq_vectors(vp_dev->pci_dev);
		vp_dev->msix_enabled = 0;
	}

	vp_dev->msix_vectors = 0;
	vp_dev->msix_used_vectors = 0;
	kfree(vp_dev->msix_names);
	vp_dev->msix_names = NULL;
	kfree(vp_dev->msix_affinity_masks);
	vp_dev->msix_affinity_masks = NULL;
	kfree(vp_dev->vqs);
	vp_dev->vqs = NULL;
}
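
/*
 * Teardown order above matters: per-vq irqs are released while walking the
 * vq list, then the INTx handler or the remaining MSI-X vectors (config and
 * any shared vq vector) are freed, and only then is the vector bookkeeping
 * torn down.
 */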

static int vp_find_vqs_msix(struct virtio_device *vdev, unsigned int nvqs,
		struct virtqueue *vqs[], vq_callback_t *callbacks[],
		const char * const names[], u32 sizes[], bool per_vq_vectors,
		const bool *ctx,
		struct irq_affinity *desc)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
	u16 msix_vec;
	int i, err, nvectors, allocated_vectors, queue_idx = 0;

	vp_dev->vqs = kcalloc(nvqs, sizeof(*vp_dev->vqs), GFP_KERNEL);
	if (!vp_dev->vqs)
		return -ENOMEM;

	if (per_vq_vectors) {
		/* Best option: one for change interrupt, one per vq. */
		nvectors = 1;
		for (i = 0; i < nvqs; ++i)
			if (names[i] && callbacks[i])
				++nvectors;
	} else {
		/* Second best: one for change, shared for all vqs. */
		nvectors = 2;
	}

	err = vp_request_msix_vectors(vdev, nvectors, per_vq_vectors,
				      per_vq_vectors ? desc : NULL);
	if (err)
		goto error_find;

	vp_dev->per_vq_vectors = per_vq_vectors;
	allocated_vectors = vp_dev->msix_used_vectors;
	for (i = 0; i < nvqs; ++i) {
		if (!names[i]) {
			vqs[i] = NULL;
			continue;
		}

		if (!callbacks[i])
			msix_vec = VIRTIO_MSI_NO_VECTOR;
		else if (vp_dev->per_vq_vectors)
			msix_vec = allocated_vectors++;
		else
			msix_vec = VP_MSIX_VQ_VECTOR;
		vqs[i] = vp_setup_vq(vdev, queue_idx++, callbacks[i], names[i],
				     sizes ? sizes[i] : 0,
				     ctx ? ctx[i] : false,
				     msix_vec);
		if (IS_ERR(vqs[i])) {
			err = PTR_ERR(vqs[i]);
			goto error_find;
		}

		if (!vp_dev->per_vq_vectors || msix_vec == VIRTIO_MSI_NO_VECTOR)
			continue;

		/* allocate per-vq irq if available and necessary */
		snprintf(vp_dev->msix_names[msix_vec],
			 sizeof *vp_dev->msix_names,
			 "%s-%s",
			 dev_name(&vp_dev->vdev.dev), names[i]);
		err = request_irq(pci_irq_vector(vp_dev->pci_dev, msix_vec),
				  vring_interrupt, 0,
				  vp_dev->msix_names[msix_vec],
				  vqs[i]);
		if (err)
			goto error_find;
	}
	return 0;

error_find:
	vp_del_vqs(vdev);
	return err;
}

static int vp_find_vqs_intx(struct virtio_device *vdev, unsigned int nvqs,
		struct virtqueue *vqs[], vq_callback_t *callbacks[],
		const char * const names[], u32 sizes[], const bool *ctx)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
	int i, err, queue_idx = 0;

	vp_dev->vqs = kcalloc(nvqs, sizeof(*vp_dev->vqs), GFP_KERNEL);
	if (!vp_dev->vqs)
		return -ENOMEM;

	err = request_irq(vp_dev->pci_dev->irq, vp_interrupt, IRQF_SHARED,
			  dev_name(&vdev->dev), vp_dev);
	if (err)
		goto out_del_vqs;

	vp_dev->intx_enabled = 1;
	vp_dev->per_vq_vectors = false;
	for (i = 0; i < nvqs; ++i) {
		if (!names[i]) {
			vqs[i] = NULL;
			continue;
		}
		vqs[i] = vp_setup_vq(vdev, queue_idx++, callbacks[i], names[i],
				     sizes ? sizes[i] : 0,
				     ctx ? ctx[i] : false,
				     VIRTIO_MSI_NO_VECTOR);
		if (IS_ERR(vqs[i])) {
			err = PTR_ERR(vqs[i]);
			goto out_del_vqs;
		}
	}

	return 0;
out_del_vqs:
	vp_del_vqs(vdev);
	return err;
}

/* the config->find_vqs() implementation */
int vp_find_vqs(struct virtio_device *vdev, unsigned int nvqs,
		struct virtqueue *vqs[], vq_callback_t *callbacks[],
		const char * const names[], u32 sizes[], const bool *ctx,
		struct irq_affinity *desc)
{
	int err;

	/* Try MSI-X with one vector per queue. */
	err = vp_find_vqs_msix(vdev, nvqs, vqs, callbacks, names, sizes, true, ctx, desc);
	if (!err)
		return 0;
	/* Fallback: MSI-X with one vector for config, one shared for queues. */
	err = vp_find_vqs_msix(vdev, nvqs, vqs, callbacks, names, sizes, false, ctx, desc);
	if (!err)
		return 0;
	/* Finally fall back to regular interrupts. */
	return vp_find_vqs_intx(vdev, nvqs, vqs, callbacks, names, sizes, ctx);
}
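
/*
 * Driver-side sketch of the fallback ladder above (illustrative only: the
 * callback names below are hypothetical, and real drivers reach this through
 * the virtio core's find_vqs helpers rather than calling it directly):
 *
 *	static const char * const names[] = { "input", "output" };
 *	vq_callback_t *callbacks[] = { rx_done, tx_done };
 *	struct virtqueue *vqs[2];
 *
 *	err = vdev->config->find_vqs(vdev, 2, vqs, callbacks, names,
 *				     NULL, NULL, NULL);
 */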

const char *vp_bus_name(struct virtio_device *vdev)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);

	return pci_name(vp_dev->pci_dev);
}

/* Setup the affinity for a virtqueue:
 * - force the affinity for per vq vector
 * - OR over all affinities for shared MSI
 * - ignore the affinity request if we're using INTX
 */
int vp_set_vq_affinity(struct virtqueue *vq, const struct cpumask *cpu_mask)
{
	struct virtio_device *vdev = vq->vdev;
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
	struct virtio_pci_vq_info *info = vp_dev->vqs[vq->index];
	struct cpumask *mask;
	unsigned int irq;

	if (!vq->callback)
		return -EINVAL;

	if (vp_dev->msix_enabled) {
		mask = vp_dev->msix_affinity_masks[info->msix_vector];
		irq = pci_irq_vector(vp_dev->pci_dev, info->msix_vector);
		if (!cpu_mask)
			irq_set_affinity_hint(irq, NULL);
		else {
			cpumask_copy(mask, cpu_mask);
			irq_set_affinity_hint(irq, mask);
		}
	}
	return 0;
}

const struct cpumask *vp_get_vq_affinity(struct virtio_device *vdev, int index)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);

	if (!vp_dev->per_vq_vectors ||
	    vp_dev->vqs[index]->msix_vector == VIRTIO_MSI_NO_VECTOR)
		return NULL;

	return pci_irq_get_affinity(vp_dev->pci_dev,
				    vp_dev->vqs[index]->msix_vector);
}

#ifdef CONFIG_PM_SLEEP
static int virtio_pci_freeze(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	struct virtio_pci_device *vp_dev = pci_get_drvdata(pci_dev);
	int ret;

	ret = virtio_device_freeze(&vp_dev->vdev);

	if (!ret)
		pci_disable_device(pci_dev);
	return ret;
}

static int virtio_pci_restore(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	struct virtio_pci_device *vp_dev = pci_get_drvdata(pci_dev);
	int ret;

	ret = pci_enable_device(pci_dev);
	if (ret)
		return ret;

	pci_set_master(pci_dev);
	return virtio_device_restore(&vp_dev->vdev);
}

static const struct dev_pm_ops virtio_pci_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(virtio_pci_freeze, virtio_pci_restore)
};
#endif

/* Qumranet donated their vendor ID for devices 0x1000 thru 0x10FF. */
static const struct pci_device_id virtio_pci_id_table[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_REDHAT_QUMRANET, PCI_ANY_ID) },
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, virtio_pci_id_table);

static void virtio_pci_release_dev(struct device *_d)
{
	struct virtio_device *vdev = dev_to_virtio(_d);
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);

	/* As struct device is a kobject, it's not safe to
	 * free the memory (including the reference counter itself)
	 * until its release callback runs. */
	kfree(vp_dev);
}

static int virtio_pci_probe(struct pci_dev *pci_dev,
			    const struct pci_device_id *id)
{
	struct virtio_pci_device *vp_dev, *reg_dev = NULL;
	int rc;

	/* allocate our structure and fill it out */
	vp_dev = kzalloc(sizeof(struct virtio_pci_device), GFP_KERNEL);
	if (!vp_dev)
		return -ENOMEM;

	pci_set_drvdata(pci_dev, vp_dev);
	vp_dev->vdev.dev.parent = &pci_dev->dev;
	vp_dev->vdev.dev.release = virtio_pci_release_dev;
	vp_dev->pci_dev = pci_dev;
	INIT_LIST_HEAD(&vp_dev->virtqueues);
	spin_lock_init(&vp_dev->lock);

	/* enable the device */
	rc = pci_enable_device(pci_dev);
	if (rc)
		goto err_enable_device;

	if (force_legacy) {
		rc = virtio_pci_legacy_probe(vp_dev);
		/* Also try modern mode if we can't map BAR0 (no IO space). */
		if (rc == -ENODEV || rc == -ENOMEM)
			rc = virtio_pci_modern_probe(vp_dev);
		if (rc)
			goto err_probe;
	} else {
		rc = virtio_pci_modern_probe(vp_dev);
		if (rc == -ENODEV)
			rc = virtio_pci_legacy_probe(vp_dev);
		if (rc)
			goto err_probe;
	}

	pci_set_master(pci_dev);

	vp_dev->is_legacy = vp_dev->ldev.ioaddr ? true : false;

	rc = register_virtio_device(&vp_dev->vdev);
	reg_dev = vp_dev;
	if (rc)
		goto err_register;

	return 0;

err_register:
	if (vp_dev->is_legacy)
		virtio_pci_legacy_remove(vp_dev);
	else
		virtio_pci_modern_remove(vp_dev);
err_probe:
	pci_disable_device(pci_dev);
err_enable_device:
	if (reg_dev)
		put_device(&vp_dev->vdev.dev);
	else
		kfree(vp_dev);
	return rc;
}
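
/*
 * Probe order: the modern (virtio 1.x) interface is tried first, falling
 * back to legacy only if the device lacks modern support; force_legacy
 * inverts this preference for transitional devices. Once the device has
 * been registered, its release callback (virtio_pci_release_dev) owns the
 * vp_dev allocation, hence the put_device() rather than kfree() when
 * reg_dev is set.
 */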

static void virtio_pci_remove(struct pci_dev *pci_dev)
{
	struct virtio_pci_device *vp_dev = pci_get_drvdata(pci_dev);
	struct device *dev = get_device(&vp_dev->vdev.dev);

	/*
	 * Device is marked broken on surprise removal so that virtio upper
	 * layers can abort any ongoing operation.
	 */
	if (!pci_device_is_present(pci_dev))
		virtio_break_device(&vp_dev->vdev);

	pci_disable_sriov(pci_dev);

	unregister_virtio_device(&vp_dev->vdev);

	if (vp_dev->is_legacy)
		virtio_pci_legacy_remove(vp_dev);
	else
		virtio_pci_modern_remove(vp_dev);

	pci_disable_device(pci_dev);
	put_device(dev);
}

static int virtio_pci_sriov_configure(struct pci_dev *pci_dev, int num_vfs)
{
	struct virtio_pci_device *vp_dev = pci_get_drvdata(pci_dev);
	struct virtio_device *vdev = &vp_dev->vdev;
	int ret;

	if (!(vdev->config->get_status(vdev) & VIRTIO_CONFIG_S_DRIVER_OK))
		return -EBUSY;

	if (!__virtio_test_bit(vdev, VIRTIO_F_SR_IOV))
		return -EINVAL;

	if (pci_vfs_assigned(pci_dev))
		return -EPERM;

	if (num_vfs == 0) {
		pci_disable_sriov(pci_dev);
		return 0;
	}

	ret = pci_enable_sriov(pci_dev, num_vfs);
	if (ret < 0)
		return ret;

	return num_vfs;
}
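
/*
 * virtio_pci_sriov_configure() is driven through the standard PCI sysfs
 * interface, e.g. (assuming a virtio device at address 0000:00:04.0 that
 * offers VIRTIO_F_SR_IOV):
 *
 *	echo 4 > /sys/bus/pci/devices/0000:00:04.0/sriov_numvfs
 */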

static struct pci_driver virtio_pci_driver = {
	.name		= "virtio-pci",
	.id_table	= virtio_pci_id_table,
	.probe		= virtio_pci_probe,
	.remove		= virtio_pci_remove,
#ifdef CONFIG_PM_SLEEP
	.driver.pm	= &virtio_pci_pm_ops,
#endif
	.sriov_configure = virtio_pci_sriov_configure,
};

module_pci_driver(virtio_pci_driver);

MODULE_AUTHOR("Anthony Liguori <aliguori@us.ibm.com>");
MODULE_DESCRIPTION("virtio-pci");
MODULE_LICENSE("GPL");