// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Virtio memory mapped device driver
 *
 * Copyright 2011-2014, ARM Ltd.
 *
 * This module allows virtio devices to be used over a virtual, memory mapped
 * platform device.
 *
 * The guest device(s) may be instantiated in one of three equivalent ways:
 *
 * 1. Static platform device in board's code, e.g.:
 *
 *	static struct platform_device v2m_virtio_device = {
 *		.name = "virtio-mmio",
 *		.id = -1,
 *		.num_resources = 2,
 *		.resource = (struct resource []) {
 *			{
 *				.start = 0x1001e000,
 *				.end = 0x1001e0ff,
 *				.flags = IORESOURCE_MEM,
 *			}, {
 *				.start = 42,
 *				.end = 42,
 *				.flags = IORESOURCE_IRQ,
 *			},
 *		}
 *	};
 *
 * 2. Device Tree node, e.g.:
 *
 *	virtio_block@1e000 {
 *		compatible = "virtio,mmio";
 *		reg = <0x1e000 0x100>;
 *		interrupts = <42>;
 *	}
 *
 * 3. Kernel module (or command line) parameter. Can be used more than once -
 *    one device will be created for each one. Syntax:
 *
 *		[virtio_mmio.]device=<size>@<baseaddr>:<irq>[:<id>]
 *    where:
 *		<size>     := size (can use standard suffixes like K, M or G)
 *		<baseaddr> := physical base address
 *		<irq>      := interrupt number (as passed to request_irq())
 *		<id>       := (optional) platform device id
 *    e.g.:
 *		virtio_mmio.device=0x100@0x100b0000:48 \
 *				virtio_mmio.device=1K@0x1001e000:74
 *
 * Based on Virtio PCI driver by Anthony Liguori, copyright IBM Corp. 2007
 */

#define pr_fmt(fmt) "virtio-mmio: " fmt

#include <linux/acpi.h>
#include <linux/dma-mapping.h>
#include <linux/highmem.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/virtio.h>
#include <linux/virtio_config.h>
#include <uapi/linux/virtio_mmio.h>
#include <linux/virtio_ring.h>

/* The alignment to use between consumer and producer parts of vring.
 * Currently hardcoded to the page size. */
#define VIRTIO_MMIO_VRING_ALIGN		PAGE_SIZE

#define to_virtio_mmio_device(_plat_dev) \
	container_of(_plat_dev, struct virtio_mmio_device, vdev)

struct virtio_mmio_device {
	struct virtio_device vdev;
	struct platform_device *pdev;

	void __iomem *base;
	unsigned long version;

	/* a list of queues so we can dispatch IRQs */
	spinlock_t lock;
	struct list_head virtqueues;
};

struct virtio_mmio_vq_info {
	/* the actual virtqueue */
	struct virtqueue *vq;

	/* the list node for the virtqueues list */
	struct list_head node;
};

/* Configuration interface */

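/*
 * The device advertises a 64-bit feature word through a banked 32-bit
 * register pair: DEVICE_FEATURES_SEL selects which half DEVICE_FEATURES
 * returns (1 = bits 63:32, 0 = bits 31:0).
 */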
static u64 vm_get_features(struct virtio_device *vdev)
{
	struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);
	u64 features;

	writel(1, vm_dev->base + VIRTIO_MMIO_DEVICE_FEATURES_SEL);
	features = readl(vm_dev->base + VIRTIO_MMIO_DEVICE_FEATURES);
	features <<= 32;

	writel(0, vm_dev->base + VIRTIO_MMIO_DEVICE_FEATURES_SEL);
	features |= readl(vm_dev->base + VIRTIO_MMIO_DEVICE_FEATURES);

	return features;
}

static int vm_finalize_features(struct virtio_device *vdev)
{
	struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);

	/* Give virtio_ring a chance to accept features. */
	vring_transport_features(vdev);

	/* Make sure there are no mixed devices */
	if (vm_dev->version == 2 &&
			!__virtio_test_bit(vdev, VIRTIO_F_VERSION_1)) {
		dev_err(&vdev->dev, "New virtio-mmio devices (version 2) must provide VIRTIO_F_VERSION_1 feature!\n");
		return -EINVAL;
	}

	writel(1, vm_dev->base + VIRTIO_MMIO_DRIVER_FEATURES_SEL);
	writel((u32)(vdev->features >> 32),
			vm_dev->base + VIRTIO_MMIO_DRIVER_FEATURES);

	writel(0, vm_dev->base + VIRTIO_MMIO_DRIVER_FEATURES_SEL);
	writel((u32)vdev->features,
			vm_dev->base + VIRTIO_MMIO_DRIVER_FEATURES);

	return 0;
}

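/*
 * Config space accessors. Legacy (version 1) devices expect byte-wise,
 * guest-endian accesses; modern (version 2) devices use little-endian
 * accesses of the field's natural width, with 64-bit fields split into
 * two 32-bit halves.
 */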
static void vm_get(struct virtio_device *vdev, unsigned int offset,
		   void *buf, unsigned int len)
{
	struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);
	void __iomem *base = vm_dev->base + VIRTIO_MMIO_CONFIG;
	u8 b;
	__le16 w;
	__le32 l;

	if (vm_dev->version == 1) {
		u8 *ptr = buf;
		int i;

		for (i = 0; i < len; i++)
			ptr[i] = readb(base + offset + i);
		return;
	}

	switch (len) {
	case 1:
		b = readb(base + offset);
		memcpy(buf, &b, sizeof b);
		break;
	case 2:
		w = cpu_to_le16(readw(base + offset));
		memcpy(buf, &w, sizeof w);
		break;
	case 4:
		l = cpu_to_le32(readl(base + offset));
		memcpy(buf, &l, sizeof l);
		break;
	case 8:
		l = cpu_to_le32(readl(base + offset));
		memcpy(buf, &l, sizeof l);
		l = cpu_to_le32(ioread32(base + offset + sizeof l));
		memcpy(buf + sizeof l, &l, sizeof l);
		break;
	default:
		BUG();
	}
}

static void vm_set(struct virtio_device *vdev, unsigned int offset,
		   const void *buf, unsigned int len)
{
	struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);
	void __iomem *base = vm_dev->base + VIRTIO_MMIO_CONFIG;
	u8 b;
	__le16 w;
	__le32 l;

	if (vm_dev->version == 1) {
		const u8 *ptr = buf;
		int i;

		for (i = 0; i < len; i++)
			writeb(ptr[i], base + offset + i);

		return;
	}

	switch (len) {
	case 1:
		memcpy(&b, buf, sizeof b);
		writeb(b, base + offset);
		break;
	case 2:
		memcpy(&w, buf, sizeof w);
		writew(le16_to_cpu(w), base + offset);
		break;
	case 4:
		memcpy(&l, buf, sizeof l);
		writel(le32_to_cpu(l), base + offset);
		break;
	case 8:
		memcpy(&l, buf, sizeof l);
		writel(le32_to_cpu(l), base + offset);
		memcpy(&l, buf + sizeof l, sizeof l);
		writel(le32_to_cpu(l), base + offset + sizeof l);
		break;
	default:
		BUG();
	}
}

static u32 vm_generation(struct virtio_device *vdev)
{
	struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);

	/* Legacy devices have no config generation counter */
	if (vm_dev->version == 1)
		return 0;
	else
		return readl(vm_dev->base + VIRTIO_MMIO_CONFIG_GENERATION);
}

static u8 vm_get_status(struct virtio_device *vdev)
{
	struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);

	return readl(vm_dev->base + VIRTIO_MMIO_STATUS) & 0xff;
}

static void vm_set_status(struct virtio_device *vdev, u8 status)
{
	struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);

	/* We should never be setting status to 0. */
	BUG_ON(status == 0);

	/*
	 * Per memory-barriers.txt, wmb() is not needed to guarantee
	 * that the cache coherent memory writes have completed
	 * before writing to the MMIO region.
	 */
	writel(status, vm_dev->base + VIRTIO_MMIO_STATUS);
}

static void vm_reset(struct virtio_device *vdev)
{
	struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);

	/* 0 status means a reset. */
	writel(0, vm_dev->base + VIRTIO_MMIO_STATUS);
}

/* Transport interface */

/* the notify function used when creating a virt queue */
static bool vm_notify(struct virtqueue *vq)
{
	struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vq->vdev);

	/* We write the queue's selector into the notification register to
	 * signal the other end */
	writel(vq->index, vm_dev->base + VIRTIO_MMIO_QUEUE_NOTIFY);

	return true;
}

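/*
 * A single interrupt line is shared by config-change and vring events;
 * INTERRUPT_STATUS reports which are pending, and the same bits are
 * written back to INTERRUPT_ACK to acknowledge them.
 */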
/* Notify all virtqueues on an interrupt. */
static irqreturn_t vm_interrupt(int irq, void *opaque)
{
	struct virtio_mmio_device *vm_dev = opaque;
	struct virtio_mmio_vq_info *info;
	unsigned long status;
	unsigned long flags;
	irqreturn_t ret = IRQ_NONE;

	/* Read and acknowledge interrupts */
	status = readl(vm_dev->base + VIRTIO_MMIO_INTERRUPT_STATUS);
	writel(status, vm_dev->base + VIRTIO_MMIO_INTERRUPT_ACK);

	if (unlikely(status & VIRTIO_MMIO_INT_CONFIG)) {
		virtio_config_changed(&vm_dev->vdev);
		ret = IRQ_HANDLED;
	}

	if (likely(status & VIRTIO_MMIO_INT_VRING)) {
		spin_lock_irqsave(&vm_dev->lock, flags);
		list_for_each_entry(info, &vm_dev->virtqueues, node)
			ret |= vring_interrupt(irq, info->vq);
		spin_unlock_irqrestore(&vm_dev->lock, flags);
	}

	return ret;
}

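/*
 * Tear down a single virtqueue: unlink it from the IRQ dispatch list,
 * tell the device the queue is gone (PFN = 0 on legacy devices,
 * READY = 0 on modern ones), then free the ring.
 */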
static void vm_del_vq(struct virtqueue *vq)
{
	struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vq->vdev);
	struct virtio_mmio_vq_info *info = vq->priv;
	unsigned long flags;
	unsigned int index = vq->index;

	spin_lock_irqsave(&vm_dev->lock, flags);
	list_del(&info->node);
	spin_unlock_irqrestore(&vm_dev->lock, flags);

	/* Select and deactivate the queue */
	writel(index, vm_dev->base + VIRTIO_MMIO_QUEUE_SEL);
	if (vm_dev->version == 1) {
		writel(0, vm_dev->base + VIRTIO_MMIO_QUEUE_PFN);
	} else {
		writel(0, vm_dev->base + VIRTIO_MMIO_QUEUE_READY);
		WARN_ON(readl(vm_dev->base + VIRTIO_MMIO_QUEUE_READY));
	}

	vring_del_virtqueue(vq);

	kfree(info);
}

static void vm_del_vqs(struct virtio_device *vdev)
{
	struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);
	struct virtqueue *vq, *n;

	list_for_each_entry_safe(vq, n, &vdev->vqs, list)
		vm_del_vq(vq);

	free_irq(platform_get_irq(vm_dev->pdev, 0), vm_dev);
}

static void vm_synchronize_cbs(struct virtio_device *vdev)
{
	struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);

	synchronize_irq(platform_get_irq(vm_dev->pdev, 0));
}

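/*
 * Allocate a vring for queue 'index' and hand its location to the
 * device: a single page-aligned PFN on legacy devices, or the separate
 * descriptor/available/used ring addresses on modern ones.
 */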
static struct virtqueue *vm_setup_vq(struct virtio_device *vdev, unsigned int index,
				  void (*callback)(struct virtqueue *vq),
				  const char *name, bool ctx)
{
	struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);
	struct virtio_mmio_vq_info *info;
	struct virtqueue *vq;
	unsigned long flags;
	unsigned int num;
	int err;

	if (!name)
		return NULL;

	/* Select the queue we're interested in */
	writel(index, vm_dev->base + VIRTIO_MMIO_QUEUE_SEL);

	/* Queue shouldn't already be set up. */
	if (readl(vm_dev->base + (vm_dev->version == 1 ?
			VIRTIO_MMIO_QUEUE_PFN : VIRTIO_MMIO_QUEUE_READY))) {
		err = -ENOENT;
		goto error_available;
	}

	/* Allocate and fill out our active queue description */
	info = kmalloc(sizeof(*info), GFP_KERNEL);
	if (!info) {
		err = -ENOMEM;
		goto error_kmalloc;
	}

	num = readl(vm_dev->base + VIRTIO_MMIO_QUEUE_NUM_MAX);
	if (num == 0) {
		err = -ENOENT;
		goto error_new_virtqueue;
	}

	/* Create the vring */
	vq = vring_create_virtqueue(index, num, VIRTIO_MMIO_VRING_ALIGN, vdev,
				 true, true, ctx, vm_notify, callback, name);
	if (!vq) {
		err = -ENOMEM;
		goto error_new_virtqueue;
	}

	vq->num_max = num;

	/* Activate the queue */
	writel(virtqueue_get_vring_size(vq), vm_dev->base + VIRTIO_MMIO_QUEUE_NUM);
	if (vm_dev->version == 1) {
		u64 q_pfn = virtqueue_get_desc_addr(vq) >> PAGE_SHIFT;

		/*
		 * virtio-mmio v1 uses a 32bit QUEUE PFN. If we have something
		 * that doesn't fit in 32bit, fail the setup rather than
		 * pretending to be successful.
		 */
		if (q_pfn >> 32) {
			dev_err(&vdev->dev,
				"platform bug: legacy virtio-mmio must not be used with RAM above 0x%llxGB\n",
				0x1ULL << (32 + PAGE_SHIFT - 30));
			err = -E2BIG;
			goto error_bad_pfn;
		}

		writel(PAGE_SIZE, vm_dev->base + VIRTIO_MMIO_QUEUE_ALIGN);
		writel(q_pfn, vm_dev->base + VIRTIO_MMIO_QUEUE_PFN);
	} else {
		u64 addr;

		addr = virtqueue_get_desc_addr(vq);
		writel((u32)addr, vm_dev->base + VIRTIO_MMIO_QUEUE_DESC_LOW);
		writel((u32)(addr >> 32),
				vm_dev->base + VIRTIO_MMIO_QUEUE_DESC_HIGH);

		addr = virtqueue_get_avail_addr(vq);
		writel((u32)addr, vm_dev->base + VIRTIO_MMIO_QUEUE_AVAIL_LOW);
		writel((u32)(addr >> 32),
				vm_dev->base + VIRTIO_MMIO_QUEUE_AVAIL_HIGH);

		addr = virtqueue_get_used_addr(vq);
		writel((u32)addr, vm_dev->base + VIRTIO_MMIO_QUEUE_USED_LOW);
		writel((u32)(addr >> 32),
				vm_dev->base + VIRTIO_MMIO_QUEUE_USED_HIGH);

		writel(1, vm_dev->base + VIRTIO_MMIO_QUEUE_READY);
	}

	vq->priv = info;
	info->vq = vq;

	spin_lock_irqsave(&vm_dev->lock, flags);
	list_add(&info->node, &vm_dev->virtqueues);
	spin_unlock_irqrestore(&vm_dev->lock, flags);

	return vq;

error_bad_pfn:
	vring_del_virtqueue(vq);
error_new_virtqueue:
	if (vm_dev->version == 1) {
		writel(0, vm_dev->base + VIRTIO_MMIO_QUEUE_PFN);
	} else {
		writel(0, vm_dev->base + VIRTIO_MMIO_QUEUE_READY);
		WARN_ON(readl(vm_dev->base + VIRTIO_MMIO_QUEUE_READY));
	}
	kfree(info);
error_kmalloc:
error_available:
	return ERR_PTR(err);
}

static int vm_find_vqs(struct virtio_device *vdev, unsigned int nvqs,
		       struct virtqueue *vqs[],
		       vq_callback_t *callbacks[],
		       const char * const names[],
		       const bool *ctx,
		       struct irq_affinity *desc)
{
	struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);
	int irq = platform_get_irq(vm_dev->pdev, 0);
	int i, err, queue_idx = 0;

	if (irq < 0)
		return irq;

	err = request_irq(irq, vm_interrupt, IRQF_SHARED,
			dev_name(&vdev->dev), vm_dev);
	if (err)
		return err;

	if (of_property_read_bool(vm_dev->pdev->dev.of_node, "wakeup-source"))
		enable_irq_wake(irq);

	for (i = 0; i < nvqs; ++i) {
		if (!names[i]) {
			vqs[i] = NULL;
			continue;
		}

		vqs[i] = vm_setup_vq(vdev, queue_idx++, callbacks[i], names[i],
				     ctx ? ctx[i] : false);
		if (IS_ERR(vqs[i])) {
			vm_del_vqs(vdev);
			return PTR_ERR(vqs[i]);
		}
	}

	return 0;
}

static const char *vm_bus_name(struct virtio_device *vdev)
{
	struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);

	return vm_dev->pdev->name;
}

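/*
 * Shared memory regions (used e.g. by virtio-fs DAX windows) are
 * discovered by selecting a region ID and reading back its length and
 * base address; an all-ones length means the region doesn't exist.
 */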
static bool vm_get_shm_region(struct virtio_device *vdev,
			      struct virtio_shm_region *region, u8 id)
{
	struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);
	u64 len, addr;

	/* Select the region we're interested in */
	writel(id, vm_dev->base + VIRTIO_MMIO_SHM_SEL);

	/* Read the region size */
	len = (u64) readl(vm_dev->base + VIRTIO_MMIO_SHM_LEN_LOW);
	len |= (u64) readl(vm_dev->base + VIRTIO_MMIO_SHM_LEN_HIGH) << 32;

	region->len = len;

	/* Check if region length is -1. If that's the case, the shared memory
	 * region does not exist and there is no need to proceed further.
	 */
	if (len == ~(u64)0)
		return false;

	/* Read the region base address */
	addr = (u64) readl(vm_dev->base + VIRTIO_MMIO_SHM_BASE_LOW);
	addr |= (u64) readl(vm_dev->base + VIRTIO_MMIO_SHM_BASE_HIGH) << 32;

	region->addr = addr;

	return true;
}

static const struct virtio_config_ops virtio_mmio_config_ops = {
	.get		= vm_get,
	.set		= vm_set,
	.generation	= vm_generation,
	.get_status	= vm_get_status,
	.set_status	= vm_set_status,
	.reset		= vm_reset,
	.find_vqs	= vm_find_vqs,
	.del_vqs	= vm_del_vqs,
	.get_features	= vm_get_features,
	.finalize_features = vm_finalize_features,
	.bus_name	= vm_bus_name,
	.get_shm_region = vm_get_shm_region,
	.synchronize_cbs = vm_synchronize_cbs,
};

#ifdef CONFIG_PM_SLEEP
static int virtio_mmio_freeze(struct device *dev)
{
	struct virtio_mmio_device *vm_dev = dev_get_drvdata(dev);

	return virtio_device_freeze(&vm_dev->vdev);
}

static int virtio_mmio_restore(struct device *dev)
{
	struct virtio_mmio_device *vm_dev = dev_get_drvdata(dev);

	/* Legacy devices need the guest page size programmed again before
	 * virtio_device_restore() re-creates the virtqueues */
	if (vm_dev->version == 1)
		writel(PAGE_SIZE, vm_dev->base + VIRTIO_MMIO_GUEST_PAGE_SIZE);

	return virtio_device_restore(&vm_dev->vdev);
}

static const struct dev_pm_ops virtio_mmio_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(virtio_mmio_freeze, virtio_mmio_restore)
};
#endif

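/*
 * Called when the last reference to the virtio device is dropped;
 * frees the containing virtio_mmio_device, which was devm-allocated
 * against the parent platform device in virtio_mmio_probe().
 */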
static void virtio_mmio_release_dev(struct device *_d)
{
	struct virtio_device *vdev =
			container_of(_d, struct virtio_device, dev);
	struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);
	struct platform_device *pdev = vm_dev->pdev;

	devm_kfree(&pdev->dev, vm_dev);
}

/* Platform device */

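/*
 * Probe sequence: map the MMIO window, then validate the magic value
 * ("virt", little-endian), the device version (1 or 2) and the device
 * ID before registering with the virtio bus. A device ID of 0 is a
 * legal placeholder meaning "no device".
 */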
static int virtio_mmio_probe(struct platform_device *pdev)
{
	struct virtio_mmio_device *vm_dev;
	unsigned long magic;
	int rc;

	vm_dev = devm_kzalloc(&pdev->dev, sizeof(*vm_dev), GFP_KERNEL);
	if (!vm_dev)
		return -ENOMEM;

	vm_dev->vdev.dev.parent = &pdev->dev;
	vm_dev->vdev.dev.release = virtio_mmio_release_dev;
	vm_dev->vdev.config = &virtio_mmio_config_ops;
	vm_dev->pdev = pdev;
	INIT_LIST_HEAD(&vm_dev->virtqueues);
	spin_lock_init(&vm_dev->lock);

	vm_dev->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(vm_dev->base))
		return PTR_ERR(vm_dev->base);

	/* Check magic value */
	magic = readl(vm_dev->base + VIRTIO_MMIO_MAGIC_VALUE);
	if (magic != ('v' | 'i' << 8 | 'r' << 16 | 't' << 24)) {
		dev_warn(&pdev->dev, "Wrong magic value 0x%08lx!\n", magic);
		return -ENODEV;
	}

	/* Check device version */
	vm_dev->version = readl(vm_dev->base + VIRTIO_MMIO_VERSION);
	if (vm_dev->version < 1 || vm_dev->version > 2) {
		dev_err(&pdev->dev, "Version %ld not supported!\n",
				vm_dev->version);
		return -ENXIO;
	}

	vm_dev->vdev.id.device = readl(vm_dev->base + VIRTIO_MMIO_DEVICE_ID);
	if (vm_dev->vdev.id.device == 0) {
		/*
		 * virtio-mmio device with an ID 0 is a (dummy) placeholder
		 * with no function. End probing now with no error reported.
		 */
		return -ENODEV;
	}
	vm_dev->vdev.id.vendor = readl(vm_dev->base + VIRTIO_MMIO_VENDOR_ID);

	if (vm_dev->version == 1) {
		writel(PAGE_SIZE, vm_dev->base + VIRTIO_MMIO_GUEST_PAGE_SIZE);

		rc = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
		/*
		 * In the legacy case, ensure our coherently-allocated virtio
		 * ring will be at an address expressible as a 32-bit PFN.
		 */
		if (!rc)
			dma_set_coherent_mask(&pdev->dev,
					      DMA_BIT_MASK(32 + PAGE_SHIFT));
	} else {
		rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	}
	if (rc)
		rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
	if (rc)
		dev_warn(&pdev->dev, "Failed to enable 64-bit or 32-bit DMA. Trying to continue, but this might not work.\n");

	platform_set_drvdata(pdev, vm_dev);

	rc = register_virtio_device(&vm_dev->vdev);
	if (rc)
		put_device(&vm_dev->vdev.dev);

	return rc;
}

static int virtio_mmio_remove(struct platform_device *pdev)
{
	struct virtio_mmio_device *vm_dev = platform_get_drvdata(pdev);

	unregister_virtio_device(&vm_dev->vdev);

	return 0;
}

/* Devices list parameter */

#if defined(CONFIG_VIRTIO_MMIO_CMDLINE_DEVICES)

static struct device vm_cmdline_parent = {
	.init_name = "virtio-mmio-cmdline",
};

static int vm_cmdline_parent_registered;
static int vm_cmdline_id;

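/*
 * Parse one "<size>@<baseaddr>:<irq>[:<id>]" string (see the syntax in
 * the header comment) and register a matching "virtio-mmio" platform
 * device for it.
 */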
static int vm_cmdline_set(const char *device,
		const struct kernel_param *kp)
{
	int err;
	struct resource resources[2] = {};
	char *str;
	long long base, size;
	unsigned int irq;
	int processed, consumed = 0;
	struct platform_device *pdev;

	/* Consume "size" part of the command line parameter */
	size = memparse(device, &str);

	/* Get "@<base>:<irq>[:<id>]" chunks */
	processed = sscanf(str, "@%lli:%u%n:%d%n",
			&base, &irq, &consumed,
			&vm_cmdline_id, &consumed);

	/*
	 * sscanf() must process at least 2 chunks; also there
	 * must be no extra characters after the last chunk, so
	 * str[consumed] must be '\0'
	 */
	if (processed < 2 || str[consumed] || irq == 0)
		return -EINVAL;

	resources[0].flags = IORESOURCE_MEM;
	resources[0].start = base;
	resources[0].end = base + size - 1;

	resources[1].flags = IORESOURCE_IRQ;
	resources[1].start = resources[1].end = irq;

	if (!vm_cmdline_parent_registered) {
		err = device_register(&vm_cmdline_parent);
		if (err) {
			put_device(&vm_cmdline_parent);
			pr_err("Failed to register parent device!\n");
			return err;
		}
		vm_cmdline_parent_registered = 1;
	}

	pr_info("Registering device virtio-mmio.%d at 0x%llx-0x%llx, IRQ %d.\n",
		       vm_cmdline_id,
		       (unsigned long long)resources[0].start,
		       (unsigned long long)resources[0].end,
		       (int)resources[1].start);

	pdev = platform_device_register_resndata(&vm_cmdline_parent,
			"virtio-mmio", vm_cmdline_id++,
			resources, ARRAY_SIZE(resources), NULL, 0);

	return PTR_ERR_OR_ZERO(pdev);
}

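/* Format one registered command line device back in parameter syntax. */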
static int vm_cmdline_get_device(struct device *dev, void *data)
{
	char *buffer = data;
	unsigned int len = strlen(buffer);
	struct platform_device *pdev = to_platform_device(dev);

	snprintf(buffer + len, PAGE_SIZE - len, "0x%llx@0x%llx:%llu:%d\n",
			pdev->resource[0].end - pdev->resource[0].start + 1ULL,
			(unsigned long long)pdev->resource[0].start,
			(unsigned long long)pdev->resource[1].start,
			pdev->id);

	return 0;
}

static int vm_cmdline_get(char *buffer, const struct kernel_param *kp)
{
	buffer[0] = '\0';
	device_for_each_child(&vm_cmdline_parent, buffer,
			vm_cmdline_get_device);
	return strlen(buffer) + 1;
}

static const struct kernel_param_ops vm_cmdline_param_ops = {
	.set = vm_cmdline_set,
	.get = vm_cmdline_get,
};

device_param_cb(device, &vm_cmdline_param_ops, NULL, S_IRUSR);

static int vm_unregister_cmdline_device(struct device *dev,
		void *data)
{
	platform_device_unregister(to_platform_device(dev));

	return 0;
}

static void vm_unregister_cmdline_devices(void)
{
	if (vm_cmdline_parent_registered) {
		device_for_each_child(&vm_cmdline_parent, NULL,
				vm_unregister_cmdline_device);
		device_unregister(&vm_cmdline_parent);
		vm_cmdline_parent_registered = 0;
	}
}

#else

static void vm_unregister_cmdline_devices(void)
{
}

#endif

/* Platform driver */

static const struct of_device_id virtio_mmio_match[] = {
	{ .compatible = "virtio,mmio", },
	{},
};
MODULE_DEVICE_TABLE(of, virtio_mmio_match);

#ifdef CONFIG_ACPI
static const struct acpi_device_id virtio_mmio_acpi_match[] = {
	{ "LNRO0005", },
	{ }
};
MODULE_DEVICE_TABLE(acpi, virtio_mmio_acpi_match);
#endif

static struct platform_driver virtio_mmio_driver = {
	.probe		= virtio_mmio_probe,
	.remove		= virtio_mmio_remove,
	.driver		= {
		.name	= "virtio-mmio",
		.of_match_table	= virtio_mmio_match,
		.acpi_match_table = ACPI_PTR(virtio_mmio_acpi_match),
#ifdef CONFIG_PM_SLEEP
		.pm	= &virtio_mmio_pm_ops,
#endif
	},
};

static int __init virtio_mmio_init(void)
{
	return platform_driver_register(&virtio_mmio_driver);
}

static void __exit virtio_mmio_exit(void)
{
	platform_driver_unregister(&virtio_mmio_driver);
	vm_unregister_cmdline_devices();
}

module_init(virtio_mmio_init);
module_exit(virtio_mmio_exit);

MODULE_AUTHOR("Pawel Moll <pawel.moll@arm.com>");
MODULE_DESCRIPTION("Platform bus driver for memory mapped virtio devices");
MODULE_LICENSE("GPL");