// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 *   K. Y. Srinivasan <kys@microsoft.com>
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/init.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/sysctl.h>
#include <linux/slab.h>
#include <linux/acpi.h>
#include <linux/completion.h>
#include <linux/hyperv.h>
#include <linux/kernel_stat.h>
#include <linux/of_address.h>
#include <linux/clockchips.h>
#include <linux/cpu.h>
#include <linux/sched/isolation.h>
#include <linux/sched/task_stack.h>

#include <linux/delay.h>
#include <linux/panic_notifier.h>
#include <linux/ptrace.h>
#include <linux/screen_info.h>
#include <linux/efi.h>
#include <linux/random.h>
#include <linux/kernel.h>
#include <linux/syscore_ops.h>
#include <linux/dma-map-ops.h>
#include <linux/pci.h>
#include <clocksource/hyperv_timer.h>
#include <asm/mshyperv.h>
#include "hyperv_vmbus.h"
struct vmbus_dynid {
	struct list_head node;
	struct hv_vmbus_device_id id;
};

static struct device *hv_dev;

static int hyperv_cpuhp_online;

static long __percpu *vmbus_evt;

/* Values parsed from ACPI DSDT */
/*
 * The panic notifier below is responsible solely for unloading the
 * vmbus connection, which is necessary in a panic event.
 *
 * Notice that an intricate relation exists between this notifier and
 * the Hyper-V framebuffer panic notifier: the vmbus connection must be
 * alive for the latter to succeed, so the two are ordered with respect
 * to each other [see hvfb_on_panic()] using the notifiers' priorities.
 */
static int hv_panic_vmbus_unload(struct notifier_block *nb, unsigned long val,
				 void *args)
{
	vmbus_initiate_unload(true);
	return NOTIFY_DONE;
}

static struct notifier_block hyperv_panic_vmbus_unload_block = {
	.notifier_call	= hv_panic_vmbus_unload,
	.priority	= INT_MIN + 1, /* almost the latest one to execute */
};
static const char *fb_mmio_name = "fb_range";
static struct resource *fb_mmio;
static struct resource *hyperv_mmio;
static DEFINE_MUTEX(hyperv_mmio_lock);

static int vmbus_exists(void)
{
	if (hv_dev == NULL)
		return -ENODEV;

	return 0;
}
static u8 channel_monitor_group(const struct vmbus_channel *channel)
{
	return (u8)channel->offermsg.monitorid / 32;
}

static u8 channel_monitor_offset(const struct vmbus_channel *channel)
{
	return (u8)channel->offermsg.monitorid % 32;
}
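
/*
 * Worked example (illustrative): monitor ID 45 maps to trigger group
 * 45 / 32 = 1 at bit offset 45 % 32 = 13, i.e. bit 13 of
 * trigger_group[1] in the monitor page.
 */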
static u32 channel_pending(const struct vmbus_channel *channel,
			   const struct hv_monitor_page *monitor_page)
{
	u8 monitor_group = channel_monitor_group(channel);

	return monitor_page->trigger_group[monitor_group].pending;
}

static u32 channel_latency(const struct vmbus_channel *channel,
			   const struct hv_monitor_page *monitor_page)
{
	u8 monitor_group = channel_monitor_group(channel);
	u8 monitor_offset = channel_monitor_offset(channel);

	return monitor_page->latency[monitor_group][monitor_offset];
}

static u32 channel_conn_id(struct vmbus_channel *channel,
			   struct hv_monitor_page *monitor_page)
{
	u8 monitor_group = channel_monitor_group(channel);
	u8 monitor_offset = channel_monitor_offset(channel);

	return monitor_page->parameter[monitor_group][monitor_offset].connectionid.u.id;
}
static ssize_t id_show(struct device *dev, struct device_attribute *dev_attr,
		       char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	if (!hv_dev->channel)
		return -ENODEV;

	return sysfs_emit(buf, "%d\n", hv_dev->channel->offermsg.child_relid);
}
static DEVICE_ATTR_RO(id);

static ssize_t state_show(struct device *dev, struct device_attribute *dev_attr,
			  char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	if (!hv_dev->channel)
		return -ENODEV;

	return sysfs_emit(buf, "%d\n", hv_dev->channel->state);
}
static DEVICE_ATTR_RO(state);

static ssize_t monitor_id_show(struct device *dev,
			       struct device_attribute *dev_attr, char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	if (!hv_dev->channel)
		return -ENODEV;

	return sysfs_emit(buf, "%d\n", hv_dev->channel->offermsg.monitorid);
}
static DEVICE_ATTR_RO(monitor_id);

static ssize_t class_id_show(struct device *dev,
			     struct device_attribute *dev_attr, char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	if (!hv_dev->channel)
		return -ENODEV;

	return sysfs_emit(buf, "{%pUl}\n",
			  &hv_dev->channel->offermsg.offer.if_type);
}
static DEVICE_ATTR_RO(class_id);

static ssize_t device_id_show(struct device *dev,
			      struct device_attribute *dev_attr, char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	if (!hv_dev->channel)
		return -ENODEV;

	return sysfs_emit(buf, "{%pUl}\n",
			  &hv_dev->channel->offermsg.offer.if_instance);
}
static DEVICE_ATTR_RO(device_id);

static ssize_t modalias_show(struct device *dev,
			     struct device_attribute *dev_attr, char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	return sysfs_emit(buf, "vmbus:%*phN\n", UUID_SIZE, &hv_dev->dev_type);
}
static DEVICE_ATTR_RO(modalias);

#ifdef CONFIG_NUMA
static ssize_t numa_node_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	if (!hv_dev->channel)
		return -ENODEV;

	return sysfs_emit(buf, "%d\n", cpu_to_node(hv_dev->channel->target_cpu));
}
static DEVICE_ATTR_RO(numa_node);
#endif
static ssize_t server_monitor_pending_show(struct device *dev,
					   struct device_attribute *dev_attr,
					   char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	if (!hv_dev->channel)
		return -ENODEV;

	return sysfs_emit(buf, "%d\n", channel_pending(hv_dev->channel,
			  vmbus_connection.monitor_pages[0]));
}
static DEVICE_ATTR_RO(server_monitor_pending);

static ssize_t client_monitor_pending_show(struct device *dev,
					   struct device_attribute *dev_attr,
					   char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	if (!hv_dev->channel)
		return -ENODEV;

	return sysfs_emit(buf, "%d\n", channel_pending(hv_dev->channel,
			  vmbus_connection.monitor_pages[1]));
}
static DEVICE_ATTR_RO(client_monitor_pending);

static ssize_t server_monitor_latency_show(struct device *dev,
					   struct device_attribute *dev_attr,
					   char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	if (!hv_dev->channel)
		return -ENODEV;

	return sysfs_emit(buf, "%d\n", channel_latency(hv_dev->channel,
			  vmbus_connection.monitor_pages[0]));
}
static DEVICE_ATTR_RO(server_monitor_latency);

static ssize_t client_monitor_latency_show(struct device *dev,
					   struct device_attribute *dev_attr,
					   char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	if (!hv_dev->channel)
		return -ENODEV;

	return sysfs_emit(buf, "%d\n", channel_latency(hv_dev->channel,
			  vmbus_connection.monitor_pages[1]));
}
static DEVICE_ATTR_RO(client_monitor_latency);

static ssize_t server_monitor_conn_id_show(struct device *dev,
					   struct device_attribute *dev_attr,
					   char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	if (!hv_dev->channel)
		return -ENODEV;

	return sysfs_emit(buf, "%d\n", channel_conn_id(hv_dev->channel,
			  vmbus_connection.monitor_pages[0]));
}
static DEVICE_ATTR_RO(server_monitor_conn_id);

static ssize_t client_monitor_conn_id_show(struct device *dev,
					   struct device_attribute *dev_attr,
					   char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	if (!hv_dev->channel)
		return -ENODEV;

	return sysfs_emit(buf, "%d\n", channel_conn_id(hv_dev->channel,
			  vmbus_connection.monitor_pages[1]));
}
static DEVICE_ATTR_RO(client_monitor_conn_id);
static ssize_t out_intr_mask_show(struct device *dev,
				  struct device_attribute *dev_attr, char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	struct hv_ring_buffer_debug_info outbound;
	int ret;

	if (!hv_dev->channel)
		return -ENODEV;

	ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound,
					  &outbound);
	if (ret < 0)
		return ret;

	return sysfs_emit(buf, "%d\n", outbound.current_interrupt_mask);
}
static DEVICE_ATTR_RO(out_intr_mask);

static ssize_t out_read_index_show(struct device *dev,
				   struct device_attribute *dev_attr, char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	struct hv_ring_buffer_debug_info outbound;
	int ret;

	if (!hv_dev->channel)
		return -ENODEV;

	ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound,
					  &outbound);
	if (ret < 0)
		return ret;

	return sysfs_emit(buf, "%d\n", outbound.current_read_index);
}
static DEVICE_ATTR_RO(out_read_index);

static ssize_t out_write_index_show(struct device *dev,
				    struct device_attribute *dev_attr,
				    char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	struct hv_ring_buffer_debug_info outbound;
	int ret;

	if (!hv_dev->channel)
		return -ENODEV;

	ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound,
					  &outbound);
	if (ret < 0)
		return ret;

	return sysfs_emit(buf, "%d\n", outbound.current_write_index);
}
static DEVICE_ATTR_RO(out_write_index);

static ssize_t out_read_bytes_avail_show(struct device *dev,
					 struct device_attribute *dev_attr,
					 char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	struct hv_ring_buffer_debug_info outbound;
	int ret;

	if (!hv_dev->channel)
		return -ENODEV;

	ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound,
					  &outbound);
	if (ret < 0)
		return ret;

	return sysfs_emit(buf, "%d\n", outbound.bytes_avail_toread);
}
static DEVICE_ATTR_RO(out_read_bytes_avail);

static ssize_t out_write_bytes_avail_show(struct device *dev,
					  struct device_attribute *dev_attr,
					  char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	struct hv_ring_buffer_debug_info outbound;
	int ret;

	if (!hv_dev->channel)
		return -ENODEV;

	ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound,
					  &outbound);
	if (ret < 0)
		return ret;

	return sysfs_emit(buf, "%d\n", outbound.bytes_avail_towrite);
}
static DEVICE_ATTR_RO(out_write_bytes_avail);

static ssize_t in_intr_mask_show(struct device *dev,
				 struct device_attribute *dev_attr, char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	struct hv_ring_buffer_debug_info inbound;
	int ret;

	if (!hv_dev->channel)
		return -ENODEV;

	ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
	if (ret < 0)
		return ret;

	return sysfs_emit(buf, "%d\n", inbound.current_interrupt_mask);
}
static DEVICE_ATTR_RO(in_intr_mask);

static ssize_t in_read_index_show(struct device *dev,
				  struct device_attribute *dev_attr, char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	struct hv_ring_buffer_debug_info inbound;
	int ret;

	if (!hv_dev->channel)
		return -ENODEV;

	ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
	if (ret < 0)
		return ret;

	return sysfs_emit(buf, "%d\n", inbound.current_read_index);
}
static DEVICE_ATTR_RO(in_read_index);

static ssize_t in_write_index_show(struct device *dev,
				   struct device_attribute *dev_attr, char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	struct hv_ring_buffer_debug_info inbound;
	int ret;

	if (!hv_dev->channel)
		return -ENODEV;

	ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
	if (ret < 0)
		return ret;

	return sysfs_emit(buf, "%d\n", inbound.current_write_index);
}
static DEVICE_ATTR_RO(in_write_index);

static ssize_t in_read_bytes_avail_show(struct device *dev,
					struct device_attribute *dev_attr,
					char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	struct hv_ring_buffer_debug_info inbound;
	int ret;

	if (!hv_dev->channel)
		return -ENODEV;

	ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
	if (ret < 0)
		return ret;

	return sysfs_emit(buf, "%d\n", inbound.bytes_avail_toread);
}
static DEVICE_ATTR_RO(in_read_bytes_avail);

static ssize_t in_write_bytes_avail_show(struct device *dev,
					 struct device_attribute *dev_attr,
					 char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	struct hv_ring_buffer_debug_info inbound;
	int ret;

	if (!hv_dev->channel)
		return -ENODEV;

	ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
	if (ret < 0)
		return ret;

	return sysfs_emit(buf, "%d\n", inbound.bytes_avail_towrite);
}
static DEVICE_ATTR_RO(in_write_bytes_avail);
static ssize_t channel_vp_mapping_show(struct device *dev,
				       struct device_attribute *dev_attr,
				       char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	struct vmbus_channel *channel = hv_dev->channel, *cur_sc;
	int n_written;
	struct list_head *cur;

	if (!channel)
		return -ENODEV;

	mutex_lock(&vmbus_connection.channel_mutex);

	n_written = sysfs_emit(buf, "%u:%u\n",
			       channel->offermsg.child_relid,
			       channel->target_cpu);

	list_for_each(cur, &channel->sc_list) {

		cur_sc = list_entry(cur, struct vmbus_channel, sc_list);
		n_written += sysfs_emit_at(buf, n_written, "%u:%u\n",
					   cur_sc->offermsg.child_relid,
					   cur_sc->target_cpu);
	}

	mutex_unlock(&vmbus_connection.channel_mutex);

	return n_written;
}
static DEVICE_ATTR_RO(channel_vp_mapping);
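
/*
 * Illustrative output of channel_vp_mapping (made-up relid:cpu pairs;
 * the primary channel comes first, then any sub-channels, one
 * "%u:%u\n" line each):
 *
 *	14:0
 *	15:1
 *	16:2
 */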
static ssize_t vendor_show(struct device *dev,
			   struct device_attribute *dev_attr,
			   char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	return sysfs_emit(buf, "0x%x\n", hv_dev->vendor_id);
}
static DEVICE_ATTR_RO(vendor);

static ssize_t device_show(struct device *dev,
			   struct device_attribute *dev_attr,
			   char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	return sysfs_emit(buf, "0x%x\n", hv_dev->device_id);
}
static DEVICE_ATTR_RO(device);

static ssize_t driver_override_store(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	int ret;

	ret = driver_set_override(dev, &hv_dev->driver_override, buf, count);
	if (ret)
		return ret;

	return count;
}

static ssize_t driver_override_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	ssize_t len;

	device_lock(dev);
	len = sysfs_emit(buf, "%s\n", hv_dev->driver_override);
	device_unlock(dev);

	return len;
}
static DEVICE_ATTR_RW(driver_override);
/* Set up per device attributes in /sys/bus/vmbus/devices/<bus device> */
static struct attribute *vmbus_dev_attrs[] = {
	&dev_attr_id.attr,
	&dev_attr_state.attr,
	&dev_attr_monitor_id.attr,
	&dev_attr_class_id.attr,
	&dev_attr_device_id.attr,
	&dev_attr_modalias.attr,
#ifdef CONFIG_NUMA
	&dev_attr_numa_node.attr,
#endif
	&dev_attr_server_monitor_pending.attr,
	&dev_attr_client_monitor_pending.attr,
	&dev_attr_server_monitor_latency.attr,
	&dev_attr_client_monitor_latency.attr,
	&dev_attr_server_monitor_conn_id.attr,
	&dev_attr_client_monitor_conn_id.attr,
	&dev_attr_out_intr_mask.attr,
	&dev_attr_out_read_index.attr,
	&dev_attr_out_write_index.attr,
	&dev_attr_out_read_bytes_avail.attr,
	&dev_attr_out_write_bytes_avail.attr,
	&dev_attr_in_intr_mask.attr,
	&dev_attr_in_read_index.attr,
	&dev_attr_in_write_index.attr,
	&dev_attr_in_read_bytes_avail.attr,
	&dev_attr_in_write_bytes_avail.attr,
	&dev_attr_channel_vp_mapping.attr,
	&dev_attr_vendor.attr,
	&dev_attr_device.attr,
	&dev_attr_driver_override.attr,
	NULL,
};
/*
 * Device-level attribute_group callback function. Returns the permission for
 * each attribute, and returns 0 if an attribute is not visible.
 */
static umode_t vmbus_dev_attr_is_visible(struct kobject *kobj,
					 struct attribute *attr, int idx)
{
	struct device *dev = kobj_to_dev(kobj);
	const struct hv_device *hv_dev = device_to_hv_device(dev);

	/* Hide the monitor attributes if the monitor mechanism is not used. */
	if (!hv_dev->channel->offermsg.monitor_allocated &&
	    (attr == &dev_attr_monitor_id.attr ||
	     attr == &dev_attr_server_monitor_pending.attr ||
	     attr == &dev_attr_client_monitor_pending.attr ||
	     attr == &dev_attr_server_monitor_latency.attr ||
	     attr == &dev_attr_client_monitor_latency.attr ||
	     attr == &dev_attr_server_monitor_conn_id.attr ||
	     attr == &dev_attr_client_monitor_conn_id.attr))
		return 0;

	return attr->mode;
}

static const struct attribute_group vmbus_dev_group = {
	.attrs = vmbus_dev_attrs,
	.is_visible = vmbus_dev_attr_is_visible
};
__ATTRIBUTE_GROUPS(vmbus_dev);
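
/*
 * Net effect of the is_visible callback above (example): for a device
 * whose channel was offered with monitor_allocated == 0, the monitor_id
 * and *_monitor_* files are simply absent from that device's sysfs
 * directory rather than present-but-failing.
 */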
/* Set up the attribute for /sys/bus/vmbus/hibernation */
static ssize_t hibernation_show(const struct bus_type *bus, char *buf)
{
	return sprintf(buf, "%d\n", !!hv_is_hibernation_supported());
}

static BUS_ATTR_RO(hibernation);
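
/*
 * Usage sketch: reading /sys/bus/vmbus/hibernation yields "1" when
 * hv_is_hibernation_supported() returns true, "0" otherwise.
 */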
static struct attribute *vmbus_bus_attrs[] = {
	&bus_attr_hibernation.attr,
	NULL,
};
static const struct attribute_group vmbus_bus_group = {
	.attrs = vmbus_bus_attrs,
};
__ATTRIBUTE_GROUPS(vmbus_bus);
/*
 * vmbus_uevent - add uevent for our device
 *
 * This routine is invoked when a device is added or removed on the vmbus to
 * generate a uevent to udev in userspace. Udev will then look at its rules
 * and the uevent generated here to load the appropriate driver.
 *
 * The alias string will be of the form vmbus:guid where guid is the string
 * representation of the device guid (each byte of the guid will be
 * represented with two hex characters).
 */
static int vmbus_uevent(const struct device *device, struct kobj_uevent_env *env)
{
	const struct hv_device *dev = device_to_hv_device(device);
	const char *format = "MODALIAS=vmbus:%*phN";

	return add_uevent_var(env, format, UUID_SIZE, &dev->dev_type);
}
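
/*
 * Example (illustrative, assuming the synthetic NIC class GUID
 * f8615163-df3e-46c5-913f-f2d2f965ed0e): %*phN dumps the guid_t bytes
 * in memory order (the first three fields are little-endian), so the
 * emitted variable would be:
 *
 *	MODALIAS=vmbus:635161f83edfc546913ff2d2f965ed0e
 */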
static const struct hv_vmbus_device_id *
hv_vmbus_dev_match(const struct hv_vmbus_device_id *id, const guid_t *guid)
{
	if (id == NULL)
		return NULL; /* empty device table */

	for (; !guid_is_null(&id->guid); id++)
		if (guid_equal(&id->guid, guid))
			return id;

	return NULL;
}

static const struct hv_vmbus_device_id *
hv_vmbus_dynid_match(struct hv_driver *drv, const guid_t *guid)
{
	const struct hv_vmbus_device_id *id = NULL;
	struct vmbus_dynid *dynid;

	spin_lock(&drv->dynids.lock);
	list_for_each_entry(dynid, &drv->dynids.list, node) {
		if (guid_equal(&dynid->id.guid, guid)) {
			id = &dynid->id;
			break;
		}
	}
	spin_unlock(&drv->dynids.lock);

	return id;
}
static const struct hv_vmbus_device_id vmbus_device_null;

/*
 * Return a matching hv_vmbus_device_id pointer.
 * If there is no match, return NULL.
 */
static const struct hv_vmbus_device_id *hv_vmbus_get_id(struct hv_driver *drv,
							struct hv_device *dev)
{
	const guid_t *guid = &dev->dev_type;
	const struct hv_vmbus_device_id *id;

	/* When driver_override is set, only bind to the matching driver */
	if (dev->driver_override && strcmp(dev->driver_override, drv->name))
		return NULL;

	/* Look at the dynamic ids first, before the static ones */
	id = hv_vmbus_dynid_match(drv, guid);
	if (!id)
		id = hv_vmbus_dev_match(drv->id_table, guid);

	/* driver_override will always match, send a dummy id */
	if (!id && dev->driver_override)
		id = &vmbus_device_null;

	return id;
}
/* vmbus_add_dynid - add a new device ID to this driver and re-probe devices */
static int vmbus_add_dynid(struct hv_driver *drv, guid_t *guid)
{
	struct vmbus_dynid *dynid;

	dynid = kzalloc(sizeof(*dynid), GFP_KERNEL);
	if (!dynid)
		return -ENOMEM;

	dynid->id.guid = *guid;

	spin_lock(&drv->dynids.lock);
	list_add_tail(&dynid->node, &drv->dynids.list);
	spin_unlock(&drv->dynids.lock);

	return driver_attach(&drv->driver);
}
static void vmbus_free_dynids(struct hv_driver *drv)
{
	struct vmbus_dynid *dynid, *n;

	spin_lock(&drv->dynids.lock);
	list_for_each_entry_safe(dynid, n, &drv->dynids.list, node) {
		list_del(&dynid->node);
		kfree(dynid);
	}
	spin_unlock(&drv->dynids.lock);
}
/*
 * store_new_id - sysfs frontend to vmbus_add_dynid()
 *
 * Allow GUIDs to be added to an existing driver via sysfs.
 */
static ssize_t new_id_store(struct device_driver *driver, const char *buf,
			    size_t count)
{
	struct hv_driver *drv = drv_to_hv_drv(driver);
	guid_t guid;
	ssize_t retval;

	retval = guid_parse(buf, &guid);
	if (retval)
		return retval;

	if (hv_vmbus_dynid_match(drv, &guid))
		return -EEXIST;

	retval = vmbus_add_dynid(drv, &guid);
	if (retval)
		return retval;
	return count;
}
static DRIVER_ATTR_WO(new_id);
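
/*
 * Usage sketch: userspace can bind an extra GUID to an already-loaded
 * driver by writing the textual GUID form that guid_parse() accepts,
 * e.g. "f8615163-df3e-46c5-913f-f2d2f965ed0e" (illustrative), to
 * /sys/bus/vmbus/drivers/<driver>/new_id.
 */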
/*
 * store_remove_id - remove a vmbus device ID from this driver
 *
 * Removes a dynamic vmbus device ID from this driver.
 */
static ssize_t remove_id_store(struct device_driver *driver, const char *buf,
			       size_t count)
{
	struct hv_driver *drv = drv_to_hv_drv(driver);
	struct vmbus_dynid *dynid, *n;
	guid_t guid;
	ssize_t retval;

	retval = guid_parse(buf, &guid);
	if (retval)
		return retval;

	retval = -ENODEV;
	spin_lock(&drv->dynids.lock);
	list_for_each_entry_safe(dynid, n, &drv->dynids.list, node) {
		struct hv_vmbus_device_id *id = &dynid->id;

		if (guid_equal(&id->guid, &guid)) {
			list_del(&dynid->node);
			kfree(dynid);
			retval = count;
			break;
		}
	}
	spin_unlock(&drv->dynids.lock);

	return retval;
}
static DRIVER_ATTR_WO(remove_id);
static struct attribute *vmbus_drv_attrs[] = {
	&driver_attr_new_id.attr,
	&driver_attr_remove_id.attr,
	NULL,
};
ATTRIBUTE_GROUPS(vmbus_drv);
/*
 * vmbus_match - Attempt to match the specified device to the specified driver
 */
static int vmbus_match(struct device *device, struct device_driver *driver)
{
	struct hv_driver *drv = drv_to_hv_drv(driver);
	struct hv_device *hv_dev = device_to_hv_device(device);

	/* The hv_sock driver handles all hv_sock offers. */
	if (is_hvsock_channel(hv_dev->channel))
		return drv->hvsock;

	if (hv_vmbus_get_id(drv, hv_dev))
		return 1;

	return 0;
}
/*
 * vmbus_probe - Add the new vmbus's child device
 */
static int vmbus_probe(struct device *child_device)
{
	int ret = 0;
	struct hv_driver *drv =
			drv_to_hv_drv(child_device->driver);
	struct hv_device *dev = device_to_hv_device(child_device);
	const struct hv_vmbus_device_id *dev_id;

	dev_id = hv_vmbus_get_id(drv, dev);
	if (drv->probe) {
		ret = drv->probe(dev, dev_id);
		if (ret != 0)
			pr_err("probe failed for device %s (%d)\n",
			       dev_name(child_device), ret);
	} else {
		pr_err("probe not set for driver %s\n",
		       dev_name(child_device));
		ret = -ENODEV;
	}
	return ret;
}
/*
 * vmbus_dma_configure -- Configure DMA coherence for VMbus device
 */
static int vmbus_dma_configure(struct device *child_device)
{
	/*
	 * On ARM64, propagate the DMA coherence setting from the top level
	 * VMbus ACPI device to the child VMbus device being added here.
	 * On x86/x64 coherence is assumed and these calls have no effect.
	 */
	hv_setup_dma_ops(child_device,
		device_get_dma_attr(hv_dev) == DEV_DMA_COHERENT);
	return 0;
}
/*
 * vmbus_remove - Remove a vmbus device
 */
static void vmbus_remove(struct device *child_device)
{
	struct hv_driver *drv;
	struct hv_device *dev = device_to_hv_device(child_device);

	if (child_device->driver) {
		drv = drv_to_hv_drv(child_device->driver);
		if (drv->remove)
			drv->remove(dev);
	}
}
/*
 * vmbus_shutdown - Shutdown a vmbus device
 */
static void vmbus_shutdown(struct device *child_device)
{
	struct hv_driver *drv;
	struct hv_device *dev = device_to_hv_device(child_device);

	/* The device may not be attached yet */
	if (!child_device->driver)
		return;

	drv = drv_to_hv_drv(child_device->driver);
	if (drv->shutdown)
		drv->shutdown(dev);
}
#ifdef CONFIG_PM_SLEEP
/*
 * vmbus_suspend - Suspend a vmbus device
 */
static int vmbus_suspend(struct device *child_device)
{
	struct hv_driver *drv;
	struct hv_device *dev = device_to_hv_device(child_device);

	/* The device may not be attached yet */
	if (!child_device->driver)
		return 0;

	drv = drv_to_hv_drv(child_device->driver);
	if (!drv->suspend)
		return -EOPNOTSUPP;

	return drv->suspend(dev);
}
/*
 * vmbus_resume - Resume a vmbus device
 */
static int vmbus_resume(struct device *child_device)
{
	struct hv_driver *drv;
	struct hv_device *dev = device_to_hv_device(child_device);

	/* The device may not be attached yet */
	if (!child_device->driver)
		return 0;

	drv = drv_to_hv_drv(child_device->driver);
	if (!drv->resume)
		return -EOPNOTSUPP;

	return drv->resume(dev);
}
#else
#define vmbus_suspend NULL
#define vmbus_resume NULL
#endif /* CONFIG_PM_SLEEP */
/*
 * vmbus_device_release - Final callback release of the vmbus child device
 */
static void vmbus_device_release(struct device *device)
{
	struct hv_device *hv_dev = device_to_hv_device(device);
	struct vmbus_channel *channel = hv_dev->channel;

	hv_debug_rm_dev_dir(hv_dev);

	mutex_lock(&vmbus_connection.channel_mutex);
	hv_process_channel_removal(channel);
	mutex_unlock(&vmbus_connection.channel_mutex);
	kfree(hv_dev);
}
/*
 * Note: we must use the "noirq" ops: see the comment before vmbus_bus_pm.
 *
 * suspend_noirq/resume_noirq are set to NULL to support Suspend-to-Idle: we
 * shouldn't suspend the vmbus devices upon Suspend-to-Idle, otherwise there
 * is no way to wake up a Generation-2 VM.
 *
 * The other 4 ops are for hibernation.
 */
static const struct dev_pm_ops vmbus_pm = {
	.suspend_noirq	= NULL,
	.resume_noirq	= NULL,
	.freeze_noirq	= vmbus_suspend,
	.thaw_noirq	= vmbus_resume,
	.poweroff_noirq	= vmbus_suspend,
	.restore_noirq	= vmbus_resume,
};
/* The one and only one */
static const struct bus_type hv_bus = {
	.name =			"vmbus",
	.match =		vmbus_match,
	.shutdown =		vmbus_shutdown,
	.remove =		vmbus_remove,
	.probe =		vmbus_probe,
	.uevent =		vmbus_uevent,
	.dma_configure =	vmbus_dma_configure,
	.dev_groups =		vmbus_dev_groups,
	.drv_groups =		vmbus_drv_groups,
	.bus_groups =		vmbus_bus_groups,
	.pm =			&vmbus_pm,
};
struct onmessage_work_context {
	struct work_struct work;
	struct {
		struct hv_message_header header;
		u8 payload[];
	} msg;
};

static void vmbus_onmessage_work(struct work_struct *work)
{
	struct onmessage_work_context *ctx;

	/* Do not process messages if we're in DISCONNECTED state */
	if (vmbus_connection.conn_state == DISCONNECTED)
		return;

	ctx = container_of(work, struct onmessage_work_context,
			   work);
	vmbus_onmessage((struct vmbus_channel_message_header *)
			&ctx->msg.payload);
	kfree(ctx);
}
void vmbus_on_msg_dpc(unsigned long data)
{
	struct hv_per_cpu_context *hv_cpu = (void *)data;
	void *page_addr = hv_cpu->synic_message_page;
	struct hv_message msg_copy, *msg = (struct hv_message *)page_addr +
				  VMBUS_MESSAGE_SINT;
	struct vmbus_channel_message_header *hdr;
	enum vmbus_channel_message_type msgtype;
	const struct vmbus_channel_message_table_entry *entry;
	struct onmessage_work_context *ctx;
	__u8 payload_size;
	u32 message_type;

	/*
	 * 'enum vmbus_channel_message_type' is supposed to always be 'u32' as
	 * it is being used in 'struct vmbus_channel_message_header' definition
	 * which is supposed to match hypervisor ABI.
	 */
	BUILD_BUG_ON(sizeof(enum vmbus_channel_message_type) != sizeof(u32));

	/*
	 * Since the message is in memory shared with the host, an erroneous or
	 * malicious Hyper-V could modify the message while vmbus_on_msg_dpc()
	 * or individual message handlers are executing; to prevent this, copy
	 * the message into private memory.
	 */
	memcpy(&msg_copy, msg, sizeof(struct hv_message));

	message_type = msg_copy.header.message_type;
	if (message_type == HVMSG_NONE)
		/* no msg */
		return;

	hdr = (struct vmbus_channel_message_header *)msg_copy.u.payload;
	msgtype = hdr->msgtype;

	trace_vmbus_on_msg_dpc(hdr);

	if (msgtype >= CHANNELMSG_COUNT) {
		WARN_ONCE(1, "unknown msgtype=%d\n", msgtype);
		goto msg_handled;
	}

	payload_size = msg_copy.header.payload_size;
	if (payload_size > HV_MESSAGE_PAYLOAD_BYTE_COUNT) {
		WARN_ONCE(1, "payload size is too large (%d)\n", payload_size);
		goto msg_handled;
	}

	entry = &channel_message_table[msgtype];

	if (!entry->message_handler)
		goto msg_handled;

	if (payload_size < entry->min_payload_len) {
		WARN_ONCE(1, "message too short: msgtype=%d len=%d\n", msgtype, payload_size);
		goto msg_handled;
	}

	if (entry->handler_type == VMHT_BLOCKING) {
		ctx = kmalloc(struct_size(ctx, msg.payload, payload_size), GFP_ATOMIC);
		if (ctx == NULL)
			return;

		INIT_WORK(&ctx->work, vmbus_onmessage_work);
		ctx->msg.header = msg_copy.header;
		memcpy(&ctx->msg.payload, msg_copy.u.payload, payload_size);

		/*
		 * The host can generate a rescind message while we
		 * may still be handling the original offer. We deal with
		 * this condition by relying on the synchronization provided
		 * by offer_in_progress and by channel_mutex. See also the
		 * inline comments in vmbus_onoffer_rescind().
		 */
		switch (msgtype) {
		case CHANNELMSG_RESCIND_CHANNELOFFER:
			/*
			 * If we are handling the rescind message;
			 * schedule the work on the global work queue.
			 *
			 * The OFFER message and the RESCIND message should
			 * not be handled by the same serialized work queue,
			 * because the OFFER handler may call vmbus_open(),
			 * which tries to open the channel by sending an
			 * OPEN_CHANNEL message to the host and waits for
			 * the host's response; however, if the host has
			 * rescinded the channel before it receives the
			 * OPEN_CHANNEL message, the host just silently
			 * ignores the OPEN_CHANNEL message; as a result,
			 * the guest's OFFER handler hangs forever, if we
			 * handle the RESCIND message in the same serialized
			 * work queue: the RESCIND handler can not start to
			 * run before the OFFER handler finishes.
			 */
			if (vmbus_connection.ignore_any_offer_msg)
				break;
			queue_work(vmbus_connection.rescind_work_queue, &ctx->work);
			break;

		case CHANNELMSG_OFFERCHANNEL:
			/*
			 * The host sends the offer message of a given channel
			 * before sending the rescind message of the same
			 * channel. These messages are sent to the guest's
			 * connect CPU; the guest then starts processing them
			 * in the tasklet handler on this CPU:
			 *
			 * VMBUS_CONNECT_CPU
			 *
			 * [vmbus_on_msg_dpc()]
			 * atomic_inc()   // CHANNELMSG_OFFERCHANNEL
			 * queue_work()
			 * ...
			 * [vmbus_on_msg_dpc()]
			 * schedule_work()  // CHANNELMSG_RESCIND_CHANNELOFFER
			 * ...
			 *
			 * We rely on the memory-ordering properties of the
			 * queue_work() and schedule_work() primitives, which
			 * guarantee that the atomic increment will be visible
			 * to the CPUs which will execute the offer & rescind
			 * works by the time these works will start execution.
			 */
			if (vmbus_connection.ignore_any_offer_msg)
				break;
			atomic_inc(&vmbus_connection.offer_in_progress);
			fallthrough;

		default:
			queue_work(vmbus_connection.work_queue, &ctx->work);
		}
	} else
		entry->message_handler(hdr);

msg_handled:
	vmbus_signal_eom(msg, message_type);
}
#ifdef CONFIG_PM_SLEEP
/*
 * Fake RESCIND_CHANNEL messages to clean up hv_sock channels by force for
 * hibernation, because hv_sock connections can not persist across hibernation.
 */
static void vmbus_force_channel_rescinded(struct vmbus_channel *channel)
{
	struct onmessage_work_context *ctx;
	struct vmbus_channel_rescind_offer *rescind;

	WARN_ON(!is_hvsock_channel(channel));

	/*
	 * Allocation size is small and the allocation should really not fail,
	 * otherwise the state of the hv_sock connections ends up in limbo.
	 */
	ctx = kzalloc(sizeof(*ctx) + sizeof(*rescind),
		      GFP_KERNEL | __GFP_NOFAIL);

	/*
	 * So far, these are not really used by Linux. Just set them to the
	 * reasonable values conforming to the definitions of the fields.
	 */
	ctx->msg.header.message_type = 1;
	ctx->msg.header.payload_size = sizeof(*rescind);

	/* These values are actually used by Linux. */
	rescind = (struct vmbus_channel_rescind_offer *)ctx->msg.payload;
	rescind->header.msgtype = CHANNELMSG_RESCIND_CHANNELOFFER;
	rescind->child_relid = channel->offermsg.child_relid;

	INIT_WORK(&ctx->work, vmbus_onmessage_work);

	queue_work(vmbus_connection.work_queue, &ctx->work);
}
#endif /* CONFIG_PM_SLEEP */
/*
 * Schedule all channels with events pending
 */
static void vmbus_chan_sched(struct hv_per_cpu_context *hv_cpu)
{
	unsigned long *recv_int_page;
	u32 maxbits, relid;

	/*
	 * The event page can be directly checked to get the id of
	 * the channel that has the interrupt pending.
	 */
	void *page_addr = hv_cpu->synic_event_page;
	union hv_synic_event_flags *event
		= (union hv_synic_event_flags *)page_addr +
					 VMBUS_MESSAGE_SINT;

	maxbits = HV_EVENT_FLAGS_COUNT;
	recv_int_page = event->flags;

	if (unlikely(!recv_int_page))
		return;

	for_each_set_bit(relid, recv_int_page, maxbits) {
		void (*callback_fn)(void *context);
		struct vmbus_channel *channel;

		if (!sync_test_and_clear_bit(relid, recv_int_page))
			continue;

		/* Special case - vmbus channel protocol msg */
		if (relid == 0)
			continue;

		/*
		 * Pairs with the kfree_rcu() in vmbus_chan_release().
		 * Guarantees that the channel data structure doesn't
		 * get freed while the channel pointer below is being
		 * dereferenced.
		 */
		rcu_read_lock();

		/* Find channel based on relid */
		channel = relid2channel(relid);
		if (channel == NULL)
			goto sched_unlock_rcu;

		if (channel->rescind)
			goto sched_unlock_rcu;

		/*
		 * Make sure that the ring buffer data structure doesn't get
		 * freed while we dereference the ring buffer pointer. Test
		 * for the channel's onchannel_callback being NULL within a
		 * sched_lock critical section. See also the inline comments
		 * in vmbus_reset_channel_cb().
		 */
		spin_lock(&channel->sched_lock);

		callback_fn = channel->onchannel_callback;
		if (unlikely(callback_fn == NULL))
			goto sched_unlock;

		trace_vmbus_chan_sched(channel);

		++channel->interrupts;

		switch (channel->callback_mode) {
		case HV_CALL_ISR:
			(*callback_fn)(channel->channel_callback_context);
			break;

		case HV_CALL_BATCHED:
			hv_begin_read(&channel->inbound);
			fallthrough;
		case HV_CALL_DIRECT:
			tasklet_schedule(&channel->callback_event);
		}

sched_unlock:
		spin_unlock(&channel->sched_lock);
sched_unlock_rcu:
		rcu_read_unlock();
	}
}
static void vmbus_isr(void)
{
	struct hv_per_cpu_context *hv_cpu
		= this_cpu_ptr(hv_context.cpu_context);
	void *page_addr;
	struct hv_message *msg;

	vmbus_chan_sched(hv_cpu);

	page_addr = hv_cpu->synic_message_page;
	msg = (struct hv_message *)page_addr + VMBUS_MESSAGE_SINT;

	/* Check if there are actual msgs to be processed */
	if (msg->header.message_type != HVMSG_NONE) {
		if (msg->header.message_type == HVMSG_TIMER_EXPIRED) {
			hv_stimer0_isr();
			vmbus_signal_eom(msg, HVMSG_TIMER_EXPIRED);
		} else
			tasklet_schedule(&hv_cpu->msg_dpc);
	}

	add_interrupt_randomness(vmbus_interrupt);
}

static irqreturn_t vmbus_percpu_isr(int irq, void *dev_id)
{
	vmbus_isr();
	return IRQ_HANDLED;
}
/*
 * vmbus_bus_init - Main vmbus driver initialization routine.
 *
 * Here, we
 *	- initialize the vmbus driver context
 *	- invoke the vmbus hv main init routine
 *	- retrieve the channel offers
 */
static int vmbus_bus_init(void)
{
	int ret;

	ret = hv_init();
	if (ret != 0) {
		pr_err("Unable to initialize the hypervisor - 0x%x\n", ret);
		return ret;
	}

	ret = bus_register(&hv_bus);
	if (ret)
		return ret;

	/*
	 * VMbus interrupts are best modeled as per-cpu interrupts. If
	 * on an architecture with support for per-cpu IRQs (e.g. ARM64),
	 * allocate a per-cpu IRQ using standard Linux kernel functionality.
	 * If not on such an architecture (e.g., x86/x64), then rely on
	 * code in the arch-specific portion of the code tree to connect
	 * the VMbus interrupt handler.
	 */
	if (vmbus_irq == -1) {
		hv_setup_vmbus_handler(vmbus_isr);
	} else {
		vmbus_evt = alloc_percpu(long);
		ret = request_percpu_irq(vmbus_irq, vmbus_percpu_isr,
				"Hyper-V VMbus", vmbus_evt);
		if (ret) {
			pr_err("Can't request Hyper-V VMbus IRQ %d, Err %d",
					vmbus_irq, ret);
			free_percpu(vmbus_evt);
			goto err_setup;
		}
	}

	ret = hv_synic_alloc();
	if (ret)
		goto err_alloc;

	/*
	 * Initialize the per-cpu interrupt state and stimer state.
	 * Then connect to the host.
	 */
	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "hyperv/vmbus:online",
				hv_synic_init, hv_synic_cleanup);
	if (ret < 0)
		goto err_alloc;
	hyperv_cpuhp_online = ret;

	ret = vmbus_connect();
	if (ret)
		goto err_connect;

	/*
	 * Always register the vmbus unload panic notifier because we
	 * need to shut the VMbus channel connection on panic.
	 */
	atomic_notifier_chain_register(&panic_notifier_list,
				       &hyperv_panic_vmbus_unload_block);

	vmbus_request_offers();

	return 0;

err_connect:
	cpuhp_remove_state(hyperv_cpuhp_online);
err_alloc:
	hv_synic_free();
	if (vmbus_irq == -1) {
		hv_remove_vmbus_handler();
	} else {
		free_percpu_irq(vmbus_irq, vmbus_evt);
		free_percpu(vmbus_evt);
	}
err_setup:
	bus_unregister(&hv_bus);
	return ret;
}
/**
 * __vmbus_driver_register() - Register a vmbus driver
 * @hv_driver: Pointer to driver structure you want to register
 * @owner: owner module of the drv
 * @mod_name: module name string
 *
 * Registers the given driver with Linux through the 'driver_register()' call
 * and sets up the Hyper-V vmbus handling for this driver.
 * It will return the state of the 'driver_register()' call.
 *
 */
int __vmbus_driver_register(struct hv_driver *hv_driver, struct module *owner, const char *mod_name)
{
	int ret;

	pr_info("registering driver %s\n", hv_driver->name);

	ret = vmbus_exists();
	if (ret < 0)
		return ret;

	hv_driver->driver.name = hv_driver->name;
	hv_driver->driver.owner = owner;
	hv_driver->driver.mod_name = mod_name;
	hv_driver->driver.bus = &hv_bus;

	spin_lock_init(&hv_driver->dynids.lock);
	INIT_LIST_HEAD(&hv_driver->dynids.list);

	ret = driver_register(&hv_driver->driver);

	return ret;
}
EXPORT_SYMBOL_GPL(__vmbus_driver_register);
/**
 * vmbus_driver_unregister() - Unregister a vmbus driver
 * @hv_driver: Pointer to driver structure you want to unregister
 *
 * Un-registers the given driver that was previously registered with a call
 * to vmbus_driver_register().
 */
void vmbus_driver_unregister(struct hv_driver *hv_driver)
{
	pr_info("unregistering driver %s\n", hv_driver->name);

	if (!vmbus_exists()) {
		driver_unregister(&hv_driver->driver);
		vmbus_free_dynids(hv_driver);
	}
}
EXPORT_SYMBOL_GPL(vmbus_driver_unregister);
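
/*
 * Example (illustrative sketch, not part of this file): a minimal VMbus
 * client driver built on the registration API above. The GUID is a
 * made-up placeholder; a real driver uses the class ID of the device it
 * services, and the exact callback signatures are those declared in
 * include/linux/hyperv.h.
 *
 *	static const struct hv_vmbus_device_id my_id_table[] = {
 *		{ .guid = GUID_INIT(0x12345678, 0x9abc, 0xdef0, 0x12, 0x34,
 *				    0x56, 0x78, 0x9a, 0xbc, 0xde, 0xf0) },
 *		{ },
 *	};
 *	MODULE_DEVICE_TABLE(vmbus, my_id_table);
 *
 *	static int my_probe(struct hv_device *dev,
 *			    const struct hv_vmbus_device_id *dev_id)
 *	{
 *		return 0;	// open channels, set up the device here
 *	}
 *
 *	static struct hv_driver my_drv = {
 *		.name = "my_vmbus_drv",
 *		.id_table = my_id_table,
 *		.probe = my_probe,
 *	};
 *
 * The module's init/exit would then call vmbus_driver_register(&my_drv)
 * and vmbus_driver_unregister(&my_drv) respectively.
 */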
/*
 * Called when last reference to channel is gone.
 */
static void vmbus_chan_release(struct kobject *kobj)
{
	struct vmbus_channel *channel
		= container_of(kobj, struct vmbus_channel, kobj);

	kfree_rcu(channel, rcu);
}

struct vmbus_chan_attribute {
	struct attribute attr;
	ssize_t (*show)(struct vmbus_channel *chan, char *buf);
	ssize_t (*store)(struct vmbus_channel *chan,
			 const char *buf, size_t count);
};
#define VMBUS_CHAN_ATTR(_name, _mode, _show, _store) \
	struct vmbus_chan_attribute chan_attr_##_name \
		= __ATTR(_name, _mode, _show, _store)
#define VMBUS_CHAN_ATTR_RW(_name) \
	struct vmbus_chan_attribute chan_attr_##_name = __ATTR_RW(_name)
#define VMBUS_CHAN_ATTR_RO(_name) \
	struct vmbus_chan_attribute chan_attr_##_name = __ATTR_RO(_name)
#define VMBUS_CHAN_ATTR_WO(_name) \
	struct vmbus_chan_attribute chan_attr_##_name = __ATTR_WO(_name)
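
/*
 * For instance, VMBUS_CHAN_ATTR_RO(subchannel_id) below expands to
 *
 *	struct vmbus_chan_attribute chan_attr_subchannel_id =
 *		__ATTR_RO(subchannel_id)
 *
 * which wires up subchannel_id_show() via the __ATTR_RO() naming
 * convention.
 */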
static ssize_t vmbus_chan_attr_show(struct kobject *kobj,
				    struct attribute *attr, char *buf)
{
	const struct vmbus_chan_attribute *attribute
		= container_of(attr, struct vmbus_chan_attribute, attr);
	struct vmbus_channel *chan
		= container_of(kobj, struct vmbus_channel, kobj);

	if (!attribute->show)
		return -EIO;

	return attribute->show(chan, buf);
}

static ssize_t vmbus_chan_attr_store(struct kobject *kobj,
				     struct attribute *attr, const char *buf,
				     size_t count)
{
	const struct vmbus_chan_attribute *attribute
		= container_of(attr, struct vmbus_chan_attribute, attr);
	struct vmbus_channel *chan
		= container_of(kobj, struct vmbus_channel, kobj);

	if (!attribute->store)
		return -EIO;

	return attribute->store(chan, buf, count);
}

static const struct sysfs_ops vmbus_chan_sysfs_ops = {
	.show = vmbus_chan_attr_show,
	.store = vmbus_chan_attr_store,
};
static ssize_t out_mask_show(struct vmbus_channel *channel, char *buf)
{
	struct hv_ring_buffer_info *rbi = &channel->outbound;
	ssize_t ret;

	mutex_lock(&rbi->ring_buffer_mutex);
	if (!rbi->ring_buffer) {
		mutex_unlock(&rbi->ring_buffer_mutex);
		return -EINVAL;
	}

	ret = sprintf(buf, "%u\n", rbi->ring_buffer->interrupt_mask);
	mutex_unlock(&rbi->ring_buffer_mutex);
	return ret;
}
static VMBUS_CHAN_ATTR_RO(out_mask);

static ssize_t in_mask_show(struct vmbus_channel *channel, char *buf)
{
	struct hv_ring_buffer_info *rbi = &channel->inbound;
	ssize_t ret;

	mutex_lock(&rbi->ring_buffer_mutex);
	if (!rbi->ring_buffer) {
		mutex_unlock(&rbi->ring_buffer_mutex);
		return -EINVAL;
	}

	ret = sprintf(buf, "%u\n", rbi->ring_buffer->interrupt_mask);
	mutex_unlock(&rbi->ring_buffer_mutex);
	return ret;
}
static VMBUS_CHAN_ATTR_RO(in_mask);

static ssize_t read_avail_show(struct vmbus_channel *channel, char *buf)
{
	struct hv_ring_buffer_info *rbi = &channel->inbound;
	ssize_t ret;

	mutex_lock(&rbi->ring_buffer_mutex);
	if (!rbi->ring_buffer) {
		mutex_unlock(&rbi->ring_buffer_mutex);
		return -EINVAL;
	}

	ret = sprintf(buf, "%u\n", hv_get_bytes_to_read(rbi));
	mutex_unlock(&rbi->ring_buffer_mutex);
	return ret;
}
static VMBUS_CHAN_ATTR_RO(read_avail);

static ssize_t write_avail_show(struct vmbus_channel *channel, char *buf)
{
	struct hv_ring_buffer_info *rbi = &channel->outbound;
	ssize_t ret;

	mutex_lock(&rbi->ring_buffer_mutex);
	if (!rbi->ring_buffer) {
		mutex_unlock(&rbi->ring_buffer_mutex);
		return -EINVAL;
	}

	ret = sprintf(buf, "%u\n", hv_get_bytes_to_write(rbi));
	mutex_unlock(&rbi->ring_buffer_mutex);
	return ret;
}
static VMBUS_CHAN_ATTR_RO(write_avail);
static ssize_t target_cpu_show(struct vmbus_channel *channel, char *buf)
{
	return sprintf(buf, "%u\n", channel->target_cpu);
}

static ssize_t target_cpu_store(struct vmbus_channel *channel,
				const char *buf, size_t count)
{
	u32 target_cpu, origin_cpu;
	ssize_t ret = count;

	if (vmbus_proto_version < VERSION_WIN10_V4_1)
		return -EIO;

	if (sscanf(buf, "%uu", &target_cpu) != 1)
		return -EIO;

	/* Validate target_cpu for the cpumask_test_cpu() operation below. */
	if (target_cpu >= nr_cpumask_bits)
		return -EINVAL;

	if (!cpumask_test_cpu(target_cpu, housekeeping_cpumask(HK_TYPE_MANAGED_IRQ)))
		return -EINVAL;

	/* No CPUs should come up or down during this. */
	cpus_read_lock();

	if (!cpu_online(target_cpu)) {
		cpus_read_unlock();
		return -EINVAL;
	}

	/*
	 * Synchronizes target_cpu_store() and channel closure:
	 *
	 * { Initially: state = CHANNEL_OPENED }
	 *
	 * CPU1				CPU2
	 *
	 * [target_cpu_store()]		[vmbus_disconnect_ring()]
	 *
	 * LOCK channel_mutex		LOCK channel_mutex
	 * LOAD r1 = state		LOAD r2 = state
	 * IF (r1 == CHANNEL_OPENED)	IF (r2 == CHANNEL_OPENED)
	 *   SEND MODIFYCHANNEL		  STORE state = CHANNEL_OPEN
	 *   [...]			  SEND CLOSECHANNEL
	 * UNLOCK channel_mutex		UNLOCK channel_mutex
	 *
	 * Forbids: r1 == r2 == CHANNEL_OPENED (i.e., CPU1's LOCK precedes
	 *		CPU2's LOCK) && CPU2's SEND precedes CPU1's SEND
	 *
	 * Note. The host processes the channel messages "sequentially", in
	 * the order in which they are received on a per-partition basis.
	 */
	mutex_lock(&vmbus_connection.channel_mutex);

	/*
	 * Hyper-V will ignore MODIFYCHANNEL messages for "non-open" channels;
	 * avoid sending the message and fail here for such channels.
	 */
	if (channel->state != CHANNEL_OPENED_STATE) {
		ret = -EIO;
		goto cpu_store_unlock;
	}

	origin_cpu = channel->target_cpu;
	if (target_cpu == origin_cpu)
		goto cpu_store_unlock;

	if (vmbus_send_modifychannel(channel,
				     hv_cpu_number_to_vp_number(target_cpu))) {
		ret = -EIO;
		goto cpu_store_unlock;
	}

	/*
	 * For version before VERSION_WIN10_V5_3, the following warning holds:
	 *
	 * Warning. At this point, there is *no* guarantee that the host will
	 * have successfully processed the vmbus_send_modifychannel() request.
	 * See the header comment of vmbus_send_modifychannel() for more info.
	 *
	 * Lags in the processing of the above vmbus_send_modifychannel() can
	 * result in missed interrupts if the "old" target CPU is taken offline
	 * before Hyper-V starts sending interrupts to the "new" target CPU.
	 * But apart from this offlining scenario, the code tolerates such
	 * lags. It will function correctly even if a channel interrupt comes
	 * in on a CPU that is different from the channel target_cpu value.
	 */

	channel->target_cpu = target_cpu;

	/* See init_vp_index(). */
	if (hv_is_perf_channel(channel))
		hv_update_allocated_cpus(origin_cpu, target_cpu);

	/* Currently set only for storvsc channels. */
	if (channel->change_target_cpu_callback) {
		(*channel->change_target_cpu_callback)(channel,
				origin_cpu, target_cpu);
	}

cpu_store_unlock:
	mutex_unlock(&vmbus_connection.channel_mutex);
	cpus_read_unlock();
	return ret;
}
static VMBUS_CHAN_ATTR(cpu, 0644, target_cpu_show, target_cpu_store);
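
/*
 * Usage sketch (path illustrative): writing an online housekeeping CPU
 * number to /sys/bus/vmbus/devices/<device>/channels/<relid>/cpu
 * retargets the channel's interrupts, subject to the checks above
 * (protocol version >= VERSION_WIN10_V4_1, channel in OPENED state).
 */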
static ssize_t channel_pending_show(struct vmbus_channel *channel,
				    char *buf)
{
	return sprintf(buf, "%d\n",
		       channel_pending(channel,
				       vmbus_connection.monitor_pages[1]));
}
static VMBUS_CHAN_ATTR(pending, 0444, channel_pending_show, NULL);

static ssize_t channel_latency_show(struct vmbus_channel *channel,
				    char *buf)
{
	return sprintf(buf, "%d\n",
		       channel_latency(channel,
				       vmbus_connection.monitor_pages[1]));
}
static VMBUS_CHAN_ATTR(latency, 0444, channel_latency_show, NULL);

static ssize_t channel_interrupts_show(struct vmbus_channel *channel, char *buf)
{
	return sprintf(buf, "%llu\n", channel->interrupts);
}
static VMBUS_CHAN_ATTR(interrupts, 0444, channel_interrupts_show, NULL);

static ssize_t channel_events_show(struct vmbus_channel *channel, char *buf)
{
	return sprintf(buf, "%llu\n", channel->sig_events);
}
static VMBUS_CHAN_ATTR(events, 0444, channel_events_show, NULL);

static ssize_t channel_intr_in_full_show(struct vmbus_channel *channel,
					 char *buf)
{
	return sprintf(buf, "%llu\n",
		       (unsigned long long)channel->intr_in_full);
}
static VMBUS_CHAN_ATTR(intr_in_full, 0444, channel_intr_in_full_show, NULL);

static ssize_t channel_intr_out_empty_show(struct vmbus_channel *channel,
					   char *buf)
{
	return sprintf(buf, "%llu\n",
		       (unsigned long long)channel->intr_out_empty);
}
static VMBUS_CHAN_ATTR(intr_out_empty, 0444, channel_intr_out_empty_show, NULL);

static ssize_t channel_out_full_first_show(struct vmbus_channel *channel,
					   char *buf)
{
	return sprintf(buf, "%llu\n",
		       (unsigned long long)channel->out_full_first);
}
static VMBUS_CHAN_ATTR(out_full_first, 0444, channel_out_full_first_show, NULL);

static ssize_t channel_out_full_total_show(struct vmbus_channel *channel,
					   char *buf)
{
	return sprintf(buf, "%llu\n",
		       (unsigned long long)channel->out_full_total);
}
static VMBUS_CHAN_ATTR(out_full_total, 0444, channel_out_full_total_show, NULL);

static ssize_t subchannel_monitor_id_show(struct vmbus_channel *channel,
					  char *buf)
{
	return sprintf(buf, "%u\n", channel->offermsg.monitorid);
}
static VMBUS_CHAN_ATTR(monitor_id, 0444, subchannel_monitor_id_show, NULL);

static ssize_t subchannel_id_show(struct vmbus_channel *channel,
				  char *buf)
{
	return sprintf(buf, "%u\n",
		       channel->offermsg.offer.sub_channel_index);
}
static VMBUS_CHAN_ATTR_RO(subchannel_id);
static struct attribute *vmbus_chan_attrs[] = {
	&chan_attr_out_mask.attr,
	&chan_attr_in_mask.attr,
	&chan_attr_read_avail.attr,
	&chan_attr_write_avail.attr,
	&chan_attr_cpu.attr,
	&chan_attr_pending.attr,
	&chan_attr_latency.attr,
	&chan_attr_interrupts.attr,
	&chan_attr_events.attr,
	&chan_attr_intr_in_full.attr,
	&chan_attr_intr_out_empty.attr,
	&chan_attr_out_full_first.attr,
	&chan_attr_out_full_total.attr,
	&chan_attr_monitor_id.attr,
	&chan_attr_subchannel_id.attr,
	NULL
};
/*
 * Channel-level attribute_group callback function. Returns the permission for
 * each attribute, and returns 0 if an attribute is not visible.
 */
static umode_t vmbus_chan_attr_is_visible(struct kobject *kobj,
					  struct attribute *attr, int idx)
{
	const struct vmbus_channel *channel =
		container_of(kobj, struct vmbus_channel, kobj);

	/* Hide the monitor attributes if the monitor mechanism is not used. */
	if (!channel->offermsg.monitor_allocated &&
	    (attr == &chan_attr_pending.attr ||
	     attr == &chan_attr_latency.attr ||
	     attr == &chan_attr_monitor_id.attr))
		return 0;

	return attr->mode;
}

static struct attribute_group vmbus_chan_group = {
	.attrs = vmbus_chan_attrs,
	.is_visible = vmbus_chan_attr_is_visible
};

static struct kobj_type vmbus_chan_ktype = {
	.sysfs_ops = &vmbus_chan_sysfs_ops,
	.release = vmbus_chan_release,
};
/*
 * vmbus_add_channel_kobj - setup a sub-directory under device/channels
 */
int vmbus_add_channel_kobj(struct hv_device *dev, struct vmbus_channel *channel)
{
	const struct device *device = &dev->device;
	struct kobject *kobj = &channel->kobj;
	u32 relid = channel->offermsg.child_relid;
	int ret;

	kobj->kset = dev->channels_kset;
	ret = kobject_init_and_add(kobj, &vmbus_chan_ktype, NULL,
				   "%u", relid);
	if (ret) {
		kobject_put(kobj);
		return ret;
	}

	ret = sysfs_create_group(kobj, &vmbus_chan_group);

	if (ret) {
		/*
		 * The calling functions' error handling paths will cleanup the
		 * empty channel directory.
		 */
		kobject_put(kobj);
		dev_err(device, "Unable to set up channel sysfs files\n");
		return ret;
	}

	kobject_uevent(kobj, KOBJ_ADD);

	return 0;
}
/*
 * vmbus_remove_channel_attr_group - remove the channel's attribute group
 */
void vmbus_remove_channel_attr_group(struct vmbus_channel *channel)
{
	sysfs_remove_group(&channel->kobj, &vmbus_chan_group);
}
/*
 * vmbus_device_create - Creates and registers a new child device
 * on the vmbus.
 */
struct hv_device *vmbus_device_create(const guid_t *type,
				      const guid_t *instance,
				      struct vmbus_channel *channel)
{
	struct hv_device *child_device_obj;

	child_device_obj = kzalloc(sizeof(struct hv_device), GFP_KERNEL);
	if (!child_device_obj) {
		pr_err("Unable to allocate device object for child device\n");
		return NULL;
	}

	child_device_obj->channel = channel;
	guid_copy(&child_device_obj->dev_type, type);
	guid_copy(&child_device_obj->dev_instance, instance);
	child_device_obj->vendor_id = PCI_VENDOR_ID_MICROSOFT;

	return child_device_obj;
}
/*
 * vmbus_device_register - Register the child device
 */
int vmbus_device_register(struct hv_device *child_device_obj)
{
	struct kobject *kobj = &child_device_obj->device.kobj;
	int ret;

	dev_set_name(&child_device_obj->device, "%pUl",
		     &child_device_obj->channel->offermsg.offer.if_instance);

	child_device_obj->device.bus = &hv_bus;
	child_device_obj->device.parent = hv_dev;
	child_device_obj->device.release = vmbus_device_release;

	child_device_obj->device.dma_parms = &child_device_obj->dma_parms;
	child_device_obj->device.dma_mask = &child_device_obj->dma_mask;
	dma_set_mask(&child_device_obj->device, DMA_BIT_MASK(64));

	/*
	 * Register with the LDM. This will kick off the driver/device
	 * binding...which will eventually call vmbus_match() and vmbus_probe()
	 */
	ret = device_register(&child_device_obj->device);
	if (ret) {
		pr_err("Unable to register child device\n");
		put_device(&child_device_obj->device);
		return ret;
	}

	child_device_obj->channels_kset = kset_create_and_add("channels",
							      NULL, kobj);
	if (!child_device_obj->channels_kset) {
		ret = -ENOMEM;
		goto err_dev_unregister;
	}

	ret = vmbus_add_channel_kobj(child_device_obj,
				     child_device_obj->channel);
	if (ret) {
		pr_err("Unable to register primary channel\n");
		goto err_kset_unregister;
	}
	hv_debug_add_dev_dir(child_device_obj);

	return 0;

err_kset_unregister:
	kset_unregister(child_device_obj->channels_kset);

err_dev_unregister:
	device_unregister(&child_device_obj->device);
	return ret;
}
/*
 * vmbus_device_unregister - Remove the specified child device
 * from the vmbus.
 */
void vmbus_device_unregister(struct hv_device *device_obj)
{
	pr_debug("child device %s unregistered\n",
		 dev_name(&device_obj->device));

	kset_unregister(device_obj->channels_kset);

	/*
	 * Kick off the process of unregistering the device.
	 * This will call vmbus_remove() and eventually vmbus_device_release()
	 */
	device_unregister(&device_obj->device);
}
/*
 * VMBUS is an acpi enumerated device. Get the information we
 * need from DSDT.
 */
static acpi_status vmbus_walk_resources(struct acpi_resource *res, void *ctx)
{
	resource_size_t start = 0;
	resource_size_t end = 0;
	struct resource *new_res;
	struct resource **old_res = &hyperv_mmio;
	struct resource **prev_res = NULL;
	struct resource r;

	switch (res->type) {

	/*
	 * "Address" descriptors are for bus windows. Ignore
	 * "memory" descriptors, which are for registers on
	 * devices.
	 */
	case ACPI_RESOURCE_TYPE_ADDRESS32:
		start = res->data.address32.address.minimum;
		end = res->data.address32.address.maximum;
		break;

	case ACPI_RESOURCE_TYPE_ADDRESS64:
		start = res->data.address64.address.minimum;
		end = res->data.address64.address.maximum;
		break;

	/*
	 * The IRQ information is needed only on ARM64, which Hyper-V
	 * sets up in the extended format. IRQ information is present
	 * on x86/x64 in the non-extended format but it is not used by
	 * Linux. So don't bother checking for the non-extended format.
	 */
	case ACPI_RESOURCE_TYPE_EXTENDED_IRQ:
		if (!acpi_dev_resource_interrupt(res, 0, &r)) {
			pr_err("Unable to parse Hyper-V ACPI interrupt\n");
			return AE_ERROR;
		}
		/* ARM64 INTID for VMbus */
		vmbus_interrupt = res->data.extended_irq.interrupts[0];
		/* Linux IRQ number */
		vmbus_irq = r.start;
		return AE_OK;

	default:
		/* Unused resource type */
		return AE_OK;

	}
	/*
	 * Ignore ranges that are below 1MB, as they're not
	 * necessary or useful here.
	 */
	if (end < 0x100000)
		return AE_OK;

	new_res = kzalloc(sizeof(*new_res), GFP_ATOMIC);
	if (!new_res)
		return AE_NO_MEMORY;

	/* If this range overlaps the virtual TPM, truncate it. */
	if (end > VTPM_BASE_ADDRESS && start < VTPM_BASE_ADDRESS)
		end = VTPM_BASE_ADDRESS;

	new_res->name = "hyperv mmio";
	new_res->flags = IORESOURCE_MEM;
	new_res->start = start;
	new_res->end = end;

	/*
	 * If two ranges are adjacent, merge them.
	 */
	do {
		if (!*old_res) {
			*old_res = new_res;
			break;
		}

		if (((*old_res)->end + 1) == new_res->start) {
			(*old_res)->end = new_res->end;
			kfree(new_res);
			break;
		}

		if ((*old_res)->start == new_res->end + 1) {
			(*old_res)->start = new_res->start;
			kfree(new_res);
			break;
		}

		if ((*old_res)->start > new_res->end) {
			new_res->sibling = *old_res;
			if (prev_res)
				(*prev_res)->sibling = new_res;
			*old_res = new_res;
			break;
		}

		prev_res = old_res;
		old_res = &(*old_res)->sibling;

	} while (1);

	return AE_OK;
}
static void vmbus_mmio_remove(void)
{
	struct resource *cur_res;
	struct resource *next_res;

	if (hyperv_mmio) {
		if (fb_mmio) {
			__release_region(hyperv_mmio, fb_mmio->start,
					 resource_size(fb_mmio));
			fb_mmio = NULL;
		}

		for (cur_res = hyperv_mmio; cur_res; cur_res = next_res) {
			next_res = cur_res->sibling;
			kfree(cur_res);
		}
	}
}
static void __maybe_unused vmbus_reserve_fb(void)
{
	resource_size_t start = 0, size;
	struct pci_dev *pdev;

	if (efi_enabled(EFI_BOOT)) {
		/* Gen2 VM: get FB base from EFI framebuffer */
		if (IS_ENABLED(CONFIG_SYSFB)) {
			start = screen_info.lfb_base;
			size = max_t(__u32, screen_info.lfb_size, 0x800000);
		}
	} else {
		/* Gen1 VM: get FB base from PCI */
		pdev = pci_get_device(PCI_VENDOR_ID_MICROSOFT,
				      PCI_DEVICE_ID_HYPERV_VIDEO, NULL);
		if (!pdev)
			return;

		if (pdev->resource[0].flags & IORESOURCE_MEM) {
			start = pci_resource_start(pdev, 0);
			size = pci_resource_len(pdev, 0);
		}

		/*
		 * Release the PCI device so hyperv_drm or hyperv_fb driver can
		 * grab it later.
		 */
		pci_dev_put(pdev);
	}

	if (!start)
		return;

	/*
	 * Make a claim for the frame buffer in the resource tree under the
	 * first node, which will be the one below 4GB. The length seems to
	 * be underreported, particularly in a Generation 1 VM. So start out
	 * reserving a larger area and make it smaller until it succeeds.
	 */
	for (; !fb_mmio && (size >= 0x100000); size >>= 1)
		fb_mmio = __request_region(hyperv_mmio, start, size, fb_mmio_name, 0);
}
/**
 * vmbus_allocate_mmio() - Pick a memory-mapped I/O range.
 * @new:		If successful, supplies a pointer to the
 *			allocated MMIO space.
 * @device_obj:		Identifies the caller
 * @min:		Minimum guest physical address of the
 *			range
 * @max:		Maximum guest physical address
 * @size:		Size of the range to be allocated
 * @align:		Alignment of the range to be allocated
 * @fb_overlap_ok:	Whether this allocation can be allowed
 *			to overlap the video frame buffer.
 *
 * This function walks the resources granted to VMBus by the
 * _CRS object in the ACPI namespace underneath the parent
 * "bridge" whether that's a root PCI bus in the Generation 1
 * case or a Module Device in the Generation 2 case. It then
 * attempts to allocate from the global MMIO pool in a way that
 * matches the constraints supplied in these parameters and by
 * the caller.
 *
 * Return: 0 on success, -errno on failure
 */
int vmbus_allocate_mmio(struct resource **new, struct hv_device *device_obj,
			resource_size_t min, resource_size_t max,
			resource_size_t size, resource_size_t align,
			bool fb_overlap_ok)
{
	struct resource *iter, *shadow;
	resource_size_t range_min, range_max, start, end;
	const char *dev_n = dev_name(&device_obj->device);
	int retval;

	retval = -ENXIO;
	mutex_lock(&hyperv_mmio_lock);

	/*
	 * If overlaps with frame buffers are allowed, then first attempt to
	 * make the allocation from within the reserved region. Because it
	 * is already reserved, no shadow allocation is necessary.
	 */
	if (fb_overlap_ok && fb_mmio && !(min > fb_mmio->end) &&
	    !(max < fb_mmio->start)) {

		range_min = fb_mmio->start;
		range_max = fb_mmio->end;
		start = (range_min + align - 1) & ~(align - 1);
		for (; start + size - 1 <= range_max; start += align) {
			*new = request_mem_region_exclusive(start, size, dev_n);
			if (*new) {
				retval = 0;
				goto exit;
			}
		}
	}

	for (iter = hyperv_mmio; iter; iter = iter->sibling) {
		if ((iter->start >= max) || (iter->end <= min))
			continue;

		range_min = iter->start;
		range_max = iter->end;
		start = (range_min + align - 1) & ~(align - 1);
		for (; start + size - 1 <= range_max; start += align) {
			end = start + size - 1;

			/* Skip the whole fb_mmio region if not fb_overlap_ok */
			if (!fb_overlap_ok && fb_mmio &&
			    (((start >= fb_mmio->start) && (start <= fb_mmio->end)) ||
			     ((end >= fb_mmio->start) && (end <= fb_mmio->end))))
				continue;

			shadow = __request_region(iter, start, size, NULL,
						  IORESOURCE_BUSY);
			if (!shadow)
				continue;

			*new = request_mem_region_exclusive(start, size, dev_n);
			if (*new) {
				shadow->name = (char *)*new;
				retval = 0;
				goto exit;
			}

			__release_region(iter, start, size);
		}
	}

exit:
	mutex_unlock(&hyperv_mmio_lock);
	return retval;
}
EXPORT_SYMBOL_GPL(vmbus_allocate_mmio);
/**
 * vmbus_free_mmio() - Free a memory-mapped I/O range.
 * @start:		Base address of region to release.
 * @size:		Size of the range to be released.
 *
 * This function releases anything requested by
 * vmbus_allocate_mmio().
 */
void vmbus_free_mmio(resource_size_t start, resource_size_t size)
{
	struct resource *iter;

	mutex_lock(&hyperv_mmio_lock);
	for (iter = hyperv_mmio; iter; iter = iter->sibling) {
		if ((iter->start >= start + size) || (iter->end <= start))
			continue;

		__release_region(iter, start, size);
	}
	release_mem_region(start, size);
	mutex_unlock(&hyperv_mmio_lock);
}
EXPORT_SYMBOL_GPL(vmbus_free_mmio);
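
/*
 * Example (illustrative sketch): how a VMbus driver might claim and
 * later release MMIO space; "hdev", the size and the alignment are
 * placeholders.
 *
 *	struct resource *res;
 *	int ret;
 *
 *	ret = vmbus_allocate_mmio(&res, hdev, 0, -1, 0x100000,
 *				  PAGE_SIZE, false);
 *	if (ret)
 *		return ret;
 *	// use res->start .. res->end, e.g. via ioremap()
 *	vmbus_free_mmio(res->start, resource_size(res));
 */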
2249 static int vmbus_acpi_add(struct platform_device *pdev)
2252 int ret_val = -ENODEV;
2253 struct acpi_device *ancestor;
2254 struct acpi_device *device = ACPI_COMPANION(&pdev->dev);
2256 hv_dev = &device->dev;
        /*
         * Older versions of Hyper-V for ARM64 fail to include the _CCA
         * method on the top level VMbus device in the DSDT. But devices
         * are hardware coherent in all current Hyper-V use cases, so fix
         * up the ACPI device to behave as if _CCA is present and indicates
         * hardware coherence.
         */
        ACPI_COMPANION_SET(&device->dev, device);
        if (IS_ENABLED(CONFIG_ACPI_CCA_REQUIRED) &&
            device_get_dma_attr(&device->dev) == DEV_DMA_NOT_SUPPORTED) {
                pr_info("No ACPI _CCA found; assuming coherent device I/O\n");
                device->flags.cca_seen = true;
                device->flags.coherent_dma = true;
        }
        result = acpi_walk_resources(device->handle, METHOD_NAME__CRS,
                                     vmbus_walk_resources, NULL);

        if (ACPI_FAILURE(result))
                goto acpi_walk_err;
        /*
         * Some ancestor of the vmbus ACPI device (Gen1 or Gen2
         * firmware) is the VMOD that has the MMIO ranges. Get that.
         */
        for (ancestor = acpi_dev_parent(device);
             ancestor && ancestor->handle != ACPI_ROOT_OBJECT;
             ancestor = acpi_dev_parent(ancestor)) {
                result = acpi_walk_resources(ancestor->handle, METHOD_NAME__CRS,
                                             vmbus_walk_resources, NULL);

                if (ACPI_FAILURE(result))
                        continue;

                if (hyperv_mmio) {
                        vmbus_reserve_fb();
                        break;
                }
        }
        ret_val = 0;

acpi_walk_err:
        if (ret_val)
                vmbus_mmio_remove();
        return ret_val;
}
#else
static int vmbus_acpi_add(struct platform_device *pdev)
{
        return 0;
}
#endif

static int vmbus_device_add(struct platform_device *pdev)
{
        struct resource **cur_res = &hyperv_mmio;
        struct of_range range;
        struct of_range_parser parser;
        struct device_node *np = pdev->dev.of_node;
        int ret;

        hv_dev = &pdev->dev;
        ret = of_range_parser_init(&parser, np);
        if (ret)
                return ret;
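        /*
         * Each devicetree "ranges" entry becomes one "hyperv mmio"
         * resource chained into the global hyperv_mmio list, which
         * vmbus_allocate_mmio() later carves device allocations from.
         */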
        for_each_of_range(&parser, &range) {
                struct resource *res;
                res = kzalloc(sizeof(*res), GFP_KERNEL);
                if (!res) {
                        vmbus_mmio_remove();
                        return -ENOMEM;
                }
                res->name = "hyperv mmio";
                res->flags = range.flags;
                res->start = range.cpu_addr;
                res->end = range.cpu_addr + range.size;

                *cur_res = res;
                cur_res = &res->sibling;
        }

        return ret;
}

static int vmbus_platform_driver_probe(struct platform_device *pdev)
{
        if (acpi_disabled)
                return vmbus_device_add(pdev);
        else
                return vmbus_acpi_add(pdev);
}
static void vmbus_platform_driver_remove(struct platform_device *pdev)
{
        vmbus_mmio_remove();
}
#ifdef CONFIG_PM_SLEEP
static int vmbus_bus_suspend(struct device *dev)
{
        struct hv_per_cpu_context *hv_cpu = per_cpu_ptr(
                        hv_context.cpu_context, VMBUS_CONNECT_CPU);
        struct vmbus_channel *channel, *sc;
        tasklet_disable(&hv_cpu->msg_dpc);
        vmbus_connection.ignore_any_offer_msg = true;
        /* The tasklet_enable() takes care of providing a memory barrier */
        tasklet_enable(&hv_cpu->msg_dpc);
        /* Drain all the workqueues as we are in suspend */
        drain_workqueue(vmbus_connection.rescind_work_queue);
        drain_workqueue(vmbus_connection.work_queue);
        drain_workqueue(vmbus_connection.handle_primary_chan_wq);
        drain_workqueue(vmbus_connection.handle_sub_chan_wq);
        mutex_lock(&vmbus_connection.channel_mutex);
        list_for_each_entry(channel, &vmbus_connection.chn_list, listentry) {
                if (!is_hvsock_channel(channel))
                        continue;

                vmbus_force_channel_rescinded(channel);
        }
        mutex_unlock(&vmbus_connection.channel_mutex);
        /*
         * Wait until all the sub-channels and hv_sock channels have been
         * cleaned up. Sub-channels should be destroyed upon suspend, otherwise
         * they would conflict with the new sub-channels that will be created
         * in the resume path. hv_sock channels should also be destroyed, but
         * a hv_sock channel of an established hv_sock connection cannot
         * really be destroyed since it may still be referenced by the
         * userspace application, so we just force the hv_sock channel to be
         * rescinded by vmbus_force_channel_rescinded(), and the userspace
         * application will thoroughly destroy the channel after hibernation.
         *
         * Note: the counter nr_chan_close_on_suspend may never go above 0 if
         * the VM has no sub-channel and hv_sock channel, e.g. a 1-vCPU VM.
         */
        if (atomic_read(&vmbus_connection.nr_chan_close_on_suspend) > 0)
                wait_for_completion(&vmbus_connection.ready_for_suspend_event);
        if (atomic_read(&vmbus_connection.nr_chan_fixup_on_resume) != 0) {
                pr_err("Cannot suspend due to a previously failed resume\n");
                return -EBUSY;
        }
        mutex_lock(&vmbus_connection.channel_mutex);

        list_for_each_entry(channel, &vmbus_connection.chn_list, listentry) {
                /*
                 * Remove the channel from the array of channels and invalidate
                 * the channel's relid. Upon resume, vmbus_onoffer() will fix
                 * up the relid (and other fields, if necessary) and add the
                 * channel back to the array.
                 */
                vmbus_channel_unmap_relid(channel);
                channel->offermsg.child_relid = INVALID_RELID;
                if (is_hvsock_channel(channel)) {
                        if (!channel->rescind) {
                                pr_err("hv_sock channel not rescinded!\n");
                                WARN_ON_ONCE(1);
                        }
                        continue;
                }

                list_for_each_entry(sc, &channel->sc_list, sc_list) {
                        pr_err("Sub-channel not deleted!\n");
                        WARN_ON_ONCE(1);
                }

                atomic_inc(&vmbus_connection.nr_chan_fixup_on_resume);
        }
        mutex_unlock(&vmbus_connection.channel_mutex);

        vmbus_initiate_unload(false);

        /* Reset the event for the next resume. */
        reinit_completion(&vmbus_connection.ready_for_resume_event);

        return 0;
}

static int vmbus_bus_resume(struct device *dev)
{
        struct vmbus_channel_msginfo *msginfo;
        size_t msgsize;
        int ret;

        vmbus_connection.ignore_any_offer_msg = false;
        /*
         * We only use the 'vmbus_proto_version', which was in use before
         * hibernation, to re-negotiate with the host.
         */
        if (!vmbus_proto_version) {
                pr_err("Invalid proto version = 0x%x\n", vmbus_proto_version);
                return -EINVAL;
        }
        msgsize = sizeof(*msginfo) +
                  sizeof(struct vmbus_channel_initiate_contact);
        msginfo = kzalloc(msgsize, GFP_KERNEL);
        if (msginfo == NULL)
                return -ENOMEM;

        ret = vmbus_negotiate_version(msginfo, vmbus_proto_version);

        kfree(msginfo);

        if (ret != 0)
                return ret;
        WARN_ON(atomic_read(&vmbus_connection.nr_chan_fixup_on_resume) == 0);

        vmbus_request_offers();
        if (wait_for_completion_timeout(
                &vmbus_connection.ready_for_resume_event, 10 * HZ) == 0)
                pr_err("Some vmbus device is missing after suspend\n");
        /* Reset the event for the next suspend. */
        reinit_completion(&vmbus_connection.ready_for_suspend_event);

        return 0;
}
#else
#define vmbus_bus_suspend NULL
#define vmbus_bus_resume NULL
#endif /* CONFIG_PM_SLEEP */

static const __maybe_unused struct of_device_id vmbus_of_match[] = {
        {
                .compatible = "microsoft,vmbus",
        },
        {
                /* sentinel */
        },
};
MODULE_DEVICE_TABLE(of, vmbus_of_match);
static const __maybe_unused struct acpi_device_id vmbus_acpi_device_ids[] = {
        {"VMBUS", 0},
        {"VMBus", 0},
        {"", 0},
};
MODULE_DEVICE_TABLE(acpi, vmbus_acpi_device_ids);
/*
 * Note: we must use the "noirq" ops, otherwise hibernation cannot work with
 * PCI device assignment, because "pci_dev_pm_ops" uses the "noirq" ops: in
 * the resume path, the pci "noirq" restore op runs before the "non-noirq"
 * ops (see resume_target_kernel() -> dpm_resume_start(), and
 * hibernation_restore() -> dpm_resume_end()). This means vmbus_bus_resume()
 * and the pci-hyperv resume callback must also run via the "noirq" ops.
 *
 * Set suspend_noirq/resume_noirq to NULL for Suspend-to-Idle: see the comment
 * earlier in this file before vmbus_pm.
 */
static const struct dev_pm_ops vmbus_bus_pm = {
        .suspend_noirq  = NULL,
        .resume_noirq   = NULL,
        .freeze_noirq   = vmbus_bus_suspend,
        .thaw_noirq     = vmbus_bus_resume,
        .poweroff_noirq = vmbus_bus_suspend,
        .restore_noirq  = vmbus_bus_resume,
};
static struct platform_driver vmbus_platform_driver = {
        .probe = vmbus_platform_driver_probe,
        .remove_new = vmbus_platform_driver_remove,
        .driver = {
                .name = "vmbus",
                .acpi_match_table = ACPI_PTR(vmbus_acpi_device_ids),
                .of_match_table = of_match_ptr(vmbus_of_match),
                .pm = &vmbus_bus_pm,
                .probe_type = PROBE_FORCE_SYNCHRONOUS,
        }
};
static void hv_kexec_handler(void)
{
        hv_stimer_global_cleanup();
        vmbus_initiate_unload(false);
        /* Make sure conn_state is set as hv_synic_cleanup checks for it */
        mb();
        cpuhp_remove_state(hyperv_cpuhp_online);
}
static void hv_crash_handler(struct pt_regs *regs)
{
        int cpu;

        vmbus_initiate_unload(true);
        /*
         * In crash handler we can't schedule synic cleanup for all CPUs,
         * so do the cleanup for the current CPU only. This should be
         * sufficient for now.
         */
        cpu = smp_processor_id();
        hv_stimer_cleanup(cpu);
        hv_synic_disable_regs(cpu);
}
static int hv_synic_suspend(void)
{
        /*
         * When we reach here, all the non-boot CPUs have been offlined.
         * If we're in a legacy configuration where stimer Direct Mode is
         * not enabled, the stimers on the non-boot CPUs have been unbound
         * in hv_synic_cleanup() -> hv_stimer_legacy_cleanup() ->
         * hv_stimer_cleanup() -> clockevents_unbind_device().
         *
         * hv_synic_suspend() only runs on CPU0 with interrupts disabled.
         * Here we do not call hv_stimer_legacy_cleanup() on CPU0 because:
         * 1) it's unnecessary as interrupts remain disabled between
         * syscore_suspend() and syscore_resume(): see create_image() and
         * resume_target_kernel();
         * 2) the stimer on CPU0 is automatically disabled later by
         * syscore_suspend() -> timekeeping_suspend() -> tick_suspend() -> ...
         * -> clockevents_shutdown() -> ... -> hv_ce_shutdown();
         * 3) a warning would be triggered if we call
         * clockevents_unbind_device(), which may sleep, in an
         * interrupts-disabled context.
         */
        hv_synic_disable_regs(0);

        return 0;
}
static void hv_synic_resume(void)
{
        hv_synic_enable_regs(0);

        /*
         * Note: we don't need to call hv_stimer_init(0), because the timer
         * on CPU0 is not unbound in hv_synic_suspend(), and the timer is
         * automatically re-enabled in timekeeping_resume().
         */
}
/* The callbacks run only on CPU0, with irqs_disabled. */
static struct syscore_ops hv_synic_syscore_ops = {
        .suspend = hv_synic_suspend,
        .resume = hv_synic_resume,
};
static int __init hv_acpi_init(void)
{
        int ret;

        if (!hv_is_hyperv_initialized())
                return -ENODEV;

        if (hv_root_partition && !hv_nested)
                return 0;

        /*
         * Get ACPI resources first.
         */
        ret = platform_driver_register(&vmbus_platform_driver);
        if (ret)
                return ret;
        /*
         * If we're on an architecture with a hardcoded hypervisor
         * vector (i.e. x86/x64), override the VMbus interrupt found
         * in the ACPI tables. Ensure vmbus_irq is not set since the
         * normal Linux IRQ mechanism is not used in this case.
         */
#ifdef HYPERVISOR_CALLBACK_VECTOR
        vmbus_interrupt = HYPERVISOR_CALLBACK_VECTOR;
        vmbus_irq = -1;
#endif
        ret = vmbus_bus_init();
        if (ret)
                goto cleanup;
        hv_setup_kexec_handler(hv_kexec_handler);
        hv_setup_crash_handler(hv_crash_handler);

        register_syscore_ops(&hv_synic_syscore_ops);

        return 0;
cleanup:
        platform_driver_unregister(&vmbus_platform_driver);
        hv_dev = NULL;
        return ret;
}

static void __exit vmbus_exit(void)
{
        int cpu;

        unregister_syscore_ops(&hv_synic_syscore_ops);

        hv_remove_kexec_handler();
        hv_remove_crash_handler();
        vmbus_connection.conn_state = DISCONNECTED;
        hv_stimer_global_cleanup();
        vmbus_disconnect();
        if (vmbus_irq == -1) {
                hv_remove_vmbus_handler();
        } else {
                free_percpu_irq(vmbus_irq, vmbus_evt);
                free_percpu(vmbus_evt);
        }
        for_each_online_cpu(cpu) {
                struct hv_per_cpu_context *hv_cpu
                        = per_cpu_ptr(hv_context.cpu_context, cpu);

                tasklet_kill(&hv_cpu->msg_dpc);
        }
        hv_debug_rm_all_dir();

        vmbus_free_channels();
        kfree(vmbus_connection.channels);
        /*
         * The vmbus panic notifier is always registered, hence we should
         * also unconditionally unregister it here as well.
         */
        atomic_notifier_chain_unregister(&panic_notifier_list,
                                         &hyperv_panic_vmbus_unload_block);
        bus_unregister(&hv_bus);

        cpuhp_remove_state(hyperv_cpuhp_online);

        platform_driver_unregister(&vmbus_platform_driver);
}

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Microsoft Hyper-V VMBus Driver");

subsys_initcall(hv_acpi_init);
module_exit(vmbus_exit);