// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 *   K. Y. Srinivasan <kys@microsoft.com>
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/init.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/sysctl.h>
#include <linux/slab.h>
#include <linux/acpi.h>
#include <linux/completion.h>
#include <linux/hyperv.h>
#include <linux/kernel_stat.h>
#include <linux/clockchips.h>
#include <linux/cpu.h>
#include <linux/sched/task_stack.h>

#include <linux/delay.h>
#include <linux/notifier.h>
#include <linux/ptrace.h>
#include <linux/screen_info.h>
#include <linux/kdebug.h>
#include <linux/efi.h>
#include <linux/random.h>
#include <linux/kernel.h>
#include <linux/syscore_ops.h>
#include <clocksource/hyperv_timer.h>
#include "hyperv_vmbus.h"
struct vmbus_dynid {
	struct list_head node;
	struct hv_vmbus_device_id id;
};

static struct acpi_device *hv_acpi_dev;

static struct completion probe_event;

static int hyperv_cpuhp_online;

static void *hv_panic_page;

/* Values parsed from ACPI DSDT */
int vmbus_irq;
int vmbus_interrupt;

/*
 * Boolean to control whether to report panic messages over Hyper-V.
 *
 * It can be set via /proc/sys/kernel/hyperv_record_panic_msg
 */
static int sysctl_record_panic_msg = 1;

static int hyperv_report_reg(void)
{
	return !sysctl_record_panic_msg || !hv_panic_page;
}
static int hyperv_panic_event(struct notifier_block *nb, unsigned long val,
			      void *args)
{
	struct pt_regs *regs;

	vmbus_initiate_unload(true);

	/*
	 * Hyper-V should be notified only once about a panic. If we will be
	 * doing hyperv_report_panic_msg() later with kmsg data, don't do
	 * the notification here.
	 */
	if (ms_hyperv.misc_features & HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE
	    && hyperv_report_reg()) {
		regs = current_pt_regs();
		hyperv_report_panic(regs, val, false);
	}
	return NOTIFY_DONE;
}

static int hyperv_die_event(struct notifier_block *nb, unsigned long val,
			    void *args)
{
	struct die_args *die = args;
	struct pt_regs *regs = die->regs;

	/* Don't notify Hyper-V if the die event is other than oops */
	if (val != DIE_OOPS)
		return NOTIFY_DONE;

	/*
	 * Hyper-V should be notified only once about a panic. If we will be
	 * doing hyperv_report_panic_msg() later with kmsg data, don't do
	 * the notification here.
	 */
	if (hyperv_report_reg())
		hyperv_report_panic(regs, val, true);
	return NOTIFY_DONE;
}

static struct notifier_block hyperv_die_block = {
	.notifier_call = hyperv_die_event,
};
static struct notifier_block hyperv_panic_block = {
	.notifier_call = hyperv_panic_event,
};
static const char *fb_mmio_name = "fb_range";
static struct resource *fb_mmio;
static struct resource *hyperv_mmio;
static DEFINE_MUTEX(hyperv_mmio_lock);

static int vmbus_exists(void)
{
	if (hv_acpi_dev == NULL)
		return -ENODEV;

	return 0;
}
static u8 channel_monitor_group(const struct vmbus_channel *channel)
{
	return (u8)channel->offermsg.monitorid / 32;
}

static u8 channel_monitor_offset(const struct vmbus_channel *channel)
{
	return (u8)channel->offermsg.monitorid % 32;
}

static u32 channel_pending(const struct vmbus_channel *channel,
			   const struct hv_monitor_page *monitor_page)
{
	u8 monitor_group = channel_monitor_group(channel);

	return monitor_page->trigger_group[monitor_group].pending;
}

static u32 channel_latency(const struct vmbus_channel *channel,
			   const struct hv_monitor_page *monitor_page)
{
	u8 monitor_group = channel_monitor_group(channel);
	u8 monitor_offset = channel_monitor_offset(channel);

	return monitor_page->latency[monitor_group][monitor_offset];
}

static u32 channel_conn_id(struct vmbus_channel *channel,
			   struct hv_monitor_page *monitor_page)
{
	u8 monitor_group = channel_monitor_group(channel);
	u8 monitor_offset = channel_monitor_offset(channel);

	return monitor_page->parameter[monitor_group][monitor_offset].connectionid.u.id;
}
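/*
 * Note: the helpers above imply the monitor page layout: each trigger
 * group covers 32 channels, so monitorid / 32 selects the group and
 * monitorid % 32 selects the slot within that group (e.g. monitorid 40
 * maps to group 1, slot 8).
 */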
static ssize_t id_show(struct device *dev, struct device_attribute *dev_attr,
		       char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	if (!hv_dev->channel)
		return -ENODEV;
	return sprintf(buf, "%d\n", hv_dev->channel->offermsg.child_relid);
}
static DEVICE_ATTR_RO(id);

static ssize_t state_show(struct device *dev, struct device_attribute *dev_attr,
			  char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	if (!hv_dev->channel)
		return -ENODEV;
	return sprintf(buf, "%d\n", hv_dev->channel->state);
}
static DEVICE_ATTR_RO(state);

static ssize_t monitor_id_show(struct device *dev,
			       struct device_attribute *dev_attr, char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	if (!hv_dev->channel)
		return -ENODEV;
	return sprintf(buf, "%d\n", hv_dev->channel->offermsg.monitorid);
}
static DEVICE_ATTR_RO(monitor_id);

static ssize_t class_id_show(struct device *dev,
			     struct device_attribute *dev_attr, char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	if (!hv_dev->channel)
		return -ENODEV;
	return sprintf(buf, "{%pUl}\n",
		       &hv_dev->channel->offermsg.offer.if_type);
}
static DEVICE_ATTR_RO(class_id);

static ssize_t device_id_show(struct device *dev,
			      struct device_attribute *dev_attr, char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	if (!hv_dev->channel)
		return -ENODEV;
	return sprintf(buf, "{%pUl}\n",
		       &hv_dev->channel->offermsg.offer.if_instance);
}
static DEVICE_ATTR_RO(device_id);

static ssize_t modalias_show(struct device *dev,
			     struct device_attribute *dev_attr, char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	return sprintf(buf, "vmbus:%*phN\n", UUID_SIZE, &hv_dev->dev_type);
}
static DEVICE_ATTR_RO(modalias);
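/*
 * For illustration: reading the modalias attribute, e.g.
 *
 *	cat /sys/bus/vmbus/devices/<instance guid>/modalias
 *
 * prints "vmbus:" followed by the 16 bytes of the class GUID as 32 hex
 * digits, the same alias form that udev matches against a driver's
 * MODULE_DEVICE_TABLE(vmbus, ...) entries.
 */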
#ifdef CONFIG_NUMA
static ssize_t numa_node_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	if (!hv_dev->channel)
		return -ENODEV;

	return sprintf(buf, "%d\n", cpu_to_node(hv_dev->channel->target_cpu));
}
static DEVICE_ATTR_RO(numa_node);
#endif
static ssize_t server_monitor_pending_show(struct device *dev,
					   struct device_attribute *dev_attr,
					   char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	if (!hv_dev->channel)
		return -ENODEV;
	return sprintf(buf, "%d\n",
		       channel_pending(hv_dev->channel,
				       vmbus_connection.monitor_pages[0]));
}
static DEVICE_ATTR_RO(server_monitor_pending);

static ssize_t client_monitor_pending_show(struct device *dev,
					   struct device_attribute *dev_attr,
					   char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	if (!hv_dev->channel)
		return -ENODEV;
	return sprintf(buf, "%d\n",
		       channel_pending(hv_dev->channel,
				       vmbus_connection.monitor_pages[1]));
}
static DEVICE_ATTR_RO(client_monitor_pending);

static ssize_t server_monitor_latency_show(struct device *dev,
					   struct device_attribute *dev_attr,
					   char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	if (!hv_dev->channel)
		return -ENODEV;
	return sprintf(buf, "%d\n",
		       channel_latency(hv_dev->channel,
				       vmbus_connection.monitor_pages[0]));
}
static DEVICE_ATTR_RO(server_monitor_latency);

static ssize_t client_monitor_latency_show(struct device *dev,
					   struct device_attribute *dev_attr,
					   char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	if (!hv_dev->channel)
		return -ENODEV;
	return sprintf(buf, "%d\n",
		       channel_latency(hv_dev->channel,
				       vmbus_connection.monitor_pages[1]));
}
static DEVICE_ATTR_RO(client_monitor_latency);

static ssize_t server_monitor_conn_id_show(struct device *dev,
					   struct device_attribute *dev_attr,
					   char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	if (!hv_dev->channel)
		return -ENODEV;
	return sprintf(buf, "%d\n",
		       channel_conn_id(hv_dev->channel,
				       vmbus_connection.monitor_pages[0]));
}
static DEVICE_ATTR_RO(server_monitor_conn_id);

static ssize_t client_monitor_conn_id_show(struct device *dev,
					   struct device_attribute *dev_attr,
					   char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	if (!hv_dev->channel)
		return -ENODEV;
	return sprintf(buf, "%d\n",
		       channel_conn_id(hv_dev->channel,
				       vmbus_connection.monitor_pages[1]));
}
static DEVICE_ATTR_RO(client_monitor_conn_id);
static ssize_t out_intr_mask_show(struct device *dev,
				  struct device_attribute *dev_attr, char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	struct hv_ring_buffer_debug_info outbound;
	int ret;

	if (!hv_dev->channel)
		return -ENODEV;

	ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound,
					  &outbound);
	if (ret < 0)
		return ret;

	return sprintf(buf, "%d\n", outbound.current_interrupt_mask);
}
static DEVICE_ATTR_RO(out_intr_mask);

static ssize_t out_read_index_show(struct device *dev,
				   struct device_attribute *dev_attr, char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	struct hv_ring_buffer_debug_info outbound;
	int ret;

	if (!hv_dev->channel)
		return -ENODEV;

	ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound,
					  &outbound);
	if (ret < 0)
		return ret;
	return sprintf(buf, "%d\n", outbound.current_read_index);
}
static DEVICE_ATTR_RO(out_read_index);

static ssize_t out_write_index_show(struct device *dev,
				    struct device_attribute *dev_attr,
				    char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	struct hv_ring_buffer_debug_info outbound;
	int ret;

	if (!hv_dev->channel)
		return -ENODEV;

	ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound,
					  &outbound);
	if (ret < 0)
		return ret;
	return sprintf(buf, "%d\n", outbound.current_write_index);
}
static DEVICE_ATTR_RO(out_write_index);

static ssize_t out_read_bytes_avail_show(struct device *dev,
					 struct device_attribute *dev_attr,
					 char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	struct hv_ring_buffer_debug_info outbound;
	int ret;

	if (!hv_dev->channel)
		return -ENODEV;

	ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound,
					  &outbound);
	if (ret < 0)
		return ret;
	return sprintf(buf, "%d\n", outbound.bytes_avail_toread);
}
static DEVICE_ATTR_RO(out_read_bytes_avail);

static ssize_t out_write_bytes_avail_show(struct device *dev,
					  struct device_attribute *dev_attr,
					  char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	struct hv_ring_buffer_debug_info outbound;
	int ret;

	if (!hv_dev->channel)
		return -ENODEV;

	ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound,
					  &outbound);
	if (ret < 0)
		return ret;
	return sprintf(buf, "%d\n", outbound.bytes_avail_towrite);
}
static DEVICE_ATTR_RO(out_write_bytes_avail);

static ssize_t in_intr_mask_show(struct device *dev,
				 struct device_attribute *dev_attr, char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	struct hv_ring_buffer_debug_info inbound;
	int ret;

	if (!hv_dev->channel)
		return -ENODEV;

	ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
	if (ret < 0)
		return ret;

	return sprintf(buf, "%d\n", inbound.current_interrupt_mask);
}
static DEVICE_ATTR_RO(in_intr_mask);

static ssize_t in_read_index_show(struct device *dev,
				  struct device_attribute *dev_attr, char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	struct hv_ring_buffer_debug_info inbound;
	int ret;

	if (!hv_dev->channel)
		return -ENODEV;

	ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
	if (ret < 0)
		return ret;

	return sprintf(buf, "%d\n", inbound.current_read_index);
}
static DEVICE_ATTR_RO(in_read_index);

static ssize_t in_write_index_show(struct device *dev,
				   struct device_attribute *dev_attr, char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	struct hv_ring_buffer_debug_info inbound;
	int ret;

	if (!hv_dev->channel)
		return -ENODEV;

	ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
	if (ret < 0)
		return ret;

	return sprintf(buf, "%d\n", inbound.current_write_index);
}
static DEVICE_ATTR_RO(in_write_index);

static ssize_t in_read_bytes_avail_show(struct device *dev,
					struct device_attribute *dev_attr,
					char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	struct hv_ring_buffer_debug_info inbound;
	int ret;

	if (!hv_dev->channel)
		return -ENODEV;

	ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
	if (ret < 0)
		return ret;

	return sprintf(buf, "%d\n", inbound.bytes_avail_toread);
}
static DEVICE_ATTR_RO(in_read_bytes_avail);

static ssize_t in_write_bytes_avail_show(struct device *dev,
					 struct device_attribute *dev_attr,
					 char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	struct hv_ring_buffer_debug_info inbound;
	int ret;

	if (!hv_dev->channel)
		return -ENODEV;

	ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
	if (ret < 0)
		return ret;

	return sprintf(buf, "%d\n", inbound.bytes_avail_towrite);
}
static DEVICE_ATTR_RO(in_write_bytes_avail);
static ssize_t channel_vp_mapping_show(struct device *dev,
				       struct device_attribute *dev_attr,
				       char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	struct vmbus_channel *channel = hv_dev->channel, *cur_sc;
	int buf_size = PAGE_SIZE, n_written, tot_written;
	struct list_head *cur;

	if (!channel)
		return -ENODEV;

	mutex_lock(&vmbus_connection.channel_mutex);

	tot_written = snprintf(buf, buf_size, "%u:%u\n",
		channel->offermsg.child_relid, channel->target_cpu);

	list_for_each(cur, &channel->sc_list) {
		if (tot_written >= buf_size - 1)
			break;

		cur_sc = list_entry(cur, struct vmbus_channel, sc_list);
		n_written = scnprintf(buf + tot_written,
				      buf_size - tot_written,
				      "%u:%u\n",
				      cur_sc->offermsg.child_relid,
				      cur_sc->target_cpu);
		tot_written += n_written;
	}

	mutex_unlock(&vmbus_connection.channel_mutex);

	return tot_written;
}
static DEVICE_ATTR_RO(channel_vp_mapping);
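/*
 * For illustration: channel_vp_mapping prints one "child_relid:target_cpu"
 * pair per line, the primary channel first and then each sub-channel, e.g.
 * (values are hypothetical):
 *
 *	15:0
 *	16:1
 *	17:2
 */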
static ssize_t vendor_show(struct device *dev,
			   struct device_attribute *dev_attr,
			   char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	return sprintf(buf, "0x%x\n", hv_dev->vendor_id);
}
static DEVICE_ATTR_RO(vendor);

static ssize_t device_show(struct device *dev,
			   struct device_attribute *dev_attr,
			   char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);

	return sprintf(buf, "0x%x\n", hv_dev->device_id);
}
static DEVICE_ATTR_RO(device);
static ssize_t driver_override_store(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	char *driver_override, *old, *cp;

	/* We need to keep extra room for a newline */
	if (count >= (PAGE_SIZE - 1))
		return -EINVAL;

	driver_override = kstrndup(buf, count, GFP_KERNEL);
	if (!driver_override)
		return -ENOMEM;

	cp = strchr(driver_override, '\n');
	if (cp)
		*cp = '\0';

	device_lock(dev);
	old = hv_dev->driver_override;
	if (strlen(driver_override)) {
		hv_dev->driver_override = driver_override;
	} else {
		kfree(driver_override);
		hv_dev->driver_override = NULL;
	}
	device_unlock(dev);

	kfree(old);

	return count;
}

static ssize_t driver_override_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct hv_device *hv_dev = device_to_hv_device(dev);
	ssize_t len;

	device_lock(dev);
	len = snprintf(buf, PAGE_SIZE, "%s\n", hv_dev->driver_override);
	device_unlock(dev);

	return len;
}
static DEVICE_ATTR_RW(driver_override);
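/*
 * For illustration: this is the standard driver_override protocol, e.g.
 *
 *	echo <driver name> > /sys/bus/vmbus/devices/<instance guid>/driver_override
 *	echo > /sys/bus/vmbus/devices/<instance guid>/driver_override
 *
 * where the first command pins the device to one driver and the second
 * clears the override again.
 */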
/* Set up per device attributes in /sys/bus/vmbus/devices/<bus device> */
static struct attribute *vmbus_dev_attrs[] = {
	&dev_attr_id.attr,
	&dev_attr_state.attr,
	&dev_attr_monitor_id.attr,
	&dev_attr_class_id.attr,
	&dev_attr_device_id.attr,
	&dev_attr_modalias.attr,
#ifdef CONFIG_NUMA
	&dev_attr_numa_node.attr,
#endif
	&dev_attr_server_monitor_pending.attr,
	&dev_attr_client_monitor_pending.attr,
	&dev_attr_server_monitor_latency.attr,
	&dev_attr_client_monitor_latency.attr,
	&dev_attr_server_monitor_conn_id.attr,
	&dev_attr_client_monitor_conn_id.attr,
	&dev_attr_out_intr_mask.attr,
	&dev_attr_out_read_index.attr,
	&dev_attr_out_write_index.attr,
	&dev_attr_out_read_bytes_avail.attr,
	&dev_attr_out_write_bytes_avail.attr,
	&dev_attr_in_intr_mask.attr,
	&dev_attr_in_read_index.attr,
	&dev_attr_in_write_index.attr,
	&dev_attr_in_read_bytes_avail.attr,
	&dev_attr_in_write_bytes_avail.attr,
	&dev_attr_channel_vp_mapping.attr,
	&dev_attr_vendor.attr,
	&dev_attr_device.attr,
	&dev_attr_driver_override.attr,
	NULL,
};
/*
 * Device-level attribute_group callback function. Returns the permission for
 * each attribute, and returns 0 if an attribute is not visible.
 */
static umode_t vmbus_dev_attr_is_visible(struct kobject *kobj,
					 struct attribute *attr, int idx)
{
	struct device *dev = kobj_to_dev(kobj);
	const struct hv_device *hv_dev = device_to_hv_device(dev);

	/* Hide the monitor attributes if the monitor mechanism is not used. */
	if (!hv_dev->channel->offermsg.monitor_allocated &&
	    (attr == &dev_attr_monitor_id.attr ||
	     attr == &dev_attr_server_monitor_pending.attr ||
	     attr == &dev_attr_client_monitor_pending.attr ||
	     attr == &dev_attr_server_monitor_latency.attr ||
	     attr == &dev_attr_client_monitor_latency.attr ||
	     attr == &dev_attr_server_monitor_conn_id.attr ||
	     attr == &dev_attr_client_monitor_conn_id.attr))
		return 0;

	return attr->mode;
}

static const struct attribute_group vmbus_dev_group = {
	.attrs = vmbus_dev_attrs,
	.is_visible = vmbus_dev_attr_is_visible
};
__ATTRIBUTE_GROUPS(vmbus_dev);
/* Set up the attribute for /sys/bus/vmbus/hibernation */
static ssize_t hibernation_show(struct bus_type *bus, char *buf)
{
	return sprintf(buf, "%d\n", !!hv_is_hibernation_supported());
}

static BUS_ATTR_RO(hibernation);

static struct attribute *vmbus_bus_attrs[] = {
	&bus_attr_hibernation.attr,
	NULL,
};
static const struct attribute_group vmbus_bus_group = {
	.attrs = vmbus_bus_attrs,
};
__ATTRIBUTE_GROUPS(vmbus_bus);
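/*
 * For illustration: "cat /sys/bus/vmbus/hibernation" prints 1 if the VM
 * configuration supports hibernation and 0 otherwise.
 */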
/*
 * vmbus_uevent - add uevent for our device
 *
 * This routine is invoked when a device is added or removed on the vmbus to
 * generate a uevent to udev in the userspace. The udev will then look at its
 * rules and the uevent generated here to load the appropriate driver.
 *
 * The alias string will be of the form vmbus:guid where guid is the string
 * representation of the device guid (each byte of the guid will be
 * represented with two hex characters).
 */
static int vmbus_uevent(struct device *device, struct kobj_uevent_env *env)
{
	struct hv_device *dev = device_to_hv_device(device);
	const char *format = "MODALIAS=vmbus:%*phN";

	return add_uevent_var(env, format, UUID_SIZE, &dev->dev_type);
}
static const struct hv_vmbus_device_id *
hv_vmbus_dev_match(const struct hv_vmbus_device_id *id, const guid_t *guid)
{
	if (id == NULL)
		return NULL; /* empty device table */

	for (; !guid_is_null(&id->guid); id++)
		if (guid_equal(&id->guid, guid))
			return id;

	return NULL;
}

static const struct hv_vmbus_device_id *
hv_vmbus_dynid_match(struct hv_driver *drv, const guid_t *guid)
{
	const struct hv_vmbus_device_id *id = NULL;
	struct vmbus_dynid *dynid;

	spin_lock(&drv->dynids.lock);
	list_for_each_entry(dynid, &drv->dynids.list, node) {
		if (guid_equal(&dynid->id.guid, guid)) {
			id = &dynid->id;
			break;
		}
	}
	spin_unlock(&drv->dynids.lock);

	return id;
}

static const struct hv_vmbus_device_id vmbus_device_null;

/*
 * Return a matching hv_vmbus_device_id pointer.
 * If there is no match, return NULL.
 */
static const struct hv_vmbus_device_id *hv_vmbus_get_id(struct hv_driver *drv,
							struct hv_device *dev)
{
	const guid_t *guid = &dev->dev_type;
	const struct hv_vmbus_device_id *id;

	/* When driver_override is set, only bind to the matching driver */
	if (dev->driver_override && strcmp(dev->driver_override, drv->name))
		return NULL;

	/* Look at the dynamic ids first, before the static ones */
	id = hv_vmbus_dynid_match(drv, guid);
	if (!id)
		id = hv_vmbus_dev_match(drv->id_table, guid);

	/* driver_override will always match, send a dummy id */
	if (!id && dev->driver_override)
		id = &vmbus_device_null;

	return id;
}
/* vmbus_add_dynid - add a new device ID to this driver and re-probe devices */
static int vmbus_add_dynid(struct hv_driver *drv, guid_t *guid)
{
	struct vmbus_dynid *dynid;

	dynid = kzalloc(sizeof(*dynid), GFP_KERNEL);
	if (!dynid)
		return -ENOMEM;

	dynid->id.guid = *guid;

	spin_lock(&drv->dynids.lock);
	list_add_tail(&dynid->node, &drv->dynids.list);
	spin_unlock(&drv->dynids.lock);

	return driver_attach(&drv->driver);
}

static void vmbus_free_dynids(struct hv_driver *drv)
{
	struct vmbus_dynid *dynid, *n;

	spin_lock(&drv->dynids.lock);
	list_for_each_entry_safe(dynid, n, &drv->dynids.list, node) {
		list_del(&dynid->node);
		kfree(dynid);
	}
	spin_unlock(&drv->dynids.lock);
}
/*
 * new_id_store - sysfs frontend to vmbus_add_dynid()
 *
 * Allow GUIDs to be added to an existing driver via sysfs.
 */
static ssize_t new_id_store(struct device_driver *driver, const char *buf,
			    size_t count)
{
	struct hv_driver *drv = drv_to_hv_drv(driver);
	guid_t guid;
	ssize_t retval;

	retval = guid_parse(buf, &guid);
	if (retval)
		return retval;

	if (hv_vmbus_dynid_match(drv, &guid))
		return -EEXIST;

	retval = vmbus_add_dynid(drv, &guid);
	if (retval)
		return retval;
	return count;
}
static DRIVER_ATTR_WO(new_id);

/*
 * remove_id_store - remove a vmbus device ID from this driver
 *
 * Removes a dynamic vmbus device ID from this driver.
 */
static ssize_t remove_id_store(struct device_driver *driver, const char *buf,
			       size_t count)
{
	struct hv_driver *drv = drv_to_hv_drv(driver);
	struct vmbus_dynid *dynid, *n;
	guid_t guid;
	ssize_t retval;

	retval = guid_parse(buf, &guid);
	if (retval)
		return retval;

	retval = -ENODEV;
	spin_lock(&drv->dynids.lock);
	list_for_each_entry_safe(dynid, n, &drv->dynids.list, node) {
		struct hv_vmbus_device_id *id = &dynid->id;

		if (guid_equal(&id->guid, &guid)) {
			list_del(&dynid->node);
			kfree(dynid);
			retval = count;
			break;
		}
	}
	spin_unlock(&drv->dynids.lock);

	return retval;
}
static DRIVER_ATTR_WO(remove_id);

static struct attribute *vmbus_drv_attrs[] = {
	&driver_attr_new_id.attr,
	&driver_attr_remove_id.attr,
	NULL,
};
ATTRIBUTE_GROUPS(vmbus_drv);
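/*
 * For illustration: a class GUID can be added to or removed from a
 * driver's dynamic ID list via sysfs, e.g.
 *
 *	echo "<class guid>" > /sys/bus/vmbus/drivers/<driver>/new_id
 *	echo "<class guid>" > /sys/bus/vmbus/drivers/<driver>/remove_id
 *
 * where <class guid> is in the textual form accepted by guid_parse().
 */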
/*
 * vmbus_match - Attempt to match the specified device to the specified driver
 */
static int vmbus_match(struct device *device, struct device_driver *driver)
{
	struct hv_driver *drv = drv_to_hv_drv(driver);
	struct hv_device *hv_dev = device_to_hv_device(device);

	/* The hv_sock driver handles all hv_sock offers. */
	if (is_hvsock_channel(hv_dev->channel))
		return drv->hvsock;

	if (hv_vmbus_get_id(drv, hv_dev))
		return 1;

	return 0;
}

/*
 * vmbus_probe - Add the new vmbus's child device
 */
static int vmbus_probe(struct device *child_device)
{
	int ret = 0;
	struct hv_driver *drv =
			drv_to_hv_drv(child_device->driver);
	struct hv_device *dev = device_to_hv_device(child_device);
	const struct hv_vmbus_device_id *dev_id;

	dev_id = hv_vmbus_get_id(drv, dev);
	if (drv->probe) {
		ret = drv->probe(dev, dev_id);
		if (ret != 0)
			pr_err("probe failed for device %s (%d)\n",
			       dev_name(child_device), ret);
	} else {
		pr_err("probe not set for driver %s\n",
		       dev_name(child_device));
		ret = -ENODEV;
	}
	return ret;
}
/*
 * vmbus_remove - Remove a vmbus device
 */
static int vmbus_remove(struct device *child_device)
{
	struct hv_driver *drv;
	struct hv_device *dev = device_to_hv_device(child_device);

	if (child_device->driver) {
		drv = drv_to_hv_drv(child_device->driver);
		if (drv->remove)
			drv->remove(dev);
	}

	return 0;
}

/*
 * vmbus_shutdown - Shutdown a vmbus device
 */
static void vmbus_shutdown(struct device *child_device)
{
	struct hv_driver *drv;
	struct hv_device *dev = device_to_hv_device(child_device);

	/* The device may not be attached yet */
	if (!child_device->driver)
		return;

	drv = drv_to_hv_drv(child_device->driver);

	if (drv->shutdown)
		drv->shutdown(dev);
}
#ifdef CONFIG_PM_SLEEP
/*
 * vmbus_suspend - Suspend a vmbus device
 */
static int vmbus_suspend(struct device *child_device)
{
	struct hv_driver *drv;
	struct hv_device *dev = device_to_hv_device(child_device);

	/* The device may not be attached yet */
	if (!child_device->driver)
		return 0;

	drv = drv_to_hv_drv(child_device->driver);
	if (!drv->suspend)
		return -EOPNOTSUPP;

	return drv->suspend(dev);
}

/*
 * vmbus_resume - Resume a vmbus device
 */
static int vmbus_resume(struct device *child_device)
{
	struct hv_driver *drv;
	struct hv_device *dev = device_to_hv_device(child_device);

	/* The device may not be attached yet */
	if (!child_device->driver)
		return 0;

	drv = drv_to_hv_drv(child_device->driver);
	if (!drv->resume)
		return -EOPNOTSUPP;

	return drv->resume(dev);
}
#else
#define vmbus_suspend NULL
#define vmbus_resume NULL
#endif /* CONFIG_PM_SLEEP */
/*
 * vmbus_device_release - Final callback release of the vmbus child device
 */
static void vmbus_device_release(struct device *device)
{
	struct hv_device *hv_dev = device_to_hv_device(device);
	struct vmbus_channel *channel = hv_dev->channel;

	hv_debug_rm_dev_dir(hv_dev);

	mutex_lock(&vmbus_connection.channel_mutex);
	hv_process_channel_removal(channel);
	mutex_unlock(&vmbus_connection.channel_mutex);
	kfree(hv_dev);
}

/*
 * Note: we must use the "noirq" ops: see the comment before vmbus_bus_pm.
 *
 * suspend_noirq/resume_noirq are set to NULL to support Suspend-to-Idle: we
 * shouldn't suspend the vmbus devices upon Suspend-to-Idle, otherwise there
 * is no way to wake up a Generation-2 VM.
 *
 * The other 4 ops are for hibernation.
 */
static const struct dev_pm_ops vmbus_pm = {
	.suspend_noirq = NULL,
	.resume_noirq = NULL,
	.freeze_noirq = vmbus_suspend,
	.thaw_noirq = vmbus_resume,
	.poweroff_noirq = vmbus_suspend,
	.restore_noirq = vmbus_resume,
};
/* The one and only one */
static struct bus_type hv_bus = {
	.name = "vmbus",
	.match = vmbus_match,
	.shutdown = vmbus_shutdown,
	.remove = vmbus_remove,
	.probe = vmbus_probe,
	.uevent = vmbus_uevent,
	.dev_groups = vmbus_dev_groups,
	.drv_groups = vmbus_drv_groups,
	.bus_groups = vmbus_bus_groups,
	.pm = &vmbus_pm,
};
struct onmessage_work_context {
	struct work_struct work;
	struct {
		struct hv_message_header header;
		u8 payload[];
	} msg;
};

static void vmbus_onmessage_work(struct work_struct *work)
{
	struct onmessage_work_context *ctx;

	/* Do not process messages if we're in DISCONNECTED state */
	if (vmbus_connection.conn_state == DISCONNECTED)
		return;

	ctx = container_of(work, struct onmessage_work_context,
			   work);
	vmbus_onmessage((struct vmbus_channel_message_header *)
			ctx->msg.payload);
	kfree(ctx);
}
void vmbus_on_msg_dpc(unsigned long data)
{
	struct hv_per_cpu_context *hv_cpu = (void *)data;
	void *page_addr = hv_cpu->synic_message_page;
	struct hv_message msg_copy, *msg = (struct hv_message *)page_addr +
				  VMBUS_MESSAGE_SINT;
	struct vmbus_channel_message_header *hdr;
	enum vmbus_channel_message_type msgtype;
	const struct vmbus_channel_message_table_entry *entry;
	struct onmessage_work_context *ctx;
	__u8 payload_size;
	u32 message_type;

	/*
	 * 'enum vmbus_channel_message_type' is supposed to always be 'u32' as
	 * it is being used in 'struct vmbus_channel_message_header' definition
	 * which is supposed to match hypervisor ABI.
	 */
	BUILD_BUG_ON(sizeof(enum vmbus_channel_message_type) != sizeof(u32));

	/*
	 * Since the message is in memory shared with the host, an erroneous or
	 * malicious Hyper-V could modify the message while vmbus_on_msg_dpc()
	 * or individual message handlers are executing; to prevent this, copy
	 * the message into private memory.
	 */
	memcpy(&msg_copy, msg, sizeof(struct hv_message));

	message_type = msg_copy.header.message_type;
	if (message_type == HVMSG_NONE)
		/* no msg */
		return;

	hdr = (struct vmbus_channel_message_header *)msg_copy.u.payload;
	msgtype = hdr->msgtype;

	trace_vmbus_on_msg_dpc(hdr);

	if (msgtype >= CHANNELMSG_COUNT) {
		WARN_ONCE(1, "unknown msgtype=%d\n", msgtype);
		goto msg_handled;
	}

	payload_size = msg_copy.header.payload_size;
	if (payload_size > HV_MESSAGE_PAYLOAD_BYTE_COUNT) {
		WARN_ONCE(1, "payload size is too large (%d)\n", payload_size);
		goto msg_handled;
	}

	entry = &channel_message_table[msgtype];

	if (!entry->message_handler)
		goto msg_handled;

	if (payload_size < entry->min_payload_len) {
		WARN_ONCE(1, "message too short: msgtype=%d len=%d\n", msgtype, payload_size);
		goto msg_handled;
	}

	if (entry->handler_type == VMHT_BLOCKING) {
		ctx = kmalloc(sizeof(*ctx) + payload_size, GFP_ATOMIC);
		if (ctx == NULL)
			return;

		INIT_WORK(&ctx->work, vmbus_onmessage_work);
		memcpy(&ctx->msg, &msg_copy, sizeof(msg->header) + payload_size);

		/*
		 * The host can generate a rescind message while we
		 * may still be handling the original offer. We deal with
		 * this condition by relying on the synchronization provided
		 * by offer_in_progress and by channel_mutex. See also the
		 * inline comments in vmbus_onoffer_rescind().
		 */
		switch (msgtype) {
		case CHANNELMSG_RESCIND_CHANNELOFFER:
			/*
			 * If we are handling the rescind message;
			 * schedule the work on the global work queue.
			 *
			 * The OFFER message and the RESCIND message should
			 * not be handled by the same serialized work queue,
			 * because the OFFER handler may call vmbus_open(),
			 * which tries to open the channel by sending an
			 * OPEN_CHANNEL message to the host and waits for
			 * the host's response; however, if the host has
			 * rescinded the channel before it receives the
			 * OPEN_CHANNEL message, the host just silently
			 * ignores the OPEN_CHANNEL message; as a result,
			 * the guest's OFFER handler hangs forever, if we
			 * handle the RESCIND message in the same serialized
			 * work queue: the RESCIND handler can not start to
			 * run before the OFFER handler finishes.
			 */
			schedule_work(&ctx->work);
			break;

		case CHANNELMSG_OFFERCHANNEL:
			/*
			 * The host sends the offer message of a given channel
			 * before sending the rescind message of the same
			 * channel. These messages are sent to the guest's
			 * connect CPU; the guest then starts processing them
			 * in the tasklet handler on this CPU:
			 *
			 * VMBUS_CONNECT_CPU
			 *
			 * [vmbus_on_msg_dpc()]
			 * atomic_inc()  // CHANNELMSG_OFFERCHANNEL
			 * queue_work()
			 * ...
			 * [vmbus_on_msg_dpc()]
			 * schedule_work()  // CHANNELMSG_RESCIND_CHANNELOFFER
			 *
			 * We rely on the memory-ordering properties of the
			 * queue_work() and schedule_work() primitives, which
			 * guarantee that the atomic increment will be visible
			 * to the CPUs which will execute the offer & rescind
			 * works by the time these works will start execution.
			 */
			atomic_inc(&vmbus_connection.offer_in_progress);
			fallthrough;

		default:
			queue_work(vmbus_connection.work_queue, &ctx->work);
		}
	} else
		entry->message_handler(hdr);

msg_handled:
	vmbus_signal_eom(msg, message_type);
}
#ifdef CONFIG_PM_SLEEP
/*
 * Fake RESCIND_CHANNEL messages to clean up hv_sock channels by force for
 * hibernation, because hv_sock connections can not persist across hibernation.
 */
static void vmbus_force_channel_rescinded(struct vmbus_channel *channel)
{
	struct onmessage_work_context *ctx;
	struct vmbus_channel_rescind_offer *rescind;

	WARN_ON(!is_hvsock_channel(channel));

	/*
	 * Allocation size is small and the allocation should really not fail,
	 * otherwise the state of the hv_sock connections ends up in limbo.
	 */
	ctx = kzalloc(sizeof(*ctx) + sizeof(*rescind),
		      GFP_KERNEL | __GFP_NOFAIL);

	/*
	 * So far, these are not really used by Linux. Just set them to the
	 * reasonable values conforming to the definitions of the fields.
	 */
	ctx->msg.header.message_type = 1;
	ctx->msg.header.payload_size = sizeof(*rescind);

	/* These values are actually used by Linux. */
	rescind = (struct vmbus_channel_rescind_offer *)ctx->msg.payload;
	rescind->header.msgtype = CHANNELMSG_RESCIND_CHANNELOFFER;
	rescind->child_relid = channel->offermsg.child_relid;

	INIT_WORK(&ctx->work, vmbus_onmessage_work);

	queue_work(vmbus_connection.work_queue, &ctx->work);
}
#endif /* CONFIG_PM_SLEEP */
/*
 * Schedule all channels with events pending
 */
static void vmbus_chan_sched(struct hv_per_cpu_context *hv_cpu)
{
	unsigned long *recv_int_page;
	u32 maxbits, relid;

	if (vmbus_proto_version < VERSION_WIN8) {
		maxbits = MAX_NUM_CHANNELS_SUPPORTED;
		recv_int_page = vmbus_connection.recv_int_page;
	} else {
		/*
		 * When the host is win8 and beyond, the event page
		 * can be directly checked to get the id of the channel
		 * that has the interrupt pending.
		 */
		void *page_addr = hv_cpu->synic_event_page;
		union hv_synic_event_flags *event
			= (union hv_synic_event_flags *)page_addr +
						 VMBUS_MESSAGE_SINT;

		maxbits = HV_EVENT_FLAGS_COUNT;
		recv_int_page = event->flags;
	}

	if (unlikely(!recv_int_page))
		return;

	for_each_set_bit(relid, recv_int_page, maxbits) {
		void (*callback_fn)(void *context);
		struct vmbus_channel *channel;

		if (!sync_test_and_clear_bit(relid, recv_int_page))
			continue;

		/* Special case - vmbus channel protocol msg */
		if (relid == 0)
			continue;

		/*
		 * Pairs with the kfree_rcu() in vmbus_chan_release().
		 * Guarantees that the channel data structure doesn't
		 * get freed while the channel pointer below is being
		 * dereferenced.
		 */
		rcu_read_lock();

		/* Find channel based on relid */
		channel = relid2channel(relid);
		if (channel == NULL)
			goto sched_unlock_rcu;

		if (channel->rescind)
			goto sched_unlock_rcu;

		/*
		 * Make sure that the ring buffer data structure doesn't get
		 * freed while we dereference the ring buffer pointer. Test
		 * for the channel's onchannel_callback being NULL within a
		 * sched_lock critical section. See also the inline comments
		 * in vmbus_reset_channel_cb().
		 */
		spin_lock(&channel->sched_lock);

		callback_fn = channel->onchannel_callback;
		if (unlikely(callback_fn == NULL))
			goto sched_unlock;

		trace_vmbus_chan_sched(channel);

		++channel->interrupts;

		switch (channel->callback_mode) {
		case HV_CALL_ISR:
			(*callback_fn)(channel->channel_callback_context);
			break;

		case HV_CALL_BATCHED:
			hv_begin_read(&channel->inbound);
			fallthrough;
		case HV_CALL_DIRECT:
			tasklet_schedule(&channel->callback_event);
		}

sched_unlock:
		spin_unlock(&channel->sched_lock);
sched_unlock_rcu:
		rcu_read_unlock();
	}
}
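/*
 * Note on the callback modes dispatched above: HV_CALL_ISR runs the
 * driver's callback directly in this interrupt context; HV_CALL_BATCHED
 * first masks further ring-buffer interrupts via hv_begin_read() and then,
 * like HV_CALL_DIRECT, defers the callback to the channel's tasklet.
 */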
static void vmbus_isr(void)
{
	struct hv_per_cpu_context *hv_cpu
		= this_cpu_ptr(hv_context.cpu_context);
	void *page_addr = hv_cpu->synic_event_page;
	struct hv_message *msg;
	union hv_synic_event_flags *event;
	bool handled = false;

	if (unlikely(page_addr == NULL))
		return;

	event = (union hv_synic_event_flags *)page_addr +
					 VMBUS_MESSAGE_SINT;
	/*
	 * Check for events before checking for messages. This is the order
	 * in which events and messages are checked in Windows guests on
	 * Hyper-V, and the Windows team suggested we do the same.
	 */

	if ((vmbus_proto_version == VERSION_WS2008) ||
	    (vmbus_proto_version == VERSION_WIN7)) {

		/* Since we are a child, we only need to check bit 0 */
		if (sync_test_and_clear_bit(0, event->flags))
			handled = true;
	} else {
		/*
		 * Our host is win8 or above. The signaling mechanism
		 * has changed and we can directly look at the event page.
		 * If bit n is set then we have an interrupt on the channel
		 * whose id is n.
		 */
		handled = true;
	}

	if (handled)
		vmbus_chan_sched(hv_cpu);

	page_addr = hv_cpu->synic_message_page;
	msg = (struct hv_message *)page_addr + VMBUS_MESSAGE_SINT;

	/* Check if there are actual msgs to be processed */
	if (msg->header.message_type != HVMSG_NONE) {
		if (msg->header.message_type == HVMSG_TIMER_EXPIRED) {
			hv_stimer0_isr();
			vmbus_signal_eom(msg, HVMSG_TIMER_EXPIRED);
		} else
			tasklet_schedule(&hv_cpu->msg_dpc);
	}

	add_interrupt_randomness(hv_get_vector(), 0);
}
/*
 * Callback from kmsg_dump. Grab as much as possible from the end of the kmsg
 * buffer and call into Hyper-V to transfer the data.
 */
static void hv_kmsg_dump(struct kmsg_dumper *dumper,
			 enum kmsg_dump_reason reason)
{
	size_t bytes_written;
	phys_addr_t panic_pa;

	/* We are only interested in panics. */
	if ((reason != KMSG_DUMP_PANIC) || (!sysctl_record_panic_msg))
		return;

	panic_pa = virt_to_phys(hv_panic_page);

	/*
	 * Write dump contents to the page. No need to synchronize; panic should
	 * be single-threaded.
	 */
	kmsg_dump_get_buffer(dumper, false, hv_panic_page, HV_HYP_PAGE_SIZE,
			     &bytes_written);
	if (bytes_written)
		hyperv_report_panic_msg(panic_pa, bytes_written);
}

static struct kmsg_dumper hv_kmsg_dumper = {
	.dump = hv_kmsg_dump,
};

static void hv_kmsg_dump_register(void)
{
	int ret;

	hv_panic_page = hv_alloc_hyperv_zeroed_page();
	if (!hv_panic_page) {
		pr_err("Hyper-V: panic message page memory allocation failed\n");
		return;
	}

	ret = kmsg_dump_register(&hv_kmsg_dumper);
	if (ret) {
		pr_err("Hyper-V: kmsg dump register error 0x%x\n", ret);
		hv_free_hyperv_page((unsigned long)hv_panic_page);
		hv_panic_page = NULL;
	}
}
static struct ctl_table_header *hv_ctl_table_hdr;

/*
 * sysctl option to allow the user to control whether kmsg data should be
 * reported to Hyper-V on panic.
 */
static struct ctl_table hv_ctl_table[] = {
	{
		.procname = "hyperv_record_panic_msg",
		.data = &sysctl_record_panic_msg,
		.maxlen = sizeof(int),
		.mode = 0644,
		.proc_handler = proc_dointvec_minmax,
		.extra1 = SYSCTL_ZERO,
		.extra2 = SYSCTL_ONE
	},
	{}
};

static struct ctl_table hv_root_table[] = {
	{
		.procname = "kernel",
		.mode = 0555,
		.child = hv_ctl_table
	},
	{}
};
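/*
 * For illustration: the resulting knob can be toggled at runtime with
 *
 *	echo 0 > /proc/sys/kernel/hyperv_record_panic_msg	# disable
 *	echo 1 > /proc/sys/kernel/hyperv_record_panic_msg	# enable (default)
 */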
/*
 * vmbus_bus_init - Main vmbus driver initialization routine.
 *
 * Here, we
 *	- initialize the vmbus driver context
 *	- invoke the vmbus hv main init routine
 *	- retrieve the channel offers
 */
static int vmbus_bus_init(void)
{
	int ret;

	ret = hv_init();
	if (ret != 0) {
		pr_err("Unable to initialize the hypervisor - 0x%x\n", ret);
		return ret;
	}

	ret = bus_register(&hv_bus);
	if (ret)
		return ret;

	ret = hv_setup_vmbus_irq(vmbus_irq, vmbus_isr);
	if (ret)
		goto err_setup;

	ret = hv_synic_alloc();
	if (ret)
		goto err_alloc;

	/*
	 * Initialize the per-cpu interrupt state and stimer state.
	 * Then connect to the host.
	 */
	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "hyperv/vmbus:online",
				hv_synic_init, hv_synic_cleanup);
	if (ret < 0)
		goto err_cpuhp;
	hyperv_cpuhp_online = ret;

	ret = vmbus_connect();
	if (ret)
		goto err_connect;

	/*
	 * Only register if the crash MSRs are available
	 */
	if (ms_hyperv.misc_features & HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE) {
		u64 hyperv_crash_ctl;
		/*
		 * Sysctl registration is not fatal, since by default
		 * reporting is enabled.
		 */
		hv_ctl_table_hdr = register_sysctl_table(hv_root_table);
		if (!hv_ctl_table_hdr)
			pr_err("Hyper-V: sysctl table register error\n");

		/*
		 * Register for panic kmsg callback only if the right
		 * capability is supported by the hypervisor.
		 */
		hv_get_crash_ctl(hyperv_crash_ctl);
		if (hyperv_crash_ctl & HV_CRASH_CTL_CRASH_NOTIFY_MSG)
			hv_kmsg_dump_register();

		register_die_notifier(&hyperv_die_block);
	}

	/*
	 * Always register the panic notifier because we need to unload
	 * the VMbus channel connection to prevent any VMbus
	 * activity after the VM panics.
	 */
	atomic_notifier_chain_register(&panic_notifier_list,
				       &hyperv_panic_block);

	vmbus_request_offers();

	return 0;

err_connect:
	cpuhp_remove_state(hyperv_cpuhp_online);
err_cpuhp:
	hv_synic_free();
err_alloc:
	hv_remove_vmbus_irq();
err_setup:
	bus_unregister(&hv_bus);
	unregister_sysctl_table(hv_ctl_table_hdr);
	hv_ctl_table_hdr = NULL;
	return ret;
}
/**
 * __vmbus_driver_register() - Register a vmbus's driver
 * @hv_driver: Pointer to driver structure you want to register
 * @owner: owner module of the drv
 * @mod_name: module name string
 *
 * Registers the given driver with Linux through the 'driver_register()' call
 * and sets up the hyper-v vmbus handling for this driver.
 * It will return the state of the 'driver_register()' call.
 */
int __vmbus_driver_register(struct hv_driver *hv_driver, struct module *owner, const char *mod_name)
{
	int ret;

	pr_info("registering driver %s\n", hv_driver->name);

	ret = vmbus_exists();
	if (ret < 0)
		return ret;

	hv_driver->driver.name = hv_driver->name;
	hv_driver->driver.owner = owner;
	hv_driver->driver.mod_name = mod_name;
	hv_driver->driver.bus = &hv_bus;

	spin_lock_init(&hv_driver->dynids.lock);
	INIT_LIST_HEAD(&hv_driver->dynids.list);

	ret = driver_register(&hv_driver->driver);

	return ret;
}
EXPORT_SYMBOL_GPL(__vmbus_driver_register);
/**
 * vmbus_driver_unregister() - Unregister a vmbus's driver
 * @hv_driver: Pointer to driver structure you want to
 *             un-register
 *
 * Un-register the given driver that was previously registered with a call to
 * vmbus_driver_register()
 */
void vmbus_driver_unregister(struct hv_driver *hv_driver)
{
	pr_info("unregistering driver %s\n", hv_driver->name);

	if (!vmbus_exists()) {
		driver_unregister(&hv_driver->driver);
		vmbus_free_dynids(hv_driver);
	}
}
EXPORT_SYMBOL_GPL(vmbus_driver_unregister);
/*
 * Called when last reference to channel is gone.
 */
static void vmbus_chan_release(struct kobject *kobj)
{
	struct vmbus_channel *channel
		= container_of(kobj, struct vmbus_channel, kobj);

	kfree_rcu(channel, rcu);
}

struct vmbus_chan_attribute {
	struct attribute attr;
	ssize_t (*show)(struct vmbus_channel *chan, char *buf);
	ssize_t (*store)(struct vmbus_channel *chan,
			 const char *buf, size_t count);
};
#define VMBUS_CHAN_ATTR(_name, _mode, _show, _store) \
	struct vmbus_chan_attribute chan_attr_##_name \
		= __ATTR(_name, _mode, _show, _store)
#define VMBUS_CHAN_ATTR_RW(_name) \
	struct vmbus_chan_attribute chan_attr_##_name = __ATTR_RW(_name)
#define VMBUS_CHAN_ATTR_RO(_name) \
	struct vmbus_chan_attribute chan_attr_##_name = __ATTR_RO(_name)
#define VMBUS_CHAN_ATTR_WO(_name) \
	struct vmbus_chan_attribute chan_attr_##_name = __ATTR_WO(_name)
static ssize_t vmbus_chan_attr_show(struct kobject *kobj,
				    struct attribute *attr, char *buf)
{
	const struct vmbus_chan_attribute *attribute
		= container_of(attr, struct vmbus_chan_attribute, attr);
	struct vmbus_channel *chan
		= container_of(kobj, struct vmbus_channel, kobj);

	if (!attribute->show)
		return -EIO;

	return attribute->show(chan, buf);
}

static ssize_t vmbus_chan_attr_store(struct kobject *kobj,
				     struct attribute *attr, const char *buf,
				     size_t count)
{
	const struct vmbus_chan_attribute *attribute
		= container_of(attr, struct vmbus_chan_attribute, attr);
	struct vmbus_channel *chan
		= container_of(kobj, struct vmbus_channel, kobj);

	if (!attribute->store)
		return -EIO;

	return attribute->store(chan, buf, count);
}

static const struct sysfs_ops vmbus_chan_sysfs_ops = {
	.show = vmbus_chan_attr_show,
	.store = vmbus_chan_attr_store,
};
static ssize_t out_mask_show(struct vmbus_channel *channel, char *buf)
{
	struct hv_ring_buffer_info *rbi = &channel->outbound;
	ssize_t ret;

	mutex_lock(&rbi->ring_buffer_mutex);
	if (!rbi->ring_buffer) {
		mutex_unlock(&rbi->ring_buffer_mutex);
		return -EINVAL;
	}

	ret = sprintf(buf, "%u\n", rbi->ring_buffer->interrupt_mask);
	mutex_unlock(&rbi->ring_buffer_mutex);
	return ret;
}
static VMBUS_CHAN_ATTR_RO(out_mask);

static ssize_t in_mask_show(struct vmbus_channel *channel, char *buf)
{
	struct hv_ring_buffer_info *rbi = &channel->inbound;
	ssize_t ret;

	mutex_lock(&rbi->ring_buffer_mutex);
	if (!rbi->ring_buffer) {
		mutex_unlock(&rbi->ring_buffer_mutex);
		return -EINVAL;
	}

	ret = sprintf(buf, "%u\n", rbi->ring_buffer->interrupt_mask);
	mutex_unlock(&rbi->ring_buffer_mutex);
	return ret;
}
static VMBUS_CHAN_ATTR_RO(in_mask);

static ssize_t read_avail_show(struct vmbus_channel *channel, char *buf)
{
	struct hv_ring_buffer_info *rbi = &channel->inbound;
	ssize_t ret;

	mutex_lock(&rbi->ring_buffer_mutex);
	if (!rbi->ring_buffer) {
		mutex_unlock(&rbi->ring_buffer_mutex);
		return -EINVAL;
	}

	ret = sprintf(buf, "%u\n", hv_get_bytes_to_read(rbi));
	mutex_unlock(&rbi->ring_buffer_mutex);
	return ret;
}
static VMBUS_CHAN_ATTR_RO(read_avail);

static ssize_t write_avail_show(struct vmbus_channel *channel, char *buf)
{
	struct hv_ring_buffer_info *rbi = &channel->outbound;
	ssize_t ret;

	mutex_lock(&rbi->ring_buffer_mutex);
	if (!rbi->ring_buffer) {
		mutex_unlock(&rbi->ring_buffer_mutex);
		return -EINVAL;
	}

	ret = sprintf(buf, "%u\n", hv_get_bytes_to_write(rbi));
	mutex_unlock(&rbi->ring_buffer_mutex);
	return ret;
}
static VMBUS_CHAN_ATTR_RO(write_avail);
static ssize_t target_cpu_show(struct vmbus_channel *channel, char *buf)
{
	return sprintf(buf, "%u\n", channel->target_cpu);
}

static ssize_t target_cpu_store(struct vmbus_channel *channel,
				const char *buf, size_t count)
{
	u32 target_cpu, origin_cpu;
	ssize_t ret = count;

	if (vmbus_proto_version < VERSION_WIN10_V4_1)
		return -EIO;

	if (sscanf(buf, "%uu", &target_cpu) != 1)
		return -EIO;

	/* Validate target_cpu for the cpumask_test_cpu() operation below. */
	if (target_cpu >= nr_cpumask_bits)
		return -EINVAL;

	/* No CPUs should come up or down during this. */
	cpus_read_lock();

	if (!cpu_online(target_cpu)) {
		cpus_read_unlock();
		return -EINVAL;
	}

	/*
	 * Synchronizes target_cpu_store() and channel closure:
	 *
	 * { Initially: state = CHANNEL_OPENED }
	 *
	 * CPU1				CPU2
	 *
	 * [target_cpu_store()]		[vmbus_disconnect_ring()]
	 *
	 * LOCK channel_mutex		LOCK channel_mutex
	 * LOAD r1 = state		LOAD r2 = state
	 * IF (r1 == CHANNEL_OPENED)	IF (r2 == CHANNEL_OPENED)
	 *   SEND MODIFYCHANNEL		  STORE state = CHANNEL_OPEN
	 *   [...]			  SEND CLOSECHANNEL
	 * UNLOCK channel_mutex		UNLOCK channel_mutex
	 *
	 * Forbids: r1 == r2 == CHANNEL_OPENED (i.e., CPU1's LOCK precedes
	 *		CPU2's LOCK) && CPU2's SEND precedes CPU1's SEND
	 *
	 * Note. The host processes the channel messages "sequentially", in
	 * the order in which they are received on a per-partition basis.
	 */
	mutex_lock(&vmbus_connection.channel_mutex);

	/*
	 * Hyper-V will ignore MODIFYCHANNEL messages for "non-open" channels;
	 * avoid sending the message and fail here for such channels.
	 */
	if (channel->state != CHANNEL_OPENED_STATE) {
		ret = -EIO;
		goto cpu_store_unlock;
	}

	origin_cpu = channel->target_cpu;
	if (target_cpu == origin_cpu)
		goto cpu_store_unlock;

	if (vmbus_send_modifychannel(channel->offermsg.child_relid,
				     hv_cpu_number_to_vp_number(target_cpu))) {
		ret = -EIO;
		goto cpu_store_unlock;
	}

	/*
	 * Warning. At this point, there is *no* guarantee that the host will
	 * have successfully processed the vmbus_send_modifychannel() request.
	 * See the header comment of vmbus_send_modifychannel() for more info.
	 *
	 * Lags in the processing of the above vmbus_send_modifychannel() can
	 * result in missed interrupts if the "old" target CPU is taken offline
	 * before Hyper-V starts sending interrupts to the "new" target CPU.
	 * But apart from this offlining scenario, the code tolerates such
	 * lags. It will function correctly even if a channel interrupt comes
	 * in on a CPU that is different from the channel target_cpu value.
	 */

	channel->target_cpu = target_cpu;

	/* See init_vp_index(). */
	if (hv_is_perf_channel(channel))
		hv_update_alloced_cpus(origin_cpu, target_cpu);

	/* Currently set only for storvsc channels. */
	if (channel->change_target_cpu_callback) {
		(*channel->change_target_cpu_callback)(channel,
				origin_cpu, target_cpu);
	}

cpu_store_unlock:
	mutex_unlock(&vmbus_connection.channel_mutex);
	cpus_read_unlock();
	return ret;
}
static VMBUS_CHAN_ATTR(cpu, 0644, target_cpu_show, target_cpu_store);
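/*
 * For illustration: the target CPU of an open channel can be changed from
 * user space, e.g.
 *
 *	echo 4 > /sys/bus/vmbus/devices/<instance guid>/channels/<relid>/cpu
 *
 * subject to the checks above (host support, CPU online, channel open).
 */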
static ssize_t channel_pending_show(struct vmbus_channel *channel,
				    char *buf)
{
	return sprintf(buf, "%d\n",
		       channel_pending(channel,
				       vmbus_connection.monitor_pages[1]));
}
static VMBUS_CHAN_ATTR(pending, 0444, channel_pending_show, NULL);

static ssize_t channel_latency_show(struct vmbus_channel *channel,
				    char *buf)
{
	return sprintf(buf, "%d\n",
		       channel_latency(channel,
				       vmbus_connection.monitor_pages[1]));
}
static VMBUS_CHAN_ATTR(latency, 0444, channel_latency_show, NULL);

static ssize_t channel_interrupts_show(struct vmbus_channel *channel, char *buf)
{
	return sprintf(buf, "%llu\n", channel->interrupts);
}
static VMBUS_CHAN_ATTR(interrupts, 0444, channel_interrupts_show, NULL);

static ssize_t channel_events_show(struct vmbus_channel *channel, char *buf)
{
	return sprintf(buf, "%llu\n", channel->sig_events);
}
static VMBUS_CHAN_ATTR(events, 0444, channel_events_show, NULL);
static ssize_t channel_intr_in_full_show(struct vmbus_channel *channel,
					 char *buf)
{
	return sprintf(buf, "%llu\n",
		       (unsigned long long)channel->intr_in_full);
}
static VMBUS_CHAN_ATTR(intr_in_full, 0444, channel_intr_in_full_show, NULL);

static ssize_t channel_intr_out_empty_show(struct vmbus_channel *channel,
					   char *buf)
{
	return sprintf(buf, "%llu\n",
		       (unsigned long long)channel->intr_out_empty);
}
static VMBUS_CHAN_ATTR(intr_out_empty, 0444, channel_intr_out_empty_show, NULL);

static ssize_t channel_out_full_first_show(struct vmbus_channel *channel,
					   char *buf)
{
	return sprintf(buf, "%llu\n",
		       (unsigned long long)channel->out_full_first);
}
static VMBUS_CHAN_ATTR(out_full_first, 0444, channel_out_full_first_show, NULL);

static ssize_t channel_out_full_total_show(struct vmbus_channel *channel,
					   char *buf)
{
	return sprintf(buf, "%llu\n",
		       (unsigned long long)channel->out_full_total);
}
static VMBUS_CHAN_ATTR(out_full_total, 0444, channel_out_full_total_show, NULL);

static ssize_t subchannel_monitor_id_show(struct vmbus_channel *channel,
					  char *buf)
{
	return sprintf(buf, "%u\n", channel->offermsg.monitorid);
}
static VMBUS_CHAN_ATTR(monitor_id, 0444, subchannel_monitor_id_show, NULL);

static ssize_t subchannel_id_show(struct vmbus_channel *channel,
				  char *buf)
{
	return sprintf(buf, "%u\n",
		       channel->offermsg.offer.sub_channel_index);
}
static VMBUS_CHAN_ATTR_RO(subchannel_id);
static struct attribute *vmbus_chan_attrs[] = {
	&chan_attr_out_mask.attr,
	&chan_attr_in_mask.attr,
	&chan_attr_read_avail.attr,
	&chan_attr_write_avail.attr,
	&chan_attr_cpu.attr,
	&chan_attr_pending.attr,
	&chan_attr_latency.attr,
	&chan_attr_interrupts.attr,
	&chan_attr_events.attr,
	&chan_attr_intr_in_full.attr,
	&chan_attr_intr_out_empty.attr,
	&chan_attr_out_full_first.attr,
	&chan_attr_out_full_total.attr,
	&chan_attr_monitor_id.attr,
	&chan_attr_subchannel_id.attr,
	NULL
};
/*
 * Channel-level attribute_group callback function. Returns the permission for
 * each attribute, and returns 0 if an attribute is not visible.
 */
static umode_t vmbus_chan_attr_is_visible(struct kobject *kobj,
					  struct attribute *attr, int idx)
{
	const struct vmbus_channel *channel =
		container_of(kobj, struct vmbus_channel, kobj);

	/* Hide the monitor attributes if the monitor mechanism is not used. */
	if (!channel->offermsg.monitor_allocated &&
	    (attr == &chan_attr_pending.attr ||
	     attr == &chan_attr_latency.attr ||
	     attr == &chan_attr_monitor_id.attr))
		return 0;

	return attr->mode;
}

static struct attribute_group vmbus_chan_group = {
	.attrs = vmbus_chan_attrs,
	.is_visible = vmbus_chan_attr_is_visible
};

static struct kobj_type vmbus_chan_ktype = {
	.sysfs_ops = &vmbus_chan_sysfs_ops,
	.release = vmbus_chan_release,
};
/*
 * vmbus_add_channel_kobj - setup a sub-directory under device/channels
 */
int vmbus_add_channel_kobj(struct hv_device *dev, struct vmbus_channel *channel)
{
	const struct device *device = &dev->device;
	struct kobject *kobj = &channel->kobj;
	u32 relid = channel->offermsg.child_relid;
	int ret;

	kobj->kset = dev->channels_kset;
	ret = kobject_init_and_add(kobj, &vmbus_chan_ktype, NULL,
				   "%u", relid);
	if (ret)
		return ret;

	ret = sysfs_create_group(kobj, &vmbus_chan_group);

	if (ret) {
		/*
		 * The calling functions' error handling paths will cleanup the
		 * empty channel directory.
		 */
		dev_err(device, "Unable to set up channel sysfs files\n");
		return ret;
	}

	kobject_uevent(kobj, KOBJ_ADD);

	return 0;
}
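/*
 * The result is a per-channel sysfs directory such as
 * /sys/bus/vmbus/devices/<instance guid>/channels/<relid>/, populated with
 * the vmbus_chan_group attributes defined above.
 */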
/*
 * vmbus_remove_channel_attr_group - remove the channel's attribute group
 */
void vmbus_remove_channel_attr_group(struct vmbus_channel *channel)
{
	sysfs_remove_group(&channel->kobj, &vmbus_chan_group);
}
/*
 * vmbus_device_create - Creates and registers a new child device
 * on the vmbus.
 */
struct hv_device *vmbus_device_create(const guid_t *type,
				      const guid_t *instance,
				      struct vmbus_channel *channel)
{
	struct hv_device *child_device_obj;

	child_device_obj = kzalloc(sizeof(struct hv_device), GFP_KERNEL);
	if (!child_device_obj) {
		pr_err("Unable to allocate device object for child device\n");
		return NULL;
	}

	child_device_obj->channel = channel;
	guid_copy(&child_device_obj->dev_type, type);
	guid_copy(&child_device_obj->dev_instance, instance);
	child_device_obj->vendor_id = 0x1414; /* MSFT vendor ID */

	return child_device_obj;
}
/*
 * vmbus_device_register - Register the child device
 */
int vmbus_device_register(struct hv_device *child_device_obj)
{
	struct kobject *kobj = &child_device_obj->device.kobj;
	int ret;

	dev_set_name(&child_device_obj->device, "%pUl",
		     &child_device_obj->channel->offermsg.offer.if_instance);

	child_device_obj->device.bus = &hv_bus;
	child_device_obj->device.parent = &hv_acpi_dev->dev;
	child_device_obj->device.release = vmbus_device_release;

	/*
	 * Register with the LDM. This will kick off the driver/device
	 * binding...which will eventually call vmbus_match() and vmbus_probe()
	 */
	ret = device_register(&child_device_obj->device);
	if (ret) {
		pr_err("Unable to register child device\n");
		return ret;
	}

	child_device_obj->channels_kset = kset_create_and_add("channels",
							      NULL, kobj);
	if (!child_device_obj->channels_kset) {
		ret = -ENOMEM;
		goto err_dev_unregister;
	}

	ret = vmbus_add_channel_kobj(child_device_obj,
				     child_device_obj->channel);
	if (ret) {
		pr_err("Unable to register primary channel\n");
		goto err_kset_unregister;
	}
	hv_debug_add_dev_dir(child_device_obj);

	return 0;

err_kset_unregister:
	kset_unregister(child_device_obj->channels_kset);

err_dev_unregister:
	device_unregister(&child_device_obj->device);
	return ret;
}
/*
 * vmbus_device_unregister - Remove the specified child device
 * from the vmbus.
 */
void vmbus_device_unregister(struct hv_device *device_obj)
{
	pr_debug("child device %s unregistered\n",
		 dev_name(&device_obj->device));

	kset_unregister(device_obj->channels_kset);

	/*
	 * Kick off the process of unregistering the device.
	 * This will call vmbus_remove() and eventually vmbus_device_release()
	 */
	device_unregister(&device_obj->device);
}
/*
 * VMBUS is an acpi enumerated device. Get the information we
 * need from DSDT.
 */
#define VTPM_BASE_ADDRESS 0xfed40000
static acpi_status vmbus_walk_resources(struct acpi_resource *res, void *ctx)
{
	resource_size_t start = 0;
	resource_size_t end = 0;
	struct resource *new_res;
	struct resource **old_res = &hyperv_mmio;
	struct resource **prev_res = NULL;
	struct resource r;

	switch (res->type) {

	/*
	 * "Address" descriptors are for bus windows. Ignore
	 * "memory" descriptors, which are for registers on
	 * devices.
	 */
	case ACPI_RESOURCE_TYPE_ADDRESS32:
		start = res->data.address32.address.minimum;
		end = res->data.address32.address.maximum;
		break;

	case ACPI_RESOURCE_TYPE_ADDRESS64:
		start = res->data.address64.address.minimum;
		end = res->data.address64.address.maximum;
		break;

	/*
	 * The IRQ information is needed only on ARM64, which Hyper-V
	 * sets up in the extended format. IRQ information is present
	 * on x86/x64 in the non-extended format but it is not used by
	 * Linux. So don't bother checking for the non-extended format.
	 */
	case ACPI_RESOURCE_TYPE_EXTENDED_IRQ:
		if (!acpi_dev_resource_interrupt(res, 0, &r)) {
			pr_err("Unable to parse Hyper-V ACPI interrupt\n");
			return AE_ERROR;
		}
		/* ARM64 INTID for VMbus */
		vmbus_interrupt = res->data.extended_irq.interrupts[0];
		/* Linux IRQ number */
		vmbus_irq = r.start;
		return AE_OK;

	default:
		/* Unused resource type */
		return AE_OK;

	}
	/*
	 * Ignore ranges that are below 1MB, as they're not
	 * necessary or useful here.
	 */
	if (end < 0x100000)
		return AE_OK;

	new_res = kzalloc(sizeof(*new_res), GFP_ATOMIC);
	if (!new_res)
		return AE_NO_MEMORY;

	/* If this range overlaps the virtual TPM, truncate it. */
	if (end > VTPM_BASE_ADDRESS && start < VTPM_BASE_ADDRESS)
		end = VTPM_BASE_ADDRESS;

	new_res->name = "hyperv mmio";
	new_res->flags = IORESOURCE_MEM;
	new_res->start = start;
	new_res->end = end;

	/*
	 * If two ranges are adjacent, merge them.
	 */
	do {
		if (!*old_res) {
			*old_res = new_res;
			break;
		}

		if (((*old_res)->end + 1) == new_res->start) {
			(*old_res)->end = new_res->end;
			kfree(new_res);
			break;
		}

		if ((*old_res)->start == new_res->end + 1) {
			(*old_res)->start = new_res->start;
			kfree(new_res);
			break;
		}

		if ((*old_res)->start > new_res->end) {
			new_res->sibling = *old_res;
			if (prev_res)
				(*prev_res)->sibling = new_res;
			*old_res = new_res;
			break;
		}

		prev_res = old_res;
		old_res = &(*old_res)->sibling;

	} while (1);

	return AE_OK;
}
static int vmbus_acpi_remove(struct acpi_device *device)
{
	struct resource *cur_res;
	struct resource *next_res;

	if (hyperv_mmio) {
		if (fb_mmio) {
			__release_region(hyperv_mmio, fb_mmio->start,
					 resource_size(fb_mmio));
			fb_mmio = NULL;
		}

		for (cur_res = hyperv_mmio; cur_res; cur_res = next_res) {
			next_res = cur_res->sibling;
			kfree(cur_res);
		}
	}

	return 0;
}
static void vmbus_reserve_fb(void)
{
	int size;
	/*
	 * Make a claim for the frame buffer in the resource tree under the
	 * first node, which will be the one below 4GB. The length seems to
	 * be underreported, particularly in a Generation 1 VM. So start out
	 * reserving a larger area and make it smaller until it succeeds.
	 */

	if (screen_info.lfb_base) {
		if (efi_enabled(EFI_BOOT))
			size = max_t(__u32, screen_info.lfb_size, 0x800000);
		else
			size = max_t(__u32, screen_info.lfb_size, 0x4000000);

		for (; !fb_mmio && (size >= 0x100000); size >>= 1) {
			fb_mmio = __request_region(hyperv_mmio,
						   screen_info.lfb_base, size,
						   fb_mmio_name, 0);
		}
	}
}
2258 * vmbus_allocate_mmio() - Pick a memory-mapped I/O range.
2259 * @new: If successful, supplied a pointer to the
2260 * allocated MMIO space.
2261 * @device_obj: Identifies the caller
2262 * @min: Minimum guest physical address of the
2264 * @max: Maximum guest physical address
2265 * @size: Size of the range to be allocated
2266 * @align: Alignment of the range to be allocated
2267 * @fb_overlap_ok: Whether this allocation can be allowed
2268 * to overlap the video frame buffer.
2270 * This function walks the resources granted to VMBus by the
2271 * _CRS object in the ACPI namespace underneath the parent
2272 * "bridge" whether that's a root PCI bus in the Generation 1
2273 * case or a Module Device in the Generation 2 case. It then
2274 * attempts to allocate from the global MMIO pool in a way that
2275 * matches the constraints supplied in these parameters and by
2278 * Return: 0 on success, -errno on failure
int vmbus_allocate_mmio(struct resource **new, struct hv_device *device_obj,
			resource_size_t min, resource_size_t max,
			resource_size_t size, resource_size_t align,
			bool fb_overlap_ok)
{
	struct resource *iter, *shadow;
	resource_size_t range_min, range_max, start;
	const char *dev_n = dev_name(&device_obj->device);
	int retval;

	retval = -ENXIO;
	mutex_lock(&hyperv_mmio_lock);

	/*
	 * If overlaps with frame buffers are allowed, then first attempt to
	 * make the allocation from within the reserved region. Because it
	 * is already reserved, no shadow allocation is necessary.
	 */
	if (fb_overlap_ok && fb_mmio && !(min > fb_mmio->end) &&
	    !(max < fb_mmio->start)) {
		range_min = fb_mmio->start;
		range_max = fb_mmio->end;
		start = (range_min + align - 1) & ~(align - 1);
		for (; start + size - 1 <= range_max; start += align) {
			*new = request_mem_region_exclusive(start, size, dev_n);
			if (*new) {
				retval = 0;
				goto exit;
			}
		}
	}

	for (iter = hyperv_mmio; iter; iter = iter->sibling) {
		if ((iter->start >= max) || (iter->end <= min))
			continue;

		range_min = iter->start;
		range_max = iter->end;
		start = (range_min + align - 1) & ~(align - 1);
		for (; start + size - 1 <= range_max; start += align) {
			shadow = __request_region(iter, start, size, NULL,
						  IORESOURCE_BUSY);
			if (!shadow)
				continue;

			*new = request_mem_region_exclusive(start, size, dev_n);
			if (*new) {
				shadow->name = (char *)*new;
				retval = 0;
				goto exit;
			}

			__release_region(iter, start, size);
		}
	}

exit:
	mutex_unlock(&hyperv_mmio_lock);
	return retval;
}
EXPORT_SYMBOL_GPL(vmbus_allocate_mmio);
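/*
 * Note on the rounding used above: (range_min + align - 1) & ~(align - 1)
 * rounds range_min up to the next multiple of align, and is only correct
 * when align is a power of two. For example, with range_min = 0xF8001234
 * and align = 0x1000, the first candidate start becomes 0xF8002000.
 */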
/**
 * vmbus_free_mmio() - Free a memory-mapped I/O range.
 * @start:		Base address of region to release.
 * @size:		Size of the range to be released.
 *
 * This function releases anything requested by
 * vmbus_allocate_mmio().
 */
void vmbus_free_mmio(resource_size_t start, resource_size_t size)
{
	struct resource *iter;

	mutex_lock(&hyperv_mmio_lock);
	for (iter = hyperv_mmio; iter; iter = iter->sibling) {
		if ((iter->start >= start + size) || (iter->end <= start))
			continue;

		__release_region(iter, start, size);
	}
	release_mem_region(start, size);
	mutex_unlock(&hyperv_mmio_lock);
}
EXPORT_SYMBOL_GPL(vmbus_free_mmio);
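/*
 * Usage sketch (illustrative only; 'hdev' and the size/alignment values are
 * assumptions, not taken from an in-tree caller):
 *
 *	struct resource *mmio;
 *	int ret;
 *
 *	ret = vmbus_allocate_mmio(&mmio, hdev, 0, -1, 0x100000, 0x1000,
 *				  false);
 *	if (ret)
 *		return ret;
 *	... ioremap(mmio->start, resource_size(mmio)) and use the window ...
 *	vmbus_free_mmio(mmio->start, resource_size(mmio));
 *
 * Passing max = -1 (all ones) places no upper bound on the guest physical
 * address; the pci-hyperv front end sizes its windows in a similar way.
 */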
static int vmbus_acpi_add(struct acpi_device *device)
{
	acpi_status result;
	int ret_val = -ENODEV;
	struct acpi_device *ancestor;

	hv_acpi_dev = device;

	result = acpi_walk_resources(device->handle, METHOD_NAME__CRS,
				     vmbus_walk_resources, NULL);

	if (ACPI_FAILURE(result))
		goto acpi_walk_err;
	/*
	 * Some ancestor of the vmbus acpi device (Gen1 or Gen2
	 * firmware) is the VMOD that has the mmio ranges. Get that.
	 */
	for (ancestor = device->parent; ancestor; ancestor = ancestor->parent) {
		result = acpi_walk_resources(ancestor->handle, METHOD_NAME__CRS,
					     vmbus_walk_resources, NULL);

		if (ACPI_FAILURE(result))
			continue;
		if (hyperv_mmio) {
			vmbus_reserve_fb();
			break;
		}
	}
	ret_val = 0;

acpi_walk_err:
	complete(&probe_event);
	if (ret_val)
		vmbus_acpi_remove(device);
	return ret_val;
}
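/*
 * Illustrative note (the exact ACPI path is an assumption; it varies by
 * firmware): on a Generation 2 VM the VMBus device typically sits under a
 * Module Device node such as \_SB.VMOD, and it is that ancestor's _CRS,
 * not the VMBus node's own, that carries the MMIO windows parsed by
 * vmbus_walk_resources() above.
 */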
#ifdef CONFIG_PM_SLEEP
static int vmbus_bus_suspend(struct device *dev)
{
	struct vmbus_channel *channel, *sc;

	while (atomic_read(&vmbus_connection.offer_in_progress) != 0) {
		/*
		 * We wait here until the completion of any channel
		 * offers that are currently in progress.
		 */
		usleep_range(1000, 2000);
	}

	mutex_lock(&vmbus_connection.channel_mutex);
	list_for_each_entry(channel, &vmbus_connection.chn_list, listentry) {
		if (!is_hvsock_channel(channel))
			continue;

		vmbus_force_channel_rescinded(channel);
	}
	mutex_unlock(&vmbus_connection.channel_mutex);

	/*
	 * Wait until all the sub-channels and hv_sock channels have been
	 * cleaned up. Sub-channels should be destroyed upon suspend, otherwise
	 * they would conflict with the new sub-channels that will be created
	 * in the resume path. hv_sock channels should also be destroyed, but
	 * a hv_sock channel of an established hv_sock connection cannot
	 * really be destroyed since it may still be referenced by the
	 * userspace application, so we just force the hv_sock channel to be
	 * rescinded by vmbus_force_channel_rescinded(), and the userspace
	 * application will thoroughly destroy the channel after hibernation.
	 *
	 * Note: the counter nr_chan_close_on_suspend may never go above 0 if
	 * the VM has no sub-channel and hv_sock channel, e.g. a 1-vCPU VM.
	 */
	if (atomic_read(&vmbus_connection.nr_chan_close_on_suspend) > 0)
		wait_for_completion(&vmbus_connection.ready_for_suspend_event);

	if (atomic_read(&vmbus_connection.nr_chan_fixup_on_resume) != 0) {
		pr_err("Cannot suspend due to a previously failed resume\n");
		return -EBUSY;
	}

	mutex_lock(&vmbus_connection.channel_mutex);

	list_for_each_entry(channel, &vmbus_connection.chn_list, listentry) {
		/*
		 * Remove the channel from the array of channels and invalidate
		 * the channel's relid. Upon resume, vmbus_onoffer() will fix
		 * up the relid (and other fields, if necessary) and add the
		 * channel back to the array.
		 */
		vmbus_channel_unmap_relid(channel);
		channel->offermsg.child_relid = INVALID_RELID;

		if (is_hvsock_channel(channel)) {
			if (!channel->rescind) {
				pr_err("hv_sock channel not rescinded!\n");
				WARN_ON_ONCE(1);
			}
			continue;
		}

		list_for_each_entry(sc, &channel->sc_list, sc_list) {
			pr_err("Sub-channel not deleted!\n");
			WARN_ON_ONCE(1);
		}

		atomic_inc(&vmbus_connection.nr_chan_fixup_on_resume);
	}

	mutex_unlock(&vmbus_connection.channel_mutex);

	vmbus_initiate_unload(false);

	/* Reset the event for the next resume. */
	reinit_completion(&vmbus_connection.ready_for_resume_event);

	return 0;
}
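/*
 * Concrete example of the bookkeeping above (the device mix is assumed for
 * illustration): a VM with a netvsc primary channel, one sub-channel and
 * one connected hv_sock channel would see the sub-channel counted in
 * nr_chan_close_on_suspend and closed before suspend, the hv_sock channel
 * force-rescinded, and only the primary channel counted in
 * nr_chan_fixup_on_resume for relid fixup by vmbus_onoffer() on resume.
 */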
static int vmbus_bus_resume(struct device *dev)
{
	struct vmbus_channel_msginfo *msginfo;
	size_t msgsize;
	int ret;

	/*
	 * We only use the 'vmbus_proto_version', which was in use before
	 * hibernation, to re-negotiate with the host.
	 */
	if (!vmbus_proto_version) {
		pr_err("Invalid proto version = 0x%x\n", vmbus_proto_version);
		return -EINVAL;
	}

	msgsize = sizeof(*msginfo) +
		  sizeof(struct vmbus_channel_initiate_contact);

	msginfo = kzalloc(msgsize, GFP_KERNEL);

	if (msginfo == NULL)
		return -ENOMEM;

	ret = vmbus_negotiate_version(msginfo, vmbus_proto_version);

	kfree(msginfo);

	if (ret != 0)
		return ret;

	WARN_ON(atomic_read(&vmbus_connection.nr_chan_fixup_on_resume) == 0);

	vmbus_request_offers();

	if (wait_for_completion_timeout(
		&vmbus_connection.ready_for_resume_event, 10 * HZ) == 0)
		pr_err("Some vmbus device is missing after suspending?\n");

	/* Reset the event for the next suspend. */
	reinit_completion(&vmbus_connection.ready_for_suspend_event);

	return 0;
}
#else
#define vmbus_bus_suspend NULL
#define vmbus_bus_resume NULL
#endif /* CONFIG_PM_SLEEP */
static const struct acpi_device_id vmbus_acpi_device_ids[] = {
	{"VMBUS", 0},
	{"VMBus", 0},
	{"", 0},
};
MODULE_DEVICE_TABLE(acpi, vmbus_acpi_device_ids);
/*
 * Note: we must use the "noirq" ops, otherwise hibernation cannot work with
 * PCI device assignment, because "pci_dev_pm_ops" uses the "noirq" ops: in
 * the resume path, the pci "noirq" restore op runs before the "non-noirq"
 * ops (see resume_target_kernel() -> dpm_resume_start(), and
 * hibernation_restore() -> dpm_resume_end()). This means vmbus_bus_resume()
 * and pci-hyperv's resume callback must also run via the "noirq" ops.
 *
 * Set suspend_noirq/resume_noirq to NULL for Suspend-to-Idle: see the comment
 * earlier in this file before vmbus_pm.
 */
static const struct dev_pm_ops vmbus_bus_pm = {
	.suspend_noirq	= NULL,
	.resume_noirq	= NULL,
	.freeze_noirq	= vmbus_bus_suspend,
	.thaw_noirq	= vmbus_bus_resume,
	.poweroff_noirq	= vmbus_bus_suspend,
	.restore_noirq	= vmbus_bus_resume
};
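/*
 * Mapping of these callbacks to hibernation phases (standard dev_pm_ops
 * semantics, noted here for reference): freeze_noirq runs while the
 * hibernation image is being created, thaw_noirq after the image has been
 * written out, poweroff_noirq just before the machine powers off, and
 * restore_noirq when booting back into the saved image.
 */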
static struct acpi_driver vmbus_acpi_driver = {
	.name = "vmbus",
	.ids = vmbus_acpi_device_ids,
	.ops = {
		.add = vmbus_acpi_add,
		.remove = vmbus_acpi_remove,
	},
	.drv.pm = &vmbus_bus_pm,
};
static void hv_kexec_handler(void)
{
	hv_stimer_global_cleanup();
	vmbus_initiate_unload(false);
	/* Make sure conn_state is set as hv_synic_cleanup checks for it */
	mb();
	cpuhp_remove_state(hyperv_cpuhp_online);
}
static void hv_crash_handler(struct pt_regs *regs)
{
	int cpu;

	vmbus_initiate_unload(true);
	/*
	 * In crash handler we can't schedule synic cleanup for all CPUs,
	 * doing the cleanup for current CPU only. This should be sufficient
	 * for kdump.
	 */
	cpu = smp_processor_id();
	hv_stimer_cleanup(cpu);
	hv_synic_disable_regs(cpu);
}
static int hv_synic_suspend(void)
{
	/*
	 * When we reach here, all the non-boot CPUs have been offlined.
	 * If we're in a legacy configuration where stimer Direct Mode is
	 * not enabled, the stimers on the non-boot CPUs have been unbound
	 * in hv_synic_cleanup() -> hv_stimer_legacy_cleanup() ->
	 * hv_stimer_cleanup() -> clockevents_unbind_device().
	 *
	 * hv_synic_suspend() only runs on CPU0 with interrupts disabled.
	 * Here we do not call hv_stimer_legacy_cleanup() on CPU0 because:
	 *
	 * 1) it's unnecessary, as interrupts remain disabled between
	 *    syscore_suspend() and syscore_resume(): see create_image() and
	 *    resume_target_kernel();
	 * 2) the stimer on CPU0 is automatically disabled later by
	 *    syscore_suspend() -> timekeeping_suspend() -> tick_suspend()
	 *    -> ... -> clockevents_shutdown() -> ... -> hv_ce_shutdown();
	 * 3) a warning would be triggered if we call
	 *    clockevents_unbind_device(), which may sleep, in an
	 *    interrupts-disabled context.
	 */

	hv_synic_disable_regs(0);

	return 0;
}
static void hv_synic_resume(void)
{
	hv_synic_enable_regs(0);

	/*
	 * Note: we don't need to call hv_stimer_init(0), because the timer
	 * on CPU0 is not unbound in hv_synic_suspend(), and the timer is
	 * automatically re-enabled in timekeeping_resume().
	 */
}

/* The callbacks run only on CPU0, with irqs_disabled. */
static struct syscore_ops hv_synic_syscore_ops = {
	.suspend = hv_synic_suspend,
	.resume = hv_synic_resume,
};
static int __init hv_acpi_init(void)
{
	int ret, t;

	if (!hv_is_hyperv_initialized())
		return -ENODEV;

	if (hv_root_partition)
		return 0;

	init_completion(&probe_event);

	/*
	 * Get ACPI resources first.
	 */
	ret = acpi_bus_register_driver(&vmbus_acpi_driver);

	if (ret)
		return ret;

	t = wait_for_completion_timeout(&probe_event, 5*HZ);
	if (t == 0) {
		ret = -ETIMEDOUT;
		goto cleanup;
	}
	hv_debug_init();

	ret = vmbus_bus_init();
	if (ret)
		goto cleanup;

	hv_setup_kexec_handler(hv_kexec_handler);
	hv_setup_crash_handler(hv_crash_handler);

	register_syscore_ops(&hv_synic_syscore_ops);

	return 0;

cleanup:
	acpi_bus_unregister_driver(&vmbus_acpi_driver);
	hv_acpi_dev = NULL;
	return ret;
}
static void __exit vmbus_exit(void)
{
	int cpu;

	unregister_syscore_ops(&hv_synic_syscore_ops);

	hv_remove_kexec_handler();
	hv_remove_crash_handler();
	vmbus_connection.conn_state = DISCONNECTED;
	hv_stimer_global_cleanup();
	vmbus_disconnect();
	hv_remove_vmbus_irq();
	for_each_online_cpu(cpu) {
		struct hv_per_cpu_context *hv_cpu
			= per_cpu_ptr(hv_context.cpu_context, cpu);

		tasklet_kill(&hv_cpu->msg_dpc);
	}
	hv_debug_rm_all_dir();

	vmbus_free_channels();
	kfree(vmbus_connection.channels);

	if (ms_hyperv.misc_features & HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE) {
		kmsg_dump_unregister(&hv_kmsg_dumper);
		unregister_die_notifier(&hyperv_die_block);
		atomic_notifier_chain_unregister(&panic_notifier_list,
						 &hyperv_panic_block);
	}

	free_page((unsigned long)hv_panic_page);
	unregister_sysctl_table(hv_ctl_table_hdr);
	hv_ctl_table_hdr = NULL;
	bus_unregister(&hv_bus);

	cpuhp_remove_state(hyperv_cpuhp_online);
	hv_synic_free();
	acpi_bus_unregister_driver(&vmbus_acpi_driver);
}
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Microsoft Hyper-V VMBus Driver");

subsys_initcall(hv_acpi_init);
module_exit(vmbus_exit);