/*
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/hyperv.h>
#include <asm/mshyperv.h>

#include "hyperv_vmbus.h"

static void init_vp_index(struct vmbus_channel *channel, u16 dev_type);
static const struct vmbus_device vmbus_devs[] = {
	{ .dev_type = HV_IDE,      HV_IDE_GUID,        .perf_device = true  },
	{ .dev_type = HV_SCSI,     HV_SCSI_GUID,       .perf_device = true  },
	{ .dev_type = HV_FC,       HV_SYNTHFC_GUID,    .perf_device = true  },
	{ .dev_type = HV_NIC,      HV_NIC_GUID,        .perf_device = true  },
	{ .dev_type = HV_ND,       HV_ND_GUID,         .perf_device = true  },
	{ .dev_type = HV_PCIE,     HV_PCIE_GUID,       .perf_device = true  },
	/* Synthetic Frame Buffer */
	{ .dev_type = HV_FB,       HV_SYNTHVID_GUID,   .perf_device = false },
	/* Synthetic Keyboard */
	{ .dev_type = HV_KBD,      HV_KBD_GUID,        .perf_device = false },
	{ .dev_type = HV_MOUSE,    HV_MOUSE_GUID,      .perf_device = false },
	{ .dev_type = HV_KVP,      HV_KVP_GUID,        .perf_device = false },
	{ .dev_type = HV_TS,       HV_TS_GUID,         .perf_device = false },
	{ .dev_type = HV_HB,       HV_HEART_BEAT_GUID, .perf_device = false },
	{ .dev_type = HV_SHUTDOWN, HV_SHUTDOWN_GUID,   .perf_device = false },
	{ .dev_type = HV_FCOPY,    HV_FCOPY_GUID,      .perf_device = false },
	{ .dev_type = HV_BACKUP,   HV_VSS_GUID,        .perf_device = false },
	{ .dev_type = HV_DM,       HV_DM_GUID,         .perf_device = false },
	{ .dev_type = HV_UNKNOWN,  .perf_device = false },
};
static const struct {
	uuid_le guid;
} vmbus_unsupported_devs[] = {
	{ HV_AVMA1_GUID },
	{ HV_AVMA2_GUID },
	{ HV_RDV_GUID	},
};
/*
 * The rescinded channel may be blocked waiting for a response from the host;
 * take care of that.
 */
static void vmbus_rescind_cleanup(struct vmbus_channel *channel)
{
	struct vmbus_channel_msginfo *msginfo;
	unsigned long flags;

	spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);

	list_for_each_entry(msginfo, &vmbus_connection.chn_msg_list,
			    msglistentry) {
		if (msginfo->waiting_channel == channel) {
			complete(&msginfo->waitevent);
			break;
		}
	}
	spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
}
static bool is_unsupported_vmbus_devs(const uuid_le *guid)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(vmbus_unsupported_devs); i++)
		if (!uuid_le_cmp(*guid, vmbus_unsupported_devs[i].guid))
			return true;
	return false;
}
static u16 hv_get_dev_type(const struct vmbus_channel *channel)
{
	const uuid_le *guid = &channel->offermsg.offer.if_type;
	u16 i;

	if (is_hvsock_channel(channel) || is_unsupported_vmbus_devs(guid))
		return HV_UNKNOWN;

	for (i = HV_IDE; i < HV_UNKNOWN; i++) {
		if (!uuid_le_cmp(*guid, vmbus_devs[i].guid))
			return i;
	}
	pr_info("Unknown GUID: %pUl\n", guid);
	return i;
}
/*
 * vmbus_prep_negotiate_resp() - Create default response for Hyper-V Negotiate message
 * @icmsghdrp: Pointer to msg header structure
 * @buf: Raw buffer channel data
 * @fw_version: The framework versions we can support
 * @fw_vercnt: The number of entries in @fw_version
 * @srv_version: The service versions we can support
 * @srv_vercnt: The number of entries in @srv_version
 * @nego_fw_version: Where to store the selected framework version (may be NULL)
 * @nego_srv_version: Where to store the selected service version (may be NULL)
 *
 * @icmsghdrp is of type &struct icmsg_hdr.
 * Set up and fill in the default negotiate response message.
 *
 * The fw_version/fw_vercnt and srv_version/srv_vercnt arrays list the
 * framework and service versions we can support, in decreasing order
 * of preference.
 *
 * Mainly used by Hyper-V drivers.
 */
bool vmbus_prep_negotiate_resp(struct icmsg_hdr *icmsghdrp,
				u8 *buf, const int *fw_version, int fw_vercnt,
				const int *srv_version, int srv_vercnt,
				int *nego_fw_version, int *nego_srv_version)
{
	int icframe_major, icframe_minor;
	int icmsg_major, icmsg_minor;
	int fw_major, fw_minor;
	int srv_major, srv_minor;
	int i, j;
	bool found_match = false;
	struct icmsg_negotiate *negop;

	icmsghdrp->icmsgsize = 0x10;
	negop = (struct icmsg_negotiate *)&buf[
		sizeof(struct vmbuspipe_hdr) +
		sizeof(struct icmsg_hdr)];

	icframe_major = negop->icframe_vercnt;
	icframe_minor = 0;

	icmsg_major = negop->icmsg_vercnt;
	icmsg_minor = 0;

	/*
	 * Select the framework version number we will
	 * support.
	 */
	for (i = 0; i < fw_vercnt; i++) {
		fw_major = (fw_version[i] >> 16);
		fw_minor = (fw_version[i] & 0xFFFF);

		for (j = 0; j < negop->icframe_vercnt; j++) {
			if ((negop->icversion_data[j].major == fw_major) &&
			    (negop->icversion_data[j].minor == fw_minor)) {
				icframe_major = negop->icversion_data[j].major;
				icframe_minor = negop->icversion_data[j].minor;
				found_match = true;
				break;
			}
		}

		if (found_match)
			break;
	}

	if (!found_match)
		goto fw_error;

	found_match = false;

	for (i = 0; i < srv_vercnt; i++) {
		srv_major = (srv_version[i] >> 16);
		srv_minor = (srv_version[i] & 0xFFFF);

		for (j = negop->icframe_vercnt;
		     (j < negop->icframe_vercnt + negop->icmsg_vercnt);
		     j++) {
			if ((negop->icversion_data[j].major == srv_major) &&
			    (negop->icversion_data[j].minor == srv_minor)) {
				icmsg_major = negop->icversion_data[j].major;
				icmsg_minor = negop->icversion_data[j].minor;
				found_match = true;
				break;
			}
		}

		if (found_match)
			break;
	}

	/*
	 * Respond with the framework and service
	 * version numbers we can support.
	 */
fw_error:
	if (!found_match) {
		negop->icframe_vercnt = 0;
		negop->icmsg_vercnt = 0;
	} else {
		negop->icframe_vercnt = 1;
		negop->icmsg_vercnt = 1;
	}

	if (nego_fw_version)
		*nego_fw_version = (icframe_major << 16) | icframe_minor;

	if (nego_srv_version)
		*nego_srv_version = (icmsg_major << 16) | icmsg_minor;

	negop->icversion_data[0].major = icframe_major;
	negop->icversion_data[0].minor = icframe_minor;
	negop->icversion_data[1].major = icmsg_major;
	negop->icversion_data[1].minor = icmsg_minor;

	return found_match;
}
EXPORT_SYMBOL_GPL(vmbus_prep_negotiate_resp);
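/*
 * Illustrative usage (a sketch, not code from this file): an IC driver's
 * channel callback might negotiate versions as below. The version arrays
 * here are hypothetical; versions pack as (major << 16) | minor and are
 * listed in decreasing order of preference.
 *
 *	static const int fw_versions[] = { 0x00030000 };  // framework 3.0
 *	static const int srv_versions[] = { 0x00030002, 0x00030000 };
 *	int srv_ver;
 *
 *	if (icmsghdrp->icmsgtype == ICMSGTYPE_NEGOTIATE) {
 *		if (vmbus_prep_negotiate_resp(icmsghdrp, buf,
 *				fw_versions, ARRAY_SIZE(fw_versions),
 *				srv_versions, ARRAY_SIZE(srv_versions),
 *				NULL, &srv_ver))
 *			pr_info("negotiated service version %d.%d\n",
 *				srv_ver >> 16, srv_ver & 0xFFFF);
 *	}
 */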
/*
 * alloc_channel - Allocate and initialize a vmbus channel object
 */
static struct vmbus_channel *alloc_channel(void)
{
	struct vmbus_channel *channel;

	channel = kzalloc(sizeof(*channel), GFP_ATOMIC);
	if (!channel)
		return NULL;

	spin_lock_init(&channel->lock);

	INIT_LIST_HEAD(&channel->sc_list);
	INIT_LIST_HEAD(&channel->percpu_list);

	tasklet_init(&channel->callback_event,
		     vmbus_on_event, (unsigned long)channel);

	return channel;
}
/*
 * free_channel - Release the resources used by the vmbus channel object
 */
static void free_channel(struct vmbus_channel *channel)
{
	tasklet_kill(&channel->callback_event);

	kfree_rcu(channel, rcu);
}
static void percpu_channel_enq(void *arg)
{
	struct vmbus_channel *channel = arg;
	struct hv_per_cpu_context *hv_cpu
		= this_cpu_ptr(hv_context.cpu_context);

	list_add_tail_rcu(&channel->percpu_list, &hv_cpu->chan_list);
}

static void percpu_channel_deq(void *arg)
{
	struct vmbus_channel *channel = arg;

	list_del_rcu(&channel->percpu_list);
}
static void vmbus_release_relid(u32 relid)
{
	struct vmbus_channel_relid_released msg;

	memset(&msg, 0, sizeof(struct vmbus_channel_relid_released));
	msg.child_relid = relid;
	msg.header.msgtype = CHANNELMSG_RELID_RELEASED;
	vmbus_post_msg(&msg, sizeof(struct vmbus_channel_relid_released),
		       true);
}
void hv_process_channel_removal(struct vmbus_channel *channel, u32 relid)
{
	unsigned long flags;
	struct vmbus_channel *primary_channel;

	BUG_ON(!channel->rescind);
	BUG_ON(!mutex_is_locked(&vmbus_connection.channel_mutex));

	if (channel->target_cpu != get_cpu()) {
		put_cpu();
		smp_call_function_single(channel->target_cpu,
					 percpu_channel_deq, channel, true);
	} else {
		percpu_channel_deq(channel);
		put_cpu();
	}

	if (channel->primary_channel == NULL) {
		list_del(&channel->listentry);

		primary_channel = channel;
	} else {
		primary_channel = channel->primary_channel;
		spin_lock_irqsave(&primary_channel->lock, flags);
		list_del(&channel->sc_list);
		primary_channel->num_sc--;
		spin_unlock_irqrestore(&primary_channel->lock, flags);
	}

	/*
	 * We need to free the bit for init_vp_index() to work in the case
	 * of sub-channel, when we reload drivers like hv_netvsc.
	 */
	if (channel->affinity_policy == HV_LOCALIZED)
		cpumask_clear_cpu(channel->target_cpu,
				  &primary_channel->alloced_cpus_in_node);

	vmbus_release_relid(relid);

	free_channel(channel);
}
void vmbus_free_channels(void)
{
	struct vmbus_channel *channel, *tmp;

	list_for_each_entry_safe(channel, tmp, &vmbus_connection.chn_list,
				 listentry) {
		/* hv_process_channel_removal() needs this */
		channel->rescind = true;

		vmbus_device_unregister(channel->device_obj);
	}
}
/*
 * vmbus_process_offer - Process the offer by creating a channel/device
 * associated with this offer
 */
static void vmbus_process_offer(struct vmbus_channel *newchannel)
{
	struct vmbus_channel *channel;
	bool fnew = true;
	unsigned long flags;
	u16 dev_type;
	int ret;

	/* Make sure this is a new offer */
	mutex_lock(&vmbus_connection.channel_mutex);

	list_for_each_entry(channel, &vmbus_connection.chn_list, listentry) {
		if (!uuid_le_cmp(channel->offermsg.offer.if_type,
				 newchannel->offermsg.offer.if_type) &&
		    !uuid_le_cmp(channel->offermsg.offer.if_instance,
				 newchannel->offermsg.offer.if_instance)) {
			fnew = false;
			break;
		}
	}

	if (fnew)
		list_add_tail(&newchannel->listentry,
			      &vmbus_connection.chn_list);

	mutex_unlock(&vmbus_connection.channel_mutex);

	if (!fnew) {
		/*
		 * Check to see if this is a sub-channel.
		 */
		if (newchannel->offermsg.offer.sub_channel_index != 0) {
			/*
			 * Process the sub-channel.
			 */
			newchannel->primary_channel = channel;
			spin_lock_irqsave(&channel->lock, flags);
			list_add_tail(&newchannel->sc_list, &channel->sc_list);
			channel->num_sc++;
			spin_unlock_irqrestore(&channel->lock, flags);
		} else {
			atomic_dec(&vmbus_connection.offer_in_progress);
			goto err_free_chan;
		}
	}

	dev_type = hv_get_dev_type(newchannel);

	init_vp_index(newchannel, dev_type);

	if (newchannel->target_cpu != get_cpu()) {
		put_cpu();
		smp_call_function_single(newchannel->target_cpu,
					 percpu_channel_enq,
					 newchannel, true);
	} else {
		percpu_channel_enq(newchannel);
		put_cpu();
	}

	/*
	 * This state is used to indicate a successful open
	 * so that when we do close the channel normally, we
	 * can cleanup properly.
	 */
	newchannel->state = CHANNEL_OPEN_STATE;

	if (!fnew) {
		if (channel->sc_creation_callback != NULL)
			channel->sc_creation_callback(newchannel);
		atomic_dec(&vmbus_connection.offer_in_progress);
		return;
	}

	/*
	 * Start the process of binding this offer to the driver.
	 * We need to set the DeviceObject field before calling
	 * vmbus_child_dev_add().
	 */
	newchannel->device_obj = vmbus_device_create(
		&newchannel->offermsg.offer.if_type,
		&newchannel->offermsg.offer.if_instance,
		newchannel);
	if (!newchannel->device_obj)
		goto err_deq_chan;

	newchannel->device_obj->device_id = dev_type;
	/*
	 * Add the new device to the bus. This will kick off device-driver
	 * binding which eventually invokes the device driver's AddDevice()
	 * method.
	 */
	ret = vmbus_device_register(newchannel->device_obj);

	if (ret != 0) {
		pr_err("unable to add child device object (relid %d)\n",
			newchannel->offermsg.child_relid);
		kfree(newchannel->device_obj);
		goto err_deq_chan;
	}

	atomic_dec(&vmbus_connection.offer_in_progress);
	return;

err_deq_chan:
	mutex_lock(&vmbus_connection.channel_mutex);
	list_del(&newchannel->listentry);
	mutex_unlock(&vmbus_connection.channel_mutex);

	if (newchannel->target_cpu != get_cpu()) {
		put_cpu();
		smp_call_function_single(newchannel->target_cpu,
					 percpu_channel_deq, newchannel, true);
	} else {
		percpu_channel_deq(newchannel);
		put_cpu();
	}

	vmbus_release_relid(newchannel->offermsg.child_relid);

err_free_chan:
	free_channel(newchannel);
}
/*
 * We use this state to statically distribute the channel interrupt load.
 */
static int next_numa_node_id;

/*
 * Starting with Win8, we can statically distribute the incoming
 * channel interrupt load by binding a channel to VCPU.
 * We do this in a hierarchical fashion:
 * First distribute the primary channels across available NUMA nodes
 * and then distribute the subchannels amongst the CPUs in the NUMA
 * node assigned to the primary channel.
 *
 * For pre-win8 hosts or non-performance critical channels we assign the
 * first CPU in the first NUMA node.
 */
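/*
 * Worked example (illustrative, not from the original source): on a guest
 * with two NUMA nodes, successive perf-critical primary channels land on
 * nodes 0, 1, 0, 1, ... as next_numa_node_id advances, and each sub-channel
 * is then bound to a still-unused CPU of its primary's node; once every CPU
 * in the node has been handed out, the per-node allocation mask is cleared
 * and the cycle starts over.
 */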
static void init_vp_index(struct vmbus_channel *channel, u16 dev_type)
{
	u32 cur_cpu;
	bool perf_chn = vmbus_devs[dev_type].perf_device;
	struct vmbus_channel *primary = channel->primary_channel;
	int next_node;
	struct cpumask available_mask;
	struct cpumask *alloced_mask;

	if ((vmbus_proto_version == VERSION_WS2008) ||
	    (vmbus_proto_version == VERSION_WIN7) || (!perf_chn)) {
		/*
		 * Prior to win8, all channel interrupts are
		 * delivered on cpu 0.
		 * Also if the channel is not a performance critical
		 * channel, bind it to cpu 0.
		 */
		channel->numa_node = 0;
		channel->target_cpu = 0;
		channel->target_vp = hv_context.vp_index[0];
		return;
	}

	/*
	 * Based on the channel affinity policy, we will assign the NUMA
	 * nodes.
	 */
	if ((channel->affinity_policy == HV_BALANCED) || (!primary)) {
		while (true) {
			next_node = next_numa_node_id++;
			if (next_node == nr_node_ids) {
				next_node = next_numa_node_id = 0;
				continue;
			}
			if (cpumask_empty(cpumask_of_node(next_node)))
				continue;
			break;
		}
		channel->numa_node = next_node;
		primary = channel;
	}
	alloced_mask = &hv_context.hv_numa_map[primary->numa_node];

	if (cpumask_weight(alloced_mask) ==
	    cpumask_weight(cpumask_of_node(primary->numa_node))) {
		/*
		 * We have cycled through all the CPUs in the node;
		 * reset the alloced map.
		 */
		cpumask_clear(alloced_mask);
	}

	cpumask_xor(&available_mask, alloced_mask,
		    cpumask_of_node(primary->numa_node));

	cur_cpu = -1;

	if (primary->affinity_policy == HV_LOCALIZED) {
		/*
		 * Normally Hyper-V host doesn't create more subchannels
		 * than there are VCPUs on the node but it is possible when not
		 * all present VCPUs on the node are initialized by guest.
		 * Clear the alloced_cpus_in_node to start over.
		 */
		if (cpumask_equal(&primary->alloced_cpus_in_node,
				  cpumask_of_node(primary->numa_node)))
			cpumask_clear(&primary->alloced_cpus_in_node);
	}

	while (true) {
		cur_cpu = cpumask_next(cur_cpu, &available_mask);
		if (cur_cpu >= nr_cpu_ids) {
			cur_cpu = -1;
			cpumask_copy(&available_mask,
				     cpumask_of_node(primary->numa_node));
			continue;
		}

		if (primary->affinity_policy == HV_LOCALIZED) {
			/*
			 * NOTE: in the case of sub-channel, we clear the
			 * sub-channel related bit(s) in
			 * primary->alloced_cpus_in_node in
			 * hv_process_channel_removal(), so when we
			 * reload drivers like hv_netvsc in SMP guest, here
			 * we're able to re-allocate
			 * bit from primary->alloced_cpus_in_node.
			 */
			if (!cpumask_test_cpu(cur_cpu,
					      &primary->alloced_cpus_in_node)) {
				cpumask_set_cpu(cur_cpu,
						&primary->alloced_cpus_in_node);
				cpumask_set_cpu(cur_cpu, alloced_mask);
				break;
			}
		} else {
			cpumask_set_cpu(cur_cpu, alloced_mask);
			break;
		}
	}

	channel->target_cpu = cur_cpu;
	channel->target_vp = hv_context.vp_index[cur_cpu];
}
static void vmbus_wait_for_unload(void)
{
	int cpu;
	void *page_addr;
	struct hv_message *msg;
	struct vmbus_channel_message_header *hdr;
	u32 message_type;

	/*
	 * CHANNELMSG_UNLOAD_RESPONSE is always delivered to the CPU which was
	 * used for initial contact or to CPU0 depending on host version. When
	 * we're crashing on a different CPU let's hope that IRQ handler on
	 * the cpu which receives CHANNELMSG_UNLOAD_RESPONSE is still
	 * functional and vmbus_unload_response() will complete
	 * vmbus_connection.unload_event. If not, the last thing we can do is
	 * read message pages for all CPUs directly.
	 */
	while (1) {
		if (completion_done(&vmbus_connection.unload_event))
			break;

		for_each_online_cpu(cpu) {
			struct hv_per_cpu_context *hv_cpu
				= per_cpu_ptr(hv_context.cpu_context, cpu);

			page_addr = hv_cpu->synic_message_page;
			msg = (struct hv_message *)page_addr
				+ VMBUS_MESSAGE_SINT;

			message_type = READ_ONCE(msg->header.message_type);
			if (message_type == HVMSG_NONE)
				continue;

			hdr = (struct vmbus_channel_message_header *)
				msg->u.payload;

			if (hdr->msgtype == CHANNELMSG_UNLOAD_RESPONSE)
				complete(&vmbus_connection.unload_event);

			vmbus_signal_eom(msg, message_type);
		}

		mdelay(10);
	}

	/*
	 * We're crashing and already got the UNLOAD_RESPONSE, cleanup all
	 * maybe-pending messages on all CPUs to be able to receive new
	 * messages after we reconnect.
	 */
	for_each_online_cpu(cpu) {
		struct hv_per_cpu_context *hv_cpu
			= per_cpu_ptr(hv_context.cpu_context, cpu);

		page_addr = hv_cpu->synic_message_page;
		msg = (struct hv_message *)page_addr + VMBUS_MESSAGE_SINT;
		msg->header.message_type = HVMSG_NONE;
	}
}
/*
 * vmbus_unload_response - Handler for the unload response.
 */
static void vmbus_unload_response(struct vmbus_channel_message_header *hdr)
{
	/*
	 * This is a global event; just wakeup the waiting thread.
	 * Once we successfully unload, we can cleanup the monitor state.
	 */
	complete(&vmbus_connection.unload_event);
}
void vmbus_initiate_unload(bool crash)
{
	struct vmbus_channel_message_header hdr;

	/* Pre-Win2012R2 hosts don't support reconnect */
	if (vmbus_proto_version < VERSION_WIN8_1)
		return;

	init_completion(&vmbus_connection.unload_event);
	memset(&hdr, 0, sizeof(struct vmbus_channel_message_header));
	hdr.msgtype = CHANNELMSG_UNLOAD;
	vmbus_post_msg(&hdr, sizeof(struct vmbus_channel_message_header),
		       !crash);

	/*
	 * vmbus_initiate_unload() is also called on crash and the crash can
	 * happen in an interrupt context, where scheduling is impossible.
	 */
	if (!crash)
		wait_for_completion(&vmbus_connection.unload_event);
	else
		vmbus_wait_for_unload();
}
/*
 * vmbus_onoffer - Handler for channel offers from vmbus in parent partition.
 *
 */
static void vmbus_onoffer(struct vmbus_channel_message_header *hdr)
{
	struct vmbus_channel_offer_channel *offer;
	struct vmbus_channel *newchannel;

	offer = (struct vmbus_channel_offer_channel *)hdr;

	/* Allocate the channel object and save this offer. */
	newchannel = alloc_channel();
	if (!newchannel) {
		vmbus_release_relid(offer->child_relid);
		atomic_dec(&vmbus_connection.offer_in_progress);
		pr_err("Unable to allocate channel object\n");
		return;
	}

	/*
	 * Setup state for signalling the host.
	 */
	newchannel->sig_event = (struct hv_input_signal_event *)
				(ALIGN((unsigned long)
				&newchannel->sig_buf,
				HV_HYPERCALL_PARAM_ALIGN));

	newchannel->sig_event->connectionid.asu32 = 0;
	newchannel->sig_event->connectionid.u.id = VMBUS_EVENT_CONNECTION_ID;
	newchannel->sig_event->flag_number = 0;
	newchannel->sig_event->rsvdz = 0;

	if (vmbus_proto_version != VERSION_WS2008) {
		newchannel->is_dedicated_interrupt =
				(offer->is_dedicated_interrupt != 0);
		newchannel->sig_event->connectionid.u.id =
				offer->connection_id;
	}

	memcpy(&newchannel->offermsg, offer,
	       sizeof(struct vmbus_channel_offer_channel));
	newchannel->monitor_grp = (u8)offer->monitorid / 32;
	newchannel->monitor_bit = (u8)offer->monitorid % 32;

	vmbus_process_offer(newchannel);
}
/*
 * vmbus_onoffer_rescind - Rescind offer handler.
 *
 * We queue a work item to process this offer synchronously.
 */
static void vmbus_onoffer_rescind(struct vmbus_channel_message_header *hdr)
{
	struct vmbus_channel_rescind_offer *rescind;
	struct vmbus_channel *channel;
	struct device *dev;
	unsigned long flags;

	rescind = (struct vmbus_channel_rescind_offer *)hdr;

	/*
	 * The offer msg and the corresponding rescind msg
	 * from the host are guaranteed to be ordered -
	 * offer comes in first and then the rescind.
	 * Since we process these events in work elements,
	 * and with preemption, we may end up processing
	 * the events out of order. Given that we handle these
	 * work elements on the same CPU, this is possible only
	 * in the case of preemption. In any case wait here
	 * until the offer processing has moved beyond the
	 * point where the channel is discoverable.
	 */
	while (atomic_read(&vmbus_connection.offer_in_progress) != 0) {
		/*
		 * Wait here until the processing of any channel offer
		 * currently in flight has completed.
		 */
		msleep(1);
	}

	mutex_lock(&vmbus_connection.channel_mutex);
	channel = relid2channel(rescind->child_relid);
	mutex_unlock(&vmbus_connection.channel_mutex);

	if (channel == NULL) {
		/*
		 * We failed in processing the offer message;
		 * we would have cleaned up the relid in that
		 * failure path.
		 */
		return;
	}

	spin_lock_irqsave(&channel->lock, flags);
	channel->rescind = true;
	spin_unlock_irqrestore(&channel->lock, flags);

	vmbus_rescind_cleanup(channel);

	if (channel->device_obj) {
		if (channel->chn_rescind_callback) {
			channel->chn_rescind_callback(channel);
			return;
		}
		/*
		 * We will have to unregister this device from the
		 * driver core.
		 */
		dev = get_device(&channel->device_obj->device);
		if (dev) {
			vmbus_device_unregister(channel->device_obj);
			put_device(dev);
		}
	}
	if (channel->primary_channel != NULL) {
		/*
		 * Sub-channel is being rescinded. Following is the channel
		 * close sequence when initiated from the driver (refer to
		 * vmbus_close() for details):
		 * 1. Close all sub-channels first
		 * 2. Then close the primary channel.
		 */
		if (channel->state == CHANNEL_OPEN_STATE) {
			/*
			 * The channel is currently not open;
			 * it is safe for us to cleanup the channel.
			 */
			mutex_lock(&vmbus_connection.channel_mutex);
			hv_process_channel_removal(channel,
				channel->offermsg.child_relid);
			mutex_unlock(&vmbus_connection.channel_mutex);
		}
	}
}
void vmbus_hvsock_device_unregister(struct vmbus_channel *channel)
{
	mutex_lock(&vmbus_connection.channel_mutex);

	BUG_ON(!is_hvsock_channel(channel));

	channel->rescind = true;
	vmbus_device_unregister(channel->device_obj);

	mutex_unlock(&vmbus_connection.channel_mutex);
}
EXPORT_SYMBOL_GPL(vmbus_hvsock_device_unregister);
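/*
 * Example (sketch, not part of this file): an hv_sock transport would call
 * this from its release path once it is done with a rescinded channel:
 *
 *	vmbus_hvsock_device_unregister(vsk->channel);
 *
 * "vsk" is a hypothetical per-socket state holding the hvsock channel.
 */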
/*
 * vmbus_onoffers_delivered -
 * This is invoked when all offers have been delivered.
 *
 * Nothing to do here.
 */
static void vmbus_onoffers_delivered(
			struct vmbus_channel_message_header *hdr)
{
}
/*
 * vmbus_onopen_result - Open result handler.
 *
 * This is invoked when we receive a response to our channel open request.
 * Find the matching request, copy the response and signal the requesting
 * thread.
 */
static void vmbus_onopen_result(struct vmbus_channel_message_header *hdr)
{
	struct vmbus_channel_open_result *result;
	struct vmbus_channel_msginfo *msginfo;
	struct vmbus_channel_message_header *requestheader;
	struct vmbus_channel_open_channel *openmsg;
	unsigned long flags;

	result = (struct vmbus_channel_open_result *)hdr;

	/*
	 * Find the open msg, copy the result and signal/unblock the wait event
	 */
	spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);

	list_for_each_entry(msginfo, &vmbus_connection.chn_msg_list,
			    msglistentry) {
		requestheader =
			(struct vmbus_channel_message_header *)msginfo->msg;

		if (requestheader->msgtype == CHANNELMSG_OPENCHANNEL) {
			openmsg =
			(struct vmbus_channel_open_channel *)msginfo->msg;
			if (openmsg->child_relid == result->child_relid &&
			    openmsg->openid == result->openid) {
				memcpy(&msginfo->response.open_result,
				       result,
				       sizeof(
					struct vmbus_channel_open_result));
				complete(&msginfo->waitevent);
				break;
			}
		}
	}
	spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
}
/*
 * vmbus_ongpadl_created - GPADL created handler.
 *
 * This is invoked when we receive a response to our gpadl create request.
 * Find the matching request, copy the response and signal the requesting
 * thread.
 */
static void vmbus_ongpadl_created(struct vmbus_channel_message_header *hdr)
{
	struct vmbus_channel_gpadl_created *gpadlcreated;
	struct vmbus_channel_msginfo *msginfo;
	struct vmbus_channel_message_header *requestheader;
	struct vmbus_channel_gpadl_header *gpadlheader;
	unsigned long flags;

	gpadlcreated = (struct vmbus_channel_gpadl_created *)hdr;

	/*
	 * Find the establish msg, copy the result and signal/unblock the wait
	 * event.
	 */
	spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);

	list_for_each_entry(msginfo, &vmbus_connection.chn_msg_list,
			    msglistentry) {
		requestheader =
			(struct vmbus_channel_message_header *)msginfo->msg;

		if (requestheader->msgtype == CHANNELMSG_GPADL_HEADER) {
			gpadlheader =
			(struct vmbus_channel_gpadl_header *)requestheader;

			if ((gpadlcreated->child_relid ==
			     gpadlheader->child_relid) &&
			    (gpadlcreated->gpadl == gpadlheader->gpadl)) {
				memcpy(&msginfo->response.gpadl_created,
				       gpadlcreated,
				       sizeof(
					struct vmbus_channel_gpadl_created));
				complete(&msginfo->waitevent);
				break;
			}
		}
	}
	spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
}
/*
 * vmbus_ongpadl_torndown - GPADL torndown handler.
 *
 * This is invoked when we receive a response to our gpadl teardown request.
 * Find the matching request, copy the response and signal the requesting
 * thread.
 */
static void vmbus_ongpadl_torndown(
			struct vmbus_channel_message_header *hdr)
{
	struct vmbus_channel_gpadl_torndown *gpadl_torndown;
	struct vmbus_channel_msginfo *msginfo;
	struct vmbus_channel_message_header *requestheader;
	struct vmbus_channel_gpadl_teardown *gpadl_teardown;
	unsigned long flags;

	gpadl_torndown = (struct vmbus_channel_gpadl_torndown *)hdr;

	/*
	 * Find the teardown msg, copy the result and signal/unblock the wait
	 * event.
	 */
	spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);

	list_for_each_entry(msginfo, &vmbus_connection.chn_msg_list,
			    msglistentry) {
		requestheader =
			(struct vmbus_channel_message_header *)msginfo->msg;

		if (requestheader->msgtype == CHANNELMSG_GPADL_TEARDOWN) {
			gpadl_teardown =
			(struct vmbus_channel_gpadl_teardown *)requestheader;

			if (gpadl_torndown->gpadl == gpadl_teardown->gpadl) {
				memcpy(&msginfo->response.gpadl_torndown,
				       gpadl_torndown,
				       sizeof(
					struct vmbus_channel_gpadl_torndown));
				complete(&msginfo->waitevent);
				break;
			}
		}
	}
	spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
}
/*
 * vmbus_onversion_response - Version response handler.
 *
 * This is invoked when we receive a response to our initiate contact request.
 * Find the matching request, copy the response and signal the requesting
 * thread.
 */
static void vmbus_onversion_response(
		struct vmbus_channel_message_header *hdr)
{
	struct vmbus_channel_msginfo *msginfo;
	struct vmbus_channel_message_header *requestheader;
	struct vmbus_channel_version_response *version_response;
	unsigned long flags;

	version_response = (struct vmbus_channel_version_response *)hdr;
	spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);

	list_for_each_entry(msginfo, &vmbus_connection.chn_msg_list,
			    msglistentry) {
		requestheader =
			(struct vmbus_channel_message_header *)msginfo->msg;

		if (requestheader->msgtype ==
		    CHANNELMSG_INITIATE_CONTACT) {
			memcpy(&msginfo->response.version_response,
			       version_response,
			       sizeof(struct vmbus_channel_version_response));
			complete(&msginfo->waitevent);
		}
	}
	spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
}
/* Channel message dispatch table */
const struct vmbus_channel_message_table_entry
channel_message_table[CHANNELMSG_COUNT] = {
	{ CHANNELMSG_INVALID,			0, NULL },
	{ CHANNELMSG_OFFERCHANNEL,		0, vmbus_onoffer },
	{ CHANNELMSG_RESCIND_CHANNELOFFER,	0, vmbus_onoffer_rescind },
	{ CHANNELMSG_REQUESTOFFERS,		0, NULL },
	{ CHANNELMSG_ALLOFFERS_DELIVERED,	1, vmbus_onoffers_delivered },
	{ CHANNELMSG_OPENCHANNEL,		0, NULL },
	{ CHANNELMSG_OPENCHANNEL_RESULT,	1, vmbus_onopen_result },
	{ CHANNELMSG_CLOSECHANNEL,		0, NULL },
	{ CHANNELMSG_GPADL_HEADER,		0, NULL },
	{ CHANNELMSG_GPADL_BODY,		0, NULL },
	{ CHANNELMSG_GPADL_CREATED,		1, vmbus_ongpadl_created },
	{ CHANNELMSG_GPADL_TEARDOWN,		0, NULL },
	{ CHANNELMSG_GPADL_TORNDOWN,		1, vmbus_ongpadl_torndown },
	{ CHANNELMSG_RELID_RELEASED,		0, NULL },
	{ CHANNELMSG_INITIATE_CONTACT,		0, NULL },
	{ CHANNELMSG_VERSION_RESPONSE,		1, vmbus_onversion_response },
	{ CHANNELMSG_UNLOAD,			0, NULL },
	{ CHANNELMSG_UNLOAD_RESPONSE,		1, vmbus_unload_response },
	{ CHANNELMSG_18,			0, NULL },
	{ CHANNELMSG_19,			0, NULL },
	{ CHANNELMSG_20,			0, NULL },
	{ CHANNELMSG_TL_CONNECT_REQUEST,	0, NULL },
};
/*
 * vmbus_onmessage - Handler for channel protocol messages.
 *
 * This is invoked in the vmbus worker thread context.
 */
void vmbus_onmessage(void *context)
{
	struct hv_message *msg = context;
	struct vmbus_channel_message_header *hdr;
	int size;

	hdr = (struct vmbus_channel_message_header *)msg->u.payload;
	size = msg->header.payload_size;

	if (hdr->msgtype >= CHANNELMSG_COUNT) {
		pr_err("Received invalid channel message type %d size %d\n",
			hdr->msgtype, size);
		print_hex_dump_bytes("", DUMP_PREFIX_NONE,
				     (unsigned char *)msg->u.payload, size);
		return;
	}

	if (channel_message_table[hdr->msgtype].message_handler)
		channel_message_table[hdr->msgtype].message_handler(hdr);
	else
		pr_err("Unhandled channel message type %d\n", hdr->msgtype);
}
/*
 * vmbus_request_offers - Send a request to get all our pending offers.
 */
int vmbus_request_offers(void)
{
	struct vmbus_channel_message_header *msg;
	struct vmbus_channel_msginfo *msginfo;
	int ret;

	msginfo = kmalloc(sizeof(*msginfo) +
			  sizeof(struct vmbus_channel_message_header),
			  GFP_KERNEL);
	if (!msginfo)
		return -ENOMEM;

	msg = (struct vmbus_channel_message_header *)msginfo->msg;

	msg->msgtype = CHANNELMSG_REQUESTOFFERS;

	ret = vmbus_post_msg(msg, sizeof(struct vmbus_channel_message_header),
			     true);
	if (ret != 0)
		pr_err("Unable to request offers - %d\n", ret);

	kfree(msginfo);

	return ret;
}
/*
 * Retrieve the (sub) channel on which to send an outgoing request.
 * When a primary channel has multiple sub-channels, we try to
 * distribute the load equally amongst all available channels.
 */
struct vmbus_channel *vmbus_get_outgoing_channel(struct vmbus_channel *primary)
{
	struct list_head *cur, *tmp;
	int cur_cpu;
	struct vmbus_channel *cur_channel;
	struct vmbus_channel *outgoing_channel = primary;
	int next_channel;
	int i = 1;

	if (list_empty(&primary->sc_list))
		return outgoing_channel;

	next_channel = primary->next_oc++;

	if (next_channel > (primary->num_sc)) {
		primary->next_oc = 0;
		return outgoing_channel;
	}

	cur_cpu = hv_context.vp_index[get_cpu()];
	put_cpu();
	list_for_each_safe(cur, tmp, &primary->sc_list) {
		cur_channel = list_entry(cur, struct vmbus_channel, sc_list);
		if (cur_channel->state != CHANNEL_OPENED_STATE)
			continue;

		if (cur_channel->target_vp == cur_cpu)
			return cur_channel;

		if (i == next_channel)
			return cur_channel;

		i++;
	}

	return outgoing_channel;
}
EXPORT_SYMBOL_GPL(vmbus_get_outgoing_channel);
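/*
 * Example (sketch, not from this file): a multi-queue driver would pick the
 * send channel per request and then transmit on it:
 *
 *	struct vmbus_channel *out_chn;
 *
 *	out_chn = vmbus_get_outgoing_channel(device->channel);
 *	ret = vmbus_sendpacket(out_chn, &req, sizeof(req), req_id,
 *			       VM_PKT_DATA_INBAND,
 *			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
 *
 * "device", "req" and "req_id" are hypothetical names; hv_storvsc uses
 * essentially this pattern for its I/O path.
 */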
static void invoke_sc_cb(struct vmbus_channel *primary_channel)
{
	struct list_head *cur, *tmp;
	struct vmbus_channel *cur_channel;

	if (primary_channel->sc_creation_callback == NULL)
		return;

	list_for_each_safe(cur, tmp, &primary_channel->sc_list) {
		cur_channel = list_entry(cur, struct vmbus_channel, sc_list);

		primary_channel->sc_creation_callback(cur_channel);
	}
}
void vmbus_set_sc_create_callback(struct vmbus_channel *primary_channel,
				void (*sc_cr_cb)(struct vmbus_channel *new_sc))
{
	primary_channel->sc_creation_callback = sc_cr_cb;
}
EXPORT_SYMBOL_GPL(vmbus_set_sc_create_callback);
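/*
 * Example (sketch, not part of this file): a driver that expects
 * sub-channels registers its callback on the primary channel before asking
 * the host, via its own device protocol, to create them:
 *
 *	vmbus_set_sc_create_callback(primary, my_sc_open);
 *
 * where my_sc_open() is a hypothetical driver function that calls
 * vmbus_open() on each newly offered sub-channel; hv_netvsc and hv_storvsc
 * follow this pattern.
 */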
bool vmbus_are_subchannels_present(struct vmbus_channel *primary)
{
	bool ret;

	ret = !list_empty(&primary->sc_list);

	if (ret) {
		/*
		 * Invoke the callback on sub-channel creation.
		 * This will present a uniform interface to the
		 * clients.
		 */
		invoke_sc_cb(primary);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(vmbus_are_subchannels_present);
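/*
 * Example (sketch): before spreading I/O, a driver can probe for already
 * offered sub-channels; note that this also invokes its
 * sc_creation_callback on each existing sub-channel:
 *
 *	if (vmbus_are_subchannels_present(primary))
 *		num_queues = primary->num_sc + 1;	// hypothetical use
 */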
void vmbus_set_chn_rescind_callback(struct vmbus_channel *channel,
		void (*chn_rescind_cb)(struct vmbus_channel *))
{
	channel->chn_rescind_callback = chn_rescind_cb;
}
EXPORT_SYMBOL_GPL(vmbus_set_chn_rescind_callback);