/*
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/hyperv.h>

#include "hyperv_vmbus.h"

static void init_vp_index(struct vmbus_channel *channel, u16 dev_type);

static const struct vmbus_device vmbus_devs[] = {
	{ .dev_type = HV_IDE,      HV_IDE_GUID,        .perf_device = true  },
	{ .dev_type = HV_SCSI,     HV_SCSI_GUID,       .perf_device = true  },
	{ .dev_type = HV_FC,       HV_SYNTHFC_GUID,    .perf_device = true  },
	{ .dev_type = HV_NIC,      HV_NIC_GUID,        .perf_device = true  },
	{ .dev_type = HV_ND,       HV_ND_GUID,         .perf_device = true  },
	{ .dev_type = HV_PCIE,     HV_PCIE_GUID,       .perf_device = true  },
	/* Synthetic Frame Buffer */
	{ .dev_type = HV_FB,       HV_SYNTHVID_GUID,   .perf_device = false },
	/* Synthetic Keyboard */
	{ .dev_type = HV_KBD,      HV_KBD_GUID,        .perf_device = false },
	{ .dev_type = HV_MOUSE,    HV_MOUSE_GUID,      .perf_device = false },
	{ .dev_type = HV_KVP,      HV_KVP_GUID,        .perf_device = false },
	{ .dev_type = HV_TS,       HV_TS_GUID,         .perf_device = false },
	{ .dev_type = HV_HB,       HV_HEART_BEAT_GUID, .perf_device = false },
	{ .dev_type = HV_SHUTDOWN, HV_SHUTDOWN_GUID,   .perf_device = false },
	{ .dev_type = HV_FCOPY,    HV_FCOPY_GUID,      .perf_device = false },
	{ .dev_type = HV_BACKUP,   HV_VSS_GUID,        .perf_device = false },
	{ .dev_type = HV_DM,       HV_DM_GUID,         .perf_device = false },
	/* Unknown GUID */
	{ .dev_type = HV_UNKOWN,                       .perf_device = false },
};

static u16 hv_get_dev_type(const uuid_le *guid)
{
	u16 i;

	for (i = HV_IDE; i < HV_UNKOWN; i++) {
		if (!uuid_le_cmp(*guid, vmbus_devs[i].guid))
			return i;
	}
	pr_info("Unknown GUID: %pUl\n", guid);
	return i;
}

/*
 * vmbus_prep_negotiate_resp() - Create default response for Hyper-V Negotiate message
 * @icmsghdrp: Pointer to msg header structure
 * @negop: Pointer to negotiate message structure
 * @buf: Raw buffer channel data
 *
 * @icmsghdrp is of type &struct icmsg_hdr.
 * @negop is of type &struct icmsg_negotiate.
 * Set up and fill in default negotiate response message.
 *
 * The fw_version specifies the framework version that
 * we can support and srv_version specifies the service
 * version we can support.
 *
 * Mainly used by Hyper-V drivers.
 */
bool vmbus_prep_negotiate_resp(struct icmsg_hdr *icmsghdrp,
				struct icmsg_negotiate *negop, u8 *buf,
				int fw_version, int srv_version)
{
	int icframe_major, icframe_minor;
	int icmsg_major, icmsg_minor;
	int fw_major, fw_minor;
	int srv_major, srv_minor;
	int i;
	bool found_match = false;

	icmsghdrp->icmsgsize = 0x10;
	fw_major = (fw_version >> 16);
	fw_minor = (fw_version & 0xFFFF);

	srv_major = (srv_version >> 16);
	srv_minor = (srv_version & 0xFFFF);

	negop = (struct icmsg_negotiate *)&buf[
		sizeof(struct vmbuspipe_hdr) +
		sizeof(struct icmsg_hdr)];

	icframe_major = negop->icframe_vercnt;
	icframe_minor = 0;

	icmsg_major = negop->icmsg_vercnt;
	icmsg_minor = 0;

	/*
	 * Select the framework version number we will
	 * support.
	 */
	for (i = 0; i < negop->icframe_vercnt; i++) {
		if ((negop->icversion_data[i].major == fw_major) &&
		    (negop->icversion_data[i].minor == fw_minor)) {
			icframe_major = negop->icversion_data[i].major;
			icframe_minor = negop->icversion_data[i].minor;
			found_match = true;
		}
	}

	if (!found_match)
		goto fw_error;

	found_match = false;

	for (i = negop->icframe_vercnt;
	     (i < negop->icframe_vercnt + negop->icmsg_vercnt); i++) {
		if ((negop->icversion_data[i].major == srv_major) &&
		    (negop->icversion_data[i].minor == srv_minor)) {
			icmsg_major = negop->icversion_data[i].major;
			icmsg_minor = negop->icversion_data[i].minor;
			found_match = true;
		}
	}

	/*
	 * Respond with the framework and service
	 * version numbers we can support.
	 */

fw_error:
	if (!found_match) {
		negop->icframe_vercnt = 0;
		negop->icmsg_vercnt = 0;
	} else {
		negop->icframe_vercnt = 1;
		negop->icmsg_vercnt = 1;
	}

	negop->icversion_data[0].major = icframe_major;
	negop->icversion_data[0].minor = icframe_minor;
	negop->icversion_data[1].major = icmsg_major;
	negop->icversion_data[1].minor = icmsg_minor;
	return found_match;
}
EXPORT_SYMBOL_GPL(vmbus_prep_negotiate_resp);

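/*
 * Usage sketch (not part of the original file): a utility driver's channel
 * callback typically negotiates versions along these lines, where "txf_buf"
 * is the driver's receive buffer and fw_version/srv_version stand in for
 * the driver's own version constants. Note that negop may be passed as
 * NULL, since the function above recomputes it from buf before use:
 *
 *	struct icmsg_hdr *icmsghdrp;
 *
 *	icmsghdrp = (struct icmsg_hdr *)&txf_buf[
 *			sizeof(struct vmbuspipe_hdr)];
 *	if (icmsghdrp->icmsgtype == ICMSGTYPE_NEGOTIATE)
 *		vmbus_prep_negotiate_resp(icmsghdrp, NULL, txf_buf,
 *					  fw_version, srv_version);
 *	icmsghdrp->icflags = ICMSGHDRFLAG_TRANSACTION
 *		| ICMSGHDRFLAG_RESPONSE;
 *	vmbus_sendpacket(channel, txf_buf, recvlen, requestid,
 *			 VM_PKT_DATA_INBAND, 0);
 */
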
/*
 * alloc_channel - Allocate and initialize a vmbus channel object
 */
static struct vmbus_channel *alloc_channel(void)
{
	static atomic_t chan_num = ATOMIC_INIT(0);
	struct vmbus_channel *channel;

	channel = kzalloc(sizeof(*channel), GFP_ATOMIC);
	if (!channel)
		return NULL;

	channel->id = atomic_inc_return(&chan_num);
	spin_lock_init(&channel->inbound_lock);
	spin_lock_init(&channel->lock);

	INIT_LIST_HEAD(&channel->sc_list);
	INIT_LIST_HEAD(&channel->percpu_list);

	return channel;
}

/*
 * free_channel - Release the resources used by the vmbus channel object
 */
static void free_channel(struct vmbus_channel *channel)
{
	kfree(channel);
}

static void percpu_channel_enq(void *arg)
{
	struct vmbus_channel *channel = arg;
	int cpu = smp_processor_id();

	list_add_tail(&channel->percpu_list, &hv_context.percpu_list[cpu]);
}

static void percpu_channel_deq(void *arg)
{
	struct vmbus_channel *channel = arg;

	list_del(&channel->percpu_list);
}

static void vmbus_release_relid(u32 relid)
{
	struct vmbus_channel_relid_released msg;

	memset(&msg, 0, sizeof(struct vmbus_channel_relid_released));
	msg.child_relid = relid;
	msg.header.msgtype = CHANNELMSG_RELID_RELEASED;
	vmbus_post_msg(&msg, sizeof(struct vmbus_channel_relid_released));
}

void hv_process_channel_removal(struct vmbus_channel *channel, u32 relid)
{
	unsigned long flags;
	struct vmbus_channel *primary_channel;

	vmbus_release_relid(relid);

	BUG_ON(!channel->rescind);
	BUG_ON(!mutex_is_locked(&vmbus_connection.channel_mutex));

	if (channel->target_cpu != get_cpu()) {
		put_cpu();
		smp_call_function_single(channel->target_cpu,
					 percpu_channel_deq, channel, true);
	} else {
		percpu_channel_deq(channel);
		put_cpu();
	}

	if (channel->primary_channel == NULL) {
		list_del(&channel->listentry);

		primary_channel = channel;
	} else {
		primary_channel = channel->primary_channel;
		spin_lock_irqsave(&primary_channel->lock, flags);
		list_del(&channel->sc_list);
		primary_channel->num_sc--;
		spin_unlock_irqrestore(&primary_channel->lock, flags);
	}

	/*
	 * We need to free the bit for init_vp_index() to work in the case
	 * of sub-channel, when we reload drivers like hv_netvsc.
	 */
	cpumask_clear_cpu(channel->target_cpu,
			  &primary_channel->alloced_cpus_in_node);

	free_channel(channel);
}

void vmbus_free_channels(void)
{
	struct vmbus_channel *channel, *tmp;

	list_for_each_entry_safe(channel, tmp, &vmbus_connection.chn_list,
		listentry) {
		/* hv_process_channel_removal() needs this */
		channel->rescind = true;

		vmbus_device_unregister(channel->device_obj);
	}
}

/*
 * vmbus_process_offer - Process the offer by creating a channel/device
 * associated with this offer
 */
static void vmbus_process_offer(struct vmbus_channel *newchannel)
{
	struct vmbus_channel *channel;
	bool fnew = true;
	unsigned long flags;
	u16 dev_type;
	int ret;

	/* Make sure this is a new offer */
	mutex_lock(&vmbus_connection.channel_mutex);

	list_for_each_entry(channel, &vmbus_connection.chn_list, listentry) {
		if (!uuid_le_cmp(channel->offermsg.offer.if_type,
				 newchannel->offermsg.offer.if_type) &&
		    !uuid_le_cmp(channel->offermsg.offer.if_instance,
				 newchannel->offermsg.offer.if_instance)) {
			fnew = false;
			break;
		}
	}

	if (fnew)
		list_add_tail(&newchannel->listentry,
			      &vmbus_connection.chn_list);

	mutex_unlock(&vmbus_connection.channel_mutex);

	if (!fnew) {
		/*
		 * Check to see if this is a sub-channel.
		 */
		if (newchannel->offermsg.offer.sub_channel_index != 0) {
			/*
			 * Process the sub-channel.
			 */
			newchannel->primary_channel = channel;
			spin_lock_irqsave(&channel->lock, flags);
			list_add_tail(&newchannel->sc_list, &channel->sc_list);
			channel->num_sc++;
			spin_unlock_irqrestore(&channel->lock, flags);
		} else {
			goto err_free_chan;
		}
	}

	dev_type = hv_get_dev_type(&newchannel->offermsg.offer.if_type);

	init_vp_index(newchannel, dev_type);

	if (newchannel->target_cpu != get_cpu()) {
		put_cpu();
		smp_call_function_single(newchannel->target_cpu,
					 percpu_channel_enq,
					 newchannel, true);
	} else {
		percpu_channel_enq(newchannel);
		put_cpu();
	}

	/*
	 * This state is used to indicate a successful open
	 * so that when we do close the channel normally, we
	 * can cleanup properly
	 */
	newchannel->state = CHANNEL_OPEN_STATE;

	if (!fnew) {
		if (channel->sc_creation_callback != NULL)
			channel->sc_creation_callback(newchannel);
		return;
	}

	/*
	 * Start the process of binding this offer to the driver
	 * We need to set the DeviceObject field before calling
	 * vmbus_child_dev_add()
	 */
	newchannel->device_obj = vmbus_device_create(
		&newchannel->offermsg.offer.if_type,
		&newchannel->offermsg.offer.if_instance,
		newchannel);
	if (!newchannel->device_obj)
		goto err_deq_chan;

	newchannel->device_obj->device_id = dev_type;
	/*
	 * Add the new device to the bus. This will kick off device-driver
	 * binding which eventually invokes the device driver's AddDevice()
	 * method.
	 */
	mutex_lock(&vmbus_connection.channel_mutex);
	ret = vmbus_device_register(newchannel->device_obj);
	mutex_unlock(&vmbus_connection.channel_mutex);

	if (ret != 0) {
		pr_err("unable to add child device object (relid %d)\n",
		       newchannel->offermsg.child_relid);
		kfree(newchannel->device_obj);
		goto err_deq_chan;
	}

	return;

err_deq_chan:
	vmbus_release_relid(newchannel->offermsg.child_relid);

	mutex_lock(&vmbus_connection.channel_mutex);
	list_del(&newchannel->listentry);
	mutex_unlock(&vmbus_connection.channel_mutex);

	if (newchannel->target_cpu != get_cpu()) {
		put_cpu();
		smp_call_function_single(newchannel->target_cpu,
					 percpu_channel_deq,
					 newchannel, true);
	} else {
		percpu_channel_deq(newchannel);
		put_cpu();
	}

err_free_chan:
	free_channel(newchannel);
}

/*
 * We use this state to statically distribute the channel interrupt load.
 */
static int next_numa_node_id;

/*
 * Starting with Win8, we can statically distribute the incoming
 * channel interrupt load by binding a channel to VCPU.
 * We do this in a hierarchical fashion:
 * First distribute the primary channels across available NUMA nodes
 * and then distribute the subchannels amongst the CPUs in the NUMA
 * node assigned to the primary channel.
 *
 * For pre-win8 hosts or non-performance critical channels we assign the
 * first CPU in the first NUMA node.
 */
static void init_vp_index(struct vmbus_channel *channel, u16 dev_type)
{
	u32 cur_cpu;
	bool perf_chn = vmbus_devs[dev_type].perf_device;
	struct vmbus_channel *primary = channel->primary_channel;
	int next_node;
	struct cpumask available_mask;
	struct cpumask *alloced_mask;

	if ((vmbus_proto_version == VERSION_WS2008) ||
	    (vmbus_proto_version == VERSION_WIN7) || (!perf_chn)) {
		/*
		 * Prior to win8, all channel interrupts are
		 * delivered on cpu 0.
		 * Also if the channel is not a performance critical
		 * channel, bind it to cpu 0.
		 */
		channel->numa_node = 0;
		channel->target_cpu = 0;
		channel->target_vp = hv_context.vp_index[0];
		return;
	}

	/*
	 * We distribute primary channels evenly across all the available
	 * NUMA nodes and within the assigned NUMA node we will assign the
	 * first available CPU to the primary channel.
	 * The sub-channels will be assigned to the CPUs available in the
	 * NUMA node evenly.
	 */
	if (!primary) {
		while (true) {
			next_node = next_numa_node_id++;
			if (next_node == nr_node_ids)
				next_node = next_numa_node_id = 0;
			if (cpumask_empty(cpumask_of_node(next_node)))
				continue;
			break;
		}
		channel->numa_node = next_node;
		primary = channel;
	}
	alloced_mask = &hv_context.hv_numa_map[primary->numa_node];

	if (cpumask_weight(alloced_mask) ==
	    cpumask_weight(cpumask_of_node(primary->numa_node))) {
		/*
		 * We have cycled through all the CPUs in the node;
		 * reset the alloced map.
		 */
		cpumask_clear(alloced_mask);
	}

	cpumask_xor(&available_mask, alloced_mask,
		    cpumask_of_node(primary->numa_node));

	cur_cpu = -1;

	/*
	 * Normally Hyper-V host doesn't create more subchannels than there
	 * are VCPUs on the node but it is possible when not all present VCPUs
	 * on the node are initialized by guest. Clear the alloced_cpus_in_node
	 * to start over.
	 */
	if (cpumask_equal(&primary->alloced_cpus_in_node,
			  cpumask_of_node(primary->numa_node)))
		cpumask_clear(&primary->alloced_cpus_in_node);

	while (true) {
		cur_cpu = cpumask_next(cur_cpu, &available_mask);
		if (cur_cpu >= nr_cpu_ids) {
			cur_cpu = -1;
			cpumask_copy(&available_mask,
				     cpumask_of_node(primary->numa_node));
			continue;
		}

		/*
		 * NOTE: in the case of sub-channel, we clear the sub-channel
		 * related bit(s) in primary->alloced_cpus_in_node in
		 * hv_process_channel_removal(), so when we reload drivers
		 * like hv_netvsc in SMP guest, here we're able to re-allocate
		 * bit from primary->alloced_cpus_in_node.
		 */
		if (!cpumask_test_cpu(cur_cpu,
				      &primary->alloced_cpus_in_node)) {
			cpumask_set_cpu(cur_cpu,
					&primary->alloced_cpus_in_node);
			cpumask_set_cpu(cur_cpu, alloced_mask);
			break;
		}
	}

	channel->target_cpu = cur_cpu;
	channel->target_vp = hv_context.vp_index[cur_cpu];
}

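/*
 * Worked example (not in the original source): on a guest with two NUMA
 * nodes of four VCPUs each, successive performance-critical primary
 * channels land on alternating nodes (node 0, node 1, node 0, ...). Each
 * sub-channel then takes the next CPU in its primary's node that is not
 * yet set in alloced_cpus_in_node, wrapping around once every CPU in the
 * node has been used.
 */
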
static void vmbus_wait_for_unload(void)
{
	int cpu = smp_processor_id();
	void *page_addr = hv_context.synic_message_page[cpu];
	struct hv_message *msg = (struct hv_message *)page_addr +
				  VMBUS_MESSAGE_SINT;
	struct vmbus_channel_message_header *hdr;
	bool unloaded = false;

	while (1) {
		if (msg->header.message_type == HVMSG_NONE) {
			mdelay(10);
			continue;
		}

		hdr = (struct vmbus_channel_message_header *)msg->u.payload;
		if (hdr->msgtype == CHANNELMSG_UNLOAD_RESPONSE)
			unloaded = true;

		msg->header.message_type = HVMSG_NONE;
		/*
		 * header.message_type needs to be written before we do
		 * wrmsrl() below.
		 */
		mb();

		if (msg->header.message_flags.msg_pending)
			wrmsrl(HV_X64_MSR_EOM, 0);

		if (unloaded)
			break;
	}
}

/*
 * vmbus_unload_response - Handler for the unload response.
 */
static void vmbus_unload_response(struct vmbus_channel_message_header *hdr)
{
	/*
	 * This is a global event; just wakeup the waiting thread.
	 * Once we successfully unload, we can cleanup the monitor state.
	 */
	complete(&vmbus_connection.unload_event);
}

void vmbus_initiate_unload(void)
{
	struct vmbus_channel_message_header hdr;

	/* Pre-Win2012R2 hosts don't support reconnect */
	if (vmbus_proto_version < VERSION_WIN8_1)
		return;

	init_completion(&vmbus_connection.unload_event);
	memset(&hdr, 0, sizeof(struct vmbus_channel_message_header));
	hdr.msgtype = CHANNELMSG_UNLOAD;
	vmbus_post_msg(&hdr, sizeof(struct vmbus_channel_message_header));

	/*
	 * vmbus_initiate_unload() is also called on crash and the crash can be
	 * happening in an interrupt context, where scheduling is impossible.
	 */
	if (!in_interrupt())
		wait_for_completion(&vmbus_connection.unload_event);
	else
		vmbus_wait_for_unload();
}

/*
 * vmbus_onoffer - Handler for channel offers from vmbus in parent partition.
 */
static void vmbus_onoffer(struct vmbus_channel_message_header *hdr)
{
	struct vmbus_channel_offer_channel *offer;
	struct vmbus_channel *newchannel;

	offer = (struct vmbus_channel_offer_channel *)hdr;

	/* Allocate the channel object and save this offer. */
	newchannel = alloc_channel();
	if (!newchannel) {
		pr_err("Unable to allocate channel object\n");
		return;
	}

	/*
	 * By default we setup state to enable batched
	 * reading. A specific service can choose to
	 * disable this prior to opening the channel.
	 */
	newchannel->batched_reading = true;

	/*
	 * Setup state for signalling the host.
	 */
	newchannel->sig_event = (struct hv_input_signal_event *)
				(ALIGN((unsigned long)
				&newchannel->sig_buf,
				HV_HYPERCALL_PARAM_ALIGN));

	newchannel->sig_event->connectionid.asu32 = 0;
	newchannel->sig_event->connectionid.u.id = VMBUS_EVENT_CONNECTION_ID;
	newchannel->sig_event->flag_number = 0;
	newchannel->sig_event->rsvdz = 0;

	if (vmbus_proto_version != VERSION_WS2008) {
		newchannel->is_dedicated_interrupt =
				(offer->is_dedicated_interrupt != 0);
		newchannel->sig_event->connectionid.u.id =
				offer->connection_id;
	}

	memcpy(&newchannel->offermsg, offer,
	       sizeof(struct vmbus_channel_offer_channel));
	newchannel->monitor_grp = (u8)offer->monitorid / 32;
	newchannel->monitor_bit = (u8)offer->monitorid % 32;

	vmbus_process_offer(newchannel);
}

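/*
 * Illustrative note (not in the original source): a service that prefers
 * per-packet interrupts over batched reading can clear the flag before
 * opening the channel, e.g. from its probe routine:
 *
 *	set_channel_read_state(dev->channel, false);
 *	ret = vmbus_open(dev->channel, ...);
 */
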
/*
 * vmbus_onoffer_rescind - Rescind offer handler.
 *
 * We queue a work item to process this offer synchronously
 */
static void vmbus_onoffer_rescind(struct vmbus_channel_message_header *hdr)
{
	struct vmbus_channel_rescind_offer *rescind;
	struct vmbus_channel *channel;
	unsigned long flags;
	struct device *dev;

	rescind = (struct vmbus_channel_rescind_offer *)hdr;

	mutex_lock(&vmbus_connection.channel_mutex);
	channel = relid2channel(rescind->child_relid);

	if (channel == NULL) {
		/*
		 * This should be impossible, because in
		 * vmbus_process_offer(), we have already invoked
		 * vmbus_release_relid() on error.
		 */
		goto out;
	}

	spin_lock_irqsave(&channel->lock, flags);
	channel->rescind = true;
	spin_unlock_irqrestore(&channel->lock, flags);

	if (channel->device_obj) {
		if (channel->chn_rescind_callback) {
			channel->chn_rescind_callback(channel);
			goto out;
		}
		/*
		 * We will have to unregister this device from the
		 * driver core.
		 */
		dev = get_device(&channel->device_obj->device);
		if (dev) {
			vmbus_device_unregister(channel->device_obj);
			put_device(dev);
		}
	} else {
		hv_process_channel_removal(channel,
					   channel->offermsg.child_relid);
	}

out:
	mutex_unlock(&vmbus_connection.channel_mutex);
}

void vmbus_hvsock_device_unregister(struct vmbus_channel *channel)
{
	mutex_lock(&vmbus_connection.channel_mutex);

	BUG_ON(!is_hvsock_channel(channel));

	channel->rescind = true;
	vmbus_device_unregister(channel->device_obj);

	mutex_unlock(&vmbus_connection.channel_mutex);
}
EXPORT_SYMBOL_GPL(vmbus_hvsock_device_unregister);

/*
 * vmbus_onoffers_delivered -
 * This is invoked when all offers have been delivered.
 *
 * Nothing to do here.
 */
static void vmbus_onoffers_delivered(
			struct vmbus_channel_message_header *hdr)
{
}

/*
 * vmbus_onopen_result - Open result handler.
 *
 * This is invoked when we received a response to our channel open request.
 * Find the matching request, copy the response and signal the requesting
 * thread.
 */
static void vmbus_onopen_result(struct vmbus_channel_message_header *hdr)
{
	struct vmbus_channel_open_result *result;
	struct vmbus_channel_msginfo *msginfo;
	struct vmbus_channel_message_header *requestheader;
	struct vmbus_channel_open_channel *openmsg;
	unsigned long flags;

	result = (struct vmbus_channel_open_result *)hdr;

	/*
	 * Find the open msg, copy the result and signal/unblock the wait event
	 */
	spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);

	list_for_each_entry(msginfo, &vmbus_connection.chn_msg_list,
				msglistentry) {
		requestheader =
			(struct vmbus_channel_message_header *)msginfo->msg;

		if (requestheader->msgtype == CHANNELMSG_OPENCHANNEL) {
			openmsg =
			(struct vmbus_channel_open_channel *)msginfo->msg;
			if (openmsg->child_relid == result->child_relid &&
			    openmsg->openid == result->openid) {
				memcpy(&msginfo->response.open_result,
				       result,
				       sizeof(
					struct vmbus_channel_open_result));
				complete(&msginfo->waitevent);
				break;
			}
		}
	}
	spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
}

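/*
 * For context (not part of the original file): the requesting side in
 * vmbus_open() queues its msginfo on vmbus_connection.chn_msg_list, posts
 * the CHANNELMSG_OPENCHANNEL message and then blocks in
 * wait_for_completion(&msginfo->waitevent); the handler above is what
 * copies the host's result in and completes that waitevent.
 */
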
/*
 * vmbus_ongpadl_created - GPADL created handler.
 *
 * This is invoked when we received a response to our gpadl create request.
 * Find the matching request, copy the response and signal the requesting
 * thread.
 */
static void vmbus_ongpadl_created(struct vmbus_channel_message_header *hdr)
{
	struct vmbus_channel_gpadl_created *gpadlcreated;
	struct vmbus_channel_msginfo *msginfo;
	struct vmbus_channel_message_header *requestheader;
	struct vmbus_channel_gpadl_header *gpadlheader;
	unsigned long flags;

	gpadlcreated = (struct vmbus_channel_gpadl_created *)hdr;

	/*
	 * Find the establish msg, copy the result and signal/unblock the wait
	 * event
	 */
	spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);

	list_for_each_entry(msginfo, &vmbus_connection.chn_msg_list,
				msglistentry) {
		requestheader =
			(struct vmbus_channel_message_header *)msginfo->msg;

		if (requestheader->msgtype == CHANNELMSG_GPADL_HEADER) {
			gpadlheader =
			(struct vmbus_channel_gpadl_header *)requestheader;

			if ((gpadlcreated->child_relid ==
			     gpadlheader->child_relid) &&
			    (gpadlcreated->gpadl == gpadlheader->gpadl)) {
				memcpy(&msginfo->response.gpadl_created,
				       gpadlcreated,
				       sizeof(
					struct vmbus_channel_gpadl_created));
				complete(&msginfo->waitevent);
				break;
			}
		}
	}
	spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
}

/*
 * vmbus_ongpadl_torndown - GPADL torndown handler.
 *
 * This is invoked when we received a response to our gpadl teardown request.
 * Find the matching request, copy the response and signal the requesting
 * thread.
 */
static void vmbus_ongpadl_torndown(
			struct vmbus_channel_message_header *hdr)
{
	struct vmbus_channel_gpadl_torndown *gpadl_torndown;
	struct vmbus_channel_msginfo *msginfo;
	struct vmbus_channel_message_header *requestheader;
	struct vmbus_channel_gpadl_teardown *gpadl_teardown;
	unsigned long flags;

	gpadl_torndown = (struct vmbus_channel_gpadl_torndown *)hdr;

	/*
	 * Find the open msg, copy the result and signal/unblock the wait event
	 */
	spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);

	list_for_each_entry(msginfo, &vmbus_connection.chn_msg_list,
				msglistentry) {
		requestheader =
			(struct vmbus_channel_message_header *)msginfo->msg;

		if (requestheader->msgtype == CHANNELMSG_GPADL_TEARDOWN) {
			gpadl_teardown =
			(struct vmbus_channel_gpadl_teardown *)requestheader;

			if (gpadl_torndown->gpadl == gpadl_teardown->gpadl) {
				memcpy(&msginfo->response.gpadl_torndown,
				       gpadl_torndown,
				       sizeof(
					struct vmbus_channel_gpadl_torndown));
				complete(&msginfo->waitevent);
				break;
			}
		}
	}
	spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
}

/*
 * vmbus_onversion_response - Version response handler
 *
 * This is invoked when we received a response to our initiate contact request.
 * Find the matching request, copy the response and signal the requesting
 * thread.
 */
static void vmbus_onversion_response(
		struct vmbus_channel_message_header *hdr)
{
	struct vmbus_channel_msginfo *msginfo;
	struct vmbus_channel_message_header *requestheader;
	struct vmbus_channel_version_response *version_response;
	unsigned long flags;

	version_response = (struct vmbus_channel_version_response *)hdr;
	spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);

	list_for_each_entry(msginfo, &vmbus_connection.chn_msg_list,
				msglistentry) {
		requestheader =
			(struct vmbus_channel_message_header *)msginfo->msg;

		if (requestheader->msgtype ==
		    CHANNELMSG_INITIATE_CONTACT) {
			memcpy(&msginfo->response.version_response,
			       version_response,
			       sizeof(struct vmbus_channel_version_response));
			complete(&msginfo->waitevent);
		}
	}
	spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
}

/* Channel message dispatch table */
struct vmbus_channel_message_table_entry
	channel_message_table[CHANNELMSG_COUNT] = {
	{CHANNELMSG_INVALID,			0, NULL},
	{CHANNELMSG_OFFERCHANNEL,		0, vmbus_onoffer},
	{CHANNELMSG_RESCIND_CHANNELOFFER,	0, vmbus_onoffer_rescind},
	{CHANNELMSG_REQUESTOFFERS,		0, NULL},
	{CHANNELMSG_ALLOFFERS_DELIVERED,	1, vmbus_onoffers_delivered},
	{CHANNELMSG_OPENCHANNEL,		0, NULL},
	{CHANNELMSG_OPENCHANNEL_RESULT,		1, vmbus_onopen_result},
	{CHANNELMSG_CLOSECHANNEL,		0, NULL},
	{CHANNELMSG_GPADL_HEADER,		0, NULL},
	{CHANNELMSG_GPADL_BODY,			0, NULL},
	{CHANNELMSG_GPADL_CREATED,		1, vmbus_ongpadl_created},
	{CHANNELMSG_GPADL_TEARDOWN,		0, NULL},
	{CHANNELMSG_GPADL_TORNDOWN,		1, vmbus_ongpadl_torndown},
	{CHANNELMSG_RELID_RELEASED,		0, NULL},
	{CHANNELMSG_INITIATE_CONTACT,		0, NULL},
	{CHANNELMSG_VERSION_RESPONSE,		1, vmbus_onversion_response},
	{CHANNELMSG_UNLOAD,			0, NULL},
	{CHANNELMSG_UNLOAD_RESPONSE,		1, vmbus_unload_response},
	{CHANNELMSG_18,				0, NULL},
	{CHANNELMSG_19,				0, NULL},
	{CHANNELMSG_20,				0, NULL},
	{CHANNELMSG_TL_CONNECT_REQUEST,		0, NULL},
};

/*
 * vmbus_onmessage - Handler for channel protocol messages.
 *
 * This is invoked in the vmbus worker thread context.
 */
void vmbus_onmessage(void *context)
{
	struct hv_message *msg = context;
	struct vmbus_channel_message_header *hdr;
	int size;

	hdr = (struct vmbus_channel_message_header *)msg->u.payload;
	size = msg->header.payload_size;

	if (hdr->msgtype >= CHANNELMSG_COUNT) {
		pr_err("Received invalid channel message type %d size %d\n",
		       hdr->msgtype, size);
		print_hex_dump_bytes("", DUMP_PREFIX_NONE,
				     (unsigned char *)msg->u.payload, size);
		return;
	}

	if (channel_message_table[hdr->msgtype].message_handler)
		channel_message_table[hdr->msgtype].message_handler(hdr);
	else
		pr_err("Unhandled channel message type %d\n", hdr->msgtype);
}

/*
 * vmbus_request_offers - Send a request to get all our pending offers.
 */
int vmbus_request_offers(void)
{
	struct vmbus_channel_message_header *msg;
	struct vmbus_channel_msginfo *msginfo;
	int ret;

	msginfo = kmalloc(sizeof(*msginfo) +
			  sizeof(struct vmbus_channel_message_header),
			  GFP_KERNEL);
	if (!msginfo)
		return -ENOMEM;

	msg = (struct vmbus_channel_message_header *)msginfo->msg;

	msg->msgtype = CHANNELMSG_REQUESTOFFERS;

	ret = vmbus_post_msg(msg,
			     sizeof(struct vmbus_channel_message_header));
	if (ret != 0) {
		pr_err("Unable to request offers - %d\n", ret);
		goto cleanup;
	}

cleanup:
	kfree(msginfo);

	return ret;
}

/*
 * Retrieve the (sub) channel on which to send an outgoing request.
 * When a primary channel has multiple sub-channels, we try to
 * distribute the load equally amongst all available channels.
 */
struct vmbus_channel *vmbus_get_outgoing_channel(struct vmbus_channel *primary)
{
	struct list_head *cur, *tmp;
	int cur_cpu;
	struct vmbus_channel *cur_channel;
	struct vmbus_channel *outgoing_channel = primary;
	int next_channel;
	int i = 1;

	if (list_empty(&primary->sc_list))
		return outgoing_channel;

	next_channel = primary->next_oc++;

	if (next_channel > (primary->num_sc)) {
		primary->next_oc = 0;
		return outgoing_channel;
	}

	cur_cpu = hv_context.vp_index[get_cpu()];
	put_cpu();
	list_for_each_safe(cur, tmp, &primary->sc_list) {
		cur_channel = list_entry(cur, struct vmbus_channel, sc_list);
		if (cur_channel->state != CHANNEL_OPENED_STATE)
			continue;

		if (cur_channel->target_vp == cur_cpu)
			return cur_channel;

		if (i == next_channel)
			return cur_channel;

		i++;
	}

	return outgoing_channel;
}
EXPORT_SYMBOL_GPL(vmbus_get_outgoing_channel);

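/*
 * Usage sketch (not part of the original file): a multi-queue VSC driver
 * typically picks the channel once per outgoing request:
 *
 *	channel = vmbus_get_outgoing_channel(device->channel);
 *	ret = vmbus_sendpacket(channel, buf, len, requestid,
 *			       VM_PKT_DATA_INBAND,
 *			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
 */
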
static void invoke_sc_cb(struct vmbus_channel *primary_channel)
{
	struct list_head *cur, *tmp;
	struct vmbus_channel *cur_channel;

	if (primary_channel->sc_creation_callback == NULL)
		return;

	list_for_each_safe(cur, tmp, &primary_channel->sc_list) {
		cur_channel = list_entry(cur, struct vmbus_channel, sc_list);

		primary_channel->sc_creation_callback(cur_channel);
	}
}

void vmbus_set_sc_create_callback(struct vmbus_channel *primary_channel,
				void (*sc_cr_cb)(struct vmbus_channel *new_sc))
{
	primary_channel->sc_creation_callback = sc_cr_cb;
}
EXPORT_SYMBOL_GPL(vmbus_set_sc_create_callback);

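/*
 * Usage sketch (not part of the original file): a driver that wants
 * sub-channels installs the callback on its primary channel before
 * requesting them from the host:
 *
 *	vmbus_set_sc_create_callback(primary, handle_sc_creation);
 *
 * where handle_sc_creation() is the driver's own handler, which would
 * typically vmbus_open() each new sub-channel.
 */
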
bool vmbus_are_subchannels_present(struct vmbus_channel *primary)
{
	bool ret;

	ret = !list_empty(&primary->sc_list);

	if (ret) {
		/*
		 * Invoke the callback on sub-channel creation.
		 * This will present a uniform interface to the
		 * clients.
		 */
		invoke_sc_cb(primary);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(vmbus_are_subchannels_present);

void vmbus_set_chn_rescind_callback(struct vmbus_channel *channel,
		void (*chn_rescind_cb)(struct vmbus_channel *))
{
	channel->chn_rescind_callback = chn_rescind_cb;
}
EXPORT_SYMBOL_GPL(vmbus_set_chn_rescind_callback);
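
/*
 * Usage sketch (not part of the original file, handler name hypothetical):
 * hv_sock-style users install a rescind callback so per-connection state
 * can be torn down when the host rescinds the offer:
 *
 *	vmbus_set_chn_rescind_callback(channel, my_close_connection);
 */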