Drivers: hv: vmbus: avoid wait_for_completion() on crash
drivers/hv/channel_mgmt.c (linux-2.6-block.git)
1 /*
2  * Copyright (c) 2009, Microsoft Corporation.
3  *
4  * This program is free software; you can redistribute it and/or modify it
5  * under the terms and conditions of the GNU General Public License,
6  * version 2, as published by the Free Software Foundation.
7  *
8  * This program is distributed in the hope it will be useful, but WITHOUT
9  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
11  * more details.
12  *
13  * You should have received a copy of the GNU General Public License along with
14  * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
15  * Place - Suite 330, Boston, MA 02111-1307 USA.
16  *
17  * Authors:
18  *   Haiyang Zhang <haiyangz@microsoft.com>
19  *   Hank Janssen  <hjanssen@microsoft.com>
20  */
21 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
22
23 #include <linux/kernel.h>
24 #include <linux/sched.h>
25 #include <linux/wait.h>
26 #include <linux/mm.h>
27 #include <linux/slab.h>
28 #include <linux/list.h>
29 #include <linux/module.h>
30 #include <linux/completion.h>
31 #include <linux/delay.h>
32 #include <linux/hyperv.h>
33
34 #include "hyperv_vmbus.h"
35
36 static void init_vp_index(struct vmbus_channel *channel, u16 dev_type);
37
38 static const struct vmbus_device vmbus_devs[] = {
39         /* IDE */
40         { .dev_type = HV_IDE,
41           HV_IDE_GUID,
42           .perf_device = true,
43         },
44
45         /* SCSI */
46         { .dev_type = HV_SCSI,
47           HV_SCSI_GUID,
48           .perf_device = true,
49         },
50
51         /* Fibre Channel */
52         { .dev_type = HV_FC,
53           HV_SYNTHFC_GUID,
54           .perf_device = true,
55         },
56
57         /* Synthetic NIC */
58         { .dev_type = HV_NIC,
59           HV_NIC_GUID,
60           .perf_device = true,
61         },
62
63         /* Network Direct */
64         { .dev_type = HV_ND,
65           HV_ND_GUID,
66           .perf_device = true,
67         },
68
69         /* PCIE */
70         { .dev_type = HV_PCIE,
71           HV_PCIE_GUID,
72           .perf_device = true,
73         },
74
75         /* Synthetic Frame Buffer */
76         { .dev_type = HV_FB,
77           HV_SYNTHVID_GUID,
78           .perf_device = false,
79         },
80
81         /* Synthetic Keyboard */
82         { .dev_type = HV_KBD,
83           HV_KBD_GUID,
84           .perf_device = false,
85         },
86
87         /* Synthetic MOUSE */
88         { .dev_type = HV_MOUSE,
89           HV_MOUSE_GUID,
90           .perf_device = false,
91         },
92
93         /* KVP */
94         { .dev_type = HV_KVP,
95           HV_KVP_GUID,
96           .perf_device = false,
97         },
98
99         /* Time Synch */
100         { .dev_type = HV_TS,
101           HV_TS_GUID,
102           .perf_device = false,
103         },
104
105         /* Heartbeat */
106         { .dev_type = HV_HB,
107           HV_HEART_BEAT_GUID,
108           .perf_device = false,
109         },
110
111         /* Shutdown */
112         { .dev_type = HV_SHUTDOWN,
113           HV_SHUTDOWN_GUID,
114           .perf_device = false,
115         },
116
117         /* File copy */
118         { .dev_type = HV_FCOPY,
119           HV_FCOPY_GUID,
120           .perf_device = false,
121         },
122
123         /* Backup */
124         { .dev_type = HV_BACKUP,
125           HV_VSS_GUID,
126           .perf_device = false,
127         },
128
129         /* Dynamic Memory */
130         { .dev_type = HV_DM,
131           HV_DM_GUID,
132           .perf_device = false,
133         },
134
135         /* Unknown GUID */
136         { .dev_type = HV_UNKOWN,
137           .perf_device = false,
138         },
139 };
140
141 static u16 hv_get_dev_type(const uuid_le *guid)
142 {
143         u16 i;
144
145         for (i = HV_IDE; i < HV_UNKOWN; i++) {
146                 if (!uuid_le_cmp(*guid, vmbus_devs[i].guid))
147                         return i;
148         }
149         pr_info("Unknown GUID: %pUl\n", guid);
150         return i;
151 }
152
153 /**
154  * vmbus_prep_negotiate_resp() - Create default response for Hyper-V Negotiate message
155  * @icmsghdrp: Pointer to msg header structure
156  * @negop: Pointer to negotiate message structure
157  * @buf: Raw buffer channel data
158  *
159  * @icmsghdrp is of type &struct icmsg_hdr.
160  * @negop is of type &struct icmsg_negotiate.
161  * Set up and fill in default negotiate response message.
162  *
163  * @fw_version specifies the framework version that
164  * we can support and @srv_version specifies the service
165  * version we can support.
166  *
167  * Mainly used by Hyper-V drivers.
168  */
169 bool vmbus_prep_negotiate_resp(struct icmsg_hdr *icmsghdrp,
170                                 struct icmsg_negotiate *negop, u8 *buf,
171                                 int fw_version, int srv_version)
172 {
173         int icframe_major, icframe_minor;
174         int icmsg_major, icmsg_minor;
175         int fw_major, fw_minor;
176         int srv_major, srv_minor;
177         int i;
178         bool found_match = false;
179
180         icmsghdrp->icmsgsize = 0x10;
181         fw_major = (fw_version >> 16);
182         fw_minor = (fw_version & 0xFFFF);
183
184         srv_major = (srv_version >> 16);
185         srv_minor = (srv_version & 0xFFFF);
186
187         negop = (struct icmsg_negotiate *)&buf[
188                 sizeof(struct vmbuspipe_hdr) +
189                 sizeof(struct icmsg_hdr)];
190
191         icframe_major = negop->icframe_vercnt;
192         icframe_minor = 0;
193
194         icmsg_major = negop->icmsg_vercnt;
195         icmsg_minor = 0;
196
197         /*
198          * Select the framework version number we will
199          * support.
200          */
201
202         for (i = 0; i < negop->icframe_vercnt; i++) {
203                 if ((negop->icversion_data[i].major == fw_major) &&
204                    (negop->icversion_data[i].minor == fw_minor)) {
205                         icframe_major = negop->icversion_data[i].major;
206                         icframe_minor = negop->icversion_data[i].minor;
207                         found_match = true;
208                 }
209         }
210
211         if (!found_match)
212                 goto fw_error;
213
214         found_match = false;
215
216         for (i = negop->icframe_vercnt;
217                  (i < negop->icframe_vercnt + negop->icmsg_vercnt); i++) {
218                 if ((negop->icversion_data[i].major == srv_major) &&
219                    (negop->icversion_data[i].minor == srv_minor)) {
220                         icmsg_major = negop->icversion_data[i].major;
221                         icmsg_minor = negop->icversion_data[i].minor;
222                         found_match = true;
223                 }
224         }
225
226         /*
227          * Respond with the framework and service
228          * version numbers we can support.
229          */
230
231 fw_error:
232         if (!found_match) {
233                 negop->icframe_vercnt = 0;
234                 negop->icmsg_vercnt = 0;
235         } else {
236                 negop->icframe_vercnt = 1;
237                 negop->icmsg_vercnt = 1;
238         }
239
240         negop->icversion_data[0].major = icframe_major;
241         negop->icversion_data[0].minor = icframe_minor;
242         negop->icversion_data[1].major = icmsg_major;
243         negop->icversion_data[1].minor = icmsg_minor;
244         return found_match;
245 }
246
247 EXPORT_SYMBOL_GPL(vmbus_prep_negotiate_resp);
248
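/*
 * Illustrative sketch (not part of this file): a Hyper-V utility driver's
 * channel callback typically locates the icmsg header in the buffer it just
 * read, lets vmbus_prep_negotiate_resp() rewrite the buffer in place for
 * negotiate requests, and then returns the buffer to the host with
 * vmbus_sendpacket().  EXAMPLE_FW_VERSION and EXAMPLE_SRV_VERSION are
 * hypothetical (major << 16 | minor) encodings, and the negop argument may
 * be NULL here because the function above recomputes it from @buf:
 *
 *	icmsghdrp = (struct icmsg_hdr *)&buf[sizeof(struct vmbuspipe_hdr)];
 *	if (icmsghdrp->icmsgtype == ICMSGTYPE_NEGOTIATE)
 *		vmbus_prep_negotiate_resp(icmsghdrp, NULL, buf,
 *					  EXAMPLE_FW_VERSION,
 *					  EXAMPLE_SRV_VERSION);
 */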
249 /*
250  * alloc_channel - Allocate and initialize a vmbus channel object
251  */
252 static struct vmbus_channel *alloc_channel(void)
253 {
254         static atomic_t chan_num = ATOMIC_INIT(0);
255         struct vmbus_channel *channel;
256
257         channel = kzalloc(sizeof(*channel), GFP_ATOMIC);
258         if (!channel)
259                 return NULL;
260
261         channel->id = atomic_inc_return(&chan_num);
262         channel->acquire_ring_lock = true;
263         spin_lock_init(&channel->inbound_lock);
264         spin_lock_init(&channel->lock);
265
266         INIT_LIST_HEAD(&channel->sc_list);
267         INIT_LIST_HEAD(&channel->percpu_list);
268
269         return channel;
270 }
271
272 /*
273  * free_channel - Release the resources used by the vmbus channel object
274  */
275 static void free_channel(struct vmbus_channel *channel)
276 {
277         kfree(channel);
278 }
279
280 static void percpu_channel_enq(void *arg)
281 {
282         struct vmbus_channel *channel = arg;
283         int cpu = smp_processor_id();
284
285         list_add_tail(&channel->percpu_list, &hv_context.percpu_list[cpu]);
286 }
287
288 static void percpu_channel_deq(void *arg)
289 {
290         struct vmbus_channel *channel = arg;
291
292         list_del(&channel->percpu_list);
293 }
294
295
296 static void vmbus_release_relid(u32 relid)
297 {
298         struct vmbus_channel_relid_released msg;
299
300         memset(&msg, 0, sizeof(struct vmbus_channel_relid_released));
301         msg.child_relid = relid;
302         msg.header.msgtype = CHANNELMSG_RELID_RELEASED;
303         vmbus_post_msg(&msg, sizeof(struct vmbus_channel_relid_released));
304 }
305
306 void hv_process_channel_removal(struct vmbus_channel *channel, u32 relid)
307 {
308         unsigned long flags;
309         struct vmbus_channel *primary_channel;
310
311         vmbus_release_relid(relid);
312
313         BUG_ON(!channel->rescind);
314         BUG_ON(!mutex_is_locked(&vmbus_connection.channel_mutex));
315
316         if (channel->target_cpu != get_cpu()) {
317                 put_cpu();
318                 smp_call_function_single(channel->target_cpu,
319                                          percpu_channel_deq, channel, true);
320         } else {
321                 percpu_channel_deq(channel);
322                 put_cpu();
323         }
324
325         if (channel->primary_channel == NULL) {
326                 list_del(&channel->listentry);
327
328                 primary_channel = channel;
329         } else {
330                 primary_channel = channel->primary_channel;
331                 spin_lock_irqsave(&primary_channel->lock, flags);
332                 list_del(&channel->sc_list);
333                 primary_channel->num_sc--;
334                 spin_unlock_irqrestore(&primary_channel->lock, flags);
335         }
336
337         /*
338          * We need to free the bit so that init_vp_index() can reuse it for
339          * the sub-channels, e.g. when a driver such as hv_netvsc is reloaded.
340          */
341         cpumask_clear_cpu(channel->target_cpu,
342                           &primary_channel->alloced_cpus_in_node);
343
344         free_channel(channel);
345 }
346
347 void vmbus_free_channels(void)
348 {
349         struct vmbus_channel *channel, *tmp;
350
351         list_for_each_entry_safe(channel, tmp, &vmbus_connection.chn_list,
352                 listentry) {
353                 /* hv_process_channel_removal() needs this */
354                 channel->rescind = true;
355
356                 vmbus_device_unregister(channel->device_obj);
357         }
358 }
359
360 /*
361  * vmbus_process_offer - Process the offer by creating a channel/device
362  * associated with this offer
363  */
364 static void vmbus_process_offer(struct vmbus_channel *newchannel)
365 {
366         struct vmbus_channel *channel;
367         bool fnew = true;
368         unsigned long flags;
369         u16 dev_type;
370         int ret;
371
372         /* Make sure this is a new offer */
373         mutex_lock(&vmbus_connection.channel_mutex);
374
375         list_for_each_entry(channel, &vmbus_connection.chn_list, listentry) {
376                 if (!uuid_le_cmp(channel->offermsg.offer.if_type,
377                         newchannel->offermsg.offer.if_type) &&
378                         !uuid_le_cmp(channel->offermsg.offer.if_instance,
379                                 newchannel->offermsg.offer.if_instance)) {
380                         fnew = false;
381                         break;
382                 }
383         }
384
385         if (fnew)
386                 list_add_tail(&newchannel->listentry,
387                               &vmbus_connection.chn_list);
388
389         mutex_unlock(&vmbus_connection.channel_mutex);
390
391         if (!fnew) {
392                 /*
393                  * Check to see if this is a sub-channel.
394                  */
395                 if (newchannel->offermsg.offer.sub_channel_index != 0) {
396                         /*
397                          * Process the sub-channel.
398                          */
399                         newchannel->primary_channel = channel;
400                         spin_lock_irqsave(&channel->lock, flags);
401                         list_add_tail(&newchannel->sc_list, &channel->sc_list);
402                         channel->num_sc++;
403                         spin_unlock_irqrestore(&channel->lock, flags);
404                 } else
405                         goto err_free_chan;
406         }
407
408         dev_type = hv_get_dev_type(&newchannel->offermsg.offer.if_type);
409
410         init_vp_index(newchannel, dev_type);
411
412         if (newchannel->target_cpu != get_cpu()) {
413                 put_cpu();
414                 smp_call_function_single(newchannel->target_cpu,
415                                          percpu_channel_enq,
416                                          newchannel, true);
417         } else {
418                 percpu_channel_enq(newchannel);
419                 put_cpu();
420         }
421
422         /*
423          * This state is used to indicate a successful open
424          * so that when we do close the channel normally, we
425          * can clean up properly.
426          */
427         newchannel->state = CHANNEL_OPEN_STATE;
428
429         if (!fnew) {
430                 if (channel->sc_creation_callback != NULL)
431                         channel->sc_creation_callback(newchannel);
432                 return;
433         }
434
435         /*
436          * Start the process of binding this offer to the driver
437          * We need to set the DeviceObject field before calling
438          * vmbus_child_dev_add()
439          */
440         newchannel->device_obj = vmbus_device_create(
441                 &newchannel->offermsg.offer.if_type,
442                 &newchannel->offermsg.offer.if_instance,
443                 newchannel);
444         if (!newchannel->device_obj)
445                 goto err_deq_chan;
446
447         newchannel->device_obj->device_id = dev_type;
448         /*
449          * Add the new device to the bus. This will kick off device-driver
450          * binding which eventually invokes the device driver's AddDevice()
451          * method.
452          */
453         mutex_lock(&vmbus_connection.channel_mutex);
454         ret = vmbus_device_register(newchannel->device_obj);
455         mutex_unlock(&vmbus_connection.channel_mutex);
456
457         if (ret != 0) {
458                 pr_err("unable to add child device object (relid %d)\n",
459                         newchannel->offermsg.child_relid);
460                 kfree(newchannel->device_obj);
461                 goto err_deq_chan;
462         }
463         return;
464
465 err_deq_chan:
466         vmbus_release_relid(newchannel->offermsg.child_relid);
467
468         mutex_lock(&vmbus_connection.channel_mutex);
469         list_del(&newchannel->listentry);
470         mutex_unlock(&vmbus_connection.channel_mutex);
471
472         if (newchannel->target_cpu != get_cpu()) {
473                 put_cpu();
474                 smp_call_function_single(newchannel->target_cpu,
475                                          percpu_channel_deq, newchannel, true);
476         } else {
477                 percpu_channel_deq(newchannel);
478                 put_cpu();
479         }
480
481 err_free_chan:
482         free_channel(newchannel);
483 }
484
485 /*
486  * We use this state to statically distribute the channel interrupt load.
487  */
488 static int next_numa_node_id;
489
490 /*
491  * Starting with Win8, we can statically distribute the incoming
492  * channel interrupt load by binding a channel to a VCPU.
493  * We do this in a hierarchical fashion:
494  * First distribute the primary channels across available NUMA nodes
495  * and then distribute the subchannels amongst the CPUs in the NUMA
496  * node assigned to the primary channel.
497  *
498  * For pre-win8 hosts or non-performance critical channels we assign the
499  * first CPU in the first NUMA node.
500  */
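/*
 * Worked example (illustrative): on a guest spanning two NUMA nodes,
 * successive performance-critical primary channels are placed on node 0,
 * node 1, node 0, ... and the sub-channels of a primary placed on node 1
 * are then spread over the CPUs of node 1 that have not been used yet.
 */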
501 static void init_vp_index(struct vmbus_channel *channel, u16 dev_type)
502 {
503         u32 cur_cpu;
504         bool perf_chn = vmbus_devs[dev_type].perf_device;
505         struct vmbus_channel *primary = channel->primary_channel;
506         int next_node;
507         struct cpumask available_mask;
508         struct cpumask *alloced_mask;
509
510         if ((vmbus_proto_version == VERSION_WS2008) ||
511             (vmbus_proto_version == VERSION_WIN7) || (!perf_chn)) {
512                 /*
513                  * Prior to win8, all channel interrupts are
514                  * delivered on cpu 0.
515                  * Also if the channel is not a performance critical
516                  * channel, bind it to cpu 0.
517                  */
518                 channel->numa_node = 0;
519                 channel->target_cpu = 0;
520                 channel->target_vp = hv_context.vp_index[0];
521                 return;
522         }
523
524         /*
525          * We distribute primary channels evenly across all the available
526          * NUMA nodes and within the assigned NUMA node we will assign the
527          * first available CPU to the primary channel.
528          * The sub-channels will be assigned to the CPUs available in the
529          * NUMA node evenly.
530          */
531         if (!primary) {
532                 while (true) {
533                         next_node = next_numa_node_id++;
534                         if (next_node == nr_node_ids)
535                                 next_node = next_numa_node_id = 0;
536                         if (cpumask_empty(cpumask_of_node(next_node)))
537                                 continue;
538                         break;
539                 }
540                 channel->numa_node = next_node;
541                 primary = channel;
542         }
543         alloced_mask = &hv_context.hv_numa_map[primary->numa_node];
544
545         if (cpumask_weight(alloced_mask) ==
546             cpumask_weight(cpumask_of_node(primary->numa_node))) {
547                 /*
548                  * We have cycled through all the CPUs in the node;
549                  * reset the alloced map.
550                  */
551                 cpumask_clear(alloced_mask);
552         }
553
554         cpumask_xor(&available_mask, alloced_mask,
555                     cpumask_of_node(primary->numa_node));
556
557         cur_cpu = -1;
558
559         /*
560          * Normally the Hyper-V host doesn't create more subchannels than there
561          * are VCPUs on the node, but it is possible when not all present VCPUs
562          * on the node are initialized by the guest. Clear the alloced_cpus_in_node
563          * to start over.
564          */
565         if (cpumask_equal(&primary->alloced_cpus_in_node,
566                           cpumask_of_node(primary->numa_node)))
567                 cpumask_clear(&primary->alloced_cpus_in_node);
568
569         while (true) {
570                 cur_cpu = cpumask_next(cur_cpu, &available_mask);
571                 if (cur_cpu >= nr_cpu_ids) {
572                         cur_cpu = -1;
573                         cpumask_copy(&available_mask,
574                                      cpumask_of_node(primary->numa_node));
575                         continue;
576                 }
577
578                 /*
579                  * NOTE: in the case of sub-channel, we clear the sub-channel
580                  * related bit(s) in primary->alloced_cpus_in_node in
581                  * hv_process_channel_removal(), so when we reload drivers
582                  * like hv_netvsc in SMP guest, here we're able to re-allocate
583                  * bit from primary->alloced_cpus_in_node.
584                  */
585                 if (!cpumask_test_cpu(cur_cpu,
586                                 &primary->alloced_cpus_in_node)) {
587                         cpumask_set_cpu(cur_cpu,
588                                         &primary->alloced_cpus_in_node);
589                         cpumask_set_cpu(cur_cpu, alloced_mask);
590                         break;
591                 }
592         }
593
594         channel->target_cpu = cur_cpu;
595         channel->target_vp = hv_context.vp_index[cur_cpu];
596 }
597
598 static void vmbus_wait_for_unload(void)
599 {
600         int cpu = smp_processor_id();
601         void *page_addr = hv_context.synic_message_page[cpu];
602         struct hv_message *msg = (struct hv_message *)page_addr +
603                                   VMBUS_MESSAGE_SINT;
604         struct vmbus_channel_message_header *hdr;
605         bool unloaded = false;
606
607         while (1) {
608                 if (msg->header.message_type == HVMSG_NONE) {
609                         mdelay(10);
610                         continue;
611                 }
612
613                 hdr = (struct vmbus_channel_message_header *)msg->u.payload;
614                 if (hdr->msgtype == CHANNELMSG_UNLOAD_RESPONSE)
615                         unloaded = true;
616
617                 msg->header.message_type = HVMSG_NONE;
618                 /*
619                  * header.message_type needs to be written before we do
620                  * wrmsrl() below.
621                  */
622                 mb();
623
624                 if (msg->header.message_flags.msg_pending)
625                         wrmsrl(HV_X64_MSR_EOM, 0);
626
627                 if (unloaded)
628                         break;
629         }
630 }
631
632 /*
633  * vmbus_unload_response - Handler for the unload response.
634  */
635 static void vmbus_unload_response(struct vmbus_channel_message_header *hdr)
636 {
637         /*
638          * This is a global event; just wakeup the waiting thread.
639          * Once we successfully unload, we can cleanup the monitor state.
640          */
641         complete(&vmbus_connection.unload_event);
642 }
643
644 void vmbus_initiate_unload(bool crash)
645 {
646         struct vmbus_channel_message_header hdr;
647
648         /* Pre-Win2012R2 hosts don't support reconnect */
649         if (vmbus_proto_version < VERSION_WIN8_1)
650                 return;
651
652         init_completion(&vmbus_connection.unload_event);
653         memset(&hdr, 0, sizeof(struct vmbus_channel_message_header));
654         hdr.msgtype = CHANNELMSG_UNLOAD;
655         vmbus_post_msg(&hdr, sizeof(struct vmbus_channel_message_header));
656
657         /*
658          * vmbus_initiate_unload() is also called on crash, and the crash can
659          * happen in an interrupt context, where scheduling is impossible.
660          */
661         if (!crash)
662                 wait_for_completion(&vmbus_connection.unload_event);
663         else
664                 vmbus_wait_for_unload();
665 }
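/*
 * Call-site sketch (illustrative; the callers live in vmbus_drv.c): the
 * regular unload paths may sleep, while the crash path may not:
 *
 *	vmbus_exit()        -> vmbus_initiate_unload(false);
 *	hv_kexec_handler()  -> vmbus_initiate_unload(false);
 *	hv_crash_handler()  -> vmbus_initiate_unload(true);
 */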
666
667 /*
668  * vmbus_onoffer - Handler for channel offers from vmbus in parent partition.
669  *
670  */
671 static void vmbus_onoffer(struct vmbus_channel_message_header *hdr)
672 {
673         struct vmbus_channel_offer_channel *offer;
674         struct vmbus_channel *newchannel;
675
676         offer = (struct vmbus_channel_offer_channel *)hdr;
677
678         /* Allocate the channel object and save this offer. */
679         newchannel = alloc_channel();
680         if (!newchannel) {
681                 pr_err("Unable to allocate channel object\n");
682                 return;
683         }
684
685         /*
686          * By default we set up state to enable batched
687          * reading. A specific service can choose to
688          * disable this prior to opening the channel.
689          */
690         newchannel->batched_reading = true;
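        /*
         * Illustrative note (sketch): a service that prefers per-packet
         * interrupts can flip this back off before vmbus_open(), e.g. with
         * the set_channel_read_state() helper from include/linux/hyperv.h:
         *
         *	set_channel_read_state(channel, false);
         */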
691
692         /*
693          * Setup state for signalling the host.
694          */
695         newchannel->sig_event = (struct hv_input_signal_event *)
696                                 (ALIGN((unsigned long)
697                                 &newchannel->sig_buf,
698                                 HV_HYPERCALL_PARAM_ALIGN));
699
700         newchannel->sig_event->connectionid.asu32 = 0;
701         newchannel->sig_event->connectionid.u.id = VMBUS_EVENT_CONNECTION_ID;
702         newchannel->sig_event->flag_number = 0;
703         newchannel->sig_event->rsvdz = 0;
704
705         if (vmbus_proto_version != VERSION_WS2008) {
706                 newchannel->is_dedicated_interrupt =
707                                 (offer->is_dedicated_interrupt != 0);
708                 newchannel->sig_event->connectionid.u.id =
709                                 offer->connection_id;
710         }
711
712         memcpy(&newchannel->offermsg, offer,
713                sizeof(struct vmbus_channel_offer_channel));
714         newchannel->monitor_grp = (u8)offer->monitorid / 32;
715         newchannel->monitor_bit = (u8)offer->monitorid % 32;
716
717         vmbus_process_offer(newchannel);
718 }
719
720 /*
721  * vmbus_onoffer_rescind - Rescind offer handler.
722  *
723  * We queue a work item to process this offer synchronously
724  */
725 static void vmbus_onoffer_rescind(struct vmbus_channel_message_header *hdr)
726 {
727         struct vmbus_channel_rescind_offer *rescind;
728         struct vmbus_channel *channel;
729         unsigned long flags;
730         struct device *dev;
731
732         rescind = (struct vmbus_channel_rescind_offer *)hdr;
733
734         mutex_lock(&vmbus_connection.channel_mutex);
735         channel = relid2channel(rescind->child_relid);
736
737         if (channel == NULL) {
738                 /*
739                  * This should never happen: in
740                  * vmbus_process_offer() we have already invoked
741                  * vmbus_release_relid() on the error path.
742                  */
743                 goto out;
744         }
745
746         spin_lock_irqsave(&channel->lock, flags);
747         channel->rescind = true;
748         spin_unlock_irqrestore(&channel->lock, flags);
749
750         if (channel->device_obj) {
751                 if (channel->chn_rescind_callback) {
752                         channel->chn_rescind_callback(channel);
753                         goto out;
754                 }
755                 /*
756                  * We will have to unregister this device from the
757                  * driver core.
758                  */
759                 dev = get_device(&channel->device_obj->device);
760                 if (dev) {
761                         vmbus_device_unregister(channel->device_obj);
762                         put_device(dev);
763                 }
764         } else {
765                 hv_process_channel_removal(channel,
766                         channel->offermsg.child_relid);
767         }
768
769 out:
770         mutex_unlock(&vmbus_connection.channel_mutex);
771 }
772
773 void vmbus_hvsock_device_unregister(struct vmbus_channel *channel)
774 {
775         mutex_lock(&vmbus_connection.channel_mutex);
776
777         BUG_ON(!is_hvsock_channel(channel));
778
779         channel->rescind = true;
780         vmbus_device_unregister(channel->device_obj);
781
782         mutex_unlock(&vmbus_connection.channel_mutex);
783 }
784 EXPORT_SYMBOL_GPL(vmbus_hvsock_device_unregister);
785
786
787 /*
788  * vmbus_onoffers_delivered -
789  * This is invoked when all offers have been delivered.
790  *
791  * Nothing to do here.
792  */
793 static void vmbus_onoffers_delivered(
794                         struct vmbus_channel_message_header *hdr)
795 {
796 }
797
798 /*
799  * vmbus_onopen_result - Open result handler.
800  *
801  * This is invoked when we received a response to our channel open request.
802  * Find the matching request, copy the response and signal the requesting
803  * thread.
804  */
805 static void vmbus_onopen_result(struct vmbus_channel_message_header *hdr)
806 {
807         struct vmbus_channel_open_result *result;
808         struct vmbus_channel_msginfo *msginfo;
809         struct vmbus_channel_message_header *requestheader;
810         struct vmbus_channel_open_channel *openmsg;
811         unsigned long flags;
812
813         result = (struct vmbus_channel_open_result *)hdr;
814
815         /*
816          * Find the open msg, copy the result and signal/unblock the wait event
817          */
818         spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
819
820         list_for_each_entry(msginfo, &vmbus_connection.chn_msg_list,
821                                 msglistentry) {
822                 requestheader =
823                         (struct vmbus_channel_message_header *)msginfo->msg;
824
825                 if (requestheader->msgtype == CHANNELMSG_OPENCHANNEL) {
826                         openmsg =
827                         (struct vmbus_channel_open_channel *)msginfo->msg;
828                         if (openmsg->child_relid == result->child_relid &&
829                             openmsg->openid == result->openid) {
830                                 memcpy(&msginfo->response.open_result,
831                                        result,
832                                        sizeof(
833                                         struct vmbus_channel_open_result));
834                                 complete(&msginfo->waitevent);
835                                 break;
836                         }
837                 }
838         }
839         spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
840 }
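/*
 * The requesting side of this handshake lives in vmbus_open() (channel.c):
 * it queues its msginfo on chn_msg_list, posts the open request and then
 * blocks on the completion that is signalled above (illustrative sketch):
 *
 *	list_add_tail(&open_info->msglistentry, &vmbus_connection.chn_msg_list);
 *	vmbus_post_msg(open_msg, sizeof(struct vmbus_channel_open_channel));
 *	wait_for_completion(&open_info->waitevent);
 */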
841
842 /*
843  * vmbus_ongpadl_created - GPADL created handler.
844  *
845  * This is invoked when we received a response to our gpadl create request.
846  * Find the matching request, copy the response and signal the requesting
847  * thread.
848  */
849 static void vmbus_ongpadl_created(struct vmbus_channel_message_header *hdr)
850 {
851         struct vmbus_channel_gpadl_created *gpadlcreated;
852         struct vmbus_channel_msginfo *msginfo;
853         struct vmbus_channel_message_header *requestheader;
854         struct vmbus_channel_gpadl_header *gpadlheader;
855         unsigned long flags;
856
857         gpadlcreated = (struct vmbus_channel_gpadl_created *)hdr;
858
859         /*
860          * Find the establish msg, copy the result and signal/unblock the wait
861          * event
862          */
863         spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
864
865         list_for_each_entry(msginfo, &vmbus_connection.chn_msg_list,
866                                 msglistentry) {
867                 requestheader =
868                         (struct vmbus_channel_message_header *)msginfo->msg;
869
870                 if (requestheader->msgtype == CHANNELMSG_GPADL_HEADER) {
871                         gpadlheader =
872                         (struct vmbus_channel_gpadl_header *)requestheader;
873
874                         if ((gpadlcreated->child_relid ==
875                              gpadlheader->child_relid) &&
876                             (gpadlcreated->gpadl == gpadlheader->gpadl)) {
877                                 memcpy(&msginfo->response.gpadl_created,
878                                        gpadlcreated,
879                                        sizeof(
880                                         struct vmbus_channel_gpadl_created));
881                                 complete(&msginfo->waitevent);
882                                 break;
883                         }
884                 }
885         }
886         spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
887 }
888
889 /*
890  * vmbus_ongpadl_torndown - GPADL torndown handler.
891  *
892  * This is invoked when we received a response to our gpadl teardown request.
893  * Find the matching request, copy the response and signal the requesting
894  * thread.
895  */
896 static void vmbus_ongpadl_torndown(
897                         struct vmbus_channel_message_header *hdr)
898 {
899         struct vmbus_channel_gpadl_torndown *gpadl_torndown;
900         struct vmbus_channel_msginfo *msginfo;
901         struct vmbus_channel_message_header *requestheader;
902         struct vmbus_channel_gpadl_teardown *gpadl_teardown;
903         unsigned long flags;
904
905         gpadl_torndown = (struct vmbus_channel_gpadl_torndown *)hdr;
906
907         /*
908          * Find the open msg, copy the result and signal/unblock the wait event
909          */
910         spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
911
912         list_for_each_entry(msginfo, &vmbus_connection.chn_msg_list,
913                                 msglistentry) {
914                 requestheader =
915                         (struct vmbus_channel_message_header *)msginfo->msg;
916
917                 if (requestheader->msgtype == CHANNELMSG_GPADL_TEARDOWN) {
918                         gpadl_teardown =
919                         (struct vmbus_channel_gpadl_teardown *)requestheader;
920
921                         if (gpadl_torndown->gpadl == gpadl_teardown->gpadl) {
922                                 memcpy(&msginfo->response.gpadl_torndown,
923                                        gpadl_torndown,
924                                        sizeof(
925                                         struct vmbus_channel_gpadl_torndown));
926                                 complete(&msginfo->waitevent);
927                                 break;
928                         }
929                 }
930         }
931         spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
932 }
933
934 /*
935  * vmbus_onversion_response - Version response handler
936  *
937  * This is invoked when we received a response to our initiate contact request.
938  * Find the matching request, copy the response and signal the requesting
939  * thread.
940  */
941 static void vmbus_onversion_response(
942                 struct vmbus_channel_message_header *hdr)
943 {
944         struct vmbus_channel_msginfo *msginfo;
945         struct vmbus_channel_message_header *requestheader;
946         struct vmbus_channel_version_response *version_response;
947         unsigned long flags;
948
949         version_response = (struct vmbus_channel_version_response *)hdr;
950         spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
951
952         list_for_each_entry(msginfo, &vmbus_connection.chn_msg_list,
953                                 msglistentry) {
954                 requestheader =
955                         (struct vmbus_channel_message_header *)msginfo->msg;
956
957                 if (requestheader->msgtype ==
958                     CHANNELMSG_INITIATE_CONTACT) {
959                         memcpy(&msginfo->response.version_response,
960                               version_response,
961                               sizeof(struct vmbus_channel_version_response));
962                         complete(&msginfo->waitevent);
963                 }
964         }
965         spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
966 }
967
968 /* Channel message dispatch table */
969 struct vmbus_channel_message_table_entry
970         channel_message_table[CHANNELMSG_COUNT] = {
971         {CHANNELMSG_INVALID,                    0, NULL},
972         {CHANNELMSG_OFFERCHANNEL,               0, vmbus_onoffer},
973         {CHANNELMSG_RESCIND_CHANNELOFFER,       0, vmbus_onoffer_rescind},
974         {CHANNELMSG_REQUESTOFFERS,              0, NULL},
975         {CHANNELMSG_ALLOFFERS_DELIVERED,        1, vmbus_onoffers_delivered},
976         {CHANNELMSG_OPENCHANNEL,                0, NULL},
977         {CHANNELMSG_OPENCHANNEL_RESULT,         1, vmbus_onopen_result},
978         {CHANNELMSG_CLOSECHANNEL,               0, NULL},
979         {CHANNELMSG_GPADL_HEADER,               0, NULL},
980         {CHANNELMSG_GPADL_BODY,                 0, NULL},
981         {CHANNELMSG_GPADL_CREATED,              1, vmbus_ongpadl_created},
982         {CHANNELMSG_GPADL_TEARDOWN,             0, NULL},
983         {CHANNELMSG_GPADL_TORNDOWN,             1, vmbus_ongpadl_torndown},
984         {CHANNELMSG_RELID_RELEASED,             0, NULL},
985         {CHANNELMSG_INITIATE_CONTACT,           0, NULL},
986         {CHANNELMSG_VERSION_RESPONSE,           1, vmbus_onversion_response},
987         {CHANNELMSG_UNLOAD,                     0, NULL},
988         {CHANNELMSG_UNLOAD_RESPONSE,            1, vmbus_unload_response},
989         {CHANNELMSG_18,                         0, NULL},
990         {CHANNELMSG_19,                         0, NULL},
991         {CHANNELMSG_20,                         0, NULL},
992         {CHANNELMSG_TL_CONNECT_REQUEST,         0, NULL},
993 };
994
995 /*
996  * vmbus_onmessage - Handler for channel protocol messages.
997  *
998  * This is invoked in the vmbus worker thread context.
999  */
1000 void vmbus_onmessage(void *context)
1001 {
1002         struct hv_message *msg = context;
1003         struct vmbus_channel_message_header *hdr;
1004         int size;
1005
1006         hdr = (struct vmbus_channel_message_header *)msg->u.payload;
1007         size = msg->header.payload_size;
1008
1009         if (hdr->msgtype >= CHANNELMSG_COUNT) {
1010                 pr_err("Received invalid channel message type %d size %d\n",
1011                            hdr->msgtype, size);
1012                 print_hex_dump_bytes("", DUMP_PREFIX_NONE,
1013                                      (unsigned char *)msg->u.payload, size);
1014                 return;
1015         }
1016
1017         if (channel_message_table[hdr->msgtype].message_handler)
1018                 channel_message_table[hdr->msgtype].message_handler(hdr);
1019         else
1020                 pr_err("Unhandled channel message type %d\n", hdr->msgtype);
1021 }
1022
1023 /*
1024  * vmbus_request_offers - Send a request to get all our pending offers.
1025  */
1026 int vmbus_request_offers(void)
1027 {
1028         struct vmbus_channel_message_header *msg;
1029         struct vmbus_channel_msginfo *msginfo;
1030         int ret;
1031
1032         msginfo = kmalloc(sizeof(*msginfo) +
1033                           sizeof(struct vmbus_channel_message_header),
1034                           GFP_KERNEL);
1035         if (!msginfo)
1036                 return -ENOMEM;
1037
1038         msg = (struct vmbus_channel_message_header *)msginfo->msg;
1039
1040         msg->msgtype = CHANNELMSG_REQUESTOFFERS;
1041
1042
1043         ret = vmbus_post_msg(msg,
1044                                sizeof(struct vmbus_channel_message_header));
1045         if (ret != 0) {
1046                 pr_err("Unable to request offers - %d\n", ret);
1047
1048                 goto cleanup;
1049         }
1050
1051 cleanup:
1052         kfree(msginfo);
1053
1054         return ret;
1055 }
1056
1057 /*
1058  * Retrieve the (sub) channel on which to send an outgoing request.
1059  * When a primary channel has multiple sub-channels, we try to
1060  * distribute the load equally amongst all available channels.
1061  */
1062 struct vmbus_channel *vmbus_get_outgoing_channel(struct vmbus_channel *primary)
1063 {
1064         struct list_head *cur, *tmp;
1065         int cur_cpu;
1066         struct vmbus_channel *cur_channel;
1067         struct vmbus_channel *outgoing_channel = primary;
1068         int next_channel;
1069         int i = 1;
1070
1071         if (list_empty(&primary->sc_list))
1072                 return outgoing_channel;
1073
1074         next_channel = primary->next_oc++;
1075
1076         if (next_channel > (primary->num_sc)) {
1077                 primary->next_oc = 0;
1078                 return outgoing_channel;
1079         }
1080
1081         cur_cpu = hv_context.vp_index[get_cpu()];
1082         put_cpu();
1083         list_for_each_safe(cur, tmp, &primary->sc_list) {
1084                 cur_channel = list_entry(cur, struct vmbus_channel, sc_list);
1085                 if (cur_channel->state != CHANNEL_OPENED_STATE)
1086                         continue;
1087
1088                 if (cur_channel->target_vp == cur_cpu)
1089                         return cur_channel;
1090
1091                 if (i == next_channel)
1092                         return cur_channel;
1093
1094                 i++;
1095         }
1096
1097         return outgoing_channel;
1098 }
1099 EXPORT_SYMBOL_GPL(vmbus_get_outgoing_channel);
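/*
 * Illustrative use (sketch, not part of this file): a multi-queue capable
 * driver such as storvsc picks the outgoing channel per request and sends
 * on it:
 *
 *	struct vmbus_channel *out = vmbus_get_outgoing_channel(dev->channel);
 *
 *	vmbus_sendpacket(out, req, sizeof(*req), (unsigned long)req,
 *			 VM_PKT_DATA_INBAND,
 *			 VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
 */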
1100
1101 static void invoke_sc_cb(struct vmbus_channel *primary_channel)
1102 {
1103         struct list_head *cur, *tmp;
1104         struct vmbus_channel *cur_channel;
1105
1106         if (primary_channel->sc_creation_callback == NULL)
1107                 return;
1108
1109         list_for_each_safe(cur, tmp, &primary_channel->sc_list) {
1110                 cur_channel = list_entry(cur, struct vmbus_channel, sc_list);
1111
1112                 primary_channel->sc_creation_callback(cur_channel);
1113         }
1114 }
1115
1116 void vmbus_set_sc_create_callback(struct vmbus_channel *primary_channel,
1117                                 void (*sc_cr_cb)(struct vmbus_channel *new_sc))
1118 {
1119         primary_channel->sc_creation_callback = sc_cr_cb;
1120 }
1121 EXPORT_SYMBOL_GPL(vmbus_set_sc_create_callback);
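/*
 * Illustrative flow (sketch): a driver registers this callback on the
 * primary channel and then asks the host, via a device-specific request,
 * to create sub-channels; vmbus_process_offer() above invokes the callback
 * for each sub-channel offer that arrives (my_handle_sc_creation is a
 * placeholder name):
 *
 *	vmbus_set_sc_create_callback(primary, my_handle_sc_creation);
 *	... send the device-specific "create sub-channels" request ...
 */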
1122
1123 bool vmbus_are_subchannels_present(struct vmbus_channel *primary)
1124 {
1125         bool ret;
1126
1127         ret = !list_empty(&primary->sc_list);
1128
1129         if (ret) {
1130                 /*
1131                  * Invoke the callback on sub-channel creation.
1132                  * This will present a uniform interface to the
1133                  * clients.
1134                  */
1135                 invoke_sc_cb(primary);
1136         }
1137
1138         return ret;
1139 }
1140 EXPORT_SYMBOL_GPL(vmbus_are_subchannels_present);
1141
1142 void vmbus_set_chn_rescind_callback(struct vmbus_channel *channel,
1143                 void (*chn_rescind_cb)(struct vmbus_channel *))
1144 {
1145         channel->chn_rescind_callback = chn_rescind_cb;
1146 }
1147 EXPORT_SYMBOL_GPL(vmbus_set_chn_rescind_callback);
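/*
 * Illustrative use (sketch): a driver that manages teardown itself registers
 * a rescind callback so that vmbus_onoffer_rescind() above defers to it
 * instead of unregistering the device directly (my_handle_rescind is a
 * placeholder name):
 *
 *	vmbus_set_chn_rescind_callback(channel, my_handle_rescind);
 */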