Commit | Line | Data |
---|---|---|
3b20eb23 | 1 | // SPDX-License-Identifier: GPL-2.0-only |
3e7ee490 | 2 | /* |
3e7ee490 HJ |
3 | * Copyright (c) 2009, Microsoft Corporation. |
4 | * | |
3e7ee490 HJ |
5 | * Authors: |
6 | * Haiyang Zhang <haiyangz@microsoft.com> | |
7 | * Hank Janssen <hjanssen@microsoft.com> | |
3e7ee490 | 8 | */ |
0a46618d HJ |
9 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt |
10 | ||
faff4406 | 11 | #include <linux/io.h> |
a0086dc5 GKH |
12 | #include <linux/kernel.h> |
13 | #include <linux/mm.h> | |
5a0e3ad6 | 14 | #include <linux/slab.h> |
b7c947f0 | 15 | #include <linux/vmalloc.h> |
46a97191 | 16 | #include <linux/hyperv.h> |
248e742a | 17 | #include <linux/random.h> |
4061ed9e | 18 | #include <linux/clockchips.h> |
b635ccc1 | 19 | #include <linux/delay.h> |
d608715d | 20 | #include <linux/interrupt.h> |
fd1fea68 | 21 | #include <clocksource/hyperv_timer.h> |
4061ed9e | 22 | #include <asm/mshyperv.h> |
193061ea | 23 | #include <linux/set_memory.h> |
0f2a6619 | 24 | #include "hyperv_vmbus.h" |
3e7ee490 | 25 | |
454f18a9 | 26 | /* The one and only */ |
a3cadf38 | 27 | struct hv_context hv_context; |
3e7ee490 | 28 | |
3e189519 | 29 | /* |
d44890c8 | 30 | * hv_init - Main initialization routine. |
0831ad04 GKH |
31 | * |
32 | * This routine must be called before any other routines in here are called | |
33 | */ | |
d44890c8 | 34 | int hv_init(void) |
3e7ee490 | 35 | { |
37cdd991 SH |
36 | hv_context.cpu_context = alloc_percpu(struct hv_per_cpu_context); |
37 | if (!hv_context.cpu_context) | |
38 | return -ENOMEM; | |
5433e003 | 39 | return 0; |
3e7ee490 HJ |
40 | } |
41 | ||
3e189519 | 42 | /* |
d44890c8 | 43 | * hv_post_message - Post a message using the hypervisor message IPC. |
0831ad04 GKH |
44 | * |
45 | * This involves a hypercall. | |
46 | */ | |
415f0a02 | 47 | int hv_post_message(union hv_connection_id connection_id, |
b8dfb264 HZ |
48 | enum hv_message_type message_type, |
49 | void *payload, size_t payload_size) | |
3e7ee490 | 50 | { |
b8dfb264 | 51 | struct hv_input_post_message *aligned_msg; |
9a6b1a17 | 52 | unsigned long flags; |
a108393d | 53 | u64 status; |
3e7ee490 | 54 | |
b8dfb264 | 55 | if (payload_size > HV_MESSAGE_PAYLOAD_BYTE_COUNT) |
39594abc | 56 | return -EMSGSIZE; |
3e7ee490 | 57 | |
9a6b1a17 DC |
58 | local_irq_save(flags); |
59 | ||
23378295 DC |
60 | /* |
61 | * A TDX VM with the paravisor must use the decrypted post_msg_page: see | |
62 | * the comment in struct hv_per_cpu_context. A SNP VM with the paravisor | |
63 | * can use the encrypted hyperv_pcpu_input_arg because it copies the | |
64 | * input into the GHCB page, which has been decrypted by the paravisor. | |
65 | */ | |
66 | if (hv_isolation_type_tdx() && ms_hyperv.paravisor_present) | |
67 | aligned_msg = this_cpu_ptr(hv_context.cpu_context)->post_msg_page; | |
68 | else | |
69 | aligned_msg = *this_cpu_ptr(hyperv_pcpu_input_arg); | |
70 | ||
b8dfb264 | 71 | aligned_msg->connectionid = connection_id; |
b29ef354 | 72 | aligned_msg->reserved = 0; |
b8dfb264 HZ |
73 | aligned_msg->message_type = message_type; |
74 | aligned_msg->payload_size = payload_size; | |
75 | memcpy((void *)aligned_msg->payload, payload, payload_size); | |
3e7ee490 | 76 | |
23378295 DC |
77 | if (ms_hyperv.paravisor_present) { |
78 | if (hv_isolation_type_tdx()) | |
79 | status = hv_tdx_hypercall(HVCALL_POST_MESSAGE, | |
80 | virt_to_phys(aligned_msg), 0); | |
81 | else if (hv_isolation_type_snp()) | |
82 | status = hv_ghcb_hypercall(HVCALL_POST_MESSAGE, | |
83 | aligned_msg, NULL, | |
84 | sizeof(*aligned_msg)); | |
85 | else | |
86 | status = HV_STATUS_INVALID_PARAMETER; | |
87 | } else { | |
20c89a55 TL |
88 | status = hv_do_hypercall(HVCALL_POST_MESSAGE, |
89 | aligned_msg, NULL); | |
23378295 | 90 | } |
3e7ee490 | 91 | |
9a6b1a17 | 92 | local_irq_restore(flags); |
13b9abfc | 93 | |
753ed9c9 | 94 | return hv_result(status); |
3e7ee490 HJ |
95 | } |
96 | ||
2608fb65 JW |
97 | int hv_synic_alloc(void) |
98 | { | |
193061ea | 99 | int cpu, ret = -ENOMEM; |
f25a7ece MK |
100 | struct hv_per_cpu_context *hv_cpu; |
101 | ||
102 | /* | |
103 | * First, zero all per-cpu memory areas so hv_synic_free() can | |
104 | * detect what memory has been allocated and cleanup properly | |
105 | * after any failures. | |
106 | */ | |
107 | for_each_present_cpu(cpu) { | |
108 | hv_cpu = per_cpu_ptr(hv_context.cpu_context, cpu); | |
109 | memset(hv_cpu, 0, sizeof(*hv_cpu)); | |
110 | } | |
2608fb65 | 111 | |
6396bb22 | 112 | hv_context.hv_numa_map = kcalloc(nr_node_ids, sizeof(struct cpumask), |
597ff72f | 113 | GFP_KERNEL); |
9f01ec53 S |
114 | if (hv_context.hv_numa_map == NULL) { |
115 | pr_err("Unable to allocate NUMA map\n"); | |
116 | goto err; | |
117 | } | |
118 | ||
421b8f20 | 119 | for_each_present_cpu(cpu) { |
f25a7ece | 120 | hv_cpu = per_cpu_ptr(hv_context.cpu_context, cpu); |
37cdd991 | 121 | |
37cdd991 SH |
122 | tasklet_init(&hv_cpu->msg_dpc, |
123 | vmbus_on_msg_dpc, (unsigned long) hv_cpu); | |
124 | ||
23378295 DC |
125 | if (ms_hyperv.paravisor_present && hv_isolation_type_tdx()) { |
126 | hv_cpu->post_msg_page = (void *)get_zeroed_page(GFP_ATOMIC); | |
127 | if (hv_cpu->post_msg_page == NULL) { | |
128 | pr_err("Unable to allocate post msg page\n"); | |
129 | goto err; | |
130 | } | |
131 | ||
132 | ret = set_memory_decrypted((unsigned long)hv_cpu->post_msg_page, 1); | |
133 | if (ret) { | |
134 | pr_err("Failed to decrypt post msg page: %d\n", ret); | |
135 | /* Just leak the page, as it's unsafe to free the page. */ | |
136 | hv_cpu->post_msg_page = NULL; | |
137 | goto err; | |
138 | } | |
139 | ||
140 | memset(hv_cpu->post_msg_page, 0, PAGE_SIZE); | |
141 | } | |
142 | ||
faff4406 TL |
143 | /* |
144 | * Synic message and event pages are allocated by paravisor. | |
145 | * Skip these pages allocation here. | |
146 | */ | |
d3a9d7e4 | 147 | if (!ms_hyperv.paravisor_present && !hv_root_partition) { |
faff4406 TL |
148 | hv_cpu->synic_message_page = |
149 | (void *)get_zeroed_page(GFP_ATOMIC); | |
150 | if (hv_cpu->synic_message_page == NULL) { | |
151 | pr_err("Unable to allocate SYNIC message page\n"); | |
152 | goto err; | |
153 | } | |
154 | ||
155 | hv_cpu->synic_event_page = | |
156 | (void *)get_zeroed_page(GFP_ATOMIC); | |
157 | if (hv_cpu->synic_event_page == NULL) { | |
158 | pr_err("Unable to allocate SYNIC event page\n"); | |
68f2f2bc DC |
159 | |
160 | free_page((unsigned long)hv_cpu->synic_message_page); | |
161 | hv_cpu->synic_message_page = NULL; | |
faff4406 TL |
162 | goto err; |
163 | } | |
2608fb65 | 164 | } |
193061ea | 165 | |
68f2f2bc | 166 | if (!ms_hyperv.paravisor_present && |
e3131f1c | 167 | (hv_isolation_type_snp() || hv_isolation_type_tdx())) { |
193061ea TL |
168 | ret = set_memory_decrypted((unsigned long) |
169 | hv_cpu->synic_message_page, 1); | |
170 | if (ret) { | |
171 | pr_err("Failed to decrypt SYNIC msg page: %d\n", ret); | |
172 | hv_cpu->synic_message_page = NULL; | |
173 | ||
174 | /* | |
175 | * Free the event page here so that hv_synic_free() | |
176 | * won't later try to re-encrypt it. | |
177 | */ | |
178 | free_page((unsigned long)hv_cpu->synic_event_page); | |
179 | hv_cpu->synic_event_page = NULL; | |
180 | goto err; | |
181 | } | |
182 | ||
183 | ret = set_memory_decrypted((unsigned long) | |
184 | hv_cpu->synic_event_page, 1); | |
185 | if (ret) { | |
186 | pr_err("Failed to decrypt SYNIC event page: %d\n", ret); | |
187 | hv_cpu->synic_event_page = NULL; | |
188 | goto err; | |
189 | } | |
190 | ||
191 | memset(hv_cpu->synic_message_page, 0, PAGE_SIZE); | |
192 | memset(hv_cpu->synic_event_page, 0, PAGE_SIZE); | |
193 | } | |
2608fb65 JW |
194 | } |
195 | ||
196 | return 0; | |
193061ea | 197 | |
2608fb65 | 198 | err: |
57208632 MK |
199 | /* |
200 | * Any memory allocations that succeeded will be freed when | |
201 | * the caller cleans up by calling hv_synic_free() | |
202 | */ | |
193061ea | 203 | return ret; |
2608fb65 JW |
204 | } |
205 | ||
2608fb65 JW |
206 | |
/*
 * hv_synic_free - Free the per-CPU pages allocated by hv_synic_alloc().
 *
 * For pages that were shared with the hypervisor (decrypted), the memory
 * must be re-encrypted before it is returned to the page allocator. If
 * re-encryption fails, the page pointer is set to NULL so the page is
 * deliberately leaked rather than freed in an unsafe state.
 */
void hv_synic_free(void)
{
	int cpu, ret;

	for_each_present_cpu(cpu) {
		struct hv_per_cpu_context *hv_cpu
			= per_cpu_ptr(hv_context.cpu_context, cpu);

		/* It's better to leak the page if the encryption fails. */
		if (ms_hyperv.paravisor_present && hv_isolation_type_tdx()) {
			/* TDX-with-paravisor: re-encrypt the post-message page */
			if (hv_cpu->post_msg_page) {
				ret = set_memory_encrypted((unsigned long)
					hv_cpu->post_msg_page, 1);
				if (ret) {
					pr_err("Failed to encrypt post msg page: %d\n", ret);
					hv_cpu->post_msg_page = NULL;
				}
			}
		}

		/* Fully-enlightened SNP/TDX: re-encrypt the SynIC pages */
		if (!ms_hyperv.paravisor_present &&
		    (hv_isolation_type_snp() || hv_isolation_type_tdx())) {
			if (hv_cpu->synic_message_page) {
				ret = set_memory_encrypted((unsigned long)
					hv_cpu->synic_message_page, 1);
				if (ret) {
					pr_err("Failed to encrypt SYNIC msg page: %d\n", ret);
					hv_cpu->synic_message_page = NULL;
				}
			}

			if (hv_cpu->synic_event_page) {
				ret = set_memory_encrypted((unsigned long)
					hv_cpu->synic_event_page, 1);
				if (ret) {
					pr_err("Failed to encrypt SYNIC event page: %d\n", ret);
					hv_cpu->synic_event_page = NULL;
				}
			}
		}

		/*
		 * free_page(0) is a no-op, so pages that were never
		 * allocated, or NULLed above after an encryption failure,
		 * are safely skipped here.
		 */
		free_page((unsigned long)hv_cpu->post_msg_page);
		free_page((unsigned long)hv_cpu->synic_event_page);
		free_page((unsigned long)hv_cpu->synic_message_page);
	}

	kfree(hv_context.hv_numa_map);
}
255 | ||
/*
 * hv_synic_enable_regs - Program this CPU's SynIC registers
 * (called from hv_synic_init()).
 *
 * If it is already initialized by another entity (ie x2v shim), we need to
 * retrieve the initialized message and event pages. Otherwise, we create and
 * initialize the message and event pages.
 *
 * Order: SIMP (message page), SIEFP (event page), SINT (shared interrupt),
 * then SCONTROL (global enable) last, so the SynIC is only switched on
 * once the pages and interrupt routing are in place.
 */
void hv_synic_enable_regs(unsigned int cpu)
{
	struct hv_per_cpu_context *hv_cpu
		= per_cpu_ptr(hv_context.cpu_context, cpu);
	union hv_synic_simp simp;
	union hv_synic_siefp siefp;
	union hv_synic_sint shared_sint;
	union hv_synic_scontrol sctrl;

	/* Setup the Synic's message page */
	simp.as_uint64 = hv_get_register(HV_REGISTER_SIMP);
	simp.simp_enabled = 1;

	if (ms_hyperv.paravisor_present || hv_root_partition) {
		/*
		 * The page already exists (provided by the paravisor or
		 * hypervisor); map it rather than allocating one.
		 */
		/* Mask out vTOM bit. ioremap_cache() maps decrypted */
		u64 base = (simp.base_simp_gpa << HV_HYP_PAGE_SHIFT) &
				~ms_hyperv.shared_gpa_boundary;
		hv_cpu->synic_message_page
			= (void *)ioremap_cache(base, HV_HYP_PAGE_SIZE);
		if (!hv_cpu->synic_message_page)
			pr_err("Fail to map synic message page.\n");
	} else {
		/* Point the hypervisor at the page hv_synic_alloc() made */
		simp.base_simp_gpa = virt_to_phys(hv_cpu->synic_message_page)
			>> HV_HYP_PAGE_SHIFT;
	}

	hv_set_register(HV_REGISTER_SIMP, simp.as_uint64);

	/* Setup the Synic's event page */
	siefp.as_uint64 = hv_get_register(HV_REGISTER_SIEFP);
	siefp.siefp_enabled = 1;

	if (ms_hyperv.paravisor_present || hv_root_partition) {
		/* Mask out vTOM bit. ioremap_cache() maps decrypted */
		u64 base = (siefp.base_siefp_gpa << HV_HYP_PAGE_SHIFT) &
				~ms_hyperv.shared_gpa_boundary;
		hv_cpu->synic_event_page
			= (void *)ioremap_cache(base, HV_HYP_PAGE_SIZE);
		if (!hv_cpu->synic_event_page)
			pr_err("Fail to map synic event page.\n");
	} else {
		siefp.base_siefp_gpa = virt_to_phys(hv_cpu->synic_event_page)
			>> HV_HYP_PAGE_SHIFT;
	}

	hv_set_register(HV_REGISTER_SIEFP, siefp.as_uint64);

	/* Setup the shared SINT. */
	if (vmbus_irq != -1)
		enable_percpu_irq(vmbus_irq, 0);
	shared_sint.as_uint64 = hv_get_register(HV_REGISTER_SINT0 +
					VMBUS_MESSAGE_SINT);

	shared_sint.vector = vmbus_interrupt;
	shared_sint.masked = false;

	/*
	 * On architectures where Hyper-V doesn't support AEOI (e.g., ARM64),
	 * it doesn't provide a recommendation flag and AEOI must be disabled.
	 */
#ifdef HV_DEPRECATING_AEOI_RECOMMENDED
	shared_sint.auto_eoi =
			!(ms_hyperv.hints & HV_DEPRECATING_AEOI_RECOMMENDED);
#else
	shared_sint.auto_eoi = 0;
#endif
	hv_set_register(HV_REGISTER_SINT0 + VMBUS_MESSAGE_SINT,
				shared_sint.as_uint64);

	/* Enable the global synic bit */
	sctrl.as_uint64 = hv_get_register(HV_REGISTER_SCONTROL);
	sctrl.enable = 1;

	hv_set_register(HV_REGISTER_SCONTROL, sctrl.as_uint64);
}
338 | ||
/*
 * hv_synic_init - Per-CPU SynIC bring-up (CPU hotplug online callback).
 *
 * Programs the SynIC registers for @cpu and then initializes the legacy
 * Hyper-V stimer on the VMBus message SINT. Always returns 0.
 */
int hv_synic_init(unsigned int cpu)
{
	hv_synic_enable_regs(cpu);

	hv_stimer_legacy_init(cpu, VMBUS_MESSAGE_SINT);

	return 0;
}
347 | ||
/*
 * hv_synic_disable_regs - Tear down this CPU's SynIC registers
 * (cleanup counterpart of hv_synic_enable_regs(); called from
 * hv_synic_cleanup()).
 *
 * Order mirrors setup in reverse intent: mask the SINT first so no new
 * interrupts arrive, then disable SIMP/SIEFP, then clear the global
 * enable bit, and finally disable the percpu IRQ.
 */
void hv_synic_disable_regs(unsigned int cpu)
{
	struct hv_per_cpu_context *hv_cpu
		= per_cpu_ptr(hv_context.cpu_context, cpu);
	union hv_synic_sint shared_sint;
	union hv_synic_simp simp;
	union hv_synic_siefp siefp;
	union hv_synic_scontrol sctrl;

	shared_sint.as_uint64 = hv_get_register(HV_REGISTER_SINT0 +
					VMBUS_MESSAGE_SINT);

	shared_sint.masked = 1;

	/* Need to correctly cleanup in the case of SMP!!! */
	/* Disable the interrupt */
	hv_set_register(HV_REGISTER_SINT0 + VMBUS_MESSAGE_SINT,
				shared_sint.as_uint64);

	simp.as_uint64 = hv_get_register(HV_REGISTER_SIMP);
	/*
	 * In Isolation VM, sim and sief pages are allocated by
	 * paravisor. These pages also will be used by kdump
	 * kernel. So just reset enable bit here and keep page
	 * addresses.
	 */
	simp.simp_enabled = 0;
	if (ms_hyperv.paravisor_present || hv_root_partition) {
		/* Undo the ioremap_cache() done in hv_synic_enable_regs() */
		iounmap(hv_cpu->synic_message_page);
		hv_cpu->synic_message_page = NULL;
	} else {
		simp.base_simp_gpa = 0;
	}

	hv_set_register(HV_REGISTER_SIMP, simp.as_uint64);

	siefp.as_uint64 = hv_get_register(HV_REGISTER_SIEFP);
	siefp.siefp_enabled = 0;

	if (ms_hyperv.paravisor_present || hv_root_partition) {
		iounmap(hv_cpu->synic_event_page);
		hv_cpu->synic_event_page = NULL;
	} else {
		siefp.base_siefp_gpa = 0;
	}

	hv_set_register(HV_REGISTER_SIEFP, siefp.as_uint64);

	/* Disable the global synic bit */
	sctrl.as_uint64 = hv_get_register(HV_REGISTER_SCONTROL);
	sctrl.enable = 0;
	hv_set_register(HV_REGISTER_SCONTROL, sctrl.as_uint64);

	if (vmbus_irq != -1)
		disable_percpu_irq(vmbus_irq);
}
407 | ||
b635ccc1 APM |
408 | #define HV_MAX_TRIES 3 |
409 | /* | |
410 | * Scan the event flags page of 'this' CPU looking for any bit that is set. If we find one | |
411 | * bit set, then wait for a few milliseconds. Repeat these steps for a maximum of 3 times. | |
412 | * Return 'true', if there is still any set bit after this operation; 'false', otherwise. | |
413 | * | |
414 | * If a bit is set, that means there is a pending channel interrupt. The expectation is | |
415 | * that the normal interrupt handling mechanism will find and process the channel interrupt | |
416 | * "very soon", and in the process clear the bit. | |
417 | */ | |
418 | static bool hv_synic_event_pending(void) | |
419 | { | |
420 | struct hv_per_cpu_context *hv_cpu = this_cpu_ptr(hv_context.cpu_context); | |
421 | union hv_synic_event_flags *event = | |
422 | (union hv_synic_event_flags *)hv_cpu->synic_event_page + VMBUS_MESSAGE_SINT; | |
423 | unsigned long *recv_int_page = event->flags; /* assumes VMBus version >= VERSION_WIN8 */ | |
424 | bool pending; | |
425 | u32 relid; | |
426 | int tries = 0; | |
427 | ||
428 | retry: | |
429 | pending = false; | |
430 | for_each_set_bit(relid, recv_int_page, HV_EVENT_FLAGS_COUNT) { | |
431 | /* Special case - VMBus channel protocol messages */ | |
432 | if (relid == 0) | |
433 | continue; | |
434 | pending = true; | |
435 | break; | |
436 | } | |
437 | if (pending && tries++ < HV_MAX_TRIES) { | |
438 | usleep_range(10000, 20000); | |
439 | goto retry; | |
440 | } | |
441 | return pending; | |
442 | } | |
f3c5e63c | 443 | |
dba61cda DC |
/*
 * hv_synic_cleanup - Per-CPU SynIC teardown (CPU hotplug offline callback).
 *
 * Returns 0 on success, or -EBUSY to veto offlining of @cpu when VMBus is
 * still connected and the CPU is the connect CPU, still has channels bound
 * to it, or still has unprocessed channel events pending.
 */
int hv_synic_cleanup(unsigned int cpu)
{
	struct vmbus_channel *channel, *sc;
	bool channel_found = false;

	/* Disconnected (panic/kexec path): skip all the veto checks. */
	if (vmbus_connection.conn_state != CONNECTED)
		goto always_cleanup;

	/*
	 * Hyper-V does not provide a way to change the connect CPU once
	 * it is set; we must prevent the connect CPU from going offline
	 * while the VM is running normally. But in the panic or kexec()
	 * path where the vmbus is already disconnected, the CPU must be
	 * allowed to shut down.
	 */
	if (cpu == VMBUS_CONNECT_CPU)
		return -EBUSY;

	/*
	 * Search for channels which are bound to the CPU we're about to
	 * cleanup.  In case we find one and vmbus is still connected, we
	 * fail; this will effectively prevent CPU offlining.
	 *
	 * TODO: Re-bind the channels to different CPUs.
	 */
	mutex_lock(&vmbus_connection.channel_mutex);
	list_for_each_entry(channel, &vmbus_connection.chn_list, listentry) {
		if (channel->target_cpu == cpu) {
			channel_found = true;
			break;
		}
		/* Also check each channel's subchannels */
		list_for_each_entry(sc, &channel->sc_list, sc_list) {
			if (sc->target_cpu == cpu) {
				channel_found = true;
				break;
			}
		}
		if (channel_found)
			break;
	}
	mutex_unlock(&vmbus_connection.channel_mutex);

	if (channel_found)
		return -EBUSY;

	/*
	 * channel_found == false means that any channels that were previously
	 * assigned to the CPU have been reassigned elsewhere with a call of
	 * vmbus_send_modifychannel(). Scan the event flags page looking for
	 * bits that are set and waiting with a timeout for vmbus_chan_sched()
	 * to process such bits. If bits are still set after this operation
	 * and VMBus is connected, fail the CPU offlining operation.
	 */
	if (vmbus_proto_version >= VERSION_WIN10_V4_1 && hv_synic_event_pending())
		return -EBUSY;

always_cleanup:
	hv_stimer_legacy_cleanup(cpu);

	hv_synic_disable_regs(cpu);

	return 0;
}