/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_MSHYPER_H
#define _ASM_X86_MSHYPER_H

#include <linux/types.h>
#include <linux/atomic.h>
#include <linux/nmi.h>
#include <asm/io.h>
#include <asm/hyperv-tlfs.h>
#include <asm/nospec-branch.h>

#define VP_INVAL	U32_MAX

struct ms_hyperv_info {
	u32 features;
	u32 misc_features;
	u32 hints;
	u32 nested_features;
	u32 max_vp_index;
	u32 max_lp_index;
};

extern struct ms_hyperv_info ms_hyperv;
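
/*
 * Illustrative sketch (not part of this header): ms_hyperv is populated
 * from the Hyper-V CPUID leaves early in boot; consumers test feature
 * and hint bits such as HV_X64_CLUSTER_IPI_RECOMMENDED (defined in
 * asm/hyperv-tlfs.h) before using an enlightenment.
 */
static inline bool example_cluster_ipi_recommended(void)
{
	return !!(ms_hyperv.hints & HV_X64_CLUSTER_IPI_RECOMMENDED);
}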

typedef int (*hyperv_fill_flush_list_func)(
		struct hv_guest_mapping_flush_list *flush,
		void *data);

/*
 * Generate the guest OS ID in the format the Hyper-V TLFS describes:
 * the Linux vendor ID in the top 16 bits, the kernel version in bits
 * 47:16 and distro-specific data (d_info2) in the low 16 bits. Note
 * that d_info1 shares bits 63:48 with the vendor ID; in practice it
 * is passed as 0.
 */

static inline __u64 generate_guest_id(__u64 d_info1, __u64 kernel_version,
				      __u64 d_info2)
{
	__u64 guest_id = 0;

	guest_id = (((__u64)HV_LINUX_VENDOR_ID) << 48);
	guest_id |= (d_info1 << 48);
	guest_id |= (kernel_version << 16);
	guest_id |= d_info2;

	return guest_id;
}
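
/*
 * Illustrative sketch (not part of this header): this is roughly how the
 * init code is expected to report the guest identity to the hypervisor;
 * LINUX_VERSION_CODE is assumed to come from <linux/version.h>.
 */
static inline void example_report_guest_id(void)
{
	u64 guest_id;

	/* No distro-specific data: d_info1 and d_info2 are both 0 */
	guest_id = generate_guest_id(0, LINUX_VERSION_CODE, 0);
	wrmsrl(HV_X64_MSR_GUEST_OS_ID, guest_id);
}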

/* Free the message slot and signal end-of-message if required */
static inline void vmbus_signal_eom(struct hv_message *msg, u32 old_msg_type)
{
	/*
	 * On crash we're reading some other CPU's message page and we need
	 * to be careful: this other CPU may already have cleared the header
	 * and the host may already have delivered some other message there.
	 * If we blindly write msg->header.message_type we're going to lose
	 * it. We can still lose a message of the same type but we count on
	 * the fact that there can only be one CHANNELMSG_UNLOAD_RESPONSE
	 * and we don't care about other messages on crash.
	 */
	if (cmpxchg(&msg->header.message_type, old_msg_type,
		    HVMSG_NONE) != old_msg_type)
		return;

	/*
	 * Make sure the write to MessageType (i.e. setting it to
	 * HVMSG_NONE) happens before we read MessagePending and EOM.
	 * Otherwise, the EOM will not deliver any more messages since
	 * there is no empty slot.
	 */
	mb();

	if (msg->header.message_flags.msg_pending) {
		/*
		 * This will cause the message queue to be rescanned,
		 * possibly delivering another message from the hypervisor.
		 */
		wrmsrl(HV_X64_MSR_EOM, 0);
	}
}
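
/*
 * Illustrative sketch (not part of this header): a SynIC message handler
 * is expected to consume the payload from the message slot and only then
 * release the slot; the handler callback here is a stand-in for the real
 * message dispatch.
 */
static inline void example_handle_message(struct hv_message *msg,
					  void (*handler)(const void *, u8))
{
	u32 message_type = msg->header.message_type;

	if (message_type == HVMSG_NONE)
		return;		/* the slot is empty */

	handler(msg->u.payload, msg->header.payload_size);

	/* Free the slot; EOM so the host can deliver any pending message */
	vmbus_signal_eom(msg, message_type);
}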

/*
 * x86-specific accessors for the Hyper-V synthetic MSRs. The generic
 * Hyper-V code (e.g. the clocksource and VMbus drivers) goes through
 * these wrappers so that it can stay ISA-agnostic.
 */
#define hv_init_timer(timer, tick) \
	wrmsrl(HV_X64_MSR_STIMER0_COUNT + (2*timer), tick)
#define hv_init_timer_config(timer, val) \
	wrmsrl(HV_X64_MSR_STIMER0_CONFIG + (2*timer), val)

#define hv_get_simp(val) rdmsrl(HV_X64_MSR_SIMP, val)
#define hv_set_simp(val) wrmsrl(HV_X64_MSR_SIMP, val)

#define hv_get_siefp(val) rdmsrl(HV_X64_MSR_SIEFP, val)
#define hv_set_siefp(val) wrmsrl(HV_X64_MSR_SIEFP, val)

#define hv_get_synic_state(val) rdmsrl(HV_X64_MSR_SCONTROL, val)
#define hv_set_synic_state(val) wrmsrl(HV_X64_MSR_SCONTROL, val)

#define hv_get_vp_index(index) rdmsrl(HV_X64_MSR_VP_INDEX, index)

#define hv_get_synint_state(int_num, val) \
	rdmsrl(HV_X64_MSR_SINT0 + int_num, val)
#define hv_set_synint_state(int_num, val) \
	wrmsrl(HV_X64_MSR_SINT0 + int_num, val)

#define hv_get_crash_ctl(val) \
	rdmsrl(HV_X64_MSR_CRASH_CTL, val)

#define hv_get_time_ref_count(val) \
	rdmsrl(HV_X64_MSR_TIME_REF_COUNT, val)

#define hv_get_reference_tsc(val) \
	rdmsrl(HV_X64_MSR_REFERENCE_TSC, val)
#define hv_set_reference_tsc(val) \
	wrmsrl(HV_X64_MSR_REFERENCE_TSC, val)
#define hv_set_clocksource_vdso(val) \
	((val).archdata.vclock_mode = VCLOCK_HVCLOCK)
#define hv_get_raw_timer() rdtsc_ordered()
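
/*
 * Illustrative sketch (not part of this header): the ISA-agnostic
 * clocksource code reads the partition reference counter through the
 * accessor above, so only the macro itself is x86-specific.
 */
static inline u64 example_read_hv_ref_counter(void)
{
	u64 current_tick;

	hv_get_time_ref_count(current_tick);
	return current_tick;	/* 100ns units since partition creation */
}
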
void hyperv_callback_vector(void);
void hyperv_reenlightenment_vector(void);
#ifdef CONFIG_TRACING
#define trace_hyperv_callback_vector hyperv_callback_vector
#endif
void hyperv_vector_handler(struct pt_regs *regs);
void hv_setup_vmbus_irq(void (*handler)(void));
void hv_remove_vmbus_irq(void);

void hv_setup_kexec_handler(void (*handler)(void));
void hv_remove_kexec_handler(void);
void hv_setup_crash_handler(void (*handler)(struct pt_regs *regs));
void hv_remove_crash_handler(void);

/*
 * Routines for stimer0 Direct Mode handling.
 * On x86/x64, there are no percpu actions to take.
 */
void hv_stimer0_vector_handler(struct pt_regs *regs);
void hv_stimer0_callback_vector(void);
int hv_setup_stimer0_irq(int *irq, int *vector, void (*handler)(void));
void hv_remove_stimer0_irq(int irq);

static inline void hv_enable_stimer0_percpu_irq(int irq) {}
static inline void hv_disable_stimer0_percpu_irq(int irq) {}

#if IS_ENABLED(CONFIG_HYPERV)
extern void *hv_hypercall_pg;
extern void __percpu **hyperv_pcpu_input_arg;

/*
 * Invoke the given hypercall, with input and output buffers passed by
 * physical address. The low 16 bits of the return value carry the
 * Hyper-V status code; U64_MAX means the hypercall page isn't mapped.
 */
static inline u64 hv_do_hypercall(u64 control, void *input, void *output)
{
	u64 input_address = input ? virt_to_phys(input) : 0;
	u64 output_address = output ? virt_to_phys(output) : 0;
	u64 hv_status;

#ifdef CONFIG_X86_64
	if (!hv_hypercall_pg)
		return U64_MAX;

	__asm__ __volatile__("mov %4, %%r8\n"
			     CALL_NOSPEC
			     : "=a" (hv_status), ASM_CALL_CONSTRAINT,
			       "+c" (control), "+d" (input_address)
			     : "r" (output_address),
			       THUNK_TARGET(hv_hypercall_pg)
			     : "cc", "memory", "r8", "r9", "r10", "r11");
#else
	u32 input_address_hi = upper_32_bits(input_address);
	u32 input_address_lo = lower_32_bits(input_address);
	u32 output_address_hi = upper_32_bits(output_address);
	u32 output_address_lo = lower_32_bits(output_address);

	if (!hv_hypercall_pg)
		return U64_MAX;

	__asm__ __volatile__(CALL_NOSPEC
			     : "=A" (hv_status),
			       "+c" (input_address_lo), ASM_CALL_CONSTRAINT
			     : "A" (control),
			       "b" (input_address_hi),
			       "D" (output_address_hi), "S" (output_address_lo),
			       THUNK_TARGET(hv_hypercall_pg)
			     : "cc", "memory");
#endif /* !x86_64 */
	return hv_status;
}
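
/*
 * Illustrative sketch (not part of this header): a typical slow hypercall
 * fills the per-cpu input page and checks the status code in the low 16
 * bits of the result. hv_tlb_flush, HV_FLUSH_ALL_PROCESSORS and
 * HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE come from asm/hyperv-tlfs.h; the
 * usual irqflags/percpu headers are assumed at the point of use.
 */
static inline bool example_flush_address_space(u64 address_space)
{
	struct hv_tlb_flush *flush;
	unsigned long irq_flags;
	u64 status;

	/* The input page is per-cpu, so keep interrupts off while using it */
	local_irq_save(irq_flags);
	flush = *(struct hv_tlb_flush **)this_cpu_ptr(hyperv_pcpu_input_arg);

	flush->address_space = address_space;
	flush->flags = HV_FLUSH_ALL_PROCESSORS;
	flush->processor_mask = 0;

	status = hv_do_hypercall(HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE,
				 flush, NULL);
	local_irq_restore(irq_flags);

	return (status & HV_HYPERCALL_RESULT_MASK) == HV_STATUS_SUCCESS;
}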

/* Fast hypercall with 8 bytes of input and no output */
static inline u64 hv_do_fast_hypercall8(u16 code, u64 input1)
{
	u64 hv_status, control = (u64)code | HV_HYPERCALL_FAST_BIT;

#ifdef CONFIG_X86_64
	{
		__asm__ __volatile__(CALL_NOSPEC
				     : "=a" (hv_status), ASM_CALL_CONSTRAINT,
				       "+c" (control), "+d" (input1)
				     : THUNK_TARGET(hv_hypercall_pg)
				     : "cc", "r8", "r9", "r10", "r11");
	}
#else
	{
		u32 input1_hi = upper_32_bits(input1);
		u32 input1_lo = lower_32_bits(input1);

		__asm__ __volatile__ (CALL_NOSPEC
				      : "=A" (hv_status),
					"+c" (input1_lo),
					ASM_CALL_CONSTRAINT
				      : "A" (control),
					"b" (input1_hi),
					THUNK_TARGET(hv_hypercall_pg)
				      : "cc", "edi", "esi");
	}
#endif
	return hv_status;
}
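
/*
 * Illustrative sketch (not part of this header): a fast hypercall passes
 * its 8 bytes of input in registers, so no input page is needed.
 * HVCALL_NOTIFY_LONG_SPIN_WAIT (from asm/hyperv-tlfs.h) takes only the
 * current spin count as input.
 */
static inline bool example_notify_long_spin_wait(u64 spin_count)
{
	u64 status = hv_do_fast_hypercall8(HVCALL_NOTIFY_LONG_SPIN_WAIT,
					   spin_count);

	return (status & HV_HYPERCALL_RESULT_MASK) == HV_STATUS_SUCCESS;
}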

/* Fast hypercall with 16 bytes of input */
static inline u64 hv_do_fast_hypercall16(u16 code, u64 input1, u64 input2)
{
	u64 hv_status, control = (u64)code | HV_HYPERCALL_FAST_BIT;

#ifdef CONFIG_X86_64
	{
		__asm__ __volatile__("mov %4, %%r8\n"
				     CALL_NOSPEC
				     : "=a" (hv_status), ASM_CALL_CONSTRAINT,
				       "+c" (control), "+d" (input1)
				     : "r" (input2),
				       THUNK_TARGET(hv_hypercall_pg)
				     : "cc", "r8", "r9", "r10", "r11");
	}
#else
	{
		u32 input1_hi = upper_32_bits(input1);
		u32 input1_lo = lower_32_bits(input1);
		u32 input2_hi = upper_32_bits(input2);
		u32 input2_lo = lower_32_bits(input2);

		__asm__ __volatile__ (CALL_NOSPEC
				      : "=A" (hv_status),
					"+c" (input1_lo), ASM_CALL_CONSTRAINT
				      : "A" (control), "b" (input1_hi),
					"D" (input2_hi), "S" (input2_lo),
					THUNK_TARGET(hv_hypercall_pg)
				      : "cc");
	}
#endif
	return hv_status;
}
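
/*
 * Illustrative sketch (not part of this header): HVCALL_SEND_IPI fits the
 * fast calling convention, with the vector in the first quadword and a
 * 64-VP processor mask in the second.
 */
static inline bool example_send_ipi(int vector, u64 vp_mask)
{
	u64 status = hv_do_fast_hypercall16(HVCALL_SEND_IPI, vector, vp_mask);

	return (status & HV_HYPERCALL_RESULT_MASK) == HV_STATUS_SUCCESS;
}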

/*
 * Rep hypercalls. Callers of this function are supposed to ensure that
 * rep_count and varhead_size comply with the Hyper-V hypercall definition.
 */
static inline u64 hv_do_rep_hypercall(u16 code, u16 rep_count, u16 varhead_size,
				      void *input, void *output)
{
	u64 control = code;
	u64 status;
	u16 rep_comp;

	control |= (u64)varhead_size << HV_HYPERCALL_VARHEAD_OFFSET;
	control |= (u64)rep_count << HV_HYPERCALL_REP_COMP_OFFSET;

	do {
		status = hv_do_hypercall(control, input, output);
		if ((status & HV_HYPERCALL_RESULT_MASK) != HV_STATUS_SUCCESS)
			return status;

		/* Bits 32-43 of status have 'Reps completed' data. */
		rep_comp = (status & HV_HYPERCALL_REP_COMP_MASK) >>
			HV_HYPERCALL_REP_COMP_OFFSET;

		control &= ~HV_HYPERCALL_REP_START_MASK;
		control |= (u64)rep_comp << HV_HYPERCALL_REP_START_OFFSET;

		touch_nmi_watchdog();
	} while (rep_comp < rep_count);

	return status;
}
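
/*
 * Illustrative sketch (not part of this header): flushing a GVA list is a
 * rep hypercall with one rep per gva_list entry. The caller is assumed to
 * have filled the hv_tlb_flush input (including gva_list[]) as in the
 * hv_do_hypercall() sketch above.
 */
static inline u64 example_flush_gva_list(struct hv_tlb_flush *flush,
					 u16 gva_count)
{
	return hv_do_rep_hypercall(HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST,
				   gva_count, 0, flush, NULL);
}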

/*
 * The hypervisor's notion of virtual processor ID is different from
 * Linux's notion of CPU ID. This information can only be retrieved
 * in the context of the calling CPU. Set up a map for easy access
 * to this information.
 */
extern u32 *hv_vp_index;
extern u32 hv_max_vp_index;
extern struct hv_vp_assist_page **hv_vp_assist_page;

static inline struct hv_vp_assist_page *hv_get_vp_assist_page(unsigned int cpu)
{
	if (!hv_vp_assist_page)
		return NULL;

	return hv_vp_assist_page[cpu];
}

/**
 * hv_cpu_number_to_vp_number() - Map CPU to VP.
 * @cpu_number: CPU number in Linux terms
 *
 * This function returns the mapping between the Linux processor
 * number and the hypervisor's virtual processor number, useful
 * in making hypercalls that talk about specific processors.
 *
 * Return: Virtual processor number in Hyper-V terms
 */
static inline int hv_cpu_number_to_vp_number(int cpu_number)
{
	return hv_vp_index[cpu_number];
}

static inline int cpumask_to_vpset(struct hv_vpset *vpset,
				    const struct cpumask *cpus)
{
	int cpu, vcpu, vcpu_bank, vcpu_offset, nr_bank = 1;

	/* valid_bank_mask can represent up to 64 banks */
	if (hv_max_vp_index / 64 >= 64)
		return 0;

	/*
	 * Clear all banks up to the maximum possible bank: hv_tlb_flush_ex
	 * structs are not cleared between calls, so we would otherwise risk
	 * flushing unneeded vCPUs.
	 */
	for (vcpu_bank = 0; vcpu_bank <= hv_max_vp_index / 64; vcpu_bank++)
		vpset->bank_contents[vcpu_bank] = 0;

	/*
	 * Some banks may end up being empty but this is acceptable.
	 */
	for_each_cpu(cpu, cpus) {
		vcpu = hv_cpu_number_to_vp_number(cpu);
		if (vcpu == VP_INVAL)
			return -1;
		vcpu_bank = vcpu / 64;
		vcpu_offset = vcpu % 64;
		__set_bit(vcpu_offset, (unsigned long *)
			  &vpset->bank_contents[vcpu_bank]);
		if (vcpu_bank >= nr_bank)
			nr_bank = vcpu_bank + 1;
	}
	vpset->valid_bank_mask = GENMASK_ULL(nr_bank - 1, 0);
	return nr_bank;
}
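
/*
 * Illustrative sketch (not part of this header): the "ex" hypercall
 * variants take a variable-sized hv_vpset, and the bank count returned by
 * cpumask_to_vpset() becomes the varhead_size of the rep hypercall. The
 * caller is assumed to have filled address_space/flags of an
 * hv_tlb_flush_ex from the per-cpu input page, as in the earlier sketch.
 */
static inline u64 example_flush_space_ex(struct hv_tlb_flush_ex *flush,
					 const struct cpumask *cpus)
{
	int nr_bank;

	flush->hv_vp_set.valid_bank_mask = 0;
	flush->hv_vp_set.format = HV_GENERIC_SET_SPARSE_4K;
	nr_bank = cpumask_to_vpset(&flush->hv_vp_set, cpus);
	if (nr_bank < 0)
		return U64_MAX;	/* a CPU's VP index wasn't known yet */

	return hv_do_rep_hypercall(HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX,
				   0, nr_bank, flush, NULL);
}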

void __init hyperv_init(void);
void hyperv_setup_mmu_ops(void);
void hyperv_report_panic(struct pt_regs *regs, long err);
void hyperv_report_panic_msg(phys_addr_t pa, size_t size);
bool hv_is_hyperv_initialized(void);
void hyperv_cleanup(void);

void hyperv_reenlightenment_intr(struct pt_regs *regs);
void set_hv_tscchange_cb(void (*cb)(void));
void clear_hv_tscchange_cb(void);
void hyperv_stop_tsc_emulation(void);
int hyperv_flush_guest_mapping(u64 as);
int hyperv_flush_guest_mapping_range(u64 as,
		hyperv_fill_flush_list_func fill_func, void *data);
int hyperv_fill_flush_guest_mapping_list(
		struct hv_guest_mapping_flush_list *flush,
		u64 start_gfn, u64 end_gfn);

#ifdef CONFIG_X86_64
void hv_apic_init(void);
void __init hv_init_spinlocks(void);
bool hv_vcpu_is_preempted(int vcpu);
#else
static inline void hv_apic_init(void) {}
#endif

#else /* CONFIG_HYPERV */
static inline void hyperv_init(void) {}
static inline bool hv_is_hyperv_initialized(void) { return false; }
static inline void hyperv_cleanup(void) {}
static inline void hyperv_setup_mmu_ops(void) {}
static inline void set_hv_tscchange_cb(void (*cb)(void)) {}
static inline void clear_hv_tscchange_cb(void) {}
static inline void hyperv_stop_tsc_emulation(void) {}
static inline struct hv_vp_assist_page *hv_get_vp_assist_page(unsigned int cpu)
{
	return NULL;
}
static inline int hyperv_flush_guest_mapping(u64 as) { return -1; }
static inline int hyperv_flush_guest_mapping_range(u64 as,
		hyperv_fill_flush_list_func fill_func, void *data)
{
	return -1;
}
#endif /* CONFIG_HYPERV */

#endif