x86/vdso: Give the [ph]vclock_page declarations real types
[linux-2.6-block.git] / arch/x86/include/asm/vdso/gettimeofday.h
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Fast user context implementation of clock_gettime, gettimeofday, and time.
 *
 * Copyright (C) 2019 ARM Limited.
 * Copyright 2006 Andi Kleen, SUSE Labs.
 * 32 Bit compat layer by Stefani Seibold <stefani@seibold.net>
 *  sponsored by Rohde & Schwarz GmbH & Co. KG Munich/Germany
 */
#ifndef __ASM_VDSO_GETTIMEOFDAY_H
#define __ASM_VDSO_GETTIMEOFDAY_H

#ifndef __ASSEMBLY__

#include <uapi/linux/time.h>
#include <asm/vgtod.h>
#include <asm/vvar.h>
#include <asm/unistd.h>
#include <asm/msr.h>
#include <asm/pvclock.h>
#include <asm/mshyperv.h>

#define __vdso_data (VVAR(_vdso_data))

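/*
 * Editor's note (added): these knobs tell the generic lib/vdso code which
 * optional vDSO entry points this architecture implements.
 */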
#define VDSO_HAS_TIME 1

#define VDSO_HAS_CLOCK_GETRES 1

/*
 * Declare the memory-mapped vclock data pages.  These come from hypervisors.
 * If we ever reintroduce something like direct access to an MMIO clock like
 * the HPET again, it will go here as well.
 *
 * A load from any of these pages will segfault if the clock in question is
 * disabled, so appropriate compiler barriers and checks need to be used
 * to prevent stray loads.
 *
 * These declarations MUST NOT be const.  The compiler will assume that
 * an extern const variable has genuinely constant contents, and the
 * resulting code won't work, since the whole point is that these pages
 * change over time, possibly while we're accessing them.
 */

#ifdef CONFIG_PARAVIRT_CLOCK
/*
 * This is the vCPU 0 pvclock page.  We only use pvclock from the vDSO
 * if the hypervisor tells us that all vCPUs can get valid data from the
 * vCPU 0 page.
 */
extern struct pvclock_vsyscall_time_info pvclock_page
        __attribute__((visibility("hidden")));
#endif

#ifdef CONFIG_HYPERV_TSCPAGE
extern struct ms_hyperv_tsc_page hvclock_page
        __attribute__((visibility("hidden")));
#endif

#ifndef BUILD_VDSO32

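/*
 * Editor's note (added): the fallbacks below issue the real syscall.  Per
 * the x86-64 syscall ABI, the syscall number goes in %rax (tied to the "=a"
 * output), the two arguments in %rdi/%rsi ("D"/"S"), and the SYSCALL
 * instruction clobbers %rcx and %r11.  The "=m" (*_ts) output tells the
 * compiler that the timespec may be written, so it does not assume *_ts is
 * unchanged across the asm.
 */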
static __always_inline
long clock_gettime_fallback(clockid_t _clkid, struct __kernel_timespec *_ts)
{
        long ret;

        asm ("syscall" : "=a" (ret), "=m" (*_ts) :
             "0" (__NR_clock_gettime), "D" (_clkid), "S" (_ts) :
             "rcx", "r11");

        return ret;
}

static __always_inline
long gettimeofday_fallback(struct __kernel_old_timeval *_tv,
                           struct timezone *_tz)
{
        long ret;

        asm("syscall" : "=a" (ret) :
            "0" (__NR_gettimeofday), "D" (_tv), "S" (_tz) : "memory");

        return ret;
}

static __always_inline
long clock_getres_fallback(clockid_t _clkid, struct __kernel_timespec *_ts)
{
        long ret;

        asm ("syscall" : "=a" (ret), "=m" (*_ts) :
             "0" (__NR_clock_getres), "D" (_clkid), "S" (_ts) :
             "rcx", "r11");

        return ret;
}

#else

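/*
 * Editor's note (added): on 32-bit the first syscall argument goes in %ebx,
 * but %ebx is also the GOT pointer in PIC builds and cannot be handed to
 * the compiler as an asm operand there.  The fallbacks below therefore
 * stash the old %ebx in %edx (hence the "edx" clobber), load the argument
 * by hand, call __kernel_vsyscall, and restore %ebx afterwards.
 */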
static __always_inline
long clock_gettime_fallback(clockid_t _clkid, struct __kernel_timespec *_ts)
{
        long ret;

        asm (
                "mov %%ebx, %%edx \n"
                "mov %[clock], %%ebx \n"
                "call __kernel_vsyscall \n"
                "mov %%edx, %%ebx \n"
                : "=a" (ret), "=m" (*_ts)
                : "0" (__NR_clock_gettime64), [clock] "g" (_clkid), "c" (_ts)
                : "edx");

        return ret;
}

static __always_inline
long gettimeofday_fallback(struct __kernel_old_timeval *_tv,
                           struct timezone *_tz)
{
        long ret;

        asm(
                "mov %%ebx, %%edx \n"
                "mov %2, %%ebx \n"
                "call __kernel_vsyscall \n"
                "mov %%edx, %%ebx \n"
                : "=a" (ret)
                : "0" (__NR_gettimeofday), "g" (_tv), "c" (_tz)
                : "memory", "edx");

        return ret;
}

static __always_inline long
clock_getres_fallback(clockid_t _clkid, struct __kernel_timespec *_ts)
{
        long ret;

        asm (
                "mov %%ebx, %%edx \n"
                "mov %[clock], %%ebx \n"
                "call __kernel_vsyscall \n"
                "mov %%edx, %%ebx \n"
                : "=a" (ret), "=m" (*_ts)
                : "0" (__NR_clock_getres_time64), [clock] "g" (_clkid), "c" (_ts)
                : "edx");

        return ret;
}

#endif

#ifdef CONFIG_PARAVIRT_CLOCK
static u64 vread_pvclock(void)
{
        const struct pvclock_vcpu_time_info *pvti = &pvclock_page.pvti;
        u32 version;
        u64 ret;

        /*
         * Note: The kernel and hypervisor must guarantee that cpu ID
         * number maps 1:1 to per-CPU pvclock time info.
         *
         * Because the hypervisor is entirely unaware of guest userspace
         * preemption, it cannot guarantee that per-CPU pvclock time
         * info is updated if the underlying CPU changes or that the
         * version is increased whenever the underlying CPU changes.
         *
         * On KVM, we are guaranteed that pvti updates for any vCPU are
         * atomic as seen by *all* vCPUs.  This is an even stronger
         * guarantee than we get with a normal seqlock.
         *
         * On Xen, we don't appear to have that guarantee, but Xen still
         * supplies a valid seqlock using the version field.
         *
         * We only do pvclock vdso timing at all if
         * PVCLOCK_TSC_STABLE_BIT is set, and we interpret that bit to
         * mean that all vCPUs have matching pvti and that the TSC is
         * synced, so we can just look at vCPU 0's pvti.
         */

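        /*
         * Editor's note (added): pvclock_read_begin()/pvclock_read_retry()
         * form a seqcount-style read loop: the version field is sampled
         * before and after the data, and the read is retried if the
         * hypervisor updated the page in the meantime.
         * __pvclock_read_cycles() scales the TSC delta since the last
         * hypervisor update by the per-vCPU multiplier/shift and adds the
         * recorded system time.
         */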
        do {
                version = pvclock_read_begin(pvti);

                if (unlikely(!(pvti->flags & PVCLOCK_TSC_STABLE_BIT)))
                        return U64_MAX;

                ret = __pvclock_read_cycles(pvti, rdtsc_ordered());
        } while (pvclock_read_retry(pvti, version));

        return ret;
}
#endif

#ifdef CONFIG_HYPERV_TSCPAGE
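/*
 * Editor's note (added): the Hyper-V TSC reference page supplies a scale
 * and offset for the raw TSC.  hv_read_tsc_page() (asm/mshyperv.h) roughly
 * computes ((rdtsc * tsc_scale) >> 64) + tsc_offset under the page's
 * sequence counter and returns U64_MAX if the page is not currently valid.
 */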
static u64 vread_hvclock(void)
{
        return hv_read_tsc_page(&hvclock_page);
}
#endif

static inline u64 __arch_get_hw_counter(s32 clock_mode)
{
        if (clock_mode == VCLOCK_TSC)
                return (u64)rdtsc_ordered();
        /*
         * For any memory-mapped vclock type, we need to make sure that gcc
         * doesn't cleverly hoist a load before the mode check.  Otherwise we
         * might end up touching the memory-mapped page even if the vclock in
         * question isn't enabled, which will segfault.  Hence the barriers.
         */
#ifdef CONFIG_PARAVIRT_CLOCK
        if (clock_mode == VCLOCK_PVCLOCK) {
                barrier();
                return vread_pvclock();
        }
#endif
#ifdef CONFIG_HYPERV_TSCPAGE
        if (clock_mode == VCLOCK_HVCLOCK) {
                barrier();
                return vread_hvclock();
        }
#endif
        return U64_MAX;
}
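
/*
 * Editor's note (added): rough sketch of how the generic lib/vdso code
 * consumes the hooks above (field names follow the common struct vdso_data;
 * details vary by kernel version):
 *
 *      do {
 *              seq    = vdso_read_begin(vd);
 *              cycles = __arch_get_hw_counter(vd->clock_mode);
 *              ns     = vdso_ts->nsec;
 *              ns    += (cycles - vd->cycle_last) * vd->mult;
 *              ns   >>= vd->shift;
 *      } while (vdso_read_retry(vd, seq));
 *
 * A U64_MAX return from __arch_get_hw_counter() fails the cycle range check
 * in the generic code and forces the caller onto the syscall fallback path.
 */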

static __always_inline const struct vdso_data *__arch_get_vdso_data(void)
{
        return __vdso_data;
}

#endif /* !__ASSEMBLY__ */

#endif /* __ASM_VDSO_GETTIMEOFDAY_H */