/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Fast user context implementation of clock_gettime, gettimeofday, and time.
 *
 * Copyright (C) 2019 ARM Limited.
 * Copyright 2006 Andi Kleen, SUSE Labs.
 * 32 Bit compat layer by Stefani Seibold <stefani@seibold.net>
 *  sponsored by Rohde & Schwarz GmbH & Co. KG Munich/Germany
 */
#ifndef __ASM_VDSO_GETTIMEOFDAY_H
#define __ASM_VDSO_GETTIMEOFDAY_H
#ifndef __ASSEMBLY__

#include <uapi/linux/time.h>
#include <asm/vgtod.h>
#include <asm/vvar.h>
#include <asm/unistd.h>
#include <asm/msr.h>
#include <asm/pvclock.h>
#include <asm/mshyperv.h>

#define __vdso_data (VVAR(_vdso_data))

#define VDSO_HAS_TIME 1

#define VDSO_HAS_CLOCK_GETRES 1
/*
 * Declare the memory-mapped vclock data pages.  These come from hypervisors.
 * If we ever reintroduce something like direct access to an MMIO clock like
 * the HPET again, it will go here as well.
 *
 * A load from any of these pages will segfault if the clock in question is
 * disabled, so appropriate compiler barriers and checks need to be used
 * to prevent stray loads.
 *
 * These declarations MUST NOT be const.  The compiler will assume that
 * an extern const variable has genuinely constant contents, and the
 * resulting code won't work, since the whole point is that these pages
 * change over time, possibly while we're accessing them.
 */
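/*
 * To make the const point above concrete, here is an illustrative sketch
 * (not a declaration anyone should add) of what would go wrong:
 *
 *	extern const struct ms_hyperv_tsc_page hvclock_page;	<-- WRONG
 *
 *	u64 a = hvclock_page.tsc_scale;
 *	u64 b = hvclock_page.tsc_scale;		<-- may be folded into 'a'
 *
 * With the const qualifier the compiler may assume the page never changes
 * and cache or merge loads, which defeats the purpose of a live,
 * hypervisor-updated page.
 */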
#ifdef CONFIG_PARAVIRT_CLOCK
/*
 * This is the vCPU 0 pvclock page.  We only use pvclock from the vDSO
 * if the hypervisor tells us that all vCPUs can get valid data from the
 * page.
 */
extern struct pvclock_vsyscall_time_info pvclock_page
	__attribute__((visibility("hidden")));
#endif

#ifdef CONFIG_HYPERV_TSCPAGE
extern struct ms_hyperv_tsc_page hvclock_page
	__attribute__((visibility("hidden")));
#endif
#ifdef CONFIG_X86_64

static __always_inline
long clock_gettime_fallback(clockid_t _clkid, struct __kernel_timespec *_ts)
{
	long ret;

	asm ("syscall" : "=a" (ret), "=m" (*_ts) :
	     "0" (__NR_clock_gettime), "D" (_clkid), "S" (_ts) :
	     "rcx", "r11");

	return ret;
}
static __always_inline
long gettimeofday_fallback(struct __kernel_old_timeval *_tv,
			   struct timezone *_tz)
{
	long ret;

	asm("syscall" : "=a" (ret) :
	    "0" (__NR_gettimeofday), "D" (_tv), "S" (_tz) : "memory");

	return ret;
}
static __always_inline
long clock_getres_fallback(clockid_t _clkid, struct __kernel_timespec *_ts)
{
	long ret;

	asm ("syscall" : "=a" (ret), "=m" (*_ts) :
	     "0" (__NR_clock_getres), "D" (_clkid), "S" (_ts) :
	     "rcx", "r11");

	return ret;
}
#else

static __always_inline
long clock_gettime_fallback(clockid_t _clkid, struct __kernel_timespec *_ts)
{
	long ret;

	asm (
		"mov %%ebx, %%edx \n"
		"mov %[clock], %%ebx \n"
		"call __kernel_vsyscall \n"
		"mov %%edx, %%ebx \n"
		: "=a" (ret), "=m" (*_ts)
		: "0" (__NR_clock_gettime64), [clock] "g" (_clkid), "c" (_ts)
		: "edx");

	return ret;
}
static __always_inline
long gettimeofday_fallback(struct __kernel_old_timeval *_tv,
			   struct timezone *_tz)
{
	long ret;

	asm(
		"mov %%ebx, %%edx \n"
		"mov %2, %%ebx \n"
		"call __kernel_vsyscall \n"
		"mov %%edx, %%ebx \n"
		: "=a" (ret)
		: "0" (__NR_gettimeofday), "g" (_tv), "c" (_tz)
		: "memory", "edx");

	return ret;
}
static __always_inline long
clock_getres_fallback(clockid_t _clkid, struct __kernel_timespec *_ts)
{
	long ret;

	asm (
		"mov %%ebx, %%edx \n"
		"mov %[clock], %%ebx \n"
		"call __kernel_vsyscall \n"
		"mov %%edx, %%ebx \n"
		: "=a" (ret), "=m" (*_ts)
		: "0" (__NR_clock_getres_time64), [clock] "g" (_clkid), "c" (_ts)
		: "edx");

	return ret;
}

#endif
#ifdef CONFIG_PARAVIRT_CLOCK
static u64 vread_pvclock(void)
{
	const struct pvclock_vcpu_time_info *pvti = &pvclock_page.pvti;
	u32 version;
	u64 ret;

	/*
	 * Note: The kernel and hypervisor must guarantee that cpu ID
	 * number maps 1:1 to per-CPU pvclock time info.
	 *
	 * Because the hypervisor is entirely unaware of guest userspace
	 * preemption, it cannot guarantee that per-CPU pvclock time
	 * info is updated if the underlying CPU changes or that the
	 * version is increased whenever the underlying CPU changes.
	 *
	 * On KVM, we are guaranteed that pvti updates for any vCPU are
	 * atomic as seen by *all* vCPUs.  This is an even stronger
	 * guarantee than we get with a normal seqlock.
	 *
	 * On Xen, we don't appear to have that guarantee, but Xen still
	 * supplies a valid seqlock using the version field.
	 *
	 * We only do pvclock vdso timing at all if
	 * PVCLOCK_TSC_STABLE_BIT is set, and we interpret that bit to
	 * mean that all vCPUs have matching pvti and that the TSC is
	 * synced, so we can just look at vCPU 0's pvti.
	 */
	do {
		version = pvclock_read_begin(pvti);

		if (unlikely(!(pvti->flags & PVCLOCK_TSC_STABLE_BIT)))
			return U64_MAX;

		ret = __pvclock_read_cycles(pvti, rdtsc_ordered());
	} while (pvclock_read_retry(pvti, version));

	return ret;
}
#endif
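/*
 * For reference, a simplified sketch of what __pvclock_read_cycles()
 * derives from the pvti fields inside that loop (illustrative only; see
 * asm/pvclock.h for the real helpers):
 *
 *	delta = rdtsc_ordered() - pvti->tsc_timestamp;
 *	ns    = pvti->system_time +
 *		pvclock_scale_delta(delta, pvti->tsc_to_system_mul,
 *				    pvti->tsc_shift);
 *
 * i.e. the guest TSC is offset and scaled into the hypervisor-provided
 * time base, and the version check around it ensures none of those fields
 * changed mid-read.
 */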
#ifdef CONFIG_HYPERV_TSCPAGE
static u64 vread_hvclock(void)
{
	return hv_read_tsc_page(&hvclock_page);
}
#endif
static inline u64 __arch_get_hw_counter(s32 clock_mode)
{
	if (clock_mode == VCLOCK_TSC)
		return (u64)rdtsc_ordered();
	/*
	 * For any memory-mapped vclock type, we need to make sure that gcc
	 * doesn't cleverly hoist a load before the mode check.  Otherwise we
	 * might end up touching the memory-mapped page even if the vclock in
	 * question isn't enabled, which will segfault.  Hence the barriers.
	 */
#ifdef CONFIG_PARAVIRT_CLOCK
	if (clock_mode == VCLOCK_PVCLOCK) {
		barrier();
		return vread_pvclock();
	}
#endif
#ifdef CONFIG_HYPERV_TSCPAGE
	if (clock_mode == VCLOCK_HVCLOCK) {
		barrier();
		return vread_hvclock();
	}
#endif
	return U64_MAX;
}
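/*
 * Illustrative sketch of the hazard the barriers above guard against (this
 * is the broken form, not real code): without barrier(), the compiler could
 * hoist the page load above the mode check,
 *
 *	u64 tmp = hvclock_page.tsc_sequence;	(load issued unconditionally)
 *	if (clock_mode == VCLOCK_HVCLOCK)
 *		return ...;
 *
 * and the unconditional load would fault whenever the Hyper-V clock page
 * is not mapped.
 */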
static __always_inline const struct vdso_data *__arch_get_vdso_data(void)
{
	return __vdso_data;
}
#endif /* !__ASSEMBLY__ */

#endif /* __ASM_VDSO_GETTIMEOFDAY_H */