/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Fast user context implementation of clock_gettime, gettimeofday, and time.
 *
 * Copyright (C) 2019 ARM Limited.
 * Copyright 2006 Andi Kleen, SUSE Labs.
 * 32 Bit compat layer by Stefani Seibold <stefani@seibold.net>
 *  sponsored by Rohde & Schwarz GmbH & Co. KG Munich/Germany
 */
#ifndef __ASM_VDSO_GETTIMEOFDAY_H
#define __ASM_VDSO_GETTIMEOFDAY_H

#ifndef __ASSEMBLY__

#include <uapi/linux/time.h>
#include <asm/vgtod.h>
#include <asm/vvar.h>
#include <asm/unistd.h>
#include <asm/msr.h>
#include <asm/pvclock.h>
#include <asm/mshyperv.h>
/*
 * The vDSO data page lives in the vvar area; all user-visible clock
 * functions in this header read their timekeeping state from it.
 */
#define __vdso_data (VVAR(_vdso_data))

/* This architecture provides the time() and clock_getres() fast paths. */
#define VDSO_HAS_TIME 1

#define VDSO_HAS_CLOCK_GETRES 1
/*
 * Hidden-visibility declarations of the guest clock pages mapped into
 * the vDSO image (hidden so references do not go through the GOT).
 */
#ifdef CONFIG_PARAVIRT_CLOCK
extern u8 pvclock_page[PAGE_SIZE]
	__attribute__((visibility("hidden")));
#endif

#ifdef CONFIG_HYPERV_TSCPAGE
extern u8 hvclock_page[PAGE_SIZE]
	__attribute__((visibility("hidden")));
#endif
41 static __always_inline
42 long clock_gettime_fallback(clockid_t _clkid, struct __kernel_timespec *_ts)
46 asm ("syscall" : "=a" (ret), "=m" (*_ts) :
47 "0" (__NR_clock_gettime), "D" (_clkid), "S" (_ts) :
53 static __always_inline
54 long gettimeofday_fallback(struct __kernel_old_timeval *_tv,
59 asm("syscall" : "=a" (ret) :
60 "0" (__NR_gettimeofday), "D" (_tv), "S" (_tz) : "memory");
65 static __always_inline
66 long clock_getres_fallback(clockid_t _clkid, struct __kernel_timespec *_ts)
70 asm ("syscall" : "=a" (ret), "=m" (*_ts) :
71 "0" (__NR_clock_getres), "D" (_clkid), "S" (_ts) :
79 static __always_inline
80 long clock_gettime_fallback(clockid_t _clkid, struct __kernel_timespec *_ts)
86 "mov %[clock], %%ebx \n"
87 "call __kernel_vsyscall \n"
89 : "=a" (ret), "=m" (*_ts)
90 : "0" (__NR_clock_gettime64), [clock] "g" (_clkid), "c" (_ts)
96 static __always_inline
97 long gettimeofday_fallback(struct __kernel_old_timeval *_tv,
103 "mov %%ebx, %%edx \n"
105 "call __kernel_vsyscall \n"
106 "mov %%edx, %%ebx \n"
108 : "0" (__NR_gettimeofday), "g" (_tv), "c" (_tz)
114 static __always_inline long
115 clock_getres_fallback(clockid_t _clkid, struct __kernel_timespec *_ts)
120 "mov %%ebx, %%edx \n"
121 "mov %[clock], %%ebx \n"
122 "call __kernel_vsyscall \n"
123 "mov %%edx, %%ebx \n"
124 : "=a" (ret), "=m" (*_ts)
125 : "0" (__NR_clock_getres_time64), [clock] "g" (_clkid), "c" (_ts)
#ifdef CONFIG_PARAVIRT_CLOCK
/* Return vCPU 0's pvclock time info, which backs the pvclock vDSO page. */
static const struct pvclock_vsyscall_time_info *get_pvti0(void)
{
	return (const struct pvclock_vsyscall_time_info *)&pvclock_page;
}

/*
 * Read the paravirt clock.  Returns the cycle count, or U64_MAX when
 * the pvclock is not usable (TSC not stable) so the caller falls back
 * to a syscall.
 */
static u64 vread_pvclock(void)
{
	const struct pvclock_vcpu_time_info *pvti = &get_pvti0()->pvti;
	u32 version;
	u64 ret;

	/*
	 * Note: The kernel and hypervisor must guarantee that cpu ID
	 * number maps 1:1 to per-CPU pvclock time info.
	 *
	 * Because the hypervisor is entirely unaware of guest userspace
	 * preemption, it cannot guarantee that per-CPU pvclock time
	 * info is updated if the underlying CPU changes or that that
	 * version is increased whenever underlying CPU changes.
	 *
	 * On KVM, we are guaranteed that pvti updates for any vCPU are
	 * atomic as seen by *all* vCPUs.  This is an even stronger
	 * guarantee than we get with a normal seqlock.
	 *
	 * On Xen, we don't appear to have that guarantee, but Xen still
	 * supplies a valid seqlock using the version field.
	 *
	 * We only do pvclock vdso timing at all if
	 * PVCLOCK_TSC_STABLE_BIT is set, and we interpret that bit to
	 * mean that all vCPUs have matching pvti and that the TSC is
	 * synced, so we can just look at vCPU 0's pvti.
	 */
	do {
		version = pvclock_read_begin(pvti);

		if (unlikely(!(pvti->flags & PVCLOCK_TSC_STABLE_BIT)))
			return U64_MAX;

		ret = __pvclock_read_cycles(pvti, rdtsc_ordered());
	} while (pvclock_read_retry(pvti, version));

	return ret;
}
#endif
#ifdef CONFIG_HYPERV_TSCPAGE
/* Read the Hyper-V reference TSC page mapped into the vDSO. */
static u64 vread_hvclock(void)
{
	const struct ms_hyperv_tsc_page *tsc_pg =
		(const struct ms_hyperv_tsc_page *)&hvclock_page;

	return hv_read_tsc_page(tsc_pg);
}
#endif
190 static inline u64 __arch_get_hw_counter(s32 clock_mode)
192 if (clock_mode == VCLOCK_TSC)
193 return (u64)rdtsc_ordered();
195 * For any memory-mapped vclock type, we need to make sure that gcc
196 * doesn't cleverly hoist a load before the mode check. Otherwise we
197 * might end up touching the memory-mapped page even if the vclock in
198 * question isn't enabled, which will segfault. Hence the barriers.
200 #ifdef CONFIG_PARAVIRT_CLOCK
201 if (clock_mode == VCLOCK_PVCLOCK) {
203 return vread_pvclock();
206 #ifdef CONFIG_HYPERV_TSCPAGE
207 if (clock_mode == VCLOCK_HVCLOCK) {
209 return vread_hvclock();
215 static __always_inline const struct vdso_data *__arch_get_vdso_data(void)
220 #endif /* !__ASSEMBLY__ */
222 #endif /* __ASM_VDSO_GETTIMEOFDAY_H */