Commit | Line | Data |
---|---|---|
2aae950b AK |
1 | /* |
2 | * Copyright 2006 Andi Kleen, SUSE Labs. | |
3 | * Subject to the GNU Public License, v.2 | |
4 | * | |
f144a6b4 | 5 | * Fast user context implementation of clock_gettime, gettimeofday, and time. |
2aae950b | 6 | * |
7a59ed41 SS |
7 | * 32 Bit compat layer by Stefani Seibold <stefani@seibold.net> |
8 | * sponsored by Rohde & Schwarz GmbH & Co. KG Munich/Germany | |
9 | * | |
2aae950b AK |
10 | * The code should have no internal unresolved relocations. |
11 | * Check with readelf after changing. | |
2aae950b AK |
12 | */ |
13 | ||
2b7d0390 | 14 | /* Disable profiling for userspace code: */ |
2ed84eeb | 15 | #define DISABLE_BRANCH_PROFILING |
2b7d0390 | 16 | |
2aae950b | 17 | #include <linux/kernel.h> |
7a59ed41 | 18 | #include <uapi/linux/time.h> |
2aae950b AK |
19 | #include <linux/string.h> |
20 | #include <asm/vsyscall.h> | |
98d0ac38 | 21 | #include <asm/fixmap.h> |
2aae950b | 22 | #include <asm/vgtod.h> |
2aae950b AK |
23 | #include <asm/hpet.h> |
24 | #include <asm/unistd.h> | |
25 | #include <asm/io.h> | |
51c19b4f | 26 | #include <asm/pvclock.h> |
2aae950b | 27 | |
8c49d9a7 | 28 | #define gtod (&VVAR(vsyscall_gtod_data)) |
2aae950b | 29 | |
7a59ed41 SS |
30 | extern int __vdso_clock_gettime(clockid_t clock, struct timespec *ts); |
31 | extern int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz); | |
32 | extern time_t __vdso_time(time_t *t); | |
33 | ||
34 | #ifndef BUILD_VDSO32 | |
35 | ||
411f790c | 36 | static notrace cycle_t vread_hpet(void) |
98d0ac38 | 37 | { |
411f790c SS |
38 | return readl((const void __iomem *)fix_to_virt(VSYSCALL_HPET) + HPET_COUNTER); |
39 | } | |
98d0ac38 | 40 | |
/*
 * Fall back to the real clock_gettime() system call when the active
 * clocksource cannot be read from userspace.  Argument registers
 * follow the x86-64 syscall ABI: %rdi = clock, %rsi = ts.
 */
notrace static long vdso_fallback_gettime(long clock, struct timespec *ts)
{
	long ret;
	asm("syscall" : "=a" (ret) :
	    "0" (__NR_clock_gettime), "D" (clock), "S" (ts) : "memory");
	return ret;
}
48 | ||
/*
 * Fall back to the real gettimeofday() system call.  Same register
 * convention as vdso_fallback_gettime(): %rdi = tv, %rsi = tz.
 */
notrace static long vdso_fallback_gtod(struct timeval *tv, struct timezone *tz)
{
	long ret;

	asm("syscall" : "=a" (ret) :
	    "0" (__NR_gettimeofday), "D" (tv), "S" (tz) : "memory");
	return ret;
}
57 | ||
51c19b4f MT |
58 | #ifdef CONFIG_PARAVIRT_CLOCK |
59 | ||
60 | static notrace const struct pvclock_vsyscall_time_info *get_pvti(int cpu) | |
61 | { | |
62 | const struct pvclock_vsyscall_time_info *pvti_base; | |
63 | int idx = cpu / (PAGE_SIZE/PVTI_SIZE); | |
64 | int offset = cpu % (PAGE_SIZE/PVTI_SIZE); | |
65 | ||
66 | BUG_ON(PVCLOCK_FIXMAP_BEGIN + idx > PVCLOCK_FIXMAP_END); | |
67 | ||
68 | pvti_base = (struct pvclock_vsyscall_time_info *) | |
69 | __fix_to_virt(PVCLOCK_FIXMAP_BEGIN+idx); | |
70 | ||
71 | return &pvti_base[offset]; | |
72 | } | |
73 | ||
/*
 * Read the paravirt clock.  Retries until a consistent snapshot of the
 * per-CPU pvclock data is obtained (no concurrent update, no CPU
 * migration mid-read).  Sets *mode to VCLOCK_NONE if the hypervisor's
 * TSC is not marked stable, telling the caller to fall back.
 */
static notrace cycle_t vread_pvclock(int *mode)
{
	const struct pvclock_vsyscall_time_info *pvti;
	cycle_t ret;
	u64 last;
	u32 version;
	u8 flags;
	unsigned cpu, cpu1;


	/*
	 * Note: hypervisor must guarantee that:
	 * 1. cpu ID number maps 1:1 to per-CPU pvclock time info.
	 * 2. that per-CPU pvclock time info is updated if the
	 *    underlying CPU changes.
	 * 3. that version is increased whenever underlying CPU
	 *    changes.
	 *
	 */
	do {
		cpu = __getcpu() & VGETCPU_CPU_MASK;
		/* TODO: We can put vcpu id into higher bits of pvti.version.
		 * This will save a couple of cycles by getting rid of
		 * __getcpu() calls (Gleb).
		 */

		pvti = get_pvti(cpu);

		version = __pvclock_read_cycles(&pvti->pvti, &ret, &flags);

		/*
		 * Test we're still on the cpu as well as the version.
		 * We could have been migrated just after the first
		 * vgetcpu but before fetching the version, so we
		 * wouldn't notice a version change.
		 */
		cpu1 = __getcpu() & VGETCPU_CPU_MASK;
	} while (unlikely(cpu != cpu1 ||
			  (pvti->pvti.version & 1) ||	/* odd version: update in flight */
			  pvti->pvti.version != version));

	if (unlikely(!(flags & PVCLOCK_TSC_STABLE_BIT)))
		*mode = VCLOCK_NONE;

	/* refer to tsc.c read_tsc() comment for rationale */
	last = gtod->clock.cycle_last;

	/* Clamp to cycle_last so time never appears to go backwards. */
	if (likely(ret >= last))
		return ret;

	return last;
}
126 | #endif | |
127 | ||
7a59ed41 SS |
128 | #else |
129 | ||
130 | extern u8 hpet_page | |
131 | __attribute__((visibility("hidden"))); | |
132 | ||
#ifdef CONFIG_HPET_TIMER
/* 32-bit: the HPET page is reached via a vdso-local hidden symbol. */
static notrace cycle_t vread_hpet(void)
{
	const void __iomem *counter = (const void __iomem *)(&hpet_page + HPET_COUNTER);

	return readl(counter);
}
#endif
139 | ||
/*
 * 32-bit fallback to the clock_gettime() system call through the
 * VDSO32_vsyscall trampoline.  %ebx is manually saved in %edx around
 * the call (presumably because %ebx is the PIC base register and may
 * not appear in the clobber list — NOTE(review): confirm).
 */
notrace static long vdso_fallback_gettime(long clock, struct timespec *ts)
{
	long ret;

	asm(
		"mov %%ebx, %%edx \n"
		"mov %2, %%ebx \n"
		"call VDSO32_vsyscall \n"
		"mov %%edx, %%ebx \n"
		: "=a" (ret)
		: "0" (__NR_clock_gettime), "g" (clock), "c" (ts)
		: "memory", "edx");
	return ret;
}
154 | ||
/*
 * 32-bit fallback to the gettimeofday() system call, using the same
 * %ebx save/restore dance as vdso_fallback_gettime() above.
 */
notrace static long vdso_fallback_gtod(struct timeval *tv, struct timezone *tz)
{
	long ret;

	asm(
		"mov %%ebx, %%edx \n"
		"mov %2, %%ebx \n"
		"call VDSO32_vsyscall \n"
		"mov %%edx, %%ebx \n"
		: "=a" (ret)
		: "0" (__NR_gettimeofday), "g" (tv), "c" (tz)
		: "memory", "edx");
	return ret;
}
169 | ||
170 | #ifdef CONFIG_PARAVIRT_CLOCK | |
171 | ||
172 | static notrace cycle_t vread_pvclock(int *mode) | |
173 | { | |
174 | *mode = VCLOCK_NONE; | |
175 | return 0; | |
176 | } | |
177 | #endif | |
178 | ||
179 | #endif | |
180 | ||
/*
 * Read the TSC, clamped so that the returned value never goes below
 * the last clocksource update point (gtod->clock.cycle_last).
 */
notrace static cycle_t vread_tsc(void)
{
	cycle_t ret;
	u64 last;

	/*
	 * Empirically, a fence (of type that depends on the CPU)
	 * before rdtsc is enough to ensure that rdtsc is ordered
	 * with respect to loads. The various CPU manuals are unclear
	 * as to whether rdtsc can be reordered with later loads,
	 * but no one has ever seen it happen.
	 */
	rdtsc_barrier();
	ret = (cycle_t)__native_read_tsc();

	last = gtod->clock.cycle_last;

	if (likely(ret >= last))
		return ret;

	/*
	 * GCC likes to generate cmov here, but this branch is extremely
	 * predictable (it's just a function of time and the likely is
	 * very likely) and there's a data dependence, so force GCC
	 * to generate a branch instead. I don't barrier() because
	 * we don't actually need a barrier, and if this function
	 * ever gets inlined it will generate worse code.
	 */
	asm volatile ("");
	return last;
}
a939e817 | 212 | |
/*
 * Read the active clocksource and return the cycle delta since the
 * last update, converted to shifted nanoseconds (caller must shift
 * down by gtod->clock.shift).  Returns 0 when no userspace-readable
 * clocksource is configured.  Only vread_pvclock() may write *mode.
 */
notrace static inline u64 vgetsns(int *mode)
{
	u64 v;
	cycles_t cycles;
	if (gtod->clock.vclock_mode == VCLOCK_TSC)
		cycles = vread_tsc();
#ifdef CONFIG_HPET_TIMER
	else if (gtod->clock.vclock_mode == VCLOCK_HPET)
		cycles = vread_hpet();
#endif
#ifdef CONFIG_PARAVIRT_CLOCK
	else if (gtod->clock.vclock_mode == VCLOCK_PVCLOCK)
		cycles = vread_pvclock(mode);
#endif
	else
		return 0;
	/* Mask handles clocksources narrower than 64 bits. */
	v = (cycles - gtod->clock.cycle_last) & gtod->clock.mask;
	return v * gtod->clock.mult;
}
232 | ||
/* Code size doesn't matter (vdso is 4k anyway) and this is faster. */
/*
 * Fill *ts with CLOCK_REALTIME.  The seqcount loop retries the read
 * whenever the kernel updated gtod concurrently.  Returns the vclock
 * mode; VCLOCK_NONE tells the caller to fall back to the syscall.
 */
notrace static int __always_inline do_realtime(struct timespec *ts)
{
	unsigned long seq;
	u64 ns;
	int mode;

	ts->tv_nsec = 0;
	do {
		seq = raw_read_seqcount_begin(&gtod->seq);
		mode = gtod->clock.vclock_mode;
		ts->tv_sec = gtod->wall_time_sec;
		ns = gtod->wall_time_snsec;
		ns += vgetsns(&mode);
		ns >>= gtod->clock.shift;
	} while (unlikely(read_seqcount_retry(&gtod->seq, seq)));

	timespec_add_ns(ts, ns);
	return mode;
}
253 | ||
/*
 * Fill *ts with CLOCK_MONOTONIC under the gtod seqcount; same retry
 * and return-mode contract as do_realtime().
 */
notrace static int __always_inline do_monotonic(struct timespec *ts)
{
	unsigned long seq;
	u64 ns;
	int mode;

	ts->tv_nsec = 0;
	do {
		seq = raw_read_seqcount_begin(&gtod->seq);
		mode = gtod->clock.vclock_mode;
		ts->tv_sec = gtod->monotonic_time_sec;
		ns = gtod->monotonic_time_snsec;
		ns += vgetsns(&mode);
		ns >>= gtod->clock.shift;
	} while (unlikely(read_seqcount_retry(&gtod->seq, seq)));
	timespec_add_ns(ts, ns);

	return mode;
}
273 | ||
/*
 * CLOCK_REALTIME_COARSE: copy the tick-granularity timestamp cached
 * by the kernel; no clocksource read, so this can never fail.
 */
notrace static void do_realtime_coarse(struct timespec *ts)
{
	unsigned long seq;
	do {
		seq = raw_read_seqcount_begin(&gtod->seq);
		ts->tv_sec = gtod->wall_time_coarse.tv_sec;
		ts->tv_nsec = gtod->wall_time_coarse.tv_nsec;
	} while (unlikely(read_seqcount_retry(&gtod->seq, seq)));
}
283 | ||
/*
 * CLOCK_MONOTONIC_COARSE: tick-granularity monotonic time, same
 * seqcount-protected copy as do_realtime_coarse().
 */
notrace static void do_monotonic_coarse(struct timespec *ts)
{
	unsigned long seq;
	do {
		seq = raw_read_seqcount_begin(&gtod->seq);
		ts->tv_sec = gtod->monotonic_time_coarse.tv_sec;
		ts->tv_nsec = gtod->monotonic_time_coarse.tv_nsec;
	} while (unlikely(read_seqcount_retry(&gtod->seq, seq)));
}
293 | ||
23adec55 | 294 | notrace int __vdso_clock_gettime(clockid_t clock, struct timespec *ts) |
2aae950b | 295 | { |
0d7b8547 AL |
296 | switch (clock) { |
297 | case CLOCK_REALTIME: | |
ce39c640 SS |
298 | if (do_realtime(ts) == VCLOCK_NONE) |
299 | goto fallback; | |
0d7b8547 AL |
300 | break; |
301 | case CLOCK_MONOTONIC: | |
ce39c640 SS |
302 | if (do_monotonic(ts) == VCLOCK_NONE) |
303 | goto fallback; | |
0d7b8547 AL |
304 | break; |
305 | case CLOCK_REALTIME_COARSE: | |
ce39c640 SS |
306 | do_realtime_coarse(ts); |
307 | break; | |
0d7b8547 | 308 | case CLOCK_MONOTONIC_COARSE: |
ce39c640 SS |
309 | do_monotonic_coarse(ts); |
310 | break; | |
311 | default: | |
312 | goto fallback; | |
0d7b8547 AL |
313 | } |
314 | ||
a939e817 | 315 | return 0; |
ce39c640 SS |
316 | fallback: |
317 | return vdso_fallback_gettime(clock, ts); | |
2aae950b AK |
318 | } |
319 | int clock_gettime(clockid_t, struct timespec *) | |
320 | __attribute__((weak, alias("__vdso_clock_gettime"))); | |
321 | ||
/*
 * vdso entry point for gettimeofday().  Falls back to the system
 * call when the clocksource is not userspace-readable.
 */
notrace int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz)
{
	if (likely(tv != NULL)) {
		/*
		 * struct timeval and struct timespec have identical
		 * layout, so fill tv as a timespec and scale the
		 * nanoseconds field down to microseconds afterwards.
		 */
		BUILD_BUG_ON(offsetof(struct timeval, tv_usec) !=
			     offsetof(struct timespec, tv_nsec) ||
			     sizeof(*tv) != sizeof(struct timespec));
		if (unlikely(do_realtime((struct timespec *)tv) == VCLOCK_NONE))
			return vdso_fallback_gtod(tv, tz);
		tv->tv_usec /= 1000;
	}
	if (unlikely(tz != NULL)) {
		/* Avoid memcpy. Some old compilers fail to inline it */
		tz->tz_minuteswest = gtod->sys_tz.tz_minuteswest;
		tz->tz_dsttime = gtod->sys_tz.tz_dsttime;
	}

	return 0;
}
int gettimeofday(struct timeval *, struct timezone *)
	__attribute__((weak, alias("__vdso_gettimeofday")));
f144a6b4 | 342 | |
0d7b8547 AL |
343 | /* |
344 | * This will break when the xtime seconds get inaccurate, but that is | |
345 | * unlikely | |
346 | */ | |
f144a6b4 AL |
347 | notrace time_t __vdso_time(time_t *t) |
348 | { | |
7a59ed41 | 349 | /* This is atomic on x86 so we don't need any locks. */ |
af8c93d8 | 350 | time_t result = ACCESS_ONCE(gtod->wall_time_sec); |
f144a6b4 AL |
351 | |
352 | if (t) | |
353 | *t = result; | |
354 | return result; | |
355 | } | |
356 | int time(time_t *t) | |
357 | __attribute__((weak, alias("__vdso_time"))); |