/*
 * sched_clock for unstable cpu clocks
 *
 *  Copyright (C) 2008 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 *
 *  Updates and enhancements:
 *  Copyright (C) 2008 Red Hat, Inc. Steven Rostedt <srostedt@redhat.com>
 *
 * Based on code by:
 *   Ingo Molnar <mingo@redhat.com>
 *   Guillaume Chazarain <guichaz@gmail.com>
 *
 * Create a semi-stable clock from a mixture of other events, including:
 *  - gtod
 *  - jiffies
 *  - sched_clock()
 *  - explicit idle events
 *
 * We use gtod as the base and add the unstable clock deltas on top. The
 * deltas are filtered, keeping the clock monotonic and within an expected
 * window. This window is set up using jiffies.
 *
 * Furthermore, explicit sleep and wakeup hooks allow us to account for time
 * that is otherwise invisible (the TSC gets stopped).
 *
 * The clock, sched_clock_cpu(), is monotonic per cpu, and should be somewhat
 * consistent between cpus (never more than one jiffy of difference).
 */
#include <linux/sched.h>
#include <linux/percpu.h>
#include <linux/spinlock.h>
#include <linux/ktime.h>
#include <linux/module.h>
#include <asm/div64.h>		/* for do_div() */


#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK

#define MULTI_SHIFT 15
/* Max is double, Min is 1/2 */
#define MAX_MULTI (2LL << MULTI_SHIFT)
#define MIN_MULTI (1LL << (MULTI_SHIFT-1))

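/*
 * For illustration: with MULTI_SHIFT == 15, a multiplier of 1 << 15
 * (32768) represents a ratio of 1.0.  Raw deltas get scaled as
 * (delta * multi) >> MULTI_SHIFT, so e.g. multi == 40960 stretches a
 * delta by 1.25x, while MAX_MULTI and MIN_MULTI clamp the scaling to
 * the range [0.5x, 2.0x].
 */
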
struct sched_clock_data {
	/*
	 * Raw spinlock - this is a special case: this might be called
	 * from within instrumentation code so we don't want to do any
	 * instrumentation ourselves.
	 */
	raw_spinlock_t		lock;

	unsigned long		tick_jiffies;	/* jiffies at the last tick */
	u64			prev_raw;	/* raw clock at the last update */
	u64			tick_raw;	/* raw clock at the last tick */
	u64			tick_gtod;	/* gtod at the last tick */
	u64			clock;		/* the current filtered clock */
	s64			multi;		/* raw->gtod rate, fixed point */
#ifdef CONFIG_NO_HZ
	int			check_max;	/* is the max window check on? */
#endif
};

static DEFINE_PER_CPU_SHARED_ALIGNED(struct sched_clock_data, sched_clock_data);

static inline struct sched_clock_data *this_scd(void)
{
	return &__get_cpu_var(sched_clock_data);
}

static inline struct sched_clock_data *cpu_sdc(int cpu)
{
	return &per_cpu(sched_clock_data, cpu);
}

static __read_mostly int sched_clock_running;

void sched_clock_init(void)
{
	u64 ktime_now = ktime_to_ns(ktime_get());
	unsigned long now_jiffies = jiffies;
	int cpu;

	for_each_possible_cpu(cpu) {
		struct sched_clock_data *scd = cpu_sdc(cpu);

		scd->lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
		scd->tick_jiffies = now_jiffies;
		scd->prev_raw = 0;
		scd->tick_raw = 0;
		scd->tick_gtod = ktime_now;
		scd->clock = ktime_now;
		scd->multi = 1 << MULTI_SHIFT;
#ifdef CONFIG_NO_HZ
		scd->check_max = 1;
#endif
	}

	sched_clock_running = 1;
}

#ifdef CONFIG_NO_HZ
/*
 * Dynamic ticks make the jiffies delta inaccurate, which
 * prevents us from checking the maximum time update.
 * Disable the maximum check while the tick is stopped.
 */
void sched_clock_tick_stop(int cpu)
{
	struct sched_clock_data *scd = cpu_sdc(cpu);

	scd->check_max = 0;
}

void sched_clock_tick_start(int cpu)
{
	struct sched_clock_data *scd = cpu_sdc(cpu);

	scd->check_max = 1;
}

static int check_max(struct sched_clock_data *scd)
{
	return scd->check_max;
}
#else
static int check_max(struct sched_clock_data *scd)
{
	return 1;
}
#endif /* CONFIG_NO_HZ */

/*
 * update the percpu scd from the raw @now value
 *
 *  - filter out backward motion
 *  - use jiffies to generate a min,max window to clip the raw values
 */
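/*
 * For example, with HZ == 1000 (TICK_NSEC == 1000000) and one jiffy
 * elapsed since the last tick (delta_jiffies == 1), the window below
 * works out to roughly:
 *
 *	min_clock = tick_gtod + 1 * TICK_NSEC - TICK_NSEC / 2
 *	max_clock = tick_gtod + (2 + 1) * TICK_NSEC
 *
 * i.e. the filtered clock is clipped into
 * [tick_gtod + 0.5ms, tick_gtod + 3ms].
 */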
static void __update_sched_clock(struct sched_clock_data *scd, u64 now, u64 *time)
{
	unsigned long now_jiffies = jiffies;
	long delta_jiffies = now_jiffies - scd->tick_jiffies;
	u64 clock = scd->clock;
	u64 min_clock, max_clock;
	s64 delta = now - scd->prev_raw;

	WARN_ON_ONCE(!irqs_disabled());

	/*
	 * At the scheduler tick the clock can be just under the gtod. We
	 * don't want to push it forward prematurely.
	 */
	min_clock = scd->tick_gtod + (delta_jiffies * TICK_NSEC);
	if (min_clock > TICK_NSEC)
		min_clock -= TICK_NSEC / 2;

	if (unlikely(delta < 0)) {
		clock++;
		goto out;
	}

	/*
	 * The clock must stay within a jiffy of the gtod.
	 * But since we may be at the start of a jiffy or the end of one,
	 * we add another jiffy of buffer.
	 */
	max_clock = scd->tick_gtod + (2 + delta_jiffies) * TICK_NSEC;

	delta *= scd->multi;
	delta >>= MULTI_SHIFT;

	if (unlikely(clock + delta > max_clock) && check_max(scd)) {
		if (clock < max_clock)
			clock = max_clock;
		else
			clock++;
	} else {
		clock += delta;
	}

out:
	if (unlikely(clock < min_clock))
		clock = min_clock;

	if (time)
		*time = clock;
	else {
		scd->prev_raw = now;
		scd->clock = clock;
	}
}

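/*
 * Take both per-cpu clock locks in address order, so that two cpus
 * concurrently updating each other's clocks cannot deadlock.
 */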
static void lock_double_clock(struct sched_clock_data *data1,
			      struct sched_clock_data *data2)
{
	if (data1 < data2) {
		__raw_spin_lock(&data1->lock);
		__raw_spin_lock(&data2->lock);
	} else {
		__raw_spin_lock(&data2->lock);
		__raw_spin_lock(&data1->lock);
	}
}

u64 sched_clock_cpu(int cpu)
{
	struct sched_clock_data *scd = cpu_sdc(cpu);
	u64 now, clock;

	if (unlikely(!sched_clock_running))
		return 0ull;

	WARN_ON_ONCE(!irqs_disabled());
	now = sched_clock();

	if (cpu != raw_smp_processor_id()) {
		/*
		 * In order to update a remote cpu's clock based on our
		 * unstable raw time, rebase it against:
		 *	tick_raw	(offset between raw counters)
		 *	tick_gtod	(tick offset between cpus)
		 */
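		/*
		 * That is:
		 *
		 *	now = scd->tick_raw + (now - my_scd->tick_raw)
		 *	      + (my_scd->tick_gtod - scd->tick_gtod)
		 *
		 * the raw time elapsed since our tick, corrected by the
		 * gtod skew between the two cpus' ticks, rebased onto the
		 * remote cpu's raw timeline.
		 */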
		struct sched_clock_data *my_scd = this_scd();

		lock_double_clock(scd, my_scd);

		now -= my_scd->tick_raw;
		now += scd->tick_raw;

		now += my_scd->tick_gtod;
		now -= scd->tick_gtod;

		__raw_spin_unlock(&my_scd->lock);

		__update_sched_clock(scd, now, &clock);

		__raw_spin_unlock(&scd->lock);
	} else {
		__raw_spin_lock(&scd->lock);
		__update_sched_clock(scd, now, NULL);
		clock = scd->clock;
		__raw_spin_unlock(&scd->lock);
	}

	return clock;
}

void sched_clock_tick(void)
{
	struct sched_clock_data *scd = this_scd();
	unsigned long now_jiffies = jiffies;
	s64 mult, delta_gtod, delta_raw;
	u64 now, now_gtod;

	if (unlikely(!sched_clock_running))
		return;

	WARN_ON_ONCE(!irqs_disabled());

	now_gtod = ktime_to_ns(ktime_get());
	now = sched_clock();

	__raw_spin_lock(&scd->lock);
	__update_sched_clock(scd, now, NULL);
	/*
	 * update tick_gtod after __update_sched_clock() because that will
	 * already observe 1 new jiffy; adding a new tick_gtod to that would
	 * increase the clock by 2 jiffies.
	 */
	delta_gtod = now_gtod - scd->tick_gtod;
	delta_raw = now - scd->tick_raw;

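	/*
	 * For example, if gtod advanced 1000000 ns over the last tick
	 * while the raw clock advanced only 800000 ns, the multiplier
	 * below becomes (1000000 << 15) / 800000 == 40960, i.e. 1.25 in
	 * fixed point, so subsequent raw deltas get stretched to track
	 * gtod.
	 */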
	if ((long)delta_raw > 0) {
		mult = delta_gtod << MULTI_SHIFT;
		do_div(mult, delta_raw);
		scd->multi = mult;
		if (scd->multi > MAX_MULTI)
			scd->multi = MAX_MULTI;
		else if (scd->multi < MIN_MULTI)
			scd->multi = MIN_MULTI;
	} else
		scd->multi = 1 << MULTI_SHIFT;

	scd->tick_raw = now;
	scd->tick_gtod = now_gtod;
	scd->tick_jiffies = now_jiffies;
	__raw_spin_unlock(&scd->lock);
}

/*
 * We are going deep-idle (irqs are disabled):
 */
void sched_clock_idle_sleep_event(void)
{
	sched_clock_cpu(smp_processor_id());
}
EXPORT_SYMBOL_GPL(sched_clock_idle_sleep_event);

/*
 * We just idled delta nanoseconds (called with irqs disabled):
 */
void sched_clock_idle_wakeup_event(u64 delta_ns)
{
	struct sched_clock_data *scd = this_scd();
	u64 now = sched_clock();

	/*
	 * Override the previous timestamp and ignore all
	 * sched_clock() deltas that occurred while we idled,
	 * and use the PM-provided delta_ns to advance the
	 * rq clock:
	 */
	__raw_spin_lock(&scd->lock);
	scd->prev_raw = now;
	scd->clock += delta_ns;
	scd->multi = 1 << MULTI_SHIFT;
	__raw_spin_unlock(&scd->lock);

	touch_softlockup_watchdog();
}
EXPORT_SYMBOL_GPL(sched_clock_idle_wakeup_event);

#endif

/*
 * Scheduler clock - returns current time in nanosec units.
 * This is the default implementation.
 * Architectures and sub-architectures can override this.
 */
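/*
 * Note that this default only has jiffies resolution: with HZ == 250,
 * for example, it advances in fixed steps of
 * NSEC_PER_SEC / HZ == 4000000 ns.
 */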
unsigned long long __attribute__((weak)) sched_clock(void)
{
	return (unsigned long long)jiffies * (NSEC_PER_SEC / HZ);
}
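/*
 * cpu_clock() - return the cpu's filtered clock, in nanoseconds.
 *
 * Unlike sched_clock_cpu(), this may be called with interrupts enabled;
 * irqs are saved and restored around the clock read.  A hypothetical
 * caller might do, for example:
 *
 *	t0 = cpu_clock(raw_smp_processor_id());
 *	do_work();	(hypothetical helper)
 *	t1 = cpu_clock(raw_smp_processor_id());
 *
 * with t1 - t0 giving the elapsed nanoseconds on that cpu.
 */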
unsigned long long cpu_clock(int cpu)
{
	unsigned long long clock;
	unsigned long flags;

	local_irq_save(flags);
	clock = sched_clock_cpu(cpu);
	local_irq_restore(flags);

	return clock;
}
EXPORT_SYMBOL_GPL(cpu_clock);