/*
 * sched_clock for unstable cpu clocks
 *
 * Copyright (C) 2008 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 *
 * Updates and enhancements:
 * Copyright (C) 2008 Red Hat, Inc. Steven Rostedt <srostedt@redhat.com>
 *
 * Based on code by:
 *  Ingo Molnar <mingo@redhat.com>
 *  Guillaume Chazarain <guichaz@gmail.com>
 *
 * Create a semi stable clock from a mixture of other events, including:
 *  - gtod
 *  - sched_clock()
 *  - explicit idle events
 *
 * We use GTOD as the base and add the unstable clock deltas on top; the
 * deltas are filtered to keep the clock monotonic and confined to an
 * expected window around the GTOD base.
 *
 * Furthermore, explicit sleep and wakeup hooks allow us to account for time
 * that is otherwise invisible (TSC gets stopped).
 *
 * The clock: sched_clock_cpu() is monotonic per cpu, and should be somewhat
 * consistent between cpus (never more than 2 jiffies difference).
 */
#include <linux/spinlock.h>
#include <linux/hardirq.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/ktime.h>
#include <linux/sched.h>

/*
 * Scheduler clock - returns current time in nanosec units.
 * This is the default implementation.
 * Architectures and sub-architectures can override this.
 */
unsigned long long __attribute__((weak)) sched_clock(void)
{
	return (unsigned long long)(jiffies - INITIAL_JIFFIES)
					* (NSEC_PER_SEC / HZ);
}
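
/*
 * For example, assuming HZ=250 the fallback above advances in steps of
 * NSEC_PER_SEC / HZ = 4,000,000 ns, i.e. one-jiffy (4 ms) resolution.
 * Subtracting INITIAL_JIFFIES makes the returned value start near zero
 * at boot.
 */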

static __read_mostly int sched_clock_running;

#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
__read_mostly int sched_clock_stable;

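/*
 * Per-cpu state for the unstable-clock filter:
 *
 *  @tick_raw:	sched_clock() value sampled at the last tick
 *  @tick_gtod:	GTOD (ktime) value, in ns, sampled at the last tick
 *  @clock:	most recent filtered clock value handed out for this cpu
 */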
struct sched_clock_data {
	u64			tick_raw;
	u64			tick_gtod;
	u64			clock;
};

static DEFINE_PER_CPU_SHARED_ALIGNED(struct sched_clock_data, sched_clock_data);

static inline struct sched_clock_data *this_scd(void)
{
	return &__get_cpu_var(sched_clock_data);
}

static inline struct sched_clock_data *cpu_sdc(int cpu)
{
	return &per_cpu(sched_clock_data, cpu);
}

void sched_clock_init(void)
{
	u64 ktime_now = ktime_to_ns(ktime_get());
	int cpu;

	for_each_possible_cpu(cpu) {
		struct sched_clock_data *scd = cpu_sdc(cpu);

		scd->tick_raw = 0;
		scd->tick_gtod = ktime_now;
		scd->clock = ktime_now;
	}

	sched_clock_running = 1;
}

/*
 * min()/max(), except they take u64 wrapping into account
 */

static inline u64 wrap_min(u64 x, u64 y)
{
	return (s64)(x - y) < 0 ? x : y;
}

static inline u64 wrap_max(u64 x, u64 y)
{
	return (s64)(x - y) > 0 ? x : y;
}
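
/*
 * Worked example: wrap_max(2, ULLONG_MAX) returns 2, because the unsigned
 * difference 2 - ULLONG_MAX wraps to 3, which is positive when viewed as
 * s64; 2 is therefore treated as lying "after" a value that just wrapped.
 * This works as long as the two values are within 2^63 of each other.
 */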

/*
 * Update the percpu scd with the current raw sched_clock() value:
 *
 * - filter out backward motion
 * - use the GTOD tick value to create a window to filter crazy TSC values
 */
static u64 sched_clock_local(struct sched_clock_data *scd)
{
	u64 now, clock, old_clock, min_clock, max_clock;
	s64 delta;

again:
	now = sched_clock();
	delta = now - scd->tick_raw;
	if (unlikely(delta < 0))
		delta = 0;

	old_clock = scd->clock;

	/*
	 * scd->clock = clamp(scd->tick_gtod + delta,
	 *		      max(scd->tick_gtod, scd->clock),
	 *		      scd->tick_gtod + TICK_NSEC);
	 */
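	/*
	 * Example: if the raw delta is wild (say the TSC jumped forward by
	 * seconds), clock is capped at max(old_clock, tick_gtod + TICK_NSEC),
	 * i.e. roughly one tick past the last GTOD sample.  If the delta went
	 * backwards, clock is raised to at least max(tick_gtod, old_clock),
	 * so the per-cpu clock never moves backwards.
	 */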

	clock = scd->tick_gtod + delta;
	min_clock = wrap_max(scd->tick_gtod, old_clock);
	max_clock = wrap_max(old_clock, scd->tick_gtod + TICK_NSEC);

	clock = wrap_max(clock, min_clock);
	clock = wrap_min(clock, max_clock);

	if (cmpxchg64(&scd->clock, old_clock, clock) != old_clock)
		goto again;

	return clock;
}

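/*
 * Read the clock of another cpu.  We first refresh our own clock, then
 * couple the two: whichever of the two values is behind is pulled forward
 * to the larger one with cmpxchg64(), so cross-cpu reads only ever move
 * clocks forward.  If the cmpxchg64() races with a concurrent update we
 * simply retry with fresh values.
 */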
static u64 sched_clock_remote(struct sched_clock_data *scd)
{
	struct sched_clock_data *my_scd = this_scd();
	u64 this_clock, remote_clock;
	u64 *ptr, old_val, val;

	sched_clock_local(my_scd);
again:
	this_clock = my_scd->clock;
	remote_clock = scd->clock;

	/*
	 * Use the opportunity that we have both clock values at hand to
	 * couple the two clocks: we take the larger time as the latest
	 * time for both cpus. (this creates monotonic movement)
	 */
	if (likely((s64)(remote_clock - this_clock) < 0)) {
		ptr = &scd->clock;
		old_val = remote_clock;
		val = this_clock;
	} else {
		/*
		 * Should be rare, but possible:
		 */
		ptr = &my_scd->clock;
		old_val = this_clock;
		val = remote_clock;
	}

	if (cmpxchg64(ptr, old_val, val) != old_val)
		goto again;

	return val;
}

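/*
 * Return the current (filtered) clock for @cpu, in nanoseconds.  Must be
 * called with interrupts disabled; callers that cannot guarantee that
 * should use cpu_clock() below.  When the underlying clock is known to be
 * stable (sched_clock_stable) the filtering is bypassed entirely.
 */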
u64 sched_clock_cpu(int cpu)
{
	struct sched_clock_data *scd;
	u64 clock;

	WARN_ON_ONCE(!irqs_disabled());

	if (sched_clock_stable)
		return sched_clock();

	if (unlikely(!sched_clock_running))
		return 0ull;

	scd = cpu_sdc(cpu);

	if (cpu != smp_processor_id())
		clock = sched_clock_remote(scd);
	else
		clock = sched_clock_local(scd);

	return clock;
}

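/*
 * Called with interrupts disabled to resample both the raw clock and GTOD
 * at tick time, so that subsequent sched_clock_local() calls filter their
 * deltas against a fresh, tick-aligned window.
 */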
void sched_clock_tick(void)
{
	struct sched_clock_data *scd;
	u64 now, now_gtod;

	if (sched_clock_stable)
		return;

	if (unlikely(!sched_clock_running))
		return;

	WARN_ON_ONCE(!irqs_disabled());

	scd = this_scd();
	now_gtod = ktime_to_ns(ktime_get());
	now = sched_clock();

	scd->tick_raw = now;
	scd->tick_gtod = now_gtod;
	sched_clock_local(scd);
}

/*
 * We are going deep-idle (irqs are disabled):
 */
void sched_clock_idle_sleep_event(void)
{
	sched_clock_cpu(smp_processor_id());
}
EXPORT_SYMBOL_GPL(sched_clock_idle_sleep_event);

/*
 * We just idled @delta_ns nanoseconds (called with irqs disabled):
 */
void sched_clock_idle_wakeup_event(u64 delta_ns)
{
	if (timekeeping_suspended)
		return;

	sched_clock_tick();
	touch_softlockup_watchdog();
}
EXPORT_SYMBOL_GPL(sched_clock_idle_wakeup_event);

#else /* CONFIG_HAVE_UNSTABLE_SCHED_CLOCK */

void sched_clock_init(void)
{
	sched_clock_running = 1;
}

u64 sched_clock_cpu(int cpu)
{
	if (unlikely(!sched_clock_running))
		return 0;

	return sched_clock();
}

#endif /* CONFIG_HAVE_UNSTABLE_SCHED_CLOCK */

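/*
 * Interrupt-safe wrapper around sched_clock_cpu(); disables interrupts
 * itself so it can be called from any context.
 */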
unsigned long long cpu_clock(int cpu)
{
	unsigned long long clock;
	unsigned long flags;

	local_irq_save(flags);
	clock = sched_clock_cpu(cpu);
	local_irq_restore(flags);

	return clock;
}
EXPORT_SYMBOL_GPL(cpu_clock);