// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright(C) 2005-2006, Thomas Gleixner <tglx@linutronix.de>
 * Copyright(C) 2005-2007, Red Hat, Inc., Ingo Molnar
 * Copyright(C) 2006-2007  Timesys Corp., Thomas Gleixner
 *
 * No idle tick implementation for low and high resolution timers
 *
 * Started by: Thomas Gleixner and Ingo Molnar
 */
#include <linux/cpu.h>
#include <linux/err.h>
#include <linux/hrtimer.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/percpu.h>
#include <linux/nmi.h>
#include <linux/profile.h>
#include <linux/sched/signal.h>
#include <linux/sched/clock.h>
#include <linux/sched/stat.h>
#include <linux/sched/nohz.h>
#include <linux/sched/loadavg.h>
#include <linux/module.h>
#include <linux/irq_work.h>
#include <linux/posix-timers.h>
#include <linux/context_tracking.h>
#include <linux/mm.h>

#include <asm/irq_regs.h>

#include "tick-internal.h"

#include <trace/events/timer.h>

/*
 * Per-CPU nohz control structure
 */
static DEFINE_PER_CPU(struct tick_sched, tick_cpu_sched);

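/* Return the per-CPU tick_sched control structure for the given CPU. */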
struct tick_sched *tick_get_tick_sched(int cpu)
{
	return &per_cpu(tick_cpu_sched, cpu);
}

#if defined(CONFIG_NO_HZ_COMMON) || defined(CONFIG_HIGH_RES_TIMERS)
/*
 * The time, when the last jiffy update happened. Write access must hold
 * jiffies_lock and jiffies_seq. tick_nohz_next_event() needs to get a
 * consistent view of jiffies and last_jiffies_update.
 */
static ktime_t last_jiffies_update;

/*
 * Must be called with interrupts disabled !
 */
static void tick_do_update_jiffies64(ktime_t now)
{
	unsigned long ticks = 1;
	ktime_t delta, nextp;

	/*
	 * 64bit can do a quick check without holding jiffies lock and
	 * without looking at the sequence count. The smp_load_acquire()
	 * pairs with the update done later in this function.
	 *
	 * 32bit cannot do that because the store of tick_next_period
	 * consists of two 32bit stores and the first store could move it
	 * to a random point in the future.
	 */
	if (IS_ENABLED(CONFIG_64BIT)) {
		if (ktime_before(now, smp_load_acquire(&tick_next_period)))
			return;
	} else {
		unsigned int seq;

		/*
		 * Avoid contention on jiffies_lock and protect the quick
		 * check with the sequence count.
		 */
		do {
			seq = read_seqcount_begin(&jiffies_seq);
			nextp = tick_next_period;
		} while (read_seqcount_retry(&jiffies_seq, seq));

		if (ktime_before(now, nextp))
			return;
	}

	/* Quick check failed, i.e. update is required. */
	raw_spin_lock(&jiffies_lock);
	/*
	 * Reevaluate with the lock held. Another CPU might have done the
	 * update already.
	 */
	if (ktime_before(now, tick_next_period)) {
		raw_spin_unlock(&jiffies_lock);
		return;
	}

	write_seqcount_begin(&jiffies_seq);

	delta = ktime_sub(now, tick_next_period);
	if (unlikely(delta >= TICK_NSEC)) {
		/* Slow path for long idle sleep times */
		s64 incr = TICK_NSEC;

		ticks += ktime_divns(delta, incr);

		last_jiffies_update = ktime_add_ns(last_jiffies_update,
						   incr * ticks);
	} else {
		last_jiffies_update = ktime_add_ns(last_jiffies_update,
						   TICK_NSEC);
	}

	/* Advance jiffies to complete the jiffies_seq protected job */
	jiffies_64 += ticks;

	/*
	 * Keep the tick_next_period variable up to date.
	 */
	nextp = ktime_add_ns(last_jiffies_update, TICK_NSEC);

	if (IS_ENABLED(CONFIG_64BIT)) {
		/*
		 * Pairs with smp_load_acquire() in the lockless quick
		 * check above and ensures that the update to jiffies_64 is
		 * not reordered vs. the store to tick_next_period, neither
		 * by the compiler nor by the CPU.
		 */
		smp_store_release(&tick_next_period, nextp);
	} else {
		/*
		 * A plain store is good enough on 32bit as the quick check
		 * above is protected by the sequence count.
		 */
		tick_next_period = nextp;
	}

	/*
	 * Release the sequence count. calc_global_load() below is not
	 * protected by it, but jiffies_lock needs to be held to prevent
	 * concurrent invocations.
	 */
	write_seqcount_end(&jiffies_seq);

	calc_global_load();

	raw_spin_unlock(&jiffies_lock);
	update_wall_time();
}

/*
 * Initialize and return the jiffies update base.
 */
static ktime_t tick_init_jiffy_update(void)
{
	ktime_t period;

	raw_spin_lock(&jiffies_lock);
	write_seqcount_begin(&jiffies_seq);
	/* Did we start the jiffies update yet ? */
	if (last_jiffies_update == 0) {
		u32 rem;

		/*
		 * Ensure that the tick is aligned to a multiple of
		 * TICK_NSEC.
		 */
		div_u64_rem(tick_next_period, TICK_NSEC, &rem);
		if (rem)
			tick_next_period += TICK_NSEC - rem;

		last_jiffies_update = tick_next_period;
	}
	period = last_jiffies_update;
	write_seqcount_end(&jiffies_seq);
	raw_spin_unlock(&jiffies_lock);
	return period;
}

#define MAX_STALLED_JIFFIES 5

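/*
 * Shared tick-side bookkeeping for both the low-res handler and the
 * high-res sched_timer: take over the do_timer() duty if it was dropped,
 * update jiffies when this CPU owns that duty, detect a stalled jiffies
 * update and flag an idle tick for tick_nohz_idle_got_tick().
 */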
static void tick_sched_do_timer(struct tick_sched *ts, ktime_t now)
{
	int cpu = smp_processor_id();

#ifdef CONFIG_NO_HZ_COMMON
	/*
	 * Check if the do_timer duty was dropped. We don't care about
	 * concurrency: This happens only when the CPU in charge went
	 * into a long sleep. If two CPUs happen to assign themselves to
	 * this duty, then the jiffies update is still serialized by
	 * jiffies_lock.
	 *
	 * If nohz_full is enabled, this should not happen because the
	 * tick_do_timer_cpu never relinquishes.
	 */
	if (unlikely(tick_do_timer_cpu == TICK_DO_TIMER_NONE)) {
#ifdef CONFIG_NO_HZ_FULL
		WARN_ON_ONCE(tick_nohz_full_running);
#endif
		tick_do_timer_cpu = cpu;
	}
#endif

	/* Check, if the jiffies need an update */
	if (tick_do_timer_cpu == cpu)
		tick_do_update_jiffies64(now);

	/*
	 * If jiffies update stalled for too long (timekeeper in stop_machine()
	 * or VMEXIT'ed for several msecs), force an update.
	 */
	if (ts->last_tick_jiffies != jiffies) {
		ts->stalled_jiffies = 0;
		ts->last_tick_jiffies = READ_ONCE(jiffies);
	} else {
		if (++ts->stalled_jiffies == MAX_STALLED_JIFFIES) {
			tick_do_update_jiffies64(now);
			ts->stalled_jiffies = 0;
			ts->last_tick_jiffies = READ_ONCE(jiffies);
		}
	}

	if (ts->inidle)
		ts->got_idle_tick = 1;
}

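/*
 * Perform the regular per-tick work: watchdog and idle-jiffies bookkeeping
 * while the tick is stopped, then process accounting and profiling.
 */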
static void tick_sched_handle(struct tick_sched *ts, struct pt_regs *regs)
{
#ifdef CONFIG_NO_HZ_COMMON
	/*
	 * When we are idle and the tick is stopped, we have to touch
	 * the watchdog as we might not schedule for a really long
	 * time. This happens on complete idle SMP systems while
	 * waiting on the login prompt. We also increment the "start of
	 * idle" jiffy stamp so the idle accounting adjustment we do
	 * when we go busy again does not account for too many ticks.
	 */
	if (ts->tick_stopped) {
		touch_softlockup_watchdog_sched();
		if (is_idle_task(current))
			ts->idle_jiffies++;
		/*
		 * In case the current tick fired too early past its expected
		 * expiration, make sure we don't bypass the next clock reprogramming
		 * to the same deadline.
		 */
		ts->next_tick = 0;
	}
#endif
	update_process_times(user_mode(regs));
	profile_tick(CPU_PROFILING);
}
#endif

#ifdef CONFIG_NO_HZ_FULL
cpumask_var_t tick_nohz_full_mask;
EXPORT_SYMBOL_GPL(tick_nohz_full_mask);
bool tick_nohz_full_running;
EXPORT_SYMBOL_GPL(tick_nohz_full_running);
static atomic_t tick_dep_mask;

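/*
 * Report whether any tick dependency bit is set in @dep and, if so, emit
 * the matching tick_stop trace event.
 */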
static bool check_tick_dependency(atomic_t *dep)
{
	int val = atomic_read(dep);

	if (val & TICK_DEP_MASK_POSIX_TIMER) {
		trace_tick_stop(0, TICK_DEP_MASK_POSIX_TIMER);
		return true;
	}

	if (val & TICK_DEP_MASK_PERF_EVENTS) {
		trace_tick_stop(0, TICK_DEP_MASK_PERF_EVENTS);
		return true;
	}

	if (val & TICK_DEP_MASK_SCHED) {
		trace_tick_stop(0, TICK_DEP_MASK_SCHED);
		return true;
	}

	if (val & TICK_DEP_MASK_CLOCK_UNSTABLE) {
		trace_tick_stop(0, TICK_DEP_MASK_CLOCK_UNSTABLE);
		return true;
	}

	if (val & TICK_DEP_MASK_RCU) {
		trace_tick_stop(0, TICK_DEP_MASK_RCU);
		return true;
	}

	if (val & TICK_DEP_MASK_RCU_EXP) {
		trace_tick_stop(0, TICK_DEP_MASK_RCU_EXP);
		return true;
	}

	return false;
}

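/*
 * Check the global, per-CPU, per-task and per-signal tick dependencies.
 * Returns true when the tick can be stopped on this full dynticks CPU.
 * Must be called with interrupts disabled.
 */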
static bool can_stop_full_tick(int cpu, struct tick_sched *ts)
{
	lockdep_assert_irqs_disabled();

	if (unlikely(!cpu_online(cpu)))
		return false;

	if (check_tick_dependency(&tick_dep_mask))
		return false;

	if (check_tick_dependency(&ts->tick_dep_mask))
		return false;

	if (check_tick_dependency(&current->tick_dep_mask))
		return false;

	if (check_tick_dependency(&current->signal->tick_dep_mask))
		return false;

	return true;
}

static void nohz_full_kick_func(struct irq_work *work)
{
	/* Empty, the tick restart happens on tick_nohz_irq_exit() */
}

static DEFINE_PER_CPU(struct irq_work, nohz_full_kick_work) =
	IRQ_WORK_INIT_HARD(nohz_full_kick_func);

/*
 * Kick this CPU if it's full dynticks in order to force it to
 * re-evaluate its dependency on the tick and restart it if necessary.
 * This kick, unlike tick_nohz_full_kick_cpu() and tick_nohz_full_kick_all(),
 * is NMI safe.
 */
static void tick_nohz_full_kick(void)
{
	if (!tick_nohz_full_cpu(smp_processor_id()))
		return;

	irq_work_queue(this_cpu_ptr(&nohz_full_kick_work));
}

/*
 * Kick the CPU if it's full dynticks in order to force it to
 * re-evaluate its dependency on the tick and restart it if necessary.
 */
void tick_nohz_full_kick_cpu(int cpu)
{
	if (!tick_nohz_full_cpu(cpu))
		return;

	irq_work_queue_on(&per_cpu(nohz_full_kick_work, cpu), cpu);
}

static void tick_nohz_kick_task(struct task_struct *tsk)
{
	int cpu;

	/*
	 * If the task is not running, run_posix_cpu_timers()
	 * has nothing to elapse, IPI can then be spared.
	 *
	 * activate_task()                     STORE p->tick_dep_mask
	 *   STORE p->on_rq
	 * __schedule() (switch to task 'p')   smp_mb() (atomic_fetch_or())
	 *   LOCK rq->lock                     LOAD p->on_rq
	 *   smp_mb__after_spin_lock()
	 *   tick_nohz_task_switch()
	 *     LOAD p->tick_dep_mask
	 */
	if (!sched_task_on_rq(tsk))
		return;

	/*
	 * If the task concurrently migrates to another CPU,
	 * we guarantee it sees the new tick dependency upon
	 * schedule.
	 *
	 * set_task_cpu(p, cpu);
	 *   STORE p->cpu = @cpu
	 * __schedule() (switch to task 'p')
	 *   LOCK rq->lock
	 *   smp_mb__after_spin_lock()         STORE p->tick_dep_mask
	 *   tick_nohz_task_switch()           smp_mb() (atomic_fetch_or())
	 *     LOAD p->tick_dep_mask           LOAD p->cpu
	 */
	cpu = task_cpu(tsk);

	preempt_disable();
	if (cpu_online(cpu))
		tick_nohz_full_kick_cpu(cpu);
	preempt_enable();
}

/*
 * Kick all full dynticks CPUs in order to force these to re-evaluate
 * their dependency on the tick and restart it if necessary.
 */
static void tick_nohz_full_kick_all(void)
{
	int cpu;

	if (!tick_nohz_full_running)
		return;

	preempt_disable();
	for_each_cpu_and(cpu, tick_nohz_full_mask, cpu_online_mask)
		tick_nohz_full_kick_cpu(cpu);
	preempt_enable();
}

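/*
 * Set a bit in the given dependency mask and, if it was not already set,
 * kick all full dynticks CPUs so they re-evaluate their tick.
 */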
static void tick_nohz_dep_set_all(atomic_t *dep,
				  enum tick_dep_bits bit)
{
	int prev;

	prev = atomic_fetch_or(BIT(bit), dep);
	if (!prev)
		tick_nohz_full_kick_all();
}

/*
 * Set a global tick dependency. Used by perf events that rely on freq and
 * by unstable clock.
 */
void tick_nohz_dep_set(enum tick_dep_bits bit)
{
	tick_nohz_dep_set_all(&tick_dep_mask, bit);
}

void tick_nohz_dep_clear(enum tick_dep_bits bit)
{
	atomic_andnot(BIT(bit), &tick_dep_mask);
}

/*
 * Set per-CPU tick dependency. Used by scheduler and perf events in order to
 * manage events throttling.
 */
void tick_nohz_dep_set_cpu(int cpu, enum tick_dep_bits bit)
{
	int prev;
	struct tick_sched *ts;

	ts = per_cpu_ptr(&tick_cpu_sched, cpu);

	prev = atomic_fetch_or(BIT(bit), &ts->tick_dep_mask);
	if (!prev) {
		preempt_disable();
		/* Perf needs local kick that is NMI safe */
		if (cpu == smp_processor_id()) {
			tick_nohz_full_kick();
		} else {
			/* Remote irq work not NMI-safe */
			if (!WARN_ON_ONCE(in_nmi()))
				tick_nohz_full_kick_cpu(cpu);
		}
		preempt_enable();
	}
}
EXPORT_SYMBOL_GPL(tick_nohz_dep_set_cpu);

void tick_nohz_dep_clear_cpu(int cpu, enum tick_dep_bits bit)
{
	struct tick_sched *ts = per_cpu_ptr(&tick_cpu_sched, cpu);

	atomic_andnot(BIT(bit), &ts->tick_dep_mask);
}
EXPORT_SYMBOL_GPL(tick_nohz_dep_clear_cpu);

/*
 * Set a per-task tick dependency. RCU needs this. Posix CPU timers also
 * need it in order to elapse per task timers.
 */
void tick_nohz_dep_set_task(struct task_struct *tsk, enum tick_dep_bits bit)
{
	if (!atomic_fetch_or(BIT(bit), &tsk->tick_dep_mask))
		tick_nohz_kick_task(tsk);
}
EXPORT_SYMBOL_GPL(tick_nohz_dep_set_task);

void tick_nohz_dep_clear_task(struct task_struct *tsk, enum tick_dep_bits bit)
{
	atomic_andnot(BIT(bit), &tsk->tick_dep_mask);
}
EXPORT_SYMBOL_GPL(tick_nohz_dep_clear_task);

/*
 * Set a per-taskgroup tick dependency. Posix CPU timers need this in order to elapse
 * per process timers.
 */
void tick_nohz_dep_set_signal(struct task_struct *tsk,
			      enum tick_dep_bits bit)
{
	int prev;
	struct signal_struct *sig = tsk->signal;

	prev = atomic_fetch_or(BIT(bit), &sig->tick_dep_mask);
	if (!prev) {
		struct task_struct *t;

		lockdep_assert_held(&tsk->sighand->siglock);
		__for_each_thread(sig, t)
			tick_nohz_kick_task(t);
	}
}

void tick_nohz_dep_clear_signal(struct signal_struct *sig, enum tick_dep_bits bit)
{
	atomic_andnot(BIT(bit), &sig->tick_dep_mask);
}

/*
 * Re-evaluate the need for the tick as we switch the current task.
 * It might need the tick due to per task/process properties:
 * perf events, posix CPU timers, ...
 */
void __tick_nohz_task_switch(void)
{
	struct tick_sched *ts;

	if (!tick_nohz_full_cpu(smp_processor_id()))
		return;

	ts = this_cpu_ptr(&tick_cpu_sched);

	if (ts->tick_stopped) {
		if (atomic_read(&current->tick_dep_mask) ||
		    atomic_read(&current->signal->tick_dep_mask))
			tick_nohz_full_kick();
	}
}

/* Get the boot-time nohz CPU list from the kernel parameters. */
void __init tick_nohz_full_setup(cpumask_var_t cpumask)
{
	alloc_bootmem_cpumask_var(&tick_nohz_full_mask);
	cpumask_copy(tick_nohz_full_mask, cpumask);
	tick_nohz_full_running = true;
}

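/*
 * Report whether the given CPU may be taken offline without breaking
 * timekeeping for the full dynticks CPUs.
 */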
bool tick_nohz_cpu_hotpluggable(unsigned int cpu)
{
	/*
	 * The tick_do_timer_cpu CPU handles housekeeping duty (unbound
	 * timers, workqueues, timekeeping, ...) on behalf of full dynticks
	 * CPUs. It must remain online when nohz full is enabled.
	 */
	if (tick_nohz_full_running && tick_do_timer_cpu == cpu)
		return false;
	return true;
}

static int tick_nohz_cpu_down(unsigned int cpu)
{
	return tick_nohz_cpu_hotpluggable(cpu) ? 0 : -EBUSY;
}

void __init tick_nohz_init(void)
{
	int cpu, ret;

	if (!tick_nohz_full_running)
		return;

	/*
	 * Full dynticks uses irq work to drive the tick rescheduling on safe
	 * locking contexts. But then we need irq work to raise its own
	 * interrupts to avoid circular dependency on the tick
	 */
	if (!arch_irq_work_has_interrupt()) {
		pr_warn("NO_HZ: Can't run full dynticks because arch doesn't support irq work self-IPIs\n");
		cpumask_clear(tick_nohz_full_mask);
		tick_nohz_full_running = false;
		return;
	}

	if (IS_ENABLED(CONFIG_PM_SLEEP_SMP) &&
	    !IS_ENABLED(CONFIG_PM_SLEEP_SMP_NONZERO_CPU)) {
		cpu = smp_processor_id();

		if (cpumask_test_cpu(cpu, tick_nohz_full_mask)) {
			pr_warn("NO_HZ: Clearing %d from nohz_full range "
				"for timekeeping\n", cpu);
			cpumask_clear_cpu(cpu, tick_nohz_full_mask);
		}
	}

	for_each_cpu(cpu, tick_nohz_full_mask)
		ct_cpu_track_user(cpu);

	ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
					"kernel/nohz:predown", NULL,
					tick_nohz_cpu_down);
	WARN_ON(ret < 0);
	pr_info("NO_HZ: Full dynticks CPUs: %*pbl.\n",
		cpumask_pr_args(tick_nohz_full_mask));
}
#endif

/*
 * NOHZ - aka dynamic tick functionality
 */
#ifdef CONFIG_NO_HZ_COMMON
/*
 * NO HZ enabled ?
 */
bool tick_nohz_enabled __read_mostly = true;
unsigned long tick_nohz_active __read_mostly;
/*
 * Enable / Disable tickless mode
 */
static int __init setup_tick_nohz(char *str)
{
	return (kstrtobool(str, &tick_nohz_enabled) == 0);
}

__setup("nohz=", setup_tick_nohz);

bool tick_nohz_tick_stopped(void)
{
	struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);

	return ts->tick_stopped;
}

bool tick_nohz_tick_stopped_cpu(int cpu)
{
	struct tick_sched *ts = per_cpu_ptr(&tick_cpu_sched, cpu);

	return ts->tick_stopped;
}

/**
 * tick_nohz_update_jiffies - update jiffies when idle was interrupted
 *
 * Called from interrupt entry when the CPU was idle
 *
 * In case the sched_tick was stopped on this CPU, we have to check if jiffies
 * must be updated. Otherwise an interrupt handler could use a stale jiffy
 * value. We do this unconditionally on any CPU, as we don't know whether the
 * CPU which has the update task assigned is in a long sleep.
 */
static void tick_nohz_update_jiffies(ktime_t now)
{
	unsigned long flags;

	__this_cpu_write(tick_cpu_sched.idle_waketime, now);

	local_irq_save(flags);
	tick_do_update_jiffies64(now);
	local_irq_restore(flags);

	touch_softlockup_watchdog_sched();
}

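/*
 * Close the current idle period: fold the elapsed time into the idle or
 * iowait sleeptime counters and mark idle accounting as inactive.
 */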
static void tick_nohz_stop_idle(struct tick_sched *ts, ktime_t now)
{
	ktime_t delta;

	if (WARN_ON_ONCE(!ts->idle_active))
		return;

	delta = ktime_sub(now, ts->idle_entrytime);

	write_seqcount_begin(&ts->idle_sleeptime_seq);
	if (nr_iowait_cpu(smp_processor_id()) > 0)
		ts->iowait_sleeptime = ktime_add(ts->iowait_sleeptime, delta);
	else
		ts->idle_sleeptime = ktime_add(ts->idle_sleeptime, delta);

	ts->idle_entrytime = now;
	ts->idle_active = 0;
	write_seqcount_end(&ts->idle_sleeptime_seq);

	sched_clock_idle_wakeup_event();
}

static void tick_nohz_start_idle(struct tick_sched *ts)
{
	write_seqcount_begin(&ts->idle_sleeptime_seq);
	ts->idle_entrytime = ktime_get();
	ts->idle_active = 1;
	write_seqcount_end(&ts->idle_sleeptime_seq);

	sched_clock_idle_sleep_event();
}

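/*
 * Common helper for get_cpu_idle_time_us() and get_cpu_iowait_time_us():
 * read the requested sleeptime counter under the seqcount, adding the
 * currently running idle period when @compute_delta is true.
 */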
static u64 get_cpu_sleep_time_us(struct tick_sched *ts, ktime_t *sleeptime,
				 bool compute_delta, u64 *last_update_time)
{
	ktime_t now, idle;
	unsigned int seq;

	if (!tick_nohz_active)
		return -1;

	now = ktime_get();
	if (last_update_time)
		*last_update_time = ktime_to_us(now);

	do {
		seq = read_seqcount_begin(&ts->idle_sleeptime_seq);

		if (ts->idle_active && compute_delta) {
			ktime_t delta = ktime_sub(now, ts->idle_entrytime);

			idle = ktime_add(*sleeptime, delta);
		} else {
			idle = *sleeptime;
		}
	} while (read_seqcount_retry(&ts->idle_sleeptime_seq, seq));

	return ktime_to_us(idle);

}

/**
 * get_cpu_idle_time_us - get the total idle time of a CPU
 * @cpu: CPU number to query
 * @last_update_time: variable to store update time in. Do not update
 * counters if NULL.
 *
 * Return the cumulative idle time (since boot) for a given
 * CPU, in microseconds. Note this is partially broken due to
 * the counter of iowait tasks that can be remotely updated without
 * any synchronization. Therefore it is possible to observe backward
 * values within two consecutive reads.
 *
 * This time is measured via accounting rather than sampling,
 * and is as accurate as ktime_get() is.
 *
 * This function returns -1 if NOHZ is not enabled.
 */
u64 get_cpu_idle_time_us(int cpu, u64 *last_update_time)
{
	struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);

	return get_cpu_sleep_time_us(ts, &ts->idle_sleeptime,
				     !nr_iowait_cpu(cpu), last_update_time);
}
EXPORT_SYMBOL_GPL(get_cpu_idle_time_us);

/**
 * get_cpu_iowait_time_us - get the total iowait time of a CPU
 * @cpu: CPU number to query
 * @last_update_time: variable to store update time in. Do not update
 * counters if NULL.
 *
 * Return the cumulative iowait time (since boot) for a given
 * CPU, in microseconds. Note this is partially broken due to
 * the counter of iowait tasks that can be remotely updated without
 * any synchronization. Therefore it is possible to observe backward
 * values within two consecutive reads.
 *
 * This time is measured via accounting rather than sampling,
 * and is as accurate as ktime_get() is.
 *
 * This function returns -1 if NOHZ is not enabled.
 */
u64 get_cpu_iowait_time_us(int cpu, u64 *last_update_time)
{
	struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);

	return get_cpu_sleep_time_us(ts, &ts->iowait_sleeptime,
				     nr_iowait_cpu(cpu), last_update_time);
}
EXPORT_SYMBOL_GPL(get_cpu_iowait_time_us);

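/*
 * Re-arm the tick: restart the sched_timer from the last tick and forward
 * it past @now by one tick period.
 */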
static void tick_nohz_restart(struct tick_sched *ts, ktime_t now)
{
	hrtimer_cancel(&ts->sched_timer);
	hrtimer_set_expires(&ts->sched_timer, ts->last_tick);

	/* Forward the time to expire in the future */
	hrtimer_forward(&ts->sched_timer, now, TICK_NSEC);

	if (ts->nohz_mode == NOHZ_MODE_HIGHRES) {
		hrtimer_start_expires(&ts->sched_timer,
				      HRTIMER_MODE_ABS_PINNED_HARD);
	} else {
		tick_program_event(hrtimer_get_expires(&ts->sched_timer), 1);
	}

	/*
	 * Reset to make sure next tick stop doesn't get fooled by past
	 * cached clock deadline.
	 */
	ts->next_tick = 0;
}

static inline bool local_timer_softirq_pending(void)
{
	return local_softirq_pending() & BIT(TIMER_SOFTIRQ);
}

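/*
 * Compute the next tick expiry for @cpu: read the jiffies base, query the
 * next timer interrupt and the timekeeping deferment limit, and cache the
 * result in ts->timer_expires / ts->timer_expires_base. Returns 0 when the
 * tick is not stopped and the next timer is due within the tick period.
 */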
static ktime_t tick_nohz_next_event(struct tick_sched *ts, int cpu)
{
	u64 basemono, next_tick, delta, expires;
	unsigned long basejiff;
	unsigned int seq;

	/* Read jiffies and the time when jiffies were updated last */
	do {
		seq = read_seqcount_begin(&jiffies_seq);
		basemono = last_jiffies_update;
		basejiff = jiffies;
	} while (read_seqcount_retry(&jiffies_seq, seq));
	ts->last_jiffies = basejiff;
	ts->timer_expires_base = basemono;

	/*
	 * Keep the periodic tick, when RCU, architecture or irq_work
	 * requests it.
	 * Aside of that check whether the local timer softirq is
	 * pending. If so it's a bad idea to call get_next_timer_interrupt()
	 * because there is an already expired timer, so it will request
	 * immediate expiry, which rearms the hardware timer with a
	 * minimal delta which brings us back to this place
	 * immediately. Lather, rinse and repeat...
	 */
	if (rcu_needs_cpu() || arch_needs_cpu() ||
	    irq_work_needs_cpu() || local_timer_softirq_pending()) {
		next_tick = basemono + TICK_NSEC;
	} else {
		/*
		 * Get the next pending timer. If high resolution
		 * timers are enabled this only takes the timer wheel
		 * timers into account. If high resolution timers are
		 * disabled this also looks at the next expiring
		 * hrtimer.
		 */
		next_tick = get_next_timer_interrupt(basejiff, basemono);
		ts->next_timer = next_tick;
	}

	/*
	 * If the tick is due in the next period, keep it ticking or
	 * force prod the timer.
	 */
	delta = next_tick - basemono;
	if (delta <= (u64)TICK_NSEC) {
		/*
		 * Tell the timer code that the base is not idle, i.e. undo
		 * the effect of get_next_timer_interrupt():
		 */
		timer_clear_idle();
		/*
		 * We've not stopped the tick yet, and there's a timer in the
		 * next period, so no point in stopping it either, bail.
		 */
		if (!ts->tick_stopped) {
			ts->timer_expires = 0;
			goto out;
		}
	}

	/*
	 * If this CPU is the one which had the do_timer() duty last, we limit
	 * the sleep time to the timekeeping max_deferment value.
	 * Otherwise we can sleep as long as we want.
	 */
	delta = timekeeping_max_deferment();
	if (cpu != tick_do_timer_cpu &&
	    (tick_do_timer_cpu != TICK_DO_TIMER_NONE || !ts->do_timer_last))
		delta = KTIME_MAX;

	/* Calculate the next expiry time */
	if (delta < (KTIME_MAX - basemono))
		expires = basemono + delta;
	else
		expires = KTIME_MAX;

	ts->timer_expires = min_t(u64, expires, next_tick);

out:
	return ts->timer_expires;
}

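/*
 * Stop the tick until the expiry cached by tick_nohz_next_event(): hand off
 * the do_timer() duty if needed and reprogram (or cancel) the tick timer.
 */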
static void tick_nohz_stop_tick(struct tick_sched *ts, int cpu)
{
	struct clock_event_device *dev = __this_cpu_read(tick_cpu_device.evtdev);
	u64 basemono = ts->timer_expires_base;
	u64 expires = ts->timer_expires;
	ktime_t tick = expires;

	/* Make sure we won't be trying to stop it twice in a row. */
	ts->timer_expires_base = 0;

	/*
	 * If this CPU is the one which updates jiffies, then give up
	 * the assignment and let it be taken by the CPU which runs
	 * the tick timer next, which might be this CPU as well. If we
	 * don't drop this here the jiffies might be stale and
	 * do_timer() never invoked. Keep track of the fact that it
	 * was the one which had the do_timer() duty last.
	 */
	if (cpu == tick_do_timer_cpu) {
		tick_do_timer_cpu = TICK_DO_TIMER_NONE;
		ts->do_timer_last = 1;
	} else if (tick_do_timer_cpu != TICK_DO_TIMER_NONE) {
		ts->do_timer_last = 0;
	}

	/* Skip reprogram of event if it's not changed */
	if (ts->tick_stopped && (expires == ts->next_tick)) {
		/* Sanity check: make sure clockevent is actually programmed */
		if (tick == KTIME_MAX || ts->next_tick == hrtimer_get_expires(&ts->sched_timer))
			return;

		WARN_ON_ONCE(1);
		printk_once("basemono: %llu ts->next_tick: %llu dev->next_event: %llu timer->active: %d timer->expires: %llu\n",
			    basemono, ts->next_tick, dev->next_event,
			    hrtimer_active(&ts->sched_timer), hrtimer_get_expires(&ts->sched_timer));
	}

	/*
	 * nohz_stop_sched_tick can be called several times before
	 * the nohz_restart_sched_tick is called. This happens when
	 * interrupts arrive which do not cause a reschedule. In the
	 * first call we save the current tick time, so we can restart
	 * the scheduler tick in nohz_restart_sched_tick.
	 */
	if (!ts->tick_stopped) {
		calc_load_nohz_start();
		quiet_vmstat();

		ts->last_tick = hrtimer_get_expires(&ts->sched_timer);
		ts->tick_stopped = 1;
		trace_tick_stop(1, TICK_DEP_MASK_NONE);
	}

	ts->next_tick = tick;

	/*
	 * If the expiration time == KTIME_MAX, then we simply stop
	 * the tick timer.
	 */
	if (unlikely(expires == KTIME_MAX)) {
		if (ts->nohz_mode == NOHZ_MODE_HIGHRES)
			hrtimer_cancel(&ts->sched_timer);
		else
			tick_program_event(KTIME_MAX, 1);
		return;
	}

	if (ts->nohz_mode == NOHZ_MODE_HIGHRES) {
		hrtimer_start(&ts->sched_timer, tick,
			      HRTIMER_MODE_ABS_PINNED_HARD);
	} else {
		hrtimer_set_expires(&ts->sched_timer, tick);
		tick_program_event(tick, 1);
	}
}

static void tick_nohz_retain_tick(struct tick_sched *ts)
{
	ts->timer_expires_base = 0;
}

#ifdef CONFIG_NO_HZ_FULL
static void tick_nohz_stop_sched_tick(struct tick_sched *ts, int cpu)
{
	if (tick_nohz_next_event(ts, cpu))
		tick_nohz_stop_tick(ts, cpu);
	else
		tick_nohz_retain_tick(ts);
}
#endif /* CONFIG_NO_HZ_FULL */

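/*
 * Restore the periodic tick after an idle/nohz period: update jiffies,
 * resume load accounting and reprogram the tick timer.
 */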
static void tick_nohz_restart_sched_tick(struct tick_sched *ts, ktime_t now)
{
	/* Update jiffies first */
	tick_do_update_jiffies64(now);

	/*
	 * Clear the timer idle flag, so we avoid IPIs on remote queueing and
	 * the clock forward checks in the enqueue path:
	 */
	timer_clear_idle();

	calc_load_nohz_stop();
	touch_softlockup_watchdog_sched();
	/*
	 * Cancel the scheduled timer and restore the tick
	 */
	ts->tick_stopped = 0;
	tick_nohz_restart(ts, now);
}

static void __tick_nohz_full_update_tick(struct tick_sched *ts,
					 ktime_t now)
{
#ifdef CONFIG_NO_HZ_FULL
	int cpu = smp_processor_id();

	if (can_stop_full_tick(cpu, ts))
		tick_nohz_stop_sched_tick(ts, cpu);
	else if (ts->tick_stopped)
		tick_nohz_restart_sched_tick(ts, now);
#endif
}

static void tick_nohz_full_update_tick(struct tick_sched *ts)
{
	if (!tick_nohz_full_cpu(smp_processor_id()))
		return;

	if (!ts->tick_stopped && ts->nohz_mode == NOHZ_MODE_INACTIVE)
		return;

	__tick_nohz_full_update_tick(ts, ktime_get());
}

/*
 * A pending softirq outside an IRQ (or softirq disabled section) context
 * should be waiting for ksoftirqd to handle it. Therefore we shouldn't
 * reach here due to the need_resched() early check in can_stop_idle_tick().
 *
 * However if we are between CPUHP_AP_SMPBOOT_THREADS and CPU_TEARDOWN_CPU on the
 * cpu_down() process, softirqs can still be raised while ksoftirqd is parked,
 * triggering the below since wakeup_softirqd() is ignored.
 *
 */
static bool report_idle_softirq(void)
{
	static int ratelimit;
	unsigned int pending = local_softirq_pending();

	if (likely(!pending))
		return false;

	/* Some softirqs claim to be safe against hotplug and ksoftirqd parking */
	if (!cpu_active(smp_processor_id())) {
		pending &= ~SOFTIRQ_HOTPLUG_SAFE_MASK;
		if (!pending)
			return false;
	}

	if (ratelimit >= 10)
		return false;

	/* On RT, softirqs handling may be waiting on some lock */
	if (local_bh_blocked())
		return false;

	pr_warn("NOHZ tick-stop error: local softirq work is pending, handler #%02x!!!\n",
		pending);
	ratelimit++;

	return true;
}

static bool can_stop_idle_tick(int cpu, struct tick_sched *ts)
{
	/*
	 * If this CPU is offline and it is the one which updates
	 * jiffies, then give up the assignment and let it be taken by
	 * the CPU which runs the tick timer next. If we don't drop
	 * this here the jiffies might be stale and do_timer() never
	 * invoked.
	 */
	if (unlikely(!cpu_online(cpu))) {
		if (cpu == tick_do_timer_cpu)
			tick_do_timer_cpu = TICK_DO_TIMER_NONE;
		/*
		 * Make sure the CPU doesn't get fooled by obsolete tick
		 * deadline if it comes back online later.
		 */
		ts->next_tick = 0;
		return false;
	}

	if (unlikely(ts->nohz_mode == NOHZ_MODE_INACTIVE))
		return false;

	if (need_resched())
		return false;

	if (unlikely(report_idle_softirq()))
		return false;

	if (tick_nohz_full_enabled()) {
		/*
		 * Keep the tick alive to guarantee timekeeping progression
		 * if there are full dynticks CPUs around
		 */
		if (tick_do_timer_cpu == cpu)
			return false;

		/* Should not happen for nohz-full */
		if (WARN_ON_ONCE(tick_do_timer_cpu == TICK_DO_TIMER_NONE))
			return false;
	}

	return true;
}

/**
 * tick_nohz_idle_stop_tick - stop the idle tick from the idle task
 *
 * When the next event is more than a tick into the future, stop the idle tick
 */
void tick_nohz_idle_stop_tick(void)
{
	struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
	int cpu = smp_processor_id();
	ktime_t expires;

	/*
	 * If tick_nohz_get_sleep_length() ran tick_nohz_next_event(), the
	 * tick timer expiration time is known already.
	 */
	if (ts->timer_expires_base)
		expires = ts->timer_expires;
	else if (can_stop_idle_tick(cpu, ts))
		expires = tick_nohz_next_event(ts, cpu);
	else
		return;

	ts->idle_calls++;

	if (expires > 0LL) {
		int was_stopped = ts->tick_stopped;

		tick_nohz_stop_tick(ts, cpu);

		ts->idle_sleeps++;
		ts->idle_expires = expires;

		if (!was_stopped && ts->tick_stopped) {
			ts->idle_jiffies = ts->last_jiffies;
			nohz_balance_enter_idle(cpu);
		}
	} else {
		tick_nohz_retain_tick(ts);
	}
}

void tick_nohz_idle_retain_tick(void)
{
	tick_nohz_retain_tick(this_cpu_ptr(&tick_cpu_sched));
	/*
	 * Undo the effect of get_next_timer_interrupt() called from
	 * tick_nohz_next_event().
	 */
	timer_clear_idle();
}

/**
 * tick_nohz_idle_enter - prepare for entering idle on the current CPU
 *
 * Called when we start the idle loop.
 */
void tick_nohz_idle_enter(void)
{
	struct tick_sched *ts;

	lockdep_assert_irqs_enabled();

	local_irq_disable();

	ts = this_cpu_ptr(&tick_cpu_sched);

	WARN_ON_ONCE(ts->timer_expires_base);

	ts->inidle = 1;
	tick_nohz_start_idle(ts);

	local_irq_enable();
}

/**
 * tick_nohz_irq_exit - update next tick event from interrupt exit
 *
 * When an interrupt fires while we are idle and it doesn't cause
 * a reschedule, it may still add, modify or delete a timer, enqueue
 * an RCU callback, etc...
 * So we need to re-calculate and reprogram the next tick event.
 */
void tick_nohz_irq_exit(void)
{
	struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);

	if (ts->inidle)
		tick_nohz_start_idle(ts);
	else
		tick_nohz_full_update_tick(ts);
}

/**
 * tick_nohz_idle_got_tick - Check whether or not the tick handler has run
 */
bool tick_nohz_idle_got_tick(void)
{
	struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);

	if (ts->got_idle_tick) {
		ts->got_idle_tick = 0;
		return true;
	}
	return false;
}

1208 | ||
6f9b83ac UH |
1209 | /** |
1210 | * tick_nohz_get_next_hrtimer - return the next expiration time for the hrtimer | |
1211 | * or the tick, whatever that expires first. Note that, if the tick has been | |
1212 | * stopped, it returns the next hrtimer. | |
1213 | * | |
1214 | * Called from power state control code with interrupts disabled | |
1215 | */ | |
1216 | ktime_t tick_nohz_get_next_hrtimer(void) | |
1217 | { | |
1218 | return __this_cpu_read(tick_cpu_device.evtdev)->next_event; | |
1219 | } | |
1220 | ||
4f86d3a8 | 1221 | /** |
554c8aa8 | 1222 | * tick_nohz_get_sleep_length - return the expected length of the current sleep |
296bb1e5 | 1223 | * @delta_next: duration until the next event if the tick cannot be stopped |
4f86d3a8 | 1224 | * |
4c81cb7e RW |
1225 | * Called from power state control code with interrupts disabled. |
1226 | * | |
1227 | * The return value of this function and/or the value returned by it through the | |
1228 | * @delta_next pointer can be negative which must be taken into account by its | |
1229 | * callers. | |
4f86d3a8 | 1230 | */ |
296bb1e5 | 1231 | ktime_t tick_nohz_get_sleep_length(ktime_t *delta_next) |
4f86d3a8 | 1232 | { |
554c8aa8 | 1233 | struct clock_event_device *dev = __this_cpu_read(tick_cpu_device.evtdev); |
22127e93 | 1234 | struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched); |
554c8aa8 RW |
1235 | int cpu = smp_processor_id(); |
1236 | /* | |
1237 | * The idle entry time is expected to be a sufficient approximation of | |
1238 | * the current time at this point. | |
1239 | */ | |
1240 | ktime_t now = ts->idle_entrytime; | |
1241 | ktime_t next_event; | |
1242 | ||
1243 | WARN_ON_ONCE(!ts->inidle); | |
1244 | ||
296bb1e5 RW |
1245 | *delta_next = ktime_sub(dev->next_event, now); |
1246 | ||
554c8aa8 | 1247 | if (!can_stop_idle_tick(cpu, ts)) |
296bb1e5 | 1248 | return *delta_next; |
554c8aa8 RW |
1249 | |
1250 | next_event = tick_nohz_next_event(ts, cpu); | |
1251 | if (!next_event) | |
296bb1e5 | 1252 | return *delta_next; |
554c8aa8 RW |
1253 | |
1254 | /* | |
1255 | * If the next highres timer to expire is earlier than next_event, the | |
1256 | * idle governor needs to know that. | |
1257 | */ | |
1258 | next_event = min_t(u64, next_event, | |
1259 | hrtimer_next_event_without(&ts->sched_timer)); | |
4f86d3a8 | 1260 | |
554c8aa8 | 1261 | return ktime_sub(next_event, now); |
4f86d3a8 LB |
1262 | } |
1263 | ||
466a2b42 JF |
1264 | /** |
1265 | * tick_nohz_get_idle_calls_cpu - return the current idle calls counter value | |
1266 | * for a particular CPU. | |
1267 | * | |
1268 | * Called from the schedutil frequency scaling governor in scheduler context. | |
1269 | */ | |
1270 | unsigned long tick_nohz_get_idle_calls_cpu(int cpu) | |
1271 | { | |
1272 | struct tick_sched *ts = tick_get_tick_sched(cpu); | |
1273 | ||
1274 | return ts->idle_calls; | |
1275 | } | |
1276 | ||
b7eaf1aa RW |
1277 | /** |
1278 | * tick_nohz_get_idle_calls - return the current idle calls counter value | |
1279 | * | |
1280 | * Called from the schedutil frequency scaling governor in scheduler context. | |
1281 | */ | |
1282 | unsigned long tick_nohz_get_idle_calls(void) | |
1283 | { | |
1284 | struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched); | |
1285 | ||
1286 | return ts->idle_calls; | |
1287 | } | |
1288 | ||
96c9b903 YY |
1289 | static void tick_nohz_account_idle_time(struct tick_sched *ts, |
1290 | ktime_t now) | |
2ac0d98f | 1291 | { |
2ac0d98f | 1292 | unsigned long ticks; |
3f4724ea | 1293 | |
96c9b903 YY |
1294 | ts->idle_exittime = now; |
1295 | ||
e44fcb4b | 1296 | if (vtime_accounting_enabled_this_cpu()) |
3f4724ea | 1297 | return; |
79bf2bb3 TG |
1298 | /* |
1299 | * We stopped the tick in idle. Update process times would miss the | |
1300 | * time we slept as update_process_times does only a 1 tick | |
1301 | * accounting. Enforce that this is accounted to idle ! | |
1302 | */ | |
1303 | ticks = jiffies - ts->idle_jiffies; | |
1304 | /* | |
1305 | * We might be one off. Do not randomly account a huge number of ticks! | |
1306 | */ | |
79741dd3 MS |
1307 | if (ticks && ticks < LONG_MAX) |
1308 | account_idle_ticks(ticks); | |
19f5f736 FW |
1309 | } |
1310 | ||
a5183862 | 1311 | void tick_nohz_idle_restart_tick(void) |
2aaf709a | 1312 | { |
a5183862 YY |
1313 | struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched); |
1314 | ||
1315 | if (ts->tick_stopped) { | |
96c9b903 YY |
1316 | ktime_t now = ktime_get(); |
1317 | tick_nohz_restart_sched_tick(ts, now); | |
1318 | tick_nohz_account_idle_time(ts, now); | |
a5183862 | 1319 | } |
2aaf709a RW |
1320 | } |
1321 | ||
a5183862 | 1322 | static void tick_nohz_idle_update_tick(struct tick_sched *ts, ktime_t now) |
2aaf709a | 1323 | { |
a5183862 YY |
1324 | if (tick_nohz_full_cpu(smp_processor_id())) |
1325 | __tick_nohz_full_update_tick(ts, now); | |
1326 | else | |
1327 | tick_nohz_restart_sched_tick(ts, now); | |
2aaf709a | 1328 | |
96c9b903 | 1329 | tick_nohz_account_idle_time(ts, now); |
2aaf709a RW |
1330 | } |
1331 | ||
79bf2bb3 | 1332 | /** |
280f0677 | 1333 | * tick_nohz_idle_exit - restart the idle tick from the idle task |
79bf2bb3 TG |
1334 | * |
1335 | * Restart the idle tick when the CPU is woken up from idle | |
280f0677 FW |
1336 | * This also exit the RCU extended quiescent state. The CPU |
1337 | * can use RCU again after this function is called. | |
79bf2bb3 | 1338 | */ |
280f0677 | 1339 | void tick_nohz_idle_exit(void) |
79bf2bb3 | 1340 | { |
4a32fea9 | 1341 | struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched); |
bbe9a70a | 1342 | bool idle_active, tick_stopped; |
6378ddb5 | 1343 | ktime_t now; |
79bf2bb3 | 1344 | |
6378ddb5 | 1345 | local_irq_disable(); |
2bbb6817 | 1346 | |
15f827be | 1347 | WARN_ON_ONCE(!ts->inidle); |
23a8d888 | 1348 | WARN_ON_ONCE(ts->timer_expires_base); |
15f827be FW |
1349 | |
1350 | ts->inidle = 0; | |
bbe9a70a AB |
1351 | idle_active = ts->idle_active; |
1352 | tick_stopped = ts->tick_stopped; | |
15f827be | 1353 | |
bbe9a70a | 1354 | if (idle_active || tick_stopped) |
eed3b9cf MS |
1355 | now = ktime_get(); |
1356 | ||
bbe9a70a | 1357 | if (idle_active) |
e8fcaa5c | 1358 | tick_nohz_stop_idle(ts, now); |
6378ddb5 | 1359 | |
bbe9a70a | 1360 | if (tick_stopped) |
a5183862 | 1361 | tick_nohz_idle_update_tick(ts, now); |
79bf2bb3 | 1362 | |
79bf2bb3 TG |
1363 | local_irq_enable(); |
1364 | } | |
1365 | ||
79bf2bb3 TG |
1366 | /* |
1367 | * The nohz low res interrupt handler | |
1368 | */ | |
1369 | static void tick_nohz_handler(struct clock_event_device *dev) | |
1370 | { | |
22127e93 | 1371 | struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched); |
79bf2bb3 TG |
1372 | struct pt_regs *regs = get_irq_regs(); |
1373 | ktime_t now = ktime_get(); | |
1374 | ||
2456e855 | 1375 | dev->next_event = KTIME_MAX; |
79bf2bb3 | 1376 | |
ff7de620 | 1377 | tick_sched_do_timer(ts, now); |
9e8f559b | 1378 | tick_sched_handle(ts, regs); |
79bf2bb3 | 1379 | |
62c1256d NP |
1380 | if (unlikely(ts->tick_stopped)) { |
1381 | /* | |
1382 | * The clockevent device is not reprogrammed, so change the | |
1383 | * clock event device to ONESHOT_STOPPED to avoid spurious | |
1384 | * interrupts on devices which might not be truly one shot. | |
1385 | */ | |
1386 | tick_program_event(KTIME_MAX, 1); | |
b5e995e6 | 1387 | return; |
62c1256d | 1388 | } |
b5e995e6 | 1389 | |
b9965449 | 1390 | hrtimer_forward(&ts->sched_timer, now, TICK_NSEC); |
0ff53d09 | 1391 | tick_program_event(hrtimer_get_expires(&ts->sched_timer), 1); |
79bf2bb3 TG |
1392 | } |
1393 | ||
bc7a34b8 TG |
1394 | static inline void tick_nohz_activate(struct tick_sched *ts, int mode) |
1395 | { | |
1396 | if (!tick_nohz_enabled) | |
1397 | return; | |
1398 | ts->nohz_mode = mode; | |
1399 | /* One update is enough */ | |
1400 | if (!test_and_set_bit(0, &tick_nohz_active)) | |
ae67bada | 1401 | timers_update_nohz(); |
bc7a34b8 TG |
1402 | } |
1403 | ||
79bf2bb3 TG |
1404 | /** |
1405 | * tick_nohz_switch_to_nohz - switch to nohz mode | |
1406 | */ | |
1407 | static void tick_nohz_switch_to_nohz(void) | |
1408 | { | |
22127e93 | 1409 | struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched); |
79bf2bb3 TG |
1410 | ktime_t next; |
1411 | ||
27630532 | 1412 | if (!tick_nohz_enabled) |
79bf2bb3 TG |
1413 | return; |
1414 | ||
6b442bc8 | 1415 | if (tick_switch_to_oneshot(tick_nohz_handler)) |
79bf2bb3 | 1416 | return; |
6b442bc8 | 1417 | |
79bf2bb3 TG |
1418 | /* |
1419 | * Recycle the hrtimer in ts, so we can share the | |
1420 | * hrtimer_forward with the highres code. | |
1421 | */ | |
71fed982 | 1422 | hrtimer_init(&ts->sched_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_HARD); |
79bf2bb3 TG |
1423 | /* Get the next period */ |
1424 | next = tick_init_jiffy_update(); | |
1425 | ||
0ff53d09 | 1426 | hrtimer_set_expires(&ts->sched_timer, next); |
b9965449 | 1427 | hrtimer_forward_now(&ts->sched_timer, TICK_NSEC); |
1ca8ec53 | 1428 | tick_program_event(hrtimer_get_expires(&ts->sched_timer), 1); |
bc7a34b8 | 1429 | tick_nohz_activate(ts, NOHZ_MODE_LOWRES); |
79bf2bb3 TG |
1430 | } |
1431 | ||
5acac1be | 1432 | static inline void tick_nohz_irq_enter(void) |
eed3b9cf | 1433 | { |
4a32fea9 | 1434 | struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched); |
eed3b9cf MS |
1435 | ktime_t now; |
1436 | ||
1437 | if (!ts->idle_active && !ts->tick_stopped) | |
1438 | return; | |
1439 | now = ktime_get(); | |
1440 | if (ts->idle_active) | |
e8fcaa5c | 1441 | tick_nohz_stop_idle(ts, now); |
53e87e3c FW |
1442 | /* |
1443 | * If all CPUs are idle. We may need to update a stale jiffies value. | |
1444 | * Note nohz_full is a special case: a timekeeper is guaranteed to stay | |
1445 | * alive but it might be busy looping with interrupts disabled in some | |
1446 | * rare case (typically stop machine). So we must make sure we have a | |
1447 | * last resort. | |
1448 | */ | |
ff006732 | 1449 | if (ts->tick_stopped) |
eed3b9cf | 1450 | tick_nohz_update_jiffies(now); |
eed3b9cf MS |
1451 | } |
1452 | ||
79bf2bb3 TG |
1453 | #else |
1454 | ||
1455 | static inline void tick_nohz_switch_to_nohz(void) { } | |
5acac1be | 1456 | static inline void tick_nohz_irq_enter(void) { } |
bc7a34b8 | 1457 | static inline void tick_nohz_activate(struct tick_sched *ts, int mode) { } |
79bf2bb3 | 1458 | |
3451d024 | 1459 | #endif /* CONFIG_NO_HZ_COMMON */ |
79bf2bb3 | 1460 | |
719254fa TG |
1461 | /* |
1462 | * Called from irq_enter to notify about the possible interruption of idle() | |
1463 | */ | |
5acac1be | 1464 | void tick_irq_enter(void) |
719254fa | 1465 | { |
e8fcaa5c | 1466 | tick_check_oneshot_broadcast_this_cpu(); |
5acac1be | 1467 | tick_nohz_irq_enter(); |
719254fa TG |
1468 | } |
1469 | ||
79bf2bb3 TG |
1470 | /* |
1471 | * High resolution timer specific code | |
1472 | */ | |
1473 | #ifdef CONFIG_HIGH_RES_TIMERS | |
1474 | /* | |
4c9dc641 | 1475 | * We rearm the timer until we get disabled by the idle code. |
351f181f | 1476 | * Called with interrupts disabled. |
79bf2bb3 TG |
1477 | */ |
1478 | static enum hrtimer_restart tick_sched_timer(struct hrtimer *timer) | |
1479 | { | |
1480 | struct tick_sched *ts = | |
1481 | container_of(timer, struct tick_sched, sched_timer); | |
79bf2bb3 TG |
1482 | struct pt_regs *regs = get_irq_regs(); |
1483 | ktime_t now = ktime_get(); | |
d3ed7824 | 1484 | |
ff7de620 | 1485 | tick_sched_do_timer(ts, now); |
79bf2bb3 TG |
1486 | |
1487 | /* | |
1488 | * Do not call, when we are not in irq context and have | |
1489 | * no valid regs pointer | |
1490 | */ | |
9e8f559b FW |
1491 | if (regs) |
1492 | tick_sched_handle(ts, regs); | |
7c259045 FW |
1493 | else |
1494 | ts->next_tick = 0; | |
79bf2bb3 | 1495 | |
2a16fc93 VK |
1496 | /* No need to reprogram if we are in idle or full dynticks mode */ |
1497 | if (unlikely(ts->tick_stopped)) | |
1498 | return HRTIMER_NORESTART; | |
1499 | ||
b9965449 | 1500 | hrtimer_forward(timer, now, TICK_NSEC); |
79bf2bb3 TG |
1501 | |
1502 | return HRTIMER_RESTART; | |
1503 | } | |
1504 | ||
5307c955 MG |
1505 | static int sched_skew_tick; |
1506 | ||
62cf20b3 TG |
1507 | static int __init skew_tick(char *str) |
1508 | { | |
1509 | get_option(&str, &sched_skew_tick); | |
1510 | ||
1511 | return 0; | |
1512 | } | |
1513 | early_param("skew_tick", skew_tick); | |
1514 | ||
79bf2bb3 TG |
1515 | /** |
1516 | * tick_setup_sched_timer - setup the tick emulation timer | |
1517 | */ | |
1518 | void tick_setup_sched_timer(void) | |
1519 | { | |
22127e93 | 1520 | struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched); |
79bf2bb3 TG |
1521 | ktime_t now = ktime_get(); |
1522 | ||
1523 | /* | |
1524 | * Emulate tick processing via per-CPU hrtimers: | |
1525 | */ | |
902a9f9c | 1526 | hrtimer_init(&ts->sched_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_HARD); |
79bf2bb3 | 1527 | ts->sched_timer.function = tick_sched_timer; |
79bf2bb3 | 1528 | |
0de7611a | 1529 | /* Get the next period (per-CPU) */ |
cc584b21 | 1530 | hrtimer_set_expires(&ts->sched_timer, tick_init_jiffy_update()); |
79bf2bb3 | 1531 | |
9c3f9e28 | 1532 | /* Offset the tick to avert jiffies_lock contention. */ |
5307c955 | 1533 | if (sched_skew_tick) { |
b9965449 | 1534 | u64 offset = TICK_NSEC >> 1; |
5307c955 MG |
1535 | do_div(offset, num_possible_cpus()); |
1536 | offset *= smp_processor_id(); | |
1537 | hrtimer_add_expires_ns(&ts->sched_timer, offset); | |
1538 | } | |
1539 | ||
b9965449 | 1540 | hrtimer_forward(&ts->sched_timer, now, TICK_NSEC); |
902a9f9c | 1541 | hrtimer_start_expires(&ts->sched_timer, HRTIMER_MODE_ABS_PINNED_HARD); |
bc7a34b8 | 1542 | tick_nohz_activate(ts, NOHZ_MODE_HIGHRES); |
79bf2bb3 | 1543 | } |
3c4fbe5e | 1544 | #endif /* HIGH_RES_TIMERS */ |
79bf2bb3 | 1545 | |
3451d024 | 1546 | #if defined CONFIG_NO_HZ_COMMON || defined CONFIG_HIGH_RES_TIMERS |
79bf2bb3 TG |
1547 | void tick_cancel_sched_timer(int cpu) |
1548 | { | |
1549 | struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu); | |
1550 | ||
3c4fbe5e | 1551 | # ifdef CONFIG_HIGH_RES_TIMERS |
79bf2bb3 TG |
1552 | if (ts->sched_timer.base) |
1553 | hrtimer_cancel(&ts->sched_timer); | |
3c4fbe5e | 1554 | # endif |
a7901766 | 1555 | |
4b0c0f29 | 1556 | memset(ts, 0, sizeof(*ts)); |
79bf2bb3 | 1557 | } |
3c4fbe5e | 1558 | #endif |
79bf2bb3 | 1559 | |
9c95bc25 | 1560 | /* |
79bf2bb3 TG |
1561 | * Async notification about clocksource changes |
1562 | */ | |
1563 | void tick_clock_notify(void) | |
1564 | { | |
1565 | int cpu; | |
1566 | ||
1567 | for_each_possible_cpu(cpu) | |
1568 | set_bit(0, &per_cpu(tick_cpu_sched, cpu).check_clocks); | |
1569 | } | |
1570 | ||
1571 | /* | |
1572 | * Async notification about clock event changes | |
1573 | */ | |
1574 | void tick_oneshot_notify(void) | |
1575 | { | |
22127e93 | 1576 | struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched); |
79bf2bb3 TG |
1577 | |
1578 | set_bit(0, &ts->check_clocks); | |
1579 | } | |
1580 | ||
9c95bc25 | 1581 | /* |
79bf2bb3 TG |
1582 | * Check, if a change happened, which makes oneshot possible. |
1583 | * | |
 * Called cyclically from the hrtimer softirq (driven by the timer
 * softirq). allow_nohz signals that we can switch into low-res nohz
 * mode, because high resolution timers are disabled (either at compile
 * time or at run time). Called with interrupts disabled.
 */
int tick_check_oneshot_change(int allow_nohz)
{
	struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);

	if (!test_and_clear_bit(0, &ts->check_clocks))
		return 0;

	if (ts->nohz_mode != NOHZ_MODE_INACTIVE)
		return 0;

	if (!timekeeping_valid_for_hres() || !tick_is_oneshot_available())
		return 0;

	if (!allow_nohz)
		return 1;

	tick_nohz_switch_to_nohz();
	return 0;
}