1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * Infrastructure for migratable timers
4 *
5 * Copyright(C) 2022 linutronix GmbH
6 */
7#include <linux/cpuhotplug.h>
8#include <linux/slab.h>
9#include <linux/smp.h>
10#include <linux/spinlock.h>
11#include <linux/timerqueue.h>
12#include <trace/events/ipi.h>
13
14#include "timer_migration.h"
15#include "tick-internal.h"
16
17#define CREATE_TRACE_POINTS
18#include <trace/events/timer_migration.h>
19
20/*
21 * The timer migration mechanism is built on a hierarchy of groups. The
22 * lowest level group contains CPUs, the next level groups of CPU groups
23 * and so forth. The CPU groups are kept per node so for the normal case
24 * lock contention won't happen across nodes. Depending on the number of
25 * CPUs per node even the next level might be kept as groups of CPU groups
26 * per node and only the levels above cross the node topology.
27 *
28 * Example topology for a two node system with 24 CPUs each.
29 *
30 * LVL 2 [GRP2:0]
31 * GRP1:0 = GRP1:M
32 *
33 * LVL 1 [GRP1:0] [GRP1:1]
34 * GRP0:0 - GRP0:2 GRP0:3 - GRP0:5
35 *
36 * LVL 0 [GRP0:0] [GRP0:1] [GRP0:2] [GRP0:3] [GRP0:4] [GRP0:5]
37 * CPUS 0-7 8-15 16-23 24-31 32-39 40-47
38 *
39 * The groups hold a timer queue of events sorted by expiry time. These
 40 * queues are updated when CPUs go idle. When they come out of idle, the
 41 * ignore flag of their events is set.
42 *
43 * Each group has a designated migrator CPU/group as long as a CPU/group is
 44 * active in the group. This designated role is necessary to prevent all
 45 * active CPUs in a group from trying to migrate expired timers from other
 46 * CPUs, which would result in massive lock bouncing.
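 *
 * Whether a child (CPU or group) holds the migrator role in its parent group
 * is encoded in the group state; a condensed sketch of the check, as
 * implemented in tmigr_check_migrator() below:
 *
 *	s.state = atomic_read(&group->migr_state);
 *	return (s.migrator == childmask) || (s.migrator == TMIGR_NONE);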
47 *
 48 * When a CPU is awake, it checks in its own timer tick the group
 49 * hierarchy up to the point where it is assigned the migrator role or, if
50 * no CPU is active, it also checks the groups where no migrator is set
51 * (TMIGR_NONE).
52 *
53 * If it finds expired timers in one of the group queues it pulls them over
54 * from the idle CPU and runs the timer function. After that it updates the
55 * group and the parent groups if required.
56 *
57 * CPUs which go idle arm their CPU local timer hardware for the next local
58 * (pinned) timer event. If the next migratable timer expires after the
59 * next local timer or the CPU has no migratable timer pending then the
60 * CPU does not queue an event in the LVL0 group. If the next migratable
61 * timer expires before the next local timer then the CPU queues that timer
62 * in the LVL0 group. In both cases the CPU marks itself idle in the LVL0
63 * group.
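 *
 * A condensed sketch of how this looks in the idle path below
 * (__tmigr_cpu_deactivate()); the timer code hands in KTIME_MAX as
 * @nextexp when there is no migratable timer worth queueing:
 *
 *	if (nextexp != KTIME_MAX)
 *		tmc->cpuevt.ignore = false;
 *
 *	walk_groups(&tmigr_inactive_up, &data, tmc);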
64 *
 65 * When a CPU comes out of idle and when a group has at least a single active
 66 * child, the ignore flag of the tmigr_event is set. This indicates that
 67 * the event is ignored even if it is still enqueued in the parent group's
 68 * timer queue. It will be removed when the timer queue is touched the next
 69 * time. This spares locking in the active path as the lock protects (after
 70 * setup) only event information. For more information about locking,
71 * please read the section "Locking rules".
72 *
73 * If the CPU is the migrator of the group then it delegates that role to
74 * the next active CPU in the group or sets migrator to TMIGR_NONE when
75 * there is no active CPU in the group. This delegation needs to be
76 * propagated up the hierarchy so hand over from other leaves can happen at
77 * all hierarchy levels w/o doing a search.
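 *
 * A condensed sketch of the handover, as done in tmigr_inactive_up() below:
 * the first remaining active child of the group (if any) becomes the new
 * migrator, otherwise TMIGR_NONE is set and the change is propagated up:
 *
 *	new_migr_bit = find_first_bit(&active, BIT_CNT);
 *	if (new_migr_bit != BIT_CNT)
 *		newstate.migrator = BIT(new_migr_bit);
 *	else
 *		newstate.migrator = TMIGR_NONE;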
78 *
79 * When the last CPU in the system goes idle, then it drops all migrator
80 * duties up to the top level of the hierarchy (LVL2 in the example). It
 81 * then has to make sure that it arms its own local hardware timer for
82 * the earliest event in the system.
83 *
84 *
85 * Lifetime rules:
86 * ---------------
87 *
88 * The groups are built up at init time or when CPUs come online. They are
89 * not destroyed when a group becomes empty due to offlining. The group
90 * just won't participate in the hierarchy management anymore. Destroying
91 * groups would result in interesting race conditions which would just make
92 * the whole mechanism slow and complex.
93 *
94 *
95 * Locking rules:
96 * --------------
97 *
98 * For setting up new groups and handling events it's required to lock both
99 * child and parent group. The lock ordering is always bottom up. This also
100 * includes the per CPU locks in struct tmigr_cpu. For updating the migrator and
101 * active CPU/group information atomic_try_cmpxchg() is used instead and only
102 * the per CPU tmigr_cpu->lock is held.
103 *
104 * During the setup of groups tmigr_level_list is required. It is protected by
105 * @tmigr_mutex.
106 *
107 * When @timer_base->lock as well as tmigr related locks are required, the lock
108 * ordering is: first @timer_base->lock, afterwards tmigr related locks.
109 *
110 *
111 * Protection of the tmigr group state information:
112 * ------------------------------------------------
113 *
114 * The state information with the list of active children and migrator needs to
115 * be protected by a sequence counter. It prevents a race when updates in child
116 * groups are propagated in changed order. The state update is performed
117 * lockless and group wise. The following scenario describes what happens
118 * without updating the sequence counter:
119 *
120 * Therefore, let's take three groups and four CPUs (CPU2 and CPU3 as well
121 * as GRP0:1 will not change during the scenario):
122 *
123 * LVL 1 [GRP1:0]
124 * migrator = GRP0:1
125 * active = GRP0:0, GRP0:1
126 * / \
127 * LVL 0 [GRP0:0] [GRP0:1]
128 * migrator = CPU0 migrator = CPU2
129 * active = CPU0 active = CPU2
130 * / \ / \
131 * CPUs 0 1 2 3
132 * active idle active idle
133 *
134 *
135 * 1. CPU0 goes idle. As the update is performed group wise, in the first step
136 * only GRP0:0 is updated. The update of GRP1:0 is pending as CPU0 has to
137 * walk the hierarchy.
138 *
139 * LVL 1 [GRP1:0]
140 * migrator = GRP0:1
141 * active = GRP0:0, GRP0:1
142 * / \
143 * LVL 0 [GRP0:0] [GRP0:1]
144 * --> migrator = TMIGR_NONE migrator = CPU2
145 * --> active = active = CPU2
146 * / \ / \
147 * CPUs 0 1 2 3
148 * --> idle idle active idle
149 *
150 * 2. While CPU0 goes idle and continues to update the state, CPU1 comes out of
151 * idle. CPU1 updates GRP0:0. The update for GRP1:0 is pending as CPU1 also
152 * has to walk the hierarchy. Both CPUs (CPU0 and CPU1) now walk the
153 * hierarchy to perform the needed update from their point of view. The
154 * currently visible state looks the following:
155 *
156 * LVL 1 [GRP1:0]
157 * migrator = GRP0:1
158 * active = GRP0:0, GRP0:1
159 * / \
160 * LVL 0 [GRP0:0] [GRP0:1]
161 * --> migrator = CPU1 migrator = CPU2
162 * --> active = CPU1 active = CPU2
163 * / \ / \
164 * CPUs 0 1 2 3
165 * idle --> active active idle
166 *
167 * 3. Here is the race condition: CPU1 managed to propagate its changes (from
168 * step 2) through the hierarchy to GRP1:0 before CPU0 (step 1) did. The
169 * active members of GRP1:0 remain unchanged after the update since it is
 170 * still valid from CPU1's current point of view:
171 *
172 * LVL 1 [GRP1:0]
173 * --> migrator = GRP0:1
174 * --> active = GRP0:0, GRP0:1
175 * / \
176 * LVL 0 [GRP0:0] [GRP0:1]
177 * migrator = CPU1 migrator = CPU2
178 * active = CPU1 active = CPU2
179 * / \ / \
180 * CPUs 0 1 2 3
181 * idle active active idle
182 *
183 * 4. Now CPU0 finally propagates its changes (from step 1) to GRP1:0.
184 *
185 * LVL 1 [GRP1:0]
186 * --> migrator = GRP0:1
187 * --> active = GRP0:1
188 * / \
189 * LVL 0 [GRP0:0] [GRP0:1]
190 * migrator = CPU1 migrator = CPU2
191 * active = CPU1 active = CPU2
192 * / \ / \
193 * CPUs 0 1 2 3
194 * idle active active idle
195 *
196 *
197 * The race of CPU0 vs. CPU1 led to an inconsistent state in GRP1:0. CPU1 is
198 * active and is correctly listed as active in GRP0:0. However GRP1:0 does not
199 * have GRP0:0 listed as active, which is wrong. The sequence counter has been
200 * added to avoid inconsistent states during updates. The state is updated
201 * atomically only if all members, including the sequence counter, match the
202 * expected value (compare-and-exchange).
203 *
204 * Looking back at the previous example with the addition of the sequence
205 * counter: The update as performed by CPU0 in step 4 will fail. CPU1 changed
206 * the sequence number during the update in step 3 so the expected old value (as
207 * seen by CPU0 before starting the walk) does not match.
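 *
 * A condensed sketch of such an update, as done in tmigr_active_up() below.
 * Migrator, active mask and sequence counter share one atomic word (union
 * tmigr_state), so an update based on a stale snapshot necessarily fails
 * the compare-and-exchange and has to be retried with a fresh snapshot:
 *
 *	curstate.state = atomic_read(&group->migr_state);
 *	do {
 *		newstate = curstate;
 *		newstate.active |= childmask;
 *		newstate.seq++;
 *	} while (!atomic_try_cmpxchg(&group->migr_state, &curstate.state,
 *				     newstate.state));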
208 *
209 * Prevent race between new event and last CPU going inactive
210 * ----------------------------------------------------------
211 *
212 * When the last CPU is going idle and there is a concurrent update of a new
213 * first global timer of an idle CPU, the group and child states have to be read
214 * while holding the lock in tmigr_update_events(). The following scenario shows
 215 * what happens when this is not done.
216 *
217 * 1. Only CPU2 is active:
218 *
219 * LVL 1 [GRP1:0]
220 * migrator = GRP0:1
221 * active = GRP0:1
222 * next_expiry = KTIME_MAX
223 * / \
224 * LVL 0 [GRP0:0] [GRP0:1]
225 * migrator = TMIGR_NONE migrator = CPU2
226 * active = active = CPU2
227 * next_expiry = KTIME_MAX next_expiry = KTIME_MAX
228 * / \ / \
229 * CPUs 0 1 2 3
230 * idle idle active idle
231 *
 232 * 2. Now CPU 2 goes idle (and has no global timer that has to be handled) and
233 * propagates that to GRP0:1:
234 *
235 * LVL 1 [GRP1:0]
236 * migrator = GRP0:1
237 * active = GRP0:1
238 * next_expiry = KTIME_MAX
239 * / \
240 * LVL 0 [GRP0:0] [GRP0:1]
241 * migrator = TMIGR_NONE --> migrator = TMIGR_NONE
242 * active = --> active =
243 * next_expiry = KTIME_MAX next_expiry = KTIME_MAX
244 * / \ / \
245 * CPUs 0 1 2 3
246 * idle idle --> idle idle
247 *
248 * 3. Now the idle state is propagated up to GRP1:0. As this is now the last
 249 * child going idle in the top level group, the expiry of the next group event
250 * has to be handed back to make sure no event is lost. As there is no event
251 * enqueued, KTIME_MAX is handed back to CPU2.
252 *
253 * LVL 1 [GRP1:0]
254 * --> migrator = TMIGR_NONE
255 * --> active =
256 * next_expiry = KTIME_MAX
257 * / \
258 * LVL 0 [GRP0:0] [GRP0:1]
259 * migrator = TMIGR_NONE migrator = TMIGR_NONE
260 * active = active =
261 * next_expiry = KTIME_MAX next_expiry = KTIME_MAX
262 * / \ / \
263 * CPUs 0 1 2 3
264 * idle idle --> idle idle
265 *
266 * 4. CPU 0 has a new timer queued from idle and it expires at TIMER0. CPU0
267 * propagates that to GRP0:0:
268 *
269 * LVL 1 [GRP1:0]
270 * migrator = TMIGR_NONE
271 * active =
272 * next_expiry = KTIME_MAX
273 * / \
274 * LVL 0 [GRP0:0] [GRP0:1]
275 * migrator = TMIGR_NONE migrator = TMIGR_NONE
276 * active = active =
277 * --> next_expiry = TIMER0 next_expiry = KTIME_MAX
278 * / \ / \
279 * CPUs 0 1 2 3
280 * idle idle idle idle
281 *
282 * 5. GRP0:0 is not active, so the new timer has to be propagated to
 283 * GRP1:0. Therefore the GRP1:0 state has to be read. When the stale value
 284 * (from step 2) is read, the timer is enqueued into GRP1:0, but nothing is
 285 * handed back to CPU0, as it seems that there is still an active child in
 286 * the top level group.
287 *
288 * LVL 1 [GRP1:0]
289 * migrator = TMIGR_NONE
290 * active =
291 * --> next_expiry = TIMER0
292 * / \
293 * LVL 0 [GRP0:0] [GRP0:1]
294 * migrator = TMIGR_NONE migrator = TMIGR_NONE
295 * active = active =
296 * next_expiry = TIMER0 next_expiry = KTIME_MAX
297 * / \ / \
298 * CPUs 0 1 2 3
299 * idle idle idle idle
300 *
301 * This is prevented by reading the state when holding the lock (when a new
 302 * timer has to be propagated from the idle path)::
303 *
304 * CPU2 (tmigr_inactive_up()) CPU0 (tmigr_new_timer_up())
305 * -------------------------- ---------------------------
306 * // step 3:
307 * cmpxchg(&GRP1:0->state);
308 * tmigr_update_events() {
309 * spin_lock(&GRP1:0->lock);
310 * // ... update events ...
311 * // hand back first expiry when GRP1:0 is idle
312 * spin_unlock(&GRP1:0->lock);
313 * // ^^^ release state modification
314 * }
315 * tmigr_update_events() {
316 * spin_lock(&GRP1:0->lock)
317 * // ^^^ acquire state modification
318 * group_state = atomic_read(&GRP1:0->state)
319 * // .... update events ...
320 * // hand back first expiry when GRP1:0 is idle
321 * spin_unlock(&GRP1:0->lock) <3>
322 * // ^^^ makes state visible for other
323 * // callers of tmigr_new_timer_up()
324 * }
325 *
326 * When CPU0 grabs the lock directly after cmpxchg, the first timer is reported
327 * back to CPU0 and also later on to CPU2. So no timer is missed. A concurrent
 328 * update of the group state from the active path is no problem, as the
 329 * upcoming CPU will take care of the group events.
330 *
331 * Required event and timerqueue update after a remote expiry:
332 * -----------------------------------------------------------
333 *
334 * After expiring timers of a remote CPU, a walk through the hierarchy and
335 * update of events and timerqueues is required. It is obviously needed if there
336 * is a 'new' global timer but also if there is no new global timer but the
337 * remote CPU is still idle.
338 *
339 * 1. CPU0 and CPU1 are idle and have both a global timer expiring at the same
340 * time. So both have an event enqueued in the timerqueue of GRP0:0. CPU3 is
341 * also idle and has no global timer pending. CPU2 is the only active CPU and
342 * thus also the migrator:
343 *
344 * LVL 1 [GRP1:0]
345 * migrator = GRP0:1
346 * active = GRP0:1
347 * --> timerqueue = evt-GRP0:0
348 * / \
349 * LVL 0 [GRP0:0] [GRP0:1]
350 * migrator = TMIGR_NONE migrator = CPU2
351 * active = active = CPU2
352 * groupevt.ignore = false groupevt.ignore = true
353 * groupevt.cpu = CPU0 groupevt.cpu =
354 * timerqueue = evt-CPU0, timerqueue =
355 * evt-CPU1
356 * / \ / \
357 * CPUs 0 1 2 3
358 * idle idle active idle
359 *
360 * 2. CPU2 starts to expire remote timers. It starts with LVL0 group
361 * GRP0:1. There is no event queued in the timerqueue, so CPU2 continues with
362 * the parent of GRP0:1: GRP1:0. In GRP1:0 it dequeues the first event. It
 363 * looks at the tmigr_event::cpu struct member and expires the pending timer(s)
364 * of CPU0.
365 *
366 * LVL 1 [GRP1:0]
367 * migrator = GRP0:1
368 * active = GRP0:1
369 * --> timerqueue =
370 * / \
371 * LVL 0 [GRP0:0] [GRP0:1]
372 * migrator = TMIGR_NONE migrator = CPU2
373 * active = active = CPU2
374 * groupevt.ignore = false groupevt.ignore = true
375 * --> groupevt.cpu = CPU0 groupevt.cpu =
376 * timerqueue = evt-CPU0, timerqueue =
377 * evt-CPU1
378 * / \ / \
379 * CPUs 0 1 2 3
380 * idle idle active idle
381 *
382 * 3. Some work has to be done after expiring the timers of CPU0. If we stop
383 * here, then CPU1's pending global timer(s) will not expire in time and the
 384 * timerqueue of GRP0:0 still has an event for CPU0 enqueued which has just
 385 * been processed. So it is required to walk the hierarchy from CPU0's point
 386 * of view and update it accordingly. CPU0's event will be removed from the
 387 * timerqueue because it has no pending timer. If CPU0 had a timer
 388 * pending, it would have to expire after CPU1's first timer because all timers
389 * from this period were just expired. Either way CPU1's event will be first
390 * in GRP0:0's timerqueue and therefore set in the CPU field of the group
391 * event which is then enqueued in GRP1:0's timerqueue as GRP0:0 is still not
392 * active:
393 *
394 * LVL 1 [GRP1:0]
395 * migrator = GRP0:1
396 * active = GRP0:1
397 * --> timerqueue = evt-GRP0:0
398 * / \
399 * LVL 0 [GRP0:0] [GRP0:1]
400 * migrator = TMIGR_NONE migrator = CPU2
401 * active = active = CPU2
402 * groupevt.ignore = false groupevt.ignore = true
403 * --> groupevt.cpu = CPU1 groupevt.cpu =
404 * --> timerqueue = evt-CPU1 timerqueue =
405 * / \ / \
406 * CPUs 0 1 2 3
407 * idle idle active idle
408 *
409 * Now CPU2 (migrator) will continue step 2 at GRP1:0 and will expire the
410 * timer(s) of CPU1.
411 *
412 * The hierarchy walk in step 3 can be skipped if the migrator notices that a
413 * CPU of GRP0:0 is active again. The CPU will mark GRP0:0 active and take care
414 * of the group as migrator and any needed updates within the hierarchy.
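 *
 * A condensed sketch of the per group expiry loop which the migrator runs
 * in tmigr_handle_remote_up() below (locking and the migrator re-check are
 * omitted here); the enqueued group event carries the CPU whose timers
 * have to be expired remotely:
 *
 *	while ((evt = tmigr_next_expired_groupevt(group, now)))
 *		tmigr_handle_remote_cpu(evt->cpu, now, jif);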
415 */
416
417static DEFINE_MUTEX(tmigr_mutex);
418static struct list_head *tmigr_level_list __read_mostly;
419
420static unsigned int tmigr_hierarchy_levels __read_mostly;
421static unsigned int tmigr_crossnode_level __read_mostly;
422
423static DEFINE_PER_CPU(struct tmigr_cpu, tmigr_cpu);
424
425#define TMIGR_NONE 0xFF
426#define BIT_CNT 8
427
428static inline bool tmigr_is_not_available(struct tmigr_cpu *tmc)
429{
430 return !(tmc->tmgroup && tmc->online);
431}
432
433/*
 434 * Returns true when @childmask corresponds to the group migrator or when the
435 * group is not active - so no migrator is set.
436 */
437static bool tmigr_check_migrator(struct tmigr_group *group, u8 childmask)
438{
439 union tmigr_state s;
440
441 s.state = atomic_read(&group->migr_state);
442
443 if ((s.migrator == childmask) || (s.migrator == TMIGR_NONE))
444 return true;
445
446 return false;
447}
448
449static bool tmigr_check_migrator_and_lonely(struct tmigr_group *group, u8 childmask)
450{
451 bool lonely, migrator = false;
452 unsigned long active;
453 union tmigr_state s;
454
455 s.state = atomic_read(&group->migr_state);
456
457 if ((s.migrator == childmask) || (s.migrator == TMIGR_NONE))
458 migrator = true;
459
460 active = s.active;
461 lonely = bitmap_weight(&active, BIT_CNT) <= 1;
462
463 return (migrator && lonely);
464}
465
466static bool tmigr_check_lonely(struct tmigr_group *group)
467{
468 unsigned long active;
469 union tmigr_state s;
470
471 s.state = atomic_read(&group->migr_state);
472
473 active = s.active;
474
475 return bitmap_weight(&active, BIT_CNT) <= 1;
476}
477
478typedef bool (*up_f)(struct tmigr_group *, struct tmigr_group *, void *);
479
480static void __walk_groups(up_f up, void *data,
481 struct tmigr_cpu *tmc)
482{
483 struct tmigr_group *child = NULL, *group = tmc->tmgroup;
484
485 do {
486 WARN_ON_ONCE(group->level >= tmigr_hierarchy_levels);
487
488 if (up(group, child, data))
489 break;
490
491 child = group;
492 group = group->parent;
493 } while (group);
494}
495
496static void walk_groups(up_f up, void *data, struct tmigr_cpu *tmc)
497{
498 lockdep_assert_held(&tmc->lock);
499
500 __walk_groups(up, data, tmc);
501}
502
503/**
504 * struct tmigr_walk - data required for walking the hierarchy
505 * @nextexp: Next CPU event expiry information which is handed into
506 * the timer migration code by the timer code
507 * (get_next_timer_interrupt())
508 * @firstexp: Contains the first event expiry information when last
509 * active CPU of hierarchy is on the way to idle to make
510 * sure CPU will be back in time.
511 * @evt: Pointer to tmigr_event which needs to be queued (of idle
512 * child group)
513 * @childmask: childmask of child group
 514 * @remote: Is set when the new timer path is executed in
515 * tmigr_handle_remote_cpu()
516 */
517struct tmigr_walk {
518 u64 nextexp;
519 u64 firstexp;
520 struct tmigr_event *evt;
521 u8 childmask;
522 bool remote;
523};
524
525/**
526 * struct tmigr_remote_data - data required for remote expiry hierarchy walk
527 * @basej: timer base in jiffies
528 * @now: timer base monotonic
529 * @firstexp: returns expiry of the first timer in the idle timer
530 * migration hierarchy to make sure the timer is handled in
531 * time; it is stored in the per CPU tmigr_cpu struct of
532 * CPU which expires remote timers
533 * @childmask: childmask of child group
534 * @check: is set if there is the need to handle remote timers;
535 * required in tmigr_requires_handle_remote() only
 536 * @tmc_active: this flag indicates whether the CPU which triggers
537 * the hierarchy walk is !idle in the timer migration
538 * hierarchy. When the CPU is idle and the whole hierarchy is
539 * idle, only the first event of the top level has to be
540 * considered.
541 */
542struct tmigr_remote_data {
543 unsigned long basej;
544 u64 now;
545 u64 firstexp;
546 u8 childmask;
547 bool check;
548 bool tmc_active;
549};
550
551/*
552 * Returns the next event of the timerqueue @group->events
553 *
 554 * Removes timers with the ignore flag set and updates next_expiry of the group.
 555 * Values of the group event are updated in tmigr_update_events() only.
556 */
557static struct tmigr_event *tmigr_next_groupevt(struct tmigr_group *group)
558{
559 struct timerqueue_node *node = NULL;
560 struct tmigr_event *evt = NULL;
561
562 lockdep_assert_held(&group->lock);
563
564 WRITE_ONCE(group->next_expiry, KTIME_MAX);
565
566 while ((node = timerqueue_getnext(&group->events))) {
567 evt = container_of(node, struct tmigr_event, nextevt);
568
569 if (!evt->ignore) {
570 WRITE_ONCE(group->next_expiry, evt->nextevt.expires);
571 return evt;
572 }
573
574 /*
575 * Remove next timers with ignore flag, because the group lock
576 * is held anyway
577 */
578 if (!timerqueue_del(&group->events, node))
579 break;
580 }
581
582 return NULL;
583}
584
585/*
 586 * Return the next event (with an expiry equal to or before @now)
 587 *
 588 * The returned event is also removed from the queue.
589 */
590static struct tmigr_event *tmigr_next_expired_groupevt(struct tmigr_group *group,
591 u64 now)
592{
593 struct tmigr_event *evt = tmigr_next_groupevt(group);
594
595 if (!evt || now < evt->nextevt.expires)
596 return NULL;
597
598 /*
599 * The event is ready to expire. Remove it and update next group event.
600 */
601 timerqueue_del(&group->events, &evt->nextevt);
602 tmigr_next_groupevt(group);
603
604 return evt;
605}
606
607static u64 tmigr_next_groupevt_expires(struct tmigr_group *group)
608{
609 struct tmigr_event *evt;
610
611 evt = tmigr_next_groupevt(group);
612
613 if (!evt)
614 return KTIME_MAX;
615 else
616 return evt->nextevt.expires;
617}
618
619static bool tmigr_active_up(struct tmigr_group *group,
620 struct tmigr_group *child,
621 void *ptr)
622{
623 union tmigr_state curstate, newstate;
624 struct tmigr_walk *data = ptr;
625 bool walk_done;
626 u8 childmask;
627
628 childmask = data->childmask;
629 /*
630 * No memory barrier is required here in contrast to
631 * tmigr_inactive_up(), as the group state change does not depend on the
632 * child state.
633 */
634 curstate.state = atomic_read(&group->migr_state);
635
636 do {
637 newstate = curstate;
638 walk_done = true;
639
640 if (newstate.migrator == TMIGR_NONE) {
641 newstate.migrator = childmask;
642
643 /* Changes need to be propagated */
644 walk_done = false;
645 }
646
647 newstate.active |= childmask;
648 newstate.seq++;
649
650 } while (!atomic_try_cmpxchg(&group->migr_state, &curstate.state, newstate.state));
651
652 if ((walk_done == false) && group->parent)
653 data->childmask = group->childmask;
654
655 /*
656 * The group is active (again). The group event might be still queued
657 * into the parent group's timerqueue but can now be handled by the
658 * migrator of this group. Therefore the ignore flag for the group event
659 * is updated to reflect this.
660 *
 661 * The update of the ignore flag in the active path is done lockless. In
 662 * the worst case the migrator of the parent group observes the change too
 663 * late and expires remotely all events belonging to this group. The
 664 * lock is held while updating the ignore flag in the idle path. So this
 665 * state change will not be lost.
666 */
667 group->groupevt.ignore = true;
668
669 trace_tmigr_group_set_cpu_active(group, newstate, childmask);
670
671 return walk_done;
672}
673
674static void __tmigr_cpu_activate(struct tmigr_cpu *tmc)
675{
676 struct tmigr_walk data;
677
678 data.childmask = tmc->childmask;
679
680 trace_tmigr_cpu_active(tmc);
681
682 tmc->cpuevt.ignore = true;
683 WRITE_ONCE(tmc->wakeup, KTIME_MAX);
684
685 walk_groups(&tmigr_active_up, &data, tmc);
686}
687
688/**
689 * tmigr_cpu_activate() - set this CPU active in timer migration hierarchy
690 *
691 * Call site timer_clear_idle() is called with interrupts disabled.
692 */
693void tmigr_cpu_activate(void)
694{
695 struct tmigr_cpu *tmc = this_cpu_ptr(&tmigr_cpu);
696
697 if (tmigr_is_not_available(tmc))
698 return;
699
700 if (WARN_ON_ONCE(!tmc->idle))
701 return;
702
703 raw_spin_lock(&tmc->lock);
704 tmc->idle = false;
705 __tmigr_cpu_activate(tmc);
706 raw_spin_unlock(&tmc->lock);
707}
708
709/*
710 * Returns true, if there is nothing to be propagated to the next level
711 *
 712 * @data->firstexp is set to the expiry of the first global event of the (top
 713 * level of the) hierarchy, but only when the hierarchy is completely idle.
714 *
715 * The child and group states need to be read under the lock, to prevent a race
716 * against a concurrent tmigr_inactive_up() run when the last CPU goes idle. See
717 * also section "Prevent race between new event and last CPU going inactive" in
718 * the documentation at the top.
719 *
720 * This is the only place where the group event expiry value is set.
721 */
722static
723bool tmigr_update_events(struct tmigr_group *group, struct tmigr_group *child,
724 struct tmigr_walk *data)
725{
726 struct tmigr_event *evt, *first_childevt;
727 union tmigr_state childstate, groupstate;
728 bool remote = data->remote;
729 bool walk_done = false;
730 u64 nextexp;
731
732 if (child) {
733 raw_spin_lock(&child->lock);
734 raw_spin_lock_nested(&group->lock, SINGLE_DEPTH_NESTING);
735
736 childstate.state = atomic_read(&child->migr_state);
737 groupstate.state = atomic_read(&group->migr_state);
738
739 if (childstate.active) {
740 walk_done = true;
741 goto unlock;
742 }
743
744 first_childevt = tmigr_next_groupevt(child);
745 nextexp = child->next_expiry;
746 evt = &child->groupevt;
747
748 evt->ignore = (nextexp == KTIME_MAX) ? true : false;
749 } else {
750 nextexp = data->nextexp;
751
752 first_childevt = evt = data->evt;
753
754 /*
755 * Walking the hierarchy is required in any case when a
 756 * remote expiry was done before. This ensures that already
 757 * queued events in non active groups are not lost (see section
758 * "Required event and timerqueue update after a remote
759 * expiry" in the documentation at the top).
760 *
 761 * The two call sites which are executed without a prior remote
 762 * expiry are not prevented from propagating changes through
763 * the hierarchy by the return:
764 * - When entering this path by tmigr_new_timer(), @evt->ignore
765 * is never set.
766 * - tmigr_inactive_up() takes care of the propagation by
767 * itself and ignores the return value. But an immediate
768 * return is possible if there is a parent, sparing group
769 * locking at this level, because the upper walking call to
770 * the parent will take care about removing this event from
771 * within the group and update next_expiry accordingly.
772 *
 773 * However if there is no parent, i.e. the hierarchy has only a
774 * single level so @group is the top level group, make sure the
775 * first event information of the group is updated properly and
776 * also handled properly, so skip this fast return path.
777 */
778 if (evt->ignore && !remote && group->parent)
779 return true;
780
781 raw_spin_lock(&group->lock);
782
783 childstate.state = 0;
784 groupstate.state = atomic_read(&group->migr_state);
785 }
786
787 /*
788 * If the child event is already queued in the group, remove it from the
 789 * queue only when the expiry time has changed or when it could be ignored.
790 */
791 if (timerqueue_node_queued(&evt->nextevt)) {
792 if ((evt->nextevt.expires == nextexp) && !evt->ignore) {
793 /* Make sure not to miss a new CPU event with the same expiry */
794 evt->cpu = first_childevt->cpu;
 795 goto check_toplvl;
 796 }
797
798 if (!timerqueue_del(&group->events, &evt->nextevt))
799 WRITE_ONCE(group->next_expiry, KTIME_MAX);
800 }
801
802 if (evt->ignore) {
803 /*
804 * When the next child event could be ignored (nextexp is
805 * KTIME_MAX) and there was no remote timer handling before or
806 * the group is already active, there is no need to walk the
807 * hierarchy even if there is a parent group.
808 *
809 * The other way round: even if the event could be ignored, but
810 * if a remote timer handling was executed before and the group
811 * is not active, walking the hierarchy is required to not miss
812 * an enqueued timer in the non active group. The enqueued timer
813 * of the group needs to be propagated to a higher level to
814 * ensure it is handled.
815 */
816 if (!remote || groupstate.active)
817 walk_done = true;
818 } else {
819 evt->nextevt.expires = nextexp;
820 evt->cpu = first_childevt->cpu;
821
822 if (timerqueue_add(&group->events, &evt->nextevt))
823 WRITE_ONCE(group->next_expiry, nextexp);
824 }
825
826check_toplvl:
827 if (!group->parent && (groupstate.migrator == TMIGR_NONE)) {
828 walk_done = true;
829
830 /*
 831 * Nothing to do when the update was done during remote timer
 832 * handling. The first timer in the top level group which needs to
 833 * be handled when the top level group is not active is calculated
 834 * directly in tmigr_handle_remote_up().
835 */
836 if (remote)
837 goto unlock;
838
839 /*
 840 * The top level group is idle and it has to be ensured that the
 841 * global timers are handled in time. (This could be optimized
 842 * by keeping track of the last globally scheduled event and only
 843 * arming it on the CPU if the new event is earlier. Not sure if
 844 * it's worth the complexity.)
845 */
846 data->firstexp = tmigr_next_groupevt_expires(group);
847 }
848
849 trace_tmigr_update_events(child, group, childstate, groupstate,
850 nextexp);
851
852unlock:
853 raw_spin_unlock(&group->lock);
854
855 if (child)
856 raw_spin_unlock(&child->lock);
857
858 return walk_done;
859}
860
861static bool tmigr_new_timer_up(struct tmigr_group *group,
862 struct tmigr_group *child,
863 void *ptr)
864{
865 struct tmigr_walk *data = ptr;
866
867 return tmigr_update_events(group, child, data);
868}
869
870/*
871 * Returns the expiry of the next timer that needs to be handled. KTIME_MAX is
 872 * returned if an active CPU will handle all the timer migration hierarchy
873 * timers.
874 */
875static u64 tmigr_new_timer(struct tmigr_cpu *tmc, u64 nextexp)
876{
877 struct tmigr_walk data = { .nextexp = nextexp,
878 .firstexp = KTIME_MAX,
879 .evt = &tmc->cpuevt };
880
881 lockdep_assert_held(&tmc->lock);
882
883 if (tmc->remote)
884 return KTIME_MAX;
885
886 trace_tmigr_cpu_new_timer(tmc);
887
888 tmc->cpuevt.ignore = false;
889 data.remote = false;
890
891 walk_groups(&tmigr_new_timer_up, &data, tmc);
892
893 /* If there is a new first global event, make sure it is handled */
894 return data.firstexp;
895}
896
897static void tmigr_handle_remote_cpu(unsigned int cpu, u64 now,
898 unsigned long jif)
899{
900 struct timer_events tevt;
901 struct tmigr_walk data;
902 struct tmigr_cpu *tmc;
903
904 tmc = per_cpu_ptr(&tmigr_cpu, cpu);
905
906 raw_spin_lock_irq(&tmc->lock);
907
908 /*
909 * If the remote CPU is offline then the timers have been migrated to
910 * another CPU.
911 *
 912 * If tmigr_cpu::remote is set, another CPU is already expiring the
 913 * timers of the remote CPU at the moment.
914 *
915 * If tmigr_event::ignore is set, then the CPU returns from idle and
916 * takes care of its timers.
917 *
918 * If the next event expires in the future, then the event has been
919 * updated and there are no timers to expire right now. The CPU which
 920 * updated the event takes care of it when the hierarchy is completely
921 * idle. Otherwise the migrator does it as the event is enqueued.
922 */
923 if (!tmc->online || tmc->remote || tmc->cpuevt.ignore ||
924 now < tmc->cpuevt.nextevt.expires) {
925 raw_spin_unlock_irq(&tmc->lock);
926 return;
927 }
928
929 trace_tmigr_handle_remote_cpu(tmc);
930
931 tmc->remote = true;
932 WRITE_ONCE(tmc->wakeup, KTIME_MAX);
933
934 /* Drop the lock to allow the remote CPU to exit idle */
935 raw_spin_unlock_irq(&tmc->lock);
936
937 if (cpu != smp_processor_id())
938 timer_expire_remote(cpu);
939
940 /*
941 * Lock ordering needs to be preserved - timer_base locks before tmigr
942 * related locks (see section "Locking rules" in the documentation at
 943 * the top). While fetching the next timer interrupt, tmc->lock also
944 * needs to be held. Otherwise there is a possible race window against
945 * the CPU itself when it comes out of idle, updates the first timer in
946 * the hierarchy and goes back to idle.
947 *
948 * timer base locks are dropped as fast as possible: After checking
949 * whether the remote CPU went offline in the meantime and after
950 * fetching the next remote timer interrupt. Dropping the locks as fast
951 * as possible keeps the locking region small and prevents holding
952 * several (unnecessary) locks during walking the hierarchy for updating
953 * the timerqueue and group events.
954 */
955 local_irq_disable();
956 timer_lock_remote_bases(cpu);
957 raw_spin_lock(&tmc->lock);
958
959 /*
960 * When the CPU went offline in the meantime, no hierarchy walk has to
961 * be done for updating the queued events, because the walk was
962 * already done during marking the CPU offline in the hierarchy.
963 *
 964 * When the CPU is no longer idle, the CPU takes care of its timers and
965 * also of the timers in the hierarchy.
966 *
967 * (See also section "Required event and timerqueue update after a
968 * remote expiry" in the documentation at the top)
969 */
970 if (!tmc->online || !tmc->idle) {
971 timer_unlock_remote_bases(cpu);
972 goto unlock;
973 }
974
975 /* next event of CPU */
976 fetch_next_timer_interrupt_remote(jif, now, &tevt, cpu);
977 timer_unlock_remote_bases(cpu);
978
979 data.nextexp = tevt.global;
980 data.firstexp = KTIME_MAX;
981 data.evt = &tmc->cpuevt;
982 data.remote = true;
983
984 /*
985 * The update is done even when there is no 'new' global timer pending
986 * on the remote CPU (see section "Required event and timerqueue update
987 * after a remote expiry" in the documentation at the top)
988 */
989 walk_groups(&tmigr_new_timer_up, &data, tmc);
990
991unlock:
992 tmc->remote = false;
993 raw_spin_unlock_irq(&tmc->lock);
994}
995
996static bool tmigr_handle_remote_up(struct tmigr_group *group,
997 struct tmigr_group *child,
998 void *ptr)
999{
1000 struct tmigr_remote_data *data = ptr;
1001 struct tmigr_event *evt;
1002 unsigned long jif;
1003 u8 childmask;
1004 u64 now;
1005
1006 jif = data->basej;
1007 now = data->now;
1008
1009 childmask = data->childmask;
1010
 1011 trace_tmigr_handle_remote(group);
1012again:
1013 /*
1014 * Handle the group only if @childmask is the migrator or if the
1015 * group has no migrator. Otherwise the group is active and is
1016 * handled by its own migrator.
1017 */
1018 if (!tmigr_check_migrator(group, childmask))
1019 return true;
1020
1021 raw_spin_lock_irq(&group->lock);
1022
1023 evt = tmigr_next_expired_groupevt(group, now);
1024
1025 if (evt) {
1026 unsigned int remote_cpu = evt->cpu;
1027
1028 raw_spin_unlock_irq(&group->lock);
1029
1030 tmigr_handle_remote_cpu(remote_cpu, now, jif);
1031
1032 /* check if there is another event, that needs to be handled */
1033 goto again;
1034 }
1035
1036 /*
 1037 * Update the childmask for the next level and keep track of the expiry
1038 * of the first event that needs to be handled (group->next_expiry was
1039 * updated by tmigr_next_expired_groupevt(), next was set by
1040 * tmigr_handle_remote_cpu()).
1041 */
1042 data->childmask = group->childmask;
1043 data->firstexp = group->next_expiry;
1044
1045 raw_spin_unlock_irq(&group->lock);
1046
1047 return false;
1048}
1049
1050/**
1051 * tmigr_handle_remote() - Handle global timers of remote idle CPUs
1052 *
1053 * Called from the timer soft interrupt with interrupts enabled.
1054 */
1055void tmigr_handle_remote(void)
1056{
1057 struct tmigr_cpu *tmc = this_cpu_ptr(&tmigr_cpu);
1058 struct tmigr_remote_data data;
1059
1060 if (tmigr_is_not_available(tmc))
1061 return;
1062
1063 data.childmask = tmc->childmask;
1064 data.firstexp = KTIME_MAX;
1065
1066 /*
 1067 * NOTE: This is a duplicate check because the migrator test will be done
1068 * in tmigr_handle_remote_up() anyway. Keep this check to speed up the
1069 * return when nothing has to be done.
1070 */
1071 if (!tmigr_check_migrator(tmc->tmgroup, tmc->childmask)) {
1072 /*
1073 * If this CPU was an idle migrator, make sure to clear its wakeup
1074 * value so it won't chase timers that have already expired elsewhere.
1075 * This avoids endless requeue from tmigr_new_timer().
1076 */
1077 if (READ_ONCE(tmc->wakeup) == KTIME_MAX)
1078 return;
1079 }
1080
1081 data.now = get_jiffies_update(&data.basej);
1082
1083 /*
1084 * Update @tmc->wakeup only at the end and do not reset @tmc->wakeup to
 1085 * KTIME_MAX. Even if tmc->lock is not held during the whole remote
 1086 * handling, it is fine for tmc->wakeup to be stale as this is called in
 1087 * interrupt context and tick_nohz_next_event() is executed in the interrupt
 1088 * exit path only after processing the last pending interrupt.
1089 */
1090
1091 __walk_groups(&tmigr_handle_remote_up, &data, tmc);
1092
1093 raw_spin_lock_irq(&tmc->lock);
1094 WRITE_ONCE(tmc->wakeup, data.firstexp);
1095 raw_spin_unlock_irq(&tmc->lock);
1096}
1097
1098static bool tmigr_requires_handle_remote_up(struct tmigr_group *group,
1099 struct tmigr_group *child,
1100 void *ptr)
1101{
1102 struct tmigr_remote_data *data = ptr;
1103 u8 childmask;
1104
1105 childmask = data->childmask;
1106
1107 /*
1108 * Handle the group only if the child is the migrator or if the group
1109 * has no migrator. Otherwise the group is active and is handled by its
1110 * own migrator.
1111 */
1112 if (!tmigr_check_migrator(group, childmask))
1113 return true;
1114
1115 /*
1116 * When there is a parent group and the CPU which triggered the
 1117 * hierarchy walk is not active, continue the walk to reach the top level
1118 * group before reading the next_expiry value.
1119 */
1120 if (group->parent && !data->tmc_active)
1121 goto out;
1122
1123 /*
1124 * The lock is required on 32bit architectures to read the variable
1125 * consistently with a concurrent writer. On 64bit the lock is not
1126 * required because the read operation is not split and so it is always
1127 * consistent.
1128 */
1129 if (IS_ENABLED(CONFIG_64BIT)) {
1130 data->firstexp = READ_ONCE(group->next_expiry);
1131 if (data->now >= data->firstexp) {
1132 data->check = true;
1133 return true;
1134 }
1135 } else {
1136 raw_spin_lock(&group->lock);
1137 data->firstexp = group->next_expiry;
1138 if (data->now >= group->next_expiry) {
1139 data->check = true;
1140 raw_spin_unlock(&group->lock);
1141 return true;
1142 }
1143 raw_spin_unlock(&group->lock);
1144 }
1145
1146out:
1147 /* Update of childmask for the next level */
1148 data->childmask = group->childmask;
1149 return false;
1150}
1151
1152/**
1153 * tmigr_requires_handle_remote() - Check the need of remote timer handling
1154 *
1155 * Must be called with interrupts disabled.
1156 */
1157bool tmigr_requires_handle_remote(void)
1158{
1159 struct tmigr_cpu *tmc = this_cpu_ptr(&tmigr_cpu);
1160 struct tmigr_remote_data data;
1161 unsigned long jif;
1162 bool ret = false;
1163
1164 if (tmigr_is_not_available(tmc))
1165 return ret;
1166
1167 data.now = get_jiffies_update(&jif);
1168 data.childmask = tmc->childmask;
1169 data.firstexp = KTIME_MAX;
1170 data.tmc_active = !tmc->idle;
1171 data.check = false;
1172
1173 /*
1174 * If the CPU is active, walk the hierarchy to check whether a remote
1175 * expiry is required.
1176 *
1177 * Check is done lockless as interrupts are disabled and @tmc->idle is
1178 * set only by the local CPU.
1179 */
1180 if (!tmc->idle) {
1181 __walk_groups(&tmigr_requires_handle_remote_up, &data, tmc);
1182
1183 return data.check;
1184 }
1185
1186 /*
1187 * When the CPU is idle, compare @tmc->wakeup with @data.now. The lock
1188 * is required on 32bit architectures to read the variable consistently
1189 * with a concurrent writer. On 64bit the lock is not required because
1190 * the read operation is not split and so it is always consistent.
1191 */
1192 if (IS_ENABLED(CONFIG_64BIT)) {
1193 if (data.now >= READ_ONCE(tmc->wakeup))
1194 return true;
1195 } else {
1196 raw_spin_lock(&tmc->lock);
1197 if (data.now >= tmc->wakeup)
1198 ret = true;
1199 raw_spin_unlock(&tmc->lock);
1200 }
1201
1202 return ret;
1203}
1204
1205/**
1206 * tmigr_cpu_new_timer() - enqueue next global timer into hierarchy (idle tmc)
1207 * @nextexp: Next expiry of global timer (or KTIME_MAX if not)
1208 *
1209 * The CPU is already deactivated in the timer migration
1210 * hierarchy. tick_nohz_get_sleep_length() calls tick_nohz_next_event()
1211 * and thereby the timer idle path is executed once more. @tmc->wakeup
 1212 * holds the first timer when the timer migration hierarchy is
1213 * completely idle.
1214 *
1215 * Returns the first timer that needs to be handled by this CPU or KTIME_MAX if
1216 * nothing needs to be done.
1217 */
1218u64 tmigr_cpu_new_timer(u64 nextexp)
1219{
1220 struct tmigr_cpu *tmc = this_cpu_ptr(&tmigr_cpu);
1221 u64 ret;
1222
1223 if (tmigr_is_not_available(tmc))
1224 return nextexp;
1225
1226 raw_spin_lock(&tmc->lock);
1227
1228 ret = READ_ONCE(tmc->wakeup);
1229 if (nextexp != KTIME_MAX) {
1230 if (nextexp != tmc->cpuevt.nextevt.expires ||
1231 tmc->cpuevt.ignore) {
1232 ret = tmigr_new_timer(tmc, nextexp);
1233 }
1234 }
1235 /*
1236 * Make sure the reevaluation of timers in idle path will not miss an
1237 * event.
1238 */
1239 WRITE_ONCE(tmc->wakeup, ret);
1240
 1241 trace_tmigr_cpu_new_timer_idle(tmc, nextexp);
1242 raw_spin_unlock(&tmc->lock);
1243 return ret;
1244}
1245
1246static bool tmigr_inactive_up(struct tmigr_group *group,
1247 struct tmigr_group *child,
1248 void *ptr)
1249{
1250 union tmigr_state curstate, newstate, childstate;
1251 struct tmigr_walk *data = ptr;
1252 bool walk_done;
1253 u8 childmask;
1254
1255 childmask = data->childmask;
1256 childstate.state = 0;
1257
1258 /*
1259 * The memory barrier is paired with the cmpxchg() in tmigr_active_up()
1260 * to make sure the updates of child and group states are ordered. The
1261 * ordering is mandatory, as the group state change depends on the child
1262 * state.
1263 */
1264 curstate.state = atomic_read_acquire(&group->migr_state);
1265
1266 for (;;) {
1267 if (child)
1268 childstate.state = atomic_read(&child->migr_state);
1269
1270 newstate = curstate;
1271 walk_done = true;
1272
1273 /* Reset active bit when the child is no longer active */
1274 if (!childstate.active)
1275 newstate.active &= ~childmask;
1276
1277 if (newstate.migrator == childmask) {
1278 /*
1279 * Find a new migrator for the group, because the child
1280 * group is idle!
1281 */
1282 if (!childstate.active) {
1283 unsigned long new_migr_bit, active = newstate.active;
1284
1285 new_migr_bit = find_first_bit(&active, BIT_CNT);
1286
1287 if (new_migr_bit != BIT_CNT) {
1288 newstate.migrator = BIT(new_migr_bit);
1289 } else {
1290 newstate.migrator = TMIGR_NONE;
1291
1292 /* Changes need to be propagated */
1293 walk_done = false;
1294 }
1295 }
1296 }
1297
1298 newstate.seq++;
1299
1300 WARN_ON_ONCE((newstate.migrator != TMIGR_NONE) && !(newstate.active));
1301
1302 if (atomic_try_cmpxchg(&group->migr_state, &curstate.state,
1303 newstate.state))
1304 break;
1305
1306 /*
1307 * The memory barrier is paired with the cmpxchg() in
1308 * tmigr_active_up() to make sure the updates of child and group
1309 * states are ordered. It is required only when the above
1310 * try_cmpxchg() fails.
1311 */
1312 smp_mb__after_atomic();
1313 }
1314
1315 data->remote = false;
1316
1317 /* Event Handling */
1318 tmigr_update_events(group, child, data);
1319
1320 if (group->parent && (walk_done == false))
1321 data->childmask = group->childmask;
1322
1323 /*
1324 * data->firstexp was set by tmigr_update_events() and contains the
1325 * expiry of the first global event which needs to be handled. It
1326 * differs from KTIME_MAX if:
1327 * - group is the top level group and
1328 * - group is idle (which means CPU was the last active CPU in the
1329 * hierarchy) and
1330 * - there is a pending event in the hierarchy
1331 */
1332 WARN_ON_ONCE(data->firstexp != KTIME_MAX && group->parent);
1333
1334 trace_tmigr_group_set_cpu_inactive(group, newstate, childmask);
1335
1336 return walk_done;
1337}
1338
1339static u64 __tmigr_cpu_deactivate(struct tmigr_cpu *tmc, u64 nextexp)
1340{
1341 struct tmigr_walk data = { .nextexp = nextexp,
1342 .firstexp = KTIME_MAX,
1343 .evt = &tmc->cpuevt,
1344 .childmask = tmc->childmask };
1345
1346 /*
1347 * If nextexp is KTIME_MAX, the CPU event will be ignored because the
1348 * local timer expires before the global timer, no global timer is set
 1349 * or the CPU goes offline.
1350 */
1351 if (nextexp != KTIME_MAX)
1352 tmc->cpuevt.ignore = false;
1353
1354 walk_groups(&tmigr_inactive_up, &data, tmc);
1355 return data.firstexp;
1356}
1357
1358/**
1359 * tmigr_cpu_deactivate() - Put current CPU into inactive state
1360 * @nextexp: The next global timer expiry of the current CPU
1361 *
1362 * Must be called with interrupts disabled.
1363 *
1364 * Return: the next event expiry of the current CPU or the next event expiry
1365 * from the hierarchy if this CPU is the top level migrator or the hierarchy is
1366 * completely idle.
1367 */
1368u64 tmigr_cpu_deactivate(u64 nextexp)
1369{
1370 struct tmigr_cpu *tmc = this_cpu_ptr(&tmigr_cpu);
1371 u64 ret;
1372
1373 if (tmigr_is_not_available(tmc))
1374 return nextexp;
1375
1376 raw_spin_lock(&tmc->lock);
1377
1378 ret = __tmigr_cpu_deactivate(tmc, nextexp);
1379
1380 tmc->idle = true;
1381
1382 /*
1383 * Make sure the reevaluation of timers in idle path will not miss an
1384 * event.
1385 */
1386 WRITE_ONCE(tmc->wakeup, ret);
1387
 1388 trace_tmigr_cpu_idle(tmc, nextexp);
1389 raw_spin_unlock(&tmc->lock);
1390 return ret;
1391}
1392
1393/**
1394 * tmigr_quick_check() - Quick forecast of next tmigr event when CPU wants to
1395 * go idle
1396 * @nextevt: The next global timer expiry of the current CPU
1397 *
1398 * Return:
1399 * * KTIME_MAX - when it is probable that nothing has to be done (not
1400 * the only one in the level 0 group; and if it is the
1401 * only one in level 0 group, but there are more than a
1402 * single group active on the way to top level)
 1403 * * nextevt - when the CPU is offline and has to handle the timer on its own
1404 * or when on the way to top in every group only a single
1405 * child is active but @nextevt is before the lowest
1406 * next_expiry encountered while walking up to top level.
1407 * * next_expiry - value of lowest expiry encountered while walking groups
1408 * if only a single child is active on each and @nextevt
1409 * is after this lowest expiry.
1410 */
1411u64 tmigr_quick_check(u64 nextevt)
1412{
1413 struct tmigr_cpu *tmc = this_cpu_ptr(&tmigr_cpu);
1414 struct tmigr_group *group = tmc->tmgroup;
1415
1416 if (tmigr_is_not_available(tmc))
1417 return nextevt;
1418
1419 if (WARN_ON_ONCE(tmc->idle))
1420 return nextevt;
1421
1422 if (!tmigr_check_migrator_and_lonely(tmc->tmgroup, tmc->childmask))
1423 return KTIME_MAX;
1424
1425 do {
1426 if (!tmigr_check_lonely(group)) {
1427 return KTIME_MAX;
1428 } else {
1429 /*
 1430 * Since the current CPU is active, events may not be sorted
 1431 * from bottom to the top because the CPU's event is ignored
 1432 * up to the top and its sibling's events are not propagated upwards.
1433 * Thus keep track of the lowest observed expiry.
1434 */
1435 nextevt = min_t(u64, nextevt, READ_ONCE(group->next_expiry));
1436 if (!group->parent)
1437 return nextevt;
1438 }
1439 group = group->parent;
1440 } while (group);
1441
1442 return KTIME_MAX;
1443}
1444
1445static void tmigr_init_group(struct tmigr_group *group, unsigned int lvl,
1446 int node)
1447{
1448 union tmigr_state s;
1449
1450 raw_spin_lock_init(&group->lock);
1451
1452 group->level = lvl;
1453 group->numa_node = lvl < tmigr_crossnode_level ? node : NUMA_NO_NODE;
1454
1455 group->num_children = 0;
1456
1457 s.migrator = TMIGR_NONE;
1458 s.active = 0;
1459 s.seq = 0;
1460 atomic_set(&group->migr_state, s.state);
1461
1462 timerqueue_init_head(&group->events);
1463 timerqueue_init(&group->groupevt.nextevt);
1464 group->groupevt.nextevt.expires = KTIME_MAX;
1465 WRITE_ONCE(group->next_expiry, KTIME_MAX);
1466 group->groupevt.ignore = true;
1467}
1468
1469static struct tmigr_group *tmigr_get_group(unsigned int cpu, int node,
1470 unsigned int lvl)
1471{
1472 struct tmigr_group *tmp, *group = NULL;
1473
1474 lockdep_assert_held(&tmigr_mutex);
1475
1476 /* Try to attach to an existing group first */
1477 list_for_each_entry(tmp, &tmigr_level_list[lvl], list) {
1478 /*
1479 * If @lvl is below the cross NUMA node level, check whether
1480 * this group belongs to the same NUMA node.
1481 */
1482 if (lvl < tmigr_crossnode_level && tmp->numa_node != node)
1483 continue;
1484
1485 /* Capacity left? */
1486 if (tmp->num_children >= TMIGR_CHILDREN_PER_GROUP)
1487 continue;
1488
1489 /*
1490 * TODO: A possible further improvement: Make sure that all CPU
1491 * siblings end up in the same group of the lowest level of the
 1492 * hierarchy. Relying on the topology sibling mask would be a
 1493 * reasonable solution.
1494 */
1495
1496 group = tmp;
1497 break;
1498 }
1499
1500 if (group)
1501 return group;
1502
1503 /* Allocate and set up a new group */
1504 group = kzalloc_node(sizeof(*group), GFP_KERNEL, node);
1505 if (!group)
1506 return ERR_PTR(-ENOMEM);
1507
1508 tmigr_init_group(group, lvl, node);
1509
1510 /* Setup successful. Add it to the hierarchy */
1511 list_add(&group->list, &tmigr_level_list[lvl]);
 1512 trace_tmigr_group_set(group);
1513 return group;
1514}
1515
1516static void tmigr_connect_child_parent(struct tmigr_group *child,
1517 struct tmigr_group *parent)
1518{
1519 union tmigr_state childstate;
1520
1521 raw_spin_lock_irq(&child->lock);
1522 raw_spin_lock_nested(&parent->lock, SINGLE_DEPTH_NESTING);
1523
1524 child->parent = parent;
1525 child->childmask = BIT(parent->num_children++);
1526
1527 raw_spin_unlock(&parent->lock);
1528 raw_spin_unlock_irq(&child->lock);
1529
1530 trace_tmigr_connect_child_parent(child);
1531
1532 /*
1533 * To prevent inconsistent states, active children need to be active in
1534 * the new parent as well. Inactive children are already marked inactive
1535 * in the parent group:
1536 *
1537 * * When new groups were created by tmigr_setup_groups() starting from
 1538 * the lowest level (and not higher than one level below the current
1539 * top level), then they are not active. They will be set active when
1540 * the new online CPU comes active.
1541 *
1542 * * But if a new group above the current top level is required, it is
1543 * mandatory to propagate the active state of the already existing
1544 * child to the new parent. So tmigr_connect_child_parent() is
1545 * executed with the formerly top level group (child) and the newly
1546 * created group (parent).
1547 */
1548 childstate.state = atomic_read(&child->migr_state);
1549 if (childstate.migrator != TMIGR_NONE) {
1550 struct tmigr_walk data;
1551
1552 data.childmask = child->childmask;
1553
1554 /*
 1555 * Only one new level can be added at a time. When connecting the
 1556 * child and the parent and setting the child active while the parent
 1557 * is inactive, the parent needs to be the uppermost
 1558 * level. Otherwise something went wrong!
1559 */
1560 WARN_ON(!tmigr_active_up(parent, child, &data) && parent->parent);
1561 }
1562}
1563
1564static int tmigr_setup_groups(unsigned int cpu, unsigned int node)
1565{
1566 struct tmigr_group *group, *child, **stack;
1567 int top = 0, err = 0, i = 0;
1568 struct list_head *lvllist;
1569
1570 stack = kcalloc(tmigr_hierarchy_levels, sizeof(*stack), GFP_KERNEL);
1571 if (!stack)
1572 return -ENOMEM;
1573
1574 do {
1575 group = tmigr_get_group(cpu, node, i);
1576 if (IS_ERR(group)) {
1577 err = PTR_ERR(group);
1578 break;
1579 }
1580
1581 top = i;
1582 stack[i++] = group;
1583
1584 /*
 1585 * When fewer CPUs of a system are booted than are
 1586 * available, not all calculated hierarchy levels are required.
1587 *
1588 * The loop is aborted as soon as the highest level, which might
1589 * be different from tmigr_hierarchy_levels, contains only a
1590 * single group.
1591 */
1592 if (group->parent || i == tmigr_hierarchy_levels ||
1593 (list_empty(&tmigr_level_list[i]) &&
1594 list_is_singular(&tmigr_level_list[i - 1])))
1595 break;
1596
1597 } while (i < tmigr_hierarchy_levels);
1598
 1599 while (i > 0) {
1600 group = stack[--i];
1601
1602 if (err < 0) {
1603 list_del(&group->list);
1604 kfree(group);
1605 continue;
1606 }
1607
1608 WARN_ON_ONCE(i != group->level);
1609
1610 /*
1611 * Update tmc -> group / child -> group connection
1612 */
1613 if (i == 0) {
1614 struct tmigr_cpu *tmc = this_cpu_ptr(&tmigr_cpu);
1615
1616 raw_spin_lock_irq(&group->lock);
1617
1618 tmc->tmgroup = group;
1619 tmc->childmask = BIT(group->num_children++);
1620
1621 raw_spin_unlock_irq(&group->lock);
1622
1623 trace_tmigr_connect_cpu_parent(tmc);
1624
1625 /* There are no children that need to be connected */
1626 continue;
1627 } else {
1628 child = stack[i - 1];
1629 tmigr_connect_child_parent(child, group);
1630 }
1631
1632 /* check if uppermost level was newly created */
1633 if (top != i)
1634 continue;
1635
1636 WARN_ON_ONCE(top == 0);
1637
1638 lvllist = &tmigr_level_list[top];
1639 if (group->num_children == 1 && list_is_singular(lvllist)) {
1640 lvllist = &tmigr_level_list[top - 1];
1641 list_for_each_entry(child, lvllist, list) {
1642 if (child->parent)
1643 continue;
1644
1645 tmigr_connect_child_parent(child, group);
1646 }
1647 }
 1648 }
1649
1650 kfree(stack);
1651
1652 return err;
1653}
1654
1655static int tmigr_add_cpu(unsigned int cpu)
1656{
1657 int node = cpu_to_node(cpu);
1658 int ret;
1659
1660 mutex_lock(&tmigr_mutex);
1661 ret = tmigr_setup_groups(cpu, node);
1662 mutex_unlock(&tmigr_mutex);
1663
1664 return ret;
1665}
1666
1667static int tmigr_cpu_online(unsigned int cpu)
1668{
1669 struct tmigr_cpu *tmc = this_cpu_ptr(&tmigr_cpu);
1670 int ret;
1671
1672 /* First online attempt? Initialize CPU data */
1673 if (!tmc->tmgroup) {
1674 raw_spin_lock_init(&tmc->lock);
1675
1676 ret = tmigr_add_cpu(cpu);
1677 if (ret < 0)
1678 return ret;
1679
1680 if (tmc->childmask == 0)
1681 return -EINVAL;
1682
1683 timerqueue_init(&tmc->cpuevt.nextevt);
1684 tmc->cpuevt.nextevt.expires = KTIME_MAX;
1685 tmc->cpuevt.ignore = true;
1686 tmc->cpuevt.cpu = cpu;
1687
1688 tmc->remote = false;
1689 WRITE_ONCE(tmc->wakeup, KTIME_MAX);
1690 }
1691 raw_spin_lock_irq(&tmc->lock);
 1692 trace_tmigr_cpu_online(tmc);
1693 tmc->idle = timer_base_is_idle();
1694 if (!tmc->idle)
1695 __tmigr_cpu_activate(tmc);
1696 tmc->online = true;
1697 raw_spin_unlock_irq(&tmc->lock);
1698 return 0;
1699}
1700
1701/*
1702 * tmigr_trigger_active() - trigger a CPU to become active again
1703 *
1704 * This function is executed on a CPU which is part of cpu_online_mask, when the
1705 * last active CPU in the hierarchy is offlining. With this, it is ensured that
1706 * the other CPU is active and takes over the migrator duty.
1707 */
1708static long tmigr_trigger_active(void *unused)
1709{
1710 struct tmigr_cpu *tmc = this_cpu_ptr(&tmigr_cpu);
1711
1712 WARN_ON_ONCE(!tmc->online || tmc->idle);
1713
1714 return 0;
1715}
1716
1717static int tmigr_cpu_offline(unsigned int cpu)
1718{
1719 struct tmigr_cpu *tmc = this_cpu_ptr(&tmigr_cpu);
1720 int migrator;
1721 u64 firstexp;
1722
1723 raw_spin_lock_irq(&tmc->lock);
1724 tmc->online = false;
1725 WRITE_ONCE(tmc->wakeup, KTIME_MAX);
1726
1727 /*
 1728 * The CPU has to handle its local events on its own when on the way to
 1729 * offline; therefore the nextevt value is set to KTIME_MAX
1730 */
1731 firstexp = __tmigr_cpu_deactivate(tmc, KTIME_MAX);
 1732 trace_tmigr_cpu_offline(tmc);
1733 raw_spin_unlock_irq(&tmc->lock);
1734
1735 if (firstexp != KTIME_MAX) {
1736 migrator = cpumask_any_but(cpu_online_mask, cpu);
1737 work_on_cpu(migrator, tmigr_trigger_active, NULL);
1738 }
1739
1740 return 0;
1741}
1742
1743static int __init tmigr_init(void)
1744{
1745 unsigned int cpulvl, nodelvl, cpus_per_node, i;
1746 unsigned int nnodes = num_possible_nodes();
1747 unsigned int ncpus = num_possible_cpus();
1748 int ret = -ENOMEM;
1749
1750 BUILD_BUG_ON_NOT_POWER_OF_2(TMIGR_CHILDREN_PER_GROUP);
1751
1752 /* Nothing to do if running on UP */
1753 if (ncpus == 1)
1754 return 0;
1755
1756 /*
1757 * Calculate the required hierarchy levels. Unfortunately there is no
1758 * reliable information available, unless all possible CPUs have been
1759 * brought up and all NUMA nodes are populated.
1760 *
1761 * Estimate the number of levels with the number of possible nodes and
1762 * the number of possible CPUs. Assume CPUs are spread evenly across
1763 * nodes. We cannot rely on cpumask_of_node() because it only works for
1764 * online CPUs.
1765 */
1766 cpus_per_node = DIV_ROUND_UP(ncpus, nnodes);
1767
1768 /* Calc the hierarchy levels required to hold the CPUs of a node */
1769 cpulvl = DIV_ROUND_UP(order_base_2(cpus_per_node),
1770 ilog2(TMIGR_CHILDREN_PER_GROUP));
1771
1772 /* Calculate the extra levels to connect all nodes */
1773 nodelvl = DIV_ROUND_UP(order_base_2(nnodes),
1774 ilog2(TMIGR_CHILDREN_PER_GROUP));
1775
1776 tmigr_hierarchy_levels = cpulvl + nodelvl;
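
	/*
	 * Worked example, assuming TMIGR_CHILDREN_PER_GROUP == 8 as in the
	 * topology example at the top of this file (two nodes with 24 CPUs
	 * each): cpus_per_node = 24, cpulvl = DIV_ROUND_UP(5, 3) = 2 and
	 * nodelvl = DIV_ROUND_UP(1, 3) = 1, which yields the three levels
	 * (LVL0 - LVL2) shown in that example.
	 */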
1777
1778 /*
1779 * If a NUMA node spawns more than one CPU level group then the next
1780 * level(s) of the hierarchy contains groups which handle all CPU groups
1781 * of the same NUMA node. The level above goes across NUMA nodes. Store
1782 * this information for the setup code to decide in which level node
1783 * matching is no longer required.
1784 */
1785 tmigr_crossnode_level = cpulvl;
1786
1787 tmigr_level_list = kcalloc(tmigr_hierarchy_levels, sizeof(struct list_head), GFP_KERNEL);
1788 if (!tmigr_level_list)
1789 goto err;
1790
1791 for (i = 0; i < tmigr_hierarchy_levels; i++)
1792 INIT_LIST_HEAD(&tmigr_level_list[i]);
1793
1794 pr_info("Timer migration: %d hierarchy levels; %d children per group;"
1795 " %d crossnode level\n",
1796 tmigr_hierarchy_levels, TMIGR_CHILDREN_PER_GROUP,
1797 tmigr_crossnode_level);
1798
1799 ret = cpuhp_setup_state(CPUHP_AP_TMIGR_ONLINE, "tmigr:online",
1800 tmigr_cpu_online, tmigr_cpu_offline);
1801 if (ret)
1802 goto err;
1803
1804 return 0;
1805
1806err:
1807 pr_err("Timer migration setup failed\n");
1808 return ret;
1809}
1810late_initcall(tmigr_init);