// SPDX-License-Identifier: GPL-2.0-only
/*
 * Generic entry points for the idle threads and
 * implementation of the idle task scheduling class.
 *
 * (NOTE: these are not related to SCHED_IDLE batch scheduled
 *        tasks which are handled in sched/fair.c )
 */

/* Linker adds these: start and end of __cpuidle functions */
extern char __cpuidle_text_start[], __cpuidle_text_end[];

/**
 * sched_idle_set_state - Record idle state for the current CPU.
 * @idle_state: State to record.
 */
void sched_idle_set_state(struct cpuidle_state *idle_state)
{
        idle_set_state(this_rq(), idle_state);
}

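/*
 * cpu_idle_force_poll counts outstanding requests to use the polling idle
 * loop instead of entering a low-power state: cpu_idle_poll_ctrl() bumps and
 * drops it, and the "nohlt"/"hlt" boot options below set or clear it. While
 * it is non-zero, do_idle() uses cpu_idle_poll() instead of cpuidle.
 */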
static int __read_mostly cpu_idle_force_poll;

void cpu_idle_poll_ctrl(bool enable)
{
        if (enable) {
                cpu_idle_force_poll++;
        } else {
                cpu_idle_force_poll--;
                WARN_ON_ONCE(cpu_idle_force_poll < 0);
        }
}

#ifdef CONFIG_GENERIC_IDLE_POLL_SETUP
static int __init cpu_idle_poll_setup(char *__unused)
{
        cpu_idle_force_poll = 1;

        return 1;
}
__setup("nohlt", cpu_idle_poll_setup);

static int __init cpu_idle_nopoll_setup(char *__unused)
{
        cpu_idle_force_poll = 0;

        return 1;
}
__setup("hlt", cpu_idle_nopoll_setup);
#endif

static noinline int __cpuidle cpu_idle_poll(void)
{
        trace_cpu_idle(0, smp_processor_id());
        stop_critical_timings();
        rcu_idle_enter();
        local_irq_enable();

        while (!tif_need_resched() &&
               (cpu_idle_force_poll || tick_check_broadcast_expired()))
                cpu_relax();

        rcu_idle_exit();
        start_critical_timings();
        trace_cpu_idle(PWR_EVENT_EXIT, smp_processor_id());

        return 1;
}

/* Weak implementations for optional arch specific functions */
void __weak arch_cpu_idle_prepare(void) { }
void __weak arch_cpu_idle_enter(void) { }
void __weak arch_cpu_idle_exit(void) { }
void __weak arch_cpu_idle_dead(void) { }
void __weak arch_cpu_idle(void)
{
        cpu_idle_force_poll = 1;
        raw_local_irq_enable();
}
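
/*
 * The weak arch_cpu_idle() above is a safe fallback: it forces polling, so
 * the next pass through do_idle() spins in cpu_idle_poll() instead, and it
 * re-enables IRQs with the raw variant because the caller,
 * default_idle_call(), has already done the tracing/lockdep bookkeeping.
 */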

/**
 * default_idle_call - Default CPU idle routine.
 *
 * To use when the cpuidle framework cannot be used.
 */
void __cpuidle default_idle_call(void)
{
        if (current_clr_polling_and_test()) {
                local_irq_enable();
        } else {

                trace_cpu_idle(1, smp_processor_id());
                stop_critical_timings();

                /*
                 * arch_cpu_idle() is supposed to enable IRQs, however
                 * we can't do that because of RCU and tracing.
                 *
                 * Trace IRQs enable here, then switch off RCU, and have
                 * arch_cpu_idle() use raw_local_irq_enable(). Note that
                 * rcu_idle_enter() relies on lockdep IRQ state, so switch that
                 * last -- this is very similar to the entry code.
                 */
                trace_hardirqs_on_prepare();
                lockdep_hardirqs_on_prepare();
                rcu_idle_enter();
                lockdep_hardirqs_on(_THIS_IP_);

                arch_cpu_idle();

                /*
                 * OK, so IRQs are enabled here, but RCU needs them disabled to
                 * turn itself back on.. funny thing is that disabling IRQs
                 * will cause tracing, which needs RCU. Jump through hoops to
                 * make it 'work'.
                 */
                raw_local_irq_disable();
                lockdep_hardirqs_off(_THIS_IP_);
                rcu_idle_exit();
                lockdep_hardirqs_on(_THIS_IP_);
                raw_local_irq_enable();

                start_critical_timings();
                trace_cpu_idle(PWR_EVENT_EXIT, smp_processor_id());
        }
}

static int call_cpuidle_s2idle(struct cpuidle_driver *drv,
                               struct cpuidle_device *dev)
{
        if (current_clr_polling_and_test())
                return -EBUSY;

        return cpuidle_enter_s2idle(drv, dev);
}

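/*
 * call_cpuidle() returns the index of the idle state actually entered, or a
 * negative error code; -EBUSY means a reschedule was already pending, so
 * idle was not entered at all.
 */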
static int call_cpuidle(struct cpuidle_driver *drv, struct cpuidle_device *dev,
                        int next_state)
{
        /*
         * A reschedule is already pending, so entering idle is pointless:
         * record a zero idle residency and return.
         */
        if (current_clr_polling_and_test()) {
                dev->last_residency_ns = 0;
                local_irq_enable();
                return -EBUSY;
        }

        /*
         * Enter the idle state previously returned by the governor decision.
         * This function will block until an interrupt occurs and will take
         * care of re-enabling the local interrupts.
         */
        return cpuidle_enter(drv, dev, next_state);
}

/**
 * cpuidle_idle_call - the main idle function
 *
 * NOTE: no locks or semaphores should be used here
 *
 * On architectures that support TIF_POLLING_NRFLAG, this function is called
 * with polling set, and it returns with polling set. If it ever stops
 * polling, it must clear the polling bit.
 */
static void cpuidle_idle_call(void)
{
        struct cpuidle_device *dev = cpuidle_get_device();
        struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);
        int next_state, entered_state;

        /*
         * Check if the idle task must be rescheduled. If it is the
         * case, exit the function after re-enabling the local irq.
         */
        if (need_resched()) {
                local_irq_enable();
                return;
        }

        /*
         * The RCU framework needs to be told that we are entering an idle
         * section, so no more rcu read side critical sections and one more
         * step to the grace period.
         */

        if (cpuidle_not_available(drv, dev)) {
                tick_nohz_idle_stop_tick();

                default_idle_call();
                goto exit_idle;
        }

        /*
         * Suspend-to-idle ("s2idle") is a system state in which all user space
         * has been frozen, all I/O devices have been suspended and the only
         * activity happens here and in interrupts (if any). In that case bypass
         * the cpuidle governor and go straight for the deepest idle state
         * available. Possibly also suspend the local tick and the entire
         * timekeeping to prevent timer interrupts from kicking us out of idle
         * until a proper wakeup interrupt happens.
         */

        if (idle_should_enter_s2idle() || dev->forced_idle_latency_limit_ns) {
                u64 max_latency_ns;

                if (idle_should_enter_s2idle()) {

                        entered_state = call_cpuidle_s2idle(drv, dev);
                        if (entered_state > 0)
                                goto exit_idle;

                        max_latency_ns = U64_MAX;
                } else {
                        max_latency_ns = dev->forced_idle_latency_limit_ns;
                }

                tick_nohz_idle_stop_tick();

                next_state = cpuidle_find_deepest_state(drv, dev, max_latency_ns);
                call_cpuidle(drv, dev, next_state);
        } else {
                bool stop_tick = true;

                /*
                 * Ask the cpuidle framework to choose a convenient idle state.
                 */
                next_state = cpuidle_select(drv, dev, &stop_tick);

                if (stop_tick || tick_nohz_tick_stopped())
                        tick_nohz_idle_stop_tick();
                else
                        tick_nohz_idle_retain_tick();

                entered_state = call_cpuidle(drv, dev, next_state);
                /*
                 * Give the governor an opportunity to reflect on the outcome.
                 */
                cpuidle_reflect(dev, entered_state);
        }

exit_idle:
        __current_set_polling();

        /*
         * It is up to the idle functions to re-enable local interrupts.
         */
        if (WARN_ON_ONCE(irqs_disabled()))
                local_irq_enable();
}
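
/*
 * In short, cpuidle_idle_call() resolves to one of three paths: with no
 * usable cpuidle driver or device it falls back to default_idle_call();
 * during suspend-to-idle or forced idle injection it goes straight to the
 * deepest state within the latency limit; otherwise the governor picks a
 * state via cpuidle_select() and gets feedback through cpuidle_reflect().
 */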

/*
 * Generic idle loop implementation
 *
 * Called with polling cleared.
 */
static void do_idle(void)
{
        int cpu = smp_processor_id();

        /*
         * Check if we need to update blocked load
         */
        nohz_run_idle_balance(cpu);

        /*
         * If the arch has a polling bit, we maintain an invariant:
         *
         * Our polling bit is clear if we're not scheduled (i.e. if rq->curr !=
         * rq->idle). This means that, if rq->idle has the polling bit set,
         * then setting need_resched is guaranteed to cause the CPU to
         * reschedule.
         */

        __current_set_polling();
        tick_nohz_idle_enter();

        while (!need_resched()) {
                rmb();

                local_irq_disable();

                if (cpu_is_offline(cpu)) {
                        tick_nohz_idle_stop_tick();
                        cpuhp_report_idle_dead();
                        arch_cpu_idle_dead();
                }

                arch_cpu_idle_enter();
                rcu_nocb_flush_deferred_wakeup();

                /*
                 * In poll mode we reenable interrupts and spin. Also if we
                 * detected in the wakeup from idle path that the tick
                 * broadcast device expired for us, we don't want to go deep
                 * idle as we know that the IPI is going to arrive right away.
                 */
                if (cpu_idle_force_poll || tick_check_broadcast_expired()) {
                        tick_nohz_idle_restart_tick();
                        cpu_idle_poll();
                } else {
                        cpuidle_idle_call();
                }
                arch_cpu_idle_exit();
        }

        /*
         * Since we fell out of the loop above, we know TIF_NEED_RESCHED must
         * be set, propagate it into PREEMPT_NEED_RESCHED.
         *
         * This is required because for polling idle loops we will not have had
         * an IPI to fold the state for us.
         */
        preempt_set_need_resched();
        tick_nohz_idle_exit();
        __current_clr_polling();

        /*
         * We promise to call sched_ttwu_pending() and reschedule if
         * need_resched() is set while polling is set. That means that clearing
         * polling needs to be visible before doing these things.
         */
        smp_mb__after_atomic();

        /*
         * RCU relies on this call to be done outside of an RCU read-side
         * critical section.
         */
        flush_smp_call_function_queue();
        schedule_idle();

        if (unlikely(klp_patch_pending(current)))
                klp_update_patch_state(current);
}

336 | ||
6727ad9e CM |
337 | bool cpu_in_idle(unsigned long pc) |
338 | { | |
339 | return pc >= (unsigned long)__cpuidle_text_start && | |
340 | pc < (unsigned long)__cpuidle_text_end; | |
341 | } | |
342 | ||
c1de45ca PZ |
343 | struct idle_timer { |
344 | struct hrtimer timer; | |
345 | int done; | |
346 | }; | |
347 | ||
348 | static enum hrtimer_restart idle_inject_timer_fn(struct hrtimer *timer) | |
349 | { | |
350 | struct idle_timer *it = container_of(timer, struct idle_timer, timer); | |
351 | ||
352 | WRITE_ONCE(it->done, 1); | |
353 | set_tsk_need_resched(current); | |
354 | ||
355 | return HRTIMER_NORESTART; | |
356 | } | |
357 | ||
void play_idle_precise(u64 duration_ns, u64 latency_ns)
{
        struct idle_timer it;

        /*
         * Only FIFO tasks can disable the tick since they don't need the
         * forced preemption.
         */
        WARN_ON_ONCE(current->policy != SCHED_FIFO);
        WARN_ON_ONCE(current->nr_cpus_allowed != 1);
        WARN_ON_ONCE(!(current->flags & PF_KTHREAD));
        WARN_ON_ONCE(!(current->flags & PF_NO_SETAFFINITY));
        WARN_ON_ONCE(!duration_ns);
        WARN_ON_ONCE(current->mm);

        rcu_sleep_check();
        preempt_disable();
        current->flags |= PF_IDLE;
        cpuidle_use_deepest_state(latency_ns);

        it.done = 0;
        hrtimer_init_on_stack(&it.timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD);
        it.timer.function = idle_inject_timer_fn;
        hrtimer_start(&it.timer, ns_to_ktime(duration_ns),
                      HRTIMER_MODE_REL_PINNED_HARD);

        while (!READ_ONCE(it.done))
                do_idle();

        cpuidle_use_deepest_state(0);
        current->flags &= ~PF_IDLE;

        preempt_fold_need_resched();
        preempt_enable();
}
EXPORT_SYMBOL_GPL(play_idle_precise);
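
/*
 * A minimal usage sketch (not from this file): an idle-injection kthread,
 * pinned to one CPU and running as SCHED_FIFO, could force that CPU idle
 * for 5 ms at a time with no latency constraint:
 *
 *        play_idle_precise(5 * NSEC_PER_MSEC, U64_MAX);
 *
 * The WARN_ON_ONCE() checks above spell out exactly those preconditions:
 * FIFO policy, a single allowed CPU, PF_KTHREAD/PF_NO_SETAFFINITY, a
 * non-zero duration and no user mm.
 */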

void cpu_startup_entry(enum cpuhp_state state)
{
        arch_cpu_idle_prepare();
        cpuhp_online_idle(state);
        while (1)
                do_idle();
}

/*
 * idle-task scheduling class.
 */

#ifdef CONFIG_SMP
static int
select_task_rq_idle(struct task_struct *p, int cpu, int flags)
{
        return task_cpu(p); /* IDLE tasks are never migrated */
}

static int
balance_idle(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
{
        return WARN_ON_ONCE(1);
}
#endif

/*
 * Idle tasks are unconditionally rescheduled:
 */
static void check_preempt_curr_idle(struct rq *rq, struct task_struct *p, int flags)
{
        resched_curr(rq);
}

static void put_prev_task_idle(struct rq *rq, struct task_struct *prev)
{
}

static void set_next_task_idle(struct rq *rq, struct task_struct *next, bool first)
{
        update_idle_core(rq);
        schedstat_inc(rq->sched_goidle);
}

#ifdef CONFIG_SMP
static struct task_struct *pick_task_idle(struct rq *rq)
{
        return rq->idle;
}
#endif

struct task_struct *pick_next_task_idle(struct rq *rq)
{
        struct task_struct *next = rq->idle;

        set_next_task_idle(rq, next, true);

        return next;
}

/*
 * It is not legal to sleep in the idle task - print a warning
 * message if some code attempts to do it:
 */
static void
dequeue_task_idle(struct rq *rq, struct task_struct *p, int flags)
{
        raw_spin_rq_unlock_irq(rq);
        printk(KERN_ERR "bad: scheduling from the idle thread!\n");
        dump_stack();
        raw_spin_rq_lock_irq(rq);
}

/*
 * scheduler tick hitting a task of our scheduling class.
 *
 * NOTE: This function can be called remotely by the tick offload that
 * goes along full dynticks. Therefore no local assumption can be made
 * and everything must be accessed through the @rq and @curr passed in
 * parameters.
 */
static void task_tick_idle(struct rq *rq, struct task_struct *curr, int queued)
{
}

static void switched_to_idle(struct rq *rq, struct task_struct *p)
{
        BUG();
}

static void
prio_changed_idle(struct rq *rq, struct task_struct *p, int oldprio)
{
        BUG();
}

static void update_curr_idle(struct rq *rq)
{
}

/*
 * Simple, special scheduling class for the per-CPU idle tasks:
 */
DEFINE_SCHED_CLASS(idle) = {

        /* no enqueue/yield_task for idle tasks */

        /* dequeue is not valid, we print a debug message there: */
        .dequeue_task           = dequeue_task_idle,

        .check_preempt_curr     = check_preempt_curr_idle,

        .pick_next_task         = pick_next_task_idle,
        .put_prev_task          = put_prev_task_idle,
        .set_next_task          = set_next_task_idle,

#ifdef CONFIG_SMP
        .balance                = balance_idle,
        .pick_task              = pick_task_idle,
        .select_task_rq         = select_task_rq_idle,
        .set_cpus_allowed       = set_cpus_allowed_common,
#endif

        .task_tick              = task_tick_idle,

        .prio_changed           = prio_changed_idle,
        .switched_to            = switched_to_idle,
        .update_curr            = update_curr_idle,
};