// SPDX-License-Identifier: GPL-2.0-only
/*
 *  kernel/sched/core.c
 *
 *  Core kernel scheduler code and related syscalls
 *
 *  Copyright (C) 1991-2002  Linus Torvalds
 */
#include <linux/highmem.h>
#include <linux/hrtimer_api.h>
#include <linux/ktime_api.h>
#include <linux/sched/signal.h>
#include <linux/syscalls_api.h>
#include <linux/debug_locks.h>
#include <linux/prefetch.h>
#include <linux/capability.h>
#include <linux/pgtable_api.h>
#include <linux/wait_bit.h>
#include <linux/jiffies.h>
#include <linux/spinlock_api.h>
#include <linux/cpumask_api.h>
#include <linux/lockdep_api.h>
#include <linux/hardirq.h>
#include <linux/softirq.h>
#include <linux/refcount_api.h>
#include <linux/topology.h>
#include <linux/sched/clock.h>
#include <linux/sched/cond_resched.h>
#include <linux/sched/cputime.h>
#include <linux/sched/debug.h>
#include <linux/sched/hotplug.h>
#include <linux/sched/init.h>
#include <linux/sched/isolation.h>
#include <linux/sched/loadavg.h>
#include <linux/sched/mm.h>
#include <linux/sched/nohz.h>
#include <linux/sched/rseq_api.h>
#include <linux/sched/rt.h>

#include <linux/blkdev.h>
#include <linux/context_tracking.h>
#include <linux/cpuset.h>
#include <linux/delayacct.h>
#include <linux/init_task.h>
#include <linux/interrupt.h>
#include <linux/ioprio.h>
#include <linux/kallsyms.h>
#include <linux/kcov.h>
#include <linux/kprobes.h>
#include <linux/llist_api.h>
#include <linux/mmu_context.h>
#include <linux/mmzone.h>
#include <linux/mutex_api.h>
#include <linux/nmi.h>
#include <linux/nospec.h>
#include <linux/perf_event_api.h>
#include <linux/profile.h>
#include <linux/psi.h>
#include <linux/rcuwait_api.h>
#include <linux/rseq.h>
#include <linux/sched/wake_q.h>
#include <linux/scs.h>
#include <linux/slab.h>
#include <linux/syscalls.h>
#include <linux/vtime.h>
#include <linux/wait_api.h>
#include <linux/workqueue_api.h>

#ifdef CONFIG_PREEMPT_DYNAMIC
# ifdef CONFIG_GENERIC_ENTRY
#  include <linux/entry-common.h>
# endif
#endif

#include <uapi/linux/sched/types.h>

#include <asm/irq_regs.h>
#include <asm/switch_to.h>
#include <asm/tlb.h>

#define CREATE_TRACE_POINTS
#include <linux/sched/rseq_api.h>
#include <trace/events/sched.h>
#include <trace/events/ipi.h>
#undef CREATE_TRACE_POINTS

#include "sched.h"
#include "stats.h"

#include "autogroup.h"
#include "pelt.h"
#include "smp.h"
#include "stats.h"

#include "../workqueue_internal.h"
#include "../../io_uring/io-wq.h"
#include "../smpboot.h"

EXPORT_TRACEPOINT_SYMBOL_GPL(ipi_send_cpu);
EXPORT_TRACEPOINT_SYMBOL_GPL(ipi_send_cpumask);

/*
 * Export tracepoints that act as a bare tracehook (i.e.: have no trace event
 * associated with them) to allow external modules to probe them.
 */
EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_cfs_tp);
EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_rt_tp);
EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_dl_tp);
EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_irq_tp);
EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_se_tp);
EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_thermal_tp);
EXPORT_TRACEPOINT_SYMBOL_GPL(sched_cpu_capacity_tp);
EXPORT_TRACEPOINT_SYMBOL_GPL(sched_overutilized_tp);
EXPORT_TRACEPOINT_SYMBOL_GPL(sched_util_est_cfs_tp);
EXPORT_TRACEPOINT_SYMBOL_GPL(sched_util_est_se_tp);
EXPORT_TRACEPOINT_SYMBOL_GPL(sched_update_nr_running_tp);
EXPORT_TRACEPOINT_SYMBOL_GPL(sched_compute_energy_tp);

DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);

#ifdef CONFIG_SCHED_DEBUG
/*
 * Debugging: various feature bits
 *
 * If SCHED_DEBUG is disabled, each compilation unit has its own copy of
 * sysctl_sched_features, defined in sched.h, to allow constants propagation
 * at compile time and compiler optimization based on features default.
 */
#define SCHED_FEAT(name, enabled)	\
	(1UL << __SCHED_FEAT_##name) * enabled |
const_debug unsigned int sysctl_sched_features =
#include "features.h"
	0;
#undef SCHED_FEAT
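
/*
 * Illustrative expansion (not part of the original source): with a
 * hypothetical features.h containing just
 *
 *	SCHED_FEAT(PLACE_LAG, true)
 *	SCHED_FEAT(LATENCY_WARN, false)
 *
 * the construct above expands to
 *
 *	const_debug unsigned int sysctl_sched_features =
 *		(1UL << __SCHED_FEAT_PLACE_LAG) * true |
 *		(1UL << __SCHED_FEAT_LATENCY_WARN) * false |
 *		0;
 *
 * i.e. each enabled feature contributes its bit and each disabled one
 * contributes 0, yielding the default feature mask at compile time.
 */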

/*
 * Print a warning if need_resched is set for the given duration (if
 * LATENCY_WARN is enabled).
 *
 * If sysctl_resched_latency_warn_once is set, only one warning will be shown
 * per boot.
 */
__read_mostly int sysctl_resched_latency_warn_ms = 100;
__read_mostly int sysctl_resched_latency_warn_once = 1;
#endif /* CONFIG_SCHED_DEBUG */

/*
 * Number of tasks to iterate in a single balance run.
 * Limited because this is done with IRQs disabled.
 */
const_debug unsigned int sysctl_sched_nr_migrate = SCHED_NR_MIGRATE_BREAK;

__read_mostly int scheduler_running;

#ifdef CONFIG_SCHED_CORE

DEFINE_STATIC_KEY_FALSE(__sched_core_enabled);

/* kernel prio, less is more */
static inline int __task_prio(const struct task_struct *p)
{
	if (p->sched_class == &stop_sched_class) /* trumps deadline */
		return -2;

	if (rt_prio(p->prio)) /* includes deadline */
		return p->prio; /* [-1, 99] */

	if (p->sched_class == &idle_sched_class)
		return MAX_RT_PRIO + NICE_WIDTH; /* 140 */

	return MAX_RT_PRIO + MAX_NICE; /* 120, squash fair */
}
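
/*
 * A few concrete mappings, for illustration only (the values follow from
 * the definitions above, assuming the usual MAX_RT_PRIO == 100):
 *
 *	stop task			-> -2
 *	SCHED_DEADLINE			-> -1  (p->prio == -1 is rt_prio())
 *	SCHED_FIFO, rt_priority 50	-> 49  (p->prio == 99 - rt_priority)
 *	SCHED_NORMAL / SCHED_BATCH	-> 120 (all fair tasks squashed)
 *	SCHED_IDLE			-> 140
 */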

/*
 * l(a,b)
 * le(a,b) := !l(b,a)
 * g(a,b)  := l(b,a)
 * ge(a,b) := !l(a,b)
 */

/* real prio, less is less */
static inline bool prio_less(const struct task_struct *a,
			     const struct task_struct *b, bool in_fi)
{

	int pa = __task_prio(a), pb = __task_prio(b);

	if (-pa < -pb)
		return true;

	if (-pb < -pa)
		return false;

	if (pa == -1) /* dl_prio() doesn't work because of stop_class above */
		return !dl_time_before(a->dl.deadline, b->dl.deadline);

	if (pa == MAX_RT_PRIO + MAX_NICE)	/* fair */
		return cfs_prio_less(a, b, in_fi);

	return false;
}

static inline bool __sched_core_less(const struct task_struct *a,
				     const struct task_struct *b)
{
	if (a->core_cookie < b->core_cookie)
		return true;

	if (a->core_cookie > b->core_cookie)
		return false;

	/* flip prio, so high prio is leftmost */
	if (prio_less(b, a, !!task_rq(a)->core->core_forceidle_count))
		return true;

	return false;
}

#define __node_2_sc(node) rb_entry((node), struct task_struct, core_node)

static inline bool rb_sched_core_less(struct rb_node *a, const struct rb_node *b)
{
	return __sched_core_less(__node_2_sc(a), __node_2_sc(b));
}

static inline int rb_sched_core_cmp(const void *key, const struct rb_node *node)
{
	const struct task_struct *p = __node_2_sc(node);
	unsigned long cookie = (unsigned long)key;

	if (cookie < p->core_cookie)
		return -1;

	if (cookie > p->core_cookie)
		return 1;

	return 0;
}

void sched_core_enqueue(struct rq *rq, struct task_struct *p)
{
	rq->core->core_task_seq++;

	if (!p->core_cookie)
		return;

	rb_add(&p->core_node, &rq->core_tree, rb_sched_core_less);
}

void sched_core_dequeue(struct rq *rq, struct task_struct *p, int flags)
{
	rq->core->core_task_seq++;

	if (sched_core_enqueued(p)) {
		rb_erase(&p->core_node, &rq->core_tree);
		RB_CLEAR_NODE(&p->core_node);
	}

	/*
	 * Migrating the last task off the cpu, with the cpu in forced idle
	 * state. Reschedule to create an accounting edge for forced idle,
	 * and re-examine whether the core is still in forced idle state.
	 */
	if (!(flags & DEQUEUE_SAVE) && rq->nr_running == 1 &&
	    rq->core->core_forceidle_count && rq->curr == rq->idle)
		resched_curr(rq);
}

static int sched_task_is_throttled(struct task_struct *p, int cpu)
{
	if (p->sched_class->task_is_throttled)
		return p->sched_class->task_is_throttled(p, cpu);

	return 0;
}

static struct task_struct *sched_core_next(struct task_struct *p, unsigned long cookie)
{
	struct rb_node *node = &p->core_node;
	int cpu = task_cpu(p);

	do {
		node = rb_next(node);
		if (!node)
			return NULL;

		p = __node_2_sc(node);
		if (p->core_cookie != cookie)
			return NULL;

	} while (sched_task_is_throttled(p, cpu));

	return p;
}

/*
 * Find left-most (aka, highest priority) and unthrottled task matching @cookie.
 * If no suitable task is found, NULL will be returned.
 */
static struct task_struct *sched_core_find(struct rq *rq, unsigned long cookie)
{
	struct task_struct *p;
	struct rb_node *node;

	node = rb_find_first((void *)cookie, &rq->core_tree, rb_sched_core_cmp);
	if (!node)
		return NULL;

	p = __node_2_sc(node);
	if (!sched_task_is_throttled(p, rq->cpu))
		return p;

	return sched_core_next(p, cookie);
}

/*
 * Magic required such that:
 *
 *	raw_spin_rq_lock(rq);
 *	...
 *	raw_spin_rq_unlock(rq);
 *
 * ends up locking and unlocking the _same_ lock, and all CPUs
 * always agree on which rq has which lock.
 *
 * XXX entirely possible to selectively enable cores, don't bother for now.
 */

static DEFINE_MUTEX(sched_core_mutex);
static atomic_t sched_core_count;
static struct cpumask sched_core_mask;

static void sched_core_lock(int cpu, unsigned long *flags)
{
	const struct cpumask *smt_mask = cpu_smt_mask(cpu);
	int t, i = 0;

	local_irq_save(*flags);
	for_each_cpu(t, smt_mask)
		raw_spin_lock_nested(&cpu_rq(t)->__lock, i++);
}

static void sched_core_unlock(int cpu, unsigned long *flags)
{
	const struct cpumask *smt_mask = cpu_smt_mask(cpu);
	int t;

	for_each_cpu(t, smt_mask)
		raw_spin_unlock(&cpu_rq(t)->__lock);
	local_irq_restore(*flags);
}

static void __sched_core_flip(bool enabled)
{
	unsigned long flags;
	int cpu, t;

	cpus_read_lock();

	/*
	 * Toggle the online cores, one by one.
	 */
	cpumask_copy(&sched_core_mask, cpu_online_mask);
	for_each_cpu(cpu, &sched_core_mask) {
		const struct cpumask *smt_mask = cpu_smt_mask(cpu);

		sched_core_lock(cpu, &flags);

		for_each_cpu(t, smt_mask)
			cpu_rq(t)->core_enabled = enabled;

		cpu_rq(cpu)->core->core_forceidle_start = 0;

		sched_core_unlock(cpu, &flags);

		cpumask_andnot(&sched_core_mask, &sched_core_mask, smt_mask);
	}

	/*
	 * Toggle the offline CPUs.
	 */
	for_each_cpu_andnot(cpu, cpu_possible_mask, cpu_online_mask)
		cpu_rq(cpu)->core_enabled = enabled;

	cpus_read_unlock();
}

static void sched_core_assert_empty(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		WARN_ON_ONCE(!RB_EMPTY_ROOT(&cpu_rq(cpu)->core_tree));
}

static void __sched_core_enable(void)
{
	static_branch_enable(&__sched_core_enabled);
	/*
	 * Ensure all previous instances of raw_spin_rq_*lock() have finished
	 * and future ones will observe !sched_core_disabled().
	 */
	synchronize_rcu();
	__sched_core_flip(true);
	sched_core_assert_empty();
}

static void __sched_core_disable(void)
{
	sched_core_assert_empty();
	__sched_core_flip(false);
	static_branch_disable(&__sched_core_enabled);
}

void sched_core_get(void)
{
	if (atomic_inc_not_zero(&sched_core_count))
		return;

	mutex_lock(&sched_core_mutex);
	if (!atomic_read(&sched_core_count))
		__sched_core_enable();

	smp_mb__before_atomic();
	atomic_inc(&sched_core_count);
	mutex_unlock(&sched_core_mutex);
}

static void __sched_core_put(struct work_struct *work)
{
	if (atomic_dec_and_mutex_lock(&sched_core_count, &sched_core_mutex)) {
		__sched_core_disable();
		mutex_unlock(&sched_core_mutex);
	}
}

void sched_core_put(void)
{
	static DECLARE_WORK(_work, __sched_core_put);

	/*
	 * "There can be only one"
	 *
	 * Either this is the last one, or we don't actually need to do any
	 * 'work'. If it is the last *again*, we rely on
	 * WORK_STRUCT_PENDING_BIT.
	 */
	if (!atomic_add_unless(&sched_core_count, -1, 1))
		schedule_work(&_work);
}

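/*
 * Illustrative usage of the refcount above (a sketch, not code from this
 * file): users that need core scheduling active, such as the core-cookie
 * management code, bracket the cookie's lifetime with:
 *
 *	sched_core_get();	// first user flips the static key on
 *	...			// tasks run with cookies, core_tree in use
 *	sched_core_put();	// last user flips it back off (via work)
 *
 * The atomic_inc_not_zero() fast path keeps the common "already enabled"
 * case off sched_core_mutex.
 */
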
#else /* !CONFIG_SCHED_CORE */

static inline void sched_core_enqueue(struct rq *rq, struct task_struct *p) { }
static inline void
sched_core_dequeue(struct rq *rq, struct task_struct *p, int flags) { }

#endif /* CONFIG_SCHED_CORE */

/*
 * Serialization rules:
 *
 * Lock order:
 *
 *   p->pi_lock
 *     rq->lock
 *       hrtimer_cpu_base->lock (hrtimer_start() for bandwidth controls)
 *
 *  rq1->lock
 *    rq2->lock  where: rq1 < rq2
 *
 * Regular state:
 *
 * Normal scheduling state is serialized by rq->lock. __schedule() takes the
 * local CPU's rq->lock, it optionally removes the task from the runqueue and
 * always looks at the local rq data structures to find the most eligible task
 * to run next.
 *
 * Task enqueue is also under rq->lock, possibly taken from another CPU.
 * Wakeups from another LLC domain might use an IPI to transfer the enqueue to
 * the local CPU to avoid bouncing the runqueue state around [ see
 * ttwu_queue_wakelist() ]
 *
 * Task wakeup, specifically wakeups that involve migration, are horribly
 * complicated to avoid having to take two rq->locks.
 *
 * Special state:
 *
 * System-calls and anything external will use task_rq_lock() which acquires
 * both p->pi_lock and rq->lock. As a consequence the state they change is
 * stable while holding either lock:
 *
 *  - sched_setaffinity()/
 *    set_cpus_allowed_ptr():	p->cpus_ptr, p->nr_cpus_allowed
 *  - set_user_nice():		p->se.load, p->*prio
 *  - __sched_setscheduler():	p->sched_class, p->policy, p->*prio,
 *				p->se.load, p->rt_priority,
 *				p->dl.dl_{runtime, deadline, period, flags, bw, density}
 *  - sched_setnuma():		p->numa_preferred_nid
 *  - sched_move_task():	p->sched_task_group
 *  - uclamp_update_active():	p->uclamp*
 *
 * p->state <- TASK_*:
 *
 *   is changed locklessly using set_current_state(), __set_current_state() or
 *   set_special_state(), see their respective comments, or by
 *   try_to_wake_up(). This latter uses p->pi_lock to serialize against
 *   concurrent self.
 *
 * p->on_rq <- { 0, 1 = TASK_ON_RQ_QUEUED, 2 = TASK_ON_RQ_MIGRATING }:
 *
 *   is set by activate_task() and cleared by deactivate_task(), under
 *   rq->lock. Non-zero indicates the task is runnable, the special
 *   ON_RQ_MIGRATING state is used for migration without holding both
 *   rq->locks. It indicates task_cpu() is not stable, see task_rq_lock().
 *
 * p->on_cpu <- { 0, 1 }:
 *
 *   is set by prepare_task() and cleared by finish_task() such that it will be
 *   set before p is scheduled-in and cleared after p is scheduled-out, both
 *   under rq->lock. Non-zero indicates the task is running on its CPU.
 *
 *   [ The astute reader will observe that it is possible for two tasks on one
 *     CPU to have ->on_cpu = 1 at the same time. ]
 *
 * task_cpu(p): is changed by set_task_cpu(), the rules are:
 *
 *  - Don't call set_task_cpu() on a blocked task:
 *
 *    We don't care what CPU we're not running on, this simplifies hotplug,
 *    the CPU assignment of blocked tasks isn't required to be valid.
 *
 *  - for try_to_wake_up(), called under p->pi_lock:
 *
 *    This allows try_to_wake_up() to only take one rq->lock, see its comment.
 *
 *  - for migration called under rq->lock:
 *    [ see task_on_rq_migrating() in task_rq_lock() ]
 *
 *    o move_queued_task()
 *    o detach_task()
 *
 *  - for migration called under double_rq_lock():
 *
 *    o __migrate_swap_task()
 *    o push_rt_task() / pull_rt_task()
 *    o push_dl_task() / pull_dl_task()
 *    o dl_task_offline_migration()
 *
 */

void raw_spin_rq_lock_nested(struct rq *rq, int subclass)
{
	raw_spinlock_t *lock;

	/* Matches synchronize_rcu() in __sched_core_enable() */
	preempt_disable();
	if (sched_core_disabled()) {
		raw_spin_lock_nested(&rq->__lock, subclass);
		/* preempt_count *MUST* be > 1 */
		preempt_enable_no_resched();
		return;
	}

	for (;;) {
		lock = __rq_lockp(rq);
		raw_spin_lock_nested(lock, subclass);
		if (likely(lock == __rq_lockp(rq))) {
			/* preempt_count *MUST* be > 1 */
			preempt_enable_no_resched();
			return;
		}
		raw_spin_unlock(lock);
	}
}

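/*
 * Why the re-check of __rq_lockp() after acquiring, sketched (this
 * illustration is not in the original source): core scheduling can switch
 * a CPU between its per-rq lock and the shared per-core lock concurrently
 * with the loop above:
 *
 *	raw_spin_rq_lock_nested()	__sched_core_flip()
 *	lock = __rq_lockp(rq);		// returns &rq->__lock
 *					core_enabled = true;
 *					// __rq_lockp(rq) now returns
 *					// the core-wide lock
 *	raw_spin_lock_nested(lock);
 *	lock != __rq_lockp(rq)		// -> unlock and retry
 *
 * Only when the lock we took is still the lock the rq maps to may we
 * return with it held.
 */
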
bool raw_spin_rq_trylock(struct rq *rq)
{
	raw_spinlock_t *lock;
	bool ret;

	/* Matches synchronize_rcu() in __sched_core_enable() */
	preempt_disable();
	if (sched_core_disabled()) {
		ret = raw_spin_trylock(&rq->__lock);
		preempt_enable();
		return ret;
	}

	for (;;) {
		lock = __rq_lockp(rq);
		ret = raw_spin_trylock(lock);
		if (!ret || (likely(lock == __rq_lockp(rq)))) {
			preempt_enable();
			return ret;
		}
		raw_spin_unlock(lock);
	}
}

void raw_spin_rq_unlock(struct rq *rq)
{
	raw_spin_unlock(rq_lockp(rq));
}

#ifdef CONFIG_SMP
/*
 * double_rq_lock - safely lock two runqueues
 */
void double_rq_lock(struct rq *rq1, struct rq *rq2)
{
	lockdep_assert_irqs_disabled();

	if (rq_order_less(rq2, rq1))
		swap(rq1, rq2);

	raw_spin_rq_lock(rq1);
	if (__rq_lockp(rq1) != __rq_lockp(rq2))
		raw_spin_rq_lock_nested(rq2, SINGLE_DEPTH_NESTING);

	double_rq_clock_clear_update(rq1, rq2);
}
#endif

/*
 * __task_rq_lock - lock the rq @p resides on.
 */
struct rq *__task_rq_lock(struct task_struct *p, struct rq_flags *rf)
	__acquires(rq->lock)
{
	struct rq *rq;

	lockdep_assert_held(&p->pi_lock);

	for (;;) {
		rq = task_rq(p);
		raw_spin_rq_lock(rq);
		if (likely(rq == task_rq(p) && !task_on_rq_migrating(p))) {
			rq_pin_lock(rq, rf);
			return rq;
		}
		raw_spin_rq_unlock(rq);

		while (unlikely(task_on_rq_migrating(p)))
			cpu_relax();
	}
}

/*
 * task_rq_lock - lock p->pi_lock and lock the rq @p resides on.
 */
struct rq *task_rq_lock(struct task_struct *p, struct rq_flags *rf)
	__acquires(p->pi_lock)
	__acquires(rq->lock)
{
	struct rq *rq;

	for (;;) {
		raw_spin_lock_irqsave(&p->pi_lock, rf->flags);
		rq = task_rq(p);
		raw_spin_rq_lock(rq);
		/*
		 *	move_queued_task()		task_rq_lock()
		 *
		 *	ACQUIRE (rq->lock)
		 *	[S] ->on_rq = MIGRATING		[L] rq = task_rq()
		 *	WMB (__set_task_cpu())		ACQUIRE (rq->lock);
		 *	[S] ->cpu = new_cpu		[L] task_rq()
		 *					[L] ->on_rq
		 *	RELEASE (rq->lock)
		 *
		 * If we observe the old CPU in task_rq_lock(), the acquire of
		 * the old rq->lock will fully serialize against the stores.
		 *
		 * If we observe the new CPU in task_rq_lock(), the address
		 * dependency headed by '[L] rq = task_rq()' and the acquire
		 * will pair with the WMB to ensure we then also see migrating.
		 */
		if (likely(rq == task_rq(p) && !task_on_rq_migrating(p))) {
			rq_pin_lock(rq, rf);
			return rq;
		}
		raw_spin_rq_unlock(rq);
		raw_spin_unlock_irqrestore(&p->pi_lock, rf->flags);

		while (unlikely(task_on_rq_migrating(p)))
			cpu_relax();
	}
}

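/*
 * Typical caller pattern, for illustration (task_rq_unlock() is defined in
 * sched.h; this sketch is not part of the original file):
 *
 *	struct rq_flags rf;
 *	struct rq *rq;
 *
 *	rq = task_rq_lock(p, &rf);
 *	... p's scheduling state is stable here, see the
 *	    "Serialization rules" comment above ...
 *	task_rq_unlock(rq, p, &rf);
 */
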
/*
 * RQ-clock updating methods:
 */

static void update_rq_clock_task(struct rq *rq, s64 delta)
{
/*
 * In theory, the compiler should just see 0 here, and optimize out the call
 * to sched_rt_avg_update. But I don't trust it...
 */
	s64 __maybe_unused steal = 0, irq_delta = 0;

#ifdef CONFIG_IRQ_TIME_ACCOUNTING
	irq_delta = irq_time_read(cpu_of(rq)) - rq->prev_irq_time;

	/*
	 * Since irq_time is only updated on {soft,}irq_exit, we might run into
	 * this case when a previous update_rq_clock() happened inside a
	 * {soft,}irq region.
	 *
	 * When this happens, we stop ->clock_task and only update the
	 * prev_irq_time stamp to account for the part that fit, so that a next
	 * update will consume the rest. This ensures ->clock_task is
	 * monotonic.
	 *
	 * It does however cause some slight misattribution of {soft,}irq
	 * time; a more accurate solution would be to update the irq_time using
	 * the current rq->clock timestamp, except that would require using
	 * atomic ops.
	 */
	if (irq_delta > delta)
		irq_delta = delta;

	rq->prev_irq_time += irq_delta;
	delta -= irq_delta;
	psi_account_irqtime(rq->curr, irq_delta);
	delayacct_irq(rq->curr, irq_delta);
#endif
#ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING
	if (static_key_false((&paravirt_steal_rq_enabled))) {
		steal = paravirt_steal_clock(cpu_of(rq));
		steal -= rq->prev_steal_time_rq;

		if (unlikely(steal > delta))
			steal = delta;

		rq->prev_steal_time_rq += steal;
		delta -= steal;
	}
#endif

	rq->clock_task += delta;

#ifdef CONFIG_HAVE_SCHED_AVG_IRQ
	if ((irq_delta + steal) && sched_feat(NONTASK_CAPACITY))
		update_irq_load_avg(rq, irq_delta + steal);
#endif
	update_rq_clock_pelt(rq, delta);
}

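/*
 * Worked example (illustrative numbers only): if 10ms of wall time passed
 * since the last update (delta = 10ms), of which 2ms were spent in IRQ
 * context (irq_delta) and 1ms was stolen by the hypervisor (steal), then
 * rq->clock advances by the full 10ms while rq->clock_task only advances
 * by 10 - 2 - 1 = 7ms: task runtime accounting excludes time the task
 * could not actually run.
 */
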
void update_rq_clock(struct rq *rq)
{
	s64 delta;

	lockdep_assert_rq_held(rq);

	if (rq->clock_update_flags & RQCF_ACT_SKIP)
		return;

#ifdef CONFIG_SCHED_DEBUG
	if (sched_feat(WARN_DOUBLE_CLOCK))
		SCHED_WARN_ON(rq->clock_update_flags & RQCF_UPDATED);
	rq->clock_update_flags |= RQCF_UPDATED;
#endif

	delta = sched_clock_cpu(cpu_of(rq)) - rq->clock;
	if (delta < 0)
		return;
	rq->clock += delta;
	update_rq_clock_task(rq, delta);
}

#ifdef CONFIG_SCHED_HRTICK
/*
 * Use HR-timers to deliver accurate preemption points.
 */

static void hrtick_clear(struct rq *rq)
{
	if (hrtimer_active(&rq->hrtick_timer))
		hrtimer_cancel(&rq->hrtick_timer);
}

/*
 * High-resolution timer tick.
 * Runs from hardirq context with interrupts disabled.
 */
static enum hrtimer_restart hrtick(struct hrtimer *timer)
{
	struct rq *rq = container_of(timer, struct rq, hrtick_timer);
	struct rq_flags rf;

	WARN_ON_ONCE(cpu_of(rq) != smp_processor_id());

	rq_lock(rq, &rf);
	update_rq_clock(rq);
	rq->curr->sched_class->task_tick(rq, rq->curr, 1);
	rq_unlock(rq, &rf);

	return HRTIMER_NORESTART;
}

#ifdef CONFIG_SMP

static void __hrtick_restart(struct rq *rq)
{
	struct hrtimer *timer = &rq->hrtick_timer;
	ktime_t time = rq->hrtick_time;

	hrtimer_start(timer, time, HRTIMER_MODE_ABS_PINNED_HARD);
}

/*
 * called from hardirq (IPI) context
 */
static void __hrtick_start(void *arg)
{
	struct rq *rq = arg;
	struct rq_flags rf;

	rq_lock(rq, &rf);
	__hrtick_restart(rq);
	rq_unlock(rq, &rf);
}

/*
 * Called to set the hrtick timer state.
 *
 * called with rq->lock held and irqs disabled
 */
void hrtick_start(struct rq *rq, u64 delay)
{
	struct hrtimer *timer = &rq->hrtick_timer;
	s64 delta;

	/*
	 * Don't schedule slices shorter than 10000ns, that just
	 * doesn't make sense and can cause timer DoS.
	 */
	delta = max_t(s64, delay, 10000LL);
	rq->hrtick_time = ktime_add_ns(timer->base->get_time(), delta);

	if (rq == this_rq())
		__hrtick_restart(rq);
	else
		smp_call_function_single_async(cpu_of(rq), &rq->hrtick_csd);
}

#else
/*
 * Called to set the hrtick timer state.
 *
 * called with rq->lock held and irqs disabled
 */
void hrtick_start(struct rq *rq, u64 delay)
{
	/*
	 * Don't schedule slices shorter than 10000ns, that just
	 * doesn't make sense. Rely on vruntime for fairness.
	 */
	delay = max_t(u64, delay, 10000LL);
	hrtimer_start(&rq->hrtick_timer, ns_to_ktime(delay),
		      HRTIMER_MODE_REL_PINNED_HARD);
}

#endif /* CONFIG_SMP */

static void hrtick_rq_init(struct rq *rq)
{
#ifdef CONFIG_SMP
	INIT_CSD(&rq->hrtick_csd, __hrtick_start, rq);
#endif
	hrtimer_init(&rq->hrtick_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD);
	rq->hrtick_timer.function = hrtick;
}
#else /* CONFIG_SCHED_HRTICK */
static inline void hrtick_clear(struct rq *rq)
{
}

static inline void hrtick_rq_init(struct rq *rq)
{
}
#endif /* CONFIG_SCHED_HRTICK */

/*
 * cmpxchg based fetch_or, macro so it works for different integer types
 */
#define fetch_or(ptr, mask)						\
	({								\
		typeof(ptr) _ptr = (ptr);				\
		typeof(mask) _mask = (mask);				\
		typeof(*_ptr) _val = *_ptr;				\
									\
		do {							\
		} while (!try_cmpxchg(_ptr, &_val, _val | _mask));	\
	_val;								\
})

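/*
 * Semantics, for illustration (not part of the original source): like an
 * atomic "*ptr |= mask" that returns the value *before* the OR, e.g.:
 *
 *	unsigned long flags = 0x1;
 *	unsigned long old = fetch_or(&flags, 0x2);
 *	// old == 0x1, flags == 0x3
 *
 * try_cmpxchg() refreshes _val with the current value on failure, so the
 * empty do-while body simply retries until the OR lands atomically.
 */
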
#if defined(CONFIG_SMP) && defined(TIF_POLLING_NRFLAG)
/*
 * Atomically set TIF_NEED_RESCHED and test for TIF_POLLING_NRFLAG,
 * this avoids any races wrt polling state changes and thereby avoids
 * spurious IPIs.
 */
static inline bool set_nr_and_not_polling(struct task_struct *p)
{
	struct thread_info *ti = task_thread_info(p);
	return !(fetch_or(&ti->flags, _TIF_NEED_RESCHED) & _TIF_POLLING_NRFLAG);
}

/*
 * Atomically set TIF_NEED_RESCHED if TIF_POLLING_NRFLAG is set.
 *
 * If this returns true, then the idle task promises to call
 * sched_ttwu_pending() and reschedule soon.
 */
static bool set_nr_if_polling(struct task_struct *p)
{
	struct thread_info *ti = task_thread_info(p);
	typeof(ti->flags) val = READ_ONCE(ti->flags);

	do {
		if (!(val & _TIF_POLLING_NRFLAG))
			return false;
		if (val & _TIF_NEED_RESCHED)
			return true;
	} while (!try_cmpxchg(&ti->flags, &val, val | _TIF_NEED_RESCHED));

	return true;
}

#else
static inline bool set_nr_and_not_polling(struct task_struct *p)
{
	set_tsk_need_resched(p);
	return true;
}

#ifdef CONFIG_SMP
static inline bool set_nr_if_polling(struct task_struct *p)
{
	return false;
}
#endif
#endif

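/*
 * The interplay these helpers exploit, sketched (illustration only): a CPU
 * in polling idle (e.g. mwait) watches its thread_info flags, so setting
 * TIF_NEED_RESCHED is enough to wake it and the IPI can be skipped:
 *
 *	waker				idle CPU (TIF_POLLING_NRFLAG set)
 *	------------------------	---------------------------------
 *	set_nr_and_not_polling()	monitors ti->flags
 *	  sees _TIF_POLLING_NRFLAG,
 *	  sets _TIF_NEED_RESCHED	observes _TIF_NEED_RESCHED,
 *	  -> returns false, caller	  leaves idle and reschedules
 *	     skips smp_send_reschedule()
 */
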
static bool __wake_q_add(struct wake_q_head *head, struct task_struct *task)
{
	struct wake_q_node *node = &task->wake_q;

	/*
	 * Atomically grab the task, if ->wake_q is !nil already it means
	 * it's already queued (either by us or someone else) and will get the
	 * wakeup due to that.
	 *
	 * In order to ensure that a pending wakeup will observe our pending
	 * state, even in the failed case, an explicit smp_mb() must be used.
	 */
	smp_mb__before_atomic();
	if (unlikely(cmpxchg_relaxed(&node->next, NULL, WAKE_Q_TAIL)))
		return false;

	/*
	 * The head is context local, there can be no concurrency.
	 */
	*head->lastp = node;
	head->lastp = &node->next;
	return true;
}

/**
 * wake_q_add() - queue a wakeup for 'later' waking.
 * @head: the wake_q_head to add @task to
 * @task: the task to queue for 'later' wakeup
 *
 * Queue a task for later wakeup, most likely by the wake_up_q() call in the
 * same context, _HOWEVER_ this is not guaranteed, the wakeup can come
 * instantly.
 *
 * This function must be used as-if it were wake_up_process(); IOW the task
 * must be ready to be woken at this location.
 */
void wake_q_add(struct wake_q_head *head, struct task_struct *task)
{
	if (__wake_q_add(head, task))
		get_task_struct(task);
}

/**
 * wake_q_add_safe() - safely queue a wakeup for 'later' waking.
 * @head: the wake_q_head to add @task to
 * @task: the task to queue for 'later' wakeup
 *
 * Queue a task for later wakeup, most likely by the wake_up_q() call in the
 * same context, _HOWEVER_ this is not guaranteed, the wakeup can come
 * instantly.
 *
 * This function must be used as-if it were wake_up_process(); IOW the task
 * must be ready to be woken at this location.
 *
 * This function is essentially a task-safe equivalent to wake_q_add(). Callers
 * that already hold reference to @task can call the 'safe' version and trust
 * wake_q to do the right thing depending whether or not the @task is already
 * queued for wakeup.
 */
void wake_q_add_safe(struct wake_q_head *head, struct task_struct *task)
{
	if (!__wake_q_add(head, task))
		put_task_struct(task);
}

void wake_up_q(struct wake_q_head *head)
{
	struct wake_q_node *node = head->first;

	while (node != WAKE_Q_TAIL) {
		struct task_struct *task;

		task = container_of(node, struct task_struct, wake_q);
		/* Task can safely be re-inserted now: */
		node = node->next;
		task->wake_q.next = NULL;

		/*
		 * wake_up_process() executes a full barrier, which pairs with
		 * the queueing in wake_q_add() so as not to miss wakeups.
		 */
		wake_up_process(task);
		put_task_struct(task);
	}
}

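/*
 * Typical wake_q usage, for illustration (DEFINE_WAKE_Q() comes from
 * <linux/sched/wake_q.h>; this sketch is not part of the original file):
 *
 *	DEFINE_WAKE_Q(wake_q);
 *
 *	raw_spin_lock(&some_lock);
 *	...
 *	wake_q_add(&wake_q, task);	// just queues, cheap under the lock
 *	...
 *	raw_spin_unlock(&some_lock);
 *	wake_up_q(&wake_q);		// actual wakeups, lock already dropped
 *
 * This keeps the (potentially expensive) wake_up_process() calls outside
 * the critical section.
 */
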
/*
 * resched_curr - mark rq's current task 'to be rescheduled now'.
 *
 * On UP this means the setting of the need_resched flag, on SMP it
 * might also involve a cross-CPU call to trigger the scheduler on
 * the target CPU.
 */
void resched_curr(struct rq *rq)
{
	struct task_struct *curr = rq->curr;
	int cpu;

	lockdep_assert_rq_held(rq);

	if (test_tsk_need_resched(curr))
		return;

	cpu = cpu_of(rq);

	if (cpu == smp_processor_id()) {
		set_tsk_need_resched(curr);
		set_preempt_need_resched();
		return;
	}

	if (set_nr_and_not_polling(curr))
		smp_send_reschedule(cpu);
	else
		trace_sched_wake_idle_without_ipi(cpu);
}

void resched_cpu(int cpu)
{
	struct rq *rq = cpu_rq(cpu);
	unsigned long flags;

	raw_spin_rq_lock_irqsave(rq, flags);
	if (cpu_online(cpu) || cpu == smp_processor_id())
		resched_curr(rq);
	raw_spin_rq_unlock_irqrestore(rq, flags);
}

#ifdef CONFIG_SMP
#ifdef CONFIG_NO_HZ_COMMON
/*
 * In the semi idle case, use the nearest busy CPU for migrating timers
 * from an idle CPU. This is good for power-savings.
 *
 * We don't do a similar optimization for a completely idle system, as
 * selecting an idle CPU will add more delays to the timers than intended
 * (as that CPU's timer base may not be up to date wrt jiffies etc).
 */
int get_nohz_timer_target(void)
{
	int i, cpu = smp_processor_id(), default_cpu = -1;
	struct sched_domain *sd;
	const struct cpumask *hk_mask;

	if (housekeeping_cpu(cpu, HK_TYPE_TIMER)) {
		if (!idle_cpu(cpu))
			return cpu;
		default_cpu = cpu;
	}

	hk_mask = housekeeping_cpumask(HK_TYPE_TIMER);

	guard(rcu)();

	for_each_domain(cpu, sd) {
		for_each_cpu_and(i, sched_domain_span(sd), hk_mask) {
			if (cpu == i)
				continue;

			if (!idle_cpu(i))
				return i;
		}
	}

	if (default_cpu == -1)
		default_cpu = housekeeping_any_cpu(HK_TYPE_TIMER);

	return default_cpu;
}

/*
 * When add_timer_on() enqueues a timer into the timer wheel of an
 * idle CPU then this timer might expire before the next timer event
 * which is scheduled to wake up that CPU. In case of a completely
 * idle system the next event might even be infinite time into the
 * future. wake_up_idle_cpu() ensures that the CPU is woken up and
 * leaves the inner idle loop so the newly added timer is taken into
 * account when the CPU goes back to idle and evaluates the timer
 * wheel for the next timer event.
 */
static void wake_up_idle_cpu(int cpu)
{
	struct rq *rq = cpu_rq(cpu);

	if (cpu == smp_processor_id())
		return;

	if (set_nr_and_not_polling(rq->idle))
		smp_send_reschedule(cpu);
	else
		trace_sched_wake_idle_without_ipi(cpu);
}

static bool wake_up_full_nohz_cpu(int cpu)
{
	/*
	 * We just need the target to call irq_exit() and re-evaluate
	 * the next tick. The nohz full kick at least implies that.
	 * If needed we can still optimize that later with an
	 * empty IRQ.
	 */
	if (cpu_is_offline(cpu))
		return true; /* Don't try to wake offline CPUs. */
	if (tick_nohz_full_cpu(cpu)) {
		if (cpu != smp_processor_id() ||
		    tick_nohz_tick_stopped())
			tick_nohz_full_kick_cpu(cpu);
		return true;
	}

	return false;
}

/*
 * Wake up the specified CPU. If the CPU is going offline, it is the
 * caller's responsibility to deal with the lost wakeup, for example,
 * by hooking into the CPU_DEAD notifier like timers and hrtimers do.
 */
void wake_up_nohz_cpu(int cpu)
{
	if (!wake_up_full_nohz_cpu(cpu))
		wake_up_idle_cpu(cpu);
}

static void nohz_csd_func(void *info)
{
	struct rq *rq = info;
	int cpu = cpu_of(rq);
	unsigned int flags;

	/*
	 * Release the rq::nohz_csd.
	 */
	flags = atomic_fetch_andnot(NOHZ_KICK_MASK | NOHZ_NEWILB_KICK, nohz_flags(cpu));
	WARN_ON(!(flags & NOHZ_KICK_MASK));

	rq->idle_balance = idle_cpu(cpu);
	if (rq->idle_balance && !need_resched()) {
		rq->nohz_idle_balance = flags;
		raise_softirq_irqoff(SCHED_SOFTIRQ);
	}
}

#endif /* CONFIG_NO_HZ_COMMON */

#ifdef CONFIG_NO_HZ_FULL
static inline bool __need_bw_check(struct rq *rq, struct task_struct *p)
{
	if (rq->nr_running != 1)
		return false;

	if (p->sched_class != &fair_sched_class)
		return false;

	if (!task_on_rq_queued(p))
		return false;

	return true;
}

bool sched_can_stop_tick(struct rq *rq)
{
	int fifo_nr_running;

	/* Deadline tasks, even if single, need the tick */
	if (rq->dl.dl_nr_running)
		return false;

	/*
	 * If there is more than one RR task, we need the tick to affect the
	 * actual RR behaviour.
	 */
	if (rq->rt.rr_nr_running) {
		if (rq->rt.rr_nr_running == 1)
			return true;
		else
			return false;
	}

	/*
	 * If there are no RR tasks, but there are FIFO tasks, we can skip the
	 * tick: there is no forced preemption between FIFO tasks.
	 */
	fifo_nr_running = rq->rt.rt_nr_running - rq->rt.rr_nr_running;
	if (fifo_nr_running)
		return true;

	/*
	 * If there are no DL, RR or FIFO tasks, there must only be CFS tasks
	 * left; if there's more than one we need the tick for involuntary
	 * preemption.
	 */
	if (rq->nr_running > 1)
		return false;

	/*
	 * If there is one task and it has CFS runtime bandwidth constraints
	 * and it's on the CPU now, we don't want to stop the tick.
	 * This check prevents clearing the bit if a newly enqueued task here is
	 * dequeued by migrating while the constrained task continues to run.
	 * E.g. going from 2->1 without going through pick_next_task().
	 */
	if (sched_feat(HZ_BW) && __need_bw_check(rq, rq->curr)) {
		if (cfs_task_bw_constrained(rq->curr))
			return false;
	}

	return true;
}
#endif /* CONFIG_NO_HZ_FULL */
#endif /* CONFIG_SMP */

#if defined(CONFIG_RT_GROUP_SCHED) || (defined(CONFIG_FAIR_GROUP_SCHED) && \
			(defined(CONFIG_SMP) || defined(CONFIG_CFS_BANDWIDTH)))
/*
 * Iterate task_group tree rooted at *from, calling @down when first entering a
 * node and @up when leaving it for the final time.
 *
 * Caller must hold rcu_lock or sufficient equivalent.
 */
int walk_tg_tree_from(struct task_group *from,
		      tg_visitor down, tg_visitor up, void *data)
{
	struct task_group *parent, *child;
	int ret;

	parent = from;

down:
	ret = (*down)(parent, data);
	if (ret)
		goto out;
	list_for_each_entry_rcu(child, &parent->children, siblings) {
		parent = child;
		goto down;

up:
		continue;
	}
	ret = (*up)(parent, data);
	if (ret || parent == from)
		goto out;

	child = parent;
	parent = parent->parent;
	if (parent)
		goto up;
out:
	return ret;
}

int tg_nop(struct task_group *tg, void *data)
{
	return 0;
}
#endif

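/*
 * Typical traversal, for illustration (walk_tg_tree() in sched.h wraps this
 * with from == &root_task_group; the sketch below is not from this file):
 *
 *	rcu_read_lock();
 *	// call my_down() pre-order on every group, tg_nop() on the way up
 *	ret = walk_tg_tree_from(&root_task_group, my_down, tg_nop, data);
 *	rcu_read_unlock();
 *
 * The goto-based loop above is just a non-recursive depth-first walk of
 * the task_group tree.
 */
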
static void set_load_weight(struct task_struct *p, bool update_load)
{
	int prio = p->static_prio - MAX_RT_PRIO;
	struct load_weight *load = &p->se.load;

	/*
	 * SCHED_IDLE tasks get minimal weight:
	 */
	if (task_has_idle_policy(p)) {
		load->weight = scale_load(WEIGHT_IDLEPRIO);
		load->inv_weight = WMULT_IDLEPRIO;
		return;
	}

	/*
	 * SCHED_OTHER tasks have to update their load when changing their
	 * weight
	 */
	if (update_load && p->sched_class == &fair_sched_class) {
		reweight_task(p, prio);
	} else {
		load->weight = scale_load(sched_prio_to_weight[prio]);
		load->inv_weight = sched_prio_to_wmult[prio];
	}
}

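/*
 * Worked example (values taken from the sched_prio_to_weight[] table, shown
 * here only for illustration): a nice-0 task has static_prio == 120, so
 * prio == 120 - MAX_RT_PRIO == 20 and sched_prio_to_weight[20] == 1024,
 * the canonical unit weight. Each nice step changes the weight by roughly
 * 25%; nice -20 maps to 88761 and nice +19 maps to 15.
 */
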
#ifdef CONFIG_UCLAMP_TASK
/*
 * Serializes updates of utilization clamp values
 *
 * The (slow-path) user-space triggers utilization clamp value updates which
 * can require updates on (fast-path) scheduler's data structures used to
 * support enqueue/dequeue operations.
 * While the per-CPU rq lock protects fast-path update operations, user-space
 * requests are serialized using a mutex to reduce the risk of conflicting
 * updates or API abuses.
 */
static DEFINE_MUTEX(uclamp_mutex);

/* Max allowed minimum utilization */
static unsigned int __maybe_unused sysctl_sched_uclamp_util_min = SCHED_CAPACITY_SCALE;

/* Max allowed maximum utilization */
static unsigned int __maybe_unused sysctl_sched_uclamp_util_max = SCHED_CAPACITY_SCALE;

/*
 * By default RT tasks run at the maximum performance point/capacity of the
 * system. Uclamp enforces this by always setting UCLAMP_MIN of RT tasks to
 * SCHED_CAPACITY_SCALE.
 *
 * This knob allows admins to change the default behavior when uclamp is being
 * used. In battery powered devices, particularly, running at the maximum
 * capacity and frequency will increase energy consumption and shorten the
 * battery life.
 *
 * This knob only affects RT tasks whose uclamp_se->user_defined == false.
 *
 * This knob will not override the system default sched_util_clamp_min defined
 * above.
 */
static unsigned int sysctl_sched_uclamp_util_min_rt_default = SCHED_CAPACITY_SCALE;

/* All clamps are required to be less than or equal to these values */
static struct uclamp_se uclamp_default[UCLAMP_CNT];

/*
 * This static key is used to reduce the uclamp overhead in the fast path. It
 * primarily disables the call to uclamp_rq_{inc, dec}() in
 * enqueue/dequeue_task().
 *
 * This allows users to continue to enable uclamp in their kernel config with
 * minimum uclamp overhead in the fast path.
 *
 * As soon as userspace modifies any of the uclamp knobs, the static key is
 * enabled, since there are actual users that make use of uclamp
 * functionality.
 *
 * The knobs that would enable this static key are:
 *
 *   * A task modifying its uclamp value with sched_setattr().
 *   * An admin modifying the sysctl_sched_uclamp_{min, max} via procfs.
 *   * An admin modifying the cgroup cpu.uclamp.{min, max}
 */
DEFINE_STATIC_KEY_FALSE(sched_uclamp_used);

/* Integer rounded range for each bucket */
#define UCLAMP_BUCKET_DELTA DIV_ROUND_CLOSEST(SCHED_CAPACITY_SCALE, UCLAMP_BUCKETS)

#define for_each_clamp_id(clamp_id) \
	for ((clamp_id) = 0; (clamp_id) < UCLAMP_CNT; (clamp_id)++)

static inline unsigned int uclamp_bucket_id(unsigned int clamp_value)
{
	return min_t(unsigned int, clamp_value / UCLAMP_BUCKET_DELTA, UCLAMP_BUCKETS - 1);
}

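/*
 * Worked example (assuming the default CONFIG_UCLAMP_BUCKETS_COUNT == 5,
 * an assumption made only for this illustration): UCLAMP_BUCKET_DELTA ==
 * DIV_ROUND_CLOSEST(1024, 5) == 205, so clamp values map to buckets as
 *
 *	[0..204] -> 0, [205..409] -> 1, ..., e.g. 512/205 == 2.
 *
 * The min_t() guards the rounding cases where SCHED_CAPACITY_SCALE would
 * index past the last bucket; e.g. with 20 buckets, DELTA == 51 and
 * 1024/51 == 20 would otherwise overflow UCLAMP_BUCKETS - 1 == 19.
 */
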
static inline unsigned int uclamp_none(enum uclamp_id clamp_id)
{
	if (clamp_id == UCLAMP_MIN)
		return 0;
	return SCHED_CAPACITY_SCALE;
}

static inline void uclamp_se_set(struct uclamp_se *uc_se,
				 unsigned int value, bool user_defined)
{
	uc_se->value = value;
	uc_se->bucket_id = uclamp_bucket_id(value);
	uc_se->user_defined = user_defined;
}

static inline unsigned int
uclamp_idle_value(struct rq *rq, enum uclamp_id clamp_id,
		  unsigned int clamp_value)
{
	/*
	 * Avoid blocked utilization pushing up the frequency when we go
	 * idle (which drops the max-clamp) by retaining the last known
	 * max-clamp.
	 */
	if (clamp_id == UCLAMP_MAX) {
		rq->uclamp_flags |= UCLAMP_FLAG_IDLE;
		return clamp_value;
	}

	return uclamp_none(UCLAMP_MIN);
}

static inline void uclamp_idle_reset(struct rq *rq, enum uclamp_id clamp_id,
				     unsigned int clamp_value)
{
	/* Reset max-clamp retention only on idle exit */
	if (!(rq->uclamp_flags & UCLAMP_FLAG_IDLE))
		return;

	uclamp_rq_set(rq, clamp_id, clamp_value);
}

static inline
unsigned int uclamp_rq_max_value(struct rq *rq, enum uclamp_id clamp_id,
				 unsigned int clamp_value)
{
	struct uclamp_bucket *bucket = rq->uclamp[clamp_id].bucket;
	int bucket_id = UCLAMP_BUCKETS - 1;

	/*
	 * Since both min and max clamps are max aggregated, find the
	 * topmost bucket with tasks in it.
	 */
	for ( ; bucket_id >= 0; bucket_id--) {
		if (!bucket[bucket_id].tasks)
			continue;
		return bucket[bucket_id].value;
	}

	/* No tasks -- default clamp values */
	return uclamp_idle_value(rq, clamp_id, clamp_value);
}

static void __uclamp_update_util_min_rt_default(struct task_struct *p)
{
	unsigned int default_util_min;
	struct uclamp_se *uc_se;

	lockdep_assert_held(&p->pi_lock);

	uc_se = &p->uclamp_req[UCLAMP_MIN];

	/* Only sync if user didn't override the default */
	if (uc_se->user_defined)
		return;

	default_util_min = sysctl_sched_uclamp_util_min_rt_default;
	uclamp_se_set(uc_se, default_util_min, false);
}

static void uclamp_update_util_min_rt_default(struct task_struct *p)
{
	if (!rt_task(p))
		return;

	/* Protect updates to p->uclamp_* */
	guard(task_rq_lock)(p);
	__uclamp_update_util_min_rt_default(p);
}

3eac870a | 1491 | static inline struct uclamp_se |
0413d7f3 | 1492 | uclamp_tg_restrict(struct task_struct *p, enum uclamp_id clamp_id) |
3eac870a | 1493 | { |
0213b708 | 1494 | /* Copy by value as we could modify it */ |
3eac870a PB |
1495 | struct uclamp_se uc_req = p->uclamp_req[clamp_id]; |
1496 | #ifdef CONFIG_UCLAMP_TASK_GROUP | |
0213b708 | 1497 | unsigned int tg_min, tg_max, value; |
3eac870a PB |
1498 | |
1499 | /* | |
1500 | * Tasks in autogroups or the root task group will be | |
1501 | * restricted by system defaults. | |
1502 | */ | |
1503 | if (task_group_is_autogroup(task_group(p))) | |
1504 | return uc_req; | |
1505 | if (task_group(p) == &root_task_group) | |
1506 | return uc_req; | |
1507 | ||
0213b708 QY |
1508 | tg_min = task_group(p)->uclamp[UCLAMP_MIN].value; |
1509 | tg_max = task_group(p)->uclamp[UCLAMP_MAX].value; | |
1510 | value = uc_req.value; | |
1511 | value = clamp(value, tg_min, tg_max); | |
1512 | uclamp_se_set(&uc_req, value, false); | |
3eac870a PB |
1513 | #endif |
1514 | ||
1515 | return uc_req; | |
1516 | } | |
1517 | ||
e8f14172 PB |
1518 | /* |
1519 | * The effective clamp bucket index of a task depends on, by increasing | |
1520 | * priority: | |
1521 | * - the task specific clamp value, when explicitly requested from userspace | |
3eac870a PB |
1522 | * - the task group effective clamp value, for tasks neither in the root | |
1523 | * group nor in an autogroup | |
e8f14172 PB |
1524 | * - the system default clamp value, defined by the sysadmin |
1525 | */ | |
1526 | static inline struct uclamp_se | |
0413d7f3 | 1527 | uclamp_eff_get(struct task_struct *p, enum uclamp_id clamp_id) |
e8f14172 | 1528 | { |
3eac870a | 1529 | struct uclamp_se uc_req = uclamp_tg_restrict(p, clamp_id); |
e8f14172 PB |
1530 | struct uclamp_se uc_max = uclamp_default[clamp_id]; |
1531 | ||
1532 | /* System default restrictions always apply */ | |
1533 | if (unlikely(uc_req.value > uc_max.value)) | |
1534 | return uc_max; | |
1535 | ||
1536 | return uc_req; | |
1537 | } | |
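Condensed into a sketch, assuming plain values and ignoring the bucket bookkeeping, the resolution order reads: restrict the task's request to its task group's [tg_min, tg_max] window, then cap it with the system default.

	static unsigned int eff_value(unsigned int task_req,
				      unsigned int tg_min, unsigned int tg_max,
				      unsigned int sys_default)
	{
		unsigned int v = task_req;

		/* uclamp_tg_restrict(): clamp into the task group's window */
		if (v < tg_min)
			v = tg_min;
		if (v > tg_max)
			v = tg_max;

		/* uclamp_eff_get(): system default restrictions always apply */
		return v > sys_default ? sys_default : v;
	}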
1538 | ||
686516b5 | 1539 | unsigned long uclamp_eff_value(struct task_struct *p, enum uclamp_id clamp_id) |
9d20ad7d PB |
1540 | { |
1541 | struct uclamp_se uc_eff; | |
1542 | ||
1543 | /* Task currently refcounted: use back-annotated (effective) value */ | |
1544 | if (p->uclamp[clamp_id].active) | |
686516b5 | 1545 | return (unsigned long)p->uclamp[clamp_id].value; |
9d20ad7d PB |
1546 | |
1547 | uc_eff = uclamp_eff_get(p, clamp_id); | |
1548 | ||
686516b5 | 1549 | return (unsigned long)uc_eff.value; |
9d20ad7d PB |
1550 | } |
1551 | ||
69842cba PB |
1552 | /* |
1553 | * When a task is enqueued on a rq, the clamp bucket currently defined by the | |
1554 | * task's uclamp::bucket_id is refcounted on that rq. This also immediately | |
1555 | * updates the rq's clamp value if required. | |
60daf9c1 PB |
1556 | * |
1557 | * Tasks can have a task-specific value requested from user-space; track | |
1558 | * within each bucket the maximum value for the tasks refcounted in it. | |
1559 | * This "local max aggregation" allows tracking the exact "requested" value | |
1560 | * for each bucket when all its RUNNABLE tasks require the same clamp. | |
69842cba PB |
1561 | */ |
1562 | static inline void uclamp_rq_inc_id(struct rq *rq, struct task_struct *p, | |
0413d7f3 | 1563 | enum uclamp_id clamp_id) |
69842cba PB |
1564 | { |
1565 | struct uclamp_rq *uc_rq = &rq->uclamp[clamp_id]; | |
1566 | struct uclamp_se *uc_se = &p->uclamp[clamp_id]; | |
1567 | struct uclamp_bucket *bucket; | |
1568 | ||
5cb9eaa3 | 1569 | lockdep_assert_rq_held(rq); |
69842cba | 1570 | |
e8f14172 PB |
1571 | /* Update task effective clamp */ |
1572 | p->uclamp[clamp_id] = uclamp_eff_get(p, clamp_id); | |
1573 | ||
69842cba PB |
1574 | bucket = &uc_rq->bucket[uc_se->bucket_id]; |
1575 | bucket->tasks++; | |
e8f14172 | 1576 | uc_se->active = true; |
69842cba | 1577 | |
e496187d PB |
1578 | uclamp_idle_reset(rq, clamp_id, uc_se->value); |
1579 | ||
60daf9c1 PB |
1580 | /* |
1581 | * Local max aggregation: rq buckets always track the max | |
1582 | * "requested" clamp value of its RUNNABLE tasks. | |
1583 | */ | |
1584 | if (bucket->tasks == 1 || uc_se->value > bucket->value) | |
1585 | bucket->value = uc_se->value; | |
1586 | ||
24422603 QY |
1587 | if (uc_se->value > uclamp_rq_get(rq, clamp_id)) |
1588 | uclamp_rq_set(rq, clamp_id, uc_se->value); | |
69842cba PB |
1589 | } |
1590 | ||
1591 | /* | |
1592 | * When a task is dequeued from a rq, the clamp bucket refcounted by the task | |
1593 | * is released. If this is the last task reference counting the rq's max | |
1594 | * active clamp value, then the rq's clamp value is updated. | |
1595 | * | |
1596 | * Both refcounted tasks and rq's cached clamp values are expected to | |
1597 | * always be valid. If they are detected not to be, as defensive programming, | |
1598 | * enforce the expected state and warn. | |
1599 | */ | |
1600 | static inline void uclamp_rq_dec_id(struct rq *rq, struct task_struct *p, | |
0413d7f3 | 1601 | enum uclamp_id clamp_id) |
69842cba PB |
1602 | { |
1603 | struct uclamp_rq *uc_rq = &rq->uclamp[clamp_id]; | |
1604 | struct uclamp_se *uc_se = &p->uclamp[clamp_id]; | |
1605 | struct uclamp_bucket *bucket; | |
e496187d | 1606 | unsigned int bkt_clamp; |
69842cba PB |
1607 | unsigned int rq_clamp; |
1608 | ||
5cb9eaa3 | 1609 | lockdep_assert_rq_held(rq); |
69842cba | 1610 | |
46609ce2 QY |
1611 | /* |
1612 | * If sched_uclamp_used was enabled after task @p was enqueued, | |
1613 | * we could end up with an unbalanced call to uclamp_rq_dec_id(). | |
1614 | * | |
1615 | * In this case the uc_se->active flag should be false since no uclamp | |
1616 | * accounting was performed at enqueue time and we can just return | |
1617 | * here. | |
1618 | * | |
b19a888c | 1619 | * Need to be careful of the following enqueue/dequeue ordering |
46609ce2 QY |
1620 | * problem too |
1621 | * | |
1622 | * enqueue(taskA) | |
1623 | * // sched_uclamp_used gets enabled | |
1624 | * enqueue(taskB) | |
1625 | * dequeue(taskA) | |
b19a888c | 1626 | * // Must not decrement bucket->tasks here |
46609ce2 QY |
1627 | * dequeue(taskB) |
1628 | * | |
1629 | * where we could end up with stale data in uc_se and | |
1630 | * bucket[uc_se->bucket_id]. | |
1631 | * | |
1632 | * The following check eliminates the possibility of such a race. | |
1633 | */ | |
1634 | if (unlikely(!uc_se->active)) | |
1635 | return; | |
1636 | ||
69842cba | 1637 | bucket = &uc_rq->bucket[uc_se->bucket_id]; |
46609ce2 | 1638 | |
69842cba PB |
1639 | SCHED_WARN_ON(!bucket->tasks); |
1640 | if (likely(bucket->tasks)) | |
1641 | bucket->tasks--; | |
46609ce2 | 1642 | |
e8f14172 | 1643 | uc_se->active = false; |
69842cba | 1644 | |
60daf9c1 PB |
1645 | /* |
1646 | * Keep "local max aggregation" simple and accept to (possibly) | |
1647 | * overboost some RUNNABLE tasks in the same bucket. | |
1648 | * The rq clamp bucket value is reset to its base value whenever | |
1649 | * there are no more RUNNABLE tasks refcounting it. | |
1650 | */ | |
69842cba PB |
1651 | if (likely(bucket->tasks)) |
1652 | return; | |
1653 | ||
24422603 | 1654 | rq_clamp = uclamp_rq_get(rq, clamp_id); |
69842cba PB |
1655 | /* |
1656 | * Defensive programming: this should never happen. If it happens, | |
1657 | * e.g. due to future modification, warn and fix up the expected value. | |
1658 | */ | |
1659 | SCHED_WARN_ON(bucket->value > rq_clamp); | |
e496187d PB |
1660 | if (bucket->value >= rq_clamp) { |
1661 | bkt_clamp = uclamp_rq_max_value(rq, clamp_id, uc_se->value); | |
24422603 | 1662 | uclamp_rq_set(rq, clamp_id, bkt_clamp); |
e496187d | 1663 | } |
69842cba PB |
1664 | } |
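A self-contained toy model of this inc/dec pair, showing "local max aggregation" without locking, the idle flag, or the per-task uclamp_se state; the empty-rq fallback is simplified to 0 instead of uclamp_idle_value(), and all names are illustrative.

	struct tbucket { unsigned int tasks, value; };
	struct toy_rq  { struct tbucket b[5]; unsigned int value; };

	static void toy_inc(struct toy_rq *rq, unsigned int id, unsigned int clamp)
	{
		struct tbucket *b = &rq->b[id];

		b->tasks++;
		/* the bucket tracks the max "requested" value of its tasks */
		if (b->tasks == 1 || clamp > b->value)
			b->value = clamp;
		if (clamp > rq->value)
			rq->value = clamp;
	}

	static void toy_dec(struct toy_rq *rq, unsigned int id)
	{
		struct tbucket *b = &rq->b[id];

		if (b->tasks)
			b->tasks--;
		if (b->tasks)
			return;			/* others still refcount this bucket */

		/* bucket emptied: if it defined the rq max, rescan top-down */
		if (b->value >= rq->value) {
			rq->value = 0;		/* simplified idle fallback */
			for (int i = 4; i >= 0; i--) {
				if (rq->b[i].tasks) {
					rq->value = rq->b[i].value;
					break;
				}
			}
		}
	}

	int main(void)
	{
		struct toy_rq rq = { 0 };

		toy_inc(&rq, 2, 512);	/* one task requesting 512 */
		toy_inc(&rq, 4, 900);	/* a second one requesting 900 */
		toy_dec(&rq, 4);	/* 900 leaves: rq falls back to 512 */
		return rq.value == 512 ? 0 : 1;
	}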
1665 | ||
1666 | static inline void uclamp_rq_inc(struct rq *rq, struct task_struct *p) | |
1667 | { | |
0413d7f3 | 1668 | enum uclamp_id clamp_id; |
69842cba | 1669 | |
46609ce2 QY |
1670 | /* |
1671 | * Avoid any overhead until uclamp is actually used by userspace. | |
1672 | * | |
1673 | * The condition is constructed such that a NOP is generated when | |
1674 | * sched_uclamp_used is disabled. | |
1675 | */ | |
1676 | if (!static_branch_unlikely(&sched_uclamp_used)) | |
1677 | return; | |
1678 | ||
69842cba PB |
1679 | if (unlikely(!p->sched_class->uclamp_enabled)) |
1680 | return; | |
1681 | ||
1682 | for_each_clamp_id(clamp_id) | |
1683 | uclamp_rq_inc_id(rq, p, clamp_id); | |
e496187d PB |
1684 | |
1685 | /* Reset clamp idle holding when there is one RUNNABLE task */ | |
1686 | if (rq->uclamp_flags & UCLAMP_FLAG_IDLE) | |
1687 | rq->uclamp_flags &= ~UCLAMP_FLAG_IDLE; | |
69842cba PB |
1688 | } |
1689 | ||
1690 | static inline void uclamp_rq_dec(struct rq *rq, struct task_struct *p) | |
1691 | { | |
0413d7f3 | 1692 | enum uclamp_id clamp_id; |
69842cba | 1693 | |
46609ce2 QY |
1694 | /* |
1695 | * Avoid any overhead until uclamp is actually used by userspace. | |
1696 | * | |
1697 | * The condition is constructed such that a NOP is generated when | |
1698 | * sched_uclamp_used is disabled. | |
1699 | */ | |
1700 | if (!static_branch_unlikely(&sched_uclamp_used)) | |
1701 | return; | |
1702 | ||
69842cba PB |
1703 | if (unlikely(!p->sched_class->uclamp_enabled)) |
1704 | return; | |
1705 | ||
1706 | for_each_clamp_id(clamp_id) | |
1707 | uclamp_rq_dec_id(rq, p, clamp_id); | |
1708 | } | |
1709 | ||
ca4984a7 QP |
1710 | static inline void uclamp_rq_reinc_id(struct rq *rq, struct task_struct *p, |
1711 | enum uclamp_id clamp_id) | |
1712 | { | |
1713 | if (!p->uclamp[clamp_id].active) | |
1714 | return; | |
1715 | ||
1716 | uclamp_rq_dec_id(rq, p, clamp_id); | |
1717 | uclamp_rq_inc_id(rq, p, clamp_id); | |
1718 | ||
1719 | /* | |
1720 | * Make sure to clear the idle flag if we've transiently reached 0 | |
1721 | * active tasks on rq. | |
1722 | */ | |
1723 | if (clamp_id == UCLAMP_MAX && (rq->uclamp_flags & UCLAMP_FLAG_IDLE)) | |
1724 | rq->uclamp_flags &= ~UCLAMP_FLAG_IDLE; | |
1725 | } | |
1726 | ||
babbe170 | 1727 | static inline void |
0213b708 | 1728 | uclamp_update_active(struct task_struct *p) |
babbe170 | 1729 | { |
0213b708 | 1730 | enum uclamp_id clamp_id; |
babbe170 PB |
1731 | struct rq_flags rf; |
1732 | struct rq *rq; | |
1733 | ||
1734 | /* | |
1735 | * Lock the task and the rq where the task is (or was) queued. | |
1736 | * | |
1737 | * We might lock the (previous) rq of a !RUNNABLE task, but that's the | |
1738 | * price to pay to safely serialize util_{min,max} updates with | |
1739 | * enqueues, dequeues and migration operations. | |
1740 | * This is the same locking schema used by __set_cpus_allowed_ptr(). | |
1741 | */ | |
1742 | rq = task_rq_lock(p, &rf); | |
1743 | ||
1744 | /* | |
1745 | * Setting the clamp bucket is serialized by task_rq_lock(). | |
1746 | * If the task is not yet RUNNABLE and its task_struct is not | |
1747 | * affecting a valid clamp bucket, the next time it's enqueued, | |
1748 | * it will already see the updated clamp bucket value. | |
1749 | */ | |
ca4984a7 QP |
1750 | for_each_clamp_id(clamp_id) |
1751 | uclamp_rq_reinc_id(rq, p, clamp_id); | |
babbe170 PB |
1752 | |
1753 | task_rq_unlock(rq, p, &rf); | |
1754 | } | |
1755 | ||
e3b8b6a0 | 1756 | #ifdef CONFIG_UCLAMP_TASK_GROUP |
babbe170 | 1757 | static inline void |
0213b708 | 1758 | uclamp_update_active_tasks(struct cgroup_subsys_state *css) |
babbe170 PB |
1759 | { |
1760 | struct css_task_iter it; | |
1761 | struct task_struct *p; | |
babbe170 PB |
1762 | |
1763 | css_task_iter_start(css, 0, &it); | |
0213b708 QY |
1764 | while ((p = css_task_iter_next(&it))) |
1765 | uclamp_update_active(p); | |
babbe170 PB |
1766 | css_task_iter_end(&it); |
1767 | } | |
1768 | ||
7274a5c1 | 1769 | static void cpu_util_update_eff(struct cgroup_subsys_state *css); |
494dcdf4 Y |
1770 | #endif |
1771 | ||
1772 | #ifdef CONFIG_SYSCTL | |
1773 | #ifdef CONFIG_UCLAMP_TASK | |
1774 | #ifdef CONFIG_UCLAMP_TASK_GROUP | |
7274a5c1 PB |
1775 | static void uclamp_update_root_tg(void) |
1776 | { | |
1777 | struct task_group *tg = &root_task_group; | |
1778 | ||
1779 | uclamp_se_set(&tg->uclamp_req[UCLAMP_MIN], | |
1780 | sysctl_sched_uclamp_util_min, false); | |
1781 | uclamp_se_set(&tg->uclamp_req[UCLAMP_MAX], | |
1782 | sysctl_sched_uclamp_util_max, false); | |
1783 | ||
0e34600a | 1784 | guard(rcu)(); |
7274a5c1 | 1785 | cpu_util_update_eff(&root_task_group.css); |
7274a5c1 PB |
1786 | } |
1787 | #else | |
1788 | static void uclamp_update_root_tg(void) { } | |
1789 | #endif | |
1790 | ||
494dcdf4 Y |
1791 | static void uclamp_sync_util_min_rt_default(void) |
1792 | { | |
1793 | struct task_struct *g, *p; | |
1794 | ||
1795 | /* | |
1796 | * copy_process() sysctl_uclamp | |
1797 | * uclamp_min_rt = X; | |
1798 | * write_lock(&tasklist_lock) read_lock(&tasklist_lock) | |
1799 | * // link thread smp_mb__after_spinlock() | |
1800 | * write_unlock(&tasklist_lock) read_unlock(&tasklist_lock); | |
1801 | * sched_post_fork() for_each_process_thread() | |
1802 | * __uclamp_sync_rt() __uclamp_sync_rt() | |
1803 | * | |
1804 | * Ensures that either sched_post_fork() will observe the new | |
1805 | * uclamp_min_rt or for_each_process_thread() will observe the new | |
1806 | * task. | |
1807 | */ | |
1808 | read_lock(&tasklist_lock); | |
1809 | smp_mb__after_spinlock(); | |
1810 | read_unlock(&tasklist_lock); | |
1811 | ||
0e34600a | 1812 | guard(rcu)(); |
494dcdf4 Y |
1813 | for_each_process_thread(g, p) |
1814 | uclamp_update_util_min_rt_default(p); | |
494dcdf4 Y |
1815 | } |
1816 | ||
3267e015 | 1817 | static int sysctl_sched_uclamp_handler(struct ctl_table *table, int write, |
32927393 | 1818 | void *buffer, size_t *lenp, loff_t *ppos) |
e8f14172 | 1819 | { |
7274a5c1 | 1820 | bool update_root_tg = false; |
13685c4a | 1821 | int old_min, old_max, old_min_rt; |
e8f14172 PB |
1822 | int result; |
1823 | ||
0f92cdf3 PZ |
1824 | guard(mutex)(&uclamp_mutex); |
1825 | ||
e8f14172 PB |
1826 | old_min = sysctl_sched_uclamp_util_min; |
1827 | old_max = sysctl_sched_uclamp_util_max; | |
13685c4a | 1828 | old_min_rt = sysctl_sched_uclamp_util_min_rt_default; |
e8f14172 PB |
1829 | |
1830 | result = proc_dointvec(table, write, buffer, lenp, ppos); | |
1831 | if (result) | |
1832 | goto undo; | |
1833 | if (!write) | |
0f92cdf3 | 1834 | return 0; |
e8f14172 PB |
1835 | |
1836 | if (sysctl_sched_uclamp_util_min > sysctl_sched_uclamp_util_max || | |
13685c4a QY |
1837 | sysctl_sched_uclamp_util_max > SCHED_CAPACITY_SCALE || |
1838 | sysctl_sched_uclamp_util_min_rt_default > SCHED_CAPACITY_SCALE) { | |
1839 | ||
e8f14172 PB |
1840 | result = -EINVAL; |
1841 | goto undo; | |
1842 | } | |
1843 | ||
1844 | if (old_min != sysctl_sched_uclamp_util_min) { | |
1845 | uclamp_se_set(&uclamp_default[UCLAMP_MIN], | |
a509a7cd | 1846 | sysctl_sched_uclamp_util_min, false); |
7274a5c1 | 1847 | update_root_tg = true; |
e8f14172 PB |
1848 | } |
1849 | if (old_max != sysctl_sched_uclamp_util_max) { | |
1850 | uclamp_se_set(&uclamp_default[UCLAMP_MAX], | |
a509a7cd | 1851 | sysctl_sched_uclamp_util_max, false); |
7274a5c1 | 1852 | update_root_tg = true; |
e8f14172 PB |
1853 | } |
1854 | ||
46609ce2 QY |
1855 | if (update_root_tg) { |
1856 | static_branch_enable(&sched_uclamp_used); | |
7274a5c1 | 1857 | uclamp_update_root_tg(); |
46609ce2 | 1858 | } |
7274a5c1 | 1859 | |
13685c4a QY |
1860 | if (old_min_rt != sysctl_sched_uclamp_util_min_rt_default) { |
1861 | static_branch_enable(&sched_uclamp_used); | |
1862 | uclamp_sync_util_min_rt_default(); | |
1863 | } | |
7274a5c1 | 1864 | |
e8f14172 | 1865 | /* |
7274a5c1 PB |
1866 | * We update all RUNNABLE tasks only when task groups are in use. |
1867 | * Otherwise, keep it simple and just do a lazy update at each task's | |
1868 | * next enqueue time. | |
e8f14172 | 1869 | */ |
0f92cdf3 | 1870 | return 0; |
e8f14172 PB |
1871 | |
1872 | undo: | |
1873 | sysctl_sched_uclamp_util_min = old_min; | |
1874 | sysctl_sched_uclamp_util_max = old_max; | |
13685c4a | 1875 | sysctl_sched_uclamp_util_min_rt_default = old_min_rt; |
e8f14172 PB |
1876 | return result; |
1877 | } | |
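This handler backs the kernel.sched_util_clamp_{min,max,min_rt_default} sysctls. A minimal (privileged) userspace sketch of driving one of them through procfs; the path matches the documented sysctl name, and the value written is just an example:

	#include <stdio.h>

	int main(void)
	{
		FILE *f = fopen("/proc/sys/kernel/sched_util_clamp_min", "w");

		if (!f) {
			perror("fopen");	/* needs root/CAP_SYS_ADMIN */
			return 1;
		}
		fprintf(f, "%d\n", 1024);	/* allow full boosting system-wide */
		return fclose(f) ? 1 : 0;
	}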
494dcdf4 Y |
1878 | #endif |
1879 | #endif | |
e8f14172 | 1880 | |
a509a7cd PB |
1881 | static int uclamp_validate(struct task_struct *p, |
1882 | const struct sched_attr *attr) | |
1883 | { | |
480a6ca2 DE |
1884 | int util_min = p->uclamp_req[UCLAMP_MIN].value; |
1885 | int util_max = p->uclamp_req[UCLAMP_MAX].value; | |
a509a7cd | 1886 | |
480a6ca2 DE |
1887 | if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP_MIN) { |
1888 | util_min = attr->sched_util_min; | |
a509a7cd | 1889 | |
480a6ca2 DE |
1890 | if (util_min + 1 > SCHED_CAPACITY_SCALE + 1) |
1891 | return -EINVAL; | |
1892 | } | |
1893 | ||
1894 | if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP_MAX) { | |
1895 | util_max = attr->sched_util_max; | |
1896 | ||
1897 | if (util_max + 1 > SCHED_CAPACITY_SCALE + 1) | |
1898 | return -EINVAL; | |
1899 | } | |
1900 | ||
1901 | if (util_min != -1 && util_max != -1 && util_min > util_max) | |
a509a7cd PB |
1902 | return -EINVAL; |
1903 | ||
e65855a5 QY |
1904 | /* |
1905 | * We have valid uclamp attributes; make sure uclamp is enabled. | |
1906 | * | |
1907 | * We need to do that here, because enabling static branches is a | |
1908 | * blocking operation which obviously cannot be done while holding | |
1909 | * scheduler locks. | |
1910 | */ | |
1911 | static_branch_enable(&sched_uclamp_used); | |
1912 | ||
a509a7cd PB |
1913 | return 0; |
1914 | } | |
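The "util_min + 1 > SCHED_CAPACITY_SCALE + 1" shape is a compact range check that also admits the -1 "reset/leave unchanged" sentinel: shifting both sides by one folds -1 into the accepted range. A small demonstration sketch:

	#include <stdio.h>

	#define SCHED_CAPACITY_SCALE 1024

	static int valid(int util)
	{
		/* -1 becomes 0 on the left-hand side, so it passes */
		return !(util + 1 > SCHED_CAPACITY_SCALE + 1);
	}

	int main(void)
	{
		int v[] = { -1, 0, 512, 1024, 1025 };

		for (unsigned int i = 0; i < sizeof(v) / sizeof(v[0]); i++)
			printf("%5d -> %s\n", v[i], valid(v[i]) ? "ok" : "-EINVAL");
		return 0;
	}

Here -1, 0, 512 and 1024 are accepted, while 1025 is rejected, matching the sentinel handling in uclamp_reset() below.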
1915 | ||
480a6ca2 DE |
1916 | static bool uclamp_reset(const struct sched_attr *attr, |
1917 | enum uclamp_id clamp_id, | |
1918 | struct uclamp_se *uc_se) | |
1919 | { | |
1920 | /* Reset on sched class change for a non user-defined clamp value. */ | |
1921 | if (likely(!(attr->sched_flags & SCHED_FLAG_UTIL_CLAMP)) && | |
1922 | !uc_se->user_defined) | |
1923 | return true; | |
1924 | ||
1925 | /* Reset on sched_util_{min,max} == -1. */ | |
1926 | if (clamp_id == UCLAMP_MIN && | |
1927 | attr->sched_flags & SCHED_FLAG_UTIL_CLAMP_MIN && | |
1928 | attr->sched_util_min == -1) { | |
1929 | return true; | |
1930 | } | |
1931 | ||
1932 | if (clamp_id == UCLAMP_MAX && | |
1933 | attr->sched_flags & SCHED_FLAG_UTIL_CLAMP_MAX && | |
1934 | attr->sched_util_max == -1) { | |
1935 | return true; | |
1936 | } | |
1937 | ||
1938 | return false; | |
1939 | } | |
1940 | ||
a509a7cd PB |
1941 | static void __setscheduler_uclamp(struct task_struct *p, |
1942 | const struct sched_attr *attr) | |
1943 | { | |
0413d7f3 | 1944 | enum uclamp_id clamp_id; |
1a00d999 | 1945 | |
1a00d999 PB |
1946 | for_each_clamp_id(clamp_id) { |
1947 | struct uclamp_se *uc_se = &p->uclamp_req[clamp_id]; | |
480a6ca2 | 1948 | unsigned int value; |
1a00d999 | 1949 | |
480a6ca2 | 1950 | if (!uclamp_reset(attr, clamp_id, uc_se)) |
1a00d999 PB |
1951 | continue; |
1952 | ||
13685c4a QY |
1953 | /* |
1954 | * RT tasks by default have a 100% boost value that can be modified | |
1955 | * at runtime. | |
1956 | */ | |
1a00d999 | 1957 | if (unlikely(rt_task(p) && clamp_id == UCLAMP_MIN)) |
480a6ca2 | 1958 | value = sysctl_sched_uclamp_util_min_rt_default; |
13685c4a | 1959 | else |
480a6ca2 DE |
1960 | value = uclamp_none(clamp_id); |
1961 | ||
1962 | uclamp_se_set(uc_se, value, false); | |
1a00d999 | 1963 | |
1a00d999 PB |
1964 | } |
1965 | ||
a509a7cd PB |
1966 | if (likely(!(attr->sched_flags & SCHED_FLAG_UTIL_CLAMP))) |
1967 | return; | |
1968 | ||
480a6ca2 DE |
1969 | if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP_MIN && |
1970 | attr->sched_util_min != -1) { | |
a509a7cd PB |
1971 | uclamp_se_set(&p->uclamp_req[UCLAMP_MIN], |
1972 | attr->sched_util_min, true); | |
1973 | } | |
1974 | ||
480a6ca2 DE |
1975 | if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP_MAX && |
1976 | attr->sched_util_max != -1) { | |
a509a7cd PB |
1977 | uclamp_se_set(&p->uclamp_req[UCLAMP_MAX], |
1978 | attr->sched_util_max, true); | |
1979 | } | |
1980 | } | |
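From userspace these per-task requests arrive via sched_setattr(2). A hedged sketch follows: glibc provides no wrapper, so the raw syscall is used, the struct layout and flag values are restated from the UAPI headers rather than included, and the clamp values chosen are arbitrary examples.

	#define _GNU_SOURCE
	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>
	#include <sys/syscall.h>
	#include <unistd.h>

	/* mirrors struct sched_attr from <linux/sched/types.h> */
	struct sched_attr {
		uint32_t size;
		uint32_t sched_policy;
		uint64_t sched_flags;
		int32_t  sched_nice;
		uint32_t sched_priority;
		uint64_t sched_runtime, sched_deadline, sched_period;
		uint32_t sched_util_min;
		uint32_t sched_util_max;
	};

	#define SCHED_FLAG_KEEP_ALL		0x18	/* keep policy and params */
	#define SCHED_FLAG_UTIL_CLAMP_MIN	0x20
	#define SCHED_FLAG_UTIL_CLAMP_MAX	0x40

	int main(void)
	{
		struct sched_attr attr;

		memset(&attr, 0, sizeof(attr));
		attr.size = sizeof(attr);
		attr.sched_flags = SCHED_FLAG_KEEP_ALL |
				   SCHED_FLAG_UTIL_CLAMP_MIN |
				   SCHED_FLAG_UTIL_CLAMP_MAX;
		attr.sched_util_min = 128;	/* boost: at least ~12% of capacity */
		attr.sched_util_max = 512;	/* cap: at most 50% of capacity */

		if (syscall(SYS_sched_setattr, 0 /* self */, &attr, 0))
			perror("sched_setattr");
		return 0;
	}

Passing -1 (i.e. (uint32_t)-1) in either field, with the matching flag set, resets that clamp to its default, which is the uclamp_reset() path above.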
1981 | ||
e8f14172 PB |
1982 | static void uclamp_fork(struct task_struct *p) |
1983 | { | |
0413d7f3 | 1984 | enum uclamp_id clamp_id; |
e8f14172 | 1985 | |
13685c4a QY |
1986 | /* |
1987 | * We don't need to hold task_rq_lock() when updating p->uclamp_* here | |
1988 | * as the task is still at its early fork stages. | |
1989 | */ | |
e8f14172 PB |
1990 | for_each_clamp_id(clamp_id) |
1991 | p->uclamp[clamp_id].active = false; | |
a87498ac PB |
1992 | |
1993 | if (likely(!p->sched_reset_on_fork)) | |
1994 | return; | |
1995 | ||
1996 | for_each_clamp_id(clamp_id) { | |
eaf5a92e QP |
1997 | uclamp_se_set(&p->uclamp_req[clamp_id], |
1998 | uclamp_none(clamp_id), false); | |
a87498ac | 1999 | } |
e8f14172 PB |
2000 | } |
2001 | ||
13685c4a QY |
2002 | static void uclamp_post_fork(struct task_struct *p) |
2003 | { | |
2004 | uclamp_update_util_min_rt_default(p); | |
2005 | } | |
2006 | ||
d81ae8aa QY |
2007 | static void __init init_uclamp_rq(struct rq *rq) |
2008 | { | |
2009 | enum uclamp_id clamp_id; | |
2010 | struct uclamp_rq *uc_rq = rq->uclamp; | |
2011 | ||
2012 | for_each_clamp_id(clamp_id) { | |
2013 | uc_rq[clamp_id] = (struct uclamp_rq) { | |
2014 | .value = uclamp_none(clamp_id) | |
2015 | }; | |
2016 | } | |
2017 | ||
315c4f88 | 2018 | rq->uclamp_flags = UCLAMP_FLAG_IDLE; |
d81ae8aa QY |
2019 | } |
2020 | ||
69842cba PB |
2021 | static void __init init_uclamp(void) |
2022 | { | |
e8f14172 | 2023 | struct uclamp_se uc_max = {}; |
0413d7f3 | 2024 | enum uclamp_id clamp_id; |
69842cba PB |
2025 | int cpu; |
2026 | ||
d81ae8aa QY |
2027 | for_each_possible_cpu(cpu) |
2028 | init_uclamp_rq(cpu_rq(cpu)); | |
69842cba | 2029 | |
69842cba | 2030 | for_each_clamp_id(clamp_id) { |
e8f14172 | 2031 | uclamp_se_set(&init_task.uclamp_req[clamp_id], |
a509a7cd | 2032 | uclamp_none(clamp_id), false); |
69842cba | 2033 | } |
e8f14172 PB |
2034 | |
2035 | /* System defaults allow max clamp values for both indexes */ | |
a509a7cd | 2036 | uclamp_se_set(&uc_max, uclamp_none(UCLAMP_MAX), false); |
2480c093 | 2037 | for_each_clamp_id(clamp_id) { |
e8f14172 | 2038 | uclamp_default[clamp_id] = uc_max; |
2480c093 PB |
2039 | #ifdef CONFIG_UCLAMP_TASK_GROUP |
2040 | root_task_group.uclamp_req[clamp_id] = uc_max; | |
0b60ba2d | 2041 | root_task_group.uclamp[clamp_id] = uc_max; |
2480c093 PB |
2042 | #endif |
2043 | } | |
69842cba PB |
2044 | } |
2045 | ||
2046 | #else /* CONFIG_UCLAMP_TASK */ | |
2047 | static inline void uclamp_rq_inc(struct rq *rq, struct task_struct *p) { } | |
2048 | static inline void uclamp_rq_dec(struct rq *rq, struct task_struct *p) { } | |
a509a7cd PB |
2049 | static inline int uclamp_validate(struct task_struct *p, |
2050 | const struct sched_attr *attr) | |
2051 | { | |
2052 | return -EOPNOTSUPP; | |
2053 | } | |
2054 | static void __setscheduler_uclamp(struct task_struct *p, | |
2055 | const struct sched_attr *attr) { } | |
e8f14172 | 2056 | static inline void uclamp_fork(struct task_struct *p) { } |
13685c4a | 2057 | static inline void uclamp_post_fork(struct task_struct *p) { } |
69842cba PB |
2058 | static inline void init_uclamp(void) { } |
2059 | #endif /* CONFIG_UCLAMP_TASK */ | |
2060 | ||
a1dfb631 MT |
2061 | bool sched_task_on_rq(struct task_struct *p) |
2062 | { | |
2063 | return task_on_rq_queued(p); | |
2064 | } | |
2065 | ||
42a20f86 KC |
2066 | unsigned long get_wchan(struct task_struct *p) |
2067 | { | |
2068 | unsigned long ip = 0; | |
2069 | unsigned int state; | |
2070 | ||
2071 | if (!p || p == current) | |
2072 | return 0; | |
2073 | ||
2074 | /* Only get wchan if task is blocked and we can keep it that way. */ | |
2075 | raw_spin_lock_irq(&p->pi_lock); | |
2076 | state = READ_ONCE(p->__state); | |
2077 | smp_rmb(); /* see try_to_wake_up() */ | |
2078 | if (state != TASK_RUNNING && state != TASK_WAKING && !p->on_rq) | |
2079 | ip = __get_wchan(p); | |
2080 | raw_spin_unlock_irq(&p->pi_lock); | |
2081 | ||
2082 | return ip; | |
2083 | } | |
2084 | ||
1de64443 | 2085 | static inline void enqueue_task(struct rq *rq, struct task_struct *p, int flags) |
2087a1ad | 2086 | { |
0a67d1ee PZ |
2087 | if (!(flags & ENQUEUE_NOCLOCK)) |
2088 | update_rq_clock(rq); | |
2089 | ||
eb414681 | 2090 | if (!(flags & ENQUEUE_RESTORE)) { |
4e29fb70 | 2091 | sched_info_enqueue(rq, p); |
52b33d87 | 2092 | psi_enqueue(p, (flags & ENQUEUE_WAKEUP) && !(flags & ENQUEUE_MIGRATED)); |
eb414681 | 2093 | } |
0a67d1ee | 2094 | |
69842cba | 2095 | uclamp_rq_inc(rq, p); |
371fd7e7 | 2096 | p->sched_class->enqueue_task(rq, p, flags); |
8a311c74 PZ |
2097 | |
2098 | if (sched_core_enabled(rq)) | |
2099 | sched_core_enqueue(rq, p); | |
71f8bd46 IM |
2100 | } |
2101 | ||
1de64443 | 2102 | static inline void dequeue_task(struct rq *rq, struct task_struct *p, int flags) |
71f8bd46 | 2103 | { |
8a311c74 | 2104 | if (sched_core_enabled(rq)) |
4feee7d1 | 2105 | sched_core_dequeue(rq, p, flags); |
8a311c74 | 2106 | |
0a67d1ee PZ |
2107 | if (!(flags & DEQUEUE_NOCLOCK)) |
2108 | update_rq_clock(rq); | |
2109 | ||
eb414681 | 2110 | if (!(flags & DEQUEUE_SAVE)) { |
4e29fb70 | 2111 | sched_info_dequeue(rq, p); |
eb414681 JW |
2112 | psi_dequeue(p, flags & DEQUEUE_SLEEP); |
2113 | } | |
0a67d1ee | 2114 | |
69842cba | 2115 | uclamp_rq_dec(rq, p); |
371fd7e7 | 2116 | p->sched_class->dequeue_task(rq, p, flags); |
71f8bd46 IM |
2117 | } |
2118 | ||
029632fb | 2119 | void activate_task(struct rq *rq, struct task_struct *p, int flags) |
1e3c88bd | 2120 | { |
a53ce18c VG |
2121 | if (task_on_rq_migrating(p)) |
2122 | flags |= ENQUEUE_MIGRATED; | |
223baf9d MD |
2123 | if (flags & ENQUEUE_MIGRATED) |
2124 | sched_mm_cid_migrate_to(rq, p); | |
a53ce18c | 2125 | |
371fd7e7 | 2126 | enqueue_task(rq, p, flags); |
7dd77884 PZ |
2127 | |
2128 | p->on_rq = TASK_ON_RQ_QUEUED; | |
1e3c88bd PZ |
2129 | } |
2130 | ||
029632fb | 2131 | void deactivate_task(struct rq *rq, struct task_struct *p, int flags) |
1e3c88bd | 2132 | { |
7dd77884 PZ |
2133 | p->on_rq = (flags & DEQUEUE_SLEEP) ? 0 : TASK_ON_RQ_MIGRATING; |
2134 | ||
371fd7e7 | 2135 | dequeue_task(rq, p, flags); |
1e3c88bd PZ |
2136 | } |
2137 | ||
f558c2b8 | 2138 | static inline int __normal_prio(int policy, int rt_prio, int nice) |
14531189 | 2139 | { |
f558c2b8 PZ |
2140 | int prio; |
2141 | ||
2142 | if (dl_policy(policy)) | |
2143 | prio = MAX_DL_PRIO - 1; | |
2144 | else if (rt_policy(policy)) | |
2145 | prio = MAX_RT_PRIO - 1 - rt_prio; | |
2146 | else | |
2147 | prio = NICE_TO_PRIO(nice); | |
2148 | ||
2149 | return prio; | |
14531189 IM |
2150 | } |
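A worked sketch of this mapping with the kernel's priority constants (MAX_DL_PRIO = 0, MAX_RT_PRIO = 100, nice 0 mapping to 120; lower numbers mean higher priority); the policy enum is illustrative only:

	#include <stdio.h>

	#define MAX_DL_PRIO	0
	#define MAX_RT_PRIO	100
	#define DEFAULT_PRIO	(MAX_RT_PRIO + 20)
	#define NICE_TO_PRIO(n)	((n) + DEFAULT_PRIO)

	enum { POLICY_DL, POLICY_RT, POLICY_FAIR };	/* illustrative policy classes */

	static int toy_normal_prio(int policy, int rt_prio, int nice)
	{
		if (policy == POLICY_DL)
			return MAX_DL_PRIO - 1;		  /* -1: above all RT */
		if (policy == POLICY_RT)
			return MAX_RT_PRIO - 1 - rt_prio; /* rt_priority 1..99 -> prio 98..0 */
		return NICE_TO_PRIO(nice);		  /* nice -20..19 -> prio 100..139 */
	}

	int main(void)
	{
		printf("deadline      -> %d\n", toy_normal_prio(POLICY_DL, 0, 0));    /*  -1 */
		printf("FIFO rtprio99 -> %d\n", toy_normal_prio(POLICY_RT, 99, 0));   /*   0 */
		printf("FIFO rtprio1  -> %d\n", toy_normal_prio(POLICY_RT, 1, 0));    /*  98 */
		printf("nice 0        -> %d\n", toy_normal_prio(POLICY_FAIR, 0, 0));  /* 120 */
		printf("nice -20      -> %d\n", toy_normal_prio(POLICY_FAIR, 0, -20));/* 100 */
		return 0;
	}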
2151 | ||
b29739f9 IM |
2152 | /* |
2153 | * Calculate the expected normal priority: i.e. priority | |
2154 | * without taking RT-inheritance into account. Might be | |
2155 | * boosted by interactivity modifiers. Changes upon fork, | |
2156 | * setprio syscalls, and whenever the interactivity | |
2157 | * estimator recalculates. | |
2158 | */ | |
36c8b586 | 2159 | static inline int normal_prio(struct task_struct *p) |
b29739f9 | 2160 | { |
f558c2b8 | 2161 | return __normal_prio(p->policy, p->rt_priority, PRIO_TO_NICE(p->static_prio)); |
b29739f9 IM |
2162 | } |
2163 | ||
2164 | /* | |
2165 | * Calculate the current priority, i.e. the priority | |
2166 | * taken into account by the scheduler. This value might | |
2167 | * be boosted by RT tasks, or might be boosted by | |
2168 | * interactivity modifiers. Will be RT if the task got | |
2169 | * RT-boosted. If not then it returns p->normal_prio. | |
2170 | */ | |
36c8b586 | 2171 | static int effective_prio(struct task_struct *p) |
b29739f9 IM |
2172 | { |
2173 | p->normal_prio = normal_prio(p); | |
2174 | /* | |
2175 | * If we are RT tasks or we were boosted to RT priority, | |
2176 | * keep the priority unchanged. Otherwise, update priority | |
2177 | * to the normal priority: | |
2178 | */ | |
2179 | if (!rt_prio(p->prio)) | |
2180 | return p->normal_prio; | |
2181 | return p->prio; | |
2182 | } | |
2183 | ||
1da177e4 LT |
2184 | /** |
2185 | * task_curr - is this task currently executing on a CPU? | |
2186 | * @p: the task in question. | |
e69f6186 YB |
2187 | * |
2188 | * Return: 1 if the task is currently executing. 0 otherwise. | |
1da177e4 | 2189 | */ |
36c8b586 | 2190 | inline int task_curr(const struct task_struct *p) |
1da177e4 LT |
2191 | { |
2192 | return cpu_curr(task_cpu(p)) == p; | |
2193 | } | |
2194 | ||
67dfa1b7 | 2195 | /* |
4c9a4bc8 PZ |
2196 | * switched_from, switched_to and prio_changed must _NOT_ drop rq->lock, |
2197 | * use the balance_callback list if you want balancing. | |
2198 | * | |
2199 | * this means any call to check_class_changed() must be followed by a call to | |
2200 | * balance_callback(). | |
67dfa1b7 | 2201 | */ |
cb469845 SR |
2202 | static inline void check_class_changed(struct rq *rq, struct task_struct *p, |
2203 | const struct sched_class *prev_class, | |
da7a735e | 2204 | int oldprio) |
cb469845 SR |
2205 | { |
2206 | if (prev_class != p->sched_class) { | |
2207 | if (prev_class->switched_from) | |
da7a735e | 2208 | prev_class->switched_from(rq, p); |
4c9a4bc8 | 2209 | |
da7a735e | 2210 | p->sched_class->switched_to(rq, p); |
2d3d891d | 2211 | } else if (oldprio != p->prio || dl_task(p)) |
da7a735e | 2212 | p->sched_class->prio_changed(rq, p, oldprio); |
cb469845 SR |
2213 | } |
2214 | ||
e23edc86 | 2215 | void wakeup_preempt(struct rq *rq, struct task_struct *p, int flags) |
1e5a7405 | 2216 | { |
aa93cd53 | 2217 | if (p->sched_class == rq->curr->sched_class) |
e23edc86 | 2218 | rq->curr->sched_class->wakeup_preempt(rq, p, flags); |
546a3fee | 2219 | else if (sched_class_above(p->sched_class, rq->curr->sched_class)) |
aa93cd53 | 2220 | resched_curr(rq); |
1e5a7405 PZ |
2221 | |
2222 | /* | |
2223 | * A queue event has occurred, and we're going to schedule. In | |
2224 | * this case, we can save a useless back-to-back clock update. | |
2225 | */ | |
da0c1e65 | 2226 | if (task_on_rq_queued(rq->curr) && test_tsk_need_resched(rq->curr)) |
adcc8da8 | 2227 | rq_clock_skip_update(rq); |
1e5a7405 PZ |
2228 | } |
2229 | ||
1c069187 PZ |
2230 | static __always_inline |
2231 | int __task_state_match(struct task_struct *p, unsigned int state) | |
2232 | { | |
2233 | if (READ_ONCE(p->__state) & state) | |
2234 | return 1; | |
2235 | ||
1c069187 PZ |
2236 | if (READ_ONCE(p->saved_state) & state) |
2237 | return -1; | |
fbaa6a18 | 2238 | |
1c069187 PZ |
2239 | return 0; |
2240 | } | |
2241 | ||
2242 | static __always_inline | |
2243 | int task_state_match(struct task_struct *p, unsigned int state) | |
2244 | { | |
1c069187 | 2245 | /* |
8f0eed4a EB |
2246 | * Serialize against current_save_and_set_rtlock_wait_state(), |
2247 | * current_restore_rtlock_saved_state(), and __refrigerator(). | |
1c069187 | 2248 | */ |
0e34600a | 2249 | guard(raw_spinlock_irq)(&p->pi_lock); |
1c069187 | 2250 | return __task_state_match(p, state); |
1c069187 PZ |
2251 | } |
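A fragment sketching the tri-state contract with toy bitmask states: 1 when the live state matches, -1 when only the saved state (used under PREEMPT_RT and the freezer) matches, 0 otherwise; wait_task_inactive() below treats the -1 case as "still queued".

	static int toy_state_match(unsigned int state, unsigned int saved_state,
				   unsigned int match)
	{
		if (state & match)
			return 1;		/* live state matches */
		if (saved_state & match)
			return -1;		/* only the saved state matches */
		return 0;			/* no match: task may have woken */
	}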
2252 | ||
d5e15866 PZ |
2253 | /* |
2254 | * wait_task_inactive - wait for a thread to unschedule. | |
2255 | * | |
2256 | * Wait for the thread to block in any of the states set in @match_state. | |
2257 | * If it changes, i.e. @p might have woken up, then return zero. When we | |
2258 | * succeed in waiting for @p to be off its CPU, we return a positive number | |
2259 | * (its total switch count). If a second call a short while later returns the | |
2260 | * same number, the caller can be sure that @p has remained unscheduled the | |
2261 | * whole time. | |
2262 | * | |
2263 | * The caller must ensure that the task *will* unschedule sometime soon, | |
2264 | * else this function might spin for a *long* time. This function can't | |
2265 | * be called with interrupts off, or it may introduce deadlock with | |
2266 | * smp_call_function() if an IPI is sent by the same process we are | |
2267 | * waiting to become inactive. | |
2268 | */ | |
2269 | unsigned long wait_task_inactive(struct task_struct *p, unsigned int match_state) | |
2270 | { | |
1c069187 | 2271 | int running, queued, match; |
d5e15866 PZ |
2272 | struct rq_flags rf; |
2273 | unsigned long ncsw; | |
2274 | struct rq *rq; | |
2275 | ||
2276 | for (;;) { | |
2277 | /* | |
2278 | * We do the initial early heuristics without holding | |
2279 | * any task-queue locks at all. We'll only try to get | |
2280 | * the runqueue lock when things look like they will | |
2281 | * work out! | |
2282 | */ | |
2283 | rq = task_rq(p); | |
2284 | ||
2285 | /* | |
2286 | * If the task is actively running on another CPU | |
2287 | * still, just relax and busy-wait without holding | |
2288 | * any locks. | |
2289 | * | |
2290 | * NOTE! Since we don't hold any locks, it's not | |
2291 | * even certain that "rq" stays the right runqueue! | |
2292 | * But we don't care, since "task_on_cpu()" will | |
2293 | * return false if the runqueue has changed and p | |
2294 | * is actually now running somewhere else! | |
2295 | */ | |
2296 | while (task_on_cpu(rq, p)) { | |
1c069187 | 2297 | if (!task_state_match(p, match_state)) |
d5e15866 PZ |
2298 | return 0; |
2299 | cpu_relax(); | |
2300 | } | |
2301 | ||
2302 | /* | |
2303 | * Ok, time to look more closely! We need the rq | |
2304 | * lock now, to be *sure*. If we're wrong, we'll | |
2305 | * just go back and repeat. | |
2306 | */ | |
2307 | rq = task_rq_lock(p, &rf); | |
2308 | trace_sched_wait_task(p); | |
2309 | running = task_on_cpu(rq, p); | |
2310 | queued = task_on_rq_queued(p); | |
2311 | ncsw = 0; | |
1c069187 PZ |
2312 | if ((match = __task_state_match(p, match_state))) { |
2313 | /* | |
2314 | * When matching on p->saved_state, consider this task | |
2315 | * still queued so it will wait. | |
2316 | */ | |
2317 | if (match < 0) | |
2318 | queued = 1; | |
d5e15866 | 2319 | ncsw = p->nvcsw | LONG_MIN; /* sets MSB */ |
1c069187 | 2320 | } |
d5e15866 PZ |
2321 | task_rq_unlock(rq, p, &rf); |
2322 | ||
2323 | /* | |
2324 | * If it changed from the expected state, bail out now. | |
2325 | */ | |
2326 | if (unlikely(!ncsw)) | |
2327 | break; | |
2328 | ||
2329 | /* | |
2330 | * Was it really running after all now that we | |
2331 | * checked with the proper locks actually held? | |
2332 | * | |
2333 | * Oops. Go back and try again.. | |
2334 | */ | |
2335 | if (unlikely(running)) { | |
2336 | cpu_relax(); | |
2337 | continue; | |
2338 | } | |
2339 | ||
2340 | /* | |
2341 | * It's not enough that it's not actively running, | |
2342 | * it must be off the runqueue _entirely_, and not | |
2343 | * preempted! | |
2344 | * | |
2345 | * So if it was still runnable (but just not actively | |
2346 | * running right now), it's preempted, and we should | |
2347 | * yield - it could be a while. | |
2348 | */ | |
2349 | if (unlikely(queued)) { | |
2350 | ktime_t to = NSEC_PER_SEC / HZ; | |
2351 | ||
2352 | set_current_state(TASK_UNINTERRUPTIBLE); | |
2353 | schedule_hrtimeout(&to, HRTIMER_MODE_REL_HARD); | |
2354 | continue; | |
2355 | } | |
2356 | ||
2357 | /* | |
2358 | * Ahh, all good. It wasn't running, and it wasn't | |
2359 | * runnable, which means that it will never become | |
2360 | * running in the future either. We're all done! | |
2361 | */ | |
2362 | break; | |
2363 | } | |
2364 | ||
2365 | return ncsw; | |
2366 | } | |
2367 | ||
1da177e4 | 2368 | #ifdef CONFIG_SMP |
175f0e25 | 2369 | |
af449901 | 2370 | static void |
713a2e21 | 2371 | __do_set_cpus_allowed(struct task_struct *p, struct affinity_context *ctx); |
af449901 PZ |
2372 | |
2373 | static int __set_cpus_allowed_ptr(struct task_struct *p, | |
713a2e21 | 2374 | struct affinity_context *ctx); |
af449901 PZ |
2375 | |
2376 | static void migrate_disable_switch(struct rq *rq, struct task_struct *p) | |
2377 | { | |
713a2e21 WL |
2378 | struct affinity_context ac = { |
2379 | .new_mask = cpumask_of(rq->cpu), | |
2380 | .flags = SCA_MIGRATE_DISABLE, | |
2381 | }; | |
2382 | ||
af449901 PZ |
2383 | if (likely(!p->migration_disabled)) |
2384 | return; | |
2385 | ||
2386 | if (p->cpus_ptr != &p->cpus_mask) | |
2387 | return; | |
2388 | ||
2389 | /* | |
2390 | * Violates locking rules! see comment in __do_set_cpus_allowed(). | |
2391 | */ | |
713a2e21 | 2392 | __do_set_cpus_allowed(p, &ac); |
af449901 PZ |
2393 | } |
2394 | ||
2395 | void migrate_disable(void) | |
2396 | { | |
3015ef4b TG |
2397 | struct task_struct *p = current; |
2398 | ||
2399 | if (p->migration_disabled) { | |
2400 | p->migration_disabled++; | |
af449901 | 2401 | return; |
3015ef4b | 2402 | } |
af449901 | 2403 | |
0e34600a | 2404 | guard(preempt)(); |
3015ef4b TG |
2405 | this_rq()->nr_pinned++; |
2406 | p->migration_disabled = 1; | |
af449901 PZ |
2407 | } |
2408 | EXPORT_SYMBOL_GPL(migrate_disable); | |
2409 | ||
2410 | void migrate_enable(void) | |
2411 | { | |
2412 | struct task_struct *p = current; | |
713a2e21 WL |
2413 | struct affinity_context ac = { |
2414 | .new_mask = &p->cpus_mask, | |
2415 | .flags = SCA_MIGRATE_ENABLE, | |
2416 | }; | |
af449901 | 2417 | |
6d337eab PZ |
2418 | if (p->migration_disabled > 1) { |
2419 | p->migration_disabled--; | |
af449901 | 2420 | return; |
6d337eab | 2421 | } |
af449901 | 2422 | |
9d0df377 SAS |
2423 | if (WARN_ON_ONCE(!p->migration_disabled)) |
2424 | return; | |
2425 | ||
6d337eab PZ |
2426 | /* |
2427 | * Ensure stop_task runs either before or after this, and that | |
2428 | * __set_cpus_allowed_ptr(SCA_MIGRATE_ENABLE) doesn't schedule(). | |
2429 | */ | |
0e34600a | 2430 | guard(preempt)(); |
6d337eab | 2431 | if (p->cpus_ptr != &p->cpus_mask) |
713a2e21 | 2432 | __set_cpus_allowed_ptr(p, &ac); |
6d337eab PZ |
2433 | /* |
2434 | * Mustn't clear migration_disabled() until cpus_ptr points back at the | |
2435 | * regular cpus_mask, otherwise things that race (e.g. | |
2436 | * select_fallback_rq) get confused. | |
2437 | */ | |
af449901 | 2438 | barrier(); |
6d337eab | 2439 | p->migration_disabled = 0; |
3015ef4b | 2440 | this_rq()->nr_pinned--; |
af449901 PZ |
2441 | } |
2442 | EXPORT_SYMBOL_GPL(migrate_enable); | |
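A fragment sketching the nesting-counter pattern above: only the outermost disable/enable pair touches the pinned count, inner pairs just adjust the counter. Per-CPU state, the preemption guards and the stopper interaction are all omitted, and the names are illustrative.

	struct toy_task {
		unsigned int migration_disabled;
	};

	static void toy_migrate_disable(struct toy_task *p, unsigned int *nr_pinned)
	{
		if (p->migration_disabled) {
			p->migration_disabled++;	/* nested: count only */
			return;
		}
		(*nr_pinned)++;				/* outermost: pin to this CPU */
		p->migration_disabled = 1;
	}

	static void toy_migrate_enable(struct toy_task *p, unsigned int *nr_pinned)
	{
		if (p->migration_disabled > 1) {
			p->migration_disabled--;	/* still nested */
			return;
		}
		/* outermost enable: re-allow migration, then unpin */
		p->migration_disabled = 0;
		(*nr_pinned)--;
	}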
2443 | ||
3015ef4b TG |
2444 | static inline bool rq_has_pinned_tasks(struct rq *rq) |
2445 | { | |
2446 | return rq->nr_pinned; | |
2447 | } | |
2448 | ||
175f0e25 | 2449 | /* |
bee98539 | 2450 | * Per-CPU kthreads are allowed to run on !active && online CPUs, see |
175f0e25 PZ |
2451 | * __set_cpus_allowed_ptr() and select_fallback_rq(). |
2452 | */ | |
2453 | static inline bool is_cpu_allowed(struct task_struct *p, int cpu) | |
2454 | { | |
5ba2ffba | 2455 | /* When not in the task's cpumask, no point in looking further. */ |
3bd37062 | 2456 | if (!cpumask_test_cpu(cpu, p->cpus_ptr)) |
175f0e25 PZ |
2457 | return false; |
2458 | ||
5ba2ffba PZ |
2459 | /* migrate_disabled() must be allowed to finish. */ |
2460 | if (is_migration_disabled(p)) | |
175f0e25 PZ |
2461 | return cpu_online(cpu); |
2462 | ||
5ba2ffba PZ |
2463 | /* Non-kernel threads are not allowed during either online or offline. */ | |
2464 | if (!(p->flags & PF_KTHREAD)) | |
9ae606bc | 2465 | return cpu_active(cpu) && task_cpu_possible(cpu, p); |
5ba2ffba PZ |
2466 | |
2467 | /* KTHREAD_IS_PER_CPU is always allowed. */ | |
2468 | if (kthread_is_per_cpu(p)) | |
2469 | return cpu_online(cpu); | |
2470 | ||
2471 | /* Regular kernel threads don't get to stay during offline. */ | |
b5c44773 | 2472 | if (cpu_dying(cpu)) |
5ba2ffba PZ |
2473 | return false; |
2474 | ||
2475 | /* But are allowed during online. */ | |
2476 | return cpu_online(cpu); | |
175f0e25 PZ |
2477 | } |
2478 | ||
5cc389bc PZ |
2479 | /* |
2480 | * This is how migration works: | |
2481 | * | |
2482 | * 1) we invoke migration_cpu_stop() on the target CPU using | |
2483 | * stop_one_cpu(). | |
2484 | * 2) stopper starts to run (implicitly forcing the migrated thread | |
2485 | * off the CPU) | |
2486 | * 3) it checks whether the migrated task is still in the wrong runqueue. | |
2487 | * 4) if it's in the wrong runqueue then the migration thread removes | |
2488 | * it and puts it into the right queue. | |
2489 | * 5) stopper completes and stop_one_cpu() returns and the migration | |
2490 | * is done. | |
2491 | */ | |
2492 | ||
2493 | /* | |
2494 | * move_queued_task - move a queued task to new rq. | |
2495 | * | |
2496 | * Returns (locked) new rq. Old rq's lock is released. | |
2497 | */ | |
8a8c69c3 PZ |
2498 | static struct rq *move_queued_task(struct rq *rq, struct rq_flags *rf, |
2499 | struct task_struct *p, int new_cpu) | |
5cc389bc | 2500 | { |
5cb9eaa3 | 2501 | lockdep_assert_rq_held(rq); |
5cc389bc | 2502 | |
58877d34 | 2503 | deactivate_task(rq, p, DEQUEUE_NOCLOCK); |
5cc389bc | 2504 | set_task_cpu(p, new_cpu); |
8a8c69c3 | 2505 | rq_unlock(rq, rf); |
5cc389bc PZ |
2506 | |
2507 | rq = cpu_rq(new_cpu); | |
2508 | ||
8a8c69c3 | 2509 | rq_lock(rq, rf); |
09348d75 | 2510 | WARN_ON_ONCE(task_cpu(p) != new_cpu); |
58877d34 | 2511 | activate_task(rq, p, 0); |
e23edc86 | 2512 | wakeup_preempt(rq, p, 0); |
5cc389bc PZ |
2513 | |
2514 | return rq; | |
2515 | } | |
2516 | ||
2517 | struct migration_arg { | |
6d337eab PZ |
2518 | struct task_struct *task; |
2519 | int dest_cpu; | |
2520 | struct set_affinity_pending *pending; | |
2521 | }; | |
2522 | ||
50caf9c1 PZ |
2523 | /* |
2524 | * @refs: number of wait_for_completion() | |
2525 | * @stop_pending: is @stop_work in use | |
2526 | */ | |
6d337eab PZ |
2527 | struct set_affinity_pending { |
2528 | refcount_t refs; | |
9e81889c | 2529 | unsigned int stop_pending; |
6d337eab PZ |
2530 | struct completion done; |
2531 | struct cpu_stop_work stop_work; | |
2532 | struct migration_arg arg; | |
5cc389bc PZ |
2533 | }; |
2534 | ||
2535 | /* | |
d1ccc66d | 2536 | * Move (not current) task off this CPU, onto the destination CPU. We're doing |
5cc389bc PZ |
2537 | * this because either it can't run here any more (set_cpus_allowed() |
2538 | * away from this CPU, or CPU going down), or because we're | |
2539 | * attempting to rebalance this task on exec (sched_exec). | |
2540 | * | |
2541 | * So we race with normal scheduler movements, but that's OK, as long | |
2542 | * as the task is no longer on this CPU. | |
5cc389bc | 2543 | */ |
8a8c69c3 PZ |
2544 | static struct rq *__migrate_task(struct rq *rq, struct rq_flags *rf, |
2545 | struct task_struct *p, int dest_cpu) | |
5cc389bc | 2546 | { |
5cc389bc | 2547 | /* Affinity changed (again). */ |
175f0e25 | 2548 | if (!is_cpu_allowed(p, dest_cpu)) |
5e16bbc2 | 2549 | return rq; |
5cc389bc | 2550 | |
8a8c69c3 | 2551 | rq = move_queued_task(rq, rf, p, dest_cpu); |
5e16bbc2 PZ |
2552 | |
2553 | return rq; | |
5cc389bc PZ |
2554 | } |
2555 | ||
2556 | /* | |
2557 | * migration_cpu_stop - this will be executed by a highprio stopper thread | |
2558 | * and performs thread migration by bumping thread off CPU then | |
2559 | * 'pushing' onto another runqueue. | |
2560 | */ | |
2561 | static int migration_cpu_stop(void *data) | |
2562 | { | |
2563 | struct migration_arg *arg = data; | |
c20cf065 | 2564 | struct set_affinity_pending *pending = arg->pending; |
5e16bbc2 PZ |
2565 | struct task_struct *p = arg->task; |
2566 | struct rq *rq = this_rq(); | |
6d337eab | 2567 | bool complete = false; |
8a8c69c3 | 2568 | struct rq_flags rf; |
5cc389bc PZ |
2569 | |
2570 | /* | |
d1ccc66d IM |
2571 | * The original target CPU might have gone down and we might |
2572 | * be on another CPU but it doesn't matter. | |
5cc389bc | 2573 | */ |
6d337eab | 2574 | local_irq_save(rf.flags); |
5cc389bc PZ |
2575 | /* |
2576 | * We need to explicitly wake pending tasks before running | |
3bd37062 | 2577 | * __migrate_task() such that we will not miss enforcing cpus_ptr |
5cc389bc PZ |
2578 | * during wakeups, see set_cpus_allowed_ptr()'s TASK_WAKING test. |
2579 | */ | |
16bf5a5e | 2580 | flush_smp_call_function_queue(); |
5e16bbc2 PZ |
2581 | |
2582 | raw_spin_lock(&p->pi_lock); | |
8a8c69c3 | 2583 | rq_lock(rq, &rf); |
6d337eab | 2584 | |
e140749c VS |
2585 | /* |
2586 | * If we were passed a pending, then ->stop_pending was set, thus | |
2587 | * p->migration_pending must have remained stable. | |
2588 | */ | |
2589 | WARN_ON_ONCE(pending && pending != p->migration_pending); | |
2590 | ||
5e16bbc2 PZ |
2591 | /* |
2592 | * If task_rq(p) != rq, it cannot be migrated here, because we're | |
2593 | * holding rq->lock, if p->on_rq == 0 it cannot get enqueued because | |
2594 | * we're holding p->pi_lock. | |
2595 | */ | |
bf89a304 | 2596 | if (task_rq(p) == rq) { |
6d337eab PZ |
2597 | if (is_migration_disabled(p)) |
2598 | goto out; | |
2599 | ||
2600 | if (pending) { | |
e140749c | 2601 | p->migration_pending = NULL; |
6d337eab | 2602 | complete = true; |
6d337eab | 2603 | |
3f1bc119 PZ |
2604 | if (cpumask_test_cpu(task_cpu(p), &p->cpus_mask)) |
2605 | goto out; | |
3f1bc119 | 2606 | } |
6d337eab | 2607 | |
96500560 HJ |
2608 | if (task_on_rq_queued(p)) { |
2609 | update_rq_clock(rq); | |
475ea6c6 | 2610 | rq = __migrate_task(rq, &rf, p, arg->dest_cpu); |
96500560 | 2611 | } else { |
475ea6c6 | 2612 | p->wake_cpu = arg->dest_cpu; |
96500560 | 2613 | } |
6d337eab | 2614 | |
3f1bc119 PZ |
2615 | /* |
2616 | * XXX __migrate_task() can fail, at which point we might end | |
2617 | * up running on a dodgy CPU, AFAICT this can only happen | |
2618 | * during CPU hotplug, at which point we'll get pushed out | |
2619 | * anyway, so it's probably not a big deal. | |
2620 | */ | |
2621 | ||
c20cf065 | 2622 | } else if (pending) { |
6d337eab PZ |
2623 | /* |
2624 | * This happens when we get migrated between migrate_enable()'s | |
2625 | * preempt_enable() and scheduling the stopper task. At that | |
2626 | * point we're a regular task again and not current anymore. | |
2627 | * | |
2628 | * A !PREEMPT kernel has a giant hole here, which makes it far | |
2629 | * more likely. | |
2630 | */ | |
2631 | ||
d707faa6 VS |
2632 | /* |
2633 | * The task moved before the stopper got to run. We're holding | |
2634 | * ->pi_lock, so the allowed mask is stable - if it got | |
2635 | * somewhere allowed, we're done. | |
2636 | */ | |
c20cf065 | 2637 | if (cpumask_test_cpu(task_cpu(p), p->cpus_ptr)) { |
e140749c | 2638 | p->migration_pending = NULL; |
d707faa6 VS |
2639 | complete = true; |
2640 | goto out; | |
2641 | } | |
2642 | ||
6d337eab PZ |
2643 | /* |
2644 | * When migrate_enable() hits a rq mis-match we can't reliably | |
2645 | * determine is_migration_disabled() and so have to chase after | |
2646 | * it. | |
2647 | */ | |
9e81889c | 2648 | WARN_ON_ONCE(!pending->stop_pending); |
f0498d2a | 2649 | preempt_disable(); |
6d337eab PZ |
2650 | task_rq_unlock(rq, p, &rf); |
2651 | stop_one_cpu_nowait(task_cpu(p), migration_cpu_stop, | |
2652 | &pending->arg, &pending->stop_work); | |
f0498d2a | 2653 | preempt_enable(); |
6d337eab | 2654 | return 0; |
bf89a304 | 2655 | } |
6d337eab | 2656 | out: |
9e81889c PZ |
2657 | if (pending) |
2658 | pending->stop_pending = false; | |
6d337eab PZ |
2659 | task_rq_unlock(rq, p, &rf); |
2660 | ||
2661 | if (complete) | |
2662 | complete_all(&pending->done); | |
2663 | ||
5cc389bc PZ |
2664 | return 0; |
2665 | } | |
2666 | ||
a7c81556 PZ |
2667 | int push_cpu_stop(void *arg) |
2668 | { | |
2669 | struct rq *lowest_rq = NULL, *rq = this_rq(); | |
2670 | struct task_struct *p = arg; | |
2671 | ||
2672 | raw_spin_lock_irq(&p->pi_lock); | |
5cb9eaa3 | 2673 | raw_spin_rq_lock(rq); |
a7c81556 PZ |
2674 | |
2675 | if (task_rq(p) != rq) | |
2676 | goto out_unlock; | |
2677 | ||
2678 | if (is_migration_disabled(p)) { | |
2679 | p->migration_flags |= MDF_PUSH; | |
2680 | goto out_unlock; | |
2681 | } | |
2682 | ||
2683 | p->migration_flags &= ~MDF_PUSH; | |
2684 | ||
2685 | if (p->sched_class->find_lock_rq) | |
2686 | lowest_rq = p->sched_class->find_lock_rq(p, rq); | |
5e16bbc2 | 2687 | |
a7c81556 PZ |
2688 | if (!lowest_rq) |
2689 | goto out_unlock; | |
2690 | ||
2691 | // XXX validate p is still the highest prio task | |
2692 | if (task_rq(p) == rq) { | |
2693 | deactivate_task(rq, p, 0); | |
2694 | set_task_cpu(p, lowest_rq->cpu); | |
2695 | activate_task(lowest_rq, p, 0); | |
2696 | resched_curr(lowest_rq); | |
2697 | } | |
2698 | ||
2699 | double_unlock_balance(rq, lowest_rq); | |
2700 | ||
2701 | out_unlock: | |
2702 | rq->push_busy = false; | |
5cb9eaa3 | 2703 | raw_spin_rq_unlock(rq); |
a7c81556 PZ |
2704 | raw_spin_unlock_irq(&p->pi_lock); |
2705 | ||
2706 | put_task_struct(p); | |
5cc389bc PZ |
2707 | return 0; |
2708 | } | |
2709 | ||
c5b28038 PZ |
2710 | /* |
2711 | * sched_class::set_cpus_allowed must do the below, but is not required to | |
2712 | * actually call this function. | |
2713 | */ | |
713a2e21 | 2714 | void set_cpus_allowed_common(struct task_struct *p, struct affinity_context *ctx) |
5cc389bc | 2715 | { |
713a2e21 WL |
2716 | if (ctx->flags & (SCA_MIGRATE_ENABLE | SCA_MIGRATE_DISABLE)) { |
2717 | p->cpus_ptr = ctx->new_mask; | |
af449901 PZ |
2718 | return; |
2719 | } | |
2720 | ||
713a2e21 WL |
2721 | cpumask_copy(&p->cpus_mask, ctx->new_mask); |
2722 | p->nr_cpus_allowed = cpumask_weight(ctx->new_mask); | |
8f9ea86f WL |
2723 | |
2724 | /* | |
2725 | * Swap in a new user_cpus_ptr if SCA_USER flag set | |
2726 | */ | |
2727 | if (ctx->flags & SCA_USER) | |
2728 | swap(p->user_cpus_ptr, ctx->user_mask); | |
5cc389bc PZ |
2729 | } |
2730 | ||
9cfc3e18 | 2731 | static void |
713a2e21 | 2732 | __do_set_cpus_allowed(struct task_struct *p, struct affinity_context *ctx) |
c5b28038 | 2733 | { |
6c37067e PZ |
2734 | struct rq *rq = task_rq(p); |
2735 | bool queued, running; | |
2736 | ||
af449901 PZ |
2737 | /* |
2738 | * This here violates the locking rules for affinity, since we're only | |
2739 | * supposed to change these variables while holding both rq->lock and | |
2740 | * p->pi_lock. | |
2741 | * | |
2742 | * HOWEVER, it magically works, because ttwu() is the only code that | |
2743 | * accesses these variables under p->pi_lock and only does so after | |
2744 | * smp_cond_load_acquire(&p->on_cpu, !VAL), and we're in __schedule() | |
2745 | * before finish_task(). | |
2746 | * | |
2747 | * XXX do further audits, this smells like something putrid. | |
2748 | */ | |
713a2e21 | 2749 | if (ctx->flags & SCA_MIGRATE_DISABLE) |
af449901 PZ |
2750 | SCHED_WARN_ON(!p->on_cpu); |
2751 | else | |
2752 | lockdep_assert_held(&p->pi_lock); | |
6c37067e PZ |
2753 | |
2754 | queued = task_on_rq_queued(p); | |
2755 | running = task_current(rq, p); | |
2756 | ||
2757 | if (queued) { | |
2758 | /* | |
2759 | * Because __kthread_bind() calls this on blocked tasks without | |
2760 | * holding rq->lock. | |
2761 | */ | |
5cb9eaa3 | 2762 | lockdep_assert_rq_held(rq); |
7a57f32a | 2763 | dequeue_task(rq, p, DEQUEUE_SAVE | DEQUEUE_NOCLOCK); |
6c37067e PZ |
2764 | } |
2765 | if (running) | |
2766 | put_prev_task(rq, p); | |
2767 | ||
713a2e21 | 2768 | p->sched_class->set_cpus_allowed(p, ctx); |
6c37067e | 2769 | |
6c37067e | 2770 | if (queued) |
7134b3e9 | 2771 | enqueue_task(rq, p, ENQUEUE_RESTORE | ENQUEUE_NOCLOCK); |
a399d233 | 2772 | if (running) |
03b7fad1 | 2773 | set_next_task(rq, p); |
c5b28038 PZ |
2774 | } |
2775 | ||
851a723e WL |
2776 | /* |
2777 | * Used for kthread_bind() and select_fallback_rq(), in both cases the user | |
2778 | * affinity (if any) should be destroyed too. | |
2779 | */ | |
9cfc3e18 PZ |
2780 | void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask) |
2781 | { | |
713a2e21 WL |
2782 | struct affinity_context ac = { |
2783 | .new_mask = new_mask, | |
851a723e WL |
2784 | .user_mask = NULL, |
2785 | .flags = SCA_USER, /* clear the user requested mask */ | |
713a2e21 | 2786 | }; |
9a5418bc WL |
2787 | union cpumask_rcuhead { |
2788 | cpumask_t cpumask; | |
2789 | struct rcu_head rcu; | |
2790 | }; | |
713a2e21 WL |
2791 | |
2792 | __do_set_cpus_allowed(p, &ac); | |
9a5418bc WL |
2793 | |
2794 | /* | |
2795 | * Because this is called with p->pi_lock held, it is not possible | |
2796 | * to use kfree() here (when PREEMPT_RT=y), therefore punt to using | |
2797 | * kfree_rcu(). | |
2798 | */ | |
2799 | kfree_rcu((union cpumask_rcuhead *)ac.user_mask, rcu); | |
2800 | } | |
2801 | ||
2802 | static cpumask_t *alloc_user_cpus_ptr(int node) | |
2803 | { | |
2804 | /* | |
2805 | * See do_set_cpus_allowed() above for the rcu_head usage. | |
2806 | */ | |
2807 | int size = max_t(int, cpumask_size(), sizeof(struct rcu_head)); | |
2808 | ||
2809 | return kmalloc_node(size, GFP_KERNEL, node); | |
9cfc3e18 PZ |
2810 | } |
2811 | ||
b90ca8ba WD |
2812 | int dup_user_cpus_ptr(struct task_struct *dst, struct task_struct *src, |
2813 | int node) | |
2814 | { | |
87ca4f9e | 2815 | cpumask_t *user_mask; |
8f9ea86f WL |
2816 | unsigned long flags; |
2817 | ||
87ca4f9e WL |
2818 | /* |
2819 | * Always clear dst->user_cpus_ptr first, as the two tasks' user_cpus_ptr | |
2820 | * values may differ by now due to racing. | |
2821 | */ | |
2822 | dst->user_cpus_ptr = NULL; | |
2823 | ||
2824 | /* | |
2825 | * This check is racy and losing the race is a valid situation. | |
2826 | * It is not worth the extra overhead of taking the pi_lock on | |
2827 | * every fork/clone. | |
2828 | */ | |
2829 | if (data_race(!src->user_cpus_ptr)) | |
b90ca8ba WD |
2830 | return 0; |
2831 | ||
9a5418bc | 2832 | user_mask = alloc_user_cpus_ptr(node); |
87ca4f9e | 2833 | if (!user_mask) |
b90ca8ba WD |
2834 | return -ENOMEM; |
2835 | ||
87ca4f9e WL |
2836 | /* |
2837 | * Use pi_lock to protect content of user_cpus_ptr | |
2838 | * | |
2839 | * Though unlikely, user_cpus_ptr can be reset to NULL by a concurrent | |
2840 | * do_set_cpus_allowed(). | |
2841 | */ | |
8f9ea86f | 2842 | raw_spin_lock_irqsave(&src->pi_lock, flags); |
87ca4f9e WL |
2843 | if (src->user_cpus_ptr) { |
2844 | swap(dst->user_cpus_ptr, user_mask); | |
2845 | cpumask_copy(dst->user_cpus_ptr, src->user_cpus_ptr); | |
2846 | } | |
8f9ea86f | 2847 | raw_spin_unlock_irqrestore(&src->pi_lock, flags); |
87ca4f9e WL |
2848 | |
2849 | if (unlikely(user_mask)) | |
2850 | kfree(user_mask); | |
2851 | ||
b90ca8ba WD |
2852 | return 0; |
2853 | } | |
2854 | ||
07ec77a1 WD |
2855 | static inline struct cpumask *clear_user_cpus_ptr(struct task_struct *p) |
2856 | { | |
2857 | struct cpumask *user_mask = NULL; | |
2858 | ||
2859 | swap(p->user_cpus_ptr, user_mask); | |
2860 | ||
2861 | return user_mask; | |
2862 | } | |
2863 | ||
b90ca8ba WD |
2864 | void release_user_cpus_ptr(struct task_struct *p) |
2865 | { | |
07ec77a1 | 2866 | kfree(clear_user_cpus_ptr(p)); |
b90ca8ba WD |
2867 | } |
2868 | ||
6d337eab | 2869 | /* |
c777d847 VS |
2870 | * This function is wildly self concurrent; here be dragons. |
2871 | * | |
2872 | * | |
2873 | * When given a valid mask, __set_cpus_allowed_ptr() must block until the | |
2874 | * designated task is enqueued on an allowed CPU. If that task is currently | |
2875 | * running, we have to kick it out using the CPU stopper. | |
2876 | * | |
2877 | * Migrate-Disable comes along and tramples all over our nice sandcastle. | |
2878 | * Consider: | |
2879 | * | |
2880 | * Initial conditions: P0->cpus_mask = [0, 1] | |
2881 | * | |
2882 | * P0@CPU0 P1 | |
2883 | * | |
2884 | * migrate_disable(); | |
2885 | * <preempted> | |
2886 | * set_cpus_allowed_ptr(P0, [1]); | |
2887 | * | |
2888 | * P1 *cannot* return from this set_cpus_allowed_ptr() call until P0 executes | |
2889 | * its outermost migrate_enable() (i.e. it exits its Migrate-Disable region). | |
2890 | * This means we need the following scheme: | |
2891 | * | |
2892 | * P0@CPU0 P1 | |
2893 | * | |
2894 | * migrate_disable(); | |
2895 | * <preempted> | |
2896 | * set_cpus_allowed_ptr(P0, [1]); | |
2897 | * <blocks> | |
2898 | * <resumes> | |
2899 | * migrate_enable(); | |
2900 | * __set_cpus_allowed_ptr(); | |
2901 | * <wakes local stopper> | |
2902 | * `--> <woken on migration completion> | |
2903 | * | |
2904 | * Now the fun stuff: there may be several P1-like tasks, i.e. multiple | |
2905 | * concurrent set_cpus_allowed_ptr(P0, [*]) calls. CPU affinity changes of any | |
2906 | * task p are serialized by p->pi_lock, which we can leverage: the one that | |
2907 | * should come into effect at the end of the Migrate-Disable region is the last | |
2908 | * one. This means we only need to track a single cpumask (i.e. p->cpus_mask), | |
2909 | * but we still need to properly signal those waiting tasks at the appropriate | |
2910 | * moment. | |
2911 | * | |
2912 | * This is implemented using struct set_affinity_pending. The first | |
2913 | * __set_cpus_allowed_ptr() caller within a given Migrate-Disable region will | |
2914 | * setup an instance of that struct and install it on the targeted task_struct. | |
2915 | * Any and all further callers will reuse that instance. Those then wait for | |
2916 | * a completion signaled at the tail of the CPU stopper callback (1), triggered | |
2917 | * on the end of the Migrate-Disable region (i.e. outermost migrate_enable()). | |
2918 | * | |
2919 | * | |
2920 | * (1) In the cases covered above. There is one more where the completion is | |
2921 | * signaled within affine_move_task() itself: when a subsequent affinity request | |
e140749c VS |
2922 | * occurs after the stopper bailed out due to the targeted task still being |
2923 | * Migrate-Disable. Consider: | |
c777d847 VS |
2924 | * |
2925 | * Initial conditions: P0->cpus_mask = [0, 1] | |
2926 | * | |
e140749c VS |
2927 | * CPU0 P1 P2 |
2928 | * <P0> | |
2929 | * migrate_disable(); | |
2930 | * <preempted> | |
c777d847 VS |
2931 | * set_cpus_allowed_ptr(P0, [1]); |
2932 | * <blocks> | |
e140749c VS |
2933 | * <migration/0> |
2934 | * migration_cpu_stop() | |
2935 | * is_migration_disabled() | |
2936 | * <bails> | |
c777d847 VS |
2937 | * set_cpus_allowed_ptr(P0, [0, 1]); |
2938 | * <signal completion> | |
2939 | * <awakes> | |
2940 | * | |
2941 | * Note that the above is safe vs a concurrent migrate_enable(), as any | |
2942 | * pending affinity completion is preceded by an uninstallation of | |
2943 | * p->migration_pending done with p->pi_lock held. | |
6d337eab PZ |
2944 | */ |
2945 | static int affine_move_task(struct rq *rq, struct task_struct *p, struct rq_flags *rf, | |
2946 | int dest_cpu, unsigned int flags) | |
5584e8ac WL |
2947 | __releases(rq->lock) |
2948 | __releases(p->pi_lock) | |
6d337eab PZ |
2949 | { |
2950 | struct set_affinity_pending my_pending = { }, *pending = NULL; | |
9e81889c | 2951 | bool stop_pending, complete = false; |
6d337eab PZ |
2952 | |
2953 | /* Can the task run on the task's current CPU? If so, we're done */ | |
2954 | if (cpumask_test_cpu(task_cpu(p), &p->cpus_mask)) { | |
a7c81556 PZ |
2955 | struct task_struct *push_task = NULL; |
2956 | ||
2957 | if ((flags & SCA_MIGRATE_ENABLE) && | |
2958 | (p->migration_flags & MDF_PUSH) && !rq->push_busy) { | |
2959 | rq->push_busy = true; | |
2960 | push_task = get_task_struct(p); | |
2961 | } | |
2962 | ||
50caf9c1 PZ |
2963 | /* |
2964 | * If there are pending waiters, but no pending stop_work, | |
2965 | * then complete now. | |
2966 | */ | |
6d337eab | 2967 | pending = p->migration_pending; |
50caf9c1 | 2968 | if (pending && !pending->stop_pending) { |
6d337eab PZ |
2969 | p->migration_pending = NULL; |
2970 | complete = true; | |
2971 | } | |
50caf9c1 | 2972 | |
f0498d2a | 2973 | preempt_disable(); |
6d337eab | 2974 | task_rq_unlock(rq, p, rf); |
a7c81556 PZ |
2975 | if (push_task) { |
2976 | stop_one_cpu_nowait(rq->cpu, push_cpu_stop, | |
2977 | p, &rq->push_work); | |
2978 | } | |
f0498d2a | 2979 | preempt_enable(); |
a7c81556 | 2980 | |
6d337eab | 2981 | if (complete) |
50caf9c1 | 2982 | complete_all(&pending->done); |
6d337eab PZ |
2983 | |
2984 | return 0; | |
2985 | } | |
2986 | ||
2987 | if (!(flags & SCA_MIGRATE_ENABLE)) { | |
2988 | /* serialized by p->pi_lock */ | |
2989 | if (!p->migration_pending) { | |
c777d847 | 2990 | /* Install the request */ |
6d337eab PZ |
2991 | refcount_set(&my_pending.refs, 1); |
2992 | init_completion(&my_pending.done); | |
8a6edb52 PZ |
2993 | my_pending.arg = (struct migration_arg) { |
2994 | .task = p, | |
475ea6c6 | 2995 | .dest_cpu = dest_cpu, |
8a6edb52 PZ |
2996 | .pending = &my_pending, |
2997 | }; | |
2998 | ||
6d337eab PZ |
2999 | p->migration_pending = &my_pending; |
3000 | } else { | |
3001 | pending = p->migration_pending; | |
3002 | refcount_inc(&pending->refs); | |
475ea6c6 VS |
3003 | /* |
3004 | * Affinity has changed, but we've already installed a | |
3005 | * pending. migration_cpu_stop() *must* see this, else | |
3006 | * we risk a completion of the pending despite having a | |
3007 | * task on a disallowed CPU. | |
3008 | * | |
3009 | * Serialized by p->pi_lock, so this is safe. | |
3010 | */ | |
3011 | pending->arg.dest_cpu = dest_cpu; | |
6d337eab PZ |
3012 | } |
3013 | } | |
3014 | pending = p->migration_pending; | |
3015 | /* | |
3016 | * - !MIGRATE_ENABLE: | |
3017 | * we'll have installed a pending if there wasn't one already. | |
3018 | * | |
3019 | * - MIGRATE_ENABLE: | |
3020 | * we're here because the current CPU isn't matching anymore, | |
3021 | * the only way that can happen is because of a concurrent | |
3022 | * set_cpus_allowed_ptr() call, which should then still be | |
3023 | * pending completion. | |
3024 | * | |
3025 | * Either way, we really should have a @pending here. | |
3026 | */ | |
3027 | if (WARN_ON_ONCE(!pending)) { | |
3028 | task_rq_unlock(rq, p, rf); | |
3029 | return -EINVAL; | |
3030 | } | |
3031 | ||
0b9d46fc | 3032 | if (task_on_cpu(rq, p) || READ_ONCE(p->__state) == TASK_WAKING) { |
c777d847 | 3033 | /* |
58b1a450 PZ |
3034 | * MIGRATE_ENABLE gets here because 'p == current'; for |
3035 | * anything else we cannot rely on is_migration_disabled(), so punt |
3036 | * and have the stopper function handle it all race-free. |
c777d847 | 3037 | */ |
9e81889c PZ |
3038 | stop_pending = pending->stop_pending; |
3039 | if (!stop_pending) | |
3040 | pending->stop_pending = true; | |
58b1a450 | 3041 | |
58b1a450 PZ |
3042 | if (flags & SCA_MIGRATE_ENABLE) |
3043 | p->migration_flags &= ~MDF_PUSH; | |
50caf9c1 | 3044 | |
f0498d2a | 3045 | preempt_disable(); |
6d337eab | 3046 | task_rq_unlock(rq, p, rf); |
9e81889c PZ |
3047 | if (!stop_pending) { |
3048 | stop_one_cpu_nowait(cpu_of(rq), migration_cpu_stop, | |
3049 | &pending->arg, &pending->stop_work); | |
3050 | } | |
f0498d2a | 3051 | preempt_enable(); |
6d337eab | 3052 | |
58b1a450 PZ |
3053 | if (flags & SCA_MIGRATE_ENABLE) |
3054 | return 0; | |
6d337eab PZ |
3055 | } else { |
3056 | ||
3057 | if (!is_migration_disabled(p)) { | |
3058 | if (task_on_rq_queued(p)) | |
3059 | rq = move_queued_task(rq, rf, p, dest_cpu); | |
3060 | ||
50caf9c1 PZ |
3061 | if (!pending->stop_pending) { |
3062 | p->migration_pending = NULL; | |
3063 | complete = true; | |
3064 | } | |
6d337eab PZ |
3065 | } |
3066 | task_rq_unlock(rq, p, rf); | |
3067 | ||
6d337eab PZ |
3068 | if (complete) |
3069 | complete_all(&pending->done); | |
3070 | } | |
3071 | ||
3072 | wait_for_completion(&pending->done); | |
3073 | ||
3074 | if (refcount_dec_and_test(&pending->refs)) | |
50caf9c1 | 3075 | wake_up_var(&pending->refs); /* No UaF, just an address */ |
6d337eab | 3076 | |
c777d847 VS |
3077 | /* |
3078 | * Block the original owner of &pending until all subsequent callers | |
3079 | * have seen the completion and decremented the refcount | |
3080 | */ | |
6d337eab PZ |
3081 | wait_var_event(&my_pending.refs, !refcount_read(&my_pending.refs)); |
3082 | ||
50caf9c1 PZ |
3083 | /* ARGH */ |
3084 | WARN_ON_ONCE(my_pending.stop_pending); | |
3085 | ||
6d337eab PZ |
3086 | return 0; |
3087 | } | |
3088 | ||
5cc389bc | 3089 | /* |
07ec77a1 | 3090 | * Called with both p->pi_lock and rq->lock held; drops both before returning. |
5cc389bc | 3091 | */ |
07ec77a1 | 3092 | static int __set_cpus_allowed_ptr_locked(struct task_struct *p, |
713a2e21 | 3093 | struct affinity_context *ctx, |
07ec77a1 WD |
3094 | struct rq *rq, |
3095 | struct rq_flags *rf) | |
3096 | __releases(rq->lock) | |
3097 | __releases(p->pi_lock) | |
5cc389bc | 3098 | { |
234a503e | 3099 | const struct cpumask *cpu_allowed_mask = task_cpu_possible_mask(p); |
e9d867a6 | 3100 | const struct cpumask *cpu_valid_mask = cpu_active_mask; |
234a503e | 3101 | bool kthread = p->flags & PF_KTHREAD; |
5cc389bc PZ |
3102 | unsigned int dest_cpu; |
3103 | int ret = 0; | |
3104 | ||
a499c3ea | 3105 | update_rq_clock(rq); |
5cc389bc | 3106 | |
234a503e | 3107 | if (kthread || is_migration_disabled(p)) { |
e9d867a6 | 3108 | /* |
741ba80f PZ |
3109 | * Kernel threads are allowed on online && !active CPUs, |
3110 | * however, during cpu-hot-unplug, even these might get pushed | |
3111 | * away if not KTHREAD_IS_PER_CPU. | |
af449901 PZ |
3112 | * |
3113 | * Specifically, migration_disabled() tasks must not fail the | |
3114 | * cpumask_any_and_distribute() pick below, esp. so on | |
3115 | * SCA_MIGRATE_ENABLE, otherwise we'll not call | |
3116 | * set_cpus_allowed_common() and actually reset p->cpus_ptr. | |
e9d867a6 PZI |
3117 | */ |
3118 | cpu_valid_mask = cpu_online_mask; | |
3119 | } | |
3120 | ||
713a2e21 | 3121 | if (!kthread && !cpumask_subset(ctx->new_mask, cpu_allowed_mask)) { |
234a503e WD |
3122 | ret = -EINVAL; |
3123 | goto out; | |
3124 | } | |
3125 | ||
25834c73 PZ |
3126 | /* |
3127 | * Must re-check here, to close a race against __kthread_bind(), | |
3128 | * sched_setaffinity() is not guaranteed to observe the flag. | |
3129 | */ | |
713a2e21 | 3130 | if ((ctx->flags & SCA_CHECK) && (p->flags & PF_NO_SETAFFINITY)) { |
25834c73 PZ |
3131 | ret = -EINVAL; |
3132 | goto out; | |
3133 | } | |
3134 | ||
713a2e21 | 3135 | if (!(ctx->flags & SCA_MIGRATE_ENABLE)) { |
df14b7f9 WL |
3136 | if (cpumask_equal(&p->cpus_mask, ctx->new_mask)) { |
3137 | if (ctx->flags & SCA_USER) | |
3138 | swap(p->user_cpus_ptr, ctx->user_mask); | |
885b3ba4 | 3139 | goto out; |
df14b7f9 | 3140 | } |
885b3ba4 VS |
3141 | |
3142 | if (WARN_ON_ONCE(p == current && | |
3143 | is_migration_disabled(p) && | |
713a2e21 | 3144 | !cpumask_test_cpu(task_cpu(p), ctx->new_mask))) { |
885b3ba4 VS |
3145 | ret = -EBUSY; |
3146 | goto out; | |
3147 | } | |
3148 | } | |
5cc389bc | 3149 | |
46a87b38 PT |
3150 | /* |
3151 | * Picking a ~random CPU helps in cases where we are changing affinity |
3152 | * for groups of tasks (i.e. cpuset), so that load balancing is not |
3153 | * immediately required to distribute the tasks within their new mask. | |
3154 | */ | |
713a2e21 | 3155 | dest_cpu = cpumask_any_and_distribute(cpu_valid_mask, ctx->new_mask); |
714e501e | 3156 | if (dest_cpu >= nr_cpu_ids) { |
5cc389bc PZ |
3157 | ret = -EINVAL; |
3158 | goto out; | |
3159 | } | |
3160 | ||
713a2e21 | 3161 | __do_set_cpus_allowed(p, ctx); |
07ec77a1 | 3162 | |
8f9ea86f | 3163 | return affine_move_task(rq, p, rf, dest_cpu, ctx->flags); |
5cc389bc | 3164 | |
5cc389bc | 3165 | out: |
07ec77a1 | 3166 | task_rq_unlock(rq, p, rf); |
5cc389bc PZ |
3167 | |
3168 | return ret; | |
3169 | } | |
25834c73 | 3170 | |
07ec77a1 WD |
3171 | /* |
3172 | * Change a given task's CPU affinity. Migrate the thread to a | |
3173 | * proper CPU and schedule it away if the CPU it's executing on | |
3174 | * is removed from the allowed bitmask. | |
3175 | * | |
3176 | * NOTE: the caller must have a valid reference to the task, the | |
3177 | * task must not exit() & deallocate itself prematurely. The | |
3178 | * call is not atomic; no spinlocks may be held. | |
3179 | */ | |
3180 | static int __set_cpus_allowed_ptr(struct task_struct *p, | |
713a2e21 | 3181 | struct affinity_context *ctx) |
07ec77a1 WD |
3182 | { |
3183 | struct rq_flags rf; | |
3184 | struct rq *rq; | |
3185 | ||
3186 | rq = task_rq_lock(p, &rf); | |
da019032 WL |
3187 | /* |
3188 | * Masking should be skipped if SCA_USER or any of the SCA_MIGRATE_* | |
3189 | * flags are set. | |
3190 | */ | |
3191 | if (p->user_cpus_ptr && | |
3192 | !(ctx->flags & (SCA_USER | SCA_MIGRATE_ENABLE | SCA_MIGRATE_DISABLE)) && | |
3193 | cpumask_and(rq->scratch_mask, ctx->new_mask, p->user_cpus_ptr)) | |
3194 | ctx->new_mask = rq->scratch_mask; | |
3195 | ||
713a2e21 | 3196 | return __set_cpus_allowed_ptr_locked(p, ctx, rq, &rf); |
07ec77a1 WD |
3197 | } |
3198 | ||
25834c73 PZ |
3199 | int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask) |
3200 | { | |
713a2e21 WL |
3201 | struct affinity_context ac = { |
3202 | .new_mask = new_mask, | |
3203 | .flags = 0, | |
3204 | }; | |
3205 | ||
3206 | return __set_cpus_allowed_ptr(p, &ac); | |
25834c73 | 3207 | } |
5cc389bc PZ |
3208 | EXPORT_SYMBOL_GPL(set_cpus_allowed_ptr); |
3209 | ||
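/*
 * Illustrative sketch (not part of this file): a minimal kernel-side
 * caller of the API above. example_pin_task() is a made-up name; the
 * only real interfaces used are set_cpus_allowed_ptr() and cpumask_of().
 */
static int example_pin_task(struct task_struct *p, int cpu)
{
	/*
	 * May block until @p leaves its Migrate-Disable region and is
	 * enqueued on an allowed CPU; per the rules above, no spinlocks
	 * may be held by the caller.
	 */
	return set_cpus_allowed_ptr(p, cpumask_of(cpu));
}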
07ec77a1 WD |
3210 | /* |
3211 | * Change a given task's CPU affinity to the intersection of its current | |
8f9ea86f WL |
3212 | * affinity mask and @subset_mask, writing the resulting mask to @new_mask. |
3213 | * If user_cpus_ptr is defined, use it as the basis for restricting CPU | |
3214 | * affinity; otherwise, use cpu_online_mask instead. |
3215 | * | |
07ec77a1 WD |
3216 | * If the resulting mask is empty, leave the affinity unchanged and return |
3217 | * -EINVAL. | |
3218 | */ | |
3219 | static int restrict_cpus_allowed_ptr(struct task_struct *p, | |
3220 | struct cpumask *new_mask, | |
3221 | const struct cpumask *subset_mask) | |
3222 | { | |
8f9ea86f WL |
3223 | struct affinity_context ac = { |
3224 | .new_mask = new_mask, | |
3225 | .flags = 0, | |
3226 | }; | |
07ec77a1 WD |
3227 | struct rq_flags rf; |
3228 | struct rq *rq; | |
3229 | int err; | |
3230 | ||
07ec77a1 WD |
3231 | rq = task_rq_lock(p, &rf); |
3232 | ||
3233 | /* | |
3234 | * Forcefully restricting the affinity of a deadline task is | |
3235 | * likely to cause problems, so fail and noisily override the | |
3236 | * mask entirely. | |
3237 | */ | |
3238 | if (task_has_dl_policy(p) && dl_bandwidth_enabled()) { | |
3239 | err = -EPERM; | |
3240 | goto err_unlock; | |
3241 | } | |
3242 | ||
8f9ea86f | 3243 | if (!cpumask_and(new_mask, task_user_cpus(p), subset_mask)) { |
07ec77a1 WD |
3244 | err = -EINVAL; |
3245 | goto err_unlock; | |
3246 | } | |
3247 | ||
713a2e21 | 3248 | return __set_cpus_allowed_ptr_locked(p, &ac, rq, &rf); |
07ec77a1 WD |
3249 | |
3250 | err_unlock: | |
3251 | task_rq_unlock(rq, p, &rf); | |
07ec77a1 WD |
3252 | return err; |
3253 | } | |
3254 | ||
3255 | /* | |
3256 | * Restrict the CPU affinity of task @p so that it is a subset of | |
5584e8ac | 3257 | * task_cpu_possible_mask() and point @p->user_cpus_ptr to a copy of the |
07ec77a1 WD |
3258 | * old affinity mask. If the resulting mask is empty, we warn and walk |
3259 | * up the cpuset hierarchy until we find a suitable mask. | |
3260 | */ | |
3261 | void force_compatible_cpus_allowed_ptr(struct task_struct *p) | |
3262 | { | |
3263 | cpumask_var_t new_mask; | |
3264 | const struct cpumask *override_mask = task_cpu_possible_mask(p); | |
3265 | ||
3266 | alloc_cpumask_var(&new_mask, GFP_KERNEL); | |
3267 | ||
3268 | /* | |
3269 | * __migrate_task() can fail silently in the face of concurrent | |
3270 | * offlining of the chosen destination CPU, so take the hotplug | |
3271 | * lock to ensure that the migration succeeds. | |
3272 | */ | |
3273 | cpus_read_lock(); | |
3274 | if (!cpumask_available(new_mask)) | |
3275 | goto out_set_mask; | |
3276 | ||
3277 | if (!restrict_cpus_allowed_ptr(p, new_mask, override_mask)) | |
3278 | goto out_free_mask; | |
3279 | ||
3280 | /* | |
3281 | * We failed to find a valid subset of the affinity mask for the | |
3282 | * task, so override it based on its cpuset hierarchy. | |
3283 | */ | |
3284 | cpuset_cpus_allowed(p, new_mask); | |
3285 | override_mask = new_mask; | |
3286 | ||
3287 | out_set_mask: | |
3288 | if (printk_ratelimit()) { | |
3289 | printk_deferred("Overriding affinity for process %d (%s) to CPUs %*pbl\n", | |
3290 | task_pid_nr(p), p->comm, | |
3291 | cpumask_pr_args(override_mask)); | |
3292 | } | |
3293 | ||
3294 | WARN_ON(set_cpus_allowed_ptr(p, override_mask)); | |
3295 | out_free_mask: | |
3296 | cpus_read_unlock(); | |
3297 | free_cpumask_var(new_mask); | |
3298 | } | |
3299 | ||
3300 | static int | |
713a2e21 | 3301 | __sched_setaffinity(struct task_struct *p, struct affinity_context *ctx); |
07ec77a1 WD |
3302 | |
3303 | /* | |
3304 | * Restore the affinity of a task @p which was previously restricted by a | |
8f9ea86f | 3305 | * call to force_compatible_cpus_allowed_ptr(). |
07ec77a1 WD |
3306 | * |
3307 | * It is the caller's responsibility to serialise this with any calls to | |
3308 | * force_compatible_cpus_allowed_ptr(@p). | |
3309 | */ | |
3310 | void relax_compatible_cpus_allowed_ptr(struct task_struct *p) | |
3311 | { | |
713a2e21 | 3312 | struct affinity_context ac = { |
8f9ea86f WL |
3313 | .new_mask = task_user_cpus(p), |
3314 | .flags = 0, | |
713a2e21 | 3315 | }; |
8f9ea86f | 3316 | int ret; |
07ec77a1 WD |
3317 | |
3318 | /* | |
8f9ea86f WL |
3319 | * Try to restore the old affinity mask with __sched_setaffinity(). |
3320 | * Cpuset masking will be done there too. | |
07ec77a1 | 3321 | */ |
8f9ea86f WL |
3322 | ret = __sched_setaffinity(p, &ac); |
3323 | WARN_ON_ONCE(ret); | |
07ec77a1 WD |
3324 | } |
3325 | ||
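/*
 * Sketch of the intended force/relax pairing (hypothetical caller on an
 * asymmetric-ISA system where only a subset of CPUs can execute @p;
 * both example_* names below are illustrative only):
 */
static void example_enter_compat_mode(struct task_struct *p)
{
	/* Clamp affinity to task_cpu_possible_mask(), saving the old mask. */
	force_compatible_cpus_allowed_ptr(p);
}

static void example_leave_compat_mode(struct task_struct *p)
{
	/* Restore the affinity previously saved in p->user_cpus_ptr. */
	relax_compatible_cpus_allowed_ptr(p);
}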
dd41f596 | 3326 | void set_task_cpu(struct task_struct *p, unsigned int new_cpu) |
c65cc870 | 3327 | { |
e2912009 | 3328 | #ifdef CONFIG_SCHED_DEBUG |
2f064a59 PZ |
3329 | unsigned int state = READ_ONCE(p->__state); |
3330 | ||
e2912009 PZ |
3331 | /* |
3332 | * We should never call set_task_cpu() on a blocked task, | |
3333 | * ttwu() will sort out the placement. | |
3334 | */ | |
2f064a59 | 3335 | WARN_ON_ONCE(state != TASK_RUNNING && state != TASK_WAKING && !p->on_rq); |
0122ec5b | 3336 | |
3ea94de1 JP |
3337 | /* |
3338 | * Migrating fair class task must have p->on_rq = TASK_ON_RQ_MIGRATING, | |
3339 | * because schedstat_wait_{start,end} rebase migrating task's wait_start | |
3340 | * time relying on p->on_rq. | |
3341 | */ | |
2f064a59 | 3342 | WARN_ON_ONCE(state == TASK_RUNNING && |
3ea94de1 JP |
3343 | p->sched_class == &fair_sched_class && |
3344 | (p->on_rq && !task_on_rq_migrating(p))); | |
3345 | ||
0122ec5b | 3346 | #ifdef CONFIG_LOCKDEP |
6c6c54e1 PZ |
3347 | /* |
3348 | * The caller should hold either p->pi_lock or rq->lock, when changing | |
3349 | * a task's CPU. ->pi_lock for waking tasks, rq->lock for runnable tasks. | |
3350 | * | |
3351 | * sched_move_task() holds both and thus holding either pins the cgroup, | |
8323f26c | 3352 | * see task_group(). |
6c6c54e1 PZ |
3353 | * |
3354 | * Furthermore, all task_rq users should acquire both locks, see | |
3355 | * task_rq_lock(). | |
3356 | */ | |
0122ec5b | 3357 | WARN_ON_ONCE(debug_locks && !(lockdep_is_held(&p->pi_lock) || |
9ef7e7e3 | 3358 | lockdep_is_held(__rq_lockp(task_rq(p))))); |
0122ec5b | 3359 | #endif |
4ff9083b PZ |
3360 | /* |
3361 | * Clearly, migrating tasks to offline CPUs is a fairly daft thing. | |
3362 | */ | |
3363 | WARN_ON_ONCE(!cpu_online(new_cpu)); | |
af449901 PZ |
3364 | |
3365 | WARN_ON_ONCE(is_migration_disabled(p)); | |
e2912009 PZ |
3366 | #endif |
3367 | ||
de1d7286 | 3368 | trace_sched_migrate_task(p, new_cpu); |
cbc34ed1 | 3369 | |
0c69774e | 3370 | if (task_cpu(p) != new_cpu) { |
0a74bef8 | 3371 | if (p->sched_class->migrate_task_rq) |
1327237a | 3372 | p->sched_class->migrate_task_rq(p, new_cpu); |
0c69774e | 3373 | p->se.nr_migrations++; |
d7822b1e | 3374 | rseq_migrate(p); |
223baf9d | 3375 | sched_mm_cid_migrate_from(p); |
ff303e66 | 3376 | perf_event_task_migrate(p); |
0c69774e | 3377 | } |
dd41f596 IM |
3378 | |
3379 | __set_task_cpu(p, new_cpu); | |
c65cc870 IM |
3380 | } |
3381 | ||
0ad4e3df | 3382 | #ifdef CONFIG_NUMA_BALANCING |
ac66f547 PZ |
3383 | static void __migrate_swap_task(struct task_struct *p, int cpu) |
3384 | { | |
da0c1e65 | 3385 | if (task_on_rq_queued(p)) { |
ac66f547 | 3386 | struct rq *src_rq, *dst_rq; |
8a8c69c3 | 3387 | struct rq_flags srf, drf; |
ac66f547 PZ |
3388 | |
3389 | src_rq = task_rq(p); | |
3390 | dst_rq = cpu_rq(cpu); | |
3391 | ||
8a8c69c3 PZ |
3392 | rq_pin_lock(src_rq, &srf); |
3393 | rq_pin_lock(dst_rq, &drf); | |
3394 | ||
ac66f547 PZ |
3395 | deactivate_task(src_rq, p, 0); |
3396 | set_task_cpu(p, cpu); | |
3397 | activate_task(dst_rq, p, 0); | |
e23edc86 | 3398 | wakeup_preempt(dst_rq, p, 0); |
8a8c69c3 PZ |
3399 | |
3400 | rq_unpin_lock(dst_rq, &drf); | |
3401 | rq_unpin_lock(src_rq, &srf); | |
3402 | ||
ac66f547 PZ |
3403 | } else { |
3404 | /* | |
3405 | * Task isn't running anymore; make it appear like we migrated | |
3406 | * it before it went to sleep. This means on wakeup we make the | |
d1ccc66d | 3407 | * previous CPU our target instead of where it really is. |
ac66f547 PZ |
3408 | */ |
3409 | p->wake_cpu = cpu; | |
3410 | } | |
3411 | } | |
3412 | ||
3413 | struct migration_swap_arg { | |
3414 | struct task_struct *src_task, *dst_task; | |
3415 | int src_cpu, dst_cpu; | |
3416 | }; | |
3417 | ||
3418 | static int migrate_swap_stop(void *data) | |
3419 | { | |
3420 | struct migration_swap_arg *arg = data; | |
3421 | struct rq *src_rq, *dst_rq; | |
ac66f547 | 3422 | |
62694cd5 PZ |
3423 | if (!cpu_active(arg->src_cpu) || !cpu_active(arg->dst_cpu)) |
3424 | return -EAGAIN; | |
3425 | ||
ac66f547 PZ |
3426 | src_rq = cpu_rq(arg->src_cpu); |
3427 | dst_rq = cpu_rq(arg->dst_cpu); | |
3428 | ||
5bb76f1d PZ |
3429 | guard(double_raw_spinlock)(&arg->src_task->pi_lock, &arg->dst_task->pi_lock); |
3430 | guard(double_rq_lock)(src_rq, dst_rq); | |
62694cd5 | 3431 | |
ac66f547 | 3432 | if (task_cpu(arg->dst_task) != arg->dst_cpu) |
5bb76f1d | 3433 | return -EAGAIN; |
ac66f547 PZ |
3434 | |
3435 | if (task_cpu(arg->src_task) != arg->src_cpu) | |
5bb76f1d | 3436 | return -EAGAIN; |
ac66f547 | 3437 | |
3bd37062 | 3438 | if (!cpumask_test_cpu(arg->dst_cpu, arg->src_task->cpus_ptr)) |
5bb76f1d | 3439 | return -EAGAIN; |
ac66f547 | 3440 | |
3bd37062 | 3441 | if (!cpumask_test_cpu(arg->src_cpu, arg->dst_task->cpus_ptr)) |
5bb76f1d | 3442 | return -EAGAIN; |
ac66f547 PZ |
3443 | |
3444 | __migrate_swap_task(arg->src_task, arg->dst_cpu); | |
3445 | __migrate_swap_task(arg->dst_task, arg->src_cpu); | |
3446 | ||
5bb76f1d | 3447 | return 0; |
ac66f547 PZ |
3448 | } |
3449 | ||
3450 | /* | |
3451 | * Cross migrate two tasks | |
3452 | */ | |
0ad4e3df SD |
3453 | int migrate_swap(struct task_struct *cur, struct task_struct *p, |
3454 | int target_cpu, int curr_cpu) | |
ac66f547 PZ |
3455 | { |
3456 | struct migration_swap_arg arg; | |
3457 | int ret = -EINVAL; | |
3458 | ||
ac66f547 PZ |
3459 | arg = (struct migration_swap_arg){ |
3460 | .src_task = cur, | |
0ad4e3df | 3461 | .src_cpu = curr_cpu, |
ac66f547 | 3462 | .dst_task = p, |
0ad4e3df | 3463 | .dst_cpu = target_cpu, |
ac66f547 PZ |
3464 | }; |
3465 | ||
3466 | if (arg.src_cpu == arg.dst_cpu) | |
3467 | goto out; | |
3468 | ||
6acce3ef PZ |
3469 | /* |
3470 | * These three tests are all lockless; this is OK since all of them | |
3471 | * will be re-checked with proper locks held further down the line. | |
3472 | */ | |
ac66f547 PZ |
3473 | if (!cpu_active(arg.src_cpu) || !cpu_active(arg.dst_cpu)) |
3474 | goto out; | |
3475 | ||
3bd37062 | 3476 | if (!cpumask_test_cpu(arg.dst_cpu, arg.src_task->cpus_ptr)) |
ac66f547 PZ |
3477 | goto out; |
3478 | ||
3bd37062 | 3479 | if (!cpumask_test_cpu(arg.src_cpu, arg.dst_task->cpus_ptr)) |
ac66f547 PZ |
3480 | goto out; |
3481 | ||
286549dc | 3482 | trace_sched_swap_numa(cur, arg.src_cpu, p, arg.dst_cpu); |
ac66f547 PZ |
3483 | ret = stop_two_cpus(arg.dst_cpu, arg.src_cpu, migrate_swap_stop, &arg); |
3484 | ||
3485 | out: | |
ac66f547 PZ |
3486 | return ret; |
3487 | } | |
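/*
 * Illustrative call site (sketch): the NUMA balancer picks a best CPU
 * and a best task, then requests a cross-migration roughly as:
 *
 *	migrate_swap(p, best_task, best_cpu, src_cpu);
 *
 * (the variable names here are assumptions for illustration, not
 * quotes from the actual caller in fair.c)
 */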
0ad4e3df | 3488 | #endif /* CONFIG_NUMA_BALANCING */ |
ac66f547 | 3489 | |
1da177e4 LT |
3490 | /*** |
3491 | * kick_process - kick a running thread to enter/exit the kernel | |
3492 | * @p: the to-be-kicked thread | |
3493 | * | |
3494 | * Cause a process which is running on another CPU to enter | |
3495 | * kernel mode, without any delay (to get signals handled). |
3496 | * | |
25985edc | 3497 | * NOTE: this function doesn't have to take the runqueue lock, |
1da177e4 LT |
3498 | * because all it wants to ensure is that the remote task enters |
3499 | * the kernel. If the IPI races and the task has been migrated | |
3500 | * to another CPU then no harm is done and the purpose has been | |
3501 | * achieved as well. | |
3502 | */ | |
36c8b586 | 3503 | void kick_process(struct task_struct *p) |
1da177e4 | 3504 | { |
0e34600a PZ |
3505 | guard(preempt)(); |
3506 | int cpu = task_cpu(p); | |
1da177e4 | 3507 | |
1da177e4 LT |
3508 | if ((cpu != smp_processor_id()) && task_curr(p)) |
3509 | smp_send_reschedule(cpu); | |
1da177e4 | 3510 | } |
b43e3521 | 3511 | EXPORT_SYMBOL_GPL(kick_process); |
1da177e4 | 3512 | |
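/*
 * Typical use (sketch, mirroring signal delivery): make the pending
 * condition visible first, then kick the task so that, if it is
 * currently running in userspace on another CPU, it drops into the
 * kernel and notices:
 *
 *	set_tsk_thread_flag(p, TIF_SIGPENDING);
 *	kick_process(p);
 */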
30da688e | 3513 | /* |
3bd37062 | 3514 | * ->cpus_ptr is protected by both rq->lock and p->pi_lock |
e9d867a6 PZI |
3515 | * |
3516 | * A few notes on cpu_active vs cpu_online: | |
3517 | * | |
3518 | * - cpu_active must be a subset of cpu_online | |
3519 | * | |
97fb7a0a | 3520 | * - on CPU-up we allow per-CPU kthreads on the online && !active CPU, |
e9d867a6 | 3521 | * see __set_cpus_allowed_ptr(). At this point the newly online |
d1ccc66d | 3522 | * CPU isn't yet part of the sched domains, and balancing will not |
e9d867a6 PZI |
3523 | * see it. |
3524 | * | |
d1ccc66d | 3525 | * - on CPU-down we clear cpu_active() to mask the sched domains and |
e9d867a6 | 3526 | * avoid the load balancer to place new tasks on the to be removed |
d1ccc66d | 3527 | * CPU. Existing tasks will remain running there and will be taken |
e9d867a6 PZI |
3528 | * off. |
3529 | * | |
3530 | * This means that fallback selection must not select !active CPUs. | |
3531 | * And can assume that any active CPU must be online. Conversely | |
3532 | * select_task_rq() below may allow selection of !active CPUs in order | |
3533 | * to satisfy the above rules. | |
30da688e | 3534 | */ |
5da9a0fb PZ |
3535 | static int select_fallback_rq(int cpu, struct task_struct *p) |
3536 | { | |
aa00d89c TC |
3537 | int nid = cpu_to_node(cpu); |
3538 | const struct cpumask *nodemask = NULL; | |
2baab4e9 PZ |
3539 | enum { cpuset, possible, fail } state = cpuset; |
3540 | int dest_cpu; | |
5da9a0fb | 3541 | |
aa00d89c | 3542 | /* |
d1ccc66d IM |
3543 | * If the node that the CPU is on has been offlined, cpu_to_node() |
3544 | * will return -1. There is no CPU on the node, and we should | |
3545 | * select a CPU on another node. |
aa00d89c TC |
3546 | */ |
3547 | if (nid != -1) { | |
3548 | nodemask = cpumask_of_node(nid); | |
3549 | ||
3550 | /* Look for allowed, online CPU in same node. */ | |
3551 | for_each_cpu(dest_cpu, nodemask) { | |
9ae606bc | 3552 | if (is_cpu_allowed(p, dest_cpu)) |
aa00d89c TC |
3553 | return dest_cpu; |
3554 | } | |
2baab4e9 | 3555 | } |
5da9a0fb | 3556 | |
2baab4e9 PZ |
3557 | for (;;) { |
3558 | /* Any allowed, online CPU? */ | |
3bd37062 | 3559 | for_each_cpu(dest_cpu, p->cpus_ptr) { |
175f0e25 | 3560 | if (!is_cpu_allowed(p, dest_cpu)) |
2baab4e9 | 3561 | continue; |
175f0e25 | 3562 | |
2baab4e9 PZ |
3563 | goto out; |
3564 | } | |
5da9a0fb | 3565 | |
e73e85f0 | 3566 | /* No more Mr. Nice Guy. */ |
2baab4e9 PZ |
3567 | switch (state) { |
3568 | case cpuset: | |
97c0054d | 3569 | if (cpuset_cpus_allowed_fallback(p)) { |
e73e85f0 ON |
3570 | state = possible; |
3571 | break; | |
3572 | } | |
df561f66 | 3573 | fallthrough; |
2baab4e9 | 3574 | case possible: |
af449901 PZ |
3575 | /* |
3576 | * XXX When called from select_task_rq() we only | |
3577 | * hold p->pi_lock and again violate locking order. | |
3578 | * | |
3579 | * More yuck to audit. | |
3580 | */ | |
9ae606bc | 3581 | do_set_cpus_allowed(p, task_cpu_possible_mask(p)); |
2baab4e9 PZ |
3582 | state = fail; |
3583 | break; | |
2baab4e9 PZ |
3584 | case fail: |
3585 | BUG(); | |
3586 | break; | |
3587 | } | |
3588 | } | |
3589 | ||
3590 | out: | |
3591 | if (state != cpuset) { | |
3592 | /* | |
3593 | * Don't tell them about moving exiting tasks or | |
3594 | * kernel threads (both mm NULL), since they never | |
3595 | * leave kernel. | |
3596 | */ | |
3597 | if (p->mm && printk_ratelimit()) { | |
aac74dc4 | 3598 | printk_deferred("process %d (%s) no longer affine to cpu%d\n", |
2baab4e9 PZ |
3599 | task_pid_nr(p), p->comm, cpu); |
3600 | } | |
5da9a0fb PZ |
3601 | } |
3602 | ||
3603 | return dest_cpu; | |
3604 | } | |
3605 | ||
e2912009 | 3606 | /* |
3bd37062 | 3607 | * The caller (fork, wakeup) owns p->pi_lock, ->cpus_ptr is stable. |
e2912009 | 3608 | */ |
970b13ba | 3609 | static inline |
3aef1551 | 3610 | int select_task_rq(struct task_struct *p, int cpu, int wake_flags) |
970b13ba | 3611 | { |
cbce1a68 PZ |
3612 | lockdep_assert_held(&p->pi_lock); |
3613 | ||
af449901 | 3614 | if (p->nr_cpus_allowed > 1 && !is_migration_disabled(p)) |
3aef1551 | 3615 | cpu = p->sched_class->select_task_rq(p, cpu, wake_flags); |
e9d867a6 | 3616 | else |
3bd37062 | 3617 | cpu = cpumask_any(p->cpus_ptr); |
e2912009 PZ |
3618 | |
3619 | /* | |
3620 | * In order not to call set_task_cpu() on a blocking task we need | |
3bd37062 | 3621 | * to rely on ttwu() to place the task on a valid ->cpus_ptr |
d1ccc66d | 3622 | * CPU. |
e2912009 PZ |
3623 | * |
3624 | * Since this is common to all placement strategies, this lives here. | |
3625 | * | |
3626 | * [ this allows ->select_task_rq() to simply return task_cpu(p) and |
3627 | * not worry about this generic constraint ] | |
3628 | */ | |
7af443ee | 3629 | if (unlikely(!is_cpu_allowed(p, cpu))) |
5da9a0fb | 3630 | cpu = select_fallback_rq(task_cpu(p), p); |
e2912009 PZ |
3631 | |
3632 | return cpu; | |
970b13ba | 3633 | } |
09a40af5 | 3634 | |
f5832c19 NP |
3635 | void sched_set_stop_task(int cpu, struct task_struct *stop) |
3636 | { | |
ded467dc | 3637 | static struct lock_class_key stop_pi_lock; |
f5832c19 NP |
3638 | struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 }; |
3639 | struct task_struct *old_stop = cpu_rq(cpu)->stop; | |
3640 | ||
3641 | if (stop) { | |
3642 | /* | |
3643 | * Make it appear like a SCHED_FIFO task; it's something |
3644 | * userspace knows about and won't get confused about. |
3645 | * | |
3646 | * Also, it will make PI more or less work without too | |
3647 | * much confusion -- but then, stop work should not | |
3648 | * rely on PI working anyway. | |
3649 | */ | |
3650 | sched_setscheduler_nocheck(stop, SCHED_FIFO, ¶m); | |
3651 | ||
3652 | stop->sched_class = &stop_sched_class; | |
ded467dc PZ |
3653 | |
3654 | /* | |
3655 | * The PI code calls rt_mutex_setprio() with ->pi_lock held to | |
3656 | * adjust the effective priority of a task. As a result, | |
3657 | * rt_mutex_setprio() can trigger (RT) balancing operations, | |
3658 | * which can then trigger wakeups of the stop thread to push | |
3659 | * around the current task. | |
3660 | * | |
3661 | * The stop task itself will never be part of the PI-chain, it | |
3662 | * never blocks, therefore that ->pi_lock recursion is safe. | |
3663 | * Tell lockdep about this by placing the stop->pi_lock in its | |
3664 | * own class. | |
3665 | */ | |
3666 | lockdep_set_class(&stop->pi_lock, &stop_pi_lock); | |
f5832c19 NP |
3667 | } |
3668 | ||
3669 | cpu_rq(cpu)->stop = stop; | |
3670 | ||
3671 | if (old_stop) { | |
3672 | /* | |
3673 | * Reset it back to a normal scheduling class so that | |
3674 | * it can die in pieces. | |
3675 | */ | |
3676 | old_stop->sched_class = &rt_sched_class; | |
3677 | } | |
3678 | } | |
3679 | ||
74d862b6 | 3680 | #else /* CONFIG_SMP */ |
25834c73 PZ |
3681 | |
3682 | static inline int __set_cpus_allowed_ptr(struct task_struct *p, | |
713a2e21 | 3683 | struct affinity_context *ctx) |
25834c73 | 3684 | { |
713a2e21 | 3685 | return set_cpus_allowed_ptr(p, ctx->new_mask); |
25834c73 PZ |
3686 | } |
3687 | ||
af449901 PZ |
3688 | static inline void migrate_disable_switch(struct rq *rq, struct task_struct *p) { } |
3689 | ||
3015ef4b TG |
3690 | static inline bool rq_has_pinned_tasks(struct rq *rq) |
3691 | { | |
3692 | return false; | |
3693 | } | |
3694 | ||
9a5418bc WL |
3695 | static inline cpumask_t *alloc_user_cpus_ptr(int node) |
3696 | { | |
3697 | return NULL; | |
3698 | } | |
3699 | ||
74d862b6 | 3700 | #endif /* !CONFIG_SMP */ |
970b13ba | 3701 | |
d7c01d27 | 3702 | static void |
b84cb5df | 3703 | ttwu_stat(struct task_struct *p, int cpu, int wake_flags) |
9ed3811a | 3704 | { |
4fa8d299 | 3705 | struct rq *rq; |
b84cb5df | 3706 | |
4fa8d299 JP |
3707 | if (!schedstat_enabled()) |
3708 | return; | |
3709 | ||
3710 | rq = this_rq(); | |
d7c01d27 | 3711 | |
4fa8d299 JP |
3712 | #ifdef CONFIG_SMP |
3713 | if (cpu == rq->cpu) { | |
b85c8b71 | 3714 | __schedstat_inc(rq->ttwu_local); |
ceeadb83 | 3715 | __schedstat_inc(p->stats.nr_wakeups_local); |
d7c01d27 PZ |
3716 | } else { |
3717 | struct sched_domain *sd; | |
3718 | ||
ceeadb83 | 3719 | __schedstat_inc(p->stats.nr_wakeups_remote); |
857d315f PZ |
3720 | |
3721 | guard(rcu)(); | |
4fa8d299 | 3722 | for_each_domain(rq->cpu, sd) { |
d7c01d27 | 3723 | if (cpumask_test_cpu(cpu, sched_domain_span(sd))) { |
b85c8b71 | 3724 | __schedstat_inc(sd->ttwu_wake_remote); |
d7c01d27 PZ |
3725 | break; |
3726 | } | |
3727 | } | |
3728 | } | |
f339b9dc PZ |
3729 | |
3730 | if (wake_flags & WF_MIGRATED) | |
ceeadb83 | 3731 | __schedstat_inc(p->stats.nr_wakeups_migrate); |
d7c01d27 PZ |
3732 | #endif /* CONFIG_SMP */ |
3733 | ||
b85c8b71 | 3734 | __schedstat_inc(rq->ttwu_count); |
ceeadb83 | 3735 | __schedstat_inc(p->stats.nr_wakeups); |
d7c01d27 PZ |
3736 | |
3737 | if (wake_flags & WF_SYNC) | |
ceeadb83 | 3738 | __schedstat_inc(p->stats.nr_wakeups_sync); |
d7c01d27 PZ |
3739 | } |
3740 | ||
23f41eeb | 3741 | /* |
160fb0d8 | 3742 | * Mark the task runnable. |
23f41eeb | 3743 | */ |
160fb0d8 | 3744 | static inline void ttwu_do_wakeup(struct task_struct *p) |
9ed3811a | 3745 | { |
2f064a59 | 3746 | WRITE_ONCE(p->__state, TASK_RUNNING); |
fbd705a0 | 3747 | trace_sched_wakeup(p); |
160fb0d8 CZ |
3748 | } |
3749 | ||
3750 | static void | |
3751 | ttwu_do_activate(struct rq *rq, struct task_struct *p, int wake_flags, | |
3752 | struct rq_flags *rf) | |
3753 | { | |
3754 | int en_flags = ENQUEUE_WAKEUP | ENQUEUE_NOCLOCK; | |
3755 | ||
3756 | lockdep_assert_rq_held(rq); | |
3757 | ||
3758 | if (p->sched_contributes_to_load) | |
3759 | rq->nr_uninterruptible--; | |
3760 | ||
3761 | #ifdef CONFIG_SMP | |
3762 | if (wake_flags & WF_MIGRATED) | |
3763 | en_flags |= ENQUEUE_MIGRATED; | |
3764 | else | |
3765 | #endif | |
3766 | if (p->in_iowait) { | |
3767 | delayacct_blkio_end(p); | |
3768 | atomic_dec(&task_rq(p)->nr_iowait); | |
3769 | } | |
3770 | ||
3771 | activate_task(rq, p, en_flags); | |
e23edc86 | 3772 | wakeup_preempt(rq, p, wake_flags); |
160fb0d8 CZ |
3773 | |
3774 | ttwu_do_wakeup(p); | |
fbd705a0 | 3775 | |
9ed3811a | 3776 | #ifdef CONFIG_SMP |
4c9a4bc8 PZ |
3777 | if (p->sched_class->task_woken) { |
3778 | /* | |
b19a888c | 3779 | * Our task @p is fully woken up and running; so it's safe to |
cbce1a68 | 3780 | * drop the rq->lock, hereafter rq is only used for statistics. |
4c9a4bc8 | 3781 | */ |
d8ac8971 | 3782 | rq_unpin_lock(rq, rf); |
9ed3811a | 3783 | p->sched_class->task_woken(rq, p); |
d8ac8971 | 3784 | rq_repin_lock(rq, rf); |
4c9a4bc8 | 3785 | } |
9ed3811a | 3786 | |
e69c6341 | 3787 | if (rq->idle_stamp) { |
78becc27 | 3788 | u64 delta = rq_clock(rq) - rq->idle_stamp; |
9bd721c5 | 3789 | u64 max = 2*rq->max_idle_balance_cost; |
9ed3811a | 3790 | |
abfafa54 JL |
3791 | update_avg(&rq->avg_idle, delta); |
3792 | ||
3793 | if (rq->avg_idle > max) | |
9ed3811a | 3794 | rq->avg_idle = max; |
abfafa54 | 3795 | |
9ed3811a TH |
3796 | rq->idle_stamp = 0; |
3797 | } | |
3798 | #endif | |
3799 | } | |
3800 | ||
c05fbafb | 3801 | /* |
58877d34 PZ |
3802 | * Consider @p being inside a wait loop: |
3803 | * | |
3804 | * for (;;) { | |
3805 | * set_current_state(TASK_UNINTERRUPTIBLE); | |
3806 | * | |
3807 | * if (CONDITION) | |
3808 | * break; | |
3809 | * | |
3810 | * schedule(); | |
3811 | * } | |
3812 | * __set_current_state(TASK_RUNNING); | |
3813 | * | |
3814 | * between set_current_state() and schedule(). In this case @p is still | |
3815 | * runnable, so all that needs doing is change p->state back to TASK_RUNNING in | |
3816 | * an atomic manner. | |
3817 | * | |
3818 | * By taking task_rq(p)->lock we serialize against schedule(), if @p->on_rq | |
3819 | * then schedule() must still happen and p->state can be changed to | |
3820 | * TASK_RUNNING. Otherwise we lost the race, schedule() has happened, and we | |
3821 | * need to do a full wakeup with enqueue. | |
3822 | * | |
3823 | * Returns: %true when the wakeup is done, | |
3824 | * %false otherwise. | |
c05fbafb | 3825 | */ |
58877d34 | 3826 | static int ttwu_runnable(struct task_struct *p, int wake_flags) |
c05fbafb | 3827 | { |
eb580751 | 3828 | struct rq_flags rf; |
c05fbafb PZ |
3829 | struct rq *rq; |
3830 | int ret = 0; | |
3831 | ||
eb580751 | 3832 | rq = __task_rq_lock(p, &rf); |
da0c1e65 | 3833 | if (task_on_rq_queued(p)) { |
efe09385 CZ |
3834 | if (!task_on_cpu(rq, p)) { |
3835 | /* | |
3836 | * When on_rq && !on_cpu the task is preempted; see if |
3837 | * it should preempt the currently running task. |
3838 | */ | |
3839 | update_rq_clock(rq); | |
e23edc86 | 3840 | wakeup_preempt(rq, p, wake_flags); |
efe09385 | 3841 | } |
160fb0d8 | 3842 | ttwu_do_wakeup(p); |
c05fbafb PZ |
3843 | ret = 1; |
3844 | } | |
eb580751 | 3845 | __task_rq_unlock(rq, &rf); |
c05fbafb PZ |
3846 | |
3847 | return ret; | |
3848 | } | |
3849 | ||
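/*
 * Waker-side counterpart of the wait loop shown above (sketch;
 * CONDITION stands for whatever the sleeper is waiting on):
 *
 *	CONDITION = 1;
 *	wake_up_process(p);
 *
 * try_to_wake_up() issues a full memory barrier before reading
 * p->__state, pairing with the smp_store_mb() in set_current_state(),
 * so either the sleeper sees CONDITION or the waker sees the sleeping
 * state and performs the wakeup.
 */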
317f3941 | 3850 | #ifdef CONFIG_SMP |
a1488664 | 3851 | void sched_ttwu_pending(void *arg) |
317f3941 | 3852 | { |
a1488664 | 3853 | struct llist_node *llist = arg; |
317f3941 | 3854 | struct rq *rq = this_rq(); |
73215849 | 3855 | struct task_struct *p, *t; |
d8ac8971 | 3856 | struct rq_flags rf; |
317f3941 | 3857 | |
e3baac47 PZ |
3858 | if (!llist) |
3859 | return; | |
3860 | ||
8a8c69c3 | 3861 | rq_lock_irqsave(rq, &rf); |
77558e4d | 3862 | update_rq_clock(rq); |
317f3941 | 3863 | |
8c4890d1 | 3864 | llist_for_each_entry_safe(p, t, llist, wake_entry.llist) { |
b6e13e85 PZ |
3865 | if (WARN_ON_ONCE(p->on_cpu)) |
3866 | smp_cond_load_acquire(&p->on_cpu, !VAL); | |
3867 | ||
3868 | if (WARN_ON_ONCE(task_cpu(p) != cpu_of(rq))) | |
3869 | set_task_cpu(p, cpu_of(rq)); | |
3870 | ||
73215849 | 3871 | ttwu_do_activate(rq, p, p->sched_remote_wakeup ? WF_MIGRATED : 0, &rf); |
b6e13e85 | 3872 | } |
317f3941 | 3873 | |
d6962c4f TD |
3874 | /* |
3875 | * Must be after enqueueing at least one task such that |
3876 | * idle_cpu() does not observe a false-negative -- if it does, | |
3877 | * it is possible for select_idle_siblings() to stack a number | |
3878 | * of tasks on this CPU during that window. | |
3879 | * | |
3880 | * It is OK to clear ttwu_pending when another task is pending; we |
3881 | * will receive an IPI after local IRQs are enabled and then enqueue it. |
3882 | * Since nr_running > 0 by then, idle_cpu() will always get the correct result. |
3883 | */ | |
3884 | WRITE_ONCE(rq->ttwu_pending, 0); | |
8a8c69c3 | 3885 | rq_unlock_irqrestore(rq, &rf); |
317f3941 PZ |
3886 | } |
3887 | ||
68f4ff04 VS |
3888 | /* |
3889 | * Prepare the scene for sending an IPI for a remote smp_call | |
3890 | * | |
3891 | * Returns true if the caller can proceed with sending the IPI. | |
3892 | * Returns false otherwise. | |
3893 | */ | |
3894 | bool call_function_single_prep_ipi(int cpu) | |
317f3941 | 3895 | { |
68f4ff04 | 3896 | if (set_nr_if_polling(cpu_rq(cpu)->idle)) { |
b2a02fc4 | 3897 | trace_sched_wake_idle_without_ipi(cpu); |
68f4ff04 | 3898 | return false; |
cc9cb0a7 | 3899 | } |
68f4ff04 VS |
3900 | |
3901 | return true; | |
317f3941 PZ |
3902 | } |
3903 | ||
2ebb1771 MG |
3904 | /* |
3905 | * Queue a task on the target CPU's wake_list and wake the CPU via IPI if |
3906 | * necessary. The wakee CPU on receipt of the IPI will queue the task | |
3907 | * via sched_ttwu_wakeup() for activation so the wakee incurs the cost | |
3908 | * of the wakeup instead of the waker. | |
3909 | */ | |
3910 | static void __ttwu_queue_wakelist(struct task_struct *p, int cpu, int wake_flags) | |
317f3941 | 3911 | { |
e3baac47 PZ |
3912 | struct rq *rq = cpu_rq(cpu); |
3913 | ||
b7e7ade3 PZ |
3914 | p->sched_remote_wakeup = !!(wake_flags & WF_MIGRATED); |
3915 | ||
126c2092 | 3916 | WRITE_ONCE(rq->ttwu_pending, 1); |
8c4890d1 | 3917 | __smp_call_single_queue(cpu, &p->wake_entry.llist); |
317f3941 | 3918 | } |
d6aa8f85 | 3919 | |
f6be8af1 CL |
3920 | void wake_up_if_idle(int cpu) |
3921 | { | |
3922 | struct rq *rq = cpu_rq(cpu); | |
fd7de1e8 | 3923 | |
4eb054f9 PZ |
3924 | guard(rcu)(); |
3925 | if (is_idle_task(rcu_dereference(rq->curr))) { | |
3926 | guard(rq_lock_irqsave)(rq); | |
3927 | if (is_idle_task(rq->curr)) | |
3928 | resched_curr(rq); | |
3929 | } | |
f6be8af1 CL |
3930 | } |
3931 | ||
39be3501 | 3932 | bool cpus_share_cache(int this_cpu, int that_cpu) |
518cd623 | 3933 | { |
42dc938a VD |
3934 | if (this_cpu == that_cpu) |
3935 | return true; | |
3936 | ||
518cd623 PZ |
3937 | return per_cpu(sd_llc_id, this_cpu) == per_cpu(sd_llc_id, that_cpu); |
3938 | } | |
c6e7bd7a | 3939 | |
b95303e0 BS |
3940 | /* |
3941 | * Whether CPUs share cache resources, which means LLC on non-cluster |
3942 | * machines and LLC tag or L2 on machines with clusters. | |
3943 | */ | |
3944 | bool cpus_share_resources(int this_cpu, int that_cpu) | |
3945 | { | |
3946 | if (this_cpu == that_cpu) | |
3947 | return true; | |
3948 | ||
3949 | return per_cpu(sd_share_id, this_cpu) == per_cpu(sd_share_id, that_cpu); | |
3950 | } | |
3951 | ||
751d4cbc | 3952 | static inline bool ttwu_queue_cond(struct task_struct *p, int cpu) |
2ebb1771 | 3953 | { |
5ba2ffba PZ |
3954 | /* |
3955 | * Do not complicate things with the async wake_list while the CPU is | |
3956 | * in hotplug state. | |
3957 | */ | |
3958 | if (!cpu_active(cpu)) | |
3959 | return false; | |
3960 | ||
751d4cbc MG |
3961 | /* Ensure the task will still be allowed to run on the CPU. */ |
3962 | if (!cpumask_test_cpu(cpu, p->cpus_ptr)) | |
3963 | return false; | |
3964 | ||
2ebb1771 MG |
3965 | /* |
3966 | * If the CPU does not share cache, then queue the task on the | |
3967 | * remote rq's wakelist to avoid accessing remote data. |
3968 | */ | |
3969 | if (!cpus_share_cache(smp_processor_id(), cpu)) | |
3970 | return true; | |
3971 | ||
f3dd3f67 TD |
3972 | if (cpu == smp_processor_id()) |
3973 | return false; | |
3974 | ||
2ebb1771 | 3975 | /* |
f3dd3f67 TD |
3976 | * If the wakee CPU is idle, or the task is descheduling and the |
3977 | * only running task on the CPU, then use the wakelist to offload | |
3978 | * the task activation to the idle (or soon-to-be-idle) CPU as | |
3979 | * the current CPU is likely busy. nr_running is checked to | |
3980 | * avoid unnecessary task stacking. | |
28156108 TD |
3981 | * |
3982 | * Note that we can only get here with (wakee) p->on_rq=0, | |
3983 | * p->on_cpu can be whatever; we've done the dequeue, so |
3984 | * the wakee has been accounted out of ->nr_running. | |
2ebb1771 | 3985 | */ |
f3dd3f67 | 3986 | if (!cpu_rq(cpu)->nr_running) |
2ebb1771 MG |
3987 | return true; |
3988 | ||
3989 | return false; | |
3990 | } | |
3991 | ||
3992 | static bool ttwu_queue_wakelist(struct task_struct *p, int cpu, int wake_flags) | |
c6e7bd7a | 3993 | { |
751d4cbc | 3994 | if (sched_feat(TTWU_QUEUE) && ttwu_queue_cond(p, cpu)) { |
c6e7bd7a | 3995 | sched_clock_cpu(cpu); /* Sync clocks across CPUs */ |
2ebb1771 | 3996 | __ttwu_queue_wakelist(p, cpu, wake_flags); |
c6e7bd7a PZ |
3997 | return true; |
3998 | } | |
3999 | ||
4000 | return false; | |
4001 | } | |
58877d34 PZ |
4002 | |
4003 | #else /* !CONFIG_SMP */ | |
4004 | ||
4005 | static inline bool ttwu_queue_wakelist(struct task_struct *p, int cpu, int wake_flags) | |
4006 | { | |
4007 | return false; | |
4008 | } | |
4009 | ||
d6aa8f85 | 4010 | #endif /* CONFIG_SMP */ |
317f3941 | 4011 | |
b5179ac7 | 4012 | static void ttwu_queue(struct task_struct *p, int cpu, int wake_flags) |
c05fbafb PZ |
4013 | { |
4014 | struct rq *rq = cpu_rq(cpu); | |
d8ac8971 | 4015 | struct rq_flags rf; |
c05fbafb | 4016 | |
2ebb1771 | 4017 | if (ttwu_queue_wakelist(p, cpu, wake_flags)) |
317f3941 | 4018 | return; |
317f3941 | 4019 | |
8a8c69c3 | 4020 | rq_lock(rq, &rf); |
77558e4d | 4021 | update_rq_clock(rq); |
d8ac8971 | 4022 | ttwu_do_activate(rq, p, wake_flags, &rf); |
8a8c69c3 | 4023 | rq_unlock(rq, &rf); |
9ed3811a TH |
4024 | } |
4025 | ||
43295d73 TG |
4026 | /* |
4027 | * Invoked from try_to_wake_up() to check whether the task can be woken up. | |
4028 | * | |
4029 | * The caller holds p::pi_lock if p != current or has preemption | |
4030 | * disabled when p == current. | |
5f220be2 | 4031 | * |
8f0eed4a | 4032 | * The rules of saved_state: |
5f220be2 TG |
4033 | * |
4034 | * The related locking code always holds p::pi_lock when updating | |
4035 | * p::saved_state, which means the code is fully serialized in both cases. | |
4036 | * | |
8f0eed4a EB |
4037 | * For PREEMPT_RT, the lock wait and lock wakeups happen via TASK_RTLOCK_WAIT. |
4038 | * No other bits set. This allows to distinguish all wakeup scenarios. | |
4039 | * | |
4040 | * For FREEZER, the wakeup happens via TASK_FROZEN. No other bits set. This | |
4041 | * allows us to prevent early wakeup of tasks before they can be run on | |
4042 | * asymmetric ISA architectures (e.g. ARMv9). |
43295d73 TG |
4043 | */ |
4044 | static __always_inline | |
4045 | bool ttwu_state_match(struct task_struct *p, unsigned int state, int *success) | |
4046 | { | |
1c069187 PZ |
4047 | int match; |
4048 | ||
5f220be2 TG |
4049 | if (IS_ENABLED(CONFIG_DEBUG_PREEMPT)) { |
4050 | WARN_ON_ONCE((state & TASK_RTLOCK_WAIT) && | |
4051 | state != TASK_RTLOCK_WAIT); | |
4052 | } | |
4053 | ||
1c069187 | 4054 | *success = !!(match = __task_state_match(p, state)); |
5f220be2 | 4055 | |
5f220be2 TG |
4056 | /* |
4057 | * Saved state preserves the task state across blocking on | |
8f0eed4a EB |
4058 | * an RT lock or TASK_FREEZABLE tasks. If the state matches, |
4059 | * set p::saved_state to TASK_RUNNING, but do not wake the task | |
4060 | * because it waits for a lock wakeup or __thaw_task(). Also | |
4061 | * indicate success because from the regular waker's point of | |
4062 | * view this has succeeded. | |
5f220be2 TG |
4063 | * |
4064 | * After acquiring the lock the task will restore p::__state | |
4065 | * from p::saved_state which ensures that the regular | |
4066 | * wakeup is not lost. The restore will also set | |
4067 | * p::saved_state to TASK_RUNNING so any further tests will | |
4068 | * not result in false positives vs. @success | |
4069 | */ | |
1c069187 | 4070 | if (match < 0) |
5f220be2 | 4071 | p->saved_state = TASK_RUNNING; |
fbaa6a18 | 4072 | |
1c069187 | 4073 | return match > 0; |
43295d73 TG |
4074 | } |
4075 | ||
8643cda5 PZ |
4076 | /* |
4077 | * Notes on Program-Order guarantees on SMP systems. | |
4078 | * | |
4079 | * MIGRATION | |
4080 | * | |
4081 | * The basic program-order guarantee on SMP systems is that when a task [t] | |
d1ccc66d IM |
4082 | * migrates, all its activity on its old CPU [c0] happens-before any subsequent |
4083 | * execution on its new CPU [c1]. | |
8643cda5 PZ |
4084 | * |
4085 | * For migration (of runnable tasks) this is provided by the following means: | |
4086 | * | |
4087 | * A) UNLOCK of the rq(c0)->lock scheduling out task t | |
4088 | * B) migration for t is required to synchronize *both* rq(c0)->lock and | |
4089 | * rq(c1)->lock (if not at the same time, then in that order). | |
4090 | * C) LOCK of the rq(c1)->lock scheduling in task | |
4091 | * | |
7696f991 | 4092 | * Release/acquire chaining guarantees that B happens after A and C after B. |
d1ccc66d | 4093 | * Note: the CPU doing B need not be c0 or c1 |
8643cda5 PZ |
4094 | * |
4095 | * Example: | |
4096 | * | |
4097 | * CPU0 CPU1 CPU2 | |
4098 | * | |
4099 | * LOCK rq(0)->lock | |
4100 | * sched-out X | |
4101 | * sched-in Y | |
4102 | * UNLOCK rq(0)->lock | |
4103 | * | |
4104 | * LOCK rq(0)->lock // orders against CPU0 | |
4105 | * dequeue X | |
4106 | * UNLOCK rq(0)->lock | |
4107 | * | |
4108 | * LOCK rq(1)->lock | |
4109 | * enqueue X | |
4110 | * UNLOCK rq(1)->lock | |
4111 | * | |
4112 | * LOCK rq(1)->lock // orders against CPU2 | |
4113 | * sched-out Z | |
4114 | * sched-in X | |
4115 | * UNLOCK rq(1)->lock | |
4116 | * | |
4117 | * | |
4118 | * BLOCKING -- aka. SLEEP + WAKEUP | |
4119 | * | |
4120 | * For blocking we (obviously) need to provide the same guarantee as for | |
4121 | * migration. However the means are completely different as there is no lock | |
4122 | * chain to provide order. Instead we do: | |
4123 | * | |
58877d34 PZ |
4124 | * 1) smp_store_release(X->on_cpu, 0) -- finish_task() |
4125 | * 2) smp_cond_load_acquire(!X->on_cpu) -- try_to_wake_up() | |
8643cda5 PZ |
4126 | * |
4127 | * Example: | |
4128 | * | |
4129 | * CPU0 (schedule) CPU1 (try_to_wake_up) CPU2 (schedule) | |
4130 | * | |
4131 | * LOCK rq(0)->lock LOCK X->pi_lock | |
4132 | * dequeue X | |
4133 | * sched-out X | |
4134 | * smp_store_release(X->on_cpu, 0); | |
4135 | * | |
1f03e8d2 | 4136 | * smp_cond_load_acquire(&X->on_cpu, !VAL); |
8643cda5 PZ |
4137 | * X->state = WAKING |
4138 | * set_task_cpu(X,2) | |
4139 | * | |
4140 | * LOCK rq(2)->lock | |
4141 | * enqueue X | |
4142 | * X->state = RUNNING | |
4143 | * UNLOCK rq(2)->lock | |
4144 | * | |
4145 | * LOCK rq(2)->lock // orders against CPU1 | |
4146 | * sched-out Z | |
4147 | * sched-in X | |
4148 | * UNLOCK rq(2)->lock | |
4149 | * | |
4150 | * UNLOCK X->pi_lock | |
4151 | * UNLOCK rq(0)->lock | |
4152 | * | |
4153 | * | |
7696f991 AP |
4154 | * However, for wakeups there is a second guarantee we must provide, namely we |
4155 | * must ensure that CONDITION=1 done by the caller can not be reordered with | |
4156 | * accesses to the task state; see try_to_wake_up() and set_current_state(). | |
8643cda5 PZ |
4157 | */ |
4158 | ||
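/*
 * Minimal sketch of the 1)/2) pairing above (illustration only; the
 * real sites are finish_task() and try_to_wake_up() in this file):
 *
 *	// prev CPU, finish_task():
 *	smp_store_release(&prev->on_cpu, 0);
 *
 *	// waking CPU, try_to_wake_up():
 *	smp_cond_load_acquire(&p->on_cpu, !VAL);
 *
 * The release/acquire pair guarantees that everything the old CPU did
 * on behalf of the task happens-before anything the waker does with it
 * afterwards, which is exactly the blocking-case program-order rule.
 */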
9ed3811a | 4159 | /** |
1da177e4 | 4160 | * try_to_wake_up - wake up a thread |
9ed3811a | 4161 | * @p: the thread to be awakened |
1da177e4 | 4162 | * @state: the mask of task states that can be woken |
9ed3811a | 4163 | * @wake_flags: wake modifier flags (WF_*) |
1da177e4 | 4164 | * |
58877d34 PZ |
4165 | * Conceptually does: |
4166 | * | |
4167 | * If (@state & @p->state) @p->state = TASK_RUNNING. | |
1da177e4 | 4168 | * |
a2250238 PZ |
4169 | * If the task was not queued/runnable, also place it back on a runqueue. |
4170 | * | |
58877d34 PZ |
4171 | * This function is atomic against schedule() which would dequeue the task. |
4172 | * | |
4173 | * It issues a full memory barrier before accessing @p->state, see the comment | |
4174 | * with set_current_state(). | |
a2250238 | 4175 | * |
58877d34 | 4176 | * Uses p->pi_lock to serialize against concurrent wake-ups. |
a2250238 | 4177 | * |
58877d34 PZ |
4178 | * Relies on p->pi_lock stabilizing: |
4179 | * - p->sched_class | |
4180 | * - p->cpus_ptr | |
4181 | * - p->sched_task_group | |
4182 | * in order to do migration, see its use of select_task_rq()/set_task_cpu(). | |
4183 | * | |
4184 | * Tries really hard to only take one task_rq(p)->lock for performance. | |
4185 | * Takes rq->lock in: | |
4186 | * - ttwu_runnable() -- old rq, unavoidable, see comment there; | |
4187 | * - ttwu_queue() -- new rq, for enqueue of the task; | |
4188 | * - psi_ttwu_dequeue() -- much sadness :-( accounting will kill us. | |
4189 | * | |
4190 | * As a consequence we race really badly with just about everything. See the | |
4191 | * many memory barriers and their comments for details. | |
7696f991 | 4192 | * |
a2250238 PZ |
4193 | * Return: %true if @p->state changes (an actual wakeup was done), |
4194 | * %false otherwise. | |
1da177e4 | 4195 | */ |
ab83f455 | 4196 | int try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags) |
1da177e4 | 4197 | { |
857d315f | 4198 | guard(preempt)(); |
c05fbafb | 4199 | int cpu, success = 0; |
2398f2c6 | 4200 | |
aacedf26 PZ |
4201 | if (p == current) { |
4202 | /* | |
4203 | * We're waking current, this means 'p->on_rq' and 'task_cpu(p) | |
4204 | * == smp_processor_id()'. Together this means we can special | |
58877d34 | 4205 | * case the whole 'p->on_rq && ttwu_runnable()' case below |
aacedf26 PZ |
4206 | * without taking any locks. |
4207 | * | |
4208 | * In particular: | |
4209 | * - we rely on Program-Order guarantees for all the ordering, | |
4210 | * - we're serialized against set_special_state() by virtue of | |
4211 | * it disabling IRQs (this allows not taking ->pi_lock). | |
4212 | */ | |
43295d73 | 4213 | if (!ttwu_state_match(p, state, &success)) |
e3d85487 | 4214 | goto out; |
aacedf26 | 4215 | |
aacedf26 | 4216 | trace_sched_waking(p); |
160fb0d8 | 4217 | ttwu_do_wakeup(p); |
aacedf26 PZ |
4218 | goto out; |
4219 | } | |
4220 | ||
e0acd0a6 ON |
4221 | /* |
4222 | * If we are going to wake up a thread waiting for CONDITION we | |
4223 | * need to ensure that CONDITION=1 done by the caller can not be | |
58877d34 PZ |
4224 | * reordered with p->state check below. This pairs with smp_store_mb() |
4225 | * in set_current_state() that the waiting thread does. | |
e0acd0a6 | 4226 | */ |
857d315f PZ |
4227 | scoped_guard (raw_spinlock_irqsave, &p->pi_lock) { |
4228 | smp_mb__after_spinlock(); | |
4229 | if (!ttwu_state_match(p, state, &success)) | |
4230 | break; | |
1da177e4 | 4231 | |
857d315f | 4232 | trace_sched_waking(p); |
fbd705a0 | 4233 | |
857d315f PZ |
4234 | /* |
4235 | * Ensure we load p->on_rq _after_ p->state, otherwise it would | |
4236 | * be possible to, falsely, observe p->on_rq == 0 and get stuck | |
4237 | * in smp_cond_load_acquire() below. | |
4238 | * | |
4239 | * sched_ttwu_pending() try_to_wake_up() | |
4240 | * STORE p->on_rq = 1 LOAD p->state | |
4241 | * UNLOCK rq->lock | |
4242 | * | |
4243 | * __schedule() (switch to task 'p') | |
4244 | * LOCK rq->lock smp_rmb(); | |
4245 | * smp_mb__after_spinlock(); | |
4246 | * UNLOCK rq->lock | |
4247 | * | |
4248 | * [task p] | |
4249 | * STORE p->state = UNINTERRUPTIBLE LOAD p->on_rq | |
4250 | * | |
4251 | * Pairs with the LOCK+smp_mb__after_spinlock() on rq->lock in | |
4252 | * __schedule(). See the comment for smp_mb__after_spinlock(). | |
4253 | * | |
ea41bb51 | 4254 | * A similar smp_rmb() lives in __task_needs_rq_lock(). |
857d315f PZ |
4255 | */ |
4256 | smp_rmb(); | |
4257 | if (READ_ONCE(p->on_rq) && ttwu_runnable(p, wake_flags)) | |
4258 | break; | |
1da177e4 | 4259 | |
1da177e4 | 4260 | #ifdef CONFIG_SMP |
857d315f PZ |
4261 | /* |
4262 | * Ensure we load p->on_cpu _after_ p->on_rq, otherwise it would be | |
4263 | * possible to, falsely, observe p->on_cpu == 0. | |
4264 | * | |
4265 | * One must be running (->on_cpu == 1) in order to remove oneself | |
4266 | * from the runqueue. | |
4267 | * | |
4268 | * __schedule() (switch to task 'p') try_to_wake_up() | |
4269 | * STORE p->on_cpu = 1 LOAD p->on_rq | |
4270 | * UNLOCK rq->lock | |
4271 | * | |
4272 | * __schedule() (put 'p' to sleep) | |
4273 | * LOCK rq->lock smp_rmb(); | |
4274 | * smp_mb__after_spinlock(); | |
4275 | * STORE p->on_rq = 0 LOAD p->on_cpu | |
4276 | * | |
4277 | * Pairs with the LOCK+smp_mb__after_spinlock() on rq->lock in | |
4278 | * __schedule(). See the comment for smp_mb__after_spinlock(). | |
4279 | * | |
4280 | * Form a control-dep-acquire with p->on_rq == 0 above, to ensure | |
4281 | * schedule()'s deactivate_task() has 'happened' and p will no longer | |
4282 | * care about its own p->state. See the comment in __schedule(). |
4283 | */ | |
4284 | smp_acquire__after_ctrl_dep(); | |
dbfb089d | 4285 | |
857d315f PZ |
4286 | /* |
4287 | * We're doing the wakeup (@success == 1), they did a dequeue (p->on_rq | |
4288 | * == 0), which means we need to do an enqueue, change p->state to | |
4289 | * TASK_WAKING such that we can unlock p->pi_lock before doing the | |
4290 | * enqueue, such as ttwu_queue_wakelist(). | |
4291 | */ | |
4292 | WRITE_ONCE(p->__state, TASK_WAKING); | |
ecf7d01c | 4293 | |
857d315f PZ |
4294 | /* |
4295 | * If the owning (remote) CPU is still in the middle of schedule() with | |
4296 | * this task as prev, consider queueing p on the remote CPU's wake_list |
4297 | * which potentially sends an IPI instead of spinning on p->on_cpu to | |
4298 | * let the waker make forward progress. This is safe because IRQs are | |
4299 | * disabled and the IPI will deliver after on_cpu is cleared. | |
4300 | * | |
4301 | * Ensure we load task_cpu(p) after p->on_cpu: | |
4302 | * | |
4303 | * set_task_cpu(p, cpu); | |
4304 | * STORE p->cpu = @cpu | |
4305 | * __schedule() (switch to task 'p') | |
4306 | * LOCK rq->lock | |
4307 | * smp_mb__after_spin_lock() smp_cond_load_acquire(&p->on_cpu) | |
4308 | * STORE p->on_cpu = 1 LOAD p->cpu | |
4309 | * | |
4310 | * to ensure we observe the correct CPU on which the task is currently | |
4311 | * scheduling. | |
4312 | */ | |
4313 | if (smp_load_acquire(&p->on_cpu) && | |
4314 | ttwu_queue_wakelist(p, task_cpu(p), wake_flags)) | |
4315 | break; | |
c6e7bd7a | 4316 | |
857d315f PZ |
4317 | /* |
4318 | * If the owning (remote) CPU is still in the middle of schedule() with | |
4319 | * this task as prev, wait until it's done referencing the task. | |
4320 | * | |
4321 | * Pairs with the smp_store_release() in finish_task(). | |
4322 | * | |
4323 | * This ensures that tasks getting woken will be fully ordered against | |
4324 | * their previous state and preserve Program Order. | |
4325 | */ | |
4326 | smp_cond_load_acquire(&p->on_cpu, !VAL); | |
1da177e4 | 4327 | |
857d315f PZ |
4328 | cpu = select_task_rq(p, p->wake_cpu, wake_flags | WF_TTWU); |
4329 | if (task_cpu(p) != cpu) { | |
4330 | if (p->in_iowait) { | |
4331 | delayacct_blkio_end(p); | |
4332 | atomic_dec(&task_rq(p)->nr_iowait); | |
4333 | } | |
ec618b84 | 4334 | |
857d315f PZ |
4335 | wake_flags |= WF_MIGRATED; |
4336 | psi_ttwu_dequeue(p); | |
4337 | set_task_cpu(p, cpu); | |
4338 | } | |
b6e13e85 | 4339 | #else |
857d315f | 4340 | cpu = task_cpu(p); |
1da177e4 | 4341 | #endif /* CONFIG_SMP */ |
1da177e4 | 4342 | |
857d315f PZ |
4343 | ttwu_queue(p, cpu, wake_flags); |
4344 | } | |
aacedf26 PZ |
4345 | out: |
4346 | if (success) | |
b6e13e85 | 4347 | ttwu_stat(p, task_cpu(p), wake_flags); |
1da177e4 LT |
4348 | |
4349 | return success; | |
4350 | } | |
4351 | ||
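/*
 * Most callers reach try_to_wake_up() through thin wrappers (sketch;
 * both are defined elsewhere in this file):
 *
 *	wake_up_process(p);		// try_to_wake_up(p, TASK_NORMAL, 0)
 *	wake_up_state(p, state);	// try_to_wake_up(p, state, 0)
 */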
91dabf33 PZ |
4352 | static bool __task_needs_rq_lock(struct task_struct *p) |
4353 | { | |
4354 | unsigned int state = READ_ONCE(p->__state); | |
4355 | ||
4356 | /* | |
4357 | * Since pi->lock blocks try_to_wake_up(), we don't need rq->lock when | |
4358 | * the task is blocked. Make sure to check @state since ttwu() can drop | |
4359 | * locks at the end, see ttwu_queue_wakelist(). | |
4360 | */ | |
4361 | if (state == TASK_RUNNING || state == TASK_WAKING) | |
4362 | return true; | |
4363 | ||
4364 | /* | |
4365 | * Ensure we load p->on_rq after p->__state, otherwise it would be | |
4366 | * possible to, falsely, observe p->on_rq == 0. | |
4367 | * | |
4368 | * See try_to_wake_up() for a longer comment. | |
4369 | */ | |
4370 | smp_rmb(); | |
4371 | if (p->on_rq) | |
4372 | return true; | |
4373 | ||
4374 | #ifdef CONFIG_SMP | |
4375 | /* | |
4376 | * Ensure the task has finished __schedule() and will not be referenced | |
4377 | * anymore. Again, see try_to_wake_up() for a longer comment. | |
4378 | */ | |
4379 | smp_rmb(); | |
4380 | smp_cond_load_acquire(&p->on_cpu, !VAL); | |
4381 | #endif | |
4382 | ||
4383 | return false; | |
4384 | } | |
4385 | ||
2beaf328 | 4386 | /** |
9b3c4ab3 | 4387 | * task_call_func - Invoke a function on a task in a fixed state
1b7af295 | 4388 | * @p: Process for which the function is to be invoked, can be @current. |
2beaf328 PM |
4389 | * @func: Function to invoke. |
4390 | * @arg: Argument to function. | |
4391 | * | |
f6ac18fa PZ |
4392 | * Fix the task in its current state by avoiding wakeups and/or rq operations
4393 | * and call @func(@arg) on it. This function can use ->on_rq and task_curr() | |
4394 | * to work out what the state is, if required. Given that @func can be invoked | |
4395 | * with a runqueue lock held, it had better be quite lightweight. | |
2beaf328 PM |
4396 | * |
4397 | * Returns: | |
f6ac18fa | 4398 | * Whatever @func returns |
2beaf328 | 4399 | */ |
9b3c4ab3 | 4400 | int task_call_func(struct task_struct *p, task_call_f func, void *arg) |
2beaf328 | 4401 | { |
f6ac18fa | 4402 | struct rq *rq = NULL; |
2beaf328 | 4403 | struct rq_flags rf; |
9b3c4ab3 | 4404 | int ret; |
2beaf328 | 4405 | |
1b7af295 | 4406 | raw_spin_lock_irqsave(&p->pi_lock, rf.flags); |
f6ac18fa | 4407 | |
91dabf33 | 4408 | if (__task_needs_rq_lock(p)) |
2beaf328 | 4409 | rq = __task_rq_lock(p, &rf); |
f6ac18fa PZ |
4410 | |
4411 | /* | |
4412 | * At this point the task is pinned; either: | |
4413 | * - blocked and we're holding off wakeups (pi->lock) | |
4414 | * - woken, and we're holding off enqueue (rq->lock) | |
4415 | * - queued, and we're holding off schedule (rq->lock) | |
4416 | * - running, and we're holding off de-schedule (rq->lock) | |
4417 | * | |
4418 | * The called function (@func) can use: task_curr(), p->on_rq and | |
4419 | * p->__state to differentiate between these states. | |
4420 | */ | |
4421 | ret = func(p, arg); | |
4422 | ||
4423 | if (rq) | |
2beaf328 | 4424 | rq_unlock(rq, &rf); |
f6ac18fa | 4425 | |
1b7af295 | 4426 | raw_spin_unlock_irqrestore(&p->pi_lock, rf.flags); |
2beaf328 PM |
4427 | return ret; |
4428 | } | |
4429 | ||
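/*
 * Editor's sketch (hypothetical callback, illustrative only): @func can
 * run with a runqueue lock held, so it must stay lightweight.  Sampling
 * the pinned state described above might look like:
 *
 *	static int task_state_sample(struct task_struct *p, void *arg)
 *	{
 *		return task_curr(p) ? 2 : p->on_rq ? 1 : 0;
 *	}
 *
 *	ret = task_call_func(p, task_state_sample, NULL);
 *
 * with 2 meaning running, 1 queued/woken and 0 blocked.
 */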
e386b672 PM |
4430 | /** |
4431 | * cpu_curr_snapshot - Return a snapshot of the currently running task | |
4432 | * @cpu: The CPU on which to snapshot the task. | |
4433 | * | |
4434 | * Returns the task_struct pointer of the task "currently" running on | |
4435 | * the specified CPU. If the same task is running on that CPU throughout, | |
4436 | * the return value will be a pointer to that task's task_struct structure. | |
4437 | * If the CPU did any context switches even vaguely concurrently with the | |
4438 | * execution of this function, the return value will be a pointer to the | |
4439 | * task_struct structure of a randomly chosen task that was running on | |
4440 | * that CPU somewhere around the time that this function was executing. | |
4441 | * | |
4442 | * If the specified CPU was offline, the return value is whatever it | |
4443 | * is, perhaps a pointer to the task_struct structure of that CPU's idle | |
4444 | * task, but there is no guarantee. Callers wishing a useful return | |
4445 | * value must take some action to ensure that the specified CPU remains | |
4446 | * online throughout. | |
4447 | * | |
4448 | * This function executes full memory barriers before and after fetching | |
4449 | * the pointer, which permits the caller to confine this function's fetch | |
4450 | * with respect to the caller's accesses to other shared variables. | |
4451 | */ | |
4452 | struct task_struct *cpu_curr_snapshot(int cpu) | |
4453 | { | |
4454 | struct task_struct *t; | |
4455 | ||
4456 | smp_mb(); /* Pairing determined by caller's synchronization design. */ | |
4457 | t = rcu_dereference(cpu_curr(cpu)); | |
4458 | smp_mb(); /* Pairing determined by caller's synchronization design. */ | |
4459 | return t; | |
4460 | } | |
4461 | ||
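/*
 * Editor's sketch (illustrative use, per the constraints above): a caller
 * wanting a meaningful snapshot pins the CPU online first, e.g.:
 *
 *	cpus_read_lock();
 *	t = cpu_curr_snapshot(cpu);
 *	... use t, knowing only that it ran on @cpu "around" now ...
 *	cpus_read_unlock();
 */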
50fa610a DH |
4462 | /** |
4463 | * wake_up_process - Wake up a specific process | |
4464 | * @p: The process to be woken up. | |
4465 | * | |
4466 | * Attempt to wake up the nominated process and move it to the set of runnable | |
e69f6186 YB |
4467 | * processes. |
4468 | * | |
4469 | * Return: 1 if the process was woken up, 0 if it was already running. | |
50fa610a | 4470 | * |
7696f991 | 4471 | * This function executes a full memory barrier before accessing the task state. |
50fa610a | 4472 | */ |
7ad5b3a5 | 4473 | int wake_up_process(struct task_struct *p) |
1da177e4 | 4474 | { |
9067ac85 | 4475 | return try_to_wake_up(p, TASK_NORMAL, 0); |
1da177e4 | 4476 | } |
1da177e4 LT |
4477 | EXPORT_SYMBOL(wake_up_process); |
4478 | ||
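/*
 * Editor's sketch (the canonical sleep/wakeup pattern this serves;
 * illustrative only):
 *
 *	sleeper:				waker:
 *	  set_current_state(TASK_INTERRUPTIBLE);
 *						  cond = true;
 *	  if (!cond)				  wake_up_process(p);
 *		schedule();
 *	  __set_current_state(TASK_RUNNING);
 *
 * The full barrier implied by set_current_state() pairs with the one in
 * try_to_wake_up(): either the sleeper observes 'cond' set and skips the
 * schedule(), or the waker observes the sleeping state and wakes it.
 */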
7ad5b3a5 | 4479 | int wake_up_state(struct task_struct *p, unsigned int state) |
1da177e4 LT |
4480 | { |
4481 | return try_to_wake_up(p, state, 0); | |
4482 | } | |
4483 | ||
1da177e4 LT |
4484 | /* |
4485 | * Perform scheduler related setup for a newly forked process p. | |
4486 | * p is forked by current. | |
dd41f596 IM |
4487 | * |
4488 | * __sched_fork() is basic setup used by init_idle() too: | |
4489 | */ | |
5e1576ed | 4490 | static void __sched_fork(unsigned long clone_flags, struct task_struct *p) |
dd41f596 | 4491 | { |
fd2f4419 PZ |
4492 | p->on_rq = 0; |
4493 | ||
4494 | p->se.on_rq = 0; | |
dd41f596 IM |
4495 | p->se.exec_start = 0; |
4496 | p->se.sum_exec_runtime = 0; | |
f6cf891c | 4497 | p->se.prev_sum_exec_runtime = 0; |
6c594c21 | 4498 | p->se.nr_migrations = 0; |
da7a735e | 4499 | p->se.vruntime = 0; |
86bfbb7c | 4500 | p->se.vlag = 0; |
e4ec3318 | 4501 | p->se.slice = sysctl_sched_base_slice; |
fd2f4419 | 4502 | INIT_LIST_HEAD(&p->se.group_node); |
6cfb0d5d | 4503 | |
ad936d86 BP |
4504 | #ifdef CONFIG_FAIR_GROUP_SCHED |
4505 | p->se.cfs_rq = NULL; | |
4506 | #endif | |
4507 | ||
6cfb0d5d | 4508 | #ifdef CONFIG_SCHEDSTATS |
cb251765 | 4509 | /* Even if schedstat is disabled, there should not be garbage */ |
ceeadb83 | 4510 | memset(&p->stats, 0, sizeof(p->stats)); |
6cfb0d5d | 4511 | #endif |
476d139c | 4512 | |
aab03e05 | 4513 | RB_CLEAR_NODE(&p->dl.rb_node); |
40767b0d | 4514 | init_dl_task_timer(&p->dl); |
209a0cbd | 4515 | init_dl_inactive_task_timer(&p->dl); |
a5e7be3b | 4516 | __dl_clear_params(p); |
aab03e05 | 4517 | |
fa717060 | 4518 | INIT_LIST_HEAD(&p->rt.run_list); |
ff77e468 PZ |
4519 | p->rt.timeout = 0; |
4520 | p->rt.time_slice = sched_rr_timeslice; | |
4521 | p->rt.on_rq = 0; | |
4522 | p->rt.on_list = 0; | |
476d139c | 4523 | |
e107be36 AK |
4524 | #ifdef CONFIG_PREEMPT_NOTIFIERS |
4525 | INIT_HLIST_HEAD(&p->preempt_notifiers); | |
4526 | #endif | |
cbee9f88 | 4527 | |
5e1f0f09 MG |
4528 | #ifdef CONFIG_COMPACTION |
4529 | p->capture_control = NULL; | |
4530 | #endif | |
13784475 | 4531 | init_numa_balancing(clone_flags, p); |
a1488664 | 4532 | #ifdef CONFIG_SMP |
8c4890d1 | 4533 | p->wake_entry.u_flags = CSD_TYPE_TTWU; |
6d337eab | 4534 | p->migration_pending = NULL; |
a1488664 | 4535 | #endif |
223baf9d | 4536 | init_sched_mm_cid(p); |
dd41f596 IM |
4537 | } |
4538 | ||
2a595721 SD |
4539 | DEFINE_STATIC_KEY_FALSE(sched_numa_balancing); |
4540 | ||
1a687c2e | 4541 | #ifdef CONFIG_NUMA_BALANCING |
c3b9bc5b | 4542 | |
c574bbe9 HY |
4543 | int sysctl_numa_balancing_mode; |
4544 | ||
4545 | static void __set_numabalancing_state(bool enabled) | |
1a687c2e MG |
4546 | { |
4547 | if (enabled) | |
2a595721 | 4548 | static_branch_enable(&sched_numa_balancing); |
1a687c2e | 4549 | else |
2a595721 | 4550 | static_branch_disable(&sched_numa_balancing); |
1a687c2e | 4551 | } |
54a43d54 | 4552 | |
c574bbe9 HY |
4553 | void set_numabalancing_state(bool enabled) |
4554 | { | |
4555 | if (enabled) | |
4556 | sysctl_numa_balancing_mode = NUMA_BALANCING_NORMAL; | |
4557 | else | |
4558 | sysctl_numa_balancing_mode = NUMA_BALANCING_DISABLED; | |
4559 | __set_numabalancing_state(enabled); | |
4560 | } | |
4561 | ||
54a43d54 | 4562 | #ifdef CONFIG_PROC_SYSCTL |
c959924b HY |
4563 | static void reset_memory_tiering(void) |
4564 | { | |
4565 | struct pglist_data *pgdat; | |
4566 | ||
4567 | for_each_online_pgdat(pgdat) { | |
4568 | pgdat->nbp_threshold = 0; | |
4569 | pgdat->nbp_th_nr_cand = node_page_state(pgdat, PGPROMOTE_CANDIDATE); | |
4570 | pgdat->nbp_th_start = jiffies_to_msecs(jiffies); | |
4571 | } | |
4572 | } | |
4573 | ||
0dff89c4 | 4574 | static int sysctl_numa_balancing(struct ctl_table *table, int write, |
32927393 | 4575 | void *buffer, size_t *lenp, loff_t *ppos) |
54a43d54 AK |
4576 | { |
4577 | struct ctl_table t; | |
4578 | int err; | |
c574bbe9 | 4579 | int state = sysctl_numa_balancing_mode; |
54a43d54 AK |
4580 | |
4581 | if (write && !capable(CAP_SYS_ADMIN)) | |
4582 | return -EPERM; | |
4583 | ||
4584 | t = *table; | |
4585 | t.data = &state; | |
4586 | err = proc_dointvec_minmax(&t, write, buffer, lenp, ppos); | |
4587 | if (err < 0) | |
4588 | return err; | |
c574bbe9 | 4589 | if (write) { |
c959924b HY |
4590 | if (!(sysctl_numa_balancing_mode & NUMA_BALANCING_MEMORY_TIERING) && |
4591 | (state & NUMA_BALANCING_MEMORY_TIERING)) | |
4592 | reset_memory_tiering(); | |
c574bbe9 HY |
4593 | sysctl_numa_balancing_mode = state; |
4594 | __set_numabalancing_state(state); | |
4595 | } | |
54a43d54 AK |
4596 | return err; |
4597 | } | |
4598 | #endif | |
4599 | #endif | |
dd41f596 | 4600 | |
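/*
 * Editor's usage note (illustrative): the mode above is a bitmask,
 * written e.g. via
 *
 *	sysctl kernel.numa_balancing=1		(NUMA_BALANCING_NORMAL)
 *	sysctl kernel.numa_balancing=2		(NUMA_BALANCING_MEMORY_TIERING)
 *
 * Newly setting the memory-tiering bit also resets the promotion
 * thresholds via reset_memory_tiering() above.
 */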
4698f88c JP |
4601 | #ifdef CONFIG_SCHEDSTATS |
4602 | ||
cb251765 MG |
4603 | DEFINE_STATIC_KEY_FALSE(sched_schedstats); |
4604 | ||
cb251765 MG |
4605 | static void set_schedstats(bool enabled) |
4606 | { | |
4607 | if (enabled) | |
4608 | static_branch_enable(&sched_schedstats); | |
4609 | else | |
4610 | static_branch_disable(&sched_schedstats); | |
4611 | } | |
4612 | ||
4613 | void force_schedstat_enabled(void) | |
4614 | { | |
4615 | if (!schedstat_enabled()) { | |
4616 | pr_info("kernel profiling enabled schedstats, disable via kernel.sched_schedstats.\n"); | |
4617 | static_branch_enable(&sched_schedstats); | |
4618 | } | |
4619 | } | |
4620 | ||
4621 | static int __init setup_schedstats(char *str) | |
4622 | { | |
4623 | int ret = 0; | |
4624 | if (!str) | |
4625 | goto out; | |
4626 | ||
4627 | if (!strcmp(str, "enable")) { | |
1faa491a | 4628 | set_schedstats(true); |
cb251765 MG |
4629 | ret = 1; |
4630 | } else if (!strcmp(str, "disable")) { | |
1faa491a | 4631 | set_schedstats(false); |
cb251765 MG |
4632 | ret = 1; |
4633 | } | |
4634 | out: | |
4635 | if (!ret) | |
4636 | pr_warn("Unable to parse schedstats=\n"); | |
4637 | ||
4638 | return ret; | |
4639 | } | |
4640 | __setup("schedstats=", setup_schedstats); | |
4641 | ||
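/*
 * Editor's usage note (illustrative): schedstats can be enabled at boot
 * with "schedstats=enable" on the kernel command line, or at runtime via
 *
 *	echo 1 > /proc/sys/kernel/sched_schedstats
 *
 * which is handled by sysctl_schedstats() below.
 */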
4642 | #ifdef CONFIG_PROC_SYSCTL | |
f5ef06d5 | 4643 | static int sysctl_schedstats(struct ctl_table *table, int write, void *buffer, |
32927393 | 4644 | size_t *lenp, loff_t *ppos) |
cb251765 MG |
4645 | { |
4646 | struct ctl_table t; | |
4647 | int err; | |
4648 | int state = static_branch_likely(&sched_schedstats); | |
4649 | ||
4650 | if (write && !capable(CAP_SYS_ADMIN)) | |
4651 | return -EPERM; | |
4652 | ||
4653 | t = *table; | |
4654 | t.data = &state; | |
4655 | err = proc_dointvec_minmax(&t, write, buffer, lenp, ppos); | |
4656 | if (err < 0) | |
4657 | return err; | |
4658 | if (write) | |
4659 | set_schedstats(state); | |
4660 | return err; | |
4661 | } | |
4698f88c | 4662 | #endif /* CONFIG_PROC_SYSCTL */ |
4698f88c | 4663 | #endif /* CONFIG_SCHEDSTATS */ |
dd41f596 | 4664 | |
3267e015 ZN |
4665 | #ifdef CONFIG_SYSCTL |
4666 | static struct ctl_table sched_core_sysctls[] = { | |
4667 | #ifdef CONFIG_SCHEDSTATS | |
f5ef06d5 ZN |
4668 | { |
4669 | .procname = "sched_schedstats", | |
4670 | .data = NULL, | |
4671 | .maxlen = sizeof(unsigned int), | |
4672 | .mode = 0644, | |
4673 | .proc_handler = sysctl_schedstats, | |
4674 | .extra1 = SYSCTL_ZERO, | |
4675 | .extra2 = SYSCTL_ONE, | |
4676 | }, | |
3267e015 ZN |
4677 | #endif /* CONFIG_SCHEDSTATS */ |
4678 | #ifdef CONFIG_UCLAMP_TASK | |
4679 | { | |
4680 | .procname = "sched_util_clamp_min", | |
4681 | .data = &sysctl_sched_uclamp_util_min, | |
4682 | .maxlen = sizeof(unsigned int), | |
4683 | .mode = 0644, | |
4684 | .proc_handler = sysctl_sched_uclamp_handler, | |
4685 | }, | |
4686 | { | |
4687 | .procname = "sched_util_clamp_max", | |
4688 | .data = &sysctl_sched_uclamp_util_max, | |
4689 | .maxlen = sizeof(unsigned int), | |
4690 | .mode = 0644, | |
4691 | .proc_handler = sysctl_sched_uclamp_handler, | |
4692 | }, | |
4693 | { | |
4694 | .procname = "sched_util_clamp_min_rt_default", | |
4695 | .data = &sysctl_sched_uclamp_util_min_rt_default, | |
4696 | .maxlen = sizeof(unsigned int), | |
4697 | .mode = 0644, | |
4698 | .proc_handler = sysctl_sched_uclamp_handler, | |
4699 | }, | |
4700 | #endif /* CONFIG_UCLAMP_TASK */ | |
0dff89c4 KW |
4701 | #ifdef CONFIG_NUMA_BALANCING |
4702 | { | |
4703 | .procname = "numa_balancing", | |
4704 | .data = NULL, /* filled in by handler */ | |
4705 | .maxlen = sizeof(unsigned int), | |
4706 | .mode = 0644, | |
4707 | .proc_handler = sysctl_numa_balancing, | |
4708 | .extra1 = SYSCTL_ZERO, | |
4709 | .extra2 = SYSCTL_FOUR, | |
4710 | }, | |
4711 | #endif /* CONFIG_NUMA_BALANCING */ | |
f5ef06d5 ZN |
4712 | {} |
4713 | }; | |
3267e015 | 4714 | static int __init sched_core_sysctl_init(void) |
f5ef06d5 | 4715 | { |
3267e015 | 4716 | register_sysctl_init("kernel", sched_core_sysctls); |
f5ef06d5 ZN |
4717 | return 0; |
4718 | } | |
3267e015 ZN |
4719 | late_initcall(sched_core_sysctl_init); |
4720 | #endif /* CONFIG_SYSCTL */ | |
dd41f596 IM |
4721 | |
4722 | /* | |
4723 | * fork()/clone()-time setup: | |
4724 | */ | |
aab03e05 | 4725 | int sched_fork(unsigned long clone_flags, struct task_struct *p) |
dd41f596 | 4726 | { |
5e1576ed | 4727 | __sched_fork(clone_flags, p); |
06b83b5f | 4728 | /* |
7dc603c9 | 4729 | * We mark the process as NEW here. This guarantees that |
06b83b5f PZ |
4730 | * nobody will actually run it, and a signal or other external |
4731 | * event cannot wake it up and insert it on the runqueue either. | |
4732 | */ | |
2f064a59 | 4733 | p->__state = TASK_NEW; |
dd41f596 | 4734 | |
c350a04e MG |
4735 | /* |
4736 | * Make sure we do not leak PI boosting priority to the child. | |
4737 | */ | |
4738 | p->prio = current->normal_prio; | |
4739 | ||
e8f14172 PB |
4740 | uclamp_fork(p); |
4741 | ||
b9dc29e7 MG |
4742 | /* |
4743 | * Revert to default priority/policy on fork if requested. | |
4744 | */ | |
4745 | if (unlikely(p->sched_reset_on_fork)) { | |
aab03e05 | 4746 | if (task_has_dl_policy(p) || task_has_rt_policy(p)) { |
b9dc29e7 | 4747 | p->policy = SCHED_NORMAL; |
6c697bdf | 4748 | p->static_prio = NICE_TO_PRIO(0); |
c350a04e MG |
4749 | p->rt_priority = 0; |
4750 | } else if (PRIO_TO_NICE(p->static_prio) < 0) | |
4751 | p->static_prio = NICE_TO_PRIO(0); | |
4752 | ||
f558c2b8 | 4753 | p->prio = p->normal_prio = p->static_prio; |
b1e82065 | 4754 | set_load_weight(p, false); |
6c697bdf | 4755 | |
b9dc29e7 MG |
4756 | /* |
4757 | * We don't need the reset flag anymore after the fork. It has | |
4758 | * fulfilled its duty: | |
4759 | */ | |
4760 | p->sched_reset_on_fork = 0; | |
4761 | } | |
ca94c442 | 4762 | |
af0fffd9 | 4763 | if (dl_prio(p->prio)) |
aab03e05 | 4764 | return -EAGAIN; |
af0fffd9 | 4765 | else if (rt_prio(p->prio)) |
aab03e05 | 4766 | p->sched_class = &rt_sched_class; |
af0fffd9 | 4767 | else |
2ddbf952 | 4768 | p->sched_class = &fair_sched_class; |
b29739f9 | 4769 | |
7dc603c9 | 4770 | init_entity_runnable_average(&p->se); |
cd29fe6f | 4771 | |
b1e82065 | 4772 | |
f6db8347 | 4773 | #ifdef CONFIG_SCHED_INFO |
dd41f596 | 4774 | if (likely(sched_info_on())) |
52f17b6c | 4775 | memset(&p->sched_info, 0, sizeof(p->sched_info)); |
1da177e4 | 4776 | #endif |
3ca7a440 PZ |
4777 | #if defined(CONFIG_SMP) |
4778 | p->on_cpu = 0; | |
4866cde0 | 4779 | #endif |
01028747 | 4780 | init_task_preempt_count(p); |
806c09a7 | 4781 | #ifdef CONFIG_SMP |
917b627d | 4782 | plist_node_init(&p->pushable_tasks, MAX_PRIO); |
1baca4ce | 4783 | RB_CLEAR_NODE(&p->pushable_dl_tasks); |
806c09a7 | 4784 | #endif |
aab03e05 | 4785 | return 0; |
1da177e4 LT |
4786 | } |
4787 | ||
b1e82065 | 4788 | void sched_cgroup_fork(struct task_struct *p, struct kernel_clone_args *kargs) |
13685c4a | 4789 | { |
4ef0c5c6 | 4790 | unsigned long flags; |
4ef0c5c6 | 4791 | |
b1e82065 PZ |
4792 | /* |
4793 | * Because we're not yet on the pid-hash, p->pi_lock isn't strictly | |
4794 | * required yet, but lockdep gets upset if rules are violated. | |
4795 | */ | |
4ef0c5c6 ZQ |
4796 | raw_spin_lock_irqsave(&p->pi_lock, flags); |
4797 | #ifdef CONFIG_CGROUP_SCHED | |
b1e82065 PZ |
4798 | if (1) { |
4799 | struct task_group *tg; | |
4800 | tg = container_of(kargs->cset->subsys[cpu_cgrp_id], | |
4801 | struct task_group, css); | |
4802 | tg = autogroup_task_group(p, tg); | |
4803 | p->sched_task_group = tg; | |
4804 | } | |
4ef0c5c6 ZQ |
4805 | #endif |
4806 | rseq_migrate(p); | |
4807 | /* | |
4808 | * We're setting the CPU for the first time, we don't migrate, | |
4809 | * so use __set_task_cpu(). | |
4810 | */ | |
4811 | __set_task_cpu(p, smp_processor_id()); | |
4812 | if (p->sched_class->task_fork) | |
4813 | p->sched_class->task_fork(p); | |
4814 | raw_spin_unlock_irqrestore(&p->pi_lock, flags); | |
b1e82065 | 4815 | } |
4ef0c5c6 | 4816 | |
b1e82065 PZ |
4817 | void sched_post_fork(struct task_struct *p) |
4818 | { | |
13685c4a QY |
4819 | uclamp_post_fork(p); |
4820 | } | |
4821 | ||
332ac17e DF |
4822 | unsigned long to_ratio(u64 period, u64 runtime) |
4823 | { | |
4824 | if (runtime == RUNTIME_INF) | |
c52f14d3 | 4825 | return BW_UNIT; |
332ac17e DF |
4826 | |
4827 | /* | |
4828 | * Doing this here saves a lot of checks in all | |
4829 | * the calling paths, and returning zero seems | |
4830 | * safe for them anyway. | |
4831 | */ | |
4832 | if (period == 0) | |
4833 | return 0; | |
4834 | ||
c52f14d3 | 4835 | return div64_u64(runtime << BW_SHIFT, period); |
332ac17e DF |
4836 | } |
4837 | ||
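/*
 * Editor's worked example (illustrative): with BW_SHIFT = 20, i.e.
 * BW_UNIT = 1 << 20, a runtime of 25 ms out of a 100 ms period yields
 *
 *	(25000000 << 20) / 100000000 = BW_UNIT / 4 = 262144
 *
 * i.e. one quarter of the bandwidth in fixed-point form.
 */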
1da177e4 LT |
4838 | /* |
4839 | * wake_up_new_task - wake up a newly created task for the first time. | |
4840 | * | |
4841 | * This function will do some initial scheduler statistics housekeeping | |
4842 | * that must be done for every newly created context, then puts the task | |
4843 | * on the runqueue and wakes it. | |
4844 | */ | |
3e51e3ed | 4845 | void wake_up_new_task(struct task_struct *p) |
1da177e4 | 4846 | { |
eb580751 | 4847 | struct rq_flags rf; |
dd41f596 | 4848 | struct rq *rq; |
fabf318e | 4849 | |
eb580751 | 4850 | raw_spin_lock_irqsave(&p->pi_lock, rf.flags); |
2f064a59 | 4851 | WRITE_ONCE(p->__state, TASK_RUNNING); |
fabf318e PZ |
4852 | #ifdef CONFIG_SMP |
4853 | /* | |
4854 | * Fork balancing, do it here and not earlier because: | |
3bd37062 | 4855 | * - cpus_ptr can change in the fork path |
d1ccc66d | 4856 | * - any previously selected CPU might disappear through hotplug |
e210bffd PZ |
4857 | * |
4858 | * Use __set_task_cpu() to avoid calling sched_class::migrate_task_rq, | |
4859 | * as we're not fully set-up yet. | |
fabf318e | 4860 | */ |
32e839dd | 4861 | p->recent_used_cpu = task_cpu(p); |
ce3614da | 4862 | rseq_migrate(p); |
3aef1551 | 4863 | __set_task_cpu(p, select_task_rq(p, task_cpu(p), WF_FORK)); |
0017d735 | 4864 | #endif |
b7fa30c9 | 4865 | rq = __task_rq_lock(p, &rf); |
4126bad6 | 4866 | update_rq_clock(rq); |
d0fe0b9c | 4867 | post_init_entity_util_avg(p); |
0017d735 | 4868 | |
7a57f32a | 4869 | activate_task(rq, p, ENQUEUE_NOCLOCK); |
fbd705a0 | 4870 | trace_sched_wakeup_new(p); |
e23edc86 | 4871 | wakeup_preempt(rq, p, WF_FORK); |
9a897c5a | 4872 | #ifdef CONFIG_SMP |
0aaafaab PZ |
4873 | if (p->sched_class->task_woken) { |
4874 | /* | |
b19a888c | 4875 | * Nothing relies on rq->lock after this, so it's fine to |
0aaafaab PZ |
4876 | * drop it. |
4877 | */ | |
d8ac8971 | 4878 | rq_unpin_lock(rq, &rf); |
efbbd05a | 4879 | p->sched_class->task_woken(rq, p); |
d8ac8971 | 4880 | rq_repin_lock(rq, &rf); |
0aaafaab | 4881 | } |
9a897c5a | 4882 | #endif |
eb580751 | 4883 | task_rq_unlock(rq, p, &rf); |
1da177e4 LT |
4884 | } |
4885 | ||
e107be36 AK |
4886 | #ifdef CONFIG_PREEMPT_NOTIFIERS |
4887 | ||
b7203428 | 4888 | static DEFINE_STATIC_KEY_FALSE(preempt_notifier_key); |
1cde2930 | 4889 | |
2ecd9d29 PZ |
4890 | void preempt_notifier_inc(void) |
4891 | { | |
b7203428 | 4892 | static_branch_inc(&preempt_notifier_key); |
2ecd9d29 PZ |
4893 | } |
4894 | EXPORT_SYMBOL_GPL(preempt_notifier_inc); | |
4895 | ||
4896 | void preempt_notifier_dec(void) | |
4897 | { | |
b7203428 | 4898 | static_branch_dec(&preempt_notifier_key); |
2ecd9d29 PZ |
4899 | } |
4900 | EXPORT_SYMBOL_GPL(preempt_notifier_dec); | |
4901 | ||
e107be36 | 4902 | /** |
80dd99b3 | 4903 | * preempt_notifier_register - tell me when current is being preempted & rescheduled |
421cee29 | 4904 | * @notifier: notifier struct to register |
e107be36 AK |
4905 | */ |
4906 | void preempt_notifier_register(struct preempt_notifier *notifier) | |
4907 | { | |
b7203428 | 4908 | if (!static_branch_unlikely(&preempt_notifier_key)) |
2ecd9d29 PZ |
4909 | WARN(1, "registering preempt_notifier while notifiers disabled\n"); |
4910 | ||
e107be36 AK |
4911 | hlist_add_head(¬ifier->link, ¤t->preempt_notifiers); |
4912 | } | |
4913 | EXPORT_SYMBOL_GPL(preempt_notifier_register); | |
4914 | ||
4915 | /** | |
4916 | * preempt_notifier_unregister - no longer interested in preemption notifications | |
421cee29 | 4917 | * @notifier: notifier struct to unregister |
e107be36 | 4918 | * |
d84525a8 | 4919 | * This is *not* safe to call from within a preemption notifier. |
e107be36 AK |
4920 | */ |
4921 | void preempt_notifier_unregister(struct preempt_notifier *notifier) | |
4922 | { | |
4923 | hlist_del(¬ifier->link); | |
4924 | } | |
4925 | EXPORT_SYMBOL_GPL(preempt_notifier_unregister); | |
4926 | ||
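/*
 * Editor's sketch (hypothetical user, loosely modelled on how KVM uses
 * this API; the my_* names are made up for illustration):
 *
 *	static void my_sched_in(struct preempt_notifier *pn, int cpu) { }
 *	static void my_sched_out(struct preempt_notifier *pn,
 *				 struct task_struct *next) { }
 *
 *	static struct preempt_ops my_ops = {
 *		.sched_in  = my_sched_in,
 *		.sched_out = my_sched_out,
 *	};
 *
 *	preempt_notifier_inc();
 *	preempt_notifier_init(&my_notifier, &my_ops);
 *	preempt_notifier_register(&my_notifier);
 *
 * after which my_notifier fires on every switch out of / back into the
 * registering task.
 */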
1cde2930 | 4927 | static void __fire_sched_in_preempt_notifiers(struct task_struct *curr) |
e107be36 AK |
4928 | { |
4929 | struct preempt_notifier *notifier; | |
e107be36 | 4930 | |
b67bfe0d | 4931 | hlist_for_each_entry(notifier, &curr->preempt_notifiers, link) |
e107be36 AK |
4932 | notifier->ops->sched_in(notifier, raw_smp_processor_id()); |
4933 | } | |
4934 | ||
1cde2930 PZ |
4935 | static __always_inline void fire_sched_in_preempt_notifiers(struct task_struct *curr) |
4936 | { | |
b7203428 | 4937 | if (static_branch_unlikely(&preempt_notifier_key)) |
1cde2930 PZ |
4938 | __fire_sched_in_preempt_notifiers(curr); |
4939 | } | |
4940 | ||
e107be36 | 4941 | static void |
1cde2930 PZ |
4942 | __fire_sched_out_preempt_notifiers(struct task_struct *curr, |
4943 | struct task_struct *next) | |
e107be36 AK |
4944 | { |
4945 | struct preempt_notifier *notifier; | |
e107be36 | 4946 | |
b67bfe0d | 4947 | hlist_for_each_entry(notifier, &curr->preempt_notifiers, link) |
e107be36 AK |
4948 | notifier->ops->sched_out(notifier, next); |
4949 | } | |
4950 | ||
1cde2930 PZ |
4951 | static __always_inline void |
4952 | fire_sched_out_preempt_notifiers(struct task_struct *curr, | |
4953 | struct task_struct *next) | |
4954 | { | |
b7203428 | 4955 | if (static_branch_unlikely(&preempt_notifier_key)) |
1cde2930 PZ |
4956 | __fire_sched_out_preempt_notifiers(curr, next); |
4957 | } | |
4958 | ||
6d6bc0ad | 4959 | #else /* !CONFIG_PREEMPT_NOTIFIERS */ |
e107be36 | 4960 | |
1cde2930 | 4961 | static inline void fire_sched_in_preempt_notifiers(struct task_struct *curr) |
e107be36 AK |
4962 | { |
4963 | } | |
4964 | ||
1cde2930 | 4965 | static inline void |
e107be36 AK |
4966 | fire_sched_out_preempt_notifiers(struct task_struct *curr, |
4967 | struct task_struct *next) | |
4968 | { | |
4969 | } | |
4970 | ||
6d6bc0ad | 4971 | #endif /* CONFIG_PREEMPT_NOTIFIERS */ |
e107be36 | 4972 | |
31cb1bc0 | 4973 | static inline void prepare_task(struct task_struct *next) |
4974 | { | |
4975 | #ifdef CONFIG_SMP | |
4976 | /* | |
4977 | * Claim the task as running, we do this before switching to it | |
4978 | * such that any running task will have this set. | |
58877d34 | 4979 | * |
f3dd3f67 TD |
4980 | * See the smp_load_acquire(&p->on_cpu) case in ttwu() and |
4981 | * its ordering comment. | |
31cb1bc0 | 4982 | */ |
58877d34 | 4983 | WRITE_ONCE(next->on_cpu, 1); |
31cb1bc0 | 4984 | #endif |
4985 | } | |
4986 | ||
4987 | static inline void finish_task(struct task_struct *prev) | |
4988 | { | |
4989 | #ifdef CONFIG_SMP | |
4990 | /* | |
58877d34 PZ |
4991 | * This must be the very last reference to @prev from this CPU. After |
4992 | * p->on_cpu is cleared, the task can be moved to a different CPU. We | |
4993 | * must ensure this doesn't happen until the switch is completely | |
31cb1bc0 | 4994 | * finished. |
4995 | * | |
4996 | * In particular, the load of prev->state in finish_task_switch() must | |
4997 | * happen before this. | |
4998 | * | |
4999 | * Pairs with the smp_cond_load_acquire() in try_to_wake_up(). | |
5000 | */ | |
5001 | smp_store_release(&prev->on_cpu, 0); | |
5002 | #endif | |
5003 | } | |
5004 | ||
565790d2 PZ |
5005 | #ifdef CONFIG_SMP |
5006 | ||
8e5bad7d | 5007 | static void do_balance_callbacks(struct rq *rq, struct balance_callback *head) |
565790d2 PZ |
5008 | { |
5009 | void (*func)(struct rq *rq); | |
8e5bad7d | 5010 | struct balance_callback *next; |
565790d2 | 5011 | |
5cb9eaa3 | 5012 | lockdep_assert_rq_held(rq); |
565790d2 PZ |
5013 | |
5014 | while (head) { | |
5015 | func = (void (*)(struct rq *))head->func; | |
5016 | next = head->next; | |
5017 | head->next = NULL; | |
5018 | head = next; | |
5019 | ||
5020 | func(rq); | |
5021 | } | |
5022 | } | |
5023 | ||
ae792702 PZ |
5024 | static void balance_push(struct rq *rq); |
5025 | ||
04193d59 PZ |
5026 | /* |
5027 | * balance_push_callback is a right abuse of the callback interface and plays | |
5028 | * by significantly different rules. | |
5029 | * | |
5030 | * Where the normal balance_callback's purpose is to be run in the same context
5031 | * that queued it (only later, when it's safe to drop rq->lock again), | |
5032 | * balance_push_callback is specifically targeted at __schedule(). | |
5033 | * | |
5034 | * This abuse is tolerated because it places all the unlikely/odd cases behind | |
5035 | * a single test, namely: rq->balance_callback == NULL. | |
5036 | */ | |
8e5bad7d | 5037 | struct balance_callback balance_push_callback = { |
ae792702 | 5038 | .next = NULL, |
8e5bad7d | 5039 | .func = balance_push, |
ae792702 PZ |
5040 | }; |
5041 | ||
8e5bad7d | 5042 | static inline struct balance_callback * |
04193d59 | 5043 | __splice_balance_callbacks(struct rq *rq, bool split) |
565790d2 | 5044 | { |
8e5bad7d | 5045 | struct balance_callback *head = rq->balance_callback; |
565790d2 | 5046 | |
04193d59 PZ |
5047 | if (likely(!head)) |
5048 | return NULL; | |
5049 | ||
5cb9eaa3 | 5050 | lockdep_assert_rq_held(rq); |
04193d59 PZ |
5051 | /* |
5052 | * Must not take balance_push_callback off the list when | |
5053 | * splice_balance_callbacks() and balance_callbacks() are not | |
5054 | * in the same rq->lock section. | |
5055 | * | |
5056 | * In that case it would be possible for __schedule() to interleave | |
5057 | * and observe the list empty. | |
5058 | */ | |
5059 | if (split && head == &balance_push_callback) | |
5060 | head = NULL; | |
5061 | else | |
565790d2 PZ |
5062 | rq->balance_callback = NULL; |
5063 | ||
5064 | return head; | |
5065 | } | |
5066 | ||
8e5bad7d | 5067 | static inline struct balance_callback *splice_balance_callbacks(struct rq *rq) |
04193d59 PZ |
5068 | { |
5069 | return __splice_balance_callbacks(rq, true); | |
5070 | } | |
5071 | ||
565790d2 PZ |
5072 | static void __balance_callbacks(struct rq *rq) |
5073 | { | |
04193d59 | 5074 | do_balance_callbacks(rq, __splice_balance_callbacks(rq, false)); |
565790d2 PZ |
5075 | } |
5076 | ||
8e5bad7d | 5077 | static inline void balance_callbacks(struct rq *rq, struct balance_callback *head) |
565790d2 PZ |
5078 | { |
5079 | unsigned long flags; | |
5080 | ||
5081 | if (unlikely(head)) { | |
5cb9eaa3 | 5082 | raw_spin_rq_lock_irqsave(rq, flags); |
565790d2 | 5083 | do_balance_callbacks(rq, head); |
5cb9eaa3 | 5084 | raw_spin_rq_unlock_irqrestore(rq, flags); |
565790d2 PZ |
5085 | } |
5086 | } | |
5087 | ||
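/*
 * Editor's sketch (illustrative; this mirrors how the rt/deadline classes
 * use queue_balance_callback() from sched.h): with rq->lock held, a class
 * queues a per-rq callback that runs once the lock can safely be dropped:
 *
 *	static void my_push_tasks(struct rq *rq) { }
 *	static DEFINE_PER_CPU(struct balance_callback, my_cb_head);
 *
 *	queue_balance_callback(rq, &per_cpu(my_cb_head, rq->cpu),
 *			       my_push_tasks);
 *
 * where the my_* names stand in for e.g. push_rt_tasks()/rt_push_head.
 */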
5088 | #else | |
5089 | ||
5090 | static inline void __balance_callbacks(struct rq *rq) | |
5091 | { | |
5092 | } | |
5093 | ||
8e5bad7d | 5094 | static inline struct balance_callback *splice_balance_callbacks(struct rq *rq) |
565790d2 PZ |
5095 | { |
5096 | return NULL; | |
5097 | } | |
5098 | ||
8e5bad7d | 5099 | static inline void balance_callbacks(struct rq *rq, struct balance_callback *head) |
565790d2 PZ |
5100 | { |
5101 | } | |
5102 | ||
5103 | #endif | |
5104 | ||
269d5992 PZ |
5105 | static inline void |
5106 | prepare_lock_switch(struct rq *rq, struct task_struct *next, struct rq_flags *rf) | |
31cb1bc0 | 5107 | { |
269d5992 PZ |
5108 | /* |
5109 | * The runqueue lock will be released by the next
5110 | * task (which is an invalid locking op, but in the case
5111 | * of the scheduler it's an obvious special case), so we
5112 | * do an early lockdep release here:
5113 | */ | |
5114 | rq_unpin_lock(rq, rf); | |
9ef7e7e3 | 5115 | spin_release(&__rq_lockp(rq)->dep_map, _THIS_IP_); |
31cb1bc0 | 5116 | #ifdef CONFIG_DEBUG_SPINLOCK |
5117 | /* this is a valid case when another task releases the spinlock */ | |
5cb9eaa3 | 5118 | rq_lockp(rq)->owner = next; |
31cb1bc0 | 5119 | #endif |
269d5992 PZ |
5120 | } |
5121 | ||
5122 | static inline void finish_lock_switch(struct rq *rq) | |
5123 | { | |
31cb1bc0 | 5124 | /* |
5125 | * If we are tracking spinlock dependencies then we have to | |
5126 | * fix up the runqueue lock - which gets 'carried over' from | |
5127 | * prev into current: | |
5128 | */ | |
9ef7e7e3 | 5129 | spin_acquire(&__rq_lockp(rq)->dep_map, 0, 0, _THIS_IP_); |
ae792702 | 5130 | __balance_callbacks(rq); |
5cb9eaa3 | 5131 | raw_spin_rq_unlock_irq(rq); |
31cb1bc0 | 5132 | } |
5133 | ||
325ea10c IM |
5134 | /* |
5135 | * NOP if the arch has not defined these: | |
5136 | */ | |
5137 | ||
5138 | #ifndef prepare_arch_switch | |
5139 | # define prepare_arch_switch(next) do { } while (0) | |
5140 | #endif | |
5141 | ||
5142 | #ifndef finish_arch_post_lock_switch | |
5143 | # define finish_arch_post_lock_switch() do { } while (0) | |
5144 | #endif | |
5145 | ||
5fbda3ec TG |
5146 | static inline void kmap_local_sched_out(void) |
5147 | { | |
5148 | #ifdef CONFIG_KMAP_LOCAL | |
5149 | if (unlikely(current->kmap_ctrl.idx)) | |
5150 | __kmap_local_sched_out(); | |
5151 | #endif | |
5152 | } | |
5153 | ||
5154 | static inline void kmap_local_sched_in(void) | |
5155 | { | |
5156 | #ifdef CONFIG_KMAP_LOCAL | |
5157 | if (unlikely(current->kmap_ctrl.idx)) | |
5158 | __kmap_local_sched_in(); | |
5159 | #endif | |
5160 | } | |
5161 | ||
4866cde0 NP |
5162 | /** |
5163 | * prepare_task_switch - prepare to switch tasks | |
5164 | * @rq: the runqueue preparing to switch | |
421cee29 | 5165 | * @prev: the current task that is being switched out |
4866cde0 NP |
5166 | * @next: the task we are going to switch to. |
5167 | * | |
5168 | * This is called with the rq lock held and interrupts off. It must | |
5169 | * be paired with a subsequent finish_task_switch after the context | |
5170 | * switch. | |
5171 | * | |
5172 | * prepare_task_switch sets up locking and calls architecture specific | |
5173 | * hooks. | |
5174 | */ | |
e107be36 AK |
5175 | static inline void |
5176 | prepare_task_switch(struct rq *rq, struct task_struct *prev, | |
5177 | struct task_struct *next) | |
4866cde0 | 5178 | { |
0ed557aa | 5179 | kcov_prepare_switch(prev); |
43148951 | 5180 | sched_info_switch(rq, prev, next); |
fe4b04fa | 5181 | perf_event_task_sched_out(prev, next); |
d7822b1e | 5182 | rseq_preempt(prev); |
e107be36 | 5183 | fire_sched_out_preempt_notifiers(prev, next); |
5fbda3ec | 5184 | kmap_local_sched_out(); |
31cb1bc0 | 5185 | prepare_task(next); |
4866cde0 NP |
5186 | prepare_arch_switch(next); |
5187 | } | |
5188 | ||
1da177e4 LT |
5189 | /** |
5190 | * finish_task_switch - clean up after a task-switch | |
5191 | * @prev: the thread we just switched away from. | |
5192 | * | |
4866cde0 NP |
5193 | * finish_task_switch must be called after the context switch, paired |
5194 | * with a prepare_task_switch call before the context switch. | |
5195 | * finish_task_switch will reconcile locking set up by prepare_task_switch, | |
5196 | * and do any other architecture-specific cleanup actions. | |
1da177e4 LT |
5197 | * |
5198 | * Note that we may have delayed dropping an mm in context_switch(). If | |
41a2d6cf | 5199 | * so, we finish that here outside of the runqueue lock. (Doing it |
1da177e4 LT |
5200 | * with the lock held can cause deadlocks; see schedule() for |
5201 | * details.) | |
dfa50b60 ON |
5202 | * |
5203 | * The context switch has flipped the stack from under us and restored the
5204 | * local variables which were saved when this task called schedule() in the | |
5205 | * past. prev == current is still correct but we need to recalculate this_rq | |
5206 | * because prev may have moved to another CPU. | |
1da177e4 | 5207 | */ |
dfa50b60 | 5208 | static struct rq *finish_task_switch(struct task_struct *prev) |
1da177e4 LT |
5209 | __releases(rq->lock) |
5210 | { | |
dfa50b60 | 5211 | struct rq *rq = this_rq(); |
1da177e4 | 5212 | struct mm_struct *mm = rq->prev_mm; |
fa2c3254 | 5213 | unsigned int prev_state; |
1da177e4 | 5214 | |
609ca066 PZ |
5215 | /* |
5216 | * The previous task will have left us with a preempt_count of 2 | |
5217 | * because it left us after: | |
5218 | * | |
5219 | * schedule() | |
5220 | * preempt_disable(); // 1 | |
5221 | * __schedule() | |
5222 | * raw_spin_lock_irq(&rq->lock) // 2 | |
5223 | * | |
5224 | * Also, see FORK_PREEMPT_COUNT. | |
5225 | */ | |
e2bf1c4b PZ |
5226 | if (WARN_ONCE(preempt_count() != 2*PREEMPT_DISABLE_OFFSET, |
5227 | "corrupted preempt_count: %s/%d/0x%x\n", | |
5228 | current->comm, current->pid, preempt_count())) | |
5229 | preempt_count_set(FORK_PREEMPT_COUNT); | |
609ca066 | 5230 | |
1da177e4 LT |
5231 | rq->prev_mm = NULL; |
5232 | ||
5233 | /* | |
5234 | * A task struct has one reference for the use as "current". | |
c394cc9f | 5235 | * If a task dies, then it sets TASK_DEAD in tsk->state and calls |
55a101f8 ON |
5236 | * schedule one last time. The schedule call will never return, and |
5237 | * the scheduled task must drop that reference. | |
95913d97 PZ |
5238 | * |
5239 | * We must observe prev->state before clearing prev->on_cpu (in | |
31cb1bc0 | 5240 | * finish_task), otherwise a concurrent wakeup can get prev |
95913d97 PZ |
5241 | * running on another CPU and we could race with its RUNNING -> DEAD
5242 | * transition, resulting in a double drop. | |
1da177e4 | 5243 | */ |
2f064a59 | 5244 | prev_state = READ_ONCE(prev->__state); |
bf9fae9f | 5245 | vtime_task_switch(prev); |
a8d757ef | 5246 | perf_event_task_sched_in(prev, current); |
31cb1bc0 | 5247 | finish_task(prev); |
0fdcccfa | 5248 | tick_nohz_task_switch(); |
31cb1bc0 | 5249 | finish_lock_switch(rq); |
01f23e16 | 5250 | finish_arch_post_lock_switch(); |
0ed557aa | 5251 | kcov_finish_switch(current); |
5fbda3ec TG |
5252 | /* |
5253 | * kmap_local_sched_out() is invoked with rq::lock held and | |
5254 | * interrupts disabled. There is no requirement for that, but the | |
5255 | * sched out code does not have an interrupt enabled section. | |
5256 | * Restoring the maps on sched in does not require interrupts being | |
5257 | * disabled either. | |
5258 | */ | |
5259 | kmap_local_sched_in(); | |
e8fa1362 | 5260 | |
e107be36 | 5261 | fire_sched_in_preempt_notifiers(current); |
306e0604 | 5262 | /* |
70216e18 MD |
5263 | * When switching through a kernel thread, the loop in |
5264 | * membarrier_{private,global}_expedited() may have observed that | |
5265 | * kernel thread and not issued an IPI. It is therefore possible to | |
5266 | * schedule between user->kernel->user threads without passing though | |
5267 | * switch_mm(). Membarrier requires a barrier after storing to | |
5268 | * rq->curr, before returning to userspace, so provide them here: | |
5269 | * | |
5270 | * - a full memory barrier for {PRIVATE,GLOBAL}_EXPEDITED, implicitly | |
aa464ba9 | 5271 | * provided by mmdrop_lazy_tlb(), |
70216e18 | 5272 | * - a sync_core for SYNC_CORE. |
306e0604 | 5273 | */ |
70216e18 MD |
5274 | if (mm) { |
5275 | membarrier_mm_sync_core_before_usermode(mm); | |
aa464ba9 | 5276 | mmdrop_lazy_tlb_sched(mm); |
70216e18 | 5277 | } |
aa464ba9 | 5278 | |
1cef1150 PZ |
5279 | if (unlikely(prev_state == TASK_DEAD)) { |
5280 | if (prev->sched_class->task_dead) | |
5281 | prev->sched_class->task_dead(prev); | |
68f24b08 | 5282 | |
1cef1150 PZ |
5283 | /* Task is done with its stack. */ |
5284 | put_task_stack(prev); | |
5285 | ||
0ff7b2cf | 5286 | put_task_struct_rcu_user(prev); |
c6fd91f0 | 5287 | } |
99e5ada9 | 5288 | |
dfa50b60 | 5289 | return rq; |
1da177e4 LT |
5290 | } |
5291 | ||
5292 | /** | |
5293 | * schedule_tail - first thing a freshly forked thread must call. | |
5294 | * @prev: the thread we just switched away from. | |
5295 | */ | |
722a9f92 | 5296 | asmlinkage __visible void schedule_tail(struct task_struct *prev) |
1da177e4 LT |
5297 | __releases(rq->lock) |
5298 | { | |
609ca066 PZ |
5299 | /* |
5300 | * New tasks start with FORK_PREEMPT_COUNT, see there and | |
5301 | * finish_task_switch() for details. | |
5302 | * | |
5303 | * finish_task_switch() will drop rq->lock() and lower preempt_count | |
5304 | * and the preempt_enable() will end up enabling preemption (on | |
5305 | * PREEMPT_COUNT kernels). | |
5306 | */ | |
5307 | ||
13c2235b | 5308 | finish_task_switch(prev); |
1a43a14a | 5309 | preempt_enable(); |
70b97a7f | 5310 | |
1da177e4 | 5311 | if (current->set_child_tid) |
b488893a | 5312 | put_user(task_pid_vnr(current), current->set_child_tid); |
088fe47c EB |
5313 | |
5314 | calculate_sigpending(); | |
1da177e4 LT |
5315 | } |
5316 | ||
5317 | /* | |
dfa50b60 | 5318 | * context_switch - switch to the new MM and the new thread's register state. |
1da177e4 | 5319 | */ |
04936948 | 5320 | static __always_inline struct rq * |
70b97a7f | 5321 | context_switch(struct rq *rq, struct task_struct *prev, |
d8ac8971 | 5322 | struct task_struct *next, struct rq_flags *rf) |
1da177e4 | 5323 | { |
e107be36 | 5324 | prepare_task_switch(rq, prev, next); |
fe4b04fa | 5325 | |
9226d125 ZA |
5326 | /* |
5327 | * For paravirt, this is coupled with an exit in switch_to to | |
5328 | * combine the page table reload and the switch backend into | |
5329 | * one hypercall. | |
5330 | */ | |
224101ed | 5331 | arch_start_context_switch(prev); |
9226d125 | 5332 | |
306e0604 | 5333 | /* |
139d025c | 5334 | * kernel -> kernel lazy + transfer active |
aa464ba9 | 5335 | * user -> kernel lazy + mmgrab_lazy_tlb() active |
139d025c | 5336 | * |
aa464ba9 | 5337 | * kernel -> user switch + mmdrop_lazy_tlb() active |
139d025c | 5338 | * user -> user switch |
223baf9d MD |
5339 | * |
5340 | * switch_mm_cid() needs to be updated if the barriers provided | |
5341 | * by context_switch() are modified. | |
306e0604 | 5342 | */ |
139d025c PZ |
5343 | if (!next->mm) { // to kernel |
5344 | enter_lazy_tlb(prev->active_mm, next); | |
5345 | ||
5346 | next->active_mm = prev->active_mm; | |
5347 | if (prev->mm) // from user | |
aa464ba9 | 5348 | mmgrab_lazy_tlb(prev->active_mm); |
139d025c PZ |
5349 | else |
5350 | prev->active_mm = NULL; | |
5351 | } else { // to user | |
227a4aad | 5352 | membarrier_switch_mm(rq, prev->active_mm, next->mm); |
139d025c PZ |
5353 | /* |
5354 | * sys_membarrier() requires an smp_mb() between setting | |
227a4aad | 5355 | * rq->curr / membarrier_switch_mm() and returning to userspace. |
139d025c PZ |
5356 | * |
5357 | * The below provides this either through switch_mm(), or in | |
5358 | * case 'prev->active_mm == next->mm' through | |
5359 | * finish_task_switch()'s mmdrop(). | |
5360 | */ | |
139d025c | 5361 | switch_mm_irqs_off(prev->active_mm, next->mm, next); |
bd74fdae | 5362 | lru_gen_use_mm(next->mm); |
1da177e4 | 5363 | |
139d025c | 5364 | if (!prev->mm) { // from kernel |
aa464ba9 | 5365 | /* will mmdrop_lazy_tlb() in finish_task_switch(). */ |
139d025c PZ |
5366 | rq->prev_mm = prev->active_mm; |
5367 | prev->active_mm = NULL; | |
5368 | } | |
1da177e4 | 5369 | } |
92509b73 | 5370 | |
223baf9d MD |
5371 | /* switch_mm_cid() requires the memory barriers above. */ |
5372 | switch_mm_cid(rq, prev, next); | |
5373 | ||
269d5992 | 5374 | prepare_lock_switch(rq, next, rf); |
1da177e4 LT |
5375 | |
5376 | /* Here we just switch the register state and the stack. */ | |
5377 | switch_to(prev, next, prev); | |
dd41f596 | 5378 | barrier(); |
dfa50b60 ON |
5379 | |
5380 | return finish_task_switch(prev); | |
1da177e4 LT |
5381 | } |
5382 | ||
5383 | /* | |
1c3e8264 | 5384 | * nr_running and nr_context_switches: |
1da177e4 LT |
5385 | * |
5386 | * externally visible scheduler statistics: current number of runnable | |
1c3e8264 | 5387 | * threads, total number of context switches performed since bootup. |
1da177e4 | 5388 | */ |
01aee8fd | 5389 | unsigned int nr_running(void) |
1da177e4 | 5390 | { |
01aee8fd | 5391 | unsigned int i, sum = 0; |
1da177e4 LT |
5392 | |
5393 | for_each_online_cpu(i) | |
5394 | sum += cpu_rq(i)->nr_running; | |
5395 | ||
5396 | return sum; | |
f711f609 | 5397 | } |
1da177e4 | 5398 | |
2ee507c4 | 5399 | /* |
d1ccc66d | 5400 | * Check if only the current task is running on the CPU. |
00cc1633 DD |
5401 | * |
5402 | * Caution: this function does not check that the caller has disabled | |
5403 | * preemption, thus the result might have a time-of-check-to-time-of-use | |
5404 | * race. The caller is responsible to use it correctly, for example: | |
5405 | * | |
dfcb245e | 5406 | * - from a non-preemptible section (of course) |
00cc1633 DD |
5407 | * |
5408 | * - from a thread that is bound to a single CPU | |
5409 | * | |
5410 | * - in a loop with very short iterations (e.g. a polling loop) | |
2ee507c4 TC |
5411 | */ |
5412 | bool single_task_running(void) | |
5413 | { | |
00cc1633 | 5414 | return raw_rq()->nr_running == 1; |
2ee507c4 TC |
5415 | } |
5416 | EXPORT_SYMBOL(single_task_running); | |
5417 | ||
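/*
 * Editor's sketch (illustrative correct use, per the caution above):
 *
 *	preempt_disable();
 *	if (single_task_running())
 *		do_polling_fast_path();
 *	preempt_enable();
 *
 * where do_polling_fast_path() is hypothetical; the result is only
 * meaningful while preemption stays disabled.
 */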
7c182722 ZL |
5418 | unsigned long long nr_context_switches_cpu(int cpu) |
5419 | { | |
5420 | return cpu_rq(cpu)->nr_switches; | |
5421 | } | |
5422 | ||
1da177e4 | 5423 | unsigned long long nr_context_switches(void) |
46cb4b7c | 5424 | { |
cc94abfc SR |
5425 | int i; |
5426 | unsigned long long sum = 0; | |
46cb4b7c | 5427 | |
0a945022 | 5428 | for_each_possible_cpu(i) |
1da177e4 | 5429 | sum += cpu_rq(i)->nr_switches; |
46cb4b7c | 5430 | |
1da177e4 LT |
5431 | return sum; |
5432 | } | |
483b4ee6 | 5433 | |
145d952a DL |
5434 | /* |
5435 | * Consumers of these two interfaces, like for example the cpuidle menu
5436 | * governor, are using nonsensical data: they prefer shallow idle state
5437 | * selection for a CPU with IO-wait, even though the blocked task might not
5438 | * end up running on that CPU when it does become runnable.
5439 | */ | |
5440 | ||
8fc2858e | 5441 | unsigned int nr_iowait_cpu(int cpu) |
145d952a DL |
5442 | { |
5443 | return atomic_read(&cpu_rq(cpu)->nr_iowait); | |
5444 | } | |
5445 | ||
e33a9bba | 5446 | /* |
b19a888c | 5447 | * IO-wait accounting, and how it's mostly bollocks (on SMP). |
e33a9bba TH |
5448 | * |
5449 | * The idea behind IO-wait accounting is to account the idle time that we could
5450 | * have spent running if it were not for IO. That is, if we were to improve the
5451 | * storage performance, we'd have a proportional reduction in IO-wait time. | |
5452 | * | |
5453 | * This all works nicely on UP, where, when a task blocks on IO, we account | |
5454 | * idle time as IO-wait, because if the storage were faster, it could've been | |
5455 | * running and we'd not be idle. | |
5456 | * | |
5457 | * This has been extended to SMP, by doing the same for each CPU. This however | |
5458 | * is broken. | |
5459 | * | |
5460 | * Imagine for instance the case where two tasks block on one CPU, only the one | |
5461 | * CPU will have IO-wait accounted, while the other has regular idle. Even | |
5462 | * though, if the storage were faster, both could've run at the same time,
5463 | * utilising both CPUs. | |
5464 | * | |
5465 | * This means, that when looking globally, the current IO-wait accounting on | |
5466 | * SMP is a lower bound, by reason of under accounting. | |
5467 | * | |
5468 | * Worse, since the numbers are provided per CPU, they are sometimes | |
5469 | * interpreted per CPU, and that is nonsensical. A blocked task isn't strictly | |
5470 | * associated with any one particular CPU; it can wake on a different CPU than it
5471 | * blocked on. This means the per CPU IO-wait number is meaningless. | |
5472 | * | |
5473 | * Task CPU affinities can make all that even more 'interesting'. | |
5474 | */ | |
5475 | ||
97455168 | 5476 | unsigned int nr_iowait(void) |
1da177e4 | 5477 | { |
97455168 | 5478 | unsigned int i, sum = 0; |
483b4ee6 | 5479 | |
0a945022 | 5480 | for_each_possible_cpu(i) |
145d952a | 5481 | sum += nr_iowait_cpu(i); |
46cb4b7c | 5482 | |
1da177e4 LT |
5483 | return sum; |
5484 | } | |
483b4ee6 | 5485 | |
dd41f596 | 5486 | #ifdef CONFIG_SMP |
8a0be9ef | 5487 | |
46cb4b7c | 5488 | /* |
38022906 PZ |
5489 | * sched_exec - execve() is a valuable balancing opportunity, because at |
5490 | * this point the task has the smallest effective memory and cache footprint. | |
46cb4b7c | 5491 | */ |
38022906 | 5492 | void sched_exec(void) |
46cb4b7c | 5493 | { |
38022906 | 5494 | struct task_struct *p = current; |
4bdada79 | 5495 | struct migration_arg arg; |
0017d735 | 5496 | int dest_cpu; |
46cb4b7c | 5497 | |
4bdada79 PZ |
5498 | scoped_guard (raw_spinlock_irqsave, &p->pi_lock) { |
5499 | dest_cpu = p->sched_class->select_task_rq(p, task_cpu(p), WF_EXEC); | |
5500 | if (dest_cpu == smp_processor_id()) | |
5501 | return; | |
38022906 | 5502 | |
4bdada79 PZ |
5503 | if (unlikely(!cpu_active(dest_cpu))) |
5504 | return; | |
46cb4b7c | 5505 | |
4bdada79 | 5506 | arg = (struct migration_arg){ p, dest_cpu }; |
1da177e4 | 5507 | } |
4bdada79 | 5508 | stop_one_cpu(task_cpu(p), migration_cpu_stop, &arg); |
1da177e4 | 5509 | } |
dd41f596 | 5510 | |
1da177e4 LT |
5511 | #endif |
5512 | ||
1da177e4 | 5513 | DEFINE_PER_CPU(struct kernel_stat, kstat); |
3292beb3 | 5514 | DEFINE_PER_CPU(struct kernel_cpustat, kernel_cpustat); |
1da177e4 LT |
5515 | |
5516 | EXPORT_PER_CPU_SYMBOL(kstat); | |
3292beb3 | 5517 | EXPORT_PER_CPU_SYMBOL(kernel_cpustat); |
1da177e4 | 5518 | |
6075620b GG |
5519 | /* |
5520 | * The function fair_sched_class.update_curr accesses the cfs_rq's curr
5521 | * entity and its field curr->exec_start; when called from task_sched_runtime(),
5522 | * we observe a high rate of cache misses in practice. | |
5523 | * Prefetching this data results in improved performance. | |
5524 | */ | |
5525 | static inline void prefetch_curr_exec_start(struct task_struct *p) | |
5526 | { | |
5527 | #ifdef CONFIG_FAIR_GROUP_SCHED | |
5528 | struct sched_entity *curr = (&p->se)->cfs_rq->curr; | |
5529 | #else | |
5530 | struct sched_entity *curr = (&task_rq(p)->cfs)->curr; | |
5531 | #endif | |
5532 | prefetch(curr); | |
5533 | prefetch(&curr->exec_start); | |
5534 | } | |
5535 | ||
c5f8d995 HS |
5536 | /* |
5537 | * Return accounted runtime for the task. | |
5538 | * In case the task is currently running, return the runtime plus current's | |
5539 | * pending runtime that have not been accounted yet. | |
5540 | */ | |
5541 | unsigned long long task_sched_runtime(struct task_struct *p) | |
5542 | { | |
eb580751 | 5543 | struct rq_flags rf; |
c5f8d995 | 5544 | struct rq *rq; |
6e998916 | 5545 | u64 ns; |
c5f8d995 | 5546 | |
911b2898 PZ |
5547 | #if defined(CONFIG_64BIT) && defined(CONFIG_SMP) |
5548 | /* | |
97fb7a0a | 5549 | * 64-bit doesn't need locks to atomically read a 64-bit value. |
911b2898 PZ |
5550 | * So we have an optimization opportunity when the task's delta_exec is 0.
5551 | * Reading ->on_cpu is racy, but this is ok. | |
5552 | * | |
d1ccc66d IM |
5553 | * If we race with it leaving CPU, we'll take a lock. So we're correct. |
5554 | * If we race with it entering CPU, unaccounted time is 0. This is | |
911b2898 | 5555 | * indistinguishable from the read occurring a few cycles earlier. |
4036ac15 MG |
5556 | * If we see ->on_cpu without ->on_rq, the task is leaving, and has |
5557 | * been accounted, so we're correct here as well. | |
911b2898 | 5558 | */ |
da0c1e65 | 5559 | if (!p->on_cpu || !task_on_rq_queued(p)) |
911b2898 PZ |
5560 | return p->se.sum_exec_runtime; |
5561 | #endif | |
5562 | ||
eb580751 | 5563 | rq = task_rq_lock(p, &rf); |
6e998916 SG |
5564 | /* |
5565 | * Must be ->curr _and_ ->on_rq. If dequeued, we would | |
5566 | * project cycles that may never be accounted to this | |
5567 | * thread, breaking clock_gettime(). | |
5568 | */ | |
5569 | if (task_current(rq, p) && task_on_rq_queued(p)) { | |
6075620b | 5570 | prefetch_curr_exec_start(p); |
6e998916 SG |
5571 | update_rq_clock(rq); |
5572 | p->sched_class->update_curr(rq); | |
5573 | } | |
5574 | ns = p->se.sum_exec_runtime; | |
eb580751 | 5575 | task_rq_unlock(rq, p, &rf); |
c5f8d995 HS |
5576 | |
5577 | return ns; | |
5578 | } | |
48f24c4d | 5579 | |
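/*
 * Editor's note (hedged, illustrative): among other callers, the
 * posix-cpu-timers CPUCLOCK_SCHED sample ends up here, so a userspace
 * sketch such as
 *
 *	struct timespec ts;
 *	clock_gettime(CLOCK_THREAD_CPUTIME_ID, &ts);
 *
 * reports the calling thread's runtime including the still-pending
 * delta of a currently running task.
 */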
c006fac5 PT |
5580 | #ifdef CONFIG_SCHED_DEBUG |
5581 | static u64 cpu_resched_latency(struct rq *rq) | |
5582 | { | |
5583 | int latency_warn_ms = READ_ONCE(sysctl_resched_latency_warn_ms); | |
5584 | u64 resched_latency, now = rq_clock(rq); | |
5585 | static bool warned_once; | |
5586 | ||
5587 | if (sysctl_resched_latency_warn_once && warned_once) | |
5588 | return 0; | |
5589 | ||
5590 | if (!need_resched() || !latency_warn_ms) | |
5591 | return 0; | |
5592 | ||
5593 | if (system_state == SYSTEM_BOOTING) | |
5594 | return 0; | |
5595 | ||
5596 | if (!rq->last_seen_need_resched_ns) { | |
5597 | rq->last_seen_need_resched_ns = now; | |
5598 | rq->ticks_without_resched = 0; | |
5599 | return 0; | |
5600 | } | |
5601 | ||
5602 | rq->ticks_without_resched++; | |
5603 | resched_latency = now - rq->last_seen_need_resched_ns; | |
5604 | if (resched_latency <= latency_warn_ms * NSEC_PER_MSEC) | |
5605 | return 0; | |
5606 | ||
5607 | warned_once = true; | |
5608 | ||
5609 | return resched_latency; | |
5610 | } | |
5611 | ||
5612 | static int __init setup_resched_latency_warn_ms(char *str) | |
5613 | { | |
5614 | long val; | |
5615 | ||
5616 | if ((kstrtol(str, 0, &val))) { | |
5617 | pr_warn("Unable to set resched_latency_warn_ms\n"); | |
5618 | return 1; | |
5619 | } | |
5620 | ||
5621 | sysctl_resched_latency_warn_ms = val; | |
5622 | return 1; | |
5623 | } | |
5624 | __setup("resched_latency_warn_ms=", setup_resched_latency_warn_ms); | |
5625 | #else | |
5626 | static inline u64 cpu_resched_latency(struct rq *rq) { return 0; } | |
5627 | #endif /* CONFIG_SCHED_DEBUG */ | |
5628 | ||
7835b98b CL |
5629 | /* |
5630 | * This function gets called by the timer code, with HZ frequency. | |
5631 | * We call it with interrupts disabled. | |
7835b98b CL |
5632 | */ |
5633 | void scheduler_tick(void) | |
5634 | { | |
7835b98b CL |
5635 | int cpu = smp_processor_id(); |
5636 | struct rq *rq = cpu_rq(cpu); | |
dd41f596 | 5637 | struct task_struct *curr = rq->curr; |
8a8c69c3 | 5638 | struct rq_flags rf; |
b4eccf5f | 5639 | unsigned long thermal_pressure; |
c006fac5 | 5640 | u64 resched_latency; |
3e51f33f | 5641 | |
7fb3ff22 YP |
5642 | if (housekeeping_cpu(cpu, HK_TYPE_TICK)) |
5643 | arch_scale_freq_tick(); | |
5644 | ||
3e51f33f | 5645 | sched_clock_tick(); |
dd41f596 | 5646 | |
8a8c69c3 PZ |
5647 | rq_lock(rq, &rf); |
5648 | ||
3e51f33f | 5649 | update_rq_clock(rq); |
b4eccf5f | 5650 | thermal_pressure = arch_scale_thermal_pressure(cpu_of(rq)); |
05289b90 | 5651 | update_thermal_load_avg(rq_clock_thermal(rq), rq, thermal_pressure); |
fa85ae24 | 5652 | curr->sched_class->task_tick(rq, curr, 0); |
c006fac5 PT |
5653 | if (sched_feat(LATENCY_WARN)) |
5654 | resched_latency = cpu_resched_latency(rq); | |
3289bdb4 | 5655 | calc_global_load_tick(rq); |
4feee7d1 | 5656 | sched_core_tick(rq); |
223baf9d | 5657 | task_tick_mm_cid(rq, curr); |
8a8c69c3 PZ |
5658 | |
5659 | rq_unlock(rq, &rf); | |
7835b98b | 5660 | |
c006fac5 PT |
5661 | if (sched_feat(LATENCY_WARN) && resched_latency) |
5662 | resched_latency_warn(cpu, resched_latency); | |
5663 | ||
e9d2b064 | 5664 | perf_event_task_tick(); |
e220d2dc | 5665 | |
616db877 TH |
5666 | if (curr->flags & PF_WQ_WORKER) |
5667 | wq_worker_tick(curr); | |
5668 | ||
e418e1c2 | 5669 | #ifdef CONFIG_SMP |
6eb57e0d | 5670 | rq->idle_balance = idle_cpu(cpu); |
7caff66f | 5671 | trigger_load_balance(rq); |
e418e1c2 | 5672 | #endif |
1da177e4 LT |
5673 | } |
5674 | ||
265f22a9 | 5675 | #ifdef CONFIG_NO_HZ_FULL |
d84b3131 FW |
5676 | |
5677 | struct tick_work { | |
5678 | int cpu; | |
b55bd585 | 5679 | atomic_t state; |
d84b3131 FW |
5680 | struct delayed_work work; |
5681 | }; | |
b55bd585 PM |
5682 | /* Values for ->state, see diagram below. */ |
5683 | #define TICK_SCHED_REMOTE_OFFLINE 0 | |
5684 | #define TICK_SCHED_REMOTE_OFFLINING 1 | |
5685 | #define TICK_SCHED_REMOTE_RUNNING 2 | |
5686 | ||
5687 | /* | |
5688 | * State diagram for ->state: | |
5689 | * | |
5690 | * | |
5691 | * TICK_SCHED_REMOTE_OFFLINE | |
5692 | * | ^ | |
5693 | * | | | |
5694 | * | | sched_tick_remote() | |
5695 | * | | | |
5696 | * | | | |
5697 | * +--TICK_SCHED_REMOTE_OFFLINING | |
5698 | * | ^ | |
5699 | * | | | |
5700 | * sched_tick_start() | | sched_tick_stop() | |
5701 | * | | | |
5702 | * V | | |
5703 | * TICK_SCHED_REMOTE_RUNNING | |
5704 | * | |
5705 | * | |
5706 | * Other transitions get WARN_ON_ONCE(), except that sched_tick_remote() | |
5707 | * and sched_tick_start() are happy to leave the state in RUNNING. | |
5708 | */ | |
d84b3131 FW |
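/*
 * Editor's note (illustrative): the transitions above are lock-free.
 * sched_tick_remote() steps the state down with
 * atomic_fetch_add_unless(&state, -1, RUNNING): OFFLINING (1) decays to
 * OFFLINE (0) and the work is not requeued, while RUNNING (2) is left
 * untouched and the returned old value tells it to requeue itself.
 */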
5709 | |
5710 | static struct tick_work __percpu *tick_work_cpu; | |
5711 | ||
5712 | static void sched_tick_remote(struct work_struct *work) | |
5713 | { | |
5714 | struct delayed_work *dwork = to_delayed_work(work); | |
5715 | struct tick_work *twork = container_of(dwork, struct tick_work, work); | |
5716 | int cpu = twork->cpu; | |
5717 | struct rq *rq = cpu_rq(cpu); | |
b55bd585 | 5718 | int os; |
d84b3131 FW |
5719 | |
5720 | /* | |
5721 | * Handle the tick only if it appears the remote CPU is running in full | |
5722 | * dynticks mode. The check is racy by nature, but missing a tick or | |
5723 | * having one too many is no big deal because the scheduler tick updates
5724 | * statistics and checks timeslices in a time-independent way, regardless | |
5725 | * of when exactly it is running. | |
5726 | */ | |
6dafc713 PZ |
5727 | if (tick_nohz_tick_stopped_cpu(cpu)) { |
5728 | guard(rq_lock_irq)(rq); | |
5729 | struct task_struct *curr = rq->curr; | |
d84b3131 | 5730 | |
6dafc713 PZ |
5731 | if (cpu_online(cpu)) { |
5732 | update_rq_clock(rq); | |
d84b3131 | 5733 | |
6dafc713 PZ |
5734 | if (!is_idle_task(curr)) { |
5735 | /* | |
5736 | * Make sure the next tick runs within a | |
5737 | * reasonable amount of time. | |
5738 | */ | |
5739 | u64 delta = rq_clock_task(rq) - curr->se.exec_start; | |
5740 | WARN_ON_ONCE(delta > (u64)NSEC_PER_SEC * 3); | |
5741 | } | |
5742 | curr->sched_class->task_tick(rq, curr, 0); | |
d9c0ffca | 5743 | |
6dafc713 PZ |
5744 | calc_load_nohz_remote(rq); |
5745 | } | |
488603b8 | 5746 | } |
ebc0f83c | 5747 | |
d84b3131 FW |
5748 | /* |
5749 | * Run the remote tick once per second (1Hz). This arbitrary | |
5750 | * frequency is large enough to avoid overload but short enough | |
b55bd585 PM |
5751 | * to keep scheduler internal stats reasonably up to date. But |
5752 | * first update state to reflect hotplug activity if required. | |
d84b3131 | 5753 | */ |
b55bd585 PM |
5754 | os = atomic_fetch_add_unless(&twork->state, -1, TICK_SCHED_REMOTE_RUNNING); |
5755 | WARN_ON_ONCE(os == TICK_SCHED_REMOTE_OFFLINE); | |
5756 | if (os == TICK_SCHED_REMOTE_RUNNING) | |
5757 | queue_delayed_work(system_unbound_wq, dwork, HZ); | |
d84b3131 FW |
5758 | } |
5759 | ||
5760 | static void sched_tick_start(int cpu) | |
5761 | { | |
b55bd585 | 5762 | int os; |
d84b3131 FW |
5763 | struct tick_work *twork; |
5764 | ||
04d4e665 | 5765 | if (housekeeping_cpu(cpu, HK_TYPE_TICK)) |
d84b3131 FW |
5766 | return; |
5767 | ||
5768 | WARN_ON_ONCE(!tick_work_cpu); | |
5769 | ||
5770 | twork = per_cpu_ptr(tick_work_cpu, cpu); | |
b55bd585 PM |
5771 | os = atomic_xchg(&twork->state, TICK_SCHED_REMOTE_RUNNING); |
5772 | WARN_ON_ONCE(os == TICK_SCHED_REMOTE_RUNNING); | |
5773 | if (os == TICK_SCHED_REMOTE_OFFLINE) { | |
5774 | twork->cpu = cpu; | |
5775 | INIT_DELAYED_WORK(&twork->work, sched_tick_remote); | |
5776 | queue_delayed_work(system_unbound_wq, &twork->work, HZ); | |
5777 | } | |
d84b3131 FW |
5778 | } |
5779 | ||
5780 | #ifdef CONFIG_HOTPLUG_CPU | |
5781 | static void sched_tick_stop(int cpu) | |
5782 | { | |
5783 | struct tick_work *twork; | |
b55bd585 | 5784 | int os; |
d84b3131 | 5785 | |
04d4e665 | 5786 | if (housekeeping_cpu(cpu, HK_TYPE_TICK)) |
d84b3131 FW |
5787 | return; |
5788 | ||
5789 | WARN_ON_ONCE(!tick_work_cpu); | |
5790 | ||
5791 | twork = per_cpu_ptr(tick_work_cpu, cpu); | |
b55bd585 PM |
5792 | /* There cannot be competing actions, but don't rely on stop-machine. */ |
5793 | os = atomic_xchg(&twork->state, TICK_SCHED_REMOTE_OFFLINING); | |
5794 | WARN_ON_ONCE(os != TICK_SCHED_REMOTE_RUNNING); | |
5795 | /* Don't cancel, as this would mess up the state machine. */ | |
d84b3131 FW |
5796 | } |
5797 | #endif /* CONFIG_HOTPLUG_CPU */ | |
5798 | ||
5799 | int __init sched_tick_offload_init(void) | |
5800 | { | |
5801 | tick_work_cpu = alloc_percpu(struct tick_work); | |
5802 | BUG_ON(!tick_work_cpu); | |
d84b3131 FW |
5803 | return 0; |
5804 | } | |
5805 | ||
5806 | #else /* !CONFIG_NO_HZ_FULL */ | |
5807 | static inline void sched_tick_start(int cpu) { } | |
5808 | static inline void sched_tick_stop(int cpu) { } | |
265f22a9 | 5809 | #endif |
1da177e4 | 5810 | |
c1a280b6 | 5811 | #if defined(CONFIG_PREEMPTION) && (defined(CONFIG_DEBUG_PREEMPT) || \ |
c3bc8fd6 | 5812 | defined(CONFIG_TRACE_PREEMPT_TOGGLE)) |
47252cfb SR |
5813 | /* |
5814 | * If the value passed in is equal to the current preempt count | |
5815 | * then we just disabled preemption. Start timing the latency. | |
5816 | */ | |
5817 | static inline void preempt_latency_start(int val) | |
5818 | { | |
5819 | if (preempt_count() == val) { | |
5820 | unsigned long ip = get_lock_parent_ip(); | |
5821 | #ifdef CONFIG_DEBUG_PREEMPT | |
5822 | current->preempt_disable_ip = ip; | |
5823 | #endif | |
5824 | trace_preempt_off(CALLER_ADDR0, ip); | |
5825 | } | |
5826 | } | |
7e49fcce | 5827 | |
edafe3a5 | 5828 | void preempt_count_add(int val) |
1da177e4 | 5829 | { |
6cd8a4bb | 5830 | #ifdef CONFIG_DEBUG_PREEMPT |
1da177e4 LT |
5831 | /* |
5832 | * Underflow? | |
5833 | */ | |
9a11b49a IM |
5834 | if (DEBUG_LOCKS_WARN_ON((preempt_count() < 0))) |
5835 | return; | |
6cd8a4bb | 5836 | #endif |
bdb43806 | 5837 | __preempt_count_add(val); |
6cd8a4bb | 5838 | #ifdef CONFIG_DEBUG_PREEMPT |
1da177e4 LT |
5839 | /* |
5840 | * Spinlock count overflowing soon? | |
5841 | */ | |
33859f7f MOS |
5842 | DEBUG_LOCKS_WARN_ON((preempt_count() & PREEMPT_MASK) >= |
5843 | PREEMPT_MASK - 10); | |
6cd8a4bb | 5844 | #endif |
47252cfb | 5845 | preempt_latency_start(val); |
1da177e4 | 5846 | } |
bdb43806 | 5847 | EXPORT_SYMBOL(preempt_count_add); |
edafe3a5 | 5848 | NOKPROBE_SYMBOL(preempt_count_add); |
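/*
 * Editor's sketch (not part of the original source; simplified from
 * <linux/preempt.h>): with CONFIG_DEBUG_PREEMPT enabled, the common
 * wrappers funnel into preempt_count_add()/preempt_count_sub(), roughly:
 *
 *	#define preempt_disable()		\
 *	do {					\
 *		preempt_count_add(1);		\
 *		barrier();			\
 *	} while (0)
 *
 * so the underflow/overflow checks and the latency timestamps cover
 * every preemption-off region from a single choke point.
 */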
1da177e4 | 5849 | |
47252cfb SR |
5850 | /* |
5851 | * If the value passed in equals the current preempt count
5852 | * then we just enabled preemption. Stop timing the latency. | |
5853 | */ | |
5854 | static inline void preempt_latency_stop(int val) | |
5855 | { | |
5856 | if (preempt_count() == val) | |
5857 | trace_preempt_on(CALLER_ADDR0, get_lock_parent_ip()); | |
5858 | } | |
5859 | ||
edafe3a5 | 5860 | void preempt_count_sub(int val) |
1da177e4 | 5861 | { |
6cd8a4bb | 5862 | #ifdef CONFIG_DEBUG_PREEMPT |
1da177e4 LT |
5863 | /* |
5864 | * Underflow? | |
5865 | */ | |
01e3eb82 | 5866 | if (DEBUG_LOCKS_WARN_ON(val > preempt_count())) |
9a11b49a | 5867 | return; |
1da177e4 LT |
5868 | /* |
5869 | * Is the spinlock portion underflowing? | |
5870 | */ | |
9a11b49a IM |
5871 | if (DEBUG_LOCKS_WARN_ON((val < PREEMPT_MASK) && |
5872 | !(preempt_count() & PREEMPT_MASK))) | |
5873 | return; | |
6cd8a4bb | 5874 | #endif |
9a11b49a | 5875 | |
47252cfb | 5876 | preempt_latency_stop(val); |
bdb43806 | 5877 | __preempt_count_sub(val); |
1da177e4 | 5878 | } |
bdb43806 | 5879 | EXPORT_SYMBOL(preempt_count_sub); |
edafe3a5 | 5880 | NOKPROBE_SYMBOL(preempt_count_sub); |
1da177e4 | 5881 | |
47252cfb SR |
5882 | #else |
5883 | static inline void preempt_latency_start(int val) { } | |
5884 | static inline void preempt_latency_stop(int val) { } | |
1da177e4 LT |
5885 | #endif |
5886 | ||
59ddbcb2 IM |
5887 | static inline unsigned long get_preempt_disable_ip(struct task_struct *p) |
5888 | { | |
5889 | #ifdef CONFIG_DEBUG_PREEMPT | |
5890 | return p->preempt_disable_ip; | |
5891 | #else | |
5892 | return 0; | |
5893 | #endif | |
5894 | } | |
5895 | ||
1da177e4 | 5896 | /* |
dd41f596 | 5897 | * Print scheduling while atomic bug: |
1da177e4 | 5898 | */ |
dd41f596 | 5899 | static noinline void __schedule_bug(struct task_struct *prev) |
1da177e4 | 5900 | { |
d1c6d149 VN |
5901 | /* Save this before calling printk(), since that will clobber it */ |
5902 | unsigned long preempt_disable_ip = get_preempt_disable_ip(current); | |
5903 | ||
664dfa65 DJ |
5904 | if (oops_in_progress) |
5905 | return; | |
5906 | ||
3df0fc5b PZ |
5907 | printk(KERN_ERR "BUG: scheduling while atomic: %s/%d/0x%08x\n", |
5908 | prev->comm, prev->pid, preempt_count()); | |
838225b4 | 5909 | |
dd41f596 | 5910 | debug_show_held_locks(prev); |
e21f5b15 | 5911 | print_modules(); |
dd41f596 IM |
5912 | if (irqs_disabled()) |
5913 | print_irqtrace_events(prev); | |
dc461c48 | 5914 | if (IS_ENABLED(CONFIG_DEBUG_PREEMPT)) { |
8f47b187 | 5915 | pr_err("Preemption disabled at:"); |
2062a4e8 | 5916 | print_ip_sym(KERN_ERR, preempt_disable_ip); |
8f47b187 | 5917 | } |
79cc1ba7 | 5918 | check_panic_on_warn("scheduling while atomic"); |
748c7201 | 5919 | |
6135fc1e | 5920 | dump_stack(); |
373d4d09 | 5921 | add_taint(TAINT_WARN, LOCKDEP_STILL_OK); |
dd41f596 | 5922 | } |
1da177e4 | 5923 | |
dd41f596 IM |
5924 | /* |
5925 | * Various schedule()-time debugging checks and statistics: | |
5926 | */ | |
312364f3 | 5927 | static inline void schedule_debug(struct task_struct *prev, bool preempt) |
dd41f596 | 5928 | { |
0d9e2632 | 5929 | #ifdef CONFIG_SCHED_STACK_END_CHECK |
29d64551 JH |
5930 | if (task_stack_end_corrupted(prev)) |
5931 | panic("corrupted stack end detected inside scheduler\n"); | |
88485be5 WD |
5932 | |
5933 | if (task_scs_end_corrupted(prev)) | |
5934 | panic("corrupted shadow stack detected inside scheduler\n"); | |
0d9e2632 | 5935 | #endif |
b99def8b | 5936 | |
312364f3 | 5937 | #ifdef CONFIG_DEBUG_ATOMIC_SLEEP |
2f064a59 | 5938 | if (!preempt && READ_ONCE(prev->__state) && prev->non_block_count) { |
312364f3 DV |
5939 | printk(KERN_ERR "BUG: scheduling in a non-blocking section: %s/%d/%i\n", |
5940 | prev->comm, prev->pid, prev->non_block_count); | |
5941 | dump_stack(); | |
5942 | add_taint(TAINT_WARN, LOCKDEP_STILL_OK); | |
5943 | } | |
5944 | #endif | |
5945 | ||
1dc0fffc | 5946 | if (unlikely(in_atomic_preempt_off())) { |
dd41f596 | 5947 | __schedule_bug(prev); |
1dc0fffc PZ |
5948 | preempt_count_set(PREEMPT_DISABLED); |
5949 | } | |
b3fbab05 | 5950 | rcu_sleep_check(); |
9f68b5b7 | 5951 | SCHED_WARN_ON(ct_state() == CONTEXT_USER); |
dd41f596 | 5952 | |
1da177e4 LT |
5953 | profile_hit(SCHED_PROFILING, __builtin_return_address(0)); |
5954 | ||
ae92882e | 5955 | schedstat_inc(this_rq()->sched_count); |
dd41f596 IM |
5956 | } |
5957 | ||
457d1f46 CY |
5958 | static void put_prev_task_balance(struct rq *rq, struct task_struct *prev, |
5959 | struct rq_flags *rf) | |
5960 | { | |
5961 | #ifdef CONFIG_SMP | |
5962 | const struct sched_class *class; | |
5963 | /* | |
5964 | * We must do the balancing pass before put_prev_task(), such | |
5965 | * that when we release the rq->lock the task is in the same | |
5966 | * state as before we took rq->lock. | |
5967 | * | |
5968 | * We can terminate the balance pass as soon as we know there is | |
5969 | * a runnable task of @class priority or higher. | |
5970 | */ | |
5971 | for_class_range(class, prev->sched_class, &idle_sched_class) { | |
5972 | if (class->balance(rq, prev, rf)) | |
5973 | break; | |
5974 | } | |
5975 | #endif | |
5976 | ||
5977 | put_prev_task(rq, prev); | |
5978 | } | |
5979 | ||
dd41f596 IM |
5980 | /* |
5981 | * Pick up the highest-prio task: | |
5982 | */ | |
5983 | static inline struct task_struct * | |
539f6512 | 5984 | __pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf) |
dd41f596 | 5985 | { |
49ee5768 | 5986 | const struct sched_class *class; |
dd41f596 | 5987 | struct task_struct *p; |
1da177e4 LT |
5988 | |
5989 | /* | |
0ba87bb2 PZ |
5990 | * Optimization: we know that if all tasks are in the fair class we can |
5991 | * call that function directly, but only if the @prev task wasn't of a | |
b19a888c | 5992 | * higher scheduling class, because otherwise those classes lose the
0ba87bb2 | 5993 | * opportunity to pull in more work from other CPUs. |
1da177e4 | 5994 | */ |
546a3fee | 5995 | if (likely(!sched_class_above(prev->sched_class, &fair_sched_class) && |
0ba87bb2 PZ |
5996 | rq->nr_running == rq->cfs.h_nr_running)) { |
5997 | ||
5d7d6056 | 5998 | p = pick_next_task_fair(rq, prev, rf); |
6ccdc84b | 5999 | if (unlikely(p == RETRY_TASK)) |
67692435 | 6000 | goto restart; |
6ccdc84b | 6001 | |
1699949d | 6002 | /* Assume the next prioritized class is idle_sched_class */ |
5d7d6056 | 6003 | if (!p) { |
f488e105 | 6004 | put_prev_task(rq, prev); |
98c2f700 | 6005 | p = pick_next_task_idle(rq); |
f488e105 | 6006 | } |
6ccdc84b PZ |
6007 | |
6008 | return p; | |
1da177e4 LT |
6009 | } |
6010 | ||
67692435 | 6011 | restart: |
457d1f46 | 6012 | put_prev_task_balance(rq, prev, rf); |
67692435 | 6013 | |
34f971f6 | 6014 | for_each_class(class) { |
98c2f700 | 6015 | p = class->pick_next_task(rq); |
67692435 | 6016 | if (p) |
dd41f596 | 6017 | return p; |
dd41f596 | 6018 | } |
34f971f6 | 6019 | |
bc9ffef3 | 6020 | BUG(); /* The idle class should always have a runnable task. */ |
dd41f596 | 6021 | } |
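/*
 * Editor's note (sketch, not part of the original source): the fast path
 * above is safe because rq->nr_running == rq->cfs.h_nr_running implies
 * that every runnable task on this rq is in the fair hierarchy, so
 * pick_next_task_fair() cannot skip over a higher-class task.
 */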
1da177e4 | 6022 | |
9edeaea1 | 6023 | #ifdef CONFIG_SCHED_CORE |
539f6512 PZ |
6024 | static inline bool is_task_rq_idle(struct task_struct *t) |
6025 | { | |
6026 | return (task_rq(t)->idle == t); | |
6027 | } | |
6028 | ||
6029 | static inline bool cookie_equals(struct task_struct *a, unsigned long cookie) | |
6030 | { | |
6031 | return is_task_rq_idle(a) || (a->core_cookie == cookie); | |
6032 | } | |
6033 | ||
6034 | static inline bool cookie_match(struct task_struct *a, struct task_struct *b) | |
6035 | { | |
6036 | if (is_task_rq_idle(a) || is_task_rq_idle(b)) | |
6037 | return true; | |
6038 | ||
6039 | return a->core_cookie == b->core_cookie; | |
6040 | } | |
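/*
 * Editor's sketch (not part of the original source): a cookie of 0 means
 * "no core-scheduling constraint". With hypothetical tasks A, B and C:
 *
 *	cookie_match(A[cookie=1], B[cookie=1])	-> true
 *	cookie_match(A[cookie=1], idle)		-> true
 *	cookie_match(A[cookie=1], C[cookie=2])	-> false
 *
 * Only picks that match may run concurrently on SMT siblings.
 */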
6041 | ||
bc9ffef3 | 6042 | static inline struct task_struct *pick_task(struct rq *rq) |
539f6512 | 6043 | { |
bc9ffef3 PZ |
6044 | const struct sched_class *class; |
6045 | struct task_struct *p; | |
539f6512 | 6046 | |
bc9ffef3 PZ |
6047 | for_each_class(class) { |
6048 | p = class->pick_task(rq); | |
6049 | if (p) | |
6050 | return p; | |
539f6512 PZ |
6051 | } |
6052 | ||
bc9ffef3 | 6053 | BUG(); /* The idle class should always have a runnable task. */ |
539f6512 PZ |
6054 | } |
6055 | ||
c6047c2e JFG |
6056 | extern void task_vruntime_update(struct rq *rq, struct task_struct *p, bool in_fi); |
6057 | ||
5b6547ed PZ |
6058 | static void queue_core_balance(struct rq *rq); |
6059 | ||
539f6512 PZ |
6060 | static struct task_struct * |
6061 | pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf) | |
6062 | { | |
bc9ffef3 | 6063 | struct task_struct *next, *p, *max = NULL; |
539f6512 | 6064 | const struct cpumask *smt_mask; |
c6047c2e | 6065 | bool fi_before = false; |
4feee7d1 | 6066 | bool core_clock_updated = (rq == rq->core); |
bc9ffef3 PZ |
6067 | unsigned long cookie; |
6068 | int i, cpu, occ = 0; | |
6069 | struct rq *rq_i; | |
539f6512 | 6070 | bool need_sync; |
539f6512 PZ |
6071 | |
6072 | if (!sched_core_enabled(rq)) | |
6073 | return __pick_next_task(rq, prev, rf); | |
6074 | ||
6075 | cpu = cpu_of(rq); | |
6076 | ||
6077 | /* Stopper task is switching into idle, no need for core-wide selection. */
6078 | if (cpu_is_offline(cpu)) { | |
6079 | /* | |
6080 | * Reset core_pick so that we don't enter the fastpath when | |
6081 | * coming online. core_pick would already be migrated to | |
6082 | * another CPU during offline.
6083 | */ | |
6084 | rq->core_pick = NULL; | |
6085 | return __pick_next_task(rq, prev, rf); | |
6086 | } | |
6087 | ||
6088 | /* | |
6089 | * If there were no {en,de}queues since we picked (IOW, the task | |
6090 | * pointers are all still valid), and we haven't scheduled the last | |
6091 | * pick yet, do so now. | |
6092 | * | |
6093 | * rq->core_pick can be NULL if no selection was made for a CPU because | |
6094 | * it was either offline or went offline during a sibling's core-wide | |
6095 | * selection. In this case, do a core-wide selection. | |
6096 | */ | |
6097 | if (rq->core->core_pick_seq == rq->core->core_task_seq && | |
6098 | rq->core->core_pick_seq != rq->core_sched_seq && | |
6099 | rq->core_pick) { | |
6100 | WRITE_ONCE(rq->core_sched_seq, rq->core->core_pick_seq); | |
6101 | ||
6102 | next = rq->core_pick; | |
6103 | if (next != prev) { | |
6104 | put_prev_task(rq, prev); | |
6105 | set_next_task(rq, next); | |
6106 | } | |
6107 | ||
6108 | rq->core_pick = NULL; | |
5b6547ed | 6109 | goto out; |
539f6512 PZ |
6110 | } |
6111 | ||
6112 | put_prev_task_balance(rq, prev, rf); | |
6113 | ||
6114 | smt_mask = cpu_smt_mask(cpu); | |
7afbba11 JFG |
6115 | need_sync = !!rq->core->core_cookie; |
6116 | ||
6117 | /* reset state */ | |
6118 | rq->core->core_cookie = 0UL; | |
4feee7d1 JD |
6119 | if (rq->core->core_forceidle_count) { |
6120 | if (!core_clock_updated) { | |
6121 | update_rq_clock(rq->core); | |
6122 | core_clock_updated = true; | |
6123 | } | |
6124 | sched_core_account_forceidle(rq); | |
6125 | /* reset after accounting force idle */ | |
6126 | rq->core->core_forceidle_start = 0; | |
6127 | rq->core->core_forceidle_count = 0; | |
6128 | rq->core->core_forceidle_occupation = 0; | |
7afbba11 JFG |
6129 | need_sync = true; |
6130 | fi_before = true; | |
7afbba11 | 6131 | } |
539f6512 PZ |
6132 | |
6133 | /* | |
6134 | * core->core_task_seq, core->core_pick_seq, rq->core_sched_seq | |
6135 | * | |
6136 | * @task_seq guards the task state ({en,de}queues) | |
6137 | * @pick_seq is the @task_seq we did a selection on | |
6138 | * @sched_seq is the @pick_seq we scheduled | |
6139 | * | |
6140 | * However, preemptions can cause multiple picks on the same task set. | |
6141 | * 'Fix' this by also increasing @task_seq for every pick. | |
6142 | */ | |
6143 | rq->core->core_task_seq++; | |
539f6512 | 6144 | |
7afbba11 JFG |
6145 | /* |
6146 | * Optimize for the common case where this CPU has no cookies
6147 | * and there are no cookied tasks running on siblings. | |
6148 | */ | |
6149 | if (!need_sync) { | |
bc9ffef3 | 6150 | next = pick_task(rq); |
7afbba11 JFG |
6151 | if (!next->core_cookie) { |
6152 | rq->core_pick = NULL; | |
c6047c2e JFG |
6153 | /* |
6154 | * For robustness, update the min_vruntime_fi for | |
6155 | * unconstrained picks as well. | |
6156 | */ | |
6157 | WARN_ON_ONCE(fi_before); | |
6158 | task_vruntime_update(rq, next, false); | |
5b6547ed | 6159 | goto out_set_next; |
7afbba11 | 6160 | } |
8039e96f | 6161 | } |
7afbba11 | 6162 | |
bc9ffef3 PZ |
6163 | /* |
6164 | * For each thread: do the regular task pick and find the max prio task | |
6165 | * amongst them. | |
6166 | * | |
6167 | * Tie-break prio towards the current CPU | |
6168 | */ | |
6169 | for_each_cpu_wrap(i, smt_mask, cpu) { | |
6170 | rq_i = cpu_rq(i); | |
539f6512 | 6171 | |
4feee7d1 JD |
6172 | /* |
6173 | * Current CPU always has its clock updated on entrance to
6174 | * pick_next_task(). If the current CPU is not the core,
6175 | * the core may also have been updated above. | |
6176 | */ | |
6177 | if (i != cpu && (rq_i != rq->core || !core_clock_updated)) | |
539f6512 | 6178 | update_rq_clock(rq_i); |
bc9ffef3 PZ |
6179 | |
6180 | p = rq_i->core_pick = pick_task(rq_i); | |
6181 | if (!max || prio_less(max, p, fi_before)) | |
6182 | max = p; | |
539f6512 PZ |
6183 | } |
6184 | ||
bc9ffef3 PZ |
6185 | cookie = rq->core->core_cookie = max->core_cookie; |
6186 | ||
539f6512 | 6187 | /* |
bc9ffef3 PZ |
6188 | * For each thread: try and find a runnable task that matches @max or |
6189 | * force idle. | |
539f6512 | 6190 | */ |
bc9ffef3 PZ |
6191 | for_each_cpu(i, smt_mask) { |
6192 | rq_i = cpu_rq(i); | |
6193 | p = rq_i->core_pick; | |
539f6512 | 6194 | |
bc9ffef3 PZ |
6195 | if (!cookie_equals(p, cookie)) { |
6196 | p = NULL; | |
6197 | if (cookie) | |
6198 | p = sched_core_find(rq_i, cookie); | |
7afbba11 | 6199 | if (!p) |
bc9ffef3 PZ |
6200 | p = idle_sched_class.pick_task(rq_i); |
6201 | } | |
539f6512 | 6202 | |
bc9ffef3 | 6203 | rq_i->core_pick = p; |
d2dfa17b | 6204 | |
bc9ffef3 PZ |
6205 | if (p == rq_i->idle) { |
6206 | if (rq_i->nr_running) { | |
4feee7d1 | 6207 | rq->core->core_forceidle_count++; |
c6047c2e JFG |
6208 | if (!fi_before) |
6209 | rq->core->core_forceidle_seq++; | |
6210 | } | |
bc9ffef3 PZ |
6211 | } else { |
6212 | occ++; | |
539f6512 | 6213 | } |
539f6512 PZ |
6214 | } |
6215 | ||
4feee7d1 | 6216 | if (schedstat_enabled() && rq->core->core_forceidle_count) { |
b171501f | 6217 | rq->core->core_forceidle_start = rq_clock(rq->core); |
4feee7d1 JD |
6218 | rq->core->core_forceidle_occupation = occ; |
6219 | } | |
6220 | ||
539f6512 PZ |
6221 | rq->core->core_pick_seq = rq->core->core_task_seq; |
6222 | next = rq->core_pick; | |
6223 | rq->core_sched_seq = rq->core->core_pick_seq; | |
6224 | ||
6225 | /* Something should have been selected for current CPU */ | |
6226 | WARN_ON_ONCE(!next); | |
6227 | ||
6228 | /* | |
6229 | * Reschedule siblings | |
6230 | * | |
6231 | * NOTE: L1TF -- at this point we're no longer running the old task and | |
6232 | * sending an IPI (below) ensures the sibling will no longer be running | |
6233 | * their task. This ensures there is no inter-sibling overlap between | |
6234 | * non-matching user state. | |
6235 | */ | |
6236 | for_each_cpu(i, smt_mask) { | |
bc9ffef3 | 6237 | rq_i = cpu_rq(i); |
539f6512 PZ |
6238 | |
6239 | /* | |
6240 | * An online sibling might have gone offline before a task | |
6241 | * could be picked for it, or it might be offline but later | |
6242 | * happen to come online, but it's too late and nothing was
6243 | * picked for it. That's OK - it will pick tasks for itself,
6244 | * so ignore it. | |
6245 | */ | |
6246 | if (!rq_i->core_pick) | |
6247 | continue; | |
6248 | ||
c6047c2e JFG |
6249 | /* |
6250 | * Update for new !FI->FI transitions, or if continuing to be in !FI: | |
6251 | * fi_before fi update? | |
6252 | * 0 0 1 | |
6253 | * 0 1 1 | |
6254 | * 1 0 1 | |
6255 | * 1 1 0 | |
6256 | */ | |
4feee7d1 JD |
6257 | if (!(fi_before && rq->core->core_forceidle_count)) |
6258 | task_vruntime_update(rq_i, rq_i->core_pick, !!rq->core->core_forceidle_count); | |
539f6512 | 6259 | |
d2dfa17b PZ |
6260 | rq_i->core_pick->core_occupation = occ; |
6261 | ||
539f6512 PZ |
6262 | if (i == cpu) { |
6263 | rq_i->core_pick = NULL; | |
6264 | continue; | |
6265 | } | |
6266 | ||
6267 | /* Did we break L1TF mitigation requirements? */ | |
6268 | WARN_ON_ONCE(!cookie_match(next, rq_i->core_pick)); | |
6269 | ||
6270 | if (rq_i->curr == rq_i->core_pick) { | |
6271 | rq_i->core_pick = NULL; | |
6272 | continue; | |
6273 | } | |
6274 | ||
6275 | resched_curr(rq_i); | |
6276 | } | |
6277 | ||
5b6547ed | 6278 | out_set_next: |
539f6512 | 6279 | set_next_task(rq, next); |
5b6547ed PZ |
6280 | out: |
6281 | if (rq->core->core_forceidle_count && next == rq->idle) | |
6282 | queue_core_balance(rq); | |
6283 | ||
539f6512 PZ |
6284 | return next; |
6285 | } | |
9edeaea1 | 6286 | |
d2dfa17b PZ |
6287 | static bool try_steal_cookie(int this, int that) |
6288 | { | |
6289 | struct rq *dst = cpu_rq(this), *src = cpu_rq(that); | |
6290 | struct task_struct *p; | |
6291 | unsigned long cookie; | |
6292 | bool success = false; | |
6293 | ||
b4e1fa1e PZ |
6294 | guard(irq)(); |
6295 | guard(double_rq_lock)(dst, src); | |
d2dfa17b PZ |
6296 | |
6297 | cookie = dst->core->core_cookie; | |
6298 | if (!cookie) | |
b4e1fa1e | 6299 | return false; |
d2dfa17b PZ |
6300 | |
6301 | if (dst->curr != dst->idle) | |
b4e1fa1e | 6302 | return false; |
d2dfa17b PZ |
6303 | |
6304 | p = sched_core_find(src, cookie); | |
530bfad1 | 6305 | if (!p) |
b4e1fa1e | 6306 | return false; |
d2dfa17b PZ |
6307 | |
6308 | do { | |
6309 | if (p == src->core_pick || p == src->curr) | |
6310 | goto next; | |
6311 | ||
386ef214 | 6312 | if (!is_cpu_allowed(p, this)) |
d2dfa17b PZ |
6313 | goto next; |
6314 | ||
6315 | if (p->core_occupation > dst->idle->core_occupation) | |
6316 | goto next; | |
530bfad1 | 6317 | /* |
b4e1fa1e PZ |
6318 | * sched_core_find() and sched_core_next() will ensure |
6319 | * that task @p is not throttled now; we also need to
6320 | * check whether the runqueue of the destination CPU is | |
6321 | * being throttled. | |
530bfad1 HJ |
6322 | */ |
6323 | if (sched_task_is_throttled(p, this)) | |
6324 | goto next; | |
d2dfa17b | 6325 | |
d2dfa17b PZ |
6326 | deactivate_task(src, p, 0); |
6327 | set_task_cpu(p, this); | |
6328 | activate_task(dst, p, 0); | |
d2dfa17b PZ |
6329 | |
6330 | resched_curr(dst); | |
6331 | ||
6332 | success = true; | |
6333 | break; | |
6334 | ||
6335 | next: | |
6336 | p = sched_core_next(p, cookie); | |
6337 | } while (p); | |
6338 | ||
d2dfa17b PZ |
6339 | return success; |
6340 | } | |
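/*
 * Editor's note (sketch, not part of the original source): the guard()s
 * at the top of try_steal_cookie() are scope-based (<linux/cleanup.h>),
 * so every return above implicitly drops the double rq lock and restores
 * interrupts; there is no explicit unlock/error path to keep in sync.
 */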
6341 | ||
6342 | static bool steal_cookie_task(int cpu, struct sched_domain *sd) | |
6343 | { | |
6344 | int i; | |
6345 | ||
8589018a | 6346 | for_each_cpu_wrap(i, sched_domain_span(sd), cpu + 1) { |
d2dfa17b PZ |
6347 | if (i == cpu) |
6348 | continue; | |
6349 | ||
6350 | if (need_resched()) | |
6351 | break; | |
6352 | ||
6353 | if (try_steal_cookie(cpu, i)) | |
6354 | return true; | |
6355 | } | |
6356 | ||
6357 | return false; | |
6358 | } | |
6359 | ||
6360 | static void sched_core_balance(struct rq *rq) | |
6361 | { | |
6362 | struct sched_domain *sd; | |
6363 | int cpu = cpu_of(rq); | |
6364 | ||
0e34600a PZ |
6365 | guard(preempt)(); |
6366 | guard(rcu)(); | |
6367 | ||
d2dfa17b PZ |
6368 | raw_spin_rq_unlock_irq(rq); |
6369 | for_each_domain(cpu, sd) { | |
6370 | if (need_resched()) | |
6371 | break; | |
6372 | ||
6373 | if (steal_cookie_task(cpu, sd)) | |
6374 | break; | |
6375 | } | |
6376 | raw_spin_rq_lock_irq(rq); | |
d2dfa17b PZ |
6377 | } |
6378 | ||
8e5bad7d | 6379 | static DEFINE_PER_CPU(struct balance_callback, core_balance_head); |
d2dfa17b | 6380 | |
5b6547ed | 6381 | static void queue_core_balance(struct rq *rq) |
d2dfa17b PZ |
6382 | { |
6383 | if (!sched_core_enabled(rq)) | |
6384 | return; | |
6385 | ||
6386 | if (!rq->core->core_cookie) | |
6387 | return; | |
6388 | ||
6389 | if (!rq->nr_running) /* not forced idle */ | |
6390 | return; | |
6391 | ||
6392 | queue_balance_callback(rq, &per_cpu(core_balance_head, rq->cpu), sched_core_balance); | |
6393 | } | |
6394 | ||
7170509c PZ |
6395 | DEFINE_LOCK_GUARD_1(core_lock, int, |
6396 | sched_core_lock(*_T->lock, &_T->flags), | |
6397 | sched_core_unlock(*_T->lock, &_T->flags), | |
6398 | unsigned long flags) | |
6399 | ||
3c474b32 | 6400 | static void sched_core_cpu_starting(unsigned int cpu) |
9edeaea1 PZ |
6401 | { |
6402 | const struct cpumask *smt_mask = cpu_smt_mask(cpu); | |
3c474b32 | 6403 | struct rq *rq = cpu_rq(cpu), *core_rq = NULL; |
3c474b32 | 6404 | int t; |
9edeaea1 | 6405 | |
7170509c | 6406 | guard(core_lock)(&cpu); |
9edeaea1 | 6407 | |
3c474b32 PZ |
6408 | WARN_ON_ONCE(rq->core != rq); |
6409 | ||
6410 | /* if we're the first, we'll be our own leader */ | |
6411 | if (cpumask_weight(smt_mask) == 1) | |
7170509c | 6412 | return; |
3c474b32 PZ |
6413 | |
6414 | /* find the leader */ | |
6415 | for_each_cpu(t, smt_mask) { | |
6416 | if (t == cpu) | |
6417 | continue; | |
6418 | rq = cpu_rq(t); | |
6419 | if (rq->core == rq) { | |
6420 | core_rq = rq; | |
6421 | break; | |
9edeaea1 | 6422 | } |
3c474b32 | 6423 | } |
9edeaea1 | 6424 | |
3c474b32 | 6425 | if (WARN_ON_ONCE(!core_rq)) /* whoopsie */ |
7170509c | 6426 | return; |
9edeaea1 | 6427 | |
3c474b32 PZ |
6428 | /* install and validate core_rq */ |
6429 | for_each_cpu(t, smt_mask) { | |
6430 | rq = cpu_rq(t); | |
9edeaea1 | 6431 | |
3c474b32 | 6432 | if (t == cpu) |
9edeaea1 | 6433 | rq->core = core_rq; |
3c474b32 PZ |
6434 | |
6435 | WARN_ON_ONCE(rq->core != core_rq); | |
9edeaea1 PZ |
6436 | } |
6437 | } | |
3c474b32 PZ |
6438 | |
6439 | static void sched_core_cpu_deactivate(unsigned int cpu) | |
6440 | { | |
6441 | const struct cpumask *smt_mask = cpu_smt_mask(cpu); | |
6442 | struct rq *rq = cpu_rq(cpu), *core_rq = NULL; | |
3c474b32 PZ |
6443 | int t; |
6444 | ||
7170509c | 6445 | guard(core_lock)(&cpu); |
3c474b32 PZ |
6446 | |
6447 | /* if we're the last man standing, nothing to do */ | |
6448 | if (cpumask_weight(smt_mask) == 1) { | |
6449 | WARN_ON_ONCE(rq->core != rq); | |
7170509c | 6450 | return; |
3c474b32 PZ |
6451 | } |
6452 | ||
6453 | /* if we're not the leader, nothing to do */ | |
6454 | if (rq->core != rq) | |
7170509c | 6455 | return; |
3c474b32 PZ |
6456 | |
6457 | /* find a new leader */ | |
6458 | for_each_cpu(t, smt_mask) { | |
6459 | if (t == cpu) | |
6460 | continue; | |
6461 | core_rq = cpu_rq(t); | |
6462 | break; | |
6463 | } | |
6464 | ||
6465 | if (WARN_ON_ONCE(!core_rq)) /* impossible */ | |
7170509c | 6466 | return; |
3c474b32 PZ |
6467 | |
6468 | /* copy the shared state to the new leader */ | |
4feee7d1 JD |
6469 | core_rq->core_task_seq = rq->core_task_seq; |
6470 | core_rq->core_pick_seq = rq->core_pick_seq; | |
6471 | core_rq->core_cookie = rq->core_cookie; | |
6472 | core_rq->core_forceidle_count = rq->core_forceidle_count; | |
6473 | core_rq->core_forceidle_seq = rq->core_forceidle_seq; | |
6474 | core_rq->core_forceidle_occupation = rq->core_forceidle_occupation; | |
6475 | ||
6476 | /* | |
6477 | * Accounting edge for forced idle is handled in pick_next_task(). | |
6478 | * Don't need another one here, since the hotplug thread shouldn't | |
6479 | * have a cookie. | |
6480 | */ | |
6481 | core_rq->core_forceidle_start = 0; | |
3c474b32 PZ |
6482 | |
6483 | /* install new leader */ | |
6484 | for_each_cpu(t, smt_mask) { | |
6485 | rq = cpu_rq(t); | |
6486 | rq->core = core_rq; | |
6487 | } | |
3c474b32 PZ |
6488 | } |
6489 | ||
6490 | static inline void sched_core_cpu_dying(unsigned int cpu) | |
6491 | { | |
6492 | struct rq *rq = cpu_rq(cpu); | |
6493 | ||
6494 | if (rq->core != rq) | |
6495 | rq->core = rq; | |
6496 | } | |
6497 | ||
9edeaea1 PZ |
6498 | #else /* !CONFIG_SCHED_CORE */ |
6499 | ||
6500 | static inline void sched_core_cpu_starting(unsigned int cpu) {} | |
3c474b32 PZ |
6501 | static inline void sched_core_cpu_deactivate(unsigned int cpu) {} |
6502 | static inline void sched_core_cpu_dying(unsigned int cpu) {} | |
9edeaea1 | 6503 | |
539f6512 PZ |
6504 | static struct task_struct * |
6505 | pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf) | |
6506 | { | |
6507 | return __pick_next_task(rq, prev, rf); | |
6508 | } | |
6509 | ||
9edeaea1 PZ |
6510 | #endif /* CONFIG_SCHED_CORE */ |
6511 | ||
b4bfa3fc TG |
6512 | /* |
6513 | * Constants for the sched_mode argument of __schedule(). | |
6514 | * | |
6515 | * The mode argument allows RT enabled kernels to differentiate a | |
6516 | * preemption from blocking on an 'sleeping' spin/rwlock. Note that | |
6517 | * SM_MASK_PREEMPT for !RT has all bits set, which allows the compiler to | |
6518 | * optimize the AND operation out and just check for zero. | |
6519 | */ | |
6520 | #define SM_NONE 0x0 | |
6521 | #define SM_PREEMPT 0x1 | |
6991436c TG |
6522 | #define SM_RTLOCK_WAIT 0x2 |
6523 | ||
6524 | #ifndef CONFIG_PREEMPT_RT | |
6525 | # define SM_MASK_PREEMPT (~0U) | |
6526 | #else | |
6527 | # define SM_MASK_PREEMPT SM_PREEMPT | |
6528 | #endif | |
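/*
 * Editor's sketch (not part of the original source): on !PREEMPT_RT,
 * SM_MASK_PREEMPT == ~0U, so the test in __schedule(),
 *
 *	if (!(sched_mode & SM_MASK_PREEMPT) && prev_state)
 *
 * folds to 'if (!sched_mode && prev_state)': only SM_NONE (zero) takes
 * the voluntary-sleep path and the AND vanishes at compile time.
 */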
b4bfa3fc | 6529 | |
dd41f596 | 6530 | /* |
c259e01a | 6531 | * __schedule() is the main scheduler function. |
edde96ea PE |
6532 | * |
6533 | * The main means of driving the scheduler and thus entering this function are: | |
6534 | * | |
6535 | * 1. Explicit blocking: mutex, semaphore, waitqueue, etc. | |
6536 | * | |
6537 | * 2. TIF_NEED_RESCHED flag is checked on interrupt and userspace return | |
6538 | * paths. For example, see arch/x86/entry/entry_64.S.
6539 | * | |
6540 | * To drive preemption between tasks, the scheduler sets the flag in the
6541 | * timer interrupt handler scheduler_tick().
6542 | * | |
6543 | * 3. Wakeups don't really cause entry into schedule(). They add a | |
6544 | * task to the run-queue and that's it. | |
6545 | * | |
6546 | * Now, if the new task added to the run-queue preempts the current | |
6547 | * task, then the wakeup sets TIF_NEED_RESCHED and schedule() gets | |
6548 | * called on the nearest possible occasion: | |
6549 | * | |
c1a280b6 | 6550 | * - If the kernel is preemptible (CONFIG_PREEMPTION=y): |
edde96ea PE |
6551 | * |
6552 | * - in syscall or exception context, at the next outermost
6553 | * preempt_enable(). (this might be as soon as the wake_up()'s | |
6554 | * spin_unlock()!) | |
6555 | * | |
6556 | * - in IRQ context, return from interrupt-handler to | |
6557 | * preemptible context | |
6558 | * | |
c1a280b6 | 6559 | * - If the kernel is not preemptible (CONFIG_PREEMPTION is not set) |
edde96ea PE |
6560 | * then at the next: |
6561 | * | |
6562 | * - cond_resched() call | |
6563 | * - explicit schedule() call | |
6564 | * - return from syscall or exception to user-space | |
6565 | * - return from interrupt-handler to user-space | |
bfd9b2b5 | 6566 | * |
b30f0e3f | 6567 | * WARNING: must be called with preemption disabled! |
dd41f596 | 6568 | */ |
b4bfa3fc | 6569 | static void __sched notrace __schedule(unsigned int sched_mode) |
dd41f596 IM |
6570 | { |
6571 | struct task_struct *prev, *next; | |
67ca7bde | 6572 | unsigned long *switch_count; |
dbfb089d | 6573 | unsigned long prev_state; |
d8ac8971 | 6574 | struct rq_flags rf; |
dd41f596 | 6575 | struct rq *rq; |
31656519 | 6576 | int cpu; |
dd41f596 | 6577 | |
dd41f596 IM |
6578 | cpu = smp_processor_id(); |
6579 | rq = cpu_rq(cpu); | |
dd41f596 | 6580 | prev = rq->curr; |
dd41f596 | 6581 | |
b4bfa3fc | 6582 | schedule_debug(prev, !!sched_mode); |
1da177e4 | 6583 | |
e0ee463c | 6584 | if (sched_feat(HRTICK) || sched_feat(HRTICK_DL)) |
f333fdc9 | 6585 | hrtick_clear(rq); |
8f4d37ec | 6586 | |
46a5d164 | 6587 | local_irq_disable(); |
b4bfa3fc | 6588 | rcu_note_context_switch(!!sched_mode); |
46a5d164 | 6589 | |
e0acd0a6 ON |
6590 | /* |
6591 | * Make sure that signal_pending_state()->signal_pending() below | |
6592 | * can't be reordered with __set_current_state(TASK_INTERRUPTIBLE) | |
dbfb089d PZ |
6593 | * done by the caller to avoid the race with signal_wake_up(): |
6594 | * | |
6595 | * __set_current_state(@state) signal_wake_up() | |
6596 | * schedule() set_tsk_thread_flag(p, TIF_SIGPENDING) | |
6597 | * wake_up_state(p, state) | |
6598 | * LOCK rq->lock LOCK p->pi_state | |
6599 | * smp_mb__after_spinlock() smp_mb__after_spinlock() | |
6600 | * if (signal_pending_state()) if (p->state & @state) | |
306e0604 | 6601 | * |
dbfb089d | 6602 | * Also, the membarrier system call requires a full memory barrier |
306e0604 | 6603 | * after coming from user-space, before storing to rq->curr. |
e0acd0a6 | 6604 | */ |
8a8c69c3 | 6605 | rq_lock(rq, &rf); |
d89e588c | 6606 | smp_mb__after_spinlock(); |
1da177e4 | 6607 | |
d1ccc66d IM |
6608 | /* Promote REQ to ACT */ |
6609 | rq->clock_update_flags <<= 1; | |
bce4dc80 | 6610 | update_rq_clock(rq); |
5ebde09d | 6611 | rq->clock_update_flags = RQCF_UPDATED; |
9edfbfed | 6612 | |
246d86b5 | 6613 | switch_count = &prev->nivcsw; |
d136122f | 6614 | |
dbfb089d | 6615 | /* |
d136122f | 6616 | * We must load prev->state once (task_struct::state is volatile), such |
2500ad1c | 6617 | * that we form a control dependency vs deactivate_task() below. |
dbfb089d | 6618 | */ |
2f064a59 | 6619 | prev_state = READ_ONCE(prev->__state); |
b4bfa3fc | 6620 | if (!(sched_mode & SM_MASK_PREEMPT) && prev_state) { |
dbfb089d | 6621 | if (signal_pending_state(prev_state, prev)) { |
2f064a59 | 6622 | WRITE_ONCE(prev->__state, TASK_RUNNING); |
21aa9af0 | 6623 | } else { |
dbfb089d PZ |
6624 | prev->sched_contributes_to_load = |
6625 | (prev_state & TASK_UNINTERRUPTIBLE) && | |
6626 | !(prev_state & TASK_NOLOAD) && | |
f5d39b02 | 6627 | !(prev_state & TASK_FROZEN); |
dbfb089d PZ |
6628 | |
6629 | if (prev->sched_contributes_to_load) | |
6630 | rq->nr_uninterruptible++; | |
6631 | ||
6632 | /* | |
6633 | * __schedule() ttwu() | |
d136122f PZ |
6634 | * prev_state = prev->state; if (p->on_rq && ...) |
6635 | * if (prev_state) goto out; | |
6636 | * p->on_rq = 0; smp_acquire__after_ctrl_dep(); | |
6637 | * p->state = TASK_WAKING | |
6638 | * | |
6639 | * Where __schedule() and ttwu() have matching control dependencies. | |
dbfb089d PZ |
6640 | * |
6641 | * After this, schedule() must not care about p->state any more. | |
6642 | */ | |
bce4dc80 | 6643 | deactivate_task(rq, prev, DEQUEUE_SLEEP | DEQUEUE_NOCLOCK); |
2acca55e | 6644 | |
e33a9bba TH |
6645 | if (prev->in_iowait) { |
6646 | atomic_inc(&rq->nr_iowait); | |
6647 | delayacct_blkio_start(); | |
6648 | } | |
21aa9af0 | 6649 | } |
dd41f596 | 6650 | switch_count = &prev->nvcsw; |
1da177e4 LT |
6651 | } |
6652 | ||
d8ac8971 | 6653 | next = pick_next_task(rq, prev, &rf); |
f26f9aff | 6654 | clear_tsk_need_resched(prev); |
f27dde8d | 6655 | clear_preempt_need_resched(); |
c006fac5 PT |
6656 | #ifdef CONFIG_SCHED_DEBUG |
6657 | rq->last_seen_need_resched_ns = 0; | |
6658 | #endif | |
1da177e4 | 6659 | |
1da177e4 | 6660 | if (likely(prev != next)) { |
1da177e4 | 6661 | rq->nr_switches++; |
5311a98f EB |
6662 | /* |
6663 | * RCU users of rcu_dereference(rq->curr) may not see | |
6664 | * changes to task_struct made by pick_next_task(). | |
6665 | */ | |
6666 | RCU_INIT_POINTER(rq->curr, next); | |
22e4ebb9 MD |
6667 | /* |
6668 | * The membarrier system call requires each architecture | |
6669 | * to have a full memory barrier after updating | |
306e0604 MD |
6670 | * rq->curr, before returning to user-space. |
6671 | * | |
6672 | * Here are the schemes providing that barrier on the | |
6673 | * various architectures: | |
6674 | * - mm ? switch_mm() : mmdrop() for x86, s390, sparc, PowerPC. | |
6675 | * switch_mm() relies on membarrier_arch_switch_mm() on PowerPC.
6676 | * - finish_lock_switch() for weakly-ordered | |
6677 | * architectures where spin_unlock is a full barrier, | |
6678 | * - switch_to() for arm64 (weakly-ordered, spin_unlock | |
6679 | * is a RELEASE barrier), | |
22e4ebb9 | 6680 | */ |
1da177e4 LT |
6681 | ++*switch_count; |
6682 | ||
af449901 | 6683 | migrate_disable_switch(rq, prev); |
b05e75d6 JW |
6684 | psi_sched_switch(prev, next, !task_on_rq_queued(prev)); |
6685 | ||
9c2136be | 6686 | trace_sched_switch(sched_mode & SM_MASK_PREEMPT, prev, next, prev_state); |
d1ccc66d IM |
6687 | |
6688 | /* Also unlocks the rq: */ | |
6689 | rq = context_switch(rq, prev, next, &rf); | |
cbce1a68 | 6690 | } else { |
565790d2 PZ |
6691 | rq_unpin_lock(rq, &rf); |
6692 | __balance_callbacks(rq); | |
5cb9eaa3 | 6693 | raw_spin_rq_unlock_irq(rq); |
565790d2 | 6694 | } |
1da177e4 | 6695 | } |
c259e01a | 6696 | |
9af6528e PZ |
6697 | void __noreturn do_task_dead(void) |
6698 | { | |
d1ccc66d | 6699 | /* Causes final put_task_struct in finish_task_switch(): */ |
b5bf9a90 | 6700 | set_special_state(TASK_DEAD); |
d1ccc66d IM |
6701 | |
6702 | /* Tell freezer to ignore us: */ | |
6703 | current->flags |= PF_NOFREEZE; | |
6704 | ||
b4bfa3fc | 6705 | __schedule(SM_NONE); |
9af6528e | 6706 | BUG(); |
d1ccc66d IM |
6707 | |
6708 | /* Avoid "noreturn function does return" - but don't continue if BUG() is a NOP: */ | |
9af6528e | 6709 | for (;;) |
d1ccc66d | 6710 | cpu_relax(); |
9af6528e PZ |
6711 | } |
6712 | ||
9c40cef2 TG |
6713 | static inline void sched_submit_work(struct task_struct *tsk) |
6714 | { | |
28bc55f6 | 6715 | static DEFINE_WAIT_OVERRIDE_MAP(sched_map, LD_WAIT_CONFIG); |
c1cecf88 SAS |
6716 | unsigned int task_flags; |
6717 | ||
28bc55f6 PZ |
6718 | /* |
6719 | * Establish LD_WAIT_CONFIG context to ensure none of the code called | |
6720 | * will use a blocking primitive -- which would lead to recursion. | |
6721 | */ | |
6722 | lock_map_acquire_try(&sched_map); | |
6723 | ||
c1cecf88 | 6724 | task_flags = tsk->flags; |
6d25be57 | 6725 | /* |
b945efcd TG |
6726 | * If a worker goes to sleep, notify and ask workqueue whether it |
6727 | * wants to wake up a task to maintain concurrency. | |
6d25be57 | 6728 | */ |
3eafe225 WJ |
6729 | if (task_flags & PF_WQ_WORKER) |
6730 | wq_worker_sleeping(tsk); | |
6731 | else if (task_flags & PF_IO_WORKER) | |
6732 | io_wq_worker_sleeping(tsk); | |
6d25be57 | 6733 | |
401e4963 JK |
6734 | /* |
6735 | * spinlock and rwlock must not flush block requests. This will | |
6736 | * deadlock if the callback attempts to acquire a lock which is | |
6737 | * already acquired. | |
6738 | */ | |
6739 | SCHED_WARN_ON(current->__state & TASK_RTLOCK_WAIT); | |
b0fdc013 | 6740 | |
9c40cef2 TG |
6741 | /* |
6742 | * If we are going to sleep and we have plugged IO queued, | |
6743 | * make sure to submit it to avoid deadlocks. | |
6744 | */ | |
aa8dccca | 6745 | blk_flush_plug(tsk->plug, true); |
28bc55f6 PZ |
6746 | |
6747 | lock_map_release(&sched_map); | |
9c40cef2 TG |
6748 | } |
6749 | ||
6d25be57 TG |
6750 | static void sched_update_worker(struct task_struct *tsk) |
6751 | { | |
771b53d0 JA |
6752 | if (tsk->flags & (PF_WQ_WORKER | PF_IO_WORKER)) { |
6753 | if (tsk->flags & PF_WQ_WORKER) | |
6754 | wq_worker_running(tsk); | |
6755 | else | |
6756 | io_wq_worker_running(tsk); | |
6757 | } | |
6d25be57 TG |
6758 | } |
6759 | ||
de1474b4 | 6760 | static __always_inline void __schedule_loop(unsigned int sched_mode) |
c259e01a | 6761 | { |
bfd9b2b5 | 6762 | do { |
b30f0e3f | 6763 | preempt_disable(); |
de1474b4 | 6764 | __schedule(sched_mode); |
b30f0e3f | 6765 | sched_preempt_enable_no_resched(); |
bfd9b2b5 | 6766 | } while (need_resched()); |
de1474b4 TG |
6767 | } |
6768 | ||
6769 | asmlinkage __visible void __sched schedule(void) | |
6770 | { | |
6771 | struct task_struct *tsk = current; | |
6772 | ||
6b596e62 PZ |
6773 | #ifdef CONFIG_RT_MUTEXES |
6774 | lockdep_assert(!tsk->sched_rt_mutex); | |
6775 | #endif | |
6776 | ||
6777 | if (!task_is_running(tsk)) | |
6778 | sched_submit_work(tsk); | |
de1474b4 | 6779 | __schedule_loop(SM_NONE); |
6d25be57 | 6780 | sched_update_worker(tsk); |
c259e01a | 6781 | } |
1da177e4 LT |
6782 | EXPORT_SYMBOL(schedule); |
6783 | ||
8663effb SRV |
6784 | /* |
6785 | * synchronize_rcu_tasks() makes sure that no task is stuck in preempted | |
6786 | * state (i.e., has been scheduled out involuntarily) by making sure that all
6787 | * tasks have either left the run queue or have gone into user space.
6788 | * As idle tasks do not do either, they must not ever be preempted
6789 | * (scheduled out involuntarily).
6790 | * | |
6791 | * schedule_idle() is similar to schedule_preempt_disabled() except that it
6792 | * never enables preemption because it does not call sched_submit_work(). | |
6793 | */ | |
6794 | void __sched schedule_idle(void) | |
6795 | { | |
6796 | /* | |
6797 | * As this skips calling sched_submit_work(), which the idle task does | |
6798 | * regardless because that function is a nop when the task is in a | |
6799 | * TASK_RUNNING state, make sure this isn't used somewhere the
6800 | * current task can be in any other state. Note that idle is always in the
6801 | * TASK_RUNNING state. | |
6802 | */ | |
2f064a59 | 6803 | WARN_ON_ONCE(current->__state); |
8663effb | 6804 | do { |
b4bfa3fc | 6805 | __schedule(SM_NONE); |
8663effb SRV |
6806 | } while (need_resched()); |
6807 | } | |
6808 | ||
24a9c541 | 6809 | #if defined(CONFIG_CONTEXT_TRACKING_USER) && !defined(CONFIG_HAVE_CONTEXT_TRACKING_USER_OFFSTACK) |
722a9f92 | 6810 | asmlinkage __visible void __sched schedule_user(void) |
20ab65e3 FW |
6811 | { |
6812 | /* | |
6813 | * If we come here after a random call to set_need_resched(), | |
6814 | * or we have been woken up remotely but the IPI has not yet arrived, | |
6815 | * we haven't yet exited the RCU idle mode. Do it here manually until | |
6816 | * we find a better solution. | |
7cc78f8f AL |
6817 | * |
6818 | * NB: There are buggy callers of this function. Ideally we | |
c467ea76 | 6819 | * should warn if prev_state != CONTEXT_USER, but that will trigger |
7cc78f8f | 6820 | * too frequently to make sense yet. |
20ab65e3 | 6821 | */ |
7cc78f8f | 6822 | enum ctx_state prev_state = exception_enter(); |
20ab65e3 | 6823 | schedule(); |
7cc78f8f | 6824 | exception_exit(prev_state); |
20ab65e3 FW |
6825 | } |
6826 | #endif | |
6827 | ||
c5491ea7 TG |
6828 | /** |
6829 | * schedule_preempt_disabled - called with preemption disabled | |
6830 | * | |
6831 | * Returns with preemption disabled. Note: preempt_count must be 1 | |
6832 | */ | |
6833 | void __sched schedule_preempt_disabled(void) | |
6834 | { | |
ba74c144 | 6835 | sched_preempt_enable_no_resched(); |
c5491ea7 TG |
6836 | schedule(); |
6837 | preempt_disable(); | |
6838 | } | |
6839 | ||
6991436c TG |
6840 | #ifdef CONFIG_PREEMPT_RT |
6841 | void __sched notrace schedule_rtlock(void) | |
6842 | { | |
de1474b4 | 6843 | __schedule_loop(SM_RTLOCK_WAIT); |
6991436c TG |
6844 | } |
6845 | NOKPROBE_SYMBOL(schedule_rtlock); | |
6846 | #endif | |
6847 | ||
06b1f808 | 6848 | static void __sched notrace preempt_schedule_common(void) |
a18b5d01 FW |
6849 | { |
6850 | do { | |
47252cfb SR |
6851 | /* |
6852 | * Because the function tracer can trace preempt_count_sub() | |
6853 | * and it also uses preempt_enable/disable_notrace(), if | |
6854 | * NEED_RESCHED is set, the preempt_enable_notrace() called | |
6855 | * by the function tracer will call this function again and | |
6856 | * cause infinite recursion. | |
6857 | * | |
6858 | * Preemption must be disabled here before the function | |
6859 | * tracer can trace. Break up preempt_disable() into two | |
6860 | * calls. One to disable preemption without fear of being | |
6861 | * traced. The other to still record the preemption latency, | |
6862 | * which can also be traced by the function tracer. | |
6863 | */ | |
499d7955 | 6864 | preempt_disable_notrace(); |
47252cfb | 6865 | preempt_latency_start(1); |
b4bfa3fc | 6866 | __schedule(SM_PREEMPT); |
47252cfb | 6867 | preempt_latency_stop(1); |
499d7955 | 6868 | preempt_enable_no_resched_notrace(); |
a18b5d01 FW |
6869 | |
6870 | /* | |
6871 | * Check again in case we missed a preemption opportunity | |
6872 | * between schedule and now. | |
6873 | */ | |
a18b5d01 FW |
6874 | } while (need_resched()); |
6875 | } | |
6876 | ||
c1a280b6 | 6877 | #ifdef CONFIG_PREEMPTION |
1da177e4 | 6878 | /* |
a49b4f40 VS |
6879 | * This is the entry point to schedule() from in-kernel preemption |
6880 | * off of preempt_enable. | |
1da177e4 | 6881 | */ |
722a9f92 | 6882 | asmlinkage __visible void __sched notrace preempt_schedule(void) |
1da177e4 | 6883 | { |
1da177e4 LT |
6884 | /* |
6885 | * If there is a non-zero preempt_count or interrupts are disabled, | |
41a2d6cf | 6886 | * we do not want to preempt the current task. Just return.
1da177e4 | 6887 | */ |
fbb00b56 | 6888 | if (likely(!preemptible())) |
1da177e4 | 6889 | return; |
a18b5d01 | 6890 | preempt_schedule_common(); |
1da177e4 | 6891 | } |
376e2424 | 6892 | NOKPROBE_SYMBOL(preempt_schedule); |
1da177e4 | 6893 | EXPORT_SYMBOL(preempt_schedule); |
009f60e2 | 6894 | |
2c9a98d3 | 6895 | #ifdef CONFIG_PREEMPT_DYNAMIC |
99cf983c | 6896 | #if defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL) |
8a69fe0b MR |
6897 | #ifndef preempt_schedule_dynamic_enabled |
6898 | #define preempt_schedule_dynamic_enabled preempt_schedule | |
6899 | #define preempt_schedule_dynamic_disabled NULL | |
6900 | #endif | |
6901 | DEFINE_STATIC_CALL(preempt_schedule, preempt_schedule_dynamic_enabled); | |
ef72661e | 6902 | EXPORT_STATIC_CALL_TRAMP(preempt_schedule); |
99cf983c MR |
6903 | #elif defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY) |
6904 | static DEFINE_STATIC_KEY_TRUE(sk_dynamic_preempt_schedule); | |
6905 | void __sched notrace dynamic_preempt_schedule(void) | |
6906 | { | |
6907 | if (!static_branch_unlikely(&sk_dynamic_preempt_schedule)) | |
6908 | return; | |
6909 | preempt_schedule(); | |
6910 | } | |
6911 | NOKPROBE_SYMBOL(dynamic_preempt_schedule); | |
6912 | EXPORT_SYMBOL(dynamic_preempt_schedule); | |
6913 | #endif | |
2c9a98d3 | 6914 | #endif |
2c9a98d3 | 6915 | |
009f60e2 | 6916 | /** |
4eaca0a8 | 6917 | * preempt_schedule_notrace - preempt_schedule called by tracing |
009f60e2 ON |
6918 | * |
6919 | * The tracing infrastructure uses preempt_enable_notrace to prevent | |
6920 | * recursion and tracing preempt enabling caused by the tracing | |
6921 | * infrastructure itself. But as tracing can happen in areas coming | |
6922 | * from userspace or just about to enter userspace, a preempt enable | |
6923 | * can occur before user_exit() is called. This will cause the scheduler | |
6924 | * to be called when the system is still in usermode. | |
6925 | * | |
6926 | * To prevent this, the preempt_enable_notrace will use this function | |
6927 | * instead of preempt_schedule() to exit user context if needed before | |
6928 | * calling the scheduler. | |
6929 | */ | |
4eaca0a8 | 6930 | asmlinkage __visible void __sched notrace preempt_schedule_notrace(void) |
009f60e2 ON |
6931 | { |
6932 | enum ctx_state prev_ctx; | |
6933 | ||
6934 | if (likely(!preemptible())) | |
6935 | return; | |
6936 | ||
6937 | do { | |
47252cfb SR |
6938 | /* |
6939 | * Because the function tracer can trace preempt_count_sub() | |
6940 | * and it also uses preempt_enable/disable_notrace(), if | |
6941 | * NEED_RESCHED is set, the preempt_enable_notrace() called | |
6942 | * by the function tracer will call this function again and | |
6943 | * cause infinite recursion. | |
6944 | * | |
6945 | * Preemption must be disabled here before the function | |
6946 | * tracer can trace. Break up preempt_disable() into two | |
6947 | * calls. One to disable preemption without fear of being | |
6948 | * traced. The other to still record the preemption latency, | |
6949 | * which can also be traced by the function tracer. | |
6950 | */ | |
3d8f74dd | 6951 | preempt_disable_notrace(); |
47252cfb | 6952 | preempt_latency_start(1); |
009f60e2 ON |
6953 | /* |
6954 | * Needs preempt disabled in case user_exit() is traced | |
6955 | * and the tracer calls preempt_enable_notrace() causing | |
6956 | * an infinite recursion. | |
6957 | */ | |
6958 | prev_ctx = exception_enter(); | |
b4bfa3fc | 6959 | __schedule(SM_PREEMPT); |
009f60e2 ON |
6960 | exception_exit(prev_ctx); |
6961 | ||
47252cfb | 6962 | preempt_latency_stop(1); |
3d8f74dd | 6963 | preempt_enable_no_resched_notrace(); |
009f60e2 ON |
6964 | } while (need_resched()); |
6965 | } | |
4eaca0a8 | 6966 | EXPORT_SYMBOL_GPL(preempt_schedule_notrace); |
009f60e2 | 6967 | |
2c9a98d3 | 6968 | #ifdef CONFIG_PREEMPT_DYNAMIC |
99cf983c | 6969 | #if defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL) |
8a69fe0b MR |
6970 | #ifndef preempt_schedule_notrace_dynamic_enabled |
6971 | #define preempt_schedule_notrace_dynamic_enabled preempt_schedule_notrace | |
6972 | #define preempt_schedule_notrace_dynamic_disabled NULL | |
2c9a98d3 | 6973 | #endif |
8a69fe0b | 6974 | DEFINE_STATIC_CALL(preempt_schedule_notrace, preempt_schedule_notrace_dynamic_enabled); |
ef72661e | 6975 | EXPORT_STATIC_CALL_TRAMP(preempt_schedule_notrace); |
99cf983c MR |
6976 | #elif defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY) |
6977 | static DEFINE_STATIC_KEY_TRUE(sk_dynamic_preempt_schedule_notrace); | |
6978 | void __sched notrace dynamic_preempt_schedule_notrace(void) | |
c597bfdd | 6979 | { |
99cf983c MR |
6980 | if (!static_branch_unlikely(&sk_dynamic_preempt_schedule_notrace)) |
6981 | return; | |
6982 | preempt_schedule_notrace(); | |
c597bfdd | 6983 | } |
99cf983c MR |
6984 | NOKPROBE_SYMBOL(dynamic_preempt_schedule_notrace); |
6985 | EXPORT_SYMBOL(dynamic_preempt_schedule_notrace); | |
6986 | #endif | |
2c9a98d3 | 6987 | #endif |
c597bfdd | 6988 | |
c1a280b6 | 6989 | #endif /* CONFIG_PREEMPTION */ |
826bfeb3 | 6990 | |
1da177e4 | 6991 | /* |
a49b4f40 | 6992 | * This is the entry point to schedule() from kernel preemption |
1da177e4 LT |
6993 | * off of irq context. |
6994 | * Note that this is called and returns with IRQs disabled. This will
6995 | * protect us against recursive calling from irq. | |
6996 | */ | |
722a9f92 | 6997 | asmlinkage __visible void __sched preempt_schedule_irq(void) |
1da177e4 | 6998 | { |
b22366cd | 6999 | enum ctx_state prev_state; |
6478d880 | 7000 | |
2ed6e34f | 7001 | /* Catch callers which need to be fixed */ |
f27dde8d | 7002 | BUG_ON(preempt_count() || !irqs_disabled()); |
1da177e4 | 7003 | |
b22366cd FW |
7004 | prev_state = exception_enter(); |
7005 | ||
3a5c359a | 7006 | do { |
3d8f74dd | 7007 | preempt_disable(); |
3a5c359a | 7008 | local_irq_enable(); |
b4bfa3fc | 7009 | __schedule(SM_PREEMPT); |
3a5c359a | 7010 | local_irq_disable(); |
3d8f74dd | 7011 | sched_preempt_enable_no_resched(); |
5ed0cec0 | 7012 | } while (need_resched()); |
b22366cd FW |
7013 | |
7014 | exception_exit(prev_state); | |
1da177e4 LT |
7015 | } |
7016 | ||
ac6424b9 | 7017 | int default_wake_function(wait_queue_entry_t *curr, unsigned mode, int wake_flags, |
95cdf3b7 | 7018 | void *key) |
1da177e4 | 7019 | { |
6f63904c | 7020 | WARN_ON_ONCE(IS_ENABLED(CONFIG_SCHED_DEBUG) && wake_flags & ~(WF_SYNC|WF_CURRENT_CPU)); |
63859d4f | 7021 | return try_to_wake_up(curr->private, mode, wake_flags); |
1da177e4 | 7022 | } |
1da177e4 LT |
7023 | EXPORT_SYMBOL(default_wake_function); |
7024 | ||
f558c2b8 PZ |
7025 | static void __setscheduler_prio(struct task_struct *p, int prio) |
7026 | { | |
7027 | if (dl_prio(prio)) | |
7028 | p->sched_class = &dl_sched_class; | |
7029 | else if (rt_prio(prio)) | |
7030 | p->sched_class = &rt_sched_class; | |
7031 | else | |
7032 | p->sched_class = &fair_sched_class; | |
7033 | ||
7034 | p->prio = prio; | |
7035 | } | |
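/*
 * Editor's sketch (not part of the original source), using the usual
 * kernel constants: dl_prio() matches prio < MAX_DL_PRIO (0); with
 * deadline excluded first, rt_prio() effectively covers 0..99
 * (< MAX_RT_PRIO); everything else (100..139, i.e. nice -20..19) falls
 * through to the fair class.
 */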
7036 | ||
b29739f9 IM |
7037 | #ifdef CONFIG_RT_MUTEXES |
7038 | ||
6b596e62 PZ |
7039 | /* |
7040 | * Would be more useful with typeof()/auto_type but they don't mix with | |
7041 | * bit-fields. Since it's a local thing, use int. Keep the generic-sounding
7042 | * name such that if someone were to implement this function we get to compare | |
7043 | * notes. | |
7044 | */ | |
7045 | #define fetch_and_set(x, v) ({ int _x = (x); (x) = (v); _x; }) | |
7046 | ||
7047 | void rt_mutex_pre_schedule(void) | |
7048 | { | |
7049 | lockdep_assert(!fetch_and_set(current->sched_rt_mutex, 1)); | |
7050 | sched_submit_work(current); | |
7051 | } | |
7052 | ||
7053 | void rt_mutex_schedule(void) | |
7054 | { | |
7055 | lockdep_assert(current->sched_rt_mutex); | |
7056 | __schedule_loop(SM_NONE); | |
7057 | } | |
7058 | ||
7059 | void rt_mutex_post_schedule(void) | |
7060 | { | |
7061 | sched_update_worker(current); | |
7062 | lockdep_assert(fetch_and_set(current->sched_rt_mutex, 0)); | |
7063 | } | |
7064 | ||
acd58620 PZ |
7065 | static inline int __rt_effective_prio(struct task_struct *pi_task, int prio) |
7066 | { | |
7067 | if (pi_task) | |
7068 | prio = min(prio, pi_task->prio); | |
7069 | ||
7070 | return prio; | |
7071 | } | |
7072 | ||
7073 | static inline int rt_effective_prio(struct task_struct *p, int prio) | |
7074 | { | |
7075 | struct task_struct *pi_task = rt_mutex_get_top_task(p); | |
7076 | ||
7077 | return __rt_effective_prio(pi_task, prio); | |
7078 | } | |
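/*
 * Editor's sketch (not part of the original source): priorities are
 * "lower value == higher priority". If a CFS task at prio 120 owns a
 * mutex that a FIFO waiter at prio 50 blocks on, the owner is boosted
 * to min(120, 50) == 50 until it unlocks and is de-boosted.
 */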
7079 | ||
b29739f9 IM |
7080 | /* |
7081 | * rt_mutex_setprio - set the current priority of a task | |
acd58620 PZ |
7082 | * @p: task to boost |
7083 | * @pi_task: donor task | |
b29739f9 IM |
7084 | * |
7085 | * This function changes the 'effective' priority of a task. It does | |
7086 | * not touch ->normal_prio like __setscheduler(). | |
7087 | * | |
c365c292 TG |
7088 | * Used by the rt_mutex code to implement priority inheritance |
7089 | * logic. Call site only calls if the priority of the task changed. | |
b29739f9 | 7090 | */ |
acd58620 | 7091 | void rt_mutex_setprio(struct task_struct *p, struct task_struct *pi_task) |
b29739f9 | 7092 | { |
acd58620 | 7093 | int prio, oldprio, queued, running, queue_flag = |
7a57f32a | 7094 | DEQUEUE_SAVE | DEQUEUE_MOVE | DEQUEUE_NOCLOCK; |
83ab0aa0 | 7095 | const struct sched_class *prev_class; |
eb580751 PZ |
7096 | struct rq_flags rf; |
7097 | struct rq *rq; | |
b29739f9 | 7098 | |
acd58620 PZ |
7099 | /* XXX used to be waiter->prio, not waiter->task->prio */ |
7100 | prio = __rt_effective_prio(pi_task, p->normal_prio); | |
7101 | ||
7102 | /* | |
7103 | * If nothing changed, bail early.
7104 | */ | |
7105 | if (p->pi_top_task == pi_task && prio == p->prio && !dl_prio(prio)) | |
7106 | return; | |
b29739f9 | 7107 | |
eb580751 | 7108 | rq = __task_rq_lock(p, &rf); |
80f5c1b8 | 7109 | update_rq_clock(rq); |
acd58620 PZ |
7110 | /* |
7111 | * Set under pi_lock && rq->lock, such that the value can be used under | |
7112 | * either lock. | |
7113 | * | |
7114 | * Note that it takes loads of trickery to make this pointer cache work
7115 | * right. rt_mutex_slowunlock()+rt_mutex_postunlock() work together to | |
7116 | * ensure a task is de-boosted (pi_task is set to NULL) before the | |
7117 | * task is allowed to run again (and can exit). This ensures the pointer | |
b19a888c | 7118 | * points to a blocked task -- which guarantees the task is present. |
acd58620 PZ |
7119 | */ |
7120 | p->pi_top_task = pi_task; | |
7121 | ||
7122 | /* | |
7123 | * For FIFO/RR we only need to set prio; if that matches, we're done.
7124 | */ | |
7125 | if (prio == p->prio && !dl_prio(prio)) | |
7126 | goto out_unlock; | |
b29739f9 | 7127 | |
1c4dd99b TG |
7128 | /* |
7129 | * Idle task boosting is a no-no in general. There is one
7130 | * exception, when PREEMPT_RT and NOHZ is active: | |
7131 | * | |
7132 | * The idle task calls get_next_timer_interrupt() and holds | |
7133 | * the timer wheel base->lock on the CPU and another CPU wants | |
7134 | * to access the timer (probably to cancel it). We can safely | |
7135 | * ignore the boosting request, as the idle CPU runs this code | |
7136 | * with interrupts disabled and will complete the lock | |
7137 | * protected section without being interrupted. So there is no | |
7138 | * real need to boost. | |
7139 | */ | |
7140 | if (unlikely(p == rq->idle)) { | |
7141 | WARN_ON(p != rq->curr); | |
7142 | WARN_ON(p->pi_blocked_on); | |
7143 | goto out_unlock; | |
7144 | } | |
7145 | ||
b91473ff | 7146 | trace_sched_pi_setprio(p, pi_task); |
d5f9f942 | 7147 | oldprio = p->prio; |
ff77e468 PZ |
7148 | |
7149 | if (oldprio == prio) | |
7150 | queue_flag &= ~DEQUEUE_MOVE; | |
7151 | ||
83ab0aa0 | 7152 | prev_class = p->sched_class; |
da0c1e65 | 7153 | queued = task_on_rq_queued(p); |
051a1d1a | 7154 | running = task_current(rq, p); |
da0c1e65 | 7155 | if (queued) |
ff77e468 | 7156 | dequeue_task(rq, p, queue_flag); |
0e1f3483 | 7157 | if (running) |
f3cd1c4e | 7158 | put_prev_task(rq, p); |
dd41f596 | 7159 | |
2d3d891d DF |
7160 | /* |
7161 | * Boosting conditions are:
7162 | * 1. -rt task is running and holds mutex A | |
7163 | * --> -dl task blocks on mutex A | |
7164 | * | |
7165 | * 2. -dl task is running and holds mutex A | |
7166 | * --> -dl task blocks on mutex A and could preempt the | |
7167 | * running task | |
7168 | */ | |
7169 | if (dl_prio(prio)) { | |
466af29b | 7170 | if (!dl_prio(p->normal_prio) || |
740797ce JL |
7171 | (pi_task && dl_prio(pi_task->prio) && |
7172 | dl_entity_preempt(&pi_task->dl, &p->dl))) { | |
2279f540 | 7173 | p->dl.pi_se = pi_task->dl.pi_se; |
ff77e468 | 7174 | queue_flag |= ENQUEUE_REPLENISH; |
2279f540 JL |
7175 | } else { |
7176 | p->dl.pi_se = &p->dl; | |
7177 | } | |
2d3d891d DF |
7178 | } else if (rt_prio(prio)) { |
7179 | if (dl_prio(oldprio)) | |
2279f540 | 7180 | p->dl.pi_se = &p->dl; |
2d3d891d | 7181 | if (oldprio < prio) |
ff77e468 | 7182 | queue_flag |= ENQUEUE_HEAD; |
2d3d891d DF |
7183 | } else { |
7184 | if (dl_prio(oldprio)) | |
2279f540 | 7185 | p->dl.pi_se = &p->dl; |
746db944 BS |
7186 | if (rt_prio(oldprio)) |
7187 | p->rt.timeout = 0; | |
2d3d891d | 7188 | } |
dd41f596 | 7189 | |
f558c2b8 | 7190 | __setscheduler_prio(p, prio); |
b29739f9 | 7191 | |
da0c1e65 | 7192 | if (queued) |
ff77e468 | 7193 | enqueue_task(rq, p, queue_flag); |
a399d233 | 7194 | if (running) |
03b7fad1 | 7195 | set_next_task(rq, p); |
cb469845 | 7196 | |
da7a735e | 7197 | check_class_changed(rq, p, prev_class, oldprio); |
1c4dd99b | 7198 | out_unlock: |
d1ccc66d IM |
7199 | /* Avoid rq from going away on us: */ |
7200 | preempt_disable(); | |
4c9a4bc8 | 7201 | |
565790d2 PZ |
7202 | rq_unpin_lock(rq, &rf); |
7203 | __balance_callbacks(rq); | |
5cb9eaa3 | 7204 | raw_spin_rq_unlock(rq); |
565790d2 | 7205 | |
4c9a4bc8 | 7206 | preempt_enable(); |
b29739f9 | 7207 | } |
acd58620 PZ |
7208 | #else |
7209 | static inline int rt_effective_prio(struct task_struct *p, int prio) | |
7210 | { | |
7211 | return prio; | |
7212 | } | |
b29739f9 | 7213 | #endif |
d50dde5a | 7214 | |
36c8b586 | 7215 | void set_user_nice(struct task_struct *p, long nice) |
1da177e4 | 7216 | { |
49bd21ef | 7217 | bool queued, running; |
70b97a7f | 7218 | struct rq *rq; |
94b548a1 | 7219 | int old_prio; |
1da177e4 | 7220 | |
75e45d51 | 7221 | if (task_nice(p) == nice || nice < MIN_NICE || nice > MAX_NICE) |
1da177e4 LT |
7222 | return; |
7223 | /* | |
7224 | * We have to be careful, if called from sys_setpriority(), | |
7225 | * the task might be in the middle of scheduling on another CPU. | |
7226 | */ | |
94b548a1 PZ |
7227 | CLASS(task_rq_lock, rq_guard)(p); |
7228 | rq = rq_guard.rq; | |
7229 | ||
2fb8d367 PZ |
7230 | update_rq_clock(rq); |
7231 | ||
1da177e4 LT |
7232 | /* |
7233 | * The RT priorities are set via sched_setscheduler(), but we still | |
7234 | * allow the 'normal' nice value to be set - but as expected | |
b19a888c | 7235 | * it won't have any effect on scheduling while the task is
aab03e05 | 7236 | * SCHED_DEADLINE, SCHED_FIFO or SCHED_RR: |
1da177e4 | 7237 | */ |
aab03e05 | 7238 | if (task_has_dl_policy(p) || task_has_rt_policy(p)) { |
1da177e4 | 7239 | p->static_prio = NICE_TO_PRIO(nice); |
94b548a1 | 7240 | return; |
1da177e4 | 7241 | } |
94b548a1 | 7242 | |
da0c1e65 | 7243 | queued = task_on_rq_queued(p); |
49bd21ef | 7244 | running = task_current(rq, p); |
da0c1e65 | 7245 | if (queued) |
7a57f32a | 7246 | dequeue_task(rq, p, DEQUEUE_SAVE | DEQUEUE_NOCLOCK); |
49bd21ef PZ |
7247 | if (running) |
7248 | put_prev_task(rq, p); | |
1da177e4 | 7249 | |
1da177e4 | 7250 | p->static_prio = NICE_TO_PRIO(nice); |
b1e82065 | 7251 | set_load_weight(p, true); |
b29739f9 IM |
7252 | old_prio = p->prio; |
7253 | p->prio = effective_prio(p); | |
1da177e4 | 7254 | |
5443a0be | 7255 | if (queued) |
7134b3e9 | 7256 | enqueue_task(rq, p, ENQUEUE_RESTORE | ENQUEUE_NOCLOCK); |
49bd21ef | 7257 | if (running) |
03b7fad1 | 7258 | set_next_task(rq, p); |
5443a0be FW |
7259 | |
7260 | /* | |
7261 | * If the task increased its priority or is running and | |
7262 | * lowered its priority, then reschedule its CPU: | |
7263 | */ | |
7264 | p->sched_class->prio_changed(rq, p, old_prio); | |
1da177e4 | 7265 | } |
1da177e4 LT |
7266 | EXPORT_SYMBOL(set_user_nice); |
7267 | ||
e43379f1 | 7268 | /* |
700a7833 CG |
7269 | * is_nice_reduction - check if nice value is an actual reduction |
7270 | * | |
7271 | * Similar to can_nice() but does not perform a capability check. | |
7272 | * | |
e43379f1 MM |
7273 | * @p: task |
7274 | * @nice: nice value | |
7275 | */ | |
700a7833 | 7276 | static bool is_nice_reduction(const struct task_struct *p, const int nice) |
e43379f1 | 7277 | { |
d1ccc66d | 7278 | /* Convert nice value [19,-20] to rlimit style value [1,40]: */ |
7aa2c016 | 7279 | int nice_rlim = nice_to_rlimit(nice); |
48f24c4d | 7280 | |
700a7833 CG |
7281 | return (nice_rlim <= task_rlimit(p, RLIMIT_NICE)); |
7282 | } | |
7283 | ||
7284 | /* | |
7285 | * can_nice - check if a task can reduce its nice value | |
7286 | * @p: task | |
7287 | * @nice: nice value | |
7288 | */ | |
7289 | int can_nice(const struct task_struct *p, const int nice) | |
7290 | { | |
7291 | return is_nice_reduction(p, nice) || capable(CAP_SYS_NICE); | |
e43379f1 MM |
7292 | } |
7293 | ||
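For reference, a small user-space sketch of the nice-to-rlimit mapping used above. The constants mirror the kernel's MIN_NICE/MAX_NICE and nice_to_rlimit() (nice 19 maps to 1, nice -20 to 40), and the RLIMIT_NICE comparison approximates the is_nice_reduction() check without the capability fallback:

#include <stdio.h>
#include <sys/resource.h>

#define MAX_NICE  19
#define MIN_NICE -20

/* Mirrors the kernel's nice_to_rlimit(): nice 19 -> 1, nice -20 -> 40. */
static long nice_to_rlimit(long nice)
{
	return MAX_NICE - nice + 1;
}

int main(void)
{
	struct rlimit rlim;
	long nice;

	getrlimit(RLIMIT_NICE, &rlim);
	for (nice = MIN_NICE; nice <= MAX_NICE; nice += 13)
		printf("nice %3ld -> rlimit style %2ld (allowed without CAP_SYS_NICE: %s)\n",
		       nice, nice_to_rlimit(nice),
		       nice_to_rlimit(nice) <= (long)rlim.rlim_cur ? "yes" : "no");
	return 0;
}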
1da177e4 LT |
7294 | #ifdef __ARCH_WANT_SYS_NICE |
7295 | ||
7296 | /* | |
7297 | * sys_nice - change the priority of the current process. | |
7298 | * @increment: priority increment | |
7299 | * | |
7300 | * sys_setpriority is a more generic, but much slower function that | |
7301 | * does similar things. | |
7302 | */ | |
5add95d4 | 7303 | SYSCALL_DEFINE1(nice, int, increment) |
1da177e4 | 7304 | { |
48f24c4d | 7305 | long nice, retval; |
1da177e4 LT |
7306 | |
7307 | /* | |
7308 | * Setpriority might change our priority at the same moment. | |
7309 | * We don't have to worry. Conceptually one call occurs first | |
7310 | * and we have a single winner. | |
7311 | */ | |
a9467fa3 | 7312 | increment = clamp(increment, -NICE_WIDTH, NICE_WIDTH); |
d0ea0268 | 7313 | nice = task_nice(current) + increment; |
1da177e4 | 7314 | |
a9467fa3 | 7315 | nice = clamp_val(nice, MIN_NICE, MAX_NICE); |
e43379f1 MM |
7316 | if (increment < 0 && !can_nice(current, nice)) |
7317 | return -EPERM; | |
7318 | ||
1da177e4 LT |
7319 | retval = security_task_setnice(current, nice); |
7320 | if (retval) | |
7321 | return retval; | |
7322 | ||
7323 | set_user_nice(current, nice); | |
7324 | return 0; | |
7325 | } | |
7326 | ||
7327 | #endif | |
7328 | ||
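A user-space sketch of sys_nice()'s two-stage clamping, with the kernel constants copied in as assumptions: the first clamp bounds the increment to +/-NICE_WIDTH so the addition cannot run away, the second clamps the result into the valid nice range:

#include <stdio.h>

#define MIN_NICE   -20
#define MAX_NICE    19
#define NICE_WIDTH (MAX_NICE - MIN_NICE + 1)	/* 40 */

static long clamp(long v, long lo, long hi)
{
	return v < lo ? lo : (v > hi ? hi : v);
}

int main(void)
{
	long current_nice = 0;
	long increments[] = { 5, -100, 1000 };
	int i;

	for (i = 0; i < 3; i++) {
		long inc  = clamp(increments[i], -NICE_WIDTH, NICE_WIDTH);
		long nice = clamp(current_nice + inc, MIN_NICE, MAX_NICE);
		printf("increment %5ld -> nice %3ld\n", increments[i], nice);
	}
	return 0;
}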
7329 | /** | |
7330 | * task_prio - return the priority value of a given task. | |
7331 | * @p: the task in question. | |
7332 | * | |
e69f6186 | 7333 | * Return: The priority value as seen by users in /proc. |
c541bb78 DE |
7334 | * |
7335 | * sched policy return value kernel prio user prio/nice | |
7336 | * | |
7337 | * normal, batch, idle [0 ... 39] [100 ... 139] 0/[-20 ... 19] | |
7338 | * fifo, rr [-2 ... -100] [98 ... 0] [1 ... 99] | |
7339 | * deadline -101 -1 0 | |
1da177e4 | 7340 | */ |
36c8b586 | 7341 | int task_prio(const struct task_struct *p) |
1da177e4 LT |
7342 | { |
7343 | return p->prio - MAX_RT_PRIO; | |
7344 | } | |
7345 | ||
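A stand-alone sketch of this mapping, assuming MAX_RT_PRIO == 100 and the kernel's NICE_TO_PRIO() definition; it reproduces the three rows of the table above:

#include <stdio.h>

#define MAX_RT_PRIO 100
#define NICE_TO_PRIO(nice) (MAX_RT_PRIO + (nice) + 20)

/* What task_prio() computes from the kernel-internal priority. */
static int proc_prio(int kernel_prio)
{
	return kernel_prio - MAX_RT_PRIO;
}

int main(void)
{
	printf("SCHED_NORMAL, nice 0     -> %d\n", proc_prio(NICE_TO_PRIO(0)));       /* 20 */
	printf("SCHED_FIFO, user prio 50 -> %d\n", proc_prio(MAX_RT_PRIO - 1 - 50));  /* -51 */
	printf("SCHED_DEADLINE           -> %d\n", proc_prio(-1));                    /* -101 */
	return 0;
}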
1da177e4 | 7346 | /** |
d1ccc66d | 7347 | * idle_cpu - is a given CPU idle currently? |
1da177e4 | 7348 | * @cpu: the processor in question. |
e69f6186 YB |
7349 | * |
7350 | * Return: 1 if the CPU is currently idle. 0 otherwise. | |
1da177e4 LT |
7351 | */ |
7352 | int idle_cpu(int cpu) | |
7353 | { | |
908a3283 TG |
7354 | struct rq *rq = cpu_rq(cpu); |
7355 | ||
7356 | if (rq->curr != rq->idle) | |
7357 | return 0; | |
7358 | ||
7359 | if (rq->nr_running) | |
7360 | return 0; | |
7361 | ||
7362 | #ifdef CONFIG_SMP | |
126c2092 | 7363 | if (rq->ttwu_pending) |
908a3283 TG |
7364 | return 0; |
7365 | #endif | |
7366 | ||
7367 | return 1; | |
1da177e4 LT |
7368 | } |
7369 | ||
943d355d RJ |
7370 | /** |
7371 | * available_idle_cpu - is a given CPU idle for enqueuing work. | |
7372 | * @cpu: the CPU in question. | |
7373 | * | |
7374 | * Return: 1 if the CPU is currently idle. 0 otherwise. | |
7375 | */ | |
7376 | int available_idle_cpu(int cpu) | |
7377 | { | |
7378 | if (!idle_cpu(cpu)) | |
7379 | return 0; | |
7380 | ||
247f2f6f RJ |
7381 | if (vcpu_is_preempted(cpu)) |
7382 | return 0; | |
7383 | ||
908a3283 | 7384 | return 1; |
1da177e4 LT |
7385 | } |
7386 | ||
1da177e4 | 7387 | /** |
d1ccc66d | 7388 | * idle_task - return the idle task for a given CPU. |
1da177e4 | 7389 | * @cpu: the processor in question. |
e69f6186 | 7390 | * |
d1ccc66d | 7391 | * Return: The idle task for the CPU @cpu. |
1da177e4 | 7392 | */ |
36c8b586 | 7393 | struct task_struct *idle_task(int cpu) |
1da177e4 LT |
7394 | { |
7395 | return cpu_rq(cpu)->idle; | |
7396 | } | |
7397 | ||
548796e2 CZ |
7398 | #ifdef CONFIG_SCHED_CORE |
7399 | int sched_core_idle_cpu(int cpu) | |
7400 | { | |
7401 | struct rq *rq = cpu_rq(cpu); | |
7402 | ||
7403 | if (sched_core_enabled(rq) && rq->curr == rq->idle) | |
7404 | return 1; | |
7405 | ||
7406 | return idle_cpu(cpu); | |
7407 | } | |
7408 | ||
7409 | #endif | |
7410 | ||
7d6a905f VK |
7411 | #ifdef CONFIG_SMP |
7412 | /* | |
7413 | * This function computes an effective utilization for the given CPU, to be | |
7414 | * used for frequency selection given the linear relation: f = u * f_max. | |
7415 | * | |
7416 | * The scheduler tracks the following metrics: | |
7417 | * | |
7418 | * cpu_util_{cfs,rt,dl,irq}() | |
7419 | * cpu_bw_dl() | |
7420 | * | |
7421 | * Where the cfs,rt and dl util numbers are tracked with the same metric and | |
7422 | * synchronized windows and are thus directly comparable. | |
7423 | * | |
7424 | * The cfs,rt,dl utilization are the running times measured with rq->clock_task | |
7425 | * which excludes things like IRQ and steal-time. These latter are then accrued | |
7426 | * in the irq utilization. | |
7427 | * | |
7428 | * The DL bandwidth number, on the other hand, is not a measured metric but a
7429 | * value computed from the task model parameters; it gives the minimal utilization
7430 | * required to meet deadlines. | |
7431 | */ | |
a5418be9 | 7432 | unsigned long effective_cpu_util(int cpu, unsigned long util_cfs, |
bb447999 | 7433 | enum cpu_util_type type, |
7d6a905f VK |
7434 | struct task_struct *p) |
7435 | { | |
bb447999 | 7436 | unsigned long dl_util, util, irq, max; |
7d6a905f VK |
7437 | struct rq *rq = cpu_rq(cpu); |
7438 | ||
bb447999 DE |
7439 | max = arch_scale_cpu_capacity(cpu); |
7440 | ||
7d6a905f VK |
7441 | if (!uclamp_is_used() && |
7442 | type == FREQUENCY_UTIL && rt_rq_is_runnable(&rq->rt)) { | |
7443 | return max; | |
7444 | } | |
7445 | ||
7446 | /* | |
7447 | * Early check to see if IRQ/steal time saturates the CPU, can be | |
7448 | * because of inaccuracies in how we track these -- see | |
7449 | * update_irq_load_avg(). | |
7450 | */ | |
7451 | irq = cpu_util_irq(rq); | |
7452 | if (unlikely(irq >= max)) | |
7453 | return max; | |
7454 | ||
7455 | /* | |
7456 | * Because the time spent on RT/DL tasks is visible as 'lost' time to
7457 | * CFS tasks and we use the same metric to track the effective | |
7458 | * utilization (PELT windows are synchronized) we can directly add them | |
7459 | * to obtain the CPU's actual utilization. | |
7460 | * | |
7461 | * CFS and RT utilization can be boosted or capped, depending on | |
7462 | * utilization clamp constraints requested by currently RUNNABLE | |
7463 | * tasks. | |
7464 | * When there are no CFS RUNNABLE tasks, clamps are released and | |
7465 | * frequency will be gracefully reduced with the utilization decay. | |
7466 | */ | |
7467 | util = util_cfs + cpu_util_rt(rq); | |
7468 | if (type == FREQUENCY_UTIL) | |
7469 | util = uclamp_rq_util_with(rq, util, p); | |
7470 | ||
7471 | dl_util = cpu_util_dl(rq); | |
7472 | ||
7473 | /* | |
7474 | * For frequency selection we do not make cpu_util_dl() a permanent part | |
7475 | * of this sum because we want to use cpu_bw_dl() later on, but we need | |
7476 | * to check if the CFS+RT+DL sum is saturated (ie. no idle time) such | |
7477 | * that we select f_max when there is no idle time. | |
7478 | * | |
7479 | * NOTE: numerical errors or stop class might cause us to not quite hit | |
7480 | * saturation when we should -- something for later. | |
7481 | */ | |
7482 | if (util + dl_util >= max) | |
7483 | return max; | |
7484 | ||
7485 | /* | |
7486 | * OTOH, for energy computation we need the estimated running time, so | |
7487 | * include util_dl and ignore dl_bw. | |
7488 | */ | |
7489 | if (type == ENERGY_UTIL) | |
7490 | util += dl_util; | |
7491 | ||
7492 | /* | |
7493 | * There is still idle time; further improve the number by using the | |
7494 | * irq metric. Because IRQ/steal time is hidden from the task clock we | |
7495 | * need to scale the task numbers: | |
7496 | * | |
7497 | * max - irq | |
7498 | * U' = irq + --------- * U | |
7499 | * max | |
7500 | */ | |
7501 | util = scale_irq_capacity(util, irq, max); | |
7502 | util += irq; | |
7503 | ||
7504 | /* | |
7505 | * Bandwidth required by DEADLINE must always be granted while, for | |
7506 | * FAIR and RT, we use blocked utilization of IDLE CPUs as a mechanism | |
7507 | * to gracefully reduce the frequency when no tasks show up for longer | |
7508 | * periods of time. | |
7509 | * | |
7510 | * Ideally we would like to set bw_dl as min/guaranteed freq and util + | |
7511 | * bw_dl as requested freq. However, cpufreq is not yet ready for such | |
7512 | * an interface. So, we only do the latter for now. | |
7513 | */ | |
7514 | if (type == FREQUENCY_UTIL) | |
7515 | util += cpu_bw_dl(rq); | |
7516 | ||
7517 | return min(max, util); | |
7518 | } | |
a5418be9 | 7519 | |
bb447999 | 7520 | unsigned long sched_cpu_util(int cpu) |
a5418be9 | 7521 | { |
bb447999 | 7522 | return effective_cpu_util(cpu, cpu_util_cfs(cpu), ENERGY_UTIL, NULL); |
a5418be9 | 7523 | } |
7d6a905f VK |
7524 | #endif /* CONFIG_SMP */ |
7525 | ||
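A toy rendering of the U' = irq + ((max - irq) / max) * U step above; the arithmetic mirrors scale_irq_capacity(), the input numbers are made up, and everything is in the kernel's 0..1024 capacity units:

#include <stdio.h>

static unsigned long scale_irq_capacity(unsigned long util,
					unsigned long irq,
					unsigned long max)
{
	return util * (max - irq) / max;
}

int main(void)
{
	unsigned long max  = 1024;	/* arch_scale_cpu_capacity() */
	unsigned long irq  = 128;	/* IRQ/steal-time utilization */
	unsigned long util = 512;	/* CFS + RT (+ DL) utilization */

	/* Rescale the task-clock numbers, then add the IRQ time back in. */
	util = scale_irq_capacity(util, irq, max);
	util += irq;
	printf("effective util = %lu of %lu\n", util, max);	/* 576 of 1024 */
	return 0;
}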
1da177e4 LT |
7526 | /** |
7527 | * find_process_by_pid - find a process with a matching PID value. | |
7528 | * @pid: the pid in question. | |
e69f6186 YB |
7529 | * |
7530 | * The task of @pid, if found. %NULL otherwise. | |
1da177e4 | 7531 | */ |
a9957449 | 7532 | static struct task_struct *find_process_by_pid(pid_t pid) |
1da177e4 | 7533 | { |
228ebcbe | 7534 | return pid ? find_task_by_vpid(pid) : current; |
1da177e4 LT |
7535 | } |
7536 | ||
febe162d PZ |
7537 | static struct task_struct *find_get_task(pid_t pid) |
7538 | { | |
7539 | struct task_struct *p; | |
7540 | guard(rcu)(); | |
7541 | ||
7542 | p = find_process_by_pid(pid); | |
7543 | if (likely(p)) | |
7544 | get_task_struct(p); | |
7545 | ||
7546 | return p; | |
7547 | } | |
7548 | ||
7549 | DEFINE_CLASS(find_get_task, struct task_struct *, if (_T) put_task_struct(_T), | |
7550 | find_get_task(pid), pid_t pid) | |
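DEFINE_CLASS()/CLASS() come from <linux/cleanup.h>, which builds on the compiler's scope-based cleanup attribute. A user-space sketch of the underlying mechanism (names are illustrative): the destructor runs on every path out of the scope, which is what lets callers of the class below return early without leaking the task reference:

#include <stdio.h>
#include <stdlib.h>

/* Destructor: invoked automatically when the annotated variable leaves scope. */
static void free_buf(char **p)
{
	free(*p);
	printf("released\n");
}

static int use_buffer(int fail)
{
	char *buf __attribute__((cleanup(free_buf))) = malloc(64);

	if (!buf)
		return -1;
	if (fail)
		return -2;	/* early return: free_buf() still runs */

	snprintf(buf, 64, "hello");
	printf("%s\n", buf);
	return 0;
}

int main(void)
{
	use_buffer(0);
	use_buffer(1);
	return 0;
}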
7551 | ||
c13db6b1 SR |
7552 | /* |
7553 | * sched_setparam() passes in -1 for its policy, to let the functions | |
7554 | * it calls know not to change it. | |
7555 | */ | |
7556 | #define SETPARAM_POLICY -1 | |
7557 | ||
c365c292 TG |
7558 | static void __setscheduler_params(struct task_struct *p, |
7559 | const struct sched_attr *attr) | |
1da177e4 | 7560 | { |
d50dde5a DF |
7561 | int policy = attr->sched_policy; |
7562 | ||
c13db6b1 | 7563 | if (policy == SETPARAM_POLICY) |
39fd8fd2 PZ |
7564 | policy = p->policy; |
7565 | ||
1da177e4 | 7566 | p->policy = policy; |
d50dde5a | 7567 | |
aab03e05 DF |
7568 | if (dl_policy(policy)) |
7569 | __setparam_dl(p, attr); | |
39fd8fd2 | 7570 | else if (fair_policy(policy)) |
d50dde5a DF |
7571 | p->static_prio = NICE_TO_PRIO(attr->sched_nice); |
7572 | ||
39fd8fd2 PZ |
7573 | /* |
7574 | * __sched_setscheduler() ensures attr->sched_priority == 0 when | |
7575 | * !rt_policy. Always setting this ensures that things like | |
7576 | * getparam()/getattr() don't report silly values for !rt tasks. | |
7577 | */ | |
7578 | p->rt_priority = attr->sched_priority; | |
383afd09 | 7579 | p->normal_prio = normal_prio(p); |
b1e82065 | 7580 | set_load_weight(p, true); |
c365c292 | 7581 | } |
39fd8fd2 | 7582 | |
c69e8d9c | 7583 | /* |
d1ccc66d | 7584 | * Check the target process has a UID that matches the current process's: |
c69e8d9c DH |
7585 | */ |
7586 | static bool check_same_owner(struct task_struct *p) | |
7587 | { | |
7588 | const struct cred *cred = current_cred(), *pcred; | |
febe162d | 7589 | guard(rcu)(); |
c69e8d9c | 7590 | |
c69e8d9c | 7591 | pcred = __task_cred(p); |
febe162d PZ |
7592 | return (uid_eq(cred->euid, pcred->euid) || |
7593 | uid_eq(cred->euid, pcred->uid)); | |
c69e8d9c DH |
7594 | } |
7595 | ||
700a7833 CG |
7596 | /* |
7597 | * Allow unprivileged RT tasks to decrease priority. | |
7598 | * Only issue a capable test if needed and only once to avoid an audit | |
7599 | * event on permitted non-privileged operations: | |
7600 | */ | |
7601 | static int user_check_sched_setscheduler(struct task_struct *p, | |
7602 | const struct sched_attr *attr, | |
7603 | int policy, int reset_on_fork) | |
7604 | { | |
7605 | if (fair_policy(policy)) { | |
7606 | if (attr->sched_nice < task_nice(p) && | |
7607 | !is_nice_reduction(p, attr->sched_nice)) | |
7608 | goto req_priv; | |
7609 | } | |
7610 | ||
7611 | if (rt_policy(policy)) { | |
7612 | unsigned long rlim_rtprio = task_rlimit(p, RLIMIT_RTPRIO); | |
7613 | ||
7614 | /* Can't set/change the rt policy: */ | |
7615 | if (policy != p->policy && !rlim_rtprio) | |
7616 | goto req_priv; | |
7617 | ||
7618 | /* Can't increase priority: */ | |
7619 | if (attr->sched_priority > p->rt_priority && | |
7620 | attr->sched_priority > rlim_rtprio) | |
7621 | goto req_priv; | |
7622 | } | |
7623 | ||
7624 | /* | |
7625 | * Can't set/change SCHED_DEADLINE policy at all for now | |
7626 | * (safest behavior); in the future we would like to allow | |
7627 | * unprivileged DL tasks to increase their relative deadline | |
7628 | * or reduce their runtime (both ways reducing utilization) | |
7629 | */ | |
7630 | if (dl_policy(policy)) | |
7631 | goto req_priv; | |
7632 | ||
7633 | /* | |
7634 | * Treat SCHED_IDLE as nice 20. Only allow a switch to | |
7635 | * SCHED_NORMAL if the RLIMIT_NICE would normally permit it. | |
7636 | */ | |
7637 | if (task_has_idle_policy(p) && !idle_policy(policy)) { | |
7638 | if (!is_nice_reduction(p, task_nice(p))) | |
7639 | goto req_priv; | |
7640 | } | |
7641 | ||
7642 | /* Can't change other user's priorities: */ | |
7643 | if (!check_same_owner(p)) | |
7644 | goto req_priv; | |
7645 | ||
7646 | /* Normal users shall not reset the sched_reset_on_fork flag: */ | |
7647 | if (p->sched_reset_on_fork && !reset_on_fork) | |
7648 | goto req_priv; | |
7649 | ||
7650 | return 0; | |
7651 | ||
7652 | req_priv: | |
7653 | if (!capable(CAP_SYS_NICE)) | |
7654 | return -EPERM; | |
7655 | ||
7656 | return 0; | |
7657 | } | |
7658 | ||
d50dde5a DF |
7659 | static int __sched_setscheduler(struct task_struct *p, |
7660 | const struct sched_attr *attr, | |
dbc7f069 | 7661 | bool user, bool pi) |
1da177e4 | 7662 | { |
f558c2b8 PZ |
7663 | int oldpolicy = -1, policy = attr->sched_policy; |
7664 | int retval, oldprio, newprio, queued, running; | |
83ab0aa0 | 7665 | const struct sched_class *prev_class; |
8e5bad7d | 7666 | struct balance_callback *head; |
eb580751 | 7667 | struct rq_flags rf; |
ca94c442 | 7668 | int reset_on_fork; |
7a57f32a | 7669 | int queue_flags = DEQUEUE_SAVE | DEQUEUE_MOVE | DEQUEUE_NOCLOCK; |
eb580751 | 7670 | struct rq *rq; |
111cd11b | 7671 | bool cpuset_locked = false; |
1da177e4 | 7672 | |
896bbb25 SRV |
7673 | /* The pi code expects interrupts enabled */ |
7674 | BUG_ON(pi && in_interrupt()); | |
1da177e4 | 7675 | recheck: |
d1ccc66d | 7676 | /* Double check policy once rq lock held: */ |
ca94c442 LP |
7677 | if (policy < 0) { |
7678 | reset_on_fork = p->sched_reset_on_fork; | |
1da177e4 | 7679 | policy = oldpolicy = p->policy; |
ca94c442 | 7680 | } else { |
7479f3c9 | 7681 | reset_on_fork = !!(attr->sched_flags & SCHED_FLAG_RESET_ON_FORK); |
ca94c442 | 7682 | |
20f9cd2a | 7683 | if (!valid_policy(policy)) |
ca94c442 LP |
7684 | return -EINVAL; |
7685 | } | |
7686 | ||
794a56eb | 7687 | if (attr->sched_flags & ~(SCHED_FLAG_ALL | SCHED_FLAG_SUGOV)) |
7479f3c9 PZ |
7688 | return -EINVAL; |
7689 | ||
1da177e4 LT |
7690 | /* |
7691 | * Valid priorities for SCHED_FIFO and SCHED_RR are | |
ae18ad28 | 7692 | * 1..MAX_RT_PRIO-1, valid priority for SCHED_NORMAL, |
dd41f596 | 7693 | * SCHED_BATCH and SCHED_IDLE is 0. |
1da177e4 | 7694 | */ |
ae18ad28 | 7695 | if (attr->sched_priority > MAX_RT_PRIO-1) |
1da177e4 | 7696 | return -EINVAL; |
aab03e05 DF |
7697 | if ((dl_policy(policy) && !__checkparam_dl(attr)) || |
7698 | (rt_policy(policy) != (attr->sched_priority != 0))) | |
1da177e4 LT |
7699 | return -EINVAL; |
7700 | ||
725aad24 | 7701 | if (user) { |
700a7833 CG |
7702 | retval = user_check_sched_setscheduler(p, attr, policy, reset_on_fork); |
7703 | if (retval) | |
7704 | return retval; | |
7705 | ||
794a56eb JL |
7706 | if (attr->sched_flags & SCHED_FLAG_SUGOV) |
7707 | return -EINVAL; | |
7708 | ||
b0ae1981 | 7709 | retval = security_task_setscheduler(p); |
725aad24 JF |
7710 | if (retval) |
7711 | return retval; | |
7712 | } | |
7713 | ||
a509a7cd PB |
7714 | /* Update task specific "requested" clamps */ |
7715 | if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP) { | |
7716 | retval = uclamp_validate(p, attr); | |
7717 | if (retval) | |
7718 | return retval; | |
7719 | } | |
7720 | ||
111cd11b JL |
7721 | /* |
7722 | * SCHED_DEADLINE bandwidth accounting relies on stable cpusets | |
7723 | * information. | |
7724 | */ | |
7725 | if (dl_policy(policy) || dl_policy(p->policy)) { | |
7726 | cpuset_locked = true; | |
7727 | cpuset_lock(); | |
7728 | } | |
710da3c8 | 7729 | |
b29739f9 | 7730 | /* |
d1ccc66d | 7731 | * Make sure no PI-waiters arrive (or leave) while we are |
b29739f9 | 7732 | * changing the priority of the task: |
0122ec5b | 7733 | * |
25985edc | 7734 | * To be able to change p->policy safely, the appropriate |
1da177e4 LT |
7735 | * runqueue lock must be held. |
7736 | */ | |
eb580751 | 7737 | rq = task_rq_lock(p, &rf); |
80f5c1b8 | 7738 | update_rq_clock(rq); |
dc61b1d6 | 7739 | |
34f971f6 | 7740 | /* |
d1ccc66d | 7741 | * Changing the policy of the stop thread is a very bad idea:
34f971f6 PZ |
7742 | */ |
7743 | if (p == rq->stop) { | |
4b211f2b MP |
7744 | retval = -EINVAL; |
7745 | goto unlock; | |
34f971f6 PZ |
7746 | } |
7747 | ||
a51e9198 | 7748 | /* |
d6b1e911 TG |
7749 | * If not changing anything there's no need to proceed further, |
7750 | * but store a possible modification of reset_on_fork. | |
a51e9198 | 7751 | */ |
d50dde5a | 7752 | if (unlikely(policy == p->policy)) { |
d0ea0268 | 7753 | if (fair_policy(policy) && attr->sched_nice != task_nice(p)) |
d50dde5a DF |
7754 | goto change; |
7755 | if (rt_policy(policy) && attr->sched_priority != p->rt_priority) | |
7756 | goto change; | |
75381608 | 7757 | if (dl_policy(policy) && dl_param_changed(p, attr)) |
aab03e05 | 7758 | goto change; |
a509a7cd PB |
7759 | if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP) |
7760 | goto change; | |
d50dde5a | 7761 | |
d6b1e911 | 7762 | p->sched_reset_on_fork = reset_on_fork; |
4b211f2b MP |
7763 | retval = 0; |
7764 | goto unlock; | |
a51e9198 | 7765 | } |
d50dde5a | 7766 | change: |
a51e9198 | 7767 | |
dc61b1d6 | 7768 | if (user) { |
332ac17e | 7769 | #ifdef CONFIG_RT_GROUP_SCHED |
dc61b1d6 PZ |
7770 | /* |
7771 | * Do not allow realtime tasks into groups that have no runtime | |
7772 | * assigned. | |
7773 | */ | |
7774 | if (rt_bandwidth_enabled() && rt_policy(policy) && | |
f4493771 MG |
7775 | task_group(p)->rt_bandwidth.rt_runtime == 0 && |
7776 | !task_group_is_autogroup(task_group(p))) { | |
4b211f2b MP |
7777 | retval = -EPERM; |
7778 | goto unlock; | |
dc61b1d6 | 7779 | } |
dc61b1d6 | 7780 | #endif |
332ac17e | 7781 | #ifdef CONFIG_SMP |
794a56eb JL |
7782 | if (dl_bandwidth_enabled() && dl_policy(policy) && |
7783 | !(attr->sched_flags & SCHED_FLAG_SUGOV)) { | |
332ac17e | 7784 | cpumask_t *span = rq->rd->span; |
332ac17e DF |
7785 | |
7786 | /* | |
7787 | * Don't allow tasks with an affinity mask smaller than | |
7788 | * the entire root_domain to become SCHED_DEADLINE. We | |
7789 | * will also fail if there's no bandwidth available. | |
7790 | */ | |
3bd37062 | 7791 | if (!cpumask_subset(span, p->cpus_ptr) || |
e4099a5e | 7792 | rq->rd->dl_bw.bw == 0) { |
4b211f2b MP |
7793 | retval = -EPERM; |
7794 | goto unlock; | |
332ac17e DF |
7795 | } |
7796 | } | |
7797 | #endif | |
7798 | } | |
dc61b1d6 | 7799 | |
d1ccc66d | 7800 | /* Re-check policy now with rq lock held: */ |
1da177e4 LT |
7801 | if (unlikely(oldpolicy != -1 && oldpolicy != p->policy)) { |
7802 | policy = oldpolicy = -1; | |
eb580751 | 7803 | task_rq_unlock(rq, p, &rf); |
111cd11b JL |
7804 | if (cpuset_locked) |
7805 | cpuset_unlock(); | |
1da177e4 LT |
7806 | goto recheck; |
7807 | } | |
332ac17e DF |
7808 | |
7809 | /* | |
7810 | * If setscheduling to SCHED_DEADLINE (or changing the parameters | |
7811 | * of a SCHED_DEADLINE task) we need to check if enough bandwidth | |
7812 | * is available. | |
7813 | */ | |
06a76fe0 | 7814 | if ((dl_policy(policy) || dl_task(p)) && sched_dl_overflow(p, policy, attr)) { |
4b211f2b MP |
7815 | retval = -EBUSY; |
7816 | goto unlock; | |
332ac17e DF |
7817 | } |
7818 | ||
c365c292 TG |
7819 | p->sched_reset_on_fork = reset_on_fork; |
7820 | oldprio = p->prio; | |
7821 | ||
f558c2b8 | 7822 | newprio = __normal_prio(policy, attr->sched_priority, attr->sched_nice); |
dbc7f069 PZ |
7823 | if (pi) { |
7824 | /* | |
7825 | * Take priority boosted tasks into account. If the new | |
7826 | * effective priority is unchanged, we just store the new | |
7827 | * normal parameters and do not touch the scheduler class and | |
7828 | * the runqueue. This will be done when the task deboosts
7829 | * itself. | |
7830 | */ | |
f558c2b8 PZ |
7831 | newprio = rt_effective_prio(p, newprio); |
7832 | if (newprio == oldprio) | |
ff77e468 | 7833 | queue_flags &= ~DEQUEUE_MOVE; |
c365c292 TG |
7834 | } |
7835 | ||
da0c1e65 | 7836 | queued = task_on_rq_queued(p); |
051a1d1a | 7837 | running = task_current(rq, p); |
da0c1e65 | 7838 | if (queued) |
ff77e468 | 7839 | dequeue_task(rq, p, queue_flags); |
0e1f3483 | 7840 | if (running) |
f3cd1c4e | 7841 | put_prev_task(rq, p); |
f6b53205 | 7842 | |
83ab0aa0 | 7843 | prev_class = p->sched_class; |
a509a7cd | 7844 | |
f558c2b8 PZ |
7845 | if (!(attr->sched_flags & SCHED_FLAG_KEEP_PARAMS)) { |
7846 | __setscheduler_params(p, attr); | |
7847 | __setscheduler_prio(p, newprio); | |
7848 | } | |
a509a7cd | 7849 | __setscheduler_uclamp(p, attr); |
f6b53205 | 7850 | |
da0c1e65 | 7851 | if (queued) { |
81a44c54 TG |
7852 | /* |
7853 | * We enqueue to tail when the priority of a task is | |
7854 | * increased (user space view). | |
7855 | */ | |
ff77e468 PZ |
7856 | if (oldprio < p->prio) |
7857 | queue_flags |= ENQUEUE_HEAD; | |
1de64443 | 7858 | |
ff77e468 | 7859 | enqueue_task(rq, p, queue_flags); |
81a44c54 | 7860 | } |
a399d233 | 7861 | if (running) |
03b7fad1 | 7862 | set_next_task(rq, p); |
cb469845 | 7863 | |
da7a735e | 7864 | check_class_changed(rq, p, prev_class, oldprio); |
d1ccc66d IM |
7865 | |
7866 | /* Avoid rq from going away on us: */ | |
7867 | preempt_disable(); | |
565790d2 | 7868 | head = splice_balance_callbacks(rq); |
eb580751 | 7869 | task_rq_unlock(rq, p, &rf); |
b29739f9 | 7870 | |
710da3c8 | 7871 | if (pi) { |
111cd11b JL |
7872 | if (cpuset_locked) |
7873 | cpuset_unlock(); | |
dbc7f069 | 7874 | rt_mutex_adjust_pi(p); |
710da3c8 | 7875 | } |
95e02ca9 | 7876 | |
d1ccc66d | 7877 | /* Run balance callbacks after we've adjusted the PI chain: */ |
565790d2 | 7878 | balance_callbacks(rq, head); |
4c9a4bc8 | 7879 | preempt_enable(); |
95e02ca9 | 7880 | |
1da177e4 | 7881 | return 0; |
4b211f2b MP |
7882 | |
7883 | unlock: | |
7884 | task_rq_unlock(rq, p, &rf); | |
111cd11b JL |
7885 | if (cpuset_locked) |
7886 | cpuset_unlock(); | |
4b211f2b | 7887 | return retval; |
1da177e4 | 7888 | } |
961ccddd | 7889 | |
7479f3c9 PZ |
7890 | static int _sched_setscheduler(struct task_struct *p, int policy, |
7891 | const struct sched_param *param, bool check) | |
7892 | { | |
7893 | struct sched_attr attr = { | |
7894 | .sched_policy = policy, | |
7895 | .sched_priority = param->sched_priority, | |
7896 | .sched_nice = PRIO_TO_NICE(p->static_prio), | |
7897 | }; | |
7898 | ||
c13db6b1 SR |
7899 | /* Fixup the legacy SCHED_RESET_ON_FORK hack. */ |
7900 | if ((policy != SETPARAM_POLICY) && (policy & SCHED_RESET_ON_FORK)) { | |
7479f3c9 PZ |
7901 | attr.sched_flags |= SCHED_FLAG_RESET_ON_FORK; |
7902 | policy &= ~SCHED_RESET_ON_FORK; | |
7903 | attr.sched_policy = policy; | |
7904 | } | |
7905 | ||
dbc7f069 | 7906 | return __sched_setscheduler(p, &attr, check, true); |
7479f3c9 | 7907 | } |
961ccddd RR |
7908 | /** |
7909 | * sched_setscheduler - change the scheduling policy and/or RT priority of a thread. | |
7910 | * @p: the task in question. | |
7911 | * @policy: new policy. | |
7912 | * @param: structure containing the new RT priority. | |
7913 | * | |
7318d4cc PZ |
7914 | * Use sched_set_fifo(), read its comment. |
7915 | * | |
e69f6186 YB |
7916 | * Return: 0 on success. An error code otherwise. |
7917 | * | |
961ccddd RR |
7918 | * NOTE that the task may be already dead. |
7919 | */ | |
7920 | int sched_setscheduler(struct task_struct *p, int policy, | |
fe7de49f | 7921 | const struct sched_param *param) |
961ccddd | 7922 | { |
7479f3c9 | 7923 | return _sched_setscheduler(p, policy, param, true); |
961ccddd | 7924 | } |
1da177e4 | 7925 | |
d50dde5a DF |
7926 | int sched_setattr(struct task_struct *p, const struct sched_attr *attr) |
7927 | { | |
dbc7f069 | 7928 | return __sched_setscheduler(p, attr, true, true); |
d50dde5a | 7929 | } |
d50dde5a | 7930 | |
794a56eb JL |
7931 | int sched_setattr_nocheck(struct task_struct *p, const struct sched_attr *attr) |
7932 | { | |
7933 | return __sched_setscheduler(p, attr, false, true); | |
7934 | } | |
1eb5dde6 | 7935 | EXPORT_SYMBOL_GPL(sched_setattr_nocheck); |
794a56eb | 7936 | |
961ccddd RR |
7937 | /** |
7938 | * sched_setscheduler_nocheck - change the scheduling policy and/or RT priority of a thread from kernelspace. | |
7939 | * @p: the task in question. | |
7940 | * @policy: new policy. | |
7941 | * @param: structure containing the new RT priority. | |
7942 | * | |
7943 | * Just like sched_setscheduler, only don't bother checking if the | |
7944 | * current context has permission. For example, this is needed in | |
7945 | * stop_machine(): we create temporary high priority worker threads, | |
7946 | * but our caller might not have that capability. | |
e69f6186 YB |
7947 | * |
7948 | * Return: 0 on success. An error code otherwise. | |
961ccddd RR |
7949 | */ |
7950 | int sched_setscheduler_nocheck(struct task_struct *p, int policy, | |
fe7de49f | 7951 | const struct sched_param *param) |
961ccddd | 7952 | { |
7479f3c9 | 7953 | return _sched_setscheduler(p, policy, param, false); |
961ccddd RR |
7954 | } |
7955 | ||
7318d4cc PZ |
7956 | /* |
7957 | * SCHED_FIFO is a broken scheduler model; that is, it is fundamentally | |
7958 | * incapable of resource management, which is the one thing an OS really should | |
7959 | * be doing. | |
7960 | * | |
7961 | * This is of course the reason it is limited to privileged users only. | |
7962 | * | |
7963 | * Worse still, it is fundamentally impossible to compose static priority
7964 | * workloads. You cannot take two correctly working static prio workloads | |
7965 | * and smash them together and still expect them to work. | |
7966 | * | |
7967 | * For this reason 'all' FIFO tasks the kernel creates are basically at: | |
7968 | * | |
7969 | * MAX_RT_PRIO / 2 | |
7970 | * | |
7971 | * The administrator _MUST_ configure the system, the kernel simply doesn't | |
7972 | * know enough information to make a sensible choice. | |
7973 | */ | |
8b700983 | 7974 | void sched_set_fifo(struct task_struct *p) |
7318d4cc PZ |
7975 | { |
7976 | struct sched_param sp = { .sched_priority = MAX_RT_PRIO / 2 }; | |
8b700983 | 7977 | WARN_ON_ONCE(sched_setscheduler_nocheck(p, SCHED_FIFO, &sp) != 0); |
7318d4cc PZ |
7978 | } |
7979 | EXPORT_SYMBOL_GPL(sched_set_fifo); | |
7980 | ||
7981 | /* | |
7982 | * For when you don't much care about FIFO, but want to be above SCHED_NORMAL. | |
7983 | */ | |
8b700983 | 7984 | void sched_set_fifo_low(struct task_struct *p) |
7318d4cc PZ |
7985 | { |
7986 | struct sched_param sp = { .sched_priority = 1 }; | |
8b700983 | 7987 | WARN_ON_ONCE(sched_setscheduler_nocheck(p, SCHED_FIFO, &sp) != 0); |
7318d4cc PZ |
7988 | } |
7989 | EXPORT_SYMBOL_GPL(sched_set_fifo_low); | |
7990 | ||
8b700983 | 7991 | void sched_set_normal(struct task_struct *p, int nice) |
7318d4cc PZ |
7992 | { |
7993 | struct sched_attr attr = { | |
7994 | .sched_policy = SCHED_NORMAL, | |
7995 | .sched_nice = nice, | |
7996 | }; | |
8b700983 | 7997 | WARN_ON_ONCE(sched_setattr_nocheck(p, &attr) != 0); |
7318d4cc PZ |
7998 | } |
7999 | EXPORT_SYMBOL_GPL(sched_set_normal); | |
961ccddd | 8000 | |
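A hedged sketch of the intended call pattern for a kernel thread the caller owns: create the thread, let sched_set_fifo() pick the mid-range FIFO priority (MAX_RT_PRIO / 2), then start it. 'my_thread_fn' is a hypothetical thread function and error handling is minimal:

#include <linux/err.h>
#include <linux/kthread.h>
#include <linux/sched.h>

static struct task_struct *worker;

static int start_worker(int (*my_thread_fn)(void *))
{
	worker = kthread_create(my_thread_fn, NULL, "my-worker");
	if (IS_ERR(worker))
		return PTR_ERR(worker);

	sched_set_fifo(worker);		/* mid-range FIFO, not a raw priority pick */
	wake_up_process(worker);
	return 0;
}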
95cdf3b7 IM |
8001 | static int |
8002 | do_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param) | |
1da177e4 | 8003 | { |
1da177e4 | 8004 | struct sched_param lparam; |
1da177e4 LT |
8005 | |
8006 | if (!param || pid < 0) | |
8007 | return -EINVAL; | |
8008 | if (copy_from_user(&lparam, param, sizeof(struct sched_param))) | |
8009 | return -EFAULT; | |
5fe1d75f | 8010 | |
febe162d PZ |
8011 | CLASS(find_get_task, p)(pid); |
8012 | if (!p) | |
8013 | return -ESRCH; | |
710da3c8 | 8014 | |
febe162d | 8015 | return sched_setscheduler(p, policy, &lparam); |
1da177e4 LT |
8016 | } |
8017 | ||
d50dde5a DF |
8018 | /* |
8019 | * Mimics kernel/events/core.c perf_copy_attr(). | |
8020 | */ | |
d1ccc66d | 8021 | static int sched_copy_attr(struct sched_attr __user *uattr, struct sched_attr *attr) |
d50dde5a DF |
8022 | { |
8023 | u32 size; | |
8024 | int ret; | |
8025 | ||
d1ccc66d | 8026 | /* Zero the full structure, so that a short copy will be nice: */ |
d50dde5a DF |
8027 | memset(attr, 0, sizeof(*attr)); |
8028 | ||
8029 | ret = get_user(size, &uattr->size); | |
8030 | if (ret) | |
8031 | return ret; | |
8032 | ||
d1ccc66d IM |
8033 | /* ABI compatibility quirk: */ |
8034 | if (!size) | |
d50dde5a | 8035 | size = SCHED_ATTR_SIZE_VER0; |
dff3a85f | 8036 | if (size < SCHED_ATTR_SIZE_VER0 || size > PAGE_SIZE) |
d50dde5a DF |
8037 | goto err_size; |
8038 | ||
dff3a85f AS |
8039 | ret = copy_struct_from_user(attr, sizeof(*attr), uattr, size); |
8040 | if (ret) { | |
8041 | if (ret == -E2BIG) | |
8042 | goto err_size; | |
8043 | return ret; | |
d50dde5a DF |
8044 | } |
8045 | ||
a509a7cd PB |
8046 | if ((attr->sched_flags & SCHED_FLAG_UTIL_CLAMP) && |
8047 | size < SCHED_ATTR_SIZE_VER1) | |
8048 | return -EINVAL; | |
8049 | ||
d50dde5a | 8050 | /* |
d1ccc66d | 8051 | * XXX: Do we want to be lenient like existing syscalls; or do we want |
d50dde5a DF |
8052 | * to be strict and return an error on out-of-bounds values? |
8053 | */ | |
75e45d51 | 8054 | attr->sched_nice = clamp(attr->sched_nice, MIN_NICE, MAX_NICE); |
d50dde5a | 8055 | |
e78c7bca | 8056 | return 0; |
d50dde5a DF |
8057 | |
8058 | err_size: | |
8059 | put_user(sizeof(*attr), &uattr->size); | |
e78c7bca | 8060 | return -E2BIG; |
d50dde5a DF |
8061 | } |
8062 | ||
f4dddf90 QP |
8063 | static void get_params(struct task_struct *p, struct sched_attr *attr) |
8064 | { | |
8065 | if (task_has_dl_policy(p)) | |
8066 | __getparam_dl(p, attr); | |
8067 | else if (task_has_rt_policy(p)) | |
8068 | attr->sched_priority = p->rt_priority; | |
8069 | else | |
8070 | attr->sched_nice = task_nice(p); | |
8071 | } | |
8072 | ||
1da177e4 LT |
8073 | /** |
8074 | * sys_sched_setscheduler - set/change the scheduler policy and RT priority | |
8075 | * @pid: the pid in question. | |
8076 | * @policy: new policy. | |
8077 | * @param: structure containing the new RT priority. | |
e69f6186 YB |
8078 | * |
8079 | * Return: 0 on success. An error code otherwise. | |
1da177e4 | 8080 | */ |
d1ccc66d | 8081 | SYSCALL_DEFINE3(sched_setscheduler, pid_t, pid, int, policy, struct sched_param __user *, param) |
1da177e4 | 8082 | { |
c21761f1 JB |
8083 | if (policy < 0) |
8084 | return -EINVAL; | |
8085 | ||
1da177e4 LT |
8086 | return do_sched_setscheduler(pid, policy, param); |
8087 | } | |
8088 | ||
8089 | /** | |
8090 | * sys_sched_setparam - set/change the RT priority of a thread | |
8091 | * @pid: the pid in question. | |
8092 | * @param: structure containing the new RT priority. | |
e69f6186 YB |
8093 | * |
8094 | * Return: 0 on success. An error code otherwise. | |
1da177e4 | 8095 | */ |
5add95d4 | 8096 | SYSCALL_DEFINE2(sched_setparam, pid_t, pid, struct sched_param __user *, param) |
1da177e4 | 8097 | { |
c13db6b1 | 8098 | return do_sched_setscheduler(pid, SETPARAM_POLICY, param); |
1da177e4 LT |
8099 | } |
8100 | ||
d50dde5a DF |
8101 | /** |
8102 | * sys_sched_setattr - same as above, but with extended sched_attr | |
8103 | * @pid: the pid in question. | |
5778fccf | 8104 | * @uattr: structure containing the extended parameters. |
db66d756 | 8105 | * @flags: for future extension. |
d50dde5a | 8106 | */ |
6d35ab48 PZ |
8107 | SYSCALL_DEFINE3(sched_setattr, pid_t, pid, struct sched_attr __user *, uattr, |
8108 | unsigned int, flags) | |
d50dde5a DF |
8109 | { |
8110 | struct sched_attr attr; | |
d50dde5a DF |
8111 | int retval; |
8112 | ||
6d35ab48 | 8113 | if (!uattr || pid < 0 || flags) |
d50dde5a DF |
8114 | return -EINVAL; |
8115 | ||
143cf23d MK |
8116 | retval = sched_copy_attr(uattr, &attr); |
8117 | if (retval) | |
8118 | return retval; | |
d50dde5a | 8119 | |
b14ed2c2 | 8120 | if ((int)attr.sched_policy < 0) |
dbdb2275 | 8121 | return -EINVAL; |
1d6362fa PB |
8122 | if (attr.sched_flags & SCHED_FLAG_KEEP_POLICY) |
8123 | attr.sched_policy = SETPARAM_POLICY; | |
d50dde5a | 8124 | |
febe162d PZ |
8125 | CLASS(find_get_task, p)(pid); |
8126 | if (!p) | |
8127 | return -ESRCH; | |
d50dde5a | 8128 | |
febe162d PZ |
8129 | if (attr.sched_flags & SCHED_FLAG_KEEP_PARAMS) |
8130 | get_params(p, &attr); | |
a509a7cd | 8131 | |
febe162d | 8132 | return sched_setattr(p, &attr); |
d50dde5a DF |
8133 | } |
8134 | ||
1da177e4 LT |
8135 | /** |
8136 | * sys_sched_getscheduler - get the policy (scheduling class) of a thread | |
8137 | * @pid: the pid in question. | |
e69f6186 YB |
8138 | * |
8139 | * Return: On success, the policy of the thread. Otherwise, a negative error | |
8140 | * code. | |
1da177e4 | 8141 | */ |
5add95d4 | 8142 | SYSCALL_DEFINE1(sched_getscheduler, pid_t, pid) |
1da177e4 | 8143 | { |
36c8b586 | 8144 | struct task_struct *p; |
3a5c359a | 8145 | int retval; |
1da177e4 LT |
8146 | |
8147 | if (pid < 0) | |
3a5c359a | 8148 | return -EINVAL; |
1da177e4 | 8149 | |
febe162d | 8150 | guard(rcu)(); |
1da177e4 | 8151 | p = find_process_by_pid(pid); |
febe162d PZ |
8152 | if (!p) |
8153 | return -ESRCH; | |
8154 | ||
8155 | retval = security_task_getscheduler(p); | |
8156 | if (!retval) { | |
8157 | retval = p->policy; | |
8158 | if (p->sched_reset_on_fork) | |
8159 | retval |= SCHED_RESET_ON_FORK; | |
1da177e4 | 8160 | } |
1da177e4 LT |
8161 | return retval; |
8162 | } | |
8163 | ||
8164 | /** | |
ca94c442 | 8165 | * sys_sched_getparam - get the RT priority of a thread |
1da177e4 LT |
8166 | * @pid: the pid in question. |
8167 | * @param: structure containing the RT priority. | |
e69f6186 YB |
8168 | * |
8169 | * Return: On success, 0 and the RT priority is in @param. Otherwise, an error | |
8170 | * code. | |
1da177e4 | 8171 | */ |
5add95d4 | 8172 | SYSCALL_DEFINE2(sched_getparam, pid_t, pid, struct sched_param __user *, param) |
1da177e4 | 8173 | { |
ce5f7f82 | 8174 | struct sched_param lp = { .sched_priority = 0 }; |
36c8b586 | 8175 | struct task_struct *p; |
3a5c359a | 8176 | int retval; |
1da177e4 LT |
8177 | |
8178 | if (!param || pid < 0) | |
3a5c359a | 8179 | return -EINVAL; |
1da177e4 | 8180 | |
febe162d PZ |
8181 | scoped_guard (rcu) { |
8182 | p = find_process_by_pid(pid); | |
8183 | if (!p) | |
8184 | return -ESRCH; | |
1da177e4 | 8185 | |
febe162d PZ |
8186 | retval = security_task_getscheduler(p); |
8187 | if (retval) | |
8188 | return retval; | |
1da177e4 | 8189 | |
febe162d PZ |
8190 | if (task_has_rt_policy(p)) |
8191 | lp.sched_priority = p->rt_priority; | |
8192 | } | |
1da177e4 LT |
8193 | |
8194 | /* | |
8195 | * This one might sleep, we cannot do it with a spinlock held ... | |
8196 | */ | |
febe162d | 8197 | return copy_to_user(param, &lp, sizeof(*param)) ? -EFAULT : 0; |
1da177e4 LT |
8198 | } |
8199 | ||
1251201c IM |
8200 | /* |
8201 | * Copy the kernel-side attribute structure (which might be larger
8202 | * than what user-space knows about) to user-space. | |
8203 | * | |
8204 | * Note that all cases are valid: user-space buffer can be larger or | |
8205 | * smaller than the kernel-space buffer. The usual case is that both | |
8206 | * have the same size. | |
8207 | */ | |
8208 | static int | |
8209 | sched_attr_copy_to_user(struct sched_attr __user *uattr, | |
8210 | struct sched_attr *kattr, | |
8211 | unsigned int usize) | |
d50dde5a | 8212 | { |
1251201c | 8213 | unsigned int ksize = sizeof(*kattr); |
d50dde5a | 8214 | |
96d4f267 | 8215 | if (!access_ok(uattr, usize)) |
d50dde5a DF |
8216 | return -EFAULT; |
8217 | ||
8218 | /* | |
1251201c IM |
8219 | * sched_getattr() ABI forwards and backwards compatibility: |
8220 | * | |
8221 | * If usize == ksize then we just copy everything to user-space and all is good. | |
8222 | * | |
8223 | * If usize < ksize then we only copy as much as user-space has space for, | |
8224 | * this keeps ABI compatibility as well. We skip the rest. | |
8225 | * | |
8226 | * If usize > ksize then user-space is using a newer version of the ABI, | |
8227 | * parts of which the kernel doesn't know about. Just ignore them - tooling can
8228 | * detect the kernel's knowledge of attributes from the attr->size value | |
8229 | * which is set to ksize in this case. | |
d50dde5a | 8230 | */ |
1251201c | 8231 | kattr->size = min(usize, ksize); |
d50dde5a | 8232 | |
1251201c | 8233 | if (copy_to_user(uattr, kattr, kattr->size)) |
d50dde5a DF |
8234 | return -EFAULT; |
8235 | ||
22400674 | 8236 | return 0; |
d50dde5a DF |
8237 | } |
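A toy user-space model of this size negotiation, with a hypothetical four-field "v2" structure standing in for sched_attr; the short copy is what keeps an old, smaller user buffer working against a newer kernel:

#include <stdio.h>
#include <string.h>

/* Hypothetical newer kernel-side structure; a real sched_attr is larger. */
struct kattr_v2 { unsigned int size; unsigned int a, b, new_field; };

static unsigned int copy_compat(void *ubuf, unsigned int usize,
				struct kattr_v2 *kattr)
{
	unsigned int ksize = sizeof(*kattr);
	unsigned int n = usize < ksize ? usize : ksize;

	kattr->size = n;	/* advertised back so tooling can detect it */
	memcpy(ubuf, kattr, n);	/* the short copy keeps the old ABI working */
	return n;
}

int main(void)
{
	struct kattr_v2 k = { 0, 1, 2, 3 };
	unsigned char old_buf[12];	/* an older, smaller user-space struct */

	printf("copied %u of %zu bytes\n",
	       copy_compat(old_buf, sizeof(old_buf), &k), sizeof(k));
	return 0;
}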
8238 | ||
8239 | /** | |
aab03e05 | 8240 | * sys_sched_getattr - similar to sched_getparam, but with sched_attr |
d50dde5a | 8241 | * @pid: the pid in question. |
5778fccf | 8242 | * @uattr: structure containing the extended parameters. |
dff3a85f | 8243 | * @usize: sizeof(attr) for fwd/bwd comp. |
db66d756 | 8244 | * @flags: for future extension. |
d50dde5a | 8245 | */ |
6d35ab48 | 8246 | SYSCALL_DEFINE4(sched_getattr, pid_t, pid, struct sched_attr __user *, uattr, |
1251201c | 8247 | unsigned int, usize, unsigned int, flags) |
d50dde5a | 8248 | { |
1251201c | 8249 | struct sched_attr kattr = { }; |
d50dde5a DF |
8250 | struct task_struct *p; |
8251 | int retval; | |
8252 | ||
1251201c IM |
8253 | if (!uattr || pid < 0 || usize > PAGE_SIZE || |
8254 | usize < SCHED_ATTR_SIZE_VER0 || flags) | |
d50dde5a DF |
8255 | return -EINVAL; |
8256 | ||
febe162d PZ |
8257 | scoped_guard (rcu) { |
8258 | p = find_process_by_pid(pid); | |
8259 | if (!p) | |
8260 | return -ESRCH; | |
d50dde5a | 8261 | |
febe162d PZ |
8262 | retval = security_task_getscheduler(p); |
8263 | if (retval) | |
8264 | return retval; | |
d50dde5a | 8265 | |
febe162d PZ |
8266 | kattr.sched_policy = p->policy; |
8267 | if (p->sched_reset_on_fork) | |
8268 | kattr.sched_flags |= SCHED_FLAG_RESET_ON_FORK; | |
8269 | get_params(p, &kattr); | |
8270 | kattr.sched_flags &= SCHED_FLAG_ALL; | |
d50dde5a | 8271 | |
a509a7cd | 8272 | #ifdef CONFIG_UCLAMP_TASK |
febe162d PZ |
8273 | /* |
8274 | * This could race with another potential updater, but this is fine | |
8275 | * because it'll correctly read the old or the new value. We don't need | |
8276 | * to guarantee who wins the race as long as it doesn't return garbage. | |
8277 | */ | |
8278 | kattr.sched_util_min = p->uclamp_req[UCLAMP_MIN].value; | |
8279 | kattr.sched_util_max = p->uclamp_req[UCLAMP_MAX].value; | |
a509a7cd | 8280 | #endif |
febe162d | 8281 | } |
d50dde5a | 8282 | |
1251201c | 8283 | return sched_attr_copy_to_user(uattr, &kattr, usize); |
d50dde5a DF |
8284 | } |
8285 | ||
234b8ab6 WD |
8286 | #ifdef CONFIG_SMP |
8287 | int dl_task_check_affinity(struct task_struct *p, const struct cpumask *mask) | |
1da177e4 | 8288 | { |
234b8ab6 WD |
8289 | /* |
8290 | * If the task isn't a deadline task or admission control is | |
8291 | * disabled then we don't care about affinity changes. | |
8292 | */ | |
8293 | if (!task_has_dl_policy(p) || !dl_bandwidth_enabled()) | |
8294 | return 0; | |
8295 | ||
8296 | /* | |
8297 | * Since bandwidth control happens on a root_domain basis,
8298 | * if admission test is enabled, we only admit -deadline | |
8299 | * tasks allowed to run on all the CPUs in the task's | |
8300 | * root_domain. | |
8301 | */ | |
0e34600a | 8302 | guard(rcu)(); |
234b8ab6 | 8303 | if (!cpumask_subset(task_rq(p)->rd->span, mask)) |
0e34600a PZ |
8304 | return -EBUSY; |
8305 | ||
8306 | return 0; | |
234b8ab6 WD |
8307 | } |
8308 | #endif | |
8309 | ||
db3b02ae | 8310 | static int |
713a2e21 | 8311 | __sched_setaffinity(struct task_struct *p, struct affinity_context *ctx) |
1da177e4 | 8312 | { |
36c8b586 | 8313 | int retval; |
5a16f3d3 | 8314 | cpumask_var_t cpus_allowed, new_mask; |
1da177e4 | 8315 | |
db3b02ae WD |
8316 | if (!alloc_cpumask_var(&cpus_allowed, GFP_KERNEL)) |
8317 | return -ENOMEM; | |
1da177e4 | 8318 | |
5a16f3d3 RR |
8319 | if (!alloc_cpumask_var(&new_mask, GFP_KERNEL)) { |
8320 | retval = -ENOMEM; | |
8321 | goto out_free_cpus_allowed; | |
8322 | } | |
e4099a5e PZ |
8323 | |
8324 | cpuset_cpus_allowed(p, cpus_allowed); | |
713a2e21 WL |
8325 | cpumask_and(new_mask, ctx->new_mask, cpus_allowed); |
8326 | ||
8327 | ctx->new_mask = new_mask; | |
8328 | ctx->flags |= SCA_CHECK; | |
e4099a5e | 8329 | |
234b8ab6 WD |
8330 | retval = dl_task_check_affinity(p, new_mask); |
8331 | if (retval) | |
8332 | goto out_free_new_mask; | |
8f9ea86f | 8333 | |
713a2e21 | 8334 | retval = __set_cpus_allowed_ptr(p, ctx); |
db3b02ae WD |
8335 | if (retval) |
8336 | goto out_free_new_mask; | |
1da177e4 | 8337 | |
db3b02ae WD |
8338 | cpuset_cpus_allowed(p, cpus_allowed); |
8339 | if (!cpumask_subset(new_mask, cpus_allowed)) { | |
8340 | /* | |
8341 | * We must have raced with a concurrent cpuset update. | |
8342 | * Just reset the cpumask to the cpuset's cpus_allowed. | |
8343 | */ | |
8344 | cpumask_copy(new_mask, cpus_allowed); | |
8f9ea86f WL |
8345 | |
8346 | /* | |
8347 | * If SCA_USER is set, a 2nd call to __set_cpus_allowed_ptr() | |
8348 | * will restore the previous user_cpus_ptr value. | |
8349 | * | |
8350 | * In the unlikely event a previous user_cpus_ptr exists, | |
8351 | * we need to further restrict the mask to what is allowed | |
8352 | * by that old user_cpus_ptr. | |
8353 | */ | |
8354 | if (unlikely((ctx->flags & SCA_USER) && ctx->user_mask)) { | |
8355 | bool empty = !cpumask_and(new_mask, new_mask, | |
8356 | ctx->user_mask); | |
8357 | ||
8358 | if (WARN_ON_ONCE(empty)) | |
8359 | cpumask_copy(new_mask, cpus_allowed); | |
8360 | } | |
8361 | __set_cpus_allowed_ptr(p, ctx); | |
8362 | retval = -EINVAL; | |
8707d8b8 | 8363 | } |
db3b02ae | 8364 | |
16303ab2 | 8365 | out_free_new_mask: |
5a16f3d3 RR |
8366 | free_cpumask_var(new_mask); |
8367 | out_free_cpus_allowed: | |
8368 | free_cpumask_var(cpus_allowed); | |
db3b02ae WD |
8369 | return retval; |
8370 | } | |
8371 | ||
8372 | long sched_setaffinity(pid_t pid, const struct cpumask *in_mask) | |
8373 | { | |
8f9ea86f WL |
8374 | struct affinity_context ac; |
8375 | struct cpumask *user_mask; | |
36c8b586 | 8376 | int retval; |
1da177e4 | 8377 | |
92c2ec5b PZ |
8378 | CLASS(find_get_task, p)(pid); |
8379 | if (!p) | |
1da177e4 | 8380 | return -ESRCH; |
1da177e4 | 8381 | |
92c2ec5b PZ |
8382 | if (p->flags & PF_NO_SETAFFINITY) |
8383 | return -EINVAL; | |
db3b02ae | 8384 | |
4c44aaaf | 8385 | if (!check_same_owner(p)) { |
92c2ec5b PZ |
8386 | guard(rcu)(); |
8387 | if (!ns_capable(__task_cred(p)->user_ns, CAP_SYS_NICE)) | |
8388 | return -EPERM; | |
4c44aaaf | 8389 | } |
1da177e4 | 8390 | |
b0ae1981 | 8391 | retval = security_task_setscheduler(p); |
e7834f8f | 8392 | if (retval) |
92c2ec5b | 8393 | return retval; |
1da177e4 | 8394 | |
5657c116 WL |
8395 | /* |
8396 | * With non-SMP configs, user_cpus_ptr/user_mask isn't used and | |
8397 | * alloc_user_cpus_ptr() returns NULL. | |
8398 | */ | |
9a5418bc | 8399 | user_mask = alloc_user_cpus_ptr(NUMA_NO_NODE); |
5657c116 WL |
8400 | if (user_mask) { |
8401 | cpumask_copy(user_mask, in_mask); | |
8402 | } else if (IS_ENABLED(CONFIG_SMP)) { | |
92c2ec5b | 8403 | return -ENOMEM; |
8f9ea86f | 8404 | } |
5657c116 | 8405 | |
8f9ea86f WL |
8406 | ac = (struct affinity_context){ |
8407 | .new_mask = in_mask, | |
8408 | .user_mask = user_mask, | |
8409 | .flags = SCA_USER, | |
8410 | }; | |
8411 | ||
713a2e21 | 8412 | retval = __sched_setaffinity(p, &ac); |
8f9ea86f WL |
8413 | kfree(ac.user_mask); |
8414 | ||
1da177e4 LT |
8415 | return retval; |
8416 | } | |
8417 | ||
8418 | static int get_user_cpu_mask(unsigned long __user *user_mask_ptr, unsigned len, | |
96f874e2 | 8419 | struct cpumask *new_mask) |
1da177e4 | 8420 | { |
96f874e2 RR |
8421 | if (len < cpumask_size()) |
8422 | cpumask_clear(new_mask); | |
8423 | else if (len > cpumask_size()) | |
8424 | len = cpumask_size(); | |
8425 | ||
1da177e4 LT |
8426 | return copy_from_user(new_mask, user_mask_ptr, len) ? -EFAULT : 0; |
8427 | } | |
8428 | ||
8429 | /** | |
d1ccc66d | 8430 | * sys_sched_setaffinity - set the CPU affinity of a process |
1da177e4 LT |
8431 | * @pid: pid of the process |
8432 | * @len: length in bytes of the bitmask pointed to by user_mask_ptr | |
d1ccc66d | 8433 | * @user_mask_ptr: user-space pointer to the new CPU mask |
e69f6186 YB |
8434 | * |
8435 | * Return: 0 on success. An error code otherwise. | |
1da177e4 | 8436 | */ |
5add95d4 HC |
8437 | SYSCALL_DEFINE3(sched_setaffinity, pid_t, pid, unsigned int, len, |
8438 | unsigned long __user *, user_mask_ptr) | |
1da177e4 | 8439 | { |
5a16f3d3 | 8440 | cpumask_var_t new_mask; |
1da177e4 LT |
8441 | int retval; |
8442 | ||
5a16f3d3 RR |
8443 | if (!alloc_cpumask_var(&new_mask, GFP_KERNEL)) |
8444 | return -ENOMEM; | |
1da177e4 | 8445 | |
5a16f3d3 RR |
8446 | retval = get_user_cpu_mask(user_mask_ptr, len, new_mask); |
8447 | if (retval == 0) | |
8448 | retval = sched_setaffinity(pid, new_mask); | |
8449 | free_cpumask_var(new_mask); | |
8450 | return retval; | |
1da177e4 LT |
8451 | } |
8452 | ||
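The user-space view of this syscall, via the glibc wrapper: pin the calling thread to CPU 0 (pid 0 means the caller):

#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>

int main(void)
{
	cpu_set_t set;

	CPU_ZERO(&set);
	CPU_SET(0, &set);
	if (sched_setaffinity(0, sizeof(set), &set))
		perror("sched_setaffinity");
	else
		printf("pinned to CPU 0\n");
	return 0;
}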
96f874e2 | 8453 | long sched_getaffinity(pid_t pid, struct cpumask *mask) |
1da177e4 | 8454 | { |
36c8b586 | 8455 | struct task_struct *p; |
1da177e4 | 8456 | int retval; |
1da177e4 | 8457 | |
92c2ec5b | 8458 | guard(rcu)(); |
1da177e4 LT |
8459 | p = find_process_by_pid(pid); |
8460 | if (!p) | |
92c2ec5b | 8461 | return -ESRCH; |
1da177e4 | 8462 | |
e7834f8f DQ |
8463 | retval = security_task_getscheduler(p); |
8464 | if (retval) | |
92c2ec5b | 8465 | return retval; |
e7834f8f | 8466 | |
92c2ec5b | 8467 | guard(raw_spinlock_irqsave)(&p->pi_lock); |
3bd37062 | 8468 | cpumask_and(mask, &p->cpus_mask, cpu_active_mask); |
1da177e4 | 8469 | |
92c2ec5b | 8470 | return 0; |
1da177e4 LT |
8471 | } |
8472 | ||
8473 | /** | |
d1ccc66d | 8474 | * sys_sched_getaffinity - get the CPU affinity of a process |
1da177e4 LT |
8475 | * @pid: pid of the process |
8476 | * @len: length in bytes of the bitmask pointed to by user_mask_ptr | |
d1ccc66d | 8477 | * @user_mask_ptr: user-space pointer to hold the current CPU mask |
e69f6186 | 8478 | * |
599b4840 ZW |
8479 | * Return: size of CPU mask copied to user_mask_ptr on success. An |
8480 | * error code otherwise. | |
1da177e4 | 8481 | */ |
5add95d4 HC |
8482 | SYSCALL_DEFINE3(sched_getaffinity, pid_t, pid, unsigned int, len, |
8483 | unsigned long __user *, user_mask_ptr) | |
1da177e4 LT |
8484 | { |
8485 | int ret; | |
f17c8607 | 8486 | cpumask_var_t mask; |
1da177e4 | 8487 | |
84fba5ec | 8488 | if ((len * BITS_PER_BYTE) < nr_cpu_ids) |
cd3d8031 KM |
8489 | return -EINVAL; |
8490 | if (len & (sizeof(unsigned long)-1)) | |
1da177e4 LT |
8491 | return -EINVAL; |
8492 | ||
6015b1ac | 8493 | if (!zalloc_cpumask_var(&mask, GFP_KERNEL)) |
f17c8607 | 8494 | return -ENOMEM; |
1da177e4 | 8495 | |
f17c8607 RR |
8496 | ret = sched_getaffinity(pid, mask); |
8497 | if (ret == 0) { | |
4de373a1 | 8498 | unsigned int retlen = min(len, cpumask_size()); |
cd3d8031 | 8499 | |
6015b1ac | 8500 | if (copy_to_user(user_mask_ptr, cpumask_bits(mask), retlen)) |
f17c8607 RR |
8501 | ret = -EFAULT; |
8502 | else | |
cd3d8031 | 8503 | ret = retlen; |
f17c8607 RR |
8504 | } |
8505 | free_cpumask_var(mask); | |
1da177e4 | 8506 | |
f17c8607 | 8507 | return ret; |
1da177e4 LT |
8508 | } |
8509 | ||
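The companion read-back from user space; note the glibc wrapper returns 0 on success and hides the raw syscall's "size of mask copied" return value documented above:

#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>

int main(void)
{
	cpu_set_t set;
	int cpu;

	if (sched_getaffinity(0, sizeof(set), &set)) {
		perror("sched_getaffinity");
		return 1;
	}
	for (cpu = 0; cpu < CPU_SETSIZE; cpu++)
		if (CPU_ISSET(cpu, &set))
			printf("allowed on CPU %d\n", cpu);
	return 0;
}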
7d4dd4f1 | 8510 | static void do_sched_yield(void) |
1da177e4 | 8511 | { |
8a8c69c3 PZ |
8512 | struct rq_flags rf; |
8513 | struct rq *rq; | |
8514 | ||
246b3b33 | 8515 | rq = this_rq_lock_irq(&rf); |
1da177e4 | 8516 | |
ae92882e | 8517 | schedstat_inc(rq->yld_count); |
4530d7ab | 8518 | current->sched_class->yield_task(rq); |
1da177e4 | 8519 | |
8a8c69c3 | 8520 | preempt_disable(); |
345a957f | 8521 | rq_unlock_irq(rq, &rf); |
ba74c144 | 8522 | sched_preempt_enable_no_resched(); |
1da177e4 LT |
8523 | |
8524 | schedule(); | |
7d4dd4f1 | 8525 | } |
1da177e4 | 8526 | |
59a74b15 MCC |
8527 | /** |
8528 | * sys_sched_yield - yield the current processor to other threads. | |
8529 | * | |
8530 | * This function yields the current CPU to other tasks. If there are no | |
8531 | * other threads running on this CPU then this function will return. | |
8532 | * | |
8533 | * Return: 0. | |
8534 | */ | |
7d4dd4f1 DB |
8535 | SYSCALL_DEFINE0(sched_yield) |
8536 | { | |
8537 | do_sched_yield(); | |
1da177e4 LT |
8538 | return 0; |
8539 | } | |
8540 | ||
b965f1dd PZI |
8541 | #if !defined(CONFIG_PREEMPTION) || defined(CONFIG_PREEMPT_DYNAMIC) |
8542 | int __sched __cond_resched(void) | |
1da177e4 | 8543 | { |
fe32d3cd | 8544 | if (should_resched(0)) { |
a18b5d01 | 8545 | preempt_schedule_common(); |
1da177e4 LT |
8546 | return 1; |
8547 | } | |
50895825 FW |
8548 | /* |
8549 | * In preemptible kernels, ->rcu_read_lock_nesting tells the tick | |
8550 | * whether the current CPU is in an RCU read-side critical section, | |
8551 | * so the tick can report quiescent states even for CPUs looping | |
8552 | * in kernel context. In contrast, in non-preemptible kernels, | |
8553 | * RCU readers leave no in-memory hints, which means that CPU-bound | |
8554 | * processes executing in kernel context might never report an | |
8555 | * RCU quiescent state. Therefore, the following code causes | |
8556 | * cond_resched() to report a quiescent state, but only when RCU | |
8557 | * is in urgent need of one. | |
8558 | */ | |
b965f1dd | 8559 | #ifndef CONFIG_PREEMPT_RCU |
f79c3ad6 | 8560 | rcu_all_qs(); |
b965f1dd | 8561 | #endif |
1da177e4 LT |
8562 | return 0; |
8563 | } | |
b965f1dd PZI |
8564 | EXPORT_SYMBOL(__cond_resched); |
8565 | #endif | |
8566 | ||
8567 | #ifdef CONFIG_PREEMPT_DYNAMIC | |
99cf983c | 8568 | #if defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL) |
8a69fe0b MR |
8569 | #define cond_resched_dynamic_enabled __cond_resched |
8570 | #define cond_resched_dynamic_disabled ((void *)&__static_call_return0) | |
b965f1dd | 8571 | DEFINE_STATIC_CALL_RET0(cond_resched, __cond_resched); |
ef72661e | 8572 | EXPORT_STATIC_CALL_TRAMP(cond_resched); |
b965f1dd | 8573 | |
8a69fe0b MR |
8574 | #define might_resched_dynamic_enabled __cond_resched |
8575 | #define might_resched_dynamic_disabled ((void *)&__static_call_return0) | |
b965f1dd | 8576 | DEFINE_STATIC_CALL_RET0(might_resched, __cond_resched); |
ef72661e | 8577 | EXPORT_STATIC_CALL_TRAMP(might_resched); |
99cf983c MR |
8578 | #elif defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY) |
8579 | static DEFINE_STATIC_KEY_FALSE(sk_dynamic_cond_resched); | |
8580 | int __sched dynamic_cond_resched(void) | |
8581 | { | |
e3ff7c60 | 8582 | klp_sched_try_switch(); |
99cf983c MR |
8583 | if (!static_branch_unlikely(&sk_dynamic_cond_resched)) |
8584 | return 0; | |
8585 | return __cond_resched(); | |
8586 | } | |
8587 | EXPORT_SYMBOL(dynamic_cond_resched); | |
8588 | ||
8589 | static DEFINE_STATIC_KEY_FALSE(sk_dynamic_might_resched); | |
8590 | int __sched dynamic_might_resched(void) | |
8591 | { | |
8592 | if (!static_branch_unlikely(&sk_dynamic_might_resched)) | |
8593 | return 0; | |
8594 | return __cond_resched(); | |
8595 | } | |
8596 | EXPORT_SYMBOL(dynamic_might_resched); | |
8597 | #endif | |
35a773a0 | 8598 | #endif |
1da177e4 LT |
8599 | |
8600 | /* | |
613afbf8 | 8601 | * __cond_resched_lock() - if a reschedule is pending, drop the given lock, |
1da177e4 LT |
8602 | * call schedule, and on return reacquire the lock. |
8603 | * | |
c1a280b6 | 8604 | * This works OK both with and without CONFIG_PREEMPTION. We do strange low-level |
1da177e4 LT |
8605 | * operations here to prevent schedule() from being called twice (once via |
8606 | * spin_unlock(), once by hand). | |
8607 | */ | |
613afbf8 | 8608 | int __cond_resched_lock(spinlock_t *lock) |
1da177e4 | 8609 | { |
fe32d3cd | 8610 | int resched = should_resched(PREEMPT_LOCK_OFFSET); |
6df3cecb JK |
8611 | int ret = 0; |
8612 | ||
f607c668 PZ |
8613 | lockdep_assert_held(lock); |
8614 | ||
4a81e832 | 8615 | if (spin_needbreak(lock) || resched) { |
1da177e4 | 8616 | spin_unlock(lock); |
7e406d1f | 8617 | if (!_cond_resched()) |
95c354fe | 8618 | cpu_relax(); |
6df3cecb | 8619 | ret = 1; |
1da177e4 | 8620 | spin_lock(lock); |
1da177e4 | 8621 | } |
6df3cecb | 8622 | return ret; |
1da177e4 | 8623 | } |
613afbf8 | 8624 | EXPORT_SYMBOL(__cond_resched_lock); |
1da177e4 | 8625 | |
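A hedged usage sketch: a long scan under a spinlock periodically offers to reschedule via cond_resched_lock() (the wrapper around __cond_resched_lock()) instead of open-coding the unlock/schedule/relock dance; 'struct my_table' and process_entry() are hypothetical:

#include <linux/sched.h>
#include <linux/spinlock.h>

static void scan_table(struct my_table *table)
{
	int i;

	spin_lock(&table->lock);
	for (i = 0; i < table->nr_entries; i++) {
		process_entry(&table->entries[i]);	/* hypothetical helper */
		/* Drops and re-takes table->lock if a resched is pending. */
		cond_resched_lock(&table->lock);
	}
	spin_unlock(&table->lock);
}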
f3d4b4b1 BG |
8626 | int __cond_resched_rwlock_read(rwlock_t *lock) |
8627 | { | |
8628 | int resched = should_resched(PREEMPT_LOCK_OFFSET); | |
8629 | int ret = 0; | |
8630 | ||
8631 | lockdep_assert_held_read(lock); | |
8632 | ||
8633 | if (rwlock_needbreak(lock) || resched) { | |
8634 | read_unlock(lock); | |
7e406d1f | 8635 | if (!_cond_resched()) |
f3d4b4b1 BG |
8636 | cpu_relax(); |
8637 | ret = 1; | |
8638 | read_lock(lock); | |
8639 | } | |
8640 | return ret; | |
8641 | } | |
8642 | EXPORT_SYMBOL(__cond_resched_rwlock_read); | |
8643 | ||
8644 | int __cond_resched_rwlock_write(rwlock_t *lock) | |
8645 | { | |
8646 | int resched = should_resched(PREEMPT_LOCK_OFFSET); | |
8647 | int ret = 0; | |
8648 | ||
8649 | lockdep_assert_held_write(lock); | |
8650 | ||
8651 | if (rwlock_needbreak(lock) || resched) { | |
8652 | write_unlock(lock); | |
7e406d1f | 8653 | if (!_cond_resched()) |
f3d4b4b1 BG |
8654 | cpu_relax(); |
8655 | ret = 1; | |
8656 | write_lock(lock); | |
8657 | } | |
8658 | return ret; | |
8659 | } | |
8660 | EXPORT_SYMBOL(__cond_resched_rwlock_write); | |
8661 | ||
4c748558 MR |
8662 | #ifdef CONFIG_PREEMPT_DYNAMIC |
8663 | ||
33c64734 | 8664 | #ifdef CONFIG_GENERIC_ENTRY |
4c748558 | 8665 | #include <linux/entry-common.h> |
33c64734 | 8666 | #endif |
4c748558 MR |
8667 | |
8668 | /* | |
8669 | * SC:cond_resched | |
8670 | * SC:might_resched | |
8671 | * SC:preempt_schedule | |
8672 | * SC:preempt_schedule_notrace | |
8673 | * SC:irqentry_exit_cond_resched | |
8674 | * | |
8675 | * | |
8676 | * NONE: | |
8677 | * cond_resched <- __cond_resched | |
8678 | * might_resched <- RET0 | |
8679 | * preempt_schedule <- NOP | |
8680 | * preempt_schedule_notrace <- NOP | |
8681 | * irqentry_exit_cond_resched <- NOP | |
8682 | * | |
8683 | * VOLUNTARY: | |
8684 | * cond_resched <- __cond_resched | |
8685 | * might_resched <- __cond_resched | |
8686 | * preempt_schedule <- NOP | |
8687 | * preempt_schedule_notrace <- NOP | |
8688 | * irqentry_exit_cond_resched <- NOP | |
8689 | * | |
8690 | * FULL: | |
8691 | * cond_resched <- RET0 | |
8692 | * might_resched <- RET0 | |
8693 | * preempt_schedule <- preempt_schedule | |
8694 | * preempt_schedule_notrace <- preempt_schedule_notrace | |
8695 | * irqentry_exit_cond_resched <- irqentry_exit_cond_resched | |
8696 | */ | |
8697 | ||
8698 | enum { | |
8699 | preempt_dynamic_undefined = -1, | |
8700 | preempt_dynamic_none, | |
8701 | preempt_dynamic_voluntary, | |
8702 | preempt_dynamic_full, | |
8703 | }; | |
8704 | ||
8705 | int preempt_dynamic_mode = preempt_dynamic_undefined; | |
8706 | ||
8707 | int sched_dynamic_mode(const char *str) | |
8708 | { | |
8709 | if (!strcmp(str, "none")) | |
8710 | return preempt_dynamic_none; | |
8711 | ||
8712 | if (!strcmp(str, "voluntary")) | |
8713 | return preempt_dynamic_voluntary; | |
8714 | ||
8715 | if (!strcmp(str, "full")) | |
8716 | return preempt_dynamic_full; | |
8717 | ||
8718 | return -EINVAL; | |
8719 | } | |
8720 | ||
99cf983c | 8721 | #if defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL) |
8a69fe0b MR |
8722 | #define preempt_dynamic_enable(f) static_call_update(f, f##_dynamic_enabled) |
8723 | #define preempt_dynamic_disable(f) static_call_update(f, f##_dynamic_disabled) | |
99cf983c MR |
8724 | #elif defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY) |
8725 | #define preempt_dynamic_enable(f) static_key_enable(&sk_dynamic_##f.key) | |
8726 | #define preempt_dynamic_disable(f) static_key_disable(&sk_dynamic_##f.key) | |
8727 | #else | |
8728 | #error "Unsupported PREEMPT_DYNAMIC mechanism" | |
8729 | #endif | |
8a69fe0b | 8730 | |
9b8e1781 | 8731 | static DEFINE_MUTEX(sched_dynamic_mutex); |
e3ff7c60 JP |
8732 | static bool klp_override; |
8733 | ||
8734 | static void __sched_dynamic_update(int mode) | |
4c748558 MR |
8735 | { |
8736 | /* | |
8737 | * Avoid {NONE,VOLUNTARY} -> FULL transitions from ever ending up in | |
8738 | * the ZERO state, which is invalid. | |
8739 | */ | |
e3ff7c60 JP |
8740 | if (!klp_override) |
8741 | preempt_dynamic_enable(cond_resched); | |
8a69fe0b MR |
8742 | preempt_dynamic_enable(might_resched); |
8743 | preempt_dynamic_enable(preempt_schedule); | |
8744 | preempt_dynamic_enable(preempt_schedule_notrace); | |
8745 | preempt_dynamic_enable(irqentry_exit_cond_resched); | |
4c748558 MR |
8746 | |
8747 | switch (mode) { | |
8748 | case preempt_dynamic_none: | |
e3ff7c60 JP |
8749 | if (!klp_override) |
8750 | preempt_dynamic_enable(cond_resched); | |
8a69fe0b MR |
8751 | preempt_dynamic_disable(might_resched); |
8752 | preempt_dynamic_disable(preempt_schedule); | |
8753 | preempt_dynamic_disable(preempt_schedule_notrace); | |
8754 | preempt_dynamic_disable(irqentry_exit_cond_resched); | |
e3ff7c60 JP |
8755 | if (mode != preempt_dynamic_mode) |
8756 | pr_info("Dynamic Preempt: none\n"); | |
4c748558 MR |
8757 | break; |
8758 | ||
8759 | case preempt_dynamic_voluntary: | |
e3ff7c60 JP |
8760 | if (!klp_override) |
8761 | preempt_dynamic_enable(cond_resched); | |
8a69fe0b MR |
8762 | preempt_dynamic_enable(might_resched); |
8763 | preempt_dynamic_disable(preempt_schedule); | |
8764 | preempt_dynamic_disable(preempt_schedule_notrace); | |
8765 | preempt_dynamic_disable(irqentry_exit_cond_resched); | |
e3ff7c60 JP |
8766 | if (mode != preempt_dynamic_mode) |
8767 | pr_info("Dynamic Preempt: voluntary\n"); | |
4c748558 MR |
8768 | break; |
8769 | ||
8770 | case preempt_dynamic_full: | |
e3ff7c60 JP |
8771 | if (!klp_override) |
8772 | preempt_dynamic_disable(cond_resched); | |
8a69fe0b MR |
8773 | preempt_dynamic_disable(might_resched); |
8774 | preempt_dynamic_enable(preempt_schedule); | |
8775 | preempt_dynamic_enable(preempt_schedule_notrace); | |
8776 | preempt_dynamic_enable(irqentry_exit_cond_resched); | |
e3ff7c60 JP |
8777 | if (mode != preempt_dynamic_mode) |
8778 | pr_info("Dynamic Preempt: full\n"); | |
4c748558 MR |
8779 | break; |
8780 | } | |
8781 | ||
8782 | preempt_dynamic_mode = mode; | |
8783 | } | |
8784 | ||
e3ff7c60 JP |
8785 | void sched_dynamic_update(int mode) |
8786 | { | |
8787 | mutex_lock(&sched_dynamic_mutex); | |
8788 | __sched_dynamic_update(mode); | |
8789 | mutex_unlock(&sched_dynamic_mutex); | |
8790 | } | |
8791 | ||
8792 | #ifdef CONFIG_HAVE_PREEMPT_DYNAMIC_CALL | |
8793 | ||
8794 | static int klp_cond_resched(void) | |
8795 | { | |
8796 | __klp_sched_try_switch(); | |
8797 | return __cond_resched(); | |
8798 | } | |
8799 | ||
8800 | void sched_dynamic_klp_enable(void) | |
8801 | { | |
8802 | mutex_lock(&sched_dynamic_mutex); | |
8803 | ||
8804 | klp_override = true; | |
8805 | static_call_update(cond_resched, klp_cond_resched); | |
8806 | ||
8807 | mutex_unlock(&sched_dynamic_mutex); | |
8808 | } | |
8809 | ||
8810 | void sched_dynamic_klp_disable(void) | |
8811 | { | |
8812 | mutex_lock(&sched_dynamic_mutex); | |
8813 | ||
8814 | klp_override = false; | |
8815 | __sched_dynamic_update(preempt_dynamic_mode); | |
8816 | ||
8817 | mutex_unlock(&sched_dynamic_mutex); | |
8818 | } | |
8819 | ||
8820 | #endif /* CONFIG_HAVE_PREEMPT_DYNAMIC_CALL */ | |
8821 | ||
4c748558 MR |
8822 | static int __init setup_preempt_mode(char *str) |
8823 | { | |
8824 | int mode = sched_dynamic_mode(str); | |
8825 | if (mode < 0) { | |
8826 | pr_warn("Dynamic Preempt: unsupported mode: %s\n", str); | |
8827 | return 0; | |
8828 | } | |
8829 | ||
8830 | sched_dynamic_update(mode); | |
8831 | return 1; | |
8832 | } | |
8833 | __setup("preempt=", setup_preempt_mode); | |
8834 | ||
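For reference, a sketch of how this surfaces to the user (assuming CONFIG_PREEMPT_DYNAMIC, and debugfs mounted at /sys/kernel/debug): the model is chosen at boot with the parameter parsed above, and can be switched at runtime through the scheduler debugfs knob.

	/*
	 * Boot:     ... preempt=none|voluntary|full ...
	 *
	 * Runtime:  # cat /sys/kernel/debug/sched/preempt
	 *           none (voluntary) full
	 *           # echo full > /sys/kernel/debug/sched/preempt
	 */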
8835 | static void __init preempt_dynamic_init(void) | |
8836 | { | |
8837 | if (preempt_dynamic_mode == preempt_dynamic_undefined) { | |
8838 | if (IS_ENABLED(CONFIG_PREEMPT_NONE)) { | |
8839 | sched_dynamic_update(preempt_dynamic_none); | |
8840 | } else if (IS_ENABLED(CONFIG_PREEMPT_VOLUNTARY)) { | |
8841 | sched_dynamic_update(preempt_dynamic_voluntary); | |
8842 | } else { | |
8843 | /* Default static call setting, nothing to do */ | |
8844 | WARN_ON_ONCE(!IS_ENABLED(CONFIG_PREEMPT)); | |
8845 | preempt_dynamic_mode = preempt_dynamic_full; | |
8846 | pr_info("Dynamic Preempt: full\n"); | |
8847 | } | |
8848 | } | |
8849 | } | |
8850 | ||
cfe43f47 VS |
8851 | #define PREEMPT_MODEL_ACCESSOR(mode) \ |
8852 | bool preempt_model_##mode(void) \ | |
8853 | { \ | |
8854 | WARN_ON_ONCE(preempt_dynamic_mode == preempt_dynamic_undefined); \ | |
8855 | return preempt_dynamic_mode == preempt_dynamic_##mode; \ | |
8856 | } \ | |
8857 | EXPORT_SYMBOL_GPL(preempt_model_##mode) | |
8858 | ||
8859 | PREEMPT_MODEL_ACCESSOR(none); | |
8860 | PREEMPT_MODEL_ACCESSOR(voluntary); | |
8861 | PREEMPT_MODEL_ACCESSOR(full); | |
8862 | ||
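For clarity, PREEMPT_MODEL_ACCESSOR(none) expands to roughly the following (whitespace aside):

	bool preempt_model_none(void)
	{
		WARN_ON_ONCE(preempt_dynamic_mode == preempt_dynamic_undefined);
		return preempt_dynamic_mode == preempt_dynamic_none;
	}
	EXPORT_SYMBOL_GPL(preempt_model_none);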
4c748558 MR |
8863 | #else /* !CONFIG_PREEMPT_DYNAMIC */ |
8864 | ||
8865 | static inline void preempt_dynamic_init(void) { } | |
8866 | ||
8867 | #endif /* #ifdef CONFIG_PREEMPT_DYNAMIC */ | |
8868 | ||
1da177e4 LT |
8869 | /** |
8870 | * yield - yield the current processor to other threads. | |
8871 | * | |
8e3fabfd PZ |
8872 | * Do not ever use this function, there's a 99% chance you're doing it wrong. |
8873 | * | |
8874 | * The scheduler is at all times free to pick the calling task as the most | |
8875 | * eligible task to run; if removing the yield() call from your code breaks
b19a888c | 8876 | * it, it's already broken. |
8e3fabfd PZ |
8877 | * |
8878 | * Typical broken usage is: | |
8879 | * | |
8880 | * while (!event) | |
d1ccc66d | 8881 | * yield(); |
8e3fabfd PZ |
8882 | * |
8883 | * where one assumes that yield() will let 'the other' process run that will | |
8884 | * make event true. If the current task is a SCHED_FIFO task that will never | |
8885 | * happen. Never use yield() as a progress guarantee!! | |
8886 | * | |
8887 | * If you want to use yield() to wait for something, use wait_event(). | |
8888 | * If you want to use yield() to be 'nice' for others, use cond_resched(). | |
8889 | * If you still want to use yield(), do not! | |
1da177e4 LT |
8890 | */ |
8891 | void __sched yield(void) | |
8892 | { | |
8893 | set_current_state(TASK_RUNNING); | |
7d4dd4f1 | 8894 | do_sched_yield(); |
1da177e4 | 8895 | } |
1da177e4 LT |
8896 | EXPORT_SYMBOL(yield); |
8897 | ||
d95f4122 MG |
8898 | /** |
8899 | * yield_to - yield the current processor to another thread in | |
8900 | * your thread group, or accelerate that thread toward the | |
8901 | * processor it's on. | |
16addf95 RD |
8902 | * @p: target task |
8903 | * @preempt: whether task preemption is allowed or not | |
d95f4122 MG |
8904 | * |
8905 | * It's the caller's job to ensure that the target task struct | |
8906 | * can't go away on us before we can do any checks. | |
8907 | * | |
e69f6186 | 8908 | * Return: |
7b270f60 PZ |
8909 | * true (>0) if we indeed boosted the target task. |
8910 | * false (0) if we failed to boost the target. | |
8911 | * -ESRCH if there's no task to yield to. | |
d95f4122 | 8912 | */ |
fa93384f | 8913 | int __sched yield_to(struct task_struct *p, bool preempt) |
d95f4122 MG |
8914 | { |
8915 | struct task_struct *curr = current; | |
8916 | struct rq *rq, *p_rq; | |
c3c18640 | 8917 | int yielded = 0; |
d95f4122 | 8918 | |
7a50f766 PZ |
8919 | scoped_guard (irqsave) { |
8920 | rq = this_rq(); | |
d95f4122 MG |
8921 | |
8922 | again: | |
7a50f766 PZ |
8923 | p_rq = task_rq(p); |
8924 | /* | |
8925 | * If we're the only runnable task on the rq and target rq also | |
8926 | * has only one task, there's absolutely no point in yielding. | |
8927 | */ | |
8928 | if (rq->nr_running == 1 && p_rq->nr_running == 1) | |
8929 | return -ESRCH; | |
7b270f60 | 8930 | |
7a50f766 PZ |
8931 | guard(double_rq_lock)(rq, p_rq); |
8932 | if (task_rq(p) != p_rq) | |
8933 | goto again; | |
d95f4122 | 8934 | |
7a50f766 PZ |
8935 | if (!curr->sched_class->yield_to_task) |
8936 | return 0; | |
d95f4122 | 8937 | |
7a50f766 PZ |
8938 | if (curr->sched_class != p->sched_class) |
8939 | return 0; | |
d95f4122 | 8940 | |
7a50f766 PZ |
8941 | if (task_on_cpu(p_rq, p) || !task_is_running(p)) |
8942 | return 0; | |
d95f4122 | 8943 | |
7a50f766 PZ |
8944 | yielded = curr->sched_class->yield_to_task(rq, p); |
8945 | if (yielded) { | |
8946 | schedstat_inc(rq->yld_count); | |
8947 | /* | |
8948 | * Make p's CPU reschedule; pick_next_entity | |
8949 | * takes care of fairness. | |
8950 | */ | |
8951 | if (preempt && rq != p_rq) | |
8952 | resched_curr(p_rq); | |
8953 | } | |
6d1cafd8 | 8954 | } |
d95f4122 | 8955 | |
7a50f766 | 8956 | if (yielded) |
d95f4122 MG |
8957 | schedule(); |
8958 | ||
8959 | return yielded; | |
8960 | } | |
8961 | EXPORT_SYMBOL_GPL(yield_to); | |
8962 | ||
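An illustrative caller pattern, as a sketch (find_candidate() is hypothetical): per the kernel-doc above, the target task struct must be pinned before calling in; KVM's spinning-vCPU boost follows this shape.

	struct task_struct *task;

	rcu_read_lock();
	task = find_candidate();		/* hypothetical lookup */
	if (task)
		get_task_struct(task);		/* pin across the call */
	rcu_read_unlock();

	if (task) {
		yield_to(task, false);
		put_task_struct(task);
	}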
10ab5643 TH |
8963 | int io_schedule_prepare(void) |
8964 | { | |
8965 | int old_iowait = current->in_iowait; | |
8966 | ||
8967 | current->in_iowait = 1; | |
aa8dccca | 8968 | blk_flush_plug(current->plug, true); |
10ab5643 TH |
8969 | return old_iowait; |
8970 | } | |
8971 | ||
8972 | void io_schedule_finish(int token) | |
8973 | { | |
8974 | current->in_iowait = token; | |
8975 | } | |
8976 | ||
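A sketch of the intended pairing: wrap any blocking primitive between the prepare/finish calls to have the sleep accounted as I/O wait; this is exactly what io_schedule() below does, and what helpers such as mutex_lock_io() build on (my_io_done is a hypothetical completion).

	int tok = io_schedule_prepare();	/* mark in_iowait, flush plugged I/O */

	wait_for_completion(&my_io_done);	/* any blocking primitive */
	io_schedule_finish(tok);		/* restore previous in_iowait */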
1da177e4 | 8977 | /* |
41a2d6cf | 8978 | * This task is about to go to sleep on IO. Increment rq->nr_iowait so |
1da177e4 | 8979 | * that process accounting knows that this is a task in IO wait state. |
1da177e4 | 8980 | */ |
1da177e4 LT |
8981 | long __sched io_schedule_timeout(long timeout) |
8982 | { | |
10ab5643 | 8983 | int token; |
1da177e4 LT |
8984 | long ret; |
8985 | ||
10ab5643 | 8986 | token = io_schedule_prepare(); |
1da177e4 | 8987 | ret = schedule_timeout(timeout); |
10ab5643 | 8988 | io_schedule_finish(token); |
9cff8ade | 8989 | |
1da177e4 LT |
8990 | return ret; |
8991 | } | |
9cff8ade | 8992 | EXPORT_SYMBOL(io_schedule_timeout); |
1da177e4 | 8993 | |
e3b929b0 | 8994 | void __sched io_schedule(void) |
10ab5643 TH |
8995 | { |
8996 | int token; | |
8997 | ||
8998 | token = io_schedule_prepare(); | |
8999 | schedule(); | |
9000 | io_schedule_finish(token); | |
9001 | } | |
9002 | EXPORT_SYMBOL(io_schedule); | |
9003 | ||
1da177e4 LT |
9004 | /** |
9005 | * sys_sched_get_priority_max - return maximum RT priority. | |
9006 | * @policy: scheduling class. | |
9007 | * | |
e69f6186 YB |
9008 | * Return: On success, this syscall returns the maximum |
9009 | * rt_priority that can be used by a given scheduling class. | |
9010 | * On failure, a negative error code is returned. | |
1da177e4 | 9011 | */ |
5add95d4 | 9012 | SYSCALL_DEFINE1(sched_get_priority_max, int, policy) |
1da177e4 LT |
9013 | { |
9014 | int ret = -EINVAL; | |
9015 | ||
9016 | switch (policy) { | |
9017 | case SCHED_FIFO: | |
9018 | case SCHED_RR: | |
ae18ad28 | 9019 | ret = MAX_RT_PRIO-1; |
1da177e4 | 9020 | break; |
aab03e05 | 9021 | case SCHED_DEADLINE: |
1da177e4 | 9022 | case SCHED_NORMAL: |
b0a9499c | 9023 | case SCHED_BATCH: |
dd41f596 | 9024 | case SCHED_IDLE: |
1da177e4 LT |
9025 | ret = 0; |
9026 | break; | |
9027 | } | |
9028 | return ret; | |
9029 | } | |
9030 | ||
9031 | /** | |
9032 | * sys_sched_get_priority_min - return minimum RT priority. | |
9033 | * @policy: scheduling class. | |
9034 | * | |
e69f6186 YB |
9035 | * Return: On success, this syscall returns the minimum |
9036 | * rt_priority that can be used by a given scheduling class. | |
9037 | * On failure, a negative error code is returned. | |
1da177e4 | 9038 | */ |
5add95d4 | 9039 | SYSCALL_DEFINE1(sched_get_priority_min, int, policy) |
1da177e4 LT |
9040 | { |
9041 | int ret = -EINVAL; | |
9042 | ||
9043 | switch (policy) { | |
9044 | case SCHED_FIFO: | |
9045 | case SCHED_RR: | |
9046 | ret = 1; | |
9047 | break; | |
aab03e05 | 9048 | case SCHED_DEADLINE: |
1da177e4 | 9049 | case SCHED_NORMAL: |
b0a9499c | 9050 | case SCHED_BATCH: |
dd41f596 | 9051 | case SCHED_IDLE: |
1da177e4 LT |
9052 | ret = 0; |
9053 | } | |
9054 | return ret; | |
9055 | } | |
9056 | ||
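From userspace, the same pair of syscalls bounds the valid static priority range; a minimal sketch (on Linux the SCHED_FIFO range is typically [1, 99]):

	#include <sched.h>
	#include <stdio.h>

	int main(void)
	{
		int lo = sched_get_priority_min(SCHED_FIFO);
		int hi = sched_get_priority_max(SCHED_FIFO);

		printf("SCHED_FIFO priorities: %d..%d\n", lo, hi);
		return 0;
	}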
abca5fc5 | 9057 | static int sched_rr_get_interval(pid_t pid, struct timespec64 *t) |
1da177e4 | 9058 | { |
af7c5763 | 9059 | unsigned int time_slice = 0; |
3a5c359a | 9060 | int retval; |
1da177e4 LT |
9061 | |
9062 | if (pid < 0) | |
3a5c359a | 9063 | return -EINVAL; |
1da177e4 | 9064 | |
af7c5763 PZ |
9065 | scoped_guard (rcu) { |
9066 | struct task_struct *p = find_process_by_pid(pid); | |
9067 | if (!p) | |
9068 | return -ESRCH; | |
1da177e4 | 9069 | |
af7c5763 PZ |
9070 | retval = security_task_getscheduler(p); |
9071 | if (retval) | |
9072 | return retval; | |
1da177e4 | 9073 | |
af7c5763 PZ |
9074 | scoped_guard (task_rq_lock, p) { |
9075 | struct rq *rq = scope.rq; | |
9076 | if (p->sched_class->get_rr_interval) | |
9077 | time_slice = p->sched_class->get_rr_interval(rq, p); | |
9078 | } | |
9079 | } | |
a4ec24b4 | 9080 | |
abca5fc5 AV |
9081 | jiffies_to_timespec64(time_slice, t); |
9082 | return 0; | |
1da177e4 LT |
9083 | } |
9084 | ||
2064a5ab RD |
9085 | /** |
9086 | * sys_sched_rr_get_interval - return the default timeslice of a process. | |
9087 | * @pid: pid of the process. | |
9088 | * @interval: userspace pointer to the timeslice value. | |
9089 | * | |
9090 | * This syscall writes the default timeslice value of a given process
9091 | * into the user-space timespec buffer. A value of '0' means infinity. | |
9092 | * | |
9093 | * Return: On success, 0 and the timeslice is in @interval. Otherwise, | |
9094 | * an error code. | |
9095 | */ | |
abca5fc5 | 9096 | SYSCALL_DEFINE2(sched_rr_get_interval, pid_t, pid, |
474b9c77 | 9097 | struct __kernel_timespec __user *, interval) |
abca5fc5 AV |
9098 | { |
9099 | struct timespec64 t; | |
9100 | int retval = sched_rr_get_interval(pid, &t); | |
9101 | ||
9102 | if (retval == 0) | |
9103 | retval = put_timespec64(&t, interval); | |
9104 | ||
9105 | return retval; | |
9106 | } | |
9107 | ||
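The userspace view, as a sketch: querying the round-robin timeslice of the calling task (pid 0); per the kernel-doc above, an all-zero result means the policy has no finite timeslice.

	#include <sched.h>
	#include <stdio.h>
	#include <time.h>

	int main(void)
	{
		struct timespec ts;

		if (sched_rr_get_interval(0, &ts) == 0)
			printf("timeslice: %ld.%09ld s\n",
			       (long)ts.tv_sec, ts.tv_nsec);
		return 0;
	}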
474b9c77 | 9108 | #ifdef CONFIG_COMPAT_32BIT_TIME |
8dabe724 AB |
9109 | SYSCALL_DEFINE2(sched_rr_get_interval_time32, pid_t, pid, |
9110 | struct old_timespec32 __user *, interval) | |
abca5fc5 AV |
9111 | { |
9112 | struct timespec64 t; | |
9113 | int retval = sched_rr_get_interval(pid, &t); | |
9114 | ||
9115 | if (retval == 0) | |
9afc5eee | 9116 | retval = put_old_timespec32(&t, interval); |
abca5fc5 AV |
9117 | return retval; |
9118 | } | |
9119 | #endif | |
9120 | ||
82a1fcb9 | 9121 | void sched_show_task(struct task_struct *p) |
1da177e4 | 9122 | { |
1da177e4 | 9123 | unsigned long free = 0; |
4e79752c | 9124 | int ppid; |
c930b2c0 | 9125 | |
38200502 TH |
9126 | if (!try_get_task_stack(p)) |
9127 | return; | |
20435d84 | 9128 | |
cc172ff3 | 9129 | pr_info("task:%-15.15s state:%c", p->comm, task_state_to_char(p)); |
20435d84 | 9130 | |
b03fbd4f | 9131 | if (task_is_running(p)) |
cc172ff3 | 9132 | pr_cont(" running task "); |
1da177e4 | 9133 | #ifdef CONFIG_DEBUG_STACK_USAGE |
7c9f8861 | 9134 | free = stack_not_used(p); |
1da177e4 | 9135 | #endif |
a90e984c | 9136 | ppid = 0; |
4e79752c | 9137 | rcu_read_lock(); |
a90e984c ON |
9138 | if (pid_alive(p)) |
9139 | ppid = task_pid_nr(rcu_dereference(p->real_parent)); | |
4e79752c | 9140 | rcu_read_unlock(); |
bc87127a YD |
9141 | pr_cont(" stack:%-5lu pid:%-5d tgid:%-5d ppid:%-6d flags:0x%08lx\n", |
9142 | free, task_pid_nr(p), task_tgid_nr(p), | |
9143 | ppid, read_task_thread_flags(p)); | |
1da177e4 | 9144 | |
3d1cb205 | 9145 | print_worker_info(KERN_INFO, p); |
a8b62fd0 | 9146 | print_stop_info(KERN_INFO, p); |
9cb8f069 | 9147 | show_stack(p, NULL, KERN_INFO); |
38200502 | 9148 | put_task_stack(p); |
1da177e4 | 9149 | } |
0032f4e8 | 9150 | EXPORT_SYMBOL_GPL(sched_show_task); |
1da177e4 | 9151 | |
5d68cc95 PZ |
9152 | static inline bool |
9153 | state_filter_match(unsigned long state_filter, struct task_struct *p) | |
9154 | { | |
2f064a59 PZ |
9155 | unsigned int state = READ_ONCE(p->__state); |
9156 | ||
5d68cc95 PZ |
9157 | /* no filter, everything matches */ |
9158 | if (!state_filter) | |
9159 | return true; | |
9160 | ||
9161 | /* filter, but doesn't match */ | |
2f064a59 | 9162 | if (!(state & state_filter)) |
5d68cc95 PZ |
9163 | return false; |
9164 | ||
9165 | /* | |
9166 | * When looking for TASK_UNINTERRUPTIBLE skip TASK_IDLE (allows | |
9167 | * TASK_KILLABLE). | |
9168 | */ | |
5aec788a | 9169 | if (state_filter == TASK_UNINTERRUPTIBLE && (state & TASK_NOLOAD)) |
5d68cc95 PZ |
9170 | return false; |
9171 | ||
9172 | return true; | |
9173 | } | |
9174 | ||
9175 | ||
2f064a59 | 9176 | void show_state_filter(unsigned int state_filter) |
1da177e4 | 9177 | { |
36c8b586 | 9178 | struct task_struct *g, *p; |
1da177e4 | 9179 | |
510f5acc | 9180 | rcu_read_lock(); |
5d07f420 | 9181 | for_each_process_thread(g, p) { |
1da177e4 LT |
9182 | /* |
9183 | * reset the NMI-timeout, listing all tasks on a slow
25985edc | 9184 | * console might take a lot of time: |
57675cb9 AR |
9185 | * Also, reset softlockup watchdogs on all CPUs, because |
9186 | * another CPU might be blocked waiting for us to process | |
9187 | * an IPI. | |
1da177e4 LT |
9188 | */ |
9189 | touch_nmi_watchdog(); | |
57675cb9 | 9190 | touch_all_softlockup_watchdogs(); |
5d68cc95 | 9191 | if (state_filter_match(state_filter, p)) |
82a1fcb9 | 9192 | sched_show_task(p); |
5d07f420 | 9193 | } |
1da177e4 | 9194 | |
dd41f596 | 9195 | #ifdef CONFIG_SCHED_DEBUG |
fb90a6e9 RV |
9196 | if (!state_filter) |
9197 | sysrq_sched_debug_show(); | |
dd41f596 | 9198 | #endif |
510f5acc | 9199 | rcu_read_unlock(); |
e59e2ae2 IM |
9200 | /* |
9201 | * Only show locks if all tasks are dumped: | |
9202 | */ | |
93335a21 | 9203 | if (!state_filter) |
e59e2ae2 | 9204 | debug_show_all_locks(); |
1da177e4 LT |
9205 | } |
9206 | ||
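Operationally, this path backs the SysRq task dump; a sketch, assuming CONFIG_MAGIC_SYSRQ and a root shell:

	/*
	 * # echo t > /proc/sysrq-trigger
	 *
	 * dumps every task via show_state(), i.e. show_state_filter(0).
	 */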
f340c0d1 IM |
9207 | /** |
9208 | * init_idle - set up an idle thread for a given CPU | |
9209 | * @idle: task in question | |
d1ccc66d | 9210 | * @cpu: CPU the idle task belongs to |
f340c0d1 IM |
9211 | * |
9212 | * NOTE: this function does not set the idle thread's NEED_RESCHED | |
9213 | * flag, to make booting more robust. | |
9214 | */ | |
f1a0a376 | 9215 | void __init init_idle(struct task_struct *idle, int cpu) |
1da177e4 | 9216 | { |
713a2e21 WL |
9217 | #ifdef CONFIG_SMP |
9218 | struct affinity_context ac = (struct affinity_context) { | |
9219 | .new_mask = cpumask_of(cpu), | |
9220 | .flags = 0, | |
9221 | }; | |
9222 | #endif | |
70b97a7f | 9223 | struct rq *rq = cpu_rq(cpu); |
1da177e4 LT |
9224 | unsigned long flags; |
9225 | ||
ff51ff84 PZ |
9226 | __sched_fork(0, idle); |
9227 | ||
25834c73 | 9228 | raw_spin_lock_irqsave(&idle->pi_lock, flags); |
5cb9eaa3 | 9229 | raw_spin_rq_lock(rq); |
5cbd54ef | 9230 | |
2f064a59 | 9231 | idle->__state = TASK_RUNNING; |
dd41f596 | 9232 | idle->se.exec_start = sched_clock(); |
00b89fe0 VS |
9233 | /* |
9234 | * PF_KTHREAD should already be set at this point; regardless, make it | |
9235 | * look like a proper per-CPU kthread. | |
9236 | */ | |
cff9b233 | 9237 | idle->flags |= PF_KTHREAD | PF_NO_SETAFFINITY; |
00b89fe0 | 9238 | kthread_set_per_cpu(idle, cpu); |
dd41f596 | 9239 | |
de9b8f5d PZ |
9240 | #ifdef CONFIG_SMP |
9241 | /* | |
b19a888c | 9242 | * It's possible that init_idle() gets called multiple times on a task;
de9b8f5d PZ |
9243 | * in that case do_set_cpus_allowed() will not do the right thing. |
9244 | * | |
9245 | * And since this is boot we can forgo the serialization. | |
9246 | */ | |
713a2e21 | 9247 | set_cpus_allowed_common(idle, &ac); |
de9b8f5d | 9248 | #endif |
6506cf6c PZ |
9249 | /* |
9250 | * We're having a chicken and egg problem: even though we are
d1ccc66d | 9251 | * holding rq->lock, the CPU isn't yet set to this CPU so the |
6506cf6c PZ |
9252 | * lockdep check in task_group() will fail. |
9253 | * | |
9254 | * Similar case to sched_fork(). / Alternatively we could | |
9255 | * use task_rq_lock() here and obtain the other rq->lock. | |
9256 | * | |
9257 | * Silence PROVE_RCU | |
9258 | */ | |
9259 | rcu_read_lock(); | |
dd41f596 | 9260 | __set_task_cpu(idle, cpu); |
6506cf6c | 9261 | rcu_read_unlock(); |
1da177e4 | 9262 | |
5311a98f EB |
9263 | rq->idle = idle; |
9264 | rcu_assign_pointer(rq->curr, idle); | |
da0c1e65 | 9265 | idle->on_rq = TASK_ON_RQ_QUEUED; |
de9b8f5d | 9266 | #ifdef CONFIG_SMP |
3ca7a440 | 9267 | idle->on_cpu = 1; |
4866cde0 | 9268 | #endif |
5cb9eaa3 | 9269 | raw_spin_rq_unlock(rq); |
25834c73 | 9270 | raw_spin_unlock_irqrestore(&idle->pi_lock, flags); |
1da177e4 LT |
9271 | |
9272 | /* Set the preempt count _outside_ the spinlocks! */ | |
01028747 | 9273 | init_idle_preempt_count(idle, cpu); |
55cd5340 | 9274 | |
dd41f596 IM |
9275 | /* |
9276 | * The idle tasks have their own, simple scheduling class: | |
9277 | */ | |
9278 | idle->sched_class = &idle_sched_class; | |
868baf07 | 9279 | ftrace_graph_init_idle_task(idle, cpu); |
45eacc69 | 9280 | vtime_init_idle(idle, cpu); |
de9b8f5d | 9281 | #ifdef CONFIG_SMP |
f1c6f1a7 CE |
9282 | sprintf(idle->comm, "%s/%d", INIT_TASK_COMM, cpu); |
9283 | #endif | |
19978ca6 IM |
9284 | } |
9285 | ||
e1d4eeec NP |
9286 | #ifdef CONFIG_SMP |
9287 | ||
f82f8042 JL |
9288 | int cpuset_cpumask_can_shrink(const struct cpumask *cur, |
9289 | const struct cpumask *trial) | |
9290 | { | |
06a76fe0 | 9291 | int ret = 1; |
f82f8042 | 9292 | |
1087ad4e | 9293 | if (cpumask_empty(cur)) |
bb2bc55a MG |
9294 | return ret; |
9295 | ||
06a76fe0 | 9296 | ret = dl_cpuset_cpumask_can_shrink(cur, trial); |
f82f8042 JL |
9297 | |
9298 | return ret; | |
9299 | } | |
9300 | ||
2ef269ef | 9301 | int task_can_attach(struct task_struct *p) |
7f51412a JL |
9302 | { |
9303 | int ret = 0; | |
9304 | ||
9305 | /* | |
9306 | * Kthreads which disallow setaffinity shouldn't be moved | |
d1ccc66d | 9307 | * to a new cpuset; we don't want to change their CPU |
7f51412a JL |
9308 | * affinity and isolating such threads by their set of |
9309 | * allowed nodes is unnecessary. Thus, cpusets are not | |
9310 | * applicable for such threads. This prevents checking for | |
9311 | * success of set_cpus_allowed_ptr() on all attached tasks | |
3bd37062 | 9312 | * before cpus_mask may be changed. |
7f51412a | 9313 | */ |
2ef269ef | 9314 | if (p->flags & PF_NO_SETAFFINITY) |
7f51412a | 9315 | ret = -EINVAL; |
7f51412a | 9316 | |
7f51412a JL |
9317 | return ret; |
9318 | } | |
9319 | ||
f2cb1360 | 9320 | bool sched_smp_initialized __read_mostly; |
e26fbffd | 9321 | |
e6628d5b MG |
9322 | #ifdef CONFIG_NUMA_BALANCING |
9323 | /* Migrate current task p to target_cpu */ | |
9324 | int migrate_task_to(struct task_struct *p, int target_cpu) | |
9325 | { | |
9326 | struct migration_arg arg = { p, target_cpu }; | |
9327 | int curr_cpu = task_cpu(p); | |
9328 | ||
9329 | if (curr_cpu == target_cpu) | |
9330 | return 0; | |
9331 | ||
3bd37062 | 9332 | if (!cpumask_test_cpu(target_cpu, p->cpus_ptr)) |
e6628d5b MG |
9333 | return -EINVAL; |
9334 | ||
9335 | /* TODO: This is not properly updating schedstats */ | |
9336 | ||
286549dc | 9337 | trace_sched_move_numa(p, curr_cpu, target_cpu); |
e6628d5b MG |
9338 | return stop_one_cpu(curr_cpu, migration_cpu_stop, &arg); |
9339 | } | |
0ec8aa00 PZ |
9340 | |
9341 | /* | |
9342 | * Requeue a task on a given node and accurately track the number of NUMA | |
9343 | * tasks on the runqueues | |
9344 | */ | |
9345 | void sched_setnuma(struct task_struct *p, int nid) | |
9346 | { | |
da0c1e65 | 9347 | bool queued, running; |
eb580751 PZ |
9348 | struct rq_flags rf; |
9349 | struct rq *rq; | |
0ec8aa00 | 9350 | |
eb580751 | 9351 | rq = task_rq_lock(p, &rf); |
da0c1e65 | 9352 | queued = task_on_rq_queued(p); |
0ec8aa00 PZ |
9353 | running = task_current(rq, p); |
9354 | ||
da0c1e65 | 9355 | if (queued) |
1de64443 | 9356 | dequeue_task(rq, p, DEQUEUE_SAVE); |
0ec8aa00 | 9357 | if (running) |
f3cd1c4e | 9358 | put_prev_task(rq, p); |
0ec8aa00 PZ |
9359 | |
9360 | p->numa_preferred_nid = nid; | |
0ec8aa00 | 9361 | |
da0c1e65 | 9362 | if (queued) |
7134b3e9 | 9363 | enqueue_task(rq, p, ENQUEUE_RESTORE | ENQUEUE_NOCLOCK); |
a399d233 | 9364 | if (running) |
03b7fad1 | 9365 | set_next_task(rq, p); |
eb580751 | 9366 | task_rq_unlock(rq, p, &rf); |
0ec8aa00 | 9367 | } |
5cc389bc | 9368 | #endif /* CONFIG_NUMA_BALANCING */ |
f7b4cddc | 9369 | |
1da177e4 | 9370 | #ifdef CONFIG_HOTPLUG_CPU |
054b9108 | 9371 | /* |
d1ccc66d | 9372 | * Ensure that the idle task is using init_mm right before its CPU goes |
48c5ccae | 9373 | * offline. |
054b9108 | 9374 | */ |
48c5ccae | 9375 | void idle_task_exit(void) |
1da177e4 | 9376 | { |
48c5ccae | 9377 | struct mm_struct *mm = current->active_mm; |
e76bd8d9 | 9378 | |
48c5ccae | 9379 | BUG_ON(cpu_online(smp_processor_id())); |
bf2c59fc | 9380 | BUG_ON(current != this_rq()->idle); |
e76bd8d9 | 9381 | |
a53efe5f | 9382 | if (mm != &init_mm) { |
252d2a41 | 9383 | switch_mm(mm, &init_mm, current); |
a53efe5f MS |
9384 | finish_arch_post_lock_switch(); |
9385 | } | |
bf2c59fc PZ |
9386 | |
9387 | /* finish_cpu(), as ran on the BP, will clean up the active_mm state */ | |
1da177e4 LT |
9388 | } |
9389 | ||
2558aacf | 9390 | static int __balance_push_cpu_stop(void *arg) |
1da177e4 | 9391 | { |
2558aacf PZ |
9392 | struct task_struct *p = arg; |
9393 | struct rq *rq = this_rq(); | |
9394 | struct rq_flags rf; | |
9395 | int cpu; | |
1da177e4 | 9396 | |
2558aacf PZ |
9397 | raw_spin_lock_irq(&p->pi_lock); |
9398 | rq_lock(rq, &rf); | |
3f1d2a31 | 9399 | |
2558aacf PZ |
9400 | update_rq_clock(rq); |
9401 | ||
9402 | if (task_rq(p) == rq && task_on_rq_queued(p)) { | |
9403 | cpu = select_fallback_rq(rq->cpu, p); | |
9404 | rq = __migrate_task(rq, &rf, p, cpu); | |
10e7071b | 9405 | } |
3f1d2a31 | 9406 | |
2558aacf PZ |
9407 | rq_unlock(rq, &rf); |
9408 | raw_spin_unlock_irq(&p->pi_lock); | |
9409 | ||
9410 | put_task_struct(p); | |
9411 | ||
9412 | return 0; | |
10e7071b | 9413 | } |
3f1d2a31 | 9414 | |
2558aacf PZ |
9415 | static DEFINE_PER_CPU(struct cpu_stop_work, push_work); |
9416 | ||
48f24c4d | 9417 | /* |
2558aacf | 9418 | * Ensure we only run per-cpu kthreads once the CPU goes !active. |
b5c44773 PZ |
9419 | * |
9420 | * This is enabled below SCHED_AP_ACTIVE, i.e. when !cpu_active(), but is
9421 | * only effective while the hotplug motion is downward.
1da177e4 | 9422 | */ |
2558aacf | 9423 | static void balance_push(struct rq *rq) |
1da177e4 | 9424 | { |
2558aacf PZ |
9425 | struct task_struct *push_task = rq->curr; |
9426 | ||
5cb9eaa3 | 9427 | lockdep_assert_rq_held(rq); |
b5c44773 | 9428 | |
ae792702 PZ |
9429 | /* |
9430 | * Ensure the thing is persistent until balance_push_set(.on = false); | |
9431 | */ | |
9432 | rq->balance_callback = &balance_push_callback; | |
1da177e4 | 9433 | |
b5c44773 | 9434 | /* |
868ad33b TG |
9435 | * Only active while going offline and when invoked on the outgoing |
9436 | * CPU. | |
b5c44773 | 9437 | */ |
868ad33b | 9438 | if (!cpu_dying(rq->cpu) || rq != this_rq()) |
b5c44773 PZ |
9439 | return; |
9440 | ||
1da177e4 | 9441 | /* |
2558aacf PZ |
9442 | * Both the cpu-hotplug thread and the stop task fall into this case and
9443 | * are required to complete the hotplug process.
1da177e4 | 9444 | */ |
00b89fe0 | 9445 | if (kthread_is_per_cpu(push_task) || |
5ba2ffba PZ |
9446 | is_migration_disabled(push_task)) { |
9447 | ||
f2469a1f TG |
9448 | /* |
9449 | * If this is the idle task on the outgoing CPU try to wake | |
9450 | * up the hotplug control thread which might wait for the | |
9451 | * last task to vanish. The rcuwait_active() check is | |
9452 | * accurate here because the waiter is pinned on this CPU | |
9453 | * and obviously can't be running in parallel.
3015ef4b TG |
9454 | * |
9455 | * On RT kernels this also has to check whether there are | |
9456 | * pinned and scheduled out tasks on the runqueue. They | |
9457 | * need to leave the migrate disabled section first. | |
f2469a1f | 9458 | */ |
3015ef4b TG |
9459 | if (!rq->nr_running && !rq_has_pinned_tasks(rq) && |
9460 | rcuwait_active(&rq->hotplug_wait)) { | |
5cb9eaa3 | 9461 | raw_spin_rq_unlock(rq); |
f2469a1f | 9462 | rcuwait_wake_up(&rq->hotplug_wait); |
5cb9eaa3 | 9463 | raw_spin_rq_lock(rq); |
f2469a1f | 9464 | } |
2558aacf | 9465 | return; |
f2469a1f | 9466 | } |
48f24c4d | 9467 | |
2558aacf | 9468 | get_task_struct(push_task); |
77bd3970 | 9469 | /* |
2558aacf PZ |
9470 | * Temporarily drop rq->lock such that we can wake-up the stop task. |
9471 | * Both preemption and IRQs are still disabled. | |
77bd3970 | 9472 | */ |
f0498d2a | 9473 | preempt_disable(); |
5cb9eaa3 | 9474 | raw_spin_rq_unlock(rq); |
2558aacf PZ |
9475 | stop_one_cpu_nowait(rq->cpu, __balance_push_cpu_stop, push_task, |
9476 | this_cpu_ptr(&push_work)); | |
f0498d2a | 9477 | preempt_enable(); |
2558aacf PZ |
9478 | /* |
9479 | * At this point need_resched() is true and we'll take the loop in | |
9480 | * schedule(). The next pick is obviously going to be the stop task | |
5ba2ffba | 9481 | * which is kthread_is_per_cpu() and will push this task away.
2558aacf | 9482 | */ |
5cb9eaa3 | 9483 | raw_spin_rq_lock(rq); |
2558aacf | 9484 | } |
77bd3970 | 9485 | |
2558aacf PZ |
9486 | static void balance_push_set(int cpu, bool on) |
9487 | { | |
9488 | struct rq *rq = cpu_rq(cpu); | |
9489 | struct rq_flags rf; | |
48c5ccae | 9490 | |
2558aacf | 9491 | rq_lock_irqsave(rq, &rf); |
22f667c9 PZ |
9492 | if (on) { |
9493 | WARN_ON_ONCE(rq->balance_callback); | |
ae792702 | 9494 | rq->balance_callback = &balance_push_callback; |
22f667c9 | 9495 | } else if (rq->balance_callback == &balance_push_callback) { |
ae792702 | 9496 | rq->balance_callback = NULL; |
22f667c9 | 9497 | } |
2558aacf PZ |
9498 | rq_unlock_irqrestore(rq, &rf); |
9499 | } | |
e692ab53 | 9500 | |
f2469a1f TG |
9501 | /* |
9502 | * Invoked from a CPU's hotplug control thread after the CPU has been marked
9503 | * inactive. All tasks which are not per CPU kernel threads are either | |
9504 | * pushed off this CPU now via balance_push() or placed on a different CPU | |
9505 | * during wakeup. Wait until the CPU is quiescent. | |
9506 | */ | |
9507 | static void balance_hotplug_wait(void) | |
9508 | { | |
9509 | struct rq *rq = this_rq(); | |
5473e0cc | 9510 | |
3015ef4b TG |
9511 | rcuwait_wait_event(&rq->hotplug_wait, |
9512 | rq->nr_running == 1 && !rq_has_pinned_tasks(rq), | |
f2469a1f TG |
9513 | TASK_UNINTERRUPTIBLE); |
9514 | } | |
5473e0cc | 9515 | |
2558aacf | 9516 | #else |
dce48a84 | 9517 | |
2558aacf PZ |
9518 | static inline void balance_push(struct rq *rq) |
9519 | { | |
dce48a84 | 9520 | } |
dce48a84 | 9521 | |
2558aacf PZ |
9522 | static inline void balance_push_set(int cpu, bool on) |
9523 | { | |
9524 | } | |
9525 | ||
f2469a1f TG |
9526 | static inline void balance_hotplug_wait(void) |
9527 | { | |
dce48a84 | 9528 | } |
f2469a1f | 9529 | |
1da177e4 LT |
9530 | #endif /* CONFIG_HOTPLUG_CPU */ |
9531 | ||
f2cb1360 | 9532 | void set_rq_online(struct rq *rq) |
1f11eb6a GH |
9533 | { |
9534 | if (!rq->online) { | |
9535 | const struct sched_class *class; | |
9536 | ||
c6c4927b | 9537 | cpumask_set_cpu(rq->cpu, rq->rd->online); |
1f11eb6a GH |
9538 | rq->online = 1; |
9539 | ||
9540 | for_each_class(class) { | |
9541 | if (class->rq_online) | |
9542 | class->rq_online(rq); | |
9543 | } | |
9544 | } | |
9545 | } | |
9546 | ||
f2cb1360 | 9547 | void set_rq_offline(struct rq *rq) |
1f11eb6a GH |
9548 | { |
9549 | if (rq->online) { | |
9550 | const struct sched_class *class; | |
9551 | ||
cab3ecae | 9552 | update_rq_clock(rq); |
1f11eb6a GH |
9553 | for_each_class(class) { |
9554 | if (class->rq_offline) | |
9555 | class->rq_offline(rq); | |
9556 | } | |
9557 | ||
c6c4927b | 9558 | cpumask_clear_cpu(rq->cpu, rq->rd->online); |
1f11eb6a GH |
9559 | rq->online = 0; |
9560 | } | |
9561 | } | |
9562 | ||
d1ccc66d IM |
9563 | /* |
9564 | * used to mark begin/end of suspend/resume: | |
9565 | */ | |
9566 | static int num_cpus_frozen; | |
d35be8ba | 9567 | |
1da177e4 | 9568 | /* |
3a101d05 TH |
9569 | * Update cpusets according to cpu_active mask. If cpusets are |
9570 | * disabled, cpuset_update_active_cpus() becomes a simple wrapper | |
9571 | * around partition_sched_domains(). | |
d35be8ba SB |
9572 | * |
9573 | * If we come here as part of a suspend/resume, don't touch cpusets because we
9574 | * want to restore them to their original state upon resume anyway.
1da177e4 | 9575 | */ |
40190a78 | 9576 | static void cpuset_cpu_active(void) |
e761b772 | 9577 | { |
40190a78 | 9578 | if (cpuhp_tasks_frozen) { |
d35be8ba SB |
9579 | /* |
9580 | * num_cpus_frozen tracks how many CPUs are involved in suspend | |
9581 | * resume sequence. As long as this is not the last online | |
9582 | * operation in the resume sequence, just build a single sched | |
9583 | * domain, ignoring cpusets. | |
9584 | */ | |
50e76632 PZ |
9585 | partition_sched_domains(1, NULL, NULL); |
9586 | if (--num_cpus_frozen) | |
135fb3e1 | 9587 | return; |
d35be8ba SB |
9588 | /* |
9589 | * This is the last CPU online operation. So fall through and | |
9590 | * restore the original sched domains by considering the | |
9591 | * cpuset configurations. | |
9592 | */ | |
50e76632 | 9593 | cpuset_force_rebuild(); |
3a101d05 | 9594 | } |
30e03acd | 9595 | cpuset_update_active_cpus(); |
3a101d05 | 9596 | } |
e761b772 | 9597 | |
40190a78 | 9598 | static int cpuset_cpu_inactive(unsigned int cpu) |
3a101d05 | 9599 | { |
40190a78 | 9600 | if (!cpuhp_tasks_frozen) { |
85989106 | 9601 | int ret = dl_bw_check_overflow(cpu); |
772b6539 DE |
9602 | |
9603 | if (ret) | |
9604 | return ret; | |
30e03acd | 9605 | cpuset_update_active_cpus(); |
135fb3e1 | 9606 | } else { |
d35be8ba SB |
9607 | num_cpus_frozen++; |
9608 | partition_sched_domains(1, NULL, NULL); | |
e761b772 | 9609 | } |
135fb3e1 | 9610 | return 0; |
e761b772 | 9611 | } |
e761b772 | 9612 | |
40190a78 | 9613 | int sched_cpu_activate(unsigned int cpu) |
135fb3e1 | 9614 | { |
7d976699 | 9615 | struct rq *rq = cpu_rq(cpu); |
8a8c69c3 | 9616 | struct rq_flags rf; |
7d976699 | 9617 | |
22f667c9 | 9618 | /* |
b5c44773 PZ |
9619 | * Clear the balance_push callback and prepare to schedule |
9620 | * regular tasks. | |
22f667c9 | 9621 | */ |
2558aacf PZ |
9622 | balance_push_set(cpu, false); |
9623 | ||
ba2591a5 PZ |
9624 | #ifdef CONFIG_SCHED_SMT |
9625 | /* | |
c5511d03 | 9626 | * When going up, increment the number of cores with SMT present. |
ba2591a5 | 9627 | */ |
c5511d03 PZI |
9628 | if (cpumask_weight(cpu_smt_mask(cpu)) == 2) |
9629 | static_branch_inc_cpuslocked(&sched_smt_present); | |
ba2591a5 | 9630 | #endif |
40190a78 | 9631 | set_cpu_active(cpu, true); |
135fb3e1 | 9632 | |
40190a78 | 9633 | if (sched_smp_initialized) { |
0fb3978b | 9634 | sched_update_numa(cpu, true); |
135fb3e1 | 9635 | sched_domains_numa_masks_set(cpu); |
40190a78 | 9636 | cpuset_cpu_active(); |
e761b772 | 9637 | } |
7d976699 TG |
9638 | |
9639 | /* | |
9640 | * Put the rq online, if not already. This happens: | |
9641 | * | |
9642 | * 1) In the early boot process, because we build the real domains | |
d1ccc66d | 9643 | * after all CPUs have been brought up. |
7d976699 TG |
9644 | * |
9645 | * 2) At runtime, if cpuset_cpu_active() fails to rebuild the | |
9646 | * domains. | |
9647 | */ | |
8a8c69c3 | 9648 | rq_lock_irqsave(rq, &rf); |
7d976699 TG |
9649 | if (rq->rd) { |
9650 | BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span)); | |
9651 | set_rq_online(rq); | |
9652 | } | |
8a8c69c3 | 9653 | rq_unlock_irqrestore(rq, &rf); |
7d976699 | 9654 | |
40190a78 | 9655 | return 0; |
135fb3e1 TG |
9656 | } |
9657 | ||
40190a78 | 9658 | int sched_cpu_deactivate(unsigned int cpu) |
135fb3e1 | 9659 | { |
120455c5 PZ |
9660 | struct rq *rq = cpu_rq(cpu); |
9661 | struct rq_flags rf; | |
135fb3e1 TG |
9662 | int ret; |
9663 | ||
e0b257c3 AMB |
9664 | /* |
9665 | * Remove the CPU from nohz.idle_cpus_mask to prevent it from
9666 | * participating in load balancing while not active.
9667 | */ | |
9668 | nohz_balance_exit_idle(rq); | |
9669 | ||
40190a78 | 9670 | set_cpu_active(cpu, false); |
741ba80f PZ |
9671 | |
9672 | /* | |
9673 | * From this point forward, this CPU will refuse to run any task that | |
9674 | * is not: migrate_disable() or KTHREAD_IS_PER_CPU, and will actively | |
9675 | * push those tasks away until this gets cleared, see | |
9676 | * sched_cpu_dying(). | |
9677 | */ | |
975707f2 PZ |
9678 | balance_push_set(cpu, true); |
9679 | ||
b2454caa | 9680 | /* |
975707f2 PZ |
9681 | * We've cleared cpu_active_mask / set balance_push, wait for all |
9682 | * preempt-disabled and RCU users of this state to go away such that | |
9683 | * all new such users will observe it. | |
b2454caa | 9684 | * |
5ba2ffba PZ |
9685 | * Specifically, we rely on ttwu to no longer target this CPU, see |
9686 | * ttwu_queue_cond() and is_cpu_allowed(). | |
9687 | * | |
b2454caa PZ |
9688 | * Do sync before park smpboot threads to take care the rcu boost case. |
9689 | */ | |
309ba859 | 9690 | synchronize_rcu(); |
40190a78 | 9691 | |
120455c5 PZ |
9692 | rq_lock_irqsave(rq, &rf); |
9693 | if (rq->rd) { | |
120455c5 PZ |
9694 | BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span)); |
9695 | set_rq_offline(rq); | |
9696 | } | |
9697 | rq_unlock_irqrestore(rq, &rf); | |
9698 | ||
c5511d03 PZI |
9699 | #ifdef CONFIG_SCHED_SMT |
9700 | /* | |
9701 | * When going down, decrement the number of cores with SMT present. | |
9702 | */ | |
9703 | if (cpumask_weight(cpu_smt_mask(cpu)) == 2) | |
9704 | static_branch_dec_cpuslocked(&sched_smt_present); | |
3c474b32 PZ |
9705 | |
9706 | sched_core_cpu_deactivate(cpu); | |
c5511d03 PZI |
9707 | #endif |
9708 | ||
40190a78 TG |
9709 | if (!sched_smp_initialized) |
9710 | return 0; | |
9711 | ||
0fb3978b | 9712 | sched_update_numa(cpu, false); |
40190a78 TG |
9713 | ret = cpuset_cpu_inactive(cpu); |
9714 | if (ret) { | |
2558aacf | 9715 | balance_push_set(cpu, false); |
40190a78 | 9716 | set_cpu_active(cpu, true); |
0fb3978b | 9717 | sched_update_numa(cpu, true); |
40190a78 | 9718 | return ret; |
135fb3e1 | 9719 | } |
40190a78 TG |
9720 | sched_domains_numa_masks_clear(cpu); |
9721 | return 0; | |
135fb3e1 TG |
9722 | } |
9723 | ||
94baf7a5 TG |
9724 | static void sched_rq_cpu_starting(unsigned int cpu) |
9725 | { | |
9726 | struct rq *rq = cpu_rq(cpu); | |
9727 | ||
9728 | rq->calc_load_update = calc_load_update; | |
94baf7a5 TG |
9729 | update_max_interval(); |
9730 | } | |
9731 | ||
135fb3e1 TG |
9732 | int sched_cpu_starting(unsigned int cpu) |
9733 | { | |
9edeaea1 | 9734 | sched_core_cpu_starting(cpu); |
94baf7a5 | 9735 | sched_rq_cpu_starting(cpu); |
d84b3131 | 9736 | sched_tick_start(cpu); |
135fb3e1 | 9737 | return 0; |
e761b772 | 9738 | } |
e761b772 | 9739 | |
f2785ddb | 9740 | #ifdef CONFIG_HOTPLUG_CPU |
1cf12e08 TG |
9741 | |
9742 | /* | |
9743 | * Invoked immediately before the stopper thread runs to bring the
9744 | * CPU down completely. At this point all per CPU kthreads except the | |
9745 | * hotplug thread (current) and the stopper thread (inactive) have been | |
9746 | * either parked or have been unbound from the outgoing CPU. Ensure that | |
9747 | * any of those which might be on the way out are gone. | |
9748 | * | |
9749 | * If after this point a bound task is being woken on this CPU then the | |
9750 | * responsible hotplug callback has failed to do its job.
9751 | * sched_cpu_dying() will catch it with the appropriate fireworks. | |
9752 | */ | |
9753 | int sched_cpu_wait_empty(unsigned int cpu) | |
9754 | { | |
9755 | balance_hotplug_wait(); | |
9756 | return 0; | |
9757 | } | |
9758 | ||
9759 | /* | |
9760 | * Since this CPU is going 'away' for a while, fold any nr_active delta we | |
9761 | * might have. Called from the CPU stopper task after ensuring that the | |
9762 | * stopper is the last running task on the CPU, so nr_active count is | |
9763 | * stable. We need to take the teardown thread which is calling this into | |
9764 | * account, so we hand in adjust = 1 to the load calculation. | |
9765 | * | |
9766 | * Also see the comment "Global load-average calculations". | |
9767 | */ | |
9768 | static void calc_load_migrate(struct rq *rq) | |
9769 | { | |
9770 | long delta = calc_load_fold_active(rq, 1); | |
9771 | ||
9772 | if (delta) | |
9773 | atomic_long_add(delta, &calc_load_tasks); | |
9774 | } | |
9775 | ||
36c6e17b VS |
9776 | static void dump_rq_tasks(struct rq *rq, const char *loglvl) |
9777 | { | |
9778 | struct task_struct *g, *p; | |
9779 | int cpu = cpu_of(rq); | |
9780 | ||
5cb9eaa3 | 9781 | lockdep_assert_rq_held(rq); |
36c6e17b VS |
9782 | |
9783 | printk("%sCPU%d enqueued tasks (%u total):\n", loglvl, cpu, rq->nr_running); | |
9784 | for_each_process_thread(g, p) { | |
9785 | if (task_cpu(p) != cpu) | |
9786 | continue; | |
9787 | ||
9788 | if (!task_on_rq_queued(p)) | |
9789 | continue; | |
9790 | ||
9791 | printk("%s\tpid: %d, name: %s\n", loglvl, p->pid, p->comm); | |
9792 | } | |
9793 | } | |
9794 | ||
f2785ddb TG |
9795 | int sched_cpu_dying(unsigned int cpu) |
9796 | { | |
9797 | struct rq *rq = cpu_rq(cpu); | |
8a8c69c3 | 9798 | struct rq_flags rf; |
f2785ddb TG |
9799 | |
9800 | /* Handle pending wakeups and then migrate everything off */ | |
d84b3131 | 9801 | sched_tick_stop(cpu); |
8a8c69c3 PZ |
9802 | |
9803 | rq_lock_irqsave(rq, &rf); | |
36c6e17b VS |
9804 | if (rq->nr_running != 1 || rq_has_pinned_tasks(rq)) { |
9805 | WARN(true, "Dying CPU not properly vacated!"); | |
9806 | dump_rq_tasks(rq, KERN_WARNING); | |
9807 | } | |
8a8c69c3 PZ |
9808 | rq_unlock_irqrestore(rq, &rf); |
9809 | ||
f2785ddb TG |
9810 | calc_load_migrate(rq); |
9811 | update_max_interval(); | |
e5ef27d0 | 9812 | hrtick_clear(rq); |
3c474b32 | 9813 | sched_core_cpu_dying(cpu); |
f2785ddb TG |
9814 | return 0; |
9815 | } | |
9816 | #endif | |
9817 | ||
1da177e4 LT |
9818 | void __init sched_init_smp(void) |
9819 | { | |
0fb3978b | 9820 | sched_init_numa(NUMA_NO_NODE); |
cb83b629 | 9821 | |
6acce3ef PZ |
9822 | /* |
9823 | * There's no userspace yet to cause hotplug operations; hence all the | |
d1ccc66d | 9824 | * CPU masks are stable and all blatant races in the below code cannot |
b5a4e2bb | 9825 | * happen. |
6acce3ef | 9826 | */ |
712555ee | 9827 | mutex_lock(&sched_domains_mutex); |
8d5dc512 | 9828 | sched_init_domains(cpu_active_mask); |
712555ee | 9829 | mutex_unlock(&sched_domains_mutex); |
e761b772 | 9830 | |
5c1e1767 | 9831 | /* Move init over to a non-isolated CPU */ |
04d4e665 | 9832 | if (set_cpus_allowed_ptr(current, housekeeping_cpumask(HK_TYPE_DOMAIN)) < 0) |
5c1e1767 | 9833 | BUG(); |
15faafc6 | 9834 | current->flags &= ~PF_NO_SETAFFINITY; |
19978ca6 | 9835 | sched_init_granularity(); |
4212823f | 9836 | |
0e3900e6 | 9837 | init_sched_rt_class(); |
1baca4ce | 9838 | init_sched_dl_class(); |
1b568f0a | 9839 | |
e26fbffd | 9840 | sched_smp_initialized = true; |
1da177e4 | 9841 | } |
e26fbffd TG |
9842 | |
9843 | static int __init migration_init(void) | |
9844 | { | |
77a5352b | 9845 | sched_cpu_starting(smp_processor_id()); |
e26fbffd | 9846 | return 0; |
1da177e4 | 9847 | } |
e26fbffd TG |
9848 | early_initcall(migration_init); |
9849 | ||
1da177e4 LT |
9850 | #else |
9851 | void __init sched_init_smp(void) | |
9852 | { | |
19978ca6 | 9853 | sched_init_granularity(); |
1da177e4 LT |
9854 | } |
9855 | #endif /* CONFIG_SMP */ | |
9856 | ||
9857 | int in_sched_functions(unsigned long addr) | |
9858 | { | |
1da177e4 LT |
9859 | return in_lock_functions(addr) || |
9860 | (addr >= (unsigned long)__sched_text_start | |
9861 | && addr < (unsigned long)__sched_text_end); | |
9862 | } | |
9863 | ||
029632fb | 9864 | #ifdef CONFIG_CGROUP_SCHED |
27b4b931 LZ |
9865 | /* |
9866 | * Default task group. | |
9867 | * Every task in the system belongs to this group at bootup.
9868 | */ | |
029632fb | 9869 | struct task_group root_task_group; |
35cf4e50 | 9870 | LIST_HEAD(task_groups); |
b0367629 WL |
9871 | |
9872 | /* Cacheline aligned slab cache for task_group */ | |
68279f9c | 9873 | static struct kmem_cache *task_group_cache __ro_after_init; |
052f1dc7 | 9874 | #endif |
6f505b16 | 9875 | |
1da177e4 LT |
9876 | void __init sched_init(void) |
9877 | { | |
a1dc0446 | 9878 | unsigned long ptr = 0; |
55627e3c | 9879 | int i; |
434d53b0 | 9880 | |
c3a340f7 | 9881 | /* Make sure the linker didn't screw up */ |
546a3fee PZ |
9882 | BUG_ON(&idle_sched_class != &fair_sched_class + 1 || |
9883 | &fair_sched_class != &rt_sched_class + 1 || | |
9884 | &rt_sched_class != &dl_sched_class + 1); | |
c3a340f7 | 9885 | #ifdef CONFIG_SMP |
546a3fee | 9886 | BUG_ON(&dl_sched_class != &stop_sched_class + 1); |
c3a340f7 SRV |
9887 | #endif |
9888 | ||
5822a454 | 9889 | wait_bit_init(); |
9dcb8b68 | 9890 | |
434d53b0 | 9891 | #ifdef CONFIG_FAIR_GROUP_SCHED |
a1dc0446 | 9892 | ptr += 2 * nr_cpu_ids * sizeof(void **); |
434d53b0 MT |
9893 | #endif |
9894 | #ifdef CONFIG_RT_GROUP_SCHED | |
a1dc0446 | 9895 | ptr += 2 * nr_cpu_ids * sizeof(void **); |
434d53b0 | 9896 | #endif |
a1dc0446 QC |
9897 | if (ptr) { |
9898 | ptr = (unsigned long)kzalloc(ptr, GFP_NOWAIT); | |
434d53b0 MT |
9899 | |
9900 | #ifdef CONFIG_FAIR_GROUP_SCHED | |
07e06b01 | 9901 | root_task_group.se = (struct sched_entity **)ptr; |
434d53b0 MT |
9902 | ptr += nr_cpu_ids * sizeof(void **); |
9903 | ||
07e06b01 | 9904 | root_task_group.cfs_rq = (struct cfs_rq **)ptr; |
434d53b0 | 9905 | ptr += nr_cpu_ids * sizeof(void **); |
eff766a6 | 9906 | |
b1d1779e | 9907 | root_task_group.shares = ROOT_TASK_GROUP_LOAD; |
c98c1827 | 9908 | init_cfs_bandwidth(&root_task_group.cfs_bandwidth, NULL); |
6d6bc0ad | 9909 | #endif /* CONFIG_FAIR_GROUP_SCHED */ |
434d53b0 | 9910 | #ifdef CONFIG_RT_GROUP_SCHED |
07e06b01 | 9911 | root_task_group.rt_se = (struct sched_rt_entity **)ptr; |
434d53b0 MT |
9912 | ptr += nr_cpu_ids * sizeof(void **); |
9913 | ||
07e06b01 | 9914 | root_task_group.rt_rq = (struct rt_rq **)ptr; |
eff766a6 PZ |
9915 | ptr += nr_cpu_ids * sizeof(void **); |
9916 | ||
6d6bc0ad | 9917 | #endif /* CONFIG_RT_GROUP_SCHED */ |
b74e6278 | 9918 | } |
dd41f596 | 9919 | |
d1ccc66d | 9920 | init_rt_bandwidth(&def_rt_bandwidth, global_rt_period(), global_rt_runtime()); |
332ac17e | 9921 | |
57d885fe GH |
9922 | #ifdef CONFIG_SMP |
9923 | init_defrootdomain(); | |
9924 | #endif | |
9925 | ||
d0b27fa7 | 9926 | #ifdef CONFIG_RT_GROUP_SCHED |
07e06b01 | 9927 | init_rt_bandwidth(&root_task_group.rt_bandwidth, |
d0b27fa7 | 9928 | global_rt_period(), global_rt_runtime()); |
6d6bc0ad | 9929 | #endif /* CONFIG_RT_GROUP_SCHED */ |
d0b27fa7 | 9930 | |
7c941438 | 9931 | #ifdef CONFIG_CGROUP_SCHED |
b0367629 WL |
9932 | task_group_cache = KMEM_CACHE(task_group, 0); |
9933 | ||
07e06b01 YZ |
9934 | list_add(&root_task_group.list, &task_groups); |
9935 | INIT_LIST_HEAD(&root_task_group.children); | |
f4d6f6c2 | 9936 | INIT_LIST_HEAD(&root_task_group.siblings); |
5091faa4 | 9937 | autogroup_init(&init_task); |
7c941438 | 9938 | #endif /* CONFIG_CGROUP_SCHED */ |
6f505b16 | 9939 | |
0a945022 | 9940 | for_each_possible_cpu(i) { |
70b97a7f | 9941 | struct rq *rq; |
1da177e4 LT |
9942 | |
9943 | rq = cpu_rq(i); | |
5cb9eaa3 | 9944 | raw_spin_lock_init(&rq->__lock); |
7897986b | 9945 | rq->nr_running = 0; |
dce48a84 TG |
9946 | rq->calc_load_active = 0; |
9947 | rq->calc_load_update = jiffies + LOAD_FREQ; | |
acb5a9ba | 9948 | init_cfs_rq(&rq->cfs); |
07c54f7a AV |
9949 | init_rt_rq(&rq->rt); |
9950 | init_dl_rq(&rq->dl); | |
dd41f596 | 9951 | #ifdef CONFIG_FAIR_GROUP_SCHED |
6f505b16 | 9952 | INIT_LIST_HEAD(&rq->leaf_cfs_rq_list); |
9c2791f9 | 9953 | rq->tmp_alone_branch = &rq->leaf_cfs_rq_list; |
354d60c2 | 9954 | /* |
d1ccc66d | 9955 | * How much CPU bandwidth does root_task_group get? |
354d60c2 DG |
9956 | * |
9957 | * In case of task-groups formed through the cgroup filesystem, it
d1ccc66d IM |
9958 | * gets 100% of the CPU resources in the system. This overall |
9959 | * system CPU resource is divided among the tasks of | |
07e06b01 | 9960 | * root_task_group and its child task-groups in a fair manner, |
354d60c2 DG |
9961 | * based on each entity's (task or task-group's) weight |
9962 | * (se->load.weight). | |
9963 | * | |
07e06b01 | 9964 | * In other words, if root_task_group has 10 tasks (of weight
354d60c2 | 9965 | * 1024) and two child groups A0 and A1 (of weight 1024 each),
d1ccc66d | 9966 | * then A0's share of the CPU resource is: |
354d60c2 | 9967 | * |
0d905bca | 9968 | * A0's bandwidth = 1024 / (10*1024 + 1024 + 1024) = 8.33% |
354d60c2 | 9969 | * |
07e06b01 YZ |
9970 | * We achieve this by letting root_task_group's tasks sit |
9971 | * directly in rq->cfs (i.e. root_task_group->se[] = NULL).
354d60c2 | 9972 | */ |
07e06b01 | 9973 | init_tg_cfs_entry(&root_task_group, &rq->cfs, NULL, i, NULL); |
354d60c2 DG |
9974 | #endif /* CONFIG_FAIR_GROUP_SCHED */ |
9975 | ||
9976 | rq->rt.rt_runtime = def_rt_bandwidth.rt_runtime; | |
052f1dc7 | 9977 | #ifdef CONFIG_RT_GROUP_SCHED |
07e06b01 | 9978 | init_tg_rt_entry(&root_task_group, &rq->rt, NULL, i, NULL); |
dd41f596 | 9979 | #endif |
1da177e4 | 9980 | #ifdef CONFIG_SMP |
41c7ce9a | 9981 | rq->sd = NULL; |
57d885fe | 9982 | rq->rd = NULL; |
7bc26384 | 9983 | rq->cpu_capacity = SCHED_CAPACITY_SCALE; |
b5c44773 | 9984 | rq->balance_callback = &balance_push_callback; |
1da177e4 | 9985 | rq->active_balance = 0; |
dd41f596 | 9986 | rq->next_balance = jiffies; |
1da177e4 | 9987 | rq->push_cpu = 0; |
0a2966b4 | 9988 | rq->cpu = i; |
1f11eb6a | 9989 | rq->online = 0; |
eae0c9df MG |
9990 | rq->idle_stamp = 0; |
9991 | rq->avg_idle = 2*sysctl_sched_migration_cost; | |
9bd721c5 | 9992 | rq->max_idle_balance_cost = sysctl_sched_migration_cost; |
367456c7 PZ |
9993 | |
9994 | INIT_LIST_HEAD(&rq->cfs_tasks); | |
9995 | ||
dc938520 | 9996 | rq_attach_root(rq, &def_root_domain); |
3451d024 | 9997 | #ifdef CONFIG_NO_HZ_COMMON |
e022e0d3 | 9998 | rq->last_blocked_load_update_tick = jiffies; |
a22e47a4 | 9999 | atomic_set(&rq->nohz_flags, 0); |
90b5363a | 10000 | |
545b8c8d | 10001 | INIT_CSD(&rq->nohz_csd, nohz_csd_func, rq); |
83cd4fe2 | 10002 | #endif |
f2469a1f TG |
10003 | #ifdef CONFIG_HOTPLUG_CPU |
10004 | rcuwait_init(&rq->hotplug_wait); | |
83cd4fe2 | 10005 | #endif |
9fd81dd5 | 10006 | #endif /* CONFIG_SMP */ |
77a021be | 10007 | hrtick_rq_init(rq); |
1da177e4 | 10008 | atomic_set(&rq->nr_iowait, 0); |
9edeaea1 PZ |
10009 | |
10010 | #ifdef CONFIG_SCHED_CORE | |
3c474b32 | 10011 | rq->core = rq; |
539f6512 | 10012 | rq->core_pick = NULL; |
9edeaea1 | 10013 | rq->core_enabled = 0; |
539f6512 | 10014 | rq->core_tree = RB_ROOT; |
4feee7d1 JD |
10015 | rq->core_forceidle_count = 0; |
10016 | rq->core_forceidle_occupation = 0; | |
10017 | rq->core_forceidle_start = 0; | |
539f6512 PZ |
10018 | |
10019 | rq->core_cookie = 0UL; | |
9edeaea1 | 10020 | #endif |
da019032 | 10021 | zalloc_cpumask_var_node(&rq->scratch_mask, GFP_KERNEL, cpu_to_node(i)); |
1da177e4 LT |
10022 | } |
10023 | ||
b1e82065 | 10024 | set_load_weight(&init_task, false); |
b50f60ce | 10025 | |
1da177e4 LT |
10026 | /* |
10027 | * The boot idle thread does lazy MMU switching as well: | |
10028 | */ | |
aa464ba9 | 10029 | mmgrab_lazy_tlb(&init_mm); |
1da177e4 LT |
10030 | enter_lazy_tlb(&init_mm, current); |
10031 | ||
40966e31 EB |
10032 | /* |
10033 | * The idle task doesn't need the kthread struct to function, but it | |
10034 | * is dressed up as a per-CPU kthread and thus needs to play the part | |
10035 | * if we want to avoid special-casing it in code that deals with per-CPU | |
10036 | * kthreads. | |
10037 | */ | |
dd621ee0 | 10038 | WARN_ON(!set_kthread_struct(current)); |
40966e31 | 10039 | |
1da177e4 LT |
10040 | /* |
10041 | * Make us the idle thread. Technically, schedule() should not be | |
10042 | * called from this thread; however, somewhere below it might be,
10043 | * but because we are the idle thread, we just pick up running again | |
10044 | * when this runqueue becomes "idle". | |
10045 | */ | |
10046 | init_idle(current, smp_processor_id()); | |
dce48a84 TG |
10047 | |
10048 | calc_load_update = jiffies + LOAD_FREQ; | |
10049 | ||
bf4d83f6 | 10050 | #ifdef CONFIG_SMP |
29d5e047 | 10051 | idle_thread_set_boot_cpu(); |
b5c44773 | 10052 | balance_push_set(smp_processor_id(), false); |
029632fb PZ |
10053 | #endif |
10054 | init_sched_fair_class(); | |
6a7b3dc3 | 10055 | |
eb414681 JW |
10056 | psi_init(); |
10057 | ||
69842cba PB |
10058 | init_uclamp(); |
10059 | ||
c597bfdd FW |
10060 | preempt_dynamic_init(); |
10061 | ||
6892b75e | 10062 | scheduler_running = 1; |
1da177e4 LT |
10063 | } |
10064 | ||
d902db1e | 10065 | #ifdef CONFIG_DEBUG_ATOMIC_SLEEP |
e4aafea2 | 10066 | |
42a38756 | 10067 | void __might_sleep(const char *file, int line) |
1da177e4 | 10068 | { |
d6c23bb3 | 10069 | unsigned int state = get_current_state(); |
8eb23b9f PZ |
10070 | /* |
10071 | * Blocking primitives will set (and therefore destroy) current->state, | |
10072 | * since we will exit with TASK_RUNNING, make sure we enter with it;
10073 | * otherwise we will destroy state.
10074 | */ | |
d6c23bb3 | 10075 | WARN_ONCE(state != TASK_RUNNING && current->task_state_change, |
8eb23b9f | 10076 | "do not call blocking ops when !TASK_RUNNING; " |
d6c23bb3 | 10077 | "state=%x set at [<%p>] %pS\n", state, |
8eb23b9f | 10078 | (void *)current->task_state_change, |
00845eb9 | 10079 | (void *)current->task_state_change); |
8eb23b9f | 10080 | |
42a38756 | 10081 | __might_resched(file, line, 0); |
3427445a PZ |
10082 | } |
10083 | EXPORT_SYMBOL(__might_sleep); | |
10084 | ||
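The canonical trigger for this check, as a sketch: sleeping inside an atomic section. With CONFIG_DEBUG_ATOMIC_SLEEP the splat printed by __might_resched() below fires:

	spin_lock(&lock);	/* enters atomic context */
	msleep(10);		/* BUG: sleeping function called from invalid context */
	spin_unlock(&lock);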
8d713b69 TG |
10085 | static void print_preempt_disable_ip(int preempt_offset, unsigned long ip) |
10086 | { | |
10087 | if (!IS_ENABLED(CONFIG_DEBUG_PREEMPT)) | |
10088 | return; | |
10089 | ||
10090 | if (preempt_count() == preempt_offset) | |
10091 | return; | |
10092 | ||
10093 | pr_err("Preemption disabled at:"); | |
10094 | print_ip_sym(KERN_ERR, ip); | |
10095 | } | |
10096 | ||
50e081b9 TG |
10097 | static inline bool resched_offsets_ok(unsigned int offsets) |
10098 | { | |
10099 | unsigned int nested = preempt_count(); | |
10100 | ||
10101 | nested += rcu_preempt_depth() << MIGHT_RESCHED_RCU_SHIFT; | |
10102 | ||
10103 | return nested == offsets; | |
10104 | } | |
10105 | ||
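The offsets argument encodes the expected context: the low bits (MIGHT_RESCHED_PREEMPT_MASK) carry the expected preempt count, and the bits above MIGHT_RESCHED_RCU_SHIFT the expected RCU read-side nesting. As a hedged sketch of a legitimate non-zero expectation (my_step_locked() is hypothetical), a primitive documented to run with exactly one spinlock held can pass PREEMPT_LOCK_OFFSET so that resched_offsets_ok() sees nested == offsets:

	#include <linux/sched.h>
	#include <linux/spinlock.h>

	/* Sketch only: callers must hold exactly one spinlock. */
	static void my_step_locked(spinlock_t *lock)
	{
		lockdep_assert_held(lock);
		__might_resched(__FILE__, __LINE__, PREEMPT_LOCK_OFFSET);
	}

This mirrors how cond_resched_lock() annotates itself.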
10106 | void __might_resched(const char *file, int line, unsigned int offsets) | |
1da177e4 | 10107 | { |
d1ccc66d IM |
10108 | /* Ratelimiting timestamp: */ |
10109 | static unsigned long prev_jiffy; | |
10110 | ||
d1c6d149 | 10111 | unsigned long preempt_disable_ip; |
1da177e4 | 10112 | |
d1ccc66d IM |
10113 | /* WARN_ON_ONCE() by default, no rate limit required: */ |
10114 | rcu_sleep_check(); | |
10115 | ||
50e081b9 | 10116 | if ((resched_offsets_ok(offsets) && !irqs_disabled() && |
312364f3 | 10117 | !is_idle_task(current) && !current->non_block_count) || |
1c3c5eab TG |
10118 | system_state == SYSTEM_BOOTING || system_state > SYSTEM_RUNNING || |
10119 | oops_in_progress) | |
aef745fc | 10120 | return; |
1c3c5eab | 10121 | |
aef745fc IM |
10122 | if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy) |
10123 | return; | |
10124 | prev_jiffy = jiffies; | |
10125 | ||
d1ccc66d | 10126 | /* Save this before calling printk(), since that will clobber it: */ |
d1c6d149 VN |
10127 | preempt_disable_ip = get_preempt_disable_ip(current); |
10128 | ||
a45ed302 TG |
10129 | pr_err("BUG: sleeping function called from invalid context at %s:%d\n", |
10130 | file, line); | |
10131 | pr_err("in_atomic(): %d, irqs_disabled(): %d, non_block: %d, pid: %d, name: %s\n", | |
10132 | in_atomic(), irqs_disabled(), current->non_block_count, | |
10133 | current->pid, current->comm); | |
8d713b69 | 10134 | pr_err("preempt_count: %x, expected: %x\n", preempt_count(), |
50e081b9 | 10135 | offsets & MIGHT_RESCHED_PREEMPT_MASK); |
8d713b69 TG |
10136 | |
10137 | if (IS_ENABLED(CONFIG_PREEMPT_RCU)) { | |
50e081b9 TG |
10138 | pr_err("RCU nest depth: %d, expected: %u\n", |
10139 | rcu_preempt_depth(), offsets >> MIGHT_RESCHED_RCU_SHIFT); | |
8d713b69 | 10140 | } |
aef745fc | 10141 | |
a8b686b3 | 10142 | if (task_stack_end_corrupted(current)) |
a45ed302 | 10143 | pr_emerg("Thread overran stack, or stack corrupted\n"); |
a8b686b3 | 10144 | |
aef745fc IM |
10145 | debug_show_held_locks(current); |
10146 | if (irqs_disabled()) | |
10147 | print_irqtrace_events(current); | |
8d713b69 | 10148 | |
50e081b9 TG |
10149 | print_preempt_disable_ip(offsets & MIGHT_RESCHED_PREEMPT_MASK, |
10150 | preempt_disable_ip); | |
8d713b69 | 10151 | |
aef745fc | 10152 | dump_stack(); |
f0b22e39 | 10153 | add_taint(TAINT_WARN, LOCKDEP_STILL_OK); |
1da177e4 | 10154 | } |
874f670e | 10155 | EXPORT_SYMBOL(__might_resched); |
568f1967 PZ |
10156 | |
10157 | void __cant_sleep(const char *file, int line, int preempt_offset) | |
10158 | { | |
10159 | static unsigned long prev_jiffy; | |
10160 | ||
10161 | if (irqs_disabled()) | |
10162 | return; | |
10163 | ||
10164 | if (!IS_ENABLED(CONFIG_PREEMPT_COUNT)) | |
10165 | return; | |
10166 | ||
10167 | if (preempt_count() > preempt_offset) | |
10168 | return; | |
10169 | ||
10170 | if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy) | |
10171 | return; | |
10172 | prev_jiffy = jiffies; | |
10173 | ||
10174 | printk(KERN_ERR "BUG: assuming atomic context at %s:%d\n", file, line); | |
10175 | printk(KERN_ERR "in_atomic(): %d, irqs_disabled(): %d, pid: %d, name: %s\n", | |
10176 | in_atomic(), irqs_disabled(), | |
10177 | current->pid, current->comm); | |
10178 | ||
10179 | debug_show_held_locks(current); | |
10180 | dump_stack(); | |
10181 | add_taint(TAINT_WARN, LOCKDEP_STILL_OK); | |
10182 | } | |
10183 | EXPORT_SYMBOL_GPL(__cant_sleep); | |
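The inverse annotation also exists: code that must stay atomic can assert it with cant_sleep(), which wraps the __cant_sleep() call above with a preempt_offset of 0 when CONFIG_DEBUG_ATOMIC_SLEEP is enabled. A hypothetical sketch:

	#include <linux/kernel.h>

	/* Sketch only: must run with preemption or interrupts disabled. */
	static void my_atomic_update(void)
	{
		cant_sleep();	/* warns if this context could in fact sleep */
		/* ... manipulate per-CPU state ... */
	}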
74d862b6 TG |
10184 | |
10185 | #ifdef CONFIG_SMP | |
10186 | void __cant_migrate(const char *file, int line) | |
10187 | { | |
10188 | static unsigned long prev_jiffy; | |
10189 | ||
10190 | if (irqs_disabled()) | |
10191 | return; | |
10192 | ||
10193 | if (is_migration_disabled(current)) | |
10194 | return; | |
10195 | ||
10196 | if (!IS_ENABLED(CONFIG_PREEMPT_COUNT)) | |
10197 | return; | |
10198 | ||
10199 | if (preempt_count() > 0) | |
10200 | return; | |
10201 | ||
10202 | if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy) | |
10203 | return; | |
10204 | prev_jiffy = jiffies; | |
10205 | ||
10206 | pr_err("BUG: assuming non migratable context at %s:%d\n", file, line); | |
10207 | pr_err("in_atomic(): %d, irqs_disabled(): %d, migration_disabled() %u pid: %d, name: %s\n", | |
10208 | in_atomic(), irqs_disabled(), is_migration_disabled(current), | |
10209 | current->pid, current->comm); | |
10210 | ||
10211 | debug_show_held_locks(current); | |
10212 | dump_stack(); | |
10213 | add_taint(TAINT_WARN, LOCKDEP_STILL_OK); | |
10214 | } | |
10215 | EXPORT_SYMBOL_GPL(__cant_migrate); | |
10216 | #endif | |
1da177e4 LT |
10217 | #endif |
10218 | ||
10219 | #ifdef CONFIG_MAGIC_SYSRQ | |
dbc7f069 | 10220 | void normalize_rt_tasks(void) |
3a5e4dc1 | 10221 | { |
dbc7f069 | 10222 | struct task_struct *g, *p; |
d50dde5a DF |
10223 | struct sched_attr attr = { |
10224 | .sched_policy = SCHED_NORMAL, | |
10225 | }; | |
1da177e4 | 10226 | |
3472eaa1 | 10227 | read_lock(&tasklist_lock); |
5d07f420 | 10228 | for_each_process_thread(g, p) { |
178be793 IM |
10229 | /* |
10230 | * Only normalize user tasks: | |
10231 | */ | |
3472eaa1 | 10232 | if (p->flags & PF_KTHREAD) |
178be793 IM |
10233 | continue; |
10234 | ||
4fa8d299 | 10235 | p->se.exec_start = 0; |
ceeadb83 YS |
10236 | schedstat_set(p->stats.wait_start, 0); |
10237 | schedstat_set(p->stats.sleep_start, 0); | |
10238 | schedstat_set(p->stats.block_start, 0); | |
dd41f596 | 10239 | |
aab03e05 | 10240 | if (!dl_task(p) && !rt_task(p)) { |
dd41f596 IM |
10241 | /* |
10242 | * Renice negative nice level userspace | |
10243 | * tasks back to 0: | |
10244 | */ | |
3472eaa1 | 10245 | if (task_nice(p) < 0) |
dd41f596 | 10246 | set_user_nice(p, 0); |
1da177e4 | 10247 | continue; |
dd41f596 | 10248 | } |
1da177e4 | 10249 | |
dbc7f069 | 10250 | __sched_setscheduler(p, &attr, false, false); |
5d07f420 | 10251 | } |
3472eaa1 | 10252 | read_unlock(&tasklist_lock); |
1da177e4 LT |
10253 | } |
10254 | ||
10255 | #endif /* CONFIG_MAGIC_SYSRQ */ | |
1df5c10a | 10256 | |
cf8e8658 | 10257 | #if defined(CONFIG_KGDB_KDB) |
1df5c10a | 10258 | /* |
cf8e8658 | 10259 | * These functions are only useful for kdb. |
1df5c10a LT |
10260 | * |
10261 | * They can only be called when the whole system has been | |
10262 | * stopped - every CPU needs to be quiescent, and no scheduling | |
10263 | * activity can take place. Using them for anything else would | |
10264 | * be a serious bug, and as a result, they aren't even visible | |
10265 | * under any other configuration. | |
10266 | */ | |
10267 | ||
10268 | /** | |
d1ccc66d | 10269 | * curr_task - return the current task for a given CPU. |
1df5c10a LT |
10270 | * @cpu: the processor in question. |
10271 | * | |
10272 | * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED! | |
e69f6186 YB |
10273 | * |
10274 | * Return: The current task for @cpu. | |
1df5c10a | 10275 | */ |
36c8b586 | 10276 | struct task_struct *curr_task(int cpu) |
1df5c10a LT |
10277 | { |
10278 | return cpu_curr(cpu); | |
10279 | } | |
10280 | ||
cf8e8658 | 10281 | #endif /* defined(CONFIG_KGDB_KDB) */ |
29f59db3 | 10282 | |
7c941438 | 10283 | #ifdef CONFIG_CGROUP_SCHED |
029632fb PZ |
10284 | /* task_group_lock serializes the addition/removal of task groups */ |
10285 | static DEFINE_SPINLOCK(task_group_lock); | |
10286 | ||
2480c093 PB |
10287 | static inline void alloc_uclamp_sched_group(struct task_group *tg, |
10288 | struct task_group *parent) | |
10289 | { | |
10290 | #ifdef CONFIG_UCLAMP_TASK_GROUP | |
0413d7f3 | 10291 | enum uclamp_id clamp_id; |
2480c093 PB |
10292 | |
10293 | for_each_clamp_id(clamp_id) { | |
10294 | uclamp_se_set(&tg->uclamp_req[clamp_id], | |
10295 | uclamp_none(clamp_id), false); | |
0b60ba2d | 10296 | tg->uclamp[clamp_id] = parent->uclamp[clamp_id]; |
2480c093 PB |
10297 | } |
10298 | #endif | |
10299 | } | |
10300 | ||
2f5177f0 | 10301 | static void sched_free_group(struct task_group *tg) |
bccbe08a PZ |
10302 | { |
10303 | free_fair_sched_group(tg); | |
10304 | free_rt_sched_group(tg); | |
e9aa1dd1 | 10305 | autogroup_free(tg); |
b0367629 | 10306 | kmem_cache_free(task_group_cache, tg); |
bccbe08a PZ |
10307 | } |
10308 | ||
b027789e MK |
10309 | static void sched_free_group_rcu(struct rcu_head *rcu) |
10310 | { | |
10311 | sched_free_group(container_of(rcu, struct task_group, rcu)); | |
10312 | } | |
10313 | ||
10314 | static void sched_unregister_group(struct task_group *tg) | |
10315 | { | |
10316 | unregister_fair_sched_group(tg); | |
10317 | unregister_rt_sched_group(tg); | |
10318 | /* | |
10319 | * We have to wait for yet another RCU grace period to expire, as | |
10320 | * print_cfs_stats() might run concurrently. | |
10321 | */ | |
10322 | call_rcu(&tg->rcu, sched_free_group_rcu); | |
10323 | } | |
10324 | ||
bccbe08a | 10325 | /* allocate runqueue etc for a new task group */ |
ec7dc8ac | 10326 | struct task_group *sched_create_group(struct task_group *parent) |
bccbe08a PZ |
10327 | { |
10328 | struct task_group *tg; | |
bccbe08a | 10329 | |
b0367629 | 10330 | tg = kmem_cache_alloc(task_group_cache, GFP_KERNEL | __GFP_ZERO); |
bccbe08a PZ |
10331 | if (!tg) |
10332 | return ERR_PTR(-ENOMEM); | |
10333 | ||
ec7dc8ac | 10334 | if (!alloc_fair_sched_group(tg, parent)) |
bccbe08a PZ |
10335 | goto err; |
10336 | ||
ec7dc8ac | 10337 | if (!alloc_rt_sched_group(tg, parent)) |
bccbe08a PZ |
10338 | goto err; |
10339 | ||
2480c093 PB |
10340 | alloc_uclamp_sched_group(tg, parent); |
10341 | ||
ace783b9 LZ |
10342 | return tg; |
10343 | ||
10344 | err: | |
2f5177f0 | 10345 | sched_free_group(tg); |
ace783b9 LZ |
10346 | return ERR_PTR(-ENOMEM); |
10347 | } | |
10348 | ||
10349 | void sched_online_group(struct task_group *tg, struct task_group *parent) | |
10350 | { | |
10351 | unsigned long flags; | |
10352 | ||
8ed36996 | 10353 | spin_lock_irqsave(&task_group_lock, flags); |
6f505b16 | 10354 | list_add_rcu(&tg->list, &task_groups); |
f473aa5e | 10355 | |
d1ccc66d IM |
10356 | /* Root should already exist: */ |
10357 | WARN_ON(!parent); | |
f473aa5e PZ |
10358 | |
10359 | tg->parent = parent; | |
f473aa5e | 10360 | INIT_LIST_HEAD(&tg->children); |
09f2724a | 10361 | list_add_rcu(&tg->siblings, &parent->children); |
8ed36996 | 10362 | spin_unlock_irqrestore(&task_group_lock, flags); |
8663e24d PZ |
10363 | |
10364 | online_fair_sched_group(tg); | |
29f59db3 SV |
10365 | } |
10366 | ||
9b5b7751 | 10367 | /* rcu callback to free various structures associated with a task group */ |
b027789e | 10368 | static void sched_unregister_group_rcu(struct rcu_head *rhp) |
29f59db3 | 10369 | { |
d1ccc66d | 10370 | /* Now it should be safe to free those cfs_rqs: */ |
b027789e | 10371 | sched_unregister_group(container_of(rhp, struct task_group, rcu)); |
29f59db3 SV |
10372 | } |
10373 | ||
4cf86d77 | 10374 | void sched_destroy_group(struct task_group *tg) |
ace783b9 | 10375 | { |
d1ccc66d | 10376 | /* Wait for possible concurrent references to cfs_rqs to complete: */ |
b027789e | 10377 | call_rcu(&tg->rcu, sched_unregister_group_rcu); |
ace783b9 LZ |
10378 | } |
10379 | ||
b027789e | 10380 | void sched_release_group(struct task_group *tg) |
29f59db3 | 10381 | { |
8ed36996 | 10382 | unsigned long flags; |
29f59db3 | 10383 | |
b027789e MK |
10384 | /* |
10385 | * Unlink first, to prevent walk_tg_tree_from() from finding us (via |
10386 | * sched_cfs_period_timer()). | |
10387 | * | |
10388 | * For this to be effective, we have to wait for all pending users of | |
10389 | * this task group to leave their RCU critical section to ensure no new | |
10390 | * user will see our dying task group any more. Specifically ensure | |
10391 | * that tg_unthrottle_up() won't add decayed cfs_rq's to it. | |
10392 | * | |
10393 | * We therefore defer calling unregister_fair_sched_group() to | |
10394 | * sched_unregister_group(), which is guaranteed to get called only after the |
10395 | * current RCU grace period has expired. | |
10396 | */ | |
3d4b47b4 | 10397 | spin_lock_irqsave(&task_group_lock, flags); |
6f505b16 | 10398 | list_del_rcu(&tg->list); |
f473aa5e | 10399 | list_del_rcu(&tg->siblings); |
8ed36996 | 10400 | spin_unlock_irqrestore(&task_group_lock, flags); |
29f59db3 SV |
10401 | } |
10402 | ||
eff6c8ce | 10403 | static struct task_group *sched_get_task_group(struct task_struct *tsk) |
29f59db3 | 10404 | { |
8323f26c | 10405 | struct task_group *tg; |
29f59db3 | 10406 | |
f7b8a47d KT |
10407 | /* |
10408 | * All callers are synchronized by task_rq_lock(); we do not use RCU | |
10409 | * which is pointless here. Thus, we pass "true" to task_css_check() | |
10410 | * to prevent lockdep warnings. | |
10411 | */ | |
10412 | tg = container_of(task_css_check(tsk, cpu_cgrp_id, true), | |
8323f26c PZ |
10413 | struct task_group, css); |
10414 | tg = autogroup_task_group(tsk, tg); | |
eff6c8ce | 10415 | |
10416 | return tg; | |
10417 | } | |
10418 | ||
10419 | static void sched_change_group(struct task_struct *tsk, struct task_group *group) | |
10420 | { | |
10421 | tsk->sched_task_group = group; | |
8323f26c | 10422 | |
810b3817 | 10423 | #ifdef CONFIG_FAIR_GROUP_SCHED |
ea86cb4b | 10424 | if (tsk->sched_class->task_change_group) |
39c42611 | 10425 | tsk->sched_class->task_change_group(tsk); |
b2b5ce02 | 10426 | else |
810b3817 | 10427 | #endif |
b2b5ce02 | 10428 | set_task_rq(tsk, task_cpu(tsk)); |
ea86cb4b VG |
10429 | } |
10430 | ||
10431 | /* | |
10432 | * Change task's runqueue when it moves between groups. | |
10433 | * | |
10434 | * The caller of this function should have put the task in its new group by | |
10435 | * now. This function just updates tsk->se.cfs_rq and tsk->se.parent to reflect | |
10436 | * its new group. | |
10437 | */ | |
10438 | void sched_move_task(struct task_struct *tsk) | |
10439 | { | |
7a57f32a PZ |
10440 | int queued, running, queue_flags = |
10441 | DEQUEUE_SAVE | DEQUEUE_MOVE | DEQUEUE_NOCLOCK; | |
eff6c8ce | 10442 | struct task_group *group; |
ea86cb4b VG |
10443 | struct rq *rq; |
10444 | ||
fa614b4f PZ |
10445 | CLASS(task_rq_lock, rq_guard)(tsk); |
10446 | rq = rq_guard.rq; | |
10447 | ||
eff6c8ce | 10448 | /* |
10449 | * Especially with SCHED_AUTOGROUP enabled, it is possible to get superfluous |
10450 | * group changes. | |
10451 | */ | |
10452 | group = sched_get_task_group(tsk); | |
10453 | if (group == tsk->sched_task_group) | |
fa614b4f | 10454 | return; |
eff6c8ce | 10455 | |
1b1d6225 | 10456 | update_rq_clock(rq); |
ea86cb4b VG |
10457 | |
10458 | running = task_current(rq, tsk); | |
10459 | queued = task_on_rq_queued(tsk); | |
10460 | ||
10461 | if (queued) | |
7a57f32a | 10462 | dequeue_task(rq, tsk, queue_flags); |
bb3bac2c | 10463 | if (running) |
ea86cb4b VG |
10464 | put_prev_task(rq, tsk); |
10465 | ||
eff6c8ce | 10466 | sched_change_group(tsk, group); |
810b3817 | 10467 | |
da0c1e65 | 10468 | if (queued) |
7a57f32a | 10469 | enqueue_task(rq, tsk, queue_flags); |
2a4b03ff | 10470 | if (running) { |
03b7fad1 | 10471 | set_next_task(rq, tsk); |
2a4b03ff VG |
10472 | /* |
10473 | * After changing group, the running task may have joined a | |
10474 | * throttled one but it's still the running task. Trigger a | |
10475 | * resched to make sure that task can still run. | |
10476 | */ | |
10477 | resched_curr(rq); | |
10478 | } | |
29f59db3 | 10479 | } |
68318b8e | 10480 | |
a7c6d554 | 10481 | static inline struct task_group *css_tg(struct cgroup_subsys_state *css) |
68318b8e | 10482 | { |
a7c6d554 | 10483 | return css ? container_of(css, struct task_group, css) : NULL; |
68318b8e SV |
10484 | } |
10485 | ||
eb95419b TH |
10486 | static struct cgroup_subsys_state * |
10487 | cpu_cgroup_css_alloc(struct cgroup_subsys_state *parent_css) | |
68318b8e | 10488 | { |
eb95419b TH |
10489 | struct task_group *parent = css_tg(parent_css); |
10490 | struct task_group *tg; | |
68318b8e | 10491 | |
eb95419b | 10492 | if (!parent) { |
68318b8e | 10493 | /* This is early initialization for the top cgroup */ |
07e06b01 | 10494 | return &root_task_group.css; |
68318b8e SV |
10495 | } |
10496 | ||
ec7dc8ac | 10497 | tg = sched_create_group(parent); |
68318b8e SV |
10498 | if (IS_ERR(tg)) |
10499 | return ERR_PTR(-ENOMEM); | |
10500 | ||
68318b8e SV |
10501 | return &tg->css; |
10502 | } | |
10503 | ||
96b77745 KK |
10504 | /* Expose task group only after completing cgroup initialization */ |
10505 | static int cpu_cgroup_css_online(struct cgroup_subsys_state *css) | |
10506 | { | |
10507 | struct task_group *tg = css_tg(css); | |
10508 | struct task_group *parent = css_tg(css->parent); | |
10509 | ||
10510 | if (parent) | |
10511 | sched_online_group(tg, parent); | |
7226017a QY |
10512 | |
10513 | #ifdef CONFIG_UCLAMP_TASK_GROUP | |
10514 | /* Propagate the effective uclamp value for the new group */ | |
0e34600a PZ |
10515 | guard(mutex)(&uclamp_mutex); |
10516 | guard(rcu)(); | |
7226017a QY |
10517 | cpu_util_update_eff(css); |
10518 | #endif | |
10519 | ||
96b77745 KK |
10520 | return 0; |
10521 | } | |
10522 | ||
2f5177f0 | 10523 | static void cpu_cgroup_css_released(struct cgroup_subsys_state *css) |
ace783b9 | 10524 | { |
eb95419b | 10525 | struct task_group *tg = css_tg(css); |
ace783b9 | 10526 | |
b027789e | 10527 | sched_release_group(tg); |
ace783b9 LZ |
10528 | } |
10529 | ||
eb95419b | 10530 | static void cpu_cgroup_css_free(struct cgroup_subsys_state *css) |
68318b8e | 10531 | { |
eb95419b | 10532 | struct task_group *tg = css_tg(css); |
68318b8e | 10533 | |
2f5177f0 PZ |
10534 | /* |
10535 | * Relies on the RCU grace period between css_released() and this. | |
10536 | */ | |
b027789e | 10537 | sched_unregister_group(tg); |
ace783b9 LZ |
10538 | } |
10539 | ||
df16b71c | 10540 | #ifdef CONFIG_RT_GROUP_SCHED |
1f7dd3e5 | 10541 | static int cpu_cgroup_can_attach(struct cgroup_taskset *tset) |
68318b8e | 10542 | { |
bb9d97b6 | 10543 | struct task_struct *task; |
1f7dd3e5 | 10544 | struct cgroup_subsys_state *css; |
bb9d97b6 | 10545 | |
1f7dd3e5 | 10546 | cgroup_taskset_for_each(task, css, tset) { |
eb95419b | 10547 | if (!sched_rt_can_attach(css_tg(css), task)) |
bb9d97b6 | 10548 | return -EINVAL; |
bb9d97b6 | 10549 | } |
df16b71c | 10550 | return 0; |
be367d09 | 10551 | } |
df16b71c | 10552 | #endif |
68318b8e | 10553 | |
1f7dd3e5 | 10554 | static void cpu_cgroup_attach(struct cgroup_taskset *tset) |
68318b8e | 10555 | { |
bb9d97b6 | 10556 | struct task_struct *task; |
1f7dd3e5 | 10557 | struct cgroup_subsys_state *css; |
bb9d97b6 | 10558 | |
1f7dd3e5 | 10559 | cgroup_taskset_for_each(task, css, tset) |
bb9d97b6 | 10560 | sched_move_task(task); |
68318b8e SV |
10561 | } |
10562 | ||
2480c093 | 10563 | #ifdef CONFIG_UCLAMP_TASK_GROUP |
0b60ba2d PB |
10564 | static void cpu_util_update_eff(struct cgroup_subsys_state *css) |
10565 | { | |
10566 | struct cgroup_subsys_state *top_css = css; | |
10567 | struct uclamp_se *uc_parent = NULL; | |
10568 | struct uclamp_se *uc_se = NULL; | |
10569 | unsigned int eff[UCLAMP_CNT]; | |
0413d7f3 | 10570 | enum uclamp_id clamp_id; |
0b60ba2d PB |
10571 | unsigned int clamps; |
10572 | ||
93b73858 QY |
10573 | lockdep_assert_held(&uclamp_mutex); |
10574 | SCHED_WARN_ON(!rcu_read_lock_held()); | |
10575 | ||
0b60ba2d PB |
10576 | css_for_each_descendant_pre(css, top_css) { |
10577 | uc_parent = css_tg(css)->parent | |
10578 | ? css_tg(css)->parent->uclamp : NULL; | |
10579 | ||
10580 | for_each_clamp_id(clamp_id) { | |
10581 | /* Assume effective clamps match requested clamps */ |
10582 | eff[clamp_id] = css_tg(css)->uclamp_req[clamp_id].value; | |
10583 | /* Cap effective clamps with parent's effective clamps */ | |
10584 | if (uc_parent && | |
10585 | eff[clamp_id] > uc_parent[clamp_id].value) { | |
10586 | eff[clamp_id] = uc_parent[clamp_id].value; | |
10587 | } | |
10588 | } | |
10589 | /* Ensure protection is always capped by limit */ | |
10590 | eff[UCLAMP_MIN] = min(eff[UCLAMP_MIN], eff[UCLAMP_MAX]); | |
10591 | ||
10592 | /* Propagate most restrictive effective clamps */ | |
10593 | clamps = 0x0; | |
10594 | uc_se = css_tg(css)->uclamp; | |
10595 | for_each_clamp_id(clamp_id) { | |
10596 | if (eff[clamp_id] == uc_se[clamp_id].value) | |
10597 | continue; | |
10598 | uc_se[clamp_id].value = eff[clamp_id]; | |
10599 | uc_se[clamp_id].bucket_id = uclamp_bucket_id(eff[clamp_id]); | |
10600 | clamps |= (0x1 << clamp_id); | |
10601 | } | |
babbe170 | 10602 | if (!clamps) { |
0b60ba2d | 10603 | css = css_rightmost_descendant(css); |
babbe170 PB |
10604 | continue; |
10605 | } | |
10606 | ||
10607 | /* Immediately update descendants RUNNABLE tasks */ | |
0213b708 | 10608 | uclamp_update_active_tasks(css); |
0b60ba2d PB |
10609 | } |
10610 | } | |
2480c093 PB |
10611 | |
10612 | /* | |
10613 | * Integer 10^N with a given N exponent by casting to integer the literal "1eN" | |
10614 | * C expression. Since there is no way to convert a macro argument (N) into a | |
10615 | * character constant, use two levels of macros. | |
10616 | */ | |
10617 | #define _POW10(exp) ((unsigned int)1e##exp) | |
10618 | #define POW10(exp) _POW10(exp) | |
10619 | ||
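Expansion check for the two-level macro trick above: POW10(UCLAMP_PERCENT_SHIFT) substitutes the argument first, so _POW10(2) pastes into (unsigned int)1e2 == 100; UCLAMP_PERCENT_SCALE below therefore evaluates to 100 * 100 == 10000, i.e. percentages are stored with two fixed decimal places.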
10620 | struct uclamp_request { | |
10621 | #define UCLAMP_PERCENT_SHIFT 2 | |
10622 | #define UCLAMP_PERCENT_SCALE (100 * POW10(UCLAMP_PERCENT_SHIFT)) | |
10623 | s64 percent; | |
10624 | u64 util; | |
10625 | int ret; | |
10626 | }; | |
10627 | ||
10628 | static inline struct uclamp_request | |
10629 | capacity_from_percent(char *buf) | |
10630 | { | |
10631 | struct uclamp_request req = { | |
10632 | .percent = UCLAMP_PERCENT_SCALE, | |
10633 | .util = SCHED_CAPACITY_SCALE, | |
10634 | .ret = 0, | |
10635 | }; | |
10636 | ||
10637 | buf = strim(buf); | |
10638 | if (strcmp(buf, "max")) { | |
10639 | req.ret = cgroup_parse_float(buf, UCLAMP_PERCENT_SHIFT, | |
10640 | &req.percent); | |
10641 | if (req.ret) | |
10642 | return req; | |
b562d140 | 10643 | if ((u64)req.percent > UCLAMP_PERCENT_SCALE) { |
2480c093 PB |
10644 | req.ret = -ERANGE; |
10645 | return req; | |
10646 | } | |
10647 | ||
10648 | req.util = req.percent << SCHED_CAPACITY_SHIFT; | |
10649 | req.util = DIV_ROUND_CLOSEST_ULL(req.util, UCLAMP_PERCENT_SCALE); | |
10650 | } | |
10651 | ||
10652 | return req; | |
10653 | } | |
10654 | ||
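A worked pass through the conversion above: writing "50" to a uclamp file makes cgroup_parse_float() return req.percent == 5000 (two fixed decimal places); req.util then becomes (5000 << SCHED_CAPACITY_SHIFT) == 5120000, and DIV_ROUND_CLOSEST_ULL(5120000, UCLAMP_PERCENT_SCALE) == 512, exactly 50% of SCHED_CAPACITY_SCALE (1024).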
10655 | static ssize_t cpu_uclamp_write(struct kernfs_open_file *of, char *buf, | |
10656 | size_t nbytes, loff_t off, | |
10657 | enum uclamp_id clamp_id) | |
10658 | { | |
10659 | struct uclamp_request req; | |
10660 | struct task_group *tg; | |
10661 | ||
10662 | req = capacity_from_percent(buf); | |
10663 | if (req.ret) | |
10664 | return req.ret; | |
10665 | ||
46609ce2 QY |
10666 | static_branch_enable(&sched_uclamp_used); |
10667 | ||
0e34600a PZ |
10668 | guard(mutex)(&uclamp_mutex); |
10669 | guard(rcu)(); | |
2480c093 PB |
10670 | |
10671 | tg = css_tg(of_css(of)); | |
10672 | if (tg->uclamp_req[clamp_id].value != req.util) | |
10673 | uclamp_se_set(&tg->uclamp_req[clamp_id], req.util, false); | |
10674 | ||
10675 | /* | |
10676 | * Because the conversion rounding is not recoverable, we keep track of |
10677 | * the exact requested value. |
10678 | */ | |
10679 | tg->uclamp_pct[clamp_id] = req.percent; | |
10680 | ||
0b60ba2d PB |
10681 | /* Update effective clamps to track the most restrictive value */ |
10682 | cpu_util_update_eff(of_css(of)); | |
10683 | ||
2480c093 PB |
10684 | return nbytes; |
10685 | } | |
10686 | ||
10687 | static ssize_t cpu_uclamp_min_write(struct kernfs_open_file *of, | |
10688 | char *buf, size_t nbytes, | |
10689 | loff_t off) | |
10690 | { | |
10691 | return cpu_uclamp_write(of, buf, nbytes, off, UCLAMP_MIN); | |
10692 | } | |
10693 | ||
10694 | static ssize_t cpu_uclamp_max_write(struct kernfs_open_file *of, | |
10695 | char *buf, size_t nbytes, | |
10696 | loff_t off) | |
10697 | { | |
10698 | return cpu_uclamp_write(of, buf, nbytes, off, UCLAMP_MAX); | |
10699 | } | |
10700 | ||
10701 | static inline void cpu_uclamp_print(struct seq_file *sf, | |
10702 | enum uclamp_id clamp_id) | |
10703 | { | |
10704 | struct task_group *tg; | |
10705 | u64 util_clamp; | |
10706 | u64 percent; | |
10707 | u32 rem; | |
10708 | ||
0e34600a PZ |
10709 | scoped_guard (rcu) { |
10710 | tg = css_tg(seq_css(sf)); | |
10711 | util_clamp = tg->uclamp_req[clamp_id].value; | |
10712 | } | |
2480c093 PB |
10713 | |
10714 | if (util_clamp == SCHED_CAPACITY_SCALE) { | |
10715 | seq_puts(sf, "max\n"); | |
10716 | return; | |
10717 | } | |
10718 | ||
10719 | percent = tg->uclamp_pct[clamp_id]; | |
10720 | percent = div_u64_rem(percent, POW10(UCLAMP_PERCENT_SHIFT), &rem); | |
10721 | seq_printf(sf, "%llu.%0*u\n", percent, UCLAMP_PERCENT_SHIFT, rem); | |
10722 | } | |
10723 | ||
10724 | static int cpu_uclamp_min_show(struct seq_file *sf, void *v) | |
10725 | { | |
10726 | cpu_uclamp_print(sf, UCLAMP_MIN); | |
10727 | return 0; | |
10728 | } | |
10729 | ||
10730 | static int cpu_uclamp_max_show(struct seq_file *sf, void *v) | |
10731 | { | |
10732 | cpu_uclamp_print(sf, UCLAMP_MAX); | |
10733 | return 0; | |
10734 | } | |
10735 | #endif /* CONFIG_UCLAMP_TASK_GROUP */ | |
10736 | ||
052f1dc7 | 10737 | #ifdef CONFIG_FAIR_GROUP_SCHED |
182446d0 TH |
10738 | static int cpu_shares_write_u64(struct cgroup_subsys_state *css, |
10739 | struct cftype *cftype, u64 shareval) | |
68318b8e | 10740 | { |
5b61d50a KK |
10741 | if (shareval > scale_load_down(ULONG_MAX)) |
10742 | shareval = MAX_SHARES; | |
182446d0 | 10743 | return sched_group_set_shares(css_tg(css), scale_load(shareval)); |
68318b8e SV |
10744 | } |
10745 | ||
182446d0 TH |
10746 | static u64 cpu_shares_read_u64(struct cgroup_subsys_state *css, |
10747 | struct cftype *cft) | |
68318b8e | 10748 | { |
182446d0 | 10749 | struct task_group *tg = css_tg(css); |
68318b8e | 10750 | |
c8b28116 | 10751 | return (u64) scale_load_down(tg->shares); |
68318b8e | 10752 | } |
ab84d31e PT |
10753 | |
10754 | #ifdef CONFIG_CFS_BANDWIDTH | |
a790de99 PT |
10755 | static DEFINE_MUTEX(cfs_constraints_mutex); |
10756 | ||
ab84d31e | 10757 | const u64 max_cfs_quota_period = 1 * NSEC_PER_SEC; /* 1s */ |
b1546edc | 10758 | static const u64 min_cfs_quota_period = 1 * NSEC_PER_MSEC; /* 1ms */ |
d505b8af HC |
10759 | /* More than 203 days if BW_SHIFT equals 20. */ |
10760 | static const u64 max_cfs_runtime = MAX_BW * NSEC_PER_USEC; | |
ab84d31e | 10761 | |
a790de99 PT |
10762 | static int __cfs_schedulable(struct task_group *tg, u64 period, u64 runtime); |
10763 | ||
f4183717 HC |
10764 | static int tg_set_cfs_bandwidth(struct task_group *tg, u64 period, u64 quota, |
10765 | u64 burst) | |
ab84d31e | 10766 | { |
56f570e5 | 10767 | int i, ret = 0, runtime_enabled, runtime_was_enabled; |
029632fb | 10768 | struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth; |
ab84d31e PT |
10769 | |
10770 | if (tg == &root_task_group) | |
10771 | return -EINVAL; | |
10772 | ||
10773 | /* | |
10774 | * Ensure we have at least some amount of bandwidth every period. This is |
10775 | * to prevent reaching a state of large arrears when throttled via | |
10776 | * entity_tick() resulting in prolonged exit starvation. | |
10777 | */ | |
10778 | if (quota < min_cfs_quota_period || period < min_cfs_quota_period) | |
10779 | return -EINVAL; | |
10780 | ||
10781 | /* | |
3b03706f | 10782 | * Likewise, bound things on the other side by preventing insane quota |
ab84d31e PT |
10783 | * periods. This also allows us to normalize in computing quota |
10784 | * feasibility. | |
10785 | */ | |
10786 | if (period > max_cfs_quota_period) | |
10787 | return -EINVAL; | |
10788 | ||
d505b8af HC |
10789 | /* |
10790 | * Bound quota to defend quota against overflow during bandwidth shift. | |
10791 | */ | |
10792 | if (quota != RUNTIME_INF && quota > max_cfs_runtime) | |
10793 | return -EINVAL; | |
10794 | ||
f4183717 HC |
10795 | if (quota != RUNTIME_INF && (burst > quota || |
10796 | burst + quota > max_cfs_runtime)) | |
10797 | return -EINVAL; | |
10798 | ||
0e59bdae KT |
10799 | /* |
10800 | * Prevent race between setting of cfs_rq->runtime_enabled and | |
10801 | * unthrottle_offline_cfs_rqs(). | |
10802 | */ | |
6fb45460 PZ |
10803 | guard(cpus_read_lock)(); |
10804 | guard(mutex)(&cfs_constraints_mutex); | |
10805 | ||
a790de99 PT |
10806 | ret = __cfs_schedulable(tg, period, quota); |
10807 | if (ret) | |
6fb45460 | 10808 | return ret; |
a790de99 | 10809 | |
58088ad0 | 10810 | runtime_enabled = quota != RUNTIME_INF; |
56f570e5 | 10811 | runtime_was_enabled = cfs_b->quota != RUNTIME_INF; |
1ee14e6c BS |
10812 | /* |
10813 | * If we need to toggle cfs_bandwidth_used, off->on must occur | |
10814 | * before making related changes, and on->off must occur afterwards | |
10815 | */ | |
10816 | if (runtime_enabled && !runtime_was_enabled) | |
10817 | cfs_bandwidth_usage_inc(); | |
58088ad0 | 10818 | |
6fb45460 PZ |
10819 | scoped_guard (raw_spinlock_irq, &cfs_b->lock) { |
10820 | cfs_b->period = ns_to_ktime(period); | |
10821 | cfs_b->quota = quota; | |
10822 | cfs_b->burst = burst; | |
d1ccc66d | 10823 | |
6fb45460 | 10824 | __refill_cfs_bandwidth_runtime(cfs_b); |
d1ccc66d | 10825 | |
6fb45460 PZ |
10826 | /* |
10827 | * Restart the period timer (if active) to handle new | |
10828 | * period expiry: | |
10829 | */ | |
10830 | if (runtime_enabled) | |
10831 | start_cfs_bandwidth(cfs_b); | |
10832 | } | |
ab84d31e | 10833 | |
0e59bdae | 10834 | for_each_online_cpu(i) { |
ab84d31e | 10835 | struct cfs_rq *cfs_rq = tg->cfs_rq[i]; |
029632fb | 10836 | struct rq *rq = cfs_rq->rq; |
ab84d31e | 10837 | |
6fb45460 | 10838 | guard(rq_lock_irq)(rq); |
58088ad0 | 10839 | cfs_rq->runtime_enabled = runtime_enabled; |
ab84d31e | 10840 | cfs_rq->runtime_remaining = 0; |
671fd9da | 10841 | |
029632fb | 10842 | if (cfs_rq->throttled) |
671fd9da | 10843 | unthrottle_cfs_rq(cfs_rq); |
ab84d31e | 10844 | } |
6fb45460 | 10845 | |
1ee14e6c BS |
10846 | if (runtime_was_enabled && !runtime_enabled) |
10847 | cfs_bandwidth_usage_dec(); | |
ab84d31e | 10848 | |
6fb45460 | 10849 | return 0; |
ab84d31e PT |
10850 | } |
10851 | ||
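To make the units concrete: the cgroup interface is in microseconds while struct cfs_bandwidth stores nanoseconds, so cfs_quota_us == 50000 against cfs_period_us == 100000 becomes 50ms of runtime per 100ms period, i.e. half a CPU on average. A non-zero burst additionally allows unused quota, up to the burst value, to be carried into a later period; the checks above keep quota, and burst + quota, below max_cfs_runtime.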
b1546edc | 10852 | static int tg_set_cfs_quota(struct task_group *tg, long cfs_quota_us) |
ab84d31e | 10853 | { |
f4183717 | 10854 | u64 quota, period, burst; |
ab84d31e | 10855 | |
029632fb | 10856 | period = ktime_to_ns(tg->cfs_bandwidth.period); |
f4183717 | 10857 | burst = tg->cfs_bandwidth.burst; |
ab84d31e PT |
10858 | if (cfs_quota_us < 0) |
10859 | quota = RUNTIME_INF; | |
1a8b4540 | 10860 | else if ((u64)cfs_quota_us <= U64_MAX / NSEC_PER_USEC) |
ab84d31e | 10861 | quota = (u64)cfs_quota_us * NSEC_PER_USEC; |
1a8b4540 KK |
10862 | else |
10863 | return -EINVAL; | |
ab84d31e | 10864 | |
f4183717 | 10865 | return tg_set_cfs_bandwidth(tg, period, quota, burst); |
ab84d31e PT |
10866 | } |
10867 | ||
b1546edc | 10868 | static long tg_get_cfs_quota(struct task_group *tg) |
ab84d31e PT |
10869 | { |
10870 | u64 quota_us; | |
10871 | ||
029632fb | 10872 | if (tg->cfs_bandwidth.quota == RUNTIME_INF) |
ab84d31e PT |
10873 | return -1; |
10874 | ||
029632fb | 10875 | quota_us = tg->cfs_bandwidth.quota; |
ab84d31e PT |
10876 | do_div(quota_us, NSEC_PER_USEC); |
10877 | ||
10878 | return quota_us; | |
10879 | } | |
10880 | ||
b1546edc | 10881 | static int tg_set_cfs_period(struct task_group *tg, long cfs_period_us) |
ab84d31e | 10882 | { |
f4183717 | 10883 | u64 quota, period, burst; |
ab84d31e | 10884 | |
1a8b4540 KK |
10885 | if ((u64)cfs_period_us > U64_MAX / NSEC_PER_USEC) |
10886 | return -EINVAL; | |
10887 | ||
ab84d31e | 10888 | period = (u64)cfs_period_us * NSEC_PER_USEC; |
029632fb | 10889 | quota = tg->cfs_bandwidth.quota; |
f4183717 | 10890 | burst = tg->cfs_bandwidth.burst; |
ab84d31e | 10891 | |
f4183717 | 10892 | return tg_set_cfs_bandwidth(tg, period, quota, burst); |
ab84d31e PT |
10893 | } |
10894 | ||
b1546edc | 10895 | static long tg_get_cfs_period(struct task_group *tg) |
ab84d31e PT |
10896 | { |
10897 | u64 cfs_period_us; | |
10898 | ||
029632fb | 10899 | cfs_period_us = ktime_to_ns(tg->cfs_bandwidth.period); |
ab84d31e PT |
10900 | do_div(cfs_period_us, NSEC_PER_USEC); |
10901 | ||
10902 | return cfs_period_us; | |
10903 | } | |
10904 | ||
f4183717 HC |
10905 | static int tg_set_cfs_burst(struct task_group *tg, long cfs_burst_us) |
10906 | { | |
10907 | u64 quota, period, burst; | |
10908 | ||
10909 | if ((u64)cfs_burst_us > U64_MAX / NSEC_PER_USEC) | |
10910 | return -EINVAL; | |
10911 | ||
10912 | burst = (u64)cfs_burst_us * NSEC_PER_USEC; | |
10913 | period = ktime_to_ns(tg->cfs_bandwidth.period); | |
10914 | quota = tg->cfs_bandwidth.quota; | |
10915 | ||
10916 | return tg_set_cfs_bandwidth(tg, period, quota, burst); | |
10917 | } | |
10918 | ||
10919 | static long tg_get_cfs_burst(struct task_group *tg) | |
10920 | { | |
10921 | u64 burst_us; | |
10922 | ||
10923 | burst_us = tg->cfs_bandwidth.burst; | |
10924 | do_div(burst_us, NSEC_PER_USEC); | |
10925 | ||
10926 | return burst_us; | |
10927 | } | |
10928 | ||
182446d0 TH |
10929 | static s64 cpu_cfs_quota_read_s64(struct cgroup_subsys_state *css, |
10930 | struct cftype *cft) | |
ab84d31e | 10931 | { |
182446d0 | 10932 | return tg_get_cfs_quota(css_tg(css)); |
ab84d31e PT |
10933 | } |
10934 | ||
182446d0 TH |
10935 | static int cpu_cfs_quota_write_s64(struct cgroup_subsys_state *css, |
10936 | struct cftype *cftype, s64 cfs_quota_us) | |
ab84d31e | 10937 | { |
182446d0 | 10938 | return tg_set_cfs_quota(css_tg(css), cfs_quota_us); |
ab84d31e PT |
10939 | } |
10940 | ||
182446d0 TH |
10941 | static u64 cpu_cfs_period_read_u64(struct cgroup_subsys_state *css, |
10942 | struct cftype *cft) | |
ab84d31e | 10943 | { |
182446d0 | 10944 | return tg_get_cfs_period(css_tg(css)); |
ab84d31e PT |
10945 | } |
10946 | ||
182446d0 TH |
10947 | static int cpu_cfs_period_write_u64(struct cgroup_subsys_state *css, |
10948 | struct cftype *cftype, u64 cfs_period_us) | |
ab84d31e | 10949 | { |
182446d0 | 10950 | return tg_set_cfs_period(css_tg(css), cfs_period_us); |
ab84d31e PT |
10951 | } |
10952 | ||
f4183717 HC |
10953 | static u64 cpu_cfs_burst_read_u64(struct cgroup_subsys_state *css, |
10954 | struct cftype *cft) | |
10955 | { | |
10956 | return tg_get_cfs_burst(css_tg(css)); | |
10957 | } | |
10958 | ||
10959 | static int cpu_cfs_burst_write_u64(struct cgroup_subsys_state *css, | |
10960 | struct cftype *cftype, u64 cfs_burst_us) | |
10961 | { | |
10962 | return tg_set_cfs_burst(css_tg(css), cfs_burst_us); | |
10963 | } | |
10964 | ||
a790de99 PT |
10965 | struct cfs_schedulable_data { |
10966 | struct task_group *tg; | |
10967 | u64 period, quota; | |
10968 | }; | |
10969 | ||
10970 | /* | |
10971 | * normalize group quota/period to be quota/max_period | |
10972 | * note: units are usecs | |
10973 | */ | |
10974 | static u64 normalize_cfs_quota(struct task_group *tg, | |
10975 | struct cfs_schedulable_data *d) | |
10976 | { | |
10977 | u64 quota, period; | |
10978 | ||
10979 | if (tg == d->tg) { | |
10980 | period = d->period; | |
10981 | quota = d->quota; | |
10982 | } else { | |
10983 | period = tg_get_cfs_period(tg); | |
10984 | quota = tg_get_cfs_quota(tg); | |
10985 | } | |
10986 | ||
10987 | /* note: these should typically be equivalent */ | |
10988 | if (quota == RUNTIME_INF || quota == -1) | |
10989 | return RUNTIME_INF; | |
10990 | ||
10991 | return to_ratio(period, quota); | |
10992 | } | |
10993 | ||
10994 | static int tg_cfs_schedulable_down(struct task_group *tg, void *data) | |
10995 | { | |
10996 | struct cfs_schedulable_data *d = data; | |
029632fb | 10997 | struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth; |
a790de99 PT |
10998 | s64 quota = 0, parent_quota = -1; |
10999 | ||
11000 | if (!tg->parent) { | |
11001 | quota = RUNTIME_INF; | |
11002 | } else { | |
029632fb | 11003 | struct cfs_bandwidth *parent_b = &tg->parent->cfs_bandwidth; |
a790de99 PT |
11004 | |
11005 | quota = normalize_cfs_quota(tg, d); | |
9c58c79a | 11006 | parent_quota = parent_b->hierarchical_quota; |
a790de99 PT |
11007 | |
11008 | /* | |
c53593e5 | 11009 | * Ensure max(child_quota) <= parent_quota. On cgroup2, |
c98c1827 PA |
11010 | * always take the non-RUNTIME_INF min. On cgroup1, only |
11011 | * inherit when no limit is set. In both cases this is used | |
11012 | * by the scheduler to determine if a given CFS task has a | |
11013 | * bandwidth constraint at some higher level. | |
a790de99 | 11014 | */ |
c53593e5 | 11015 | if (cgroup_subsys_on_dfl(cpu_cgrp_subsys)) { |
c98c1827 PA |
11016 | if (quota == RUNTIME_INF) |
11017 | quota = parent_quota; | |
11018 | else if (parent_quota != RUNTIME_INF) | |
11019 | quota = min(quota, parent_quota); | |
c53593e5 TH |
11020 | } else { |
11021 | if (quota == RUNTIME_INF) | |
11022 | quota = parent_quota; | |
11023 | else if (parent_quota != RUNTIME_INF && quota > parent_quota) | |
11024 | return -EINVAL; | |
11025 | } | |
a790de99 | 11026 | } |
9c58c79a | 11027 | cfs_b->hierarchical_quota = quota; |
a790de99 PT |
11028 | |
11029 | return 0; | |
11030 | } | |
11031 | ||
11032 | static int __cfs_schedulable(struct task_group *tg, u64 period, u64 quota) | |
11033 | { | |
11034 | struct cfs_schedulable_data data = { | |
11035 | .tg = tg, | |
11036 | .period = period, | |
11037 | .quota = quota, | |
11038 | }; | |
11039 | ||
11040 | if (quota != RUNTIME_INF) { | |
11041 | do_div(data.period, NSEC_PER_USEC); | |
11042 | do_div(data.quota, NSEC_PER_USEC); | |
11043 | } | |
11044 | ||
0e34600a PZ |
11045 | guard(rcu)(); |
11046 | return walk_tg_tree(tg_cfs_schedulable_down, tg_nop, &data); | |
a790de99 | 11047 | } |
e8da1b18 | 11048 | |
a1f7164c | 11049 | static int cpu_cfs_stat_show(struct seq_file *sf, void *v) |
e8da1b18 | 11050 | { |
2da8ca82 | 11051 | struct task_group *tg = css_tg(seq_css(sf)); |
029632fb | 11052 | struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth; |
e8da1b18 | 11053 | |
44ffc75b TH |
11054 | seq_printf(sf, "nr_periods %d\n", cfs_b->nr_periods); |
11055 | seq_printf(sf, "nr_throttled %d\n", cfs_b->nr_throttled); | |
11056 | seq_printf(sf, "throttled_time %llu\n", cfs_b->throttled_time); | |
e8da1b18 | 11057 | |
3d6c50c2 | 11058 | if (schedstat_enabled() && tg != &root_task_group) { |
ceeadb83 | 11059 | struct sched_statistics *stats; |
3d6c50c2 YW |
11060 | u64 ws = 0; |
11061 | int i; | |
11062 | ||
ceeadb83 YS |
11063 | for_each_possible_cpu(i) { |
11064 | stats = __schedstats_from_se(tg->se[i]); | |
11065 | ws += schedstat_val(stats->wait_sum); | |
11066 | } | |
3d6c50c2 YW |
11067 | |
11068 | seq_printf(sf, "wait_sum %llu\n", ws); | |
11069 | } | |
11070 | ||
bcb1704a HC |
11071 | seq_printf(sf, "nr_bursts %d\n", cfs_b->nr_burst); |
11072 | seq_printf(sf, "burst_time %llu\n", cfs_b->burst_time); | |
11073 | ||
e8da1b18 NR |
11074 | return 0; |
11075 | } | |
677ea015 JD |
11076 | |
11077 | static u64 throttled_time_self(struct task_group *tg) | |
11078 | { | |
11079 | int i; | |
11080 | u64 total = 0; | |
11081 | ||
11082 | for_each_possible_cpu(i) { | |
11083 | total += READ_ONCE(tg->cfs_rq[i]->throttled_clock_self_time); | |
11084 | } | |
11085 | ||
11086 | return total; | |
11087 | } | |
11088 | ||
11089 | static int cpu_cfs_local_stat_show(struct seq_file *sf, void *v) | |
11090 | { | |
11091 | struct task_group *tg = css_tg(seq_css(sf)); | |
11092 | ||
11093 | seq_printf(sf, "throttled_time %llu\n", throttled_time_self(tg)); | |
11094 | ||
11095 | return 0; | |
11096 | } | |
ab84d31e | 11097 | #endif /* CONFIG_CFS_BANDWIDTH */ |
6d6bc0ad | 11098 | #endif /* CONFIG_FAIR_GROUP_SCHED */ |
68318b8e | 11099 | |
052f1dc7 | 11100 | #ifdef CONFIG_RT_GROUP_SCHED |
182446d0 TH |
11101 | static int cpu_rt_runtime_write(struct cgroup_subsys_state *css, |
11102 | struct cftype *cft, s64 val) | |
6f505b16 | 11103 | { |
182446d0 | 11104 | return sched_group_set_rt_runtime(css_tg(css), val); |
6f505b16 PZ |
11105 | } |
11106 | ||
182446d0 TH |
11107 | static s64 cpu_rt_runtime_read(struct cgroup_subsys_state *css, |
11108 | struct cftype *cft) | |
6f505b16 | 11109 | { |
182446d0 | 11110 | return sched_group_rt_runtime(css_tg(css)); |
6f505b16 | 11111 | } |
d0b27fa7 | 11112 | |
182446d0 TH |
11113 | static int cpu_rt_period_write_uint(struct cgroup_subsys_state *css, |
11114 | struct cftype *cftype, u64 rt_period_us) | |
d0b27fa7 | 11115 | { |
182446d0 | 11116 | return sched_group_set_rt_period(css_tg(css), rt_period_us); |
d0b27fa7 PZ |
11117 | } |
11118 | ||
182446d0 TH |
11119 | static u64 cpu_rt_period_read_uint(struct cgroup_subsys_state *css, |
11120 | struct cftype *cft) | |
d0b27fa7 | 11121 | { |
182446d0 | 11122 | return sched_group_rt_period(css_tg(css)); |
d0b27fa7 | 11123 | } |
6d6bc0ad | 11124 | #endif /* CONFIG_RT_GROUP_SCHED */ |
6f505b16 | 11125 | |
30400039 JD |
11126 | #ifdef CONFIG_FAIR_GROUP_SCHED |
11127 | static s64 cpu_idle_read_s64(struct cgroup_subsys_state *css, | |
11128 | struct cftype *cft) | |
11129 | { | |
11130 | return css_tg(css)->idle; | |
11131 | } | |
11132 | ||
11133 | static int cpu_idle_write_s64(struct cgroup_subsys_state *css, | |
11134 | struct cftype *cft, s64 idle) | |
11135 | { | |
11136 | return sched_group_set_idle(css_tg(css), idle); | |
11137 | } | |
11138 | #endif | |
11139 | ||
a1f7164c | 11140 | static struct cftype cpu_legacy_files[] = { |
052f1dc7 | 11141 | #ifdef CONFIG_FAIR_GROUP_SCHED |
fe5c7cc2 PM |
11142 | { |
11143 | .name = "shares", | |
f4c753b7 PM |
11144 | .read_u64 = cpu_shares_read_u64, |
11145 | .write_u64 = cpu_shares_write_u64, | |
fe5c7cc2 | 11146 | }, |
30400039 JD |
11147 | { |
11148 | .name = "idle", | |
11149 | .read_s64 = cpu_idle_read_s64, | |
11150 | .write_s64 = cpu_idle_write_s64, | |
11151 | }, | |
052f1dc7 | 11152 | #endif |
ab84d31e PT |
11153 | #ifdef CONFIG_CFS_BANDWIDTH |
11154 | { | |
11155 | .name = "cfs_quota_us", | |
11156 | .read_s64 = cpu_cfs_quota_read_s64, | |
11157 | .write_s64 = cpu_cfs_quota_write_s64, | |
11158 | }, | |
11159 | { | |
11160 | .name = "cfs_period_us", | |
11161 | .read_u64 = cpu_cfs_period_read_u64, | |
11162 | .write_u64 = cpu_cfs_period_write_u64, | |
11163 | }, | |
f4183717 HC |
11164 | { |
11165 | .name = "cfs_burst_us", | |
11166 | .read_u64 = cpu_cfs_burst_read_u64, | |
11167 | .write_u64 = cpu_cfs_burst_write_u64, | |
11168 | }, | |
e8da1b18 NR |
11169 | { |
11170 | .name = "stat", | |
a1f7164c | 11171 | .seq_show = cpu_cfs_stat_show, |
e8da1b18 | 11172 | }, |
677ea015 JD |
11173 | { |
11174 | .name = "stat.local", | |
11175 | .seq_show = cpu_cfs_local_stat_show, | |
11176 | }, | |
ab84d31e | 11177 | #endif |
052f1dc7 | 11178 | #ifdef CONFIG_RT_GROUP_SCHED |
6f505b16 | 11179 | { |
9f0c1e56 | 11180 | .name = "rt_runtime_us", |
06ecb27c PM |
11181 | .read_s64 = cpu_rt_runtime_read, |
11182 | .write_s64 = cpu_rt_runtime_write, | |
6f505b16 | 11183 | }, |
d0b27fa7 PZ |
11184 | { |
11185 | .name = "rt_period_us", | |
f4c753b7 PM |
11186 | .read_u64 = cpu_rt_period_read_uint, |
11187 | .write_u64 = cpu_rt_period_write_uint, | |
d0b27fa7 | 11188 | }, |
2480c093 PB |
11189 | #endif |
11190 | #ifdef CONFIG_UCLAMP_TASK_GROUP | |
11191 | { | |
11192 | .name = "uclamp.min", | |
11193 | .flags = CFTYPE_NOT_ON_ROOT, | |
11194 | .seq_show = cpu_uclamp_min_show, | |
11195 | .write = cpu_uclamp_min_write, | |
11196 | }, | |
11197 | { | |
11198 | .name = "uclamp.max", | |
11199 | .flags = CFTYPE_NOT_ON_ROOT, | |
11200 | .seq_show = cpu_uclamp_max_show, | |
11201 | .write = cpu_uclamp_max_write, | |
11202 | }, | |
052f1dc7 | 11203 | #endif |
d1ccc66d | 11204 | { } /* Terminate */ |
68318b8e SV |
11205 | }; |
11206 | ||
d41bf8c9 TH |
11207 | static int cpu_extra_stat_show(struct seq_file *sf, |
11208 | struct cgroup_subsys_state *css) | |
0d593634 | 11209 | { |
0d593634 TH |
11210 | #ifdef CONFIG_CFS_BANDWIDTH |
11211 | { | |
d41bf8c9 | 11212 | struct task_group *tg = css_tg(css); |
0d593634 | 11213 | struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth; |
bcb1704a | 11214 | u64 throttled_usec, burst_usec; |
0d593634 TH |
11215 | |
11216 | throttled_usec = cfs_b->throttled_time; | |
11217 | do_div(throttled_usec, NSEC_PER_USEC); | |
bcb1704a HC |
11218 | burst_usec = cfs_b->burst_time; |
11219 | do_div(burst_usec, NSEC_PER_USEC); | |
0d593634 TH |
11220 | |
11221 | seq_printf(sf, "nr_periods %d\n" | |
11222 | "nr_throttled %d\n" | |
bcb1704a HC |
11223 | "throttled_usec %llu\n" |
11224 | "nr_bursts %d\n" | |
11225 | "burst_usec %llu\n", | |
0d593634 | 11226 | cfs_b->nr_periods, cfs_b->nr_throttled, |
bcb1704a | 11227 | throttled_usec, cfs_b->nr_burst, burst_usec); |
0d593634 TH |
11228 | } |
11229 | #endif | |
11230 | return 0; | |
11231 | } | |
11232 | ||
677ea015 JD |
11233 | static int cpu_local_stat_show(struct seq_file *sf, |
11234 | struct cgroup_subsys_state *css) | |
11235 | { | |
11236 | #ifdef CONFIG_CFS_BANDWIDTH | |
11237 | { | |
11238 | struct task_group *tg = css_tg(css); | |
11239 | u64 throttled_self_usec; | |
11240 | ||
11241 | throttled_self_usec = throttled_time_self(tg); | |
11242 | do_div(throttled_self_usec, NSEC_PER_USEC); | |
11243 | ||
11244 | seq_printf(sf, "throttled_usec %llu\n", | |
11245 | throttled_self_usec); | |
11246 | } | |
11247 | #endif | |
11248 | return 0; | |
11249 | } | |
11250 | ||
0d593634 TH |
11251 | #ifdef CONFIG_FAIR_GROUP_SCHED |
11252 | static u64 cpu_weight_read_u64(struct cgroup_subsys_state *css, | |
11253 | struct cftype *cft) | |
11254 | { | |
11255 | struct task_group *tg = css_tg(css); | |
11256 | u64 weight = scale_load_down(tg->shares); | |
11257 | ||
11258 | return DIV_ROUND_CLOSEST_ULL(weight * CGROUP_WEIGHT_DFL, 1024); | |
11259 | } | |
11260 | ||
11261 | static int cpu_weight_write_u64(struct cgroup_subsys_state *css, | |
11262 | struct cftype *cft, u64 weight) | |
11263 | { | |
11264 | /* | |
11265 | * cgroup weight knobs should use the common MIN, DFL and MAX | |
11266 | * values which are 1, 100 and 10000 respectively. While it loses | |
11267 | * a bit of range on both ends, it maps pretty well onto the shares | |
11268 | * value used by the scheduler, and the round-trip conversions preserve |
11269 | * the original value over the entire range. | |
11270 | */ | |
11271 | if (weight < CGROUP_WEIGHT_MIN || weight > CGROUP_WEIGHT_MAX) | |
11272 | return -ERANGE; | |
11273 | ||
11274 | weight = DIV_ROUND_CLOSEST_ULL(weight * 1024, CGROUP_WEIGHT_DFL); | |
11275 | ||
11276 | return sched_group_set_shares(css_tg(css), scale_load(weight)); | |
11277 | } | |
11278 | ||
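Round-trip check for the mapping above (CGROUP_WEIGHT_MIN, _DFL and _MAX are 1, 100 and 10000): writing cpu.weight = 100 yields shares = DIV_ROUND_CLOSEST_ULL(100 * 1024, 100) == 1024, the default share count, and reading back computes DIV_ROUND_CLOSEST_ULL(1024 * 100, 1024) == 100 again.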
11279 | static s64 cpu_weight_nice_read_s64(struct cgroup_subsys_state *css, | |
11280 | struct cftype *cft) | |
11281 | { | |
11282 | unsigned long weight = scale_load_down(css_tg(css)->shares); | |
11283 | int last_delta = INT_MAX; | |
11284 | int prio, delta; | |
11285 | ||
11286 | /* find the closest nice value to the current weight */ | |
11287 | for (prio = 0; prio < ARRAY_SIZE(sched_prio_to_weight); prio++) { | |
11288 | delta = abs(sched_prio_to_weight[prio] - weight); | |
11289 | if (delta >= last_delta) | |
11290 | break; | |
11291 | last_delta = delta; | |
11292 | } | |
11293 | ||
11294 | return PRIO_TO_NICE(prio - 1 + MAX_RT_PRIO); | |
11295 | } | |
11296 | ||
11297 | static int cpu_weight_nice_write_s64(struct cgroup_subsys_state *css, | |
11298 | struct cftype *cft, s64 nice) | |
11299 | { | |
11300 | unsigned long weight; | |
7281c8de | 11301 | int idx; |
0d593634 TH |
11302 | |
11303 | if (nice < MIN_NICE || nice > MAX_NICE) | |
11304 | return -ERANGE; | |
11305 | ||
7281c8de PZ |
11306 | idx = NICE_TO_PRIO(nice) - MAX_RT_PRIO; |
11307 | idx = array_index_nospec(idx, 40); | |
11308 | weight = sched_prio_to_weight[idx]; | |
11309 | ||
0d593634 TH |
11310 | return sched_group_set_shares(css_tg(css), scale_load(weight)); |
11311 | } | |
11312 | #endif | |
11313 | ||
11314 | static void __maybe_unused cpu_period_quota_print(struct seq_file *sf, | |
11315 | long period, long quota) | |
11316 | { | |
11317 | if (quota < 0) | |
11318 | seq_puts(sf, "max"); | |
11319 | else | |
11320 | seq_printf(sf, "%ld", quota); | |
11321 | ||
11322 | seq_printf(sf, " %ld\n", period); | |
11323 | } | |
11324 | ||
11325 | /* caller should put the current value in *@periodp before calling */ | |
11326 | static int __maybe_unused cpu_period_quota_parse(char *buf, | |
11327 | u64 *periodp, u64 *quotap) | |
11328 | { | |
11329 | char tok[21]; /* U64_MAX */ | |
11330 | ||
4c47acd8 | 11331 | if (sscanf(buf, "%20s %llu", tok, periodp) < 1) |
0d593634 TH |
11332 | return -EINVAL; |
11333 | ||
11334 | *periodp *= NSEC_PER_USEC; | |
11335 | ||
11336 | if (sscanf(tok, "%llu", quotap)) | |
11337 | *quotap *= NSEC_PER_USEC; | |
11338 | else if (!strcmp(tok, "max")) | |
11339 | *quotap = RUNTIME_INF; | |
11340 | else | |
11341 | return -EINVAL; | |
11342 | ||
11343 | return 0; | |
11344 | } | |
11345 | ||
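Example inputs for the parser above: "max 100000" sets *quotap = RUNTIME_INF with a 100ms period; "50000 100000" sets a 50000us quota and 100000us period, both then scaled by NSEC_PER_USEC; a bare "50000" parses only the quota, leaving the caller's preloaded *periodp in effect (hence the requirement in the comment above).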
11346 | #ifdef CONFIG_CFS_BANDWIDTH | |
11347 | static int cpu_max_show(struct seq_file *sf, void *v) | |
11348 | { | |
11349 | struct task_group *tg = css_tg(seq_css(sf)); | |
11350 | ||
11351 | cpu_period_quota_print(sf, tg_get_cfs_period(tg), tg_get_cfs_quota(tg)); | |
11352 | return 0; | |
11353 | } | |
11354 | ||
11355 | static ssize_t cpu_max_write(struct kernfs_open_file *of, | |
11356 | char *buf, size_t nbytes, loff_t off) | |
11357 | { | |
11358 | struct task_group *tg = css_tg(of_css(of)); | |
11359 | u64 period = tg_get_cfs_period(tg); | |
f4183717 | 11360 | u64 burst = tg_get_cfs_burst(tg); |
0d593634 TH |
11361 | u64 quota; |
11362 | int ret; | |
11363 | ||
11364 | ret = cpu_period_quota_parse(buf, &period, "a); | |
11365 | if (!ret) | |
f4183717 | 11366 | ret = tg_set_cfs_bandwidth(tg, period, quota, burst); |
0d593634 TH |
11367 | return ret ?: nbytes; |
11368 | } | |
11369 | #endif | |
11370 | ||
11371 | static struct cftype cpu_files[] = { | |
0d593634 TH |
11372 | #ifdef CONFIG_FAIR_GROUP_SCHED |
11373 | { | |
11374 | .name = "weight", | |
11375 | .flags = CFTYPE_NOT_ON_ROOT, | |
11376 | .read_u64 = cpu_weight_read_u64, | |
11377 | .write_u64 = cpu_weight_write_u64, | |
11378 | }, | |
11379 | { | |
11380 | .name = "weight.nice", | |
11381 | .flags = CFTYPE_NOT_ON_ROOT, | |
11382 | .read_s64 = cpu_weight_nice_read_s64, | |
11383 | .write_s64 = cpu_weight_nice_write_s64, | |
11384 | }, | |
30400039 JD |
11385 | { |
11386 | .name = "idle", | |
11387 | .flags = CFTYPE_NOT_ON_ROOT, | |
11388 | .read_s64 = cpu_idle_read_s64, | |
11389 | .write_s64 = cpu_idle_write_s64, | |
11390 | }, | |
0d593634 TH |
11391 | #endif |
11392 | #ifdef CONFIG_CFS_BANDWIDTH | |
11393 | { | |
11394 | .name = "max", | |
11395 | .flags = CFTYPE_NOT_ON_ROOT, | |
11396 | .seq_show = cpu_max_show, | |
11397 | .write = cpu_max_write, | |
11398 | }, | |
f4183717 HC |
11399 | { |
11400 | .name = "max.burst", | |
11401 | .flags = CFTYPE_NOT_ON_ROOT, | |
11402 | .read_u64 = cpu_cfs_burst_read_u64, | |
11403 | .write_u64 = cpu_cfs_burst_write_u64, | |
11404 | }, | |
2480c093 PB |
11405 | #endif |
11406 | #ifdef CONFIG_UCLAMP_TASK_GROUP | |
11407 | { | |
11408 | .name = "uclamp.min", | |
11409 | .flags = CFTYPE_NOT_ON_ROOT, | |
11410 | .seq_show = cpu_uclamp_min_show, | |
11411 | .write = cpu_uclamp_min_write, | |
11412 | }, | |
11413 | { | |
11414 | .name = "uclamp.max", | |
11415 | .flags = CFTYPE_NOT_ON_ROOT, | |
11416 | .seq_show = cpu_uclamp_max_show, | |
11417 | .write = cpu_uclamp_max_write, | |
11418 | }, | |
0d593634 TH |
11419 | #endif |
11420 | { } /* terminate */ | |
11421 | }; | |
11422 | ||
073219e9 | 11423 | struct cgroup_subsys cpu_cgrp_subsys = { |
92fb9748 | 11424 | .css_alloc = cpu_cgroup_css_alloc, |
96b77745 | 11425 | .css_online = cpu_cgroup_css_online, |
2f5177f0 | 11426 | .css_released = cpu_cgroup_css_released, |
92fb9748 | 11427 | .css_free = cpu_cgroup_css_free, |
d41bf8c9 | 11428 | .css_extra_stat_show = cpu_extra_stat_show, |
677ea015 | 11429 | .css_local_stat_show = cpu_local_stat_show, |
df16b71c | 11430 | #ifdef CONFIG_RT_GROUP_SCHED |
bb9d97b6 | 11431 | .can_attach = cpu_cgroup_can_attach, |
df16b71c | 11432 | #endif |
bb9d97b6 | 11433 | .attach = cpu_cgroup_attach, |
a1f7164c | 11434 | .legacy_cftypes = cpu_legacy_files, |
0d593634 | 11435 | .dfl_cftypes = cpu_files, |
b38e42e9 | 11436 | .early_init = true, |
0d593634 | 11437 | .threaded = true, |
68318b8e SV |
11438 | }; |
11439 | ||
052f1dc7 | 11440 | #endif /* CONFIG_CGROUP_SCHED */ |
d842de87 | 11441 | |
b637a328 PM |
11442 | void dump_cpu_task(int cpu) |
11443 | { | |
bc1cca97 ZL |
11444 | if (cpu == smp_processor_id() && in_hardirq()) { |
11445 | struct pt_regs *regs; | |
11446 | ||
11447 | regs = get_irq_regs(); | |
11448 | if (regs) { | |
11449 | show_regs(regs); | |
11450 | return; | |
11451 | } | |
11452 | } | |
11453 | ||
e73dfe30 ZL |
11454 | if (trigger_single_cpu_backtrace(cpu)) |
11455 | return; | |
11456 | ||
b637a328 PM |
11457 | pr_info("Task dump for CPU %d:\n", cpu); |
11458 | sched_show_task(cpu_curr(cpu)); | |
11459 | } | |
ed82b8a1 AK |
11460 | |
11461 | /* | |
11462 | * Nice levels are multiplicative, with a gentle 10% change for every | |
11463 | * nice level changed. I.e. when a CPU-bound task goes from nice 0 to | |
11464 | * nice 1, it will get ~10% less CPU time than another CPU-bound task | |
11465 | * that remained on nice 0. | |
11466 | * | |
11467 | * The "10% effect" is relative and cumulative: from _any_ nice level, | |
11468 | * if you go up 1 level, it's -10% CPU usage; if you go down 1 level, |
11469 | * it's +10% CPU usage. (To achieve that we use a multiplier of 1.25. |
11470 | * If a task goes up by ~10% and another task goes down by ~10% then | |
11471 | * the relative distance between them is ~25%.) | |
11472 | */ | |
11473 | const int sched_prio_to_weight[40] = { | |
11474 | /* -20 */ 88761, 71755, 56483, 46273, 36291, | |
11475 | /* -15 */ 29154, 23254, 18705, 14949, 11916, | |
11476 | /* -10 */ 9548, 7620, 6100, 4904, 3906, | |
11477 | /* -5 */ 3121, 2501, 1991, 1586, 1277, | |
11478 | /* 0 */ 1024, 820, 655, 526, 423, | |
11479 | /* 5 */ 335, 272, 215, 172, 137, | |
11480 | /* 10 */ 110, 87, 70, 56, 45, | |
11481 | /* 15 */ 36, 29, 23, 18, 15, | |
11482 | }; | |
11483 | ||
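Worked example for the table above: a CPU-bound task at nice 0 (weight 1024) sharing a CPU with one at nice 1 (weight 820) receives 1024 / (1024 + 820) ≈ 55.5% versus 44.5%, i.e. the ~10% swing and ~25% relative distance described, with each table step being a factor of roughly 1.25 (1024 / 820 ≈ 1.25).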
11484 | /* | |
11485 | * Inverse (2^32/x) values of the sched_prio_to_weight[] array, precalculated. | |
11486 | * | |
11487 | * In cases where the weight does not change often, we can use the | |
11488 | * precalculated inverse to speed up arithmetic by turning divisions |
11489 | * into multiplications: | |
11490 | */ | |
11491 | const u32 sched_prio_to_wmult[40] = { | |
11492 | /* -20 */ 48388, 59856, 76040, 92818, 118348, | |
11493 | /* -15 */ 147320, 184698, 229616, 287308, 360437, | |
11494 | /* -10 */ 449829, 563644, 704093, 875809, 1099582, | |
11495 | /* -5 */ 1376151, 1717300, 2157191, 2708050, 3363326, | |
11496 | /* 0 */ 4194304, 5237765, 6557202, 8165337, 10153587, | |
11497 | /* 5 */ 12820798, 15790321, 19976592, 24970740, 31350126, | |
11498 | /* 10 */ 39045157, 49367440, 61356676, 76695844, 95443717, | |
11499 | /* 15 */ 119304647, 148102320, 186737708, 238609294, 286331153, | |
11500 | }; | |
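Consistency check for the inverse table above: at nice 0, 2^32 / 1024 == 4194304, matching the table entry. Fast paths can then replace a division by weight with a multiply-and-shift, roughly (delta * inv_weight) >> 32, as __calc_delta() in fair.c does.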
14a7405b | 11501 | |
9d246053 PA |
11502 | void call_trace_sched_update_nr_running(struct rq *rq, int count) |
11503 | { | |
11504 | trace_sched_update_nr_running_tp(rq, count); | |
11505 | } | |
af7f588d MD |
11506 | |
11507 | #ifdef CONFIG_SCHED_MM_CID | |
223baf9d | 11508 | |
0019a2d4 | 11509 | /* |
223baf9d MD |
11510 | * @cid_lock: Guarantee forward-progress of cid allocation. |
11511 | * | |
11512 | * Concurrency ID allocation within a bitmap is mostly lock-free. The cid_lock | |
11513 | * is only used when contention is detected by the lock-free allocation so | |
11514 | * forward progress can be guaranteed. | |
11515 | */ | |
11516 | DEFINE_RAW_SPINLOCK(cid_lock); | |
11517 | ||
0019a2d4 | 11518 | /* |
223baf9d MD |
11519 | * @use_cid_lock: Select cid allocation behavior: lock-free vs spinlock. |
11520 | * | |
11521 | * When @use_cid_lock is 0, the cid allocation is lock-free. When contention is | |
11522 | * detected, it is set to 1 to ensure that all newly coming allocations are | |
11523 | * serialized by @cid_lock until the allocation which detected contention | |
11524 | * completes and sets @use_cid_lock back to 0. This guarantees forward progress | |
11525 | * of a cid allocation. | |
11526 | */ | |
11527 | int use_cid_lock; | |
11528 | ||
11529 | /* | |
11530 | * mm_cid remote-clear implements a lock-free algorithm to clear per-mm/cpu cid | |
11531 | * concurrently with respect to the execution of the source runqueue context | |
11532 | * switch. | |
11533 | * | |
11534 | * There is one basic property we want to guarantee here: |
11535 | * | |
11536 | * (1) Remote-clear should _never_ mark a per-cpu cid UNSET when it is actively | |
11537 | * used by a task. That would lead to concurrent allocation of the cid and | |
11538 | * userspace corruption. | |
11539 | * | |
11540 | * Provide this guarantee by introducing a Dekker memory ordering that ensures |
11541 | * that a pair of loads observe at least one of a pair of stores, which can be | |
11542 | * shown as: | |
11543 | * | |
11544 | * X = Y = 0 | |
11545 | * | |
11546 | * w[X]=1 w[Y]=1 | |
11547 | * MB MB | |
11548 | * r[Y]=y r[X]=x | |
11549 | * | |
11550 | * This guarantees that x==0 && y==0 is impossible. But rather than using | |
11551 | * values 0 and 1, this algorithm cares about specific state transitions of the | |
11552 | * runqueue current task (as updated by the scheduler context switch), and the | |
11553 | * per-mm/cpu cid value. | |
11554 | * | |
11555 | * Let's introduce task (Y) which has task->mm == mm and task (N) which has | |
11556 | * task->mm != mm for the rest of the discussion. There are two scheduler state | |
11557 | * transitions on context switch we care about: | |
11558 | * | |
11559 | * (TSA) Store to rq->curr with transition from (N) to (Y) | |
11560 | * | |
11561 | * (TSB) Store to rq->curr with transition from (Y) to (N) | |
11562 | * | |
11563 | * On the remote-clear side, there is one transition we care about: | |
11564 | * | |
11565 | * (TMA) cmpxchg to *pcpu_cid to set the LAZY flag | |
11566 | * | |
11567 | * There is also a transition to the UNSET state which can be performed from all | |
11568 | * sides (scheduler, remote-clear). It is always performed with a cmpxchg which | |
11569 | * guarantees that only a single thread will succeed: | |
11570 | * | |
11571 | * (TMB) cmpxchg to *pcpu_cid to mark UNSET | |
11572 | * | |
11573 | * Just to be clear, what we do _not_ want to happen is a transition to UNSET | |
11574 | * when a thread is actively using the cid (property (1)). | |
11575 | * | |
11576 | * Let's look at the relevant combinations of TSA/TSB and TMA transitions. | |
11577 | * | |
11578 | * Scenario A) (TSA)+(TMA) (from next task perspective) | |
11579 | * | |
11580 | * CPU0 CPU1 | |
11581 | * | |
11582 | * Context switch CS-1 Remote-clear | |
11583 | * - store to rq->curr: (N)->(Y) (TSA) - cmpxchg to *pcpu_cid to LAZY (TMA) | |
11584 | * (implied barrier after cmpxchg) | |
11585 | * - switch_mm_cid() | |
11586 | * - memory barrier (see switch_mm_cid() | |
11587 | * comment explaining how this barrier | |
11588 | * is combined with other scheduler | |
11589 | * barriers) | |
11590 | * - mm_cid_get (next) | |
11591 | * - READ_ONCE(*pcpu_cid) - rcu_dereference(src_rq->curr) | |
11592 | * | |
11593 | * This Dekker ensures that either task (Y) is observed by the | |
11594 | * rcu_dereference() or the LAZY flag is observed by READ_ONCE(), or both are | |
11595 | * observed. | |
11596 | * | |
11597 | * If task (Y) store is observed by rcu_dereference(), it means that there is | |
11598 | * still an active task on the cpu. Remote-clear will therefore not transition | |
11599 | * to UNSET, which fulfills property (1). | |
11600 | * | |
11601 | * If task (Y) is not observed, but the lazy flag is observed by READ_ONCE(), | |
11602 | * CPU0 will move the state to UNSET, which clears the percpu cid perhaps | |
11603 | * uselessly (which is not an issue for correctness). Because task (Y) is not | |
11604 | * observed, CPU1 can move ahead to set the state to UNSET. Because moving | |
11605 | * state to UNSET is done with a cmpxchg expecting that the old state has the | |
11606 | * LAZY flag set, only one thread will successfully UNSET. | |
11607 | * | |
11608 | * If both states (LAZY flag and task (Y)) are observed, the thread on CPU0 | |
11609 | * will observe the LAZY flag and transition to UNSET (perhaps uselessly), and | |
11610 | * CPU1 will observe task (Y) and do nothing more, which is fine. | |
11611 | * | |
11612 | * What we are effectively preventing with this Dekker is a scenario where | |
11613 | * neither the LAZY flag nor the store (Y) is observed, which would fail property (1) | |
11614 | * because this would UNSET a cid which is actively used. | |
11615 | */ | |
11616 | ||
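/*
 * Illustrative sketch (not part of the kernel): the Dekker ordering from the
 * comment above as a standalone C11 litmus test, with seq_cst fences standing
 * in for the kernel's implied barriers. The assertion that r0 == 0 && r1 == 0
 * never happens mirrors "x==0 && y==0 is impossible".
 */
#include <assert.h>
#include <pthread.h>
#include <stdatomic.h>

static atomic_int X, Y;
static int r0, r1;

static void *side_a(void *arg)	/* context-switch side: w[X]=1; MB; r[Y] */
{
	(void)arg;
	atomic_store_explicit(&X, 1, memory_order_relaxed);
	atomic_thread_fence(memory_order_seq_cst);
	r0 = atomic_load_explicit(&Y, memory_order_relaxed);
	return NULL;
}

static void *side_b(void *arg)	/* remote-clear side: w[Y]=1; MB; r[X] */
{
	(void)arg;
	atomic_store_explicit(&Y, 1, memory_order_relaxed);
	atomic_thread_fence(memory_order_seq_cst);
	r1 = atomic_load_explicit(&X, memory_order_relaxed);
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	pthread_create(&a, NULL, side_a, NULL);
	pthread_create(&b, NULL, side_b, NULL);
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	/* At least one side must observe the other's store. */
	assert(r0 == 1 || r1 == 1);
	return 0;
}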
11617 | void sched_mm_cid_migrate_from(struct task_struct *t) | |
11618 | { | |
11619 | t->migrate_from_cpu = task_cpu(t); | |
11620 | } | |
11621 | ||
11622 | static | |
11623 | int __sched_mm_cid_migrate_from_fetch_cid(struct rq *src_rq, | |
11624 | struct task_struct *t, | |
11625 | struct mm_cid *src_pcpu_cid) | |
af7f588d MD |
11626 | { |
11627 | struct mm_struct *mm = t->mm; | |
223baf9d MD |
11628 | struct task_struct *src_task; |
11629 | int src_cid, last_mm_cid; | |
af7f588d MD |
11630 | |
11631 | if (!mm) | |
223baf9d MD |
11632 | return -1; |
11633 | ||
11634 | last_mm_cid = t->last_mm_cid; | |
11635 | /* | |
11636 | * If the migrated task has no last cid, or if the current | |
11637 | * task on src rq uses the cid, it means the source cid does not need | |
11638 | * to be moved to the destination cpu. | |
11639 | */ | |
11640 | if (last_mm_cid == -1) | |
11641 | return -1; | |
11642 | src_cid = READ_ONCE(src_pcpu_cid->cid); | |
11643 | if (!mm_cid_is_valid(src_cid) || last_mm_cid != src_cid) | |
11644 | return -1; | |
11645 | ||
11646 | /* | |
11647 | * If we observe an active task using the mm on this rq, it means we | |
11648 | * are not the last task to be migrated from this cpu for this mm, so | |
11649 | * there is no need to move src_cid to the destination cpu. | |
11650 | */ | |
0e34600a | 11651 | guard(rcu)(); |
223baf9d MD |
11652 | src_task = rcu_dereference(src_rq->curr); |
11653 | if (READ_ONCE(src_task->mm_cid_active) && src_task->mm == mm) { | |
223baf9d MD |
11654 | t->last_mm_cid = -1; |
11655 | return -1; | |
11656 | } | |
223baf9d MD |
11657 | |
11658 | return src_cid; | |
11659 | } | |
11660 | ||
11661 | static | |
11662 | int __sched_mm_cid_migrate_from_try_steal_cid(struct rq *src_rq, | |
11663 | struct task_struct *t, | |
11664 | struct mm_cid *src_pcpu_cid, | |
11665 | int src_cid) | |
11666 | { | |
11667 | struct task_struct *src_task; | |
11668 | struct mm_struct *mm = t->mm; | |
11669 | int lazy_cid; | |
11670 | ||
11671 | if (src_cid == -1) | |
11672 | return -1; | |
11673 | ||
11674 | /* | |
11675 | * Attempt to clear the source cpu cid to move it to the destination | |
11676 | * cpu. | |
11677 | */ | |
11678 | lazy_cid = mm_cid_set_lazy_put(src_cid); | |
11679 | if (!try_cmpxchg(&src_pcpu_cid->cid, &src_cid, lazy_cid)) | |
11680 | return -1; | |
11681 | ||
11682 | /* | |
11683 | * The implicit barrier after cmpxchg per-mm/cpu cid before loading | |
11684 | * rq->curr->mm matches the scheduler barrier in context_switch() | |
11685 | * between store to rq->curr and load of prev and next task's | |
11686 | * per-mm/cpu cid. | |
11687 | * | |
11688 | * The implicit barrier after cmpxchg per-mm/cpu cid before loading | |
11689 | * rq->curr->mm_cid_active matches the barrier in | |
11690 | * sched_mm_cid_exit_signals(), sched_mm_cid_before_execve(), and | |
11691 | * sched_mm_cid_after_execve() between store to t->mm_cid_active and | |
11692 | * load of per-mm/cpu cid. | |
11693 | */ | |
11694 | ||
11695 | /* | |
11696 | * If we observe an active task using the mm on this rq after setting | |
11697 | * the lazy-put flag, this task will be responsible for transitioning | |
11698 | * from lazy-put flag set to MM_CID_UNSET. | |
11699 | */ | |
0e34600a PZ |
11700 | scoped_guard (rcu) { |
11701 | src_task = rcu_dereference(src_rq->curr); | |
11702 | if (READ_ONCE(src_task->mm_cid_active) && src_task->mm == mm) { | |
11703 | /* | |
11704 | * We observed an active task for this mm, there is therefore | |
11705 | * no point in moving this cid to the destination cpu. | |
11706 | */ | |
11707 | t->last_mm_cid = -1; | |
11708 | return -1; | |
11709 | } | |
223baf9d | 11710 | } |
223baf9d MD |
11711 | |
11712 | /* | |
11713 | * The src_cid is unused, so it can be unset. | |
11714 | */ | |
11715 | if (!try_cmpxchg(&src_pcpu_cid->cid, &lazy_cid, MM_CID_UNSET)) | |
11716 | return -1; | |
11717 | return src_cid; | |
11718 | } | |
11719 | ||
11720 | /* | |
11721 | * Migration to dst cpu. Called with dst_rq lock held. | |
11722 | * Interrupts are disabled, which keeps small the window during which the cid | |
11723 | * is owned without holding the source rq lock. | |
11724 | */ | |
11725 | void sched_mm_cid_migrate_to(struct rq *dst_rq, struct task_struct *t) | |
11726 | { | |
11727 | struct mm_cid *src_pcpu_cid, *dst_pcpu_cid; | |
11728 | struct mm_struct *mm = t->mm; | |
11729 | int src_cid, dst_cid, src_cpu; | |
11730 | struct rq *src_rq; | |
11731 | ||
11732 | lockdep_assert_rq_held(dst_rq); | |
af7f588d MD |
11733 | |
11734 | if (!mm) | |
11735 | return; | |
223baf9d MD |
11736 | src_cpu = t->migrate_from_cpu; |
11737 | if (src_cpu == -1) { | |
11738 | t->last_mm_cid = -1; | |
11739 | return; | |
11740 | } | |
11741 | /* | |
11742 | * Move the src cid if the dst cid is unset. This keeps cid | |
11743 | * allocation close to 0 in cases where few threads migrate around | |
11744 | * many cpus. | |
11745 | * | |
11746 | * If the destination cid is already set, we may have to just clear | |
11747 | * the src cid to ensure compactness in frequent-migration | |
11748 | * scenarios. | |
11749 | * | |
11750 | * It is not useful to clear the src cid when the number of threads is | |
11751 | * greater than or equal to the number of allowed cpus, because user-space | |
11752 | * can expect that the number of allowed cids can reach the number of | |
11753 | * allowed cpus. | |
11754 | */ | |
11755 | dst_pcpu_cid = per_cpu_ptr(mm->pcpu_cid, cpu_of(dst_rq)); | |
11756 | dst_cid = READ_ONCE(dst_pcpu_cid->cid); | |
11757 | if (!mm_cid_is_unset(dst_cid) && | |
11758 | atomic_read(&mm->mm_users) >= t->nr_cpus_allowed) | |
11759 | return; | |
11760 | src_pcpu_cid = per_cpu_ptr(mm->pcpu_cid, src_cpu); | |
11761 | src_rq = cpu_rq(src_cpu); | |
11762 | src_cid = __sched_mm_cid_migrate_from_fetch_cid(src_rq, t, src_pcpu_cid); | |
11763 | if (src_cid == -1) | |
11764 | return; | |
11765 | src_cid = __sched_mm_cid_migrate_from_try_steal_cid(src_rq, t, src_pcpu_cid, | |
11766 | src_cid); | |
11767 | if (src_cid == -1) | |
11768 | return; | |
11769 | if (!mm_cid_is_unset(dst_cid)) { | |
11770 | __mm_cid_put(mm, src_cid); | |
11771 | return; | |
11772 | } | |
11773 | /* Move src_cid to dst cpu. */ | |
11774 | mm_cid_snapshot_time(dst_rq, mm); | |
11775 | WRITE_ONCE(dst_pcpu_cid->cid, src_cid); | |
11776 | } | |
11777 | ||
11778 | static void sched_mm_cid_remote_clear(struct mm_struct *mm, struct mm_cid *pcpu_cid, | |
11779 | int cpu) | |
11780 | { | |
11781 | struct rq *rq = cpu_rq(cpu); | |
11782 | struct task_struct *t; | |
223baf9d MD |
11783 | int cid, lazy_cid; |
11784 | ||
11785 | cid = READ_ONCE(pcpu_cid->cid); | |
11786 | if (!mm_cid_is_valid(cid)) | |
af7f588d | 11787 | return; |
223baf9d MD |
11788 | |
11789 | /* | |
11790 | * Clear the cpu cid if it is set to keep cid allocation compact. If | |
11791 | * there happen to be other tasks left on the source cpu using this | |
11792 | * mm, the next task using this mm will reallocate its cid on context | |
11793 | * switch. | |
11794 | */ | |
11795 | lazy_cid = mm_cid_set_lazy_put(cid); | |
11796 | if (!try_cmpxchg(&pcpu_cid->cid, &cid, lazy_cid)) | |
11797 | return; | |
11798 | ||
11799 | /* | |
11800 | * The implicit barrier after cmpxchg per-mm/cpu cid before loading | |
11801 | * rq->curr->mm matches the scheduler barrier in context_switch() | |
11802 | * between store to rq->curr and load of prev and next task's | |
11803 | * per-mm/cpu cid. | |
11804 | * | |
11805 | * The implicit barrier after cmpxchg per-mm/cpu cid before loading | |
11806 | * rq->curr->mm_cid_active matches the barrier in | |
11807 | * sched_mm_cid_exit_signals(), sched_mm_cid_before_execve(), and | |
11808 | * sched_mm_cid_after_execve() between store to t->mm_cid_active and | |
11809 | * load of per-mm/cpu cid. | |
11810 | */ | |
11811 | ||
11812 | /* | |
11813 | * If we observe an active task using the mm on this rq after setting | |
11814 | * the lazy-put flag, that task will be responsible for transitioning | |
11815 | * from lazy-put flag set to MM_CID_UNSET. | |
11816 | */ | |
0e34600a PZ |
11817 | scoped_guard (rcu) { |
11818 | t = rcu_dereference(rq->curr); | |
11819 | if (READ_ONCE(t->mm_cid_active) && t->mm == mm) | |
11820 | return; | |
223baf9d | 11821 | } |
223baf9d MD |
11822 | |
11823 | /* | |
11824 | * The cid is unused, so it can be unset. | |
11825 | * Disable interrupts to keep small the window during which the cid is | |
11826 | * owned without holding the rq lock. | |
11827 | */ | |
0e34600a PZ |
11828 | scoped_guard (irqsave) { |
11829 | if (try_cmpxchg(&pcpu_cid->cid, &lazy_cid, MM_CID_UNSET)) | |
11830 | __mm_cid_put(mm, cid); | |
11831 | } | |
af7f588d MD |
11832 | } |
11833 | ||
223baf9d MD |
11834 | static void sched_mm_cid_remote_clear_old(struct mm_struct *mm, int cpu) |
11835 | { | |
11836 | struct rq *rq = cpu_rq(cpu); | |
11837 | struct mm_cid *pcpu_cid; | |
11838 | struct task_struct *curr; | |
11839 | u64 rq_clock; | |
11840 | ||
11841 | /* | |
11842 | * rq->clock load is racy on 32-bit but one spurious clear once in a | |
11843 | * while is irrelevant. | |
11844 | */ | |
11845 | rq_clock = READ_ONCE(rq->clock); | |
11846 | pcpu_cid = per_cpu_ptr(mm->pcpu_cid, cpu); | |
11847 | ||
11848 | /* | |
11849 | * In order to take care of infrequently scheduled tasks, bump the time | |
11850 | * snapshot associated with this cid if an active task using the mm is | |
11851 | * observed on this rq. | |
11852 | */ | |
0e34600a PZ |
11853 | scoped_guard (rcu) { |
11854 | curr = rcu_dereference(rq->curr); | |
11855 | if (READ_ONCE(curr->mm_cid_active) && curr->mm == mm) { | |
11856 | WRITE_ONCE(pcpu_cid->time, rq_clock); | |
11857 | return; | |
11858 | } | |
223baf9d | 11859 | } |
223baf9d MD |
11860 | |
11861 | if (rq_clock < pcpu_cid->time + SCHED_MM_CID_PERIOD_NS) | |
11862 | return; | |
11863 | sched_mm_cid_remote_clear(mm, pcpu_cid, cpu); | |
11864 | } | |
11865 | ||
11866 | static void sched_mm_cid_remote_clear_weight(struct mm_struct *mm, int cpu, | |
11867 | int weight) | |
11868 | { | |
11869 | struct mm_cid *pcpu_cid; | |
11870 | int cid; | |
11871 | ||
11872 | pcpu_cid = per_cpu_ptr(mm->pcpu_cid, cpu); | |
11873 | cid = READ_ONCE(pcpu_cid->cid); | |
11874 | if (!mm_cid_is_valid(cid) || cid < weight) | |
11875 | return; | |
11876 | sched_mm_cid_remote_clear(mm, pcpu_cid, cpu); | |
11877 | } | |
11878 | ||
11879 | static void task_mm_cid_work(struct callback_head *work) | |
11880 | { | |
11881 | unsigned long now = jiffies, old_scan, next_scan; | |
11882 | struct task_struct *t = current; | |
11883 | struct cpumask *cidmask; | |
11884 | struct mm_struct *mm; | |
11885 | int weight, cpu; | |
11886 | ||
11887 | SCHED_WARN_ON(t != container_of(work, struct task_struct, cid_work)); | |
11888 | ||
11889 | work->next = work; /* Prevent double-add */ | |
11890 | if (t->flags & PF_EXITING) | |
11891 | return; | |
11892 | mm = t->mm; | |
11893 | if (!mm) | |
11894 | return; | |
11895 | old_scan = READ_ONCE(mm->mm_cid_next_scan); | |
11896 | next_scan = now + msecs_to_jiffies(MM_CID_SCAN_DELAY); | |
11897 | if (!old_scan) { | |
11898 | unsigned long res; | |
11899 | ||
11900 | res = cmpxchg(&mm->mm_cid_next_scan, old_scan, next_scan); | |
11901 | if (res != old_scan) | |
11902 | old_scan = res; | |
11903 | else | |
11904 | old_scan = next_scan; | |
11905 | } | |
11906 | if (time_before(now, old_scan)) | |
11907 | return; | |
11908 | if (!try_cmpxchg(&mm->mm_cid_next_scan, &old_scan, next_scan)) | |
11909 | return; | |
11910 | cidmask = mm_cidmask(mm); | |
11911 | /* Clear cids that were not recently used. */ | |
11912 | for_each_possible_cpu(cpu) | |
11913 | sched_mm_cid_remote_clear_old(mm, cpu); | |
11914 | weight = cpumask_weight(cidmask); | |
11915 | /* | |
11916 | * Clear cids that are greater than or equal to the cidmask weight to | |
11917 | * recompact it. | |
11918 | */ | |
11919 | for_each_possible_cpu(cpu) | |
11920 | sched_mm_cid_remote_clear_weight(mm, cpu, weight); | |
11921 | } | |
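/*
 * Illustrative sketch (not part of the kernel): claiming a periodic scan
 * window with cmpxchg, the same shape task_mm_cid_work() uses on
 * mm->mm_cid_next_scan. Names and the jiffies stand-in are hypothetical;
 * only one of many racing callers wins the right to scan a given period.
 */
#include <stdatomic.h>
#include <stdbool.h>

#define SCAN_DELAY 100	/* arbitrary period, in "jiffies" */

static _Atomic unsigned long next_scan;

static bool claim_scan(unsigned long now)
{
	unsigned long old = atomic_load(&next_scan);

	if (!old) {
		/* First caller installs the initial deadline. */
		unsigned long first = now + SCAN_DELAY;

		if (atomic_compare_exchange_strong(&next_scan, &old, first))
			old = first;
		/* else: old was updated to the deadline someone else set */
	}
	if ((long)(now - old) < 0)	/* time_before(now, old) */
		return false;
	/* Exactly one caller advances the deadline and performs the scan. */
	return atomic_compare_exchange_strong(&next_scan, &old,
					      now + SCAN_DELAY);
}

int main(void)
{
	/* Too early at t=0 after installing the deadline; wins at t=100. */
	return (!claim_scan(0) && claim_scan(SCAN_DELAY)) ? 0 : 1;
}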
11922 | ||
11923 | void init_sched_mm_cid(struct task_struct *t) | |
11924 | { | |
11925 | struct mm_struct *mm = t->mm; | |
11926 | int mm_users = 0; | |
11927 | ||
11928 | if (mm) { | |
11929 | mm_users = atomic_read(&mm->mm_users); | |
11930 | if (mm_users == 1) | |
11931 | mm->mm_cid_next_scan = jiffies + msecs_to_jiffies(MM_CID_SCAN_DELAY); | |
11932 | } | |
11933 | t->cid_work.next = &t->cid_work; /* Protect against double add */ | |
11934 | init_task_work(&t->cid_work, task_mm_cid_work); | |
11935 | } | |
11936 | ||
11937 | void task_tick_mm_cid(struct rq *rq, struct task_struct *curr) | |
11938 | { | |
11939 | struct callback_head *work = &curr->cid_work; | |
11940 | unsigned long now = jiffies; | |
11941 | ||
11942 | if (!curr->mm || (curr->flags & (PF_EXITING | PF_KTHREAD)) || | |
11943 | work->next != work) | |
11944 | return; | |
11945 | if (time_before(now, READ_ONCE(curr->mm->mm_cid_next_scan))) | |
11946 | return; | |
11947 | task_work_add(curr, work, TWA_RESUME); | |
11948 | } | |
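/*
 * Illustrative sketch (not part of the kernel): the self-pointing sentinel
 * used by task_tick_mm_cid()/task_mm_cid_work() ("work->next != work") to
 * avoid queueing the same callback twice. Types and helpers are hypothetical
 * minimal stand-ins for struct callback_head and task_work_add().
 */
#include <stdio.h>

struct cb {
	struct cb *next;	/* points to itself while not queued */
};

static int cb_try_queue(struct cb *cb, struct cb **head)
{
	if (cb->next != cb)
		return 0;	/* already queued: reject the double-add */
	cb->next = *head;	/* NULL-terminated singly linked list */
	*head = cb;
	return 1;
}

static struct cb *cb_pop(struct cb **head)
{
	struct cb *cb = *head;

	if (cb) {
		*head = cb->next;
		cb->next = cb;	/* re-arm: may be queued again */
	}
	return cb;
}

int main(void)
{
	struct cb work = { .next = &work }, *head = NULL;

	printf("first add: %d\n", cb_try_queue(&work, &head));	/* 1 */
	printf("double add: %d\n", cb_try_queue(&work, &head));	/* 0 */
	cb_pop(&head);
	printf("re-add: %d\n", cb_try_queue(&work, &head));	/* 1 */
	return 0;
}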
11949 | ||
11950 | void sched_mm_cid_exit_signals(struct task_struct *t) | |
11951 | { | |
11952 | struct mm_struct *mm = t->mm; | |
223baf9d MD |
11953 | struct rq *rq; |
11954 | ||
11955 | if (!mm) | |
11956 | return; | |
11957 | ||
11958 | preempt_disable(); | |
11959 | rq = this_rq(); | |
0e34600a | 11960 | guard(rq_lock_irqsave)(rq); |
223baf9d MD |
11961 | preempt_enable_no_resched(); /* holding spinlock */ |
11962 | WRITE_ONCE(t->mm_cid_active, 0); | |
11963 | /* | |
11964 | * Store t->mm_cid_active before loading per-mm/cpu cid. | |
11965 | * Matches barrier in sched_mm_cid_remote_clear_old(). | |
11966 | */ | |
11967 | smp_mb(); | |
11968 | mm_cid_put(mm); | |
11969 | t->last_mm_cid = t->mm_cid = -1; | |
223baf9d MD |
11970 | } |
11971 | ||
af7f588d MD |
11972 | void sched_mm_cid_before_execve(struct task_struct *t) |
11973 | { | |
11974 | struct mm_struct *mm = t->mm; | |
223baf9d | 11975 | struct rq *rq; |
af7f588d MD |
11976 | |
11977 | if (!mm) | |
11978 | return; | |
223baf9d MD |
11979 | |
11980 | preempt_disable(); | |
11981 | rq = this_rq(); | |
0e34600a | 11982 | guard(rq_lock_irqsave)(rq); |
223baf9d MD |
11983 | preempt_enable_no_resched(); /* holding spinlock */ |
11984 | WRITE_ONCE(t->mm_cid_active, 0); | |
11985 | /* | |
11986 | * Store t->mm_cid_active before loading per-mm/cpu cid. | |
11987 | * Matches barrier in sched_mm_cid_remote_clear_old(). | |
11988 | */ | |
11989 | smp_mb(); | |
11990 | mm_cid_put(mm); | |
11991 | t->last_mm_cid = t->mm_cid = -1; | |
af7f588d MD |
11992 | } |
11993 | ||
11994 | void sched_mm_cid_after_execve(struct task_struct *t) | |
11995 | { | |
11996 | struct mm_struct *mm = t->mm; | |
223baf9d | 11997 | struct rq *rq; |
af7f588d | 11998 | |
bbd0b031 MD |
11999 | if (!mm) |
12000 | return; | |
223baf9d MD |
12001 | |
12002 | preempt_disable(); | |
12003 | rq = this_rq(); | |
0e34600a PZ |
12004 | scoped_guard (rq_lock_irqsave, rq) { |
12005 | preempt_enable_no_resched(); /* holding spinlock */ | |
12006 | WRITE_ONCE(t->mm_cid_active, 1); | |
12007 | /* | |
12008 | * Store t->mm_cid_active before loading per-mm/cpu cid. | |
12009 | * Matches barrier in sched_mm_cid_remote_clear_old(). | |
12010 | */ | |
12011 | smp_mb(); | |
12012 | t->last_mm_cid = t->mm_cid = mm_cid_get(rq, mm); | |
12013 | } | |
af7f588d MD |
12014 | rseq_set_notify_resume(t); |
12015 | } | |
12016 | ||
12017 | void sched_mm_cid_fork(struct task_struct *t) | |
12018 | { | |
bbd0b031 | 12019 | WARN_ON_ONCE(!t->mm || t->mm_cid != -1); |
af7f588d MD |
12020 | t->mm_cid_active = 1; |
12021 | } | |
12022 | #endif |