/*
 *  kernel/sched/core.c
 *
 *  Kernel scheduler and related syscalls
 *
 *  Copyright (C) 1991-2002  Linus Torvalds
 *
 *  1996-12-23  Modified by Dave Grothe to fix bugs in semaphores and
 *		make semaphores SMP safe
 *  1998-11-19	Implemented schedule_timeout() and related stuff
 *		by Andrea Arcangeli
 *  2002-01-04	New ultra-scalable O(1) scheduler by Ingo Molnar:
 *		hybrid priority-list and round-robin design with
 *		an array-switch method of distributing timeslices
 *		and per-CPU runqueues.  Cleanups and useful suggestions
 *		by Davide Libenzi, preemptible kernel bits by Robert Love.
 *  2003-09-03	Interactivity tuning by Con Kolivas.
 *  2004-04-02	Scheduler domains code by Nick Piggin
 *  2007-04-15	Work begun on replacing all interactivity tuning with a
 *		fair scheduling design by Con Kolivas.
 *  2007-05-05	Load balancing (smp-nice) and other improvements
 *		by Peter Williams
 *  2007-05-06	Interactivity improvements to CFS by Mike Galbraith
 *  2007-07-01	Group scheduling enhancements by Srivatsa Vaddagiri
 *  2007-11-29	RT balancing improvements by Steven Rostedt, Gregory Haskins,
 *		Thomas Gleixner, Mike Kravetz
 */

#include <linux/kasan.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/nmi.h>
#include <linux/init.h>
#include <linux/uaccess.h>
#include <linux/highmem.h>
#include <linux/mmu_context.h>
#include <linux/interrupt.h>
#include <linux/capability.h>
#include <linux/completion.h>
#include <linux/kernel_stat.h>
#include <linux/debug_locks.h>
#include <linux/perf_event.h>
#include <linux/security.h>
#include <linux/notifier.h>
#include <linux/profile.h>
#include <linux/freezer.h>
#include <linux/vmalloc.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/pid_namespace.h>
#include <linux/smp.h>
#include <linux/threads.h>
#include <linux/timer.h>
#include <linux/rcupdate.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/percpu.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/sysctl.h>
#include <linux/syscalls.h>
#include <linux/times.h>
#include <linux/tsacct_kern.h>
#include <linux/kprobes.h>
#include <linux/delayacct.h>
#include <linux/unistd.h>
#include <linux/pagemap.h>
#include <linux/hrtimer.h>
#include <linux/tick.h>
#include <linux/ctype.h>
#include <linux/ftrace.h>
#include <linux/slab.h>
#include <linux/init_task.h>
#include <linux/context_tracking.h>
#include <linux/compiler.h>
#include <linux/frame.h>

#include <asm/switch_to.h>
#include <asm/tlb.h>
#include <asm/irq_regs.h>
#include <asm/mutex.h>
#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#endif

#include "sched.h"
#include "../workqueue_internal.h"
#include "../smpboot.h"

#define CREATE_TRACE_POINTS
#include <trace/events/sched.h>

DEFINE_MUTEX(sched_domains_mutex);
DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);

static void update_rq_clock_task(struct rq *rq, s64 delta);

void update_rq_clock(struct rq *rq)
{
	s64 delta;

	lockdep_assert_held(&rq->lock);

	if (rq->clock_skip_update & RQCF_ACT_SKIP)
		return;

	delta = sched_clock_cpu(cpu_of(rq)) - rq->clock;
	if (delta < 0)
		return;
	rq->clock += delta;
	update_rq_clock_task(rq, delta);
}

/*
 * Debugging: various feature bits
 */

#define SCHED_FEAT(name, enabled)	\
	(1UL << __SCHED_FEAT_##name) * enabled |

const_debug unsigned int sysctl_sched_features =
#include "features.h"
	0;

#undef SCHED_FEAT
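
/*
 * Illustrative expansion (a sketch, not compiled here): if features.h
 * contains e.g. SCHED_FEAT(GENTLE_FAIR_SLEEPERS, true) and
 * SCHED_FEAT(HRTICK, false), the #include above unrolls to:
 *
 *	const_debug unsigned int sysctl_sched_features =
 *		(1UL << __SCHED_FEAT_GENTLE_FAIR_SLEEPERS) * true |
 *		(1UL << __SCHED_FEAT_HRTICK) * false |
 *		0;
 *
 * Each enabled feature contributes its bit; the trailing 0 terminates the
 * OR chain left open by the last SCHED_FEAT() expansion.
 */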

/*
 * Number of tasks to iterate in a single balance run.
 * Limited because this is done with IRQs disabled.
 */
const_debug unsigned int sysctl_sched_nr_migrate = 32;

/*
 * period over which we average the RT time consumption, measured
 * in ms.
 *
 * default: 1s
 */
const_debug unsigned int sysctl_sched_time_avg = MSEC_PER_SEC;

/*
 * period over which we measure -rt task cpu usage in us.
 * default: 1s
 */
unsigned int sysctl_sched_rt_period = 1000000;

__read_mostly int scheduler_running;

/*
 * part of the period that we allow rt tasks to run in us.
 * default: 0.95s
 */
int sysctl_sched_rt_runtime = 950000;

/* cpus with isolated domains */
cpumask_var_t cpu_isolated_map;

/*
 * this_rq_lock - lock this runqueue and disable interrupts.
 */
static struct rq *this_rq_lock(void)
	__acquires(rq->lock)
{
	struct rq *rq;

	local_irq_disable();
	rq = this_rq();
	raw_spin_lock(&rq->lock);

	return rq;
}

/*
 * __task_rq_lock - lock the rq @p resides on.
 */
struct rq *__task_rq_lock(struct task_struct *p, struct rq_flags *rf)
	__acquires(rq->lock)
{
	struct rq *rq;

	lockdep_assert_held(&p->pi_lock);

	for (;;) {
		rq = task_rq(p);
		raw_spin_lock(&rq->lock);
		if (likely(rq == task_rq(p) && !task_on_rq_migrating(p))) {
			rf->cookie = lockdep_pin_lock(&rq->lock);
			return rq;
		}
		raw_spin_unlock(&rq->lock);

		while (unlikely(task_on_rq_migrating(p)))
			cpu_relax();
	}
}

/*
 * task_rq_lock - lock p->pi_lock and lock the rq @p resides on.
 */
struct rq *task_rq_lock(struct task_struct *p, struct rq_flags *rf)
	__acquires(p->pi_lock)
	__acquires(rq->lock)
{
	struct rq *rq;

	for (;;) {
		raw_spin_lock_irqsave(&p->pi_lock, rf->flags);
		rq = task_rq(p);
		raw_spin_lock(&rq->lock);
		/*
		 *	move_queued_task()		task_rq_lock()
		 *
		 *	ACQUIRE (rq->lock)
		 *	[S] ->on_rq = MIGRATING		[L] rq = task_rq()
		 *	WMB (__set_task_cpu())		ACQUIRE (rq->lock);
		 *	[S] ->cpu = new_cpu		[L] task_rq()
		 *					[L] ->on_rq
		 *	RELEASE (rq->lock)
		 *
		 * If we observe the old cpu in task_rq_lock, the acquire of
		 * the old rq->lock will fully serialize against the stores.
		 *
		 * If we observe the new cpu in task_rq_lock, the acquire will
		 * pair with the WMB to ensure we must then also see migrating.
		 */
		if (likely(rq == task_rq(p) && !task_on_rq_migrating(p))) {
			rf->cookie = lockdep_pin_lock(&rq->lock);
			return rq;
		}
		raw_spin_unlock(&rq->lock);
		raw_spin_unlock_irqrestore(&p->pi_lock, rf->flags);

		while (unlikely(task_on_rq_migrating(p)))
			cpu_relax();
	}
}
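
/*
 * Usage sketch (hypothetical caller): with both locks held, the task
 * cannot migrate or change scheduling state underneath us:
 *
 *	struct rq_flags rf;
 *	struct rq *rq;
 *
 *	rq = task_rq_lock(p, &rf);
 *	... inspect or modify p's scheduling state ...
 *	task_rq_unlock(rq, p, &rf);
 */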

#ifdef CONFIG_SCHED_HRTICK
/*
 * Use HR-timers to deliver accurate preemption points.
 */

static void hrtick_clear(struct rq *rq)
{
	if (hrtimer_active(&rq->hrtick_timer))
		hrtimer_cancel(&rq->hrtick_timer);
}

/*
 * High-resolution timer tick.
 * Runs from hardirq context with interrupts disabled.
 */
static enum hrtimer_restart hrtick(struct hrtimer *timer)
{
	struct rq *rq = container_of(timer, struct rq, hrtick_timer);

	WARN_ON_ONCE(cpu_of(rq) != smp_processor_id());

	raw_spin_lock(&rq->lock);
	update_rq_clock(rq);
	rq->curr->sched_class->task_tick(rq, rq->curr, 1);
	raw_spin_unlock(&rq->lock);

	return HRTIMER_NORESTART;
}

#ifdef CONFIG_SMP

static void __hrtick_restart(struct rq *rq)
{
	struct hrtimer *timer = &rq->hrtick_timer;

	hrtimer_start_expires(timer, HRTIMER_MODE_ABS_PINNED);
}

/*
 * called from hardirq (IPI) context
 */
static void __hrtick_start(void *arg)
{
	struct rq *rq = arg;

	raw_spin_lock(&rq->lock);
	__hrtick_restart(rq);
	rq->hrtick_csd_pending = 0;
	raw_spin_unlock(&rq->lock);
}

/*
 * Called to set the hrtick timer state.
 *
 * called with rq->lock held and irqs disabled
 */
void hrtick_start(struct rq *rq, u64 delay)
{
	struct hrtimer *timer = &rq->hrtick_timer;
	ktime_t time;
	s64 delta;

	/*
	 * Don't schedule slices shorter than 10000ns, that just
	 * doesn't make sense and can cause timer DoS.
	 */
	delta = max_t(s64, delay, 10000LL);
	time = ktime_add_ns(timer->base->get_time(), delta);

	hrtimer_set_expires(timer, time);

	if (rq == this_rq()) {
		__hrtick_restart(rq);
	} else if (!rq->hrtick_csd_pending) {
		smp_call_function_single_async(cpu_of(rq), &rq->hrtick_csd);
		rq->hrtick_csd_pending = 1;
	}
}

#else
/*
 * Called to set the hrtick timer state.
 *
 * called with rq->lock held and irqs disabled
 */
void hrtick_start(struct rq *rq, u64 delay)
{
	/*
	 * Don't schedule slices shorter than 10000ns, that just
	 * doesn't make sense. Rely on vruntime for fairness.
	 */
	delay = max_t(u64, delay, 10000LL);
	hrtimer_start(&rq->hrtick_timer, ns_to_ktime(delay),
		      HRTIMER_MODE_REL_PINNED);
}
#endif /* CONFIG_SMP */

static void init_rq_hrtick(struct rq *rq)
{
#ifdef CONFIG_SMP
	rq->hrtick_csd_pending = 0;

	rq->hrtick_csd.flags = 0;
	rq->hrtick_csd.func = __hrtick_start;
	rq->hrtick_csd.info = rq;
#endif

	hrtimer_init(&rq->hrtick_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	rq->hrtick_timer.function = hrtick;
}
#else	/* CONFIG_SCHED_HRTICK */
static inline void hrtick_clear(struct rq *rq)
{
}

static inline void init_rq_hrtick(struct rq *rq)
{
}
#endif	/* CONFIG_SCHED_HRTICK */

/*
 * cmpxchg based fetch_or, macro so it works for different integer types
 */
#define fetch_or(ptr, mask)						\
	({								\
		typeof(ptr) _ptr = (ptr);				\
		typeof(mask) _mask = (mask);				\
		typeof(*_ptr) _old, _val = *_ptr;			\
									\
		for (;;) {						\
			_old = cmpxchg(_ptr, _val, _val | _mask);	\
			if (_old == _val)				\
				break;					\
			_val = _old;					\
		}							\
		_old;							\
	})
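
/*
 * Usage sketch (illustrative values only): atomically OR a bit in and
 * obtain the value that was there before:
 *
 *	unsigned long flags = 0x1;
 *	unsigned long old = fetch_or(&flags, 0x4UL);
 *	// old == 0x1, flags == 0x5; the callers below test bits of 'old'
 */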

#if defined(CONFIG_SMP) && defined(TIF_POLLING_NRFLAG)
/*
 * Atomically set TIF_NEED_RESCHED and test for TIF_POLLING_NRFLAG,
 * this avoids any races wrt polling state changes and thereby avoids
 * spurious IPIs.
 */
static bool set_nr_and_not_polling(struct task_struct *p)
{
	struct thread_info *ti = task_thread_info(p);
	return !(fetch_or(&ti->flags, _TIF_NEED_RESCHED) & _TIF_POLLING_NRFLAG);
}

/*
 * Atomically set TIF_NEED_RESCHED if TIF_POLLING_NRFLAG is set.
 *
 * If this returns true, then the idle task promises to call
 * sched_ttwu_pending() and reschedule soon.
 */
static bool set_nr_if_polling(struct task_struct *p)
{
	struct thread_info *ti = task_thread_info(p);
	typeof(ti->flags) old, val = READ_ONCE(ti->flags);

	for (;;) {
		if (!(val & _TIF_POLLING_NRFLAG))
			return false;
		if (val & _TIF_NEED_RESCHED)
			return true;
		old = cmpxchg(&ti->flags, val, val | _TIF_NEED_RESCHED);
		if (old == val)
			break;
		val = old;
	}
	return true;
}

#else
static bool set_nr_and_not_polling(struct task_struct *p)
{
	set_tsk_need_resched(p);
	return true;
}

#ifdef CONFIG_SMP
static bool set_nr_if_polling(struct task_struct *p)
{
	return false;
}
#endif
#endif
void wake_q_add(struct wake_q_head *head, struct task_struct *task)
{
	struct wake_q_node *node = &task->wake_q;

	/*
	 * Atomically grab the task; if ->wake_q is non-nil it means
	 * it's already queued (either by us or someone else) and will get the
	 * wakeup due to that.
	 *
	 * This cmpxchg() implies a full barrier, which pairs with the write
	 * barrier implied by the wakeup in wake_up_q().
	 */
	if (cmpxchg(&node->next, NULL, WAKE_Q_TAIL))
		return;

	get_task_struct(task);

	/*
	 * The head is context local, there can be no concurrency.
	 */
	*head->lastp = node;
	head->lastp = &node->next;
}

void wake_up_q(struct wake_q_head *head)
{
	struct wake_q_node *node = head->first;

	while (node != WAKE_Q_TAIL) {
		struct task_struct *task;

		task = container_of(node, struct task_struct, wake_q);
		BUG_ON(!task);
		/* task can safely be re-inserted now */
		node = node->next;
		task->wake_q.next = NULL;

		/*
		 * wake_up_process() implies a wmb() to pair with the queueing
		 * in wake_q_add() so as not to miss wakeups.
		 */
		wake_up_process(task);
		put_task_struct(task);
	}
}
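
/*
 * Usage sketch (hypothetical caller; assumes the WAKE_Q() declaration
 * macro from <linux/sched.h>): queue wakeups under a lock and issue them
 * only after dropping it, so wakees never contend on the lock we hold:
 *
 *	WAKE_Q(wake_q);
 *
 *	raw_spin_lock(&some_lock);
 *	wake_q_add(&wake_q, first_waiter);
 *	wake_q_add(&wake_q, second_waiter);
 *	raw_spin_unlock(&some_lock);
 *
 *	wake_up_q(&wake_q);
 *
 * some_lock and the waiter tasks are placeholders.
 */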

/*
 * resched_curr - mark rq's current task 'to be rescheduled now'.
 *
 * On UP this means the setting of the need_resched flag, on SMP it
 * might also involve a cross-CPU call to trigger the scheduler on
 * the target CPU.
 */
void resched_curr(struct rq *rq)
{
	struct task_struct *curr = rq->curr;
	int cpu;

	lockdep_assert_held(&rq->lock);

	if (test_tsk_need_resched(curr))
		return;

	cpu = cpu_of(rq);

	if (cpu == smp_processor_id()) {
		set_tsk_need_resched(curr);
		set_preempt_need_resched();
		return;
	}

	if (set_nr_and_not_polling(curr))
		smp_send_reschedule(cpu);
	else
		trace_sched_wake_idle_without_ipi(cpu);
}

void resched_cpu(int cpu)
{
	struct rq *rq = cpu_rq(cpu);
	unsigned long flags;

	if (!raw_spin_trylock_irqsave(&rq->lock, flags))
		return;
	resched_curr(rq);
	raw_spin_unlock_irqrestore(&rq->lock, flags);
}

#ifdef CONFIG_SMP
#ifdef CONFIG_NO_HZ_COMMON
/*
 * In the semi idle case, use the nearest busy cpu for migrating timers
 * from an idle cpu. This is good for power-savings.
 *
 * We don't do a similar optimization for a completely idle system, as
 * selecting an idle cpu will add more delays to the timers than intended
 * (as that cpu's timer base may not be up to date wrt jiffies etc).
 */
int get_nohz_timer_target(void)
{
	int i, cpu = smp_processor_id();
	struct sched_domain *sd;

	if (!idle_cpu(cpu) && is_housekeeping_cpu(cpu))
		return cpu;

	rcu_read_lock();
	for_each_domain(cpu, sd) {
		for_each_cpu(i, sched_domain_span(sd)) {
			if (cpu == i)
				continue;

			if (!idle_cpu(i) && is_housekeeping_cpu(i)) {
				cpu = i;
				goto unlock;
			}
		}
	}

	if (!is_housekeeping_cpu(cpu))
		cpu = housekeeping_any_cpu();
unlock:
	rcu_read_unlock();
	return cpu;
}
/*
 * When add_timer_on() enqueues a timer into the timer wheel of an
 * idle CPU then this timer might expire before the next timer event
 * which is scheduled to wake up that CPU. In case of a completely
 * idle system the next event might even be infinite time into the
 * future. wake_up_idle_cpu() ensures that the CPU is woken up and
 * leaves the inner idle loop so the newly added timer is taken into
 * account when the CPU goes back to idle and evaluates the timer
 * wheel for the next timer event.
 */
static void wake_up_idle_cpu(int cpu)
{
	struct rq *rq = cpu_rq(cpu);

	if (cpu == smp_processor_id())
		return;

	if (set_nr_and_not_polling(rq->idle))
		smp_send_reschedule(cpu);
	else
		trace_sched_wake_idle_without_ipi(cpu);
}

static bool wake_up_full_nohz_cpu(int cpu)
{
	/*
	 * We just need the target to call irq_exit() and re-evaluate
	 * the next tick. The nohz full kick at least implies that.
	 * If needed we can still optimize that later with an
	 * empty IRQ.
	 */
	if (tick_nohz_full_cpu(cpu)) {
		if (cpu != smp_processor_id() ||
		    tick_nohz_tick_stopped())
			tick_nohz_full_kick_cpu(cpu);
		return true;
	}

	return false;
}

void wake_up_nohz_cpu(int cpu)
{
	if (!wake_up_full_nohz_cpu(cpu))
		wake_up_idle_cpu(cpu);
}

static inline bool got_nohz_idle_kick(void)
{
	int cpu = smp_processor_id();

	if (!test_bit(NOHZ_BALANCE_KICK, nohz_flags(cpu)))
		return false;

	if (idle_cpu(cpu) && !need_resched())
		return true;

	/*
	 * We can't run Idle Load Balance on this CPU for this time so we
	 * cancel it and clear NOHZ_BALANCE_KICK
	 */
	clear_bit(NOHZ_BALANCE_KICK, nohz_flags(cpu));
	return false;
}

#else /* CONFIG_NO_HZ_COMMON */

static inline bool got_nohz_idle_kick(void)
{
	return false;
}

#endif /* CONFIG_NO_HZ_COMMON */

#ifdef CONFIG_NO_HZ_FULL
bool sched_can_stop_tick(struct rq *rq)
{
	int fifo_nr_running;

	/* Deadline tasks, even if single, need the tick */
	if (rq->dl.dl_nr_running)
		return false;

	/*
	 * If there is more than one RR task, we need the tick to effect the
	 * actual RR behaviour.
	 */
	if (rq->rt.rr_nr_running) {
		if (rq->rt.rr_nr_running == 1)
			return true;
		else
			return false;
	}

	/*
	 * If there's no RR tasks, but FIFO tasks, we can skip the tick, no
	 * forced preemption between FIFO tasks.
	 */
	fifo_nr_running = rq->rt.rt_nr_running - rq->rt.rr_nr_running;
	if (fifo_nr_running)
		return true;

	/*
	 * If there are no DL, RR or FIFO tasks, there must only be CFS tasks
	 * left; if there's more than one we need the tick for involuntary
	 * preemption.
	 */
	if (rq->nr_running > 1)
		return false;

	return true;
}
#endif /* CONFIG_NO_HZ_FULL */

void sched_avg_update(struct rq *rq)
{
	s64 period = sched_avg_period();

	while ((s64)(rq_clock(rq) - rq->age_stamp) > period) {
		/*
		 * Inline assembly required to prevent the compiler
		 * optimising this loop into a divmod call.
		 * See __iter_div_u64_rem() for another example of this.
		 */
		asm("" : "+rm" (rq->age_stamp));
		rq->age_stamp += period;
		rq->rt_avg /= 2;
	}
}
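
/*
 * Worked example: each elapsed sched_avg_period() halves rq->rt_avg,
 * so RT time observed N periods ago contributes rt_avg/2^N; e.g. a
 * reading of 400 decays to 200 after one idle period and 100 after two.
 */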

#endif /* CONFIG_SMP */

#if defined(CONFIG_RT_GROUP_SCHED) || (defined(CONFIG_FAIR_GROUP_SCHED) && \
			(defined(CONFIG_SMP) || defined(CONFIG_CFS_BANDWIDTH)))
/*
 * Iterate task_group tree rooted at *from, calling @down when first entering a
 * node and @up when leaving it for the final time.
 *
 * Caller must hold rcu_lock or sufficient equivalent.
 */
int walk_tg_tree_from(struct task_group *from,
			     tg_visitor down, tg_visitor up, void *data)
{
	struct task_group *parent, *child;
	int ret;

	parent = from;

down:
	ret = (*down)(parent, data);
	if (ret)
		goto out;
	list_for_each_entry_rcu(child, &parent->children, siblings) {
		parent = child;
		goto down;

up:
		continue;
	}
	ret = (*up)(parent, data);
	if (ret || parent == from)
		goto out;

	child = parent;
	parent = parent->parent;
	if (parent)
		goto up;
out:
	return ret;
}

int tg_nop(struct task_group *tg, void *data)
{
	return 0;
}
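
/*
 * Usage sketch (my_down_visitor is a hypothetical tg_visitor): visit
 * every group below the root on the way down, doing nothing on the way
 * back up:
 *
 *	rcu_read_lock();
 *	ret = walk_tg_tree_from(&root_task_group, my_down_visitor, tg_nop, data);
 *	rcu_read_unlock();
 *
 * Pass tg_nop for whichever direction the caller does not care about.
 */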
#endif

static void set_load_weight(struct task_struct *p)
{
	int prio = p->static_prio - MAX_RT_PRIO;
	struct load_weight *load = &p->se.load;

	/*
	 * SCHED_IDLE tasks get minimal weight:
	 */
	if (idle_policy(p->policy)) {
		load->weight = scale_load(WEIGHT_IDLEPRIO);
		load->inv_weight = WMULT_IDLEPRIO;
		return;
	}

	load->weight = scale_load(sched_prio_to_weight[prio]);
	load->inv_weight = sched_prio_to_wmult[prio];
}
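
/*
 * For reference: sched_prio_to_weight[] is centred on NICE_0_LOAD (1024)
 * at nice 0 and scales by roughly 1.25 per nice level, e.g. nice -1 -> 1277,
 * nice 0 -> 1024, nice 1 -> 820. inv_weight caches 2^32/weight so hot paths
 * can multiply and shift instead of divide.
 */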

static inline void enqueue_task(struct rq *rq, struct task_struct *p, int flags)
{
	update_rq_clock(rq);
	if (!(flags & ENQUEUE_RESTORE))
		sched_info_queued(rq, p);
	p->sched_class->enqueue_task(rq, p, flags);
}

static inline void dequeue_task(struct rq *rq, struct task_struct *p, int flags)
{
	update_rq_clock(rq);
	if (!(flags & DEQUEUE_SAVE))
		sched_info_dequeued(rq, p);
	p->sched_class->dequeue_task(rq, p, flags);
}

void activate_task(struct rq *rq, struct task_struct *p, int flags)
{
	if (task_contributes_to_load(p))
		rq->nr_uninterruptible--;

	enqueue_task(rq, p, flags);
}

void deactivate_task(struct rq *rq, struct task_struct *p, int flags)
{
	if (task_contributes_to_load(p))
		rq->nr_uninterruptible++;

	dequeue_task(rq, p, flags);
}
static void update_rq_clock_task(struct rq *rq, s64 delta)
{
/*
 * In theory, the compiler should just see 0 here, and optimize out the call
 * to sched_rt_avg_update. But I don't trust it...
 */
#if defined(CONFIG_IRQ_TIME_ACCOUNTING) || defined(CONFIG_PARAVIRT_TIME_ACCOUNTING)
	s64 steal = 0, irq_delta = 0;
#endif
#ifdef CONFIG_IRQ_TIME_ACCOUNTING
	irq_delta = irq_time_read(cpu_of(rq)) - rq->prev_irq_time;

	/*
	 * Since irq_time is only updated on {soft,}irq_exit, we might run into
	 * this case when a previous update_rq_clock() happened inside a
	 * {soft,}irq region.
	 *
	 * When this happens, we stop ->clock_task and only update the
	 * prev_irq_time stamp to account for the part that fit, so that a next
	 * update will consume the rest. This ensures ->clock_task is
	 * monotonic.
	 *
	 * It does however cause some slight misattribution of {soft,}irq
	 * time; a more accurate solution would be to update the irq_time using
	 * the current rq->clock timestamp, except that would require using
	 * atomic ops.
	 */
	if (irq_delta > delta)
		irq_delta = delta;

	rq->prev_irq_time += irq_delta;
	delta -= irq_delta;
#endif
#ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING
	if (static_key_false((&paravirt_steal_rq_enabled))) {
		steal = paravirt_steal_clock(cpu_of(rq));
		steal -= rq->prev_steal_time_rq;

		if (unlikely(steal > delta))
			steal = delta;

		rq->prev_steal_time_rq += steal;
		delta -= steal;
	}
#endif

	rq->clock_task += delta;

#if defined(CONFIG_IRQ_TIME_ACCOUNTING) || defined(CONFIG_PARAVIRT_TIME_ACCOUNTING)
	if ((irq_delta + steal) && sched_feat(NONTASK_CAPACITY))
		sched_rt_avg_update(rq, irq_delta + steal);
#endif
}

void sched_set_stop_task(int cpu, struct task_struct *stop)
{
	struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };
	struct task_struct *old_stop = cpu_rq(cpu)->stop;

	if (stop) {
		/*
		 * Make it appear like a SCHED_FIFO task, it's something
		 * userspace knows about and won't get confused about.
		 *
		 * Also, it will make PI more or less work without too
		 * much confusion -- but then, stop work should not
		 * rely on PI working anyway.
		 */
		sched_setscheduler_nocheck(stop, SCHED_FIFO, &param);

		stop->sched_class = &stop_sched_class;
	}

	cpu_rq(cpu)->stop = stop;

	if (old_stop) {
		/*
		 * Reset it back to a normal scheduling class so that
		 * it can die in pieces.
		 */
		old_stop->sched_class = &rt_sched_class;
	}
}

/*
 * __normal_prio - return the priority that is based on the static prio
 */
static inline int __normal_prio(struct task_struct *p)
{
	return p->static_prio;
}

/*
 * Calculate the expected normal priority: i.e. priority
 * without taking RT-inheritance into account. Might be
 * boosted by interactivity modifiers. Changes upon fork,
 * setprio syscalls, and whenever the interactivity
 * estimator recalculates.
 */
static inline int normal_prio(struct task_struct *p)
{
	int prio;

	if (task_has_dl_policy(p))
		prio = MAX_DL_PRIO-1;
	else if (task_has_rt_policy(p))
		prio = MAX_RT_PRIO-1 - p->rt_priority;
	else
		prio = __normal_prio(p);
	return prio;
}

/*
 * Calculate the current priority, i.e. the priority
 * taken into account by the scheduler. This value might
 * be boosted by RT tasks, or might be boosted by
 * interactivity modifiers. Will be RT if the task got
 * RT-boosted. If not then it returns p->normal_prio.
 */
static int effective_prio(struct task_struct *p)
{
	p->normal_prio = normal_prio(p);
	/*
	 * If we are RT tasks or we were boosted to RT priority,
	 * keep the priority unchanged. Otherwise, update priority
	 * to the normal priority:
	 */
	if (!rt_prio(p->prio))
		return p->normal_prio;
	return p->prio;
}
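
/*
 * Worked example: a SCHED_FIFO task with rt_priority 10 gets
 * normal_prio = MAX_RT_PRIO-1 - 10 = 89, while an unboosted nice-0
 * SCHED_NORMAL task keeps its static_prio of 120. Lower values are
 * scheduled first.
 */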

/**
 * task_curr - is this task currently executing on a CPU?
 * @p: the task in question.
 *
 * Return: 1 if the task is currently executing. 0 otherwise.
 */
inline int task_curr(const struct task_struct *p)
{
	return cpu_curr(task_cpu(p)) == p;
}

/*
 * switched_from, switched_to and prio_changed must _NOT_ drop rq->lock,
 * use the balance_callback list if you want balancing.
 *
 * this means any call to check_class_changed() must be followed by a call to
 * balance_callback().
 */
static inline void check_class_changed(struct rq *rq, struct task_struct *p,
				       const struct sched_class *prev_class,
				       int oldprio)
{
	if (prev_class != p->sched_class) {
		if (prev_class->switched_from)
			prev_class->switched_from(rq, p);

		p->sched_class->switched_to(rq, p);
	} else if (oldprio != p->prio || dl_task(p))
		p->sched_class->prio_changed(rq, p, oldprio);
}

void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags)
{
	const struct sched_class *class;

	if (p->sched_class == rq->curr->sched_class) {
		rq->curr->sched_class->check_preempt_curr(rq, p, flags);
	} else {
		for_each_class(class) {
			if (class == rq->curr->sched_class)
				break;
			if (class == p->sched_class) {
				resched_curr(rq);
				break;
			}
		}
	}

	/*
	 * A queue event has occurred, and we're going to schedule. In
	 * this case, we can save a useless back to back clock update.
	 */
	if (task_on_rq_queued(rq->curr) && test_tsk_need_resched(rq->curr))
		rq_clock_skip_update(rq, true);
}

#ifdef CONFIG_SMP
/*
 * This is how migration works:
 *
 * 1) we invoke migration_cpu_stop() on the target CPU using
 *    stop_one_cpu().
 * 2) stopper starts to run (implicitly forcing the migrated thread
 *    off the CPU)
 * 3) it checks whether the migrated task is still in the wrong runqueue.
 * 4) if it's in the wrong runqueue then the migration thread removes
 *    it and puts it into the right queue.
 * 5) stopper completes and stop_one_cpu() returns and the migration
 *    is done.
 */

/*
 * move_queued_task - move a queued task to new rq.
 *
 * Returns (locked) new rq. Old rq's lock is released.
 */
static struct rq *move_queued_task(struct rq *rq, struct task_struct *p, int new_cpu)
{
	lockdep_assert_held(&rq->lock);

	p->on_rq = TASK_ON_RQ_MIGRATING;
	dequeue_task(rq, p, 0);
	set_task_cpu(p, new_cpu);
	raw_spin_unlock(&rq->lock);

	rq = cpu_rq(new_cpu);

	raw_spin_lock(&rq->lock);
	BUG_ON(task_cpu(p) != new_cpu);
	enqueue_task(rq, p, 0);
	p->on_rq = TASK_ON_RQ_QUEUED;
	check_preempt_curr(rq, p, 0);

	return rq;
}

struct migration_arg {
	struct task_struct *task;
	int dest_cpu;
};

/*
 * Move (not current) task off this cpu, onto dest cpu. We're doing
 * this because either it can't run here any more (set_cpus_allowed()
 * away from this CPU, or CPU going down), or because we're
 * attempting to rebalance this task on exec (sched_exec).
 *
 * So we race with normal scheduler movements, but that's OK, as long
 * as the task is no longer on this CPU.
 */
static struct rq *__migrate_task(struct rq *rq, struct task_struct *p, int dest_cpu)
{
	if (unlikely(!cpu_active(dest_cpu)))
		return rq;

	/* Affinity changed (again). */
	if (!cpumask_test_cpu(dest_cpu, tsk_cpus_allowed(p)))
		return rq;

	rq = move_queued_task(rq, p, dest_cpu);

	return rq;
}

/*
 * migration_cpu_stop - this will be executed by a highprio stopper thread
 * and performs thread migration by bumping thread off CPU then
 * 'pushing' onto another runqueue.
 */
static int migration_cpu_stop(void *data)
{
	struct migration_arg *arg = data;
	struct task_struct *p = arg->task;
	struct rq *rq = this_rq();

	/*
	 * The original target cpu might have gone down and we might
	 * be on another cpu but it doesn't matter.
	 */
	local_irq_disable();
	/*
	 * We need to explicitly wake pending tasks before running
	 * __migrate_task() such that we will not miss enforcing cpus_allowed
	 * during wakeups, see set_cpus_allowed_ptr()'s TASK_WAKING test.
	 */
	sched_ttwu_pending();

	raw_spin_lock(&p->pi_lock);
	raw_spin_lock(&rq->lock);
	/*
	 * If task_rq(p) != rq, it cannot be migrated here, because we're
	 * holding rq->lock, if p->on_rq == 0 it cannot get enqueued because
	 * we're holding p->pi_lock.
	 */
	if (task_rq(p) == rq && task_on_rq_queued(p))
		rq = __migrate_task(rq, p, arg->dest_cpu);
	raw_spin_unlock(&rq->lock);
	raw_spin_unlock(&p->pi_lock);

	local_irq_enable();
	return 0;
}

/*
 * sched_class::set_cpus_allowed must do the below, but is not required to
 * actually call this function.
 */
void set_cpus_allowed_common(struct task_struct *p, const struct cpumask *new_mask)
{
	cpumask_copy(&p->cpus_allowed, new_mask);
	p->nr_cpus_allowed = cpumask_weight(new_mask);
}

void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
{
	struct rq *rq = task_rq(p);
	bool queued, running;

	lockdep_assert_held(&p->pi_lock);

	queued = task_on_rq_queued(p);
	running = task_current(rq, p);

	if (queued) {
		/*
		 * Because __kthread_bind() calls this on blocked tasks without
		 * holding rq->lock.
		 */
		lockdep_assert_held(&rq->lock);
		dequeue_task(rq, p, DEQUEUE_SAVE);
	}
	if (running)
		put_prev_task(rq, p);

	p->sched_class->set_cpus_allowed(p, new_mask);

	if (running)
		p->sched_class->set_curr_task(rq);
	if (queued)
		enqueue_task(rq, p, ENQUEUE_RESTORE);
}

/*
 * Change a given task's CPU affinity. Migrate the thread to a
 * proper CPU and schedule it away if the CPU it's executing on
 * is removed from the allowed bitmask.
 *
 * NOTE: the caller must have a valid reference to the task, the
 * task must not exit() & deallocate itself prematurely. The
 * call is not atomic; no spinlocks may be held.
 */
static int __set_cpus_allowed_ptr(struct task_struct *p,
				  const struct cpumask *new_mask, bool check)
{
	const struct cpumask *cpu_valid_mask = cpu_active_mask;
	unsigned int dest_cpu;
	struct rq_flags rf;
	struct rq *rq;
	int ret = 0;

	rq = task_rq_lock(p, &rf);

	if (p->flags & PF_KTHREAD) {
		/*
		 * Kernel threads are allowed on online && !active CPUs
		 */
		cpu_valid_mask = cpu_online_mask;
	}

	/*
	 * Must re-check here, to close a race against __kthread_bind(),
	 * sched_setaffinity() is not guaranteed to observe the flag.
	 */
	if (check && (p->flags & PF_NO_SETAFFINITY)) {
		ret = -EINVAL;
		goto out;
	}

	if (cpumask_equal(&p->cpus_allowed, new_mask))
		goto out;

	if (!cpumask_intersects(new_mask, cpu_valid_mask)) {
		ret = -EINVAL;
		goto out;
	}

	do_set_cpus_allowed(p, new_mask);

	if (p->flags & PF_KTHREAD) {
		/*
		 * For kernel threads that do indeed end up on online &&
		 * !active we want to ensure they are strict per-cpu threads.
		 */
		WARN_ON(cpumask_intersects(new_mask, cpu_online_mask) &&
			!cpumask_intersects(new_mask, cpu_active_mask) &&
			p->nr_cpus_allowed != 1);
	}

	/* Can the task run on the task's current CPU? If so, we're done */
	if (cpumask_test_cpu(task_cpu(p), new_mask))
		goto out;

	dest_cpu = cpumask_any_and(cpu_valid_mask, new_mask);
	if (task_running(rq, p) || p->state == TASK_WAKING) {
		struct migration_arg arg = { p, dest_cpu };
		/* Need help from migration thread: drop lock and wait. */
		task_rq_unlock(rq, p, &rf);
		stop_one_cpu(cpu_of(rq), migration_cpu_stop, &arg);
		tlb_migrate_finish(p->mm);
		return 0;
	} else if (task_on_rq_queued(p)) {
		/*
		 * OK, since we're going to drop the lock immediately
		 * afterwards anyway.
		 */
		lockdep_unpin_lock(&rq->lock, rf.cookie);
		rq = move_queued_task(rq, p, dest_cpu);
		lockdep_repin_lock(&rq->lock, rf.cookie);
	}
out:
	task_rq_unlock(rq, p, &rf);

	return ret;
}

int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
{
	return __set_cpus_allowed_ptr(p, new_mask, false);
}
EXPORT_SYMBOL_GPL(set_cpus_allowed_ptr);
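
/*
 * Usage sketch (hypothetical caller): restrict a task to one CPU; the
 * helper migrates it away if it is currently running somewhere else:
 *
 *	ret = set_cpus_allowed_ptr(p, cpumask_of(target_cpu));
 *	if (ret)
 *		... -EINVAL: new mask doesn't intersect the valid mask ...
 */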

void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
{
#ifdef CONFIG_SCHED_DEBUG
	/*
	 * We should never call set_task_cpu() on a blocked task,
	 * ttwu() will sort out the placement.
	 */
	WARN_ON_ONCE(p->state != TASK_RUNNING && p->state != TASK_WAKING &&
			!p->on_rq);

	/*
	 * Migrating fair class task must have p->on_rq = TASK_ON_RQ_MIGRATING,
	 * because schedstat_wait_{start,end} rebase migrating task's wait_start
	 * time relying on p->on_rq.
	 */
	WARN_ON_ONCE(p->state == TASK_RUNNING &&
		     p->sched_class == &fair_sched_class &&
		     (p->on_rq && !task_on_rq_migrating(p)));

#ifdef CONFIG_LOCKDEP
	/*
	 * The caller should hold either p->pi_lock or rq->lock, when changing
	 * a task's CPU. ->pi_lock for waking tasks, rq->lock for runnable tasks.
	 *
	 * sched_move_task() holds both and thus holding either pins the cgroup,
	 * see task_group().
	 *
	 * Furthermore, all task_rq users should acquire both locks, see
	 * task_rq_lock().
	 */
	WARN_ON_ONCE(debug_locks && !(lockdep_is_held(&p->pi_lock) ||
				      lockdep_is_held(&task_rq(p)->lock)));
#endif
#endif

	trace_sched_migrate_task(p, new_cpu);

	if (task_cpu(p) != new_cpu) {
		if (p->sched_class->migrate_task_rq)
			p->sched_class->migrate_task_rq(p);
		p->se.nr_migrations++;
		perf_event_task_migrate(p);
	}

	__set_task_cpu(p, new_cpu);
}

static void __migrate_swap_task(struct task_struct *p, int cpu)
{
	if (task_on_rq_queued(p)) {
		struct rq *src_rq, *dst_rq;

		src_rq = task_rq(p);
		dst_rq = cpu_rq(cpu);

		p->on_rq = TASK_ON_RQ_MIGRATING;
		deactivate_task(src_rq, p, 0);
		set_task_cpu(p, cpu);
		activate_task(dst_rq, p, 0);
		p->on_rq = TASK_ON_RQ_QUEUED;
		check_preempt_curr(dst_rq, p, 0);
	} else {
		/*
		 * Task isn't running anymore; make it appear like we migrated
		 * it before it went to sleep. This means on wakeup we make the
		 * previous cpu our target instead of where it really is.
		 */
		p->wake_cpu = cpu;
	}
}

struct migration_swap_arg {
	struct task_struct *src_task, *dst_task;
	int src_cpu, dst_cpu;
};

static int migrate_swap_stop(void *data)
{
	struct migration_swap_arg *arg = data;
	struct rq *src_rq, *dst_rq;
	int ret = -EAGAIN;

	if (!cpu_active(arg->src_cpu) || !cpu_active(arg->dst_cpu))
		return -EAGAIN;

	src_rq = cpu_rq(arg->src_cpu);
	dst_rq = cpu_rq(arg->dst_cpu);

	double_raw_lock(&arg->src_task->pi_lock,
			&arg->dst_task->pi_lock);
	double_rq_lock(src_rq, dst_rq);

	if (task_cpu(arg->dst_task) != arg->dst_cpu)
		goto unlock;

	if (task_cpu(arg->src_task) != arg->src_cpu)
		goto unlock;

	if (!cpumask_test_cpu(arg->dst_cpu, tsk_cpus_allowed(arg->src_task)))
		goto unlock;

	if (!cpumask_test_cpu(arg->src_cpu, tsk_cpus_allowed(arg->dst_task)))
		goto unlock;

	__migrate_swap_task(arg->src_task, arg->dst_cpu);
	__migrate_swap_task(arg->dst_task, arg->src_cpu);

	ret = 0;

unlock:
	double_rq_unlock(src_rq, dst_rq);
	raw_spin_unlock(&arg->dst_task->pi_lock);
	raw_spin_unlock(&arg->src_task->pi_lock);

	return ret;
}

/*
 * Cross migrate two tasks
 */
int migrate_swap(struct task_struct *cur, struct task_struct *p)
{
	struct migration_swap_arg arg;
	int ret = -EINVAL;

	arg = (struct migration_swap_arg){
		.src_task = cur,
		.src_cpu = task_cpu(cur),
		.dst_task = p,
		.dst_cpu = task_cpu(p),
	};

	if (arg.src_cpu == arg.dst_cpu)
		goto out;

	/*
	 * These three tests are all lockless; this is OK since all of them
	 * will be re-checked with proper locks held further down the line.
	 */
	if (!cpu_active(arg.src_cpu) || !cpu_active(arg.dst_cpu))
		goto out;

	if (!cpumask_test_cpu(arg.dst_cpu, tsk_cpus_allowed(arg.src_task)))
		goto out;

	if (!cpumask_test_cpu(arg.src_cpu, tsk_cpus_allowed(arg.dst_task)))
		goto out;

	trace_sched_swap_numa(cur, arg.src_cpu, p, arg.dst_cpu);
	ret = stop_two_cpus(arg.dst_cpu, arg.src_cpu, migrate_swap_stop, &arg);

out:
	return ret;
}

/*
 * wait_task_inactive - wait for a thread to unschedule.
 *
 * If @match_state is nonzero, it's the @p->state value just checked and
 * not expected to change. If it changes, i.e. @p might have woken up,
 * then return zero. When we succeed in waiting for @p to be off its CPU,
 * we return a positive number (its total switch count). If a second call
 * a short while later returns the same number, the caller can be sure that
 * @p has remained unscheduled the whole time.
 *
 * The caller must ensure that the task *will* unschedule sometime soon,
 * else this function might spin for a *long* time. This function can't
 * be called with interrupts off, or it may introduce deadlock with
 * smp_call_function() if an IPI is sent by the same process we are
 * waiting to become inactive.
 */
unsigned long wait_task_inactive(struct task_struct *p, long match_state)
{
	int running, queued;
	struct rq_flags rf;
	unsigned long ncsw;
	struct rq *rq;

	for (;;) {
		/*
		 * We do the initial early heuristics without holding
		 * any task-queue locks at all. We'll only try to get
		 * the runqueue lock when things look like they will
		 * work out!
		 */
		rq = task_rq(p);

		/*
		 * If the task is actively running on another CPU
		 * still, just relax and busy-wait without holding
		 * any locks.
		 *
		 * NOTE! Since we don't hold any locks, it's not
		 * even sure that "rq" stays as the right runqueue!
		 * But we don't care, since "task_running()" will
		 * return false if the runqueue has changed and p
		 * is actually now running somewhere else!
		 */
		while (task_running(rq, p)) {
			if (match_state && unlikely(p->state != match_state))
				return 0;
			cpu_relax();
		}

		/*
		 * Ok, time to look more closely! We need the rq
		 * lock now, to be *sure*. If we're wrong, we'll
		 * just go back and repeat.
		 */
		rq = task_rq_lock(p, &rf);
		trace_sched_wait_task(p);
		running = task_running(rq, p);
		queued = task_on_rq_queued(p);
		ncsw = 0;
		if (!match_state || p->state == match_state)
			ncsw = p->nvcsw | LONG_MIN; /* sets MSB */
		task_rq_unlock(rq, p, &rf);

		/*
		 * If it changed from the expected state, bail out now.
		 */
		if (unlikely(!ncsw))
			break;

		/*
		 * Was it really running after all now that we
		 * checked with the proper locks actually held?
		 *
		 * Oops. Go back and try again..
		 */
		if (unlikely(running)) {
			cpu_relax();
			continue;
		}

		/*
		 * It's not enough that it's not actively running,
		 * it must be off the runqueue _entirely_, and not
		 * preempted!
		 *
		 * So if it was still runnable (but just not actively
		 * running right now), it's preempted, and we should
		 * yield - it could be a while.
		 */
		if (unlikely(queued)) {
			ktime_t to = ktime_set(0, NSEC_PER_SEC/HZ);

			set_current_state(TASK_UNINTERRUPTIBLE);
			schedule_hrtimeout(&to, HRTIMER_MODE_REL);
			continue;
		}

		/*
		 * Ahh, all good. It wasn't running, and it wasn't
		 * runnable, which means that it will never become
		 * running in the future either. We're all done!
		 */
		break;
	}

	return ncsw;
}
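
/*
 * Usage sketch (hypothetical caller): verify @p stayed descheduled across
 * an operation by comparing switch counts:
 *
 *	unsigned long ncsw = wait_task_inactive(p, TASK_TRACED);
 *	if (!ncsw)
 *		return -ESRCH;	// state changed; @p may have woken
 *	... operate on the descheduled task ...
 *	if (wait_task_inactive(p, TASK_TRACED) != ncsw)
 *		... @p ran in between; retry ...
 */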

/***
 * kick_process - kick a running thread to enter/exit the kernel
 * @p: the to-be-kicked thread
 *
 * Cause a process which is running on another CPU to enter
 * kernel-mode, without any delay. (to get signals handled.)
 *
 * NOTE: this function doesn't have to take the runqueue lock,
 * because all it wants to ensure is that the remote task enters
 * the kernel. If the IPI races and the task has been migrated
 * to another CPU then no harm is done and the purpose has been
 * achieved as well.
 */
void kick_process(struct task_struct *p)
{
	int cpu;

	preempt_disable();
	cpu = task_cpu(p);
	if ((cpu != smp_processor_id()) && task_curr(p))
		smp_send_reschedule(cpu);
	preempt_enable();
}
EXPORT_SYMBOL_GPL(kick_process);

/*
 * ->cpus_allowed is protected by both rq->lock and p->pi_lock
 *
 * A few notes on cpu_active vs cpu_online:
 *
 *  - cpu_active must be a subset of cpu_online
 *
 *  - on cpu-up we allow per-cpu kthreads on the online && !active cpu,
 *    see __set_cpus_allowed_ptr(). At this point the newly online
 *    cpu isn't yet part of the sched domains, and balancing will not
 *    see it.
 *
 *  - on cpu-down we clear cpu_active() to mask the sched domains and
 *    prevent the load balancer from placing new tasks on the
 *    to-be-removed cpu. Existing tasks will remain running there and
 *    will be taken off.
 *
 * This means that fallback selection must not select !active CPUs.
 * And can assume that any active CPU must be online. Conversely
 * select_task_rq() below may allow selection of !active CPUs in order
 * to satisfy the above rules.
 */
static int select_fallback_rq(int cpu, struct task_struct *p)
{
	int nid = cpu_to_node(cpu);
	const struct cpumask *nodemask = NULL;
	enum { cpuset, possible, fail } state = cpuset;
	int dest_cpu;

	/*
	 * If the node that the cpu is on has been offlined, cpu_to_node()
	 * will return -1. There is no cpu on the node, and we should
	 * select the cpu on the other node.
	 */
	if (nid != -1) {
		nodemask = cpumask_of_node(nid);

		/* Look for allowed, online CPU in same node. */
		for_each_cpu(dest_cpu, nodemask) {
			if (!cpu_active(dest_cpu))
				continue;
			if (cpumask_test_cpu(dest_cpu, tsk_cpus_allowed(p)))
				return dest_cpu;
		}
	}

	for (;;) {
		/* Any allowed, online CPU? */
		for_each_cpu(dest_cpu, tsk_cpus_allowed(p)) {
			if (!cpu_active(dest_cpu))
				continue;
			goto out;
		}

		/* No more Mr. Nice Guy. */
		switch (state) {
		case cpuset:
			if (IS_ENABLED(CONFIG_CPUSETS)) {
				cpuset_cpus_allowed_fallback(p);
				state = possible;
				break;
			}
			/* fall-through */
		case possible:
			do_set_cpus_allowed(p, cpu_possible_mask);
			state = fail;
			break;

		case fail:
			BUG();
			break;
		}
	}

out:
	if (state != cpuset) {
		/*
		 * Don't tell them about moving exiting tasks or
		 * kernel threads (both mm NULL), since they never
		 * leave kernel.
		 */
		if (p->mm && printk_ratelimit()) {
			printk_deferred("process %d (%s) no longer affine to cpu%d\n",
					task_pid_nr(p), p->comm, cpu);
		}
	}

	return dest_cpu;
}

/*
 * The caller (fork, wakeup) owns p->pi_lock, ->cpus_allowed is stable.
 */
static inline
int select_task_rq(struct task_struct *p, int cpu, int sd_flags, int wake_flags)
{
	lockdep_assert_held(&p->pi_lock);

	if (tsk_nr_cpus_allowed(p) > 1)
		cpu = p->sched_class->select_task_rq(p, cpu, sd_flags, wake_flags);
	else
		cpu = cpumask_any(tsk_cpus_allowed(p));

	/*
	 * In order not to call set_task_cpu() on a blocking task we need
	 * to rely on ttwu() to place the task on a valid ->cpus_allowed
	 * cpu.
	 *
	 * Since this is common to all placement strategies, this lives here.
	 *
	 * [ this allows ->select_task() to simply return task_cpu(p) and
	 *   not worry about this generic constraint ]
	 */
	if (unlikely(!cpumask_test_cpu(cpu, tsk_cpus_allowed(p)) ||
		     !cpu_online(cpu)))
		cpu = select_fallback_rq(task_cpu(p), p);

	return cpu;
}

static void update_avg(u64 *avg, u64 sample)
{
	s64 diff = sample - *avg;
	*avg += diff >> 3;
}
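
/*
 * update_avg() is an exponentially weighted moving average giving the
 * new sample 1/8 weight: avg += (sample - avg) / 8. E.g. avg = 800,
 * sample = 1600 leaves avg at 900.
 */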

#else

static inline int __set_cpus_allowed_ptr(struct task_struct *p,
					 const struct cpumask *new_mask, bool check)
{
	return set_cpus_allowed_ptr(p, new_mask);
}

#endif /* CONFIG_SMP */

static void
ttwu_stat(struct task_struct *p, int cpu, int wake_flags)
{
#ifdef CONFIG_SCHEDSTATS
	struct rq *rq = this_rq();

#ifdef CONFIG_SMP
	int this_cpu = smp_processor_id();

	if (cpu == this_cpu) {
		schedstat_inc(rq, ttwu_local);
		schedstat_inc(p, se.statistics.nr_wakeups_local);
	} else {
		struct sched_domain *sd;

		schedstat_inc(p, se.statistics.nr_wakeups_remote);
		rcu_read_lock();
		for_each_domain(this_cpu, sd) {
			if (cpumask_test_cpu(cpu, sched_domain_span(sd))) {
				schedstat_inc(sd, ttwu_wake_remote);
				break;
			}
		}
		rcu_read_unlock();
	}

	if (wake_flags & WF_MIGRATED)
		schedstat_inc(p, se.statistics.nr_wakeups_migrate);

#endif /* CONFIG_SMP */

	schedstat_inc(rq, ttwu_count);
	schedstat_inc(p, se.statistics.nr_wakeups);

	if (wake_flags & WF_SYNC)
		schedstat_inc(p, se.statistics.nr_wakeups_sync);

#endif /* CONFIG_SCHEDSTATS */
}

static inline void ttwu_activate(struct rq *rq, struct task_struct *p, int en_flags)
{
	activate_task(rq, p, en_flags);
	p->on_rq = TASK_ON_RQ_QUEUED;

	/* if a worker is waking up, notify workqueue */
	if (p->flags & PF_WQ_WORKER)
		wq_worker_waking_up(p, cpu_of(rq));
}
| 1675 | |
| 1676 | /* |
| 1677 | * Mark the task runnable and perform wakeup-preemption. |
| 1678 | */ |
| 1679 | static void ttwu_do_wakeup(struct rq *rq, struct task_struct *p, int wake_flags, |
| 1680 | struct pin_cookie cookie) |
| 1681 | { |
| 1682 | check_preempt_curr(rq, p, wake_flags); |
| 1683 | p->state = TASK_RUNNING; |
| 1684 | trace_sched_wakeup(p); |
| 1685 | |
| 1686 | #ifdef CONFIG_SMP |
| 1687 | if (p->sched_class->task_woken) { |
| 1688 | /* |
| 1689 | * Our task @p is fully woken up and running; so it's safe to
| 1690 | * drop the rq->lock, hereafter rq is only used for statistics. |
| 1691 | */ |
| 1692 | lockdep_unpin_lock(&rq->lock, cookie); |
| 1693 | p->sched_class->task_woken(rq, p); |
| 1694 | lockdep_repin_lock(&rq->lock, cookie); |
| 1695 | } |
| 1696 | |
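| | /*
| |  * Track how long this cpu stays idle on average; the clamp below
| |  * keeps one long idle stretch from dominating the estimate that
| |  * idle balancing uses to decide whether balancing is worthwhile.
| |  */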
| 1697 | if (rq->idle_stamp) { |
| 1698 | u64 delta = rq_clock(rq) - rq->idle_stamp; |
| 1699 | u64 max = 2*rq->max_idle_balance_cost; |
| 1700 | |
| 1701 | update_avg(&rq->avg_idle, delta); |
| 1702 | |
| 1703 | if (rq->avg_idle > max) |
| 1704 | rq->avg_idle = max; |
| 1705 | |
| 1706 | rq->idle_stamp = 0; |
| 1707 | } |
| 1708 | #endif |
| 1709 | } |
| 1710 | |
| 1711 | static void |
| 1712 | ttwu_do_activate(struct rq *rq, struct task_struct *p, int wake_flags, |
| 1713 | struct pin_cookie cookie) |
| 1714 | { |
| 1715 | int en_flags = ENQUEUE_WAKEUP; |
| 1716 | |
| 1717 | lockdep_assert_held(&rq->lock); |
| 1718 | |
| 1719 | #ifdef CONFIG_SMP |
| 1720 | if (p->sched_contributes_to_load) |
| 1721 | rq->nr_uninterruptible--; |
| 1722 | |
| 1723 | if (wake_flags & WF_MIGRATED) |
| 1724 | en_flags |= ENQUEUE_MIGRATED; |
| 1725 | #endif |
| 1726 | |
| 1727 | ttwu_activate(rq, p, en_flags); |
| 1728 | ttwu_do_wakeup(rq, p, wake_flags, cookie); |
| 1729 | } |
| 1730 | |
| 1731 | /* |
| 1732 | * Called when the task @p isn't fully descheduled from its runqueue;
| 1733 | * in that case we must do a remote wakeup. It's a 'light' wakeup though,
| 1734 | * since all we need to do is flip p->state to TASK_RUNNING -- the
| 1735 | * task is still ->on_rq.
| 1736 | */ |
| 1737 | static int ttwu_remote(struct task_struct *p, int wake_flags) |
| 1738 | { |
| 1739 | struct rq_flags rf; |
| 1740 | struct rq *rq; |
| 1741 | int ret = 0; |
| 1742 | |
| 1743 | rq = __task_rq_lock(p, &rf); |
| 1744 | if (task_on_rq_queued(p)) { |
| 1745 | /* check_preempt_curr() may use rq clock */ |
| 1746 | update_rq_clock(rq); |
| 1747 | ttwu_do_wakeup(rq, p, wake_flags, rf.cookie); |
| 1748 | ret = 1; |
| 1749 | } |
| 1750 | __task_rq_unlock(rq, &rf); |
| 1751 | |
| 1752 | return ret; |
| 1753 | } |
| 1754 | |
| 1755 | #ifdef CONFIG_SMP |
| 1756 | void sched_ttwu_pending(void) |
| 1757 | { |
| 1758 | struct rq *rq = this_rq(); |
| 1759 | struct llist_node *llist = llist_del_all(&rq->wake_list); |
| 1760 | struct pin_cookie cookie; |
| 1761 | struct task_struct *p; |
| 1762 | unsigned long flags; |
| 1763 | |
| 1764 | if (!llist) |
| 1765 | return; |
| 1766 | |
| 1767 | raw_spin_lock_irqsave(&rq->lock, flags); |
| 1768 | cookie = lockdep_pin_lock(&rq->lock); |
| 1769 | |
| 1770 | while (llist) { |
| 1771 | int wake_flags = 0; |
| 1772 | |
| 1773 | p = llist_entry(llist, struct task_struct, wake_entry); |
| 1774 | llist = llist_next(llist); |
| 1775 | |
| 1776 | if (p->sched_remote_wakeup) |
| 1777 | wake_flags = WF_MIGRATED; |
| 1778 | |
| 1779 | ttwu_do_activate(rq, p, wake_flags, cookie); |
| 1780 | } |
| 1781 | |
| 1782 | lockdep_unpin_lock(&rq->lock, cookie); |
| 1783 | raw_spin_unlock_irqrestore(&rq->lock, flags); |
| 1784 | } |
| 1785 | |
| 1786 | void scheduler_ipi(void) |
| 1787 | { |
| 1788 | /* |
| 1789 | * Fold TIF_NEED_RESCHED into the preempt_count; anybody setting |
| 1790 | * TIF_NEED_RESCHED remotely (for the first time) will also send |
| 1791 | * this IPI. |
| 1792 | */ |
| 1793 | preempt_fold_need_resched(); |
| 1794 | |
| 1795 | if (llist_empty(&this_rq()->wake_list) && !got_nohz_idle_kick()) |
| 1796 | return; |
| 1797 | |
| 1798 | /* |
| 1799 | * Not all reschedule IPI handlers call irq_enter/irq_exit, since |
| 1800 | * traditionally all their work was done from the interrupt return |
| 1801 | * path. Now that we actually do some work, we need to make sure |
| 1802 | * we do call them. |
| 1803 | * |
| 1804 | * Some archs already do call them, luckily irq_enter/exit nest |
| 1805 | * properly. |
| 1806 | * |
| 1807 | * Arguably we should visit all archs and update all handlers, |
| 1808 | * however a fair share of IPIs are still resched-only, so this would
| 1809 | * somewhat pessimize the simple resched case. |
| 1810 | */ |
| 1811 | irq_enter(); |
| 1812 | sched_ttwu_pending(); |
| 1813 | |
| 1814 | /* |
| 1815 | * Check if someone kicked us for doing the nohz idle load balance. |
| 1816 | */ |
| 1817 | if (unlikely(got_nohz_idle_kick())) { |
| 1818 | this_rq()->idle_balance = 1; |
| 1819 | raise_softirq_irqoff(SCHED_SOFTIRQ); |
| 1820 | } |
| 1821 | irq_exit(); |
| 1822 | } |
| 1823 | |
| 1824 | static void ttwu_queue_remote(struct task_struct *p, int cpu, int wake_flags) |
| 1825 | { |
| 1826 | struct rq *rq = cpu_rq(cpu); |
| 1827 | |
| 1828 | p->sched_remote_wakeup = !!(wake_flags & WF_MIGRATED); |
| 1829 | |
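| | /*
| |  * llist_add() returns true only when the list was empty, so only
| |  * the first queued wakeup kicks the remote cpu (or sets its
| |  * polling flag); subsequent ones ride on the already-pending IPI.
| |  */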
| 1830 | if (llist_add(&p->wake_entry, &rq->wake_list)) {
| 1831 | if (!set_nr_if_polling(rq->idle)) |
| 1832 | smp_send_reschedule(cpu); |
| 1833 | else |
| 1834 | trace_sched_wake_idle_without_ipi(cpu); |
| 1835 | } |
| 1836 | } |
| 1837 | |
| 1838 | void wake_up_if_idle(int cpu) |
| 1839 | { |
| 1840 | struct rq *rq = cpu_rq(cpu); |
| 1841 | unsigned long flags; |
| 1842 | |
| 1843 | rcu_read_lock(); |
| 1844 | |
| 1845 | if (!is_idle_task(rcu_dereference(rq->curr))) |
| 1846 | goto out; |
| 1847 | |
| 1848 | if (set_nr_if_polling(rq->idle)) { |
| 1849 | trace_sched_wake_idle_without_ipi(cpu); |
| 1850 | } else { |
| 1851 | raw_spin_lock_irqsave(&rq->lock, flags); |
| 1852 | if (is_idle_task(rq->curr)) |
| 1853 | smp_send_reschedule(cpu); |
| 1854 | /* Else cpu is not idle; nothing to do here */
| 1855 | raw_spin_unlock_irqrestore(&rq->lock, flags); |
| 1856 | } |
| 1857 | |
| 1858 | out: |
| 1859 | rcu_read_unlock(); |
| 1860 | } |
| 1861 | |
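| | /*
| |  * Two cpus share cache when they belong to the same last-level-cache
| |  * (llc) scheduler domain.
| |  */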
| 1862 | bool cpus_share_cache(int this_cpu, int that_cpu) |
| 1863 | { |
| 1864 | return per_cpu(sd_llc_id, this_cpu) == per_cpu(sd_llc_id, that_cpu); |
| 1865 | } |
| 1866 | #endif /* CONFIG_SMP */ |
| 1867 | |
| 1868 | static void ttwu_queue(struct task_struct *p, int cpu, int wake_flags) |
| 1869 | { |
| 1870 | struct rq *rq = cpu_rq(cpu); |
| 1871 | struct pin_cookie cookie; |
| 1872 | |
| 1873 | #if defined(CONFIG_SMP) |
| 1874 | if (sched_feat(TTWU_QUEUE) && !cpus_share_cache(smp_processor_id(), cpu)) { |
| 1875 | sched_clock_cpu(cpu); /* sync clocks across cpus */
| 1876 | ttwu_queue_remote(p, cpu, wake_flags); |
| 1877 | return; |
| 1878 | } |
| 1879 | #endif |
| 1880 | |
| 1881 | raw_spin_lock(&rq->lock); |
| 1882 | cookie = lockdep_pin_lock(&rq->lock); |
| 1883 | ttwu_do_activate(rq, p, wake_flags, cookie); |
| 1884 | lockdep_unpin_lock(&rq->lock, cookie); |
| 1885 | raw_spin_unlock(&rq->lock); |
| 1886 | } |
| 1887 | |
| 1888 | /* |
| 1889 | * Notes on Program-Order guarantees on SMP systems. |
| 1890 | * |
| 1891 | * MIGRATION |
| 1892 | * |
| 1893 | * The basic program-order guarantee on SMP systems is that when a task [t] |
| 1894 | * migrates, all its activity on its old cpu [c0] happens-before any subsequent |
| 1895 | * execution on its new cpu [c1]. |
| 1896 | * |
| 1897 | * For migration (of runnable tasks) this is provided by the following means: |
| 1898 | * |
| 1899 | * A) UNLOCK of the rq(c0)->lock scheduling out task t |
| 1900 | * B) migration for t is required to synchronize *both* rq(c0)->lock and |
| 1901 | * rq(c1)->lock (if not at the same time, then in that order). |
| 1902 | * C) LOCK of the rq(c1)->lock scheduling in task |
| 1903 | * |
| 1904 | * Transitivity guarantees that B happens after A and C after B. |
| 1905 | * Note: we only require RCpc transitivity. |
| 1906 | * Note: the cpu doing B need not be c0 or c1 |
| 1907 | * |
| 1908 | * Example: |
| 1909 | * |
| 1910 | * CPU0 CPU1 CPU2 |
| 1911 | * |
| 1912 | * LOCK rq(0)->lock |
| 1913 | * sched-out X |
| 1914 | * sched-in Y |
| 1915 | * UNLOCK rq(0)->lock |
| 1916 | * |
| 1917 | * LOCK rq(0)->lock // orders against CPU0 |
| 1918 | * dequeue X |
| 1919 | * UNLOCK rq(0)->lock |
| 1920 | * |
| 1921 | * LOCK rq(1)->lock |
| 1922 | * enqueue X |
| 1923 | * UNLOCK rq(1)->lock |
| 1924 | * |
| 1925 | * LOCK rq(1)->lock // orders against CPU2 |
| 1926 | * sched-out Z |
| 1927 | * sched-in X |
| 1928 | * UNLOCK rq(1)->lock |
| 1929 | * |
| 1930 | * |
| 1931 | * BLOCKING -- aka. SLEEP + WAKEUP |
| 1932 | * |
| 1933 | * For blocking we (obviously) need to provide the same guarantee as for |
| 1934 | * migration. However the means are completely different as there is no lock |
| 1935 | * chain to provide order. Instead we do: |
| 1936 | * |
| 1937 | * 1) smp_store_release(X->on_cpu, 0) |
| 1938 | * 2) smp_cond_acquire(!X->on_cpu) |
| 1939 | * |
| 1940 | * Example: |
| 1941 | * |
| 1942 | * CPU0 (schedule) CPU1 (try_to_wake_up) CPU2 (schedule) |
| 1943 | * |
| 1944 | * LOCK rq(0)->lock LOCK X->pi_lock |
| 1945 | * dequeue X |
| 1946 | * sched-out X |
| 1947 | * smp_store_release(X->on_cpu, 0); |
| 1948 | * |
| 1949 | * smp_cond_acquire(!X->on_cpu); |
| 1950 | * X->state = WAKING |
| 1951 | * set_task_cpu(X,2) |
| 1952 | * |
| 1953 | * LOCK rq(2)->lock |
| 1954 | * enqueue X |
| 1955 | * X->state = RUNNING |
| 1956 | * UNLOCK rq(2)->lock |
| 1957 | * |
| 1958 | * LOCK rq(2)->lock // orders against CPU1 |
| 1959 | * sched-out Z |
| 1960 | * sched-in X |
| 1961 | * UNLOCK rq(2)->lock |
| 1962 | * |
| 1963 | * UNLOCK X->pi_lock |
| 1964 | * UNLOCK rq(0)->lock |
| 1965 | * |
| 1966 | * |
| 1967 | * However, for wakeups there is a second guarantee we must provide, namely we
| 1968 | * must observe the state that led to our wakeup. That is, not only must our
| 1969 | * task observe its own prior state, it must also observe the stores prior to |
| 1970 | * its wakeup. |
| 1971 | * |
| 1972 | * This means that any means of doing remote wakeups must order the CPU doing |
| 1973 | * the wakeup against the CPU the task is going to end up running on. This, |
| 1974 | * however, is already required for the regular Program-Order guarantee above, |
| 1975 | * since the waking CPU is the one issuing the ACQUIRE (smp_cond_acquire).
| 1976 | * |
| 1977 | */ |
| 1978 | |
| 1979 | /** |
| 1980 | * try_to_wake_up - wake up a thread |
| 1981 | * @p: the thread to be awakened |
| 1982 | * @state: the mask of task states that can be woken |
| 1983 | * @wake_flags: wake modifier flags (WF_*) |
| 1984 | * |
| 1985 | * Put it on the run-queue if it's not already there. The "current" |
| 1986 | * thread is always on the run-queue (except when the actual |
| 1987 | * re-schedule is in progress), and as such you're allowed to do |
| 1988 | * the simpler "current->state = TASK_RUNNING" to mark yourself |
| 1989 | * runnable without the overhead of this. |
| 1990 | * |
| 1991 | * Return: %true if @p was woken up, %false if it was already running
| 1992 | * or @state didn't match @p's state.
| 1993 | */ |
| 1994 | static int |
| 1995 | try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags) |
| 1996 | { |
| 1997 | unsigned long flags; |
| 1998 | int cpu, success = 0; |
| 1999 | |
| 2000 | /* |
| 2001 | * If we are going to wake up a thread waiting for CONDITION we |
| 2002 | * need to ensure that CONDITION=1 done by the caller cannot be
| 2003 | * reordered with the p->state check below. This pairs with the memory
| 2004 | * barrier implied by set_current_state() in the waiting thread.
| 2005 | */ |
| 2006 | smp_mb__before_spinlock(); |
| 2007 | raw_spin_lock_irqsave(&p->pi_lock, flags); |
| 2008 | if (!(p->state & state)) |
| 2009 | goto out; |
| 2010 | |
| 2011 | trace_sched_waking(p); |
| 2012 | |
| 2013 | success = 1; /* we're going to change ->state */ |
| 2014 | cpu = task_cpu(p); |
| 2015 | |
| 2016 | if (p->on_rq && ttwu_remote(p, wake_flags)) |
| 2017 | goto stat; |
| 2018 | |
| 2019 | #ifdef CONFIG_SMP |
| 2020 | /* |
| 2021 | * Ensure we load p->on_cpu _after_ p->on_rq, otherwise it would be |
| 2022 | * possible to, falsely, observe p->on_cpu == 0. |
| 2023 | * |
| 2024 | * One must be running (->on_cpu == 1) in order to remove oneself |
| 2025 | * from the runqueue. |
| 2026 | * |
| 2027 | * [S] ->on_cpu = 1; [L] ->on_rq |
| 2028 | * UNLOCK rq->lock |
| 2029 | * RMB |
| 2030 | * LOCK rq->lock |
| 2031 | * [S] ->on_rq = 0; [L] ->on_cpu |
| 2032 | * |
| 2033 | * Pairs with the full barrier implied in the UNLOCK+LOCK on rq->lock |
| 2034 | * from the consecutive calls to schedule(); the first switching to our |
| 2035 | * task, the second putting it to sleep. |
| 2036 | */ |
| 2037 | smp_rmb(); |
| 2038 | |
| 2039 | /* |
| 2040 | * If the owning (remote) cpu is still in the middle of schedule() with |
| 2041 | * this task as prev, wait until it's done referencing the task.
| 2042 | * |
| 2043 | * Pairs with the smp_store_release() in finish_lock_switch(). |
| 2044 | * |
| 2045 | * This ensures that tasks getting woken will be fully ordered against |
| 2046 | * their previous state and preserve Program Order. |
| 2047 | */ |
| 2048 | smp_cond_acquire(!p->on_cpu); |
| 2049 | |
| 2050 | p->sched_contributes_to_load = !!task_contributes_to_load(p); |
| 2051 | p->state = TASK_WAKING; |
| 2052 | |
| 2053 | cpu = select_task_rq(p, p->wake_cpu, SD_BALANCE_WAKE, wake_flags); |
| 2054 | if (task_cpu(p) != cpu) { |
| 2055 | wake_flags |= WF_MIGRATED; |
| 2056 | set_task_cpu(p, cpu); |
| 2057 | } |
| 2058 | #endif /* CONFIG_SMP */ |
| 2059 | |
| 2060 | ttwu_queue(p, cpu, wake_flags); |
| 2061 | stat: |
| 2062 | if (schedstat_enabled()) |
| 2063 | ttwu_stat(p, cpu, wake_flags); |
| 2064 | out: |
| 2065 | raw_spin_unlock_irqrestore(&p->pi_lock, flags); |
| 2066 | |
| 2067 | return success; |
| 2068 | } |
| 2069 | |
| 2070 | /** |
| 2071 | * try_to_wake_up_local - try to wake up a local task with rq lock held |
| 2072 | * @p: the thread to be awakened |
| 2073 | * |
| 2074 | * Put @p on the run-queue if it's not already there. The caller must |
| 2075 | * ensure that this_rq() is locked, @p is bound to this_rq() and not |
| 2076 | * the current task. |
| 2077 | */ |
| 2078 | static void try_to_wake_up_local(struct task_struct *p, struct pin_cookie cookie) |
| 2079 | { |
| 2080 | struct rq *rq = task_rq(p); |
| 2081 | |
| 2082 | if (WARN_ON_ONCE(rq != this_rq()) || |
| 2083 | WARN_ON_ONCE(p == current)) |
| 2084 | return; |
| 2085 | |
| 2086 | lockdep_assert_held(&rq->lock); |
| 2087 | |
| 2088 | if (!raw_spin_trylock(&p->pi_lock)) { |
| 2089 | /* |
| 2090 | * This is OK: current is on_cpu, which keeps it from being
| 2091 | * picked for load-balance; preemption/IRQs are still
| 2092 | * disabled, avoiding further scheduler activity on it; and
| 2093 | * we've not yet picked a replacement task.
| 2094 | */ |
| 2095 | lockdep_unpin_lock(&rq->lock, cookie); |
| 2096 | raw_spin_unlock(&rq->lock); |
| 2097 | raw_spin_lock(&p->pi_lock); |
| 2098 | raw_spin_lock(&rq->lock); |
| 2099 | lockdep_repin_lock(&rq->lock, cookie); |
| 2100 | } |
| 2101 | |
| 2102 | if (!(p->state & TASK_NORMAL)) |
| 2103 | goto out; |
| 2104 | |
| 2105 | trace_sched_waking(p); |
| 2106 | |
| 2107 | if (!task_on_rq_queued(p)) |
| 2108 | ttwu_activate(rq, p, ENQUEUE_WAKEUP); |
| 2109 | |
| 2110 | ttwu_do_wakeup(rq, p, 0, cookie); |
| 2111 | if (schedstat_enabled()) |
| 2112 | ttwu_stat(p, smp_processor_id(), 0); |
| 2113 | out: |
| 2114 | raw_spin_unlock(&p->pi_lock); |
| 2115 | } |
| 2116 | |
| 2117 | /** |
| 2118 | * wake_up_process - Wake up a specific process |
| 2119 | * @p: The process to be woken up. |
| 2120 | * |
| 2121 | * Attempt to wake up the nominated process and move it to the set of runnable |
| 2122 | * processes. |
| 2123 | * |
| 2124 | * Return: 1 if the process was woken up, 0 if it was already running. |
| 2125 | * |
| 2126 | * It may be assumed that this function implies a write memory barrier before |
| 2127 | * changing the task state if and only if any tasks are woken up. |
| 2128 | */ |
| 2129 | int wake_up_process(struct task_struct *p) |
| 2130 | { |
| 2131 | return try_to_wake_up(p, TASK_NORMAL, 0); |
| 2132 | } |
| 2133 | EXPORT_SYMBOL(wake_up_process); |
| 2134 | |
| 2135 | int wake_up_state(struct task_struct *p, unsigned int state) |
| 2136 | { |
| 2137 | return try_to_wake_up(p, state, 0); |
| 2138 | } |
| 2139 | |
| 2140 | /* |
| 2141 | * This function clears the sched_dl_entity static params. |
| 2142 | */ |
| 2143 | void __dl_clear_params(struct task_struct *p) |
| 2144 | { |
| 2145 | struct sched_dl_entity *dl_se = &p->dl; |
| 2146 | |
| 2147 | dl_se->dl_runtime = 0; |
| 2148 | dl_se->dl_deadline = 0; |
| 2149 | dl_se->dl_period = 0; |
| 2150 | dl_se->flags = 0; |
| 2151 | dl_se->dl_bw = 0; |
| 2152 | |
| 2153 | dl_se->dl_throttled = 0; |
| 2154 | dl_se->dl_yielded = 0; |
| 2155 | } |
| 2156 | |
| 2157 | /* |
| 2158 | * Perform scheduler related setup for a newly forked process p. |
| 2159 | * p is forked by current. |
| 2160 | * |
| 2161 | * __sched_fork() is basic setup used by init_idle() too: |
| 2162 | */ |
| 2163 | static void __sched_fork(unsigned long clone_flags, struct task_struct *p) |
| 2164 | { |
| 2165 | p->on_rq = 0; |
| 2166 | |
| 2167 | p->se.on_rq = 0; |
| 2168 | p->se.exec_start = 0; |
| 2169 | p->se.sum_exec_runtime = 0; |
| 2170 | p->se.prev_sum_exec_runtime = 0; |
| 2171 | p->se.nr_migrations = 0; |
| 2172 | p->se.vruntime = 0; |
| 2173 | INIT_LIST_HEAD(&p->se.group_node); |
| 2174 | |
| 2175 | #ifdef CONFIG_FAIR_GROUP_SCHED |
| 2176 | p->se.cfs_rq = NULL; |
| 2177 | #endif |
| 2178 | |
| 2179 | #ifdef CONFIG_SCHEDSTATS |
| 2180 | /* Even if schedstat is disabled, there should not be garbage */ |
| 2181 | memset(&p->se.statistics, 0, sizeof(p->se.statistics)); |
| 2182 | #endif |
| 2183 | |
| 2184 | RB_CLEAR_NODE(&p->dl.rb_node); |
| 2185 | init_dl_task_timer(&p->dl); |
| 2186 | __dl_clear_params(p); |
| 2187 | |
| 2188 | INIT_LIST_HEAD(&p->rt.run_list); |
| 2189 | p->rt.timeout = 0; |
| 2190 | p->rt.time_slice = sched_rr_timeslice; |
| 2191 | p->rt.on_rq = 0; |
| 2192 | p->rt.on_list = 0; |
| 2193 | |
| 2194 | #ifdef CONFIG_PREEMPT_NOTIFIERS |
| 2195 | INIT_HLIST_HEAD(&p->preempt_notifiers); |
| 2196 | #endif |
| 2197 | |
| 2198 | #ifdef CONFIG_NUMA_BALANCING |
| 2199 | if (p->mm && atomic_read(&p->mm->mm_users) == 1) { |
| 2200 | p->mm->numa_next_scan = jiffies + msecs_to_jiffies(sysctl_numa_balancing_scan_delay); |
| 2201 | p->mm->numa_scan_seq = 0; |
| 2202 | } |
| 2203 | |
| 2204 | if (clone_flags & CLONE_VM) |
| 2205 | p->numa_preferred_nid = current->numa_preferred_nid; |
| 2206 | else |
| 2207 | p->numa_preferred_nid = -1; |
| 2208 | |
| 2209 | p->node_stamp = 0ULL; |
| 2210 | p->numa_scan_seq = p->mm ? p->mm->numa_scan_seq : 0; |
| 2211 | p->numa_scan_period = sysctl_numa_balancing_scan_delay; |
| 2212 | p->numa_work.next = &p->numa_work; |
| 2213 | p->numa_faults = NULL; |
| 2214 | p->last_task_numa_placement = 0; |
| 2215 | p->last_sum_exec_runtime = 0; |
| 2216 | |
| 2217 | p->numa_group = NULL; |
| 2218 | #endif /* CONFIG_NUMA_BALANCING */ |
| 2219 | } |
| 2220 | |
| 2221 | DEFINE_STATIC_KEY_FALSE(sched_numa_balancing); |
| 2222 | |
| 2223 | #ifdef CONFIG_NUMA_BALANCING |
| 2224 | |
| 2225 | void set_numabalancing_state(bool enabled) |
| 2226 | { |
| 2227 | if (enabled) |
| 2228 | static_branch_enable(&sched_numa_balancing); |
| 2229 | else |
| 2230 | static_branch_disable(&sched_numa_balancing); |
| 2231 | } |
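| | 
| | /*
| |  * Usage sketch: with CONFIG_PROC_SYSCTL this is driven from userspace
| |  * through sysctl_numa_balancing() below, e.g.:
| |  *
| |  *	# echo 1 > /proc/sys/kernel/numa_balancing
| |  *
| |  * (assuming the sysctl is wired up as kernel.numa_balancing in the
| |  * standard sysctl table)
| |  */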
| 2232 | |
| 2233 | #ifdef CONFIG_PROC_SYSCTL |
| 2234 | int sysctl_numa_balancing(struct ctl_table *table, int write, |
| 2235 | void __user *buffer, size_t *lenp, loff_t *ppos) |
| 2236 | { |
| 2237 | struct ctl_table t; |
| 2238 | int err; |
| 2239 | int state = static_branch_likely(&sched_numa_balancing); |
| 2240 | |
| 2241 | if (write && !capable(CAP_SYS_ADMIN)) |
| 2242 | return -EPERM; |
| 2243 | |
| 2244 | t = *table; |
| 2245 | t.data = &state; |
| 2246 | err = proc_dointvec_minmax(&t, write, buffer, lenp, ppos); |
| 2247 | if (err < 0) |
| 2248 | return err; |
| 2249 | if (write) |
| 2250 | set_numabalancing_state(state); |
| 2251 | return err; |
| 2252 | } |
| 2253 | #endif |
| 2254 | #endif |
| 2255 | |
| 2256 | #ifdef CONFIG_SCHEDSTATS |
| 2257 | |
| 2258 | DEFINE_STATIC_KEY_FALSE(sched_schedstats); |
| 2259 | static bool __initdata __sched_schedstats = false; |
| 2260 | |
| 2261 | static void set_schedstats(bool enabled) |
| 2262 | { |
| 2263 | if (enabled) |
| 2264 | static_branch_enable(&sched_schedstats); |
| 2265 | else |
| 2266 | static_branch_disable(&sched_schedstats); |
| 2267 | } |
| 2268 | |
| 2269 | void force_schedstat_enabled(void) |
| 2270 | { |
| 2271 | if (!schedstat_enabled()) { |
| 2272 | pr_info("kernel profiling enabled schedstats, disable via kernel.sched_schedstats.\n"); |
| 2273 | static_branch_enable(&sched_schedstats); |
| 2274 | } |
| 2275 | } |
| 2276 | |
| 2277 | static int __init setup_schedstats(char *str) |
| 2278 | { |
| 2279 | int ret = 0; |
| 2280 | if (!str) |
| 2281 | goto out; |
| 2282 | |
| 2283 | /* |
| 2284 | * This code is called before jump labels have been set up, so we can't |
| 2285 | * change the static branch directly just yet. Instead set a temporary |
| 2286 | * variable so init_schedstats() can do it later. |
| 2287 | */ |
| 2288 | if (!strcmp(str, "enable")) { |
| 2289 | __sched_schedstats = true; |
| 2290 | ret = 1; |
| 2291 | } else if (!strcmp(str, "disable")) { |
| 2292 | __sched_schedstats = false; |
| 2293 | ret = 1; |
| 2294 | } |
| 2295 | out: |
| 2296 | if (!ret) |
| 2297 | pr_warn("Unable to parse schedstats=\n"); |
| 2298 | |
| 2299 | return ret; |
| 2300 | } |
| 2301 | __setup("schedstats=", setup_schedstats); |
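| | 
| | /*
| |  * Example: booting with "schedstats=enable" on the kernel command line
| |  * sets __sched_schedstats, and init_schedstats() then flips the static
| |  * branch once jump labels are ready; at runtime the same switch is
| |  * exposed as the kernel.sched_schedstats sysctl handled below.
| |  */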
| 2302 | |
| 2303 | static void __init init_schedstats(void) |
| 2304 | { |
| 2305 | set_schedstats(__sched_schedstats); |
| 2306 | } |
| 2307 | |
| 2308 | #ifdef CONFIG_PROC_SYSCTL |
| 2309 | int sysctl_schedstats(struct ctl_table *table, int write, |
| 2310 | void __user *buffer, size_t *lenp, loff_t *ppos) |
| 2311 | { |
| 2312 | struct ctl_table t; |
| 2313 | int err; |
| 2314 | int state = static_branch_likely(&sched_schedstats); |
| 2315 | |
| 2316 | if (write && !capable(CAP_SYS_ADMIN)) |
| 2317 | return -EPERM; |
| 2318 | |
| 2319 | t = *table; |
| 2320 | t.data = &state; |
| 2321 | err = proc_dointvec_minmax(&t, write, buffer, lenp, ppos); |
| 2322 | if (err < 0) |
| 2323 | return err; |
| 2324 | if (write) |
| 2325 | set_schedstats(state); |
| 2326 | return err; |
| 2327 | } |
| 2328 | #endif /* CONFIG_PROC_SYSCTL */ |
| 2329 | #else /* !CONFIG_SCHEDSTATS */ |
| 2330 | static inline void init_schedstats(void) {} |
| 2331 | #endif /* CONFIG_SCHEDSTATS */ |
| 2332 | |
| 2333 | /* |
| 2334 | * fork()/clone()-time setup: |
| 2335 | */ |
| 2336 | int sched_fork(unsigned long clone_flags, struct task_struct *p) |
| 2337 | { |
| 2338 | unsigned long flags; |
| 2339 | int cpu = get_cpu(); |
| 2340 | |
| 2341 | __sched_fork(clone_flags, p); |
| 2342 | /* |
| 2343 | * We mark the process as running here. This guarantees that |
| 2344 | * nobody will actually run it, and a signal or other external |
| 2345 | * event cannot wake it up and insert it on the runqueue either. |
| 2346 | */ |
| 2347 | p->state = TASK_RUNNING; |
| 2348 | |
| 2349 | /* |
| 2350 | * Make sure we do not leak PI boosting priority to the child. |
| 2351 | */ |
| 2352 | p->prio = current->normal_prio; |
| 2353 | |
| 2354 | /* |
| 2355 | * Revert to default priority/policy on fork if requested. |
| 2356 | */ |
| 2357 | if (unlikely(p->sched_reset_on_fork)) { |
| 2358 | if (task_has_dl_policy(p) || task_has_rt_policy(p)) { |
| 2359 | p->policy = SCHED_NORMAL; |
| 2360 | p->static_prio = NICE_TO_PRIO(0); |
| 2361 | p->rt_priority = 0; |
| 2362 | } else if (PRIO_TO_NICE(p->static_prio) < 0) |
| 2363 | p->static_prio = NICE_TO_PRIO(0); |
| 2364 | |
| 2365 | p->prio = p->normal_prio = __normal_prio(p); |
| 2366 | set_load_weight(p); |
| 2367 | |
| 2368 | /* |
| 2369 | * We don't need the reset flag anymore after the fork. It has |
| 2370 | * fulfilled its duty: |
| 2371 | */ |
| 2372 | p->sched_reset_on_fork = 0; |
| 2373 | } |
| 2374 | |
| 2375 | if (dl_prio(p->prio)) { |
| 2376 | put_cpu(); |
| 2377 | return -EAGAIN; |
| 2378 | } else if (rt_prio(p->prio)) { |
| 2379 | p->sched_class = &rt_sched_class; |
| 2380 | } else { |
| 2381 | p->sched_class = &fair_sched_class; |
| 2382 | } |
| 2383 | |
| 2384 | if (p->sched_class->task_fork) |
| 2385 | p->sched_class->task_fork(p); |
| 2386 | |
| 2387 | /* |
| 2388 | * The child is not yet in the pid-hash so no cgroup attach races, |
| 2389 | * and the cgroup is pinned to this child because cgroup_fork()
| 2390 | * runs before sched_fork().
| 2391 | * |
| 2392 | * Silence PROVE_RCU. |
| 2393 | */ |
| 2394 | raw_spin_lock_irqsave(&p->pi_lock, flags); |
| 2395 | set_task_cpu(p, cpu); |
| 2396 | raw_spin_unlock_irqrestore(&p->pi_lock, flags); |
| 2397 | |
| 2398 | #ifdef CONFIG_SCHED_INFO |
| 2399 | if (likely(sched_info_on())) |
| 2400 | memset(&p->sched_info, 0, sizeof(p->sched_info)); |
| 2401 | #endif |
| 2402 | #ifdef CONFIG_SMP
| 2403 | p->on_cpu = 0; |
| 2404 | #endif |
| 2405 | init_task_preempt_count(p); |
| 2406 | #ifdef CONFIG_SMP |
| 2407 | plist_node_init(&p->pushable_tasks, MAX_PRIO); |
| 2408 | RB_CLEAR_NODE(&p->pushable_dl_tasks); |
| 2409 | #endif |
| 2410 | |
| 2411 | put_cpu(); |
| 2412 | return 0; |
| 2413 | } |
| 2414 | |
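| | /*
| |  * Convert a runtime/period pair into a 20-bit fixed-point utilization
| |  * ratio. Worked example: runtime = 10ms, period = 100ms gives
| |  * (10 << 20) / 100 ~= 104857, i.e. roughly 0.1 * 2^20.
| |  */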
| 2415 | unsigned long to_ratio(u64 period, u64 runtime) |
| 2416 | { |
| 2417 | if (runtime == RUNTIME_INF) |
| 2418 | return 1ULL << 20; |
| 2419 | |
| 2420 | /* |
| 2421 | * Doing this here saves a lot of checks in all |
| 2422 | * the calling paths, and returning zero seems |
| 2423 | * safe for them anyway. |
| 2424 | */ |
| 2425 | if (period == 0) |
| 2426 | return 0; |
| 2427 | |
| 2428 | return div64_u64(runtime << 20, period); |
| 2429 | } |
| 2430 | |
| 2431 | #ifdef CONFIG_SMP |
| 2432 | inline struct dl_bw *dl_bw_of(int i) |
| 2433 | { |
| 2434 | RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held(), |
| 2435 | "sched RCU must be held"); |
| 2436 | return &cpu_rq(i)->rd->dl_bw; |
| 2437 | } |
| 2438 | |
| 2439 | static inline int dl_bw_cpus(int i) |
| 2440 | { |
| 2441 | struct root_domain *rd = cpu_rq(i)->rd; |
| 2442 | int cpus = 0; |
| 2443 | |
| 2444 | RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held(), |
| 2445 | "sched RCU must be held"); |
| 2446 | for_each_cpu_and(i, rd->span, cpu_active_mask) |
| 2447 | cpus++; |
| 2448 | |
| 2449 | return cpus; |
| 2450 | } |
| 2451 | #else |
| 2452 | inline struct dl_bw *dl_bw_of(int i) |
| 2453 | { |
| 2454 | return &cpu_rq(i)->dl.dl_bw; |
| 2455 | } |
| 2456 | |
| 2457 | static inline int dl_bw_cpus(int i) |
| 2458 | { |
| 2459 | return 1; |
| 2460 | } |
| 2461 | #endif |
| 2462 | |
| 2463 | /* |
| 2464 | * We must be sure that accepting a new task (or allowing changing the |
| 2465 | * parameters of an existing one) is consistent with the bandwidth |
| 2466 | * constraints. If it is, this function also updates the currently
| 2467 | * allocated bandwidth to reflect the new situation.
| 2468 | * |
| 2469 | * This function is called while holding p's rq->lock. |
| 2470 | * |
| 2471 | * XXX we should delay bw change until the task's 0-lag point, see |
| 2472 | * __setparam_dl(). |
| 2473 | */ |
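| | /*
| |  * Example (a sketch, assuming the default 95% bandwidth limit that the
| |  * deadline class inherits from the rt sysctls): a 4-cpu root domain
| |  * keeps admitting -deadline tasks while their summed to_ratio()
| |  * bandwidth stays within 4 * 0.95 cpus' worth; beyond that,
| |  * dl_overflow() fails with -1.
| |  */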
| 2474 | static int dl_overflow(struct task_struct *p, int policy, |
| 2475 | const struct sched_attr *attr) |
| 2476 | { |
| 2478 | struct dl_bw *dl_b = dl_bw_of(task_cpu(p)); |
| 2479 | u64 period = attr->sched_period ?: attr->sched_deadline; |
| 2480 | u64 runtime = attr->sched_runtime; |
| 2481 | u64 new_bw = dl_policy(policy) ? to_ratio(period, runtime) : 0; |
| 2482 | int cpus, err = -1; |
| 2483 | |
| 2484 | /* !deadline task may carry old deadline bandwidth */ |
| 2485 | if (new_bw == p->dl.dl_bw && task_has_dl_policy(p)) |
| 2486 | return 0; |
| 2487 | |
| 2488 | /* |
| 2489 | * Whether a task enters, leaves, or stays -deadline but changes
| 2490 | * its parameters, we may need to update the total allocated
| 2491 | * bandwidth of the container accordingly.
| 2492 | */ |
| 2493 | raw_spin_lock(&dl_b->lock); |
| 2494 | cpus = dl_bw_cpus(task_cpu(p)); |
| 2495 | if (dl_policy(policy) && !task_has_dl_policy(p) && |
| 2496 | !__dl_overflow(dl_b, cpus, 0, new_bw)) { |
| 2497 | __dl_add(dl_b, new_bw); |
| 2498 | err = 0; |
| 2499 | } else if (dl_policy(policy) && task_has_dl_policy(p) && |
| 2500 | !__dl_overflow(dl_b, cpus, p->dl.dl_bw, new_bw)) { |
| 2501 | __dl_clear(dl_b, p->dl.dl_bw); |
| 2502 | __dl_add(dl_b, new_bw); |
| 2503 | err = 0; |
| 2504 | } else if (!dl_policy(policy) && task_has_dl_policy(p)) { |
| 2505 | __dl_clear(dl_b, p->dl.dl_bw); |
| 2506 | err = 0; |
| 2507 | } |
| 2508 | raw_spin_unlock(&dl_b->lock); |
| 2509 | |
| 2510 | return err; |
| 2511 | } |
| 2512 | |
| 2513 | extern void init_dl_bw(struct dl_bw *dl_b); |
| 2514 | |
| 2515 | /* |
| 2516 | * wake_up_new_task - wake up a newly created task for the first time. |
| 2517 | * |
| 2518 | * This function will do some initial scheduler statistics housekeeping |
| 2519 | * that must be done for every newly created context, then puts the task |
| 2520 | * on the runqueue and wakes it. |
| 2521 | */ |
| 2522 | void wake_up_new_task(struct task_struct *p) |
| 2523 | { |
| 2524 | struct rq_flags rf; |
| 2525 | struct rq *rq; |
| 2526 | |
| 2527 | /* Initialize new task's runnable average */ |
| 2528 | init_entity_runnable_average(&p->se); |
| 2529 | raw_spin_lock_irqsave(&p->pi_lock, rf.flags); |
| 2530 | #ifdef CONFIG_SMP |
| 2531 | /* |
| 2532 | * Fork balancing, do it here and not earlier because: |
| 2533 | * - cpus_allowed can change in the fork path |
| 2534 | * - any previously selected cpu might disappear through hotplug |
| 2535 | */ |
| 2536 | set_task_cpu(p, select_task_rq(p, task_cpu(p), SD_BALANCE_FORK, 0)); |
| 2537 | #endif |
| 2538 | /* Post-initialize the new task's util average once its cfs_rq is set */
| 2539 | post_init_entity_util_avg(&p->se); |
| 2540 | |
| 2541 | rq = __task_rq_lock(p, &rf); |
| 2542 | activate_task(rq, p, 0); |
| 2543 | p->on_rq = TASK_ON_RQ_QUEUED; |
| 2544 | trace_sched_wakeup_new(p); |
| 2545 | check_preempt_curr(rq, p, WF_FORK); |
| 2546 | #ifdef CONFIG_SMP |
| 2547 | if (p->sched_class->task_woken) { |
| 2548 | /* |
| 2549 | * Nothing relies on rq->lock after this, so it's fine to
| 2550 | * drop it. |
| 2551 | */ |
| 2552 | lockdep_unpin_lock(&rq->lock, rf.cookie); |
| 2553 | p->sched_class->task_woken(rq, p); |
| 2554 | lockdep_repin_lock(&rq->lock, rf.cookie); |
| 2555 | } |
| 2556 | #endif |
| 2557 | task_rq_unlock(rq, p, &rf); |
| 2558 | } |
| 2559 | |
| 2560 | #ifdef CONFIG_PREEMPT_NOTIFIERS |
| 2561 | |
| 2562 | static struct static_key preempt_notifier_key = STATIC_KEY_INIT_FALSE; |
| 2563 | |
| 2564 | void preempt_notifier_inc(void) |
| 2565 | { |
| 2566 | static_key_slow_inc(&preempt_notifier_key); |
| 2567 | } |
| 2568 | EXPORT_SYMBOL_GPL(preempt_notifier_inc); |
| 2569 | |
| 2570 | void preempt_notifier_dec(void) |
| 2571 | { |
| 2572 | static_key_slow_dec(&preempt_notifier_key); |
| 2573 | } |
| 2574 | EXPORT_SYMBOL_GPL(preempt_notifier_dec); |
| 2575 | |
| 2576 | /** |
| 2577 | * preempt_notifier_register - tell me when current is being preempted & rescheduled |
| 2578 | * @notifier: notifier struct to register |
| 2579 | */ |
| 2580 | void preempt_notifier_register(struct preempt_notifier *notifier) |
| 2581 | { |
| 2582 | if (!static_key_false(&preempt_notifier_key)) |
| 2583 | WARN(1, "registering preempt_notifier while notifiers disabled\n"); |
| 2584 | |
| 2585 | hlist_add_head(¬ifier->link, ¤t->preempt_notifiers); |
| 2586 | } |
| 2587 | EXPORT_SYMBOL_GPL(preempt_notifier_register); |
| 2588 | |
| 2589 | /** |
| 2590 | * preempt_notifier_unregister - no longer interested in preemption notifications |
| 2591 | * @notifier: notifier struct to unregister |
| 2592 | * |
| 2593 | * This is *not* safe to call from within a preemption notifier. |
| 2594 | */ |
| 2595 | void preempt_notifier_unregister(struct preempt_notifier *notifier) |
| 2596 | { |
| 2597 | hlist_del(¬ifier->link); |
| 2598 | } |
| 2599 | EXPORT_SYMBOL_GPL(preempt_notifier_unregister); |
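| | 
| | /*
| |  * Usage sketch (modelled on kvm; my_ops and my_vcpu are hypothetical,
| |  * preempt_notifier_init() comes from <linux/preempt.h>):
| |  *
| |  *	static void my_sched_in(struct preempt_notifier *pn, int cpu) { }
| |  *	static void my_sched_out(struct preempt_notifier *pn,
| |  *				 struct task_struct *next) { }
| |  *	static struct preempt_ops my_ops = {
| |  *		.sched_in	= my_sched_in,
| |  *		.sched_out	= my_sched_out,
| |  *	};
| |  *
| |  *	preempt_notifier_inc();
| |  *	preempt_notifier_init(&my_vcpu->pn, &my_ops);
| |  *	preempt_notifier_register(&my_vcpu->pn);
| |  */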
| 2600 | |
| 2601 | static void __fire_sched_in_preempt_notifiers(struct task_struct *curr) |
| 2602 | { |
| 2603 | struct preempt_notifier *notifier; |
| 2604 | |
| 2605 | hlist_for_each_entry(notifier, &curr->preempt_notifiers, link) |
| 2606 | notifier->ops->sched_in(notifier, raw_smp_processor_id()); |
| 2607 | } |
| 2608 | |
| 2609 | static __always_inline void fire_sched_in_preempt_notifiers(struct task_struct *curr) |
| 2610 | { |
| 2611 | if (static_key_false(&preempt_notifier_key)) |
| 2612 | __fire_sched_in_preempt_notifiers(curr); |
| 2613 | } |
| 2614 | |
| 2615 | static void |
| 2616 | __fire_sched_out_preempt_notifiers(struct task_struct *curr, |
| 2617 | struct task_struct *next) |
| 2618 | { |
| 2619 | struct preempt_notifier *notifier; |
| 2620 | |
| 2621 | hlist_for_each_entry(notifier, &curr->preempt_notifiers, link) |
| 2622 | notifier->ops->sched_out(notifier, next); |
| 2623 | } |
| 2624 | |
| 2625 | static __always_inline void |
| 2626 | fire_sched_out_preempt_notifiers(struct task_struct *curr, |
| 2627 | struct task_struct *next) |
| 2628 | { |
| 2629 | if (static_key_false(&preempt_notifier_key)) |
| 2630 | __fire_sched_out_preempt_notifiers(curr, next); |
| 2631 | } |
| 2632 | |
| 2633 | #else /* !CONFIG_PREEMPT_NOTIFIERS */ |
| 2634 | |
| 2635 | static inline void fire_sched_in_preempt_notifiers(struct task_struct *curr) |
| 2636 | { |
| 2637 | } |
| 2638 | |
| 2639 | static inline void |
| 2640 | fire_sched_out_preempt_notifiers(struct task_struct *curr, |
| 2641 | struct task_struct *next) |
| 2642 | { |
| 2643 | } |
| 2644 | |
| 2645 | #endif /* CONFIG_PREEMPT_NOTIFIERS */ |
| 2646 | |
| 2647 | /** |
| 2648 | * prepare_task_switch - prepare to switch tasks |
| 2649 | * @rq: the runqueue preparing to switch |
| 2650 | * @prev: the current task that is being switched out |
| 2651 | * @next: the task we are going to switch to. |
| 2652 | * |
| 2653 | * This is called with the rq lock held and interrupts off. It must |
| 2654 | * be paired with a subsequent finish_task_switch after the context |
| 2655 | * switch. |
| 2656 | * |
| 2657 | * prepare_task_switch sets up locking and calls architecture specific |
| 2658 | * hooks. |
| 2659 | */ |
| 2660 | static inline void |
| 2661 | prepare_task_switch(struct rq *rq, struct task_struct *prev, |
| 2662 | struct task_struct *next) |
| 2663 | { |
| 2664 | sched_info_switch(rq, prev, next); |
| 2665 | perf_event_task_sched_out(prev, next); |
| 2666 | fire_sched_out_preempt_notifiers(prev, next); |
| 2667 | prepare_lock_switch(rq, next); |
| 2668 | prepare_arch_switch(next); |
| 2669 | } |
| 2670 | |
| 2671 | /** |
| 2672 | * finish_task_switch - clean up after a task-switch |
| 2673 | * @prev: the thread we just switched away from. |
| 2674 | * |
| 2675 | * finish_task_switch must be called after the context switch, paired |
| 2676 | * with a prepare_task_switch call before the context switch. |
| 2677 | * finish_task_switch will reconcile locking set up by prepare_task_switch, |
| 2678 | * and do any other architecture-specific cleanup actions. |
| 2679 | * |
| 2680 | * Note that we may have delayed dropping an mm in context_switch(). If |
| 2681 | * so, we finish that here outside of the runqueue lock. (Doing it |
| 2682 | * with the lock held can cause deadlocks; see schedule() for |
| 2683 | * details.) |
| 2684 | * |
| 2685 | * The context switch has flipped the stack from under us and restored the
| 2686 | * local variables which were saved when this task called schedule() in the |
| 2687 | * past. prev == current is still correct but we need to recalculate this_rq |
| 2688 | * because prev may have moved to another CPU. |
| 2689 | */ |
| 2690 | static struct rq *finish_task_switch(struct task_struct *prev) |
| 2691 | __releases(rq->lock) |
| 2692 | { |
| 2693 | struct rq *rq = this_rq(); |
| 2694 | struct mm_struct *mm = rq->prev_mm; |
| 2695 | long prev_state; |
| 2696 | |
| 2697 | /* |
| 2698 | * The previous task will have left us with a preempt_count of 2 |
| 2699 | * because it left us after: |
| 2700 | * |
| 2701 | * schedule() |
| 2702 | * preempt_disable(); // 1 |
| 2703 | * __schedule() |
| 2704 | * raw_spin_lock_irq(&rq->lock) // 2 |
| 2705 | * |
| 2706 | * Also, see FORK_PREEMPT_COUNT. |
| 2707 | */ |
| 2708 | if (WARN_ONCE(preempt_count() != 2*PREEMPT_DISABLE_OFFSET, |
| 2709 | "corrupted preempt_count: %s/%d/0x%x\n", |
| 2710 | current->comm, current->pid, preempt_count())) |
| 2711 | preempt_count_set(FORK_PREEMPT_COUNT); |
| 2712 | |
| 2713 | rq->prev_mm = NULL; |
| 2714 | |
| 2715 | /* |
| 2716 | * A task struct has one reference for the use as "current". |
| 2717 | * If a task dies, then it sets TASK_DEAD in tsk->state and calls |
| 2718 | * schedule one last time. The schedule call will never return, and |
| 2719 | * the scheduled task must drop that reference. |
| 2720 | * |
| 2721 | * We must observe prev->state before clearing prev->on_cpu (in |
| 2722 | * finish_lock_switch), otherwise a concurrent wakeup can get prev |
| 2723 | * running on another CPU and we could race with its RUNNING -> DEAD
| 2724 | * transition, resulting in a double drop. |
| 2725 | */ |
| 2726 | prev_state = prev->state; |
| 2727 | vtime_task_switch(prev); |
| 2728 | perf_event_task_sched_in(prev, current); |
| 2729 | finish_lock_switch(rq, prev); |
| 2730 | finish_arch_post_lock_switch(); |
| 2731 | |
| 2732 | fire_sched_in_preempt_notifiers(current); |
| 2733 | if (mm) |
| 2734 | mmdrop(mm); |
| 2735 | if (unlikely(prev_state == TASK_DEAD)) { |
| 2736 | if (prev->sched_class->task_dead) |
| 2737 | prev->sched_class->task_dead(prev); |
| 2738 | |
| 2739 | /* |
| 2740 | * Remove function-return probe instances associated with this |
| 2741 | * task and put them back on the free list. |
| 2742 | */ |
| 2743 | kprobe_flush_task(prev); |
| 2744 | put_task_struct(prev); |
| 2745 | } |
| 2746 | |
| 2747 | tick_nohz_task_switch(); |
| 2748 | return rq; |
| 2749 | } |
| 2750 | |
| 2751 | #ifdef CONFIG_SMP |
| 2752 | |
| 2753 | /* rq->lock is NOT held, but preemption is disabled */ |
| 2754 | static void __balance_callback(struct rq *rq) |
| 2755 | { |
| 2756 | struct callback_head *head, *next; |
| 2757 | void (*func)(struct rq *rq); |
| 2758 | unsigned long flags; |
| 2759 | |
| 2760 | raw_spin_lock_irqsave(&rq->lock, flags); |
| 2761 | head = rq->balance_callback; |
| 2762 | rq->balance_callback = NULL; |
| 2763 | while (head) { |
| 2764 | func = (void (*)(struct rq *))head->func; |
| 2765 | next = head->next; |
| 2766 | head->next = NULL; |
| 2767 | head = next; |
| 2768 | |
| 2769 | func(rq); |
| 2770 | } |
| 2771 | raw_spin_unlock_irqrestore(&rq->lock, flags); |
| 2772 | } |
| 2773 | |
| 2774 | static inline void balance_callback(struct rq *rq) |
| 2775 | { |
| 2776 | if (unlikely(rq->balance_callback)) |
| 2777 | __balance_callback(rq); |
| 2778 | } |
| 2779 | |
| 2780 | #else |
| 2781 | |
| 2782 | static inline void balance_callback(struct rq *rq) |
| 2783 | { |
| 2784 | } |
| 2785 | |
| 2786 | #endif |
| 2787 | |
| 2788 | /** |
| 2789 | * schedule_tail - first thing a freshly forked thread must call. |
| 2790 | * @prev: the thread we just switched away from. |
| 2791 | */ |
| 2792 | asmlinkage __visible void schedule_tail(struct task_struct *prev) |
| 2793 | __releases(rq->lock) |
| 2794 | { |
| 2795 | struct rq *rq; |
| 2796 | |
| 2797 | /* |
| 2798 | * New tasks start with FORK_PREEMPT_COUNT, see there and |
| 2799 | * finish_task_switch() for details. |
| 2800 | * |
| 2801 | * finish_task_switch() will drop rq->lock and lower preempt_count,
| 2802 | * and the preempt_enable() will end up enabling preemption (on |
| 2803 | * PREEMPT_COUNT kernels). |
| 2804 | */ |
| 2805 | |
| 2806 | rq = finish_task_switch(prev); |
| 2807 | balance_callback(rq); |
| 2808 | preempt_enable(); |
| 2809 | |
| 2810 | if (current->set_child_tid) |
| 2811 | put_user(task_pid_vnr(current), current->set_child_tid); |
| 2812 | } |
| 2813 | |
| 2814 | /* |
| 2815 | * context_switch - switch to the new MM and the new thread's register state. |
| 2816 | */ |
| 2817 | static __always_inline struct rq * |
| 2818 | context_switch(struct rq *rq, struct task_struct *prev, |
| 2819 | struct task_struct *next, struct pin_cookie cookie) |
| 2820 | { |
| 2821 | struct mm_struct *mm, *oldmm; |
| 2822 | |
| 2823 | prepare_task_switch(rq, prev, next); |
| 2824 | |
| 2825 | mm = next->mm; |
| 2826 | oldmm = prev->active_mm; |
| 2827 | /* |
| 2828 | * For paravirt, this is coupled with an exit in switch_to to |
| 2829 | * combine the page table reload and the switch backend into |
| 2830 | * one hypercall. |
| 2831 | */ |
| 2832 | arch_start_context_switch(prev); |
| 2833 | |
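| | /*
| |  * Kernel threads have no mm of their own; borrow the previous
| |  * active_mm and enter lazy TLB mode instead of switching mms.
| |  */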
| 2834 | if (!mm) { |
| 2835 | next->active_mm = oldmm; |
| 2836 | atomic_inc(&oldmm->mm_count); |
| 2837 | enter_lazy_tlb(oldmm, next); |
| 2838 | } else |
| 2839 | switch_mm_irqs_off(oldmm, mm, next); |
| 2840 | |
| 2841 | if (!prev->mm) { |
| 2842 | prev->active_mm = NULL; |
| 2843 | rq->prev_mm = oldmm; |
| 2844 | } |
| 2845 | /* |
| 2846 | * The runqueue lock will be released by the next
| 2847 | * task (which is an invalid locking op but in the case |
| 2848 | * of the scheduler it's an obvious special-case), so we |
| 2849 | * do an early lockdep release here: |
| 2850 | */ |
| 2851 | lockdep_unpin_lock(&rq->lock, cookie); |
| 2852 | spin_release(&rq->lock.dep_map, 1, _THIS_IP_); |
| 2853 | |
| 2854 | /* Here we just switch the register state and the stack. */ |
| 2855 | switch_to(prev, next, prev); |
| 2856 | barrier(); |
| 2857 | |
| 2858 | return finish_task_switch(prev); |
| 2859 | } |
| 2860 | |
| 2861 | /* |
| 2862 | * nr_running and nr_context_switches: |
| 2863 | * |
| 2864 | * externally visible scheduler statistics: current number of runnable |
| 2865 | * threads, total number of context switches performed since bootup. |
| 2866 | */ |
| 2867 | unsigned long nr_running(void) |
| 2868 | { |
| 2869 | unsigned long i, sum = 0; |
| 2870 | |
| 2871 | for_each_online_cpu(i) |
| 2872 | sum += cpu_rq(i)->nr_running; |
| 2873 | |
| 2874 | return sum; |
| 2875 | } |
| 2876 | |
| 2877 | /* |
| 2878 | * Check if only the current task is running on the cpu. |
| 2879 | * |
| 2880 | * Caution: this function does not check that the caller has disabled |
| 2881 | * preemption, thus the result might have a time-of-check-to-time-of-use |
| 2882 | * race. The caller is responsible for using it correctly, for example:
| 2883 | * |
| 2884 | * - from a non-preemptable section (of course) |
| 2885 | * |
| 2886 | * - from a thread that is bound to a single CPU |
| 2887 | * |
| 2888 | * - in a loop with very short iterations (e.g. a polling loop) |
| 2889 | */ |
| 2890 | bool single_task_running(void) |
| 2891 | { |
| 2892 | return raw_rq()->nr_running == 1; |
| 2893 | } |
| 2894 | EXPORT_SYMBOL(single_task_running); |
| 2895 | |
| 2896 | unsigned long long nr_context_switches(void) |
| 2897 | { |
| 2898 | int i; |
| 2899 | unsigned long long sum = 0; |
| 2900 | |
| 2901 | for_each_possible_cpu(i) |
| 2902 | sum += cpu_rq(i)->nr_switches; |
| 2903 | |
| 2904 | return sum; |
| 2905 | } |
| 2906 | |
| 2907 | unsigned long nr_iowait(void) |
| 2908 | { |
| 2909 | unsigned long i, sum = 0; |
| 2910 | |
| 2911 | for_each_possible_cpu(i) |
| 2912 | sum += atomic_read(&cpu_rq(i)->nr_iowait); |
| 2913 | |
| 2914 | return sum; |
| 2915 | } |
| 2916 | |
| 2917 | unsigned long nr_iowait_cpu(int cpu) |
| 2918 | { |
| 2919 | struct rq *this = cpu_rq(cpu); |
| 2920 | return atomic_read(&this->nr_iowait); |
| 2921 | } |
| 2922 | |
| 2923 | void get_iowait_load(unsigned long *nr_waiters, unsigned long *load) |
| 2924 | { |
| 2925 | struct rq *rq = this_rq(); |
| 2926 | *nr_waiters = atomic_read(&rq->nr_iowait); |
| 2927 | *load = rq->load.weight; |
| 2928 | } |
| 2929 | |
| 2930 | #ifdef CONFIG_SMP |
| 2931 | |
| 2932 | /* |
| 2933 | * sched_exec - execve() is a valuable balancing opportunity, because at |
| 2934 | * this point the task has the smallest effective memory and cache footprint. |
| 2935 | */ |
| 2936 | void sched_exec(void) |
| 2937 | { |
| 2938 | struct task_struct *p = current; |
| 2939 | unsigned long flags; |
| 2940 | int dest_cpu; |
| 2941 | |
| 2942 | raw_spin_lock_irqsave(&p->pi_lock, flags); |
| 2943 | dest_cpu = p->sched_class->select_task_rq(p, task_cpu(p), SD_BALANCE_EXEC, 0); |
| 2944 | if (dest_cpu == smp_processor_id()) |
| 2945 | goto unlock; |
| 2946 | |
| 2947 | if (likely(cpu_active(dest_cpu))) { |
| 2948 | struct migration_arg arg = { p, dest_cpu }; |
| 2949 | |
| 2950 | raw_spin_unlock_irqrestore(&p->pi_lock, flags); |
| 2951 | stop_one_cpu(task_cpu(p), migration_cpu_stop, &arg); |
| 2952 | return; |
| 2953 | } |
| 2954 | unlock: |
| 2955 | raw_spin_unlock_irqrestore(&p->pi_lock, flags); |
| 2956 | } |
| 2957 | |
| 2958 | #endif |
| 2959 | |
| 2960 | DEFINE_PER_CPU(struct kernel_stat, kstat); |
| 2961 | DEFINE_PER_CPU(struct kernel_cpustat, kernel_cpustat); |
| 2962 | |
| 2963 | EXPORT_PER_CPU_SYMBOL(kstat); |
| 2964 | EXPORT_PER_CPU_SYMBOL(kernel_cpustat); |
| 2965 | |
| 2966 | /* |
| 2967 | * Return accounted runtime for the task. |
| 2968 | * In case the task is currently running, return the runtime plus current's |
| 2969 | * pending runtime that has not been accounted yet.
| 2970 | */ |
| 2971 | unsigned long long task_sched_runtime(struct task_struct *p) |
| 2972 | { |
| 2973 | struct rq_flags rf; |
| 2974 | struct rq *rq; |
| 2975 | u64 ns; |
| 2976 | |
| 2977 | #if defined(CONFIG_64BIT) && defined(CONFIG_SMP) |
| 2978 | /* |
| 2979 | * 64-bit doesn't need locks to atomically read a 64-bit value.
| 2980 | * So we have an optimization chance when the task's delta_exec is 0.
| 2981 | * Reading ->on_cpu is racy, but this is ok. |
| 2982 | * |
| 2983 | * If we race with it leaving cpu, we'll take a lock. So we're correct. |
| 2984 | * If we race with it entering cpu, unaccounted time is 0. This is |
| 2985 | * indistinguishable from the read occurring a few cycles earlier. |
| 2986 | * If we see ->on_cpu without ->on_rq, the task is leaving, and has |
| 2987 | * been accounted, so we're correct here as well. |
| 2988 | */ |
| 2989 | if (!p->on_cpu || !task_on_rq_queued(p)) |
| 2990 | return p->se.sum_exec_runtime; |
| 2991 | #endif |
| 2992 | |
| 2993 | rq = task_rq_lock(p, &rf); |
| 2994 | /* |
| 2995 | * Must be ->curr _and_ ->on_rq. If dequeued, we would |
| 2996 | * project cycles that may never be accounted to this |
| 2997 | * thread, breaking clock_gettime(). |
| 2998 | */ |
| 2999 | if (task_current(rq, p) && task_on_rq_queued(p)) { |
| 3000 | update_rq_clock(rq); |
| 3001 | p->sched_class->update_curr(rq); |
| 3002 | } |
| 3003 | ns = p->se.sum_exec_runtime; |
| 3004 | task_rq_unlock(rq, p, &rf); |
| 3005 | |
| 3006 | return ns; |
| 3007 | } |
| 3008 | |
| 3009 | /* |
| 3010 | * This function gets called by the timer code, with HZ frequency. |
| 3011 | * We call it with interrupts disabled. |
| 3012 | */ |
| 3013 | void scheduler_tick(void) |
| 3014 | { |
| 3015 | int cpu = smp_processor_id(); |
| 3016 | struct rq *rq = cpu_rq(cpu); |
| 3017 | struct task_struct *curr = rq->curr; |
| 3018 | |
| 3019 | sched_clock_tick(); |
| 3020 | |
| 3021 | raw_spin_lock(&rq->lock); |
| 3022 | update_rq_clock(rq); |
| 3023 | curr->sched_class->task_tick(rq, curr, 0); |
| 3024 | cpu_load_update_active(rq); |
| 3025 | calc_global_load_tick(rq); |
| 3026 | raw_spin_unlock(&rq->lock); |
| 3027 | |
| 3028 | perf_event_task_tick(); |
| 3029 | |
| 3030 | #ifdef CONFIG_SMP |
| 3031 | rq->idle_balance = idle_cpu(cpu); |
| 3032 | trigger_load_balance(rq); |
| 3033 | #endif |
| 3034 | rq_last_tick_reset(rq); |
| 3035 | } |
| 3036 | |
| 3037 | #ifdef CONFIG_NO_HZ_FULL |
| 3038 | /** |
| 3039 | * scheduler_tick_max_deferment |
| 3040 | * |
| 3041 | * Keep at least one tick per second when a single |
| 3042 | * active task is running because the scheduler doesn't |
| 3043 | * yet completely support a full dynticks environment.
| 3044 | * |
| 3045 | * This makes sure that uptime, CFS vruntime, load |
| 3046 | * balancing, etc... continue to move forward, even |
| 3047 | * with a very low granularity. |
| 3048 | * |
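| | * For example, with HZ=1000 and the last tick taken 250 jiffies ago,
| | * the tick may be deferred by at most 750 jiffies, i.e. 750ms.
| | *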
| 3049 | * Return: Maximum deferment in nanoseconds. |
| 3050 | */ |
| 3051 | u64 scheduler_tick_max_deferment(void) |
| 3052 | { |
| 3053 | struct rq *rq = this_rq(); |
| 3054 | unsigned long next, now = READ_ONCE(jiffies); |
| 3055 | |
| 3056 | next = rq->last_sched_tick + HZ; |
| 3057 | |
| 3058 | if (time_before_eq(next, now)) |
| 3059 | return 0; |
| 3060 | |
| 3061 | return jiffies_to_nsecs(next - now); |
| 3062 | } |
| 3063 | #endif |
| 3064 | |
| 3065 | #if defined(CONFIG_PREEMPT) && (defined(CONFIG_DEBUG_PREEMPT) || \ |
| 3066 | defined(CONFIG_PREEMPT_TRACER)) |
| 3067 | /* |
| 3068 | * If the value passed in is equal to the current preempt count |
| 3069 | * then we just disabled preemption. Start timing the latency. |
| 3070 | */ |
| 3071 | static inline void preempt_latency_start(int val) |
| 3072 | { |
| 3073 | if (preempt_count() == val) { |
| 3074 | unsigned long ip = get_lock_parent_ip(); |
| 3075 | #ifdef CONFIG_DEBUG_PREEMPT |
| 3076 | current->preempt_disable_ip = ip; |
| 3077 | #endif |
| 3078 | trace_preempt_off(CALLER_ADDR0, ip); |
| 3079 | } |
| 3080 | } |
| 3081 | |
| 3082 | void preempt_count_add(int val) |
| 3083 | { |
| 3084 | #ifdef CONFIG_DEBUG_PREEMPT |
| 3085 | /* |
| 3086 | * Underflow? |
| 3087 | */ |
| 3088 | if (DEBUG_LOCKS_WARN_ON((preempt_count() < 0))) |
| 3089 | return; |
| 3090 | #endif |
| 3091 | __preempt_count_add(val); |
| 3092 | #ifdef CONFIG_DEBUG_PREEMPT |
| 3093 | /* |
| 3094 | * Spinlock count overflowing soon? |
| 3095 | */ |
| 3096 | DEBUG_LOCKS_WARN_ON((preempt_count() & PREEMPT_MASK) >= |
| 3097 | PREEMPT_MASK - 10); |
| 3098 | #endif |
| 3099 | preempt_latency_start(val); |
| 3100 | } |
| 3101 | EXPORT_SYMBOL(preempt_count_add); |
| 3102 | NOKPROBE_SYMBOL(preempt_count_add); |
| 3103 | |
| 3104 | /* |
| 3105 | * If the value passed in equals the current preempt count
| 3106 | * then we just enabled preemption. Stop timing the latency. |
| 3107 | */ |
| 3108 | static inline void preempt_latency_stop(int val) |
| 3109 | { |
| 3110 | if (preempt_count() == val) |
| 3111 | trace_preempt_on(CALLER_ADDR0, get_lock_parent_ip()); |
| 3112 | } |
| 3113 | |
| 3114 | void preempt_count_sub(int val) |
| 3115 | { |
| 3116 | #ifdef CONFIG_DEBUG_PREEMPT |
| 3117 | /* |
| 3118 | * Underflow? |
| 3119 | */ |
| 3120 | if (DEBUG_LOCKS_WARN_ON(val > preempt_count())) |
| 3121 | return; |
| 3122 | /* |
| 3123 | * Is the spinlock portion underflowing? |
| 3124 | */ |
| 3125 | if (DEBUG_LOCKS_WARN_ON((val < PREEMPT_MASK) && |
| 3126 | !(preempt_count() & PREEMPT_MASK))) |
| 3127 | return; |
| 3128 | #endif |
| 3129 | |
| 3130 | preempt_latency_stop(val); |
| 3131 | __preempt_count_sub(val); |
| 3132 | } |
| 3133 | EXPORT_SYMBOL(preempt_count_sub); |
| 3134 | NOKPROBE_SYMBOL(preempt_count_sub); |
| 3135 | |
| 3136 | #else |
| 3137 | static inline void preempt_latency_start(int val) { } |
| 3138 | static inline void preempt_latency_stop(int val) { } |
| 3139 | #endif |
| 3140 | |
| 3141 | /* |
| 3142 | * Print scheduling while atomic bug: |
| 3143 | */ |
| 3144 | static noinline void __schedule_bug(struct task_struct *prev) |
| 3145 | { |
| 3146 | if (oops_in_progress) |
| 3147 | return; |
| 3148 | |
| 3149 | printk(KERN_ERR "BUG: scheduling while atomic: %s/%d/0x%08x\n", |
| 3150 | prev->comm, prev->pid, preempt_count()); |
| 3151 | |
| 3152 | debug_show_held_locks(prev); |
| 3153 | print_modules(); |
| 3154 | if (irqs_disabled()) |
| 3155 | print_irqtrace_events(prev); |
| 3156 | #ifdef CONFIG_DEBUG_PREEMPT |
| 3157 | if (in_atomic_preempt_off()) { |
| 3158 | pr_err("Preemption disabled at:"); |
| 3159 | print_ip_sym(current->preempt_disable_ip); |
| 3160 | pr_cont("\n"); |
| 3161 | } |
| 3162 | #endif |
| 3163 | dump_stack(); |
| 3164 | add_taint(TAINT_WARN, LOCKDEP_STILL_OK); |
| 3165 | } |
| 3166 | |
| 3167 | /* |
| 3168 | * Various schedule()-time debugging checks and statistics: |
| 3169 | */ |
| 3170 | static inline void schedule_debug(struct task_struct *prev) |
| 3171 | { |
| 3172 | #ifdef CONFIG_SCHED_STACK_END_CHECK |
| 3173 | if (task_stack_end_corrupted(prev)) |
| 3174 | panic("corrupted stack end detected inside scheduler\n"); |
| 3175 | #endif |
| 3176 | |
| 3177 | if (unlikely(in_atomic_preempt_off())) { |
| 3178 | __schedule_bug(prev); |
| 3179 | preempt_count_set(PREEMPT_DISABLED); |
| 3180 | } |
| 3181 | rcu_sleep_check(); |
| 3182 | |
| 3183 | profile_hit(SCHED_PROFILING, __builtin_return_address(0)); |
| 3184 | |
| 3185 | schedstat_inc(this_rq(), sched_count); |
| 3186 | } |
| 3187 | |
| 3188 | /* |
| 3189 | * Pick up the highest-prio task: |
| 3190 | */ |
| 3191 | static inline struct task_struct * |
| 3192 | pick_next_task(struct rq *rq, struct task_struct *prev, struct pin_cookie cookie) |
| 3193 | { |
| 3194 | const struct sched_class *class = &fair_sched_class; |
| 3195 | struct task_struct *p; |
| 3196 | |
| 3197 | /* |
| 3198 | * Optimization: we know that if all tasks are in |
| 3199 | * the fair class we can call that function directly: |
| 3200 | */ |
| 3201 | if (likely(prev->sched_class == class && |
| 3202 | rq->nr_running == rq->cfs.h_nr_running)) { |
| 3203 | p = fair_sched_class.pick_next_task(rq, prev, cookie); |
| 3204 | if (unlikely(p == RETRY_TASK)) |
| 3205 | goto again; |
| 3206 | |
| 3207 | /* assumes fair_sched_class->next == idle_sched_class */ |
| 3208 | if (unlikely(!p)) |
| 3209 | p = idle_sched_class.pick_next_task(rq, prev, cookie); |
| 3210 | |
| 3211 | return p; |
| 3212 | } |
| 3213 | |
| 3214 | again: |
| 3215 | for_each_class(class) { |
| 3216 | p = class->pick_next_task(rq, prev, cookie); |
| 3217 | if (p) { |
| 3218 | if (unlikely(p == RETRY_TASK)) |
| 3219 | goto again; |
| 3220 | return p; |
| 3221 | } |
| 3222 | } |
| 3223 | |
| 3224 | BUG(); /* the idle class will always have a runnable task */ |
| 3225 | } |
| 3226 | |
| 3227 | /* |
| 3228 | * __schedule() is the main scheduler function. |
| 3229 | * |
| 3230 | * The main means of driving the scheduler and thus entering this function are: |
| 3231 | * |
| 3232 | * 1. Explicit blocking: mutex, semaphore, waitqueue, etc. |
| 3233 | * |
| 3234 | * 2. TIF_NEED_RESCHED flag is checked on interrupt and userspace return |
| 3235 | * paths. For example, see arch/x86/entry_64.S. |
| 3236 | * |
| 3237 | * To drive preemption between tasks, the scheduler sets the flag in timer |
| 3238 | * interrupt handler scheduler_tick(). |
| 3239 | * |
| 3240 | * 3. Wakeups don't really cause entry into schedule(). They add a |
| 3241 | * task to the run-queue and that's it. |
| 3242 | * |
| 3243 | * Now, if the new task added to the run-queue preempts the current |
| 3244 | * task, then the wakeup sets TIF_NEED_RESCHED and schedule() gets |
| 3245 | * called on the nearest possible occasion: |
| 3246 | * |
| 3247 | * - If the kernel is preemptible (CONFIG_PREEMPT=y): |
| 3248 | * |
| 3249 | * - in syscall or exception context, at the next outermost
| 3250 | * preempt_enable(). (this might be as soon as the wake_up()'s |
| 3251 | * spin_unlock()!) |
| 3252 | * |
| 3253 | * - in IRQ context, return from interrupt-handler to |
| 3254 | * preemptible context |
| 3255 | * |
| 3256 | * - If the kernel is not preemptible (CONFIG_PREEMPT is not set) |
| 3257 | * then at the next: |
| 3258 | * |
| 3259 | * - cond_resched() call |
| 3260 | * - explicit schedule() call |
| 3261 | * - return from syscall or exception to user-space |
| 3262 | * - return from interrupt-handler to user-space |
| 3263 | * |
| 3264 | * WARNING: must be called with preemption disabled! |
| 3265 | */ |
| 3266 | static void __sched notrace __schedule(bool preempt) |
| 3267 | { |
| 3268 | struct task_struct *prev, *next; |
| 3269 | unsigned long *switch_count; |
| 3270 | struct pin_cookie cookie; |
| 3271 | struct rq *rq; |
| 3272 | int cpu; |
| 3273 | |
| 3274 | cpu = smp_processor_id(); |
| 3275 | rq = cpu_rq(cpu); |
| 3276 | prev = rq->curr; |
| 3277 | |
| 3278 | /* |
| 3279 | * do_exit() calls schedule() with preemption disabled as an exception; |
| 3280 | * however we must fix that up, otherwise the next task will see an |
| 3281 | * inconsistent (higher) preempt count. |
| 3282 | * |
| 3283 | * It also keeps the schedule_debug() check below from complaining
| 3284 | * about this. |
| 3285 | */ |
| 3286 | if (unlikely(prev->state == TASK_DEAD)) |
| 3287 | preempt_enable_no_resched_notrace(); |
| 3288 | |
| 3289 | schedule_debug(prev); |
| 3290 | |
| 3291 | if (sched_feat(HRTICK)) |
| 3292 | hrtick_clear(rq); |
| 3293 | |
| 3294 | local_irq_disable(); |
| 3295 | rcu_note_context_switch(); |
| 3296 | |
| 3297 | /* |
| 3298 | * Make sure that signal_pending_state()->signal_pending() below |
| 3299 | * can't be reordered with __set_current_state(TASK_INTERRUPTIBLE) |
| 3300 | * done by the caller to avoid the race with signal_wake_up(). |
| 3301 | */ |
| 3302 | smp_mb__before_spinlock(); |
| 3303 | raw_spin_lock(&rq->lock); |
| 3304 | cookie = lockdep_pin_lock(&rq->lock); |
| 3305 | |
| 3306 | rq->clock_skip_update <<= 1; /* promote REQ to ACT */ |
| 3307 | |
| 3308 | switch_count = &prev->nivcsw; |
| 3309 | if (!preempt && prev->state) { |
| 3310 | if (unlikely(signal_pending_state(prev->state, prev))) { |
| 3311 | prev->state = TASK_RUNNING; |
| 3312 | } else { |
| 3313 | deactivate_task(rq, prev, DEQUEUE_SLEEP); |
| 3314 | prev->on_rq = 0; |
| 3315 | |
| 3316 | /* |
| 3317 | * If a worker went to sleep, notify and ask workqueue |
| 3318 | * whether it wants to wake up a task to maintain |
| 3319 | * concurrency. |
| 3320 | */ |
| 3321 | if (prev->flags & PF_WQ_WORKER) { |
| 3322 | struct task_struct *to_wakeup; |
| 3323 | |
| 3324 | to_wakeup = wq_worker_sleeping(prev); |
| 3325 | if (to_wakeup) |
| 3326 | try_to_wake_up_local(to_wakeup, cookie); |
| 3327 | } |
| 3328 | } |
| 3329 | switch_count = &prev->nvcsw; |
| 3330 | } |
| 3331 | |
| 3332 | if (task_on_rq_queued(prev)) |
| 3333 | update_rq_clock(rq); |
| 3334 | |
| 3335 | next = pick_next_task(rq, prev, cookie); |
| 3336 | clear_tsk_need_resched(prev); |
| 3337 | clear_preempt_need_resched(); |
| 3338 | rq->clock_skip_update = 0; |
| 3339 | |
| 3340 | if (likely(prev != next)) { |
| 3341 | rq->nr_switches++; |
| 3342 | rq->curr = next; |
| 3343 | ++*switch_count; |
| 3344 | |
| 3345 | trace_sched_switch(preempt, prev, next); |
| 3346 | rq = context_switch(rq, prev, next, cookie); /* unlocks the rq */ |
| 3347 | } else { |
| 3348 | lockdep_unpin_lock(&rq->lock, cookie); |
| 3349 | raw_spin_unlock_irq(&rq->lock); |
| 3350 | } |
| 3351 | |
| 3352 | balance_callback(rq); |
| 3353 | } |
| 3354 | STACK_FRAME_NON_STANDARD(__schedule); /* switch_to() */ |
| 3355 | |
| 3356 | static inline void sched_submit_work(struct task_struct *tsk) |
| 3357 | { |
| 3358 | if (!tsk->state || tsk_is_pi_blocked(tsk)) |
| 3359 | return; |
| 3360 | /* |
| 3361 | * If we are going to sleep and we have plugged IO queued, |
| 3362 | * make sure to submit it to avoid deadlocks. |
| 3363 | */ |
| 3364 | if (blk_needs_flush_plug(tsk)) |
| 3365 | blk_schedule_flush_plug(tsk); |
| 3366 | } |
| 3367 | |
| 3368 | asmlinkage __visible void __sched schedule(void) |
| 3369 | { |
| 3370 | struct task_struct *tsk = current; |
| 3371 | |
| 3372 | sched_submit_work(tsk); |
| 3373 | do { |
| 3374 | preempt_disable(); |
| 3375 | __schedule(false); |
| 3376 | sched_preempt_enable_no_resched(); |
| 3377 | } while (need_resched()); |
| 3378 | } |
| 3379 | EXPORT_SYMBOL(schedule); |
| 3380 | |
| 3381 | #ifdef CONFIG_CONTEXT_TRACKING |
| 3382 | asmlinkage __visible void __sched schedule_user(void) |
| 3383 | { |
| 3384 | /* |
| 3385 | * If we come here after a random call to set_need_resched(), |
| 3386 | * or we have been woken up remotely but the IPI has not yet arrived, |
| 3387 | * we haven't yet exited the RCU idle mode. Do it here manually until |
| 3388 | * we find a better solution. |
| 3389 | * |
| 3390 | * NB: There are buggy callers of this function. Ideally we |
| 3391 | * should warn if prev_state != CONTEXT_USER, but that will trigger |
| 3392 | * too frequently to make sense yet. |
| 3393 | */ |
| 3394 | enum ctx_state prev_state = exception_enter(); |
| 3395 | schedule(); |
| 3396 | exception_exit(prev_state); |
| 3397 | } |
| 3398 | #endif |
| 3399 | |
| 3400 | /** |
| 3401 | * schedule_preempt_disabled - called with preemption disabled |
| 3402 | * |
| 3403 | * Returns with preemption disabled. Note: preempt_count must be 1 |
| 3404 | */ |
| 3405 | void __sched schedule_preempt_disabled(void) |
| 3406 | { |
| 3407 | sched_preempt_enable_no_resched(); |
| 3408 | schedule(); |
| 3409 | preempt_disable(); |
| 3410 | } |
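|      | 
|      | /*
|      |  * Illustrative caller pattern (a sketch, not taken from this file;
|      |  * 'stop_condition' is hypothetical): a kthread that must keep
|      |  * preemption off around its own state checks can sleep like this:
|      |  *
|      |  *	preempt_disable();
|      |  *	while (!stop_condition) {
|      |  *		set_current_state(TASK_INTERRUPTIBLE);
|      |  *		schedule_preempt_disabled();	// returns preempt-off
|      |  *	}
|      |  *	__set_current_state(TASK_RUNNING);
|      |  *	preempt_enable();
|      |  *
|      |  * Per the comment above, preempt_count must be exactly 1 on entry.
|      |  */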
| 3411 | |
| 3412 | static void __sched notrace preempt_schedule_common(void) |
| 3413 | { |
| 3414 | do { |
| 3415 | /* |
| 3416 | * Because the function tracer can trace preempt_count_sub() |
| 3417 | * and it also uses preempt_enable/disable_notrace(), if |
| 3418 | * NEED_RESCHED is set, the preempt_enable_notrace() called |
| 3419 | * by the function tracer will call this function again and |
| 3420 | * cause infinite recursion. |
| 3421 | * |
| 3422 | * Preemption must be disabled here before the function |
| 3423 | * tracer can trace. Break up preempt_disable() into two |
| 3424 | * calls. One to disable preemption without fear of being |
| 3425 | * traced. The other to still record the preemption latency, |
| 3426 | * which can also be traced by the function tracer. |
| 3427 | */ |
| 3428 | preempt_disable_notrace(); |
| 3429 | preempt_latency_start(1); |
| 3430 | __schedule(true); |
| 3431 | preempt_latency_stop(1); |
| 3432 | preempt_enable_no_resched_notrace(); |
| 3433 | |
| 3434 | /* |
| 3435 | * Check again in case we missed a preemption opportunity |
| 3436 | * between schedule and now. |
| 3437 | */ |
| 3438 | } while (need_resched()); |
| 3439 | } |
| 3440 | |
| 3441 | #ifdef CONFIG_PREEMPT |
| 3442 | /* |
| 3443 |  * This is the entry point to schedule() from in-kernel preemption
| 3444 |  * off of preempt_enable(). Kernel preemption off of the return-from-interrupt
| 3445 |  * path goes through preempt_schedule_irq() below, which calls schedule() directly.
| 3446 | */ |
| 3447 | asmlinkage __visible void __sched notrace preempt_schedule(void) |
| 3448 | { |
| 3449 | /* |
| 3450 | * If there is a non-zero preempt_count or interrupts are disabled, |
| 3451 |  * we do not want to preempt the current task. Just return.
| 3452 | */ |
| 3453 | if (likely(!preemptible())) |
| 3454 | return; |
| 3455 | |
| 3456 | preempt_schedule_common(); |
| 3457 | } |
| 3458 | NOKPROBE_SYMBOL(preempt_schedule); |
| 3459 | EXPORT_SYMBOL(preempt_schedule); |
| 3460 | |
| 3461 | /** |
| 3462 | * preempt_schedule_notrace - preempt_schedule called by tracing |
| 3463 | * |
| 3464 |  * The tracing infrastructure uses preempt_enable_notrace to prevent
| 3465 |  * recursion when the preempt enabling is itself caused by the tracing
| 3466 |  * infrastructure. But as tracing can happen in areas coming
| 3467 | * from userspace or just about to enter userspace, a preempt enable |
| 3468 | * can occur before user_exit() is called. This will cause the scheduler |
| 3469 | * to be called when the system is still in usermode. |
| 3470 | * |
| 3471 | * To prevent this, the preempt_enable_notrace will use this function |
| 3472 | * instead of preempt_schedule() to exit user context if needed before |
| 3473 | * calling the scheduler. |
| 3474 | */ |
| 3475 | asmlinkage __visible void __sched notrace preempt_schedule_notrace(void) |
| 3476 | { |
| 3477 | enum ctx_state prev_ctx; |
| 3478 | |
| 3479 | if (likely(!preemptible())) |
| 3480 | return; |
| 3481 | |
| 3482 | do { |
| 3483 | /* |
| 3484 | * Because the function tracer can trace preempt_count_sub() |
| 3485 | * and it also uses preempt_enable/disable_notrace(), if |
| 3486 | * NEED_RESCHED is set, the preempt_enable_notrace() called |
| 3487 | * by the function tracer will call this function again and |
| 3488 | * cause infinite recursion. |
| 3489 | * |
| 3490 | * Preemption must be disabled here before the function |
| 3491 | * tracer can trace. Break up preempt_disable() into two |
| 3492 | * calls. One to disable preemption without fear of being |
| 3493 | * traced. The other to still record the preemption latency, |
| 3494 | * which can also be traced by the function tracer. |
| 3495 | */ |
| 3496 | preempt_disable_notrace(); |
| 3497 | preempt_latency_start(1); |
| 3498 | /* |
| 3499 | * Needs preempt disabled in case user_exit() is traced |
| 3500 | * and the tracer calls preempt_enable_notrace() causing |
| 3501 | * an infinite recursion. |
| 3502 | */ |
| 3503 | prev_ctx = exception_enter(); |
| 3504 | __schedule(true); |
| 3505 | exception_exit(prev_ctx); |
| 3506 | |
| 3507 | preempt_latency_stop(1); |
| 3508 | preempt_enable_no_resched_notrace(); |
| 3509 | } while (need_resched()); |
| 3510 | } |
| 3511 | EXPORT_SYMBOL_GPL(preempt_schedule_notrace); |
| 3512 | |
| 3513 | #endif /* CONFIG_PREEMPT */ |
| 3514 | |
| 3515 | /* |
| 3516 |  * This is the entry point to schedule() from kernel preemption
| 3517 |  * off of irq context.
| 3518 |  * Note that this is called and returns with irqs disabled. This
| 3519 |  * protects us against recursive calls from irq context.
| 3520 | */ |
| 3521 | asmlinkage __visible void __sched preempt_schedule_irq(void) |
| 3522 | { |
| 3523 | enum ctx_state prev_state; |
| 3524 | |
| 3525 | /* Catch callers which need to be fixed */ |
| 3526 | BUG_ON(preempt_count() || !irqs_disabled()); |
| 3527 | |
| 3528 | prev_state = exception_enter(); |
| 3529 | |
| 3530 | do { |
| 3531 | preempt_disable(); |
| 3532 | local_irq_enable(); |
| 3533 | __schedule(true); |
| 3534 | local_irq_disable(); |
| 3535 | sched_preempt_enable_no_resched(); |
| 3536 | } while (need_resched()); |
| 3537 | |
| 3538 | exception_exit(prev_state); |
| 3539 | } |
| 3540 | |
| 3541 | int default_wake_function(wait_queue_t *curr, unsigned mode, int wake_flags, |
| 3542 | void *key) |
| 3543 | { |
| 3544 | return try_to_wake_up(curr->private, mode, wake_flags); |
| 3545 | } |
| 3546 | EXPORT_SYMBOL(default_wake_function); |
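|      | 
|      | /*
|      |  * default_wake_function() is what a plain waitqueue entry points at;
|      |  * a minimal sketch of the usual setup (hypothetical caller code,
|      |  * 'wq_head' assumed declared elsewhere):
|      |  *
|      |  *	DECLARE_WAITQUEUE(wait, current);  // .func = default_wake_function
|      |  *
|      |  *	add_wait_queue(&wq_head, &wait);
|      |  *	set_current_state(TASK_INTERRUPTIBLE);
|      |  *	schedule();	// a wake_up(&wq_head) lands here, via
|      |  *			// try_to_wake_up() above
|      |  *	remove_wait_queue(&wq_head, &wait);
|      |  */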
| 3547 | |
| 3548 | #ifdef CONFIG_RT_MUTEXES |
| 3549 | |
| 3550 | /* |
| 3551 | * rt_mutex_setprio - set the current priority of a task |
| 3552 | * @p: task |
| 3553 | * @prio: prio value (kernel-internal form) |
| 3554 | * |
| 3555 | * This function changes the 'effective' priority of a task. It does |
| 3556 | * not touch ->normal_prio like __setscheduler(). |
| 3557 | * |
| 3558 | * Used by the rt_mutex code to implement priority inheritance |
| 3559 | * logic. Call site only calls if the priority of the task changed. |
| 3560 | */ |
| 3561 | void rt_mutex_setprio(struct task_struct *p, int prio) |
| 3562 | { |
| 3563 | int oldprio, queued, running, queue_flag = DEQUEUE_SAVE | DEQUEUE_MOVE; |
| 3564 | const struct sched_class *prev_class; |
| 3565 | struct rq_flags rf; |
| 3566 | struct rq *rq; |
| 3567 | |
| 3568 | BUG_ON(prio > MAX_PRIO); |
| 3569 | |
| 3570 | rq = __task_rq_lock(p, &rf); |
| 3571 | |
| 3572 | /* |
| 3573 |  * Idle task boosting is a no-no in general. There is one
| 3574 | * exception, when PREEMPT_RT and NOHZ is active: |
| 3575 | * |
| 3576 | * The idle task calls get_next_timer_interrupt() and holds |
| 3577 | * the timer wheel base->lock on the CPU and another CPU wants |
| 3578 | * to access the timer (probably to cancel it). We can safely |
| 3579 | * ignore the boosting request, as the idle CPU runs this code |
| 3580 | * with interrupts disabled and will complete the lock |
| 3581 | * protected section without being interrupted. So there is no |
| 3582 | * real need to boost. |
| 3583 | */ |
| 3584 | if (unlikely(p == rq->idle)) { |
| 3585 | WARN_ON(p != rq->curr); |
| 3586 | WARN_ON(p->pi_blocked_on); |
| 3587 | goto out_unlock; |
| 3588 | } |
| 3589 | |
| 3590 | trace_sched_pi_setprio(p, prio); |
| 3591 | oldprio = p->prio; |
| 3592 | |
| 3593 | if (oldprio == prio) |
| 3594 | queue_flag &= ~DEQUEUE_MOVE; |
| 3595 | |
| 3596 | prev_class = p->sched_class; |
| 3597 | queued = task_on_rq_queued(p); |
| 3598 | running = task_current(rq, p); |
| 3599 | if (queued) |
| 3600 | dequeue_task(rq, p, queue_flag); |
| 3601 | if (running) |
| 3602 | put_prev_task(rq, p); |
| 3603 | |
| 3604 | /* |
| 3605 |  * The boosting conditions are:
| 3606 | * 1. -rt task is running and holds mutex A |
| 3607 | * --> -dl task blocks on mutex A |
| 3608 | * |
| 3609 | * 2. -dl task is running and holds mutex A |
| 3610 | * --> -dl task blocks on mutex A and could preempt the |
| 3611 | * running task |
| 3612 | */ |
| 3613 | if (dl_prio(prio)) { |
| 3614 | struct task_struct *pi_task = rt_mutex_get_top_task(p); |
| 3615 | if (!dl_prio(p->normal_prio) || |
| 3616 | (pi_task && dl_entity_preempt(&pi_task->dl, &p->dl))) { |
| 3617 | p->dl.dl_boosted = 1; |
| 3618 | queue_flag |= ENQUEUE_REPLENISH; |
| 3619 | } else |
| 3620 | p->dl.dl_boosted = 0; |
| 3621 | p->sched_class = &dl_sched_class; |
| 3622 | } else if (rt_prio(prio)) { |
| 3623 | if (dl_prio(oldprio)) |
| 3624 | p->dl.dl_boosted = 0; |
| 3625 | if (oldprio < prio) |
| 3626 | queue_flag |= ENQUEUE_HEAD; |
| 3627 | p->sched_class = &rt_sched_class; |
| 3628 | } else { |
| 3629 | if (dl_prio(oldprio)) |
| 3630 | p->dl.dl_boosted = 0; |
| 3631 | if (rt_prio(oldprio)) |
| 3632 | p->rt.timeout = 0; |
| 3633 | p->sched_class = &fair_sched_class; |
| 3634 | } |
| 3635 | |
| 3636 | p->prio = prio; |
| 3637 | |
| 3638 | if (running) |
| 3639 | p->sched_class->set_curr_task(rq); |
| 3640 | if (queued) |
| 3641 | enqueue_task(rq, p, queue_flag); |
| 3642 | |
| 3643 | check_class_changed(rq, p, prev_class, oldprio); |
| 3644 | out_unlock: |
| 3645 | preempt_disable(); /* avoid rq from going away on us */ |
| 3646 | __task_rq_unlock(rq, &rf); |
| 3647 | |
| 3648 | balance_callback(rq); |
| 3649 | preempt_enable(); |
| 3650 | } |
| 3651 | #endif |
| 3652 | |
| 3653 | void set_user_nice(struct task_struct *p, long nice) |
| 3654 | { |
| 3655 | int old_prio, delta, queued; |
| 3656 | struct rq_flags rf; |
| 3657 | struct rq *rq; |
| 3658 | |
| 3659 | if (task_nice(p) == nice || nice < MIN_NICE || nice > MAX_NICE) |
| 3660 | return; |
| 3661 | /* |
| 3662 | * We have to be careful, if called from sys_setpriority(), |
| 3663 | * the task might be in the middle of scheduling on another CPU. |
| 3664 | */ |
| 3665 | rq = task_rq_lock(p, &rf); |
| 3666 | /* |
| 3667 | * The RT priorities are set via sched_setscheduler(), but we still |
| 3668 |  * allow the 'normal' nice value to be set - but as expected
| 3669 |  * it won't have any effect on scheduling while the task is
| 3670 |  * SCHED_DEADLINE, SCHED_FIFO or SCHED_RR:
| 3671 | */ |
| 3672 | if (task_has_dl_policy(p) || task_has_rt_policy(p)) { |
| 3673 | p->static_prio = NICE_TO_PRIO(nice); |
| 3674 | goto out_unlock; |
| 3675 | } |
| 3676 | queued = task_on_rq_queued(p); |
| 3677 | if (queued) |
| 3678 | dequeue_task(rq, p, DEQUEUE_SAVE); |
| 3679 | |
| 3680 | p->static_prio = NICE_TO_PRIO(nice); |
| 3681 | set_load_weight(p); |
| 3682 | old_prio = p->prio; |
| 3683 | p->prio = effective_prio(p); |
| 3684 | delta = p->prio - old_prio; |
| 3685 | |
| 3686 | if (queued) { |
| 3687 | enqueue_task(rq, p, ENQUEUE_RESTORE); |
| 3688 | /* |
| 3689 | * If the task increased its priority or is running and |
| 3690 | * lowered its priority, then reschedule its CPU: |
| 3691 | */ |
| 3692 | if (delta < 0 || (delta > 0 && task_running(rq, p))) |
| 3693 | resched_curr(rq); |
| 3694 | } |
| 3695 | out_unlock: |
| 3696 | task_rq_unlock(rq, p, &rf); |
| 3697 | } |
| 3698 | EXPORT_SYMBOL(set_user_nice); |
| 3699 | |
| 3700 | /* |
| 3701 | * can_nice - check if a task can reduce its nice value |
| 3702 | * @p: task |
| 3703 | * @nice: nice value |
| 3704 | */ |
| 3705 | int can_nice(const struct task_struct *p, const int nice) |
| 3706 | { |
| 3707 | /* convert nice value [19,-20] to rlimit style value [1,40] */ |
| 3708 | int nice_rlim = nice_to_rlimit(nice); |
| 3709 | |
| 3710 | return (nice_rlim <= task_rlimit(p, RLIMIT_NICE) || |
| 3711 | capable(CAP_SYS_NICE)); |
| 3712 | } |
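|      | 
|      | /*
|      |  * Worked example of the mapping above: nice_to_rlimit() is essentially
|      |  * MAX_NICE - nice + 1, i.e. 20 - nice, so:
|      |  *
|      |  *	nice  19 -> rlimit  1	(weakest)
|      |  *	nice   0 -> rlimit 20
|      |  *	nice -20 -> rlimit 40	(strongest)
|      |  *
|      |  * A task with RLIMIT_NICE = 25 may therefore lower its nice value to
|      |  * 20 - 25 = -5, but no further, unless it has CAP_SYS_NICE.
|      |  */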
| 3713 | |
| 3714 | #ifdef __ARCH_WANT_SYS_NICE |
| 3715 | |
| 3716 | /**
| 3717 |  * sys_nice - change the priority of the current process.
| 3718 |  * @increment: priority increment
| 3719 |  *
| 3720 |  * sys_setpriority is a more generic, but much slower function that
| 3721 |  * does similar things.
|      |  *
|      |  * Return: 0 on success. An error code otherwise.
| 3722 |  */
| 3723 | SYSCALL_DEFINE1(nice, int, increment) |
| 3724 | { |
| 3725 | long nice, retval; |
| 3726 | |
| 3727 | /* |
| 3728 | * Setpriority might change our priority at the same moment. |
| 3729 | * We don't have to worry. Conceptually one call occurs first |
| 3730 | * and we have a single winner. |
| 3731 | */ |
| 3732 | increment = clamp(increment, -NICE_WIDTH, NICE_WIDTH); |
| 3733 | nice = task_nice(current) + increment; |
| 3734 | |
| 3735 | nice = clamp_val(nice, MIN_NICE, MAX_NICE); |
| 3736 | if (increment < 0 && !can_nice(current, nice)) |
| 3737 | return -EPERM; |
| 3738 | |
| 3739 | retval = security_task_setnice(current, nice); |
| 3740 | if (retval) |
| 3741 | return retval; |
| 3742 | |
| 3743 | set_user_nice(current, nice); |
| 3744 | return 0; |
| 3745 | } |
| 3746 | |
| 3747 | #endif |
| 3748 | |
| 3749 | /** |
| 3750 | * task_prio - return the priority value of a given task. |
| 3751 | * @p: the task in question. |
| 3752 | * |
| 3753 | * Return: The priority value as seen by users in /proc. |
| 3754 |  * RT tasks are in the range [-100..-2] (SCHED_DEADLINE tasks report
| 3755 |  * -101), and normal tasks are in the range [0..39] (nice value + 20).
| 3756 | */ |
| 3757 | int task_prio(const struct task_struct *p) |
| 3758 | { |
| 3759 | return p->prio - MAX_RT_PRIO; |
| 3760 | } |
| 3761 | |
| 3762 | /** |
| 3763 | * idle_cpu - is a given cpu idle currently? |
| 3764 | * @cpu: the processor in question. |
| 3765 | * |
| 3766 | * Return: 1 if the CPU is currently idle. 0 otherwise. |
| 3767 | */ |
| 3768 | int idle_cpu(int cpu) |
| 3769 | { |
| 3770 | struct rq *rq = cpu_rq(cpu); |
| 3771 | |
| 3772 | if (rq->curr != rq->idle) |
| 3773 | return 0; |
| 3774 | |
| 3775 | if (rq->nr_running) |
| 3776 | return 0; |
| 3777 | |
| 3778 | #ifdef CONFIG_SMP |
| 3779 | if (!llist_empty(&rq->wake_list)) |
| 3780 | return 0; |
| 3781 | #endif |
| 3782 | |
| 3783 | return 1; |
| 3784 | } |
| 3785 | |
| 3786 | /** |
| 3787 | * idle_task - return the idle task for a given cpu. |
| 3788 | * @cpu: the processor in question. |
| 3789 | * |
| 3790 | * Return: The idle task for the cpu @cpu. |
| 3791 | */ |
| 3792 | struct task_struct *idle_task(int cpu) |
| 3793 | { |
| 3794 | return cpu_rq(cpu)->idle; |
| 3795 | } |
| 3796 | |
| 3797 | /** |
| 3798 | * find_process_by_pid - find a process with a matching PID value. |
| 3799 | * @pid: the pid in question. |
| 3800 | * |
| 3801 |  * Return: The task of @pid, if found. %NULL otherwise.
| 3802 | */ |
| 3803 | static struct task_struct *find_process_by_pid(pid_t pid) |
| 3804 | { |
| 3805 | return pid ? find_task_by_vpid(pid) : current; |
| 3806 | } |
| 3807 | |
| 3808 | /* |
| 3809 |  * This function initializes the sched_dl_entity of a task that is
| 3810 |  * becoming SCHED_DEADLINE.
| 3811 | * |
| 3812 | * Only the static values are considered here, the actual runtime and the |
| 3813 | * absolute deadline will be properly calculated when the task is enqueued |
| 3814 | * for the first time with its new policy. |
| 3815 | */ |
| 3816 | static void |
| 3817 | __setparam_dl(struct task_struct *p, const struct sched_attr *attr) |
| 3818 | { |
| 3819 | struct sched_dl_entity *dl_se = &p->dl; |
| 3820 | |
| 3821 | dl_se->dl_runtime = attr->sched_runtime; |
| 3822 | dl_se->dl_deadline = attr->sched_deadline; |
| 3823 | dl_se->dl_period = attr->sched_period ?: dl_se->dl_deadline; |
| 3824 | dl_se->flags = attr->sched_flags; |
| 3825 | dl_se->dl_bw = to_ratio(dl_se->dl_period, dl_se->dl_runtime); |
| 3826 | |
| 3827 | /* |
| 3828 | * Changing the parameters of a task is 'tricky' and we're not doing |
| 3829 | * the correct thing -- also see task_dead_dl() and switched_from_dl(). |
| 3830 | * |
| 3831 | * What we SHOULD do is delay the bandwidth release until the 0-lag |
| 3832 | * point. This would include retaining the task_struct until that time |
| 3833 | * and change dl_overflow() to not immediately decrement the current |
| 3834 | * amount. |
| 3835 | * |
| 3836 | * Instead we retain the current runtime/deadline and let the new |
| 3837 | * parameters take effect after the current reservation period lapses. |
| 3838 | * This is safe (albeit pessimistic) because the 0-lag point is always |
| 3839 | * before the current scheduling deadline. |
| 3840 | * |
| 3841 | * We can still have temporary overloads because we do not delay the |
| 3842 | * change in bandwidth until that time; so admission control is |
| 3843 | * not on the safe side. It does however guarantee tasks will never |
| 3844 | * consume more than promised. |
| 3845 | */ |
| 3846 | } |
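|      | 
|      | /*
|      |  * A worked example of the bandwidth computed above, assuming
|      |  * to_ratio()'s usual 20-bit fixed-point shift: sched_runtime = 10ms
|      |  * every sched_period = 100ms gives
|      |  *
|      |  *	dl_bw = (10^7 << 20) / 10^8 ~= 0.1 * 2^20 = 104857
|      |  *
|      |  * i.e. roughly 10% of a CPU, which is the quantity dl_overflow()
|      |  * sums per root_domain and checks against the available bandwidth.
|      |  */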
| 3847 | |
| 3848 | /* |
| 3849 | * sched_setparam() passes in -1 for its policy, to let the functions |
| 3850 | * it calls know not to change it. |
| 3851 | */ |
| 3852 | #define SETPARAM_POLICY -1 |
| 3853 | |
| 3854 | static void __setscheduler_params(struct task_struct *p, |
| 3855 | const struct sched_attr *attr) |
| 3856 | { |
| 3857 | int policy = attr->sched_policy; |
| 3858 | |
| 3859 | if (policy == SETPARAM_POLICY) |
| 3860 | policy = p->policy; |
| 3861 | |
| 3862 | p->policy = policy; |
| 3863 | |
| 3864 | if (dl_policy(policy)) |
| 3865 | __setparam_dl(p, attr); |
| 3866 | else if (fair_policy(policy)) |
| 3867 | p->static_prio = NICE_TO_PRIO(attr->sched_nice); |
| 3868 | |
| 3869 | /* |
| 3870 | * __sched_setscheduler() ensures attr->sched_priority == 0 when |
| 3871 | * !rt_policy. Always setting this ensures that things like |
| 3872 | * getparam()/getattr() don't report silly values for !rt tasks. |
| 3873 | */ |
| 3874 | p->rt_priority = attr->sched_priority; |
| 3875 | p->normal_prio = normal_prio(p); |
| 3876 | set_load_weight(p); |
| 3877 | } |
| 3878 | |
| 3879 | /* Actually do priority change: must hold pi & rq lock. */ |
| 3880 | static void __setscheduler(struct rq *rq, struct task_struct *p, |
| 3881 | const struct sched_attr *attr, bool keep_boost) |
| 3882 | { |
| 3883 | __setscheduler_params(p, attr); |
| 3884 | |
| 3885 | /* |
| 3886 | * Keep a potential priority boosting if called from |
| 3887 | * sched_setscheduler(). |
| 3888 | */ |
| 3889 | if (keep_boost) |
| 3890 | p->prio = rt_mutex_get_effective_prio(p, normal_prio(p)); |
| 3891 | else |
| 3892 | p->prio = normal_prio(p); |
| 3893 | |
| 3894 | if (dl_prio(p->prio)) |
| 3895 | p->sched_class = &dl_sched_class; |
| 3896 | else if (rt_prio(p->prio)) |
| 3897 | p->sched_class = &rt_sched_class; |
| 3898 | else |
| 3899 | p->sched_class = &fair_sched_class; |
| 3900 | } |
| 3901 | |
| 3902 | static void |
| 3903 | __getparam_dl(struct task_struct *p, struct sched_attr *attr) |
| 3904 | { |
| 3905 | struct sched_dl_entity *dl_se = &p->dl; |
| 3906 | |
| 3907 | attr->sched_priority = p->rt_priority; |
| 3908 | attr->sched_runtime = dl_se->dl_runtime; |
| 3909 | attr->sched_deadline = dl_se->dl_deadline; |
| 3910 | attr->sched_period = dl_se->dl_period; |
| 3911 | attr->sched_flags = dl_se->flags; |
| 3912 | } |
| 3913 | |
| 3914 | /* |
| 3915 | * This function validates the new parameters of a -deadline task. |
| 3916 |  * We require the deadline to be non-zero and greater than or equal
| 3917 |  * to the runtime, and the period to be either zero or greater than
| 3918 |  * or equal to the deadline. Furthermore, we have to be sure that
| 3919 | * user parameters are above the internal resolution of 1us (we |
| 3920 | * check sched_runtime only since it is always the smaller one) and |
| 3921 | * below 2^63 ns (we have to check both sched_deadline and |
| 3922 | * sched_period, as the latter can be zero). |
| 3923 | */ |
| 3924 | static bool |
| 3925 | __checkparam_dl(const struct sched_attr *attr) |
| 3926 | { |
| 3927 | /* deadline != 0 */ |
| 3928 | if (attr->sched_deadline == 0) |
| 3929 | return false; |
| 3930 | |
| 3931 | /* |
| 3932 | * Since we truncate DL_SCALE bits, make sure we're at least |
| 3933 | * that big. |
| 3934 | */ |
| 3935 | if (attr->sched_runtime < (1ULL << DL_SCALE)) |
| 3936 | return false; |
| 3937 | |
| 3938 | /* |
| 3939 | * Since we use the MSB for wrap-around and sign issues, make |
| 3940 | * sure it's not set (mind that period can be equal to zero). |
| 3941 | */ |
| 3942 | if (attr->sched_deadline & (1ULL << 63) || |
| 3943 | attr->sched_period & (1ULL << 63)) |
| 3944 | return false; |
| 3945 | |
| 3946 | /* runtime <= deadline <= period (if period != 0) */ |
| 3947 | if ((attr->sched_period != 0 && |
| 3948 | attr->sched_period < attr->sched_deadline) || |
| 3949 | attr->sched_deadline < attr->sched_runtime) |
| 3950 | return false; |
| 3951 | |
| 3952 | return true; |
| 3953 | } |
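|      | 
|      | /*
|      |  * Example parameter sets against the checks above (values are
|      |  * illustrative only):
|      |  *
|      |  *	runtime = 2ms, deadline = 5ms, period = 10ms	-> accepted
|      |  *	runtime = 5ms, deadline = 2ms, period = 10ms	-> rejected
|      |  *		(deadline < runtime)
|      |  *	runtime = 500ns, deadline = 5ms, period = 0	-> rejected
|      |  *		(runtime below the 1ULL << DL_SCALE ~ 1us resolution)
|      |  */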
| 3954 | |
| 3955 | /* |
| 3956 |  * Check whether the target process has a UID that matches the current process's.
| 3957 | */ |
| 3958 | static bool check_same_owner(struct task_struct *p) |
| 3959 | { |
| 3960 | const struct cred *cred = current_cred(), *pcred; |
| 3961 | bool match; |
| 3962 | |
| 3963 | rcu_read_lock(); |
| 3964 | pcred = __task_cred(p); |
| 3965 | match = (uid_eq(cred->euid, pcred->euid) || |
| 3966 | uid_eq(cred->euid, pcred->uid)); |
| 3967 | rcu_read_unlock(); |
| 3968 | return match; |
| 3969 | } |
| 3970 | |
| 3971 | static bool dl_param_changed(struct task_struct *p, |
| 3972 | const struct sched_attr *attr) |
| 3973 | { |
| 3974 | struct sched_dl_entity *dl_se = &p->dl; |
| 3975 | |
| 3976 | if (dl_se->dl_runtime != attr->sched_runtime || |
| 3977 | dl_se->dl_deadline != attr->sched_deadline || |
| 3978 | dl_se->dl_period != attr->sched_period || |
| 3979 | dl_se->flags != attr->sched_flags) |
| 3980 | return true; |
| 3981 | |
| 3982 | return false; |
| 3983 | } |
| 3984 | |
| 3985 | static int __sched_setscheduler(struct task_struct *p, |
| 3986 | const struct sched_attr *attr, |
| 3987 | bool user, bool pi) |
| 3988 | { |
| 3989 | int newprio = dl_policy(attr->sched_policy) ? MAX_DL_PRIO - 1 : |
| 3990 | MAX_RT_PRIO - 1 - attr->sched_priority; |
| 3991 | int retval, oldprio, oldpolicy = -1, queued, running; |
| 3992 | int new_effective_prio, policy = attr->sched_policy; |
| 3993 | const struct sched_class *prev_class; |
| 3994 | struct rq_flags rf; |
| 3995 | int reset_on_fork; |
| 3996 | int queue_flags = DEQUEUE_SAVE | DEQUEUE_MOVE; |
| 3997 | struct rq *rq; |
| 3998 | |
| 3999 | /* may grab non-irq protected spin_locks */ |
| 4000 | BUG_ON(in_interrupt()); |
| 4001 | recheck: |
| 4002 | /* double check policy once rq lock held */ |
| 4003 | if (policy < 0) { |
| 4004 | reset_on_fork = p->sched_reset_on_fork; |
| 4005 | policy = oldpolicy = p->policy; |
| 4006 | } else { |
| 4007 | reset_on_fork = !!(attr->sched_flags & SCHED_FLAG_RESET_ON_FORK); |
| 4008 | |
| 4009 | if (!valid_policy(policy)) |
| 4010 | return -EINVAL; |
| 4011 | } |
| 4012 | |
| 4013 | if (attr->sched_flags & ~(SCHED_FLAG_RESET_ON_FORK)) |
| 4014 | return -EINVAL; |
| 4015 | |
| 4016 | /* |
| 4017 | * Valid priorities for SCHED_FIFO and SCHED_RR are |
| 4018 | * 1..MAX_USER_RT_PRIO-1, valid priority for SCHED_NORMAL, |
| 4019 | * SCHED_BATCH and SCHED_IDLE is 0. |
| 4020 | */ |
| 4021 | if ((p->mm && attr->sched_priority > MAX_USER_RT_PRIO-1) || |
| 4022 | (!p->mm && attr->sched_priority > MAX_RT_PRIO-1)) |
| 4023 | return -EINVAL; |
| 4024 | if ((dl_policy(policy) && !__checkparam_dl(attr)) || |
| 4025 | (rt_policy(policy) != (attr->sched_priority != 0))) |
| 4026 | return -EINVAL; |
| 4027 | |
| 4028 | /* |
| 4029 | * Allow unprivileged RT tasks to decrease priority: |
| 4030 | */ |
| 4031 | if (user && !capable(CAP_SYS_NICE)) { |
| 4032 | if (fair_policy(policy)) { |
| 4033 | if (attr->sched_nice < task_nice(p) && |
| 4034 | !can_nice(p, attr->sched_nice)) |
| 4035 | return -EPERM; |
| 4036 | } |
| 4037 | |
| 4038 | if (rt_policy(policy)) { |
| 4039 | unsigned long rlim_rtprio = |
| 4040 | task_rlimit(p, RLIMIT_RTPRIO); |
| 4041 | |
| 4042 | /* can't set/change the rt policy */ |
| 4043 | if (policy != p->policy && !rlim_rtprio) |
| 4044 | return -EPERM; |
| 4045 | |
| 4046 | /* can't increase priority */ |
| 4047 | if (attr->sched_priority > p->rt_priority && |
| 4048 | attr->sched_priority > rlim_rtprio) |
| 4049 | return -EPERM; |
| 4050 | } |
| 4051 | |
| 4052 | /* |
| 4053 | * Can't set/change SCHED_DEADLINE policy at all for now |
| 4054 | * (safest behavior); in the future we would like to allow |
| 4055 | * unprivileged DL tasks to increase their relative deadline |
| 4056 | * or reduce their runtime (both ways reducing utilization) |
| 4057 | */ |
| 4058 | if (dl_policy(policy)) |
| 4059 | return -EPERM; |
| 4060 | |
| 4061 | /* |
| 4062 | * Treat SCHED_IDLE as nice 20. Only allow a switch to |
| 4063 | * SCHED_NORMAL if the RLIMIT_NICE would normally permit it. |
| 4064 | */ |
| 4065 | if (idle_policy(p->policy) && !idle_policy(policy)) { |
| 4066 | if (!can_nice(p, task_nice(p))) |
| 4067 | return -EPERM; |
| 4068 | } |
| 4069 | |
| 4070 | /* can't change other user's priorities */ |
| 4071 | if (!check_same_owner(p)) |
| 4072 | return -EPERM; |
| 4073 | |
| 4074 | /* Normal users shall not reset the sched_reset_on_fork flag */ |
| 4075 | if (p->sched_reset_on_fork && !reset_on_fork) |
| 4076 | return -EPERM; |
| 4077 | } |
| 4078 | |
| 4079 | if (user) { |
| 4080 | retval = security_task_setscheduler(p); |
| 4081 | if (retval) |
| 4082 | return retval; |
| 4083 | } |
| 4084 | |
| 4085 | /* |
| 4086 | * make sure no PI-waiters arrive (or leave) while we are |
| 4087 | * changing the priority of the task: |
| 4088 | * |
| 4089 | * To be able to change p->policy safely, the appropriate |
| 4090 | * runqueue lock must be held. |
| 4091 | */ |
| 4092 | rq = task_rq_lock(p, &rf); |
| 4093 | |
| 4094 | /* |
| 4095 |  * Changing the policy of the stop threads is a very bad idea.
| 4096 | */ |
| 4097 | if (p == rq->stop) { |
| 4098 | task_rq_unlock(rq, p, &rf); |
| 4099 | return -EINVAL; |
| 4100 | } |
| 4101 | |
| 4102 | /* |
| 4103 | * If not changing anything there's no need to proceed further, |
| 4104 | * but store a possible modification of reset_on_fork. |
| 4105 | */ |
| 4106 | if (unlikely(policy == p->policy)) { |
| 4107 | if (fair_policy(policy) && attr->sched_nice != task_nice(p)) |
| 4108 | goto change; |
| 4109 | if (rt_policy(policy) && attr->sched_priority != p->rt_priority) |
| 4110 | goto change; |
| 4111 | if (dl_policy(policy) && dl_param_changed(p, attr)) |
| 4112 | goto change; |
| 4113 | |
| 4114 | p->sched_reset_on_fork = reset_on_fork; |
| 4115 | task_rq_unlock(rq, p, &rf); |
| 4116 | return 0; |
| 4117 | } |
| 4118 | change: |
| 4119 | |
| 4120 | if (user) { |
| 4121 | #ifdef CONFIG_RT_GROUP_SCHED |
| 4122 | /* |
| 4123 | * Do not allow realtime tasks into groups that have no runtime |
| 4124 | * assigned. |
| 4125 | */ |
| 4126 | if (rt_bandwidth_enabled() && rt_policy(policy) && |
| 4127 | task_group(p)->rt_bandwidth.rt_runtime == 0 && |
| 4128 | !task_group_is_autogroup(task_group(p))) { |
| 4129 | task_rq_unlock(rq, p, &rf); |
| 4130 | return -EPERM; |
| 4131 | } |
| 4132 | #endif |
| 4133 | #ifdef CONFIG_SMP |
| 4134 | if (dl_bandwidth_enabled() && dl_policy(policy)) { |
| 4135 | cpumask_t *span = rq->rd->span; |
| 4136 | |
| 4137 | /* |
| 4138 | * Don't allow tasks with an affinity mask smaller than |
| 4139 | * the entire root_domain to become SCHED_DEADLINE. We |
| 4140 | * will also fail if there's no bandwidth available. |
| 4141 | */ |
| 4142 | if (!cpumask_subset(span, &p->cpus_allowed) || |
| 4143 | rq->rd->dl_bw.bw == 0) { |
| 4144 | task_rq_unlock(rq, p, &rf); |
| 4145 | return -EPERM; |
| 4146 | } |
| 4147 | } |
| 4148 | #endif |
| 4149 | } |
| 4150 | |
| 4151 | /* recheck policy now with rq lock held */ |
| 4152 | if (unlikely(oldpolicy != -1 && oldpolicy != p->policy)) { |
| 4153 | policy = oldpolicy = -1; |
| 4154 | task_rq_unlock(rq, p, &rf); |
| 4155 | goto recheck; |
| 4156 | } |
| 4157 | |
| 4158 | /* |
| 4159 | * If setscheduling to SCHED_DEADLINE (or changing the parameters |
| 4160 | * of a SCHED_DEADLINE task) we need to check if enough bandwidth |
| 4161 | * is available. |
| 4162 | */ |
| 4163 | if ((dl_policy(policy) || dl_task(p)) && dl_overflow(p, policy, attr)) { |
| 4164 | task_rq_unlock(rq, p, &rf); |
| 4165 | return -EBUSY; |
| 4166 | } |
| 4167 | |
| 4168 | p->sched_reset_on_fork = reset_on_fork; |
| 4169 | oldprio = p->prio; |
| 4170 | |
| 4171 | if (pi) { |
| 4172 | /* |
| 4173 | * Take priority boosted tasks into account. If the new |
| 4174 | * effective priority is unchanged, we just store the new |
| 4175 | * normal parameters and do not touch the scheduler class and |
| 4176 |  * the runqueue. This will be done when the task deboosts
| 4177 |  * itself.
| 4178 | */ |
| 4179 | new_effective_prio = rt_mutex_get_effective_prio(p, newprio); |
| 4180 | if (new_effective_prio == oldprio) |
| 4181 | queue_flags &= ~DEQUEUE_MOVE; |
| 4182 | } |
| 4183 | |
| 4184 | queued = task_on_rq_queued(p); |
| 4185 | running = task_current(rq, p); |
| 4186 | if (queued) |
| 4187 | dequeue_task(rq, p, queue_flags); |
| 4188 | if (running) |
| 4189 | put_prev_task(rq, p); |
| 4190 | |
| 4191 | prev_class = p->sched_class; |
| 4192 | __setscheduler(rq, p, attr, pi); |
| 4193 | |
| 4194 | if (running) |
| 4195 | p->sched_class->set_curr_task(rq); |
| 4196 | if (queued) { |
| 4197 | /* |
| 4198 | * We enqueue to tail when the priority of a task is |
| 4199 | * increased (user space view). |
| 4200 | */ |
| 4201 | if (oldprio < p->prio) |
| 4202 | queue_flags |= ENQUEUE_HEAD; |
| 4203 | |
| 4204 | enqueue_task(rq, p, queue_flags); |
| 4205 | } |
| 4206 | |
| 4207 | check_class_changed(rq, p, prev_class, oldprio); |
| 4208 | preempt_disable(); /* avoid rq from going away on us */ |
| 4209 | task_rq_unlock(rq, p, &rf); |
| 4210 | |
| 4211 | if (pi) |
| 4212 | rt_mutex_adjust_pi(p); |
| 4213 | |
| 4214 | /* |
| 4215 | * Run balance callbacks after we've adjusted the PI chain. |
| 4216 | */ |
| 4217 | balance_callback(rq); |
| 4218 | preempt_enable(); |
| 4219 | |
| 4220 | return 0; |
| 4221 | } |
| 4222 | |
| 4223 | static int _sched_setscheduler(struct task_struct *p, int policy, |
| 4224 | const struct sched_param *param, bool check) |
| 4225 | { |
| 4226 | struct sched_attr attr = { |
| 4227 | .sched_policy = policy, |
| 4228 | .sched_priority = param->sched_priority, |
| 4229 | .sched_nice = PRIO_TO_NICE(p->static_prio), |
| 4230 | }; |
| 4231 | |
| 4232 | /* Fixup the legacy SCHED_RESET_ON_FORK hack. */ |
| 4233 | if ((policy != SETPARAM_POLICY) && (policy & SCHED_RESET_ON_FORK)) { |
| 4234 | attr.sched_flags |= SCHED_FLAG_RESET_ON_FORK; |
| 4235 | policy &= ~SCHED_RESET_ON_FORK; |
| 4236 | attr.sched_policy = policy; |
| 4237 | } |
| 4238 | |
| 4239 | return __sched_setscheduler(p, &attr, check, true); |
| 4240 | } |
| 4241 | /** |
| 4242 | * sched_setscheduler - change the scheduling policy and/or RT priority of a thread. |
| 4243 | * @p: the task in question. |
| 4244 | * @policy: new policy. |
| 4245 | * @param: structure containing the new RT priority. |
| 4246 | * |
| 4247 | * Return: 0 on success. An error code otherwise. |
| 4248 | * |
| 4249 |  * NOTE that the task may already be dead.
| 4250 | */ |
| 4251 | int sched_setscheduler(struct task_struct *p, int policy, |
| 4252 | const struct sched_param *param) |
| 4253 | { |
| 4254 | return _sched_setscheduler(p, policy, param, true); |
| 4255 | } |
| 4256 | EXPORT_SYMBOL_GPL(sched_setscheduler); |
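|      | 
|      | /*
|      |  * Minimal in-kernel usage sketch (hypothetical caller, error handling
|      |  * elided): promote a known task to a mid-range FIFO priority:
|      |  *
|      |  *	struct sched_param param = { .sched_priority = 50 };
|      |  *
|      |  *	if (sched_setscheduler(task, SCHED_FIFO, &param))
|      |  *		pr_warn("could not make task RT\n");
|      |  *
|      |  * This variant passes check == true, so the capability and rlimit
|      |  * tests in __sched_setscheduler() are applied to the caller.
|      |  */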
| 4257 | |
| 4258 | int sched_setattr(struct task_struct *p, const struct sched_attr *attr) |
| 4259 | { |
| 4260 | return __sched_setscheduler(p, attr, true, true); |
| 4261 | } |
| 4262 | EXPORT_SYMBOL_GPL(sched_setattr); |
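|      | 
|      | /*
|      |  * Sketch of a -deadline request through this interface (the values
|      |  * are hypothetical; a real caller must also pass the admission test):
|      |  *
|      |  *	struct sched_attr attr = {
|      |  *		.size		= sizeof(attr),
|      |  *		.sched_policy	= SCHED_DEADLINE,
|      |  *		.sched_runtime	=  10 * NSEC_PER_MSEC,
|      |  *		.sched_deadline	=  30 * NSEC_PER_MSEC,
|      |  *		.sched_period	= 100 * NSEC_PER_MSEC,
|      |  *	};
|      |  *
|      |  *	ret = sched_setattr(p, &attr);
|      |  *
|      |  * __checkparam_dl() accepts this since runtime <= deadline <= period.
|      |  */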
| 4263 | |
| 4264 | /** |
| 4265 | * sched_setscheduler_nocheck - change the scheduling policy and/or RT priority of a thread from kernelspace. |
| 4266 | * @p: the task in question. |
| 4267 | * @policy: new policy. |
| 4268 | * @param: structure containing the new RT priority. |
| 4269 | * |
| 4270 | * Just like sched_setscheduler, only don't bother checking if the |
| 4271 | * current context has permission. For example, this is needed in |
| 4272 | * stop_machine(): we create temporary high priority worker threads, |
| 4273 | * but our caller might not have that capability. |
| 4274 | * |
| 4275 | * Return: 0 on success. An error code otherwise. |
| 4276 | */ |
| 4277 | int sched_setscheduler_nocheck(struct task_struct *p, int policy, |
| 4278 | const struct sched_param *param) |
| 4279 | { |
| 4280 | return _sched_setscheduler(p, policy, param, false); |
| 4281 | } |
| 4282 | EXPORT_SYMBOL_GPL(sched_setscheduler_nocheck); |
| 4283 | |
| 4284 | static int |
| 4285 | do_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param) |
| 4286 | { |
| 4287 | struct sched_param lparam; |
| 4288 | struct task_struct *p; |
| 4289 | int retval; |
| 4290 | |
| 4291 | if (!param || pid < 0) |
| 4292 | return -EINVAL; |
| 4293 | if (copy_from_user(&lparam, param, sizeof(struct sched_param))) |
| 4294 | return -EFAULT; |
| 4295 | |
| 4296 | rcu_read_lock(); |
| 4297 | retval = -ESRCH; |
| 4298 | p = find_process_by_pid(pid); |
| 4299 | if (p != NULL) |
| 4300 | retval = sched_setscheduler(p, policy, &lparam); |
| 4301 | rcu_read_unlock(); |
| 4302 | |
| 4303 | return retval; |
| 4304 | } |
| 4305 | |
| 4306 | /* |
| 4307 | * Mimics kernel/events/core.c perf_copy_attr(). |
| 4308 | */ |
| 4309 | static int sched_copy_attr(struct sched_attr __user *uattr, |
| 4310 | struct sched_attr *attr) |
| 4311 | { |
| 4312 | u32 size; |
| 4313 | int ret; |
| 4314 | |
| 4315 | if (!access_ok(VERIFY_WRITE, uattr, SCHED_ATTR_SIZE_VER0)) |
| 4316 | return -EFAULT; |
| 4317 | |
| 4318 | /* |
| 4319 |  * Zero the full structure, so that a short copy leaves the tail zeroed.
| 4320 | */ |
| 4321 | memset(attr, 0, sizeof(*attr)); |
| 4322 | |
| 4323 | ret = get_user(size, &uattr->size); |
| 4324 | if (ret) |
| 4325 | return ret; |
| 4326 | |
| 4327 | if (size > PAGE_SIZE) /* silly large */ |
| 4328 | goto err_size; |
| 4329 | |
| 4330 | if (!size) /* abi compat */ |
| 4331 | size = SCHED_ATTR_SIZE_VER0; |
| 4332 | |
| 4333 | if (size < SCHED_ATTR_SIZE_VER0) |
| 4334 | goto err_size; |
| 4335 | |
| 4336 | /* |
| 4337 | * If we're handed a bigger struct than we know of, |
| 4338 | * ensure all the unknown bits are 0 - i.e. new |
| 4339 | * user-space does not rely on any kernel feature |
| 4340 |  * extensions we don't know about yet.
| 4341 | */ |
| 4342 | if (size > sizeof(*attr)) { |
| 4343 | unsigned char __user *addr; |
| 4344 | unsigned char __user *end; |
| 4345 | unsigned char val; |
| 4346 | |
| 4347 | addr = (void __user *)uattr + sizeof(*attr); |
| 4348 | end = (void __user *)uattr + size; |
| 4349 | |
| 4350 | for (; addr < end; addr++) { |
| 4351 | ret = get_user(val, addr); |
| 4352 | if (ret) |
| 4353 | return ret; |
| 4354 | if (val) |
| 4355 | goto err_size; |
| 4356 | } |
| 4357 | size = sizeof(*attr); |
| 4358 | } |
| 4359 | |
| 4360 | ret = copy_from_user(attr, uattr, size); |
| 4361 | if (ret) |
| 4362 | return -EFAULT; |
| 4363 | |
| 4364 | /* |
| 4365 | * XXX: do we want to be lenient like existing syscalls; or do we want |
| 4366 | * to be strict and return an error on out-of-bounds values? |
| 4367 | */ |
| 4368 | attr->sched_nice = clamp(attr->sched_nice, MIN_NICE, MAX_NICE); |
| 4369 | |
| 4370 | return 0; |
| 4371 | |
| 4372 | err_size: |
| 4373 | put_user(sizeof(*attr), &uattr->size); |
| 4374 | return -E2BIG; |
| 4375 | } |
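|      | 
|      | /*
|      |  * The size handshake above is what gives two-way ABI compatibility
|      |  * (sizes below are illustrative):
|      |  *
|      |  *	user size < kernel size: the memset() leaves the fields the old
|      |  *	    binary does not know about zeroed, and zero must always mean
|      |  *	    'no change in behaviour'.
|      |  *
|      |  *	user size > kernel size: the tail is scanned and must be all
|      |  *	    zeroes; otherwise userspace asked for a feature this kernel
|      |  *	    lacks and we fail with -E2BIG, writing the size we do
|      |  *	    understand back to uattr->size as a hint.
|      |  */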
| 4376 | |
| 4377 | /** |
| 4378 | * sys_sched_setscheduler - set/change the scheduler policy and RT priority |
| 4379 | * @pid: the pid in question. |
| 4380 | * @policy: new policy. |
| 4381 | * @param: structure containing the new RT priority. |
| 4382 | * |
| 4383 | * Return: 0 on success. An error code otherwise. |
| 4384 | */ |
| 4385 | SYSCALL_DEFINE3(sched_setscheduler, pid_t, pid, int, policy, |
| 4386 | struct sched_param __user *, param) |
| 4387 | { |
| 4388 | /* negative values for policy are not valid */ |
| 4389 | if (policy < 0) |
| 4390 | return -EINVAL; |
| 4391 | |
| 4392 | return do_sched_setscheduler(pid, policy, param); |
| 4393 | } |
| 4394 | |
| 4395 | /** |
| 4396 | * sys_sched_setparam - set/change the RT priority of a thread |
| 4397 | * @pid: the pid in question. |
| 4398 | * @param: structure containing the new RT priority. |
| 4399 | * |
| 4400 | * Return: 0 on success. An error code otherwise. |
| 4401 | */ |
| 4402 | SYSCALL_DEFINE2(sched_setparam, pid_t, pid, struct sched_param __user *, param) |
| 4403 | { |
| 4404 | return do_sched_setscheduler(pid, SETPARAM_POLICY, param); |
| 4405 | } |
| 4406 | |
| 4407 | /** |
| 4408 | * sys_sched_setattr - same as above, but with extended sched_attr |
| 4409 | * @pid: the pid in question. |
| 4410 | * @uattr: structure containing the extended parameters. |
| 4411 | * @flags: for future extension. |
| 4412 | */ |
| 4413 | SYSCALL_DEFINE3(sched_setattr, pid_t, pid, struct sched_attr __user *, uattr, |
| 4414 | unsigned int, flags) |
| 4415 | { |
| 4416 | struct sched_attr attr; |
| 4417 | struct task_struct *p; |
| 4418 | int retval; |
| 4419 | |
| 4420 | if (!uattr || pid < 0 || flags) |
| 4421 | return -EINVAL; |
| 4422 | |
| 4423 | retval = sched_copy_attr(uattr, &attr); |
| 4424 | if (retval) |
| 4425 | return retval; |
| 4426 | |
| 4427 | if ((int)attr.sched_policy < 0) |
| 4428 | return -EINVAL; |
| 4429 | |
| 4430 | rcu_read_lock(); |
| 4431 | retval = -ESRCH; |
| 4432 | p = find_process_by_pid(pid); |
| 4433 | if (p != NULL) |
| 4434 | retval = sched_setattr(p, &attr); |
| 4435 | rcu_read_unlock(); |
| 4436 | |
| 4437 | return retval; |
| 4438 | } |
| 4439 | |
| 4440 | /** |
| 4441 | * sys_sched_getscheduler - get the policy (scheduling class) of a thread |
| 4442 | * @pid: the pid in question. |
| 4443 | * |
| 4444 | * Return: On success, the policy of the thread. Otherwise, a negative error |
| 4445 | * code. |
| 4446 | */ |
| 4447 | SYSCALL_DEFINE1(sched_getscheduler, pid_t, pid) |
| 4448 | { |
| 4449 | struct task_struct *p; |
| 4450 | int retval; |
| 4451 | |
| 4452 | if (pid < 0) |
| 4453 | return -EINVAL; |
| 4454 | |
| 4455 | retval = -ESRCH; |
| 4456 | rcu_read_lock(); |
| 4457 | p = find_process_by_pid(pid); |
| 4458 | if (p) { |
| 4459 | retval = security_task_getscheduler(p); |
| 4460 | if (!retval) |
| 4461 | retval = p->policy |
| 4462 | | (p->sched_reset_on_fork ? SCHED_RESET_ON_FORK : 0); |
| 4463 | } |
| 4464 | rcu_read_unlock(); |
| 4465 | return retval; |
| 4466 | } |
| 4467 | |
| 4468 | /** |
| 4469 | * sys_sched_getparam - get the RT priority of a thread |
| 4470 | * @pid: the pid in question. |
| 4471 | * @param: structure containing the RT priority. |
| 4472 | * |
| 4473 | * Return: On success, 0 and the RT priority is in @param. Otherwise, an error |
| 4474 | * code. |
| 4475 | */ |
| 4476 | SYSCALL_DEFINE2(sched_getparam, pid_t, pid, struct sched_param __user *, param) |
| 4477 | { |
| 4478 | struct sched_param lp = { .sched_priority = 0 }; |
| 4479 | struct task_struct *p; |
| 4480 | int retval; |
| 4481 | |
| 4482 | if (!param || pid < 0) |
| 4483 | return -EINVAL; |
| 4484 | |
| 4485 | rcu_read_lock(); |
| 4486 | p = find_process_by_pid(pid); |
| 4487 | retval = -ESRCH; |
| 4488 | if (!p) |
| 4489 | goto out_unlock; |
| 4490 | |
| 4491 | retval = security_task_getscheduler(p); |
| 4492 | if (retval) |
| 4493 | goto out_unlock; |
| 4494 | |
| 4495 | if (task_has_rt_policy(p)) |
| 4496 | lp.sched_priority = p->rt_priority; |
| 4497 | rcu_read_unlock(); |
| 4498 | |
| 4499 | /* |
| 4500 |  * This one might sleep; we cannot do it with a spinlock held ...
| 4501 | */ |
| 4502 | retval = copy_to_user(param, &lp, sizeof(*param)) ? -EFAULT : 0; |
| 4503 | |
| 4504 | return retval; |
| 4505 | |
| 4506 | out_unlock: |
| 4507 | rcu_read_unlock(); |
| 4508 | return retval; |
| 4509 | } |
| 4510 | |
| 4511 | static int sched_read_attr(struct sched_attr __user *uattr, |
| 4512 | struct sched_attr *attr, |
| 4513 | unsigned int usize) |
| 4514 | { |
| 4515 | int ret; |
| 4516 | |
| 4517 | if (!access_ok(VERIFY_WRITE, uattr, usize)) |
| 4518 | return -EFAULT; |
| 4519 | |
| 4520 | /* |
| 4521 | * If we're handed a smaller struct than we know of, |
| 4522 | * ensure all the unknown bits are 0 - i.e. old |
| 4523 |  * user-space does not get incomplete information.
| 4524 | */ |
| 4525 | if (usize < sizeof(*attr)) { |
| 4526 | unsigned char *addr; |
| 4527 | unsigned char *end; |
| 4528 | |
| 4529 | addr = (void *)attr + usize; |
| 4530 | end = (void *)attr + sizeof(*attr); |
| 4531 | |
| 4532 | for (; addr < end; addr++) { |
| 4533 | if (*addr) |
| 4534 | return -EFBIG; |
| 4535 | } |
| 4536 | |
| 4537 | attr->size = usize; |
| 4538 | } |
| 4539 | |
| 4540 | ret = copy_to_user(uattr, attr, attr->size); |
| 4541 | if (ret) |
| 4542 | return -EFAULT; |
| 4543 | |
| 4544 | return 0; |
| 4545 | } |
| 4546 | |
| 4547 | /** |
| 4548 | * sys_sched_getattr - similar to sched_getparam, but with sched_attr |
| 4549 | * @pid: the pid in question. |
| 4550 | * @uattr: structure containing the extended parameters. |
| 4551 | * @size: sizeof(attr) for fwd/bwd comp. |
| 4552 | * @flags: for future extension. |
| 4553 | */ |
| 4554 | SYSCALL_DEFINE4(sched_getattr, pid_t, pid, struct sched_attr __user *, uattr, |
| 4555 | unsigned int, size, unsigned int, flags) |
| 4556 | { |
| 4557 | struct sched_attr attr = { |
| 4558 | .size = sizeof(struct sched_attr), |
| 4559 | }; |
| 4560 | struct task_struct *p; |
| 4561 | int retval; |
| 4562 | |
| 4563 | if (!uattr || pid < 0 || size > PAGE_SIZE || |
| 4564 | size < SCHED_ATTR_SIZE_VER0 || flags) |
| 4565 | return -EINVAL; |
| 4566 | |
| 4567 | rcu_read_lock(); |
| 4568 | p = find_process_by_pid(pid); |
| 4569 | retval = -ESRCH; |
| 4570 | if (!p) |
| 4571 | goto out_unlock; |
| 4572 | |
| 4573 | retval = security_task_getscheduler(p); |
| 4574 | if (retval) |
| 4575 | goto out_unlock; |
| 4576 | |
| 4577 | attr.sched_policy = p->policy; |
| 4578 | if (p->sched_reset_on_fork) |
| 4579 | attr.sched_flags |= SCHED_FLAG_RESET_ON_FORK; |
| 4580 | if (task_has_dl_policy(p)) |
| 4581 | __getparam_dl(p, &attr); |
| 4582 | else if (task_has_rt_policy(p)) |
| 4583 | attr.sched_priority = p->rt_priority; |
| 4584 | else |
| 4585 | attr.sched_nice = task_nice(p); |
| 4586 | |
| 4587 | rcu_read_unlock(); |
| 4588 | |
| 4589 | retval = sched_read_attr(uattr, &attr, size); |
| 4590 | return retval; |
| 4591 | |
| 4592 | out_unlock: |
| 4593 | rcu_read_unlock(); |
| 4594 | return retval; |
| 4595 | } |
| 4596 | |
| 4597 | long sched_setaffinity(pid_t pid, const struct cpumask *in_mask) |
| 4598 | { |
| 4599 | cpumask_var_t cpus_allowed, new_mask; |
| 4600 | struct task_struct *p; |
| 4601 | int retval; |
| 4602 | |
| 4603 | rcu_read_lock(); |
| 4604 | |
| 4605 | p = find_process_by_pid(pid); |
| 4606 | if (!p) { |
| 4607 | rcu_read_unlock(); |
| 4608 | return -ESRCH; |
| 4609 | } |
| 4610 | |
| 4611 | /* Prevent p going away */ |
| 4612 | get_task_struct(p); |
| 4613 | rcu_read_unlock(); |
| 4614 | |
| 4615 | if (p->flags & PF_NO_SETAFFINITY) { |
| 4616 | retval = -EINVAL; |
| 4617 | goto out_put_task; |
| 4618 | } |
| 4619 | if (!alloc_cpumask_var(&cpus_allowed, GFP_KERNEL)) { |
| 4620 | retval = -ENOMEM; |
| 4621 | goto out_put_task; |
| 4622 | } |
| 4623 | if (!alloc_cpumask_var(&new_mask, GFP_KERNEL)) { |
| 4624 | retval = -ENOMEM; |
| 4625 | goto out_free_cpus_allowed; |
| 4626 | } |
| 4627 | retval = -EPERM; |
| 4628 | if (!check_same_owner(p)) { |
| 4629 | rcu_read_lock(); |
| 4630 | if (!ns_capable(__task_cred(p)->user_ns, CAP_SYS_NICE)) { |
| 4631 | rcu_read_unlock(); |
| 4632 | goto out_free_new_mask; |
| 4633 | } |
| 4634 | rcu_read_unlock(); |
| 4635 | } |
| 4636 | |
| 4637 | retval = security_task_setscheduler(p); |
| 4638 | if (retval) |
| 4639 | goto out_free_new_mask; |
| 4640 | |
| 4641 | |
| 4642 | cpuset_cpus_allowed(p, cpus_allowed); |
| 4643 | cpumask_and(new_mask, in_mask, cpus_allowed); |
| 4644 | |
| 4645 | /* |
| 4646 |  * Since bandwidth control happens on a root_domain basis,
| 4647 |  * if the admission test is enabled we only admit -deadline
| 4648 |  * tasks that are allowed to run on all the CPUs in their
| 4649 |  * root_domain.
| 4650 | */ |
| 4651 | #ifdef CONFIG_SMP |
| 4652 | if (task_has_dl_policy(p) && dl_bandwidth_enabled()) { |
| 4653 | rcu_read_lock(); |
| 4654 | if (!cpumask_subset(task_rq(p)->rd->span, new_mask)) { |
| 4655 | retval = -EBUSY; |
| 4656 | rcu_read_unlock(); |
| 4657 | goto out_free_new_mask; |
| 4658 | } |
| 4659 | rcu_read_unlock(); |
| 4660 | } |
| 4661 | #endif |
| 4662 | again: |
| 4663 | retval = __set_cpus_allowed_ptr(p, new_mask, true); |
| 4664 | |
| 4665 | if (!retval) { |
| 4666 | cpuset_cpus_allowed(p, cpus_allowed); |
| 4667 | if (!cpumask_subset(new_mask, cpus_allowed)) { |
| 4668 | /* |
| 4669 | * We must have raced with a concurrent cpuset |
| 4670 | * update. Just reset the cpus_allowed to the |
| 4671 | * cpuset's cpus_allowed |
| 4672 | */ |
| 4673 | cpumask_copy(new_mask, cpus_allowed); |
| 4674 | goto again; |
| 4675 | } |
| 4676 | } |
| 4677 | out_free_new_mask: |
| 4678 | free_cpumask_var(new_mask); |
| 4679 | out_free_cpus_allowed: |
| 4680 | free_cpumask_var(cpus_allowed); |
| 4681 | out_put_task: |
| 4682 | put_task_struct(p); |
| 4683 | return retval; |
| 4684 | } |
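|      | 
|      | /*
|      |  * In-kernel usage sketch (hypothetical caller, error handling
|      |  * elided): pin the task with PID 1234 to CPU 2:
|      |  *
|      |  *	cpumask_var_t mask;
|      |  *
|      |  *	if (alloc_cpumask_var(&mask, GFP_KERNEL)) {
|      |  *		cpumask_clear(mask);
|      |  *		cpumask_set_cpu(2, mask);
|      |  *		sched_setaffinity(1234, mask);	// ANDed with the
|      |  *						// task's cpuset above
|      |  *		free_cpumask_var(mask);
|      |  *	}
|      |  */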
| 4685 | |
| 4686 | static int get_user_cpu_mask(unsigned long __user *user_mask_ptr, unsigned len, |
| 4687 | struct cpumask *new_mask) |
| 4688 | { |
| 4689 | if (len < cpumask_size()) |
| 4690 | cpumask_clear(new_mask); |
| 4691 | else if (len > cpumask_size()) |
| 4692 | len = cpumask_size(); |
| 4693 | |
| 4694 | return copy_from_user(new_mask, user_mask_ptr, len) ? -EFAULT : 0; |
| 4695 | } |
| 4696 | |
| 4697 | /** |
| 4698 | * sys_sched_setaffinity - set the cpu affinity of a process |
| 4699 | * @pid: pid of the process |
| 4700 | * @len: length in bytes of the bitmask pointed to by user_mask_ptr |
| 4701 | * @user_mask_ptr: user-space pointer to the new cpu mask |
| 4702 | * |
| 4703 | * Return: 0 on success. An error code otherwise. |
| 4704 | */ |
| 4705 | SYSCALL_DEFINE3(sched_setaffinity, pid_t, pid, unsigned int, len, |
| 4706 | unsigned long __user *, user_mask_ptr) |
| 4707 | { |
| 4708 | cpumask_var_t new_mask; |
| 4709 | int retval; |
| 4710 | |
| 4711 | if (!alloc_cpumask_var(&new_mask, GFP_KERNEL)) |
| 4712 | return -ENOMEM; |
| 4713 | |
| 4714 | retval = get_user_cpu_mask(user_mask_ptr, len, new_mask); |
| 4715 | if (retval == 0) |
| 4716 | retval = sched_setaffinity(pid, new_mask); |
| 4717 | free_cpumask_var(new_mask); |
| 4718 | return retval; |
| 4719 | } |
| 4720 | |
| 4721 | long sched_getaffinity(pid_t pid, struct cpumask *mask) |
| 4722 | { |
| 4723 | struct task_struct *p; |
| 4724 | unsigned long flags; |
| 4725 | int retval; |
| 4726 | |
| 4727 | rcu_read_lock(); |
| 4728 | |
| 4729 | retval = -ESRCH; |
| 4730 | p = find_process_by_pid(pid); |
| 4731 | if (!p) |
| 4732 | goto out_unlock; |
| 4733 | |
| 4734 | retval = security_task_getscheduler(p); |
| 4735 | if (retval) |
| 4736 | goto out_unlock; |
| 4737 | |
| 4738 | raw_spin_lock_irqsave(&p->pi_lock, flags); |
| 4739 | cpumask_and(mask, &p->cpus_allowed, cpu_active_mask); |
| 4740 | raw_spin_unlock_irqrestore(&p->pi_lock, flags); |
| 4741 | |
| 4742 | out_unlock: |
| 4743 | rcu_read_unlock(); |
| 4744 | |
| 4745 | return retval; |
| 4746 | } |
| 4747 | |
| 4748 | /** |
| 4749 | * sys_sched_getaffinity - get the cpu affinity of a process |
| 4750 | * @pid: pid of the process |
| 4751 | * @len: length in bytes of the bitmask pointed to by user_mask_ptr |
| 4752 | * @user_mask_ptr: user-space pointer to hold the current cpu mask |
| 4753 | * |
| 4754 | * Return: 0 on success. An error code otherwise. |
| 4755 | */ |
| 4756 | SYSCALL_DEFINE3(sched_getaffinity, pid_t, pid, unsigned int, len, |
| 4757 | unsigned long __user *, user_mask_ptr) |
| 4758 | { |
| 4759 | int ret; |
| 4760 | cpumask_var_t mask; |
| 4761 | |
| 4762 | if ((len * BITS_PER_BYTE) < nr_cpu_ids) |
| 4763 | return -EINVAL; |
| 4764 | if (len & (sizeof(unsigned long)-1)) |
| 4765 | return -EINVAL; |
| 4766 | |
| 4767 | if (!alloc_cpumask_var(&mask, GFP_KERNEL)) |
| 4768 | return -ENOMEM; |
| 4769 | |
| 4770 | ret = sched_getaffinity(pid, mask); |
| 4771 | if (ret == 0) { |
| 4772 | size_t retlen = min_t(size_t, len, cpumask_size()); |
| 4773 | |
| 4774 | if (copy_to_user(user_mask_ptr, mask, retlen)) |
| 4775 | ret = -EFAULT; |
| 4776 | else |
| 4777 | ret = retlen; |
| 4778 | } |
| 4779 | free_cpumask_var(mask); |
| 4780 | |
| 4781 | return ret; |
| 4782 | } |
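|      | 
|      | /*
|      |  * Note the asymmetry with sys_sched_setaffinity(): on success this
|      |  * returns the number of bytes copied into the user mask, not 0. Raw
|      |  * userspace sketch (hypothetical; the glibc wrapper hides this):
|      |  *
|      |  *	unsigned long mask[1024 / (8 * sizeof(long))] = { 0 };
|      |  *	long n = syscall(SYS_sched_getaffinity, 0, sizeof(mask), mask);
|      |  *	// n > 0: number of mask bytes the kernel filled in
|      |  */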
| 4783 | |
| 4784 | /** |
| 4785 | * sys_sched_yield - yield the current processor to other threads. |
| 4786 | * |
| 4787 | * This function yields the current CPU to other tasks. If there are no |
| 4788 | * other threads running on this CPU then this function will return. |
| 4789 | * |
| 4790 | * Return: 0. |
| 4791 | */ |
| 4792 | SYSCALL_DEFINE0(sched_yield) |
| 4793 | { |
| 4794 | struct rq *rq = this_rq_lock(); |
| 4795 | |
| 4796 | schedstat_inc(rq, yld_count); |
| 4797 | current->sched_class->yield_task(rq); |
| 4798 | |
| 4799 | /* |
| 4800 | * Since we are going to call schedule() anyway, there's |
| 4801 | * no need to preempt or enable interrupts: |
| 4802 | */ |
| 4803 | __release(rq->lock); |
| 4804 | spin_release(&rq->lock.dep_map, 1, _THIS_IP_); |
| 4805 | do_raw_spin_unlock(&rq->lock); |
| 4806 | sched_preempt_enable_no_resched(); |
| 4807 | |
| 4808 | schedule(); |
| 4809 | |
| 4810 | return 0; |
| 4811 | } |
| 4812 | |
| 4813 | int __sched _cond_resched(void) |
| 4814 | { |
| 4815 | if (should_resched(0)) { |
| 4816 | preempt_schedule_common(); |
| 4817 | return 1; |
| 4818 | } |
| 4819 | return 0; |
| 4820 | } |
| 4821 | EXPORT_SYMBOL(_cond_resched); |
| 4822 | |
| 4823 | /* |
| 4824 | * __cond_resched_lock() - if a reschedule is pending, drop the given lock, |
| 4825 | * call schedule, and on return reacquire the lock. |
| 4826 | * |
| 4827 | * This works OK both with and without CONFIG_PREEMPT. We do strange low-level |
| 4828 | * operations here to prevent schedule() from being called twice (once via |
| 4829 | * spin_unlock(), once by hand). |
| 4830 | */ |
| 4831 | int __cond_resched_lock(spinlock_t *lock) |
| 4832 | { |
| 4833 | int resched = should_resched(PREEMPT_LOCK_OFFSET); |
| 4834 | int ret = 0; |
| 4835 | |
| 4836 | lockdep_assert_held(lock); |
| 4837 | |
| 4838 | if (spin_needbreak(lock) || resched) { |
| 4839 | spin_unlock(lock); |
| 4840 | if (resched) |
| 4841 | preempt_schedule_common(); |
| 4842 | else |
| 4843 | cpu_relax(); |
| 4844 | ret = 1; |
| 4845 | spin_lock(lock); |
| 4846 | } |
| 4847 | return ret; |
| 4848 | } |
| 4849 | EXPORT_SYMBOL(__cond_resched_lock); |
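|      | 
|      | /*
|      |  * Typical pattern this serves (a sketch, not from this file;
|      |  * pick_next_object()/process() are hypothetical): a long scan under
|      |  * a spinlock that wants to stay preemption- and contention-friendly:
|      |  *
|      |  *	spin_lock(&lock);
|      |  *	while ((obj = pick_next_object()) != NULL) {
|      |  *		process(obj);
|      |  *		if (cond_resched_lock(&lock)) {
|      |  *			// lock was dropped and retaken: any state
|      |  *			// cached under it must be revalidated
|      |  *		}
|      |  *	}
|      |  *	spin_unlock(&lock);
|      |  *
|      |  * cond_resched_lock() is the wrapper that feeds this function.
|      |  */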
| 4850 | |
| 4851 | int __sched __cond_resched_softirq(void) |
| 4852 | { |
| 4853 | BUG_ON(!in_softirq()); |
| 4854 | |
| 4855 | if (should_resched(SOFTIRQ_DISABLE_OFFSET)) { |
| 4856 | local_bh_enable(); |
| 4857 | preempt_schedule_common(); |
| 4858 | local_bh_disable(); |
| 4859 | return 1; |
| 4860 | } |
| 4861 | return 0; |
| 4862 | } |
| 4863 | EXPORT_SYMBOL(__cond_resched_softirq); |
| 4864 | |
| 4865 | /** |
| 4866 | * yield - yield the current processor to other threads. |
| 4867 | * |
| 4868 | * Do not ever use this function, there's a 99% chance you're doing it wrong. |
| 4869 | * |
| 4870 | * The scheduler is at all times free to pick the calling task as the most |
| 4871 |  * eligible task to run; if removing the yield() call from your code breaks
| 4872 |  * it, it's already broken.
| 4873 | * |
| 4874 | * Typical broken usage is: |
| 4875 | * |
| 4876 | * while (!event) |
| 4877 | * yield(); |
| 4878 | * |
| 4879 | * where one assumes that yield() will let 'the other' process run that will |
| 4880 | * make event true. If the current task is a SCHED_FIFO task that will never |
| 4881 | * happen. Never use yield() as a progress guarantee!! |
| 4882 | * |
| 4883 | * If you want to use yield() to wait for something, use wait_event(). |
| 4884 | * If you want to use yield() to be 'nice' for others, use cond_resched(). |
| 4885 | * If you still want to use yield(), do not! |
| 4886 | */ |
| 4887 | void __sched yield(void) |
| 4888 | { |
| 4889 | set_current_state(TASK_RUNNING); |
| 4890 | sys_sched_yield(); |
| 4891 | } |
| 4892 | EXPORT_SYMBOL(yield); |
| 4893 | |
| 4894 | /** |
| 4895 | * yield_to - yield the current processor to another thread in |
| 4896 | * your thread group, or accelerate that thread toward the |
| 4897 | * processor it's on. |
| 4898 | * @p: target task |
| 4899 | * @preempt: whether task preemption is allowed or not |
| 4900 | * |
| 4901 | * It's the caller's job to ensure that the target task struct |
| 4902 | * can't go away on us before we can do any checks. |
| 4903 | * |
| 4904 | * Return: |
| 4905 | * true (>0) if we indeed boosted the target task. |
| 4906 | * false (0) if we failed to boost the target. |
| 4907 | * -ESRCH if there's no task to yield to. |
| 4908 | */ |
| 4909 | int __sched yield_to(struct task_struct *p, bool preempt) |
| 4910 | { |
| 4911 | struct task_struct *curr = current; |
| 4912 | struct rq *rq, *p_rq; |
| 4913 | unsigned long flags; |
| 4914 | int yielded = 0; |
| 4915 | |
| 4916 | local_irq_save(flags); |
| 4917 | rq = this_rq(); |
| 4918 | |
| 4919 | again: |
| 4920 | p_rq = task_rq(p); |
| 4921 | /* |
| 4922 | * If we're the only runnable task on the rq and target rq also |
| 4923 | * has only one task, there's absolutely no point in yielding. |
| 4924 | */ |
| 4925 | if (rq->nr_running == 1 && p_rq->nr_running == 1) { |
| 4926 | yielded = -ESRCH; |
| 4927 | goto out_irq; |
| 4928 | } |
| 4929 | |
| 4930 | double_rq_lock(rq, p_rq); |
| 4931 | if (task_rq(p) != p_rq) { |
| 4932 | double_rq_unlock(rq, p_rq); |
| 4933 | goto again; |
| 4934 | } |
| 4935 | |
| 4936 | if (!curr->sched_class->yield_to_task) |
| 4937 | goto out_unlock; |
| 4938 | |
| 4939 | if (curr->sched_class != p->sched_class) |
| 4940 | goto out_unlock; |
| 4941 | |
| 4942 | if (task_running(p_rq, p) || p->state) |
| 4943 | goto out_unlock; |
| 4944 | |
| 4945 | yielded = curr->sched_class->yield_to_task(rq, p, preempt); |
| 4946 | if (yielded) { |
| 4947 | schedstat_inc(rq, yld_count); |
| 4948 | /* |
 * Make p's CPU reschedule; pick_next_entity() takes care of
 * fairness.
| 4951 | */ |
| 4952 | if (preempt && rq != p_rq) |
| 4953 | resched_curr(p_rq); |
| 4954 | } |
| 4955 | |
| 4956 | out_unlock: |
| 4957 | double_rq_unlock(rq, p_rq); |
| 4958 | out_irq: |
| 4959 | local_irq_restore(flags); |
| 4960 | |
| 4961 | if (yielded > 0) |
| 4962 | schedule(); |
| 4963 | |
| 4964 | return yielded; |
| 4965 | } |
| 4966 | EXPORT_SYMBOL_GPL(yield_to); |
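/*
 * Illustrative caller of yield_to() (a sketch; historically the main
 * user is KVM, boosting a vCPU suspected of holding a spinlock). The
 * caller pins the target, e.g. under RCU, per the comment above:
 *
 *	rcu_read_lock();
 *	ret = yield_to(target, true);
 *	rcu_read_unlock();
 *
 * A ret > 0 means the yield succeeded and this task has already been
 * through schedule() by the time yield_to() returns.
 */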
| 4967 | |
| 4968 | /* |
| 4969 | * This task is about to go to sleep on IO. Increment rq->nr_iowait so |
| 4970 | * that process accounting knows that this is a task in IO wait state. |
| 4971 | */ |
| 4972 | long __sched io_schedule_timeout(long timeout) |
| 4973 | { |
| 4974 | int old_iowait = current->in_iowait; |
| 4975 | struct rq *rq; |
| 4976 | long ret; |
| 4977 | |
| 4978 | current->in_iowait = 1; |
| 4979 | blk_schedule_flush_plug(current); |
| 4980 | |
| 4981 | delayacct_blkio_start(); |
| 4982 | rq = raw_rq(); |
| 4983 | atomic_inc(&rq->nr_iowait); |
| 4984 | ret = schedule_timeout(timeout); |
| 4985 | current->in_iowait = old_iowait; |
| 4986 | atomic_dec(&rq->nr_iowait); |
| 4987 | delayacct_blkio_end(); |
| 4988 | |
| 4989 | return ret; |
| 4990 | } |
| 4991 | EXPORT_SYMBOL(io_schedule_timeout); |
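/*
 * Usage sketch: as with schedule_timeout(), the caller must set the task
 * state first; the only difference is the iowait accounting around the
 * sleep. io_schedule() is the MAX_SCHEDULE_TIMEOUT variant of this:
 *
 *	set_current_state(TASK_UNINTERRUPTIBLE);
 *	remaining = io_schedule_timeout(msecs_to_jiffies(100));
 */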
| 4992 | |
| 4993 | /** |
| 4994 | * sys_sched_get_priority_max - return maximum RT priority. |
| 4995 | * @policy: scheduling class. |
| 4996 | * |
| 4997 | * Return: On success, this syscall returns the maximum |
| 4998 | * rt_priority that can be used by a given scheduling class. |
| 4999 | * On failure, a negative error code is returned. |
| 5000 | */ |
| 5001 | SYSCALL_DEFINE1(sched_get_priority_max, int, policy) |
| 5002 | { |
| 5003 | int ret = -EINVAL; |
| 5004 | |
| 5005 | switch (policy) { |
| 5006 | case SCHED_FIFO: |
| 5007 | case SCHED_RR: |
| 5008 | ret = MAX_USER_RT_PRIO-1; |
| 5009 | break; |
| 5010 | case SCHED_DEADLINE: |
| 5011 | case SCHED_NORMAL: |
| 5012 | case SCHED_BATCH: |
| 5013 | case SCHED_IDLE: |
| 5014 | ret = 0; |
| 5015 | break; |
| 5016 | } |
| 5017 | return ret; |
| 5018 | } |
| 5019 | |
| 5020 | /** |
| 5021 | * sys_sched_get_priority_min - return minimum RT priority. |
| 5022 | * @policy: scheduling class. |
| 5023 | * |
| 5024 | * Return: On success, this syscall returns the minimum |
| 5025 | * rt_priority that can be used by a given scheduling class. |
| 5026 | * On failure, a negative error code is returned. |
| 5027 | */ |
| 5028 | SYSCALL_DEFINE1(sched_get_priority_min, int, policy) |
| 5029 | { |
| 5030 | int ret = -EINVAL; |
| 5031 | |
| 5032 | switch (policy) { |
| 5033 | case SCHED_FIFO: |
| 5034 | case SCHED_RR: |
| 5035 | ret = 1; |
| 5036 | break; |
| 5037 | case SCHED_DEADLINE: |
| 5038 | case SCHED_NORMAL: |
| 5039 | case SCHED_BATCH: |
| 5040 | case SCHED_IDLE: |
| 5041 | ret = 0; |
| 5042 | } |
| 5043 | return ret; |
| 5044 | } |
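/*
 * Userspace view (a sketch using the POSIX wrappers): with
 * MAX_USER_RT_PRIO == 100, the realtime policies span priorities 1..99:
 *
 *	int max = sched_get_priority_max(SCHED_FIFO);	(99 on Linux)
 *	int min = sched_get_priority_min(SCHED_FIFO);	(1 on Linux)
 */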
| 5045 | |
| 5046 | /** |
| 5047 | * sys_sched_rr_get_interval - return the default timeslice of a process. |
| 5048 | * @pid: pid of the process. |
| 5049 | * @interval: userspace pointer to the timeslice value. |
| 5050 | * |
 * This syscall writes the default timeslice value of a given process
 * into the user-space timespec buffer. A value of '0' means infinity.
| 5053 | * |
| 5054 | * Return: On success, 0 and the timeslice is in @interval. Otherwise, |
| 5055 | * an error code. |
| 5056 | */ |
| 5057 | SYSCALL_DEFINE2(sched_rr_get_interval, pid_t, pid, |
| 5058 | struct timespec __user *, interval) |
| 5059 | { |
| 5060 | struct task_struct *p; |
| 5061 | unsigned int time_slice; |
| 5062 | struct rq_flags rf; |
| 5063 | struct timespec t; |
| 5064 | struct rq *rq; |
| 5065 | int retval; |
| 5066 | |
| 5067 | if (pid < 0) |
| 5068 | return -EINVAL; |
| 5069 | |
| 5070 | retval = -ESRCH; |
| 5071 | rcu_read_lock(); |
| 5072 | p = find_process_by_pid(pid); |
| 5073 | if (!p) |
| 5074 | goto out_unlock; |
| 5075 | |
| 5076 | retval = security_task_getscheduler(p); |
| 5077 | if (retval) |
| 5078 | goto out_unlock; |
| 5079 | |
| 5080 | rq = task_rq_lock(p, &rf); |
| 5081 | time_slice = 0; |
| 5082 | if (p->sched_class->get_rr_interval) |
| 5083 | time_slice = p->sched_class->get_rr_interval(rq, p); |
| 5084 | task_rq_unlock(rq, p, &rf); |
| 5085 | |
| 5086 | rcu_read_unlock(); |
| 5087 | jiffies_to_timespec(time_slice, &t); |
| 5088 | retval = copy_to_user(interval, &t, sizeof(t)) ? -EFAULT : 0; |
| 5089 | return retval; |
| 5090 | |
| 5091 | out_unlock: |
| 5092 | rcu_read_unlock(); |
| 5093 | return retval; |
| 5094 | } |
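/*
 * Userspace sketch: for a SCHED_RR task this reports the round-robin
 * quantum; other classes report whatever their get_rr_interval hook
 * computes, or 0 (infinity) when there is no hook:
 *
 *	struct timespec ts;
 *	if (sched_rr_get_interval(0, &ts) == 0)
 *		printf("%ld.%09ld\n", (long)ts.tv_sec, ts.tv_nsec);
 */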
| 5095 | |
| 5096 | static const char stat_nam[] = TASK_STATE_TO_CHAR_STR; |
| 5097 | |
| 5098 | void sched_show_task(struct task_struct *p) |
| 5099 | { |
| 5100 | unsigned long free = 0; |
| 5101 | int ppid; |
| 5102 | unsigned long state = p->state; |
| 5103 | |
| 5104 | if (state) |
| 5105 | state = __ffs(state) + 1; |
| 5106 | printk(KERN_INFO "%-15.15s %c", p->comm, |
| 5107 | state < sizeof(stat_nam) - 1 ? stat_nam[state] : '?'); |
| 5108 | #if BITS_PER_LONG == 32 |
| 5109 | if (state == TASK_RUNNING) |
| 5110 | printk(KERN_CONT " running "); |
| 5111 | else |
| 5112 | printk(KERN_CONT " %08lx ", thread_saved_pc(p)); |
| 5113 | #else |
| 5114 | if (state == TASK_RUNNING) |
| 5115 | printk(KERN_CONT " running task "); |
| 5116 | else |
| 5117 | printk(KERN_CONT " %016lx ", thread_saved_pc(p)); |
| 5118 | #endif |
| 5119 | #ifdef CONFIG_DEBUG_STACK_USAGE |
| 5120 | free = stack_not_used(p); |
| 5121 | #endif |
| 5122 | ppid = 0; |
| 5123 | rcu_read_lock(); |
| 5124 | if (pid_alive(p)) |
| 5125 | ppid = task_pid_nr(rcu_dereference(p->real_parent)); |
| 5126 | rcu_read_unlock(); |
| 5127 | printk(KERN_CONT "%5lu %5d %6d 0x%08lx\n", free, |
| 5128 | task_pid_nr(p), ppid, |
| 5129 | (unsigned long)task_thread_info(p)->flags); |
| 5130 | |
| 5131 | print_worker_info(KERN_INFO, p); |
| 5132 | show_stack(p, NULL); |
| 5133 | } |
| 5134 | |
| 5135 | void show_state_filter(unsigned long state_filter) |
| 5136 | { |
| 5137 | struct task_struct *g, *p; |
| 5138 | |
#if BITS_PER_LONG == 32
	printk(KERN_INFO
		"  task                PC stack   pid father\n");
#else
	printk(KERN_INFO
		"  task                        PC stack   pid father\n");
#endif
| 5146 | rcu_read_lock(); |
| 5147 | for_each_process_thread(g, p) { |
/*
 * Reset the NMI-timeout; listing all tasks on a slow
 * console might take a lot of time:
 */
| 5152 | touch_nmi_watchdog(); |
| 5153 | if (!state_filter || (p->state & state_filter)) |
| 5154 | sched_show_task(p); |
| 5155 | } |
| 5156 | |
| 5157 | touch_all_softlockup_watchdogs(); |
| 5158 | |
| 5159 | #ifdef CONFIG_SCHED_DEBUG |
| 5160 | if (!state_filter) |
| 5161 | sysrq_sched_debug_show(); |
| 5162 | #endif |
| 5163 | rcu_read_unlock(); |
| 5164 | /* |
| 5165 | * Only show locks if all tasks are dumped: |
| 5166 | */ |
| 5167 | if (!state_filter) |
| 5168 | debug_show_all_locks(); |
| 5169 | } |
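/*
 * Typical entry points (for illustration): SysRq-t dumps every task via
 * show_state(), i.e. show_state_filter(0), while SysRq-w passes
 * TASK_UNINTERRUPTIBLE to list only blocked tasks.
 */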
| 5170 | |
| 5171 | void init_idle_bootup_task(struct task_struct *idle) |
| 5172 | { |
| 5173 | idle->sched_class = &idle_sched_class; |
| 5174 | } |
| 5175 | |
| 5176 | /** |
| 5177 | * init_idle - set up an idle thread for a given CPU |
| 5178 | * @idle: task in question |
| 5179 | * @cpu: cpu the idle task belongs to |
| 5180 | * |
| 5181 | * NOTE: this function does not set the idle thread's NEED_RESCHED |
| 5182 | * flag, to make booting more robust. |
| 5183 | */ |
| 5184 | void init_idle(struct task_struct *idle, int cpu) |
| 5185 | { |
| 5186 | struct rq *rq = cpu_rq(cpu); |
| 5187 | unsigned long flags; |
| 5188 | |
| 5189 | raw_spin_lock_irqsave(&idle->pi_lock, flags); |
| 5190 | raw_spin_lock(&rq->lock); |
| 5191 | |
| 5192 | __sched_fork(0, idle); |
| 5193 | idle->state = TASK_RUNNING; |
| 5194 | idle->se.exec_start = sched_clock(); |
| 5195 | |
| 5196 | kasan_unpoison_task_stack(idle); |
| 5197 | |
| 5198 | #ifdef CONFIG_SMP |
| 5199 | /* |
 * It's possible that init_idle() gets called multiple times on a task;
 * in that case do_set_cpus_allowed() will not do the right thing.
| 5202 | * |
| 5203 | * And since this is boot we can forgo the serialization. |
| 5204 | */ |
| 5205 | set_cpus_allowed_common(idle, cpumask_of(cpu)); |
| 5206 | #endif |
| 5207 | /* |
 * We're having a chicken-and-egg problem: even though we are
 * holding rq->lock, the CPU isn't yet set to this CPU, so the
 * lockdep check in task_group() will fail.
| 5211 | * |
| 5212 | * Similar case to sched_fork(). / Alternatively we could |
| 5213 | * use task_rq_lock() here and obtain the other rq->lock. |
| 5214 | * |
| 5215 | * Silence PROVE_RCU |
| 5216 | */ |
| 5217 | rcu_read_lock(); |
| 5218 | __set_task_cpu(idle, cpu); |
| 5219 | rcu_read_unlock(); |
| 5220 | |
| 5221 | rq->curr = rq->idle = idle; |
| 5222 | idle->on_rq = TASK_ON_RQ_QUEUED; |
| 5223 | #ifdef CONFIG_SMP |
| 5224 | idle->on_cpu = 1; |
| 5225 | #endif |
| 5226 | raw_spin_unlock(&rq->lock); |
| 5227 | raw_spin_unlock_irqrestore(&idle->pi_lock, flags); |
| 5228 | |
| 5229 | /* Set the preempt count _outside_ the spinlocks! */ |
| 5230 | init_idle_preempt_count(idle, cpu); |
| 5231 | |
| 5232 | /* |
| 5233 | * The idle tasks have their own, simple scheduling class: |
| 5234 | */ |
| 5235 | idle->sched_class = &idle_sched_class; |
| 5236 | ftrace_graph_init_idle_task(idle, cpu); |
| 5237 | vtime_init_idle(idle, cpu); |
| 5238 | #ifdef CONFIG_SMP |
| 5239 | sprintf(idle->comm, "%s/%d", INIT_TASK_COMM, cpu); |
| 5240 | #endif |
| 5241 | } |
| 5242 | |
| 5243 | int cpuset_cpumask_can_shrink(const struct cpumask *cur, |
| 5244 | const struct cpumask *trial) |
| 5245 | { |
| 5246 | int ret = 1, trial_cpus; |
| 5247 | struct dl_bw *cur_dl_b; |
| 5248 | unsigned long flags; |
| 5249 | |
| 5250 | if (!cpumask_weight(cur)) |
| 5251 | return ret; |
| 5252 | |
| 5253 | rcu_read_lock_sched(); |
| 5254 | cur_dl_b = dl_bw_of(cpumask_any(cur)); |
| 5255 | trial_cpus = cpumask_weight(trial); |
| 5256 | |
| 5257 | raw_spin_lock_irqsave(&cur_dl_b->lock, flags); |
| 5258 | if (cur_dl_b->bw != -1 && |
| 5259 | cur_dl_b->bw * trial_cpus < cur_dl_b->total_bw) |
| 5260 | ret = 0; |
| 5261 | raw_spin_unlock_irqrestore(&cur_dl_b->lock, flags); |
| 5262 | rcu_read_unlock_sched(); |
| 5263 | |
| 5264 | return ret; |
| 5265 | } |
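/*
 * Worked example of the check above (a sketch): dl_bw values are fixed
 * point fractions of one CPU, scaled by 2^20 (see to_ratio()). With the
 * default 95% limit, cur_dl_b->bw is about 0.95 << 20. If the current
 * set has accepted total_bw worth 1.5 CPUs of utilization, shrinking to
 * trial_cpus == 1 gives 0.95 < 1.5, so the shrink is rejected (ret 0).
 */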
| 5266 | |
| 5267 | int task_can_attach(struct task_struct *p, |
| 5268 | const struct cpumask *cs_cpus_allowed) |
| 5269 | { |
| 5270 | int ret = 0; |
| 5271 | |
| 5272 | /* |
| 5273 | * Kthreads which disallow setaffinity shouldn't be moved |
| 5274 | * to a new cpuset; we don't want to change their cpu |
| 5275 | * affinity and isolating such threads by their set of |
| 5276 | * allowed nodes is unnecessary. Thus, cpusets are not |
| 5277 | * applicable for such threads. This prevents checking for |
| 5278 | * success of set_cpus_allowed_ptr() on all attached tasks |
| 5279 | * before cpus_allowed may be changed. |
| 5280 | */ |
| 5281 | if (p->flags & PF_NO_SETAFFINITY) { |
| 5282 | ret = -EINVAL; |
| 5283 | goto out; |
| 5284 | } |
| 5285 | |
| 5286 | #ifdef CONFIG_SMP |
| 5287 | if (dl_task(p) && !cpumask_intersects(task_rq(p)->rd->span, |
| 5288 | cs_cpus_allowed)) { |
| 5289 | unsigned int dest_cpu = cpumask_any_and(cpu_active_mask, |
| 5290 | cs_cpus_allowed); |
| 5291 | struct dl_bw *dl_b; |
| 5292 | bool overflow; |
| 5293 | int cpus; |
| 5294 | unsigned long flags; |
| 5295 | |
| 5296 | rcu_read_lock_sched(); |
| 5297 | dl_b = dl_bw_of(dest_cpu); |
| 5298 | raw_spin_lock_irqsave(&dl_b->lock, flags); |
| 5299 | cpus = dl_bw_cpus(dest_cpu); |
| 5300 | overflow = __dl_overflow(dl_b, cpus, 0, p->dl.dl_bw); |
| 5301 | if (overflow) |
| 5302 | ret = -EBUSY; |
| 5303 | else { |
| 5304 | /* |
| 5305 | * We reserve space for this task in the destination |
| 5306 | * root_domain, as we can't fail after this point. |
| 5307 | * We will free resources in the source root_domain |
| 5308 | * later on (see set_cpus_allowed_dl()). |
| 5309 | */ |
| 5310 | __dl_add(dl_b, p->dl.dl_bw); |
| 5311 | } |
| 5312 | raw_spin_unlock_irqrestore(&dl_b->lock, flags); |
| 5313 | rcu_read_unlock_sched(); |
| 5314 | |
| 5315 | } |
| 5316 | #endif |
| 5317 | out: |
| 5318 | return ret; |
| 5319 | } |
| 5320 | |
| 5321 | #ifdef CONFIG_SMP |
| 5322 | |
| 5323 | static bool sched_smp_initialized __read_mostly; |
| 5324 | |
| 5325 | #ifdef CONFIG_NUMA_BALANCING |
| 5326 | /* Migrate current task p to target_cpu */ |
| 5327 | int migrate_task_to(struct task_struct *p, int target_cpu) |
| 5328 | { |
| 5329 | struct migration_arg arg = { p, target_cpu }; |
| 5330 | int curr_cpu = task_cpu(p); |
| 5331 | |
| 5332 | if (curr_cpu == target_cpu) |
| 5333 | return 0; |
| 5334 | |
| 5335 | if (!cpumask_test_cpu(target_cpu, tsk_cpus_allowed(p))) |
| 5336 | return -EINVAL; |
| 5337 | |
| 5338 | /* TODO: This is not properly updating schedstats */ |
| 5339 | |
| 5340 | trace_sched_move_numa(p, curr_cpu, target_cpu); |
| 5341 | return stop_one_cpu(curr_cpu, migration_cpu_stop, &arg); |
| 5342 | } |
| 5343 | |
| 5344 | /* |
| 5345 | * Requeue a task on a given node and accurately track the number of NUMA |
| 5346 | * tasks on the runqueues |
| 5347 | */ |
| 5348 | void sched_setnuma(struct task_struct *p, int nid) |
| 5349 | { |
| 5350 | bool queued, running; |
| 5351 | struct rq_flags rf; |
| 5352 | struct rq *rq; |
| 5353 | |
| 5354 | rq = task_rq_lock(p, &rf); |
| 5355 | queued = task_on_rq_queued(p); |
| 5356 | running = task_current(rq, p); |
| 5357 | |
| 5358 | if (queued) |
| 5359 | dequeue_task(rq, p, DEQUEUE_SAVE); |
| 5360 | if (running) |
| 5361 | put_prev_task(rq, p); |
| 5362 | |
| 5363 | p->numa_preferred_nid = nid; |
| 5364 | |
| 5365 | if (running) |
| 5366 | p->sched_class->set_curr_task(rq); |
| 5367 | if (queued) |
| 5368 | enqueue_task(rq, p, ENQUEUE_RESTORE); |
| 5369 | task_rq_unlock(rq, p, &rf); |
| 5370 | } |
| 5371 | #endif /* CONFIG_NUMA_BALANCING */ |
| 5372 | |
| 5373 | #ifdef CONFIG_HOTPLUG_CPU |
| 5374 | /* |
| 5375 | * Ensures that the idle task is using init_mm right before its cpu goes |
| 5376 | * offline. |
| 5377 | */ |
| 5378 | void idle_task_exit(void) |
| 5379 | { |
| 5380 | struct mm_struct *mm = current->active_mm; |
| 5381 | |
| 5382 | BUG_ON(cpu_online(smp_processor_id())); |
| 5383 | |
| 5384 | if (mm != &init_mm) { |
| 5385 | switch_mm_irqs_off(mm, &init_mm, current); |
| 5386 | finish_arch_post_lock_switch(); |
| 5387 | } |
| 5388 | mmdrop(mm); |
| 5389 | } |
| 5390 | |
| 5391 | /* |
| 5392 | * Since this CPU is going 'away' for a while, fold any nr_active delta |
| 5393 | * we might have. Assumes we're called after migrate_tasks() so that the |
| 5394 | * nr_active count is stable. |
| 5395 | * |
| 5396 | * Also see the comment "Global load-average calculations". |
| 5397 | */ |
| 5398 | static void calc_load_migrate(struct rq *rq) |
| 5399 | { |
| 5400 | long delta = calc_load_fold_active(rq); |
| 5401 | if (delta) |
| 5402 | atomic_long_add(delta, &calc_load_tasks); |
| 5403 | } |
| 5404 | |
| 5405 | static void put_prev_task_fake(struct rq *rq, struct task_struct *prev) |
| 5406 | { |
| 5407 | } |
| 5408 | |
| 5409 | static const struct sched_class fake_sched_class = { |
| 5410 | .put_prev_task = put_prev_task_fake, |
| 5411 | }; |
| 5412 | |
| 5413 | static struct task_struct fake_task = { |
| 5414 | /* |
| 5415 | * Avoid pull_{rt,dl}_task() |
| 5416 | */ |
| 5417 | .prio = MAX_PRIO + 1, |
| 5418 | .sched_class = &fake_sched_class, |
| 5419 | }; |
| 5420 | |
| 5421 | /* |
| 5422 | * Migrate all tasks from the rq, sleeping tasks will be migrated by |
| 5423 | * try_to_wake_up()->select_task_rq(). |
| 5424 | * |
 * Called with rq->lock held even though we're in stop_machine() and
 * there's no concurrency possible; we hold the required locks anyway
 * because of lock validation efforts.
| 5428 | */ |
| 5429 | static void migrate_tasks(struct rq *dead_rq) |
| 5430 | { |
| 5431 | struct rq *rq = dead_rq; |
| 5432 | struct task_struct *next, *stop = rq->stop; |
| 5433 | struct pin_cookie cookie; |
| 5434 | int dest_cpu; |
| 5435 | |
| 5436 | /* |
| 5437 | * Fudge the rq selection such that the below task selection loop |
| 5438 | * doesn't get stuck on the currently eligible stop task. |
| 5439 | * |
| 5440 | * We're currently inside stop_machine() and the rq is either stuck |
 * in the stop_machine_cpu_stop() loop, or we're executing this code;
 * either way we should never end up calling schedule() until we're
| 5443 | * done here. |
| 5444 | */ |
| 5445 | rq->stop = NULL; |
| 5446 | |
| 5447 | /* |
 * The put_prev_task() and pick_next_task() sched class
 * methods both need to have an up-to-date value of
 * rq->clock[_task].
| 5451 | */ |
| 5452 | update_rq_clock(rq); |
| 5453 | |
| 5454 | for (;;) { |
| 5455 | /* |
| 5456 | * There's this thread running, bail when that's the only |
| 5457 | * remaining thread. |
| 5458 | */ |
| 5459 | if (rq->nr_running == 1) |
| 5460 | break; |
| 5461 | |
| 5462 | /* |
| 5463 | * pick_next_task assumes pinned rq->lock. |
| 5464 | */ |
| 5465 | cookie = lockdep_pin_lock(&rq->lock); |
| 5466 | next = pick_next_task(rq, &fake_task, cookie); |
| 5467 | BUG_ON(!next); |
| 5468 | next->sched_class->put_prev_task(rq, next); |
| 5469 | |
| 5470 | /* |
| 5471 | * Rules for changing task_struct::cpus_allowed are holding |
| 5472 | * both pi_lock and rq->lock, such that holding either |
| 5473 | * stabilizes the mask. |
| 5474 | * |
 * Dropping rq->lock is not quite as disastrous as it usually
 * is, because !cpu_active at this point means load-balance
 * will not interfere. Also, stop-machine.
| 5478 | */ |
| 5479 | lockdep_unpin_lock(&rq->lock, cookie); |
| 5480 | raw_spin_unlock(&rq->lock); |
| 5481 | raw_spin_lock(&next->pi_lock); |
| 5482 | raw_spin_lock(&rq->lock); |
| 5483 | |
| 5484 | /* |
| 5485 | * Since we're inside stop-machine, _nothing_ should have |
 * changed the task; WARN if weird stuff happened, because in
| 5487 | * that case the above rq->lock drop is a fail too. |
| 5488 | */ |
| 5489 | if (WARN_ON(task_rq(next) != rq || !task_on_rq_queued(next))) { |
| 5490 | raw_spin_unlock(&next->pi_lock); |
| 5491 | continue; |
| 5492 | } |
| 5493 | |
| 5494 | /* Find suitable destination for @next, with force if needed. */ |
| 5495 | dest_cpu = select_fallback_rq(dead_rq->cpu, next); |
| 5496 | |
| 5497 | rq = __migrate_task(rq, next, dest_cpu); |
| 5498 | if (rq != dead_rq) { |
| 5499 | raw_spin_unlock(&rq->lock); |
| 5500 | rq = dead_rq; |
| 5501 | raw_spin_lock(&rq->lock); |
| 5502 | } |
| 5503 | raw_spin_unlock(&next->pi_lock); |
| 5504 | } |
| 5505 | |
| 5506 | rq->stop = stop; |
| 5507 | } |
| 5508 | #endif /* CONFIG_HOTPLUG_CPU */ |
| 5509 | |
| 5510 | static void set_rq_online(struct rq *rq) |
| 5511 | { |
| 5512 | if (!rq->online) { |
| 5513 | const struct sched_class *class; |
| 5514 | |
| 5515 | cpumask_set_cpu(rq->cpu, rq->rd->online); |
| 5516 | rq->online = 1; |
| 5517 | |
| 5518 | for_each_class(class) { |
| 5519 | if (class->rq_online) |
| 5520 | class->rq_online(rq); |
| 5521 | } |
| 5522 | } |
| 5523 | } |
| 5524 | |
| 5525 | static void set_rq_offline(struct rq *rq) |
| 5526 | { |
| 5527 | if (rq->online) { |
| 5528 | const struct sched_class *class; |
| 5529 | |
| 5530 | for_each_class(class) { |
| 5531 | if (class->rq_offline) |
| 5532 | class->rq_offline(rq); |
| 5533 | } |
| 5534 | |
| 5535 | cpumask_clear_cpu(rq->cpu, rq->rd->online); |
| 5536 | rq->online = 0; |
| 5537 | } |
| 5538 | } |
| 5539 | |
| 5540 | static void set_cpu_rq_start_time(unsigned int cpu) |
| 5541 | { |
| 5542 | struct rq *rq = cpu_rq(cpu); |
| 5543 | |
| 5544 | rq->age_stamp = sched_clock_cpu(cpu); |
| 5545 | } |
| 5546 | |
| 5547 | static cpumask_var_t sched_domains_tmpmask; /* sched_domains_mutex */ |
| 5548 | |
| 5549 | #ifdef CONFIG_SCHED_DEBUG |
| 5550 | |
| 5551 | static __read_mostly int sched_debug_enabled; |
| 5552 | |
| 5553 | static int __init sched_debug_setup(char *str) |
| 5554 | { |
| 5555 | sched_debug_enabled = 1; |
| 5556 | |
| 5557 | return 0; |
| 5558 | } |
| 5559 | early_param("sched_debug", sched_debug_setup); |
| 5560 | |
| 5561 | static inline bool sched_debug(void) |
| 5562 | { |
| 5563 | return sched_debug_enabled; |
| 5564 | } |
| 5565 | |
| 5566 | static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level, |
| 5567 | struct cpumask *groupmask) |
| 5568 | { |
| 5569 | struct sched_group *group = sd->groups; |
| 5570 | |
| 5571 | cpumask_clear(groupmask); |
| 5572 | |
| 5573 | printk(KERN_DEBUG "%*s domain %d: ", level, "", level); |
| 5574 | |
| 5575 | if (!(sd->flags & SD_LOAD_BALANCE)) { |
| 5576 | printk("does not load-balance\n"); |
| 5577 | if (sd->parent) |
| 5578 | printk(KERN_ERR "ERROR: !SD_LOAD_BALANCE domain" |
| 5579 | " has parent"); |
| 5580 | return -1; |
| 5581 | } |
| 5582 | |
| 5583 | printk(KERN_CONT "span %*pbl level %s\n", |
| 5584 | cpumask_pr_args(sched_domain_span(sd)), sd->name); |
| 5585 | |
| 5586 | if (!cpumask_test_cpu(cpu, sched_domain_span(sd))) { |
| 5587 | printk(KERN_ERR "ERROR: domain->span does not contain " |
| 5588 | "CPU%d\n", cpu); |
| 5589 | } |
| 5590 | if (!cpumask_test_cpu(cpu, sched_group_cpus(group))) { |
| 5591 | printk(KERN_ERR "ERROR: domain->groups does not contain" |
| 5592 | " CPU%d\n", cpu); |
| 5593 | } |
| 5594 | |
| 5595 | printk(KERN_DEBUG "%*s groups:", level + 1, ""); |
| 5596 | do { |
| 5597 | if (!group) { |
| 5598 | printk("\n"); |
| 5599 | printk(KERN_ERR "ERROR: group is NULL\n"); |
| 5600 | break; |
| 5601 | } |
| 5602 | |
| 5603 | if (!cpumask_weight(sched_group_cpus(group))) { |
| 5604 | printk(KERN_CONT "\n"); |
| 5605 | printk(KERN_ERR "ERROR: empty group\n"); |
| 5606 | break; |
| 5607 | } |
| 5608 | |
| 5609 | if (!(sd->flags & SD_OVERLAP) && |
| 5610 | cpumask_intersects(groupmask, sched_group_cpus(group))) { |
| 5611 | printk(KERN_CONT "\n"); |
| 5612 | printk(KERN_ERR "ERROR: repeated CPUs\n"); |
| 5613 | break; |
| 5614 | } |
| 5615 | |
| 5616 | cpumask_or(groupmask, groupmask, sched_group_cpus(group)); |
| 5617 | |
| 5618 | printk(KERN_CONT " %*pbl", |
| 5619 | cpumask_pr_args(sched_group_cpus(group))); |
| 5620 | if (group->sgc->capacity != SCHED_CAPACITY_SCALE) { |
| 5621 | printk(KERN_CONT " (cpu_capacity = %d)", |
| 5622 | group->sgc->capacity); |
| 5623 | } |
| 5624 | |
| 5625 | group = group->next; |
| 5626 | } while (group != sd->groups); |
| 5627 | printk(KERN_CONT "\n"); |
| 5628 | |
| 5629 | if (!cpumask_equal(sched_domain_span(sd), groupmask)) |
| 5630 | printk(KERN_ERR "ERROR: groups don't span domain->span\n"); |
| 5631 | |
| 5632 | if (sd->parent && |
| 5633 | !cpumask_subset(groupmask, sched_domain_span(sd->parent))) |
| 5634 | printk(KERN_ERR "ERROR: parent span is not a superset " |
| 5635 | "of domain->span\n"); |
| 5636 | return 0; |
| 5637 | } |
| 5638 | |
| 5639 | static void sched_domain_debug(struct sched_domain *sd, int cpu) |
| 5640 | { |
| 5641 | int level = 0; |
| 5642 | |
| 5643 | if (!sched_debug_enabled) |
| 5644 | return; |
| 5645 | |
| 5646 | if (!sd) { |
| 5647 | printk(KERN_DEBUG "CPU%d attaching NULL sched-domain.\n", cpu); |
| 5648 | return; |
| 5649 | } |
| 5650 | |
| 5651 | printk(KERN_DEBUG "CPU%d attaching sched-domain:\n", cpu); |
| 5652 | |
| 5653 | for (;;) { |
| 5654 | if (sched_domain_debug_one(sd, cpu, level, sched_domains_tmpmask)) |
| 5655 | break; |
| 5656 | level++; |
| 5657 | sd = sd->parent; |
| 5658 | if (!sd) |
| 5659 | break; |
| 5660 | } |
| 5661 | } |
| 5662 | #else /* !CONFIG_SCHED_DEBUG */ |
| 5663 | # define sched_domain_debug(sd, cpu) do { } while (0) |
| 5664 | static inline bool sched_debug(void) |
| 5665 | { |
| 5666 | return false; |
| 5667 | } |
| 5668 | #endif /* CONFIG_SCHED_DEBUG */ |
| 5669 | |
| 5670 | static int sd_degenerate(struct sched_domain *sd) |
| 5671 | { |
| 5672 | if (cpumask_weight(sched_domain_span(sd)) == 1) |
| 5673 | return 1; |
| 5674 | |
| 5675 | /* Following flags need at least 2 groups */ |
| 5676 | if (sd->flags & (SD_LOAD_BALANCE | |
| 5677 | SD_BALANCE_NEWIDLE | |
| 5678 | SD_BALANCE_FORK | |
| 5679 | SD_BALANCE_EXEC | |
| 5680 | SD_SHARE_CPUCAPACITY | |
| 5681 | SD_SHARE_PKG_RESOURCES | |
| 5682 | SD_SHARE_POWERDOMAIN)) { |
| 5683 | if (sd->groups != sd->groups->next) |
| 5684 | return 0; |
| 5685 | } |
| 5686 | |
| 5687 | /* Following flags don't use groups */ |
| 5688 | if (sd->flags & (SD_WAKE_AFFINE)) |
| 5689 | return 0; |
| 5690 | |
| 5691 | return 1; |
| 5692 | } |
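/*
 * Example: on a part without SMT the SMT topology level still gets
 * built, but it spans a single CPU; the weight test above then reports
 * it degenerate and cpu_attach_domain() splices it out of the tree.
 */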
| 5693 | |
| 5694 | static int |
| 5695 | sd_parent_degenerate(struct sched_domain *sd, struct sched_domain *parent) |
| 5696 | { |
| 5697 | unsigned long cflags = sd->flags, pflags = parent->flags; |
| 5698 | |
| 5699 | if (sd_degenerate(parent)) |
| 5700 | return 1; |
| 5701 | |
| 5702 | if (!cpumask_equal(sched_domain_span(sd), sched_domain_span(parent))) |
| 5703 | return 0; |
| 5704 | |
| 5705 | /* Flags needing groups don't count if only 1 group in parent */ |
| 5706 | if (parent->groups == parent->groups->next) { |
| 5707 | pflags &= ~(SD_LOAD_BALANCE | |
| 5708 | SD_BALANCE_NEWIDLE | |
| 5709 | SD_BALANCE_FORK | |
| 5710 | SD_BALANCE_EXEC | |
| 5711 | SD_SHARE_CPUCAPACITY | |
| 5712 | SD_SHARE_PKG_RESOURCES | |
| 5713 | SD_PREFER_SIBLING | |
| 5714 | SD_SHARE_POWERDOMAIN); |
| 5715 | if (nr_node_ids == 1) |
| 5716 | pflags &= ~SD_SERIALIZE; |
| 5717 | } |
| 5718 | if (~cflags & pflags) |
| 5719 | return 0; |
| 5720 | |
| 5721 | return 1; |
| 5722 | } |
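/*
 * The closing '~cflags & pflags' test asks: does the parent still have
 * any flag the child lacks? For example, if cflags has SD_LOAD_BALANCE
 * and SD_BALANCE_NEWIDLE while pflags was masked down to SD_LOAD_BALANCE
 * alone, the expression is 0, so the parent adds nothing and degenerates.
 */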
| 5723 | |
| 5724 | static void free_rootdomain(struct rcu_head *rcu) |
| 5725 | { |
| 5726 | struct root_domain *rd = container_of(rcu, struct root_domain, rcu); |
| 5727 | |
| 5728 | cpupri_cleanup(&rd->cpupri); |
| 5729 | cpudl_cleanup(&rd->cpudl); |
| 5730 | free_cpumask_var(rd->dlo_mask); |
| 5731 | free_cpumask_var(rd->rto_mask); |
| 5732 | free_cpumask_var(rd->online); |
| 5733 | free_cpumask_var(rd->span); |
| 5734 | kfree(rd); |
| 5735 | } |
| 5736 | |
| 5737 | static void rq_attach_root(struct rq *rq, struct root_domain *rd) |
| 5738 | { |
| 5739 | struct root_domain *old_rd = NULL; |
| 5740 | unsigned long flags; |
| 5741 | |
| 5742 | raw_spin_lock_irqsave(&rq->lock, flags); |
| 5743 | |
| 5744 | if (rq->rd) { |
| 5745 | old_rd = rq->rd; |
| 5746 | |
| 5747 | if (cpumask_test_cpu(rq->cpu, old_rd->online)) |
| 5748 | set_rq_offline(rq); |
| 5749 | |
| 5750 | cpumask_clear_cpu(rq->cpu, old_rd->span); |
| 5751 | |
| 5752 | /* |
 * If we don't want to free the old_rd yet then
| 5754 | * set old_rd to NULL to skip the freeing later |
| 5755 | * in this function: |
| 5756 | */ |
| 5757 | if (!atomic_dec_and_test(&old_rd->refcount)) |
| 5758 | old_rd = NULL; |
| 5759 | } |
| 5760 | |
| 5761 | atomic_inc(&rd->refcount); |
| 5762 | rq->rd = rd; |
| 5763 | |
| 5764 | cpumask_set_cpu(rq->cpu, rd->span); |
| 5765 | if (cpumask_test_cpu(rq->cpu, cpu_active_mask)) |
| 5766 | set_rq_online(rq); |
| 5767 | |
| 5768 | raw_spin_unlock_irqrestore(&rq->lock, flags); |
| 5769 | |
| 5770 | if (old_rd) |
| 5771 | call_rcu_sched(&old_rd->rcu, free_rootdomain); |
| 5772 | } |
| 5773 | |
| 5774 | static int init_rootdomain(struct root_domain *rd) |
| 5775 | { |
| 5776 | memset(rd, 0, sizeof(*rd)); |
| 5777 | |
| 5778 | if (!zalloc_cpumask_var(&rd->span, GFP_KERNEL)) |
| 5779 | goto out; |
| 5780 | if (!zalloc_cpumask_var(&rd->online, GFP_KERNEL)) |
| 5781 | goto free_span; |
| 5782 | if (!zalloc_cpumask_var(&rd->dlo_mask, GFP_KERNEL)) |
| 5783 | goto free_online; |
| 5784 | if (!zalloc_cpumask_var(&rd->rto_mask, GFP_KERNEL)) |
| 5785 | goto free_dlo_mask; |
| 5786 | |
| 5787 | init_dl_bw(&rd->dl_bw); |
| 5788 | if (cpudl_init(&rd->cpudl) != 0) |
| 5789 | goto free_dlo_mask; |
| 5790 | |
| 5791 | if (cpupri_init(&rd->cpupri) != 0) |
| 5792 | goto free_rto_mask; |
| 5793 | return 0; |
| 5794 | |
| 5795 | free_rto_mask: |
| 5796 | free_cpumask_var(rd->rto_mask); |
| 5797 | free_dlo_mask: |
| 5798 | free_cpumask_var(rd->dlo_mask); |
| 5799 | free_online: |
| 5800 | free_cpumask_var(rd->online); |
| 5801 | free_span: |
| 5802 | free_cpumask_var(rd->span); |
| 5803 | out: |
| 5804 | return -ENOMEM; |
| 5805 | } |
| 5806 | |
| 5807 | /* |
| 5808 | * By default the system creates a single root-domain with all cpus as |
| 5809 | * members (mimicking the global state we have today). |
| 5810 | */ |
| 5811 | struct root_domain def_root_domain; |
| 5812 | |
| 5813 | static void init_defrootdomain(void) |
| 5814 | { |
| 5815 | init_rootdomain(&def_root_domain); |
| 5816 | |
| 5817 | atomic_set(&def_root_domain.refcount, 1); |
| 5818 | } |
| 5819 | |
| 5820 | static struct root_domain *alloc_rootdomain(void) |
| 5821 | { |
| 5822 | struct root_domain *rd; |
| 5823 | |
| 5824 | rd = kmalloc(sizeof(*rd), GFP_KERNEL); |
| 5825 | if (!rd) |
| 5826 | return NULL; |
| 5827 | |
| 5828 | if (init_rootdomain(rd) != 0) { |
| 5829 | kfree(rd); |
| 5830 | return NULL; |
| 5831 | } |
| 5832 | |
| 5833 | return rd; |
| 5834 | } |
| 5835 | |
| 5836 | static void free_sched_groups(struct sched_group *sg, int free_sgc) |
| 5837 | { |
| 5838 | struct sched_group *tmp, *first; |
| 5839 | |
| 5840 | if (!sg) |
| 5841 | return; |
| 5842 | |
| 5843 | first = sg; |
| 5844 | do { |
| 5845 | tmp = sg->next; |
| 5846 | |
| 5847 | if (free_sgc && atomic_dec_and_test(&sg->sgc->ref)) |
| 5848 | kfree(sg->sgc); |
| 5849 | |
| 5850 | kfree(sg); |
| 5851 | sg = tmp; |
| 5852 | } while (sg != first); |
| 5853 | } |
| 5854 | |
| 5855 | static void free_sched_domain(struct rcu_head *rcu) |
| 5856 | { |
| 5857 | struct sched_domain *sd = container_of(rcu, struct sched_domain, rcu); |
| 5858 | |
| 5859 | /* |
 * If it's an overlapping domain it has private groups; iterate and
| 5861 | * nuke them all. |
| 5862 | */ |
| 5863 | if (sd->flags & SD_OVERLAP) { |
| 5864 | free_sched_groups(sd->groups, 1); |
| 5865 | } else if (atomic_dec_and_test(&sd->groups->ref)) { |
| 5866 | kfree(sd->groups->sgc); |
| 5867 | kfree(sd->groups); |
| 5868 | } |
| 5869 | kfree(sd); |
| 5870 | } |
| 5871 | |
| 5872 | static void destroy_sched_domain(struct sched_domain *sd, int cpu) |
| 5873 | { |
| 5874 | call_rcu(&sd->rcu, free_sched_domain); |
| 5875 | } |
| 5876 | |
| 5877 | static void destroy_sched_domains(struct sched_domain *sd, int cpu) |
| 5878 | { |
| 5879 | for (; sd; sd = sd->parent) |
| 5880 | destroy_sched_domain(sd, cpu); |
| 5881 | } |
| 5882 | |
| 5883 | /* |
| 5884 | * Keep a special pointer to the highest sched_domain that has |
 * SD_SHARE_PKG_RESOURCES set (Last Level Cache Domain); this
 * allows us to avoid some pointer chasing in select_idle_sibling().
 *
 * Also keep a unique ID per domain (we use the first cpu number in
 * the cpumask of the domain); this allows us to quickly tell if
 * two cpus are in the same cache domain, see cpus_share_cache().
| 5891 | */ |
| 5892 | DEFINE_PER_CPU(struct sched_domain *, sd_llc); |
| 5893 | DEFINE_PER_CPU(int, sd_llc_size); |
| 5894 | DEFINE_PER_CPU(int, sd_llc_id); |
| 5895 | DEFINE_PER_CPU(struct sched_domain *, sd_numa); |
| 5896 | DEFINE_PER_CPU(struct sched_domain *, sd_busy); |
| 5897 | DEFINE_PER_CPU(struct sched_domain *, sd_asym); |
| 5898 | |
| 5899 | static void update_top_cache_domain(int cpu) |
| 5900 | { |
| 5901 | struct sched_domain *sd; |
| 5902 | struct sched_domain *busy_sd = NULL; |
| 5903 | int id = cpu; |
| 5904 | int size = 1; |
| 5905 | |
| 5906 | sd = highest_flag_domain(cpu, SD_SHARE_PKG_RESOURCES); |
| 5907 | if (sd) { |
| 5908 | id = cpumask_first(sched_domain_span(sd)); |
| 5909 | size = cpumask_weight(sched_domain_span(sd)); |
| 5910 | busy_sd = sd->parent; /* sd_busy */ |
| 5911 | } |
| 5912 | rcu_assign_pointer(per_cpu(sd_busy, cpu), busy_sd); |
| 5913 | |
| 5914 | rcu_assign_pointer(per_cpu(sd_llc, cpu), sd); |
| 5915 | per_cpu(sd_llc_size, cpu) = size; |
| 5916 | per_cpu(sd_llc_id, cpu) = id; |
| 5917 | |
| 5918 | sd = lowest_flag_domain(cpu, SD_NUMA); |
| 5919 | rcu_assign_pointer(per_cpu(sd_numa, cpu), sd); |
| 5920 | |
| 5921 | sd = highest_flag_domain(cpu, SD_ASYM_PACKING); |
| 5922 | rcu_assign_pointer(per_cpu(sd_asym, cpu), sd); |
| 5923 | } |
| 5924 | |
| 5925 | /* |
| 5926 | * Attach the domain 'sd' to 'cpu' as its base domain. Callers must |
| 5927 | * hold the hotplug lock. |
| 5928 | */ |
| 5929 | static void |
| 5930 | cpu_attach_domain(struct sched_domain *sd, struct root_domain *rd, int cpu) |
| 5931 | { |
| 5932 | struct rq *rq = cpu_rq(cpu); |
| 5933 | struct sched_domain *tmp; |
| 5934 | |
| 5935 | /* Remove the sched domains which do not contribute to scheduling. */ |
| 5936 | for (tmp = sd; tmp; ) { |
| 5937 | struct sched_domain *parent = tmp->parent; |
| 5938 | if (!parent) |
| 5939 | break; |
| 5940 | |
| 5941 | if (sd_parent_degenerate(tmp, parent)) { |
| 5942 | tmp->parent = parent->parent; |
| 5943 | if (parent->parent) |
| 5944 | parent->parent->child = tmp; |
| 5945 | /* |
| 5946 | * Transfer SD_PREFER_SIBLING down in case of a |
| 5947 | * degenerate parent; the spans match for this |
| 5948 | * so the property transfers. |
| 5949 | */ |
| 5950 | if (parent->flags & SD_PREFER_SIBLING) |
| 5951 | tmp->flags |= SD_PREFER_SIBLING; |
| 5952 | destroy_sched_domain(parent, cpu); |
| 5953 | } else |
| 5954 | tmp = tmp->parent; |
| 5955 | } |
| 5956 | |
| 5957 | if (sd && sd_degenerate(sd)) { |
| 5958 | tmp = sd; |
| 5959 | sd = sd->parent; |
| 5960 | destroy_sched_domain(tmp, cpu); |
| 5961 | if (sd) |
| 5962 | sd->child = NULL; |
| 5963 | } |
| 5964 | |
| 5965 | sched_domain_debug(sd, cpu); |
| 5966 | |
| 5967 | rq_attach_root(rq, rd); |
| 5968 | tmp = rq->sd; |
| 5969 | rcu_assign_pointer(rq->sd, sd); |
| 5970 | destroy_sched_domains(tmp, cpu); |
| 5971 | |
| 5972 | update_top_cache_domain(cpu); |
| 5973 | } |
| 5974 | |
/* Set up the mask of CPUs configured for isolated domains */
| 5976 | static int __init isolated_cpu_setup(char *str) |
| 5977 | { |
| 5978 | int ret; |
| 5979 | |
| 5980 | alloc_bootmem_cpumask_var(&cpu_isolated_map); |
| 5981 | ret = cpulist_parse(str, cpu_isolated_map); |
| 5982 | if (ret) { |
| 5983 | pr_err("sched: Error, all isolcpus= values must be between 0 and %d\n", nr_cpu_ids); |
| 5984 | return 0; |
| 5985 | } |
| 5986 | return 1; |
| 5987 | } |
| 5988 | __setup("isolcpus=", isolated_cpu_setup); |
| 5989 | |
| 5990 | struct s_data { |
| 5991 | struct sched_domain ** __percpu sd; |
| 5992 | struct root_domain *rd; |
| 5993 | }; |
| 5994 | |
| 5995 | enum s_alloc { |
| 5996 | sa_rootdomain, |
| 5997 | sa_sd, |
| 5998 | sa_sd_storage, |
| 5999 | sa_none, |
| 6000 | }; |
| 6001 | |
| 6002 | /* |
| 6003 | * Build an iteration mask that can exclude certain CPUs from the upwards |
| 6004 | * domain traversal. |
| 6005 | * |
 * Asymmetric node setups can result in situations where the domain tree is of
 * unequal depth; make sure to skip domains that already cover the entire
 * range.
| 6009 | * |
| 6010 | * In that case build_sched_domains() will have terminated the iteration early |
| 6011 | * and our sibling sd spans will be empty. Domains should always include the |
| 6012 | * cpu they're built on, so check that. |
| 6013 | * |
| 6014 | */ |
| 6015 | static void build_group_mask(struct sched_domain *sd, struct sched_group *sg) |
| 6016 | { |
| 6017 | const struct cpumask *span = sched_domain_span(sd); |
| 6018 | struct sd_data *sdd = sd->private; |
| 6019 | struct sched_domain *sibling; |
| 6020 | int i; |
| 6021 | |
| 6022 | for_each_cpu(i, span) { |
| 6023 | sibling = *per_cpu_ptr(sdd->sd, i); |
| 6024 | if (!cpumask_test_cpu(i, sched_domain_span(sibling))) |
| 6025 | continue; |
| 6026 | |
| 6027 | cpumask_set_cpu(i, sched_group_mask(sg)); |
| 6028 | } |
| 6029 | } |
| 6030 | |
| 6031 | /* |
 * Return the canonical balance cpu for this group; this is the first cpu
| 6033 | * of this group that's also in the iteration mask. |
| 6034 | */ |
| 6035 | int group_balance_cpu(struct sched_group *sg) |
| 6036 | { |
| 6037 | return cpumask_first_and(sched_group_cpus(sg), sched_group_mask(sg)); |
| 6038 | } |
| 6039 | |
| 6040 | static int |
| 6041 | build_overlap_sched_groups(struct sched_domain *sd, int cpu) |
| 6042 | { |
| 6043 | struct sched_group *first = NULL, *last = NULL, *groups = NULL, *sg; |
| 6044 | const struct cpumask *span = sched_domain_span(sd); |
| 6045 | struct cpumask *covered = sched_domains_tmpmask; |
| 6046 | struct sd_data *sdd = sd->private; |
| 6047 | struct sched_domain *sibling; |
| 6048 | int i; |
| 6049 | |
| 6050 | cpumask_clear(covered); |
| 6051 | |
| 6052 | for_each_cpu(i, span) { |
| 6053 | struct cpumask *sg_span; |
| 6054 | |
| 6055 | if (cpumask_test_cpu(i, covered)) |
| 6056 | continue; |
| 6057 | |
| 6058 | sibling = *per_cpu_ptr(sdd->sd, i); |
| 6059 | |
| 6060 | /* See the comment near build_group_mask(). */ |
| 6061 | if (!cpumask_test_cpu(i, sched_domain_span(sibling))) |
| 6062 | continue; |
| 6063 | |
| 6064 | sg = kzalloc_node(sizeof(struct sched_group) + cpumask_size(), |
| 6065 | GFP_KERNEL, cpu_to_node(cpu)); |
| 6066 | |
| 6067 | if (!sg) |
| 6068 | goto fail; |
| 6069 | |
| 6070 | sg_span = sched_group_cpus(sg); |
| 6071 | if (sibling->child) |
| 6072 | cpumask_copy(sg_span, sched_domain_span(sibling->child)); |
| 6073 | else |
| 6074 | cpumask_set_cpu(i, sg_span); |
| 6075 | |
| 6076 | cpumask_or(covered, covered, sg_span); |
| 6077 | |
| 6078 | sg->sgc = *per_cpu_ptr(sdd->sgc, i); |
| 6079 | if (atomic_inc_return(&sg->sgc->ref) == 1) |
| 6080 | build_group_mask(sd, sg); |
| 6081 | |
| 6082 | /* |
| 6083 | * Initialize sgc->capacity such that even if we mess up the |
| 6084 | * domains and no possible iteration will get us here, we won't |
| 6085 | * die on a /0 trap. |
| 6086 | */ |
| 6087 | sg->sgc->capacity = SCHED_CAPACITY_SCALE * cpumask_weight(sg_span); |
| 6088 | |
| 6089 | /* |
| 6090 | * Make sure the first group of this domain contains the |
| 6091 | * canonical balance cpu. Otherwise the sched_domain iteration |
| 6092 | * breaks. See update_sg_lb_stats(). |
| 6093 | */ |
| 6094 | if ((!groups && cpumask_test_cpu(cpu, sg_span)) || |
| 6095 | group_balance_cpu(sg) == cpu) |
| 6096 | groups = sg; |
| 6097 | |
| 6098 | if (!first) |
| 6099 | first = sg; |
| 6100 | if (last) |
| 6101 | last->next = sg; |
| 6102 | last = sg; |
| 6103 | last->next = first; |
| 6104 | } |
| 6105 | sd->groups = groups; |
| 6106 | |
| 6107 | return 0; |
| 6108 | |
| 6109 | fail: |
| 6110 | free_sched_groups(first, 0); |
| 6111 | |
| 6112 | return -ENOMEM; |
| 6113 | } |
| 6114 | |
| 6115 | static int get_group(int cpu, struct sd_data *sdd, struct sched_group **sg) |
| 6116 | { |
| 6117 | struct sched_domain *sd = *per_cpu_ptr(sdd->sd, cpu); |
| 6118 | struct sched_domain *child = sd->child; |
| 6119 | |
| 6120 | if (child) |
| 6121 | cpu = cpumask_first(sched_domain_span(child)); |
| 6122 | |
| 6123 | if (sg) { |
| 6124 | *sg = *per_cpu_ptr(sdd->sg, cpu); |
| 6125 | (*sg)->sgc = *per_cpu_ptr(sdd->sgc, cpu); |
| 6126 | atomic_set(&(*sg)->sgc->ref, 1); /* for claim_allocations */ |
| 6127 | } |
| 6128 | |
| 6129 | return cpu; |
| 6130 | } |
| 6131 | |
| 6132 | /* |
| 6133 | * build_sched_groups will build a circular linked list of the groups |
| 6134 | * covered by the given span, and will set each group's ->cpumask correctly, |
| 6135 | * and ->cpu_capacity to 0. |
| 6136 | * |
| 6137 | * Assumes the sched_domain tree is fully constructed |
| 6138 | */ |
| 6139 | static int |
| 6140 | build_sched_groups(struct sched_domain *sd, int cpu) |
| 6141 | { |
| 6142 | struct sched_group *first = NULL, *last = NULL; |
| 6143 | struct sd_data *sdd = sd->private; |
| 6144 | const struct cpumask *span = sched_domain_span(sd); |
| 6145 | struct cpumask *covered; |
| 6146 | int i; |
| 6147 | |
| 6148 | get_group(cpu, sdd, &sd->groups); |
| 6149 | atomic_inc(&sd->groups->ref); |
| 6150 | |
| 6151 | if (cpu != cpumask_first(span)) |
| 6152 | return 0; |
| 6153 | |
| 6154 | lockdep_assert_held(&sched_domains_mutex); |
| 6155 | covered = sched_domains_tmpmask; |
| 6156 | |
| 6157 | cpumask_clear(covered); |
| 6158 | |
| 6159 | for_each_cpu(i, span) { |
| 6160 | struct sched_group *sg; |
| 6161 | int group, j; |
| 6162 | |
| 6163 | if (cpumask_test_cpu(i, covered)) |
| 6164 | continue; |
| 6165 | |
| 6166 | group = get_group(i, sdd, &sg); |
| 6167 | cpumask_setall(sched_group_mask(sg)); |
| 6168 | |
| 6169 | for_each_cpu(j, span) { |
| 6170 | if (get_group(j, sdd, NULL) != group) |
| 6171 | continue; |
| 6172 | |
| 6173 | cpumask_set_cpu(j, covered); |
| 6174 | cpumask_set_cpu(j, sched_group_cpus(sg)); |
| 6175 | } |
| 6176 | |
| 6177 | if (!first) |
| 6178 | first = sg; |
| 6179 | if (last) |
| 6180 | last->next = sg; |
| 6181 | last = sg; |
| 6182 | } |
| 6183 | last->next = first; |
| 6184 | |
| 6185 | return 0; |
| 6186 | } |
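/*
 * Worked example (illustrative): for a DIE-level domain spanning CPUs
 * 0-3 whose MC children span {0,1} and {2,3}, the loop above creates two
 * groups, one per child span, linked into the ring {0,1} -> {2,3} ->
 * back to the first group.
 */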
| 6187 | |
| 6188 | /* |
| 6189 | * Initialize sched groups cpu_capacity. |
| 6190 | * |
 * cpu_capacity indicates the capacity of a sched group, which is used while
 * distributing load between the different sched groups in a sched domain.
 * Typically cpu_capacity will be the same for all groups in a sched domain,
 * unless there are asymmetries in the topology. If there are asymmetries,
 * a group with more cpu_capacity will pick up more load than a group with
 * less cpu_capacity.
| 6197 | */ |
| 6198 | static void init_sched_groups_capacity(int cpu, struct sched_domain *sd) |
| 6199 | { |
| 6200 | struct sched_group *sg = sd->groups; |
| 6201 | |
| 6202 | WARN_ON(!sg); |
| 6203 | |
| 6204 | do { |
| 6205 | sg->group_weight = cpumask_weight(sched_group_cpus(sg)); |
| 6206 | sg = sg->next; |
| 6207 | } while (sg != sd->groups); |
| 6208 | |
| 6209 | if (cpu != group_balance_cpu(sg)) |
| 6210 | return; |
| 6211 | |
| 6212 | update_group_capacity(sd, cpu); |
| 6213 | atomic_set(&sg->sgc->nr_busy_cpus, sg->group_weight); |
| 6214 | } |
| 6215 | |
| 6216 | /* |
| 6217 | * Initializers for schedule domains |
| 6218 | * Non-inlined to reduce accumulated stack pressure in build_sched_domains() |
| 6219 | */ |
| 6220 | |
| 6221 | static int default_relax_domain_level = -1; |
| 6222 | int sched_domain_level_max; |
| 6223 | |
| 6224 | static int __init setup_relax_domain_level(char *str) |
| 6225 | { |
| 6226 | if (kstrtoint(str, 0, &default_relax_domain_level)) |
| 6227 | pr_warn("Unable to set relax_domain_level\n"); |
| 6228 | |
| 6229 | return 1; |
| 6230 | } |
| 6231 | __setup("relax_domain_level=", setup_relax_domain_level); |
| 6232 | |
| 6233 | static void set_domain_attribute(struct sched_domain *sd, |
| 6234 | struct sched_domain_attr *attr) |
| 6235 | { |
| 6236 | int request; |
| 6237 | |
| 6238 | if (!attr || attr->relax_domain_level < 0) { |
| 6239 | if (default_relax_domain_level < 0) |
| 6240 | return; |
| 6241 | else |
| 6242 | request = default_relax_domain_level; |
| 6243 | } else |
| 6244 | request = attr->relax_domain_level; |
| 6245 | if (request < sd->level) { |
| 6246 | /* turn off idle balance on this domain */ |
| 6247 | sd->flags &= ~(SD_BALANCE_WAKE|SD_BALANCE_NEWIDLE); |
| 6248 | } else { |
| 6249 | /* turn on idle balance on this domain */ |
| 6250 | sd->flags |= (SD_BALANCE_WAKE|SD_BALANCE_NEWIDLE); |
| 6251 | } |
| 6252 | } |
| 6253 | |
| 6254 | static void __sdt_free(const struct cpumask *cpu_map); |
| 6255 | static int __sdt_alloc(const struct cpumask *cpu_map); |
| 6256 | |
| 6257 | static void __free_domain_allocs(struct s_data *d, enum s_alloc what, |
| 6258 | const struct cpumask *cpu_map) |
| 6259 | { |
| 6260 | switch (what) { |
| 6261 | case sa_rootdomain: |
| 6262 | if (!atomic_read(&d->rd->refcount)) |
| 6263 | free_rootdomain(&d->rd->rcu); /* fall through */ |
| 6264 | case sa_sd: |
| 6265 | free_percpu(d->sd); /* fall through */ |
| 6266 | case sa_sd_storage: |
| 6267 | __sdt_free(cpu_map); /* fall through */ |
| 6268 | case sa_none: |
| 6269 | break; |
| 6270 | } |
| 6271 | } |
| 6272 | |
| 6273 | static enum s_alloc __visit_domain_allocation_hell(struct s_data *d, |
| 6274 | const struct cpumask *cpu_map) |
| 6275 | { |
| 6276 | memset(d, 0, sizeof(*d)); |
| 6277 | |
| 6278 | if (__sdt_alloc(cpu_map)) |
| 6279 | return sa_sd_storage; |
| 6280 | d->sd = alloc_percpu(struct sched_domain *); |
| 6281 | if (!d->sd) |
| 6282 | return sa_sd_storage; |
| 6283 | d->rd = alloc_rootdomain(); |
| 6284 | if (!d->rd) |
| 6285 | return sa_sd; |
| 6286 | return sa_rootdomain; |
| 6287 | } |
| 6288 | |
| 6289 | /* |
| 6290 | * NULL the sd_data elements we've used to build the sched_domain and |
| 6291 | * sched_group structure so that the subsequent __free_domain_allocs() |
| 6292 | * will not free the data we're using. |
| 6293 | */ |
| 6294 | static void claim_allocations(int cpu, struct sched_domain *sd) |
| 6295 | { |
| 6296 | struct sd_data *sdd = sd->private; |
| 6297 | |
| 6298 | WARN_ON_ONCE(*per_cpu_ptr(sdd->sd, cpu) != sd); |
| 6299 | *per_cpu_ptr(sdd->sd, cpu) = NULL; |
| 6300 | |
| 6301 | if (atomic_read(&(*per_cpu_ptr(sdd->sg, cpu))->ref)) |
| 6302 | *per_cpu_ptr(sdd->sg, cpu) = NULL; |
| 6303 | |
| 6304 | if (atomic_read(&(*per_cpu_ptr(sdd->sgc, cpu))->ref)) |
| 6305 | *per_cpu_ptr(sdd->sgc, cpu) = NULL; |
| 6306 | } |
| 6307 | |
| 6308 | #ifdef CONFIG_NUMA |
| 6309 | static int sched_domains_numa_levels; |
| 6310 | enum numa_topology_type sched_numa_topology_type; |
| 6311 | static int *sched_domains_numa_distance; |
| 6312 | int sched_max_numa_distance; |
| 6313 | static struct cpumask ***sched_domains_numa_masks; |
| 6314 | static int sched_domains_curr_level; |
| 6315 | #endif |
| 6316 | |
| 6317 | /* |
| 6318 | * SD_flags allowed in topology descriptions. |
| 6319 | * |
| 6320 | * SD_SHARE_CPUCAPACITY - describes SMT topologies |
| 6321 | * SD_SHARE_PKG_RESOURCES - describes shared caches |
| 6322 | * SD_NUMA - describes NUMA topologies |
| 6323 | * SD_SHARE_POWERDOMAIN - describes shared power domain |
| 6324 | * |
| 6325 | * Odd one out: |
| 6326 | * SD_ASYM_PACKING - describes SMT quirks |
| 6327 | */ |
| 6328 | #define TOPOLOGY_SD_FLAGS \ |
| 6329 | (SD_SHARE_CPUCAPACITY | \ |
| 6330 | SD_SHARE_PKG_RESOURCES | \ |
| 6331 | SD_NUMA | \ |
| 6332 | SD_ASYM_PACKING | \ |
| 6333 | SD_SHARE_POWERDOMAIN) |
| 6334 | |
| 6335 | static struct sched_domain * |
| 6336 | sd_init(struct sched_domain_topology_level *tl, int cpu) |
| 6337 | { |
| 6338 | struct sched_domain *sd = *per_cpu_ptr(tl->data.sd, cpu); |
| 6339 | int sd_weight, sd_flags = 0; |
| 6340 | |
| 6341 | #ifdef CONFIG_NUMA |
| 6342 | /* |
| 6343 | * Ugly hack to pass state to sd_numa_mask()... |
| 6344 | */ |
| 6345 | sched_domains_curr_level = tl->numa_level; |
| 6346 | #endif |
| 6347 | |
| 6348 | sd_weight = cpumask_weight(tl->mask(cpu)); |
| 6349 | |
| 6350 | if (tl->sd_flags) |
| 6351 | sd_flags = (*tl->sd_flags)(); |
| 6352 | if (WARN_ONCE(sd_flags & ~TOPOLOGY_SD_FLAGS, |
| 6353 | "wrong sd_flags in topology description\n")) |
| 6354 | sd_flags &= ~TOPOLOGY_SD_FLAGS; |
| 6355 | |
| 6356 | *sd = (struct sched_domain){ |
| 6357 | .min_interval = sd_weight, |
| 6358 | .max_interval = 2*sd_weight, |
| 6359 | .busy_factor = 32, |
| 6360 | .imbalance_pct = 125, |
| 6361 | |
| 6362 | .cache_nice_tries = 0, |
| 6363 | .busy_idx = 0, |
| 6364 | .idle_idx = 0, |
| 6365 | .newidle_idx = 0, |
| 6366 | .wake_idx = 0, |
| 6367 | .forkexec_idx = 0, |
| 6368 | |
| 6369 | .flags = 1*SD_LOAD_BALANCE |
| 6370 | | 1*SD_BALANCE_NEWIDLE |
| 6371 | | 1*SD_BALANCE_EXEC |
| 6372 | | 1*SD_BALANCE_FORK |
| 6373 | | 0*SD_BALANCE_WAKE |
| 6374 | | 1*SD_WAKE_AFFINE |
| 6375 | | 0*SD_SHARE_CPUCAPACITY |
| 6376 | | 0*SD_SHARE_PKG_RESOURCES |
| 6377 | | 0*SD_SERIALIZE |
| 6378 | | 0*SD_PREFER_SIBLING |
| 6379 | | 0*SD_NUMA |
| 6380 | | sd_flags |
| 6381 | , |
| 6382 | |
| 6383 | .last_balance = jiffies, |
| 6384 | .balance_interval = sd_weight, |
| 6385 | .smt_gain = 0, |
| 6386 | .max_newidle_lb_cost = 0, |
| 6387 | .next_decay_max_lb_cost = jiffies, |
| 6388 | #ifdef CONFIG_SCHED_DEBUG |
| 6389 | .name = tl->name, |
| 6390 | #endif |
| 6391 | }; |
| 6392 | |
| 6393 | /* |
| 6394 | * Convert topological properties into behaviour. |
| 6395 | */ |
| 6396 | |
| 6397 | if (sd->flags & SD_SHARE_CPUCAPACITY) { |
| 6398 | sd->flags |= SD_PREFER_SIBLING; |
| 6399 | sd->imbalance_pct = 110; |
| 6400 | sd->smt_gain = 1178; /* ~15% */ |
| 6401 | |
| 6402 | } else if (sd->flags & SD_SHARE_PKG_RESOURCES) { |
| 6403 | sd->imbalance_pct = 117; |
| 6404 | sd->cache_nice_tries = 1; |
| 6405 | sd->busy_idx = 2; |
| 6406 | |
| 6407 | #ifdef CONFIG_NUMA |
| 6408 | } else if (sd->flags & SD_NUMA) { |
| 6409 | sd->cache_nice_tries = 2; |
| 6410 | sd->busy_idx = 3; |
| 6411 | sd->idle_idx = 2; |
| 6412 | |
| 6413 | sd->flags |= SD_SERIALIZE; |
| 6414 | if (sched_domains_numa_distance[tl->numa_level] > RECLAIM_DISTANCE) { |
| 6415 | sd->flags &= ~(SD_BALANCE_EXEC | |
| 6416 | SD_BALANCE_FORK | |
| 6417 | SD_WAKE_AFFINE); |
| 6418 | } |
| 6419 | |
| 6420 | #endif |
| 6421 | } else { |
| 6422 | sd->flags |= SD_PREFER_SIBLING; |
| 6423 | sd->cache_nice_tries = 1; |
| 6424 | sd->busy_idx = 2; |
| 6425 | sd->idle_idx = 1; |
| 6426 | } |
| 6427 | |
| 6428 | sd->private = &tl->data; |
| 6429 | |
| 6430 | return sd; |
| 6431 | } |
| 6432 | |
| 6433 | /* |
| 6434 | * Topology list, bottom-up. |
| 6435 | */ |
| 6436 | static struct sched_domain_topology_level default_topology[] = { |
| 6437 | #ifdef CONFIG_SCHED_SMT |
| 6438 | { cpu_smt_mask, cpu_smt_flags, SD_INIT_NAME(SMT) }, |
| 6439 | #endif |
| 6440 | #ifdef CONFIG_SCHED_MC |
| 6441 | { cpu_coregroup_mask, cpu_core_flags, SD_INIT_NAME(MC) }, |
| 6442 | #endif |
| 6443 | { cpu_cpu_mask, SD_INIT_NAME(DIE) }, |
| 6444 | { NULL, }, |
| 6445 | }; |
| 6446 | |
| 6447 | static struct sched_domain_topology_level *sched_domain_topology = |
| 6448 | default_topology; |
| 6449 | |
| 6450 | #define for_each_sd_topology(tl) \ |
| 6451 | for (tl = sched_domain_topology; tl->mask; tl++) |
| 6452 | |
| 6453 | void set_sched_topology(struct sched_domain_topology_level *tl) |
| 6454 | { |
| 6455 | sched_domain_topology = tl; |
| 6456 | } |
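/*
 * Sketch of an architecture override ('arch_topology' is a hypothetical
 * name); an arch calls this early, before the domains are built, to
 * replace default_topology:
 *
 *	static struct sched_domain_topology_level arch_topology[] = {
 *		{ cpu_smt_mask, cpu_smt_flags, SD_INIT_NAME(SMT) },
 *		{ cpu_cpu_mask, SD_INIT_NAME(DIE) },
 *		{ NULL, },
 *	};
 *
 *	set_sched_topology(arch_topology);
 */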
| 6457 | |
| 6458 | #ifdef CONFIG_NUMA |
| 6459 | |
| 6460 | static const struct cpumask *sd_numa_mask(int cpu) |
| 6461 | { |
| 6462 | return sched_domains_numa_masks[sched_domains_curr_level][cpu_to_node(cpu)]; |
| 6463 | } |
| 6464 | |
| 6465 | static void sched_numa_warn(const char *str) |
| 6466 | { |
static bool done = false;
int i, j;
| 6469 | |
| 6470 | if (done) |
| 6471 | return; |
| 6472 | |
| 6473 | done = true; |
| 6474 | |
| 6475 | printk(KERN_WARNING "ERROR: %s\n\n", str); |
| 6476 | |
| 6477 | for (i = 0; i < nr_node_ids; i++) { |
| 6478 | printk(KERN_WARNING " "); |
| 6479 | for (j = 0; j < nr_node_ids; j++) |
| 6480 | printk(KERN_CONT "%02d ", node_distance(i,j)); |
| 6481 | printk(KERN_CONT "\n"); |
| 6482 | } |
| 6483 | printk(KERN_WARNING "\n"); |
| 6484 | } |
| 6485 | |
| 6486 | bool find_numa_distance(int distance) |
| 6487 | { |
| 6488 | int i; |
| 6489 | |
| 6490 | if (distance == node_distance(0, 0)) |
| 6491 | return true; |
| 6492 | |
| 6493 | for (i = 0; i < sched_domains_numa_levels; i++) { |
| 6494 | if (sched_domains_numa_distance[i] == distance) |
| 6495 | return true; |
| 6496 | } |
| 6497 | |
| 6498 | return false; |
| 6499 | } |
| 6500 | |
| 6501 | /* |
| 6502 | * A system can have three types of NUMA topology: |
| 6503 | * NUMA_DIRECT: all nodes are directly connected, or not a NUMA system |
| 6504 | * NUMA_GLUELESS_MESH: some nodes reachable through intermediary nodes |
| 6505 | * NUMA_BACKPLANE: nodes can reach other nodes through a backplane |
| 6506 | * |
| 6507 | * The difference between a glueless mesh topology and a backplane |
| 6508 | * topology lies in whether communication between not directly |
| 6509 | * connected nodes goes through intermediary nodes (where programs |
| 6510 | * could run), or through backplane controllers. This affects |
| 6511 | * placement of programs. |
| 6512 | * |
| 6513 | * The type of topology can be discerned with the following tests: |
| 6514 | * - If the maximum distance between any nodes is 1 hop, the system |
| 6515 | * is directly connected. |
| 6516 | * - If for two nodes A and B, located N > 1 hops away from each other, |
| 6517 | * there is an intermediary node C, which is < N hops away from both |
| 6518 | * nodes A and B, the system is a glueless mesh. |
| 6519 | */ |
| 6520 | static void init_numa_topology_type(void) |
| 6521 | { |
| 6522 | int a, b, c, n; |
| 6523 | |
| 6524 | n = sched_max_numa_distance; |
| 6525 | |
| 6526 | if (sched_domains_numa_levels <= 1) { |
| 6527 | sched_numa_topology_type = NUMA_DIRECT; |
| 6528 | return; |
| 6529 | } |
| 6530 | |
| 6531 | for_each_online_node(a) { |
| 6532 | for_each_online_node(b) { |
| 6533 | /* Find two nodes furthest removed from each other. */ |
| 6534 | if (node_distance(a, b) < n) |
| 6535 | continue; |
| 6536 | |
| 6537 | /* Is there an intermediary node between a and b? */ |
| 6538 | for_each_online_node(c) { |
| 6539 | if (node_distance(a, c) < n && |
| 6540 | node_distance(b, c) < n) { |
| 6541 | sched_numa_topology_type = |
| 6542 | NUMA_GLUELESS_MESH; |
| 6543 | return; |
| 6544 | } |
| 6545 | } |
| 6546 | |
| 6547 | sched_numa_topology_type = NUMA_BACKPLANE; |
| 6548 | return; |
| 6549 | } |
| 6550 | } |
| 6551 | } |
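/*
 * Worked example (sketch): a two-node box with the distance table
 *
 *	10 20
 *	20 10
 *
 * has a single unique remote distance, so sched_domains_numa_levels ends
 * up 1 and the early check above classifies it NUMA_DIRECT without ever
 * entering the node loops.
 */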
| 6552 | |
| 6553 | static void sched_init_numa(void) |
| 6554 | { |
| 6555 | int next_distance, curr_distance = node_distance(0, 0); |
| 6556 | struct sched_domain_topology_level *tl; |
| 6557 | int level = 0; |
| 6558 | int i, j, k; |
| 6559 | |
| 6560 | sched_domains_numa_distance = kzalloc(sizeof(int) * nr_node_ids, GFP_KERNEL); |
| 6561 | if (!sched_domains_numa_distance) |
| 6562 | return; |
| 6563 | |
| 6564 | /* |
| 6565 | * O(nr_nodes^2) deduplicating selection sort -- in order to find the |
| 6566 | * unique distances in the node_distance() table. |
| 6567 | * |
| 6568 | * Assumes node_distance(0,j) includes all distances in |
| 6569 | * node_distance(i,j) in order to avoid cubic time. |
| 6570 | */ |
| 6571 | next_distance = curr_distance; |
| 6572 | for (i = 0; i < nr_node_ids; i++) { |
| 6573 | for (j = 0; j < nr_node_ids; j++) { |
| 6574 | for (k = 0; k < nr_node_ids; k++) { |
| 6575 | int distance = node_distance(i, k); |
| 6576 | |
| 6577 | if (distance > curr_distance && |
| 6578 | (distance < next_distance || |
| 6579 | next_distance == curr_distance)) |
| 6580 | next_distance = distance; |
| 6581 | |
| 6582 | /* |
 * While not a strong assumption, it would be nice to
 * know about cases where node A is connected to B but
 * B is not equally connected to A.
| 6586 | */ |
| 6587 | if (sched_debug() && node_distance(k, i) != distance) |
| 6588 | sched_numa_warn("Node-distance not symmetric"); |
| 6589 | |
| 6590 | if (sched_debug() && i && !find_numa_distance(distance)) |
| 6591 | sched_numa_warn("Node-0 not representative"); |
| 6592 | } |
| 6593 | if (next_distance != curr_distance) { |
| 6594 | sched_domains_numa_distance[level++] = next_distance; |
| 6595 | sched_domains_numa_levels = level; |
| 6596 | curr_distance = next_distance; |
| 6597 | } else break; |
| 6598 | } |
| 6599 | |
| 6600 | /* |
| 6601 | * In case of sched_debug() we verify the above assumption. |
| 6602 | */ |
| 6603 | if (!sched_debug()) |
| 6604 | break; |
| 6605 | } |
| 6606 | |
| 6607 | if (!level) |
| 6608 | return; |
| 6609 | |
| 6610 | /* |
| 6611 | * 'level' contains the number of unique distances, excluding the |
| 6612 | * identity distance node_distance(i,i). |
| 6613 | * |
| 6614 | * The sched_domains_numa_distance[] array includes the actual distance |
| 6615 | * numbers. |
| 6616 | */ |
| 6617 | |
| 6618 | /* |
| 6619 | * Here, we should temporarily reset sched_domains_numa_levels to 0. |
 * If allocating the sched_domains_numa_masks[][] array fails partway,
 * the array will contain fewer than 'level' members. This could be
 * dangerous when we use it to iterate sched_domains_numa_masks[][]
 * in other functions.
| 6624 | * |
| 6625 | * We reset it to 'level' at the end of this function. |
| 6626 | */ |
| 6627 | sched_domains_numa_levels = 0; |
| 6628 | |
| 6629 | sched_domains_numa_masks = kzalloc(sizeof(void *) * level, GFP_KERNEL); |
| 6630 | if (!sched_domains_numa_masks) |
| 6631 | return; |
| 6632 | |
| 6633 | /* |
| 6634 | * Now for each level, construct a mask per node which contains all |
| 6635 | * cpus of nodes that are that many hops away from us. |
| 6636 | */ |
| 6637 | for (i = 0; i < level; i++) { |
| 6638 | sched_domains_numa_masks[i] = |
| 6639 | kzalloc(nr_node_ids * sizeof(void *), GFP_KERNEL); |
| 6640 | if (!sched_domains_numa_masks[i]) |
| 6641 | return; |
| 6642 | |
| 6643 | for (j = 0; j < nr_node_ids; j++) { |
| 6644 | struct cpumask *mask = kzalloc(cpumask_size(), GFP_KERNEL); |
| 6645 | if (!mask) |
| 6646 | return; |
| 6647 | |
| 6648 | sched_domains_numa_masks[i][j] = mask; |
| 6649 | |
| 6650 | for_each_node(k) { |
| 6651 | if (node_distance(j, k) > sched_domains_numa_distance[i]) |
| 6652 | continue; |
| 6653 | |
| 6654 | cpumask_or(mask, mask, cpumask_of_node(k)); |
| 6655 | } |
| 6656 | } |
| 6657 | } |
| 6658 | |
| 6659 | /* Compute default topology size */ |
| 6660 | for (i = 0; sched_domain_topology[i].mask; i++); |
| 6661 | |
| 6662 | tl = kzalloc((i + level + 1) * |
| 6663 | sizeof(struct sched_domain_topology_level), GFP_KERNEL); |
| 6664 | if (!tl) |
| 6665 | return; |
| 6666 | |
| 6667 | /* |
| 6668 | * Copy the default topology bits.. |
| 6669 | */ |
| 6670 | for (i = 0; sched_domain_topology[i].mask; i++) |
| 6671 | tl[i] = sched_domain_topology[i]; |
| 6672 | |
| 6673 | /* |
| 6674 | * .. and append 'j' levels of NUMA goodness. |
| 6675 | */ |
| 6676 | for (j = 0; j < level; i++, j++) { |
| 6677 | tl[i] = (struct sched_domain_topology_level){ |
| 6678 | .mask = sd_numa_mask, |
| 6679 | .sd_flags = cpu_numa_flags, |
| 6680 | .flags = SDTL_OVERLAP, |
| 6681 | .numa_level = j, |
| 6682 | SD_INIT_NAME(NUMA) |
| 6683 | }; |
| 6684 | } |
| 6685 | |
| 6686 | sched_domain_topology = tl; |
| 6687 | |
| 6688 | sched_domains_numa_levels = level; |
| 6689 | sched_max_numa_distance = sched_domains_numa_distance[level - 1]; |
| 6690 | |
| 6691 | init_numa_topology_type(); |
| 6692 | } |
| 6693 | |
| 6694 | static void sched_domains_numa_masks_set(unsigned int cpu) |
| 6695 | { |
| 6696 | int node = cpu_to_node(cpu); |
| 6697 | int i, j; |
| 6698 | |
| 6699 | for (i = 0; i < sched_domains_numa_levels; i++) { |
| 6700 | for (j = 0; j < nr_node_ids; j++) { |
| 6701 | if (node_distance(j, node) <= sched_domains_numa_distance[i]) |
| 6702 | cpumask_set_cpu(cpu, sched_domains_numa_masks[i][j]); |
| 6703 | } |
| 6704 | } |
| 6705 | } |
| 6706 | |
| 6707 | static void sched_domains_numa_masks_clear(unsigned int cpu) |
| 6708 | { |
| 6709 | int i, j; |
| 6710 | |
| 6711 | for (i = 0; i < sched_domains_numa_levels; i++) { |
| 6712 | for (j = 0; j < nr_node_ids; j++) |
| 6713 | cpumask_clear_cpu(cpu, sched_domains_numa_masks[i][j]); |
| 6714 | } |
| 6715 | } |
| 6716 | |
| 6717 | #else |
| 6718 | static inline void sched_init_numa(void) { } |
| 6719 | static void sched_domains_numa_masks_set(unsigned int cpu) { } |
| 6720 | static void sched_domains_numa_masks_clear(unsigned int cpu) { } |
| 6721 | #endif /* CONFIG_NUMA */ |
| 6722 | |
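/*
 * Allocate the per-cpu sched_domain, sched_group and
 * sched_group_capacity storage for every topology level; undone by
 * __sdt_free() below.
 */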
| 6723 | static int __sdt_alloc(const struct cpumask *cpu_map) |
| 6724 | { |
| 6725 | struct sched_domain_topology_level *tl; |
| 6726 | int j; |
| 6727 | |
| 6728 | for_each_sd_topology(tl) { |
| 6729 | struct sd_data *sdd = &tl->data; |
| 6730 | |
| 6731 | sdd->sd = alloc_percpu(struct sched_domain *); |
| 6732 | if (!sdd->sd) |
| 6733 | return -ENOMEM; |
| 6734 | |
| 6735 | sdd->sg = alloc_percpu(struct sched_group *); |
| 6736 | if (!sdd->sg) |
| 6737 | return -ENOMEM; |
| 6738 | |
| 6739 | sdd->sgc = alloc_percpu(struct sched_group_capacity *); |
| 6740 | if (!sdd->sgc) |
| 6741 | return -ENOMEM; |
| 6742 | |
| 6743 | for_each_cpu(j, cpu_map) { |
| 6744 | struct sched_domain *sd; |
| 6745 | struct sched_group *sg; |
| 6746 | struct sched_group_capacity *sgc; |
| 6747 | |
| 6748 | sd = kzalloc_node(sizeof(struct sched_domain) + cpumask_size(), |
| 6749 | GFP_KERNEL, cpu_to_node(j)); |
| 6750 | if (!sd) |
| 6751 | return -ENOMEM; |
| 6752 | |
| 6753 | *per_cpu_ptr(sdd->sd, j) = sd; |
| 6754 | |
| 6755 | sg = kzalloc_node(sizeof(struct sched_group) + cpumask_size(), |
| 6756 | GFP_KERNEL, cpu_to_node(j)); |
| 6757 | if (!sg) |
| 6758 | return -ENOMEM; |
| 6759 | |
| 6760 | sg->next = sg; |
| 6761 | |
| 6762 | *per_cpu_ptr(sdd->sg, j) = sg; |
| 6763 | |
| 6764 | sgc = kzalloc_node(sizeof(struct sched_group_capacity) + cpumask_size(), |
| 6765 | GFP_KERNEL, cpu_to_node(j)); |
| 6766 | if (!sgc) |
| 6767 | return -ENOMEM; |
| 6768 | |
| 6769 | *per_cpu_ptr(sdd->sgc, j) = sgc; |
| 6770 | } |
| 6771 | } |
| 6772 | |
| 6773 | return 0; |
| 6774 | } |
| 6775 | |
| 6776 | static void __sdt_free(const struct cpumask *cpu_map) |
| 6777 | { |
| 6778 | struct sched_domain_topology_level *tl; |
| 6779 | int j; |
| 6780 | |
| 6781 | for_each_sd_topology(tl) { |
| 6782 | struct sd_data *sdd = &tl->data; |
| 6783 | |
| 6784 | for_each_cpu(j, cpu_map) { |
| 6785 | struct sched_domain *sd; |
| 6786 | |
| 6787 | if (sdd->sd) { |
| 6788 | sd = *per_cpu_ptr(sdd->sd, j); |
| 6789 | if (sd && (sd->flags & SD_OVERLAP)) |
| 6790 | free_sched_groups(sd->groups, 0); |
| 6791 | kfree(*per_cpu_ptr(sdd->sd, j)); |
| 6792 | } |
| 6793 | |
| 6794 | if (sdd->sg) |
| 6795 | kfree(*per_cpu_ptr(sdd->sg, j)); |
| 6796 | if (sdd->sgc) |
| 6797 | kfree(*per_cpu_ptr(sdd->sgc, j)); |
| 6798 | } |
| 6799 | free_percpu(sdd->sd); |
| 6800 | sdd->sd = NULL; |
| 6801 | free_percpu(sdd->sg); |
| 6802 | sdd->sg = NULL; |
| 6803 | free_percpu(sdd->sgc); |
| 6804 | sdd->sgc = NULL; |
| 6805 | } |
| 6806 | } |
| 6807 | |
| 6808 | struct sched_domain *build_sched_domain(struct sched_domain_topology_level *tl, |
| 6809 | const struct cpumask *cpu_map, struct sched_domain_attr *attr, |
| 6810 | struct sched_domain *child, int cpu) |
| 6811 | { |
| 6812 | struct sched_domain *sd = sd_init(tl, cpu); |
| 6813 | if (!sd) |
| 6814 | return child; |
| 6815 | |
| 6816 | cpumask_and(sched_domain_span(sd), cpu_map, tl->mask(cpu)); |
| 6817 | if (child) { |
| 6818 | sd->level = child->level + 1; |
| 6819 | sched_domain_level_max = max(sched_domain_level_max, sd->level); |
| 6820 | child->parent = sd; |
| 6821 | sd->child = child; |
| 6822 | |
| 6823 | if (!cpumask_subset(sched_domain_span(child), |
| 6824 | sched_domain_span(sd))) { |
| 6825 | pr_err("BUG: arch topology borken\n"); |
| 6826 | #ifdef CONFIG_SCHED_DEBUG |
| 6827 | pr_err(" the %s domain not a subset of the %s domain\n", |
| 6828 | child->name, sd->name); |
| 6829 | #endif |
| 6830 | /* Fixup, ensure @sd has at least @child cpus. */ |
| 6831 | cpumask_or(sched_domain_span(sd), |
| 6832 | sched_domain_span(sd), |
| 6833 | sched_domain_span(child)); |
| 6834 | } |
| 6835 | |
| 6836 | } |
| 6837 | set_domain_attribute(sd, attr); |
| 6838 | |
| 6839 | return sd; |
| 6840 | } |
| 6841 | |
| 6842 | /* |
| 6843 | * Build sched domains for a given set of cpus and attach the sched domains |
| 6844 | * to the individual cpus |
| 6845 | */ |
| 6846 | static int build_sched_domains(const struct cpumask *cpu_map, |
| 6847 | struct sched_domain_attr *attr) |
| 6848 | { |
| 6849 | enum s_alloc alloc_state; |
| 6850 | struct sched_domain *sd; |
| 6851 | struct s_data d; |
| 6852 | int i, ret = -ENOMEM; |
| 6853 | |
| 6854 | alloc_state = __visit_domain_allocation_hell(&d, cpu_map); |
| 6855 | if (alloc_state != sa_rootdomain) |
| 6856 | goto error; |
| 6857 | |
| 6858 | /* Set up domains for cpus specified by the cpu_map. */ |
| 6859 | for_each_cpu(i, cpu_map) { |
| 6860 | struct sched_domain_topology_level *tl; |
| 6861 | |
| 6862 | sd = NULL; |
| 6863 | for_each_sd_topology(tl) { |
| 6864 | sd = build_sched_domain(tl, cpu_map, attr, sd, i); |
| 6865 | if (tl == sched_domain_topology) |
| 6866 | *per_cpu_ptr(d.sd, i) = sd; |
| 6867 | if (tl->flags & SDTL_OVERLAP || sched_feat(FORCE_SD_OVERLAP)) |
| 6868 | sd->flags |= SD_OVERLAP; |
| 6869 | if (cpumask_equal(cpu_map, sched_domain_span(sd))) |
| 6870 | break; |
| 6871 | } |
| 6872 | } |
| 6873 | |
| 6874 | /* Build the groups for the domains */ |
| 6875 | for_each_cpu(i, cpu_map) { |
| 6876 | for (sd = *per_cpu_ptr(d.sd, i); sd; sd = sd->parent) { |
| 6877 | sd->span_weight = cpumask_weight(sched_domain_span(sd)); |
| 6878 | if (sd->flags & SD_OVERLAP) { |
| 6879 | if (build_overlap_sched_groups(sd, i)) |
| 6880 | goto error; |
| 6881 | } else { |
| 6882 | if (build_sched_groups(sd, i)) |
| 6883 | goto error; |
| 6884 | } |
| 6885 | } |
| 6886 | } |
| 6887 | |
| 6888 | /* Calculate CPU capacity for physical packages and nodes */ |
| 6889 | for (i = nr_cpumask_bits-1; i >= 0; i--) { |
| 6890 | if (!cpumask_test_cpu(i, cpu_map)) |
| 6891 | continue; |
| 6892 | |
| 6893 | for (sd = *per_cpu_ptr(d.sd, i); sd; sd = sd->parent) { |
| 6894 | claim_allocations(i, sd); |
| 6895 | init_sched_groups_capacity(i, sd); |
| 6896 | } |
| 6897 | } |
| 6898 | |
| 6899 | /* Attach the domains */ |
| 6900 | rcu_read_lock(); |
| 6901 | for_each_cpu(i, cpu_map) { |
| 6902 | sd = *per_cpu_ptr(d.sd, i); |
| 6903 | cpu_attach_domain(sd, d.rd, i); |
| 6904 | } |
| 6905 | rcu_read_unlock(); |
| 6906 | |
| 6907 | ret = 0; |
| 6908 | error: |
| 6909 | __free_domain_allocs(&d, alloc_state, cpu_map); |
| 6910 | return ret; |
| 6911 | } |
| 6912 | |
| 6913 | static cpumask_var_t *doms_cur; /* current sched domains */ |
| 6914 | static int ndoms_cur; /* number of sched domains in 'doms_cur' */ |
static struct sched_domain_attr *dattr_cur; /* attributes of custom domains in 'doms_cur' */
| 6917 | |
| 6918 | /* |
| 6919 | * Special case: If a kmalloc of a doms_cur partition (array of |
| 6920 | * cpumask) fails, then fallback to a single sched domain, |
| 6921 | * as determined by the single cpumask fallback_doms. |
| 6922 | */ |
| 6923 | static cpumask_var_t fallback_doms; |
| 6924 | |
| 6925 | /* |
| 6926 | * arch_update_cpu_topology lets virtualized architectures update the |
| 6927 | * cpu core maps. It is supposed to return 1 if the topology changed |
| 6928 | * or 0 if it stayed the same. |
| 6929 | */ |
| 6930 | int __weak arch_update_cpu_topology(void) |
| 6931 | { |
| 6932 | return 0; |
| 6933 | } |
| 6934 | |
| 6935 | cpumask_var_t *alloc_sched_domains(unsigned int ndoms) |
| 6936 | { |
| 6937 | int i; |
| 6938 | cpumask_var_t *doms; |
| 6939 | |
| 6940 | doms = kmalloc(sizeof(*doms) * ndoms, GFP_KERNEL); |
| 6941 | if (!doms) |
| 6942 | return NULL; |
| 6943 | for (i = 0; i < ndoms; i++) { |
| 6944 | if (!alloc_cpumask_var(&doms[i], GFP_KERNEL)) { |
| 6945 | free_sched_domains(doms, i); |
| 6946 | return NULL; |
| 6947 | } |
| 6948 | } |
| 6949 | return doms; |
| 6950 | } |
| 6951 | |
| 6952 | void free_sched_domains(cpumask_var_t doms[], unsigned int ndoms) |
| 6953 | { |
| 6954 | unsigned int i; |
| 6955 | for (i = 0; i < ndoms; i++) |
| 6956 | free_cpumask_var(doms[i]); |
| 6957 | kfree(doms); |
| 6958 | } |
| 6959 | |
| 6960 | /* |
| 6961 | * Set up scheduler domains and groups. Callers must hold the hotplug lock. |
| 6962 | * For now this just excludes isolated cpus, but could be used to |
| 6963 | * exclude other special cases in the future. |
| 6964 | */ |
| 6965 | static int init_sched_domains(const struct cpumask *cpu_map) |
| 6966 | { |
| 6967 | int err; |
| 6968 | |
| 6969 | arch_update_cpu_topology(); |
| 6970 | ndoms_cur = 1; |
| 6971 | doms_cur = alloc_sched_domains(ndoms_cur); |
| 6972 | if (!doms_cur) |
| 6973 | doms_cur = &fallback_doms; |
| 6974 | cpumask_andnot(doms_cur[0], cpu_map, cpu_isolated_map); |
| 6975 | err = build_sched_domains(doms_cur[0], NULL); |
| 6976 | register_sched_domain_sysctl(); |
| 6977 | |
| 6978 | return err; |
| 6979 | } |
| 6980 | |
| 6981 | /* |
| 6982 | * Detach sched domains from a group of cpus specified in cpu_map |
| 6983 | * These cpus will now be attached to the NULL domain |
| 6984 | */ |
| 6985 | static void detach_destroy_domains(const struct cpumask *cpu_map) |
| 6986 | { |
| 6987 | int i; |
| 6988 | |
| 6989 | rcu_read_lock(); |
| 6990 | for_each_cpu(i, cpu_map) |
| 6991 | cpu_attach_domain(NULL, &def_root_domain, i); |
| 6992 | rcu_read_unlock(); |
| 6993 | } |
| 6994 | |
| 6995 | /* handle null as "default" */ |
| 6996 | static int dattrs_equal(struct sched_domain_attr *cur, int idx_cur, |
| 6997 | struct sched_domain_attr *new, int idx_new) |
| 6998 | { |
| 6999 | struct sched_domain_attr tmp; |
| 7000 | |
| 7001 | /* fast path */ |
| 7002 | if (!new && !cur) |
| 7003 | return 1; |
| 7004 | |
| 7005 | tmp = SD_ATTR_INIT; |
| 7006 | return !memcmp(cur ? (cur + idx_cur) : &tmp, |
| 7007 | new ? (new + idx_new) : &tmp, |
| 7008 | sizeof(struct sched_domain_attr)); |
| 7009 | } |
| 7010 | |
| 7011 | /* |
| 7012 | * Partition sched domains as specified by the 'ndoms_new' |
| 7013 | * cpumasks in the array doms_new[] of cpumasks. This compares |
| 7014 | * doms_new[] to the current sched domain partitioning, doms_cur[]. |
| 7015 | * It destroys each deleted domain and builds each new domain. |
| 7016 | * |
| 7017 | * 'doms_new' is an array of cpumask_var_t's of length 'ndoms_new'. |
| 7018 | * The masks don't intersect (don't overlap.) We should setup one |
| 7019 | * sched domain for each mask. CPUs not in any of the cpumasks will |
| 7020 | * not be load balanced. If the same cpumask appears both in the |
| 7021 | * current 'doms_cur' domains and in the new 'doms_new', we can leave |
| 7022 | * it as it is. |
| 7023 | * |
| 7024 | * The passed in 'doms_new' should be allocated using |
| 7025 | * alloc_sched_domains. This routine takes ownership of it and will |
| 7026 | * free_sched_domains it when done with it. If the caller failed the |
| 7027 | * alloc call, then it can pass in doms_new == NULL && ndoms_new == 1, |
| 7028 | * and partition_sched_domains() will fallback to the single partition |
| 7029 | * 'fallback_doms', it also forces the domains to be rebuilt. |
| 7030 | * |
| 7031 | * If doms_new == NULL it will be replaced with cpu_online_mask. |
| 7032 | * ndoms_new == 0 is a special case for destroying existing domains, |
| 7033 | * and it will not create the default domain. |
| 7034 | * |
| 7035 | * Call with hotplug lock held |
| 7036 | */ |
| 7037 | void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[], |
| 7038 | struct sched_domain_attr *dattr_new) |
| 7039 | { |
| 7040 | int i, j, n; |
| 7041 | int new_topology; |
| 7042 | |
| 7043 | mutex_lock(&sched_domains_mutex); |
| 7044 | |
| 7045 | /* always unregister in case we don't destroy any domains */ |
| 7046 | unregister_sched_domain_sysctl(); |
| 7047 | |
| 7048 | /* Let architecture update cpu core mappings. */ |
| 7049 | new_topology = arch_update_cpu_topology(); |
| 7050 | |
| 7051 | n = doms_new ? ndoms_new : 0; |
| 7052 | |
| 7053 | /* Destroy deleted domains */ |
| 7054 | for (i = 0; i < ndoms_cur; i++) { |
| 7055 | for (j = 0; j < n && !new_topology; j++) { |
| 7056 | if (cpumask_equal(doms_cur[i], doms_new[j]) |
| 7057 | && dattrs_equal(dattr_cur, i, dattr_new, j)) |
| 7058 | goto match1; |
| 7059 | } |
| 7060 | /* no match - a current sched domain not in new doms_new[] */ |
| 7061 | detach_destroy_domains(doms_cur[i]); |
| 7062 | match1: |
| 7063 | ; |
| 7064 | } |
| 7065 | |
| 7066 | n = ndoms_cur; |
| 7067 | if (doms_new == NULL) { |
| 7068 | n = 0; |
| 7069 | doms_new = &fallback_doms; |
| 7070 | cpumask_andnot(doms_new[0], cpu_active_mask, cpu_isolated_map); |
| 7071 | WARN_ON_ONCE(dattr_new); |
| 7072 | } |
| 7073 | |
| 7074 | /* Build new domains */ |
| 7075 | for (i = 0; i < ndoms_new; i++) { |
| 7076 | for (j = 0; j < n && !new_topology; j++) { |
| 7077 | if (cpumask_equal(doms_new[i], doms_cur[j]) |
| 7078 | && dattrs_equal(dattr_new, i, dattr_cur, j)) |
| 7079 | goto match2; |
| 7080 | } |
| 7081 | /* no match - add a new doms_new */ |
| 7082 | build_sched_domains(doms_new[i], dattr_new ? dattr_new + i : NULL); |
| 7083 | match2: |
| 7084 | ; |
| 7085 | } |
| 7086 | |
| 7087 | /* Remember the new sched domains */ |
| 7088 | if (doms_cur != &fallback_doms) |
| 7089 | free_sched_domains(doms_cur, ndoms_cur); |
| 7090 | kfree(dattr_cur); /* kfree(NULL) is safe */ |
| 7091 | doms_cur = doms_new; |
| 7092 | dattr_cur = dattr_new; |
| 7093 | ndoms_cur = ndoms_new; |
| 7094 | |
| 7095 | register_sched_domain_sysctl(); |
| 7096 | |
| 7097 | mutex_unlock(&sched_domains_mutex); |
| 7098 | } |
| 7099 | |
| 7100 | static int num_cpus_frozen; /* used to mark begin/end of suspend/resume */ |
| 7101 | |
| 7102 | /* |
| 7103 | * Update cpusets according to cpu_active mask. If cpusets are |
| 7104 | * disabled, cpuset_update_active_cpus() becomes a simple wrapper |
| 7105 | * around partition_sched_domains(). |
| 7106 | * |
| 7107 | * If we come here as part of a suspend/resume, don't touch cpusets because we |
| 7108 | * want to restore it back to its original state upon resume anyway. |
| 7109 | */ |
| 7110 | static void cpuset_cpu_active(void) |
| 7111 | { |
| 7112 | if (cpuhp_tasks_frozen) { |
| 7113 | /* |
| 7114 | * num_cpus_frozen tracks how many CPUs are involved in suspend |
| 7115 | * resume sequence. As long as this is not the last online |
| 7116 | * operation in the resume sequence, just build a single sched |
| 7117 | * domain, ignoring cpusets. |
| 7118 | */ |
| 7119 | num_cpus_frozen--; |
| 7120 | if (likely(num_cpus_frozen)) { |
| 7121 | partition_sched_domains(1, NULL, NULL); |
| 7122 | return; |
| 7123 | } |
| 7124 | /* |
| 7125 | * This is the last CPU online operation. So fall through and |
| 7126 | * restore the original sched domains by considering the |
| 7127 | * cpuset configurations. |
| 7128 | */ |
| 7129 | } |
| 7130 | cpuset_update_active_cpus(true); |
| 7131 | } |
| 7132 | |
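/*
 * Taking a cpu out of service must not break deadline reservations that
 * were already admitted: __dl_overflow() checks whether the bandwidth
 * allocated in this root domain still fits on the remaining cpus, and
 * the offline attempt is refused with -EBUSY if it does not.
 */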
| 7133 | static int cpuset_cpu_inactive(unsigned int cpu) |
| 7134 | { |
| 7135 | unsigned long flags; |
| 7136 | struct dl_bw *dl_b; |
| 7137 | bool overflow; |
| 7138 | int cpus; |
| 7139 | |
| 7140 | if (!cpuhp_tasks_frozen) { |
| 7141 | rcu_read_lock_sched(); |
| 7142 | dl_b = dl_bw_of(cpu); |
| 7143 | |
| 7144 | raw_spin_lock_irqsave(&dl_b->lock, flags); |
| 7145 | cpus = dl_bw_cpus(cpu); |
| 7146 | overflow = __dl_overflow(dl_b, cpus, 0, 0); |
| 7147 | raw_spin_unlock_irqrestore(&dl_b->lock, flags); |
| 7148 | |
| 7149 | rcu_read_unlock_sched(); |
| 7150 | |
| 7151 | if (overflow) |
| 7152 | return -EBUSY; |
| 7153 | cpuset_update_active_cpus(false); |
| 7154 | } else { |
| 7155 | num_cpus_frozen++; |
| 7156 | partition_sched_domains(1, NULL, NULL); |
| 7157 | } |
| 7158 | return 0; |
| 7159 | } |
| 7160 | |
| 7161 | int sched_cpu_activate(unsigned int cpu) |
| 7162 | { |
| 7163 | struct rq *rq = cpu_rq(cpu); |
| 7164 | unsigned long flags; |
| 7165 | |
| 7166 | set_cpu_active(cpu, true); |
| 7167 | |
| 7168 | if (sched_smp_initialized) { |
| 7169 | sched_domains_numa_masks_set(cpu); |
| 7170 | cpuset_cpu_active(); |
| 7171 | } |
| 7172 | |
| 7173 | /* |
| 7174 | * Put the rq online, if not already. This happens: |
| 7175 | * |
| 7176 | * 1) In the early boot process, because we build the real domains |
| 7177 | * after all cpus have been brought up. |
| 7178 | * |
| 7179 | * 2) At runtime, if cpuset_cpu_active() fails to rebuild the |
| 7180 | * domains. |
| 7181 | */ |
| 7182 | raw_spin_lock_irqsave(&rq->lock, flags); |
| 7183 | if (rq->rd) { |
| 7184 | BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span)); |
| 7185 | set_rq_online(rq); |
| 7186 | } |
| 7187 | raw_spin_unlock_irqrestore(&rq->lock, flags); |
| 7188 | |
| 7189 | update_max_interval(); |
| 7190 | |
| 7191 | return 0; |
| 7192 | } |
| 7193 | |
| 7194 | int sched_cpu_deactivate(unsigned int cpu) |
| 7195 | { |
| 7196 | int ret; |
| 7197 | |
| 7198 | set_cpu_active(cpu, false); |
| 7199 | /* |
| 7200 | * We've cleared cpu_active_mask, wait for all preempt-disabled and RCU |
| 7201 | * users of this state to go away such that all new such users will |
| 7202 | * observe it. |
| 7203 | * |
| 7204 | * For CONFIG_PREEMPT we have preemptible RCU and its sync_rcu() might |
| 7205 | * not imply sync_sched(), so wait for both. |
| 7206 | * |
| 7207 | * Do sync before park smpboot threads to take care the rcu boost case. |
| 7208 | */ |
| 7209 | if (IS_ENABLED(CONFIG_PREEMPT)) |
| 7210 | synchronize_rcu_mult(call_rcu, call_rcu_sched); |
| 7211 | else |
| 7212 | synchronize_rcu(); |
| 7213 | |
| 7214 | if (!sched_smp_initialized) |
| 7215 | return 0; |
| 7216 | |
| 7217 | ret = cpuset_cpu_inactive(cpu); |
| 7218 | if (ret) { |
| 7219 | set_cpu_active(cpu, true); |
| 7220 | return ret; |
| 7221 | } |
| 7222 | sched_domains_numa_masks_clear(cpu); |
| 7223 | return 0; |
| 7224 | } |
| 7225 | |
| 7226 | static void sched_rq_cpu_starting(unsigned int cpu) |
| 7227 | { |
| 7228 | struct rq *rq = cpu_rq(cpu); |
| 7229 | |
| 7230 | rq->calc_load_update = calc_load_update; |
| 7231 | account_reset_rq(rq); |
| 7232 | update_max_interval(); |
| 7233 | } |
| 7234 | |
| 7235 | int sched_cpu_starting(unsigned int cpu) |
| 7236 | { |
| 7237 | set_cpu_rq_start_time(cpu); |
| 7238 | sched_rq_cpu_starting(cpu); |
| 7239 | return 0; |
| 7240 | } |
| 7241 | |
| 7242 | #ifdef CONFIG_HOTPLUG_CPU |
| 7243 | int sched_cpu_dying(unsigned int cpu) |
| 7244 | { |
| 7245 | struct rq *rq = cpu_rq(cpu); |
| 7246 | unsigned long flags; |
| 7247 | |
| 7248 | /* Handle pending wakeups and then migrate everything off */ |
| 7249 | sched_ttwu_pending(); |
| 7250 | raw_spin_lock_irqsave(&rq->lock, flags); |
| 7251 | if (rq->rd) { |
| 7252 | BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span)); |
| 7253 | set_rq_offline(rq); |
| 7254 | } |
| 7255 | migrate_tasks(rq); |
| 7256 | BUG_ON(rq->nr_running != 1); |
| 7257 | raw_spin_unlock_irqrestore(&rq->lock, flags); |
| 7258 | calc_load_migrate(rq); |
| 7259 | update_max_interval(); |
| 7260 | nohz_balance_exit_idle(cpu); |
| 7261 | hrtick_clear(rq); |
| 7262 | return 0; |
| 7263 | } |
| 7264 | #endif |
| 7265 | |
| 7266 | void __init sched_init_smp(void) |
| 7267 | { |
| 7268 | cpumask_var_t non_isolated_cpus; |
| 7269 | |
| 7270 | alloc_cpumask_var(&non_isolated_cpus, GFP_KERNEL); |
| 7271 | alloc_cpumask_var(&fallback_doms, GFP_KERNEL); |
| 7272 | |
| 7273 | sched_init_numa(); |
| 7274 | |
| 7275 | /* |
| 7276 | * There's no userspace yet to cause hotplug operations; hence all the |
| 7277 | * cpu masks are stable and all blatant races in the below code cannot |
| 7278 | * happen. |
| 7279 | */ |
| 7280 | mutex_lock(&sched_domains_mutex); |
| 7281 | init_sched_domains(cpu_active_mask); |
| 7282 | cpumask_andnot(non_isolated_cpus, cpu_possible_mask, cpu_isolated_map); |
| 7283 | if (cpumask_empty(non_isolated_cpus)) |
| 7284 | cpumask_set_cpu(smp_processor_id(), non_isolated_cpus); |
| 7285 | mutex_unlock(&sched_domains_mutex); |
| 7286 | |
| 7287 | /* Move init over to a non-isolated CPU */ |
| 7288 | if (set_cpus_allowed_ptr(current, non_isolated_cpus) < 0) |
| 7289 | BUG(); |
| 7290 | sched_init_granularity(); |
| 7291 | free_cpumask_var(non_isolated_cpus); |
| 7292 | |
| 7293 | init_sched_rt_class(); |
| 7294 | init_sched_dl_class(); |
| 7295 | sched_smp_initialized = true; |
| 7296 | } |
| 7297 | |
| 7298 | static int __init migration_init(void) |
| 7299 | { |
| 7300 | sched_rq_cpu_starting(smp_processor_id()); |
| 7301 | return 0; |
| 7302 | } |
| 7303 | early_initcall(migration_init); |
| 7304 | |
| 7305 | #else |
| 7306 | void __init sched_init_smp(void) |
| 7307 | { |
| 7308 | sched_init_granularity(); |
| 7309 | } |
| 7310 | #endif /* CONFIG_SMP */ |
| 7311 | |
| 7312 | int in_sched_functions(unsigned long addr) |
| 7313 | { |
| 7314 | return in_lock_functions(addr) || |
| 7315 | (addr >= (unsigned long)__sched_text_start |
| 7316 | && addr < (unsigned long)__sched_text_end); |
| 7317 | } |
| 7318 | |
| 7319 | #ifdef CONFIG_CGROUP_SCHED |
| 7320 | /* |
| 7321 | * Default task group. |
| 7322 | * Every task in system belongs to this group at bootup. |
| 7323 | */ |
| 7324 | struct task_group root_task_group; |
| 7325 | LIST_HEAD(task_groups); |
| 7326 | |
| 7327 | /* Cacheline aligned slab cache for task_group */ |
| 7328 | static struct kmem_cache *task_group_cache __read_mostly; |
| 7329 | #endif |
| 7330 | |
| 7331 | DECLARE_PER_CPU(cpumask_var_t, load_balance_mask); |
| 7332 | |
| 7333 | void __init sched_init(void) |
| 7334 | { |
| 7335 | int i, j; |
| 7336 | unsigned long alloc_size = 0, ptr; |
| 7337 | |
| 7338 | #ifdef CONFIG_FAIR_GROUP_SCHED |
| 7339 | alloc_size += 2 * nr_cpu_ids * sizeof(void **); |
| 7340 | #endif |
| 7341 | #ifdef CONFIG_RT_GROUP_SCHED |
| 7342 | alloc_size += 2 * nr_cpu_ids * sizeof(void **); |
| 7343 | #endif |
| 7344 | if (alloc_size) { |
| 7345 | ptr = (unsigned long)kzalloc(alloc_size, GFP_NOWAIT); |
| 7346 | |
| 7347 | #ifdef CONFIG_FAIR_GROUP_SCHED |
| 7348 | root_task_group.se = (struct sched_entity **)ptr; |
| 7349 | ptr += nr_cpu_ids * sizeof(void **); |
| 7350 | |
| 7351 | root_task_group.cfs_rq = (struct cfs_rq **)ptr; |
| 7352 | ptr += nr_cpu_ids * sizeof(void **); |
| 7353 | |
| 7354 | #endif /* CONFIG_FAIR_GROUP_SCHED */ |
| 7355 | #ifdef CONFIG_RT_GROUP_SCHED |
| 7356 | root_task_group.rt_se = (struct sched_rt_entity **)ptr; |
| 7357 | ptr += nr_cpu_ids * sizeof(void **); |
| 7358 | |
| 7359 | root_task_group.rt_rq = (struct rt_rq **)ptr; |
| 7360 | ptr += nr_cpu_ids * sizeof(void **); |
| 7361 | |
| 7362 | #endif /* CONFIG_RT_GROUP_SCHED */ |
| 7363 | } |
| 7364 | #ifdef CONFIG_CPUMASK_OFFSTACK |
| 7365 | for_each_possible_cpu(i) { |
| 7366 | per_cpu(load_balance_mask, i) = (cpumask_var_t)kzalloc_node( |
| 7367 | cpumask_size(), GFP_KERNEL, cpu_to_node(i)); |
| 7368 | } |
| 7369 | #endif /* CONFIG_CPUMASK_OFFSTACK */ |
| 7370 | |
| 7371 | init_rt_bandwidth(&def_rt_bandwidth, |
| 7372 | global_rt_period(), global_rt_runtime()); |
| 7373 | init_dl_bandwidth(&def_dl_bandwidth, |
| 7374 | global_rt_period(), global_rt_runtime()); |
| 7375 | |
| 7376 | #ifdef CONFIG_SMP |
| 7377 | init_defrootdomain(); |
| 7378 | #endif |
| 7379 | |
| 7380 | #ifdef CONFIG_RT_GROUP_SCHED |
| 7381 | init_rt_bandwidth(&root_task_group.rt_bandwidth, |
| 7382 | global_rt_period(), global_rt_runtime()); |
| 7383 | #endif /* CONFIG_RT_GROUP_SCHED */ |
| 7384 | |
| 7385 | #ifdef CONFIG_CGROUP_SCHED |
| 7386 | task_group_cache = KMEM_CACHE(task_group, 0); |
| 7387 | |
| 7388 | list_add(&root_task_group.list, &task_groups); |
| 7389 | INIT_LIST_HEAD(&root_task_group.children); |
| 7390 | INIT_LIST_HEAD(&root_task_group.siblings); |
| 7391 | autogroup_init(&init_task); |
| 7392 | #endif /* CONFIG_CGROUP_SCHED */ |
| 7393 | |
| 7394 | for_each_possible_cpu(i) { |
| 7395 | struct rq *rq; |
| 7396 | |
| 7397 | rq = cpu_rq(i); |
| 7398 | raw_spin_lock_init(&rq->lock); |
| 7399 | rq->nr_running = 0; |
| 7400 | rq->calc_load_active = 0; |
| 7401 | rq->calc_load_update = jiffies + LOAD_FREQ; |
| 7402 | init_cfs_rq(&rq->cfs); |
| 7403 | init_rt_rq(&rq->rt); |
| 7404 | init_dl_rq(&rq->dl); |
| 7405 | #ifdef CONFIG_FAIR_GROUP_SCHED |
| 7406 | root_task_group.shares = ROOT_TASK_GROUP_LOAD; |
| 7407 | INIT_LIST_HEAD(&rq->leaf_cfs_rq_list); |
| 7408 | /* |
| 7409 | * How much cpu bandwidth does root_task_group get? |
| 7410 | * |
| 7411 | * In case of task-groups formed thr' the cgroup filesystem, it |
| 7412 | * gets 100% of the cpu resources in the system. This overall |
| 7413 | * system cpu resource is divided among the tasks of |
| 7414 | * root_task_group and its child task-groups in a fair manner, |
| 7415 | * based on each entity's (task or task-group's) weight |
| 7416 | * (se->load.weight). |
| 7417 | * |
| 7418 | * In other words, if root_task_group has 10 tasks of weight |
| 7419 | * 1024) and two child groups A0 and A1 (of weight 1024 each), |
| 7420 | * then A0's share of the cpu resource is: |
| 7421 | * |
| 7422 | * A0's bandwidth = 1024 / (10*1024 + 1024 + 1024) = 8.33% |
| 7423 | * |
| 7424 | * We achieve this by letting root_task_group's tasks sit |
| 7425 | * directly in rq->cfs (i.e root_task_group->se[] = NULL). |
| 7426 | */ |
| 7427 | init_cfs_bandwidth(&root_task_group.cfs_bandwidth); |
| 7428 | init_tg_cfs_entry(&root_task_group, &rq->cfs, NULL, i, NULL); |
| 7429 | #endif /* CONFIG_FAIR_GROUP_SCHED */ |
| 7430 | |
| 7431 | rq->rt.rt_runtime = def_rt_bandwidth.rt_runtime; |
| 7432 | #ifdef CONFIG_RT_GROUP_SCHED |
| 7433 | init_tg_rt_entry(&root_task_group, &rq->rt, NULL, i, NULL); |
| 7434 | #endif |
| 7435 | |
| 7436 | for (j = 0; j < CPU_LOAD_IDX_MAX; j++) |
| 7437 | rq->cpu_load[j] = 0; |
| 7438 | |
| 7439 | #ifdef CONFIG_SMP |
| 7440 | rq->sd = NULL; |
| 7441 | rq->rd = NULL; |
| 7442 | rq->cpu_capacity = rq->cpu_capacity_orig = SCHED_CAPACITY_SCALE; |
| 7443 | rq->balance_callback = NULL; |
| 7444 | rq->active_balance = 0; |
| 7445 | rq->next_balance = jiffies; |
| 7446 | rq->push_cpu = 0; |
| 7447 | rq->cpu = i; |
| 7448 | rq->online = 0; |
| 7449 | rq->idle_stamp = 0; |
| 7450 | rq->avg_idle = 2*sysctl_sched_migration_cost; |
| 7451 | rq->max_idle_balance_cost = sysctl_sched_migration_cost; |
| 7452 | |
| 7453 | INIT_LIST_HEAD(&rq->cfs_tasks); |
| 7454 | |
| 7455 | rq_attach_root(rq, &def_root_domain); |
| 7456 | #ifdef CONFIG_NO_HZ_COMMON |
| 7457 | rq->last_load_update_tick = jiffies; |
| 7458 | rq->nohz_flags = 0; |
| 7459 | #endif |
| 7460 | #ifdef CONFIG_NO_HZ_FULL |
| 7461 | rq->last_sched_tick = 0; |
| 7462 | #endif |
| 7463 | #endif /* CONFIG_SMP */ |
| 7464 | init_rq_hrtick(rq); |
| 7465 | atomic_set(&rq->nr_iowait, 0); |
| 7466 | } |
| 7467 | |
| 7468 | set_load_weight(&init_task); |
| 7469 | |
| 7470 | #ifdef CONFIG_PREEMPT_NOTIFIERS |
| 7471 | INIT_HLIST_HEAD(&init_task.preempt_notifiers); |
| 7472 | #endif |
| 7473 | |
| 7474 | /* |
| 7475 | * The boot idle thread does lazy MMU switching as well: |
| 7476 | */ |
| 7477 | atomic_inc(&init_mm.mm_count); |
| 7478 | enter_lazy_tlb(&init_mm, current); |
| 7479 | |
| 7480 | /* |
| 7481 | * During early bootup we pretend to be a normal task: |
| 7482 | */ |
| 7483 | current->sched_class = &fair_sched_class; |
| 7484 | |
| 7485 | /* |
| 7486 | * Make us the idle thread. Technically, schedule() should not be |
| 7487 | * called from this thread, however somewhere below it might be, |
| 7488 | * but because we are the idle thread, we just pick up running again |
| 7489 | * when this runqueue becomes "idle". |
| 7490 | */ |
| 7491 | init_idle(current, smp_processor_id()); |
| 7492 | |
| 7493 | calc_load_update = jiffies + LOAD_FREQ; |
| 7494 | |
| 7495 | #ifdef CONFIG_SMP |
| 7496 | zalloc_cpumask_var(&sched_domains_tmpmask, GFP_NOWAIT); |
| 7497 | /* May be allocated at isolcpus cmdline parse time */ |
| 7498 | if (cpu_isolated_map == NULL) |
| 7499 | zalloc_cpumask_var(&cpu_isolated_map, GFP_NOWAIT); |
| 7500 | idle_thread_set_boot_cpu(); |
| 7501 | set_cpu_rq_start_time(smp_processor_id()); |
| 7502 | #endif |
| 7503 | init_sched_fair_class(); |
| 7504 | |
| 7505 | init_schedstats(); |
| 7506 | |
| 7507 | scheduler_running = 1; |
| 7508 | } |
| 7509 | |
| 7510 | #ifdef CONFIG_DEBUG_ATOMIC_SLEEP |
| 7511 | static inline int preempt_count_equals(int preempt_offset) |
| 7512 | { |
| 7513 | int nested = preempt_count() + rcu_preempt_depth(); |
| 7514 | |
| 7515 | return (nested == preempt_offset); |
| 7516 | } |
| 7517 | |
| 7518 | void __might_sleep(const char *file, int line, int preempt_offset) |
| 7519 | { |
| 7520 | /* |
| 7521 | * Blocking primitives will set (and therefore destroy) current->state, |
| 7522 | * since we will exit with TASK_RUNNING make sure we enter with it, |
| 7523 | * otherwise we will destroy state. |
| 7524 | */ |
| 7525 | WARN_ONCE(current->state != TASK_RUNNING && current->task_state_change, |
| 7526 | "do not call blocking ops when !TASK_RUNNING; " |
| 7527 | "state=%lx set at [<%p>] %pS\n", |
| 7528 | current->state, |
| 7529 | (void *)current->task_state_change, |
| 7530 | (void *)current->task_state_change); |
| 7531 | |
| 7532 | ___might_sleep(file, line, preempt_offset); |
| 7533 | } |
| 7534 | EXPORT_SYMBOL(__might_sleep); |
| 7535 | |
| 7536 | void ___might_sleep(const char *file, int line, int preempt_offset) |
| 7537 | { |
| 7538 | static unsigned long prev_jiffy; /* ratelimiting */ |
| 7539 | |
| 7540 | rcu_sleep_check(); /* WARN_ON_ONCE() by default, no rate limit reqd. */ |
| 7541 | if ((preempt_count_equals(preempt_offset) && !irqs_disabled() && |
| 7542 | !is_idle_task(current)) || |
| 7543 | system_state != SYSTEM_RUNNING || oops_in_progress) |
| 7544 | return; |
| 7545 | if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy) |
| 7546 | return; |
| 7547 | prev_jiffy = jiffies; |
| 7548 | |
| 7549 | printk(KERN_ERR |
| 7550 | "BUG: sleeping function called from invalid context at %s:%d\n", |
| 7551 | file, line); |
| 7552 | printk(KERN_ERR |
| 7553 | "in_atomic(): %d, irqs_disabled(): %d, pid: %d, name: %s\n", |
| 7554 | in_atomic(), irqs_disabled(), |
| 7555 | current->pid, current->comm); |
| 7556 | |
| 7557 | if (task_stack_end_corrupted(current)) |
| 7558 | printk(KERN_EMERG "Thread overran stack, or stack corrupted\n"); |
| 7559 | |
| 7560 | debug_show_held_locks(current); |
| 7561 | if (irqs_disabled()) |
| 7562 | print_irqtrace_events(current); |
| 7563 | #ifdef CONFIG_DEBUG_PREEMPT |
| 7564 | if (!preempt_count_equals(preempt_offset)) { |
| 7565 | pr_err("Preemption disabled at:"); |
| 7566 | print_ip_sym(current->preempt_disable_ip); |
| 7567 | pr_cont("\n"); |
| 7568 | } |
| 7569 | #endif |
| 7570 | dump_stack(); |
| 7571 | } |
| 7572 | EXPORT_SYMBOL(___might_sleep); |
| 7573 | #endif |
| 7574 | |
| 7575 | #ifdef CONFIG_MAGIC_SYSRQ |
| 7576 | void normalize_rt_tasks(void) |
| 7577 | { |
| 7578 | struct task_struct *g, *p; |
| 7579 | struct sched_attr attr = { |
| 7580 | .sched_policy = SCHED_NORMAL, |
| 7581 | }; |
| 7582 | |
| 7583 | read_lock(&tasklist_lock); |
| 7584 | for_each_process_thread(g, p) { |
| 7585 | /* |
| 7586 | * Only normalize user tasks: |
| 7587 | */ |
| 7588 | if (p->flags & PF_KTHREAD) |
| 7589 | continue; |
| 7590 | |
| 7591 | p->se.exec_start = 0; |
| 7592 | #ifdef CONFIG_SCHEDSTATS |
| 7593 | p->se.statistics.wait_start = 0; |
| 7594 | p->se.statistics.sleep_start = 0; |
| 7595 | p->se.statistics.block_start = 0; |
| 7596 | #endif |
| 7597 | |
| 7598 | if (!dl_task(p) && !rt_task(p)) { |
| 7599 | /* |
| 7600 | * Renice negative nice level userspace |
| 7601 | * tasks back to 0: |
| 7602 | */ |
| 7603 | if (task_nice(p) < 0) |
| 7604 | set_user_nice(p, 0); |
| 7605 | continue; |
| 7606 | } |
| 7607 | |
| 7608 | __sched_setscheduler(p, &attr, false, false); |
| 7609 | } |
| 7610 | read_unlock(&tasklist_lock); |
| 7611 | } |
| 7612 | |
| 7613 | #endif /* CONFIG_MAGIC_SYSRQ */ |
| 7614 | |
| 7615 | #if defined(CONFIG_IA64) || defined(CONFIG_KGDB_KDB) |
| 7616 | /* |
| 7617 | * These functions are only useful for the IA64 MCA handling, or kdb. |
| 7618 | * |
| 7619 | * They can only be called when the whole system has been |
| 7620 | * stopped - every CPU needs to be quiescent, and no scheduling |
| 7621 | * activity can take place. Using them for anything else would |
| 7622 | * be a serious bug, and as a result, they aren't even visible |
| 7623 | * under any other configuration. |
| 7624 | */ |
| 7625 | |
| 7626 | /** |
| 7627 | * curr_task - return the current task for a given cpu. |
| 7628 | * @cpu: the processor in question. |
| 7629 | * |
| 7630 | * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED! |
| 7631 | * |
| 7632 | * Return: The current task for @cpu. |
| 7633 | */ |
| 7634 | struct task_struct *curr_task(int cpu) |
| 7635 | { |
| 7636 | return cpu_curr(cpu); |
| 7637 | } |
| 7638 | |
| 7639 | #endif /* defined(CONFIG_IA64) || defined(CONFIG_KGDB_KDB) */ |
| 7640 | |
| 7641 | #ifdef CONFIG_IA64 |
| 7642 | /** |
| 7643 | * set_curr_task - set the current task for a given cpu. |
| 7644 | * @cpu: the processor in question. |
| 7645 | * @p: the task pointer to set. |
| 7646 | * |
| 7647 | * Description: This function must only be used when non-maskable interrupts |
| 7648 | * are serviced on a separate stack. It allows the architecture to switch the |
| 7649 | * notion of the current task on a cpu in a non-blocking manner. This function |
* must be called with all CPUs synchronized, and interrupts disabled; the
* caller must save the original value of the current task (see
* curr_task() above) and restore that value before reenabling interrupts and
| 7653 | * re-starting the system. |
| 7654 | * |
| 7655 | * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED! |
| 7656 | */ |
| 7657 | void set_curr_task(int cpu, struct task_struct *p) |
| 7658 | { |
| 7659 | cpu_curr(cpu) = p; |
| 7660 | } |
| 7661 | |
| 7662 | #endif |
| 7663 | |
| 7664 | #ifdef CONFIG_CGROUP_SCHED |
| 7665 | /* task_group_lock serializes the addition/removal of task groups */ |
| 7666 | static DEFINE_SPINLOCK(task_group_lock); |
| 7667 | |
| 7668 | static void sched_free_group(struct task_group *tg) |
| 7669 | { |
| 7670 | free_fair_sched_group(tg); |
| 7671 | free_rt_sched_group(tg); |
| 7672 | autogroup_free(tg); |
| 7673 | kmem_cache_free(task_group_cache, tg); |
| 7674 | } |
| 7675 | |
| 7676 | /* allocate runqueue etc for a new task group */ |
| 7677 | struct task_group *sched_create_group(struct task_group *parent) |
| 7678 | { |
| 7679 | struct task_group *tg; |
| 7680 | |
| 7681 | tg = kmem_cache_alloc(task_group_cache, GFP_KERNEL | __GFP_ZERO); |
| 7682 | if (!tg) |
| 7683 | return ERR_PTR(-ENOMEM); |
| 7684 | |
| 7685 | if (!alloc_fair_sched_group(tg, parent)) |
| 7686 | goto err; |
| 7687 | |
| 7688 | if (!alloc_rt_sched_group(tg, parent)) |
| 7689 | goto err; |
| 7690 | |
| 7691 | return tg; |
| 7692 | |
| 7693 | err: |
| 7694 | sched_free_group(tg); |
| 7695 | return ERR_PTR(-ENOMEM); |
| 7696 | } |
| 7697 | |
| 7698 | void sched_online_group(struct task_group *tg, struct task_group *parent) |
| 7699 | { |
| 7700 | unsigned long flags; |
| 7701 | |
| 7702 | spin_lock_irqsave(&task_group_lock, flags); |
| 7703 | list_add_rcu(&tg->list, &task_groups); |
| 7704 | |
| 7705 | WARN_ON(!parent); /* root should already exist */ |
| 7706 | |
| 7707 | tg->parent = parent; |
| 7708 | INIT_LIST_HEAD(&tg->children); |
| 7709 | list_add_rcu(&tg->siblings, &parent->children); |
| 7710 | spin_unlock_irqrestore(&task_group_lock, flags); |
| 7711 | } |
| 7712 | |
| 7713 | /* rcu callback to free various structures associated with a task group */ |
| 7714 | static void sched_free_group_rcu(struct rcu_head *rhp) |
| 7715 | { |
| 7716 | /* now it should be safe to free those cfs_rqs */ |
| 7717 | sched_free_group(container_of(rhp, struct task_group, rcu)); |
| 7718 | } |
| 7719 | |
| 7720 | void sched_destroy_group(struct task_group *tg) |
| 7721 | { |
/* wait for possible concurrent references to cfs_rqs to complete */
| 7723 | call_rcu(&tg->rcu, sched_free_group_rcu); |
| 7724 | } |
| 7725 | |
| 7726 | void sched_offline_group(struct task_group *tg) |
| 7727 | { |
| 7728 | unsigned long flags; |
| 7729 | |
| 7730 | /* end participation in shares distribution */ |
| 7731 | unregister_fair_sched_group(tg); |
| 7732 | |
| 7733 | spin_lock_irqsave(&task_group_lock, flags); |
| 7734 | list_del_rcu(&tg->list); |
| 7735 | list_del_rcu(&tg->siblings); |
| 7736 | spin_unlock_irqrestore(&task_group_lock, flags); |
| 7737 | } |
| 7738 | |
/*
 * Change a task's runqueue when it moves between groups.
 *
 * The caller of this function should have put the task in its new group
 * by now. This function just updates tsk->se.cfs_rq and tsk->se.parent to
 * reflect its new group.
 */
| 7744 | void sched_move_task(struct task_struct *tsk) |
| 7745 | { |
| 7746 | struct task_group *tg; |
| 7747 | int queued, running; |
| 7748 | struct rq_flags rf; |
| 7749 | struct rq *rq; |
| 7750 | |
| 7751 | rq = task_rq_lock(tsk, &rf); |
| 7752 | |
| 7753 | running = task_current(rq, tsk); |
| 7754 | queued = task_on_rq_queued(tsk); |
| 7755 | |
| 7756 | if (queued) |
| 7757 | dequeue_task(rq, tsk, DEQUEUE_SAVE | DEQUEUE_MOVE); |
| 7758 | if (unlikely(running)) |
| 7759 | put_prev_task(rq, tsk); |
| 7760 | |
| 7761 | /* |
| 7762 | * All callers are synchronized by task_rq_lock(); we do not use RCU |
| 7763 | * which is pointless here. Thus, we pass "true" to task_css_check() |
| 7764 | * to prevent lockdep warnings. |
| 7765 | */ |
| 7766 | tg = container_of(task_css_check(tsk, cpu_cgrp_id, true), |
| 7767 | struct task_group, css); |
| 7768 | tg = autogroup_task_group(tsk, tg); |
| 7769 | tsk->sched_task_group = tg; |
| 7770 | |
| 7771 | #ifdef CONFIG_FAIR_GROUP_SCHED |
| 7772 | if (tsk->sched_class->task_move_group) |
| 7773 | tsk->sched_class->task_move_group(tsk); |
| 7774 | else |
| 7775 | #endif |
| 7776 | set_task_rq(tsk, task_cpu(tsk)); |
| 7777 | |
| 7778 | if (unlikely(running)) |
| 7779 | tsk->sched_class->set_curr_task(rq); |
| 7780 | if (queued) |
| 7781 | enqueue_task(rq, tsk, ENQUEUE_RESTORE | ENQUEUE_MOVE); |
| 7782 | |
| 7783 | task_rq_unlock(rq, tsk, &rf); |
| 7784 | } |
| 7785 | #endif /* CONFIG_CGROUP_SCHED */ |
| 7786 | |
| 7787 | #ifdef CONFIG_RT_GROUP_SCHED |
| 7788 | /* |
| 7789 | * Ensure that the real time constraints are schedulable. |
| 7790 | */ |
| 7791 | static DEFINE_MUTEX(rt_constraints_mutex); |
| 7792 | |
| 7793 | /* Must be called with tasklist_lock held */ |
| 7794 | static inline int tg_has_rt_tasks(struct task_group *tg) |
| 7795 | { |
| 7796 | struct task_struct *g, *p; |
| 7797 | |
| 7798 | /* |
| 7799 | * Autogroups do not have RT tasks; see autogroup_create(). |
| 7800 | */ |
| 7801 | if (task_group_is_autogroup(tg)) |
| 7802 | return 0; |
| 7803 | |
| 7804 | for_each_process_thread(g, p) { |
| 7805 | if (rt_task(p) && task_group(p) == tg) |
| 7806 | return 1; |
| 7807 | } |
| 7808 | |
| 7809 | return 0; |
| 7810 | } |
| 7811 | |
| 7812 | struct rt_schedulable_data { |
| 7813 | struct task_group *tg; |
| 7814 | u64 rt_period; |
| 7815 | u64 rt_runtime; |
| 7816 | }; |
| 7817 | |
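/*
 * Worked example (illustrative): with a global limit of runtime =
 * 950000us per period = 1000000us (i.e. 95% cpu), a group with runtime
 * 500000us / period 1000000us and two children at 200000us / 1000000us
 * each passes the checks below, since 0.2 + 0.2 <= 0.5 <= 0.95.
 */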
| 7818 | static int tg_rt_schedulable(struct task_group *tg, void *data) |
| 7819 | { |
| 7820 | struct rt_schedulable_data *d = data; |
| 7821 | struct task_group *child; |
| 7822 | unsigned long total, sum = 0; |
| 7823 | u64 period, runtime; |
| 7824 | |
| 7825 | period = ktime_to_ns(tg->rt_bandwidth.rt_period); |
| 7826 | runtime = tg->rt_bandwidth.rt_runtime; |
| 7827 | |
| 7828 | if (tg == d->tg) { |
| 7829 | period = d->rt_period; |
| 7830 | runtime = d->rt_runtime; |
| 7831 | } |
| 7832 | |
| 7833 | /* |
| 7834 | * Cannot have more runtime than the period. |
| 7835 | */ |
| 7836 | if (runtime > period && runtime != RUNTIME_INF) |
| 7837 | return -EINVAL; |
| 7838 | |
| 7839 | /* |
| 7840 | * Ensure we don't starve existing RT tasks. |
| 7841 | */ |
| 7842 | if (rt_bandwidth_enabled() && !runtime && tg_has_rt_tasks(tg)) |
| 7843 | return -EBUSY; |
| 7844 | |
| 7845 | total = to_ratio(period, runtime); |
| 7846 | |
| 7847 | /* |
| 7848 | * Nobody can have more than the global setting allows. |
| 7849 | */ |
| 7850 | if (total > to_ratio(global_rt_period(), global_rt_runtime())) |
| 7851 | return -EINVAL; |
| 7852 | |
| 7853 | /* |
| 7854 | * The sum of our children's runtime should not exceed our own. |
| 7855 | */ |
| 7856 | list_for_each_entry_rcu(child, &tg->children, siblings) { |
| 7857 | period = ktime_to_ns(child->rt_bandwidth.rt_period); |
| 7858 | runtime = child->rt_bandwidth.rt_runtime; |
| 7859 | |
| 7860 | if (child == d->tg) { |
| 7861 | period = d->rt_period; |
| 7862 | runtime = d->rt_runtime; |
| 7863 | } |
| 7864 | |
| 7865 | sum += to_ratio(period, runtime); |
| 7866 | } |
| 7867 | |
| 7868 | if (sum > total) |
| 7869 | return -EINVAL; |
| 7870 | |
| 7871 | return 0; |
| 7872 | } |
| 7873 | |
| 7874 | static int __rt_schedulable(struct task_group *tg, u64 period, u64 runtime) |
| 7875 | { |
| 7876 | int ret; |
| 7877 | |
| 7878 | struct rt_schedulable_data data = { |
| 7879 | .tg = tg, |
| 7880 | .rt_period = period, |
| 7881 | .rt_runtime = runtime, |
| 7882 | }; |
| 7883 | |
| 7884 | rcu_read_lock(); |
| 7885 | ret = walk_tg_tree(tg_rt_schedulable, tg_nop, &data); |
| 7886 | rcu_read_unlock(); |
| 7887 | |
| 7888 | return ret; |
| 7889 | } |
| 7890 | |
| 7891 | static int tg_set_rt_bandwidth(struct task_group *tg, |
| 7892 | u64 rt_period, u64 rt_runtime) |
| 7893 | { |
| 7894 | int i, err = 0; |
| 7895 | |
| 7896 | /* |
| 7897 | * Disallowing the root group RT runtime is BAD, it would disallow the |
| 7898 | * kernel creating (and or operating) RT threads. |
| 7899 | */ |
| 7900 | if (tg == &root_task_group && rt_runtime == 0) |
| 7901 | return -EINVAL; |
| 7902 | |
/* A period of zero doesn't make any sense. */
| 7904 | if (rt_period == 0) |
| 7905 | return -EINVAL; |
| 7906 | |
| 7907 | mutex_lock(&rt_constraints_mutex); |
| 7908 | read_lock(&tasklist_lock); |
| 7909 | err = __rt_schedulable(tg, rt_period, rt_runtime); |
| 7910 | if (err) |
| 7911 | goto unlock; |
| 7912 | |
| 7913 | raw_spin_lock_irq(&tg->rt_bandwidth.rt_runtime_lock); |
| 7914 | tg->rt_bandwidth.rt_period = ns_to_ktime(rt_period); |
| 7915 | tg->rt_bandwidth.rt_runtime = rt_runtime; |
| 7916 | |
| 7917 | for_each_possible_cpu(i) { |
| 7918 | struct rt_rq *rt_rq = tg->rt_rq[i]; |
| 7919 | |
| 7920 | raw_spin_lock(&rt_rq->rt_runtime_lock); |
| 7921 | rt_rq->rt_runtime = rt_runtime; |
| 7922 | raw_spin_unlock(&rt_rq->rt_runtime_lock); |
| 7923 | } |
| 7924 | raw_spin_unlock_irq(&tg->rt_bandwidth.rt_runtime_lock); |
| 7925 | unlock: |
| 7926 | read_unlock(&tasklist_lock); |
| 7927 | mutex_unlock(&rt_constraints_mutex); |
| 7928 | |
| 7929 | return err; |
| 7930 | } |
| 7931 | |
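/*
 * E.g. writing 500000 to the cgroup file cpu.rt_runtime_us grants the
 * group 500ms of RT runtime per period, while a negative value maps to
 * RUNTIME_INF (no limit).
 */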
| 7932 | static int sched_group_set_rt_runtime(struct task_group *tg, long rt_runtime_us) |
| 7933 | { |
| 7934 | u64 rt_runtime, rt_period; |
| 7935 | |
| 7936 | rt_period = ktime_to_ns(tg->rt_bandwidth.rt_period); |
| 7937 | rt_runtime = (u64)rt_runtime_us * NSEC_PER_USEC; |
| 7938 | if (rt_runtime_us < 0) |
| 7939 | rt_runtime = RUNTIME_INF; |
| 7940 | |
| 7941 | return tg_set_rt_bandwidth(tg, rt_period, rt_runtime); |
| 7942 | } |
| 7943 | |
| 7944 | static long sched_group_rt_runtime(struct task_group *tg) |
| 7945 | { |
| 7946 | u64 rt_runtime_us; |
| 7947 | |
| 7948 | if (tg->rt_bandwidth.rt_runtime == RUNTIME_INF) |
| 7949 | return -1; |
| 7950 | |
| 7951 | rt_runtime_us = tg->rt_bandwidth.rt_runtime; |
| 7952 | do_div(rt_runtime_us, NSEC_PER_USEC); |
| 7953 | return rt_runtime_us; |
| 7954 | } |
| 7955 | |
| 7956 | static int sched_group_set_rt_period(struct task_group *tg, u64 rt_period_us) |
| 7957 | { |
| 7958 | u64 rt_runtime, rt_period; |
| 7959 | |
| 7960 | rt_period = rt_period_us * NSEC_PER_USEC; |
| 7961 | rt_runtime = tg->rt_bandwidth.rt_runtime; |
| 7962 | |
| 7963 | return tg_set_rt_bandwidth(tg, rt_period, rt_runtime); |
| 7964 | } |
| 7965 | |
| 7966 | static long sched_group_rt_period(struct task_group *tg) |
| 7967 | { |
| 7968 | u64 rt_period_us; |
| 7969 | |
| 7970 | rt_period_us = ktime_to_ns(tg->rt_bandwidth.rt_period); |
| 7971 | do_div(rt_period_us, NSEC_PER_USEC); |
| 7972 | return rt_period_us; |
| 7973 | } |
| 7974 | #endif /* CONFIG_RT_GROUP_SCHED */ |
| 7975 | |
| 7976 | #ifdef CONFIG_RT_GROUP_SCHED |
| 7977 | static int sched_rt_global_constraints(void) |
| 7978 | { |
| 7979 | int ret = 0; |
| 7980 | |
| 7981 | mutex_lock(&rt_constraints_mutex); |
| 7982 | read_lock(&tasklist_lock); |
| 7983 | ret = __rt_schedulable(NULL, 0, 0); |
| 7984 | read_unlock(&tasklist_lock); |
| 7985 | mutex_unlock(&rt_constraints_mutex); |
| 7986 | |
| 7987 | return ret; |
| 7988 | } |
| 7989 | |
| 7990 | static int sched_rt_can_attach(struct task_group *tg, struct task_struct *tsk) |
| 7991 | { |
| 7992 | /* Don't accept realtime tasks when there is no way for them to run */ |
| 7993 | if (rt_task(tsk) && tg->rt_bandwidth.rt_runtime == 0) |
| 7994 | return 0; |
| 7995 | |
| 7996 | return 1; |
| 7997 | } |
| 7998 | |
| 7999 | #else /* !CONFIG_RT_GROUP_SCHED */ |
| 8000 | static int sched_rt_global_constraints(void) |
| 8001 | { |
| 8002 | unsigned long flags; |
| 8003 | int i; |
| 8004 | |
| 8005 | raw_spin_lock_irqsave(&def_rt_bandwidth.rt_runtime_lock, flags); |
| 8006 | for_each_possible_cpu(i) { |
| 8007 | struct rt_rq *rt_rq = &cpu_rq(i)->rt; |
| 8008 | |
| 8009 | raw_spin_lock(&rt_rq->rt_runtime_lock); |
| 8010 | rt_rq->rt_runtime = global_rt_runtime(); |
| 8011 | raw_spin_unlock(&rt_rq->rt_runtime_lock); |
| 8012 | } |
| 8013 | raw_spin_unlock_irqrestore(&def_rt_bandwidth.rt_runtime_lock, flags); |
| 8014 | |
| 8015 | return 0; |
| 8016 | } |
| 8017 | #endif /* CONFIG_RT_GROUP_SCHED */ |
| 8018 | |
| 8019 | static int sched_dl_global_validate(void) |
| 8020 | { |
| 8021 | u64 runtime = global_rt_runtime(); |
| 8022 | u64 period = global_rt_period(); |
| 8023 | u64 new_bw = to_ratio(period, runtime); |
| 8024 | struct dl_bw *dl_b; |
| 8025 | int cpu, ret = 0; |
| 8026 | unsigned long flags; |
| 8027 | |
| 8028 | /* |
| 8029 | * Here we want to check the bandwidth not being set to some |
| 8030 | * value smaller than the currently allocated bandwidth in |
| 8031 | * any of the root_domains. |
| 8032 | * |
| 8033 | * FIXME: Cycling on all the CPUs is overdoing, but simpler than |
| 8034 | * cycling on root_domains... Discussion on different/better |
| 8035 | * solutions is welcome! |
| 8036 | */ |
| 8037 | for_each_possible_cpu(cpu) { |
| 8038 | rcu_read_lock_sched(); |
| 8039 | dl_b = dl_bw_of(cpu); |
| 8040 | |
| 8041 | raw_spin_lock_irqsave(&dl_b->lock, flags); |
| 8042 | if (new_bw < dl_b->total_bw) |
| 8043 | ret = -EBUSY; |
| 8044 | raw_spin_unlock_irqrestore(&dl_b->lock, flags); |
| 8045 | |
| 8046 | rcu_read_unlock_sched(); |
| 8047 | |
| 8048 | if (ret) |
| 8049 | break; |
| 8050 | } |
| 8051 | |
| 8052 | return ret; |
| 8053 | } |
| 8054 | |
| 8055 | static void sched_dl_do_global(void) |
| 8056 | { |
| 8057 | u64 new_bw = -1; |
| 8058 | struct dl_bw *dl_b; |
| 8059 | int cpu; |
| 8060 | unsigned long flags; |
| 8061 | |
| 8062 | def_dl_bandwidth.dl_period = global_rt_period(); |
| 8063 | def_dl_bandwidth.dl_runtime = global_rt_runtime(); |
| 8064 | |
| 8065 | if (global_rt_runtime() != RUNTIME_INF) |
| 8066 | new_bw = to_ratio(global_rt_period(), global_rt_runtime()); |
| 8067 | |
| 8068 | /* |
| 8069 | * FIXME: As above... |
| 8070 | */ |
| 8071 | for_each_possible_cpu(cpu) { |
| 8072 | rcu_read_lock_sched(); |
| 8073 | dl_b = dl_bw_of(cpu); |
| 8074 | |
| 8075 | raw_spin_lock_irqsave(&dl_b->lock, flags); |
| 8076 | dl_b->bw = new_bw; |
| 8077 | raw_spin_unlock_irqrestore(&dl_b->lock, flags); |
| 8078 | |
| 8079 | rcu_read_unlock_sched(); |
| 8080 | } |
| 8081 | } |
| 8082 | |
| 8083 | static int sched_rt_global_validate(void) |
| 8084 | { |
| 8085 | if (sysctl_sched_rt_period <= 0) |
| 8086 | return -EINVAL; |
| 8087 | |
| 8088 | if ((sysctl_sched_rt_runtime != RUNTIME_INF) && |
| 8089 | (sysctl_sched_rt_runtime > sysctl_sched_rt_period)) |
| 8090 | return -EINVAL; |
| 8091 | |
| 8092 | return 0; |
| 8093 | } |
| 8094 | |
| 8095 | static void sched_rt_do_global(void) |
| 8096 | { |
| 8097 | def_rt_bandwidth.rt_runtime = global_rt_runtime(); |
| 8098 | def_rt_bandwidth.rt_period = ns_to_ktime(global_rt_period()); |
| 8099 | } |
| 8100 | |
| 8101 | int sched_rt_handler(struct ctl_table *table, int write, |
| 8102 | void __user *buffer, size_t *lenp, |
| 8103 | loff_t *ppos) |
| 8104 | { |
| 8105 | int old_period, old_runtime; |
| 8106 | static DEFINE_MUTEX(mutex); |
| 8107 | int ret; |
| 8108 | |
| 8109 | mutex_lock(&mutex); |
| 8110 | old_period = sysctl_sched_rt_period; |
| 8111 | old_runtime = sysctl_sched_rt_runtime; |
| 8112 | |
| 8113 | ret = proc_dointvec(table, write, buffer, lenp, ppos); |
| 8114 | |
| 8115 | if (!ret && write) { |
| 8116 | ret = sched_rt_global_validate(); |
| 8117 | if (ret) |
| 8118 | goto undo; |
| 8119 | |
| 8120 | ret = sched_dl_global_validate(); |
| 8121 | if (ret) |
| 8122 | goto undo; |
| 8123 | |
| 8124 | ret = sched_rt_global_constraints(); |
| 8125 | if (ret) |
| 8126 | goto undo; |
| 8127 | |
| 8128 | sched_rt_do_global(); |
| 8129 | sched_dl_do_global(); |
| 8130 | } |
| 8131 | if (0) { |
| 8132 | undo: |
| 8133 | sysctl_sched_rt_period = old_period; |
| 8134 | sysctl_sched_rt_runtime = old_runtime; |
| 8135 | } |
| 8136 | mutex_unlock(&mutex); |
| 8137 | |
| 8138 | return ret; |
| 8139 | } |
| 8140 | |
| 8141 | int sched_rr_handler(struct ctl_table *table, int write, |
| 8142 | void __user *buffer, size_t *lenp, |
| 8143 | loff_t *ppos) |
| 8144 | { |
| 8145 | int ret; |
| 8146 | static DEFINE_MUTEX(mutex); |
| 8147 | |
| 8148 | mutex_lock(&mutex); |
| 8149 | ret = proc_dointvec(table, write, buffer, lenp, ppos); |
/*
 * Make sure that internally we keep jiffies.
 * Also, writing zero resets the timeslice to default.
 */
| 8152 | if (!ret && write) { |
| 8153 | sched_rr_timeslice = sched_rr_timeslice <= 0 ? |
| 8154 | RR_TIMESLICE : msecs_to_jiffies(sched_rr_timeslice); |
| 8155 | } |
| 8156 | mutex_unlock(&mutex); |
| 8157 | return ret; |
| 8158 | } |
| 8159 | |
| 8160 | #ifdef CONFIG_CGROUP_SCHED |
| 8161 | |
| 8162 | static inline struct task_group *css_tg(struct cgroup_subsys_state *css) |
| 8163 | { |
| 8164 | return css ? container_of(css, struct task_group, css) : NULL; |
| 8165 | } |
| 8166 | |
| 8167 | static struct cgroup_subsys_state * |
| 8168 | cpu_cgroup_css_alloc(struct cgroup_subsys_state *parent_css) |
| 8169 | { |
| 8170 | struct task_group *parent = css_tg(parent_css); |
| 8171 | struct task_group *tg; |
| 8172 | |
| 8173 | if (!parent) { |
| 8174 | /* This is early initialization for the top cgroup */ |
| 8175 | return &root_task_group.css; |
| 8176 | } |
| 8177 | |
| 8178 | tg = sched_create_group(parent); |
| 8179 | if (IS_ERR(tg)) |
| 8180 | return ERR_PTR(-ENOMEM); |
| 8181 | |
| 8182 | sched_online_group(tg, parent); |
| 8183 | |
| 8184 | return &tg->css; |
| 8185 | } |
| 8186 | |
| 8187 | static void cpu_cgroup_css_released(struct cgroup_subsys_state *css) |
| 8188 | { |
| 8189 | struct task_group *tg = css_tg(css); |
| 8190 | |
| 8191 | sched_offline_group(tg); |
| 8192 | } |
| 8193 | |
| 8194 | static void cpu_cgroup_css_free(struct cgroup_subsys_state *css) |
| 8195 | { |
| 8196 | struct task_group *tg = css_tg(css); |
| 8197 | |
| 8198 | /* |
| 8199 | * Relies on the RCU grace period between css_released() and this. |
| 8200 | */ |
| 8201 | sched_free_group(tg); |
| 8202 | } |
| 8203 | |
| 8204 | static void cpu_cgroup_fork(struct task_struct *task) |
| 8205 | { |
| 8206 | sched_move_task(task); |
| 8207 | } |
| 8208 | |
| 8209 | static int cpu_cgroup_can_attach(struct cgroup_taskset *tset) |
| 8210 | { |
| 8211 | struct task_struct *task; |
| 8212 | struct cgroup_subsys_state *css; |
| 8213 | |
| 8214 | cgroup_taskset_for_each(task, css, tset) { |
| 8215 | #ifdef CONFIG_RT_GROUP_SCHED |
| 8216 | if (!sched_rt_can_attach(css_tg(css), task)) |
| 8217 | return -EINVAL; |
| 8218 | #else |
| 8219 | /* We don't support RT-tasks being in separate groups */ |
| 8220 | if (task->sched_class != &fair_sched_class) |
| 8221 | return -EINVAL; |
| 8222 | #endif |
| 8223 | } |
| 8224 | return 0; |
| 8225 | } |
| 8226 | |
| 8227 | static void cpu_cgroup_attach(struct cgroup_taskset *tset) |
| 8228 | { |
| 8229 | struct task_struct *task; |
| 8230 | struct cgroup_subsys_state *css; |
| 8231 | |
| 8232 | cgroup_taskset_for_each(task, css, tset) |
| 8233 | sched_move_task(task); |
| 8234 | } |
| 8235 | |
| 8236 | #ifdef CONFIG_FAIR_GROUP_SCHED |
| 8237 | static int cpu_shares_write_u64(struct cgroup_subsys_state *css, |
| 8238 | struct cftype *cftype, u64 shareval) |
| 8239 | { |
| 8240 | return sched_group_set_shares(css_tg(css), scale_load(shareval)); |
| 8241 | } |
| 8242 | |
| 8243 | static u64 cpu_shares_read_u64(struct cgroup_subsys_state *css, |
| 8244 | struct cftype *cft) |
| 8245 | { |
| 8246 | struct task_group *tg = css_tg(css); |
| 8247 | |
| 8248 | return (u64) scale_load_down(tg->shares); |
| 8249 | } |
| 8250 | |
| 8251 | #ifdef CONFIG_CFS_BANDWIDTH |
| 8252 | static DEFINE_MUTEX(cfs_constraints_mutex); |
| 8253 | |
| 8254 | const u64 max_cfs_quota_period = 1 * NSEC_PER_SEC; /* 1s */ |
| 8255 | const u64 min_cfs_quota_period = 1 * NSEC_PER_MSEC; /* 1ms */ |
| 8256 | |
| 8257 | static int __cfs_schedulable(struct task_group *tg, u64 period, u64 runtime); |
| 8258 | |
| 8259 | static int tg_set_cfs_bandwidth(struct task_group *tg, u64 period, u64 quota) |
| 8260 | { |
| 8261 | int i, ret = 0, runtime_enabled, runtime_was_enabled; |
| 8262 | struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth; |
| 8263 | |
| 8264 | if (tg == &root_task_group) |
| 8265 | return -EINVAL; |
| 8266 | |
| 8267 | /* |
| 8268 | * Ensure we have at some amount of bandwidth every period. This is |
| 8269 | * to prevent reaching a state of large arrears when throttled via |
| 8270 | * entity_tick() resulting in prolonged exit starvation. |
| 8271 | */ |
| 8272 | if (quota < min_cfs_quota_period || period < min_cfs_quota_period) |
| 8273 | return -EINVAL; |
| 8274 | |
| 8275 | /* |
| 8276 | * Likewise, bound things on the other side by preventing insane quota
| 8277 | * periods. This also allows us to normalize in computing quota |
| 8278 | * feasibility. |
| 8279 | */ |
| 8280 | if (period > max_cfs_quota_period) |
| 8281 | return -EINVAL; |
| 8282 | |
| 8283 | /* |
| 8284 | * Prevent race between setting of cfs_rq->runtime_enabled and |
| 8285 | * unthrottle_offline_cfs_rqs(). |
| 8286 | */ |
| 8287 | get_online_cpus(); |
| 8288 | mutex_lock(&cfs_constraints_mutex); |
| 8289 | ret = __cfs_schedulable(tg, period, quota); |
| 8290 | if (ret) |
| 8291 | goto out_unlock; |
| 8292 | |
| 8293 | runtime_enabled = quota != RUNTIME_INF; |
| 8294 | runtime_was_enabled = cfs_b->quota != RUNTIME_INF; |
| 8295 | /* |
| 8296 | * If we need to toggle cfs_bandwidth_used, off->on must occur |
| 8297 | * before making related changes, and on->off must occur afterwards |
| 8298 | */ |
| 8299 | if (runtime_enabled && !runtime_was_enabled) |
| 8300 | cfs_bandwidth_usage_inc(); |
| 8301 | raw_spin_lock_irq(&cfs_b->lock); |
| 8302 | cfs_b->period = ns_to_ktime(period); |
| 8303 | cfs_b->quota = quota; |
| 8304 | |
| 8305 | __refill_cfs_bandwidth_runtime(cfs_b); |
| 8306 | /* restart the period timer (if active) to handle new period expiry */ |
| 8307 | if (runtime_enabled) |
| 8308 | start_cfs_bandwidth(cfs_b); |
| 8309 | raw_spin_unlock_irq(&cfs_b->lock); |
| 8310 | |
| 8311 | for_each_online_cpu(i) { |
| 8312 | struct cfs_rq *cfs_rq = tg->cfs_rq[i]; |
| 8313 | struct rq *rq = cfs_rq->rq; |
| 8314 | |
| 8315 | raw_spin_lock_irq(&rq->lock); |
| 8316 | cfs_rq->runtime_enabled = runtime_enabled; |
| 8317 | cfs_rq->runtime_remaining = 0; |
| 8318 | |
| 8319 | if (cfs_rq->throttled) |
| 8320 | unthrottle_cfs_rq(cfs_rq); |
| 8321 | raw_spin_unlock_irq(&rq->lock); |
| 8322 | } |
| 8323 | if (runtime_was_enabled && !runtime_enabled) |
| 8324 | cfs_bandwidth_usage_dec(); |
| 8325 | out_unlock: |
| 8326 | mutex_unlock(&cfs_constraints_mutex); |
| 8327 | put_online_cpus(); |
| 8328 | |
| 8329 | return ret; |
| 8330 | } |
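| |
| | /*
| | * Worked example (values illustrative): quota = 20ms against a
| | * period = 100ms caps the group at 20% of one CPU, while quota =
| | * 200ms against the same period allows two CPUs' worth of runtime
| | * per period on SMP:
| | *
| | *   # echo 100000 > cpu.cfs_period_us
| | *   # echo  20000 > cpu.cfs_quota_us    (0.2 CPUs)
| | *
| | * A negative quota maps to RUNTIME_INF, i.e. no limit; see
| | * tg_set_cfs_quota() below.
| | */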
| 8331 | |
| 8332 | int tg_set_cfs_quota(struct task_group *tg, long cfs_quota_us) |
| 8333 | { |
| 8334 | u64 quota, period; |
| 8335 | |
| 8336 | period = ktime_to_ns(tg->cfs_bandwidth.period); |
| 8337 | if (cfs_quota_us < 0) |
| 8338 | quota = RUNTIME_INF; |
| 8339 | else |
| 8340 | quota = (u64)cfs_quota_us * NSEC_PER_USEC; |
| 8341 | |
| 8342 | return tg_set_cfs_bandwidth(tg, period, quota); |
| 8343 | } |
| 8344 | |
| 8345 | long tg_get_cfs_quota(struct task_group *tg) |
| 8346 | { |
| 8347 | u64 quota_us; |
| 8348 | |
| 8349 | if (tg->cfs_bandwidth.quota == RUNTIME_INF) |
| 8350 | return -1; |
| 8351 | |
| 8352 | quota_us = tg->cfs_bandwidth.quota; |
| 8353 | do_div(quota_us, NSEC_PER_USEC); |
| 8354 | |
| 8355 | return quota_us; |
| 8356 | } |
| 8357 | |
| 8358 | int tg_set_cfs_period(struct task_group *tg, long cfs_period_us) |
| 8359 | { |
| 8360 | u64 quota, period; |
| 8361 | |
| 8362 | period = (u64)cfs_period_us * NSEC_PER_USEC; |
| 8363 | quota = tg->cfs_bandwidth.quota; |
| 8364 | |
| 8365 | return tg_set_cfs_bandwidth(tg, period, quota); |
| 8366 | } |
| 8367 | |
| 8368 | long tg_get_cfs_period(struct task_group *tg) |
| 8369 | { |
| 8370 | u64 cfs_period_us; |
| 8371 | |
| 8372 | cfs_period_us = ktime_to_ns(tg->cfs_bandwidth.period); |
| 8373 | do_div(cfs_period_us, NSEC_PER_USEC); |
| 8374 | |
| 8375 | return cfs_period_us; |
| 8376 | } |
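| |
| | /*
| | * Unit round trip of the four helpers above (values assumed for
| | * illustration): the cgroup files speak microseconds, cfs_bandwidth
| | * stores nanoseconds. Writing 50000 to cpu.cfs_quota_us stores
| | * 50000 * NSEC_PER_USEC = 50000000 in cfs_b->quota; reading divides
| | * by NSEC_PER_USEC and yields 50000 again. An unlimited quota
| | * (RUNTIME_INF) reads back as -1.
| | */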
| 8377 | |
| 8378 | static s64 cpu_cfs_quota_read_s64(struct cgroup_subsys_state *css, |
| 8379 | struct cftype *cft) |
| 8380 | { |
| 8381 | return tg_get_cfs_quota(css_tg(css)); |
| 8382 | } |
| 8383 | |
| 8384 | static int cpu_cfs_quota_write_s64(struct cgroup_subsys_state *css, |
| 8385 | struct cftype *cftype, s64 cfs_quota_us) |
| 8386 | { |
| 8387 | return tg_set_cfs_quota(css_tg(css), cfs_quota_us); |
| 8388 | } |
| 8389 | |
| 8390 | static u64 cpu_cfs_period_read_u64(struct cgroup_subsys_state *css, |
| 8391 | struct cftype *cft) |
| 8392 | { |
| 8393 | return tg_get_cfs_period(css_tg(css)); |
| 8394 | } |
| 8395 | |
| 8396 | static int cpu_cfs_period_write_u64(struct cgroup_subsys_state *css, |
| 8397 | struct cftype *cftype, u64 cfs_period_us) |
| 8398 | { |
| 8399 | return tg_set_cfs_period(css_tg(css), cfs_period_us); |
| 8400 | } |
| 8401 | |
| 8402 | struct cfs_schedulable_data { |
| 8403 | struct task_group *tg; |
| 8404 | u64 period, quota; |
| 8405 | }; |
| 8406 | |
| 8407 | /* |
| 8408 | * normalize group quota/period to be quota/max_period |
| 8409 | * note: units are usecs |
| 8410 | */ |
| 8411 | static u64 normalize_cfs_quota(struct task_group *tg, |
| 8412 | struct cfs_schedulable_data *d) |
| 8413 | { |
| 8414 | u64 quota, period; |
| 8415 | |
| 8416 | if (tg == d->tg) { |
| 8417 | period = d->period; |
| 8418 | quota = d->quota; |
| 8419 | } else { |
| 8420 | period = tg_get_cfs_period(tg); |
| 8421 | quota = tg_get_cfs_quota(tg); |
| 8422 | } |
| 8423 | |
| 8424 | /* note: these should typically be equivalent */ |
| 8425 | if (quota == RUNTIME_INF || quota == -1) |
| 8426 | return RUNTIME_INF; |
| 8427 | |
| 8428 | return to_ratio(period, quota); |
| 8429 | } |
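| |
| | /*
| | * Example (illustrative, and assuming to_ratio() returns
| | * runtime/period as a <<20 fixed-point fraction): quota = 25000us
| | * with period = 50000us normalizes to ~(1 << 19), i.e. half a CPU,
| | * independent of whatever period the parent happens to use. This is
| | * what makes parent and child quotas directly comparable in
| | * tg_cfs_schedulable_down() below.
| | */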
| 8430 | |
| 8431 | static int tg_cfs_schedulable_down(struct task_group *tg, void *data) |
| 8432 | { |
| 8433 | struct cfs_schedulable_data *d = data; |
| 8434 | struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth; |
| 8435 | s64 quota = 0, parent_quota = -1; |
| 8436 | |
| 8437 | if (!tg->parent) { |
| 8438 | quota = RUNTIME_INF; |
| 8439 | } else { |
| 8440 | struct cfs_bandwidth *parent_b = &tg->parent->cfs_bandwidth; |
| 8441 | |
| 8442 | quota = normalize_cfs_quota(tg, d); |
| 8443 | parent_quota = parent_b->hierarchical_quota; |
| 8444 | |
| 8445 | /* |
| 8446 | * ensure max(child_quota) <= parent_quota, inherit when no |
| 8447 | * limit is set |
| 8448 | */ |
| 8449 | if (quota == RUNTIME_INF) |
| 8450 | quota = parent_quota; |
| 8451 | else if (parent_quota != RUNTIME_INF && quota > parent_quota) |
| 8452 | return -EINVAL; |
| 8453 | } |
| 8454 | cfs_b->hierarchical_quota = quota; |
| 8455 | |
| 8456 | return 0; |
| 8457 | } |
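| |
| | /*
| | * Concrete consequence (paths illustrative): if parent P is capped
| | * at half a CPU, a child of P cannot be configured above that:
| | *
| | *   # echo 100000 > P/cpu.cfs_period_us
| | *   # echo  50000 > P/cpu.cfs_quota_us
| | *   # echo  75000 > P/child/cpu.cfs_quota_us
| | *   write error: Invalid argument
| | *
| | * A child with no quota of its own simply inherits the parent's
| | * hierarchical_quota for this comparison.
| | */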
| 8458 | |
| 8459 | static int __cfs_schedulable(struct task_group *tg, u64 period, u64 quota) |
| 8460 | { |
| 8461 | int ret; |
| 8462 | struct cfs_schedulable_data data = { |
| 8463 | .tg = tg, |
| 8464 | .period = period, |
| 8465 | .quota = quota, |
| 8466 | }; |
| 8467 | |
| 8468 | if (quota != RUNTIME_INF) { |
| 8469 | do_div(data.period, NSEC_PER_USEC); |
| 8470 | do_div(data.quota, NSEC_PER_USEC); |
| 8471 | } |
| 8472 | |
| 8473 | rcu_read_lock(); |
| 8474 | ret = walk_tg_tree(tg_cfs_schedulable_down, tg_nop, &data); |
| 8475 | rcu_read_unlock(); |
| 8476 | |
| 8477 | return ret; |
| 8478 | } |
| 8479 | |
| 8480 | static int cpu_stats_show(struct seq_file *sf, void *v) |
| 8481 | { |
| 8482 | struct task_group *tg = css_tg(seq_css(sf)); |
| 8483 | struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth; |
| 8484 | |
| 8485 | seq_printf(sf, "nr_periods %d\n", cfs_b->nr_periods); |
| 8486 | seq_printf(sf, "nr_throttled %d\n", cfs_b->nr_throttled); |
| 8487 | seq_printf(sf, "throttled_time %llu\n", cfs_b->throttled_time); |
| 8488 | |
| 8489 | return 0; |
| 8490 | } |
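| |
| | /*
| | * Example cpu.stat output (numbers made up):
| | *
| | *   nr_periods 272
| | *   nr_throttled 18
| | *   throttled_time 1431000000
| | *
| | * nr_periods counts elapsed enforcement periods, nr_throttled how
| | * many of those throttled the group, and throttled_time the total
| | * throttled time in nanoseconds.
| | */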
| 8491 | #endif /* CONFIG_CFS_BANDWIDTH */ |
| 8492 | #endif /* CONFIG_FAIR_GROUP_SCHED */ |
| 8493 | |
| 8494 | #ifdef CONFIG_RT_GROUP_SCHED |
| 8495 | static int cpu_rt_runtime_write(struct cgroup_subsys_state *css, |
| 8496 | struct cftype *cft, s64 val) |
| 8497 | { |
| 8498 | return sched_group_set_rt_runtime(css_tg(css), val); |
| 8499 | } |
| 8500 | |
| 8501 | static s64 cpu_rt_runtime_read(struct cgroup_subsys_state *css, |
| 8502 | struct cftype *cft) |
| 8503 | { |
| 8504 | return sched_group_rt_runtime(css_tg(css)); |
| 8505 | } |
| 8506 | |
| 8507 | static int cpu_rt_period_write_uint(struct cgroup_subsys_state *css, |
| 8508 | struct cftype *cftype, u64 rt_period_us) |
| 8509 | { |
| 8510 | return sched_group_set_rt_period(css_tg(css), rt_period_us); |
| 8511 | } |
| 8512 | |
| 8513 | static u64 cpu_rt_period_read_uint(struct cgroup_subsys_state *css, |
| 8514 | struct cftype *cft) |
| 8515 | { |
| 8516 | return sched_group_rt_period(css_tg(css)); |
| 8517 | } |
| 8518 | #endif /* CONFIG_RT_GROUP_SCHED */ |
| 8519 | |
| 8520 | static struct cftype cpu_files[] = { |
| 8521 | #ifdef CONFIG_FAIR_GROUP_SCHED |
| 8522 | { |
| 8523 | .name = "shares", |
| 8524 | .read_u64 = cpu_shares_read_u64, |
| 8525 | .write_u64 = cpu_shares_write_u64, |
| 8526 | }, |
| 8527 | #endif |
| 8528 | #ifdef CONFIG_CFS_BANDWIDTH |
| 8529 | { |
| 8530 | .name = "cfs_quota_us", |
| 8531 | .read_s64 = cpu_cfs_quota_read_s64, |
| 8532 | .write_s64 = cpu_cfs_quota_write_s64, |
| 8533 | }, |
| 8534 | { |
| 8535 | .name = "cfs_period_us", |
| 8536 | .read_u64 = cpu_cfs_period_read_u64, |
| 8537 | .write_u64 = cpu_cfs_period_write_u64, |
| 8538 | }, |
| 8539 | { |
| 8540 | .name = "stat", |
| 8541 | .seq_show = cpu_stats_show, |
| 8542 | }, |
| 8543 | #endif |
| 8544 | #ifdef CONFIG_RT_GROUP_SCHED |
| 8545 | { |
| 8546 | .name = "rt_runtime_us", |
| 8547 | .read_s64 = cpu_rt_runtime_read, |
| 8548 | .write_s64 = cpu_rt_runtime_write, |
| 8549 | }, |
| 8550 | { |
| 8551 | .name = "rt_period_us", |
| 8552 | .read_u64 = cpu_rt_period_read_uint, |
| 8553 | .write_u64 = cpu_rt_period_write_uint, |
| 8554 | }, |
| 8555 | #endif |
| 8556 | { } /* terminate */ |
| 8557 | }; |
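| |
| | /*
| | * On a cgroup v1 mount of the cpu controller the entries above show
| | * up with the subsystem name prefixed (paths illustrative):
| | *
| | *   /sys/fs/cgroup/cpu/<grp>/cpu.shares
| | *   /sys/fs/cgroup/cpu/<grp>/cpu.cfs_quota_us
| | *   /sys/fs/cgroup/cpu/<grp>/cpu.stat
| | */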
| 8558 | |
| 8559 | struct cgroup_subsys cpu_cgrp_subsys = { |
| 8560 | .css_alloc = cpu_cgroup_css_alloc, |
| 8561 | .css_released = cpu_cgroup_css_released, |
| 8562 | .css_free = cpu_cgroup_css_free, |
| 8563 | .fork = cpu_cgroup_fork, |
| 8564 | .can_attach = cpu_cgroup_can_attach, |
| 8565 | .attach = cpu_cgroup_attach, |
| 8566 | .legacy_cftypes = cpu_files, |
| 8567 | .early_init = true, |
| 8568 | }; |
| 8569 | |
| 8570 | #endif /* CONFIG_CGROUP_SCHED */ |
| 8571 | |
| 8572 | void dump_cpu_task(int cpu) |
| 8573 | { |
| 8574 | pr_info("Task dump for CPU %d:\n", cpu); |
| 8575 | sched_show_task(cpu_curr(cpu)); |
| 8576 | } |
| 8577 | |
| 8578 | /* |
| 8579 | * Nice levels are multiplicative, with a gentle 10% change for every |
| 8580 | * nice level changed. I.e. when a CPU-bound task goes from nice 0 to |
| 8581 | * nice 1, it will get ~10% less CPU time than another CPU-bound task |
| 8582 | * that remained on nice 0. |
| 8583 | * |
| 8584 | * The "10% effect" is relative and cumulative: from _any_ nice level, |
| 8585 | * if you go up 1 level, it's -10% CPU usage; if you go down 1 level,
| 8586 | * it's +10% CPU usage. (To achieve that we use a multiplier of ~1.25:
| 8587 | * if a task goes up by ~10% and another task goes down by ~10%, the
| 8588 | * relative distance between them is ~25%.)
| 8589 | */ |
| 8590 | const int sched_prio_to_weight[40] = { |
| 8591 | /* -20 */ 88761, 71755, 56483, 46273, 36291, |
| 8592 | /* -15 */ 29154, 23254, 18705, 14949, 11916, |
| 8593 | /* -10 */ 9548, 7620, 6100, 4904, 3906, |
| 8594 | /* -5 */ 3121, 2501, 1991, 1586, 1277, |
| 8595 | /* 0 */ 1024, 820, 655, 526, 423, |
| 8596 | /* 5 */ 335, 272, 215, 172, 137, |
| 8597 | /* 10 */ 110, 87, 70, 56, 45, |
| 8598 | /* 15 */ 36, 29, 23, 18, 15, |
| 8599 | }; |
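| |
| | /*
| | * Worked example against the table above: nice 0 weighs 1024 and
| | * nice 1 weighs 820, and 1024/820 ~= 1.25. Two busy tasks at those
| | * levels split the CPU about 1024/1844 ~= 55.5% to 820/1844 ~=
| | * 44.5%, i.e. each moves roughly 10% (relative) away from an even
| | * split, matching the rule described above.
| | */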
| 8600 | |
| 8601 | /* |
| 8602 | * Inverse (2^32/x) values of the sched_prio_to_weight[] array, precalculated. |
| 8603 | * |
| 8604 | * In cases where the weight does not change often, we can use the |
| 8605 | * precalculated inverse to speed up arithmetic by turning divisions
| 8606 | * into multiplications: |
| 8607 | */ |
| 8608 | const u32 sched_prio_to_wmult[40] = { |
| 8609 | /* -20 */ 48388, 59856, 76040, 92818, 118348, |
| 8610 | /* -15 */ 147320, 184698, 229616, 287308, 360437, |
| 8611 | /* -10 */ 449829, 563644, 704093, 875809, 1099582, |
| 8612 | /* -5 */ 1376151, 1717300, 2157191, 2708050, 3363326, |
| 8613 | /* 0 */ 4194304, 5237765, 6557202, 8165337, 10153587, |
| 8614 | /* 5 */ 12820798, 15790321, 19976592, 24970740, 31350126, |
| 8615 | /* 10 */ 39045157, 49367440, 61356676, 76695844, 95443717, |
| 8616 | /* 15 */ 119304647, 148102320, 186737708, 238609294, 286331153, |
| 8617 | }; |
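| |
| | /*
| | * Worked example: for nice 0 the weight is 1024 and the table entry
| | * is 4194304 == 2^32 / 1024, so
| | *
| | *   (delta * 4194304) >> 32  ==  delta / 1024
| | *
| | * turning a 64-bit division into a multiply and a shift on hot
| | * paths. Entries for weights that do not divide 2^32 are only
| | * approximate inverses, which is accurate enough for scheduling.
| | */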