sched: More sched_domain iterations fixes
[linux-2.6-block.git] / kernel/sched.c
/*
 * kernel/sched.c
 *
 * Kernel scheduler and related syscalls
 *
 * Copyright (C) 1991-2002 Linus Torvalds
 *
 * 1996-12-23 Modified by Dave Grothe to fix bugs in semaphores and
 *		make semaphores SMP safe
 * 1998-11-19 Implemented schedule_timeout() and related stuff
 *		by Andrea Arcangeli
 * 2002-01-04 New ultra-scalable O(1) scheduler by Ingo Molnar:
 *		hybrid priority-list and round-robin design with
 *		an array-switch method of distributing timeslices
 *		and per-CPU runqueues. Cleanups and useful suggestions
 *		by Davide Libenzi, preemptible kernel bits by Robert Love.
 * 2003-09-03 Interactivity tuning by Con Kolivas.
 * 2004-04-02 Scheduler domains code by Nick Piggin
 * 2007-04-15 Work begun on replacing all interactivity tuning with a
 *		fair scheduling design by Con Kolivas.
 * 2007-05-05 Load balancing (smp-nice) and other improvements
 *		by Peter Williams
 * 2007-05-06 Interactivity improvements to CFS by Mike Galbraith
 * 2007-07-01 Group scheduling enhancements by Srivatsa Vaddagiri
 * 2007-11-29 RT balancing improvements by Steven Rostedt, Gregory Haskins,
 *		Thomas Gleixner, Mike Kravetz
 */

#include <linux/mm.h>
#include <linux/module.h>
#include <linux/nmi.h>
#include <linux/init.h>
#include <linux/uaccess.h>
#include <linux/highmem.h>
#include <asm/mmu_context.h>
#include <linux/interrupt.h>
#include <linux/capability.h>
#include <linux/completion.h>
#include <linux/kernel_stat.h>
#include <linux/debug_locks.h>
#include <linux/perf_event.h>
#include <linux/security.h>
#include <linux/notifier.h>
#include <linux/profile.h>
#include <linux/freezer.h>
#include <linux/vmalloc.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/pid_namespace.h>
#include <linux/smp.h>
#include <linux/threads.h>
#include <linux/timer.h>
#include <linux/rcupdate.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/percpu.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/stop_machine.h>
#include <linux/sysctl.h>
#include <linux/syscalls.h>
#include <linux/times.h>
#include <linux/tsacct_kern.h>
#include <linux/kprobes.h>
#include <linux/delayacct.h>
#include <linux/unistd.h>
#include <linux/pagemap.h>
#include <linux/hrtimer.h>
#include <linux/tick.h>
#include <linux/debugfs.h>
#include <linux/ctype.h>
#include <linux/ftrace.h>
#include <linux/slab.h>

#include <asm/tlb.h>
#include <asm/irq_regs.h>
#include <asm/mutex.h>

#include "sched_cpupri.h"
#include "workqueue_sched.h"
#include "sched_autogroup.h"

#define CREATE_TRACE_POINTS
#include <trace/events/sched.h>

/*
 * Convert user-nice values [ -20 ... 0 ... 19 ]
 * to static priority [ MAX_RT_PRIO..MAX_PRIO-1 ],
 * and back.
 */
#define NICE_TO_PRIO(nice) (MAX_RT_PRIO + (nice) + 20)
#define PRIO_TO_NICE(prio) ((prio) - MAX_RT_PRIO - 20)
#define TASK_NICE(p) PRIO_TO_NICE((p)->static_prio)

/*
 * 'User priority' is the nice value converted to something we
 * can work with better when scaling various scheduler parameters,
 * it's a [ 0 ... 39 ] range.
 */
#define USER_PRIO(p) ((p)-MAX_RT_PRIO)
#define TASK_USER_PRIO(p) USER_PRIO((p)->static_prio)
#define MAX_USER_PRIO (USER_PRIO(MAX_PRIO))
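/*
 * For illustration (assuming MAX_RT_PRIO == 100 and MAX_PRIO == 140, as
 * defined in <linux/sched.h> for this kernel): NICE_TO_PRIO(-20) == 100,
 * NICE_TO_PRIO(0) == 120 and NICE_TO_PRIO(19) == 139, so the nice range
 * maps onto static priorities 100..139. TASK_USER_PRIO() of a nice-0 task
 * is then 20, the middle of the [ 0 ... 39 ] user-priority range.
 */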

/*
 * Helpers for converting nanosecond timing to jiffy resolution
 */
#define NS_TO_JIFFIES(TIME) ((unsigned long)(TIME) / (NSEC_PER_SEC / HZ))
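/*
 * Example: with HZ == 1000 (a common configuration), NSEC_PER_SEC / HZ is
 * 1000000, so NS_TO_JIFFIES(5000000) == 5 - i.e. 5 ms of nanosecond-resolution
 * time truncates to 5 jiffies.
 */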

#define NICE_0_LOAD SCHED_LOAD_SCALE
#define NICE_0_SHIFT SCHED_LOAD_SHIFT

/*
 * These are the 'tuning knobs' of the scheduler:
 *
 * default timeslice is 100 msecs (used only for SCHED_RR tasks).
 * Timeslices get refilled after they expire.
 */
#define DEF_TIMESLICE (100 * HZ / 1000)
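/* E.g. with HZ == 1000 this works out to 100 jiffies, i.e. the 100 msecs above. */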

/*
 * single value that denotes runtime == period, i.e. unlimited time.
 */
#define RUNTIME_INF ((u64)~0ULL)

static inline int rt_policy(int policy)
{
	if (unlikely(policy == SCHED_FIFO || policy == SCHED_RR))
		return 1;
	return 0;
}

static inline int task_has_rt_policy(struct task_struct *p)
{
	return rt_policy(p->policy);
}

/*
 * This is the priority-queue data structure of the RT scheduling class:
 */
struct rt_prio_array {
	DECLARE_BITMAP(bitmap, MAX_RT_PRIO+1); /* include 1 bit for delimiter */
	struct list_head queue[MAX_RT_PRIO];
};

struct rt_bandwidth {
	/* nests inside the rq lock: */
	raw_spinlock_t rt_runtime_lock;
	ktime_t rt_period;
	u64 rt_runtime;
	struct hrtimer rt_period_timer;
};

static struct rt_bandwidth def_rt_bandwidth;

static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun);

static enum hrtimer_restart sched_rt_period_timer(struct hrtimer *timer)
{
	struct rt_bandwidth *rt_b =
		container_of(timer, struct rt_bandwidth, rt_period_timer);
	ktime_t now;
	int overrun;
	int idle = 0;

	for (;;) {
		now = hrtimer_cb_get_time(timer);
		overrun = hrtimer_forward(timer, now, rt_b->rt_period);

		if (!overrun)
			break;

		idle = do_sched_rt_period_timer(rt_b, overrun);
	}

	return idle ? HRTIMER_NORESTART : HRTIMER_RESTART;
}

static
void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime)
{
	rt_b->rt_period = ns_to_ktime(period);
	rt_b->rt_runtime = runtime;

	raw_spin_lock_init(&rt_b->rt_runtime_lock);

	hrtimer_init(&rt_b->rt_period_timer,
			CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	rt_b->rt_period_timer.function = sched_rt_period_timer;
}
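/*
 * Typical usage (a sketch): the default bandwidth object is set up from the
 * rt-period/rt-runtime sysctls defined further below, roughly:
 *
 *	init_rt_bandwidth(&def_rt_bandwidth,
 *			  global_rt_period(), global_rt_runtime());
 *
 * i.e. a 1s period with a 0.95s runtime by default.
 */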

static inline int rt_bandwidth_enabled(void)
{
	return sysctl_sched_rt_runtime >= 0;
}

static void start_rt_bandwidth(struct rt_bandwidth *rt_b)
{
	ktime_t now;

	if (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF)
		return;

	if (hrtimer_active(&rt_b->rt_period_timer))
		return;

	raw_spin_lock(&rt_b->rt_runtime_lock);
	for (;;) {
		unsigned long delta;
		ktime_t soft, hard;

		if (hrtimer_active(&rt_b->rt_period_timer))
			break;

		now = hrtimer_cb_get_time(&rt_b->rt_period_timer);
		hrtimer_forward(&rt_b->rt_period_timer, now, rt_b->rt_period);

		soft = hrtimer_get_softexpires(&rt_b->rt_period_timer);
		hard = hrtimer_get_expires(&rt_b->rt_period_timer);
		delta = ktime_to_ns(ktime_sub(hard, soft));
		__hrtimer_start_range_ns(&rt_b->rt_period_timer, soft, delta,
				HRTIMER_MODE_ABS_PINNED, 0);
	}
	raw_spin_unlock(&rt_b->rt_runtime_lock);
}

#ifdef CONFIG_RT_GROUP_SCHED
static void destroy_rt_bandwidth(struct rt_bandwidth *rt_b)
{
	hrtimer_cancel(&rt_b->rt_period_timer);
}
#endif

/*
 * sched_domains_mutex serializes calls to init_sched_domains,
 * detach_destroy_domains and partition_sched_domains.
 */
static DEFINE_MUTEX(sched_domains_mutex);

#ifdef CONFIG_CGROUP_SCHED

#include <linux/cgroup.h>

struct cfs_rq;

static LIST_HEAD(task_groups);

/* task group related information */
struct task_group {
	struct cgroup_subsys_state css;

#ifdef CONFIG_FAIR_GROUP_SCHED
	/* schedulable entities of this group on each cpu */
	struct sched_entity **se;
	/* runqueue "owned" by this group on each cpu */
	struct cfs_rq **cfs_rq;
	unsigned long shares;

	atomic_t load_weight;
#endif

#ifdef CONFIG_RT_GROUP_SCHED
	struct sched_rt_entity **rt_se;
	struct rt_rq **rt_rq;

	struct rt_bandwidth rt_bandwidth;
#endif

	struct rcu_head rcu;
	struct list_head list;

	struct task_group *parent;
	struct list_head siblings;
	struct list_head children;

#ifdef CONFIG_SCHED_AUTOGROUP
	struct autogroup *autogroup;
#endif
};

/* task_group_lock serializes the addition/removal of task groups */
static DEFINE_SPINLOCK(task_group_lock);

#ifdef CONFIG_FAIR_GROUP_SCHED

# define ROOT_TASK_GROUP_LOAD NICE_0_LOAD

/*
 * A weight of 0 or 1 can cause arithmetic problems.
 * The weight of a cfs_rq is the sum of the weights of the entities queued
 * on it, so the weight of an entity should not be too large, nor should
 * the shares value of a task group.
 * (The default weight is 1024 - so there's no practical
 * limitation from this.)
 */
#define MIN_SHARES 2
#define MAX_SHARES (1UL << (18 + SCHED_LOAD_RESOLUTION))

static int root_task_group_load = ROOT_TASK_GROUP_LOAD;
#endif

/* Default task group.
 * Every task in the system belongs to this group at bootup.
 */
struct task_group root_task_group;

#endif /* CONFIG_CGROUP_SCHED */

/* CFS-related fields in a runqueue */
struct cfs_rq {
	struct load_weight load;
	unsigned long nr_running;

	u64 exec_clock;
	u64 min_vruntime;
#ifndef CONFIG_64BIT
	u64 min_vruntime_copy;
#endif

	struct rb_root tasks_timeline;
	struct rb_node *rb_leftmost;

	struct list_head tasks;
	struct list_head *balance_iterator;

	/*
	 * 'curr' points to currently running entity on this cfs_rq.
	 * It is set to NULL otherwise (i.e. when none are currently running).
	 */
	struct sched_entity *curr, *next, *last, *skip;

#ifdef CONFIG_SCHED_DEBUG
	unsigned int nr_spread_over;
#endif

#ifdef CONFIG_FAIR_GROUP_SCHED
	struct rq *rq;	/* cpu runqueue to which this cfs_rq is attached */

	/*
	 * leaf cfs_rqs are those that hold tasks (lowest schedulable entity in
	 * a hierarchy). Non-leaf lrqs hold other higher schedulable entities
	 * (like users, containers etc.)
	 *
	 * leaf_cfs_rq_list ties together list of leaf cfs_rq's in a cpu. This
	 * list is used during load balance.
	 */
	int on_list;
	struct list_head leaf_cfs_rq_list;
	struct task_group *tg;	/* group that "owns" this runqueue */

#ifdef CONFIG_SMP
	/*
	 * the part of load.weight contributed by tasks
	 */
	unsigned long task_weight;

	/*
	 * h_load = weight * f(tg)
	 *
	 * Where f(tg) is the recursive weight fraction assigned to
	 * this group.
	 */
	unsigned long h_load;

	/*
	 * Maintaining per-cpu shares distribution for group scheduling
	 *
	 * load_stamp is the last time we updated the load average
	 * load_last is the last time we updated the load average and saw load
	 * load_unacc_exec_time is currently unaccounted execution time
	 */
	u64 load_avg;
	u64 load_period;
	u64 load_stamp, load_last, load_unacc_exec_time;

	unsigned long load_contribution;
#endif
#endif
};

/* Real-Time classes' related field in a runqueue: */
struct rt_rq {
	struct rt_prio_array active;
	unsigned long rt_nr_running;
#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
	struct {
		int curr; /* highest queued rt task prio */
#ifdef CONFIG_SMP
		int next; /* next highest */
#endif
	} highest_prio;
#endif
#ifdef CONFIG_SMP
	unsigned long rt_nr_migratory;
	unsigned long rt_nr_total;
	int overloaded;
	struct plist_head pushable_tasks;
#endif
	int rt_throttled;
	u64 rt_time;
	u64 rt_runtime;
	/* Nests inside the rq lock: */
	raw_spinlock_t rt_runtime_lock;

#ifdef CONFIG_RT_GROUP_SCHED
	unsigned long rt_nr_boosted;

	struct rq *rq;
	struct list_head leaf_rt_rq_list;
	struct task_group *tg;
#endif
};

#ifdef CONFIG_SMP

/*
 * We add the notion of a root-domain which will be used to define per-domain
 * variables. Each exclusive cpuset essentially defines an island domain by
 * fully partitioning the member cpus from any other cpuset. Whenever a new
 * exclusive cpuset is created, we also create and attach a new root-domain
 * object.
 *
 */
struct root_domain {
	atomic_t refcount;
	struct rcu_head rcu;
	cpumask_var_t span;
	cpumask_var_t online;

	/*
	 * The "RT overload" flag: it gets set if a CPU has more than
	 * one runnable RT task.
	 */
	cpumask_var_t rto_mask;
	atomic_t rto_count;
	struct cpupri cpupri;
};

/*
 * By default the system creates a single root-domain with all cpus as
 * members (mimicking the global state we have today).
 */
static struct root_domain def_root_domain;

#endif /* CONFIG_SMP */

/*
 * This is the main, per-CPU runqueue data structure.
 *
 * Locking rule: those places that want to lock multiple runqueues
 * (such as the load balancing or the thread migration code) must order
 * their lock acquire operations by ascending &runqueue.
 */
struct rq {
	/* runqueue lock: */
	raw_spinlock_t lock;

	/*
	 * nr_running and cpu_load should be in the same cacheline because
	 * remote CPUs use both these fields when doing load calculation.
	 */
	unsigned long nr_running;
	#define CPU_LOAD_IDX_MAX 5
	unsigned long cpu_load[CPU_LOAD_IDX_MAX];
	unsigned long last_load_update_tick;
#ifdef CONFIG_NO_HZ
	u64 nohz_stamp;
	unsigned char nohz_balance_kick;
#endif
	int skip_clock_update;

	/* capture load from *all* tasks on this cpu: */
	struct load_weight load;
	unsigned long nr_load_updates;
	u64 nr_switches;

	struct cfs_rq cfs;
	struct rt_rq rt;

#ifdef CONFIG_FAIR_GROUP_SCHED
	/* list of leaf cfs_rq on this cpu: */
	struct list_head leaf_cfs_rq_list;
#endif
#ifdef CONFIG_RT_GROUP_SCHED
	struct list_head leaf_rt_rq_list;
#endif

	/*
	 * This is part of a global counter where only the total sum
	 * over all CPUs matters. A task can increase this counter on
	 * one CPU and if it got migrated afterwards it may decrease
	 * it on another CPU. Always updated under the runqueue lock:
	 */
	unsigned long nr_uninterruptible;

	struct task_struct *curr, *idle, *stop;
	unsigned long next_balance;
	struct mm_struct *prev_mm;

	u64 clock;
	u64 clock_task;

	atomic_t nr_iowait;

#ifdef CONFIG_SMP
	struct root_domain *rd;
	struct sched_domain *sd;

	unsigned long cpu_power;

	unsigned char idle_at_tick;
	/* For active balancing */
	int post_schedule;
	int active_balance;
	int push_cpu;
	struct cpu_stop_work active_balance_work;
	/* cpu of this runqueue: */
	int cpu;
	int online;

	unsigned long avg_load_per_task;

	u64 rt_avg;
	u64 age_stamp;
	u64 idle_stamp;
	u64 avg_idle;
#endif

#ifdef CONFIG_IRQ_TIME_ACCOUNTING
	u64 prev_irq_time;
#endif

	/* calc_load related fields */
	unsigned long calc_load_update;
	long calc_load_active;

#ifdef CONFIG_SCHED_HRTICK
#ifdef CONFIG_SMP
	int hrtick_csd_pending;
	struct call_single_data hrtick_csd;
#endif
	struct hrtimer hrtick_timer;
#endif

#ifdef CONFIG_SCHEDSTATS
	/* latency stats */
	struct sched_info rq_sched_info;
	unsigned long long rq_cpu_time;
	/* could above be rq->cfs_rq.exec_clock + rq->rt_rq.rt_runtime ? */

	/* sys_sched_yield() stats */
	unsigned int yld_count;

	/* schedule() stats */
	unsigned int sched_switch;
	unsigned int sched_count;
	unsigned int sched_goidle;

	/* try_to_wake_up() stats */
	unsigned int ttwu_count;
	unsigned int ttwu_local;
#endif

#ifdef CONFIG_SMP
	struct task_struct *wake_list;
#endif
};

static DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);


static void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags);

static inline int cpu_of(struct rq *rq)
{
#ifdef CONFIG_SMP
	return rq->cpu;
#else
	return 0;
#endif
}

#define rcu_dereference_check_sched_domain(p) \
	rcu_dereference_check((p), \
			      rcu_read_lock_held() || \
			      lockdep_is_held(&sched_domains_mutex))

/*
 * The domain tree (rq->sd) is protected by RCU's quiescent state transition.
 * See detach_destroy_domains: synchronize_sched for details.
 *
 * The domain tree of any CPU may only be accessed from within
 * preempt-disabled sections.
 */
#define for_each_domain(cpu, __sd) \
	for (__sd = rcu_dereference_check_sched_domain(cpu_rq(cpu)->sd); __sd; __sd = __sd->parent)

#define cpu_rq(cpu) (&per_cpu(runqueues, (cpu)))
#define this_rq() (&__get_cpu_var(runqueues))
#define task_rq(p) cpu_rq(task_cpu(p))
#define cpu_curr(cpu) (cpu_rq(cpu)->curr)
#define raw_rq() (&__raw_get_cpu_var(runqueues))

#ifdef CONFIG_CGROUP_SCHED

/*
 * Return the group to which this task belongs.
 *
 * We use task_subsys_state_check() and extend the RCU verification
 * with lockdep_is_held(&p->pi_lock) because cpu_cgroup_attach()
 * holds that lock for each task it moves into the cgroup. Therefore
 * by holding that lock, we pin the task to the current cgroup.
 */
static inline struct task_group *task_group(struct task_struct *p)
{
	struct task_group *tg;
	struct cgroup_subsys_state *css;

	css = task_subsys_state_check(p, cpu_cgroup_subsys_id,
			lockdep_is_held(&p->pi_lock));
	tg = container_of(css, struct task_group, css);

	return autogroup_task_group(p, tg);
}

/* Change a task's cfs_rq and parent entity if it moves across CPUs/groups */
static inline void set_task_rq(struct task_struct *p, unsigned int cpu)
{
#ifdef CONFIG_FAIR_GROUP_SCHED
	p->se.cfs_rq = task_group(p)->cfs_rq[cpu];
	p->se.parent = task_group(p)->se[cpu];
#endif

#ifdef CONFIG_RT_GROUP_SCHED
	p->rt.rt_rq = task_group(p)->rt_rq[cpu];
	p->rt.parent = task_group(p)->rt_se[cpu];
#endif
}

#else /* CONFIG_CGROUP_SCHED */

static inline void set_task_rq(struct task_struct *p, unsigned int cpu) { }
static inline struct task_group *task_group(struct task_struct *p)
{
	return NULL;
}

#endif /* CONFIG_CGROUP_SCHED */

static void update_rq_clock_task(struct rq *rq, s64 delta);

static void update_rq_clock(struct rq *rq)
{
	s64 delta;

	if (rq->skip_clock_update > 0)
		return;

	delta = sched_clock_cpu(cpu_of(rq)) - rq->clock;
	rq->clock += delta;
	update_rq_clock_task(rq, delta);
}

/*
 * Tunables that become constants when CONFIG_SCHED_DEBUG is off:
 */
#ifdef CONFIG_SCHED_DEBUG
# define const_debug __read_mostly
#else
# define const_debug static const
#endif

/**
 * runqueue_is_locked - Returns true if the current cpu runqueue is locked
 * @cpu: the processor in question.
 *
 * This interface allows printk to be called with the runqueue lock
 * held and know whether or not it is OK to wake up the klogd.
 */
int runqueue_is_locked(int cpu)
{
	return raw_spin_is_locked(&cpu_rq(cpu)->lock);
}

/*
 * Debugging: various feature bits
 */

#define SCHED_FEAT(name, enabled) \
	__SCHED_FEAT_##name ,

enum {
#include "sched_features.h"
};

#undef SCHED_FEAT

#define SCHED_FEAT(name, enabled) \
	(1UL << __SCHED_FEAT_##name) * enabled |

const_debug unsigned int sysctl_sched_features =
#include "sched_features.h"
	0;

#undef SCHED_FEAT

#ifdef CONFIG_SCHED_DEBUG
#define SCHED_FEAT(name, enabled) \
	#name ,

static __read_mostly char *sched_feat_names[] = {
#include "sched_features.h"
	NULL
};

#undef SCHED_FEAT

static int sched_feat_show(struct seq_file *m, void *v)
{
	int i;

	for (i = 0; sched_feat_names[i]; i++) {
		if (!(sysctl_sched_features & (1UL << i)))
			seq_puts(m, "NO_");
		seq_printf(m, "%s ", sched_feat_names[i]);
	}
	seq_puts(m, "\n");

	return 0;
}

static ssize_t
sched_feat_write(struct file *filp, const char __user *ubuf,
		size_t cnt, loff_t *ppos)
{
	char buf[64];
	char *cmp;
	int neg = 0;
	int i;

	if (cnt > 63)
		cnt = 63;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;
	cmp = strstrip(buf);

	if (strncmp(cmp, "NO_", 3) == 0) {
		neg = 1;
		cmp += 3;
	}

	for (i = 0; sched_feat_names[i]; i++) {
		if (strcmp(cmp, sched_feat_names[i]) == 0) {
			if (neg)
				sysctl_sched_features &= ~(1UL << i);
			else
				sysctl_sched_features |= (1UL << i);
			break;
		}
	}

	if (!sched_feat_names[i])
		return -EINVAL;

	*ppos += cnt;

	return cnt;
}

static int sched_feat_open(struct inode *inode, struct file *filp)
{
	return single_open(filp, sched_feat_show, NULL);
}

static const struct file_operations sched_feat_fops = {
	.open = sched_feat_open,
	.write = sched_feat_write,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static __init int sched_init_debug(void)
{
	debugfs_create_file("sched_features", 0644, NULL, NULL,
			&sched_feat_fops);

	return 0;
}
late_initcall(sched_init_debug);

#endif

#define sched_feat(x) (sysctl_sched_features & (1UL << __SCHED_FEAT_##x))

/*
 * Number of tasks to iterate in a single balance run.
 * Limited because this is done with IRQs disabled.
 */
const_debug unsigned int sysctl_sched_nr_migrate = 32;

/*
 * period over which we average the RT time consumption, measured
 * in ms.
 *
 * default: 1s
 */
const_debug unsigned int sysctl_sched_time_avg = MSEC_PER_SEC;

/*
 * period over which we measure -rt task cpu usage in us.
 * default: 1s
 */
unsigned int sysctl_sched_rt_period = 1000000;

static __read_mostly int scheduler_running;

/*
 * part of the period that we allow rt tasks to run in us.
 * default: 0.95s
 */
int sysctl_sched_rt_runtime = 950000;
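/*
 * With the two defaults above, realtime tasks may consume at most 950000 us
 * of every 1000000 us period, i.e. 95% of each second. A negative
 * sysctl_sched_rt_runtime (sched_rt_runtime_us) lifts the cap:
 * global_rt_runtime() below then returns RUNTIME_INF.
 */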

static inline u64 global_rt_period(void)
{
	return (u64)sysctl_sched_rt_period * NSEC_PER_USEC;
}

static inline u64 global_rt_runtime(void)
{
	if (sysctl_sched_rt_runtime < 0)
		return RUNTIME_INF;

	return (u64)sysctl_sched_rt_runtime * NSEC_PER_USEC;
}

#ifndef prepare_arch_switch
# define prepare_arch_switch(next) do { } while (0)
#endif
#ifndef finish_arch_switch
# define finish_arch_switch(prev) do { } while (0)
#endif

static inline int task_current(struct rq *rq, struct task_struct *p)
{
	return rq->curr == p;
}

static inline int task_running(struct rq *rq, struct task_struct *p)
{
#ifdef CONFIG_SMP
	return p->on_cpu;
#else
	return task_current(rq, p);
#endif
}

#ifndef __ARCH_WANT_UNLOCKED_CTXSW
static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next)
{
#ifdef CONFIG_SMP
	/*
	 * We can optimise this out completely for !SMP, because the
	 * SMP rebalancing from interrupt is the only thing that cares
	 * here.
	 */
	next->on_cpu = 1;
#endif
}

static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
{
#ifdef CONFIG_SMP
	/*
	 * After ->on_cpu is cleared, the task can be moved to a different CPU.
	 * We must ensure this doesn't happen until the switch is completely
	 * finished.
	 */
	smp_wmb();
	prev->on_cpu = 0;
#endif
#ifdef CONFIG_DEBUG_SPINLOCK
	/* this is a valid case when another task releases the spinlock */
	rq->lock.owner = current;
#endif
	/*
	 * If we are tracking spinlock dependencies then we have to
	 * fix up the runqueue lock - which gets 'carried over' from
	 * prev into current:
	 */
	spin_acquire(&rq->lock.dep_map, 0, 0, _THIS_IP_);

	raw_spin_unlock_irq(&rq->lock);
}

#else /* __ARCH_WANT_UNLOCKED_CTXSW */
static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next)
{
#ifdef CONFIG_SMP
	/*
	 * We can optimise this out completely for !SMP, because the
	 * SMP rebalancing from interrupt is the only thing that cares
	 * here.
	 */
	next->on_cpu = 1;
#endif
#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
	raw_spin_unlock_irq(&rq->lock);
#else
	raw_spin_unlock(&rq->lock);
#endif
}

static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
{
#ifdef CONFIG_SMP
	/*
	 * After ->on_cpu is cleared, the task can be moved to a different CPU.
	 * We must ensure this doesn't happen until the switch is completely
	 * finished.
	 */
	smp_wmb();
	prev->on_cpu = 0;
#endif
#ifndef __ARCH_WANT_INTERRUPTS_ON_CTXSW
	local_irq_enable();
#endif
}
#endif /* __ARCH_WANT_UNLOCKED_CTXSW */

/*
 * __task_rq_lock - lock the rq @p resides on.
 */
static inline struct rq *__task_rq_lock(struct task_struct *p)
	__acquires(rq->lock)
{
	struct rq *rq;

	lockdep_assert_held(&p->pi_lock);

	for (;;) {
		rq = task_rq(p);
		raw_spin_lock(&rq->lock);
		if (likely(rq == task_rq(p)))
			return rq;
		raw_spin_unlock(&rq->lock);
	}
}

/*
 * task_rq_lock - lock p->pi_lock and lock the rq @p resides on.
 */
static struct rq *task_rq_lock(struct task_struct *p, unsigned long *flags)
	__acquires(p->pi_lock)
	__acquires(rq->lock)
{
	struct rq *rq;

	for (;;) {
		raw_spin_lock_irqsave(&p->pi_lock, *flags);
		rq = task_rq(p);
		raw_spin_lock(&rq->lock);
		if (likely(rq == task_rq(p)))
			return rq;
		raw_spin_unlock(&rq->lock);
		raw_spin_unlock_irqrestore(&p->pi_lock, *flags);
	}
}

static void __task_rq_unlock(struct rq *rq)
	__releases(rq->lock)
{
	raw_spin_unlock(&rq->lock);
}

static inline void
task_rq_unlock(struct rq *rq, struct task_struct *p, unsigned long *flags)
	__releases(rq->lock)
	__releases(p->pi_lock)
{
	raw_spin_unlock(&rq->lock);
	raw_spin_unlock_irqrestore(&p->pi_lock, *flags);
}
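/*
 * Illustrative calling pattern for the helpers above (a sketch, mirroring
 * how callers later in this file use them):
 *
 *	struct rq *rq;
 *	unsigned long flags;
 *
 *	rq = task_rq_lock(p, &flags);
 *	... p is pinned to rq; rq->lock and p->pi_lock are held ...
 *	task_rq_unlock(rq, p, &flags);
 */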

/*
 * this_rq_lock - lock this runqueue and disable interrupts.
 */
static struct rq *this_rq_lock(void)
	__acquires(rq->lock)
{
	struct rq *rq;

	local_irq_disable();
	rq = this_rq();
	raw_spin_lock(&rq->lock);

	return rq;
}

#ifdef CONFIG_SCHED_HRTICK
/*
 * Use HR-timers to deliver accurate preemption points.
 *
 * It's all a bit involved since we cannot program an hrt while holding the
 * rq->lock. So what we do is store a state in rq->hrtick_* and ask for a
 * reschedule event.
 *
 * When we get rescheduled we reprogram the hrtick_timer outside of the
 * rq->lock.
 */

/*
 * Use hrtick when:
 *  - enabled by features
 *  - hrtimer is actually high res
 */
static inline int hrtick_enabled(struct rq *rq)
{
	if (!sched_feat(HRTICK))
		return 0;
	if (!cpu_active(cpu_of(rq)))
		return 0;
	return hrtimer_is_hres_active(&rq->hrtick_timer);
}

static void hrtick_clear(struct rq *rq)
{
	if (hrtimer_active(&rq->hrtick_timer))
		hrtimer_cancel(&rq->hrtick_timer);
}

/*
 * High-resolution timer tick.
 * Runs from hardirq context with interrupts disabled.
 */
static enum hrtimer_restart hrtick(struct hrtimer *timer)
{
	struct rq *rq = container_of(timer, struct rq, hrtick_timer);

	WARN_ON_ONCE(cpu_of(rq) != smp_processor_id());

	raw_spin_lock(&rq->lock);
	update_rq_clock(rq);
	rq->curr->sched_class->task_tick(rq, rq->curr, 1);
	raw_spin_unlock(&rq->lock);

	return HRTIMER_NORESTART;
}

#ifdef CONFIG_SMP
/*
 * called from hardirq (IPI) context
 */
static void __hrtick_start(void *arg)
{
	struct rq *rq = arg;

	raw_spin_lock(&rq->lock);
	hrtimer_restart(&rq->hrtick_timer);
	rq->hrtick_csd_pending = 0;
	raw_spin_unlock(&rq->lock);
}

/*
 * Called to set the hrtick timer state.
 *
 * called with rq->lock held and irqs disabled
 */
static void hrtick_start(struct rq *rq, u64 delay)
{
	struct hrtimer *timer = &rq->hrtick_timer;
	ktime_t time = ktime_add_ns(timer->base->get_time(), delay);

	hrtimer_set_expires(timer, time);

	if (rq == this_rq()) {
		hrtimer_restart(timer);
	} else if (!rq->hrtick_csd_pending) {
		__smp_call_function_single(cpu_of(rq), &rq->hrtick_csd, 0);
		rq->hrtick_csd_pending = 1;
	}
}

static int
hotplug_hrtick(struct notifier_block *nfb, unsigned long action, void *hcpu)
{
	int cpu = (int)(long)hcpu;

	switch (action) {
	case CPU_UP_CANCELED:
	case CPU_UP_CANCELED_FROZEN:
	case CPU_DOWN_PREPARE:
	case CPU_DOWN_PREPARE_FROZEN:
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		hrtick_clear(cpu_rq(cpu));
		return NOTIFY_OK;
	}

	return NOTIFY_DONE;
}

static __init void init_hrtick(void)
{
	hotcpu_notifier(hotplug_hrtick, 0);
}
#else
/*
 * Called to set the hrtick timer state.
 *
 * called with rq->lock held and irqs disabled
 */
static void hrtick_start(struct rq *rq, u64 delay)
{
	__hrtimer_start_range_ns(&rq->hrtick_timer, ns_to_ktime(delay), 0,
			HRTIMER_MODE_REL_PINNED, 0);
}

static inline void init_hrtick(void)
{
}
#endif /* CONFIG_SMP */

static void init_rq_hrtick(struct rq *rq)
{
#ifdef CONFIG_SMP
	rq->hrtick_csd_pending = 0;

	rq->hrtick_csd.flags = 0;
	rq->hrtick_csd.func = __hrtick_start;
	rq->hrtick_csd.info = rq;
#endif

	hrtimer_init(&rq->hrtick_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	rq->hrtick_timer.function = hrtick;
}
#else /* CONFIG_SCHED_HRTICK */
static inline void hrtick_clear(struct rq *rq)
{
}

static inline void init_rq_hrtick(struct rq *rq)
{
}

static inline void init_hrtick(void)
{
}
#endif /* CONFIG_SCHED_HRTICK */

/*
 * resched_task - mark a task 'to be rescheduled now'.
 *
 * On UP this means the setting of the need_resched flag, on SMP it
 * might also involve a cross-CPU call to trigger the scheduler on
 * the target CPU.
 */
#ifdef CONFIG_SMP

#ifndef tsk_is_polling
#define tsk_is_polling(t) test_tsk_thread_flag(t, TIF_POLLING_NRFLAG)
#endif

static void resched_task(struct task_struct *p)
{
	int cpu;

	assert_raw_spin_locked(&task_rq(p)->lock);

	if (test_tsk_need_resched(p))
		return;

	set_tsk_need_resched(p);

	cpu = task_cpu(p);
	if (cpu == smp_processor_id())
		return;

	/* NEED_RESCHED must be visible before we test polling */
	smp_mb();
	if (!tsk_is_polling(p))
		smp_send_reschedule(cpu);
}

static void resched_cpu(int cpu)
{
	struct rq *rq = cpu_rq(cpu);
	unsigned long flags;

	if (!raw_spin_trylock_irqsave(&rq->lock, flags))
		return;
	resched_task(cpu_curr(cpu));
	raw_spin_unlock_irqrestore(&rq->lock, flags);
}

#ifdef CONFIG_NO_HZ
/*
 * In the semi idle case, use the nearest busy cpu for migrating timers
 * from an idle cpu. This is good for power-savings.
 *
 * We don't do a similar optimization for a completely idle system, as
 * selecting an idle cpu will add more delays to the timers than intended
 * (as that cpu's timer base may not be uptodate wrt jiffies etc).
 */
int get_nohz_timer_target(void)
{
	int cpu = smp_processor_id();
	int i;
	struct sched_domain *sd;

	rcu_read_lock();
	for_each_domain(cpu, sd) {
		for_each_cpu(i, sched_domain_span(sd)) {
			if (!idle_cpu(i)) {
				cpu = i;
				goto unlock;
			}
		}
	}
unlock:
	rcu_read_unlock();
	return cpu;
}
/*
 * When add_timer_on() enqueues a timer into the timer wheel of an
 * idle CPU then this timer might expire before the next timer event
 * which is scheduled to wake up that CPU. In case of a completely
 * idle system the next event might even be infinite time into the
 * future. wake_up_idle_cpu() ensures that the CPU is woken up and
 * leaves the inner idle loop so the newly added timer is taken into
 * account when the CPU goes back to idle and evaluates the timer
 * wheel for the next timer event.
 */
void wake_up_idle_cpu(int cpu)
{
	struct rq *rq = cpu_rq(cpu);

	if (cpu == smp_processor_id())
		return;

	/*
	 * This is safe, as this function is called with the timer
	 * wheel base lock of (cpu) held. When the CPU is on the way
	 * to idle and has not yet set rq->curr to idle then it will
	 * be serialized on the timer wheel base lock and take the new
	 * timer into account automatically.
	 */
	if (rq->curr != rq->idle)
		return;

	/*
	 * We can set TIF_RESCHED on the idle task of the other CPU
	 * lockless. The worst case is that the other CPU runs the
	 * idle task through an additional NOOP schedule()
	 */
	set_tsk_need_resched(rq->idle);

	/* NEED_RESCHED must be visible before we test polling */
	smp_mb();
	if (!tsk_is_polling(rq->idle))
		smp_send_reschedule(cpu);
}

#endif /* CONFIG_NO_HZ */

static u64 sched_avg_period(void)
{
	return (u64)sysctl_sched_time_avg * NSEC_PER_MSEC / 2;
}
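/*
 * With the default sysctl_sched_time_avg of 1000 (ms), sched_avg_period()
 * is 0.5s expressed in nanoseconds; rq->rt_avg is halved once per such
 * half-period by sched_avg_update() below.
 */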

static void sched_avg_update(struct rq *rq)
{
	s64 period = sched_avg_period();

	while ((s64)(rq->clock - rq->age_stamp) > period) {
		/*
		 * Inline assembly required to prevent the compiler
		 * optimising this loop into a divmod call.
		 * See __iter_div_u64_rem() for another example of this.
		 */
		asm("" : "+rm" (rq->age_stamp));
		rq->age_stamp += period;
		rq->rt_avg /= 2;
	}
}

static void sched_rt_avg_update(struct rq *rq, u64 rt_delta)
{
	rq->rt_avg += rt_delta;
	sched_avg_update(rq);
}

#else /* !CONFIG_SMP */
static void resched_task(struct task_struct *p)
{
	assert_raw_spin_locked(&task_rq(p)->lock);
	set_tsk_need_resched(p);
}

static void sched_rt_avg_update(struct rq *rq, u64 rt_delta)
{
}

static void sched_avg_update(struct rq *rq)
{
}
#endif /* CONFIG_SMP */

#if BITS_PER_LONG == 32
# define WMULT_CONST (~0UL)
#else
# define WMULT_CONST (1UL << 32)
#endif

#define WMULT_SHIFT 32

/*
 * Shift right and round:
 */
#define SRR(x, y) (((x) + (1UL << ((y) - 1))) >> (y))
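/*
 * SRR() rounds to nearest rather than truncating: e.g. SRR(7, 2) ==
 * (7 + 2) >> 2 == 2 (7/4 rounded), whereas a plain shift would give 1.
 */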

/*
 * delta *= weight / lw
 */
static unsigned long
calc_delta_mine(unsigned long delta_exec, unsigned long weight,
		struct load_weight *lw)
{
	u64 tmp;

	/*
	 * weight can be less than 2^SCHED_LOAD_RESOLUTION for task group sched
	 * entities since MIN_SHARES = 2. Treat weight as 1 if less than
	 * 2^SCHED_LOAD_RESOLUTION.
	 */
	if (likely(weight > (1UL << SCHED_LOAD_RESOLUTION)))
		tmp = (u64)delta_exec * scale_load_down(weight);
	else
		tmp = (u64)delta_exec;

	if (!lw->inv_weight) {
		unsigned long w = scale_load_down(lw->weight);

		if (BITS_PER_LONG > 32 && unlikely(w >= WMULT_CONST))
			lw->inv_weight = 1;
		else if (unlikely(!w))
			lw->inv_weight = WMULT_CONST;
		else
			lw->inv_weight = WMULT_CONST / w;
	}

	/*
	 * Check whether we'd overflow the 64-bit multiplication:
	 */
	if (unlikely(tmp > WMULT_CONST))
		tmp = SRR(SRR(tmp, WMULT_SHIFT/2) * lw->inv_weight,
			WMULT_SHIFT/2);
	else
		tmp = SRR(tmp * lw->inv_weight, WMULT_SHIFT);

	return (unsigned long)min(tmp, (u64)(unsigned long)LONG_MAX);
}
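/*
 * Worked example (a sketch, assuming SCHED_LOAD_RESOLUTION == 0 so that
 * scale_load_down() is a no-op): for delta_exec = 1000000, weight = 1024
 * (nice 0) and lw->weight = 2048, inv_weight becomes 2^32/2048 and the
 * result is SRR(1000000 * 1024 * inv_weight, 32) ~= 500000, i.e.
 * delta_exec scaled by 1024/2048 as the "delta *= weight / lw" comment says.
 */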

static inline void update_load_add(struct load_weight *lw, unsigned long inc)
{
	lw->weight += inc;
	lw->inv_weight = 0;
}

static inline void update_load_sub(struct load_weight *lw, unsigned long dec)
{
	lw->weight -= dec;
	lw->inv_weight = 0;
}

static inline void update_load_set(struct load_weight *lw, unsigned long w)
{
	lw->weight = w;
	lw->inv_weight = 0;
}

/*
 * To aid in avoiding the subversion of "niceness" due to uneven distribution
 * of tasks with abnormal "nice" values across CPUs the contribution that
 * each task makes to its run queue's load is weighted according to its
 * scheduling class and "nice" value. For SCHED_NORMAL tasks this is just a
 * scaled version of the new time slice allocation that they receive on time
 * slice expiry etc.
 */

#define WEIGHT_IDLEPRIO 3
#define WMULT_IDLEPRIO 1431655765

/*
 * Nice levels are multiplicative, with a gentle 10% change for every
 * nice level changed. I.e. when a CPU-bound task goes from nice 0 to
 * nice 1, it will get ~10% less CPU time than another CPU-bound task
 * that remained on nice 0.
 *
 * The "10% effect" is relative and cumulative: from _any_ nice level,
 * if you go up 1 level, it's -10% CPU usage, if you go down 1 level
 * it's +10% CPU usage. (to achieve that we use a multiplier of 1.25.
 * If a task goes up by ~10% and another task goes down by ~10% then
 * the relative distance between them is ~25%.)
 */
static const int prio_to_weight[40] = {
 /* -20 */     88761,     71755,     56483,     46273,     36291,
 /* -15 */     29154,     23254,     18705,     14949,     11916,
 /* -10 */      9548,      7620,      6100,      4904,      3906,
 /*  -5 */      3121,      2501,      1991,      1586,      1277,
 /*   0 */      1024,       820,       655,       526,       423,
 /*   5 */       335,       272,       215,       172,       137,
 /*  10 */       110,        87,        70,        56,        45,
 /*  15 */        36,        29,        23,        18,        15,
};
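/*
 * Reading the table: each step away from nice 0 scales the weight by ~1.25,
 * so nice 1 is 1024/1.25 ~= 820 and nice -1 is 1024*1.25 ~= 1277, which is
 * how the ~10%-per-level CPU share described above comes about.
 */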

/*
 * Inverse (2^32/x) values of the prio_to_weight[] array, precalculated.
 *
 * In cases where the weight does not change often, we can use the
 * precalculated inverse to speed up arithmetics by turning divisions
 * into multiplications:
 */
static const u32 prio_to_wmult[40] = {
 /* -20 */     48388,     59856,     76040,     92818,    118348,
 /* -15 */    147320,    184698,    229616,    287308,    360437,
 /* -10 */    449829,    563644,    704093,    875809,   1099582,
 /*  -5 */   1376151,   1717300,   2157191,   2708050,   3363326,
 /*   0 */   4194304,   5237765,   6557202,   8165337,  10153587,
 /*   5 */  12820798,  15790321,  19976592,  24970740,  31350126,
 /*  10 */  39045157,  49367440,  61356676,  76695844,  95443717,
 /*  15 */ 119304647, 148102320, 186737708, 238609294, 286331153,
};
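/*
 * E.g. the nice 0 entry is 2^32/1024 == 4194304; calc_delta_mine() above can
 * then replace a division by the weight with a multiplication by this inverse
 * followed by a 32-bit shift.
 */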

/* Time spent by the tasks of the cpu accounting group executing in ... */
enum cpuacct_stat_index {
	CPUACCT_STAT_USER,	/* ... user mode */
	CPUACCT_STAT_SYSTEM,	/* ... kernel mode */

	CPUACCT_STAT_NSTATS,
};

#ifdef CONFIG_CGROUP_CPUACCT
static void cpuacct_charge(struct task_struct *tsk, u64 cputime);
static void cpuacct_update_stats(struct task_struct *tsk,
		enum cpuacct_stat_index idx, cputime_t val);
#else
static inline void cpuacct_charge(struct task_struct *tsk, u64 cputime) {}
static inline void cpuacct_update_stats(struct task_struct *tsk,
		enum cpuacct_stat_index idx, cputime_t val) {}
#endif

static inline void inc_cpu_load(struct rq *rq, unsigned long load)
{
	update_load_add(&rq->load, load);
}

static inline void dec_cpu_load(struct rq *rq, unsigned long load)
{
	update_load_sub(&rq->load, load);
}

#if (defined(CONFIG_SMP) && defined(CONFIG_FAIR_GROUP_SCHED)) || defined(CONFIG_RT_GROUP_SCHED)
typedef int (*tg_visitor)(struct task_group *, void *);

/*
 * Iterate the full tree, calling @down when first entering a node and @up when
 * leaving it for the final time.
 */
static int walk_tg_tree(tg_visitor down, tg_visitor up, void *data)
{
	struct task_group *parent, *child;
	int ret;

	rcu_read_lock();
	parent = &root_task_group;
down:
	ret = (*down)(parent, data);
	if (ret)
		goto out_unlock;
	list_for_each_entry_rcu(child, &parent->children, siblings) {
		parent = child;
		goto down;

up:
		continue;
	}
	ret = (*up)(parent, data);
	if (ret)
		goto out_unlock;

	child = parent;
	parent = parent->parent;
	if (parent)
		goto up;
out_unlock:
	rcu_read_unlock();

	return ret;
}

static int tg_nop(struct task_group *tg, void *data)
{
	return 0;
}
#endif

#ifdef CONFIG_SMP
/* Used instead of source_load when we know the type == 0 */
static unsigned long weighted_cpuload(const int cpu)
{
	return cpu_rq(cpu)->load.weight;
}

/*
 * Return a low guess at the load of a migration-source cpu weighted
 * according to the scheduling class and "nice" value.
 *
 * We want to under-estimate the load of migration sources, to
 * balance conservatively.
 */
static unsigned long source_load(int cpu, int type)
{
	struct rq *rq = cpu_rq(cpu);
	unsigned long total = weighted_cpuload(cpu);

	if (type == 0 || !sched_feat(LB_BIAS))
		return total;

	return min(rq->cpu_load[type-1], total);
}

/*
 * Return a high guess at the load of a migration-target cpu weighted
 * according to the scheduling class and "nice" value.
 */
static unsigned long target_load(int cpu, int type)
{
	struct rq *rq = cpu_rq(cpu);
	unsigned long total = weighted_cpuload(cpu);

	if (type == 0 || !sched_feat(LB_BIAS))
		return total;

	return max(rq->cpu_load[type-1], total);
}

static unsigned long power_of(int cpu)
{
	return cpu_rq(cpu)->cpu_power;
}

static int task_hot(struct task_struct *p, u64 now, struct sched_domain *sd);

static unsigned long cpu_avg_load_per_task(int cpu)
{
	struct rq *rq = cpu_rq(cpu);
	unsigned long nr_running = ACCESS_ONCE(rq->nr_running);

	if (nr_running)
		rq->avg_load_per_task = rq->load.weight / nr_running;
	else
		rq->avg_load_per_task = 0;

	return rq->avg_load_per_task;
}

#ifdef CONFIG_FAIR_GROUP_SCHED

/*
 * Compute the cpu's hierarchical load factor for each task group.
 * This needs to be done in a top-down fashion because the load of a child
 * group is a fraction of its parent's load.
 */
static int tg_load_down(struct task_group *tg, void *data)
{
	unsigned long load;
	long cpu = (long)data;

	if (!tg->parent) {
		load = cpu_rq(cpu)->load.weight;
	} else {
		load = tg->parent->cfs_rq[cpu]->h_load;
		load *= tg->se[cpu]->load.weight;
		load /= tg->parent->cfs_rq[cpu]->load.weight + 1;
	}

	tg->cfs_rq[cpu]->h_load = load;

	return 0;
}

static void update_h_load(long cpu)
{
	walk_tg_tree(tg_load_down, tg_nop, (void *)cpu);
}

#endif
1602#ifdef CONFIG_PREEMPT
1603
b78bb868
PZ
1604static void double_rq_lock(struct rq *rq1, struct rq *rq2);
1605
70574a99 1606/*
8f45e2b5
GH
1607 * fair double_lock_balance: Safely acquires both rq->locks in a fair
1608 * way at the expense of forcing extra atomic operations in all
1609 * invocations. This assures that the double_lock is acquired using the
1610 * same underlying policy as the spinlock_t on this architecture, which
1611 * reduces latency compared to the unfair variant below. However, it
1612 * also adds more overhead and therefore may reduce throughput.
70574a99 1613 */
8f45e2b5
GH
1614static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest)
1615 __releases(this_rq->lock)
1616 __acquires(busiest->lock)
1617 __acquires(this_rq->lock)
1618{
05fa785c 1619 raw_spin_unlock(&this_rq->lock);
8f45e2b5
GH
1620 double_rq_lock(this_rq, busiest);
1621
1622 return 1;
1623}
1624
1625#else
1626/*
1627 * Unfair double_lock_balance: Optimizes throughput at the expense of
1628 * latency by eliminating extra atomic operations when the locks are
1629 * already in proper order on entry. This favors lower cpu-ids and will
1630 * grant the double lock to lower cpus over higher ids under contention,
1631 * regardless of entry order into the function.
1632 */
1633static int _double_lock_balance(struct rq *this_rq, struct rq *busiest)
70574a99
AD
1634 __releases(this_rq->lock)
1635 __acquires(busiest->lock)
1636 __acquires(this_rq->lock)
1637{
1638 int ret = 0;
1639
05fa785c 1640 if (unlikely(!raw_spin_trylock(&busiest->lock))) {
70574a99 1641 if (busiest < this_rq) {
05fa785c
TG
1642 raw_spin_unlock(&this_rq->lock);
1643 raw_spin_lock(&busiest->lock);
1644 raw_spin_lock_nested(&this_rq->lock,
1645 SINGLE_DEPTH_NESTING);
70574a99
AD
1646 ret = 1;
1647 } else
05fa785c
TG
1648 raw_spin_lock_nested(&busiest->lock,
1649 SINGLE_DEPTH_NESTING);
70574a99
AD
1650 }
1651 return ret;
1652}
1653
8f45e2b5
GH
1654#endif /* CONFIG_PREEMPT */
1655
1656/*
1657 * double_lock_balance - lock the busiest runqueue, this_rq is locked already.
1658 */
1659static int double_lock_balance(struct rq *this_rq, struct rq *busiest)
1660{
1661 if (unlikely(!irqs_disabled())) {
1662 /* printk() doesn't work good under rq->lock */
05fa785c 1663 raw_spin_unlock(&this_rq->lock);
8f45e2b5
GH
1664 BUG_ON(1);
1665 }
1666
1667 return _double_lock_balance(this_rq, busiest);
1668}
1669
70574a99
AD
1670static inline void double_unlock_balance(struct rq *this_rq, struct rq *busiest)
1671 __releases(busiest->lock)
1672{
05fa785c 1673 raw_spin_unlock(&busiest->lock);
70574a99
AD
1674 lock_set_subclass(&this_rq->lock.dep_map, 0, _RET_IP_);
1675}
1e3c88bd
PZ
1676
1677/*
1678 * double_rq_lock - safely lock two runqueues
1679 *
1680 * Note this does not disable interrupts like task_rq_lock,
1681 * you need to do so manually before calling.
1682 */
1683static void double_rq_lock(struct rq *rq1, struct rq *rq2)
1684 __acquires(rq1->lock)
1685 __acquires(rq2->lock)
1686{
1687 BUG_ON(!irqs_disabled());
1688 if (rq1 == rq2) {
1689 raw_spin_lock(&rq1->lock);
1690 __acquire(rq2->lock); /* Fake it out ;) */
1691 } else {
1692 if (rq1 < rq2) {
1693 raw_spin_lock(&rq1->lock);
1694 raw_spin_lock_nested(&rq2->lock, SINGLE_DEPTH_NESTING);
1695 } else {
1696 raw_spin_lock(&rq2->lock);
1697 raw_spin_lock_nested(&rq1->lock, SINGLE_DEPTH_NESTING);
1698 }
1699 }
1e3c88bd
PZ
1700}
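/*
 * Note the ordering above: when rq1 != rq2 the lower-addressed runqueue is
 * always locked first, which implements the "ascending &runqueue" rule from
 * the struct rq comment and keeps two CPUs calling double_rq_lock() on the
 * same pair of runqueues from deadlocking against each other.
 */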

/*
 * double_rq_unlock - safely unlock two runqueues
 *
 * Note this does not restore interrupts like task_rq_unlock,
 * you need to do so manually after calling.
 */
static void double_rq_unlock(struct rq *rq1, struct rq *rq2)
	__releases(rq1->lock)
	__releases(rq2->lock)
{
	raw_spin_unlock(&rq1->lock);
	if (rq1 != rq2)
		raw_spin_unlock(&rq2->lock);
	else
		__release(rq2->lock);
}

#else /* CONFIG_SMP */

/*
 * double_rq_lock - safely lock two runqueues
 *
 * Note this does not disable interrupts like task_rq_lock,
 * you need to do so manually before calling.
 */
static void double_rq_lock(struct rq *rq1, struct rq *rq2)
	__acquires(rq1->lock)
	__acquires(rq2->lock)
{
	BUG_ON(!irqs_disabled());
	BUG_ON(rq1 != rq2);
	raw_spin_lock(&rq1->lock);
	__acquire(rq2->lock);	/* Fake it out ;) */
}

/*
 * double_rq_unlock - safely unlock two runqueues
 *
 * Note this does not restore interrupts like task_rq_unlock,
 * you need to do so manually after calling.
 */
static void double_rq_unlock(struct rq *rq1, struct rq *rq2)
	__releases(rq1->lock)
	__releases(rq2->lock)
{
	BUG_ON(rq1 != rq2);
	raw_spin_unlock(&rq1->lock);
	__release(rq2->lock);
}

#endif

static void calc_load_account_idle(struct rq *this_rq);
static void update_sysctl(void);
static int get_update_sysctl_factor(void);
static void update_cpu_load(struct rq *this_rq);

static inline void __set_task_cpu(struct task_struct *p, unsigned int cpu)
{
	set_task_rq(p, cpu);
#ifdef CONFIG_SMP
	/*
	 * After ->cpu is set up to a new value, task_rq_lock(p, ...) can be
	 * successfully executed on another CPU. We must ensure that updates of
	 * per-task data have been completed by this moment.
	 */
	smp_wmb();
	task_thread_info(p)->cpu = cpu;
#endif
}

static const struct sched_class rt_sched_class;

#define sched_class_highest (&stop_sched_class)
#define for_each_class(class) \
	for (class = sched_class_highest; class; class = class->next)

#include "sched_stats.h"

static void inc_nr_running(struct rq *rq)
{
	rq->nr_running++;
}

static void dec_nr_running(struct rq *rq)
{
	rq->nr_running--;
}

static void set_load_weight(struct task_struct *p)
{
	int prio = p->static_prio - MAX_RT_PRIO;
	struct load_weight *load = &p->se.load;

	/*
	 * SCHED_IDLE tasks get minimal weight:
	 */
	if (p->policy == SCHED_IDLE) {
		load->weight = scale_load(WEIGHT_IDLEPRIO);
		load->inv_weight = WMULT_IDLEPRIO;
		return;
	}

	load->weight = scale_load(prio_to_weight[prio]);
	load->inv_weight = prio_to_wmult[prio];
}
1808
371fd7e7 1809static void enqueue_task(struct rq *rq, struct task_struct *p, int flags)
2087a1ad 1810{
a64692a3 1811 update_rq_clock(rq);
dd41f596 1812 sched_info_queued(p);
371fd7e7 1813 p->sched_class->enqueue_task(rq, p, flags);
71f8bd46
IM
1814}
1815
371fd7e7 1816static void dequeue_task(struct rq *rq, struct task_struct *p, int flags)
71f8bd46 1817{
a64692a3 1818 update_rq_clock(rq);
46ac22ba 1819 sched_info_dequeued(p);
371fd7e7 1820 p->sched_class->dequeue_task(rq, p, flags);
71f8bd46
IM
1821}
1822
1e3c88bd
PZ
1823/*
1824 * activate_task - move a task to the runqueue.
1825 */
371fd7e7 1826static void activate_task(struct rq *rq, struct task_struct *p, int flags)
1e3c88bd
PZ
1827{
1828 if (task_contributes_to_load(p))
1829 rq->nr_uninterruptible--;
1830
371fd7e7 1831 enqueue_task(rq, p, flags);
1e3c88bd
PZ
1832 inc_nr_running(rq);
1833}
1834
1835/*
1836 * deactivate_task - remove a task from the runqueue.
1837 */
371fd7e7 1838static void deactivate_task(struct rq *rq, struct task_struct *p, int flags)
1e3c88bd
PZ
1839{
1840 if (task_contributes_to_load(p))
1841 rq->nr_uninterruptible++;
1842
371fd7e7 1843 dequeue_task(rq, p, flags);
1e3c88bd
PZ
1844 dec_nr_running(rq);
1845}
1846
b52bfee4
VP
1847#ifdef CONFIG_IRQ_TIME_ACCOUNTING
1848
305e6835
VP
1849/*
1850 * There are no locks covering percpu hardirq/softirq time.
1851 * They are only modified in account_system_vtime, on the corresponding CPU
1852 * with interrupts disabled. So, writes are safe.
1853 * They are read and saved off onto struct rq in update_rq_clock().
1854 * This may result in another CPU reading this CPU's irq time and can
1855 * race with irq/account_system_vtime on this CPU. We would either get the old
8e92c201
PZ
1856 * or the new value, with the side effect of accounting a slice of irq time to
1857 * the wrong task when an irq is in progress while we read rq->clock. That is a
1858 * worthy compromise in place of having locks on each irq in account_system_time.
305e6835 1859 */
b52bfee4
VP
1860static DEFINE_PER_CPU(u64, cpu_hardirq_time);
1861static DEFINE_PER_CPU(u64, cpu_softirq_time);
1862
1863static DEFINE_PER_CPU(u64, irq_start_time);
1864static int sched_clock_irqtime;
1865
1866void enable_sched_clock_irqtime(void)
1867{
1868 sched_clock_irqtime = 1;
1869}
1870
1871void disable_sched_clock_irqtime(void)
1872{
1873 sched_clock_irqtime = 0;
1874}
1875
8e92c201
PZ
1876#ifndef CONFIG_64BIT
1877static DEFINE_PER_CPU(seqcount_t, irq_time_seq);
1878
1879static inline void irq_time_write_begin(void)
1880{
1881 __this_cpu_inc(irq_time_seq.sequence);
1882 smp_wmb();
1883}
1884
1885static inline void irq_time_write_end(void)
1886{
1887 smp_wmb();
1888 __this_cpu_inc(irq_time_seq.sequence);
1889}
1890
1891static inline u64 irq_time_read(int cpu)
1892{
1893 u64 irq_time;
1894 unsigned seq;
1895
1896 do {
1897 seq = read_seqcount_begin(&per_cpu(irq_time_seq, cpu));
1898 irq_time = per_cpu(cpu_softirq_time, cpu) +
1899 per_cpu(cpu_hardirq_time, cpu);
1900 } while (read_seqcount_retry(&per_cpu(irq_time_seq, cpu), seq));
1901
1902 return irq_time;
1903}
1904#else /* CONFIG_64BIT */
1905static inline void irq_time_write_begin(void)
1906{
1907}
1908
1909static inline void irq_time_write_end(void)
1910{
1911}
1912
1913static inline u64 irq_time_read(int cpu)
305e6835 1914{
305e6835
VP
1915 return per_cpu(cpu_softirq_time, cpu) + per_cpu(cpu_hardirq_time, cpu);
1916}
8e92c201 1917#endif /* CONFIG_64BIT */
305e6835 1918
fe44d621
PZ
1919/*
1920 * Called before incrementing preempt_count on {soft,}irq_enter
1921 * and before decrementing preempt_count on {soft,}irq_exit.
1922 */
b52bfee4
VP
1923void account_system_vtime(struct task_struct *curr)
1924{
1925 unsigned long flags;
fe44d621 1926 s64 delta;
b52bfee4 1927 int cpu;
b52bfee4
VP
1928
1929 if (!sched_clock_irqtime)
1930 return;
1931
1932 local_irq_save(flags);
1933
b52bfee4 1934 cpu = smp_processor_id();
fe44d621
PZ
1935 delta = sched_clock_cpu(cpu) - __this_cpu_read(irq_start_time);
1936 __this_cpu_add(irq_start_time, delta);
1937
8e92c201 1938 irq_time_write_begin();
b52bfee4
VP
1939 /*
1940 * We do not account for softirq time from ksoftirqd here.
1941 * We want to continue accounting softirq time to the ksoftirqd thread
1942 * in that case, so as not to confuse the scheduler with a special task
1943 * that does not consume any time but still wants to run.
1944 */
1945 if (hardirq_count())
fe44d621 1946 __this_cpu_add(cpu_hardirq_time, delta);
4dd53d89 1947 else if (in_serving_softirq() && curr != this_cpu_ksoftirqd())
fe44d621 1948 __this_cpu_add(cpu_softirq_time, delta);
b52bfee4 1949
8e92c201 1950 irq_time_write_end();
b52bfee4
VP
1951 local_irq_restore(flags);
1952}
b7dadc38 1953EXPORT_SYMBOL_GPL(account_system_vtime);
b52bfee4 1954
fe44d621 1955static void update_rq_clock_task(struct rq *rq, s64 delta)
aa483808 1956{
fe44d621
PZ
1957 s64 irq_delta;
1958
8e92c201 1959 irq_delta = irq_time_read(cpu_of(rq)) - rq->prev_irq_time;
fe44d621
PZ
1960
1961 /*
1962 * Since irq_time is only updated on {soft,}irq_exit, we might run into
1963 * this case when a previous update_rq_clock() happened inside a
1964 * {soft,}irq region.
1965 *
1966 * When this happens, we stop ->clock_task and only update the
1967 * prev_irq_time stamp to account for the part that fit, so that a next
1968 * update will consume the rest. This ensures ->clock_task is
1969 * monotonic.
1970 *
1971 * It does, however, cause some slight mis-attribution of {soft,}irq
1972 * time; a more accurate solution would be to update the irq_time using
1973 * the current rq->clock timestamp, except that would require using
1974 * atomic ops.
1975 */
1976 if (irq_delta > delta)
1977 irq_delta = delta;
1978
1979 rq->prev_irq_time += irq_delta;
1980 delta -= irq_delta;
1981 rq->clock_task += delta;
1982
1983 if (irq_delta && sched_feat(NONIRQ_POWER))
1984 sched_rt_avg_update(rq, irq_delta);
aa483808
VP
1985}
1986
abb74cef
VP
1987static int irqtime_account_hi_update(void)
1988{
1989 struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
1990 unsigned long flags;
1991 u64 latest_ns;
1992 int ret = 0;
1993
1994 local_irq_save(flags);
1995 latest_ns = this_cpu_read(cpu_hardirq_time);
1996 if (cputime64_gt(nsecs_to_cputime64(latest_ns), cpustat->irq))
1997 ret = 1;
1998 local_irq_restore(flags);
1999 return ret;
2000}
2001
2002static int irqtime_account_si_update(void)
2003{
2004 struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
2005 unsigned long flags;
2006 u64 latest_ns;
2007 int ret = 0;
2008
2009 local_irq_save(flags);
2010 latest_ns = this_cpu_read(cpu_softirq_time);
2011 if (cputime64_gt(nsecs_to_cputime64(latest_ns), cpustat->softirq))
2012 ret = 1;
2013 local_irq_restore(flags);
2014 return ret;
2015}
2016
fe44d621 2017#else /* CONFIG_IRQ_TIME_ACCOUNTING */
305e6835 2018
abb74cef
VP
2019#define sched_clock_irqtime (0)
2020
fe44d621 2021static void update_rq_clock_task(struct rq *rq, s64 delta)
305e6835 2022{
fe44d621 2023 rq->clock_task += delta;
305e6835
VP
2024}
2025
fe44d621 2026#endif /* CONFIG_IRQ_TIME_ACCOUNTING */
b52bfee4 2027
1e3c88bd
PZ
2028#include "sched_idletask.c"
2029#include "sched_fair.c"
2030#include "sched_rt.c"
5091faa4 2031#include "sched_autogroup.c"
34f971f6 2032#include "sched_stoptask.c"
1e3c88bd
PZ
2033#ifdef CONFIG_SCHED_DEBUG
2034# include "sched_debug.c"
2035#endif
2036
34f971f6
PZ
2037void sched_set_stop_task(int cpu, struct task_struct *stop)
2038{
2039 struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };
2040 struct task_struct *old_stop = cpu_rq(cpu)->stop;
2041
2042 if (stop) {
2043 /*
2044 * Make it appear like a SCHED_FIFO task; it's something
2045 * userspace knows about and won't get confused about.
2046 *
2047 * Also, it will make PI more or less work without too
2048 * much confusion -- but then, stop work should not
2049 * rely on PI working anyway.
2050 */
2051 sched_setscheduler_nocheck(stop, SCHED_FIFO, &param);
2052
2053 stop->sched_class = &stop_sched_class;
2054 }
2055
2056 cpu_rq(cpu)->stop = stop;
2057
2058 if (old_stop) {
2059 /*
2060 * Reset it back to a normal scheduling class so that
2061 * it can die in pieces.
2062 */
2063 old_stop->sched_class = &rt_sched_class;
2064 }
2065}
2066
14531189 2067/*
dd41f596 2068 * __normal_prio - return the priority that is based on the static prio
14531189 2069 */
14531189
IM
2070static inline int __normal_prio(struct task_struct *p)
2071{
dd41f596 2072 return p->static_prio;
14531189
IM
2073}
2074
b29739f9
IM
2075/*
2076 * Calculate the expected normal priority: i.e. priority
2077 * without taking RT-inheritance into account. Might be
2078 * boosted by interactivity modifiers. Changes upon fork,
2079 * setprio syscalls, and whenever the interactivity
2080 * estimator recalculates.
2081 */
36c8b586 2082static inline int normal_prio(struct task_struct *p)
b29739f9
IM
2083{
2084 int prio;
2085
e05606d3 2086 if (task_has_rt_policy(p))
b29739f9
IM
2087 prio = MAX_RT_PRIO-1 - p->rt_priority;
2088 else
2089 prio = __normal_prio(p);
2090 return prio;
2091}
2092
2093/*
2094 * Calculate the current priority, i.e. the priority
2095 * taken into account by the scheduler. This value might
2096 * be boosted by RT tasks, or might be boosted by
2097 * interactivity modifiers. Will be RT if the task got
2098 * RT-boosted. If not then it returns p->normal_prio.
2099 */
36c8b586 2100static int effective_prio(struct task_struct *p)
b29739f9
IM
2101{
2102 p->normal_prio = normal_prio(p);
2103 /*
2104 * If we are RT tasks or we were boosted to RT priority,
2105 * keep the priority unchanged. Otherwise, update priority
2106 * to the normal priority:
2107 */
2108 if (!rt_prio(p->prio))
2109 return p->normal_prio;
2110 return p->prio;
2111}
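A worked example of the mapping above, assuming the customary MAX_RT_PRIO value of 100 (standalone illustration, not code from this file):

	#include <stdio.h>

	#define MAX_RT_PRIO 100	/* assumption: the usual kernel value */

	int main(void)
	{
		unsigned int rt_priority = 99;	/* highest user-visible RT priority */
		int prio = MAX_RT_PRIO - 1 - rt_priority;

		/* prints "rt_priority 99 -> prio 0"; lower prio means more important */
		printf("rt_priority %u -> prio %d\n", rt_priority, prio);
		return 0;
	}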
2112
1da177e4
LT
2113/**
2114 * task_curr - is this task currently executing on a CPU?
2115 * @p: the task in question.
2116 */
36c8b586 2117inline int task_curr(const struct task_struct *p)
1da177e4
LT
2118{
2119 return cpu_curr(task_cpu(p)) == p;
2120}
2121
cb469845
SR
2122static inline void check_class_changed(struct rq *rq, struct task_struct *p,
2123 const struct sched_class *prev_class,
da7a735e 2124 int oldprio)
cb469845
SR
2125{
2126 if (prev_class != p->sched_class) {
2127 if (prev_class->switched_from)
da7a735e
PZ
2128 prev_class->switched_from(rq, p);
2129 p->sched_class->switched_to(rq, p);
2130 } else if (oldprio != p->prio)
2131 p->sched_class->prio_changed(rq, p, oldprio);
cb469845
SR
2132}
2133
1e5a7405
PZ
2134static void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags)
2135{
2136 const struct sched_class *class;
2137
2138 if (p->sched_class == rq->curr->sched_class) {
2139 rq->curr->sched_class->check_preempt_curr(rq, p, flags);
2140 } else {
2141 for_each_class(class) {
2142 if (class == rq->curr->sched_class)
2143 break;
2144 if (class == p->sched_class) {
2145 resched_task(rq->curr);
2146 break;
2147 }
2148 }
2149 }
2150
2151 /*
2152 * A queue event has occurred, and we're going to schedule. In
2153 * this case, we can avoid a useless back-to-back clock update.
2154 */
fd2f4419 2155 if (rq->curr->on_rq && test_tsk_need_resched(rq->curr))
1e5a7405
PZ
2156 rq->skip_clock_update = 1;
2157}
2158
1da177e4 2159#ifdef CONFIG_SMP
cc367732
IM
2160/*
2161 * Is this task likely cache-hot?
2162 */
e7693a36 2163static int
cc367732
IM
2164task_hot(struct task_struct *p, u64 now, struct sched_domain *sd)
2165{
2166 s64 delta;
2167
e6c8fba7
PZ
2168 if (p->sched_class != &fair_sched_class)
2169 return 0;
2170
ef8002f6
NR
2171 if (unlikely(p->policy == SCHED_IDLE))
2172 return 0;
2173
f540a608
IM
2174 /*
2175 * Buddy candidates are cache hot:
2176 */
f685ceac 2177 if (sched_feat(CACHE_HOT_BUDDY) && this_rq()->nr_running &&
4793241b
PZ
2178 (&p->se == cfs_rq_of(&p->se)->next ||
2179 &p->se == cfs_rq_of(&p->se)->last))
f540a608
IM
2180 return 1;
2181
6bc1665b
IM
2182 if (sysctl_sched_migration_cost == -1)
2183 return 1;
2184 if (sysctl_sched_migration_cost == 0)
2185 return 0;
2186
cc367732
IM
2187 delta = now - p->se.exec_start;
2188
2189 return delta < (s64)sysctl_sched_migration_cost;
2190}
2191
dd41f596 2192void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
c65cc870 2193{
e2912009
PZ
2194#ifdef CONFIG_SCHED_DEBUG
2195 /*
2196 * We should never call set_task_cpu() on a blocked task,
2197 * ttwu() will sort out the placement.
2198 */
077614ee
PZ
2199 WARN_ON_ONCE(p->state != TASK_RUNNING && p->state != TASK_WAKING &&
2200 !(task_thread_info(p)->preempt_count & PREEMPT_ACTIVE));
0122ec5b
PZ
2201
2202#ifdef CONFIG_LOCKDEP
2203 WARN_ON_ONCE(debug_locks && !(lockdep_is_held(&p->pi_lock) ||
2204 lockdep_is_held(&task_rq(p)->lock)));
2205#endif
e2912009
PZ
2206#endif
2207
de1d7286 2208 trace_sched_migrate_task(p, new_cpu);
cbc34ed1 2209
0c69774e
PZ
2210 if (task_cpu(p) != new_cpu) {
2211 p->se.nr_migrations++;
2212 perf_sw_event(PERF_COUNT_SW_CPU_MIGRATIONS, 1, 1, NULL, 0);
2213 }
dd41f596
IM
2214
2215 __set_task_cpu(p, new_cpu);
c65cc870
IM
2216}
2217
969c7921 2218struct migration_arg {
36c8b586 2219 struct task_struct *task;
1da177e4 2220 int dest_cpu;
70b97a7f 2221};
1da177e4 2222
969c7921
TH
2223static int migration_cpu_stop(void *data);
2224
1da177e4
LT
2225/*
2226 * wait_task_inactive - wait for a thread to unschedule.
2227 *
85ba2d86
RM
2228 * If @match_state is nonzero, it's the @p->state value just checked and
2229 * not expected to change. If it changes, i.e. @p might have woken up,
2230 * then return zero. When we succeed in waiting for @p to be off its CPU,
2231 * we return a positive number (its total switch count). If a second call
2232 * a short while later returns the same number, the caller can be sure that
2233 * @p has remained unscheduled the whole time.
2234 *
1da177e4
LT
2235 * The caller must ensure that the task *will* unschedule sometime soon,
2236 * else this function might spin for a *long* time. This function can't
2237 * be called with interrupts off, or it may introduce deadlock with
2238 * smp_call_function() if an IPI is sent by the same process we are
2239 * waiting to become inactive.
2240 */
85ba2d86 2241unsigned long wait_task_inactive(struct task_struct *p, long match_state)
1da177e4
LT
2242{
2243 unsigned long flags;
dd41f596 2244 int running, on_rq;
85ba2d86 2245 unsigned long ncsw;
70b97a7f 2246 struct rq *rq;
1da177e4 2247
3a5c359a
AK
2248 for (;;) {
2249 /*
2250 * We do the initial early heuristics without holding
2251 * any task-queue locks at all. We'll only try to get
2252 * the runqueue lock when things look like they will
2253 * work out!
2254 */
2255 rq = task_rq(p);
fa490cfd 2256
3a5c359a
AK
2257 /*
2258 * If the task is actively running on another CPU
2259 * still, just relax and busy-wait without holding
2260 * any locks.
2261 *
2262 * NOTE! Since we don't hold any locks, it's not
2263 * even sure that "rq" stays as the right runqueue!
2264 * But we don't care, since "task_running()" will
2265 * return false if the runqueue has changed and p
2266 * is actually now running somewhere else!
2267 */
85ba2d86
RM
2268 while (task_running(rq, p)) {
2269 if (match_state && unlikely(p->state != match_state))
2270 return 0;
3a5c359a 2271 cpu_relax();
85ba2d86 2272 }
fa490cfd 2273
3a5c359a
AK
2274 /*
2275 * Ok, time to look more closely! We need the rq
2276 * lock now, to be *sure*. If we're wrong, we'll
2277 * just go back and repeat.
2278 */
2279 rq = task_rq_lock(p, &flags);
27a9da65 2280 trace_sched_wait_task(p);
3a5c359a 2281 running = task_running(rq, p);
fd2f4419 2282 on_rq = p->on_rq;
85ba2d86 2283 ncsw = 0;
f31e11d8 2284 if (!match_state || p->state == match_state)
93dcf55f 2285 ncsw = p->nvcsw | LONG_MIN; /* sets MSB */
0122ec5b 2286 task_rq_unlock(rq, p, &flags);
fa490cfd 2287
85ba2d86
RM
2288 /*
2289 * If it changed from the expected state, bail out now.
2290 */
2291 if (unlikely(!ncsw))
2292 break;
2293
3a5c359a
AK
2294 /*
2295 * Was it really running after all now that we
2296 * checked with the proper locks actually held?
2297 *
2298 * Oops. Go back and try again..
2299 */
2300 if (unlikely(running)) {
2301 cpu_relax();
2302 continue;
2303 }
fa490cfd 2304
3a5c359a
AK
2305 /*
2306 * It's not enough that it's not actively running,
2307 * it must be off the runqueue _entirely_, and not
2308 * preempted!
2309 *
80dd99b3 2310 * So if it was still runnable (but just not actively
3a5c359a
AK
2311 * running right now), it's preempted, and we should
2312 * yield - it could be a while.
2313 */
2314 if (unlikely(on_rq)) {
8eb90c30
TG
2315 ktime_t to = ktime_set(0, NSEC_PER_SEC/HZ);
2316
2317 set_current_state(TASK_UNINTERRUPTIBLE);
2318 schedule_hrtimeout(&to, HRTIMER_MODE_REL);
3a5c359a
AK
2319 continue;
2320 }
fa490cfd 2321
3a5c359a
AK
2322 /*
2323 * Ahh, all good. It wasn't running, and it wasn't
2324 * runnable, which means that it will never become
2325 * running in the future either. We're all done!
2326 */
2327 break;
2328 }
85ba2d86
RM
2329
2330 return ncsw;
1da177e4
LT
2331}
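The two-pass pattern the comment above describes looks roughly like this in a hypothetical caller (a sketch only, not code from this file):

	unsigned long ncsw;

	ncsw = wait_task_inactive(child, TASK_TRACED);
	if (!ncsw)
		return -ESRCH;		/* state changed; it may have woken */
	/* ... inspect @child while it stays off the CPU ... */
	if (wait_task_inactive(child, TASK_TRACED) != ncsw)
		return -ESRCH;		/* it was scheduled in between */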
2332
2333/***
2334 * kick_process - kick a running thread to enter/exit the kernel
2335 * @p: the to-be-kicked thread
2336 *
2337 * Cause a process which is running on another CPU to enter
2338 * kernel-mode, without any delay. (to get signals handled.)
2339 *
25985edc 2340 * NOTE: this function doesn't have to take the runqueue lock,
1da177e4
LT
2341 * because all it wants to ensure is that the remote task enters
2342 * the kernel. If the IPI races and the task has been migrated
2343 * to another CPU then no harm is done and the purpose has been
2344 * achieved as well.
2345 */
36c8b586 2346void kick_process(struct task_struct *p)
1da177e4
LT
2347{
2348 int cpu;
2349
2350 preempt_disable();
2351 cpu = task_cpu(p);
2352 if ((cpu != smp_processor_id()) && task_curr(p))
2353 smp_send_reschedule(cpu);
2354 preempt_enable();
2355}
b43e3521 2356EXPORT_SYMBOL_GPL(kick_process);
476d139c 2357#endif /* CONFIG_SMP */
1da177e4 2358
970b13ba 2359#ifdef CONFIG_SMP
30da688e 2360/*
013fdb80 2361 * ->cpus_allowed is protected by both rq->lock and p->pi_lock
30da688e 2362 */
5da9a0fb
PZ
2363static int select_fallback_rq(int cpu, struct task_struct *p)
2364{
2365 int dest_cpu;
2366 const struct cpumask *nodemask = cpumask_of_node(cpu_to_node(cpu));
2367
2368 /* Look for allowed, online CPU in same node. */
2369 for_each_cpu_and(dest_cpu, nodemask, cpu_active_mask)
2370 if (cpumask_test_cpu(dest_cpu, &p->cpus_allowed))
2371 return dest_cpu;
2372
2373 /* Any allowed, online CPU? */
2374 dest_cpu = cpumask_any_and(&p->cpus_allowed, cpu_active_mask);
2375 if (dest_cpu < nr_cpu_ids)
2376 return dest_cpu;
2377
2378 /* No more Mr. Nice Guy. */
48c5ccae
PZ
2379 dest_cpu = cpuset_cpus_allowed_fallback(p);
2380 /*
2381 * Don't tell them about moving exiting tasks or
2382 * kernel threads (both mm NULL), since they never
2383 * leave kernel.
2384 */
2385 if (p->mm && printk_ratelimit()) {
2386 printk(KERN_INFO "process %d (%s) no longer affine to cpu%d\n",
2387 task_pid_nr(p), p->comm, cpu);
5da9a0fb
PZ
2388 }
2389
2390 return dest_cpu;
2391}
2392
e2912009 2393/*
013fdb80 2394 * The caller (fork, wakeup) owns p->pi_lock, ->cpus_allowed is stable.
e2912009 2395 */
970b13ba 2396static inline
7608dec2 2397int select_task_rq(struct task_struct *p, int sd_flags, int wake_flags)
970b13ba 2398{
7608dec2 2399 int cpu = p->sched_class->select_task_rq(p, sd_flags, wake_flags);
e2912009
PZ
2400
2401 /*
2402 * In order not to call set_task_cpu() on a blocking task we need
2403 * to rely on ttwu() to place the task on a valid ->cpus_allowed
2404 * cpu.
2405 *
2406 * Since this is common to all placement strategies, this lives here.
2407 *
2408 * [ this allows ->select_task_rq() to simply return task_cpu(p) and
2409 * not worry about this generic constraint ]
2410 */
2411 if (unlikely(!cpumask_test_cpu(cpu, &p->cpus_allowed) ||
70f11205 2412 !cpu_online(cpu)))
5da9a0fb 2413 cpu = select_fallback_rq(task_cpu(p), p);
e2912009
PZ
2414
2415 return cpu;
970b13ba 2416}
09a40af5
MG
2417
2418static void update_avg(u64 *avg, u64 sample)
2419{
2420 s64 diff = sample - *avg;
2421 *avg += diff >> 3;
2422}
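The shift by 3 makes this an exponentially weighted average with weight 1/8: the new average is 7/8 of the old value plus 1/8 of the sample. A standalone illustration:

	#include <stdio.h>
	#include <stdint.h>

	/* same 1/8-weight update as update_avg() above, in userspace */
	static void ewma_update(uint64_t *avg, uint64_t sample)
	{
		int64_t diff = sample - *avg;

		*avg += diff >> 3;
	}

	int main(void)
	{
		uint64_t avg = 800;

		ewma_update(&avg, 1600);
		printf("avg = %llu\n", (unsigned long long)avg);	/* 800 + 800/8 = 900 */
		return 0;
	}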
970b13ba
PZ
2423#endif
2424
d7c01d27 2425static void
b84cb5df 2426ttwu_stat(struct task_struct *p, int cpu, int wake_flags)
9ed3811a 2427{
d7c01d27 2428#ifdef CONFIG_SCHEDSTATS
b84cb5df
PZ
2429 struct rq *rq = this_rq();
2430
d7c01d27
PZ
2431#ifdef CONFIG_SMP
2432 int this_cpu = smp_processor_id();
2433
2434 if (cpu == this_cpu) {
2435 schedstat_inc(rq, ttwu_local);
2436 schedstat_inc(p, se.statistics.nr_wakeups_local);
2437 } else {
2438 struct sched_domain *sd;
2439
2440 schedstat_inc(p, se.statistics.nr_wakeups_remote);
057f3fad 2441 rcu_read_lock();
d7c01d27
PZ
2442 for_each_domain(this_cpu, sd) {
2443 if (cpumask_test_cpu(cpu, sched_domain_span(sd))) {
2444 schedstat_inc(sd, ttwu_wake_remote);
2445 break;
2446 }
2447 }
057f3fad 2448 rcu_read_unlock();
d7c01d27
PZ
2449 }
2450#endif /* CONFIG_SMP */
2451
2452 schedstat_inc(rq, ttwu_count);
9ed3811a 2453 schedstat_inc(p, se.statistics.nr_wakeups);
d7c01d27
PZ
2454
2455 if (wake_flags & WF_SYNC)
9ed3811a 2456 schedstat_inc(p, se.statistics.nr_wakeups_sync);
d7c01d27
PZ
2457
2458 if (cpu != task_cpu(p))
9ed3811a 2459 schedstat_inc(p, se.statistics.nr_wakeups_migrate);
9ed3811a 2460
d7c01d27
PZ
2461#endif /* CONFIG_SCHEDSTATS */
2462}
2463
2464static void ttwu_activate(struct rq *rq, struct task_struct *p, int en_flags)
2465{
9ed3811a 2466 activate_task(rq, p, en_flags);
fd2f4419 2467 p->on_rq = 1;
c2f7115e
PZ
2468
2469 /* if a worker is waking up, notify workqueue */
2470 if (p->flags & PF_WQ_WORKER)
2471 wq_worker_waking_up(p, cpu_of(rq));
9ed3811a
TH
2472}
2473
23f41eeb
PZ
2474/*
2475 * Mark the task runnable and perform wakeup-preemption.
2476 */
89363381 2477static void
23f41eeb 2478ttwu_do_wakeup(struct rq *rq, struct task_struct *p, int wake_flags)
9ed3811a 2479{
89363381 2480 trace_sched_wakeup(p, true);
9ed3811a
TH
2481 check_preempt_curr(rq, p, wake_flags);
2482
2483 p->state = TASK_RUNNING;
2484#ifdef CONFIG_SMP
2485 if (p->sched_class->task_woken)
2486 p->sched_class->task_woken(rq, p);
2487
2488 if (unlikely(rq->idle_stamp)) {
2489 u64 delta = rq->clock - rq->idle_stamp;
2490 u64 max = 2*sysctl_sched_migration_cost;
2491
2492 if (delta > max)
2493 rq->avg_idle = max;
2494 else
2495 update_avg(&rq->avg_idle, delta);
2496 rq->idle_stamp = 0;
2497 }
2498#endif
2499}
2500
c05fbafb
PZ
2501static void
2502ttwu_do_activate(struct rq *rq, struct task_struct *p, int wake_flags)
2503{
2504#ifdef CONFIG_SMP
2505 if (p->sched_contributes_to_load)
2506 rq->nr_uninterruptible--;
2507#endif
2508
2509 ttwu_activate(rq, p, ENQUEUE_WAKEUP | ENQUEUE_WAKING);
2510 ttwu_do_wakeup(rq, p, wake_flags);
2511}
2512
2513/*
2514 * Called in case the task @p isn't fully descheduled from its runqueue;
2515 * in this case we must do a remote wakeup. It's a 'light' wakeup though,
2516 * since all we need to do is flip p->state to TASK_RUNNING, because
2517 * the task is still ->on_rq.
2518 */
2519static int ttwu_remote(struct task_struct *p, int wake_flags)
2520{
2521 struct rq *rq;
2522 int ret = 0;
2523
2524 rq = __task_rq_lock(p);
2525 if (p->on_rq) {
2526 ttwu_do_wakeup(rq, p, wake_flags);
2527 ret = 1;
2528 }
2529 __task_rq_unlock(rq);
2530
2531 return ret;
2532}
2533
317f3941
PZ
2534#ifdef CONFIG_SMP
2535static void sched_ttwu_pending(void)
2536{
2537 struct rq *rq = this_rq();
2538 struct task_struct *list = xchg(&rq->wake_list, NULL);
2539
2540 if (!list)
2541 return;
2542
2543 raw_spin_lock(&rq->lock);
2544
2545 while (list) {
2546 struct task_struct *p = list;
2547 list = list->wake_entry;
2548 ttwu_do_activate(rq, p, 0);
2549 }
2550
2551 raw_spin_unlock(&rq->lock);
2552}
2553
2554void scheduler_ipi(void)
2555{
2556 sched_ttwu_pending();
2557}
2558
2559static void ttwu_queue_remote(struct task_struct *p, int cpu)
2560{
2561 struct rq *rq = cpu_rq(cpu);
2562 struct task_struct *next = rq->wake_list;
2563
2564 for (;;) {
2565 struct task_struct *old = next;
2566
2567 p->wake_entry = next;
2568 next = cmpxchg(&rq->wake_list, old, p);
2569 if (next == old)
2570 break;
2571 }
2572
2573 if (!next)
2574 smp_send_reschedule(cpu);
2575}
2576#endif
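The wake_list protocol above is a lock-free multi-producer/single-consumer list: ttwu_queue_remote() pushes with a cmpxchg loop, while sched_ttwu_pending() detaches the whole list with a single xchg and walks it without locks. A userspace analogue with C11 atomics (illustration only, single-threaded driver):

	#include <stdatomic.h>
	#include <stdio.h>

	struct node {
		int val;
		struct node *next;
	};

	static _Atomic(struct node *) head;

	/* push one node: retry until our snapshot of the head is still current */
	static void push(struct node *n)
	{
		struct node *old = atomic_load(&head);

		do {
			n->next = old;
		} while (!atomic_compare_exchange_weak(&head, &old, n));
	}

	/* detach the whole list in one exchange, then walk it locklessly */
	static void drain(void)
	{
		struct node *n = atomic_exchange(&head, NULL);

		for (; n; n = n->next)
			printf("processing %d\n", n->val);
	}

	int main(void)
	{
		struct node a = { .val = 1 }, b = { .val = 2 };

		push(&a);
		push(&b);
		drain();	/* prints 2 then 1: last pushed, first drained */
		return 0;
	}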
2577
c05fbafb
PZ
2578static void ttwu_queue(struct task_struct *p, int cpu)
2579{
2580 struct rq *rq = cpu_rq(cpu);
2581
17d9f311 2582#if defined(CONFIG_SMP)
317f3941
PZ
2583 if (sched_feat(TTWU_QUEUE) && cpu != smp_processor_id()) {
2584 ttwu_queue_remote(p, cpu);
2585 return;
2586 }
2587#endif
2588
c05fbafb
PZ
2589 raw_spin_lock(&rq->lock);
2590 ttwu_do_activate(rq, p, 0);
2591 raw_spin_unlock(&rq->lock);
9ed3811a
TH
2592}
2593
2594/**
1da177e4 2595 * try_to_wake_up - wake up a thread
9ed3811a 2596 * @p: the thread to be awakened
1da177e4 2597 * @state: the mask of task states that can be woken
9ed3811a 2598 * @wake_flags: wake modifier flags (WF_*)
1da177e4
LT
2599 *
2600 * Put it on the run-queue if it's not already there. The "current"
2601 * thread is always on the run-queue (except when the actual
2602 * re-schedule is in progress), and as such you're allowed to do
2603 * the simpler "current->state = TASK_RUNNING" to mark yourself
2604 * runnable without the overhead of this.
2605 *
9ed3811a
TH
2606 * Returns %true if @p was woken up, %false if it was already running
2607 * or @state didn't match @p's state.
1da177e4 2608 */
e4a52bcb
PZ
2609static int
2610try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
1da177e4 2611{
1da177e4 2612 unsigned long flags;
c05fbafb 2613 int cpu, success = 0;
2398f2c6 2614
04e2f174 2615 smp_wmb();
013fdb80 2616 raw_spin_lock_irqsave(&p->pi_lock, flags);
e9c84311 2617 if (!(p->state & state))
1da177e4
LT
2618 goto out;
2619
c05fbafb 2620 success = 1; /* we're going to change ->state */
1da177e4 2621 cpu = task_cpu(p);
1da177e4 2622
c05fbafb
PZ
2623 if (p->on_rq && ttwu_remote(p, wake_flags))
2624 goto stat;
1da177e4 2625
1da177e4 2626#ifdef CONFIG_SMP
e9c84311 2627 /*
c05fbafb
PZ
2628 * If the owning (remote) cpu is still in the middle of schedule() with
2629 * this task as prev, wait until it's done referencing the task.
e9c84311 2630 */
e4a52bcb
PZ
2631 while (p->on_cpu) {
2632#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
2633 /*
2634 * If called from interrupt context we could have landed in the
2635 * middle of schedule(), in this case we should take care not
2636 * to spin on ->on_cpu if p is current, since that would
2637 * deadlock.
2638 */
c05fbafb
PZ
2639 if (p == current) {
2640 ttwu_queue(p, cpu);
2641 goto stat;
2642 }
e4a52bcb
PZ
2643#endif
2644 cpu_relax();
371fd7e7 2645 }
0970d299 2646 /*
e4a52bcb 2647 * Pairs with the smp_wmb() in finish_lock_switch().
0970d299 2648 */
e4a52bcb 2649 smp_rmb();
1da177e4 2650
a8e4f2ea 2651 p->sched_contributes_to_load = !!task_contributes_to_load(p);
e9c84311 2652 p->state = TASK_WAKING;
e7693a36 2653
e4a52bcb 2654 if (p->sched_class->task_waking)
74f8e4b2 2655 p->sched_class->task_waking(p);
efbbd05a 2656
7608dec2 2657 cpu = select_task_rq(p, SD_BALANCE_WAKE, wake_flags);
c05fbafb 2658 if (task_cpu(p) != cpu)
e4a52bcb 2659 set_task_cpu(p, cpu);
1da177e4 2660#endif /* CONFIG_SMP */
1da177e4 2661
c05fbafb
PZ
2662 ttwu_queue(p, cpu);
2663stat:
b84cb5df 2664 ttwu_stat(p, cpu, wake_flags);
1da177e4 2665out:
013fdb80 2666 raw_spin_unlock_irqrestore(&p->pi_lock, flags);
1da177e4
LT
2667
2668 return success;
2669}
2670
21aa9af0
TH
2671/**
2672 * try_to_wake_up_local - try to wake up a local task with rq lock held
2673 * @p: the thread to be awakened
2674 *
2acca55e 2675 * Put @p on the run-queue if it's not already there. The caller must
21aa9af0 2676 * ensure that this_rq() is locked, @p is bound to this_rq() and not
2acca55e 2677 * the current task.
21aa9af0
TH
2678 */
2679static void try_to_wake_up_local(struct task_struct *p)
2680{
2681 struct rq *rq = task_rq(p);
21aa9af0
TH
2682
2683 BUG_ON(rq != this_rq());
2684 BUG_ON(p == current);
2685 lockdep_assert_held(&rq->lock);
2686
2acca55e
PZ
2687 if (!raw_spin_trylock(&p->pi_lock)) {
2688 raw_spin_unlock(&rq->lock);
2689 raw_spin_lock(&p->pi_lock);
2690 raw_spin_lock(&rq->lock);
2691 }
2692
21aa9af0 2693 if (!(p->state & TASK_NORMAL))
2acca55e 2694 goto out;
21aa9af0 2695
fd2f4419 2696 if (!p->on_rq)
d7c01d27
PZ
2697 ttwu_activate(rq, p, ENQUEUE_WAKEUP);
2698
23f41eeb 2699 ttwu_do_wakeup(rq, p, 0);
b84cb5df 2700 ttwu_stat(p, smp_processor_id(), 0);
2acca55e
PZ
2701out:
2702 raw_spin_unlock(&p->pi_lock);
21aa9af0
TH
2703}
2704
50fa610a
DH
2705/**
2706 * wake_up_process - Wake up a specific process
2707 * @p: The process to be woken up.
2708 *
2709 * Attempt to wake up the nominated process and move it to the set of runnable
2710 * processes. Returns 1 if the process was woken up, 0 if it was already
2711 * running.
2712 *
2713 * It may be assumed that this function implies a write memory barrier before
2714 * changing the task state if and only if any tasks are woken up.
2715 */
7ad5b3a5 2716int wake_up_process(struct task_struct *p)
1da177e4 2717{
d9514f6c 2718 return try_to_wake_up(p, TASK_ALL, 0);
1da177e4 2719}
1da177e4
LT
2720EXPORT_SYMBOL(wake_up_process);
2721
7ad5b3a5 2722int wake_up_state(struct task_struct *p, unsigned int state)
1da177e4
LT
2723{
2724 return try_to_wake_up(p, state, 0);
2725}
2726
1da177e4
LT
2727/*
2728 * Perform scheduler related setup for a newly forked process p.
2729 * p is forked by current.
dd41f596
IM
2730 *
2731 * __sched_fork() is basic setup used by init_idle() too:
2732 */
2733static void __sched_fork(struct task_struct *p)
2734{
fd2f4419
PZ
2735 p->on_rq = 0;
2736
2737 p->se.on_rq = 0;
dd41f596
IM
2738 p->se.exec_start = 0;
2739 p->se.sum_exec_runtime = 0;
f6cf891c 2740 p->se.prev_sum_exec_runtime = 0;
6c594c21 2741 p->se.nr_migrations = 0;
da7a735e 2742 p->se.vruntime = 0;
fd2f4419 2743 INIT_LIST_HEAD(&p->se.group_node);
6cfb0d5d
IM
2744
2745#ifdef CONFIG_SCHEDSTATS
41acab88 2746 memset(&p->se.statistics, 0, sizeof(p->se.statistics));
6cfb0d5d 2747#endif
476d139c 2748
fa717060 2749 INIT_LIST_HEAD(&p->rt.run_list);
476d139c 2750
e107be36
AK
2751#ifdef CONFIG_PREEMPT_NOTIFIERS
2752 INIT_HLIST_HEAD(&p->preempt_notifiers);
2753#endif
dd41f596
IM
2754}
2755
2756/*
2757 * fork()/clone()-time setup:
2758 */
3e51e3ed 2759void sched_fork(struct task_struct *p)
dd41f596 2760{
0122ec5b 2761 unsigned long flags;
dd41f596
IM
2762 int cpu = get_cpu();
2763
2764 __sched_fork(p);
06b83b5f 2765 /*
0017d735 2766 * We mark the process as running here. This guarantees that
06b83b5f
PZ
2767 * nobody will actually run it, and a signal or other external
2768 * event cannot wake it up and insert it on the runqueue either.
2769 */
0017d735 2770 p->state = TASK_RUNNING;
dd41f596 2771
b9dc29e7
MG
2772 /*
2773 * Revert to default priority/policy on fork if requested.
2774 */
2775 if (unlikely(p->sched_reset_on_fork)) {
f83f9ac2 2776 if (p->policy == SCHED_FIFO || p->policy == SCHED_RR) {
b9dc29e7 2777 p->policy = SCHED_NORMAL;
f83f9ac2
PW
2778 p->normal_prio = p->static_prio;
2779 }
b9dc29e7 2780
6c697bdf
MG
2781 if (PRIO_TO_NICE(p->static_prio) < 0) {
2782 p->static_prio = NICE_TO_PRIO(0);
f83f9ac2 2783 p->normal_prio = p->static_prio;
6c697bdf
MG
2784 set_load_weight(p);
2785 }
2786
b9dc29e7
MG
2787 /*
2788 * We don't need the reset flag anymore after the fork. It has
2789 * fulfilled its duty:
2790 */
2791 p->sched_reset_on_fork = 0;
2792 }
ca94c442 2793
f83f9ac2
PW
2794 /*
2795 * Make sure we do not leak PI boosting priority to the child.
2796 */
2797 p->prio = current->normal_prio;
2798
2ddbf952
HS
2799 if (!rt_prio(p->prio))
2800 p->sched_class = &fair_sched_class;
b29739f9 2801
cd29fe6f
PZ
2802 if (p->sched_class->task_fork)
2803 p->sched_class->task_fork(p);
2804
86951599
PZ
2805 /*
2806 * The child is not yet in the pid-hash so no cgroup attach races,
2807 * and the cgroup is pinned to this child because cgroup_fork()
2808 * is run before sched_fork().
2809 *
2810 * Silence PROVE_RCU.
2811 */
0122ec5b 2812 raw_spin_lock_irqsave(&p->pi_lock, flags);
5f3edc1b 2813 set_task_cpu(p, cpu);
0122ec5b 2814 raw_spin_unlock_irqrestore(&p->pi_lock, flags);
5f3edc1b 2815
52f17b6c 2816#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
dd41f596 2817 if (likely(sched_info_on()))
52f17b6c 2818 memset(&p->sched_info, 0, sizeof(p->sched_info));
1da177e4 2819#endif
3ca7a440
PZ
2820#if defined(CONFIG_SMP)
2821 p->on_cpu = 0;
4866cde0 2822#endif
1da177e4 2823#ifdef CONFIG_PREEMPT
4866cde0 2824 /* Want to start with kernel preemption disabled. */
a1261f54 2825 task_thread_info(p)->preempt_count = 1;
1da177e4 2826#endif
806c09a7 2827#ifdef CONFIG_SMP
917b627d 2828 plist_node_init(&p->pushable_tasks, MAX_PRIO);
806c09a7 2829#endif
917b627d 2830
476d139c 2831 put_cpu();
1da177e4
LT
2832}
2833
2834/*
2835 * wake_up_new_task - wake up a newly created task for the first time.
2836 *
2837 * This function will do some initial scheduler statistics housekeeping
2838 * that must be done for every newly created context, then puts the task
2839 * on the runqueue and wakes it.
2840 */
3e51e3ed 2841void wake_up_new_task(struct task_struct *p)
1da177e4
LT
2842{
2843 unsigned long flags;
dd41f596 2844 struct rq *rq;
fabf318e 2845
ab2515c4 2846 raw_spin_lock_irqsave(&p->pi_lock, flags);
fabf318e
PZ
2847#ifdef CONFIG_SMP
2848 /*
2849 * Fork balancing, do it here and not earlier because:
2850 * - cpus_allowed can change in the fork path
2851 * - any previously selected cpu might disappear through hotplug
fabf318e 2852 */
ab2515c4 2853 set_task_cpu(p, select_task_rq(p, SD_BALANCE_FORK, 0));
0017d735
PZ
2854#endif
2855
ab2515c4 2856 rq = __task_rq_lock(p);
cd29fe6f 2857 activate_task(rq, p, 0);
fd2f4419 2858 p->on_rq = 1;
89363381 2859 trace_sched_wakeup_new(p, true);
a7558e01 2860 check_preempt_curr(rq, p, WF_FORK);
9a897c5a 2861#ifdef CONFIG_SMP
efbbd05a
PZ
2862 if (p->sched_class->task_woken)
2863 p->sched_class->task_woken(rq, p);
9a897c5a 2864#endif
0122ec5b 2865 task_rq_unlock(rq, p, &flags);
1da177e4
LT
2866}
2867
e107be36
AK
2868#ifdef CONFIG_PREEMPT_NOTIFIERS
2869
2870/**
80dd99b3 2871 * preempt_notifier_register - tell me when current is being preempted & rescheduled
421cee29 2872 * @notifier: notifier struct to register
e107be36
AK
2873 */
2874void preempt_notifier_register(struct preempt_notifier *notifier)
2875{
2876 hlist_add_head(&notifier->link, &current->preempt_notifiers);
2877}
2878EXPORT_SYMBOL_GPL(preempt_notifier_register);
2879
2880/**
2881 * preempt_notifier_unregister - no longer interested in preemption notifications
421cee29 2882 * @notifier: notifier struct to unregister
e107be36
AK
2883 *
2884 * This is safe to call from within a preemption notifier.
2885 */
2886void preempt_notifier_unregister(struct preempt_notifier *notifier)
2887{
2888 hlist_del(&notifier->link);
2889}
2890EXPORT_SYMBOL_GPL(preempt_notifier_unregister);
2891
2892static void fire_sched_in_preempt_notifiers(struct task_struct *curr)
2893{
2894 struct preempt_notifier *notifier;
2895 struct hlist_node *node;
2896
2897 hlist_for_each_entry(notifier, node, &curr->preempt_notifiers, link)
2898 notifier->ops->sched_in(notifier, raw_smp_processor_id());
2899}
2900
2901static void
2902fire_sched_out_preempt_notifiers(struct task_struct *curr,
2903 struct task_struct *next)
2904{
2905 struct preempt_notifier *notifier;
2906 struct hlist_node *node;
2907
2908 hlist_for_each_entry(notifier, node, &curr->preempt_notifiers, link)
2909 notifier->ops->sched_out(notifier, next);
2910}
2911
6d6bc0ad 2912#else /* !CONFIG_PREEMPT_NOTIFIERS */
e107be36
AK
2913
2914static void fire_sched_in_preempt_notifiers(struct task_struct *curr)
2915{
2916}
2917
2918static void
2919fire_sched_out_preempt_notifiers(struct task_struct *curr,
2920 struct task_struct *next)
2921{
2922}
2923
6d6bc0ad 2924#endif /* CONFIG_PREEMPT_NOTIFIERS */
e107be36 2925
4866cde0
NP
2926/**
2927 * prepare_task_switch - prepare to switch tasks
2928 * @rq: the runqueue preparing to switch
421cee29 2929 * @prev: the current task that is being switched out
4866cde0
NP
2930 * @next: the task we are going to switch to.
2931 *
2932 * This is called with the rq lock held and interrupts off. It must
2933 * be paired with a subsequent finish_task_switch after the context
2934 * switch.
2935 *
2936 * prepare_task_switch sets up locking and calls architecture specific
2937 * hooks.
2938 */
e107be36
AK
2939static inline void
2940prepare_task_switch(struct rq *rq, struct task_struct *prev,
2941 struct task_struct *next)
4866cde0 2942{
fe4b04fa
PZ
2943 sched_info_switch(prev, next);
2944 perf_event_task_sched_out(prev, next);
e107be36 2945 fire_sched_out_preempt_notifiers(prev, next);
4866cde0
NP
2946 prepare_lock_switch(rq, next);
2947 prepare_arch_switch(next);
fe4b04fa 2948 trace_sched_switch(prev, next);
4866cde0
NP
2949}
2950
1da177e4
LT
2951/**
2952 * finish_task_switch - clean up after a task-switch
344babaa 2953 * @rq: runqueue associated with task-switch
1da177e4
LT
2954 * @prev: the thread we just switched away from.
2955 *
4866cde0
NP
2956 * finish_task_switch must be called after the context switch, paired
2957 * with a prepare_task_switch call before the context switch.
2958 * finish_task_switch will reconcile locking set up by prepare_task_switch,
2959 * and do any other architecture-specific cleanup actions.
1da177e4
LT
2960 *
2961 * Note that we may have delayed dropping an mm in context_switch(). If
41a2d6cf 2962 * so, we finish that here outside of the runqueue lock. (Doing it
1da177e4
LT
2963 * with the lock held can cause deadlocks; see schedule() for
2964 * details.)
2965 */
a9957449 2966static void finish_task_switch(struct rq *rq, struct task_struct *prev)
1da177e4
LT
2967 __releases(rq->lock)
2968{
1da177e4 2969 struct mm_struct *mm = rq->prev_mm;
55a101f8 2970 long prev_state;
1da177e4
LT
2971
2972 rq->prev_mm = NULL;
2973
2974 /*
2975 * A task struct has one reference for the use as "current".
c394cc9f 2976 * If a task dies, then it sets TASK_DEAD in tsk->state and calls
55a101f8
ON
2977 * schedule one last time. The schedule call will never return, and
2978 * the scheduled task must drop that reference.
c394cc9f 2979 * The test for TASK_DEAD must occur while the runqueue locks are
1da177e4
LT
2980 * still held, otherwise prev could be scheduled on another cpu, die
2981 * there before we look at prev->state, and then the reference would
2982 * be dropped twice.
2983 * Manfred Spraul <manfred@colorfullife.com>
2984 */
55a101f8 2985 prev_state = prev->state;
4866cde0 2986 finish_arch_switch(prev);
8381f65d
JI
2987#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
2988 local_irq_disable();
2989#endif /* __ARCH_WANT_INTERRUPTS_ON_CTXSW */
49f47433 2990 perf_event_task_sched_in(current);
8381f65d
JI
2991#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
2992 local_irq_enable();
2993#endif /* __ARCH_WANT_INTERRUPTS_ON_CTXSW */
4866cde0 2994 finish_lock_switch(rq, prev);
e8fa1362 2995
e107be36 2996 fire_sched_in_preempt_notifiers(current);
1da177e4
LT
2997 if (mm)
2998 mmdrop(mm);
c394cc9f 2999 if (unlikely(prev_state == TASK_DEAD)) {
c6fd91f0 3000 /*
3001 * Remove function-return probe instances associated with this
3002 * task and put them back on the free list.
9761eea8 3003 */
c6fd91f0 3004 kprobe_flush_task(prev);
1da177e4 3005 put_task_struct(prev);
c6fd91f0 3006 }
1da177e4
LT
3007}
3008
3f029d3c
GH
3009#ifdef CONFIG_SMP
3010
3011/* assumes rq->lock is held */
3012static inline void pre_schedule(struct rq *rq, struct task_struct *prev)
3013{
3014 if (prev->sched_class->pre_schedule)
3015 prev->sched_class->pre_schedule(rq, prev);
3016}
3017
3018/* rq->lock is NOT held, but preemption is disabled */
3019static inline void post_schedule(struct rq *rq)
3020{
3021 if (rq->post_schedule) {
3022 unsigned long flags;
3023
05fa785c 3024 raw_spin_lock_irqsave(&rq->lock, flags);
3f029d3c
GH
3025 if (rq->curr->sched_class->post_schedule)
3026 rq->curr->sched_class->post_schedule(rq);
05fa785c 3027 raw_spin_unlock_irqrestore(&rq->lock, flags);
3f029d3c
GH
3028
3029 rq->post_schedule = 0;
3030 }
3031}
3032
3033#else
da19ab51 3034
3f029d3c
GH
3035static inline void pre_schedule(struct rq *rq, struct task_struct *p)
3036{
3037}
3038
3039static inline void post_schedule(struct rq *rq)
3040{
1da177e4
LT
3041}
3042
3f029d3c
GH
3043#endif
3044
1da177e4
LT
3045/**
3046 * schedule_tail - first thing a freshly forked thread must call.
3047 * @prev: the thread we just switched away from.
3048 */
36c8b586 3049asmlinkage void schedule_tail(struct task_struct *prev)
1da177e4
LT
3050 __releases(rq->lock)
3051{
70b97a7f
IM
3052 struct rq *rq = this_rq();
3053
4866cde0 3054 finish_task_switch(rq, prev);
da19ab51 3055
3f029d3c
GH
3056 /*
3057 * FIXME: do we need to worry about rq being invalidated by the
3058 * task_switch?
3059 */
3060 post_schedule(rq);
70b97a7f 3061
4866cde0
NP
3062#ifdef __ARCH_WANT_UNLOCKED_CTXSW
3063 /* In this case, finish_task_switch does not reenable preemption */
3064 preempt_enable();
3065#endif
1da177e4 3066 if (current->set_child_tid)
b488893a 3067 put_user(task_pid_vnr(current), current->set_child_tid);
1da177e4
LT
3068}
3069
3070/*
3071 * context_switch - switch to the new MM and the new
3072 * thread's register state.
3073 */
dd41f596 3074static inline void
70b97a7f 3075context_switch(struct rq *rq, struct task_struct *prev,
36c8b586 3076 struct task_struct *next)
1da177e4 3077{
dd41f596 3078 struct mm_struct *mm, *oldmm;
1da177e4 3079
e107be36 3080 prepare_task_switch(rq, prev, next);
fe4b04fa 3081
dd41f596
IM
3082 mm = next->mm;
3083 oldmm = prev->active_mm;
9226d125
ZA
3084 /*
3085 * For paravirt, this is coupled with an exit in switch_to to
3086 * combine the page table reload and the switch backend into
3087 * one hypercall.
3088 */
224101ed 3089 arch_start_context_switch(prev);
9226d125 3090
31915ab4 3091 if (!mm) {
1da177e4
LT
3092 next->active_mm = oldmm;
3093 atomic_inc(&oldmm->mm_count);
3094 enter_lazy_tlb(oldmm, next);
3095 } else
3096 switch_mm(oldmm, mm, next);
3097
31915ab4 3098 if (!prev->mm) {
1da177e4 3099 prev->active_mm = NULL;
1da177e4
LT
3100 rq->prev_mm = oldmm;
3101 }
3a5f5e48
IM
3102 /*
3103 * The runqueue lock will be released by the next
3104 * task (which is an invalid locking op, but in the case
3105 * of the scheduler it's an obvious special case), so we
3106 * do an early lockdep release here:
3107 */
3108#ifndef __ARCH_WANT_UNLOCKED_CTXSW
8a25d5de 3109 spin_release(&rq->lock.dep_map, 1, _THIS_IP_);
3a5f5e48 3110#endif
1da177e4
LT
3111
3112 /* Here we just switch the register state and the stack. */
3113 switch_to(prev, next, prev);
3114
dd41f596
IM
3115 barrier();
3116 /*
3117 * this_rq must be evaluated again because prev may have moved
3118 * CPUs since it called schedule(), thus the 'rq' on its stack
3119 * frame will be invalid.
3120 */
3121 finish_task_switch(this_rq(), prev);
1da177e4
LT
3122}
3123
3124/*
3125 * nr_running, nr_uninterruptible and nr_context_switches:
3126 *
3127 * externally visible scheduler statistics: current number of runnable
3128 * threads, current number of uninterruptible-sleeping threads, total
3129 * number of context switches performed since bootup.
3130 */
3131unsigned long nr_running(void)
3132{
3133 unsigned long i, sum = 0;
3134
3135 for_each_online_cpu(i)
3136 sum += cpu_rq(i)->nr_running;
3137
3138 return sum;
f711f609 3139}
1da177e4
LT
3140
3141unsigned long nr_uninterruptible(void)
f711f609 3142{
1da177e4 3143 unsigned long i, sum = 0;
f711f609 3144
0a945022 3145 for_each_possible_cpu(i)
1da177e4 3146 sum += cpu_rq(i)->nr_uninterruptible;
f711f609
GS
3147
3148 /*
1da177e4
LT
3149 * Since we read the counters locklessly, it might be slightly
3150 * inaccurate. Do not allow it to go below zero though:
f711f609 3151 */
1da177e4
LT
3152 if (unlikely((long)sum < 0))
3153 sum = 0;
f711f609 3154
1da177e4 3155 return sum;
f711f609 3156}
f711f609 3157
1da177e4 3158unsigned long long nr_context_switches(void)
46cb4b7c 3159{
cc94abfc
SR
3160 int i;
3161 unsigned long long sum = 0;
46cb4b7c 3162
0a945022 3163 for_each_possible_cpu(i)
1da177e4 3164 sum += cpu_rq(i)->nr_switches;
46cb4b7c 3165
1da177e4
LT
3166 return sum;
3167}
483b4ee6 3168
1da177e4
LT
3169unsigned long nr_iowait(void)
3170{
3171 unsigned long i, sum = 0;
483b4ee6 3172
0a945022 3173 for_each_possible_cpu(i)
1da177e4 3174 sum += atomic_read(&cpu_rq(i)->nr_iowait);
46cb4b7c 3175
1da177e4
LT
3176 return sum;
3177}
483b4ee6 3178
8c215bd3 3179unsigned long nr_iowait_cpu(int cpu)
69d25870 3180{
8c215bd3 3181 struct rq *this = cpu_rq(cpu);
69d25870
AV
3182 return atomic_read(&this->nr_iowait);
3183}
46cb4b7c 3184
69d25870
AV
3185unsigned long this_cpu_load(void)
3186{
3187 struct rq *this = this_rq();
3188 return this->cpu_load[0];
3189}
e790fb0b 3190
46cb4b7c 3191
dce48a84
TG
3192/* Variables and functions for calc_load */
3193static atomic_long_t calc_load_tasks;
3194static unsigned long calc_load_update;
3195unsigned long avenrun[3];
3196EXPORT_SYMBOL(avenrun);
46cb4b7c 3197
74f5187a
PZ
3198static long calc_load_fold_active(struct rq *this_rq)
3199{
3200 long nr_active, delta = 0;
3201
3202 nr_active = this_rq->nr_running;
3203 nr_active += (long) this_rq->nr_uninterruptible;
3204
3205 if (nr_active != this_rq->calc_load_active) {
3206 delta = nr_active - this_rq->calc_load_active;
3207 this_rq->calc_load_active = nr_active;
3208 }
3209
3210 return delta;
3211}
3212
0f004f5a
PZ
3213static unsigned long
3214calc_load(unsigned long load, unsigned long exp, unsigned long active)
3215{
3216 load *= exp;
3217 load += active * (FIXED_1 - exp);
3218 load += 1UL << (FSHIFT - 1);
3219 return load >> FSHIFT;
3220}
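A worked, standalone example of this fixed-point update, using constants that mirror the usual definitions (FSHIFT of 11, EXP_1 of 1884 for the 1-minute average, LOAD_FREQ of about 5 seconds -- treat these as assumptions): starting from an idle system, a single runnable task pushes the 1-minute average to about 0.08 after the first interval.

	#include <stdio.h>

	#define FSHIFT	11			/* assumed, as in the usual headers */
	#define FIXED_1	(1UL << FSHIFT)
	#define EXP_1	1884			/* 1/exp(5sec/1min), fixed point */

	static unsigned long
	fp_calc_load(unsigned long load, unsigned long exp, unsigned long active)
	{
		load *= exp;
		load += active * (FIXED_1 - exp);
		load += 1UL << (FSHIFT - 1);
		return load >> FSHIFT;
	}

	int main(void)
	{
		unsigned long avenrun0 = 0;		/* idle system */
		unsigned long active = 1 * FIXED_1;	/* one runnable task */

		avenrun0 = fp_calc_load(avenrun0, EXP_1, active);
		printf("1-min load: %lu/%lu = %.3f\n",	/* 164/2048 = 0.080 */
		       avenrun0, FIXED_1, (double)avenrun0 / FIXED_1);
		return 0;
	}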
3221
74f5187a
PZ
3222#ifdef CONFIG_NO_HZ
3223/*
3224 * For NO_HZ we delay the active fold to the next LOAD_FREQ update.
3225 *
3226 * When making the ILB scale, we should try to pull this in as well.
3227 */
3228static atomic_long_t calc_load_tasks_idle;
3229
3230static void calc_load_account_idle(struct rq *this_rq)
3231{
3232 long delta;
3233
3234 delta = calc_load_fold_active(this_rq);
3235 if (delta)
3236 atomic_long_add(delta, &calc_load_tasks_idle);
3237}
3238
3239static long calc_load_fold_idle(void)
3240{
3241 long delta = 0;
3242
3243 /*
3244 * It's got a race; we don't care...
3245 */
3246 if (atomic_long_read(&calc_load_tasks_idle))
3247 delta = atomic_long_xchg(&calc_load_tasks_idle, 0);
3248
3249 return delta;
3250}
0f004f5a
PZ
3251
3252/**
3253 * fixed_power_int - compute: x^n, in O(log n) time
3254 *
3255 * @x: base of the power
3256 * @frac_bits: fractional bits of @x
3257 * @n: power to raise @x to.
3258 *
3259 * By exploiting the relation between the definition of the natural power
3260 * function: x^n := x*x*...*x (x multiplied by itself for n times), and
3261 * the binary encoding of numbers used by computers: n := \Sum n_i * 2^i,
3262 * (where: n_i \elem {0, 1}, the binary vector representing n),
3263 * we find: x^n := x^(\Sum n_i * 2^i) := \Prod x^(n_i * 2^i), which is
3264 * of course trivially computable in O(log_2 n), the length of our binary
3265 * vector.
3266 */
3267static unsigned long
3268fixed_power_int(unsigned long x, unsigned int frac_bits, unsigned int n)
3269{
3270 unsigned long result = 1UL << frac_bits;
3271
3272 if (n) for (;;) {
3273 if (n & 1) {
3274 result *= x;
3275 result += 1UL << (frac_bits - 1);
3276 result >>= frac_bits;
3277 }
3278 n >>= 1;
3279 if (!n)
3280 break;
3281 x *= x;
3282 x += 1UL << (frac_bits - 1);
3283 x >>= frac_bits;
3284 }
3285
3286 return result;
3287}
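A worked numeric example (rounding terms omitted for brevity, they do not change these values): with @frac_bits = 11, so that 2048 encodes 1.0, take x = 1024 (0.5) and n = 3 (binary 11). Two loop passes compute 0.5^3:

	result = 2048					(1.0)
	bit 0 of n set:	result = (2048 * 1024) >> 11 = 1024	(0.5)
			x      = (1024 * 1024) >> 11 =  512	(0.25)
	bit 1 of n set:	result = (1024 *  512) >> 11 =  256	(0.125 = 0.5^3)

For large n the number of multiplies grows with the bit length of n rather than with n itself.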
3288
3289/*
3290 * a1 = a0 * e + a * (1 - e)
3291 *
3292 * a2 = a1 * e + a * (1 - e)
3293 * = (a0 * e + a * (1 - e)) * e + a * (1 - e)
3294 * = a0 * e^2 + a * (1 - e) * (1 + e)
3295 *
3296 * a3 = a2 * e + a * (1 - e)
3297 * = (a0 * e^2 + a * (1 - e) * (1 + e)) * e + a * (1 - e)
3298 * = a0 * e^3 + a * (1 - e) * (1 + e + e^2)
3299 *
3300 * ...
3301 *
3302 * an = a0 * e^n + a * (1 - e) * (1 + e + ... + e^n-1) [1]
3303 * = a0 * e^n + a * (1 - e) * (1 - e^n)/(1 - e)
3304 * = a0 * e^n + a * (1 - e^n)
3305 *
3306 * [1] application of the geometric series:
3307 *
3308 * n 1 - x^(n+1)
3309 * S_n := \Sum x^i = -------------
3310 * i=0 1 - x
3311 */
3312static unsigned long
3313calc_load_n(unsigned long load, unsigned long exp,
3314 unsigned long active, unsigned int n)
3315{
3316
3317 return calc_load(load, fixed_power_int(exp, FSHIFT, n), active);
3318}
3319
3320/*
3321 * NO_HZ can leave us missing all per-cpu ticks calling
3322 * calc_load_account_active(), but since an idle CPU folds its delta into
3323 * calc_load_tasks_idle per calc_load_account_idle(), all we need to do is fold
3324 * in the pending idle delta if our idle period crossed a load cycle boundary.
3325 *
3326 * Once we've updated the global active value, we need to apply the exponential
3327 * weights adjusted to the number of cycles missed.
3328 */
3329static void calc_global_nohz(unsigned long ticks)
3330{
3331 long delta, active, n;
3332
3333 if (time_before(jiffies, calc_load_update))
3334 return;
3335
3336 /*
3337 * If we crossed a calc_load_update boundary, make sure to fold
3338 * any pending idle changes, the respective CPUs might have
3339 * missed the tick driven calc_load_account_active() update
3340 * due to NO_HZ.
3341 */
3342 delta = calc_load_fold_idle();
3343 if (delta)
3344 atomic_long_add(delta, &calc_load_tasks);
3345
3346 /*
3347 * If we were idle for multiple load cycles, apply them.
3348 */
3349 if (ticks >= LOAD_FREQ) {
3350 n = ticks / LOAD_FREQ;
3351
3352 active = atomic_long_read(&calc_load_tasks);
3353 active = active > 0 ? active * FIXED_1 : 0;
3354
3355 avenrun[0] = calc_load_n(avenrun[0], EXP_1, active, n);
3356 avenrun[1] = calc_load_n(avenrun[1], EXP_5, active, n);
3357 avenrun[2] = calc_load_n(avenrun[2], EXP_15, active, n);
3358
3359 calc_load_update += n * LOAD_FREQ;
3360 }
3361
3362 /*
3363 * It's possible the remainder of the above division also crosses
3364 * a LOAD_FREQ period; the regular check in calc_global_load()
3365 * which comes after this will take care of that.
3366 *
3367 * Consider us being 11 ticks before a cycle completion, and us
3368 * sleeping for 4*LOAD_FREQ + 22 ticks, then the above code will
3369 * age us 4 cycles, and the test in calc_global_load() will
3370 * pick up the final one.
3371 */
3372}
74f5187a
PZ
3373#else
3374static void calc_load_account_idle(struct rq *this_rq)
3375{
3376}
3377
3378static inline long calc_load_fold_idle(void)
3379{
3380 return 0;
3381}
0f004f5a
PZ
3382
3383static void calc_global_nohz(unsigned long ticks)
3384{
3385}
74f5187a
PZ
3386#endif
3387
2d02494f
TG
3388/**
3389 * get_avenrun - get the load average array
3390 * @loads: pointer to dest load array
3391 * @offset: offset to add
3392 * @shift: shift count to shift the result left
3393 *
3394 * These values are estimates at best, so no need for locking.
3395 */
3396void get_avenrun(unsigned long *loads, unsigned long offset, int shift)
3397{
3398 loads[0] = (avenrun[0] + offset) << shift;
3399 loads[1] = (avenrun[1] + offset) << shift;
3400 loads[2] = (avenrun[2] + offset) << shift;
46cb4b7c 3401}
46cb4b7c 3402
46cb4b7c 3403/*
dce48a84
TG
3404 * calc_load - update the avenrun load estimates 10 ticks after the
3405 * CPUs have updated calc_load_tasks.
7835b98b 3406 */
0f004f5a 3407void calc_global_load(unsigned long ticks)
7835b98b 3408{
dce48a84 3409 long active;
1da177e4 3410
0f004f5a
PZ
3411 calc_global_nohz(ticks);
3412
3413 if (time_before(jiffies, calc_load_update + 10))
dce48a84 3414 return;
1da177e4 3415
dce48a84
TG
3416 active = atomic_long_read(&calc_load_tasks);
3417 active = active > 0 ? active * FIXED_1 : 0;
1da177e4 3418
dce48a84
TG
3419 avenrun[0] = calc_load(avenrun[0], EXP_1, active);
3420 avenrun[1] = calc_load(avenrun[1], EXP_5, active);
3421 avenrun[2] = calc_load(avenrun[2], EXP_15, active);
dd41f596 3422
dce48a84
TG
3423 calc_load_update += LOAD_FREQ;
3424}
1da177e4 3425
dce48a84 3426/*
74f5187a
PZ
3427 * Called from update_cpu_load() to periodically update this CPU's
3428 * active count.
dce48a84
TG
3429 */
3430static void calc_load_account_active(struct rq *this_rq)
3431{
74f5187a 3432 long delta;
08c183f3 3433
74f5187a
PZ
3434 if (time_before(jiffies, this_rq->calc_load_update))
3435 return;
783609c6 3436
74f5187a
PZ
3437 delta = calc_load_fold_active(this_rq);
3438 delta += calc_load_fold_idle();
3439 if (delta)
dce48a84 3440 atomic_long_add(delta, &calc_load_tasks);
74f5187a
PZ
3441
3442 this_rq->calc_load_update += LOAD_FREQ;
46cb4b7c
SS
3443}
3444
fdf3e95d
VP
3445/*
3446 * The exact cpuload at various idx values, calculated at every tick would be
3447 * load = (2^idx - 1) / 2^idx * load + 1 / 2^idx * cur_load
3448 *
3449 * If a cpu misses updates for n-1 ticks (as it was idle) and update gets called
3450 * on the nth tick when the cpu may be busy, then we have:
3451 * load = ((2^idx - 1) / 2^idx)^(n-1) * load
3452 * load = ((2^idx - 1) / 2^idx) * load + 1 / 2^idx * cur_load
3453 *
3454 * decay_load_missed() below does efficient calculation of
3455 * load = ((2^idx - 1) / 2^idx)^(n-1) * load
3456 * avoiding 0..n-1 loop doing load = ((2^idx - 1) / 2^idx) * load
3457 *
3458 * The calculation is approximated on a 128 point scale.
3459 * degrade_zero_ticks is the number of ticks after which load at any
3460 * particular idx is approximated to be zero.
3461 * degrade_factor is a precomputed table, a row for each load idx.
3462 * Each column corresponds to degradation factor for a power of two ticks,
3463 * based on 128 point scale.
3464 * Example:
3465 * row 2, col 3 (=12) says that the degradation at load idx 2 after
3466 * 8 ticks is 12/128 (which is an approximation of exact factor 3^8/4^8).
3467 *
3468 * With this power of 2 load factors, we can degrade the load n times
3469 * by looking at 1 bits in n and doing as many mult/shift instead of
3470 * n mult/shifts needed by the exact degradation.
3471 */
3472#define DEGRADE_SHIFT 7
3473static const unsigned char
3474 degrade_zero_ticks[CPU_LOAD_IDX_MAX] = {0, 8, 32, 64, 128};
3475static const unsigned char
3476 degrade_factor[CPU_LOAD_IDX_MAX][DEGRADE_SHIFT + 1] = {
3477 {0, 0, 0, 0, 0, 0, 0, 0},
3478 {64, 32, 8, 0, 0, 0, 0, 0},
3479 {96, 72, 40, 12, 1, 0, 0},
3480 {112, 98, 75, 43, 15, 1, 0},
3481 {120, 112, 98, 76, 45, 16, 2} };
3482
3483/*
3484 * Update cpu_load for any missed ticks due to tickless idle. The backlog
3485 * builds up only while the CPU is idle, so we just decay the old load without
3486 * adding any new load.
3487 */
3488static unsigned long
3489decay_load_missed(unsigned long load, unsigned long missed_updates, int idx)
3490{
3491 int j = 0;
3492
3493 if (!missed_updates)
3494 return load;
3495
3496 if (missed_updates >= degrade_zero_ticks[idx])
3497 return 0;
3498
3499 if (idx == 1)
3500 return load >> missed_updates;
3501
3502 while (missed_updates) {
3503 if (missed_updates % 2)
3504 load = (load * degrade_factor[idx][j]) >> DEGRADE_SHIFT;
3505
3506 missed_updates >>= 1;
3507 j++;
3508 }
3509 return load;
3510}
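A worked example of the loop above: a CPU that missed 8 ticks (binary 1000) at load index 2 hits only the j = 3 column, so the whole decay is a single multiply:

	load = (load * degrade_factor[2][3]) >> DEGRADE_SHIFT
	     = (1024 * 12) >> 7 = 96

which matches the 12/128 factor quoted in the comment above the table.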
3511
46cb4b7c 3512/*
dd41f596 3513 * Update rq->cpu_load[] statistics. This function is usually called every
fdf3e95d
VP
3514 * scheduler tick (TICK_NSEC). With tickless idle this will not be called
3515 * every tick. We fix it up based on jiffies.
46cb4b7c 3516 */
dd41f596 3517static void update_cpu_load(struct rq *this_rq)
46cb4b7c 3518{
495eca49 3519 unsigned long this_load = this_rq->load.weight;
fdf3e95d
VP
3520 unsigned long curr_jiffies = jiffies;
3521 unsigned long pending_updates;
dd41f596 3522 int i, scale;
46cb4b7c 3523
dd41f596 3524 this_rq->nr_load_updates++;
46cb4b7c 3525
fdf3e95d
VP
3526 /* Avoid repeated calls on same jiffy, when moving in and out of idle */
3527 if (curr_jiffies == this_rq->last_load_update_tick)
3528 return;
3529
3530 pending_updates = curr_jiffies - this_rq->last_load_update_tick;
3531 this_rq->last_load_update_tick = curr_jiffies;
3532
dd41f596 3533 /* Update our load: */
fdf3e95d
VP
3534 this_rq->cpu_load[0] = this_load; /* Fasttrack for idx 0 */
3535 for (i = 1, scale = 2; i < CPU_LOAD_IDX_MAX; i++, scale += scale) {
dd41f596 3536 unsigned long old_load, new_load;
7d1e6a9b 3537
dd41f596 3538 /* scale is effectively 1 << i now, and >> i divides by scale */
46cb4b7c 3539
dd41f596 3540 old_load = this_rq->cpu_load[i];
fdf3e95d 3541 old_load = decay_load_missed(old_load, pending_updates - 1, i);
dd41f596 3542 new_load = this_load;
a25707f3
IM
3543 /*
3544 * Round up the averaging division if load is increasing. This
3545 * prevents us from getting stuck on 9 if the load is 10, for
3546 * example.
3547 */
3548 if (new_load > old_load)
fdf3e95d
VP
3549 new_load += scale - 1;
3550
3551 this_rq->cpu_load[i] = (old_load * (scale - 1) + new_load) >> i;
dd41f596 3552 }
da2b71ed
SS
3553
3554 sched_avg_update(this_rq);
fdf3e95d
VP
3555}
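Numerically, the per-index update above is cpu_load[i] = (old * (2^i - 1) + new) >> i, so higher indexes react more slowly. For example, with old = 0 and new = 1024 (round-up term omitted; it does not change these values):

	i = 1:	(0 * 1  + 1024) >> 1 = 512
	i = 4:	(0 * 15 + 1024) >> 4 = 64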
3556
3557static void update_cpu_load_active(struct rq *this_rq)
3558{
3559 update_cpu_load(this_rq);
46cb4b7c 3560
74f5187a 3561 calc_load_account_active(this_rq);
46cb4b7c
SS
3562}
3563
dd41f596 3564#ifdef CONFIG_SMP
8a0be9ef 3565
46cb4b7c 3566/*
38022906
PZ
3567 * sched_exec - execve() is a valuable balancing opportunity, because at
3568 * this point the task has the smallest effective memory and cache footprint.
46cb4b7c 3569 */
38022906 3570void sched_exec(void)
46cb4b7c 3571{
38022906 3572 struct task_struct *p = current;
1da177e4 3573 unsigned long flags;
0017d735 3574 int dest_cpu;
46cb4b7c 3575
8f42ced9 3576 raw_spin_lock_irqsave(&p->pi_lock, flags);
7608dec2 3577 dest_cpu = p->sched_class->select_task_rq(p, SD_BALANCE_EXEC, 0);
0017d735
PZ
3578 if (dest_cpu == smp_processor_id())
3579 goto unlock;
38022906 3580
8f42ced9 3581 if (likely(cpu_active(dest_cpu))) {
969c7921 3582 struct migration_arg arg = { p, dest_cpu };
46cb4b7c 3583
8f42ced9
PZ
3584 raw_spin_unlock_irqrestore(&p->pi_lock, flags);
3585 stop_one_cpu(task_cpu(p), migration_cpu_stop, &arg);
1da177e4
LT
3586 return;
3587 }
0017d735 3588unlock:
8f42ced9 3589 raw_spin_unlock_irqrestore(&p->pi_lock, flags);
1da177e4 3590}
dd41f596 3591
1da177e4
LT
3592#endif
3593
1da177e4
LT
3594DEFINE_PER_CPU(struct kernel_stat, kstat);
3595
3596EXPORT_PER_CPU_SYMBOL(kstat);
3597
3598/*
c5f8d995 3599 * Return any ns on the sched_clock that have not yet been accounted in
f06febc9 3600 * @p in case that task is currently running.
c5f8d995
HS
3601 *
3602 * Called with task_rq_lock() held on @rq.
1da177e4 3603 */
c5f8d995
HS
3604static u64 do_task_delta_exec(struct task_struct *p, struct rq *rq)
3605{
3606 u64 ns = 0;
3607
3608 if (task_current(rq, p)) {
3609 update_rq_clock(rq);
305e6835 3610 ns = rq->clock_task - p->se.exec_start;
c5f8d995
HS
3611 if ((s64)ns < 0)
3612 ns = 0;
3613 }
3614
3615 return ns;
3616}
3617
bb34d92f 3618unsigned long long task_delta_exec(struct task_struct *p)
1da177e4 3619{
1da177e4 3620 unsigned long flags;
41b86e9c 3621 struct rq *rq;
bb34d92f 3622 u64 ns = 0;
48f24c4d 3623
41b86e9c 3624 rq = task_rq_lock(p, &flags);
c5f8d995 3625 ns = do_task_delta_exec(p, rq);
0122ec5b 3626 task_rq_unlock(rq, p, &flags);
1508487e 3627
c5f8d995
HS
3628 return ns;
3629}
f06febc9 3630
c5f8d995
HS
3631/*
3632 * Return accounted runtime for the task.
3633 * In case the task is currently running, return the runtime plus current's
3634 * pending runtime that have not been accounted yet.
3635 */
3636unsigned long long task_sched_runtime(struct task_struct *p)
3637{
3638 unsigned long flags;
3639 struct rq *rq;
3640 u64 ns = 0;
3641
3642 rq = task_rq_lock(p, &flags);
3643 ns = p->se.sum_exec_runtime + do_task_delta_exec(p, rq);
0122ec5b 3644 task_rq_unlock(rq, p, &flags);
c5f8d995
HS
3645
3646 return ns;
3647}
48f24c4d 3648
c5f8d995
HS
3649/*
3650 * Return sum_exec_runtime for the thread group.
3651 * In case the task is currently running, return the sum plus current's
3652 * pending runtime that have not been accounted yet.
3653 *
3654 * Note that the thread group might have other running tasks as well,
3655 * so the return value does not include pending runtime that other
3656 * running tasks might have.
3657 */
3658unsigned long long thread_group_sched_runtime(struct task_struct *p)
3659{
3660 struct task_cputime totals;
3661 unsigned long flags;
3662 struct rq *rq;
3663 u64 ns;
3664
3665 rq = task_rq_lock(p, &flags);
3666 thread_group_cputime(p, &totals);
3667 ns = totals.sum_exec_runtime + do_task_delta_exec(p, rq);
0122ec5b 3668 task_rq_unlock(rq, p, &flags);
48f24c4d 3669
1da177e4
LT
3670 return ns;
3671}
3672
1da177e4
LT
3673/*
3674 * Account user cpu time to a process.
3675 * @p: the process that the cpu time gets accounted to
1da177e4 3676 * @cputime: the cpu time spent in user space since the last update
457533a7 3677 * @cputime_scaled: cputime scaled by cpu frequency
1da177e4 3678 */
457533a7
MS
3679void account_user_time(struct task_struct *p, cputime_t cputime,
3680 cputime_t cputime_scaled)
1da177e4
LT
3681{
3682 struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
3683 cputime64_t tmp;
3684
457533a7 3685 /* Add user time to process. */
1da177e4 3686 p->utime = cputime_add(p->utime, cputime);
457533a7 3687 p->utimescaled = cputime_add(p->utimescaled, cputime_scaled);
f06febc9 3688 account_group_user_time(p, cputime);
1da177e4
LT
3689
3690 /* Add user time to cpustat. */
3691 tmp = cputime_to_cputime64(cputime);
3692 if (TASK_NICE(p) > 0)
3693 cpustat->nice = cputime64_add(cpustat->nice, tmp);
3694 else
3695 cpustat->user = cputime64_add(cpustat->user, tmp);
ef12fefa
BR
3696
3697 cpuacct_update_stats(p, CPUACCT_STAT_USER, cputime);
49b5cf34
JL
3698 /* Account for user time used */
3699 acct_update_integrals(p);
1da177e4
LT
3700}
3701
94886b84
LV
3702/*
3703 * Account guest cpu time to a process.
3704 * @p: the process that the cpu time gets accounted to
3705 * @cputime: the cpu time spent in virtual machine since the last update
457533a7 3706 * @cputime_scaled: cputime scaled by cpu frequency
94886b84 3707 */
457533a7
MS
3708static void account_guest_time(struct task_struct *p, cputime_t cputime,
3709 cputime_t cputime_scaled)
94886b84
LV
3710{
3711 cputime64_t tmp;
3712 struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
3713
3714 tmp = cputime_to_cputime64(cputime);
3715
457533a7 3716 /* Add guest time to process. */
94886b84 3717 p->utime = cputime_add(p->utime, cputime);
457533a7 3718 p->utimescaled = cputime_add(p->utimescaled, cputime_scaled);
f06febc9 3719 account_group_user_time(p, cputime);
94886b84
LV
3720 p->gtime = cputime_add(p->gtime, cputime);
3721
457533a7 3722 /* Add guest time to cpustat. */
ce0e7b28
RO
3723 if (TASK_NICE(p) > 0) {
3724 cpustat->nice = cputime64_add(cpustat->nice, tmp);
3725 cpustat->guest_nice = cputime64_add(cpustat->guest_nice, tmp);
3726 } else {
3727 cpustat->user = cputime64_add(cpustat->user, tmp);
3728 cpustat->guest = cputime64_add(cpustat->guest, tmp);
3729 }
94886b84
LV
3730}
3731
70a89a66
VP
3732/*
3733 * Account system cpu time to a process and desired cpustat field
3734 * @p: the process that the cpu time gets accounted to
3735 * @cputime: the cpu time spent in kernel space since the last update
3736 * @cputime_scaled: cputime scaled by cpu frequency
3737 * @target_cputime64: pointer to cpustat field that has to be updated
3738 */
3739static inline
3740void __account_system_time(struct task_struct *p, cputime_t cputime,
3741 cputime_t cputime_scaled, cputime64_t *target_cputime64)
3742{
3743 cputime64_t tmp = cputime_to_cputime64(cputime);
3744
3745 /* Add system time to process. */
3746 p->stime = cputime_add(p->stime, cputime);
3747 p->stimescaled = cputime_add(p->stimescaled, cputime_scaled);
3748 account_group_system_time(p, cputime);
3749
3750 /* Add system time to cpustat. */
3751 *target_cputime64 = cputime64_add(*target_cputime64, tmp);
3752 cpuacct_update_stats(p, CPUACCT_STAT_SYSTEM, cputime);
3753
3754 /* Account for system time used */
3755 acct_update_integrals(p);
3756}
3757
1da177e4
LT
3758/*
3759 * Account system cpu time to a process.
3760 * @p: the process that the cpu time gets accounted to
3761 * @hardirq_offset: the offset to subtract from hardirq_count()
3762 * @cputime: the cpu time spent in kernel space since the last update
457533a7 3763 * @cputime_scaled: cputime scaled by cpu frequency
1da177e4
LT
3764 */
3765void account_system_time(struct task_struct *p, int hardirq_offset,
457533a7 3766 cputime_t cputime, cputime_t cputime_scaled)
1da177e4
LT
3767{
3768 struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
70a89a66 3769 cputime64_t *target_cputime64;
1da177e4 3770
983ed7a6 3771 if ((p->flags & PF_VCPU) && (irq_count() - hardirq_offset == 0)) {
457533a7 3772 account_guest_time(p, cputime, cputime_scaled);
983ed7a6
HH
3773 return;
3774 }
94886b84 3775
1da177e4 3776 if (hardirq_count() - hardirq_offset)
70a89a66 3777 target_cputime64 = &cpustat->irq;
75e1056f 3778 else if (in_serving_softirq())
70a89a66 3779 target_cputime64 = &cpustat->softirq;
1da177e4 3780 else
70a89a66 3781 target_cputime64 = &cpustat->system;
ef12fefa 3782
70a89a66 3783 __account_system_time(p, cputime, cputime_scaled, target_cputime64);
1da177e4
LT
3784}
3785
c66f08be 3786/*
1da177e4 3787 * Account for involuntary wait time.
544b4a1f 3788 * @cputime: the cpu time spent in involuntary wait
c66f08be 3789 */
79741dd3 3790void account_steal_time(cputime_t cputime)
c66f08be 3791{
79741dd3
MS
3792 struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
3793 cputime64_t cputime64 = cputime_to_cputime64(cputime);
3794
3795 cpustat->steal = cputime64_add(cpustat->steal, cputime64);
c66f08be
MN
3796}
3797
1da177e4 3798/*
79741dd3
MS
3799 * Account for idle time.
3800 * @cputime: the cpu time spent in idle wait
1da177e4 3801 */
79741dd3 3802void account_idle_time(cputime_t cputime)
1da177e4
LT
3803{
3804 struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
79741dd3 3805 cputime64_t cputime64 = cputime_to_cputime64(cputime);
70b97a7f 3806 struct rq *rq = this_rq();
1da177e4 3807
79741dd3
MS
3808 if (atomic_read(&rq->nr_iowait) > 0)
3809 cpustat->iowait = cputime64_add(cpustat->iowait, cputime64);
3810 else
3811 cpustat->idle = cputime64_add(cpustat->idle, cputime64);
1da177e4
LT
3812}
3813
79741dd3
MS
3814#ifndef CONFIG_VIRT_CPU_ACCOUNTING
3815
abb74cef
VP
3816#ifdef CONFIG_IRQ_TIME_ACCOUNTING
3817/*
3818 * Account a tick to a process and cpustat
3819 * @p: the process that the cpu time gets accounted to
3820 * @user_tick: whether the tick is from userspace
3821 * @rq: the pointer to rq
3822 *
3823 * Tick demultiplexing follows the order
3824 * - pending hardirq update
3825 * - pending softirq update
3826 * - user_time
3827 * - idle_time
3828 * - system time
3829 * - check for guest_time
3830 * - else account as system_time
3831 *
3832 * The check for hardirq is done for both system and user time, as no
3833 * timer fires while we are in hardirq context and hence we may never get
3834 * an opportunity to account it solely as system time.
3835 * p->stime and friends are only updated on the system time path and not
3836 * for irq/softirq time, as those no longer count towards task exec_runtime.
3837 */
3838static void irqtime_account_process_tick(struct task_struct *p, int user_tick,
3839 struct rq *rq)
3840{
3841 cputime_t one_jiffy_scaled = cputime_to_scaled(cputime_one_jiffy);
3842 cputime64_t tmp = cputime_to_cputime64(cputime_one_jiffy);
3843 struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
3844
3845 if (irqtime_account_hi_update()) {
3846 cpustat->irq = cputime64_add(cpustat->irq, tmp);
3847 } else if (irqtime_account_si_update()) {
3848 cpustat->softirq = cputime64_add(cpustat->softirq, tmp);
414bee9b
VP
3849 } else if (this_cpu_ksoftirqd() == p) {
3850 /*
3851 * ksoftirqd time do not get accounted in cpu_softirq_time.
3852 * So, we have to handle it separately here.
3853 * Also, p->stime needs to be updated for ksoftirqd.
3854 */
3855 __account_system_time(p, cputime_one_jiffy, one_jiffy_scaled,
3856 &cpustat->softirq);
abb74cef
VP
3857 } else if (user_tick) {
3858 account_user_time(p, cputime_one_jiffy, one_jiffy_scaled);
3859 } else if (p == rq->idle) {
3860 account_idle_time(cputime_one_jiffy);
3861 } else if (p->flags & PF_VCPU) { /* System time or guest time */
3862 account_guest_time(p, cputime_one_jiffy, one_jiffy_scaled);
3863 } else {
3864 __account_system_time(p, cputime_one_jiffy, one_jiffy_scaled,
3865 &cpustat->system);
3866 }
3867}
3868
3869static void irqtime_account_idle_ticks(int ticks)
3870{
3871 int i;
3872 struct rq *rq = this_rq();
3873
3874 for (i = 0; i < ticks; i++)
3875 irqtime_account_process_tick(current, 0, rq);
3876}
544b4a1f 3877#else /* CONFIG_IRQ_TIME_ACCOUNTING */
abb74cef
VP
3878static void irqtime_account_idle_ticks(int ticks) {}
3879static void irqtime_account_process_tick(struct task_struct *p, int user_tick,
3880 struct rq *rq) {}
544b4a1f 3881#endif /* CONFIG_IRQ_TIME_ACCOUNTING */
79741dd3
MS
3882
3883/*
3884 * Account a single tick of cpu time.
3885 * @p: the process that the cpu time gets accounted to
3886 * @user_tick: indicates if the tick is a user or a system tick
3887 */
3888void account_process_tick(struct task_struct *p, int user_tick)
3889{
a42548a1 3890 cputime_t one_jiffy_scaled = cputime_to_scaled(cputime_one_jiffy);
79741dd3
MS
3891 struct rq *rq = this_rq();
3892
abb74cef
VP
3893 if (sched_clock_irqtime) {
3894 irqtime_account_process_tick(p, user_tick, rq);
3895 return;
3896 }
3897
79741dd3 3898 if (user_tick)
a42548a1 3899 account_user_time(p, cputime_one_jiffy, one_jiffy_scaled);
f5f293a4 3900 else if ((p != rq->idle) || (irq_count() != HARDIRQ_OFFSET))
a42548a1 3901 account_system_time(p, HARDIRQ_OFFSET, cputime_one_jiffy,
79741dd3
MS
3902 one_jiffy_scaled);
3903 else
a42548a1 3904 account_idle_time(cputime_one_jiffy);
79741dd3
MS
3905}
3906
3907/*
3908 * Account multiple ticks of steal time.
3909 * @ticks: number of stolen ticks
3911 */
3912void account_steal_ticks(unsigned long ticks)
3913{
3914 account_steal_time(jiffies_to_cputime(ticks));
3915}
3916
3917/*
3918 * Account multiple ticks of idle time.
3919 * @ticks: number of idle ticks
3920 */
3921void account_idle_ticks(unsigned long ticks)
3922{
abb74cef
VP
3923
3924 if (sched_clock_irqtime) {
3925 irqtime_account_idle_ticks(ticks);
3926 return;
3927 }
3928
79741dd3 3929 account_idle_time(jiffies_to_cputime(ticks));
1da177e4
LT
3930}
3931
79741dd3
MS
3932#endif
3933
49048622
BS
3934/*
3935 * Use precise platform statistics if available:
3936 */
3937#ifdef CONFIG_VIRT_CPU_ACCOUNTING
d180c5bc 3938void task_times(struct task_struct *p, cputime_t *ut, cputime_t *st)
49048622 3939{
d99ca3b9
HS
3940 *ut = p->utime;
3941 *st = p->stime;
49048622
BS
3942}
3943
0cf55e1e 3944void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *st)
49048622 3945{
0cf55e1e
HS
3946 struct task_cputime cputime;
3947
3948 thread_group_cputime(p, &cputime);
3949
3950 *ut = cputime.utime;
3951 *st = cputime.stime;
49048622
BS
3952}
3953#else
761b1d26
HS
3954
3955#ifndef nsecs_to_cputime
b7b20df9 3956# define nsecs_to_cputime(__nsecs) nsecs_to_jiffies(__nsecs)
761b1d26
HS
3957#endif
3958
d180c5bc 3959void task_times(struct task_struct *p, cputime_t *ut, cputime_t *st)
49048622 3960{
d99ca3b9 3961 cputime_t rtime, utime = p->utime, total = cputime_add(utime, p->stime);
49048622
BS
3962
3963 /*
3964 * Use CFS's precise accounting:
3965 */
d180c5bc 3966 rtime = nsecs_to_cputime(p->se.sum_exec_runtime);
49048622
BS
3967
3968 if (total) {
e75e863d 3969 u64 temp = rtime;
d180c5bc 3970
e75e863d 3971 temp *= utime;
49048622 3972 do_div(temp, total);
d180c5bc
HS
3973 utime = (cputime_t)temp;
3974 } else
3975 utime = rtime;
49048622 3976
d180c5bc
HS
3977 /*
3978 * Compare with previous values, to keep monotonicity:
3979 */
761b1d26 3980 p->prev_utime = max(p->prev_utime, utime);
d99ca3b9 3981 p->prev_stime = max(p->prev_stime, cputime_sub(rtime, p->prev_utime));
49048622 3982
d99ca3b9
HS
3983 *ut = p->prev_utime;
3984 *st = p->prev_stime;
49048622
BS
3985}
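
/*
 * Editor's note (worked example, not part of the original source): the
 * scaling above splits the precisely accounted rtime in the same
 * user/system ratio as the tick-based samples, then clamps both values
 * so they never go backwards.  E.g. with rtime = 1000 (cputime units),
 * sampled utime = 3 and stime = 1 (total = 4):
 *
 *	utime = 1000 * 3 / 4 = 750;
 *	stime = 1000 - 750   = 250;
 */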
3986
0cf55e1e
HS
3987/*
3988 * Must be called with siglock held.
3989 */
3990void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *st)
49048622 3991{
0cf55e1e
HS
3992 struct signal_struct *sig = p->signal;
3993 struct task_cputime cputime;
3994 cputime_t rtime, utime, total;
49048622 3995
0cf55e1e 3996 thread_group_cputime(p, &cputime);
49048622 3997
0cf55e1e
HS
3998 total = cputime_add(cputime.utime, cputime.stime);
3999 rtime = nsecs_to_cputime(cputime.sum_exec_runtime);
49048622 4000
0cf55e1e 4001 if (total) {
e75e863d 4002 u64 temp = rtime;
49048622 4003
e75e863d 4004 temp *= cputime.utime;
0cf55e1e
HS
4005 do_div(temp, total);
4006 utime = (cputime_t)temp;
4007 } else
4008 utime = rtime;
4009
4010 sig->prev_utime = max(sig->prev_utime, utime);
4011 sig->prev_stime = max(sig->prev_stime,
4012 cputime_sub(rtime, sig->prev_utime));
4013
4014 *ut = sig->prev_utime;
4015 *st = sig->prev_stime;
49048622 4016}
49048622 4017#endif
49048622 4018
7835b98b
CL
4019/*
4020 * This function gets called by the timer code, with HZ frequency.
4021 * We call it with interrupts disabled.
7835b98b
CL
4022 */
4023void scheduler_tick(void)
4024{
7835b98b
CL
4025 int cpu = smp_processor_id();
4026 struct rq *rq = cpu_rq(cpu);
dd41f596 4027 struct task_struct *curr = rq->curr;
3e51f33f
PZ
4028
4029 sched_clock_tick();
dd41f596 4030
05fa785c 4031 raw_spin_lock(&rq->lock);
3e51f33f 4032 update_rq_clock(rq);
fdf3e95d 4033 update_cpu_load_active(rq);
fa85ae24 4034 curr->sched_class->task_tick(rq, curr, 0);
05fa785c 4035 raw_spin_unlock(&rq->lock);
7835b98b 4036
e9d2b064 4037 perf_event_task_tick();
e220d2dc 4038
e418e1c2 4039#ifdef CONFIG_SMP
dd41f596
IM
4040 rq->idle_at_tick = idle_cpu(cpu);
4041 trigger_load_balance(rq, cpu);
e418e1c2 4042#endif
1da177e4
LT
4043}
4044
132380a0 4045notrace unsigned long get_parent_ip(unsigned long addr)
6cd8a4bb
SR
4046{
4047 if (in_lock_functions(addr)) {
4048 addr = CALLER_ADDR2;
4049 if (in_lock_functions(addr))
4050 addr = CALLER_ADDR3;
4051 }
4052 return addr;
4053}
1da177e4 4054
7e49fcce
SR
4055#if defined(CONFIG_PREEMPT) && (defined(CONFIG_DEBUG_PREEMPT) || \
4056 defined(CONFIG_PREEMPT_TRACER))
4057
43627582 4058void __kprobes add_preempt_count(int val)
1da177e4 4059{
6cd8a4bb 4060#ifdef CONFIG_DEBUG_PREEMPT
1da177e4
LT
4061 /*
4062 * Underflow?
4063 */
9a11b49a
IM
4064 if (DEBUG_LOCKS_WARN_ON((preempt_count() < 0)))
4065 return;
6cd8a4bb 4066#endif
1da177e4 4067 preempt_count() += val;
6cd8a4bb 4068#ifdef CONFIG_DEBUG_PREEMPT
1da177e4
LT
4069 /*
4070 * Spinlock count overflowing soon?
4071 */
33859f7f
MOS
4072 DEBUG_LOCKS_WARN_ON((preempt_count() & PREEMPT_MASK) >=
4073 PREEMPT_MASK - 10);
6cd8a4bb
SR
4074#endif
4075 if (preempt_count() == val)
4076 trace_preempt_off(CALLER_ADDR0, get_parent_ip(CALLER_ADDR1));
1da177e4
LT
4077}
4078EXPORT_SYMBOL(add_preempt_count);
4079
43627582 4080void __kprobes sub_preempt_count(int val)
1da177e4 4081{
6cd8a4bb 4082#ifdef CONFIG_DEBUG_PREEMPT
1da177e4
LT
4083 /*
4084 * Underflow?
4085 */
01e3eb82 4086 if (DEBUG_LOCKS_WARN_ON(val > preempt_count()))
9a11b49a 4087 return;
1da177e4
LT
4088 /*
4089 * Is the spinlock portion underflowing?
4090 */
9a11b49a
IM
4091 if (DEBUG_LOCKS_WARN_ON((val < PREEMPT_MASK) &&
4092 !(preempt_count() & PREEMPT_MASK)))
4093 return;
6cd8a4bb 4094#endif
9a11b49a 4095
6cd8a4bb
SR
4096 if (preempt_count() == val)
4097 trace_preempt_on(CALLER_ADDR0, get_parent_ip(CALLER_ADDR1));
1da177e4
LT
4098 preempt_count() -= val;
4099}
4100EXPORT_SYMBOL(sub_preempt_count);
4101
4102#endif
4103
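
/*
 * Editor's illustrative sketch (not part of sched.c): preempt_disable()
 * and preempt_enable() are the usual entry points to the counters above
 * (plus compiler barriers and a reschedule check on the final enable).
 * The per-cpu variable and helper below are invented for the example.
 */
#if 0	/* example only */
DEFINE_PER_CPU(int, example_count);

static void example_poke(void)
{
	preempt_disable();			/* count++, no migration now */
	__get_cpu_var(example_count)++;		/* safe per-cpu access */
	preempt_enable();			/* count--, resched if needed */
}
#endif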
4104/*
dd41f596 4105 * Print scheduling while atomic bug:
1da177e4 4106 */
dd41f596 4107static noinline void __schedule_bug(struct task_struct *prev)
1da177e4 4108{
838225b4
SS
4109 struct pt_regs *regs = get_irq_regs();
4110
3df0fc5b
PZ
4111 printk(KERN_ERR "BUG: scheduling while atomic: %s/%d/0x%08x\n",
4112 prev->comm, prev->pid, preempt_count());
838225b4 4113
dd41f596 4114 debug_show_held_locks(prev);
e21f5b15 4115 print_modules();
dd41f596
IM
4116 if (irqs_disabled())
4117 print_irqtrace_events(prev);
838225b4
SS
4118
4119 if (regs)
4120 show_regs(regs);
4121 else
4122 dump_stack();
dd41f596 4123}
1da177e4 4124
dd41f596
IM
4125/*
4126 * Various schedule()-time debugging checks and statistics:
4127 */
4128static inline void schedule_debug(struct task_struct *prev)
4129{
1da177e4 4130 /*
41a2d6cf 4131 * Test if we are atomic. Since do_exit() needs to call into
1da177e4
LT
4132 * schedule() atomically, we ignore that path for now.
4133 * Otherwise, whine if we are scheduling when we should not be.
4134 */
3f33a7ce 4135 if (unlikely(in_atomic_preempt_off() && !prev->exit_state))
dd41f596
IM
4136 __schedule_bug(prev);
4137
1da177e4
LT
4138 profile_hit(SCHED_PROFILING, __builtin_return_address(0));
4139
2d72376b 4140 schedstat_inc(this_rq(), sched_count);
dd41f596
IM
4141}
4142
6cecd084 4143static void put_prev_task(struct rq *rq, struct task_struct *prev)
df1c99d4 4144{
61eadef6 4145 if (prev->on_rq || rq->skip_clock_update < 0)
a64692a3 4146 update_rq_clock(rq);
6cecd084 4147 prev->sched_class->put_prev_task(rq, prev);
df1c99d4
MG
4148}
4149
dd41f596
IM
4150/*
4151 * Pick up the highest-prio task:
4152 */
4153static inline struct task_struct *
b67802ea 4154pick_next_task(struct rq *rq)
dd41f596 4155{
5522d5d5 4156 const struct sched_class *class;
dd41f596 4157 struct task_struct *p;
1da177e4
LT
4158
4159 /*
dd41f596
IM
4160 * Optimization: we know that if all tasks are in
4161 * the fair class we can call that function directly:
1da177e4 4162 */
dd41f596 4163 if (likely(rq->nr_running == rq->cfs.nr_running)) {
fb8d4724 4164 p = fair_sched_class.pick_next_task(rq);
dd41f596
IM
4165 if (likely(p))
4166 return p;
1da177e4
LT
4167 }
4168
34f971f6 4169 for_each_class(class) {
fb8d4724 4170 p = class->pick_next_task(rq);
dd41f596
IM
4171 if (p)
4172 return p;
dd41f596 4173 }
34f971f6
PZ
4174
4175 BUG(); /* the idle class will always have a runnable task */
dd41f596 4176}
1da177e4 4177
dd41f596
IM
4178/*
4179 * schedule() is the main scheduler function.
4180 */
ff743345 4181asmlinkage void __sched schedule(void)
dd41f596
IM
4182{
4183 struct task_struct *prev, *next;
67ca7bde 4184 unsigned long *switch_count;
dd41f596 4185 struct rq *rq;
31656519 4186 int cpu;
dd41f596 4187
ff743345
PZ
4188need_resched:
4189 preempt_disable();
dd41f596
IM
4190 cpu = smp_processor_id();
4191 rq = cpu_rq(cpu);
25502a6c 4192 rcu_note_context_switch(cpu);
dd41f596 4193 prev = rq->curr;
dd41f596 4194
dd41f596 4195 schedule_debug(prev);
1da177e4 4196
31656519 4197 if (sched_feat(HRTICK))
f333fdc9 4198 hrtick_clear(rq);
8f4d37ec 4199
05fa785c 4200 raw_spin_lock_irq(&rq->lock);
1da177e4 4201
246d86b5 4202 switch_count = &prev->nivcsw;
1da177e4 4203 if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) {
21aa9af0 4204 if (unlikely(signal_pending_state(prev->state, prev))) {
1da177e4 4205 prev->state = TASK_RUNNING;
21aa9af0 4206 } else {
2acca55e
PZ
4207 deactivate_task(rq, prev, DEQUEUE_SLEEP);
4208 prev->on_rq = 0;
4209
21aa9af0 4210 /*
2acca55e
PZ
4211 * If a worker went to sleep, notify and ask workqueue
4212 * whether it wants to wake up a task to maintain
4213 * concurrency.
21aa9af0
TH
4214 */
4215 if (prev->flags & PF_WQ_WORKER) {
4216 struct task_struct *to_wakeup;
4217
4218 to_wakeup = wq_worker_sleeping(prev, cpu);
4219 if (to_wakeup)
4220 try_to_wake_up_local(to_wakeup);
4221 }
fd2f4419 4222
6631e635 4223 /*
2acca55e
PZ
4224 * If we are going to sleep and we have plugged IO
4225 * queued, make sure to submit it to avoid deadlocks.
6631e635
LT
4226 */
4227 if (blk_needs_flush_plug(prev)) {
4228 raw_spin_unlock(&rq->lock);
a237c1c5 4229 blk_schedule_flush_plug(prev);
6631e635
LT
4230 raw_spin_lock(&rq->lock);
4231 }
21aa9af0 4232 }
dd41f596 4233 switch_count = &prev->nvcsw;
1da177e4
LT
4234 }
4235
3f029d3c 4236 pre_schedule(rq, prev);
f65eda4f 4237
dd41f596 4238 if (unlikely(!rq->nr_running))
1da177e4 4239 idle_balance(cpu, rq);
1da177e4 4240
df1c99d4 4241 put_prev_task(rq, prev);
b67802ea 4242 next = pick_next_task(rq);
f26f9aff
MG
4243 clear_tsk_need_resched(prev);
4244 rq->skip_clock_update = 0;
1da177e4 4245
1da177e4 4246 if (likely(prev != next)) {
1da177e4
LT
4247 rq->nr_switches++;
4248 rq->curr = next;
4249 ++*switch_count;
4250
dd41f596 4251 context_switch(rq, prev, next); /* unlocks the rq */
8f4d37ec 4252 /*
246d86b5
ON
4253 * The context switch has flipped the stack from under us
4254 * and restored the local variables which were saved when
4255 * this task called schedule() in the past. prev == current
4256 * is still correct, but it can be moved to another cpu/rq.
8f4d37ec
PZ
4257 */
4258 cpu = smp_processor_id();
4259 rq = cpu_rq(cpu);
1da177e4 4260 } else
05fa785c 4261 raw_spin_unlock_irq(&rq->lock);
1da177e4 4262
3f029d3c 4263 post_schedule(rq);
1da177e4 4264
1da177e4 4265 preempt_enable_no_resched();
ff743345 4266 if (need_resched())
1da177e4
LT
4267 goto need_resched;
4268}
1da177e4
LT
4269EXPORT_SYMBOL(schedule);
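
/*
 * Editor's illustrative sketch (not part of sched.c): the classic
 * open-coded sleep/wakeup pattern around schedule(), using the
 * <linux/wait.h> helpers.  example_head and the condition pointer are
 * invented for the example.
 */
#if 0	/* example only */
static DECLARE_WAIT_QUEUE_HEAD(example_head);

static void example_wait_for(int *condition)
{
	DEFINE_WAIT(wait);

	for (;;) {
		prepare_to_wait(&example_head, &wait, TASK_UNINTERRUPTIBLE);
		if (*condition)
			break;
		schedule();		/* sleeps until example_head is woken */
	}
	finish_wait(&example_head, &wait);
}
#endif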
4270
c08f7829 4271#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
0d66bf6d 4272
c6eb3dda
PZ
4273static inline bool owner_running(struct mutex *lock, struct task_struct *owner)
4274{
4275 bool ret = false;
0d66bf6d 4276
c6eb3dda
PZ
4277 rcu_read_lock();
4278 if (lock->owner != owner)
4279 goto fail;
0d66bf6d
PZ
4280
4281 /*
c6eb3dda
PZ
4282 * Ensure we emit the owner->on_cpu dereference _after_ checking that
4283 * lock->owner still matches owner. If that fails, owner might point
4284 * to free()d memory; if it still matches, the rcu_read_lock() ensures
4285 * the memory stays valid.
0d66bf6d 4286 */
c6eb3dda 4287 barrier();
0d66bf6d 4288
c6eb3dda
PZ
4289 ret = owner->on_cpu;
4290fail:
4291 rcu_read_unlock();
0d66bf6d 4292
c6eb3dda
PZ
4293 return ret;
4294}
0d66bf6d 4295
c6eb3dda
PZ
4296/*
4297 * Look out! "owner" is an entirely speculative pointer
4298 * access and not reliable.
4299 */
4300int mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner)
4301{
4302 if (!sched_feat(OWNER_SPIN))
4303 return 0;
0d66bf6d 4304
c6eb3dda
PZ
4305 while (owner_running(lock, owner)) {
4306 if (need_resched())
0d66bf6d
PZ
4307 return 0;
4308
335d7afb 4309 arch_mutex_cpu_relax();
0d66bf6d 4310 }
4b402210 4311
c6eb3dda
PZ
4312 /*
4313 * If the owner changed to another task there is likely
4314 * heavy contention, stop spinning.
4315 */
4316 if (lock->owner)
4317 return 0;
4318
0d66bf6d
PZ
4319 return 1;
4320}
4321#endif
4322
1da177e4
LT
4323#ifdef CONFIG_PREEMPT
4324/*
2ed6e34f 4325 * this is the entry point to schedule() from in-kernel preemption
41a2d6cf 4326 * off of preempt_enable. Kernel preemptions off return from interrupt
1da177e4
LT
4327 * occur there and call schedule directly.
4328 */
d1f74e20 4329asmlinkage void __sched notrace preempt_schedule(void)
1da177e4
LT
4330{
4331 struct thread_info *ti = current_thread_info();
6478d880 4332
1da177e4
LT
4333 /*
4334 * If there is a non-zero preempt_count or interrupts are disabled,
41a2d6cf 4335 * we do not want to preempt the current task. Just return..
1da177e4 4336 */
beed33a8 4337 if (likely(ti->preempt_count || irqs_disabled()))
1da177e4
LT
4338 return;
4339
3a5c359a 4340 do {
d1f74e20 4341 add_preempt_count_notrace(PREEMPT_ACTIVE);
3a5c359a 4342 schedule();
d1f74e20 4343 sub_preempt_count_notrace(PREEMPT_ACTIVE);
1da177e4 4344
3a5c359a
AK
4345 /*
4346 * Check again in case we missed a preemption opportunity
4347 * between schedule and now.
4348 */
4349 barrier();
5ed0cec0 4350 } while (need_resched());
1da177e4 4351}
1da177e4
LT
4352EXPORT_SYMBOL(preempt_schedule);
4353
4354/*
2ed6e34f 4355 * this is the entry point to schedule() from kernel preemption
1da177e4
LT
4356 * off of irq context.
4357 * Note, that this is called and return with irqs disabled. This will
4358 * protect us against recursive calling from irq.
4359 */
4360asmlinkage void __sched preempt_schedule_irq(void)
4361{
4362 struct thread_info *ti = current_thread_info();
6478d880 4363
2ed6e34f 4364 /* Catch callers which need to be fixed */
1da177e4
LT
4365 BUG_ON(ti->preempt_count || !irqs_disabled());
4366
3a5c359a
AK
4367 do {
4368 add_preempt_count(PREEMPT_ACTIVE);
3a5c359a
AK
4369 local_irq_enable();
4370 schedule();
4371 local_irq_disable();
3a5c359a 4372 sub_preempt_count(PREEMPT_ACTIVE);
1da177e4 4373
3a5c359a
AK
4374 /*
4375 * Check again in case we missed a preemption opportunity
4376 * between schedule and now.
4377 */
4378 barrier();
5ed0cec0 4379 } while (need_resched());
1da177e4
LT
4380}
4381
4382#endif /* CONFIG_PREEMPT */
4383
63859d4f 4384int default_wake_function(wait_queue_t *curr, unsigned mode, int wake_flags,
95cdf3b7 4385 void *key)
1da177e4 4386{
63859d4f 4387 return try_to_wake_up(curr->private, mode, wake_flags);
1da177e4 4388}
1da177e4
LT
4389EXPORT_SYMBOL(default_wake_function);
4390
4391/*
41a2d6cf
IM
4392 * The core wakeup function. Non-exclusive wakeups (nr_exclusive == 0) just
4393 * wake everything up. If it's an exclusive wakeup (nr_exclusive == small +ve
1da177e4
LT
4394 * number) then we wake all the non-exclusive tasks and one exclusive task.
4395 *
4396 * There are circumstances in which we can try to wake a task which has already
41a2d6cf 4397 * started to run but is not in state TASK_RUNNING. try_to_wake_up() returns
1da177e4
LT
4398 * zero in this (rare) case, and we handle it by continuing to scan the queue.
4399 */
78ddb08f 4400static void __wake_up_common(wait_queue_head_t *q, unsigned int mode,
63859d4f 4401 int nr_exclusive, int wake_flags, void *key)
1da177e4 4402{
2e45874c 4403 wait_queue_t *curr, *next;
1da177e4 4404
2e45874c 4405 list_for_each_entry_safe(curr, next, &q->task_list, task_list) {
48f24c4d
IM
4406 unsigned flags = curr->flags;
4407
63859d4f 4408 if (curr->func(curr, mode, wake_flags, key) &&
48f24c4d 4409 (flags & WQ_FLAG_EXCLUSIVE) && !--nr_exclusive)
1da177e4
LT
4410 break;
4411 }
4412}
4413
4414/**
4415 * __wake_up - wake up threads blocked on a waitqueue.
4416 * @q: the waitqueue
4417 * @mode: which threads
4418 * @nr_exclusive: how many wake-one or wake-many threads to wake up
67be2dd1 4419 * @key: is directly passed to the wakeup function
50fa610a
DH
4420 *
4421 * It may be assumed that this function implies a write memory barrier before
4422 * changing the task state if and only if any tasks are woken up.
1da177e4 4423 */
7ad5b3a5 4424void __wake_up(wait_queue_head_t *q, unsigned int mode,
95cdf3b7 4425 int nr_exclusive, void *key)
1da177e4
LT
4426{
4427 unsigned long flags;
4428
4429 spin_lock_irqsave(&q->lock, flags);
4430 __wake_up_common(q, mode, nr_exclusive, 0, key);
4431 spin_unlock_irqrestore(&q->lock, flags);
4432}
1da177e4
LT
4433EXPORT_SYMBOL(__wake_up);
4434
4435/*
4436 * Same as __wake_up but called with the spinlock in wait_queue_head_t held.
4437 */
7ad5b3a5 4438void __wake_up_locked(wait_queue_head_t *q, unsigned int mode)
1da177e4
LT
4439{
4440 __wake_up_common(q, mode, 1, 0, NULL);
4441}
22c43c81 4442EXPORT_SYMBOL_GPL(__wake_up_locked);
1da177e4 4443
4ede816a
DL
4444void __wake_up_locked_key(wait_queue_head_t *q, unsigned int mode, void *key)
4445{
4446 __wake_up_common(q, mode, 1, 0, key);
4447}
bf294b41 4448EXPORT_SYMBOL_GPL(__wake_up_locked_key);
4ede816a 4449
1da177e4 4450/**
4ede816a 4451 * __wake_up_sync_key - wake up threads blocked on a waitqueue.
1da177e4
LT
4452 * @q: the waitqueue
4453 * @mode: which threads
4454 * @nr_exclusive: how many wake-one or wake-many threads to wake up
4ede816a 4455 * @key: opaque value to be passed to wakeup targets
1da177e4
LT
4456 *
4457 * The sync wakeup differs in that the waker knows that it will schedule
4458 * away soon, so while the target thread will be woken up, it will not
4459 * be migrated to another CPU - ie. the two threads are 'synchronized'
4460 * with each other. This can prevent needless bouncing between CPUs.
4461 *
4462 * On UP it can prevent extra preemption.
50fa610a
DH
4463 *
4464 * It may be assumed that this function implies a write memory barrier before
4465 * changing the task state if and only if any tasks are woken up.
1da177e4 4466 */
4ede816a
DL
4467void __wake_up_sync_key(wait_queue_head_t *q, unsigned int mode,
4468 int nr_exclusive, void *key)
1da177e4
LT
4469{
4470 unsigned long flags;
7d478721 4471 int wake_flags = WF_SYNC;
1da177e4
LT
4472
4473 if (unlikely(!q))
4474 return;
4475
4476 if (unlikely(!nr_exclusive))
7d478721 4477 wake_flags = 0;
1da177e4
LT
4478
4479 spin_lock_irqsave(&q->lock, flags);
7d478721 4480 __wake_up_common(q, mode, nr_exclusive, wake_flags, key);
1da177e4
LT
4481 spin_unlock_irqrestore(&q->lock, flags);
4482}
4ede816a
DL
4483EXPORT_SYMBOL_GPL(__wake_up_sync_key);
4484
4485/*
4486 * __wake_up_sync - see __wake_up_sync_key()
4487 */
4488void __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr_exclusive)
4489{
4490 __wake_up_sync_key(q, mode, nr_exclusive, NULL);
4491}
1da177e4
LT
4492EXPORT_SYMBOL_GPL(__wake_up_sync); /* For internal use only */
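
/*
 * Editor's illustrative sketch (not part of sched.c): the higher-level
 * wait_event_interruptible()/wake_up() pair from <linux/wait.h>, which
 * ends up in __wake_up() above.  example_wq and example_ready are
 * invented for the example.
 */
#if 0	/* example only */
static DECLARE_WAIT_QUEUE_HEAD(example_wq);
static int example_ready;

static int example_consumer(void)
{
	/* returns -ERESTARTSYS if interrupted by a signal */
	return wait_event_interruptible(example_wq, example_ready);
}

static void example_producer(void)
{
	example_ready = 1;
	wake_up(&example_wq);
}
#endif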
4493
65eb3dc6
KD
4494/**
4495 * complete: - signals a single thread waiting on this completion
4496 * @x: holds the state of this particular completion
4497 *
4498 * This will wake up a single thread waiting on this completion. Threads will be
4499 * awakened in the same order in which they were queued.
4500 *
4501 * See also complete_all(), wait_for_completion() and related routines.
50fa610a
DH
4502 *
4503 * It may be assumed that this function implies a write memory barrier before
4504 * changing the task state if and only if any tasks are woken up.
65eb3dc6 4505 */
b15136e9 4506void complete(struct completion *x)
1da177e4
LT
4507{
4508 unsigned long flags;
4509
4510 spin_lock_irqsave(&x->wait.lock, flags);
4511 x->done++;
d9514f6c 4512 __wake_up_common(&x->wait, TASK_NORMAL, 1, 0, NULL);
1da177e4
LT
4513 spin_unlock_irqrestore(&x->wait.lock, flags);
4514}
4515EXPORT_SYMBOL(complete);
4516
65eb3dc6
KD
4517/**
4518 * complete_all: - signals all threads waiting on this completion
4519 * @x: holds the state of this particular completion
4520 *
4521 * This will wake up all threads waiting on this particular completion event.
50fa610a
DH
4522 *
4523 * It may be assumed that this function implies a write memory barrier before
4524 * changing the task state if and only if any tasks are woken up.
65eb3dc6 4525 */
b15136e9 4526void complete_all(struct completion *x)
1da177e4
LT
4527{
4528 unsigned long flags;
4529
4530 spin_lock_irqsave(&x->wait.lock, flags);
4531 x->done += UINT_MAX/2;
d9514f6c 4532 __wake_up_common(&x->wait, TASK_NORMAL, 0, 0, NULL);
1da177e4
LT
4533 spin_unlock_irqrestore(&x->wait.lock, flags);
4534}
4535EXPORT_SYMBOL(complete_all);
4536
8cbbe86d
AK
4537static inline long __sched
4538do_wait_for_common(struct completion *x, long timeout, int state)
1da177e4 4539{
1da177e4
LT
4540 if (!x->done) {
4541 DECLARE_WAITQUEUE(wait, current);
4542
a93d2f17 4543 __add_wait_queue_tail_exclusive(&x->wait, &wait);
1da177e4 4544 do {
94d3d824 4545 if (signal_pending_state(state, current)) {
ea71a546
ON
4546 timeout = -ERESTARTSYS;
4547 break;
8cbbe86d
AK
4548 }
4549 __set_current_state(state);
1da177e4
LT
4550 spin_unlock_irq(&x->wait.lock);
4551 timeout = schedule_timeout(timeout);
4552 spin_lock_irq(&x->wait.lock);
ea71a546 4553 } while (!x->done && timeout);
1da177e4 4554 __remove_wait_queue(&x->wait, &wait);
ea71a546
ON
4555 if (!x->done)
4556 return timeout;
1da177e4
LT
4557 }
4558 x->done--;
ea71a546 4559 return timeout ?: 1;
1da177e4 4560}
1da177e4 4561
8cbbe86d
AK
4562static long __sched
4563wait_for_common(struct completion *x, long timeout, int state)
1da177e4 4564{
1da177e4
LT
4565 might_sleep();
4566
4567 spin_lock_irq(&x->wait.lock);
8cbbe86d 4568 timeout = do_wait_for_common(x, timeout, state);
1da177e4 4569 spin_unlock_irq(&x->wait.lock);
8cbbe86d
AK
4570 return timeout;
4571}
1da177e4 4572
65eb3dc6
KD
4573/**
4574 * wait_for_completion: - waits for completion of a task
4575 * @x: holds the state of this particular completion
4576 *
4577 * This waits to be signaled for completion of a specific task. It is NOT
4578 * interruptible and there is no timeout.
4579 *
4580 * See also similar routines (i.e. wait_for_completion_timeout()) with timeout
4581 * and interrupt capability. Also see complete().
4582 */
b15136e9 4583void __sched wait_for_completion(struct completion *x)
8cbbe86d
AK
4584{
4585 wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_UNINTERRUPTIBLE);
1da177e4 4586}
8cbbe86d 4587EXPORT_SYMBOL(wait_for_completion);
1da177e4 4588
65eb3dc6
KD
4589/**
4590 * wait_for_completion_timeout: - waits for completion of a task (w/timeout)
4591 * @x: holds the state of this particular completion
4592 * @timeout: timeout value in jiffies
4593 *
4594 * This waits for either a completion of a specific task to be signaled or for a
4595 * specified timeout to expire. The timeout is in jiffies. It is not
4596 * interruptible.
4597 */
b15136e9 4598unsigned long __sched
8cbbe86d 4599wait_for_completion_timeout(struct completion *x, unsigned long timeout)
1da177e4 4600{
8cbbe86d 4601 return wait_for_common(x, timeout, TASK_UNINTERRUPTIBLE);
1da177e4 4602}
8cbbe86d 4603EXPORT_SYMBOL(wait_for_completion_timeout);
1da177e4 4604
65eb3dc6
KD
4605/**
4606 * wait_for_completion_interruptible: - waits for completion of a task (w/intr)
4607 * @x: holds the state of this particular completion
4608 *
4609 * This waits for completion of a specific task to be signaled. It is
4610 * interruptible.
4611 */
8cbbe86d 4612int __sched wait_for_completion_interruptible(struct completion *x)
0fec171c 4613{
51e97990
AK
4614 long t = wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_INTERRUPTIBLE);
4615 if (t == -ERESTARTSYS)
4616 return t;
4617 return 0;
0fec171c 4618}
8cbbe86d 4619EXPORT_SYMBOL(wait_for_completion_interruptible);
1da177e4 4620
65eb3dc6
KD
4621/**
4622 * wait_for_completion_interruptible_timeout: - waits for completion (w/(to,intr))
4623 * @x: holds the state of this particular completion
4624 * @timeout: timeout value in jiffies
4625 *
4626 * This waits for either a completion of a specific task to be signaled or for a
4627 * specified timeout to expire. It is interruptible. The timeout is in jiffies.
4628 */
6bf41237 4629long __sched
8cbbe86d
AK
4630wait_for_completion_interruptible_timeout(struct completion *x,
4631 unsigned long timeout)
0fec171c 4632{
8cbbe86d 4633 return wait_for_common(x, timeout, TASK_INTERRUPTIBLE);
0fec171c 4634}
8cbbe86d 4635EXPORT_SYMBOL(wait_for_completion_interruptible_timeout);
1da177e4 4636
65eb3dc6
KD
4637/**
4638 * wait_for_completion_killable: - waits for completion of a task (killable)
4639 * @x: holds the state of this particular completion
4640 *
4641 * This waits to be signaled for completion of a specific task. It can be
4642 * interrupted by a kill signal.
4643 */
009e577e
MW
4644int __sched wait_for_completion_killable(struct completion *x)
4645{
4646 long t = wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_KILLABLE);
4647 if (t == -ERESTARTSYS)
4648 return t;
4649 return 0;
4650}
4651EXPORT_SYMBOL(wait_for_completion_killable);
4652
0aa12fb4
SW
4653/**
4654 * wait_for_completion_killable_timeout: - waits for completion of a task (w/(to,killable))
4655 * @x: holds the state of this particular completion
4656 * @timeout: timeout value in jiffies
4657 *
4658 * This waits for either a completion of a specific task to be
4659 * signaled or for a specified timeout to expire. It can be
4660 * interrupted by a kill signal. The timeout is in jiffies.
4661 */
6bf41237 4662long __sched
0aa12fb4
SW
4663wait_for_completion_killable_timeout(struct completion *x,
4664 unsigned long timeout)
4665{
4666 return wait_for_common(x, timeout, TASK_KILLABLE);
4667}
4668EXPORT_SYMBOL(wait_for_completion_killable_timeout);
4669
be4de352
DC
4670/**
4671 * try_wait_for_completion - try to decrement a completion without blocking
4672 * @x: completion structure
4673 *
4674 * Returns: 0 if a decrement cannot be done without blocking
4675 * 1 if a decrement succeeded.
4676 *
4677 * If a completion is being used as a counting completion,
4678 * attempt to decrement the counter without blocking. This
4679 * enables us to avoid waiting if the resource the completion
4680 * is protecting is not available.
4681 */
4682bool try_wait_for_completion(struct completion *x)
4683{
7539a3b3 4684 unsigned long flags;
be4de352
DC
4685 int ret = 1;
4686
7539a3b3 4687 spin_lock_irqsave(&x->wait.lock, flags);
be4de352
DC
4688 if (!x->done)
4689 ret = 0;
4690 else
4691 x->done--;
7539a3b3 4692 spin_unlock_irqrestore(&x->wait.lock, flags);
be4de352
DC
4693 return ret;
4694}
4695EXPORT_SYMBOL(try_wait_for_completion);
4696
4697/**
4698 * completion_done - Test to see if a completion has any waiters
4699 * @x: completion structure
4700 *
4701 * Returns: 0 if there are waiters (wait_for_completion() in progress)
4702 * 1 if there are no waiters.
4703 *
4704 */
4705bool completion_done(struct completion *x)
4706{
7539a3b3 4707 unsigned long flags;
be4de352
DC
4708 int ret = 1;
4709
7539a3b3 4710 spin_lock_irqsave(&x->wait.lock, flags);
be4de352
DC
4711 if (!x->done)
4712 ret = 0;
7539a3b3 4713 spin_unlock_irqrestore(&x->wait.lock, flags);
be4de352
DC
4714 return ret;
4715}
4716EXPORT_SYMBOL(completion_done);
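
/*
 * Editor's illustrative sketch (not part of sched.c): typical use of the
 * completion API above for waiting until a worker has finished.
 * example_done and the worker function are invented for the example.
 */
#if 0	/* example only */
static DECLARE_COMPLETION(example_done);

static int example_worker(void *unused)
{
	/* ... do the actual work ... */
	complete(&example_done);		/* wake exactly one waiter */
	return 0;
}

static void example_wait_for_worker(void)
{
	/* uninterruptible; see the _timeout/_killable variants above */
	wait_for_completion(&example_done);
}
#endif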
4717
8cbbe86d
AK
4718static long __sched
4719sleep_on_common(wait_queue_head_t *q, int state, long timeout)
1da177e4 4720{
0fec171c
IM
4721 unsigned long flags;
4722 wait_queue_t wait;
4723
4724 init_waitqueue_entry(&wait, current);
1da177e4 4725
8cbbe86d 4726 __set_current_state(state);
1da177e4 4727
8cbbe86d
AK
4728 spin_lock_irqsave(&q->lock, flags);
4729 __add_wait_queue(q, &wait);
4730 spin_unlock(&q->lock);
4731 timeout = schedule_timeout(timeout);
4732 spin_lock_irq(&q->lock);
4733 __remove_wait_queue(q, &wait);
4734 spin_unlock_irqrestore(&q->lock, flags);
4735
4736 return timeout;
4737}
4738
4739void __sched interruptible_sleep_on(wait_queue_head_t *q)
4740{
4741 sleep_on_common(q, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
1da177e4 4742}
1da177e4
LT
4743EXPORT_SYMBOL(interruptible_sleep_on);
4744
0fec171c 4745long __sched
95cdf3b7 4746interruptible_sleep_on_timeout(wait_queue_head_t *q, long timeout)
1da177e4 4747{
8cbbe86d 4748 return sleep_on_common(q, TASK_INTERRUPTIBLE, timeout);
1da177e4 4749}
1da177e4
LT
4750EXPORT_SYMBOL(interruptible_sleep_on_timeout);
4751
0fec171c 4752void __sched sleep_on(wait_queue_head_t *q)
1da177e4 4753{
8cbbe86d 4754 sleep_on_common(q, TASK_UNINTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
1da177e4 4755}
1da177e4
LT
4756EXPORT_SYMBOL(sleep_on);
4757
0fec171c 4758long __sched sleep_on_timeout(wait_queue_head_t *q, long timeout)
1da177e4 4759{
8cbbe86d 4760 return sleep_on_common(q, TASK_UNINTERRUPTIBLE, timeout);
1da177e4 4761}
1da177e4
LT
4762EXPORT_SYMBOL(sleep_on_timeout);
4763
b29739f9
IM
4764#ifdef CONFIG_RT_MUTEXES
4765
4766/*
4767 * rt_mutex_setprio - set the current priority of a task
4768 * @p: task
4769 * @prio: prio value (kernel-internal form)
4770 *
4771 * This function changes the 'effective' priority of a task. It does
4772 * not touch ->normal_prio like __setscheduler().
4773 *
4774 * Used by the rt_mutex code to implement priority inheritance logic.
4775 */
36c8b586 4776void rt_mutex_setprio(struct task_struct *p, int prio)
b29739f9 4777{
83b699ed 4778 int oldprio, on_rq, running;
70b97a7f 4779 struct rq *rq;
83ab0aa0 4780 const struct sched_class *prev_class;
b29739f9
IM
4781
4782 BUG_ON(prio < 0 || prio > MAX_PRIO);
4783
0122ec5b 4784 rq = __task_rq_lock(p);
b29739f9 4785
a8027073 4786 trace_sched_pi_setprio(p, prio);
d5f9f942 4787 oldprio = p->prio;
83ab0aa0 4788 prev_class = p->sched_class;
fd2f4419 4789 on_rq = p->on_rq;
051a1d1a 4790 running = task_current(rq, p);
0e1f3483 4791 if (on_rq)
69be72c1 4792 dequeue_task(rq, p, 0);
0e1f3483
HS
4793 if (running)
4794 p->sched_class->put_prev_task(rq, p);
dd41f596
IM
4795
4796 if (rt_prio(prio))
4797 p->sched_class = &rt_sched_class;
4798 else
4799 p->sched_class = &fair_sched_class;
4800
b29739f9
IM
4801 p->prio = prio;
4802
0e1f3483
HS
4803 if (running)
4804 p->sched_class->set_curr_task(rq);
da7a735e 4805 if (on_rq)
371fd7e7 4806 enqueue_task(rq, p, oldprio < prio ? ENQUEUE_HEAD : 0);
cb469845 4807
da7a735e 4808 check_class_changed(rq, p, prev_class, oldprio);
0122ec5b 4809 __task_rq_unlock(rq);
b29739f9
IM
4810}
4811
4812#endif
4813
36c8b586 4814void set_user_nice(struct task_struct *p, long nice)
1da177e4 4815{
dd41f596 4816 int old_prio, delta, on_rq;
1da177e4 4817 unsigned long flags;
70b97a7f 4818 struct rq *rq;
1da177e4
LT
4819
4820 if (TASK_NICE(p) == nice || nice < -20 || nice > 19)
4821 return;
4822 /*
4823 * We have to be careful, if called from sys_setpriority(),
4824 * the task might be in the middle of scheduling on another CPU.
4825 */
4826 rq = task_rq_lock(p, &flags);
4827 /*
4828 * The RT priorities are set via sched_setscheduler(), but we still
4829 * allow the 'normal' nice value to be set - but as expected
4830 * it won't have any effect on scheduling while the task remains
dd41f596 4831 * SCHED_FIFO/SCHED_RR:
1da177e4 4832 */
e05606d3 4833 if (task_has_rt_policy(p)) {
1da177e4
LT
4834 p->static_prio = NICE_TO_PRIO(nice);
4835 goto out_unlock;
4836 }
fd2f4419 4837 on_rq = p->on_rq;
c09595f6 4838 if (on_rq)
69be72c1 4839 dequeue_task(rq, p, 0);
1da177e4 4840
1da177e4 4841 p->static_prio = NICE_TO_PRIO(nice);
2dd73a4f 4842 set_load_weight(p);
b29739f9
IM
4843 old_prio = p->prio;
4844 p->prio = effective_prio(p);
4845 delta = p->prio - old_prio;
1da177e4 4846
dd41f596 4847 if (on_rq) {
371fd7e7 4848 enqueue_task(rq, p, 0);
1da177e4 4849 /*
d5f9f942
AM
4850 * If the task increased its priority or is running and
4851 * lowered its priority, then reschedule its CPU:
1da177e4 4852 */
d5f9f942 4853 if (delta < 0 || (delta > 0 && task_running(rq, p)))
1da177e4
LT
4854 resched_task(rq->curr);
4855 }
4856out_unlock:
0122ec5b 4857 task_rq_unlock(rq, p, &flags);
1da177e4 4858}
1da177e4
LT
4859EXPORT_SYMBOL(set_user_nice);
4860
e43379f1
MM
4861/*
4862 * can_nice - check if a task can reduce its nice value
4863 * @p: task
4864 * @nice: nice value
4865 */
36c8b586 4866int can_nice(const struct task_struct *p, const int nice)
e43379f1 4867{
024f4747
MM
4868 /* convert nice value [19,-20] to rlimit style value [1,40] */
4869 int nice_rlim = 20 - nice;
48f24c4d 4870
78d7d407 4871 return (nice_rlim <= task_rlimit(p, RLIMIT_NICE) ||
e43379f1
MM
4872 capable(CAP_SYS_NICE));
4873}
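
/*
 * Editor's note (worked example, not part of the original source): with
 * nice = -5 the rlimit-style value is 20 - (-5) = 25, so an unprivileged
 * task may apply it only if RLIMIT_NICE is at least 25 (or it has
 * CAP_SYS_NICE).
 */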
4874
1da177e4
LT
4875#ifdef __ARCH_WANT_SYS_NICE
4876
4877/*
4878 * sys_nice - change the priority of the current process.
4879 * @increment: priority increment
4880 *
4881 * sys_setpriority is a more generic, but much slower function that
4882 * does similar things.
4883 */
5add95d4 4884SYSCALL_DEFINE1(nice, int, increment)
1da177e4 4885{
48f24c4d 4886 long nice, retval;
1da177e4
LT
4887
4888 /*
4889 * Setpriority might change our priority at the same moment.
4890 * We don't have to worry. Conceptually one call occurs first
4891 * and we have a single winner.
4892 */
e43379f1
MM
4893 if (increment < -40)
4894 increment = -40;
1da177e4
LT
4895 if (increment > 40)
4896 increment = 40;
4897
2b8f836f 4898 nice = TASK_NICE(current) + increment;
1da177e4
LT
4899 if (nice < -20)
4900 nice = -20;
4901 if (nice > 19)
4902 nice = 19;
4903
e43379f1
MM
4904 if (increment < 0 && !can_nice(current, nice))
4905 return -EPERM;
4906
1da177e4
LT
4907 retval = security_task_setnice(current, nice);
4908 if (retval)
4909 return retval;
4910
4911 set_user_nice(current, nice);
4912 return 0;
4913}
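
/*
 * Editor's note (worked example, not part of the original source): a task
 * at nice 0 calling sys_nice(-15) ends up requesting nice -15; since the
 * increment is negative, can_nice() requires RLIMIT_NICE >= 35 (i.e.
 * 20 - (-15)) or CAP_SYS_NICE before set_user_nice() is called.
 */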
4914
4915#endif
4916
4917/**
4918 * task_prio - return the priority value of a given task.
4919 * @p: the task in question.
4920 *
4921 * This is the priority value as seen by users in /proc.
4922 * RT tasks are offset by -200. Normal tasks are centered
4923 * around 0, value goes from -16 to +15.
4924 */
36c8b586 4925int task_prio(const struct task_struct *p)
1da177e4
LT
4926{
4927 return p->prio - MAX_RT_PRIO;
4928}
4929
4930/**
4931 * task_nice - return the nice value of a given task.
4932 * @p: the task in question.
4933 */
36c8b586 4934int task_nice(const struct task_struct *p)
1da177e4
LT
4935{
4936 return TASK_NICE(p);
4937}
150d8bed 4938EXPORT_SYMBOL(task_nice);
1da177e4
LT
4939
4940/**
4941 * idle_cpu - is a given cpu idle currently?
4942 * @cpu: the processor in question.
4943 */
4944int idle_cpu(int cpu)
4945{
4946 return cpu_curr(cpu) == cpu_rq(cpu)->idle;
4947}
4948
1da177e4
LT
4949/**
4950 * idle_task - return the idle task for a given cpu.
4951 * @cpu: the processor in question.
4952 */
36c8b586 4953struct task_struct *idle_task(int cpu)
1da177e4
LT
4954{
4955 return cpu_rq(cpu)->idle;
4956}
4957
4958/**
4959 * find_process_by_pid - find a process with a matching PID value.
4960 * @pid: the pid in question.
4961 */
a9957449 4962static struct task_struct *find_process_by_pid(pid_t pid)
1da177e4 4963{
228ebcbe 4964 return pid ? find_task_by_vpid(pid) : current;
1da177e4
LT
4965}
4966
4967/* Actually do priority change: must hold rq lock. */
dd41f596
IM
4968static void
4969__setscheduler(struct rq *rq, struct task_struct *p, int policy, int prio)
1da177e4 4970{
1da177e4
LT
4971 p->policy = policy;
4972 p->rt_priority = prio;
b29739f9
IM
4973 p->normal_prio = normal_prio(p);
4974 /* we are holding p->pi_lock already */
4975 p->prio = rt_mutex_getprio(p);
ffd44db5
PZ
4976 if (rt_prio(p->prio))
4977 p->sched_class = &rt_sched_class;
4978 else
4979 p->sched_class = &fair_sched_class;
2dd73a4f 4980 set_load_weight(p);
1da177e4
LT
4981}
4982
c69e8d9c
DH
4983/*
4984 * check the target process has a UID that matches the current process's
4985 */
4986static bool check_same_owner(struct task_struct *p)
4987{
4988 const struct cred *cred = current_cred(), *pcred;
4989 bool match;
4990
4991 rcu_read_lock();
4992 pcred = __task_cred(p);
b0e77598
SH
4993 if (cred->user->user_ns == pcred->user->user_ns)
4994 match = (cred->euid == pcred->euid ||
4995 cred->euid == pcred->uid);
4996 else
4997 match = false;
c69e8d9c
DH
4998 rcu_read_unlock();
4999 return match;
5000}
5001
961ccddd 5002static int __sched_setscheduler(struct task_struct *p, int policy,
fe7de49f 5003 const struct sched_param *param, bool user)
1da177e4 5004{
83b699ed 5005 int retval, oldprio, oldpolicy = -1, on_rq, running;
1da177e4 5006 unsigned long flags;
83ab0aa0 5007 const struct sched_class *prev_class;
70b97a7f 5008 struct rq *rq;
ca94c442 5009 int reset_on_fork;
1da177e4 5010
66e5393a
SR
5011 /* may grab non-irq protected spin_locks */
5012 BUG_ON(in_interrupt());
1da177e4
LT
5013recheck:
5014 /* double check policy once rq lock held */
ca94c442
LP
5015 if (policy < 0) {
5016 reset_on_fork = p->sched_reset_on_fork;
1da177e4 5017 policy = oldpolicy = p->policy;
ca94c442
LP
5018 } else {
5019 reset_on_fork = !!(policy & SCHED_RESET_ON_FORK);
5020 policy &= ~SCHED_RESET_ON_FORK;
5021
5022 if (policy != SCHED_FIFO && policy != SCHED_RR &&
5023 policy != SCHED_NORMAL && policy != SCHED_BATCH &&
5024 policy != SCHED_IDLE)
5025 return -EINVAL;
5026 }
5027
1da177e4
LT
5028 /*
5029 * Valid priorities for SCHED_FIFO and SCHED_RR are
dd41f596
IM
5030 * 1..MAX_USER_RT_PRIO-1, valid priority for SCHED_NORMAL,
5031 * SCHED_BATCH and SCHED_IDLE is 0.
1da177e4
LT
5032 */
5033 if (param->sched_priority < 0 ||
95cdf3b7 5034 (p->mm && param->sched_priority > MAX_USER_RT_PRIO-1) ||
d46523ea 5035 (!p->mm && param->sched_priority > MAX_RT_PRIO-1))
1da177e4 5036 return -EINVAL;
e05606d3 5037 if (rt_policy(policy) != (param->sched_priority != 0))
1da177e4
LT
5038 return -EINVAL;
5039
37e4ab3f
OC
5040 /*
5041 * Allow unprivileged RT tasks to decrease priority:
5042 */
961ccddd 5043 if (user && !capable(CAP_SYS_NICE)) {
e05606d3 5044 if (rt_policy(policy)) {
a44702e8
ON
5045 unsigned long rlim_rtprio =
5046 task_rlimit(p, RLIMIT_RTPRIO);
8dc3e909
ON
5047
5048 /* can't set/change the rt policy */
5049 if (policy != p->policy && !rlim_rtprio)
5050 return -EPERM;
5051
5052 /* can't increase priority */
5053 if (param->sched_priority > p->rt_priority &&
5054 param->sched_priority > rlim_rtprio)
5055 return -EPERM;
5056 }
c02aa73b 5057
dd41f596 5058 /*
c02aa73b
DH
5059 * Treat SCHED_IDLE as nice 20. Only allow a switch to
5060 * SCHED_NORMAL if the RLIMIT_NICE would normally permit it.
dd41f596 5061 */
c02aa73b
DH
5062 if (p->policy == SCHED_IDLE && policy != SCHED_IDLE) {
5063 if (!can_nice(p, TASK_NICE(p)))
5064 return -EPERM;
5065 }
5fe1d75f 5066
37e4ab3f 5067 /* can't change other user's priorities */
c69e8d9c 5068 if (!check_same_owner(p))
37e4ab3f 5069 return -EPERM;
ca94c442
LP
5070
5071 /* Normal users shall not reset the sched_reset_on_fork flag */
5072 if (p->sched_reset_on_fork && !reset_on_fork)
5073 return -EPERM;
37e4ab3f 5074 }
1da177e4 5075
725aad24 5076 if (user) {
b0ae1981 5077 retval = security_task_setscheduler(p);
725aad24
JF
5078 if (retval)
5079 return retval;
5080 }
5081
b29739f9
IM
5082 /*
5083 * make sure no PI-waiters arrive (or leave) while we are
5084 * changing the priority of the task:
0122ec5b 5085 *
25985edc 5086 * To be able to change p->policy safely, the appropriate
1da177e4
LT
5087 * runqueue lock must be held.
5088 */
0122ec5b 5089 rq = task_rq_lock(p, &flags);
dc61b1d6 5090
34f971f6
PZ
5091 /*
5092 * Changing the policy of the stop threads is a very bad idea
5093 */
5094 if (p == rq->stop) {
0122ec5b 5095 task_rq_unlock(rq, p, &flags);
34f971f6
PZ
5096 return -EINVAL;
5097 }
5098
a51e9198
DF
5099 /*
5100 * If not changing anything there's no need to proceed further:
5101 */
5102 if (unlikely(policy == p->policy && (!rt_policy(policy) ||
5103 param->sched_priority == p->rt_priority))) {
5104
5105 __task_rq_unlock(rq);
5106 raw_spin_unlock_irqrestore(&p->pi_lock, flags);
5107 return 0;
5108 }
5109
dc61b1d6
PZ
5110#ifdef CONFIG_RT_GROUP_SCHED
5111 if (user) {
5112 /*
5113 * Do not allow realtime tasks into groups that have no runtime
5114 * assigned.
5115 */
5116 if (rt_bandwidth_enabled() && rt_policy(policy) &&
f4493771
MG
5117 task_group(p)->rt_bandwidth.rt_runtime == 0 &&
5118 !task_group_is_autogroup(task_group(p))) {
0122ec5b 5119 task_rq_unlock(rq, p, &flags);
dc61b1d6
PZ
5120 return -EPERM;
5121 }
5122 }
5123#endif
5124
1da177e4
LT
5125 /* recheck policy now with rq lock held */
5126 if (unlikely(oldpolicy != -1 && oldpolicy != p->policy)) {
5127 policy = oldpolicy = -1;
0122ec5b 5128 task_rq_unlock(rq, p, &flags);
1da177e4
LT
5129 goto recheck;
5130 }
fd2f4419 5131 on_rq = p->on_rq;
051a1d1a 5132 running = task_current(rq, p);
0e1f3483 5133 if (on_rq)
2e1cb74a 5134 deactivate_task(rq, p, 0);
0e1f3483
HS
5135 if (running)
5136 p->sched_class->put_prev_task(rq, p);
f6b53205 5137
ca94c442
LP
5138 p->sched_reset_on_fork = reset_on_fork;
5139
1da177e4 5140 oldprio = p->prio;
83ab0aa0 5141 prev_class = p->sched_class;
dd41f596 5142 __setscheduler(rq, p, policy, param->sched_priority);
f6b53205 5143
0e1f3483
HS
5144 if (running)
5145 p->sched_class->set_curr_task(rq);
da7a735e 5146 if (on_rq)
dd41f596 5147 activate_task(rq, p, 0);
cb469845 5148
da7a735e 5149 check_class_changed(rq, p, prev_class, oldprio);
0122ec5b 5150 task_rq_unlock(rq, p, &flags);
b29739f9 5151
95e02ca9
TG
5152 rt_mutex_adjust_pi(p);
5153
1da177e4
LT
5154 return 0;
5155}
961ccddd
RR
5156
5157/**
5158 * sched_setscheduler - change the scheduling policy and/or RT priority of a thread.
5159 * @p: the task in question.
5160 * @policy: new policy.
5161 * @param: structure containing the new RT priority.
5162 *
5163 * NOTE that the task may already be dead.
5164 */
5165int sched_setscheduler(struct task_struct *p, int policy,
fe7de49f 5166 const struct sched_param *param)
961ccddd
RR
5167{
5168 return __sched_setscheduler(p, policy, param, true);
5169}
1da177e4
LT
5170EXPORT_SYMBOL_GPL(sched_setscheduler);
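
/*
 * Editor's illustrative sketch (not part of sched.c): in-kernel use of
 * sched_setscheduler(), e.g. promoting a kthread to SCHED_FIFO.  The
 * priority value 50 and the function name are invented for the example.
 */
#if 0	/* example only */
static void example_make_fifo(struct task_struct *tsk)
{
	struct sched_param param = { .sched_priority = 50 };

	if (sched_setscheduler(tsk, SCHED_FIFO, &param))
		printk(KERN_WARNING "example: could not switch to SCHED_FIFO\n");
}
#endif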
5171
961ccddd
RR
5172/**
5173 * sched_setscheduler_nocheck - change the scheduling policy and/or RT priority of a thread from kernelspace.
5174 * @p: the task in question.
5175 * @policy: new policy.
5176 * @param: structure containing the new RT priority.
5177 *
5178 * Just like sched_setscheduler, only don't bother checking if the
5179 * current context has permission. For example, this is needed in
5180 * stop_machine(): we create temporary high priority worker threads,
5181 * but our caller might not have that capability.
5182 */
5183int sched_setscheduler_nocheck(struct task_struct *p, int policy,
fe7de49f 5184 const struct sched_param *param)
961ccddd
RR
5185{
5186 return __sched_setscheduler(p, policy, param, false);
5187}
5188
95cdf3b7
IM
5189static int
5190do_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param)
1da177e4 5191{
1da177e4
LT
5192 struct sched_param lparam;
5193 struct task_struct *p;
36c8b586 5194 int retval;
1da177e4
LT
5195
5196 if (!param || pid < 0)
5197 return -EINVAL;
5198 if (copy_from_user(&lparam, param, sizeof(struct sched_param)))
5199 return -EFAULT;
5fe1d75f
ON
5200
5201 rcu_read_lock();
5202 retval = -ESRCH;
1da177e4 5203 p = find_process_by_pid(pid);
5fe1d75f
ON
5204 if (p != NULL)
5205 retval = sched_setscheduler(p, policy, &lparam);
5206 rcu_read_unlock();
36c8b586 5207
1da177e4
LT
5208 return retval;
5209}
5210
5211/**
5212 * sys_sched_setscheduler - set/change the scheduler policy and RT priority
5213 * @pid: the pid in question.
5214 * @policy: new policy.
5215 * @param: structure containing the new RT priority.
5216 */
5add95d4
HC
5217SYSCALL_DEFINE3(sched_setscheduler, pid_t, pid, int, policy,
5218 struct sched_param __user *, param)
1da177e4 5219{
c21761f1
JB
5220 /* negative values for policy are not valid */
5221 if (policy < 0)
5222 return -EINVAL;
5223
1da177e4
LT
5224 return do_sched_setscheduler(pid, policy, param);
5225}
5226
5227/**
5228 * sys_sched_setparam - set/change the RT priority of a thread
5229 * @pid: the pid in question.
5230 * @param: structure containing the new RT priority.
5231 */
5add95d4 5232SYSCALL_DEFINE2(sched_setparam, pid_t, pid, struct sched_param __user *, param)
1da177e4
LT
5233{
5234 return do_sched_setscheduler(pid, -1, param);
5235}
5236
5237/**
5238 * sys_sched_getscheduler - get the policy (scheduling class) of a thread
5239 * @pid: the pid in question.
5240 */
5add95d4 5241SYSCALL_DEFINE1(sched_getscheduler, pid_t, pid)
1da177e4 5242{
36c8b586 5243 struct task_struct *p;
3a5c359a 5244 int retval;
1da177e4
LT
5245
5246 if (pid < 0)
3a5c359a 5247 return -EINVAL;
1da177e4
LT
5248
5249 retval = -ESRCH;
5fe85be0 5250 rcu_read_lock();
1da177e4
LT
5251 p = find_process_by_pid(pid);
5252 if (p) {
5253 retval = security_task_getscheduler(p);
5254 if (!retval)
ca94c442
LP
5255 retval = p->policy
5256 | (p->sched_reset_on_fork ? SCHED_RESET_ON_FORK : 0);
1da177e4 5257 }
5fe85be0 5258 rcu_read_unlock();
1da177e4
LT
5259 return retval;
5260}
5261
5262/**
ca94c442 5263 * sys_sched_getparam - get the RT priority of a thread
1da177e4
LT
5264 * @pid: the pid in question.
5265 * @param: structure containing the RT priority.
5266 */
5add95d4 5267SYSCALL_DEFINE2(sched_getparam, pid_t, pid, struct sched_param __user *, param)
1da177e4
LT
5268{
5269 struct sched_param lp;
36c8b586 5270 struct task_struct *p;
3a5c359a 5271 int retval;
1da177e4
LT
5272
5273 if (!param || pid < 0)
3a5c359a 5274 return -EINVAL;
1da177e4 5275
5fe85be0 5276 rcu_read_lock();
1da177e4
LT
5277 p = find_process_by_pid(pid);
5278 retval = -ESRCH;
5279 if (!p)
5280 goto out_unlock;
5281
5282 retval = security_task_getscheduler(p);
5283 if (retval)
5284 goto out_unlock;
5285
5286 lp.sched_priority = p->rt_priority;
5fe85be0 5287 rcu_read_unlock();
1da177e4
LT
5288
5289 /*
5290 * This one might sleep, we cannot do it with a spinlock held ...
5291 */
5292 retval = copy_to_user(param, &lp, sizeof(*param)) ? -EFAULT : 0;
5293
1da177e4
LT
5294 return retval;
5295
5296out_unlock:
5fe85be0 5297 rcu_read_unlock();
1da177e4
LT
5298 return retval;
5299}
5300
96f874e2 5301long sched_setaffinity(pid_t pid, const struct cpumask *in_mask)
1da177e4 5302{
5a16f3d3 5303 cpumask_var_t cpus_allowed, new_mask;
36c8b586
IM
5304 struct task_struct *p;
5305 int retval;
1da177e4 5306
95402b38 5307 get_online_cpus();
23f5d142 5308 rcu_read_lock();
1da177e4
LT
5309
5310 p = find_process_by_pid(pid);
5311 if (!p) {
23f5d142 5312 rcu_read_unlock();
95402b38 5313 put_online_cpus();
1da177e4
LT
5314 return -ESRCH;
5315 }
5316
23f5d142 5317 /* Prevent p going away */
1da177e4 5318 get_task_struct(p);
23f5d142 5319 rcu_read_unlock();
1da177e4 5320
5a16f3d3
RR
5321 if (!alloc_cpumask_var(&cpus_allowed, GFP_KERNEL)) {
5322 retval = -ENOMEM;
5323 goto out_put_task;
5324 }
5325 if (!alloc_cpumask_var(&new_mask, GFP_KERNEL)) {
5326 retval = -ENOMEM;
5327 goto out_free_cpus_allowed;
5328 }
1da177e4 5329 retval = -EPERM;
b0e77598 5330 if (!check_same_owner(p) && !task_ns_capable(p, CAP_SYS_NICE))
1da177e4
LT
5331 goto out_unlock;
5332
b0ae1981 5333 retval = security_task_setscheduler(p);
e7834f8f
DQ
5334 if (retval)
5335 goto out_unlock;
5336
5a16f3d3
RR
5337 cpuset_cpus_allowed(p, cpus_allowed);
5338 cpumask_and(new_mask, in_mask, cpus_allowed);
49246274 5339again:
5a16f3d3 5340 retval = set_cpus_allowed_ptr(p, new_mask);
1da177e4 5341
8707d8b8 5342 if (!retval) {
5a16f3d3
RR
5343 cpuset_cpus_allowed(p, cpus_allowed);
5344 if (!cpumask_subset(new_mask, cpus_allowed)) {
8707d8b8
PM
5345 /*
5346 * We must have raced with a concurrent cpuset
5347 * update. Just reset the cpus_allowed to the
5348 * cpuset's cpus_allowed.
5349 */
5a16f3d3 5350 cpumask_copy(new_mask, cpus_allowed);
8707d8b8
PM
5351 goto again;
5352 }
5353 }
1da177e4 5354out_unlock:
5a16f3d3
RR
5355 free_cpumask_var(new_mask);
5356out_free_cpus_allowed:
5357 free_cpumask_var(cpus_allowed);
5358out_put_task:
1da177e4 5359 put_task_struct(p);
95402b38 5360 put_online_cpus();
1da177e4
LT
5361 return retval;
5362}
5363
5364static int get_user_cpu_mask(unsigned long __user *user_mask_ptr, unsigned len,
96f874e2 5365 struct cpumask *new_mask)
1da177e4 5366{
96f874e2
RR
5367 if (len < cpumask_size())
5368 cpumask_clear(new_mask);
5369 else if (len > cpumask_size())
5370 len = cpumask_size();
5371
1da177e4
LT
5372 return copy_from_user(new_mask, user_mask_ptr, len) ? -EFAULT : 0;
5373}
5374
5375/**
5376 * sys_sched_setaffinity - set the cpu affinity of a process
5377 * @pid: pid of the process
5378 * @len: length in bytes of the bitmask pointed to by user_mask_ptr
5379 * @user_mask_ptr: user-space pointer to the new cpu mask
5380 */
5add95d4
HC
5381SYSCALL_DEFINE3(sched_setaffinity, pid_t, pid, unsigned int, len,
5382 unsigned long __user *, user_mask_ptr)
1da177e4 5383{
5a16f3d3 5384 cpumask_var_t new_mask;
1da177e4
LT
5385 int retval;
5386
5a16f3d3
RR
5387 if (!alloc_cpumask_var(&new_mask, GFP_KERNEL))
5388 return -ENOMEM;
1da177e4 5389
5a16f3d3
RR
5390 retval = get_user_cpu_mask(user_mask_ptr, len, new_mask);
5391 if (retval == 0)
5392 retval = sched_setaffinity(pid, new_mask);
5393 free_cpumask_var(new_mask);
5394 return retval;
1da177e4
LT
5395}
5396
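/*
 * Illustrative userspace sketch (not kernel code): pinning the calling
 * thread to CPU 0 through the glibc wrapper; the CPU number is an
 * arbitrary assumption.
 *
 *	#define _GNU_SOURCE
 *	#include <sched.h>
 *
 *	cpu_set_t set;
 *
 *	CPU_ZERO(&set);
 *	CPU_SET(0, &set);
 *	if (sched_setaffinity(0, sizeof(set), &set) == -1)
 *		perror("sched_setaffinity");
 */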
96f874e2 5397long sched_getaffinity(pid_t pid, struct cpumask *mask)
1da177e4 5398{
36c8b586 5399 struct task_struct *p;
31605683 5400 unsigned long flags;
1da177e4 5401 int retval;
1da177e4 5402
95402b38 5403 get_online_cpus();
23f5d142 5404 rcu_read_lock();
1da177e4
LT
5405
5406 retval = -ESRCH;
5407 p = find_process_by_pid(pid);
5408 if (!p)
5409 goto out_unlock;
5410
e7834f8f
DQ
5411 retval = security_task_getscheduler(p);
5412 if (retval)
5413 goto out_unlock;
5414
013fdb80 5415 raw_spin_lock_irqsave(&p->pi_lock, flags);
96f874e2 5416 cpumask_and(mask, &p->cpus_allowed, cpu_online_mask);
013fdb80 5417 raw_spin_unlock_irqrestore(&p->pi_lock, flags);
1da177e4
LT
5418
5419out_unlock:
23f5d142 5420 rcu_read_unlock();
95402b38 5421 put_online_cpus();
1da177e4 5422
9531b62f 5423 return retval;
1da177e4
LT
5424}
5425
5426/**
5427 * sys_sched_getaffinity - get the cpu affinity of a process
5428 * @pid: pid of the process
5429 * @len: length in bytes of the bitmask pointed to by user_mask_ptr
5430 * @user_mask_ptr: user-space pointer to hold the current cpu mask
5431 */
5add95d4
HC
5432SYSCALL_DEFINE3(sched_getaffinity, pid_t, pid, unsigned int, len,
5433 unsigned long __user *, user_mask_ptr)
1da177e4
LT
5434{
5435 int ret;
f17c8607 5436 cpumask_var_t mask;
1da177e4 5437
84fba5ec 5438 if ((len * BITS_PER_BYTE) < nr_cpu_ids)
cd3d8031
KM
5439 return -EINVAL;
5440 if (len & (sizeof(unsigned long)-1))
1da177e4
LT
5441 return -EINVAL;
5442
f17c8607
RR
5443 if (!alloc_cpumask_var(&mask, GFP_KERNEL))
5444 return -ENOMEM;
1da177e4 5445
f17c8607
RR
5446 ret = sched_getaffinity(pid, mask);
5447 if (ret == 0) {
8bc037fb 5448 size_t retlen = min_t(size_t, len, cpumask_size());
cd3d8031
KM
5449
5450 if (copy_to_user(user_mask_ptr, mask, retlen))
f17c8607
RR
5451 ret = -EFAULT;
5452 else
cd3d8031 5453 ret = retlen;
f17c8607
RR
5454 }
5455 free_cpumask_var(mask);
1da177e4 5456
f17c8607 5457 return ret;
1da177e4
LT
5458}
5459
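/*
 * Illustrative userspace sketch (not kernel code): reading the affinity
 * mask back.  Note that the raw syscall returns the number of bytes
 * copied on success, while the glibc wrapper maps that to 0.
 *
 *	cpu_set_t set;
 *
 *	if (sched_getaffinity(0, sizeof(set), &set) == 0)
 *		printf("%d CPUs allowed\n", CPU_COUNT(&set));
 */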
5460/**
5461 * sys_sched_yield - yield the current processor to other threads.
5462 *
dd41f596
IM
5463 * This function yields the current CPU to other tasks. If there are no
5464 * other threads running on this CPU then this function will return.
1da177e4 5465 */
5add95d4 5466SYSCALL_DEFINE0(sched_yield)
1da177e4 5467{
70b97a7f 5468 struct rq *rq = this_rq_lock();
1da177e4 5469
2d72376b 5470 schedstat_inc(rq, yld_count);
4530d7ab 5471 current->sched_class->yield_task(rq);
1da177e4
LT
5472
5473 /*
5474 * Since we are going to call schedule() anyway, there's
5475 * no need to preempt or enable interrupts:
5476 */
5477 __release(rq->lock);
8a25d5de 5478 spin_release(&rq->lock.dep_map, 1, _THIS_IP_);
9828ea9d 5479 do_raw_spin_unlock(&rq->lock);
1da177e4
LT
5480 preempt_enable_no_resched();
5481
5482 schedule();
5483
5484 return 0;
5485}
5486
d86ee480
PZ
5487static inline int should_resched(void)
5488{
5489 return need_resched() && !(preempt_count() & PREEMPT_ACTIVE);
5490}
5491
e7b38404 5492static void __cond_resched(void)
1da177e4 5493{
e7aaaa69
FW
5494 add_preempt_count(PREEMPT_ACTIVE);
5495 schedule();
5496 sub_preempt_count(PREEMPT_ACTIVE);
1da177e4
LT
5497}
5498
02b67cc3 5499int __sched _cond_resched(void)
1da177e4 5500{
d86ee480 5501 if (should_resched()) {
1da177e4
LT
5502 __cond_resched();
5503 return 1;
5504 }
5505 return 0;
5506}
02b67cc3 5507EXPORT_SYMBOL(_cond_resched);
1da177e4
LT
5508
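/*
 * Illustrative sketch (not part of the original file): the usual pattern
 * is to drop cond_resched() into long-running kernel loops so that other
 * tasks can run even on !CONFIG_PREEMPT kernels; "item", "list" and
 * process_one() are assumptions for the example.
 *
 *	list_for_each_entry(item, &list, node) {
 *		process_one(item);
 *		cond_resched();
 *	}
 */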
5509/*
613afbf8 5510 * __cond_resched_lock() - if a reschedule is pending, drop the given lock,
1da177e4
LT
5511 * call schedule, and on return reacquire the lock.
5512 *
41a2d6cf 5513 * This works OK both with and without CONFIG_PREEMPT. We do strange low-level
1da177e4
LT
5514 * operations here to prevent schedule() from being called twice (once via
5515 * spin_unlock(), once by hand).
5516 */
613afbf8 5517int __cond_resched_lock(spinlock_t *lock)
1da177e4 5518{
d86ee480 5519 int resched = should_resched();
6df3cecb
JK
5520 int ret = 0;
5521
f607c668
PZ
5522 lockdep_assert_held(lock);
5523
95c354fe 5524 if (spin_needbreak(lock) || resched) {
1da177e4 5525 spin_unlock(lock);
d86ee480 5526 if (resched)
95c354fe
NP
5527 __cond_resched();
5528 else
5529 cpu_relax();
6df3cecb 5530 ret = 1;
1da177e4 5531 spin_lock(lock);
1da177e4 5532 }
6df3cecb 5533 return ret;
1da177e4 5534}
613afbf8 5535EXPORT_SYMBOL(__cond_resched_lock);
1da177e4 5536
613afbf8 5537int __sched __cond_resched_softirq(void)
1da177e4
LT
5538{
5539 BUG_ON(!in_softirq());
5540
d86ee480 5541 if (should_resched()) {
98d82567 5542 local_bh_enable();
1da177e4
LT
5543 __cond_resched();
5544 local_bh_disable();
5545 return 1;
5546 }
5547 return 0;
5548}
613afbf8 5549EXPORT_SYMBOL(__cond_resched_softirq);
1da177e4 5550
1da177e4
LT
5551/**
5552 * yield - yield the current processor to other threads.
5553 *
72fd4a35 5554 * This is a shortcut for kernel-space yielding - it marks the
1da177e4
LT
5555 * thread runnable and calls sys_sched_yield().
5556 */
5557void __sched yield(void)
5558{
5559 set_current_state(TASK_RUNNING);
5560 sys_sched_yield();
5561}
1da177e4
LT
5562EXPORT_SYMBOL(yield);
5563
d95f4122
MG
5564/**
5565 * yield_to - yield the current processor to another thread in
5566 * your thread group, or accelerate that thread toward the
5567 * processor it's on.
16addf95
RD
5568 * @p: target task
5569 * @preempt: whether task preemption is allowed or not
d95f4122
MG
5570 *
5571 * It's the caller's job to ensure that the target task struct
5572 * can't go away on us before we can do any checks.
5573 *
5574 * Returns true if we indeed boosted the target task.
5575 */
5576bool __sched yield_to(struct task_struct *p, bool preempt)
5577{
5578 struct task_struct *curr = current;
5579 struct rq *rq, *p_rq;
5580 unsigned long flags;
5581 bool yielded = 0;
5582
5583 local_irq_save(flags);
5584 rq = this_rq();
5585
5586again:
5587 p_rq = task_rq(p);
5588 double_rq_lock(rq, p_rq);
5589 while (task_rq(p) != p_rq) {
5590 double_rq_unlock(rq, p_rq);
5591 goto again;
5592 }
5593
5594 if (!curr->sched_class->yield_to_task)
5595 goto out;
5596
5597 if (curr->sched_class != p->sched_class)
5598 goto out;
5599
5600 if (task_running(p_rq, p) || p->state)
5601 goto out;
5602
5603 yielded = curr->sched_class->yield_to_task(rq, p, preempt);
6d1cafd8 5604 if (yielded) {
d95f4122 5605 schedstat_inc(rq, yld_count);
6d1cafd8
VP
5606 /*
5607 * Make p's CPU reschedule; pick_next_entity takes care of
5608 * fairness.
5609 */
5610 if (preempt && rq != p_rq)
5611 resched_task(p_rq->curr);
5612 }
d95f4122
MG
5613
5614out:
5615 double_rq_unlock(rq, p_rq);
5616 local_irq_restore(flags);
5617
5618 if (yielded)
5619 schedule();
5620
5621 return yielded;
5622}
5623EXPORT_SYMBOL_GPL(yield_to);
5624
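/*
 * Illustrative sketch (not part of the original file): a virtualization
 * layer that has identified the task holding a contended guest lock might
 * boost it roughly like this; "holder" is an assumption, and taking a
 * task_struct reference is one way to satisfy the "can't go away"
 * requirement above.
 *
 *	get_task_struct(holder);
 *	yield_to(holder, false);
 *	put_task_struct(holder);
 */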
1da177e4 5625/*
41a2d6cf 5626 * This task is about to go to sleep on IO. Increment rq->nr_iowait so
1da177e4 5627 * that process accounting knows that this is a task in IO wait state.
1da177e4
LT
5628 */
5629void __sched io_schedule(void)
5630{
54d35f29 5631 struct rq *rq = raw_rq();
1da177e4 5632
0ff92245 5633 delayacct_blkio_start();
1da177e4 5634 atomic_inc(&rq->nr_iowait);
73c10101 5635 blk_flush_plug(current);
8f0dfc34 5636 current->in_iowait = 1;
1da177e4 5637 schedule();
8f0dfc34 5638 current->in_iowait = 0;
1da177e4 5639 atomic_dec(&rq->nr_iowait);
0ff92245 5640 delayacct_blkio_end();
1da177e4 5641}
1da177e4
LT
5642EXPORT_SYMBOL(io_schedule);
5643
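/*
 * Illustrative sketch (not part of the original file): io_schedule() is
 * used instead of schedule() when sleeping for block I/O, so the time is
 * accounted as iowait; the waitqueue "wq" and the "done" condition are
 * assumptions.
 *
 *	DEFINE_WAIT(wait);
 *
 *	prepare_to_wait(&wq, &wait, TASK_UNINTERRUPTIBLE);
 *	if (!done)
 *		io_schedule();
 *	finish_wait(&wq, &wait);
 */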
5644long __sched io_schedule_timeout(long timeout)
5645{
54d35f29 5646 struct rq *rq = raw_rq();
1da177e4
LT
5647 long ret;
5648
0ff92245 5649 delayacct_blkio_start();
1da177e4 5650 atomic_inc(&rq->nr_iowait);
73c10101 5651 blk_flush_plug(current);
8f0dfc34 5652 current->in_iowait = 1;
1da177e4 5653 ret = schedule_timeout(timeout);
8f0dfc34 5654 current->in_iowait = 0;
1da177e4 5655 atomic_dec(&rq->nr_iowait);
0ff92245 5656 delayacct_blkio_end();
1da177e4
LT
5657 return ret;
5658}
5659
5660/**
5661 * sys_sched_get_priority_max - return maximum RT priority.
5662 * @policy: scheduling class.
5663 *
5664 * this syscall returns the maximum rt_priority that can be used
5665 * by a given scheduling class.
5666 */
5add95d4 5667SYSCALL_DEFINE1(sched_get_priority_max, int, policy)
1da177e4
LT
5668{
5669 int ret = -EINVAL;
5670
5671 switch (policy) {
5672 case SCHED_FIFO:
5673 case SCHED_RR:
5674 ret = MAX_USER_RT_PRIO-1;
5675 break;
5676 case SCHED_NORMAL:
b0a9499c 5677 case SCHED_BATCH:
dd41f596 5678 case SCHED_IDLE:
1da177e4
LT
5679 ret = 0;
5680 break;
5681 }
5682 return ret;
5683}
5684
5685/**
5686 * sys_sched_get_priority_min - return minimum RT priority.
5687 * @policy: scheduling class.
5688 *
5689 * this syscall returns the minimum rt_priority that can be used
5690 * by a given scheduling class.
5691 */
5add95d4 5692SYSCALL_DEFINE1(sched_get_priority_min, int, policy)
1da177e4
LT
5693{
5694 int ret = -EINVAL;
5695
5696 switch (policy) {
5697 case SCHED_FIFO:
5698 case SCHED_RR:
5699 ret = 1;
5700 break;
5701 case SCHED_NORMAL:
b0a9499c 5702 case SCHED_BATCH:
dd41f596 5703 case SCHED_IDLE:
1da177e4
LT
5704 ret = 0;
5705 }
5706 return ret;
5707}
5708
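/*
 * Illustrative userspace sketch (not kernel code): querying the valid RT
 * priority range before choosing a value; with the limits above this
 * yields 1 and MAX_USER_RT_PRIO-1 (typically 99) for SCHED_FIFO.
 *
 *	int lo = sched_get_priority_min(SCHED_FIFO);
 *	int hi = sched_get_priority_max(SCHED_FIFO);
 */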
5709/**
5710 * sys_sched_rr_get_interval - return the default timeslice of a process.
5711 * @pid: pid of the process.
5712 * @interval: userspace pointer to the timeslice value.
5713 *
5714 * this syscall writes the default timeslice value of a given process
5715 * into the user-space timespec buffer. A value of '0' means infinity.
5716 */
17da2bd9 5717SYSCALL_DEFINE2(sched_rr_get_interval, pid_t, pid,
754fe8d2 5718 struct timespec __user *, interval)
1da177e4 5719{
36c8b586 5720 struct task_struct *p;
a4ec24b4 5721 unsigned int time_slice;
dba091b9
TG
5722 unsigned long flags;
5723 struct rq *rq;
3a5c359a 5724 int retval;
1da177e4 5725 struct timespec t;
1da177e4
LT
5726
5727 if (pid < 0)
3a5c359a 5728 return -EINVAL;
1da177e4
LT
5729
5730 retval = -ESRCH;
1a551ae7 5731 rcu_read_lock();
1da177e4
LT
5732 p = find_process_by_pid(pid);
5733 if (!p)
5734 goto out_unlock;
5735
5736 retval = security_task_getscheduler(p);
5737 if (retval)
5738 goto out_unlock;
5739
dba091b9
TG
5740 rq = task_rq_lock(p, &flags);
5741 time_slice = p->sched_class->get_rr_interval(rq, p);
0122ec5b 5742 task_rq_unlock(rq, p, &flags);
a4ec24b4 5743
1a551ae7 5744 rcu_read_unlock();
a4ec24b4 5745 jiffies_to_timespec(time_slice, &t);
1da177e4 5746 retval = copy_to_user(interval, &t, sizeof(t)) ? -EFAULT : 0;
1da177e4 5747 return retval;
3a5c359a 5748
1da177e4 5749out_unlock:
1a551ae7 5750 rcu_read_unlock();
1da177e4
LT
5751 return retval;
5752}
5753
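/*
 * Illustrative userspace sketch (not kernel code): reading the calling
 * task's round-robin timeslice through the glibc wrapper.
 *
 *	struct timespec ts;
 *
 *	if (sched_rr_get_interval(0, &ts) == 0)
 *		printf("timeslice: %ld.%09ld s\n",
 *		       (long)ts.tv_sec, ts.tv_nsec);
 */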
7c731e0a 5754static const char stat_nam[] = TASK_STATE_TO_CHAR_STR;
36c8b586 5755
82a1fcb9 5756void sched_show_task(struct task_struct *p)
1da177e4 5757{
1da177e4 5758 unsigned long free = 0;
36c8b586 5759 unsigned state;
1da177e4 5760
1da177e4 5761 state = p->state ? __ffs(p->state) + 1 : 0;
28d0686c 5762 printk(KERN_INFO "%-15.15s %c", p->comm,
2ed6e34f 5763 state < sizeof(stat_nam) - 1 ? stat_nam[state] : '?');
4bd77321 5764#if BITS_PER_LONG == 32
1da177e4 5765 if (state == TASK_RUNNING)
3df0fc5b 5766 printk(KERN_CONT " running ");
1da177e4 5767 else
3df0fc5b 5768 printk(KERN_CONT " %08lx ", thread_saved_pc(p));
1da177e4
LT
5769#else
5770 if (state == TASK_RUNNING)
3df0fc5b 5771 printk(KERN_CONT " running task ");
1da177e4 5772 else
3df0fc5b 5773 printk(KERN_CONT " %016lx ", thread_saved_pc(p));
1da177e4
LT
5774#endif
5775#ifdef CONFIG_DEBUG_STACK_USAGE
7c9f8861 5776 free = stack_not_used(p);
1da177e4 5777#endif
3df0fc5b 5778 printk(KERN_CONT "%5lu %5d %6d 0x%08lx\n", free,
aa47b7e0
DR
5779 task_pid_nr(p), task_pid_nr(p->real_parent),
5780 (unsigned long)task_thread_info(p)->flags);
1da177e4 5781
5fb5e6de 5782 show_stack(p, NULL);
1da177e4
LT
5783}
5784
e59e2ae2 5785void show_state_filter(unsigned long state_filter)
1da177e4 5786{
36c8b586 5787 struct task_struct *g, *p;
1da177e4 5788
4bd77321 5789#if BITS_PER_LONG == 32
3df0fc5b
PZ
5790 printk(KERN_INFO
5791 " task PC stack pid father\n");
1da177e4 5792#else
3df0fc5b
PZ
5793 printk(KERN_INFO
5794 " task PC stack pid father\n");
1da177e4
LT
5795#endif
5796 read_lock(&tasklist_lock);
5797 do_each_thread(g, p) {
5798 /*
5799 * reset the NMI-timeout, listing all tasks on a slow
25985edc 5800 * console might take a lot of time:
1da177e4
LT
5801 */
5802 touch_nmi_watchdog();
39bc89fd 5803 if (!state_filter || (p->state & state_filter))
82a1fcb9 5804 sched_show_task(p);
1da177e4
LT
5805 } while_each_thread(g, p);
5806
04c9167f
JF
5807 touch_all_softlockup_watchdogs();
5808
dd41f596
IM
5809#ifdef CONFIG_SCHED_DEBUG
5810 sysrq_sched_debug_show();
5811#endif
1da177e4 5812 read_unlock(&tasklist_lock);
e59e2ae2
IM
5813 /*
5814 * Only show locks if all tasks are dumped:
5815 */
93335a21 5816 if (!state_filter)
e59e2ae2 5817 debug_show_all_locks();
1da177e4
LT
5818}
5819
1df21055
IM
5820void __cpuinit init_idle_bootup_task(struct task_struct *idle)
5821{
dd41f596 5822 idle->sched_class = &idle_sched_class;
1df21055
IM
5823}
5824
f340c0d1
IM
5825/**
5826 * init_idle - set up an idle thread for a given CPU
5827 * @idle: task in question
5828 * @cpu: cpu the idle task belongs to
5829 *
5830 * NOTE: this function does not set the idle thread's NEED_RESCHED
5831 * flag, to make booting more robust.
5832 */
5c1e1767 5833void __cpuinit init_idle(struct task_struct *idle, int cpu)
1da177e4 5834{
70b97a7f 5835 struct rq *rq = cpu_rq(cpu);
1da177e4
LT
5836 unsigned long flags;
5837
05fa785c 5838 raw_spin_lock_irqsave(&rq->lock, flags);
5cbd54ef 5839
dd41f596 5840 __sched_fork(idle);
06b83b5f 5841 idle->state = TASK_RUNNING;
dd41f596
IM
5842 idle->se.exec_start = sched_clock();
5843
96f874e2 5844 cpumask_copy(&idle->cpus_allowed, cpumask_of(cpu));
6506cf6c
PZ
5845 /*
5846 * We're having a chicken-and-egg problem: even though we are
5847 * holding rq->lock, the cpu isn't yet set to this cpu so the
5848 * lockdep check in task_group() will fail.
5849 *
5850 * Similar case to sched_fork(). / Alternatively we could
5851 * use task_rq_lock() here and obtain the other rq->lock.
5852 *
5853 * Silence PROVE_RCU
5854 */
5855 rcu_read_lock();
dd41f596 5856 __set_task_cpu(idle, cpu);
6506cf6c 5857 rcu_read_unlock();
1da177e4 5858
1da177e4 5859 rq->curr = rq->idle = idle;
3ca7a440
PZ
5860#if defined(CONFIG_SMP)
5861 idle->on_cpu = 1;
4866cde0 5862#endif
05fa785c 5863 raw_spin_unlock_irqrestore(&rq->lock, flags);
1da177e4
LT
5864
5865 /* Set the preempt count _outside_ the spinlocks! */
a1261f54 5866 task_thread_info(idle)->preempt_count = 0;
625f2a37 5867
dd41f596
IM
5868 /*
5869 * The idle tasks have their own, simple scheduling class:
5870 */
5871 idle->sched_class = &idle_sched_class;
868baf07 5872 ftrace_graph_init_idle_task(idle, cpu);
1da177e4
LT
5873}
5874
5875/*
5876 * In a system that switches off the HZ timer nohz_cpu_mask
5877 * indicates which cpus entered this state. This is used
5878 * in the rcu update to wait only for active cpus. For systems
5879 * which do not switch off the HZ timer nohz_cpu_mask should
6a7b3dc3 5880 * always be CPU_BITS_NONE.
1da177e4 5881 */
6a7b3dc3 5882cpumask_var_t nohz_cpu_mask;
1da177e4 5883
19978ca6
IM
5884/*
5885 * Increase the granularity value when there are more CPUs,
5886 * because with more CPUs the 'effective latency' as visible
5887 * to users decreases. But the relationship is not linear,
5888 * so pick a second-best guess by going with the log2 of the
5889 * number of CPUs.
5890 *
5891 * This idea comes from the SD scheduler of Con Kolivas:
5892 */
acb4a848 5893static int get_update_sysctl_factor(void)
19978ca6 5894{
4ca3ef71 5895 unsigned int cpus = min_t(int, num_online_cpus(), 8);
1983a922
CE
5896 unsigned int factor;
5897
5898 switch (sysctl_sched_tunable_scaling) {
5899 case SCHED_TUNABLESCALING_NONE:
5900 factor = 1;
5901 break;
5902 case SCHED_TUNABLESCALING_LINEAR:
5903 factor = cpus;
5904 break;
5905 case SCHED_TUNABLESCALING_LOG:
5906 default:
5907 factor = 1 + ilog2(cpus);
5908 break;
5909 }
19978ca6 5910
acb4a848
CE
5911 return factor;
5912}
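/*
 * Worked example (not part of the original file): with 8 or more online
 * CPUs and the default SCHED_TUNABLESCALING_LOG policy this gives
 * factor = 1 + ilog2(min(num_online_cpus(), 8)) = 1 + 3 = 4, so e.g. a
 * normalized sched_latency of 6ms would be scaled up to 24ms below.
 */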
19978ca6 5913
acb4a848
CE
5914static void update_sysctl(void)
5915{
5916 unsigned int factor = get_update_sysctl_factor();
19978ca6 5917
0bcdcf28
CE
5918#define SET_SYSCTL(name) \
5919 (sysctl_##name = (factor) * normalized_sysctl_##name)
5920 SET_SYSCTL(sched_min_granularity);
5921 SET_SYSCTL(sched_latency);
5922 SET_SYSCTL(sched_wakeup_granularity);
0bcdcf28
CE
5923#undef SET_SYSCTL
5924}
55cd5340 5925
0bcdcf28
CE
5926static inline void sched_init_granularity(void)
5927{
5928 update_sysctl();
19978ca6
IM
5929}
5930
1da177e4
LT
5931#ifdef CONFIG_SMP
5932/*
5933 * This is how migration works:
5934 *
969c7921
TH
5935 * 1) we invoke migration_cpu_stop() on the target CPU using
5936 * stop_one_cpu().
5937 * 2) stopper starts to run (implicitly forcing the migrated thread
5938 * off the CPU)
5939 * 3) it checks whether the migrated task is still in the wrong runqueue.
5940 * 4) if it's in the wrong runqueue then the migration thread removes
1da177e4 5941 * it and puts it into the right queue.
969c7921
TH
5942 * 5) stopper completes and stop_one_cpu() returns and the migration
5943 * is done.
1da177e4
LT
5944 */
5945
5946/*
5947 * Change a given task's CPU affinity. Migrate the thread to a
5948 * proper CPU and schedule it away if the CPU it's executing on
5949 * is removed from the allowed bitmask.
5950 *
5951 * NOTE: the caller must have a valid reference to the task, the
41a2d6cf 5952 * task must not exit() & deallocate itself prematurely. The
1da177e4
LT
5953 * call is not atomic; no spinlocks may be held.
5954 */
96f874e2 5955int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
1da177e4
LT
5956{
5957 unsigned long flags;
70b97a7f 5958 struct rq *rq;
969c7921 5959 unsigned int dest_cpu;
48f24c4d 5960 int ret = 0;
1da177e4
LT
5961
5962 rq = task_rq_lock(p, &flags);
e2912009 5963
db44fc01
YZ
5964 if (cpumask_equal(&p->cpus_allowed, new_mask))
5965 goto out;
5966
6ad4c188 5967 if (!cpumask_intersects(new_mask, cpu_active_mask)) {
1da177e4
LT
5968 ret = -EINVAL;
5969 goto out;
5970 }
5971
db44fc01 5972 if (unlikely((p->flags & PF_THREAD_BOUND) && p != current)) {
9985b0ba
DR
5973 ret = -EINVAL;
5974 goto out;
5975 }
5976
73fe6aae 5977 if (p->sched_class->set_cpus_allowed)
cd8ba7cd 5978 p->sched_class->set_cpus_allowed(p, new_mask);
73fe6aae 5979 else {
96f874e2
RR
5980 cpumask_copy(&p->cpus_allowed, new_mask);
5981 p->rt.nr_cpus_allowed = cpumask_weight(new_mask);
73fe6aae
GH
5982 }
5983
1da177e4 5984 /* Can the task run on the task's current CPU? If so, we're done */
96f874e2 5985 if (cpumask_test_cpu(task_cpu(p), new_mask))
1da177e4
LT
5986 goto out;
5987
969c7921 5988 dest_cpu = cpumask_any_and(cpu_active_mask, new_mask);
bd8e7dde 5989 if (p->on_rq) {
969c7921 5990 struct migration_arg arg = { p, dest_cpu };
1da177e4 5991 /* Need help from migration thread: drop lock and wait. */
0122ec5b 5992 task_rq_unlock(rq, p, &flags);
969c7921 5993 stop_one_cpu(cpu_of(rq), migration_cpu_stop, &arg);
1da177e4
LT
5994 tlb_migrate_finish(p->mm);
5995 return 0;
5996 }
5997out:
0122ec5b 5998 task_rq_unlock(rq, p, &flags);
48f24c4d 5999
1da177e4
LT
6000 return ret;
6001}
cd8ba7cd 6002EXPORT_SYMBOL_GPL(set_cpus_allowed_ptr);
1da177e4
LT
6003
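/*
 * Illustrative sketch (not part of the original file): one common caller
 * pattern is restricting a freshly created kthread to a single CPU;
 * worker_fn(), "cpu" and the thread name are assumptions.
 *
 *	p = kthread_create(worker_fn, NULL, "worker/%d", cpu);
 *	if (!IS_ERR(p))
 *		set_cpus_allowed_ptr(p, cpumask_of(cpu));
 */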
6004/*
41a2d6cf 6005 * Move a (non-current) task off this cpu, onto the dest cpu. We're doing
1da177e4
LT
6006 * this because either it can't run here any more (set_cpus_allowed()
6007 * away from this CPU, or CPU going down), or because we're
6008 * attempting to rebalance this task on exec (sched_exec).
6009 *
6010 * So we race with normal scheduler movements, but that's OK, as long
6011 * as the task is no longer on this CPU.
efc30814
KK
6012 *
6013 * Returns non-zero if task was successfully migrated.
1da177e4 6014 */
efc30814 6015static int __migrate_task(struct task_struct *p, int src_cpu, int dest_cpu)
1da177e4 6016{
70b97a7f 6017 struct rq *rq_dest, *rq_src;
e2912009 6018 int ret = 0;
1da177e4 6019
e761b772 6020 if (unlikely(!cpu_active(dest_cpu)))
efc30814 6021 return ret;
1da177e4
LT
6022
6023 rq_src = cpu_rq(src_cpu);
6024 rq_dest = cpu_rq(dest_cpu);
6025
0122ec5b 6026 raw_spin_lock(&p->pi_lock);
1da177e4
LT
6027 double_rq_lock(rq_src, rq_dest);
6028 /* Already moved. */
6029 if (task_cpu(p) != src_cpu)
b1e38734 6030 goto done;
1da177e4 6031 /* Affinity changed (again). */
96f874e2 6032 if (!cpumask_test_cpu(dest_cpu, &p->cpus_allowed))
b1e38734 6033 goto fail;
1da177e4 6034
e2912009
PZ
6035 /*
6036 * If we're not on a rq, the next wake-up will ensure we're
6037 * placed properly.
6038 */
fd2f4419 6039 if (p->on_rq) {
2e1cb74a 6040 deactivate_task(rq_src, p, 0);
e2912009 6041 set_task_cpu(p, dest_cpu);
dd41f596 6042 activate_task(rq_dest, p, 0);
15afe09b 6043 check_preempt_curr(rq_dest, p, 0);
1da177e4 6044 }
b1e38734 6045done:
efc30814 6046 ret = 1;
b1e38734 6047fail:
1da177e4 6048 double_rq_unlock(rq_src, rq_dest);
0122ec5b 6049 raw_spin_unlock(&p->pi_lock);
efc30814 6050 return ret;
1da177e4
LT
6051}
6052
6053/*
969c7921
TH
6054 * migration_cpu_stop - this will be executed by a highprio stopper thread
6055 * and performs thread migration by bumping thread off CPU then
6056 * 'pushing' onto another runqueue.
1da177e4 6057 */
969c7921 6058static int migration_cpu_stop(void *data)
1da177e4 6059{
969c7921 6060 struct migration_arg *arg = data;
f7b4cddc 6061
969c7921
TH
6062 /*
6063 * The original target cpu might have gone down and we might
6064 * be on another cpu but it doesn't matter.
6065 */
f7b4cddc 6066 local_irq_disable();
969c7921 6067 __migrate_task(arg->task, raw_smp_processor_id(), arg->dest_cpu);
f7b4cddc 6068 local_irq_enable();
1da177e4 6069 return 0;
f7b4cddc
ON
6070}
6071
1da177e4 6072#ifdef CONFIG_HOTPLUG_CPU
48c5ccae 6073
054b9108 6074/*
48c5ccae
PZ
6075 * Ensures that the idle task is using init_mm right before its cpu goes
6076 * offline.
054b9108 6077 */
48c5ccae 6078void idle_task_exit(void)
1da177e4 6079{
48c5ccae 6080 struct mm_struct *mm = current->active_mm;
e76bd8d9 6081
48c5ccae 6082 BUG_ON(cpu_online(smp_processor_id()));
e76bd8d9 6083
48c5ccae
PZ
6084 if (mm != &init_mm)
6085 switch_mm(mm, &init_mm, current);
6086 mmdrop(mm);
1da177e4
LT
6087}
6088
6089/*
6090 * While a dead CPU has no uninterruptible tasks queued at this point,
6091 * it might still have a nonzero ->nr_uninterruptible counter, because
6092 * for performance reasons the counter is not strictly tracking tasks to
6093 * their home CPUs. So we just add the counter to another CPU's counter,
6094 * to keep the global sum constant after CPU-down:
6095 */
70b97a7f 6096static void migrate_nr_uninterruptible(struct rq *rq_src)
1da177e4 6097{
6ad4c188 6098 struct rq *rq_dest = cpu_rq(cpumask_any(cpu_active_mask));
1da177e4 6099
1da177e4
LT
6100 rq_dest->nr_uninterruptible += rq_src->nr_uninterruptible;
6101 rq_src->nr_uninterruptible = 0;
1da177e4
LT
6102}
6103
dd41f596 6104/*
48c5ccae 6105 * remove the tasks which were accounted by rq from calc_load_tasks.
1da177e4 6106 */
48c5ccae 6107static void calc_global_load_remove(struct rq *rq)
1da177e4 6108{
48c5ccae
PZ
6109 atomic_long_sub(rq->calc_load_active, &calc_load_tasks);
6110 rq->calc_load_active = 0;
1da177e4
LT
6111}
6112
48f24c4d 6113/*
48c5ccae
PZ
6114 * Migrate all tasks from the rq, sleeping tasks will be migrated by
6115 * try_to_wake_up()->select_task_rq().
6116 *
6117 * Called with rq->lock held even though we're in stop_machine() and
6118 * there's no concurrency possible, we hold the required locks anyway
6119 * because of lock validation efforts.
1da177e4 6120 */
48c5ccae 6121static void migrate_tasks(unsigned int dead_cpu)
1da177e4 6122{
70b97a7f 6123 struct rq *rq = cpu_rq(dead_cpu);
48c5ccae
PZ
6124 struct task_struct *next, *stop = rq->stop;
6125 int dest_cpu;
1da177e4
LT
6126
6127 /*
48c5ccae
PZ
6128 * Fudge the rq selection such that the below task selection loop
6129 * doesn't get stuck on the currently eligible stop task.
6130 *
6131 * We're currently inside stop_machine() and the rq is either stuck
6132 * in the stop_machine_cpu_stop() loop, or we're executing this code,
6133 * either way we should never end up calling schedule() until we're
6134 * done here.
1da177e4 6135 */
48c5ccae 6136 rq->stop = NULL;
48f24c4d 6137
dd41f596 6138 for ( ; ; ) {
48c5ccae
PZ
6139 /*
6140 * There's this thread running, bail when that's the only
6141 * remaining thread.
6142 */
6143 if (rq->nr_running == 1)
dd41f596 6144 break;
48c5ccae 6145
b67802ea 6146 next = pick_next_task(rq);
48c5ccae 6147 BUG_ON(!next);
79c53799 6148 next->sched_class->put_prev_task(rq, next);
e692ab53 6149
48c5ccae
PZ
6150 /* Find suitable destination for @next, with force if needed. */
6151 dest_cpu = select_fallback_rq(dead_cpu, next);
6152 raw_spin_unlock(&rq->lock);
6153
6154 __migrate_task(next, dead_cpu, dest_cpu);
6155
6156 raw_spin_lock(&rq->lock);
1da177e4 6157 }
dce48a84 6158
48c5ccae 6159 rq->stop = stop;
dce48a84 6160}
48c5ccae 6161
1da177e4
LT
6162#endif /* CONFIG_HOTPLUG_CPU */
6163
e692ab53
NP
6164#if defined(CONFIG_SCHED_DEBUG) && defined(CONFIG_SYSCTL)
6165
6166static struct ctl_table sd_ctl_dir[] = {
e0361851
AD
6167 {
6168 .procname = "sched_domain",
c57baf1e 6169 .mode = 0555,
e0361851 6170 },
56992309 6171 {}
e692ab53
NP
6172};
6173
6174static struct ctl_table sd_ctl_root[] = {
e0361851
AD
6175 {
6176 .procname = "kernel",
c57baf1e 6177 .mode = 0555,
e0361851
AD
6178 .child = sd_ctl_dir,
6179 },
56992309 6180 {}
e692ab53
NP
6181};
6182
6183static struct ctl_table *sd_alloc_ctl_entry(int n)
6184{
6185 struct ctl_table *entry =
5cf9f062 6186 kcalloc(n, sizeof(struct ctl_table), GFP_KERNEL);
e692ab53 6187
e692ab53
NP
6188 return entry;
6189}
6190
6382bc90
MM
6191static void sd_free_ctl_entry(struct ctl_table **tablep)
6192{
cd790076 6193 struct ctl_table *entry;
6382bc90 6194
cd790076
MM
6195 /*
6196 * In the intermediate directories, both the child directory and
6197 * procname are dynamically allocated and could fail but the mode
41a2d6cf 6198 * will always be set. In the lowest directory the names are
cd790076
MM
6199 * static strings and all have proc handlers.
6200 */
6201 for (entry = *tablep; entry->mode; entry++) {
6382bc90
MM
6202 if (entry->child)
6203 sd_free_ctl_entry(&entry->child);
cd790076
MM
6204 if (entry->proc_handler == NULL)
6205 kfree(entry->procname);
6206 }
6382bc90
MM
6207
6208 kfree(*tablep);
6209 *tablep = NULL;
6210}
6211
e692ab53 6212static void
e0361851 6213set_table_entry(struct ctl_table *entry,
e692ab53
NP
6214 const char *procname, void *data, int maxlen,
6215 mode_t mode, proc_handler *proc_handler)
6216{
e692ab53
NP
6217 entry->procname = procname;
6218 entry->data = data;
6219 entry->maxlen = maxlen;
6220 entry->mode = mode;
6221 entry->proc_handler = proc_handler;
6222}
6223
6224static struct ctl_table *
6225sd_alloc_ctl_domain_table(struct sched_domain *sd)
6226{
a5d8c348 6227 struct ctl_table *table = sd_alloc_ctl_entry(13);
e692ab53 6228
ad1cdc1d
MM
6229 if (table == NULL)
6230 return NULL;
6231
e0361851 6232 set_table_entry(&table[0], "min_interval", &sd->min_interval,
e692ab53 6233 sizeof(long), 0644, proc_doulongvec_minmax);
e0361851 6234 set_table_entry(&table[1], "max_interval", &sd->max_interval,
e692ab53 6235 sizeof(long), 0644, proc_doulongvec_minmax);
e0361851 6236 set_table_entry(&table[2], "busy_idx", &sd->busy_idx,
e692ab53 6237 sizeof(int), 0644, proc_dointvec_minmax);
e0361851 6238 set_table_entry(&table[3], "idle_idx", &sd->idle_idx,
e692ab53 6239 sizeof(int), 0644, proc_dointvec_minmax);
e0361851 6240 set_table_entry(&table[4], "newidle_idx", &sd->newidle_idx,
e692ab53 6241 sizeof(int), 0644, proc_dointvec_minmax);
e0361851 6242 set_table_entry(&table[5], "wake_idx", &sd->wake_idx,
e692ab53 6243 sizeof(int), 0644, proc_dointvec_minmax);
e0361851 6244 set_table_entry(&table[6], "forkexec_idx", &sd->forkexec_idx,
e692ab53 6245 sizeof(int), 0644, proc_dointvec_minmax);
e0361851 6246 set_table_entry(&table[7], "busy_factor", &sd->busy_factor,
e692ab53 6247 sizeof(int), 0644, proc_dointvec_minmax);
e0361851 6248 set_table_entry(&table[8], "imbalance_pct", &sd->imbalance_pct,
e692ab53 6249 sizeof(int), 0644, proc_dointvec_minmax);
ace8b3d6 6250 set_table_entry(&table[9], "cache_nice_tries",
e692ab53
NP
6251 &sd->cache_nice_tries,
6252 sizeof(int), 0644, proc_dointvec_minmax);
ace8b3d6 6253 set_table_entry(&table[10], "flags", &sd->flags,
e692ab53 6254 sizeof(int), 0644, proc_dointvec_minmax);
a5d8c348
IM
6255 set_table_entry(&table[11], "name", sd->name,
6256 CORENAME_MAX_SIZE, 0444, proc_dostring);
6257 /* &table[12] is terminator */
e692ab53
NP
6258
6259 return table;
6260}
6261
9a4e7159 6262static ctl_table *sd_alloc_ctl_cpu_table(int cpu)
e692ab53
NP
6263{
6264 struct ctl_table *entry, *table;
6265 struct sched_domain *sd;
6266 int domain_num = 0, i;
6267 char buf[32];
6268
6269 for_each_domain(cpu, sd)
6270 domain_num++;
6271 entry = table = sd_alloc_ctl_entry(domain_num + 1);
ad1cdc1d
MM
6272 if (table == NULL)
6273 return NULL;
e692ab53
NP
6274
6275 i = 0;
6276 for_each_domain(cpu, sd) {
6277 snprintf(buf, 32, "domain%d", i);
e692ab53 6278 entry->procname = kstrdup(buf, GFP_KERNEL);
c57baf1e 6279 entry->mode = 0555;
e692ab53
NP
6280 entry->child = sd_alloc_ctl_domain_table(sd);
6281 entry++;
6282 i++;
6283 }
6284 return table;
6285}
6286
6287static struct ctl_table_header *sd_sysctl_header;
6382bc90 6288static void register_sched_domain_sysctl(void)
e692ab53 6289{
6ad4c188 6290 int i, cpu_num = num_possible_cpus();
e692ab53
NP
6291 struct ctl_table *entry = sd_alloc_ctl_entry(cpu_num + 1);
6292 char buf[32];
6293
7378547f
MM
6294 WARN_ON(sd_ctl_dir[0].child);
6295 sd_ctl_dir[0].child = entry;
6296
ad1cdc1d
MM
6297 if (entry == NULL)
6298 return;
6299
6ad4c188 6300 for_each_possible_cpu(i) {
e692ab53 6301 snprintf(buf, 32, "cpu%d", i);
e692ab53 6302 entry->procname = kstrdup(buf, GFP_KERNEL);
c57baf1e 6303 entry->mode = 0555;
e692ab53 6304 entry->child = sd_alloc_ctl_cpu_table(i);
97b6ea7b 6305 entry++;
e692ab53 6306 }
7378547f
MM
6307
6308 WARN_ON(sd_sysctl_header);
e692ab53
NP
6309 sd_sysctl_header = register_sysctl_table(sd_ctl_root);
6310}
6382bc90 6311
7378547f 6312/* may be called multiple times per register */
6382bc90
MM
6313static void unregister_sched_domain_sysctl(void)
6314{
7378547f
MM
6315 if (sd_sysctl_header)
6316 unregister_sysctl_table(sd_sysctl_header);
6382bc90 6317 sd_sysctl_header = NULL;
7378547f
MM
6318 if (sd_ctl_dir[0].child)
6319 sd_free_ctl_entry(&sd_ctl_dir[0].child);
6382bc90 6320}
e692ab53 6321#else
6382bc90
MM
6322static void register_sched_domain_sysctl(void)
6323{
6324}
6325static void unregister_sched_domain_sysctl(void)
e692ab53
NP
6326{
6327}
6328#endif
6329
1f11eb6a
GH
6330static void set_rq_online(struct rq *rq)
6331{
6332 if (!rq->online) {
6333 const struct sched_class *class;
6334
c6c4927b 6335 cpumask_set_cpu(rq->cpu, rq->rd->online);
1f11eb6a
GH
6336 rq->online = 1;
6337
6338 for_each_class(class) {
6339 if (class->rq_online)
6340 class->rq_online(rq);
6341 }
6342 }
6343}
6344
6345static void set_rq_offline(struct rq *rq)
6346{
6347 if (rq->online) {
6348 const struct sched_class *class;
6349
6350 for_each_class(class) {
6351 if (class->rq_offline)
6352 class->rq_offline(rq);
6353 }
6354
c6c4927b 6355 cpumask_clear_cpu(rq->cpu, rq->rd->online);
1f11eb6a
GH
6356 rq->online = 0;
6357 }
6358}
6359
1da177e4
LT
6360/*
6361 * migration_call - callback that gets triggered when a CPU is added.
6362 * Here we can start up the necessary migration thread for the new CPU.
6363 */
48f24c4d
IM
6364static int __cpuinit
6365migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
1da177e4 6366{
48f24c4d 6367 int cpu = (long)hcpu;
1da177e4 6368 unsigned long flags;
969c7921 6369 struct rq *rq = cpu_rq(cpu);
1da177e4 6370
48c5ccae 6371 switch (action & ~CPU_TASKS_FROZEN) {
5be9361c 6372
1da177e4 6373 case CPU_UP_PREPARE:
a468d389 6374 rq->calc_load_update = calc_load_update;
1da177e4 6375 break;
48f24c4d 6376
1da177e4 6377 case CPU_ONLINE:
1f94ef59 6378 /* Update our root-domain */
05fa785c 6379 raw_spin_lock_irqsave(&rq->lock, flags);
1f94ef59 6380 if (rq->rd) {
c6c4927b 6381 BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
1f11eb6a
GH
6382
6383 set_rq_online(rq);
1f94ef59 6384 }
05fa785c 6385 raw_spin_unlock_irqrestore(&rq->lock, flags);
1da177e4 6386 break;
48f24c4d 6387
1da177e4 6388#ifdef CONFIG_HOTPLUG_CPU
08f503b0 6389 case CPU_DYING:
317f3941 6390 sched_ttwu_pending();
57d885fe 6391 /* Update our root-domain */
05fa785c 6392 raw_spin_lock_irqsave(&rq->lock, flags);
57d885fe 6393 if (rq->rd) {
c6c4927b 6394 BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
1f11eb6a 6395 set_rq_offline(rq);
57d885fe 6396 }
48c5ccae
PZ
6397 migrate_tasks(cpu);
6398 BUG_ON(rq->nr_running != 1); /* the migration thread */
05fa785c 6399 raw_spin_unlock_irqrestore(&rq->lock, flags);
48c5ccae
PZ
6400
6401 migrate_nr_uninterruptible(rq);
6402 calc_global_load_remove(rq);
57d885fe 6403 break;
1da177e4
LT
6404#endif
6405 }
49c022e6
PZ
6406
6407 update_max_interval();
6408
1da177e4
LT
6409 return NOTIFY_OK;
6410}
6411
f38b0820
PM
6412/*
6413 * Register at high priority so that task migration (migrate_all_tasks)
6414 * happens before everything else. This has to be lower priority than
cdd6c482 6415 * the notifier in the perf_event subsystem, though.
1da177e4 6416 */
26c2143b 6417static struct notifier_block __cpuinitdata migration_notifier = {
1da177e4 6418 .notifier_call = migration_call,
50a323b7 6419 .priority = CPU_PRI_MIGRATION,
1da177e4
LT
6420};
6421
3a101d05
TH
6422static int __cpuinit sched_cpu_active(struct notifier_block *nfb,
6423 unsigned long action, void *hcpu)
6424{
6425 switch (action & ~CPU_TASKS_FROZEN) {
6426 case CPU_ONLINE:
6427 case CPU_DOWN_FAILED:
6428 set_cpu_active((long)hcpu, true);
6429 return NOTIFY_OK;
6430 default:
6431 return NOTIFY_DONE;
6432 }
6433}
6434
6435static int __cpuinit sched_cpu_inactive(struct notifier_block *nfb,
6436 unsigned long action, void *hcpu)
6437{
6438 switch (action & ~CPU_TASKS_FROZEN) {
6439 case CPU_DOWN_PREPARE:
6440 set_cpu_active((long)hcpu, false);
6441 return NOTIFY_OK;
6442 default:
6443 return NOTIFY_DONE;
6444 }
6445}
6446
7babe8db 6447static int __init migration_init(void)
1da177e4
LT
6448{
6449 void *cpu = (void *)(long)smp_processor_id();
07dccf33 6450 int err;
48f24c4d 6451
3a101d05 6452 /* Initialize migration for the boot CPU */
07dccf33
AM
6453 err = migration_call(&migration_notifier, CPU_UP_PREPARE, cpu);
6454 BUG_ON(err == NOTIFY_BAD);
1da177e4
LT
6455 migration_call(&migration_notifier, CPU_ONLINE, cpu);
6456 register_cpu_notifier(&migration_notifier);
7babe8db 6457
3a101d05
TH
6458 /* Register cpu active notifiers */
6459 cpu_notifier(sched_cpu_active, CPU_PRI_SCHED_ACTIVE);
6460 cpu_notifier(sched_cpu_inactive, CPU_PRI_SCHED_INACTIVE);
6461
a004cd42 6462 return 0;
1da177e4 6463}
7babe8db 6464early_initcall(migration_init);
1da177e4
LT
6465#endif
6466
6467#ifdef CONFIG_SMP
476f3534 6468
4cb98839
PZ
6469static cpumask_var_t sched_domains_tmpmask; /* sched_domains_mutex */
6470
3e9830dc 6471#ifdef CONFIG_SCHED_DEBUG
4dcf6aff 6472
f6630114
MT
6473static __read_mostly int sched_domain_debug_enabled;
6474
6475static int __init sched_domain_debug_setup(char *str)
6476{
6477 sched_domain_debug_enabled = 1;
6478
6479 return 0;
6480}
6481early_param("sched_debug", sched_domain_debug_setup);
6482
7c16ec58 6483static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level,
96f874e2 6484 struct cpumask *groupmask)
1da177e4 6485{
4dcf6aff 6486 struct sched_group *group = sd->groups;
434d53b0 6487 char str[256];
1da177e4 6488
968ea6d8 6489 cpulist_scnprintf(str, sizeof(str), sched_domain_span(sd));
96f874e2 6490 cpumask_clear(groupmask);
4dcf6aff
IM
6491
6492 printk(KERN_DEBUG "%*s domain %d: ", level, "", level);
6493
6494 if (!(sd->flags & SD_LOAD_BALANCE)) {
3df0fc5b 6495 printk("does not load-balance\n");
4dcf6aff 6496 if (sd->parent)
3df0fc5b
PZ
6497 printk(KERN_ERR "ERROR: !SD_LOAD_BALANCE domain"
6498 " has parent");
4dcf6aff 6499 return -1;
41c7ce9a
NP
6500 }
6501
3df0fc5b 6502 printk(KERN_CONT "span %s level %s\n", str, sd->name);
4dcf6aff 6503
758b2cdc 6504 if (!cpumask_test_cpu(cpu, sched_domain_span(sd))) {
3df0fc5b
PZ
6505 printk(KERN_ERR "ERROR: domain->span does not contain "
6506 "CPU%d\n", cpu);
4dcf6aff 6507 }
758b2cdc 6508 if (!cpumask_test_cpu(cpu, sched_group_cpus(group))) {
3df0fc5b
PZ
6509 printk(KERN_ERR "ERROR: domain->groups does not contain"
6510 " CPU%d\n", cpu);
4dcf6aff 6511 }
1da177e4 6512
4dcf6aff 6513 printk(KERN_DEBUG "%*s groups:", level + 1, "");
1da177e4 6514 do {
4dcf6aff 6515 if (!group) {
3df0fc5b
PZ
6516 printk("\n");
6517 printk(KERN_ERR "ERROR: group is NULL\n");
1da177e4
LT
6518 break;
6519 }
6520
18a3885f 6521 if (!group->cpu_power) {
3df0fc5b
PZ
6522 printk(KERN_CONT "\n");
6523 printk(KERN_ERR "ERROR: domain->cpu_power not "
6524 "set\n");
4dcf6aff
IM
6525 break;
6526 }
1da177e4 6527
758b2cdc 6528 if (!cpumask_weight(sched_group_cpus(group))) {
3df0fc5b
PZ
6529 printk(KERN_CONT "\n");
6530 printk(KERN_ERR "ERROR: empty group\n");
4dcf6aff
IM
6531 break;
6532 }
1da177e4 6533
758b2cdc 6534 if (cpumask_intersects(groupmask, sched_group_cpus(group))) {
3df0fc5b
PZ
6535 printk(KERN_CONT "\n");
6536 printk(KERN_ERR "ERROR: repeated CPUs\n");
4dcf6aff
IM
6537 break;
6538 }
1da177e4 6539
758b2cdc 6540 cpumask_or(groupmask, groupmask, sched_group_cpus(group));
1da177e4 6541
968ea6d8 6542 cpulist_scnprintf(str, sizeof(str), sched_group_cpus(group));
381512cf 6543
3df0fc5b 6544 printk(KERN_CONT " %s", str);
1399fa78 6545 if (group->cpu_power != SCHED_POWER_SCALE) {
3df0fc5b
PZ
6546 printk(KERN_CONT " (cpu_power = %d)",
6547 group->cpu_power);
381512cf 6548 }
1da177e4 6549
4dcf6aff
IM
6550 group = group->next;
6551 } while (group != sd->groups);
3df0fc5b 6552 printk(KERN_CONT "\n");
1da177e4 6553
758b2cdc 6554 if (!cpumask_equal(sched_domain_span(sd), groupmask))
3df0fc5b 6555 printk(KERN_ERR "ERROR: groups don't span domain->span\n");
1da177e4 6556
758b2cdc
RR
6557 if (sd->parent &&
6558 !cpumask_subset(groupmask, sched_domain_span(sd->parent)))
3df0fc5b
PZ
6559 printk(KERN_ERR "ERROR: parent span is not a superset "
6560 "of domain->span\n");
4dcf6aff
IM
6561 return 0;
6562}
1da177e4 6563
4dcf6aff
IM
6564static void sched_domain_debug(struct sched_domain *sd, int cpu)
6565{
6566 int level = 0;
1da177e4 6567
f6630114
MT
6568 if (!sched_domain_debug_enabled)
6569 return;
6570
4dcf6aff
IM
6571 if (!sd) {
6572 printk(KERN_DEBUG "CPU%d attaching NULL sched-domain.\n", cpu);
6573 return;
6574 }
1da177e4 6575
4dcf6aff
IM
6576 printk(KERN_DEBUG "CPU%d attaching sched-domain:\n", cpu);
6577
6578 for (;;) {
4cb98839 6579 if (sched_domain_debug_one(sd, cpu, level, sched_domains_tmpmask))
4dcf6aff 6580 break;
1da177e4
LT
6581 level++;
6582 sd = sd->parent;
33859f7f 6583 if (!sd)
4dcf6aff
IM
6584 break;
6585 }
1da177e4 6586}
6d6bc0ad 6587#else /* !CONFIG_SCHED_DEBUG */
48f24c4d 6588# define sched_domain_debug(sd, cpu) do { } while (0)
6d6bc0ad 6589#endif /* CONFIG_SCHED_DEBUG */
1da177e4 6590
1a20ff27 6591static int sd_degenerate(struct sched_domain *sd)
245af2c7 6592{
758b2cdc 6593 if (cpumask_weight(sched_domain_span(sd)) == 1)
245af2c7
SS
6594 return 1;
6595
6596 /* Following flags need at least 2 groups */
6597 if (sd->flags & (SD_LOAD_BALANCE |
6598 SD_BALANCE_NEWIDLE |
6599 SD_BALANCE_FORK |
89c4710e
SS
6600 SD_BALANCE_EXEC |
6601 SD_SHARE_CPUPOWER |
6602 SD_SHARE_PKG_RESOURCES)) {
245af2c7
SS
6603 if (sd->groups != sd->groups->next)
6604 return 0;
6605 }
6606
6607 /* Following flags don't use groups */
c88d5910 6608 if (sd->flags & (SD_WAKE_AFFINE))
245af2c7
SS
6609 return 0;
6610
6611 return 1;
6612}
6613
48f24c4d
IM
6614static int
6615sd_parent_degenerate(struct sched_domain *sd, struct sched_domain *parent)
245af2c7
SS
6616{
6617 unsigned long cflags = sd->flags, pflags = parent->flags;
6618
6619 if (sd_degenerate(parent))
6620 return 1;
6621
758b2cdc 6622 if (!cpumask_equal(sched_domain_span(sd), sched_domain_span(parent)))
245af2c7
SS
6623 return 0;
6624
245af2c7
SS
6625 /* Flags needing groups don't count if only 1 group in parent */
6626 if (parent->groups == parent->groups->next) {
6627 pflags &= ~(SD_LOAD_BALANCE |
6628 SD_BALANCE_NEWIDLE |
6629 SD_BALANCE_FORK |
89c4710e
SS
6630 SD_BALANCE_EXEC |
6631 SD_SHARE_CPUPOWER |
6632 SD_SHARE_PKG_RESOURCES);
5436499e
KC
6633 if (nr_node_ids == 1)
6634 pflags &= ~SD_SERIALIZE;
245af2c7
SS
6635 }
6636 if (~cflags & pflags)
6637 return 0;
6638
6639 return 1;
6640}
6641
dce840a0 6642static void free_rootdomain(struct rcu_head *rcu)
c6c4927b 6643{
dce840a0 6644 struct root_domain *rd = container_of(rcu, struct root_domain, rcu);
047106ad 6645
68e74568 6646 cpupri_cleanup(&rd->cpupri);
c6c4927b
RR
6647 free_cpumask_var(rd->rto_mask);
6648 free_cpumask_var(rd->online);
6649 free_cpumask_var(rd->span);
6650 kfree(rd);
6651}
6652
57d885fe
GH
6653static void rq_attach_root(struct rq *rq, struct root_domain *rd)
6654{
a0490fa3 6655 struct root_domain *old_rd = NULL;
57d885fe 6656 unsigned long flags;
57d885fe 6657
05fa785c 6658 raw_spin_lock_irqsave(&rq->lock, flags);
57d885fe
GH
6659
6660 if (rq->rd) {
a0490fa3 6661 old_rd = rq->rd;
57d885fe 6662
c6c4927b 6663 if (cpumask_test_cpu(rq->cpu, old_rd->online))
1f11eb6a 6664 set_rq_offline(rq);
57d885fe 6665
c6c4927b 6666 cpumask_clear_cpu(rq->cpu, old_rd->span);
dc938520 6667
a0490fa3
IM
6668 /*
6669 * If we don't want to free the old_rd yet then
6670 * set old_rd to NULL to skip the freeing later
6671 * in this function:
6672 */
6673 if (!atomic_dec_and_test(&old_rd->refcount))
6674 old_rd = NULL;
57d885fe
GH
6675 }
6676
6677 atomic_inc(&rd->refcount);
6678 rq->rd = rd;
6679
c6c4927b 6680 cpumask_set_cpu(rq->cpu, rd->span);
00aec93d 6681 if (cpumask_test_cpu(rq->cpu, cpu_active_mask))
1f11eb6a 6682 set_rq_online(rq);
57d885fe 6683
05fa785c 6684 raw_spin_unlock_irqrestore(&rq->lock, flags);
a0490fa3
IM
6685
6686 if (old_rd)
dce840a0 6687 call_rcu_sched(&old_rd->rcu, free_rootdomain);
57d885fe
GH
6688}
6689
68c38fc3 6690static int init_rootdomain(struct root_domain *rd)
57d885fe
GH
6691{
6692 memset(rd, 0, sizeof(*rd));
6693
68c38fc3 6694 if (!alloc_cpumask_var(&rd->span, GFP_KERNEL))
0c910d28 6695 goto out;
68c38fc3 6696 if (!alloc_cpumask_var(&rd->online, GFP_KERNEL))
c6c4927b 6697 goto free_span;
68c38fc3 6698 if (!alloc_cpumask_var(&rd->rto_mask, GFP_KERNEL))
c6c4927b 6699 goto free_online;
6e0534f2 6700
68c38fc3 6701 if (cpupri_init(&rd->cpupri) != 0)
68e74568 6702 goto free_rto_mask;
c6c4927b 6703 return 0;
6e0534f2 6704
68e74568
RR
6705free_rto_mask:
6706 free_cpumask_var(rd->rto_mask);
c6c4927b
RR
6707free_online:
6708 free_cpumask_var(rd->online);
6709free_span:
6710 free_cpumask_var(rd->span);
0c910d28 6711out:
c6c4927b 6712 return -ENOMEM;
57d885fe
GH
6713}
6714
6715static void init_defrootdomain(void)
6716{
68c38fc3 6717 init_rootdomain(&def_root_domain);
c6c4927b 6718
57d885fe
GH
6719 atomic_set(&def_root_domain.refcount, 1);
6720}
6721
dc938520 6722static struct root_domain *alloc_rootdomain(void)
57d885fe
GH
6723{
6724 struct root_domain *rd;
6725
6726 rd = kmalloc(sizeof(*rd), GFP_KERNEL);
6727 if (!rd)
6728 return NULL;
6729
68c38fc3 6730 if (init_rootdomain(rd) != 0) {
c6c4927b
RR
6731 kfree(rd);
6732 return NULL;
6733 }
57d885fe
GH
6734
6735 return rd;
6736}
6737
dce840a0
PZ
6738static void free_sched_domain(struct rcu_head *rcu)
6739{
6740 struct sched_domain *sd = container_of(rcu, struct sched_domain, rcu);
6741 if (atomic_dec_and_test(&sd->groups->ref))
6742 kfree(sd->groups);
6743 kfree(sd);
6744}
6745
6746static void destroy_sched_domain(struct sched_domain *sd, int cpu)
6747{
6748 call_rcu(&sd->rcu, free_sched_domain);
6749}
6750
6751static void destroy_sched_domains(struct sched_domain *sd, int cpu)
6752{
6753 for (; sd; sd = sd->parent)
6754 destroy_sched_domain(sd, cpu);
6755}
6756
1da177e4 6757/*
0eab9146 6758 * Attach the domain 'sd' to 'cpu' as its base domain. Callers must
1da177e4
LT
6759 * hold the hotplug lock.
6760 */
0eab9146
IM
6761static void
6762cpu_attach_domain(struct sched_domain *sd, struct root_domain *rd, int cpu)
1da177e4 6763{
70b97a7f 6764 struct rq *rq = cpu_rq(cpu);
245af2c7
SS
6765 struct sched_domain *tmp;
6766
6767 /* Remove the sched domains which do not contribute to scheduling. */
f29c9b1c 6768 for (tmp = sd; tmp; ) {
245af2c7
SS
6769 struct sched_domain *parent = tmp->parent;
6770 if (!parent)
6771 break;
f29c9b1c 6772
1a848870 6773 if (sd_parent_degenerate(tmp, parent)) {
245af2c7 6774 tmp->parent = parent->parent;
1a848870
SS
6775 if (parent->parent)
6776 parent->parent->child = tmp;
dce840a0 6777 destroy_sched_domain(parent, cpu);
f29c9b1c
LZ
6778 } else
6779 tmp = tmp->parent;
245af2c7
SS
6780 }
6781
1a848870 6782 if (sd && sd_degenerate(sd)) {
dce840a0 6783 tmp = sd;
245af2c7 6784 sd = sd->parent;
dce840a0 6785 destroy_sched_domain(tmp, cpu);
1a848870
SS
6786 if (sd)
6787 sd->child = NULL;
6788 }
1da177e4 6789
4cb98839 6790 sched_domain_debug(sd, cpu);
1da177e4 6791
57d885fe 6792 rq_attach_root(rq, rd);
dce840a0 6793 tmp = rq->sd;
674311d5 6794 rcu_assign_pointer(rq->sd, sd);
dce840a0 6795 destroy_sched_domains(tmp, cpu);
1da177e4
LT
6796}
6797
6798/* cpus with isolated domains */
dcc30a35 6799static cpumask_var_t cpu_isolated_map;
1da177e4
LT
6800
6801/* Setup the mask of cpus configured for isolated domains */
6802static int __init isolated_cpu_setup(char *str)
6803{
bdddd296 6804 alloc_bootmem_cpumask_var(&cpu_isolated_map);
968ea6d8 6805 cpulist_parse(str, cpu_isolated_map);
1da177e4
LT
6806 return 1;
6807}
6808
8927f494 6809__setup("isolcpus=", isolated_cpu_setup);
1da177e4 6810
9c1cfda2 6811#define SD_NODES_PER_DOMAIN 16
1da177e4 6812
9c1cfda2 6813#ifdef CONFIG_NUMA
198e2f18 6814
9c1cfda2
JH
6815/**
6816 * find_next_best_node - find the next node to include in a sched_domain
6817 * @node: node whose sched_domain we're building
6818 * @used_nodes: nodes already in the sched_domain
6819 *
41a2d6cf 6820 * Find the next node to include in a given scheduling domain. Simply
9c1cfda2
JH
6821 * finds the closest node not already in the @used_nodes map.
6822 *
6823 * Should use nodemask_t.
6824 */
c5f59f08 6825static int find_next_best_node(int node, nodemask_t *used_nodes)
9c1cfda2 6826{
7142d17e 6827 int i, n, val, min_val, best_node = -1;
9c1cfda2
JH
6828
6829 min_val = INT_MAX;
6830
076ac2af 6831 for (i = 0; i < nr_node_ids; i++) {
9c1cfda2 6832 /* Start at @node */
076ac2af 6833 n = (node + i) % nr_node_ids;
9c1cfda2
JH
6834
6835 if (!nr_cpus_node(n))
6836 continue;
6837
6838 /* Skip already used nodes */
c5f59f08 6839 if (node_isset(n, *used_nodes))
9c1cfda2
JH
6840 continue;
6841
6842 /* Simple min distance search */
6843 val = node_distance(node, n);
6844
6845 if (val < min_val) {
6846 min_val = val;
6847 best_node = n;
6848 }
6849 }
6850
7142d17e
HD
6851 if (best_node != -1)
6852 node_set(best_node, *used_nodes);
9c1cfda2
JH
6853 return best_node;
6854}
6855
6856/**
6857 * sched_domain_node_span - get a cpumask for a node's sched_domain
6858 * @node: node whose cpumask we're constructing
73486722 6859 * @span: resulting cpumask
9c1cfda2 6860 *
41a2d6cf 6861 * Given a node, construct a good cpumask for its sched_domain to span. It
9c1cfda2
JH
6862 * should be one that prevents unnecessary balancing, but also spreads tasks
6863 * out optimally.
6864 */
96f874e2 6865static void sched_domain_node_span(int node, struct cpumask *span)
9c1cfda2 6866{
c5f59f08 6867 nodemask_t used_nodes;
48f24c4d 6868 int i;
9c1cfda2 6869
6ca09dfc 6870 cpumask_clear(span);
c5f59f08 6871 nodes_clear(used_nodes);
9c1cfda2 6872
6ca09dfc 6873 cpumask_or(span, span, cpumask_of_node(node));
c5f59f08 6874 node_set(node, used_nodes);
9c1cfda2
JH
6875
6876 for (i = 1; i < SD_NODES_PER_DOMAIN; i++) {
c5f59f08 6877 int next_node = find_next_best_node(node, &used_nodes);
7142d17e
HD
6878 if (next_node < 0)
6879 break;
6ca09dfc 6880 cpumask_or(span, span, cpumask_of_node(next_node));
9c1cfda2 6881 }
9c1cfda2 6882}
d3081f52
PZ
6883
6884static const struct cpumask *cpu_node_mask(int cpu)
6885{
6886 lockdep_assert_held(&sched_domains_mutex);
6887
6888 sched_domain_node_span(cpu_to_node(cpu), sched_domains_tmpmask);
6889
6890 return sched_domains_tmpmask;
6891}
2c402dc3
PZ
6892
6893static const struct cpumask *cpu_allnodes_mask(int cpu)
6894{
6895 return cpu_possible_mask;
6896}
6d6bc0ad 6897#endif /* CONFIG_NUMA */
9c1cfda2 6898
d3081f52
PZ
6899static const struct cpumask *cpu_cpu_mask(int cpu)
6900{
6901 return cpumask_of_node(cpu_to_node(cpu));
6902}
6903
5c45bf27 6904int sched_smt_power_savings = 0, sched_mc_power_savings = 0;
48f24c4d 6905
dce840a0
PZ
6906struct sd_data {
6907 struct sched_domain **__percpu sd;
6908 struct sched_group **__percpu sg;
6909};
6910
49a02c51 6911struct s_data {
21d42ccf 6912 struct sched_domain ** __percpu sd;
49a02c51
AH
6913 struct root_domain *rd;
6914};
6915
2109b99e 6916enum s_alloc {
2109b99e 6917 sa_rootdomain,
21d42ccf 6918 sa_sd,
dce840a0 6919 sa_sd_storage,
2109b99e
AH
6920 sa_none,
6921};
6922
54ab4ff4
PZ
6923struct sched_domain_topology_level;
6924
6925typedef struct sched_domain *(*sched_domain_init_f)(struct sched_domain_topology_level *tl, int cpu);
eb7a74e6
PZ
6926typedef const struct cpumask *(*sched_domain_mask_f)(int cpu);
6927
6928struct sched_domain_topology_level {
2c402dc3
PZ
6929 sched_domain_init_f init;
6930 sched_domain_mask_f mask;
54ab4ff4 6931 struct sd_data data;
eb7a74e6
PZ
6932};
6933
9c1cfda2 6934/*
dce840a0 6935 * Assumes the sched_domain tree is fully constructed
9c1cfda2 6936 */
dce840a0 6937static int get_group(int cpu, struct sd_data *sdd, struct sched_group **sg)
1da177e4 6938{
dce840a0
PZ
6939 struct sched_domain *sd = *per_cpu_ptr(sdd->sd, cpu);
6940 struct sched_domain *child = sd->child;
1da177e4 6941
dce840a0
PZ
6942 if (child)
6943 cpu = cpumask_first(sched_domain_span(child));
1e9f28fa 6944
6711cab4 6945 if (sg)
dce840a0
PZ
6946 *sg = *per_cpu_ptr(sdd->sg, cpu);
6947
6948 return cpu;
1e9f28fa 6949}
1e9f28fa 6950
01a08546 6951/*
dce840a0
PZ
6952 * build_sched_groups takes the cpumask we wish to span, and a pointer
6953 * to a function which identifies what group (along with sched group) a CPU
6954 * belongs to. The return value of group_fn must be >= 0 and < nr_cpu_ids
6955 * (due to the fact that we keep track of groups covered with a struct cpumask).
6956 *
6957 * build_sched_groups will build a circular linked list of the groups
6958 * covered by the given span, and will set each group's ->cpumask correctly,
6959 * and ->cpu_power to 0.
01a08546 6960 */
dce840a0 6961static void
f96225fd 6962build_sched_groups(struct sched_domain *sd)
1da177e4 6963{
dce840a0
PZ
6964 struct sched_group *first = NULL, *last = NULL;
6965 struct sd_data *sdd = sd->private;
6966 const struct cpumask *span = sched_domain_span(sd);
f96225fd 6967 struct cpumask *covered;
dce840a0 6968 int i;
9c1cfda2 6969
f96225fd
PZ
6970 lockdep_assert_held(&sched_domains_mutex);
6971 covered = sched_domains_tmpmask;
6972
dce840a0 6973 cpumask_clear(covered);
6711cab4 6974
dce840a0
PZ
6975 for_each_cpu(i, span) {
6976 struct sched_group *sg;
6977 int group = get_group(i, sdd, &sg);
6978 int j;
6711cab4 6979
dce840a0
PZ
6980 if (cpumask_test_cpu(i, covered))
6981 continue;
6711cab4 6982
dce840a0
PZ
6983 cpumask_clear(sched_group_cpus(sg));
6984 sg->cpu_power = 0;
0601a88d 6985
dce840a0
PZ
6986 for_each_cpu(j, span) {
6987 if (get_group(j, sdd, NULL) != group)
6988 continue;
0601a88d 6989
dce840a0
PZ
6990 cpumask_set_cpu(j, covered);
6991 cpumask_set_cpu(j, sched_group_cpus(sg));
6992 }
0601a88d 6993
dce840a0
PZ
6994 if (!first)
6995 first = sg;
6996 if (last)
6997 last->next = sg;
6998 last = sg;
6999 }
7000 last->next = first;
0601a88d 7001}
51888ca2 7002
89c4710e
SS
7003/*
7004 * Initialize sched groups cpu_power.
7005 *
7006 * cpu_power indicates the capacity of a sched group, which is used while
7007 * distributing the load between different sched groups in a sched domain.
7008 * Typically cpu_power for all the groups in a sched domain will be the same
7009 * unless there are asymmetries in the topology. If there are asymmetries, the
7010 * group having more cpu_power will pick up more load compared to the group having
7011 * less cpu_power.
89c4710e
SS
7012 */
7013static void init_sched_groups_power(int cpu, struct sched_domain *sd)
7014{
89c4710e
SS
7015 WARN_ON(!sd || !sd->groups);
7016
13318a71 7017 if (cpu != group_first_cpu(sd->groups))
89c4710e
SS
7018 return;
7019
aae6d3dd
SS
7020 sd->groups->group_weight = cpumask_weight(sched_group_cpus(sd->groups));
7021
d274cb30 7022 update_group_power(sd, cpu);
89c4710e
SS
7023}
7024
7c16ec58
MT
7025/*
7026 * Initializers for schedule domains
7027 * Non-inlined to reduce accumulated stack pressure in build_sched_domains()
7028 */
7029
a5d8c348
IM
7030#ifdef CONFIG_SCHED_DEBUG
7031# define SD_INIT_NAME(sd, type) sd->name = #type
7032#else
7033# define SD_INIT_NAME(sd, type) do { } while (0)
7034#endif
7035
54ab4ff4
PZ
7036#define SD_INIT_FUNC(type) \
7037static noinline struct sched_domain * \
7038sd_init_##type(struct sched_domain_topology_level *tl, int cpu) \
7039{ \
7040 struct sched_domain *sd = *per_cpu_ptr(tl->data.sd, cpu); \
7041 *sd = SD_##type##_INIT; \
54ab4ff4
PZ
7042 SD_INIT_NAME(sd, type); \
7043 sd->private = &tl->data; \
7044 return sd; \
7c16ec58
MT
7045}
7046
7047SD_INIT_FUNC(CPU)
7048#ifdef CONFIG_NUMA
7049 SD_INIT_FUNC(ALLNODES)
7050 SD_INIT_FUNC(NODE)
7051#endif
7052#ifdef CONFIG_SCHED_SMT
7053 SD_INIT_FUNC(SIBLING)
7054#endif
7055#ifdef CONFIG_SCHED_MC
7056 SD_INIT_FUNC(MC)
7057#endif
01a08546
HC
7058#ifdef CONFIG_SCHED_BOOK
7059 SD_INIT_FUNC(BOOK)
7060#endif
7c16ec58 7061
1d3504fc 7062static int default_relax_domain_level = -1;
60495e77 7063int sched_domain_level_max;
1d3504fc
HS
7064
7065static int __init setup_relax_domain_level(char *str)
7066{
30e0e178
LZ
7067 unsigned long val;
7068
7069 val = simple_strtoul(str, NULL, 0);
60495e77 7070 if (val < sched_domain_level_max)
30e0e178
LZ
7071 default_relax_domain_level = val;
7072
1d3504fc
HS
7073 return 1;
7074}
7075__setup("relax_domain_level=", setup_relax_domain_level);
7076
7077static void set_domain_attribute(struct sched_domain *sd,
7078 struct sched_domain_attr *attr)
7079{
7080 int request;
7081
7082 if (!attr || attr->relax_domain_level < 0) {
7083 if (default_relax_domain_level < 0)
7084 return;
7085 else
7086 request = default_relax_domain_level;
7087 } else
7088 request = attr->relax_domain_level;
7089 if (request < sd->level) {
7090 /* turn off idle balance on this domain */
c88d5910 7091 sd->flags &= ~(SD_BALANCE_WAKE|SD_BALANCE_NEWIDLE);
1d3504fc
HS
7092 } else {
7093 /* turn on idle balance on this domain */
c88d5910 7094 sd->flags |= (SD_BALANCE_WAKE|SD_BALANCE_NEWIDLE);
1d3504fc
HS
7095 }
7096}
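/*
 * Worked example (editorial note, not in the original source): booting
 * with "relax_domain_level=1" makes default_relax_domain_level 1, so
 * set_domain_attribute() above keeps SD_BALANCE_WAKE/SD_BALANCE_NEWIDLE
 * on domains whose ->level is <= 1 and clears both flags on every
 * higher-level domain.
 */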
7097
54ab4ff4
PZ
7098static void __sdt_free(const struct cpumask *cpu_map);
7099static int __sdt_alloc(const struct cpumask *cpu_map);
7100
2109b99e
AH
7101static void __free_domain_allocs(struct s_data *d, enum s_alloc what,
7102 const struct cpumask *cpu_map)
7103{
7104 switch (what) {
2109b99e 7105 case sa_rootdomain:
822ff793
PZ
7106 if (!atomic_read(&d->rd->refcount))
7107 free_rootdomain(&d->rd->rcu); /* fall through */
21d42ccf
PZ
7108 case sa_sd:
7109 free_percpu(d->sd); /* fall through */
dce840a0 7110 case sa_sd_storage:
54ab4ff4 7111 __sdt_free(cpu_map); /* fall through */
2109b99e
AH
7112 case sa_none:
7113 break;
7114 }
7115}
3404c8d9 7116
2109b99e
AH
7117static enum s_alloc __visit_domain_allocation_hell(struct s_data *d,
7118 const struct cpumask *cpu_map)
7119{
dce840a0
PZ
7120 memset(d, 0, sizeof(*d));
7121
54ab4ff4
PZ
7122 if (__sdt_alloc(cpu_map))
7123 return sa_sd_storage;
dce840a0
PZ
7124 d->sd = alloc_percpu(struct sched_domain *);
7125 if (!d->sd)
7126 return sa_sd_storage;
2109b99e 7127 d->rd = alloc_rootdomain();
dce840a0 7128 if (!d->rd)
21d42ccf 7129 return sa_sd;
2109b99e
AH
7130 return sa_rootdomain;
7131}
57d885fe 7132
dce840a0
PZ
7133/*
7134 * NULL the sd_data elements we've used to build the sched_domain and
7135 * sched_group structure so that the subsequent __free_domain_allocs()
7136 * will not free the data we're using.
7137 */
7138static void claim_allocations(int cpu, struct sched_domain *sd)
7139{
7140 struct sd_data *sdd = sd->private;
7141 struct sched_group *sg = sd->groups;
7142
7143 WARN_ON_ONCE(*per_cpu_ptr(sdd->sd, cpu) != sd);
7144 *per_cpu_ptr(sdd->sd, cpu) = NULL;
7145
7146 if (cpu == cpumask_first(sched_group_cpus(sg))) {
7147 WARN_ON_ONCE(*per_cpu_ptr(sdd->sg, cpu) != sg);
7148 *per_cpu_ptr(sdd->sg, cpu) = NULL;
7149 }
7150}
7151
2c402dc3
PZ
7152#ifdef CONFIG_SCHED_SMT
7153static const struct cpumask *cpu_smt_mask(int cpu)
7f4588f3 7154{
2c402dc3 7155 return topology_thread_cpumask(cpu);
3bd65a80 7156}
2c402dc3 7157#endif
7f4588f3 7158
d069b916
PZ
7159/*
7160 * Topology list, bottom-up.
7161 */
2c402dc3 7162static struct sched_domain_topology_level default_topology[] = {
d069b916
PZ
7163#ifdef CONFIG_SCHED_SMT
7164 { sd_init_SIBLING, cpu_smt_mask, },
01a08546 7165#endif
1e9f28fa 7166#ifdef CONFIG_SCHED_MC
2c402dc3 7167 { sd_init_MC, cpu_coregroup_mask, },
1e9f28fa 7168#endif
d069b916
PZ
7169#ifdef CONFIG_SCHED_BOOK
7170 { sd_init_BOOK, cpu_book_mask, },
7171#endif
7172 { sd_init_CPU, cpu_cpu_mask, },
7173#ifdef CONFIG_NUMA
7174 { sd_init_NODE, cpu_node_mask, },
7175 { sd_init_ALLNODES, cpu_allnodes_mask, },
1da177e4 7176#endif
eb7a74e6
PZ
7177 { NULL, },
7178};
7179
7180static struct sched_domain_topology_level *sched_domain_topology = default_topology;
7181
54ab4ff4
PZ
7182static int __sdt_alloc(const struct cpumask *cpu_map)
7183{
7184 struct sched_domain_topology_level *tl;
7185 int j;
7186
7187 for (tl = sched_domain_topology; tl->init; tl++) {
7188 struct sd_data *sdd = &tl->data;
7189
7190 sdd->sd = alloc_percpu(struct sched_domain *);
7191 if (!sdd->sd)
7192 return -ENOMEM;
7193
7194 sdd->sg = alloc_percpu(struct sched_group *);
7195 if (!sdd->sg)
7196 return -ENOMEM;
7197
7198 for_each_cpu(j, cpu_map) {
7199 struct sched_domain *sd;
7200 struct sched_group *sg;
7201
7202 sd = kzalloc_node(sizeof(struct sched_domain) + cpumask_size(),
7203 GFP_KERNEL, cpu_to_node(j));
7204 if (!sd)
7205 return -ENOMEM;
7206
7207 *per_cpu_ptr(sdd->sd, j) = sd;
7208
7209 sg = kzalloc_node(sizeof(struct sched_group) + cpumask_size(),
7210 GFP_KERNEL, cpu_to_node(j));
7211 if (!sg)
7212 return -ENOMEM;
7213
7214 *per_cpu_ptr(sdd->sg, j) = sg;
7215 }
7216 }
7217
7218 return 0;
7219}
7220
7221static void __sdt_free(const struct cpumask *cpu_map)
7222{
7223 struct sched_domain_topology_level *tl;
7224 int j;
7225
7226 for (tl = sched_domain_topology; tl->init; tl++) {
7227 struct sd_data *sdd = &tl->data;
7228
7229 for_each_cpu(j, cpu_map) {
7230 kfree(*per_cpu_ptr(sdd->sd, j));
7231 kfree(*per_cpu_ptr(sdd->sg, j));
7232 }
7233 free_percpu(sdd->sd);
7234 free_percpu(sdd->sg);
7235 }
7236}
7237
2c402dc3
PZ
7238struct sched_domain *build_sched_domain(struct sched_domain_topology_level *tl,
7239 struct s_data *d, const struct cpumask *cpu_map,
d069b916 7240 struct sched_domain_attr *attr, struct sched_domain *child,
2c402dc3
PZ
7241 int cpu)
7242{
54ab4ff4 7243 struct sched_domain *sd = tl->init(tl, cpu);
2c402dc3 7244 if (!sd)
d069b916 7245 return child;
2c402dc3
PZ
7246
7247 set_domain_attribute(sd, attr);
7248 cpumask_and(sched_domain_span(sd), cpu_map, tl->mask(cpu));
60495e77
PZ
7249 if (child) {
7250 sd->level = child->level + 1;
7251 sched_domain_level_max = max(sched_domain_level_max, sd->level);
d069b916 7252 child->parent = sd;
60495e77 7253 }
d069b916 7254 sd->child = child;
2c402dc3
PZ
7255
7256 return sd;
7257}
7258
2109b99e
AH
7259/*
7260 * Build sched domains for a given set of cpus and attach the sched domains
7261 * to the individual cpus
7262 */
dce840a0
PZ
7263static int build_sched_domains(const struct cpumask *cpu_map,
7264 struct sched_domain_attr *attr)
2109b99e
AH
7265{
7266 enum s_alloc alloc_state = sa_none;
dce840a0 7267 struct sched_domain *sd;
2109b99e 7268 struct s_data d;
822ff793 7269 int i, ret = -ENOMEM;
9c1cfda2 7270
2109b99e
AH
7271 alloc_state = __visit_domain_allocation_hell(&d, cpu_map);
7272 if (alloc_state != sa_rootdomain)
7273 goto error;
9c1cfda2 7274
dce840a0 7275 /* Set up domains for cpus specified by the cpu_map. */
abcd083a 7276 for_each_cpu(i, cpu_map) {
eb7a74e6
PZ
7277 struct sched_domain_topology_level *tl;
7278
3bd65a80 7279 sd = NULL;
2c402dc3
PZ
7280 for (tl = sched_domain_topology; tl->init; tl++)
7281 sd = build_sched_domain(tl, &d, cpu_map, attr, sd, i);
d274cb30 7282
d069b916
PZ
7283 while (sd->child)
7284 sd = sd->child;
7285
21d42ccf 7286 *per_cpu_ptr(d.sd, i) = sd;
dce840a0
PZ
7287 }
7288
7289 /* Build the groups for the domains */
7290 for_each_cpu(i, cpu_map) {
7291 for (sd = *per_cpu_ptr(d.sd, i); sd; sd = sd->parent) {
7292 sd->span_weight = cpumask_weight(sched_domain_span(sd));
7293 get_group(i, sd->private, &sd->groups);
7294 atomic_inc(&sd->groups->ref);
21d42ccf 7295
dce840a0
PZ
7296 if (i != cpumask_first(sched_domain_span(sd)))
7297 continue;
7298
f96225fd 7299 build_sched_groups(sd);
1cf51902 7300 }
a06dadbe 7301 }
9c1cfda2 7302
1da177e4 7303 /* Calculate CPU power for physical packages and nodes */
a9c9a9b6
PZ
7304 for (i = nr_cpumask_bits-1; i >= 0; i--) {
7305 if (!cpumask_test_cpu(i, cpu_map))
7306 continue;
9c1cfda2 7307
dce840a0
PZ
7308 for (sd = *per_cpu_ptr(d.sd, i); sd; sd = sd->parent) {
7309 claim_allocations(i, sd);
cd4ea6ae 7310 init_sched_groups_power(i, sd);
dce840a0 7311 }
f712c0c7 7312 }
9c1cfda2 7313
1da177e4 7314 /* Attach the domains */
dce840a0 7315 rcu_read_lock();
abcd083a 7316 for_each_cpu(i, cpu_map) {
21d42ccf 7317 sd = *per_cpu_ptr(d.sd, i);
49a02c51 7318 cpu_attach_domain(sd, d.rd, i);
1da177e4 7319 }
dce840a0 7320 rcu_read_unlock();
51888ca2 7321
822ff793 7322 ret = 0;
51888ca2 7323error:
2109b99e 7324 __free_domain_allocs(&d, alloc_state, cpu_map);
822ff793 7325 return ret;
1da177e4 7326}
029190c5 7327
acc3f5d7 7328static cpumask_var_t *doms_cur; /* current sched domains */
029190c5 7329static int ndoms_cur; /* number of sched domains in 'doms_cur' */
4285f594
IM
7330static struct sched_domain_attr *dattr_cur;
 7331 /* attributes of custom domains in 'doms_cur' */
029190c5
PJ
7332
7333/*
7334 * Special case: If a kmalloc of a doms_cur partition (array of
4212823f
RR
 7335 * cpumask) fails, then fall back to a single sched domain,
7336 * as determined by the single cpumask fallback_doms.
029190c5 7337 */
4212823f 7338static cpumask_var_t fallback_doms;
029190c5 7339
ee79d1bd
HC
7340/*
7341 * arch_update_cpu_topology lets virtualized architectures update the
7342 * cpu core maps. It is supposed to return 1 if the topology changed
7343 * or 0 if it stayed the same.
7344 */
7345int __attribute__((weak)) arch_update_cpu_topology(void)
22e52b07 7346{
ee79d1bd 7347 return 0;
22e52b07
HC
7348}
7349
acc3f5d7
RR
7350cpumask_var_t *alloc_sched_domains(unsigned int ndoms)
7351{
7352 int i;
7353 cpumask_var_t *doms;
7354
7355 doms = kmalloc(sizeof(*doms) * ndoms, GFP_KERNEL);
7356 if (!doms)
7357 return NULL;
7358 for (i = 0; i < ndoms; i++) {
7359 if (!alloc_cpumask_var(&doms[i], GFP_KERNEL)) {
7360 free_sched_domains(doms, i);
7361 return NULL;
7362 }
7363 }
7364 return doms;
7365}
7366
7367void free_sched_domains(cpumask_var_t doms[], unsigned int ndoms)
7368{
7369 unsigned int i;
7370 for (i = 0; i < ndoms; i++)
7371 free_cpumask_var(doms[i]);
7372 kfree(doms);
7373}
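/*
 * Editorial note: free_sched_domains() is the pair of alloc_sched_domains()
 * above.  As documented for partition_sched_domains() below, an array that
 * has been successfully handed over is owned (and eventually freed) by the
 * scheduler, so callers only free arrays they still own, e.g. on an error
 * path before the hand-over.
 */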
7374
1a20ff27 7375/*
41a2d6cf 7376 * Set up scheduler domains and groups. Callers must hold the hotplug lock.
029190c5
PJ
7377 * For now this just excludes isolated cpus, but could be used to
7378 * exclude other special cases in the future.
1a20ff27 7379 */
c4a8849a 7380static int init_sched_domains(const struct cpumask *cpu_map)
1a20ff27 7381{
7378547f
MM
7382 int err;
7383
22e52b07 7384 arch_update_cpu_topology();
029190c5 7385 ndoms_cur = 1;
acc3f5d7 7386 doms_cur = alloc_sched_domains(ndoms_cur);
029190c5 7387 if (!doms_cur)
acc3f5d7
RR
7388 doms_cur = &fallback_doms;
7389 cpumask_andnot(doms_cur[0], cpu_map, cpu_isolated_map);
1d3504fc 7390 dattr_cur = NULL;
dce840a0 7391 err = build_sched_domains(doms_cur[0], NULL);
6382bc90 7392 register_sched_domain_sysctl();
7378547f
MM
7393
7394 return err;
1a20ff27
DG
7395}
7396
1a20ff27
DG
7397/*
7398 * Detach sched domains from a group of cpus specified in cpu_map
7399 * These cpus will now be attached to the NULL domain
7400 */
96f874e2 7401static void detach_destroy_domains(const struct cpumask *cpu_map)
1a20ff27
DG
7402{
7403 int i;
7404
dce840a0 7405 rcu_read_lock();
abcd083a 7406 for_each_cpu(i, cpu_map)
57d885fe 7407 cpu_attach_domain(NULL, &def_root_domain, i);
dce840a0 7408 rcu_read_unlock();
1a20ff27
DG
7409}
7410
1d3504fc
HS
7411/* handle null as "default" */
7412static int dattrs_equal(struct sched_domain_attr *cur, int idx_cur,
7413 struct sched_domain_attr *new, int idx_new)
7414{
7415 struct sched_domain_attr tmp;
7416
7417 /* fast path */
7418 if (!new && !cur)
7419 return 1;
7420
7421 tmp = SD_ATTR_INIT;
7422 return !memcmp(cur ? (cur + idx_cur) : &tmp,
7423 new ? (new + idx_new) : &tmp,
7424 sizeof(struct sched_domain_attr));
7425}
7426
029190c5
PJ
7427/*
7428 * Partition sched domains as specified by the 'ndoms_new'
41a2d6cf 7429 * cpumasks in the array doms_new[] of cpumasks. This compares
029190c5
PJ
7430 * doms_new[] to the current sched domain partitioning, doms_cur[].
7431 * It destroys each deleted domain and builds each new domain.
7432 *
acc3f5d7 7433 * 'doms_new' is an array of cpumask_var_t's of length 'ndoms_new'.
41a2d6cf
IM
 7434 * The masks don't intersect (don't overlap). We should set up one
7435 * sched domain for each mask. CPUs not in any of the cpumasks will
7436 * not be load balanced. If the same cpumask appears both in the
029190c5
PJ
7437 * current 'doms_cur' domains and in the new 'doms_new', we can leave
7438 * it as it is.
7439 *
acc3f5d7
RR
7440 * The passed in 'doms_new' should be allocated using
7441 * alloc_sched_domains. This routine takes ownership of it and will
7442 * free_sched_domains it when done with it. If the caller failed the
7443 * alloc call, then it can pass in doms_new == NULL && ndoms_new == 1,
 7445 * and partition_sched_domains() will fall back to the single partition
 7446 * 'fallback_doms'; this also forces the domains to be rebuilt.
029190c5 7446 *
96f874e2 7447 * If doms_new == NULL it will be replaced with cpu_online_mask.
700018e0
LZ
7448 * ndoms_new == 0 is a special case for destroying existing domains,
7449 * and it will not create the default domain.
dfb512ec 7450 *
029190c5
PJ
7451 * Call with hotplug lock held
7452 */
acc3f5d7 7453void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
1d3504fc 7454 struct sched_domain_attr *dattr_new)
029190c5 7455{
dfb512ec 7456 int i, j, n;
d65bd5ec 7457 int new_topology;
029190c5 7458
712555ee 7459 mutex_lock(&sched_domains_mutex);
a1835615 7460
7378547f
MM
7461 /* always unregister in case we don't destroy any domains */
7462 unregister_sched_domain_sysctl();
7463
d65bd5ec
HC
7464 /* Let architecture update cpu core mappings. */
7465 new_topology = arch_update_cpu_topology();
7466
dfb512ec 7467 n = doms_new ? ndoms_new : 0;
029190c5
PJ
7468
7469 /* Destroy deleted domains */
7470 for (i = 0; i < ndoms_cur; i++) {
d65bd5ec 7471 for (j = 0; j < n && !new_topology; j++) {
acc3f5d7 7472 if (cpumask_equal(doms_cur[i], doms_new[j])
1d3504fc 7473 && dattrs_equal(dattr_cur, i, dattr_new, j))
029190c5
PJ
7474 goto match1;
7475 }
7476 /* no match - a current sched domain not in new doms_new[] */
acc3f5d7 7477 detach_destroy_domains(doms_cur[i]);
029190c5
PJ
7478match1:
7479 ;
7480 }
7481
e761b772
MK
7482 if (doms_new == NULL) {
7483 ndoms_cur = 0;
acc3f5d7 7484 doms_new = &fallback_doms;
6ad4c188 7485 cpumask_andnot(doms_new[0], cpu_active_mask, cpu_isolated_map);
faa2f98f 7486 WARN_ON_ONCE(dattr_new);
e761b772
MK
7487 }
7488
029190c5
PJ
7489 /* Build new domains */
7490 for (i = 0; i < ndoms_new; i++) {
d65bd5ec 7491 for (j = 0; j < ndoms_cur && !new_topology; j++) {
acc3f5d7 7492 if (cpumask_equal(doms_new[i], doms_cur[j])
1d3504fc 7493 && dattrs_equal(dattr_new, i, dattr_cur, j))
029190c5
PJ
7494 goto match2;
7495 }
7496 /* no match - add a new doms_new */
dce840a0 7497 build_sched_domains(doms_new[i], dattr_new ? dattr_new + i : NULL);
029190c5
PJ
7498match2:
7499 ;
7500 }
7501
7502 /* Remember the new sched domains */
acc3f5d7
RR
7503 if (doms_cur != &fallback_doms)
7504 free_sched_domains(doms_cur, ndoms_cur);
1d3504fc 7505 kfree(dattr_cur); /* kfree(NULL) is safe */
029190c5 7506 doms_cur = doms_new;
1d3504fc 7507 dattr_cur = dattr_new;
029190c5 7508 ndoms_cur = ndoms_new;
7378547f
MM
7509
7510 register_sched_domain_sysctl();
a1835615 7511
712555ee 7512 mutex_unlock(&sched_domains_mutex);
029190c5
PJ
7513}
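/*
 * Editorial usage sketch, not part of the original source.  The cpumask
 * contents below are made up purely for illustration and error handling is
 * minimal.  It shows the ownership rule documented above: the array from
 * alloc_sched_domains() is handed over to partition_sched_domains(), which
 * frees it itself once it is replaced.
 */
static void __maybe_unused example_two_way_partition(void)
{
	cpumask_var_t *doms = alloc_sched_domains(2);

	if (!doms)
		return;

	/* made-up split: CPU0 in one partition, CPU1 in the other */
	cpumask_copy(doms[0], cpumask_of(0));
	cpumask_copy(doms[1], cpumask_of(1));

	get_online_cpus();		/* hotplug lock, as required above */
	partition_sched_domains(2, doms, NULL);
	put_online_cpus();
}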
7514
5c45bf27 7515#if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
c4a8849a 7516static void reinit_sched_domains(void)
5c45bf27 7517{
95402b38 7518 get_online_cpus();
dfb512ec
MK
7519
7520 /* Destroy domains first to force the rebuild */
7521 partition_sched_domains(0, NULL, NULL);
7522
e761b772 7523 rebuild_sched_domains();
95402b38 7524 put_online_cpus();
5c45bf27
SS
7525}
7526
7527static ssize_t sched_power_savings_store(const char *buf, size_t count, int smt)
7528{
afb8a9b7 7529 unsigned int level = 0;
5c45bf27 7530
afb8a9b7
GS
7531 if (sscanf(buf, "%u", &level) != 1)
7532 return -EINVAL;
7533
7534 /*
 7535 * level is always positive, so don't check for
 7536 * level < POWERSAVINGS_BALANCE_NONE, which is 0.
 7537 * What happens on a 0 or 1 byte write --
 7538 * do we need to check count as well?
7539 */
7540
7541 if (level >= MAX_POWERSAVINGS_BALANCE_LEVELS)
5c45bf27
SS
7542 return -EINVAL;
7543
7544 if (smt)
afb8a9b7 7545 sched_smt_power_savings = level;
5c45bf27 7546 else
afb8a9b7 7547 sched_mc_power_savings = level;
5c45bf27 7548
c4a8849a 7549 reinit_sched_domains();
5c45bf27 7550
c70f22d2 7551 return count;
5c45bf27
SS
7552}
7553
5c45bf27 7554#ifdef CONFIG_SCHED_MC
f718cd4a 7555static ssize_t sched_mc_power_savings_show(struct sysdev_class *class,
c9be0a36 7556 struct sysdev_class_attribute *attr,
f718cd4a 7557 char *page)
5c45bf27
SS
7558{
7559 return sprintf(page, "%u\n", sched_mc_power_savings);
7560}
f718cd4a 7561static ssize_t sched_mc_power_savings_store(struct sysdev_class *class,
c9be0a36 7562 struct sysdev_class_attribute *attr,
48f24c4d 7563 const char *buf, size_t count)
5c45bf27
SS
7564{
7565 return sched_power_savings_store(buf, count, 0);
7566}
f718cd4a
AK
7567static SYSDEV_CLASS_ATTR(sched_mc_power_savings, 0644,
7568 sched_mc_power_savings_show,
7569 sched_mc_power_savings_store);
5c45bf27
SS
7570#endif
7571
7572#ifdef CONFIG_SCHED_SMT
f718cd4a 7573static ssize_t sched_smt_power_savings_show(struct sysdev_class *dev,
c9be0a36 7574 struct sysdev_class_attribute *attr,
f718cd4a 7575 char *page)
5c45bf27
SS
7576{
7577 return sprintf(page, "%u\n", sched_smt_power_savings);
7578}
f718cd4a 7579static ssize_t sched_smt_power_savings_store(struct sysdev_class *dev,
c9be0a36 7580 struct sysdev_class_attribute *attr,
48f24c4d 7581 const char *buf, size_t count)
5c45bf27
SS
7582{
7583 return sched_power_savings_store(buf, count, 1);
7584}
f718cd4a
AK
7585static SYSDEV_CLASS_ATTR(sched_smt_power_savings, 0644,
7586 sched_smt_power_savings_show,
6707de00
AB
7587 sched_smt_power_savings_store);
7588#endif
7589
39aac648 7590int __init sched_create_sysfs_power_savings_entries(struct sysdev_class *cls)
6707de00
AB
7591{
7592 int err = 0;
7593
7594#ifdef CONFIG_SCHED_SMT
7595 if (smt_capable())
7596 err = sysfs_create_file(&cls->kset.kobj,
7597 &attr_sched_smt_power_savings.attr);
7598#endif
7599#ifdef CONFIG_SCHED_MC
7600 if (!err && mc_capable())
7601 err = sysfs_create_file(&cls->kset.kobj,
7602 &attr_sched_mc_power_savings.attr);
7603#endif
7604 return err;
7605}
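/*
 * Editorial note: these attributes hang off the cpu sysdev class, so on a
 * typical system they are expected to appear as
 * /sys/devices/system/cpu/sched_mc_power_savings and
 * /sys/devices/system/cpu/sched_smt_power_savings (exact paths stated as an
 * assumption, not taken from this file).
 */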
6d6bc0ad 7606#endif /* CONFIG_SCHED_MC || CONFIG_SCHED_SMT */
5c45bf27 7607
1da177e4 7608/*
3a101d05
TH
7609 * Update cpusets according to cpu_active mask. If cpusets are
7610 * disabled, cpuset_update_active_cpus() becomes a simple wrapper
7611 * around partition_sched_domains().
1da177e4 7612 */
0b2e918a
TH
7613static int cpuset_cpu_active(struct notifier_block *nfb, unsigned long action,
7614 void *hcpu)
e761b772 7615{
3a101d05 7616 switch (action & ~CPU_TASKS_FROZEN) {
e761b772 7617 case CPU_ONLINE:
6ad4c188 7618 case CPU_DOWN_FAILED:
3a101d05 7619 cpuset_update_active_cpus();
e761b772 7620 return NOTIFY_OK;
3a101d05
TH
7621 default:
7622 return NOTIFY_DONE;
7623 }
7624}
e761b772 7625
0b2e918a
TH
7626static int cpuset_cpu_inactive(struct notifier_block *nfb, unsigned long action,
7627 void *hcpu)
3a101d05
TH
7628{
7629 switch (action & ~CPU_TASKS_FROZEN) {
7630 case CPU_DOWN_PREPARE:
7631 cpuset_update_active_cpus();
7632 return NOTIFY_OK;
e761b772
MK
7633 default:
7634 return NOTIFY_DONE;
7635 }
7636}
e761b772
MK
7637
7638static int update_runtime(struct notifier_block *nfb,
7639 unsigned long action, void *hcpu)
1da177e4 7640{
7def2be1
PZ
7641 int cpu = (int)(long)hcpu;
7642
1da177e4 7643 switch (action) {
1da177e4 7644 case CPU_DOWN_PREPARE:
8bb78442 7645 case CPU_DOWN_PREPARE_FROZEN:
7def2be1 7646 disable_runtime(cpu_rq(cpu));
1da177e4
LT
7647 return NOTIFY_OK;
7648
1da177e4 7649 case CPU_DOWN_FAILED:
8bb78442 7650 case CPU_DOWN_FAILED_FROZEN:
1da177e4 7651 case CPU_ONLINE:
8bb78442 7652 case CPU_ONLINE_FROZEN:
7def2be1 7653 enable_runtime(cpu_rq(cpu));
e761b772
MK
7654 return NOTIFY_OK;
7655
1da177e4
LT
7656 default:
7657 return NOTIFY_DONE;
7658 }
1da177e4 7659}
1da177e4
LT
7660
7661void __init sched_init_smp(void)
7662{
dcc30a35
RR
7663 cpumask_var_t non_isolated_cpus;
7664
7665 alloc_cpumask_var(&non_isolated_cpus, GFP_KERNEL);
cb5fd13f 7666 alloc_cpumask_var(&fallback_doms, GFP_KERNEL);
5c1e1767 7667
95402b38 7668 get_online_cpus();
712555ee 7669 mutex_lock(&sched_domains_mutex);
c4a8849a 7670 init_sched_domains(cpu_active_mask);
dcc30a35
RR
7671 cpumask_andnot(non_isolated_cpus, cpu_possible_mask, cpu_isolated_map);
7672 if (cpumask_empty(non_isolated_cpus))
7673 cpumask_set_cpu(smp_processor_id(), non_isolated_cpus);
712555ee 7674 mutex_unlock(&sched_domains_mutex);
95402b38 7675 put_online_cpus();
e761b772 7676
3a101d05
TH
7677 hotcpu_notifier(cpuset_cpu_active, CPU_PRI_CPUSET_ACTIVE);
7678 hotcpu_notifier(cpuset_cpu_inactive, CPU_PRI_CPUSET_INACTIVE);
e761b772
MK
7679
7680 /* RT runtime code needs to handle some hotplug events */
7681 hotcpu_notifier(update_runtime, 0);
7682
b328ca18 7683 init_hrtick();
5c1e1767
NP
7684
7685 /* Move init over to a non-isolated CPU */
dcc30a35 7686 if (set_cpus_allowed_ptr(current, non_isolated_cpus) < 0)
5c1e1767 7687 BUG();
19978ca6 7688 sched_init_granularity();
dcc30a35 7689 free_cpumask_var(non_isolated_cpus);
4212823f 7690
0e3900e6 7691 init_sched_rt_class();
1da177e4
LT
7692}
7693#else
7694void __init sched_init_smp(void)
7695{
19978ca6 7696 sched_init_granularity();
1da177e4
LT
7697}
7698#endif /* CONFIG_SMP */
7699
cd1bb94b
AB
7700const_debug unsigned int sysctl_timer_migration = 1;
7701
1da177e4
LT
7702int in_sched_functions(unsigned long addr)
7703{
1da177e4
LT
7704 return in_lock_functions(addr) ||
7705 (addr >= (unsigned long)__sched_text_start
7706 && addr < (unsigned long)__sched_text_end);
7707}
7708
a9957449 7709static void init_cfs_rq(struct cfs_rq *cfs_rq, struct rq *rq)
dd41f596
IM
7710{
7711 cfs_rq->tasks_timeline = RB_ROOT;
4a55bd5e 7712 INIT_LIST_HEAD(&cfs_rq->tasks);
dd41f596
IM
7713#ifdef CONFIG_FAIR_GROUP_SCHED
7714 cfs_rq->rq = rq;
f07333bf 7715 /* allow initial update_cfs_load() to truncate */
6ea72f12 7716#ifdef CONFIG_SMP
f07333bf 7717 cfs_rq->load_stamp = 1;
6ea72f12 7718#endif
dd41f596 7719#endif
67e9fb2a 7720 cfs_rq->min_vruntime = (u64)(-(1LL << 20));
dd41f596
IM
7721}
7722
fa85ae24
PZ
7723static void init_rt_rq(struct rt_rq *rt_rq, struct rq *rq)
7724{
7725 struct rt_prio_array *array;
7726 int i;
7727
7728 array = &rt_rq->active;
7729 for (i = 0; i < MAX_RT_PRIO; i++) {
7730 INIT_LIST_HEAD(array->queue + i);
7731 __clear_bit(i, array->bitmap);
7732 }
7733 /* delimiter for bitsearch: */
7734 __set_bit(MAX_RT_PRIO, array->bitmap);
7735
052f1dc7 7736#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
e864c499 7737 rt_rq->highest_prio.curr = MAX_RT_PRIO;
398a153b 7738#ifdef CONFIG_SMP
e864c499 7739 rt_rq->highest_prio.next = MAX_RT_PRIO;
48d5e258 7740#endif
48d5e258 7741#endif
fa85ae24
PZ
7742#ifdef CONFIG_SMP
7743 rt_rq->rt_nr_migratory = 0;
fa85ae24 7744 rt_rq->overloaded = 0;
05fa785c 7745 plist_head_init_raw(&rt_rq->pushable_tasks, &rq->lock);
fa85ae24
PZ
7746#endif
7747
7748 rt_rq->rt_time = 0;
7749 rt_rq->rt_throttled = 0;
ac086bc2 7750 rt_rq->rt_runtime = 0;
0986b11b 7751 raw_spin_lock_init(&rt_rq->rt_runtime_lock);
6f505b16 7752
052f1dc7 7753#ifdef CONFIG_RT_GROUP_SCHED
23b0fdfc 7754 rt_rq->rt_nr_boosted = 0;
6f505b16
PZ
7755 rt_rq->rq = rq;
7756#endif
fa85ae24
PZ
7757}
7758
6f505b16 7759#ifdef CONFIG_FAIR_GROUP_SCHED
ec7dc8ac 7760static void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq,
3d4b47b4 7761 struct sched_entity *se, int cpu,
ec7dc8ac 7762 struct sched_entity *parent)
6f505b16 7763{
ec7dc8ac 7764 struct rq *rq = cpu_rq(cpu);
6f505b16
PZ
7765 tg->cfs_rq[cpu] = cfs_rq;
7766 init_cfs_rq(cfs_rq, rq);
7767 cfs_rq->tg = tg;
6f505b16
PZ
7768
7769 tg->se[cpu] = se;
07e06b01 7770 /* se could be NULL for root_task_group */
354d60c2
DG
7771 if (!se)
7772 return;
7773
ec7dc8ac
DG
7774 if (!parent)
7775 se->cfs_rq = &rq->cfs;
7776 else
7777 se->cfs_rq = parent->my_q;
7778
6f505b16 7779 se->my_q = cfs_rq;
9437178f 7780 update_load_set(&se->load, 0);
ec7dc8ac 7781 se->parent = parent;
6f505b16 7782}
052f1dc7 7783#endif
6f505b16 7784
052f1dc7 7785#ifdef CONFIG_RT_GROUP_SCHED
ec7dc8ac 7786static void init_tg_rt_entry(struct task_group *tg, struct rt_rq *rt_rq,
3d4b47b4 7787 struct sched_rt_entity *rt_se, int cpu,
ec7dc8ac 7788 struct sched_rt_entity *parent)
6f505b16 7789{
ec7dc8ac
DG
7790 struct rq *rq = cpu_rq(cpu);
7791
6f505b16
PZ
7792 tg->rt_rq[cpu] = rt_rq;
7793 init_rt_rq(rt_rq, rq);
7794 rt_rq->tg = tg;
ac086bc2 7795 rt_rq->rt_runtime = tg->rt_bandwidth.rt_runtime;
6f505b16
PZ
7796
7797 tg->rt_se[cpu] = rt_se;
354d60c2
DG
7798 if (!rt_se)
7799 return;
7800
ec7dc8ac
DG
7801 if (!parent)
7802 rt_se->rt_rq = &rq->rt;
7803 else
7804 rt_se->rt_rq = parent->my_q;
7805
6f505b16 7806 rt_se->my_q = rt_rq;
ec7dc8ac 7807 rt_se->parent = parent;
6f505b16
PZ
7808 INIT_LIST_HEAD(&rt_se->run_list);
7809}
7810#endif
7811
1da177e4
LT
7812void __init sched_init(void)
7813{
dd41f596 7814 int i, j;
434d53b0
MT
7815 unsigned long alloc_size = 0, ptr;
7816
7817#ifdef CONFIG_FAIR_GROUP_SCHED
7818 alloc_size += 2 * nr_cpu_ids * sizeof(void **);
7819#endif
7820#ifdef CONFIG_RT_GROUP_SCHED
7821 alloc_size += 2 * nr_cpu_ids * sizeof(void **);
eff766a6 7822#endif
df7c8e84 7823#ifdef CONFIG_CPUMASK_OFFSTACK
8c083f08 7824 alloc_size += num_possible_cpus() * cpumask_size();
434d53b0 7825#endif
434d53b0 7826 if (alloc_size) {
36b7b6d4 7827 ptr = (unsigned long)kzalloc(alloc_size, GFP_NOWAIT);
434d53b0
MT
7828
7829#ifdef CONFIG_FAIR_GROUP_SCHED
07e06b01 7830 root_task_group.se = (struct sched_entity **)ptr;
434d53b0
MT
7831 ptr += nr_cpu_ids * sizeof(void **);
7832
07e06b01 7833 root_task_group.cfs_rq = (struct cfs_rq **)ptr;
434d53b0 7834 ptr += nr_cpu_ids * sizeof(void **);
eff766a6 7835
6d6bc0ad 7836#endif /* CONFIG_FAIR_GROUP_SCHED */
434d53b0 7837#ifdef CONFIG_RT_GROUP_SCHED
07e06b01 7838 root_task_group.rt_se = (struct sched_rt_entity **)ptr;
434d53b0
MT
7839 ptr += nr_cpu_ids * sizeof(void **);
7840
07e06b01 7841 root_task_group.rt_rq = (struct rt_rq **)ptr;
eff766a6
PZ
7842 ptr += nr_cpu_ids * sizeof(void **);
7843
6d6bc0ad 7844#endif /* CONFIG_RT_GROUP_SCHED */
df7c8e84
RR
7845#ifdef CONFIG_CPUMASK_OFFSTACK
7846 for_each_possible_cpu(i) {
7847 per_cpu(load_balance_tmpmask, i) = (void *)ptr;
7848 ptr += cpumask_size();
7849 }
7850#endif /* CONFIG_CPUMASK_OFFSTACK */
434d53b0 7851 }
dd41f596 7852
57d885fe
GH
7853#ifdef CONFIG_SMP
7854 init_defrootdomain();
7855#endif
7856
d0b27fa7
PZ
7857 init_rt_bandwidth(&def_rt_bandwidth,
7858 global_rt_period(), global_rt_runtime());
7859
7860#ifdef CONFIG_RT_GROUP_SCHED
07e06b01 7861 init_rt_bandwidth(&root_task_group.rt_bandwidth,
d0b27fa7 7862 global_rt_period(), global_rt_runtime());
6d6bc0ad 7863#endif /* CONFIG_RT_GROUP_SCHED */
d0b27fa7 7864
7c941438 7865#ifdef CONFIG_CGROUP_SCHED
07e06b01
YZ
7866 list_add(&root_task_group.list, &task_groups);
7867 INIT_LIST_HEAD(&root_task_group.children);
5091faa4 7868 autogroup_init(&init_task);
7c941438 7869#endif /* CONFIG_CGROUP_SCHED */
6f505b16 7870
0a945022 7871 for_each_possible_cpu(i) {
70b97a7f 7872 struct rq *rq;
1da177e4
LT
7873
7874 rq = cpu_rq(i);
05fa785c 7875 raw_spin_lock_init(&rq->lock);
7897986b 7876 rq->nr_running = 0;
dce48a84
TG
7877 rq->calc_load_active = 0;
7878 rq->calc_load_update = jiffies + LOAD_FREQ;
dd41f596 7879 init_cfs_rq(&rq->cfs, rq);
6f505b16 7880 init_rt_rq(&rq->rt, rq);
dd41f596 7881#ifdef CONFIG_FAIR_GROUP_SCHED
07e06b01 7882 root_task_group.shares = root_task_group_load;
6f505b16 7883 INIT_LIST_HEAD(&rq->leaf_cfs_rq_list);
354d60c2 7884 /*
07e06b01 7885 * How much cpu bandwidth does root_task_group get?
354d60c2
DG
7886 *
 7887 * In case of task-groups formed through the cgroup filesystem, it
7888 * gets 100% of the cpu resources in the system. This overall
7889 * system cpu resource is divided among the tasks of
07e06b01 7890 * root_task_group and its child task-groups in a fair manner,
354d60c2
DG
7891 * based on each entity's (task or task-group's) weight
7892 * (se->load.weight).
7893 *
07e06b01 7894 * In other words, if root_task_group has 10 tasks of weight
354d60c2
DG
 7895 * 1024 and two child groups A0 and A1 (of weight 1024 each),
7896 * then A0's share of the cpu resource is:
7897 *
0d905bca 7898 * A0's bandwidth = 1024 / (10*1024 + 1024 + 1024) = 8.33%
354d60c2 7899 *
07e06b01
YZ
7900 * We achieve this by letting root_task_group's tasks sit
 7901 * directly in rq->cfs (i.e. root_task_group->se[] = NULL).
354d60c2 7902 */
07e06b01 7903 init_tg_cfs_entry(&root_task_group, &rq->cfs, NULL, i, NULL);
354d60c2
DG
7904#endif /* CONFIG_FAIR_GROUP_SCHED */
7905
7906 rq->rt.rt_runtime = def_rt_bandwidth.rt_runtime;
052f1dc7 7907#ifdef CONFIG_RT_GROUP_SCHED
6f505b16 7908 INIT_LIST_HEAD(&rq->leaf_rt_rq_list);
07e06b01 7909 init_tg_rt_entry(&root_task_group, &rq->rt, NULL, i, NULL);
dd41f596 7910#endif
1da177e4 7911
dd41f596
IM
7912 for (j = 0; j < CPU_LOAD_IDX_MAX; j++)
7913 rq->cpu_load[j] = 0;
fdf3e95d
VP
7914
7915 rq->last_load_update_tick = jiffies;
7916
1da177e4 7917#ifdef CONFIG_SMP
41c7ce9a 7918 rq->sd = NULL;
57d885fe 7919 rq->rd = NULL;
1399fa78 7920 rq->cpu_power = SCHED_POWER_SCALE;
3f029d3c 7921 rq->post_schedule = 0;
1da177e4 7922 rq->active_balance = 0;
dd41f596 7923 rq->next_balance = jiffies;
1da177e4 7924 rq->push_cpu = 0;
0a2966b4 7925 rq->cpu = i;
1f11eb6a 7926 rq->online = 0;
eae0c9df
MG
7927 rq->idle_stamp = 0;
7928 rq->avg_idle = 2*sysctl_sched_migration_cost;
dc938520 7929 rq_attach_root(rq, &def_root_domain);
83cd4fe2
VP
7930#ifdef CONFIG_NO_HZ
7931 rq->nohz_balance_kick = 0;
7932 init_sched_softirq_csd(&per_cpu(remote_sched_softirq_cb, i));
7933#endif
1da177e4 7934#endif
8f4d37ec 7935 init_rq_hrtick(rq);
1da177e4 7936 atomic_set(&rq->nr_iowait, 0);
1da177e4
LT
7937 }
7938
2dd73a4f 7939 set_load_weight(&init_task);
b50f60ce 7940
e107be36
AK
7941#ifdef CONFIG_PREEMPT_NOTIFIERS
7942 INIT_HLIST_HEAD(&init_task.preempt_notifiers);
7943#endif
7944
c9819f45 7945#ifdef CONFIG_SMP
962cf36c 7946 open_softirq(SCHED_SOFTIRQ, run_rebalance_domains);
c9819f45
CL
7947#endif
7948
b50f60ce 7949#ifdef CONFIG_RT_MUTEXES
1d615482 7950 plist_head_init_raw(&init_task.pi_waiters, &init_task.pi_lock);
b50f60ce
HC
7951#endif
7952
1da177e4
LT
7953 /*
7954 * The boot idle thread does lazy MMU switching as well:
7955 */
7956 atomic_inc(&init_mm.mm_count);
7957 enter_lazy_tlb(&init_mm, current);
7958
7959 /*
7960 * Make us the idle thread. Technically, schedule() should not be
7961 * called from this thread, however somewhere below it might be,
7962 * but because we are the idle thread, we just pick up running again
7963 * when this runqueue becomes "idle".
7964 */
7965 init_idle(current, smp_processor_id());
dce48a84
TG
7966
7967 calc_load_update = jiffies + LOAD_FREQ;
7968
dd41f596
IM
7969 /*
7970 * During early bootup we pretend to be a normal task:
7971 */
7972 current->sched_class = &fair_sched_class;
6892b75e 7973
6a7b3dc3 7974 /* Allocate the nohz_cpu_mask if CONFIG_CPUMASK_OFFSTACK */
49557e62 7975 zalloc_cpumask_var(&nohz_cpu_mask, GFP_NOWAIT);
bf4d83f6 7976#ifdef CONFIG_SMP
4cb98839 7977 zalloc_cpumask_var(&sched_domains_tmpmask, GFP_NOWAIT);
7d1e6a9b 7978#ifdef CONFIG_NO_HZ
83cd4fe2
VP
7979 zalloc_cpumask_var(&nohz.idle_cpus_mask, GFP_NOWAIT);
7980 alloc_cpumask_var(&nohz.grp_idle_mask, GFP_NOWAIT);
7981 atomic_set(&nohz.load_balancer, nr_cpu_ids);
7982 atomic_set(&nohz.first_pick_cpu, nr_cpu_ids);
7983 atomic_set(&nohz.second_pick_cpu, nr_cpu_ids);
7d1e6a9b 7984#endif
bdddd296
RR
7985 /* May be allocated at isolcpus cmdline parse time */
7986 if (cpu_isolated_map == NULL)
7987 zalloc_cpumask_var(&cpu_isolated_map, GFP_NOWAIT);
bf4d83f6 7988#endif /* SMP */
6a7b3dc3 7989
6892b75e 7990 scheduler_running = 1;
1da177e4
LT
7991}
7992
7993#ifdef CONFIG_DEBUG_SPINLOCK_SLEEP
e4aafea2
FW
7994static inline int preempt_count_equals(int preempt_offset)
7995{
234da7bc 7996 int nested = (preempt_count() & ~PREEMPT_ACTIVE) + rcu_preempt_depth();
e4aafea2 7997
4ba8216c 7998 return (nested == preempt_offset);
e4aafea2
FW
7999}
8000
d894837f 8001void __might_sleep(const char *file, int line, int preempt_offset)
1da177e4 8002{
48f24c4d 8003#ifdef in_atomic
1da177e4
LT
8004 static unsigned long prev_jiffy; /* ratelimiting */
8005
e4aafea2
FW
8006 if ((preempt_count_equals(preempt_offset) && !irqs_disabled()) ||
8007 system_state != SYSTEM_RUNNING || oops_in_progress)
aef745fc
IM
8008 return;
8009 if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy)
8010 return;
8011 prev_jiffy = jiffies;
8012
3df0fc5b
PZ
8013 printk(KERN_ERR
8014 "BUG: sleeping function called from invalid context at %s:%d\n",
8015 file, line);
8016 printk(KERN_ERR
8017 "in_atomic(): %d, irqs_disabled(): %d, pid: %d, name: %s\n",
8018 in_atomic(), irqs_disabled(),
8019 current->pid, current->comm);
aef745fc
IM
8020
8021 debug_show_held_locks(current);
8022 if (irqs_disabled())
8023 print_irqtrace_events(current);
8024 dump_stack();
1da177e4
LT
8025#endif
8026}
8027EXPORT_SYMBOL(__might_sleep);
8028#endif
8029
8030#ifdef CONFIG_MAGIC_SYSRQ
3a5e4dc1
AK
8031static void normalize_task(struct rq *rq, struct task_struct *p)
8032{
da7a735e
PZ
8033 const struct sched_class *prev_class = p->sched_class;
8034 int old_prio = p->prio;
3a5e4dc1 8035 int on_rq;
3e51f33f 8036
fd2f4419 8037 on_rq = p->on_rq;
3a5e4dc1
AK
8038 if (on_rq)
8039 deactivate_task(rq, p, 0);
8040 __setscheduler(rq, p, SCHED_NORMAL, 0);
8041 if (on_rq) {
8042 activate_task(rq, p, 0);
8043 resched_task(rq->curr);
8044 }
da7a735e
PZ
8045
8046 check_class_changed(rq, p, prev_class, old_prio);
3a5e4dc1
AK
8047}
8048
1da177e4
LT
8049void normalize_rt_tasks(void)
8050{
a0f98a1c 8051 struct task_struct *g, *p;
1da177e4 8052 unsigned long flags;
70b97a7f 8053 struct rq *rq;
1da177e4 8054
4cf5d77a 8055 read_lock_irqsave(&tasklist_lock, flags);
a0f98a1c 8056 do_each_thread(g, p) {
178be793
IM
8057 /*
8058 * Only normalize user tasks:
8059 */
8060 if (!p->mm)
8061 continue;
8062
6cfb0d5d 8063 p->se.exec_start = 0;
6cfb0d5d 8064#ifdef CONFIG_SCHEDSTATS
41acab88
LDM
8065 p->se.statistics.wait_start = 0;
8066 p->se.statistics.sleep_start = 0;
8067 p->se.statistics.block_start = 0;
6cfb0d5d 8068#endif
dd41f596
IM
8069
8070 if (!rt_task(p)) {
8071 /*
8072 * Renice negative nice level userspace
8073 * tasks back to 0:
8074 */
8075 if (TASK_NICE(p) < 0 && p->mm)
8076 set_user_nice(p, 0);
1da177e4 8077 continue;
dd41f596 8078 }
1da177e4 8079
1d615482 8080 raw_spin_lock(&p->pi_lock);
b29739f9 8081 rq = __task_rq_lock(p);
1da177e4 8082
178be793 8083 normalize_task(rq, p);
3a5e4dc1 8084
b29739f9 8085 __task_rq_unlock(rq);
1d615482 8086 raw_spin_unlock(&p->pi_lock);
a0f98a1c
IM
8087 } while_each_thread(g, p);
8088
4cf5d77a 8089 read_unlock_irqrestore(&tasklist_lock, flags);
1da177e4
LT
8090}
8091
8092#endif /* CONFIG_MAGIC_SYSRQ */
1df5c10a 8093
67fc4e0c 8094#if defined(CONFIG_IA64) || defined(CONFIG_KGDB_KDB)
1df5c10a 8095/*
67fc4e0c 8096 * These functions are only useful for the IA64 MCA handling, or kdb.
1df5c10a
LT
8097 *
8098 * They can only be called when the whole system has been
8099 * stopped - every CPU needs to be quiescent, and no scheduling
8100 * activity can take place. Using them for anything else would
8101 * be a serious bug, and as a result, they aren't even visible
8102 * under any other configuration.
8103 */
8104
8105/**
8106 * curr_task - return the current task for a given cpu.
8107 * @cpu: the processor in question.
8108 *
8109 * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED!
8110 */
36c8b586 8111struct task_struct *curr_task(int cpu)
1df5c10a
LT
8112{
8113 return cpu_curr(cpu);
8114}
8115
67fc4e0c
JW
8116#endif /* defined(CONFIG_IA64) || defined(CONFIG_KGDB_KDB) */
8117
8118#ifdef CONFIG_IA64
1df5c10a
LT
8119/**
8120 * set_curr_task - set the current task for a given cpu.
8121 * @cpu: the processor in question.
8122 * @p: the task pointer to set.
8123 *
8124 * Description: This function must only be used when non-maskable interrupts
41a2d6cf
IM
8125 * are serviced on a separate stack. It allows the architecture to switch the
8126 * notion of the current task on a cpu in a non-blocking manner. This function
1df5c10a
LT
8127 * must be called with all CPU's synchronized, and interrupts disabled, the
8128 * and caller must save the original value of the current task (see
8129 * curr_task() above) and restore that value before reenabling interrupts and
8130 * re-starting the system.
8131 *
8132 * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED!
8133 */
36c8b586 8134void set_curr_task(int cpu, struct task_struct *p)
1df5c10a
LT
8135{
8136 cpu_curr(cpu) = p;
8137}
8138
8139#endif
29f59db3 8140
bccbe08a
PZ
8141#ifdef CONFIG_FAIR_GROUP_SCHED
8142static void free_fair_sched_group(struct task_group *tg)
6f505b16
PZ
8143{
8144 int i;
8145
8146 for_each_possible_cpu(i) {
8147 if (tg->cfs_rq)
8148 kfree(tg->cfs_rq[i]);
8149 if (tg->se)
8150 kfree(tg->se[i]);
6f505b16
PZ
8151 }
8152
8153 kfree(tg->cfs_rq);
8154 kfree(tg->se);
6f505b16
PZ
8155}
8156
ec7dc8ac
DG
8157static
8158int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
29f59db3 8159{
29f59db3 8160 struct cfs_rq *cfs_rq;
eab17229 8161 struct sched_entity *se;
29f59db3
SV
8162 int i;
8163
434d53b0 8164 tg->cfs_rq = kzalloc(sizeof(cfs_rq) * nr_cpu_ids, GFP_KERNEL);
29f59db3
SV
8165 if (!tg->cfs_rq)
8166 goto err;
434d53b0 8167 tg->se = kzalloc(sizeof(se) * nr_cpu_ids, GFP_KERNEL);
29f59db3
SV
8168 if (!tg->se)
8169 goto err;
052f1dc7
PZ
8170
8171 tg->shares = NICE_0_LOAD;
29f59db3
SV
8172
8173 for_each_possible_cpu(i) {
eab17229
LZ
8174 cfs_rq = kzalloc_node(sizeof(struct cfs_rq),
8175 GFP_KERNEL, cpu_to_node(i));
29f59db3
SV
8176 if (!cfs_rq)
8177 goto err;
8178
eab17229
LZ
8179 se = kzalloc_node(sizeof(struct sched_entity),
8180 GFP_KERNEL, cpu_to_node(i));
29f59db3 8181 if (!se)
dfc12eb2 8182 goto err_free_rq;
29f59db3 8183
3d4b47b4 8184 init_tg_cfs_entry(tg, cfs_rq, se, i, parent->se[i]);
bccbe08a
PZ
8185 }
8186
8187 return 1;
8188
49246274 8189err_free_rq:
dfc12eb2 8190 kfree(cfs_rq);
49246274 8191err:
bccbe08a
PZ
8192 return 0;
8193}
8194
bccbe08a
PZ
8195static inline void unregister_fair_sched_group(struct task_group *tg, int cpu)
8196{
3d4b47b4
PZ
8197 struct rq *rq = cpu_rq(cpu);
8198 unsigned long flags;
3d4b47b4
PZ
8199
8200 /*
8201 * Only empty task groups can be destroyed; so we can speculatively
8202 * check on_list without danger of it being re-added.
8203 */
8204 if (!tg->cfs_rq[cpu]->on_list)
8205 return;
8206
8207 raw_spin_lock_irqsave(&rq->lock, flags);
822bc180 8208 list_del_leaf_cfs_rq(tg->cfs_rq[cpu]);
3d4b47b4 8209 raw_spin_unlock_irqrestore(&rq->lock, flags);
bccbe08a 8210}
6d6bc0ad 8211#else /* !CONFIG_FAIR_GROUP_SCHED */
bccbe08a
PZ
8212static inline void free_fair_sched_group(struct task_group *tg)
8213{
8214}
8215
ec7dc8ac
DG
8216static inline
8217int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
bccbe08a
PZ
8218{
8219 return 1;
8220}
8221
bccbe08a
PZ
8222static inline void unregister_fair_sched_group(struct task_group *tg, int cpu)
8223{
8224}
6d6bc0ad 8225#endif /* CONFIG_FAIR_GROUP_SCHED */
052f1dc7
PZ
8226
8227#ifdef CONFIG_RT_GROUP_SCHED
bccbe08a
PZ
8228static void free_rt_sched_group(struct task_group *tg)
8229{
8230 int i;
8231
d0b27fa7
PZ
8232 destroy_rt_bandwidth(&tg->rt_bandwidth);
8233
bccbe08a
PZ
8234 for_each_possible_cpu(i) {
8235 if (tg->rt_rq)
8236 kfree(tg->rt_rq[i]);
8237 if (tg->rt_se)
8238 kfree(tg->rt_se[i]);
8239 }
8240
8241 kfree(tg->rt_rq);
8242 kfree(tg->rt_se);
8243}
8244
ec7dc8ac
DG
8245static
8246int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
bccbe08a
PZ
8247{
8248 struct rt_rq *rt_rq;
eab17229 8249 struct sched_rt_entity *rt_se;
bccbe08a
PZ
8250 int i;
8251
434d53b0 8252 tg->rt_rq = kzalloc(sizeof(rt_rq) * nr_cpu_ids, GFP_KERNEL);
bccbe08a
PZ
8253 if (!tg->rt_rq)
8254 goto err;
434d53b0 8255 tg->rt_se = kzalloc(sizeof(rt_se) * nr_cpu_ids, GFP_KERNEL);
bccbe08a
PZ
8256 if (!tg->rt_se)
8257 goto err;
8258
d0b27fa7
PZ
8259 init_rt_bandwidth(&tg->rt_bandwidth,
8260 ktime_to_ns(def_rt_bandwidth.rt_period), 0);
bccbe08a
PZ
8261
8262 for_each_possible_cpu(i) {
eab17229
LZ
8263 rt_rq = kzalloc_node(sizeof(struct rt_rq),
8264 GFP_KERNEL, cpu_to_node(i));
6f505b16
PZ
8265 if (!rt_rq)
8266 goto err;
29f59db3 8267
eab17229
LZ
8268 rt_se = kzalloc_node(sizeof(struct sched_rt_entity),
8269 GFP_KERNEL, cpu_to_node(i));
6f505b16 8270 if (!rt_se)
dfc12eb2 8271 goto err_free_rq;
29f59db3 8272
3d4b47b4 8273 init_tg_rt_entry(tg, rt_rq, rt_se, i, parent->rt_se[i]);
29f59db3
SV
8274 }
8275
bccbe08a
PZ
8276 return 1;
8277
49246274 8278err_free_rq:
dfc12eb2 8279 kfree(rt_rq);
49246274 8280err:
bccbe08a
PZ
8281 return 0;
8282}
6d6bc0ad 8283#else /* !CONFIG_RT_GROUP_SCHED */
bccbe08a
PZ
8284static inline void free_rt_sched_group(struct task_group *tg)
8285{
8286}
8287
ec7dc8ac
DG
8288static inline
8289int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
bccbe08a
PZ
8290{
8291 return 1;
8292}
6d6bc0ad 8293#endif /* CONFIG_RT_GROUP_SCHED */
bccbe08a 8294
7c941438 8295#ifdef CONFIG_CGROUP_SCHED
bccbe08a
PZ
8296static void free_sched_group(struct task_group *tg)
8297{
8298 free_fair_sched_group(tg);
8299 free_rt_sched_group(tg);
e9aa1dd1 8300 autogroup_free(tg);
bccbe08a
PZ
8301 kfree(tg);
8302}
8303
8304/* allocate runqueue etc for a new task group */
ec7dc8ac 8305struct task_group *sched_create_group(struct task_group *parent)
bccbe08a
PZ
8306{
8307 struct task_group *tg;
8308 unsigned long flags;
bccbe08a
PZ
8309
8310 tg = kzalloc(sizeof(*tg), GFP_KERNEL);
8311 if (!tg)
8312 return ERR_PTR(-ENOMEM);
8313
ec7dc8ac 8314 if (!alloc_fair_sched_group(tg, parent))
bccbe08a
PZ
8315 goto err;
8316
ec7dc8ac 8317 if (!alloc_rt_sched_group(tg, parent))
bccbe08a
PZ
8318 goto err;
8319
8ed36996 8320 spin_lock_irqsave(&task_group_lock, flags);
6f505b16 8321 list_add_rcu(&tg->list, &task_groups);
f473aa5e
PZ
8322
8323 WARN_ON(!parent); /* root should already exist */
8324
8325 tg->parent = parent;
f473aa5e 8326 INIT_LIST_HEAD(&tg->children);
09f2724a 8327 list_add_rcu(&tg->siblings, &parent->children);
8ed36996 8328 spin_unlock_irqrestore(&task_group_lock, flags);
29f59db3 8329
9b5b7751 8330 return tg;
29f59db3
SV
8331
8332err:
6f505b16 8333 free_sched_group(tg);
29f59db3
SV
8334 return ERR_PTR(-ENOMEM);
8335}
8336
9b5b7751 8337/* rcu callback to free various structures associated with a task group */
6f505b16 8338static void free_sched_group_rcu(struct rcu_head *rhp)
29f59db3 8339{
29f59db3 8340 /* now it should be safe to free those cfs_rqs */
6f505b16 8341 free_sched_group(container_of(rhp, struct task_group, rcu));
29f59db3
SV
8342}
8343
9b5b7751 8344/* Destroy runqueue etc associated with a task group */
4cf86d77 8345void sched_destroy_group(struct task_group *tg)
29f59db3 8346{
8ed36996 8347 unsigned long flags;
9b5b7751 8348 int i;
29f59db3 8349
3d4b47b4
PZ
8350 /* end participation in shares distribution */
8351 for_each_possible_cpu(i)
bccbe08a 8352 unregister_fair_sched_group(tg, i);
3d4b47b4
PZ
8353
8354 spin_lock_irqsave(&task_group_lock, flags);
6f505b16 8355 list_del_rcu(&tg->list);
f473aa5e 8356 list_del_rcu(&tg->siblings);
8ed36996 8357 spin_unlock_irqrestore(&task_group_lock, flags);
9b5b7751 8358
9b5b7751 8359 /* wait for possible concurrent references to cfs_rqs to complete */
6f505b16 8360 call_rcu(&tg->rcu, free_sched_group_rcu);
29f59db3
SV
8361}
8362
9b5b7751 8363/* Change a task's runqueue when it moves between groups.
3a252015
IM
8364 * The caller of this function should have put the task in its new group
8365 * by now. This function just updates tsk->se.cfs_rq and tsk->se.parent to
8366 * reflect its new group.
9b5b7751
SV
8367 */
8368void sched_move_task(struct task_struct *tsk)
29f59db3
SV
8369{
8370 int on_rq, running;
8371 unsigned long flags;
8372 struct rq *rq;
8373
8374 rq = task_rq_lock(tsk, &flags);
8375
051a1d1a 8376 running = task_current(rq, tsk);
fd2f4419 8377 on_rq = tsk->on_rq;
29f59db3 8378
0e1f3483 8379 if (on_rq)
29f59db3 8380 dequeue_task(rq, tsk, 0);
0e1f3483
HS
8381 if (unlikely(running))
8382 tsk->sched_class->put_prev_task(rq, tsk);
29f59db3 8383
810b3817 8384#ifdef CONFIG_FAIR_GROUP_SCHED
b2b5ce02
PZ
8385 if (tsk->sched_class->task_move_group)
8386 tsk->sched_class->task_move_group(tsk, on_rq);
8387 else
810b3817 8388#endif
b2b5ce02 8389 set_task_rq(tsk, task_cpu(tsk));
810b3817 8390
0e1f3483
HS
8391 if (unlikely(running))
8392 tsk->sched_class->set_curr_task(rq);
8393 if (on_rq)
371fd7e7 8394 enqueue_task(rq, tsk, 0);
29f59db3 8395
0122ec5b 8396 task_rq_unlock(rq, tsk, &flags);
29f59db3 8397}
7c941438 8398#endif /* CONFIG_CGROUP_SCHED */
29f59db3 8399
052f1dc7 8400#ifdef CONFIG_FAIR_GROUP_SCHED
8ed36996
PZ
8401static DEFINE_MUTEX(shares_mutex);
8402
4cf86d77 8403int sched_group_set_shares(struct task_group *tg, unsigned long shares)
29f59db3
SV
8404{
8405 int i;
8ed36996 8406 unsigned long flags;
c61935fd 8407
ec7dc8ac
DG
8408 /*
8409 * We can't change the weight of the root cgroup.
8410 */
8411 if (!tg->se[0])
8412 return -EINVAL;
8413
18d95a28
PZ
8414 if (shares < MIN_SHARES)
8415 shares = MIN_SHARES;
cb4ad1ff
MX
8416 else if (shares > MAX_SHARES)
8417 shares = MAX_SHARES;
62fb1851 8418
8ed36996 8419 mutex_lock(&shares_mutex);
9b5b7751 8420 if (tg->shares == shares)
5cb350ba 8421 goto done;
29f59db3 8422
9b5b7751 8423 tg->shares = shares;
c09595f6 8424 for_each_possible_cpu(i) {
9437178f
PT
8425 struct rq *rq = cpu_rq(i);
8426 struct sched_entity *se;
8427
8428 se = tg->se[i];
8429 /* Propagate contribution to hierarchy */
8430 raw_spin_lock_irqsave(&rq->lock, flags);
8431 for_each_sched_entity(se)
6d5ab293 8432 update_cfs_shares(group_cfs_rq(se));
9437178f 8433 raw_spin_unlock_irqrestore(&rq->lock, flags);
c09595f6 8434 }
29f59db3 8435
5cb350ba 8436done:
8ed36996 8437 mutex_unlock(&shares_mutex);
9b5b7751 8438 return 0;
29f59db3
SV
8439}
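/*
 * Editorial example: sched_group_set_shares(tg, 2 * NICE_0_LOAD) gives tg
 * twice the weight of a group left at the NICE_0_LOAD default set in
 * alloc_fair_sched_group(), so under contention its entities receive
 * roughly twice the CPU time of such a sibling group.
 */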
8440
5cb350ba
DG
8441unsigned long sched_group_shares(struct task_group *tg)
8442{
8443 return tg->shares;
8444}
052f1dc7 8445#endif
5cb350ba 8446
052f1dc7 8447#ifdef CONFIG_RT_GROUP_SCHED
6f505b16 8448/*
9f0c1e56 8449 * Ensure that the real time constraints are schedulable.
6f505b16 8450 */
9f0c1e56
PZ
8451static DEFINE_MUTEX(rt_constraints_mutex);
8452
8453static unsigned long to_ratio(u64 period, u64 runtime)
8454{
8455 if (runtime == RUNTIME_INF)
9a7e0b18 8456 return 1ULL << 20;
9f0c1e56 8457
9a7e0b18 8458 return div64_u64(runtime << 20, period);
9f0c1e56
PZ
8459}
8460
9a7e0b18
PZ
8461/* Must be called with tasklist_lock held */
8462static inline int tg_has_rt_tasks(struct task_group *tg)
b40b2e8e 8463{
9a7e0b18 8464 struct task_struct *g, *p;
b40b2e8e 8465
9a7e0b18
PZ
8466 do_each_thread(g, p) {
8467 if (rt_task(p) && rt_rq_of_se(&p->rt)->tg == tg)
8468 return 1;
8469 } while_each_thread(g, p);
b40b2e8e 8470
9a7e0b18
PZ
8471 return 0;
8472}
b40b2e8e 8473
9a7e0b18
PZ
8474struct rt_schedulable_data {
8475 struct task_group *tg;
8476 u64 rt_period;
8477 u64 rt_runtime;
8478};
b40b2e8e 8479
9a7e0b18
PZ
8480static int tg_schedulable(struct task_group *tg, void *data)
8481{
8482 struct rt_schedulable_data *d = data;
8483 struct task_group *child;
8484 unsigned long total, sum = 0;
8485 u64 period, runtime;
b40b2e8e 8486
9a7e0b18
PZ
8487 period = ktime_to_ns(tg->rt_bandwidth.rt_period);
8488 runtime = tg->rt_bandwidth.rt_runtime;
b40b2e8e 8489
9a7e0b18
PZ
8490 if (tg == d->tg) {
8491 period = d->rt_period;
8492 runtime = d->rt_runtime;
b40b2e8e 8493 }
b40b2e8e 8494
4653f803
PZ
8495 /*
8496 * Cannot have more runtime than the period.
8497 */
8498 if (runtime > period && runtime != RUNTIME_INF)
8499 return -EINVAL;
6f505b16 8500
4653f803
PZ
8501 /*
8502 * Ensure we don't starve existing RT tasks.
8503 */
9a7e0b18
PZ
8504 if (rt_bandwidth_enabled() && !runtime && tg_has_rt_tasks(tg))
8505 return -EBUSY;
6f505b16 8506
9a7e0b18 8507 total = to_ratio(period, runtime);
6f505b16 8508
4653f803
PZ
8509 /*
8510 * Nobody can have more than the global setting allows.
8511 */
8512 if (total > to_ratio(global_rt_period(), global_rt_runtime()))
8513 return -EINVAL;
6f505b16 8514
4653f803
PZ
8515 /*
8516 * The sum of our children's runtime should not exceed our own.
8517 */
9a7e0b18
PZ
8518 list_for_each_entry_rcu(child, &tg->children, siblings) {
8519 period = ktime_to_ns(child->rt_bandwidth.rt_period);
8520 runtime = child->rt_bandwidth.rt_runtime;
6f505b16 8521
9a7e0b18
PZ
8522 if (child == d->tg) {
8523 period = d->rt_period;
8524 runtime = d->rt_runtime;
8525 }
6f505b16 8526
9a7e0b18 8527 sum += to_ratio(period, runtime);
9f0c1e56 8528 }
6f505b16 8529
9a7e0b18
PZ
8530 if (sum > total)
8531 return -EINVAL;
8532
8533 return 0;
6f505b16
PZ
8534}
8535
9a7e0b18 8536static int __rt_schedulable(struct task_group *tg, u64 period, u64 runtime)
521f1a24 8537{
9a7e0b18
PZ
8538 struct rt_schedulable_data data = {
8539 .tg = tg,
8540 .rt_period = period,
8541 .rt_runtime = runtime,
8542 };
8543
8544 return walk_tg_tree(tg_schedulable, tg_nop, &data);
521f1a24
DG
8545}
8546
d0b27fa7
PZ
8547static int tg_set_bandwidth(struct task_group *tg,
8548 u64 rt_period, u64 rt_runtime)
6f505b16 8549{
ac086bc2 8550 int i, err = 0;
9f0c1e56 8551
9f0c1e56 8552 mutex_lock(&rt_constraints_mutex);
521f1a24 8553 read_lock(&tasklist_lock);
9a7e0b18
PZ
8554 err = __rt_schedulable(tg, rt_period, rt_runtime);
8555 if (err)
9f0c1e56 8556 goto unlock;
ac086bc2 8557
0986b11b 8558 raw_spin_lock_irq(&tg->rt_bandwidth.rt_runtime_lock);
d0b27fa7
PZ
8559 tg->rt_bandwidth.rt_period = ns_to_ktime(rt_period);
8560 tg->rt_bandwidth.rt_runtime = rt_runtime;
ac086bc2
PZ
8561
8562 for_each_possible_cpu(i) {
8563 struct rt_rq *rt_rq = tg->rt_rq[i];
8564
0986b11b 8565 raw_spin_lock(&rt_rq->rt_runtime_lock);
ac086bc2 8566 rt_rq->rt_runtime = rt_runtime;
0986b11b 8567 raw_spin_unlock(&rt_rq->rt_runtime_lock);
ac086bc2 8568 }
0986b11b 8569 raw_spin_unlock_irq(&tg->rt_bandwidth.rt_runtime_lock);
49246274 8570unlock:
521f1a24 8571 read_unlock(&tasklist_lock);
9f0c1e56
PZ
8572 mutex_unlock(&rt_constraints_mutex);
8573
8574 return err;
6f505b16
PZ
8575}
8576
d0b27fa7
PZ
8577int sched_group_set_rt_runtime(struct task_group *tg, long rt_runtime_us)
8578{
8579 u64 rt_runtime, rt_period;
8580
8581 rt_period = ktime_to_ns(tg->rt_bandwidth.rt_period);
8582 rt_runtime = (u64)rt_runtime_us * NSEC_PER_USEC;
8583 if (rt_runtime_us < 0)
8584 rt_runtime = RUNTIME_INF;
8585
8586 return tg_set_bandwidth(tg, rt_period, rt_runtime);
8587}
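/*
 * Editorial example: sched_group_set_rt_runtime(tg, 500000) converts the
 * 500000us argument to nanoseconds and, via tg_set_bandwidth(), limits tg's
 * realtime tasks to 500ms of runtime per rt_period; a negative value maps
 * to RUNTIME_INF (no per-group runtime limit).
 */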
8588
9f0c1e56
PZ
8589long sched_group_rt_runtime(struct task_group *tg)
8590{
8591 u64 rt_runtime_us;
8592
d0b27fa7 8593 if (tg->rt_bandwidth.rt_runtime == RUNTIME_INF)
9f0c1e56
PZ
8594 return -1;
8595
d0b27fa7 8596 rt_runtime_us = tg->rt_bandwidth.rt_runtime;
9f0c1e56
PZ
8597 do_div(rt_runtime_us, NSEC_PER_USEC);
8598 return rt_runtime_us;
8599}
d0b27fa7
PZ
8600
8601int sched_group_set_rt_period(struct task_group *tg, long rt_period_us)
8602{
8603 u64 rt_runtime, rt_period;
8604
8605 rt_period = (u64)rt_period_us * NSEC_PER_USEC;
8606 rt_runtime = tg->rt_bandwidth.rt_runtime;
8607
619b0488
R
8608 if (rt_period == 0)
8609 return -EINVAL;
8610
d0b27fa7
PZ
8611 return tg_set_bandwidth(tg, rt_period, rt_runtime);
8612}
8613
8614long sched_group_rt_period(struct task_group *tg)
8615{
8616 u64 rt_period_us;
8617
8618 rt_period_us = ktime_to_ns(tg->rt_bandwidth.rt_period);
8619 do_div(rt_period_us, NSEC_PER_USEC);
8620 return rt_period_us;
8621}
8622
8623static int sched_rt_global_constraints(void)
8624{
4653f803 8625 u64 runtime, period;
d0b27fa7
PZ
8626 int ret = 0;
8627
ec5d4989
HS
8628 if (sysctl_sched_rt_period <= 0)
8629 return -EINVAL;
8630
4653f803
PZ
8631 runtime = global_rt_runtime();
8632 period = global_rt_period();
8633
8634 /*
8635 * Sanity check on the sysctl variables.
8636 */
8637 if (runtime > period && runtime != RUNTIME_INF)
8638 return -EINVAL;
10b612f4 8639
d0b27fa7 8640 mutex_lock(&rt_constraints_mutex);
9a7e0b18 8641 read_lock(&tasklist_lock);
4653f803 8642 ret = __rt_schedulable(NULL, 0, 0);
9a7e0b18 8643 read_unlock(&tasklist_lock);
d0b27fa7
PZ
8644 mutex_unlock(&rt_constraints_mutex);
8645
8646 return ret;
8647}
54e99124
DG
8648
8649int sched_rt_can_attach(struct task_group *tg, struct task_struct *tsk)
8650{
8651 /* Don't accept realtime tasks when there is no way for them to run */
8652 if (rt_task(tsk) && tg->rt_bandwidth.rt_runtime == 0)
8653 return 0;
8654
8655 return 1;
8656}
8657
6d6bc0ad 8658#else /* !CONFIG_RT_GROUP_SCHED */
d0b27fa7
PZ
8659static int sched_rt_global_constraints(void)
8660{
ac086bc2
PZ
8661 unsigned long flags;
8662 int i;
8663
ec5d4989
HS
8664 if (sysctl_sched_rt_period <= 0)
8665 return -EINVAL;
8666
60aa605d
PZ
8667 /*
 8668 * There are always some RT tasks in the root group
 8669 * -- migration, kstopmachine etc.
8670 */
8671 if (sysctl_sched_rt_runtime == 0)
8672 return -EBUSY;
8673
0986b11b 8674 raw_spin_lock_irqsave(&def_rt_bandwidth.rt_runtime_lock, flags);
ac086bc2
PZ
8675 for_each_possible_cpu(i) {
8676 struct rt_rq *rt_rq = &cpu_rq(i)->rt;
8677
0986b11b 8678 raw_spin_lock(&rt_rq->rt_runtime_lock);
ac086bc2 8679 rt_rq->rt_runtime = global_rt_runtime();
0986b11b 8680 raw_spin_unlock(&rt_rq->rt_runtime_lock);
ac086bc2 8681 }
0986b11b 8682 raw_spin_unlock_irqrestore(&def_rt_bandwidth.rt_runtime_lock, flags);
ac086bc2 8683
d0b27fa7
PZ
8684 return 0;
8685}
6d6bc0ad 8686#endif /* CONFIG_RT_GROUP_SCHED */
d0b27fa7
PZ
8687
8688int sched_rt_handler(struct ctl_table *table, int write,
8d65af78 8689 void __user *buffer, size_t *lenp,
d0b27fa7
PZ
8690 loff_t *ppos)
8691{
8692 int ret;
8693 int old_period, old_runtime;
8694 static DEFINE_MUTEX(mutex);
8695
8696 mutex_lock(&mutex);
8697 old_period = sysctl_sched_rt_period;
8698 old_runtime = sysctl_sched_rt_runtime;
8699
8d65af78 8700 ret = proc_dointvec(table, write, buffer, lenp, ppos);
d0b27fa7
PZ
8701
8702 if (!ret && write) {
8703 ret = sched_rt_global_constraints();
8704 if (ret) {
8705 sysctl_sched_rt_period = old_period;
8706 sysctl_sched_rt_runtime = old_runtime;
8707 } else {
8708 def_rt_bandwidth.rt_runtime = global_rt_runtime();
8709 def_rt_bandwidth.rt_period =
8710 ns_to_ktime(global_rt_period());
8711 }
8712 }
8713 mutex_unlock(&mutex);
8714
8715 return ret;
8716}
68318b8e 8717
052f1dc7 8718#ifdef CONFIG_CGROUP_SCHED
68318b8e
SV
8719
8720/* return corresponding task_group object of a cgroup */
2b01dfe3 8721static inline struct task_group *cgroup_tg(struct cgroup *cgrp)
68318b8e 8722{
2b01dfe3
PM
8723 return container_of(cgroup_subsys_state(cgrp, cpu_cgroup_subsys_id),
8724 struct task_group, css);
68318b8e
SV
8725}
8726
8727static struct cgroup_subsys_state *
2b01dfe3 8728cpu_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cgrp)
68318b8e 8729{
ec7dc8ac 8730 struct task_group *tg, *parent;
68318b8e 8731
2b01dfe3 8732 if (!cgrp->parent) {
68318b8e 8733 /* This is early initialization for the top cgroup */
07e06b01 8734 return &root_task_group.css;
68318b8e
SV
8735 }
8736
ec7dc8ac
DG
8737 parent = cgroup_tg(cgrp->parent);
8738 tg = sched_create_group(parent);
68318b8e
SV
8739 if (IS_ERR(tg))
8740 return ERR_PTR(-ENOMEM);
8741
68318b8e
SV
8742 return &tg->css;
8743}
8744
41a2d6cf
IM
8745static void
8746cpu_cgroup_destroy(struct cgroup_subsys *ss, struct cgroup *cgrp)
68318b8e 8747{
2b01dfe3 8748 struct task_group *tg = cgroup_tg(cgrp);
68318b8e
SV
8749
8750 sched_destroy_group(tg);
8751}
8752
41a2d6cf 8753static int
be367d09 8754cpu_cgroup_can_attach_task(struct cgroup *cgrp, struct task_struct *tsk)
68318b8e 8755{
b68aa230 8756#ifdef CONFIG_RT_GROUP_SCHED
54e99124 8757 if (!sched_rt_can_attach(cgroup_tg(cgrp), tsk))
b68aa230
PZ
8758 return -EINVAL;
8759#else
68318b8e
SV
8760 /* We don't support RT-tasks being in separate groups */
8761 if (tsk->sched_class != &fair_sched_class)
8762 return -EINVAL;
b68aa230 8763#endif
be367d09
BB
8764 return 0;
8765}
68318b8e 8766
68318b8e 8767static void
f780bdb7 8768cpu_cgroup_attach_task(struct cgroup *cgrp, struct task_struct *tsk)
68318b8e
SV
8769{
8770 sched_move_task(tsk);
8771}
8772
068c5cc5 8773static void
d41d5a01
PZ
8774cpu_cgroup_exit(struct cgroup_subsys *ss, struct cgroup *cgrp,
8775 struct cgroup *old_cgrp, struct task_struct *task)
068c5cc5
PZ
8776{
8777 /*
8778 * cgroup_exit() is called in the copy_process() failure path.
 8779 * Ignore this case since the task hasn't run yet; this avoids
8780 * trying to poke a half freed task state from generic code.
8781 */
8782 if (!(task->flags & PF_EXITING))
8783 return;
8784
8785 sched_move_task(task);
8786}
8787
052f1dc7 8788#ifdef CONFIG_FAIR_GROUP_SCHED
f4c753b7 8789static int cpu_shares_write_u64(struct cgroup *cgrp, struct cftype *cftype,
2b01dfe3 8790 u64 shareval)
68318b8e 8791{
c8b28116 8792 return sched_group_set_shares(cgroup_tg(cgrp), scale_load(shareval));
68318b8e
SV
8793}
8794
f4c753b7 8795static u64 cpu_shares_read_u64(struct cgroup *cgrp, struct cftype *cft)
68318b8e 8796{
2b01dfe3 8797 struct task_group *tg = cgroup_tg(cgrp);
68318b8e 8798
c8b28116 8799 return (u64) scale_load_down(tg->shares);
68318b8e 8800}
6d6bc0ad 8801#endif /* CONFIG_FAIR_GROUP_SCHED */
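/*
 * Illustration of the conversion above: the value a user writes to
 * cpu.shares is stored via scale_load() and handed back via
 * scale_load_down(), so the round trip is transparent.  When the kernel is
 * built with extra load resolution the default of 1024 is kept internally
 * shifted up by the extra bits; otherwise both helpers are no-ops and
 * tg->shares holds 1024 directly.  Writing 2048 gives a group twice the
 * weight of a sibling left at the default when both are CPU-bound.
 */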
68318b8e 8802
052f1dc7 8803#ifdef CONFIG_RT_GROUP_SCHED
0c70814c 8804static int cpu_rt_runtime_write(struct cgroup *cgrp, struct cftype *cft,
06ecb27c 8805 s64 val)
6f505b16 8806{
06ecb27c 8807 return sched_group_set_rt_runtime(cgroup_tg(cgrp), val);
6f505b16
PZ
8808}
8809
06ecb27c 8810static s64 cpu_rt_runtime_read(struct cgroup *cgrp, struct cftype *cft)
6f505b16 8811{
06ecb27c 8812 return sched_group_rt_runtime(cgroup_tg(cgrp));
6f505b16 8813}
d0b27fa7
PZ
8814
8815static int cpu_rt_period_write_uint(struct cgroup *cgrp, struct cftype *cftype,
8816 u64 rt_period_us)
8817{
8818 return sched_group_set_rt_period(cgroup_tg(cgrp), rt_period_us);
8819}
8820
8821static u64 cpu_rt_period_read_uint(struct cgroup *cgrp, struct cftype *cft)
8822{
8823 return sched_group_rt_period(cgroup_tg(cgrp));
8824}
6d6bc0ad 8825#endif /* CONFIG_RT_GROUP_SCHED */
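/*
 * Worked example for the two knobs above: writing 100000 to
 * cpu.rt_runtime_us while cpu.rt_period_us stays at the default 1000000
 * allows the group's realtime tasks roughly 100ms of CPU time in every 1s
 * period on each CPU before they are throttled until the next period
 * (runtime may additionally be borrowed between CPUs).  A newly created
 * group starts with rt_runtime of 0 and cannot have realtime tasks
 * attached until some budget is granted.
 */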
6f505b16 8826
fe5c7cc2 8827static struct cftype cpu_files[] = {
052f1dc7 8828#ifdef CONFIG_FAIR_GROUP_SCHED
fe5c7cc2
PM
8829 {
8830 .name = "shares",
f4c753b7
PM
8831 .read_u64 = cpu_shares_read_u64,
8832 .write_u64 = cpu_shares_write_u64,
fe5c7cc2 8833 },
052f1dc7
PZ
8834#endif
8835#ifdef CONFIG_RT_GROUP_SCHED
6f505b16 8836 {
9f0c1e56 8837 .name = "rt_runtime_us",
06ecb27c
PM
8838 .read_s64 = cpu_rt_runtime_read,
8839 .write_s64 = cpu_rt_runtime_write,
6f505b16 8840 },
d0b27fa7
PZ
8841 {
8842 .name = "rt_period_us",
f4c753b7
PM
8843 .read_u64 = cpu_rt_period_read_uint,
8844 .write_u64 = cpu_rt_period_write_uint,
d0b27fa7 8845 },
052f1dc7 8846#endif
68318b8e
SV
8847};
8848
8849static int cpu_cgroup_populate(struct cgroup_subsys *ss, struct cgroup *cont)
8850{
fe5c7cc2 8851 return cgroup_add_files(cont, ss, cpu_files, ARRAY_SIZE(cpu_files));
68318b8e
SV
8852}
8853
8854struct cgroup_subsys cpu_cgroup_subsys = {
38605cae
IM
8855 .name = "cpu",
8856 .create = cpu_cgroup_create,
8857 .destroy = cpu_cgroup_destroy,
f780bdb7
BB
8858 .can_attach_task = cpu_cgroup_can_attach_task,
8859 .attach_task = cpu_cgroup_attach_task,
068c5cc5 8860 .exit = cpu_cgroup_exit,
38605cae
IM
8861 .populate = cpu_cgroup_populate,
8862 .subsys_id = cpu_cgroup_subsys_id,
68318b8e
SV
8863 .early_init = 1,
8864};
8865
052f1dc7 8866#endif /* CONFIG_CGROUP_SCHED */
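/*
 * A minimal userspace sketch of driving the interface registered above,
 * assuming the mount-point directory already exists and the caller is
 * root.  Only the controller name "cpu", the cpu.* file names and the
 * cgroup core's "tasks" file come from the kernel side; the paths and the
 * group name "batch" are arbitrary choices for the example:
 *
 *	#include <stdio.h>
 *	#include <sys/mount.h>
 *	#include <sys/stat.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		FILE *f;
 *
 *		mount("cgroup", "/sys/fs/cgroup/cpu", "cgroup", 0, "cpu");
 *		mkdir("/sys/fs/cgroup/cpu/batch", 0755);
 *
 *		f = fopen("/sys/fs/cgroup/cpu/batch/cpu.shares", "w");
 *		if (f) {
 *			fprintf(f, "512\n");
 *			fclose(f);
 *		}
 *
 *		f = fopen("/sys/fs/cgroup/cpu/batch/tasks", "w");
 *		if (f) {
 *			fprintf(f, "%d\n", getpid());
 *			fclose(f);
 *		}
 *		return 0;
 *	}
 *
 * Writing 512 gives the "batch" group half the weight of a sibling left at
 * the default 1024; writing a pid to "tasks" ends up in
 * cpu_cgroup_attach_task() -> sched_move_task() above.
 */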
d842de87
SV
8867
8868#ifdef CONFIG_CGROUP_CPUACCT
8869
8870/*
8871 * CPU accounting code for task groups.
8872 *
8873 * Based on the work by Paul Menage (menage@google.com) and Balbir Singh
8874 * (balbir@in.ibm.com).
8875 */
8876
934352f2 8877/* track cpu usage of a group of tasks and its child groups */
d842de87
SV
8878struct cpuacct {
8879 struct cgroup_subsys_state css;
 8880	/* cpuusage holds a pointer to a u64-type object on every cpu */
43cf38eb 8881 u64 __percpu *cpuusage;
ef12fefa 8882 struct percpu_counter cpustat[CPUACCT_STAT_NSTATS];
934352f2 8883 struct cpuacct *parent;
d842de87
SV
8884};
8885
8886struct cgroup_subsys cpuacct_subsys;
8887
 8888/* return the cpu accounting group corresponding to this cgroup */
32cd756a 8889static inline struct cpuacct *cgroup_ca(struct cgroup *cgrp)
d842de87 8890{
32cd756a 8891 return container_of(cgroup_subsys_state(cgrp, cpuacct_subsys_id),
d842de87
SV
8892 struct cpuacct, css);
8893}
8894
8895/* return cpu accounting group to which this task belongs */
8896static inline struct cpuacct *task_ca(struct task_struct *tsk)
8897{
8898 return container_of(task_subsys_state(tsk, cpuacct_subsys_id),
8899 struct cpuacct, css);
8900}
8901
8902/* create a new cpu accounting group */
8903static struct cgroup_subsys_state *cpuacct_create(
32cd756a 8904 struct cgroup_subsys *ss, struct cgroup *cgrp)
d842de87
SV
8905{
8906 struct cpuacct *ca = kzalloc(sizeof(*ca), GFP_KERNEL);
ef12fefa 8907 int i;
d842de87
SV
8908
8909 if (!ca)
ef12fefa 8910 goto out;
d842de87
SV
8911
8912 ca->cpuusage = alloc_percpu(u64);
ef12fefa
BR
8913 if (!ca->cpuusage)
8914 goto out_free_ca;
8915
8916 for (i = 0; i < CPUACCT_STAT_NSTATS; i++)
8917 if (percpu_counter_init(&ca->cpustat[i], 0))
8918 goto out_free_counters;
d842de87 8919
934352f2
BR
8920 if (cgrp->parent)
8921 ca->parent = cgroup_ca(cgrp->parent);
8922
d842de87 8923 return &ca->css;
ef12fefa
BR
8924
8925out_free_counters:
8926 while (--i >= 0)
8927 percpu_counter_destroy(&ca->cpustat[i]);
8928 free_percpu(ca->cpuusage);
8929out_free_ca:
8930 kfree(ca);
8931out:
8932 return ERR_PTR(-ENOMEM);
d842de87
SV
8933}
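/*
 * Note on the unwind path above: if percpu_counter_init() fails for
 * counter i, the while (--i >= 0) loop destroys only the counters that
 * were successfully initialised before freeing cpuusage and ca itself,
 * so a partially constructed cpuacct never leaks.
 */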
8934
8935/* destroy an existing cpu accounting group */
41a2d6cf 8936static void
32cd756a 8937cpuacct_destroy(struct cgroup_subsys *ss, struct cgroup *cgrp)
d842de87 8938{
32cd756a 8939 struct cpuacct *ca = cgroup_ca(cgrp);
ef12fefa 8940 int i;
d842de87 8941
ef12fefa
BR
8942 for (i = 0; i < CPUACCT_STAT_NSTATS; i++)
8943 percpu_counter_destroy(&ca->cpustat[i]);
d842de87
SV
8944 free_percpu(ca->cpuusage);
8945 kfree(ca);
8946}
8947
720f5498
KC
8948static u64 cpuacct_cpuusage_read(struct cpuacct *ca, int cpu)
8949{
b36128c8 8950 u64 *cpuusage = per_cpu_ptr(ca->cpuusage, cpu);
720f5498
KC
8951 u64 data;
8952
8953#ifndef CONFIG_64BIT
8954 /*
8955 * Take rq->lock to make 64-bit read safe on 32-bit platforms.
8956 */
05fa785c 8957 raw_spin_lock_irq(&cpu_rq(cpu)->lock);
720f5498 8958 data = *cpuusage;
05fa785c 8959 raw_spin_unlock_irq(&cpu_rq(cpu)->lock);
720f5498
KC
8960#else
8961 data = *cpuusage;
8962#endif
8963
8964 return data;
8965}
8966
8967static void cpuacct_cpuusage_write(struct cpuacct *ca, int cpu, u64 val)
8968{
b36128c8 8969 u64 *cpuusage = per_cpu_ptr(ca->cpuusage, cpu);
720f5498
KC
8970
8971#ifndef CONFIG_64BIT
8972 /*
8973 * Take rq->lock to make 64-bit write safe on 32-bit platforms.
8974 */
05fa785c 8975 raw_spin_lock_irq(&cpu_rq(cpu)->lock);
720f5498 8976 *cpuusage = val;
05fa785c 8977 raw_spin_unlock_irq(&cpu_rq(cpu)->lock);
720f5498
KC
8978#else
8979 *cpuusage = val;
8980#endif
8981}
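/*
 * Why rq->lock in the two helpers above: on 32-bit platforms a u64 load or
 * store takes two machine words, so a reader could otherwise observe one
 * half of an old value and one half of a new one.  The updater,
 * cpuacct_charge(), already runs with rq->lock held, so taking the same
 * lock here makes the 64-bit access appear atomic.  On 64-bit a single
 * aligned access suffices.
 */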
8982
d842de87 8983/* return total cpu usage (in nanoseconds) of a group */
32cd756a 8984static u64 cpuusage_read(struct cgroup *cgrp, struct cftype *cft)
d842de87 8985{
32cd756a 8986 struct cpuacct *ca = cgroup_ca(cgrp);
d842de87
SV
8987 u64 totalcpuusage = 0;
8988 int i;
8989
720f5498
KC
8990 for_each_present_cpu(i)
8991 totalcpuusage += cpuacct_cpuusage_read(ca, i);
d842de87
SV
8992
8993 return totalcpuusage;
8994}
8995
0297b803
DG
8996static int cpuusage_write(struct cgroup *cgrp, struct cftype *cftype,
8997 u64 reset)
8998{
8999 struct cpuacct *ca = cgroup_ca(cgrp);
9000 int err = 0;
9001 int i;
9002
9003 if (reset) {
9004 err = -EINVAL;
9005 goto out;
9006 }
9007
720f5498
KC
9008 for_each_present_cpu(i)
9009 cpuacct_cpuusage_write(ca, i, 0);
0297b803 9010
0297b803
DG
9011out:
9012 return err;
9013}
9014
e9515c3c
KC
9015static int cpuacct_percpu_seq_read(struct cgroup *cgroup, struct cftype *cft,
9016 struct seq_file *m)
9017{
9018 struct cpuacct *ca = cgroup_ca(cgroup);
9019 u64 percpu;
9020 int i;
9021
9022 for_each_present_cpu(i) {
9023 percpu = cpuacct_cpuusage_read(ca, i);
9024 seq_printf(m, "%llu ", (unsigned long long) percpu);
9025 }
9026 seq_printf(m, "\n");
9027 return 0;
9028}
9029
ef12fefa
BR
9030static const char *cpuacct_stat_desc[] = {
9031 [CPUACCT_STAT_USER] = "user",
9032 [CPUACCT_STAT_SYSTEM] = "system",
9033};
9034
9035static int cpuacct_stats_show(struct cgroup *cgrp, struct cftype *cft,
9036 struct cgroup_map_cb *cb)
9037{
9038 struct cpuacct *ca = cgroup_ca(cgrp);
9039 int i;
9040
9041 for (i = 0; i < CPUACCT_STAT_NSTATS; i++) {
9042 s64 val = percpu_counter_read(&ca->cpustat[i]);
9043 val = cputime64_to_clock_t(val);
9044 cb->fill(cb, cpuacct_stat_desc[i], val);
9045 }
9046 return 0;
9047}
9048
d842de87
SV
9049static struct cftype files[] = {
9050 {
9051 .name = "usage",
f4c753b7
PM
9052 .read_u64 = cpuusage_read,
9053 .write_u64 = cpuusage_write,
d842de87 9054 },
e9515c3c
KC
9055 {
9056 .name = "usage_percpu",
9057 .read_seq_string = cpuacct_percpu_seq_read,
9058 },
ef12fefa
BR
9059 {
9060 .name = "stat",
9061 .read_map = cpuacct_stats_show,
9062 },
d842de87
SV
9063};
9064
32cd756a 9065static int cpuacct_populate(struct cgroup_subsys *ss, struct cgroup *cgrp)
d842de87 9066{
32cd756a 9067 return cgroup_add_files(cgrp, ss, files, ARRAY_SIZE(files));
d842de87
SV
9068}
9069
9070/*
9071 * charge this task's execution time to its accounting group.
9072 *
9073 * called with rq->lock held.
9074 */
9075static void cpuacct_charge(struct task_struct *tsk, u64 cputime)
9076{
9077 struct cpuacct *ca;
934352f2 9078 int cpu;
d842de87 9079
c40c6f85 9080 if (unlikely(!cpuacct_subsys.active))
d842de87
SV
9081 return;
9082
934352f2 9083 cpu = task_cpu(tsk);
a18b83b7
BR
9084
9085 rcu_read_lock();
9086
d842de87 9087 ca = task_ca(tsk);
d842de87 9088
934352f2 9089 for (; ca; ca = ca->parent) {
b36128c8 9090 u64 *cpuusage = per_cpu_ptr(ca->cpuusage, cpu);
d842de87
SV
9091 *cpuusage += cputime;
9092 }
a18b83b7
BR
9093
9094 rcu_read_unlock();
d842de87
SV
9095}
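/*
 * The walk above makes the accounting hierarchical: cputime charged to a
 * task in group /a/b is added to b's per-cpu counter, then to a's, and
 * finally to the root group's, so cpuacct.usage of a parent always
 * includes the usage of its children.
 */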
9096
fa535a77
AB
9097/*
 9098 * When CONFIG_VIRT_CPU_ACCOUNTING is enabled, one jiffy can be very large
 9099 * in cputime_t units. As a result, cpuacct_update_stats calls
 9100 * percpu_counter_add with values large enough to always overflow the
 9101 * per-CPU batch limit, causing bad SMP scalability.
 9102 *
 9103 * To fix this we scale percpu_counter_batch by cputime_one_jiffy so we
 9104 * batch the same amount of time whether CONFIG_VIRT_CPU_ACCOUNTING is
 9105 * disabled or enabled. We cap it at INT_MAX, the largest allowed batch value.
9106 */
9107#ifdef CONFIG_SMP
9108#define CPUACCT_BATCH \
9109 min_t(long, percpu_counter_batch * cputime_one_jiffy, INT_MAX)
9110#else
9111#define CPUACCT_BATCH 0
9112#endif
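/*
 * Rough arithmetic for the batch value above, assuming HZ=1000: with
 * CONFIG_VIRT_CPU_ACCOUNTING disabled, cputime_one_jiffy is 1, so the
 * batch stays at percpu_counter_batch (32 on small machines).  With it
 * enabled, one jiffy may be thousands or millions of cputime_t units, so
 * the batch is scaled up by the same factor (capped at INT_MAX) and each
 * CPU folds into the global counter about as often, in wall-clock terms,
 * as in the disabled case.
 */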
9113
ef12fefa
BR
9114/*
9115 * Charge the system/user time to the task's accounting group.
9116 */
9117static void cpuacct_update_stats(struct task_struct *tsk,
9118 enum cpuacct_stat_index idx, cputime_t val)
9119{
9120 struct cpuacct *ca;
fa535a77 9121 int batch = CPUACCT_BATCH;
ef12fefa
BR
9122
9123 if (unlikely(!cpuacct_subsys.active))
9124 return;
9125
9126 rcu_read_lock();
9127 ca = task_ca(tsk);
9128
9129 do {
fa535a77 9130 __percpu_counter_add(&ca->cpustat[idx], val, batch);
ef12fefa
BR
9131 ca = ca->parent;
9132 } while (ca);
9133 rcu_read_unlock();
9134}
9135
d842de87
SV
9136struct cgroup_subsys cpuacct_subsys = {
9137 .name = "cpuacct",
9138 .create = cpuacct_create,
9139 .destroy = cpuacct_destroy,
9140 .populate = cpuacct_populate,
9141 .subsys_id = cpuacct_subsys_id,
9142};
9143#endif /* CONFIG_CGROUP_CPUACCT */
03b042bf 9144