#include <linux/sched.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/stop_machine.h>

#include "cpupri.h"

extern __read_mostly int scheduler_running;

/*
 * Convert user-nice values [ -20 ... 0 ... 19 ]
 * to static priority [ MAX_RT_PRIO..MAX_PRIO-1 ],
 * and back.
 */
#define NICE_TO_PRIO(nice)	(MAX_RT_PRIO + (nice) + 20)
#define PRIO_TO_NICE(prio)	((prio) - MAX_RT_PRIO - 20)
#define TASK_NICE(p)		PRIO_TO_NICE((p)->static_prio)
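/*
 * Illustrative worked example (not part of the original header): with the
 * usual MAX_RT_PRIO of 100, NICE_TO_PRIO(-20) == 100, NICE_TO_PRIO(0) == 120
 * and NICE_TO_PRIO(19) == 139; PRIO_TO_NICE() inverts the mapping, e.g.
 * PRIO_TO_NICE(120) == 0.
 */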

/*
 * 'User priority' is the nice value converted to something we
 * can work with better when scaling various scheduler parameters;
 * it's a [ 0 ... 39 ] range.
 */
#define USER_PRIO(p)		((p)-MAX_RT_PRIO)
#define TASK_USER_PRIO(p)	USER_PRIO((p)->static_prio)
#define MAX_USER_PRIO		(USER_PRIO(MAX_PRIO))

/*
 * Helpers for converting nanosecond timing to jiffy resolution
 */
#define NS_TO_JIFFIES(TIME)	((unsigned long)(TIME) / (NSEC_PER_SEC / HZ))
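/*
 * Example (a sketch, not from the original source): with HZ == 1000,
 * NSEC_PER_SEC / HZ is 1,000,000, so NS_TO_JIFFIES(5000000) evaluates to 5,
 * i.e. five jiffies for 5 ms worth of nanosecond-resolution time.
 */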

#define NICE_0_LOAD		SCHED_LOAD_SCALE
#define NICE_0_SHIFT		SCHED_LOAD_SHIFT

/*
 * These are the 'tuning knobs' of the scheduler:
 *
 * default timeslice is 100 msecs (used only for SCHED_RR tasks).
 * Timeslices get refilled after they expire.
 */
#define DEF_TIMESLICE		(100 * HZ / 1000)

/*
 * single value that denotes runtime == period, i.e. unlimited time.
 */
#define RUNTIME_INF	((u64)~0ULL)

static inline int rt_policy(int policy)
{
	if (policy == SCHED_FIFO || policy == SCHED_RR)
		return 1;
	return 0;
}

static inline int task_has_rt_policy(struct task_struct *p)
{
	return rt_policy(p->policy);
}

/*
 * This is the priority-queue data structure of the RT scheduling class:
 */
struct rt_prio_array {
	DECLARE_BITMAP(bitmap, MAX_RT_PRIO+1); /* include 1 bit for delimiter */
	struct list_head queue[MAX_RT_PRIO];
};

struct rt_bandwidth {
	/* nests inside the rq lock: */
	raw_spinlock_t rt_runtime_lock;
	ktime_t rt_period;
	u64 rt_runtime;
	struct hrtimer rt_period_timer;
};

extern struct mutex sched_domains_mutex;

#ifdef CONFIG_CGROUP_SCHED

#include <linux/cgroup.h>

struct cfs_rq;
struct rt_rq;

static LIST_HEAD(task_groups);

struct cfs_bandwidth {
#ifdef CONFIG_CFS_BANDWIDTH
	raw_spinlock_t lock;
	ktime_t period;
	u64 quota, runtime;
	s64 hierarchal_quota;
	u64 runtime_expires;

	int idle, timer_active;
	struct hrtimer period_timer, slack_timer;
	struct list_head throttled_cfs_rq;

	/* statistics */
	int nr_periods, nr_throttled;
	u64 throttled_time;
#endif
};

/* task group related information */
struct task_group {
	struct cgroup_subsys_state css;

#ifdef CONFIG_FAIR_GROUP_SCHED
	/* schedulable entities of this group on each cpu */
	struct sched_entity **se;
	/* runqueue "owned" by this group on each cpu */
	struct cfs_rq **cfs_rq;
	unsigned long shares;

	atomic_t load_weight;
#endif

#ifdef CONFIG_RT_GROUP_SCHED
	struct sched_rt_entity **rt_se;
	struct rt_rq **rt_rq;

	struct rt_bandwidth rt_bandwidth;
#endif

	struct rcu_head rcu;
	struct list_head list;

	struct task_group *parent;
	struct list_head siblings;
	struct list_head children;

#ifdef CONFIG_SCHED_AUTOGROUP
	struct autogroup *autogroup;
#endif

	struct cfs_bandwidth cfs_bandwidth;
};

#ifdef CONFIG_FAIR_GROUP_SCHED
#define ROOT_TASK_GROUP_LOAD	NICE_0_LOAD

/*
 * A weight of 0 or 1 can cause arithmetic problems.
 * The weight of a cfs_rq is the sum of the weights of the entities
 * queued on it, so neither an entity's weight nor a task group's
 * shares value should be too large.
 * (The default weight is 1024 - so there's no practical
 * limitation from this.)
 */
#define MIN_SHARES	(1UL <<  1)
#define MAX_SHARES	(1UL << 18)
#endif

/* Default task group.
 * Every task in the system belongs to this group at bootup.
 */
extern struct task_group root_task_group;

typedef int (*tg_visitor)(struct task_group *, void *);

extern int walk_tg_tree_from(struct task_group *from,
			     tg_visitor down, tg_visitor up, void *data);

/*
 * Iterate the full tree, calling @down when first entering a node and @up when
 * leaving it for the final time.
 *
 * Caller must hold rcu_lock or a sufficient equivalent.
 */
static inline int walk_tg_tree(tg_visitor down, tg_visitor up, void *data)
{
	return walk_tg_tree_from(&root_task_group, down, up, data);
}
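/*
 * Usage sketch (illustrative only, not part of this header): a visitor that
 * counts the groups reachable from the root could be passed as @down, with
 * tg_nop() as the no-op @up callback:
 *
 *	static int count_tg(struct task_group *tg, void *data)
 *	{
 *		(*(int *)data)++;
 *		return 0;	/* a non-zero return aborts the walk */
 *	}
 *
 *	int nr = 0;
 *	rcu_read_lock();
 *	walk_tg_tree(count_tg, tg_nop, &nr);
 *	rcu_read_unlock();
 */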

extern int tg_nop(struct task_group *tg, void *data);

extern void free_fair_sched_group(struct task_group *tg);
extern int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent);
extern void unregister_fair_sched_group(struct task_group *tg, int cpu);
extern void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq,
			struct sched_entity *se, int cpu,
			struct sched_entity *parent);
extern void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b);
extern int sched_group_set_shares(struct task_group *tg, unsigned long shares);

extern void __refill_cfs_bandwidth_runtime(struct cfs_bandwidth *cfs_b);
extern void __start_cfs_bandwidth(struct cfs_bandwidth *cfs_b);
extern void unthrottle_cfs_rq(struct cfs_rq *cfs_rq);

extern void free_rt_sched_group(struct task_group *tg);
extern int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent);
extern void init_tg_rt_entry(struct task_group *tg, struct rt_rq *rt_rq,
		struct sched_rt_entity *rt_se, int cpu,
		struct sched_rt_entity *parent);

#else /* CONFIG_CGROUP_SCHED */

struct cfs_bandwidth { };

#endif /* CONFIG_CGROUP_SCHED */

/* CFS-related fields in a runqueue */
struct cfs_rq {
	struct load_weight load;
	unsigned long nr_running, h_nr_running;

	u64 exec_clock;
	u64 min_vruntime;
#ifndef CONFIG_64BIT
	u64 min_vruntime_copy;
#endif

	struct rb_root tasks_timeline;
	struct rb_node *rb_leftmost;

	struct list_head tasks;
	struct list_head *balance_iterator;

	/*
	 * 'curr' points to the currently running entity on this cfs_rq.
	 * It is set to NULL otherwise (i.e. when none are currently running).
	 */
	struct sched_entity *curr, *next, *last, *skip;

#ifdef CONFIG_SCHED_DEBUG
	unsigned int nr_spread_over;
#endif

#ifdef CONFIG_FAIR_GROUP_SCHED
	struct rq *rq;	/* cpu runqueue to which this cfs_rq is attached */

	/*
	 * leaf cfs_rqs are those that hold tasks (lowest schedulable entity in
	 * a hierarchy). Non-leaf cfs_rqs hold other higher schedulable entities
	 * (like users, containers etc.)
	 *
	 * leaf_cfs_rq_list ties together list of leaf cfs_rq's in a cpu. This
	 * list is used during load balance.
	 */
	int on_list;
	struct list_head leaf_cfs_rq_list;
	struct task_group *tg;	/* group that "owns" this runqueue */

#ifdef CONFIG_SMP
	/*
	 * the part of load.weight contributed by tasks
	 */
	unsigned long task_weight;

	/*
	 *   h_load = weight * f(tg)
	 *
	 * Where f(tg) is the recursive weight fraction assigned to
	 * this group.
	 */
	unsigned long h_load;

	/*
	 * Maintaining per-cpu shares distribution for group scheduling
	 *
	 * load_stamp is the last time we updated the load average
	 * load_last is the last time we updated the load average and saw load
	 * load_unacc_exec_time is currently unaccounted execution time
	 */
	u64 load_avg;
	u64 load_period;
	u64 load_stamp, load_last, load_unacc_exec_time;

	unsigned long load_contribution;
#endif /* CONFIG_SMP */
#ifdef CONFIG_CFS_BANDWIDTH
	int runtime_enabled;
	u64 runtime_expires;
	s64 runtime_remaining;

	u64 throttled_timestamp;
	int throttled, throttle_count;
	struct list_head throttled_list;
#endif /* CONFIG_CFS_BANDWIDTH */
#endif /* CONFIG_FAIR_GROUP_SCHED */
};

static inline int rt_bandwidth_enabled(void)
{
	return sysctl_sched_rt_runtime >= 0;
}

/* Real-Time classes' related field in a runqueue: */
struct rt_rq {
	struct rt_prio_array active;
	unsigned long rt_nr_running;
#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
	struct {
		int curr; /* highest queued rt task prio */
#ifdef CONFIG_SMP
		int next; /* next highest */
#endif
	} highest_prio;
#endif
#ifdef CONFIG_SMP
	unsigned long rt_nr_migratory;
	unsigned long rt_nr_total;
	int overloaded;
	struct plist_head pushable_tasks;
#endif
	int rt_throttled;
	u64 rt_time;
	u64 rt_runtime;
	/* Nests inside the rq lock: */
	raw_spinlock_t rt_runtime_lock;

#ifdef CONFIG_RT_GROUP_SCHED
	unsigned long rt_nr_boosted;

	struct rq *rq;
	struct list_head leaf_rt_rq_list;
	struct task_group *tg;
#endif
};

#ifdef CONFIG_SMP

/*
 * We add the notion of a root-domain which will be used to define per-domain
 * variables. Each exclusive cpuset essentially defines an island domain by
 * fully partitioning the member cpus from any other cpuset. Whenever a new
 * exclusive cpuset is created, we also create and attach a new root-domain
 * object.
 */
struct root_domain {
	atomic_t refcount;
	atomic_t rto_count;
	struct rcu_head rcu;
	cpumask_var_t span;
	cpumask_var_t online;

	/*
	 * The "RT overload" flag: it gets set if a CPU has more than
	 * one runnable RT task.
	 */
	cpumask_var_t rto_mask;
	struct cpupri cpupri;
};

extern struct root_domain def_root_domain;

#endif /* CONFIG_SMP */

/*
 * This is the main, per-CPU runqueue data structure.
 *
 * Locking rule: code that wants to lock multiple runqueues (such as the
 * load balancing or the thread migration code) must acquire the locks
 * in ascending &runqueue order.
 */
struct rq {
	/* runqueue lock: */
	raw_spinlock_t lock;

	/*
	 * nr_running and cpu_load should be in the same cacheline because
	 * remote CPUs use both these fields when doing load calculation.
	 */
	unsigned long nr_running;
	#define CPU_LOAD_IDX_MAX 5
	unsigned long cpu_load[CPU_LOAD_IDX_MAX];
	unsigned long last_load_update_tick;
#ifdef CONFIG_NO_HZ
	u64 nohz_stamp;
	unsigned long nohz_flags;
#endif
	int skip_clock_update;

	/* capture load from *all* tasks on this cpu: */
	struct load_weight load;
	unsigned long nr_load_updates;
	u64 nr_switches;

	struct cfs_rq cfs;
	struct rt_rq rt;

#ifdef CONFIG_FAIR_GROUP_SCHED
	/* list of leaf cfs_rq on this cpu: */
	struct list_head leaf_cfs_rq_list;
#endif
#ifdef CONFIG_RT_GROUP_SCHED
	struct list_head leaf_rt_rq_list;
#endif

	/*
	 * This is part of a global counter where only the total sum
	 * over all CPUs matters. A task can increase this counter on
	 * one CPU and if it got migrated afterwards it may decrease
	 * it on another CPU. Always updated under the runqueue lock:
	 */
	unsigned long nr_uninterruptible;

	struct task_struct *curr, *idle, *stop;
	unsigned long next_balance;
	struct mm_struct *prev_mm;

	u64 clock;
	u64 clock_task;

	atomic_t nr_iowait;

#ifdef CONFIG_SMP
	struct root_domain *rd;
	struct sched_domain *sd;

	unsigned long cpu_power;

	unsigned char idle_balance;
	/* For active balancing */
	int post_schedule;
	int active_balance;
	int push_cpu;
	struct cpu_stop_work active_balance_work;
	/* cpu of this runqueue: */
	int cpu;
	int online;

	u64 rt_avg;
	u64 age_stamp;
	u64 idle_stamp;
	u64 avg_idle;
#endif

#ifdef CONFIG_IRQ_TIME_ACCOUNTING
	u64 prev_irq_time;
#endif
#ifdef CONFIG_PARAVIRT
	u64 prev_steal_time;
#endif
#ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING
	u64 prev_steal_time_rq;
#endif

	/* calc_load related fields */
	unsigned long calc_load_update;
	long calc_load_active;

#ifdef CONFIG_SCHED_HRTICK
#ifdef CONFIG_SMP
	int hrtick_csd_pending;
	struct call_single_data hrtick_csd;
#endif
	struct hrtimer hrtick_timer;
#endif

#ifdef CONFIG_SCHEDSTATS
	/* latency stats */
	struct sched_info rq_sched_info;
	unsigned long long rq_cpu_time;
	/* could above be rq->cfs_rq.exec_clock + rq->rt_rq.rt_runtime ? */

	/* sys_sched_yield() stats */
	unsigned int yld_count;

	/* schedule() stats */
	unsigned int sched_switch;
	unsigned int sched_count;
	unsigned int sched_goidle;

	/* try_to_wake_up() stats */
	unsigned int ttwu_count;
	unsigned int ttwu_local;
#endif

#ifdef CONFIG_SMP
	struct llist_head wake_list;
#endif
};

static inline int cpu_of(struct rq *rq)
{
#ifdef CONFIG_SMP
	return rq->cpu;
#else
	return 0;
#endif
}

DECLARE_PER_CPU(struct rq, runqueues);

#define rcu_dereference_check_sched_domain(p) \
	rcu_dereference_check((p), \
			      lockdep_is_held(&sched_domains_mutex))

/*
 * The domain tree (rq->sd) is protected by RCU's quiescent state transition.
 * See detach_destroy_domains: synchronize_sched for details.
 *
 * The domain tree of any CPU may only be accessed from within
 * preempt-disabled sections.
 */
#define for_each_domain(cpu, __sd) \
	for (__sd = rcu_dereference_check_sched_domain(cpu_rq(cpu)->sd); __sd; __sd = __sd->parent)

#define for_each_lower_domain(sd) for (; sd; sd = sd->child)

#define cpu_rq(cpu)		(&per_cpu(runqueues, (cpu)))
#define this_rq()		(&__get_cpu_var(runqueues))
#define task_rq(p)		cpu_rq(task_cpu(p))
#define cpu_curr(cpu)		(cpu_rq(cpu)->curr)
#define raw_rq()		(&__raw_get_cpu_var(runqueues))
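/*
 * Usage sketch (illustrative only; assumes a preempt-disabled or otherwise
 * pinned context for this_rq()):
 *
 *	struct rq *rq = this_rq();
 *	raw_spin_lock(&rq->lock);
 *	...
 *	raw_spin_unlock(&rq->lock);
 *
 * cpu_rq(cpu) yields the same runqueue for an arbitrary CPU number.
 */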

#include "stats.h"
#include "auto_group.h"

#ifdef CONFIG_CGROUP_SCHED

/*
 * Return the group to which this task belongs.
 *
 * We use task_subsys_state_check() and extend the RCU verification with
 * pi->lock and rq->lock because cpu_cgroup_attach() holds those locks for each
 * task it moves into the cgroup. Therefore by holding either of those locks,
 * we pin the task to the current cgroup.
 */
static inline struct task_group *task_group(struct task_struct *p)
{
	struct task_group *tg;
	struct cgroup_subsys_state *css;

	css = task_subsys_state_check(p, cpu_cgroup_subsys_id,
			lockdep_is_held(&p->pi_lock) ||
			lockdep_is_held(&task_rq(p)->lock));
	tg = container_of(css, struct task_group, css);

	return autogroup_task_group(p, tg);
}

/* Change a task's cfs_rq and parent entity if it moves across CPUs/groups */
static inline void set_task_rq(struct task_struct *p, unsigned int cpu)
{
#if defined(CONFIG_FAIR_GROUP_SCHED) || defined(CONFIG_RT_GROUP_SCHED)
	struct task_group *tg = task_group(p);
#endif

#ifdef CONFIG_FAIR_GROUP_SCHED
	p->se.cfs_rq = tg->cfs_rq[cpu];
	p->se.parent = tg->se[cpu];
#endif

#ifdef CONFIG_RT_GROUP_SCHED
	p->rt.rt_rq = tg->rt_rq[cpu];
	p->rt.parent = tg->rt_se[cpu];
#endif
}

#else /* CONFIG_CGROUP_SCHED */

static inline void set_task_rq(struct task_struct *p, unsigned int cpu) { }
static inline struct task_group *task_group(struct task_struct *p)
{
	return NULL;
}

#endif /* CONFIG_CGROUP_SCHED */

static inline void __set_task_cpu(struct task_struct *p, unsigned int cpu)
{
	set_task_rq(p, cpu);
#ifdef CONFIG_SMP
	/*
	 * After ->cpu is set up to a new value, task_rq_lock(p, ...) can be
	 * successfully executed on another CPU. We must ensure that updates of
	 * per-task data have been completed by this moment.
	 */
	smp_wmb();
	task_thread_info(p)->cpu = cpu;
#endif
}

/*
 * Tunables that become constants when CONFIG_SCHED_DEBUG is off:
 */
#ifdef CONFIG_SCHED_DEBUG
# include <linux/jump_label.h>
# define const_debug __read_mostly
#else
# define const_debug const
#endif

extern const_debug unsigned int sysctl_sched_features;

#define SCHED_FEAT(name, enabled)	\
	__SCHED_FEAT_##name ,

enum {
#include "features.h"
	__SCHED_FEAT_NR,
};

#undef SCHED_FEAT

#if defined(CONFIG_SCHED_DEBUG) && defined(HAVE_JUMP_LABEL)
static __always_inline bool static_branch__true(struct jump_label_key *key)
{
	return likely(static_branch(key)); /* Not out of line branch. */
}

static __always_inline bool static_branch__false(struct jump_label_key *key)
{
	return unlikely(static_branch(key)); /* Out of line branch. */
}

#define SCHED_FEAT(name, enabled)					\
static __always_inline bool static_branch_##name(struct jump_label_key *key) \
{									\
	return static_branch__##enabled(key);				\
}

#include "features.h"

#undef SCHED_FEAT

extern struct jump_label_key sched_feat_keys[__SCHED_FEAT_NR];
#define sched_feat(x) (static_branch_##x(&sched_feat_keys[__SCHED_FEAT_##x]))
#else /* !(SCHED_DEBUG && HAVE_JUMP_LABEL) */
#define sched_feat(x) (sysctl_sched_features & (1UL << __SCHED_FEAT_##x))
#endif /* SCHED_DEBUG && HAVE_JUMP_LABEL */
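/*
 * Usage sketch (illustrative, mirroring how this header's own helpers use
 * the macro further down): a feature check compiles down to either a static
 * branch or a simple bitmask test, e.g.
 *
 *	if (sched_feat(HRTICK))
 *		... take the high-resolution tick path ...
 */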

static inline u64 global_rt_period(void)
{
	return (u64)sysctl_sched_rt_period * NSEC_PER_USEC;
}

static inline u64 global_rt_runtime(void)
{
	if (sysctl_sched_rt_runtime < 0)
		return RUNTIME_INF;

	return (u64)sysctl_sched_rt_runtime * NSEC_PER_USEC;
}
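/*
 * Worked example (an assumption based on the usual defaults, not stated in
 * this header): with sched_rt_period_us == 1000000 and
 * sched_rt_runtime_us == 950000, global_rt_period() is 1e9 ns and
 * global_rt_runtime() is 9.5e8 ns, i.e. realtime tasks may consume at most
 * 95% of each second; writing -1 to the runtime sysctl yields RUNTIME_INF.
 */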

static inline int task_current(struct rq *rq, struct task_struct *p)
{
	return rq->curr == p;
}

static inline int task_running(struct rq *rq, struct task_struct *p)
{
#ifdef CONFIG_SMP
	return p->on_cpu;
#else
	return task_current(rq, p);
#endif
}

#ifndef prepare_arch_switch
# define prepare_arch_switch(next)	do { } while (0)
#endif
#ifndef finish_arch_switch
# define finish_arch_switch(prev)	do { } while (0)
#endif

#ifndef __ARCH_WANT_UNLOCKED_CTXSW
static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next)
{
#ifdef CONFIG_SMP
	/*
	 * We can optimise this out completely for !SMP, because the
	 * SMP rebalancing from interrupt is the only thing that cares
	 * here.
	 */
	next->on_cpu = 1;
#endif
}

static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
{
#ifdef CONFIG_SMP
	/*
	 * After ->on_cpu is cleared, the task can be moved to a different CPU.
	 * We must ensure this doesn't happen until the switch is completely
	 * finished.
	 */
	smp_wmb();
	prev->on_cpu = 0;
#endif
#ifdef CONFIG_DEBUG_SPINLOCK
	/* this is a valid case when another task releases the spinlock */
	rq->lock.owner = current;
#endif
	/*
	 * If we are tracking spinlock dependencies then we have to
	 * fix up the runqueue lock - which gets 'carried over' from
	 * prev into current:
	 */
	spin_acquire(&rq->lock.dep_map, 0, 0, _THIS_IP_);

	raw_spin_unlock_irq(&rq->lock);
}

#else /* __ARCH_WANT_UNLOCKED_CTXSW */
static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next)
{
#ifdef CONFIG_SMP
	/*
	 * We can optimise this out completely for !SMP, because the
	 * SMP rebalancing from interrupt is the only thing that cares
	 * here.
	 */
	next->on_cpu = 1;
#endif
#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
	raw_spin_unlock_irq(&rq->lock);
#else
	raw_spin_unlock(&rq->lock);
#endif
}

static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
{
#ifdef CONFIG_SMP
	/*
	 * After ->on_cpu is cleared, the task can be moved to a different CPU.
	 * We must ensure this doesn't happen until the switch is completely
	 * finished.
	 */
	smp_wmb();
	prev->on_cpu = 0;
#endif
#ifndef __ARCH_WANT_INTERRUPTS_ON_CTXSW
	local_irq_enable();
#endif
}
#endif /* __ARCH_WANT_UNLOCKED_CTXSW */

static inline void update_load_add(struct load_weight *lw, unsigned long inc)
{
	lw->weight += inc;
	lw->inv_weight = 0;
}

static inline void update_load_sub(struct load_weight *lw, unsigned long dec)
{
	lw->weight -= dec;
	lw->inv_weight = 0;
}

static inline void update_load_set(struct load_weight *lw, unsigned long w)
{
	lw->weight = w;
	lw->inv_weight = 0;
}

/*
 * To aid in avoiding the subversion of "niceness" due to uneven distribution
 * of tasks with abnormal "nice" values across CPUs, the contribution that
 * each task makes to its run queue's load is weighted according to its
 * scheduling class and "nice" value. For SCHED_NORMAL tasks this is just a
 * scaled version of the new time slice allocation that they receive on time
 * slice expiry etc.
 */

#define WEIGHT_IDLEPRIO                3
#define WMULT_IDLEPRIO         1431655765

/*
 * Nice levels are multiplicative, with a gentle 10% change for every
 * nice level changed. I.e. when a CPU-bound task goes from nice 0 to
 * nice 1, it will get ~10% less CPU time than another CPU-bound task
 * that remained on nice 0.
 *
 * The "10% effect" is relative and cumulative: from _any_ nice level,
 * if you go up 1 level, it's -10% CPU usage, if you go down 1 level
 * it's +10% CPU usage. (to achieve that we use a multiplier of 1.25.
 * If a task goes up by ~10% and another task goes down by ~10% then
 * the relative distance between them is ~25%.)
 */
static const int prio_to_weight[40] = {
 /* -20 */     88761,     71755,     56483,     46273,     36291,
 /* -15 */     29154,     23254,     18705,     14949,     11916,
 /* -10 */      9548,      7620,      6100,      4904,      3906,
 /*  -5 */      3121,      2501,      1991,      1586,      1277,
 /*   0 */      1024,       820,       655,       526,       423,
 /*   5 */       335,       272,       215,       172,       137,
 /*  10 */       110,        87,        70,        56,        45,
 /*  15 */        36,        29,        23,        18,        15,
};
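/*
 * Worked example (illustrative, derived from the table above): two CPU-bound
 * tasks at nice 0 and nice 1 have weights 1024 and 820, so they receive
 * 1024/(1024+820) ~= 55.5% and ~44.5% of the CPU respectively - roughly the
 * documented 10% relative difference, i.e. a ~1.25 ratio between them.
 */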

/*
 * Inverse (2^32/x) values of the prio_to_weight[] array, precalculated.
 *
 * In cases where the weight does not change often, we can use the
 * precalculated inverse to speed up arithmetic by turning divisions
 * into multiplications:
 */
static const u32 prio_to_wmult[40] = {
 /* -20 */     48388,     59856,     76040,     92818,    118348,
 /* -15 */    147320,    184698,    229616,    287308,    360437,
 /* -10 */    449829,    563644,    704093,    875809,   1099582,
 /*  -5 */   1376151,   1717300,   2157191,   2708050,   3363326,
 /*   0 */   4194304,   5237765,   6557202,   8165337,  10153587,
 /*   5 */  12820798,  15790321,  19976592,  24970740,  31350126,
 /*  10 */  39045157,  49367440,  61356676,  76695844,  95443717,
 /*  15 */ 119304647, 148102320, 186737708, 238609294, 286331153,
};
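/*
 * Worked example (illustrative): for nice 0 the weight is 1024 and the
 * precalculated inverse is 4194304 == 2^32/1024, so a division such as
 * delta/1024 can be computed as (delta * 4194304) >> 32, letting the fair
 * class scale execution time by weight without a hardware divide.
 */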

/* Time spent by the tasks of the cpu accounting group executing in ... */
enum cpuacct_stat_index {
	CPUACCT_STAT_USER,	/* ... user mode */
	CPUACCT_STAT_SYSTEM,	/* ... kernel mode */

	CPUACCT_STAT_NSTATS,
};

#define sched_class_highest (&stop_sched_class)
#define for_each_class(class) \
   for (class = sched_class_highest; class; class = class->next)

extern const struct sched_class stop_sched_class;
extern const struct sched_class rt_sched_class;
extern const struct sched_class fair_sched_class;
extern const struct sched_class idle_sched_class;

#ifdef CONFIG_SMP

extern void trigger_load_balance(struct rq *rq, int cpu);
extern void idle_balance(int this_cpu, struct rq *this_rq);

#else /* CONFIG_SMP */

static inline void idle_balance(int cpu, struct rq *rq)
{
}

#endif

extern void sysrq_sched_debug_show(void);
extern void sched_init_granularity(void);
extern void update_max_interval(void);
extern void update_group_power(struct sched_domain *sd, int cpu);
extern int update_runtime(struct notifier_block *nfb, unsigned long action, void *hcpu);
extern void init_sched_rt_class(void);
extern void init_sched_fair_class(void);

extern void resched_task(struct task_struct *p);
extern void resched_cpu(int cpu);

extern struct rt_bandwidth def_rt_bandwidth;
extern void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime);

extern void update_cpu_load(struct rq *this_rq);

#ifdef CONFIG_CGROUP_CPUACCT
#include <linux/cgroup.h>
/* track cpu usage of a group of tasks and its child groups */
struct cpuacct {
	struct cgroup_subsys_state css;
	/* cpuusage holds pointer to a u64-type object on every cpu */
	u64 __percpu *cpuusage;
	struct kernel_cpustat __percpu *cpustat;
};

/* return cpu accounting group corresponding to this container */
static inline struct cpuacct *cgroup_ca(struct cgroup *cgrp)
{
	return container_of(cgroup_subsys_state(cgrp, cpuacct_subsys_id),
			    struct cpuacct, css);
}

/* return cpu accounting group to which this task belongs */
static inline struct cpuacct *task_ca(struct task_struct *tsk)
{
	return container_of(task_subsys_state(tsk, cpuacct_subsys_id),
			    struct cpuacct, css);
}

static inline struct cpuacct *parent_ca(struct cpuacct *ca)
{
	if (!ca || !ca->css.cgroup->parent)
		return NULL;
	return cgroup_ca(ca->css.cgroup->parent);
}

extern void cpuacct_charge(struct task_struct *tsk, u64 cputime);
#else
static inline void cpuacct_charge(struct task_struct *tsk, u64 cputime) {}
#endif

static inline void inc_nr_running(struct rq *rq)
{
	rq->nr_running++;
}

static inline void dec_nr_running(struct rq *rq)
{
	rq->nr_running--;
}

extern void update_rq_clock(struct rq *rq);

extern void activate_task(struct rq *rq, struct task_struct *p, int flags);
extern void deactivate_task(struct rq *rq, struct task_struct *p, int flags);

extern void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags);

extern const_debug unsigned int sysctl_sched_time_avg;
extern const_debug unsigned int sysctl_sched_nr_migrate;
extern const_debug unsigned int sysctl_sched_migration_cost;

static inline u64 sched_avg_period(void)
{
	return (u64)sysctl_sched_time_avg * NSEC_PER_MSEC / 2;
}

void calc_load_account_idle(struct rq *this_rq);

#ifdef CONFIG_SCHED_HRTICK

/*
 * Use hrtick when:
 *  - enabled by features
 *  - hrtimer is actually high res
 */
static inline int hrtick_enabled(struct rq *rq)
{
	if (!sched_feat(HRTICK))
		return 0;
	if (!cpu_active(cpu_of(rq)))
		return 0;
	return hrtimer_is_hres_active(&rq->hrtick_timer);
}

void hrtick_start(struct rq *rq, u64 delay);

#else

static inline int hrtick_enabled(struct rq *rq)
{
	return 0;
}

#endif /* CONFIG_SCHED_HRTICK */

#ifdef CONFIG_SMP
extern void sched_avg_update(struct rq *rq);
static inline void sched_rt_avg_update(struct rq *rq, u64 rt_delta)
{
	rq->rt_avg += rt_delta;
	sched_avg_update(rq);
}
#else
static inline void sched_rt_avg_update(struct rq *rq, u64 rt_delta) { }
static inline void sched_avg_update(struct rq *rq) { }
#endif

extern void start_bandwidth_timer(struct hrtimer *period_timer, ktime_t period);

#ifdef CONFIG_SMP
#ifdef CONFIG_PREEMPT

static inline void double_rq_lock(struct rq *rq1, struct rq *rq2);

/*
 * fair double_lock_balance: Safely acquires both rq->locks in a fair
 * way at the expense of forcing extra atomic operations in all
 * invocations. This assures that the double_lock is acquired using the
 * same underlying policy as the spinlock_t on this architecture, which
 * reduces latency compared to the unfair variant below. However, it
 * also adds more overhead and therefore may reduce throughput.
 */
static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest)
	__releases(this_rq->lock)
	__acquires(busiest->lock)
	__acquires(this_rq->lock)
{
	raw_spin_unlock(&this_rq->lock);
	double_rq_lock(this_rq, busiest);

	return 1;
}

#else
/*
 * Unfair double_lock_balance: Optimizes throughput at the expense of
 * latency by eliminating extra atomic operations when the locks are
 * already in proper order on entry. This favors lower cpu-ids and will
 * grant the double lock to lower cpus over higher ids under contention,
 * regardless of entry order into the function.
 */
static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest)
	__releases(this_rq->lock)
	__acquires(busiest->lock)
	__acquires(this_rq->lock)
{
	int ret = 0;

	if (unlikely(!raw_spin_trylock(&busiest->lock))) {
		if (busiest < this_rq) {
			raw_spin_unlock(&this_rq->lock);
			raw_spin_lock(&busiest->lock);
			raw_spin_lock_nested(&this_rq->lock,
					      SINGLE_DEPTH_NESTING);
			ret = 1;
		} else
			raw_spin_lock_nested(&busiest->lock,
					      SINGLE_DEPTH_NESTING);
	}
	return ret;
}

#endif /* CONFIG_PREEMPT */

/*
 * double_lock_balance - lock the busiest runqueue, this_rq is locked already.
 */
static inline int double_lock_balance(struct rq *this_rq, struct rq *busiest)
{
	if (unlikely(!irqs_disabled())) {
		/* printk() doesn't work well under rq->lock */
		raw_spin_unlock(&this_rq->lock);
		BUG_ON(1);
	}

	return _double_lock_balance(this_rq, busiest);
}

static inline void double_unlock_balance(struct rq *this_rq, struct rq *busiest)
	__releases(busiest->lock)
{
	raw_spin_unlock(&busiest->lock);
	lock_set_subclass(&this_rq->lock.dep_map, 0, _RET_IP_);
}
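/*
 * Usage sketch (illustrative only): a pull-side balancer typically brackets
 * its access to the remote runqueue like this, re-checking its own state
 * whenever double_lock_balance() reports that this_rq->lock was dropped:
 *
 *	if (double_lock_balance(this_rq, busiest)) {
 *		... this_rq->lock was released and re-taken; re-validate ...
 *	}
 *	... pull tasks from busiest ...
 *	double_unlock_balance(this_rq, busiest);
 */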

/*
 * double_rq_lock - safely lock two runqueues
 *
 * Note this does not disable interrupts like task_rq_lock;
 * you need to do so manually before calling.
 */
static inline void double_rq_lock(struct rq *rq1, struct rq *rq2)
	__acquires(rq1->lock)
	__acquires(rq2->lock)
{
	BUG_ON(!irqs_disabled());
	if (rq1 == rq2) {
		raw_spin_lock(&rq1->lock);
		__acquire(rq2->lock);	/* Fake it out ;) */
	} else {
		if (rq1 < rq2) {
			raw_spin_lock(&rq1->lock);
			raw_spin_lock_nested(&rq2->lock, SINGLE_DEPTH_NESTING);
		} else {
			raw_spin_lock(&rq2->lock);
			raw_spin_lock_nested(&rq1->lock, SINGLE_DEPTH_NESTING);
		}
	}
}

/*
 * double_rq_unlock - safely unlock two runqueues
 *
 * Note this does not restore interrupts like task_rq_unlock;
 * you need to do so manually after calling.
 */
static inline void double_rq_unlock(struct rq *rq1, struct rq *rq2)
	__releases(rq1->lock)
	__releases(rq2->lock)
{
	raw_spin_unlock(&rq1->lock);
	if (rq1 != rq2)
		raw_spin_unlock(&rq2->lock);
	else
		__release(rq2->lock);
}

#else /* CONFIG_SMP */

/*
 * double_rq_lock - safely lock two runqueues
 *
 * Note this does not disable interrupts like task_rq_lock;
 * you need to do so manually before calling.
 */
static inline void double_rq_lock(struct rq *rq1, struct rq *rq2)
	__acquires(rq1->lock)
	__acquires(rq2->lock)
{
	BUG_ON(!irqs_disabled());
	BUG_ON(rq1 != rq2);
	raw_spin_lock(&rq1->lock);
	__acquire(rq2->lock);	/* Fake it out ;) */
}

/*
 * double_rq_unlock - safely unlock two runqueues
 *
 * Note this does not restore interrupts like task_rq_unlock;
 * you need to do so manually after calling.
 */
static inline void double_rq_unlock(struct rq *rq1, struct rq *rq2)
	__releases(rq1->lock)
	__releases(rq2->lock)
{
	BUG_ON(rq1 != rq2);
	raw_spin_unlock(&rq1->lock);
	__release(rq2->lock);
}

#endif

extern struct sched_entity *__pick_first_entity(struct cfs_rq *cfs_rq);
extern struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq);
extern void print_cfs_stats(struct seq_file *m, int cpu);
extern void print_rt_stats(struct seq_file *m, int cpu);

extern void init_cfs_rq(struct cfs_rq *cfs_rq);
extern void init_rt_rq(struct rt_rq *rt_rq, struct rq *rq);
extern void unthrottle_offline_cfs_rqs(struct rq *rq);

extern void account_cfs_bandwidth_used(int enabled, int was_enabled);

#ifdef CONFIG_NO_HZ
enum rq_nohz_flag_bits {
	NOHZ_TICK_STOPPED,
	NOHZ_BALANCE_KICK,
	NOHZ_IDLE,
};

#define nohz_flags(cpu)	(&cpu_rq(cpu)->nohz_flags)
#endif