/*
 * Completely Fair Scheduling (CFS) Class (SCHED_NORMAL/SCHED_BATCH)
 *
 * Copyright (C) 2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *
 * Interactivity improvements by Mike Galbraith
 * (C) 2007 Mike Galbraith <efault@gmx.de>
 *
 * Various enhancements by Dmitry Adamushko.
 * (C) 2007 Dmitry Adamushko <dmitry.adamushko@gmail.com>
 *
 * Group scheduling enhancements by Srivatsa Vaddagiri
 * Copyright IBM Corporation, 2007
 * Author: Srivatsa Vaddagiri <vatsa@linux.vnet.ibm.com>
 *
 * Scaled math optimizations by Thomas Gleixner
 * Copyright (C) 2007, Thomas Gleixner <tglx@linutronix.de>
 *
 * Adaptive scheduling granularity, math enhancements by Peter Zijlstra
 * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra
 */

#include <linux/sched.h>
#include <linux/latencytop.h>
#include <linux/cpumask.h>
#include <linux/cpuidle.h>
#include <linux/slab.h>
#include <linux/profile.h>
#include <linux/interrupt.h>
#include <linux/mempolicy.h>
#include <linux/migrate.h>
#include <linux/task_work.h>

#include <trace/events/sched.h>

#include "sched.h"

/*
 * Targeted preemption latency for CPU-bound tasks:
 * (default: 6ms * (1 + ilog(ncpus)), units: nanoseconds)
 *
 * NOTE: this latency value is not the same as the concept of
 * 'timeslice length' - timeslices in CFS are of variable length
 * and have no persistent notion like in traditional, time-slice
 * based scheduling concepts.
 *
 * (to see the precise effective timeslice length of your workload,
 *  run vmstat and monitor the context-switches (cs) field)
 */
unsigned int sysctl_sched_latency = 6000000ULL;
unsigned int normalized_sysctl_sched_latency = 6000000ULL;

/*
 * The initial- and re-scaling of tunables is configurable
 * (default SCHED_TUNABLESCALING_LOG = *(1+ilog(ncpus)))
 *
 * Options are:
 * SCHED_TUNABLESCALING_NONE - unscaled, always *1
 * SCHED_TUNABLESCALING_LOG - scaled logarithmically, *1+ilog(ncpus)
 * SCHED_TUNABLESCALING_LINEAR - scaled linear, *ncpus
 */
enum sched_tunable_scaling sysctl_sched_tunable_scaling
	= SCHED_TUNABLESCALING_LOG;

/*
 * Minimal preemption granularity for CPU-bound tasks:
 * (default: 0.75 msec * (1 + ilog(ncpus)), units: nanoseconds)
 */
unsigned int sysctl_sched_min_granularity = 750000ULL;
unsigned int normalized_sysctl_sched_min_granularity = 750000ULL;

/*
 * sched_nr_latency is kept at sysctl_sched_latency / sysctl_sched_min_granularity
 */
static unsigned int sched_nr_latency = 8;

/*
 * After fork, child runs first. If set to 0 (default) then
 * parent will (try to) run first.
 */
unsigned int sysctl_sched_child_runs_first __read_mostly;

/*
 * SCHED_OTHER wake-up granularity.
 * (default: 1 msec * (1 + ilog(ncpus)), units: nanoseconds)
 *
 * This option delays the preemption effects of decoupled workloads
 * and reduces their over-scheduling. Synchronous workloads will still
 * have immediate wakeup/sleep latencies.
 */
unsigned int sysctl_sched_wakeup_granularity = 1000000UL;
unsigned int normalized_sysctl_sched_wakeup_granularity = 1000000UL;

const_debug unsigned int sysctl_sched_migration_cost = 500000UL;

#ifdef CONFIG_CFS_BANDWIDTH
/*
 * Amount of runtime to allocate from global (tg) to local (per-cfs_rq) pool
 * each time a cfs_rq requests quota.
 *
 * Note: in the case that the slice exceeds the runtime remaining (either due
 * to consumption or the quota being specified to be smaller than the slice)
 * we will always only issue the remaining available time.
 *
 * default: 5 msec, units: microseconds
 */
unsigned int sysctl_sched_cfs_bandwidth_slice = 5000UL;
#endif

/*
 * The margin used when comparing utilization with CPU capacity:
 * util * 1024 < capacity * margin
 */
unsigned int capacity_margin = 1280; /* ~20% */

static inline void update_load_add(struct load_weight *lw, unsigned long inc)
{
	lw->weight += inc;
	lw->inv_weight = 0;
}

static inline void update_load_sub(struct load_weight *lw, unsigned long dec)
{
	lw->weight -= dec;
	lw->inv_weight = 0;
}

static inline void update_load_set(struct load_weight *lw, unsigned long w)
{
	lw->weight = w;
	lw->inv_weight = 0;
}

/*
 * Increase the granularity value when there are more CPUs,
 * because with more CPUs the 'effective latency' as visible
 * to users decreases. But the relationship is not linear,
 * so pick a second-best guess by going with the log2 of the
 * number of CPUs.
 *
 * This idea comes from the SD scheduler of Con Kolivas:
 */
static unsigned int get_update_sysctl_factor(void)
{
	unsigned int cpus = min_t(unsigned int, num_online_cpus(), 8);
	unsigned int factor;

	switch (sysctl_sched_tunable_scaling) {
	case SCHED_TUNABLESCALING_NONE:
		factor = 1;
		break;
	case SCHED_TUNABLESCALING_LINEAR:
		factor = cpus;
		break;
	case SCHED_TUNABLESCALING_LOG:
	default:
		factor = 1 + ilog2(cpus);
		break;
	}

	return factor;
}

static void update_sysctl(void)
{
	unsigned int factor = get_update_sysctl_factor();

#define SET_SYSCTL(name) \
	(sysctl_##name = (factor) * normalized_sysctl_##name)
	SET_SYSCTL(sched_min_granularity);
	SET_SYSCTL(sched_latency);
	SET_SYSCTL(sched_wakeup_granularity);
#undef SET_SYSCTL
}

void sched_init_granularity(void)
{
	update_sysctl();
}

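/*
 * Example: with the default SCHED_TUNABLESCALING_LOG policy and 8 (or more)
 * online CPUs, the factor is 1 + ilog2(8) = 4, so the effective tunables
 * become sched_latency = 24ms, sched_min_granularity = 3ms and
 * sched_wakeup_granularity = 4ms (4x their normalized values).
 */
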
#define WMULT_CONST	(~0U)
#define WMULT_SHIFT	32

static void __update_inv_weight(struct load_weight *lw)
{
	unsigned long w;

	if (likely(lw->inv_weight))
		return;

	w = scale_load_down(lw->weight);

	if (BITS_PER_LONG > 32 && unlikely(w >= WMULT_CONST))
		lw->inv_weight = 1;
	else if (unlikely(!w))
		lw->inv_weight = WMULT_CONST;
	else
		lw->inv_weight = WMULT_CONST / w;
}

/*
 * delta_exec * weight / lw.weight
 *   OR
 * (delta_exec * (weight * lw->inv_weight)) >> WMULT_SHIFT
 *
 * Either weight := NICE_0_LOAD and lw \e sched_prio_to_wmult[], in which case
 * we're guaranteed shift stays positive because inv_weight is guaranteed to
 * fit 32 bits, and NICE_0_LOAD gives another 10 bits; therefore shift >= 22.
 *
 * Or, weight =< lw.weight (because lw.weight is the runqueue weight), thus
 * weight/lw.weight <= 1, and therefore our shift will also be positive.
 */
static u64 __calc_delta(u64 delta_exec, unsigned long weight, struct load_weight *lw)
{
	u64 fact = scale_load_down(weight);
	int shift = WMULT_SHIFT;

	__update_inv_weight(lw);

	if (unlikely(fact >> 32)) {
		while (fact >> 32) {
			fact >>= 1;
			shift--;
		}
	}

	/* hint to use a 32x32->64 mul */
	fact = (u64)(u32)fact * lw->inv_weight;

	while (fact >> 32) {
		fact >>= 1;
		shift--;
	}

	return mul_u64_u32_shr(delta_exec, fact, shift);
}
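
/*
 * Example: for a nice-0 entity (scale_load_down(weight) == 1024) on a
 * runqueue whose load weight totals 3072, inv_weight = (2^32 - 1) / 3072,
 * fact = 1024 * inv_weight =~ 2^30 (so no extra shifting is needed) and
 * __calc_delta() returns roughly delta_exec * 1024 / 3072 = delta_exec / 3.
 */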


const struct sched_class fair_sched_class;

/**************************************************************
 * CFS operations on generic schedulable entities:
 */

#ifdef CONFIG_FAIR_GROUP_SCHED

/* cpu runqueue to which this cfs_rq is attached */
static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
{
	return cfs_rq->rq;
}

/* An entity is a task if it doesn't "own" a runqueue */
#define entity_is_task(se)	(!se->my_q)

static inline struct task_struct *task_of(struct sched_entity *se)
{
	SCHED_WARN_ON(!entity_is_task(se));
	return container_of(se, struct task_struct, se);
}

/* Walk up scheduling entities hierarchy */
#define for_each_sched_entity(se) \
		for (; se; se = se->parent)

static inline struct cfs_rq *task_cfs_rq(struct task_struct *p)
{
	return p->se.cfs_rq;
}

/* runqueue on which this entity is (to be) queued */
static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se)
{
	return se->cfs_rq;
}

/* runqueue "owned" by this group */
static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
{
	return grp->my_q;
}

static inline void list_add_leaf_cfs_rq(struct cfs_rq *cfs_rq)
{
	if (!cfs_rq->on_list) {
		/*
		 * Ensure we either appear before our parent (if already
		 * enqueued) or force our parent to appear after us when it is
		 * enqueued. The fact that we always enqueue bottom-up
		 * reduces this to two cases.
		 */
		if (cfs_rq->tg->parent &&
		    cfs_rq->tg->parent->cfs_rq[cpu_of(rq_of(cfs_rq))]->on_list) {
			list_add_rcu(&cfs_rq->leaf_cfs_rq_list,
				&rq_of(cfs_rq)->leaf_cfs_rq_list);
		} else {
			list_add_tail_rcu(&cfs_rq->leaf_cfs_rq_list,
				&rq_of(cfs_rq)->leaf_cfs_rq_list);
		}

		cfs_rq->on_list = 1;
	}
}

static inline void list_del_leaf_cfs_rq(struct cfs_rq *cfs_rq)
{
	if (cfs_rq->on_list) {
		list_del_rcu(&cfs_rq->leaf_cfs_rq_list);
		cfs_rq->on_list = 0;
	}
}

/* Iterate through all leaf cfs_rq's on a runqueue */
#define for_each_leaf_cfs_rq(rq, cfs_rq) \
	list_for_each_entry_rcu(cfs_rq, &rq->leaf_cfs_rq_list, leaf_cfs_rq_list)

/* Do the two (enqueued) entities belong to the same group ? */
static inline struct cfs_rq *
is_same_group(struct sched_entity *se, struct sched_entity *pse)
{
	if (se->cfs_rq == pse->cfs_rq)
		return se->cfs_rq;

	return NULL;
}

static inline struct sched_entity *parent_entity(struct sched_entity *se)
{
	return se->parent;
}

static void
find_matching_se(struct sched_entity **se, struct sched_entity **pse)
{
	int se_depth, pse_depth;

	/*
	 * preemption test can be made between sibling entities who are in the
	 * same cfs_rq, i.e. who have a common parent. Walk up the hierarchy of
	 * both tasks until we find their ancestors who are siblings of a
	 * common parent.
	 */

	/* First walk up until both entities are at same depth */
	se_depth = (*se)->depth;
	pse_depth = (*pse)->depth;

	while (se_depth > pse_depth) {
		se_depth--;
		*se = parent_entity(*se);
	}

	while (pse_depth > se_depth) {
		pse_depth--;
		*pse = parent_entity(*pse);
	}

	while (!is_same_group(*se, *pse)) {
		*se = parent_entity(*se);
		*pse = parent_entity(*pse);
	}
}

#else	/* !CONFIG_FAIR_GROUP_SCHED */

static inline struct task_struct *task_of(struct sched_entity *se)
{
	return container_of(se, struct task_struct, se);
}

static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
{
	return container_of(cfs_rq, struct rq, cfs);
}

#define entity_is_task(se)	1

#define for_each_sched_entity(se) \
		for (; se; se = NULL)

static inline struct cfs_rq *task_cfs_rq(struct task_struct *p)
{
	return &task_rq(p)->cfs;
}

static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se)
{
	struct task_struct *p = task_of(se);
	struct rq *rq = task_rq(p);

	return &rq->cfs;
}

/* runqueue "owned" by this group */
static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
{
	return NULL;
}

static inline void list_add_leaf_cfs_rq(struct cfs_rq *cfs_rq)
{
}

static inline void list_del_leaf_cfs_rq(struct cfs_rq *cfs_rq)
{
}

#define for_each_leaf_cfs_rq(rq, cfs_rq) \
		for (cfs_rq = &rq->cfs; cfs_rq; cfs_rq = NULL)

static inline struct sched_entity *parent_entity(struct sched_entity *se)
{
	return NULL;
}

static inline void
find_matching_se(struct sched_entity **se, struct sched_entity **pse)
{
}

#endif	/* CONFIG_FAIR_GROUP_SCHED */

static __always_inline
void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec);

/**************************************************************
 * Scheduling class tree data structure manipulation methods:
 */

static inline u64 max_vruntime(u64 max_vruntime, u64 vruntime)
{
	s64 delta = (s64)(vruntime - max_vruntime);
	if (delta > 0)
		max_vruntime = vruntime;

	return max_vruntime;
}

static inline u64 min_vruntime(u64 min_vruntime, u64 vruntime)
{
	s64 delta = (s64)(vruntime - min_vruntime);
	if (delta < 0)
		min_vruntime = vruntime;

	return min_vruntime;
}

static inline int entity_before(struct sched_entity *a,
				struct sched_entity *b)
{
	return (s64)(a->vruntime - b->vruntime) < 0;
}

static void update_min_vruntime(struct cfs_rq *cfs_rq)
{
	struct sched_entity *curr = cfs_rq->curr;

	u64 vruntime = cfs_rq->min_vruntime;

	if (curr) {
		if (curr->on_rq)
			vruntime = curr->vruntime;
		else
			curr = NULL;
	}

	if (cfs_rq->rb_leftmost) {
		struct sched_entity *se = rb_entry(cfs_rq->rb_leftmost,
						   struct sched_entity,
						   run_node);

		if (!curr)
			vruntime = se->vruntime;
		else
			vruntime = min_vruntime(vruntime, se->vruntime);
	}

	/* ensure we never gain time by being placed backwards. */
	cfs_rq->min_vruntime = max_vruntime(cfs_rq->min_vruntime, vruntime);
#ifndef CONFIG_64BIT
	smp_wmb();
	cfs_rq->min_vruntime_copy = cfs_rq->min_vruntime;
#endif
}

/*
 * Enqueue an entity into the rb-tree:
 */
static void __enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	struct rb_node **link = &cfs_rq->tasks_timeline.rb_node;
	struct rb_node *parent = NULL;
	struct sched_entity *entry;
	int leftmost = 1;

	/*
	 * Find the right place in the rbtree:
	 */
	while (*link) {
		parent = *link;
		entry = rb_entry(parent, struct sched_entity, run_node);
		/*
		 * We don't care about collisions. Nodes with
		 * the same key stay together.
		 */
		if (entity_before(se, entry)) {
			link = &parent->rb_left;
		} else {
			link = &parent->rb_right;
			leftmost = 0;
		}
	}

	/*
	 * Maintain a cache of leftmost tree entries (it is frequently
	 * used):
	 */
	if (leftmost)
		cfs_rq->rb_leftmost = &se->run_node;

	rb_link_node(&se->run_node, parent, link);
	rb_insert_color(&se->run_node, &cfs_rq->tasks_timeline);
}

static void __dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	if (cfs_rq->rb_leftmost == &se->run_node) {
		struct rb_node *next_node;

		next_node = rb_next(&se->run_node);
		cfs_rq->rb_leftmost = next_node;
	}

	rb_erase(&se->run_node, &cfs_rq->tasks_timeline);
}

struct sched_entity *__pick_first_entity(struct cfs_rq *cfs_rq)
{
	struct rb_node *left = cfs_rq->rb_leftmost;

	if (!left)
		return NULL;

	return rb_entry(left, struct sched_entity, run_node);
}

static struct sched_entity *__pick_next_entity(struct sched_entity *se)
{
	struct rb_node *next = rb_next(&se->run_node);

	if (!next)
		return NULL;

	return rb_entry(next, struct sched_entity, run_node);
}

#ifdef CONFIG_SCHED_DEBUG
struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq)
{
	struct rb_node *last = rb_last(&cfs_rq->tasks_timeline);

	if (!last)
		return NULL;

	return rb_entry(last, struct sched_entity, run_node);
}

/**************************************************************
 * Scheduling class statistics methods:
 */

int sched_proc_update_handler(struct ctl_table *table, int write,
		void __user *buffer, size_t *lenp,
		loff_t *ppos)
{
	int ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
	unsigned int factor = get_update_sysctl_factor();

	if (ret || !write)
		return ret;

	sched_nr_latency = DIV_ROUND_UP(sysctl_sched_latency,
					sysctl_sched_min_granularity);

#define WRT_SYSCTL(name) \
	(normalized_sysctl_##name = sysctl_##name / (factor))
	WRT_SYSCTL(sched_min_granularity);
	WRT_SYSCTL(sched_latency);
	WRT_SYSCTL(sched_wakeup_granularity);
#undef WRT_SYSCTL

	return 0;
}
#endif

/*
 * delta /= w
 */
static inline u64 calc_delta_fair(u64 delta, struct sched_entity *se)
{
	if (unlikely(se->load.weight != NICE_0_LOAD))
		delta = __calc_delta(delta, NICE_0_LOAD, &se->load);

	return delta;
}

/*
 * The idea is to set a period in which each task runs once.
 *
 * When there are too many tasks (sched_nr_latency) we have to stretch
 * this period because otherwise the slices get too small.
 *
 * p = (nr <= nl) ? l : l*nr/nl
 */
static u64 __sched_period(unsigned long nr_running)
{
	if (unlikely(nr_running > sched_nr_latency))
		return nr_running * sysctl_sched_min_granularity;
	else
		return sysctl_sched_latency;
}

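/*
 * Example: with the unscaled defaults (sched_latency = 6ms,
 * sched_min_granularity = 0.75ms, sched_nr_latency = 8), up to 8 runnable
 * tasks share a 6ms period, while 16 runnable tasks stretch the period to
 * 16 * 0.75ms = 12ms.
 */
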
/*
 * We calculate the wall-time slice from the period by taking a part
 * proportional to the weight.
 *
 * s = p*P[w/rw]
 */
static u64 sched_slice(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	u64 slice = __sched_period(cfs_rq->nr_running + !se->on_rq);

	for_each_sched_entity(se) {
		struct load_weight *load;
		struct load_weight lw;

		cfs_rq = cfs_rq_of(se);
		load = &cfs_rq->load;

		if (unlikely(!se->on_rq)) {
			lw = cfs_rq->load;

			update_load_add(&lw, se->load.weight);
			load = &lw;
		}
		slice = __calc_delta(slice, se->load.weight, load);
	}
	return slice;
}

/*
 * We calculate the vruntime slice of a to-be-inserted task.
 *
 * vs = s/w
 */
static u64 sched_vslice(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	return calc_delta_fair(sched_slice(cfs_rq, se), se);
}

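/*
 * Example: two runnable nice-0 tasks split a 6ms period evenly, so
 * sched_slice() returns 3ms for each; if one of them instead had twice the
 * weight it would receive two thirds of the period (4ms).
 */
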
#ifdef CONFIG_SMP
static int select_idle_sibling(struct task_struct *p, int prev_cpu, int cpu);
static unsigned long task_h_load(struct task_struct *p);

/*
 * We choose a half-life close to 1 scheduling period.
 * Note: The tables runnable_avg_yN_inv and runnable_avg_yN_sum are
 * dependent on this value.
 */
#define LOAD_AVG_PERIOD 32
#define LOAD_AVG_MAX 47742 /* maximum possible load avg */
#define LOAD_AVG_MAX_N 345 /* number of full periods to produce LOAD_AVG_MAX */

/* Give a new sched_entity initial runnable values so its load is seen as heavy during its infancy */
void init_entity_runnable_average(struct sched_entity *se)
{
	struct sched_avg *sa = &se->avg;

	sa->last_update_time = 0;
	/*
	 * sched_avg's period_contrib should be strictly less than 1024, so
	 * we give it 1023 to make sure it is almost a period (1024us), and
	 * will definitely be updated (after enqueue).
	 */
	sa->period_contrib = 1023;
	/*
	 * Tasks are initialized with full load to be seen as heavy tasks until
	 * they get a chance to stabilize to their real load level.
	 * Group entities are initialized with zero load to reflect the fact that
	 * nothing has been attached to the task group yet.
	 */
	if (entity_is_task(se))
		sa->load_avg = scale_load_down(se->load.weight);
	sa->load_sum = sa->load_avg * LOAD_AVG_MAX;
	/*
	 * At this point, util_avg won't be used in select_task_rq_fair anyway
	 */
	sa->util_avg = 0;
	sa->util_sum = 0;
	/* when this task is enqueued, it will contribute to its cfs_rq's load_avg */
}

static inline u64 cfs_rq_clock_task(struct cfs_rq *cfs_rq);
static int update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq, bool update_freq);
static void update_tg_load_avg(struct cfs_rq *cfs_rq, int force);
static void attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se);

/*
 * With new tasks being created, their initial util_avgs are extrapolated
 * based on the cfs_rq's current util_avg:
 *
 *   util_avg = cfs_rq->util_avg / (cfs_rq->load_avg + 1) * se.load.weight
 *
 * However, in many cases, the above util_avg does not give a desired
 * value. Moreover, the sum of the util_avgs may be divergent, such
 * as when the series is a harmonic series.
 *
 * To solve this problem, we also cap the util_avg of successive tasks to
 * only 1/2 of the left utilization budget:
 *
 *   util_avg_cap = (1024 - cfs_rq->avg.util_avg) / 2^n
 *
 * where n denotes the nth task.
 *
 * For example, the simplest series from the beginning would be:
 *
 *  task  util_avg: 512, 256, 128,  64,  32,   16,    8, ...
 * cfs_rq util_avg: 512, 768, 896, 960, 992, 1008, 1016, ...
 *
 * Finally, that extrapolated util_avg is clamped to the cap (util_avg_cap)
 * if util_avg > util_avg_cap.
 */
void post_init_entity_util_avg(struct sched_entity *se)
{
	struct cfs_rq *cfs_rq = cfs_rq_of(se);
	struct sched_avg *sa = &se->avg;
	long cap = (long)(SCHED_CAPACITY_SCALE - cfs_rq->avg.util_avg) / 2;
	u64 now = cfs_rq_clock_task(cfs_rq);

	if (cap > 0) {
		if (cfs_rq->avg.util_avg != 0) {
			sa->util_avg  = cfs_rq->avg.util_avg * se->load.weight;
			sa->util_avg /= (cfs_rq->avg.load_avg + 1);

			if (sa->util_avg > cap)
				sa->util_avg = cap;
		} else {
			sa->util_avg = cap;
		}
		sa->util_sum = sa->util_avg * LOAD_AVG_MAX;
	}

	if (entity_is_task(se)) {
		struct task_struct *p = task_of(se);
		if (p->sched_class != &fair_sched_class) {
			/*
			 * For !fair tasks do:
			 *
			update_cfs_rq_load_avg(now, cfs_rq, false);
			attach_entity_load_avg(cfs_rq, se);
			switched_from_fair(rq, p);
			 *
			 * such that the next switched_to_fair() has the
			 * expected state.
			 */
			se->avg.last_update_time = now;
			return;
		}
	}

	update_cfs_rq_load_avg(now, cfs_rq, false);
	attach_entity_load_avg(cfs_rq, se);
	update_tg_load_avg(cfs_rq, false);
}

#else /* !CONFIG_SMP */
void init_entity_runnable_average(struct sched_entity *se)
{
}
void post_init_entity_util_avg(struct sched_entity *se)
{
}
static void update_tg_load_avg(struct cfs_rq *cfs_rq, int force)
{
}
#endif /* CONFIG_SMP */

/*
 * Update the current task's runtime statistics.
 */
static void update_curr(struct cfs_rq *cfs_rq)
{
	struct sched_entity *curr = cfs_rq->curr;
	u64 now = rq_clock_task(rq_of(cfs_rq));
	u64 delta_exec;

	if (unlikely(!curr))
		return;

	delta_exec = now - curr->exec_start;
	if (unlikely((s64)delta_exec <= 0))
		return;

	curr->exec_start = now;

	schedstat_set(curr->statistics.exec_max,
		      max(delta_exec, curr->statistics.exec_max));

	curr->sum_exec_runtime += delta_exec;
	schedstat_add(cfs_rq->exec_clock, delta_exec);

	curr->vruntime += calc_delta_fair(delta_exec, curr);
	update_min_vruntime(cfs_rq);

	if (entity_is_task(curr)) {
		struct task_struct *curtask = task_of(curr);

		trace_sched_stat_runtime(curtask, delta_exec, curr->vruntime);
		cpuacct_charge(curtask, delta_exec);
		account_group_exec_runtime(curtask, delta_exec);
	}

	account_cfs_rq_runtime(cfs_rq, delta_exec);
}

static void update_curr_fair(struct rq *rq)
{
	update_curr(cfs_rq_of(&rq->curr->se));
}

static inline void
update_stats_wait_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	u64 wait_start, prev_wait_start;

	if (!schedstat_enabled())
		return;

	wait_start = rq_clock(rq_of(cfs_rq));
	prev_wait_start = schedstat_val(se->statistics.wait_start);

	if (entity_is_task(se) && task_on_rq_migrating(task_of(se)) &&
	    likely(wait_start > prev_wait_start))
		wait_start -= prev_wait_start;

	schedstat_set(se->statistics.wait_start, wait_start);
}

static inline void
update_stats_wait_end(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	struct task_struct *p;
	u64 delta;

	if (!schedstat_enabled())
		return;

	delta = rq_clock(rq_of(cfs_rq)) - schedstat_val(se->statistics.wait_start);

	if (entity_is_task(se)) {
		p = task_of(se);
		if (task_on_rq_migrating(p)) {
			/*
			 * Preserve migrating task's wait time so wait_start
			 * time stamp can be adjusted to accumulate wait time
			 * prior to migration.
			 */
			schedstat_set(se->statistics.wait_start, delta);
			return;
		}
		trace_sched_stat_wait(p, delta);
	}

	schedstat_set(se->statistics.wait_max,
		      max(schedstat_val(se->statistics.wait_max), delta));
	schedstat_inc(se->statistics.wait_count);
	schedstat_add(se->statistics.wait_sum, delta);
	schedstat_set(se->statistics.wait_start, 0);
}

static inline void
update_stats_enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	struct task_struct *tsk = NULL;
	u64 sleep_start, block_start;

	if (!schedstat_enabled())
		return;

	sleep_start = schedstat_val(se->statistics.sleep_start);
	block_start = schedstat_val(se->statistics.block_start);

	if (entity_is_task(se))
		tsk = task_of(se);

	if (sleep_start) {
		u64 delta = rq_clock(rq_of(cfs_rq)) - sleep_start;

		if ((s64)delta < 0)
			delta = 0;

		if (unlikely(delta > schedstat_val(se->statistics.sleep_max)))
			schedstat_set(se->statistics.sleep_max, delta);

		schedstat_set(se->statistics.sleep_start, 0);
		schedstat_add(se->statistics.sum_sleep_runtime, delta);

		if (tsk) {
			account_scheduler_latency(tsk, delta >> 10, 1);
			trace_sched_stat_sleep(tsk, delta);
		}
	}
	if (block_start) {
		u64 delta = rq_clock(rq_of(cfs_rq)) - block_start;

		if ((s64)delta < 0)
			delta = 0;

		if (unlikely(delta > schedstat_val(se->statistics.block_max)))
			schedstat_set(se->statistics.block_max, delta);

		schedstat_set(se->statistics.block_start, 0);
		schedstat_add(se->statistics.sum_sleep_runtime, delta);

		if (tsk) {
			if (tsk->in_iowait) {
				schedstat_add(se->statistics.iowait_sum, delta);
				schedstat_inc(se->statistics.iowait_count);
				trace_sched_stat_iowait(tsk, delta);
			}

			trace_sched_stat_blocked(tsk, delta);

			/*
			 * Blocking time is in units of nanosecs, so shift by
			 * 20 to get a milliseconds-range estimation of the
			 * amount of time that the task spent sleeping:
			 */
			if (unlikely(prof_on == SLEEP_PROFILING)) {
				profile_hits(SLEEP_PROFILING,
						(void *)get_wchan(tsk),
						delta >> 20);
			}
			account_scheduler_latency(tsk, delta >> 10, 0);
		}
	}
}

/*
 * Task is being enqueued - update stats:
 */
static inline void
update_stats_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
{
	if (!schedstat_enabled())
		return;

	/*
	 * Are we enqueueing a waiting task? (for current tasks
	 * a dequeue/enqueue event is a NOP)
	 */
	if (se != cfs_rq->curr)
		update_stats_wait_start(cfs_rq, se);

	if (flags & ENQUEUE_WAKEUP)
		update_stats_enqueue_sleeper(cfs_rq, se);
}

static inline void
update_stats_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
{

	if (!schedstat_enabled())
		return;

	/*
	 * Mark the end of the wait period if dequeueing a
	 * waiting task:
	 */
	if (se != cfs_rq->curr)
		update_stats_wait_end(cfs_rq, se);

	if ((flags & DEQUEUE_SLEEP) && entity_is_task(se)) {
		struct task_struct *tsk = task_of(se);

		if (tsk->state & TASK_INTERRUPTIBLE)
			schedstat_set(se->statistics.sleep_start,
				      rq_clock(rq_of(cfs_rq)));
		if (tsk->state & TASK_UNINTERRUPTIBLE)
			schedstat_set(se->statistics.block_start,
				      rq_clock(rq_of(cfs_rq)));
	}
}

/*
 * We are picking a new current task - update its stats:
 */
static inline void
update_stats_curr_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	/*
	 * We are starting a new run period:
	 */
	se->exec_start = rq_clock_task(rq_of(cfs_rq));
}

/**************************************************
 * Scheduling class queueing methods:
 */

#ifdef CONFIG_NUMA_BALANCING
/*
 * Approximate time to scan a full NUMA task in ms. The task scan period is
 * calculated based on the task's virtual memory size and
 * numa_balancing_scan_size.
 */
unsigned int sysctl_numa_balancing_scan_period_min = 1000;
unsigned int sysctl_numa_balancing_scan_period_max = 60000;

/* Portion of address space to scan in MB */
unsigned int sysctl_numa_balancing_scan_size = 256;

/* Scan @scan_size MB every @scan_period after an initial @scan_delay in ms */
unsigned int sysctl_numa_balancing_scan_delay = 1000;

static unsigned int task_nr_scan_windows(struct task_struct *p)
{
	unsigned long rss = 0;
	unsigned long nr_scan_pages;

	/*
	 * Calculations based on RSS as non-present and empty pages are skipped
	 * by the PTE scanner and NUMA hinting faults should be trapped based
	 * on resident pages
	 */
	nr_scan_pages = sysctl_numa_balancing_scan_size << (20 - PAGE_SHIFT);
	rss = get_mm_rss(p->mm);
	if (!rss)
		rss = nr_scan_pages;

	rss = round_up(rss, nr_scan_pages);
	return rss / nr_scan_pages;
}

/* For sanity's sake, never scan more PTEs than MAX_SCAN_WINDOW MB/sec. */
#define MAX_SCAN_WINDOW 2560

static unsigned int task_scan_min(struct task_struct *p)
{
	unsigned int scan_size = READ_ONCE(sysctl_numa_balancing_scan_size);
	unsigned int scan, floor;
	unsigned int windows = 1;

	if (scan_size < MAX_SCAN_WINDOW)
		windows = MAX_SCAN_WINDOW / scan_size;
	floor = 1000 / windows;

	scan = sysctl_numa_balancing_scan_period_min / task_nr_scan_windows(p);
	return max_t(unsigned int, floor, scan);
}

static unsigned int task_scan_max(struct task_struct *p)
{
	unsigned int smin = task_scan_min(p);
	unsigned int smax;

	/* Watch for min being lower than max due to floor calculations */
	smax = sysctl_numa_balancing_scan_period_max / task_nr_scan_windows(p);
	return max(smin, smax);
}

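/*
 * Example: a task with a 1GB RSS spans four 256MB scan windows, so
 * task_scan_min() returns 1000ms / 4 = 250ms (above the 100ms floor) and
 * task_scan_max() returns 60000ms / 4 = 15000ms.
 */
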
static void account_numa_enqueue(struct rq *rq, struct task_struct *p)
{
	rq->nr_numa_running += (p->numa_preferred_nid != -1);
	rq->nr_preferred_running += (p->numa_preferred_nid == task_node(p));
}

static void account_numa_dequeue(struct rq *rq, struct task_struct *p)
{
	rq->nr_numa_running -= (p->numa_preferred_nid != -1);
	rq->nr_preferred_running -= (p->numa_preferred_nid == task_node(p));
}

struct numa_group {
	atomic_t refcount;

	spinlock_t lock; /* nr_tasks, tasks */
	int nr_tasks;
	pid_t gid;
	int active_nodes;

	struct rcu_head rcu;
	unsigned long total_faults;
	unsigned long max_faults_cpu;
	/*
	 * Faults_cpu is used to decide whether memory should move
	 * towards the CPU. As a consequence, these stats are weighted
	 * more by CPU use than by memory faults.
	 */
	unsigned long *faults_cpu;
	unsigned long faults[0];
};

/* Shared or private faults. */
#define NR_NUMA_HINT_FAULT_TYPES 2

/* Memory and CPU locality */
#define NR_NUMA_HINT_FAULT_STATS (NR_NUMA_HINT_FAULT_TYPES * 2)

/* Averaged statistics, and temporary buffers. */
#define NR_NUMA_HINT_FAULT_BUCKETS (NR_NUMA_HINT_FAULT_STATS * 2)

pid_t task_numa_group_id(struct task_struct *p)
{
	return p->numa_group ? p->numa_group->gid : 0;
}

/*
 * The averaged statistics, shared & private, memory & cpu,
 * occupy the first half of the array. The second half of the
 * array is for current counters, which are averaged into the
 * first set by task_numa_placement.
 */
static inline int task_faults_idx(enum numa_faults_stats s, int nid, int priv)
{
	return NR_NUMA_HINT_FAULT_TYPES * (s * nr_node_ids + nid) + priv;
}

static inline unsigned long task_faults(struct task_struct *p, int nid)
{
	if (!p->numa_faults)
		return 0;

	return p->numa_faults[task_faults_idx(NUMA_MEM, nid, 0)] +
		p->numa_faults[task_faults_idx(NUMA_MEM, nid, 1)];
}

static inline unsigned long group_faults(struct task_struct *p, int nid)
{
	if (!p->numa_group)
		return 0;

	return p->numa_group->faults[task_faults_idx(NUMA_MEM, nid, 0)] +
		p->numa_group->faults[task_faults_idx(NUMA_MEM, nid, 1)];
}

static inline unsigned long group_faults_cpu(struct numa_group *group, int nid)
{
	return group->faults_cpu[task_faults_idx(NUMA_MEM, nid, 0)] +
		group->faults_cpu[task_faults_idx(NUMA_MEM, nid, 1)];
}

/*
 * A node triggering more than 1/3 as many NUMA faults as the maximum is
 * considered part of a numa group's pseudo-interleaving set. Migrations
 * between these nodes are slowed down, to allow things to settle down.
 */
#define ACTIVE_NODE_FRACTION 3

static bool numa_is_active_node(int nid, struct numa_group *ng)
{
	return group_faults_cpu(ng, nid) * ACTIVE_NODE_FRACTION > ng->max_faults_cpu;
}

/* Handle placement on systems where not all nodes are directly connected. */
static unsigned long score_nearby_nodes(struct task_struct *p, int nid,
					int maxdist, bool task)
{
	unsigned long score = 0;
	int node;

	/*
	 * All nodes are directly connected, and the same distance
	 * from each other. No need for fancy placement algorithms.
	 */
	if (sched_numa_topology_type == NUMA_DIRECT)
		return 0;

	/*
	 * This code is called for each node, introducing N^2 complexity,
	 * which should be ok given the number of nodes rarely exceeds 8.
	 */
	for_each_online_node(node) {
		unsigned long faults;
		int dist = node_distance(nid, node);

		/*
		 * The furthest away nodes in the system are not interesting
		 * for placement; nid was already counted.
		 */
		if (dist == sched_max_numa_distance || node == nid)
			continue;

		/*
		 * On systems with a backplane NUMA topology, compare groups
		 * of nodes, and move tasks towards the group with the most
		 * memory accesses. When comparing two nodes at distance
		 * "hoplimit", only nodes closer by than "hoplimit" are part
		 * of each group. Skip other nodes.
		 */
		if (sched_numa_topology_type == NUMA_BACKPLANE &&
					dist > maxdist)
			continue;

		/* Add up the faults from nearby nodes. */
		if (task)
			faults = task_faults(p, node);
		else
			faults = group_faults(p, node);

		/*
		 * On systems with a glueless mesh NUMA topology, there are
		 * no fixed "groups of nodes". Instead, nodes that are not
		 * directly connected bounce traffic through intermediate
		 * nodes; a numa_group can occupy any set of nodes.
		 * The further away a node is, the less the faults count.
		 * This seems to result in good task placement.
		 */
		if (sched_numa_topology_type == NUMA_GLUELESS_MESH) {
			faults *= (sched_max_numa_distance - dist);
			faults /= (sched_max_numa_distance - LOCAL_DISTANCE);
		}

		score += faults;
	}

	return score;
}

/*
 * These return the fraction of accesses done by a particular task, or
 * task group, on a particular numa node. The group weight is given a
 * larger multiplier, in order to group tasks together that are almost
 * evenly spread out between numa nodes.
 */
static inline unsigned long task_weight(struct task_struct *p, int nid,
					int dist)
{
	unsigned long faults, total_faults;

	if (!p->numa_faults)
		return 0;

	total_faults = p->total_numa_faults;

	if (!total_faults)
		return 0;

	faults = task_faults(p, nid);
	faults += score_nearby_nodes(p, nid, dist, true);

	return 1000 * faults / total_faults;
}

static inline unsigned long group_weight(struct task_struct *p, int nid,
					 int dist)
{
	unsigned long faults, total_faults;

	if (!p->numa_group)
		return 0;

	total_faults = p->numa_group->total_faults;

	if (!total_faults)
		return 0;

	faults = group_faults(p, nid);
	faults += score_nearby_nodes(p, nid, dist, false);

	return 1000 * faults / total_faults;
}

bool should_numa_migrate_memory(struct task_struct *p, struct page * page,
				int src_nid, int dst_cpu)
{
	struct numa_group *ng = p->numa_group;
	int dst_nid = cpu_to_node(dst_cpu);
	int last_cpupid, this_cpupid;

	this_cpupid = cpu_pid_to_cpupid(dst_cpu, current->pid);

	/*
	 * Multi-stage node selection is used in conjunction with a periodic
	 * migration fault to build a temporal task<->page relation. By using
	 * a two-stage filter we remove short/unlikely relations.
	 *
	 * Using P(p) ~ n_p / n_t as per frequentist probability, we can equate
	 * a task's usage of a particular page (n_p) per total usage of this
	 * page (n_t) (in a given time-span) to a probability.
	 *
	 * Our periodic faults will sample this probability and getting the
	 * same result twice in a row, given these samples are fully
	 * independent, is then given by P(n)^2, provided our sample period
	 * is sufficiently short compared to the usage pattern.
	 *
	 * This quadratic squishes small probabilities, making it less likely
	 * we act on an unlikely task<->page relation.
	 */
	last_cpupid = page_cpupid_xchg_last(page, this_cpupid);
	if (!cpupid_pid_unset(last_cpupid) &&
				cpupid_to_nid(last_cpupid) != dst_nid)
		return false;

	/* Always allow migrate on private faults */
	if (cpupid_match_pid(p, last_cpupid))
		return true;

	/* A shared fault, but p->numa_group has not been set up yet. */
	if (!ng)
		return true;

	/*
	 * Destination node is much more heavily used than the source
	 * node? Allow migration.
	 */
	if (group_faults_cpu(ng, dst_nid) > group_faults_cpu(ng, src_nid) *
					ACTIVE_NODE_FRACTION)
		return true;

	/*
	 * Distribute memory according to CPU & memory use on each node,
	 * with 3/4 hysteresis to avoid unnecessary memory migrations:
	 *
	 * faults_cpu(dst)   3   faults_cpu(src)
	 * --------------- * - > ---------------
	 * faults_mem(dst)   4   faults_mem(src)
	 */
	return group_faults_cpu(ng, dst_nid) * group_faults(p, src_nid) * 3 >
	       group_faults_cpu(ng, src_nid) * group_faults(p, dst_nid) * 4;
}

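/*
 * Example: with 40 CPU faults and 20 memory faults on the destination node
 * versus 10 CPU faults and 30 memory faults on the source node, the
 * destination's CPU/memory fault ratio (2.0) exceeds 4/3 of the source's
 * (~0.33), so the final test above allows the page to migrate.
 */
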
static unsigned long weighted_cpuload(const int cpu);
static unsigned long source_load(int cpu, int type);
static unsigned long target_load(int cpu, int type);
static unsigned long capacity_of(int cpu);
static long effective_load(struct task_group *tg, int cpu, long wl, long wg);

/* Cached statistics for all CPUs within a node */
struct numa_stats {
	unsigned long nr_running;
	unsigned long load;

	/* Total compute capacity of CPUs on a node */
	unsigned long compute_capacity;

	/* Approximate capacity in terms of runnable tasks on a node */
	unsigned long task_capacity;
	int has_free_capacity;
};

/*
 * XXX borrowed from update_sg_lb_stats
 */
static void update_numa_stats(struct numa_stats *ns, int nid)
{
	int smt, cpu, cpus = 0;
	unsigned long capacity;

	memset(ns, 0, sizeof(*ns));
	for_each_cpu(cpu, cpumask_of_node(nid)) {
		struct rq *rq = cpu_rq(cpu);

		ns->nr_running += rq->nr_running;
		ns->load += weighted_cpuload(cpu);
		ns->compute_capacity += capacity_of(cpu);

		cpus++;
	}

	/*
	 * If we raced with hotplug and there are no CPUs left in our mask
	 * the @ns structure is NULL'ed and task_numa_compare() will
	 * not find this node attractive.
	 *
	 * We'll either bail at !has_free_capacity, or we'll detect a huge
	 * imbalance and bail there.
	 */
	if (!cpus)
		return;

	/* smt := ceil(cpus / capacity), assumes: 1 < smt_power < 2 */
	smt = DIV_ROUND_UP(SCHED_CAPACITY_SCALE * cpus, ns->compute_capacity);
	capacity = cpus / smt; /* cores */

	ns->task_capacity = min_t(unsigned, capacity,
		DIV_ROUND_CLOSEST(ns->compute_capacity, SCHED_CAPACITY_SCALE));
	ns->has_free_capacity = (ns->nr_running < ns->task_capacity);
}

struct task_numa_env {
	struct task_struct *p;

	int src_cpu, src_nid;
	int dst_cpu, dst_nid;

	struct numa_stats src_stats, dst_stats;

	int imbalance_pct;
	int dist;

	struct task_struct *best_task;
	long best_imp;
	int best_cpu;
};

static void task_numa_assign(struct task_numa_env *env,
			     struct task_struct *p, long imp)
{
	if (env->best_task)
		put_task_struct(env->best_task);
	if (p)
		get_task_struct(p);

	env->best_task = p;
	env->best_imp = imp;
	env->best_cpu = env->dst_cpu;
}

static bool load_too_imbalanced(long src_load, long dst_load,
				struct task_numa_env *env)
{
	long imb, old_imb;
	long orig_src_load, orig_dst_load;
	long src_capacity, dst_capacity;

	/*
	 * The load is corrected for the CPU capacity available on each node.
	 *
	 * src_load        dst_load
	 * ------------ vs ---------
	 * src_capacity    dst_capacity
	 */
	src_capacity = env->src_stats.compute_capacity;
	dst_capacity = env->dst_stats.compute_capacity;

	/* We care about the slope of the imbalance, not the direction. */
	if (dst_load < src_load)
		swap(dst_load, src_load);

	/* Is the difference below the threshold? */
	imb = dst_load * src_capacity * 100 -
	      src_load * dst_capacity * env->imbalance_pct;
	if (imb <= 0)
		return false;

	/*
	 * The imbalance is above the allowed threshold.
	 * Compare it with the old imbalance.
	 */
	orig_src_load = env->src_stats.load;
	orig_dst_load = env->dst_stats.load;

	if (orig_dst_load < orig_src_load)
		swap(orig_dst_load, orig_src_load);

	old_imb = orig_dst_load * src_capacity * 100 -
		  orig_src_load * dst_capacity * env->imbalance_pct;

	/* Would this change make things worse? */
	return (imb > old_imb);
}

/*
 * This checks if the overall compute and NUMA accesses of the system would
 * be improved if the source task was migrated to the target dst_cpu, taking
 * into account that it might be best if the task running on the dst_cpu
 * were exchanged with the source task.
 */
static void task_numa_compare(struct task_numa_env *env,
			      long taskimp, long groupimp)
{
	struct rq *src_rq = cpu_rq(env->src_cpu);
	struct rq *dst_rq = cpu_rq(env->dst_cpu);
	struct task_struct *cur;
	long src_load, dst_load;
	long load;
	long imp = env->p->numa_group ? groupimp : taskimp;
	long moveimp = imp;
	int dist = env->dist;

	rcu_read_lock();
	cur = task_rcu_dereference(&dst_rq->curr);
	if (cur && ((cur->flags & PF_EXITING) || is_idle_task(cur)))
		cur = NULL;

	/*
	 * Because we have preemption enabled we can get migrated around and
	 * end up trying to select ourselves (current == env->p) as a swap
	 * candidate.
	 */
	if (cur == env->p)
		goto unlock;

	/*
	 * "imp" is the fault differential for the source task between the
	 * source and destination node. Calculate the total differential for
	 * the source task and potential destination task. The more negative
	 * the value is, the more remote accesses would be expected to
	 * be incurred if the tasks were swapped.
	 */
	if (cur) {
		/* Skip this swap candidate if it cannot move to the source cpu */
		if (!cpumask_test_cpu(env->src_cpu, tsk_cpus_allowed(cur)))
			goto unlock;

		/*
		 * If dst and source tasks are in the same NUMA group, or not
		 * in any group then look only at task weights.
		 */
		if (cur->numa_group == env->p->numa_group) {
			imp = taskimp + task_weight(cur, env->src_nid, dist) -
			      task_weight(cur, env->dst_nid, dist);
			/*
			 * Add some hysteresis to prevent swapping the
			 * tasks within a group over tiny differences.
			 */
			if (cur->numa_group)
				imp -= imp/16;
		} else {
			/*
			 * Compare the group weights. If a task is all by
			 * itself (not part of a group), use the task weight
			 * instead.
			 */
			if (cur->numa_group)
				imp += group_weight(cur, env->src_nid, dist) -
				       group_weight(cur, env->dst_nid, dist);
			else
				imp += task_weight(cur, env->src_nid, dist) -
				       task_weight(cur, env->dst_nid, dist);
		}
	}

	if (imp <= env->best_imp && moveimp <= env->best_imp)
		goto unlock;

	if (!cur) {
		/* Is there capacity at our destination? */
		if (env->src_stats.nr_running <= env->src_stats.task_capacity &&
		    !env->dst_stats.has_free_capacity)
			goto unlock;

		goto balance;
	}

	/* Balance doesn't matter much if we're running a task per cpu */
	if (imp > env->best_imp && src_rq->nr_running == 1 &&
			dst_rq->nr_running == 1)
		goto assign;

	/*
	 * In the overloaded case, try and keep the load balanced.
	 */
balance:
	load = task_h_load(env->p);
	dst_load = env->dst_stats.load + load;
	src_load = env->src_stats.load - load;

	if (moveimp > imp && moveimp > env->best_imp) {
		/*
		 * If the improvement from just moving env->p in this direction
		 * is better than swapping tasks around, check if a move is
		 * possible. Store a slightly smaller score than moveimp,
		 * so an actually idle CPU will win.
		 */
		if (!load_too_imbalanced(src_load, dst_load, env)) {
			imp = moveimp - 1;
			cur = NULL;
			goto assign;
		}
	}

	if (imp <= env->best_imp)
		goto unlock;

	if (cur) {
		load = task_h_load(cur);
		dst_load -= load;
		src_load += load;
	}

	if (load_too_imbalanced(src_load, dst_load, env))
		goto unlock;

	/*
	 * One idle CPU per node is evaluated for a task numa move.
	 * Call select_idle_sibling to maybe find a better one.
	 */
	if (!cur) {
		/*
		 * select_idle_siblings() uses a per-cpu cpumask that
		 * can be used from IRQ context.
		 */
		local_irq_disable();
		env->dst_cpu = select_idle_sibling(env->p, env->src_cpu,
						   env->dst_cpu);
		local_irq_enable();
	}

assign:
	task_numa_assign(env, cur, imp);
unlock:
	rcu_read_unlock();
}

static void task_numa_find_cpu(struct task_numa_env *env,
				long taskimp, long groupimp)
{
	int cpu;

	for_each_cpu(cpu, cpumask_of_node(env->dst_nid)) {
		/* Skip this CPU if the source task cannot migrate */
		if (!cpumask_test_cpu(cpu, tsk_cpus_allowed(env->p)))
			continue;

		env->dst_cpu = cpu;
		task_numa_compare(env, taskimp, groupimp);
	}
}

/* Only move tasks to a NUMA node less busy than the current node. */
static bool numa_has_capacity(struct task_numa_env *env)
{
	struct numa_stats *src = &env->src_stats;
	struct numa_stats *dst = &env->dst_stats;

	if (src->has_free_capacity && !dst->has_free_capacity)
		return false;

	/*
	 * Only consider a task move if the source has a higher load
	 * than the destination, corrected for CPU capacity on each node.
	 *
	 *      src->load                dst->load
	 * --------------------- vs ---------------------
	 * src->compute_capacity    dst->compute_capacity
	 */
	if (src->load * dst->compute_capacity * env->imbalance_pct >

	    dst->load * src->compute_capacity * 100)
		return true;

	return false;
}

58d081b5
MG
1646static int task_numa_migrate(struct task_struct *p)
1647{
58d081b5
MG
1648 struct task_numa_env env = {
1649 .p = p,
fb13c7ee 1650
58d081b5 1651 .src_cpu = task_cpu(p),
b32e86b4 1652 .src_nid = task_node(p),
fb13c7ee
MG
1653
1654 .imbalance_pct = 112,
1655
1656 .best_task = NULL,
1657 .best_imp = 0,
4142c3eb 1658 .best_cpu = -1,
58d081b5
MG
1659 };
1660 struct sched_domain *sd;
887c290e 1661 unsigned long taskweight, groupweight;
7bd95320 1662 int nid, ret, dist;
887c290e 1663 long taskimp, groupimp;
e6628d5b 1664
58d081b5 1665 /*
fb13c7ee
MG
1666 * Pick the lowest SD_NUMA domain, as that would have the smallest
1667 * imbalance and would be the first to start moving tasks about.
1668 *
1669 * And we want to avoid any moving of tasks about, as that would create
1670 * random movement of tasks -- counter the numa conditions we're trying
1671 * to satisfy here.
58d081b5
MG
1672 */
1673 rcu_read_lock();
fb13c7ee 1674 sd = rcu_dereference(per_cpu(sd_numa, env.src_cpu));
46a73e8a
RR
1675 if (sd)
1676 env.imbalance_pct = 100 + (sd->imbalance_pct - 100) / 2;
e6628d5b
MG
1677 rcu_read_unlock();
1678
46a73e8a
RR
1679 /*
1680 * Cpusets can break the scheduler domain tree into smaller
1681 * balance domains, some of which do not cross NUMA boundaries.
1682 * Tasks that are "trapped" in such domains cannot be migrated
1683 * elsewhere, so there is no point in (re)trying.
1684 */
1685 if (unlikely(!sd)) {
de1b301a 1686 p->numa_preferred_nid = task_node(p);
46a73e8a
RR
1687 return -EINVAL;
1688 }
1689
2c8a50aa 1690 env.dst_nid = p->numa_preferred_nid;
7bd95320
RR
1691 dist = env.dist = node_distance(env.src_nid, env.dst_nid);
1692 taskweight = task_weight(p, env.src_nid, dist);
1693 groupweight = group_weight(p, env.src_nid, dist);
1694 update_numa_stats(&env.src_stats, env.src_nid);
1695 taskimp = task_weight(p, env.dst_nid, dist) - taskweight;
1696 groupimp = group_weight(p, env.dst_nid, dist) - groupweight;
2c8a50aa 1697 update_numa_stats(&env.dst_stats, env.dst_nid);
58d081b5 1698
a43455a1 1699 /* Try to find a spot on the preferred nid. */
6f9aad0b
RR
1700 if (numa_has_capacity(&env))
1701 task_numa_find_cpu(&env, taskimp, groupimp);
e1dda8a7 1702
9de05d48
RR
1703 /*
1704 * Look at other nodes in these cases:
1705 * - there is no space available on the preferred_nid
1706 * - the task is part of a numa_group that is interleaved across
1707 * multiple NUMA nodes; in order to better consolidate the group,
1708 * we need to check other locations.
1709 */
4142c3eb 1710 if (env.best_cpu == -1 || (p->numa_group && p->numa_group->active_nodes > 1)) {
2c8a50aa
MG
1711 for_each_online_node(nid) {
1712 if (nid == env.src_nid || nid == p->numa_preferred_nid)
1713 continue;
58d081b5 1714
7bd95320 1715 dist = node_distance(env.src_nid, env.dst_nid);
6c6b1193
RR
1716 if (sched_numa_topology_type == NUMA_BACKPLANE &&
1717 dist != env.dist) {
1718 taskweight = task_weight(p, env.src_nid, dist);
1719 groupweight = group_weight(p, env.src_nid, dist);
1720 }
7bd95320 1721
83e1d2cd 1722 /* Only consider nodes where both task and groups benefit */
7bd95320
RR
1723 taskimp = task_weight(p, nid, dist) - taskweight;
1724 groupimp = group_weight(p, nid, dist) - groupweight;
887c290e 1725 if (taskimp < 0 && groupimp < 0)
fb13c7ee
MG
1726 continue;
1727
7bd95320 1728 env.dist = dist;
2c8a50aa
MG
1729 env.dst_nid = nid;
1730 update_numa_stats(&env.dst_stats, env.dst_nid);
6f9aad0b
RR
1731 if (numa_has_capacity(&env))
1732 task_numa_find_cpu(&env, taskimp, groupimp);
58d081b5
MG
1733 }
1734 }
1735
68d1b02a
RR
1736 /*
1737 * If the task is part of a workload that spans multiple NUMA nodes,
1738 * and is migrating into one of the workload's active nodes, remember
1739 * this node as the task's preferred numa node, so the workload can
1740 * settle down.
1741 * A task that migrated to a second choice node will be better off
1742 * trying for a better one later. Do not set the preferred node here.
1743 */
db015dae 1744 if (p->numa_group) {
4142c3eb
RR
1745 struct numa_group *ng = p->numa_group;
1746
db015dae
RR
1747 if (env.best_cpu == -1)
1748 nid = env.src_nid;
1749 else
1750 nid = env.dst_nid;
1751
4142c3eb 1752 if (ng->active_nodes > 1 && numa_is_active_node(env.dst_nid, ng))
db015dae
RR
1753 sched_setnuma(p, env.dst_nid);
1754 }
1755
1756 /* No better CPU than the current one was found. */
1757 if (env.best_cpu == -1)
1758 return -EAGAIN;
0ec8aa00 1759
04bb2f94
RR
1760 /*
1761 * Reset the scan period if the task is being rescheduled on an
1762 * alternative node to recheck if the task is now properly placed.
1763 */
1764 p->numa_scan_period = task_scan_min(p);
1765
fb13c7ee 1766 if (env.best_task == NULL) {
286549dc
MG
1767 ret = migrate_task_to(p, env.best_cpu);
1768 if (ret != 0)
1769 trace_sched_stick_numa(p, env.src_cpu, env.best_cpu);
fb13c7ee
MG
1770 return ret;
1771 }
1772
1773 ret = migrate_swap(p, env.best_task);
286549dc
MG
1774 if (ret != 0)
1775 trace_sched_stick_numa(p, env.src_cpu, task_cpu(env.best_task));
fb13c7ee
MG
1776 put_task_struct(env.best_task);
1777 return ret;
e6628d5b
MG
1778}
1779
6b9a7460
MG
1780/* Attempt to migrate a task to a CPU on the preferred node. */
1781static void numa_migrate_preferred(struct task_struct *p)
1782{
5085e2a3
RR
1783 unsigned long interval = HZ;
1784
2739d3ee 1785 /* This task has no NUMA fault statistics yet */
44dba3d5 1786 if (unlikely(p->numa_preferred_nid == -1 || !p->numa_faults))
6b9a7460
MG
1787 return;
1788
2739d3ee 1789 /* Periodically retry migrating the task to the preferred node */
5085e2a3
RR
1790 interval = min(interval, msecs_to_jiffies(p->numa_scan_period) / 16);
1791 p->numa_migrate_retry = jiffies + interval;
2739d3ee
RR
1792
1793 /* Success if task is already running on preferred CPU */
de1b301a 1794 if (task_node(p) == p->numa_preferred_nid)
6b9a7460
MG
1795 return;
1796
1797 /* Otherwise, try migrate to a CPU on the preferred node */
2739d3ee 1798 task_numa_migrate(p);
6b9a7460
MG
1799}
1800
20e07dea 1801/*
4142c3eb 1802 * Find out how many nodes on the workload is actively running on. Do this by
20e07dea
RR
1803 * tracking the nodes from which NUMA hinting faults are triggered. This can
1804 * be different from the set of nodes where the workload's memory is currently
1805 * located.
20e07dea 1806 */
4142c3eb 1807static void numa_group_count_active_nodes(struct numa_group *numa_group)
20e07dea
RR
1808{
1809 unsigned long faults, max_faults = 0;
4142c3eb 1810 int nid, active_nodes = 0;
20e07dea
RR
1811
1812 for_each_online_node(nid) {
1813 faults = group_faults_cpu(numa_group, nid);
1814 if (faults > max_faults)
1815 max_faults = faults;
1816 }
1817
1818 for_each_online_node(nid) {
1819 faults = group_faults_cpu(numa_group, nid);
4142c3eb
RR
1820 if (faults * ACTIVE_NODE_FRACTION > max_faults)
1821 active_nodes++;
20e07dea 1822 }
4142c3eb
RR
1823
1824 numa_group->max_faults_cpu = max_faults;
1825 numa_group->active_nodes = active_nodes;
20e07dea
RR
1826}
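/*
 * Illustrative example (assuming ACTIVE_NODE_FRACTION is 3, as defined
 * earlier in this file): with per-node CPU fault counts of 900, 400, 250
 * and 50, max_faults is 900 and a node counts as active when
 * faults * 3 > 900, i.e. when it sees more than a third of the busiest
 * node's faults. Here only the first two nodes qualify, so active_nodes
 * becomes 2 and max_faults_cpu becomes 900.
 */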
1827
04bb2f94
RR
1828/*
1829 * When adapting the scan rate, the period is divided into NUMA_PERIOD_SLOTS
1830 * increments. The more local the fault statistics are, the higher the scan
a22b4b01
RR
1831 * period will be for the next scan window. If local/(local+remote) ratio is
1832 * below NUMA_PERIOD_THRESHOLD (where range of ratio is 1..NUMA_PERIOD_SLOTS)
1833 * the scan period will decrease. Aim for 70% local accesses.
04bb2f94
RR
1834 */
1835#define NUMA_PERIOD_SLOTS 10
a22b4b01 1836#define NUMA_PERIOD_THRESHOLD 7
04bb2f94
RR
1837
1838/*
1839 * Increase the scan period (slow down scanning) if the majority of
1840 * our memory is already on our local node, or if the majority of
1841 * the page accesses are shared with other processes.
1842 * Otherwise, decrease the scan period.
1843 */
1844static void update_task_scan_period(struct task_struct *p,
1845 unsigned long shared, unsigned long private)
1846{
1847 unsigned int period_slot;
1848 int ratio;
1849 int diff;
1850
1851 unsigned long remote = p->numa_faults_locality[0];
1852 unsigned long local = p->numa_faults_locality[1];
1853
1854 /*
1855 * If there were no recorded hinting faults then either the task is
1856 * completely idle or all activity is in areas that are not of interest
074c2381
MG
1857 * to automatic numa balancing. Related to that, if there were failed
1858 * migrations then it implies we are migrating too quickly or the local
1859 * node is overloaded. In either case, scan slower.
04bb2f94 1860 */
074c2381 1861 if (local + shared == 0 || p->numa_faults_locality[2]) {
04bb2f94
RR
1862 p->numa_scan_period = min(p->numa_scan_period_max,
1863 p->numa_scan_period << 1);
1864
1865 p->mm->numa_next_scan = jiffies +
1866 msecs_to_jiffies(p->numa_scan_period);
1867
1868 return;
1869 }
1870
1871 /*
1872 * Prepare to scale scan period relative to the current period.
1873 * == NUMA_PERIOD_THRESHOLD scan period stays the same
1874 * < NUMA_PERIOD_THRESHOLD scan period decreases (scan faster)
1875 * >= NUMA_PERIOD_THRESHOLD scan period increases (scan slower)
1876 */
1877 period_slot = DIV_ROUND_UP(p->numa_scan_period, NUMA_PERIOD_SLOTS);
1878 ratio = (local * NUMA_PERIOD_SLOTS) / (local + remote);
1879 if (ratio >= NUMA_PERIOD_THRESHOLD) {
1880 int slot = ratio - NUMA_PERIOD_THRESHOLD;
1881 if (!slot)
1882 slot = 1;
1883 diff = slot * period_slot;
1884 } else {
1885 diff = -(NUMA_PERIOD_THRESHOLD - ratio) * period_slot;
1886
1887 /*
1888 * Scale scan rate increases based on sharing. There is an
1889 * inverse relationship between the degree of sharing and
1890 * the adjustment made to the scanning period. Broadly
1891 * speaking the intent is that there is little point
1892 * scanning faster if shared accesses dominate as it may
1893 * simply bounce migrations uselessly
1894 */
2847c90e 1895 ratio = DIV_ROUND_UP(private * NUMA_PERIOD_SLOTS, (private + shared + 1));
04bb2f94
RR
1896 diff = (diff * ratio) / NUMA_PERIOD_SLOTS;
1897 }
1898
1899 p->numa_scan_period = clamp(p->numa_scan_period + diff,
1900 task_scan_min(p), task_scan_max(p));
1901 memset(p->numa_faults_locality, 0, sizeof(p->numa_faults_locality));
1902}
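/*
 * Worked example (illustrative numbers): with p->numa_scan_period = 1000 ms,
 * period_slot = DIV_ROUND_UP(1000, 10) = 100.
 *
 * Mostly-local faults: local = 900, remote = 100 gives ratio = 9, so
 * slot = 9 - 7 = 2 and diff = +200; the period grows to 1200 ms (scan slower).
 *
 * Mostly-remote faults: local = 300, remote = 700 gives ratio = 3, so
 * diff = -(7 - 3) * 100 = -400. With private = 800 and shared = 200 the
 * private ratio is DIV_ROUND_UP(8000, 1001) = 8, so diff is scaled to
 * -400 * 8 / 10 = -320 and the period shrinks to 680 ms (scan faster),
 * subject to the task_scan_min()/task_scan_max() clamp.
 */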
1903
7e2703e6
RR
1904/*
1905 * Get the fraction of time the task has been running since the last
1906 * NUMA placement cycle. The scheduler keeps similar statistics, but
1907 * decays those on a 32ms period, which is orders of magnitude off
1908 * from the dozens-of-seconds NUMA balancing period. Use the scheduler
1909 * stats only if the task is so new there are no NUMA statistics yet.
1910 */
1911static u64 numa_get_avg_runtime(struct task_struct *p, u64 *period)
1912{
1913 u64 runtime, delta, now;
1914 /* Use the start of this time slice to avoid calculations. */
1915 now = p->se.exec_start;
1916 runtime = p->se.sum_exec_runtime;
1917
1918 if (p->last_task_numa_placement) {
1919 delta = runtime - p->last_sum_exec_runtime;
1920 *period = now - p->last_task_numa_placement;
1921 } else {
9d89c257
YD
1922 delta = p->se.avg.load_sum / p->se.load.weight;
1923 *period = LOAD_AVG_MAX;
7e2703e6
RR
1924 }
1925
1926 p->last_sum_exec_runtime = runtime;
1927 p->last_task_numa_placement = now;
1928
1929 return delta;
1930}
1931
54009416
RR
1932/*
1933 * Determine the preferred nid for a task in a numa_group. This needs to
1934 * be done in a way that produces consistent results with group_weight,
1935 * otherwise workloads might not converge.
1936 */
1937static int preferred_group_nid(struct task_struct *p, int nid)
1938{
1939 nodemask_t nodes;
1940 int dist;
1941
1942 /* Direct connections between all NUMA nodes. */
1943 if (sched_numa_topology_type == NUMA_DIRECT)
1944 return nid;
1945
1946 /*
1947 * On a system with glueless mesh NUMA topology, group_weight
1948 * scores nodes according to the number of NUMA hinting faults on
1949 * both the node itself, and on nearby nodes.
1950 */
1951 if (sched_numa_topology_type == NUMA_GLUELESS_MESH) {
1952 unsigned long score, max_score = 0;
1953 int node, max_node = nid;
1954
1955 dist = sched_max_numa_distance;
1956
1957 for_each_online_node(node) {
1958 score = group_weight(p, node, dist);
1959 if (score > max_score) {
1960 max_score = score;
1961 max_node = node;
1962 }
1963 }
1964 return max_node;
1965 }
1966
1967 /*
1968 * Finding the preferred nid in a system with NUMA backplane
1969 * interconnect topology is more involved. The goal is to locate
1970 * tasks from numa_groups near each other in the system, and
1971 * untangle workloads from different sides of the system. This requires
1972 * searching down the hierarchy of node groups, recursively searching
1973 * inside the highest scoring group of nodes. The nodemask tricks
1974 * keep the complexity of the search down.
1975 */
1976 nodes = node_online_map;
1977 for (dist = sched_max_numa_distance; dist > LOCAL_DISTANCE; dist--) {
1978 unsigned long max_faults = 0;
81907478 1979 nodemask_t max_group = NODE_MASK_NONE;
54009416
RR
1980 int a, b;
1981
1982 /* Are there nodes at this distance from each other? */
1983 if (!find_numa_distance(dist))
1984 continue;
1985
1986 for_each_node_mask(a, nodes) {
1987 unsigned long faults = 0;
1988 nodemask_t this_group;
1989 nodes_clear(this_group);
1990
1991 /* Sum group's NUMA faults; includes a==b case. */
1992 for_each_node_mask(b, nodes) {
1993 if (node_distance(a, b) < dist) {
1994 faults += group_faults(p, b);
1995 node_set(b, this_group);
1996 node_clear(b, nodes);
1997 }
1998 }
1999
2000 /* Remember the top group. */
2001 if (faults > max_faults) {
2002 max_faults = faults;
2003 max_group = this_group;
2004 /*
2005 * subtle: at the smallest distance there is
2006 * just one node left in each "group", the
2007 * winner is the preferred nid.
2008 */
2009 nid = a;
2010 }
2011 }
2012 /* Next round, evaluate the nodes within max_group. */
890a5409
JB
2013 if (!max_faults)
2014 break;
54009416
RR
2015 nodes = max_group;
2016 }
2017 return nid;
2018}
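/*
 * Sketch of how the search above narrows, on a hypothetical two-backplane
 * system (illustrative topology: eight nodes, LOCAL_DISTANCE 10, distance
 * 20 within a backplane, 40 across backplanes, so sched_max_numa_distance
 * is 40):
 *
 * - dist == 40: every node groups with the other three nodes on its
 *   backplane (distance < 40), so the candidate groups are {0-3} and
 *   {4-7}; the backplane with the most group faults wins and becomes the
 *   new nodemask.
 * - dist == 20: within the winning backplane only a == b satisfies
 *   node_distance(a, b) < 20, so each "group" is a single node and the
 *   node with the most faults becomes the returned nid.
 */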
2019
cbee9f88
PZ
2020static void task_numa_placement(struct task_struct *p)
2021{
83e1d2cd
MG
2022 int seq, nid, max_nid = -1, max_group_nid = -1;
2023 unsigned long max_faults = 0, max_group_faults = 0;
04bb2f94 2024 unsigned long fault_types[2] = { 0, 0 };
7e2703e6
RR
2025 unsigned long total_faults;
2026 u64 runtime, period;
7dbd13ed 2027 spinlock_t *group_lock = NULL;
cbee9f88 2028
7e5a2c17
JL
2029 /*
2030 * The p->mm->numa_scan_seq field gets updated without
2031 * exclusive access. Use READ_ONCE() here to ensure
2032 * that the field is read in a single access:
2033 */
316c1608 2034 seq = READ_ONCE(p->mm->numa_scan_seq);
cbee9f88
PZ
2035 if (p->numa_scan_seq == seq)
2036 return;
2037 p->numa_scan_seq = seq;
598f0ec0 2038 p->numa_scan_period_max = task_scan_max(p);
cbee9f88 2039
7e2703e6
RR
2040 total_faults = p->numa_faults_locality[0] +
2041 p->numa_faults_locality[1];
2042 runtime = numa_get_avg_runtime(p, &period);
2043
7dbd13ed
MG
2044 /* If the task is part of a group prevent parallel updates to group stats */
2045 if (p->numa_group) {
2046 group_lock = &p->numa_group->lock;
60e69eed 2047 spin_lock_irq(group_lock);
7dbd13ed
MG
2048 }
2049
688b7585
MG
2050 /* Find the node with the highest number of faults */
2051 for_each_online_node(nid) {
44dba3d5
IM
2052 /* Keep track of the offsets in numa_faults array */
2053 int mem_idx, membuf_idx, cpu_idx, cpubuf_idx;
83e1d2cd 2054 unsigned long faults = 0, group_faults = 0;
44dba3d5 2055 int priv;
745d6147 2056
be1e4e76 2057 for (priv = 0; priv < NR_NUMA_HINT_FAULT_TYPES; priv++) {
7e2703e6 2058 long diff, f_diff, f_weight;
8c8a743c 2059
44dba3d5
IM
2060 mem_idx = task_faults_idx(NUMA_MEM, nid, priv);
2061 membuf_idx = task_faults_idx(NUMA_MEMBUF, nid, priv);
2062 cpu_idx = task_faults_idx(NUMA_CPU, nid, priv);
2063 cpubuf_idx = task_faults_idx(NUMA_CPUBUF, nid, priv);
745d6147 2064
ac8e895b 2065 /* Decay existing window, copy faults since last scan */
44dba3d5
IM
2066 diff = p->numa_faults[membuf_idx] - p->numa_faults[mem_idx] / 2;
2067 fault_types[priv] += p->numa_faults[membuf_idx];
2068 p->numa_faults[membuf_idx] = 0;
fb13c7ee 2069
7e2703e6
RR
2070 /*
2071 * Normalize the faults_from, so all tasks in a group
2072 * count according to CPU use, instead of by the raw
2073 * number of faults. Tasks with little runtime have
2074 * little over-all impact on throughput, and thus their
2075 * faults are less important.
2076 */
2077 f_weight = div64_u64(runtime << 16, period + 1);
44dba3d5 2078 f_weight = (f_weight * p->numa_faults[cpubuf_idx]) /
7e2703e6 2079 (total_faults + 1);
44dba3d5
IM
2080 f_diff = f_weight - p->numa_faults[cpu_idx] / 2;
2081 p->numa_faults[cpubuf_idx] = 0;
50ec8a40 2082
44dba3d5
IM
2083 p->numa_faults[mem_idx] += diff;
2084 p->numa_faults[cpu_idx] += f_diff;
2085 faults += p->numa_faults[mem_idx];
83e1d2cd 2086 p->total_numa_faults += diff;
8c8a743c 2087 if (p->numa_group) {
44dba3d5
IM
2088 /*
2089 * safe because we can only change our own group
2090 *
2091 * mem_idx represents the offset for a given
2092 * nid and priv in a specific region because it
2093 * is at the beginning of the numa_faults array.
2094 */
2095 p->numa_group->faults[mem_idx] += diff;
2096 p->numa_group->faults_cpu[mem_idx] += f_diff;
989348b5 2097 p->numa_group->total_faults += diff;
44dba3d5 2098 group_faults += p->numa_group->faults[mem_idx];
8c8a743c 2099 }
ac8e895b
MG
2100 }
2101
688b7585
MG
2102 if (faults > max_faults) {
2103 max_faults = faults;
2104 max_nid = nid;
2105 }
83e1d2cd
MG
2106
2107 if (group_faults > max_group_faults) {
2108 max_group_faults = group_faults;
2109 max_group_nid = nid;
2110 }
2111 }
2112
04bb2f94
RR
2113 update_task_scan_period(p, fault_types[0], fault_types[1]);
2114
7dbd13ed 2115 if (p->numa_group) {
4142c3eb 2116 numa_group_count_active_nodes(p->numa_group);
60e69eed 2117 spin_unlock_irq(group_lock);
54009416 2118 max_nid = preferred_group_nid(p, max_group_nid);
688b7585
MG
2119 }
2120
bb97fc31
RR
2121 if (max_faults) {
2122 /* Set the new preferred node */
2123 if (max_nid != p->numa_preferred_nid)
2124 sched_setnuma(p, max_nid);
2125
2126 if (task_node(p) != p->numa_preferred_nid)
2127 numa_migrate_preferred(p);
3a7053b3 2128 }
cbee9f88
PZ
2129}
2130
8c8a743c
PZ
2131static inline int get_numa_group(struct numa_group *grp)
2132{
2133 return atomic_inc_not_zero(&grp->refcount);
2134}
2135
2136static inline void put_numa_group(struct numa_group *grp)
2137{
2138 if (atomic_dec_and_test(&grp->refcount))
2139 kfree_rcu(grp, rcu);
2140}
2141
3e6a9418
MG
2142static void task_numa_group(struct task_struct *p, int cpupid, int flags,
2143 int *priv)
8c8a743c
PZ
2144{
2145 struct numa_group *grp, *my_grp;
2146 struct task_struct *tsk;
2147 bool join = false;
2148 int cpu = cpupid_to_cpu(cpupid);
2149 int i;
2150
2151 if (unlikely(!p->numa_group)) {
2152 unsigned int size = sizeof(struct numa_group) +
50ec8a40 2153 4*nr_node_ids*sizeof(unsigned long);
8c8a743c
PZ
2154
2155 grp = kzalloc(size, GFP_KERNEL | __GFP_NOWARN);
2156 if (!grp)
2157 return;
2158
2159 atomic_set(&grp->refcount, 1);
4142c3eb
RR
2160 grp->active_nodes = 1;
2161 grp->max_faults_cpu = 0;
8c8a743c 2162 spin_lock_init(&grp->lock);
e29cf08b 2163 grp->gid = p->pid;
50ec8a40 2164 /* Second half of the array tracks nids where faults happen */
be1e4e76
RR
2165 grp->faults_cpu = grp->faults + NR_NUMA_HINT_FAULT_TYPES *
2166 nr_node_ids;
8c8a743c 2167
be1e4e76 2168 for (i = 0; i < NR_NUMA_HINT_FAULT_STATS * nr_node_ids; i++)
44dba3d5 2169 grp->faults[i] = p->numa_faults[i];
8c8a743c 2170
989348b5 2171 grp->total_faults = p->total_numa_faults;
83e1d2cd 2172
8c8a743c
PZ
2173 grp->nr_tasks++;
2174 rcu_assign_pointer(p->numa_group, grp);
2175 }
2176
2177 rcu_read_lock();
316c1608 2178 tsk = READ_ONCE(cpu_rq(cpu)->curr);
8c8a743c
PZ
2179
2180 if (!cpupid_match_pid(tsk, cpupid))
3354781a 2181 goto no_join;
8c8a743c
PZ
2182
2183 grp = rcu_dereference(tsk->numa_group);
2184 if (!grp)
3354781a 2185 goto no_join;
8c8a743c
PZ
2186
2187 my_grp = p->numa_group;
2188 if (grp == my_grp)
3354781a 2189 goto no_join;
8c8a743c
PZ
2190
2191 /*
2192 * Only join the other group if its bigger; if we're the bigger group,
2193 * the other task will join us.
2194 */
2195 if (my_grp->nr_tasks > grp->nr_tasks)
3354781a 2196 goto no_join;
8c8a743c
PZ
2197
2198 /*
2199 * Tie-break on the grp address.
2200 */
2201 if (my_grp->nr_tasks == grp->nr_tasks && my_grp > grp)
3354781a 2202 goto no_join;
8c8a743c 2203
dabe1d99
RR
2204 /* Always join threads in the same process. */
2205 if (tsk->mm == current->mm)
2206 join = true;
2207
2208 /* Simple filter to avoid false positives due to PID collisions */
2209 if (flags & TNF_SHARED)
2210 join = true;
8c8a743c 2211
3e6a9418
MG
2212 /* Update priv based on whether false sharing was detected */
2213 *priv = !join;
2214
dabe1d99 2215 if (join && !get_numa_group(grp))
3354781a 2216 goto no_join;
8c8a743c 2217
8c8a743c
PZ
2218 rcu_read_unlock();
2219
2220 if (!join)
2221 return;
2222
60e69eed
MG
2223 BUG_ON(irqs_disabled());
2224 double_lock_irq(&my_grp->lock, &grp->lock);
989348b5 2225
be1e4e76 2226 for (i = 0; i < NR_NUMA_HINT_FAULT_STATS * nr_node_ids; i++) {
44dba3d5
IM
2227 my_grp->faults[i] -= p->numa_faults[i];
2228 grp->faults[i] += p->numa_faults[i];
8c8a743c 2229 }
989348b5
MG
2230 my_grp->total_faults -= p->total_numa_faults;
2231 grp->total_faults += p->total_numa_faults;
8c8a743c 2232
8c8a743c
PZ
2233 my_grp->nr_tasks--;
2234 grp->nr_tasks++;
2235
2236 spin_unlock(&my_grp->lock);
60e69eed 2237 spin_unlock_irq(&grp->lock);
8c8a743c
PZ
2238
2239 rcu_assign_pointer(p->numa_group, grp);
2240
2241 put_numa_group(my_grp);
3354781a
PZ
2242 return;
2243
2244no_join:
2245 rcu_read_unlock();
2246 return;
8c8a743c
PZ
2247}
2248
2249void task_numa_free(struct task_struct *p)
2250{
2251 struct numa_group *grp = p->numa_group;
44dba3d5 2252 void *numa_faults = p->numa_faults;
e9dd685c
SR
2253 unsigned long flags;
2254 int i;
8c8a743c
PZ
2255
2256 if (grp) {
e9dd685c 2257 spin_lock_irqsave(&grp->lock, flags);
be1e4e76 2258 for (i = 0; i < NR_NUMA_HINT_FAULT_STATS * nr_node_ids; i++)
44dba3d5 2259 grp->faults[i] -= p->numa_faults[i];
989348b5 2260 grp->total_faults -= p->total_numa_faults;
83e1d2cd 2261
8c8a743c 2262 grp->nr_tasks--;
e9dd685c 2263 spin_unlock_irqrestore(&grp->lock, flags);
35b123e2 2264 RCU_INIT_POINTER(p->numa_group, NULL);
8c8a743c
PZ
2265 put_numa_group(grp);
2266 }
2267
44dba3d5 2268 p->numa_faults = NULL;
82727018 2269 kfree(numa_faults);
8c8a743c
PZ
2270}
2271
cbee9f88
PZ
2272/*
2273 * Got a PROT_NONE fault for a page on @node.
2274 */
58b46da3 2275void task_numa_fault(int last_cpupid, int mem_node, int pages, int flags)
cbee9f88
PZ
2276{
2277 struct task_struct *p = current;
6688cc05 2278 bool migrated = flags & TNF_MIGRATED;
58b46da3 2279 int cpu_node = task_node(current);
792568ec 2280 int local = !!(flags & TNF_FAULT_LOCAL);
4142c3eb 2281 struct numa_group *ng;
ac8e895b 2282 int priv;
cbee9f88 2283
2a595721 2284 if (!static_branch_likely(&sched_numa_balancing))
1a687c2e
MG
2285 return;
2286
9ff1d9ff
MG
2287 /* for example, ksmd faulting in a user's mm */
2288 if (!p->mm)
2289 return;
2290
f809ca9a 2291 /* Allocate buffer to track faults on a per-node basis */
44dba3d5
IM
2292 if (unlikely(!p->numa_faults)) {
2293 int size = sizeof(*p->numa_faults) *
be1e4e76 2294 NR_NUMA_HINT_FAULT_BUCKETS * nr_node_ids;
f809ca9a 2295
44dba3d5
IM
2296 p->numa_faults = kzalloc(size, GFP_KERNEL|__GFP_NOWARN);
2297 if (!p->numa_faults)
f809ca9a 2298 return;
745d6147 2299
83e1d2cd 2300 p->total_numa_faults = 0;
04bb2f94 2301 memset(p->numa_faults_locality, 0, sizeof(p->numa_faults_locality));
f809ca9a 2302 }
cbee9f88 2303
8c8a743c
PZ
2304 /*
2305 * First accesses are treated as private, otherwise consider accesses
2306 * to be private if the accessing pid has not changed
2307 */
2308 if (unlikely(last_cpupid == (-1 & LAST_CPUPID_MASK))) {
2309 priv = 1;
2310 } else {
2311 priv = cpupid_match_pid(p, last_cpupid);
6688cc05 2312 if (!priv && !(flags & TNF_NO_GROUP))
3e6a9418 2313 task_numa_group(p, last_cpupid, flags, &priv);
8c8a743c
PZ
2314 }
2315
792568ec
RR
2316 /*
2317 * If a workload spans multiple NUMA nodes, a shared fault that
2318 * occurs wholly within the set of nodes that the workload is
2319 * actively using should be counted as local. This allows the
2320 * scan rate to slow down when a workload has settled down.
2321 */
4142c3eb
RR
2322 ng = p->numa_group;
2323 if (!priv && !local && ng && ng->active_nodes > 1 &&
2324 numa_is_active_node(cpu_node, ng) &&
2325 numa_is_active_node(mem_node, ng))
792568ec
RR
2326 local = 1;
2327
cbee9f88 2328 task_numa_placement(p);
f809ca9a 2329
2739d3ee
RR
2330 /*
2331 * Retry the task-to-preferred-node migration periodically, in case it
2332 * previously failed, or the scheduler moved us.
2333 */
2334 if (time_after(jiffies, p->numa_migrate_retry))
6b9a7460
MG
2335 numa_migrate_preferred(p);
2336
b32e86b4
IM
2337 if (migrated)
2338 p->numa_pages_migrated += pages;
074c2381
MG
2339 if (flags & TNF_MIGRATE_FAIL)
2340 p->numa_faults_locality[2] += pages;
b32e86b4 2341
44dba3d5
IM
2342 p->numa_faults[task_faults_idx(NUMA_MEMBUF, mem_node, priv)] += pages;
2343 p->numa_faults[task_faults_idx(NUMA_CPUBUF, cpu_node, priv)] += pages;
792568ec 2344 p->numa_faults_locality[local] += pages;
cbee9f88
PZ
2345}
2346
6e5fb223
PZ
2347static void reset_ptenuma_scan(struct task_struct *p)
2348{
7e5a2c17
JL
2349 /*
2350 * We only did a read acquisition of the mmap sem, so
2351 * p->mm->numa_scan_seq is written to without exclusive access
2352 * and the update is not guaranteed to be atomic. That's not
2353 * much of an issue though, since this is just used for
2354 * statistical sampling. Use READ_ONCE/WRITE_ONCE, which are not
2355 * expensive, to avoid any form of compiler optimizations:
2356 */
316c1608 2357 WRITE_ONCE(p->mm->numa_scan_seq, READ_ONCE(p->mm->numa_scan_seq) + 1);
6e5fb223
PZ
2358 p->mm->numa_scan_offset = 0;
2359}
2360
cbee9f88
PZ
2361/*
2362 * The expensive part of numa migration is done from task_work context.
2363 * Triggered from task_tick_numa().
2364 */
2365void task_numa_work(struct callback_head *work)
2366{
2367 unsigned long migrate, next_scan, now = jiffies;
2368 struct task_struct *p = current;
2369 struct mm_struct *mm = p->mm;
51170840 2370 u64 runtime = p->se.sum_exec_runtime;
6e5fb223 2371 struct vm_area_struct *vma;
9f40604c 2372 unsigned long start, end;
598f0ec0 2373 unsigned long nr_pte_updates = 0;
4620f8c1 2374 long pages, virtpages;
cbee9f88 2375
9148a3a1 2376 SCHED_WARN_ON(p != container_of(work, struct task_struct, numa_work));
cbee9f88
PZ
2377
2378 work->next = work; /* protect against double add */
2379 /*
2380 * Who cares about NUMA placement when they're dying.
2381 *
2382 * NOTE: make sure not to dereference p->mm before this check,
2383 * exit_task_work() happens _after_ exit_mm() so we could be called
2384 * without p->mm even though we still had it when we enqueued this
2385 * work.
2386 */
2387 if (p->flags & PF_EXITING)
2388 return;
2389
930aa174 2390 if (!mm->numa_next_scan) {
7e8d16b6
MG
2391 mm->numa_next_scan = now +
2392 msecs_to_jiffies(sysctl_numa_balancing_scan_delay);
b8593bfd
MG
2393 }
2394
cbee9f88
PZ
2395 /*
2396 * Enforce maximal scan/migration frequency..
2397 */
2398 migrate = mm->numa_next_scan;
2399 if (time_before(now, migrate))
2400 return;
2401
598f0ec0
MG
2402 if (p->numa_scan_period == 0) {
2403 p->numa_scan_period_max = task_scan_max(p);
2404 p->numa_scan_period = task_scan_min(p);
2405 }
cbee9f88 2406
fb003b80 2407 next_scan = now + msecs_to_jiffies(p->numa_scan_period);
cbee9f88
PZ
2408 if (cmpxchg(&mm->numa_next_scan, migrate, next_scan) != migrate)
2409 return;
2410
19a78d11
PZ
2411 /*
2412 * Delay this task enough that another task of this mm will likely win
2413 * the next time around.
2414 */
2415 p->node_stamp += 2 * TICK_NSEC;
2416
9f40604c
MG
2417 start = mm->numa_scan_offset;
2418 pages = sysctl_numa_balancing_scan_size;
2419 pages <<= 20 - PAGE_SHIFT; /* MB in pages */
4620f8c1 2420 virtpages = pages * 8; /* Scan up to this much virtual space */
9f40604c
MG
2421 if (!pages)
2422 return;
cbee9f88 2423
4620f8c1 2424
6e5fb223 2425 down_read(&mm->mmap_sem);
9f40604c 2426 vma = find_vma(mm, start);
6e5fb223
PZ
2427 if (!vma) {
2428 reset_ptenuma_scan(p);
9f40604c 2429 start = 0;
6e5fb223
PZ
2430 vma = mm->mmap;
2431 }
9f40604c 2432 for (; vma; vma = vma->vm_next) {
6b79c57b 2433 if (!vma_migratable(vma) || !vma_policy_mof(vma) ||
8e76d4ee 2434 is_vm_hugetlb_page(vma) || (vma->vm_flags & VM_MIXEDMAP)) {
6e5fb223 2435 continue;
6b79c57b 2436 }
6e5fb223 2437
4591ce4f
MG
2438 /*
2439 * Shared library pages mapped by multiple processes are not
2440 * migrated as it is expected they are cache replicated. Avoid
2441 * hinting faults in read-only file-backed mappings or the vdso
2442 * as migrating the pages will be of marginal benefit.
2443 */
2444 if (!vma->vm_mm ||
2445 (vma->vm_file && (vma->vm_flags & (VM_READ|VM_WRITE)) == (VM_READ)))
2446 continue;
2447
3c67f474
MG
2448 /*
2449 * Skip inaccessible VMAs to avoid any confusion between
2450 * PROT_NONE and NUMA hinting ptes
2451 */
2452 if (!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)))
2453 continue;
4591ce4f 2454
9f40604c
MG
2455 do {
2456 start = max(start, vma->vm_start);
2457 end = ALIGN(start + (pages << PAGE_SHIFT), HPAGE_SIZE);
2458 end = min(end, vma->vm_end);
4620f8c1 2459 nr_pte_updates = change_prot_numa(vma, start, end);
598f0ec0
MG
2460
2461 /*
4620f8c1
RR
2462 * Try to scan sysctl_numa_balancing_scan_size worth of
2463 * hpages that have at least one present PTE that
2464 * is not already pte-numa. If the VMA contains
2465 * areas that are unused or already full of prot_numa
2466 * PTEs, scan up to virtpages, to skip through those
2467 * areas faster.
598f0ec0
MG
2468 */
2469 if (nr_pte_updates)
2470 pages -= (end - start) >> PAGE_SHIFT;
4620f8c1 2471 virtpages -= (end - start) >> PAGE_SHIFT;
6e5fb223 2472
9f40604c 2473 start = end;
4620f8c1 2474 if (pages <= 0 || virtpages <= 0)
9f40604c 2475 goto out;
3cf1962c
RR
2476
2477 cond_resched();
9f40604c 2478 } while (end != vma->vm_end);
cbee9f88 2479 }
6e5fb223 2480
9f40604c 2481out:
6e5fb223 2482 /*
c69307d5
PZ
2483 * It is possible to reach the end of the VMA list but the last few
2484 * VMAs are not guaranteed to be vma_migratable. If they are not, we
2485 * would find the !migratable VMA on the next scan but not reset the
2486 * scanner to the start so check it now.
6e5fb223
PZ
2487 */
2488 if (vma)
9f40604c 2489 mm->numa_scan_offset = start;
6e5fb223
PZ
2490 else
2491 reset_ptenuma_scan(p);
2492 up_read(&mm->mmap_sem);
51170840
RR
2493
2494 /*
2495 * Make sure tasks use at least 32x as much time to run other code
2496 * than they used here, to limit NUMA PTE scanning overhead to 3% max.
2497 * Usually update_task_scan_period slows down scanning enough; on an
2498 * overloaded system we need to limit overhead on a per task basis.
2499 */
2500 if (unlikely(p->se.sum_exec_runtime != runtime)) {
2501 u64 diff = p->se.sum_exec_runtime - runtime;
2502 p->node_stamp += 32 * diff;
2503 }
cbee9f88
PZ
2504}
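/*
 * Example of the runtime throttle above: if this pass of NUMA PTE scanning
 * consumed 2 ms of the task's runtime, node_stamp is pushed 64 ms further
 * into the future, so the task has to accumulate roughly 32x as much other
 * runtime before task_tick_numa() will queue the next scan (about 3%
 * worst-case overhead).
 */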
2505
2506/*
2507 * Drive the periodic memory faults..
2508 */
2509void task_tick_numa(struct rq *rq, struct task_struct *curr)
2510{
2511 struct callback_head *work = &curr->numa_work;
2512 u64 period, now;
2513
2514 /*
2515 * We don't care about NUMA placement if we don't have memory.
2516 */
2517 if (!curr->mm || (curr->flags & PF_EXITING) || work->next != work)
2518 return;
2519
2520 /*
2521 * Using runtime rather than walltime has the dual advantage that
2522 * we (mostly) drive the selection from busy threads and that the
2523 * task needs to have done some actual work before we bother with
2524 * NUMA placement.
2525 */
2526 now = curr->se.sum_exec_runtime;
2527 period = (u64)curr->numa_scan_period * NSEC_PER_MSEC;
2528
25b3e5a3 2529 if (now > curr->node_stamp + period) {
4b96a29b 2530 if (!curr->node_stamp)
598f0ec0 2531 curr->numa_scan_period = task_scan_min(curr);
19a78d11 2532 curr->node_stamp += period;
cbee9f88
PZ
2533
2534 if (!time_before(jiffies, curr->mm->numa_next_scan)) {
2535 init_task_work(work, task_numa_work); /* TODO: move this into sched_fork() */
2536 task_work_add(curr, work, true);
2537 }
2538 }
2539}
2540#else
2541static void task_tick_numa(struct rq *rq, struct task_struct *curr)
2542{
2543}
0ec8aa00
PZ
2544
2545static inline void account_numa_enqueue(struct rq *rq, struct task_struct *p)
2546{
2547}
2548
2549static inline void account_numa_dequeue(struct rq *rq, struct task_struct *p)
2550{
2551}
cbee9f88
PZ
2552#endif /* CONFIG_NUMA_BALANCING */
2553
30cfdcfc
DA
2554static void
2555account_entity_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
2556{
2557 update_load_add(&cfs_rq->load, se->load.weight);
c09595f6 2558 if (!parent_entity(se))
029632fb 2559 update_load_add(&rq_of(cfs_rq)->load, se->load.weight);
367456c7 2560#ifdef CONFIG_SMP
0ec8aa00
PZ
2561 if (entity_is_task(se)) {
2562 struct rq *rq = rq_of(cfs_rq);
2563
2564 account_numa_enqueue(rq, task_of(se));
2565 list_add(&se->group_node, &rq->cfs_tasks);
2566 }
367456c7 2567#endif
30cfdcfc 2568 cfs_rq->nr_running++;
30cfdcfc
DA
2569}
2570
2571static void
2572account_entity_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
2573{
2574 update_load_sub(&cfs_rq->load, se->load.weight);
c09595f6 2575 if (!parent_entity(se))
029632fb 2576 update_load_sub(&rq_of(cfs_rq)->load, se->load.weight);
bfdb198c 2577#ifdef CONFIG_SMP
0ec8aa00
PZ
2578 if (entity_is_task(se)) {
2579 account_numa_dequeue(rq_of(cfs_rq), task_of(se));
b87f1724 2580 list_del_init(&se->group_node);
0ec8aa00 2581 }
bfdb198c 2582#endif
30cfdcfc 2583 cfs_rq->nr_running--;
30cfdcfc
DA
2584}
2585
3ff6dcac
YZ
2586#ifdef CONFIG_FAIR_GROUP_SCHED
2587# ifdef CONFIG_SMP
ea1dc6fc 2588static long calc_cfs_shares(struct cfs_rq *cfs_rq, struct task_group *tg)
cf5f0acf 2589{
ea1dc6fc 2590 long tg_weight, load, shares;
cf5f0acf
PZ
2591
2592 /*
ea1dc6fc
PZ
2593 * This really should be: cfs_rq->avg.load_avg, but instead we use
2594 * cfs_rq->load.weight, which is its upper bound. This helps ramp up
2595 * the shares for small weight interactive tasks.
cf5f0acf 2596 */
ea1dc6fc 2597 load = scale_load_down(cfs_rq->load.weight);
cf5f0acf 2598
ea1dc6fc 2599 tg_weight = atomic_long_read(&tg->load_avg);
3ff6dcac 2600
ea1dc6fc
PZ
2601 /* Ensure tg_weight >= load */
2602 tg_weight -= cfs_rq->tg_load_avg_contrib;
2603 tg_weight += load;
3ff6dcac 2604
3ff6dcac 2605 shares = (tg->shares * load);
cf5f0acf
PZ
2606 if (tg_weight)
2607 shares /= tg_weight;
3ff6dcac
YZ
2608
2609 if (shares < MIN_SHARES)
2610 shares = MIN_SHARES;
2611 if (shares > tg->shares)
2612 shares = tg->shares;
2613
2614 return shares;
2615}
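/*
 * Worked example (illustrative values): tg->shares = 1024 and the group
 * runs on several CPUs. If this cfs_rq has load = 1024, its last
 * propagated contribution (tg_load_avg_contrib) is also 1024, and the
 * group-wide tg->load_avg is 3072, then
 *
 *   tg_weight = 3072 - 1024 + 1024 = 3072
 *   shares    = 1024 * 1024 / 3072 = 341
 *
 * i.e. this CPU's group entity gets roughly a third of the group's weight,
 * matching its third of the group's load, and the result is clamped to
 * [MIN_SHARES, tg->shares].
 */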
3ff6dcac 2616# else /* CONFIG_SMP */
6d5ab293 2617static inline long calc_cfs_shares(struct cfs_rq *cfs_rq, struct task_group *tg)
3ff6dcac
YZ
2618{
2619 return tg->shares;
2620}
3ff6dcac 2621# endif /* CONFIG_SMP */
ea1dc6fc 2622
2069dd75
PZ
2623static void reweight_entity(struct cfs_rq *cfs_rq, struct sched_entity *se,
2624 unsigned long weight)
2625{
19e5eebb
PT
2626 if (se->on_rq) {
2627 /* commit outstanding execution time */
2628 if (cfs_rq->curr == se)
2629 update_curr(cfs_rq);
2069dd75 2630 account_entity_dequeue(cfs_rq, se);
19e5eebb 2631 }
2069dd75
PZ
2632
2633 update_load_set(&se->load, weight);
2634
2635 if (se->on_rq)
2636 account_entity_enqueue(cfs_rq, se);
2637}
2638
82958366
PT
2639static inline int throttled_hierarchy(struct cfs_rq *cfs_rq);
2640
6d5ab293 2641static void update_cfs_shares(struct cfs_rq *cfs_rq)
2069dd75
PZ
2642{
2643 struct task_group *tg;
2644 struct sched_entity *se;
3ff6dcac 2645 long shares;
2069dd75 2646
2069dd75
PZ
2647 tg = cfs_rq->tg;
2648 se = tg->se[cpu_of(rq_of(cfs_rq))];
64660c86 2649 if (!se || throttled_hierarchy(cfs_rq))
2069dd75 2650 return;
3ff6dcac
YZ
2651#ifndef CONFIG_SMP
2652 if (likely(se->load.weight == tg->shares))
2653 return;
2654#endif
6d5ab293 2655 shares = calc_cfs_shares(cfs_rq, tg);
2069dd75
PZ
2656
2657 reweight_entity(cfs_rq_of(se), se, shares);
2658}
2659#else /* CONFIG_FAIR_GROUP_SCHED */
6d5ab293 2660static inline void update_cfs_shares(struct cfs_rq *cfs_rq)
2069dd75
PZ
2661{
2662}
2663#endif /* CONFIG_FAIR_GROUP_SCHED */
2664
141965c7 2665#ifdef CONFIG_SMP
5b51f2f8
PT
2666/* Precomputed fixed inverse multiplies for multiplication by y^n */
2667static const u32 runnable_avg_yN_inv[] = {
2668 0xffffffff, 0xfa83b2da, 0xf5257d14, 0xefe4b99a, 0xeac0c6e6, 0xe5b906e6,
2669 0xe0ccdeeb, 0xdbfbb796, 0xd744fcc9, 0xd2a81d91, 0xce248c14, 0xc9b9bd85,
2670 0xc5672a10, 0xc12c4cc9, 0xbd08a39e, 0xb8fbaf46, 0xb504f333, 0xb123f581,
2671 0xad583ee9, 0xa9a15ab4, 0xa5fed6a9, 0xa2704302, 0x9ef5325f, 0x9b8d39b9,
2672 0x9837f050, 0x94f4efa8, 0x91c3d373, 0x8ea4398a, 0x8b95c1e3, 0x88980e80,
2673 0x85aac367, 0x82cd8698,
2674};
2675
2676/*
2677 * Precomputed \Sum y^k { 1<=k<=n }. These are floor(true_value) to prevent
2678 * over-estimates when re-combining.
2679 */
2680static const u32 runnable_avg_yN_sum[] = {
2681 0, 1002, 1982, 2941, 3880, 4798, 5697, 6576, 7437, 8279, 9103,
2682 9909,10698,11470,12226,12966,13690,14398,15091,15769,16433,17082,
2683 17718,18340,18949,19545,20128,20698,21256,21802,22336,22859,23371,
2684};
2685
7b20b916
YD
2686/*
2687 * Precomputed \Sum y^k { 1<=k<=n, where n%32==0 }. Values are rolled down to
2688 * lower integers. See Documentation/scheduler/sched-avg.txt for how these
2689 * were generated:
2690 */
2691static const u32 __accumulated_sum_N32[] = {
2692 0, 23371, 35056, 40899, 43820, 45281,
2693 46011, 46376, 46559, 46650, 46696, 46719,
2694};
2695
9d85f21c
PT
2696/*
2697 * Approximate:
2698 * val * y^n, where y^32 ~= 0.5 (~1 scheduling period)
2699 */
2700static __always_inline u64 decay_load(u64 val, u64 n)
2701{
5b51f2f8
PT
2702 unsigned int local_n;
2703
2704 if (!n)
2705 return val;
2706 else if (unlikely(n > LOAD_AVG_PERIOD * 63))
2707 return 0;
2708
2709 /* after bounds checking we can collapse to 32-bit */
2710 local_n = n;
2711
2712 /*
2713 * As y^PERIOD = 1/2, we can combine
9c58c79a
ZZ
2714 * y^n = 1/2^(n/PERIOD) * y^(n%PERIOD)
2715 * With a look-up table which covers y^n (n<PERIOD)
5b51f2f8
PT
2716 *
2717 * To achieve constant time decay_load.
2718 */
2719 if (unlikely(local_n >= LOAD_AVG_PERIOD)) {
2720 val >>= local_n / LOAD_AVG_PERIOD;
2721 local_n %= LOAD_AVG_PERIOD;
9d85f21c
PT
2722 }
2723
9d89c257
YD
2724 val = mul_u64_u32_shr(val, runnable_avg_yN_inv[local_n], 32);
2725 return val;
5b51f2f8
PT
2726}
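/*
 * Rough numerical feel for decay_load() (values are approximate because of
 * the 32-bit fixed-point table): y is chosen so that y^32 == 0.5, i.e.
 * y ~= 0.97857, and runnable_avg_yN_inv[n] holds y^n in 32-bit fixed point.
 *
 *   decay_load(1024, 16) ~= 1024 * y^16 ~= 1024 * 0.7071 ~= 724
 *   decay_load(1024, 32) ~= 1024 * 0.5   =  512
 *   decay_load(1024, 64) ~= 1024 * 0.25  =  256
 *
 * and anything older than 63 * LOAD_AVG_PERIOD periods decays to 0.
 */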
2727
2728/*
2729 * For updates fully spanning n periods, the contribution to runnable
2730 * average will be: \Sum 1024*y^n
2731 *
2732 * We can compute this reasonably efficiently by combining:
2733 * y^PERIOD = 1/2 with precomputed \Sum 1024*y^n {for n <PERIOD}
2734 */
2735static u32 __compute_runnable_contrib(u64 n)
2736{
2737 u32 contrib = 0;
2738
2739 if (likely(n <= LOAD_AVG_PERIOD))
2740 return runnable_avg_yN_sum[n];
2741 else if (unlikely(n >= LOAD_AVG_MAX_N))
2742 return LOAD_AVG_MAX;
2743
7b20b916
YD
2744 /* Since n < LOAD_AVG_MAX_N, n/LOAD_AVG_PERIOD < 11 */
2745 contrib = __accumulated_sum_N32[n/LOAD_AVG_PERIOD];
2746 n %= LOAD_AVG_PERIOD;
5b51f2f8
PT
2747 contrib = decay_load(contrib, n);
2748 return contrib + runnable_avg_yN_sum[n];
9d85f21c
PT
2749}
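/*
 * Quick sanity check of the pieces above (assuming LOAD_AVG_PERIOD == 32
 * and LOAD_AVG_MAX == 47742, as defined elsewhere in the scheduler headers):
 *
 *   __compute_runnable_contrib(32) = runnable_avg_yN_sum[32]  = 23371
 *   __compute_runnable_contrib(64) = __accumulated_sum_N32[2] = 35056
 *                                  ~= 23371 + 23371/2, i.e. the first 32
 *                                     periods plus the next 32 decayed by 1/2
 *
 * and as n approaches LOAD_AVG_MAX_N the sum converges to LOAD_AVG_MAX.
 */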
2750
54a21385 2751#define cap_scale(v, s) ((v)*(s) >> SCHED_CAPACITY_SHIFT)
e0f5f3af 2752
9d85f21c
PT
2753/*
2754 * We can represent the historical contribution to runnable average as the
2755 * coefficients of a geometric series. To do this we sub-divide our runnable
2756 * history into segments of approximately 1ms (1024us); label the segment that
2757 * occurred N-ms ago p_N, with p_0 corresponding to the current period, e.g.
2758 *
2759 * [<- 1024us ->|<- 1024us ->|<- 1024us ->| ...
2760 * p0 p1 p2
2761 * (now) (~1ms ago) (~2ms ago)
2762 *
2763 * Let u_i denote the fraction of p_i that the entity was runnable.
2764 *
2765 * We then designate the fractions u_i as our co-efficients, yielding the
2766 * following representation of historical load:
2767 * u_0 + u_1*y + u_2*y^2 + u_3*y^3 + ...
2768 *
2769 * We choose y based on the width of a reasonable scheduling period, fixing:
2770 * y^32 = 0.5
2771 *
2772 * This means that the contribution to load ~32ms ago (u_32) will be weighted
2773 * approximately half as much as the contribution to load within the last ms
2774 * (u_0).
2775 *
2776 * When a period "rolls over" and we have new u_0`, multiplying the previous
2777 * sum again by y is sufficient to update:
2778 * load_avg = u_0` + y*(u_0 + u_1*y + u_2*y^2 + ... )
2779 * = u_0 + u_1*y + u_2*y^2 + ... [re-labeling u_i --> u_{i+1}]
2780 */
9d89c257
YD
2781static __always_inline int
2782__update_load_avg(u64 now, int cpu, struct sched_avg *sa,
13962234 2783 unsigned long weight, int running, struct cfs_rq *cfs_rq)
9d85f21c 2784{
e0f5f3af 2785 u64 delta, scaled_delta, periods;
9d89c257 2786 u32 contrib;
6115c793 2787 unsigned int delta_w, scaled_delta_w, decayed = 0;
6f2b0452 2788 unsigned long scale_freq, scale_cpu;
9d85f21c 2789
9d89c257 2790 delta = now - sa->last_update_time;
9d85f21c
PT
2791 /*
2792 * This should only happen when time goes backwards, which it
2793 * unfortunately does during sched clock init when we swap over to TSC.
2794 */
2795 if ((s64)delta < 0) {
9d89c257 2796 sa->last_update_time = now;
9d85f21c
PT
2797 return 0;
2798 }
2799
2800 /*
2801 * Use 1024ns as the unit of measurement since it's a reasonable
2802 * approximation of 1us and fast to compute.
2803 */
2804 delta >>= 10;
2805 if (!delta)
2806 return 0;
9d89c257 2807 sa->last_update_time = now;
9d85f21c 2808
6f2b0452
DE
2809 scale_freq = arch_scale_freq_capacity(NULL, cpu);
2810 scale_cpu = arch_scale_cpu_capacity(NULL, cpu);
2811
9d85f21c 2812 /* delta_w is the amount already accumulated against our next period */
9d89c257 2813 delta_w = sa->period_contrib;
9d85f21c 2814 if (delta + delta_w >= 1024) {
9d85f21c
PT
2815 decayed = 1;
2816
9d89c257
YD
2817 /* how much left for next period will start over, we don't know yet */
2818 sa->period_contrib = 0;
2819
9d85f21c
PT
2820 /*
2821 * Now that we know we're crossing a period boundary, figure
2822 * out how much from delta we need to complete the current
2823 * period and accrue it.
2824 */
2825 delta_w = 1024 - delta_w;
54a21385 2826 scaled_delta_w = cap_scale(delta_w, scale_freq);
13962234 2827 if (weight) {
e0f5f3af
DE
2828 sa->load_sum += weight * scaled_delta_w;
2829 if (cfs_rq) {
2830 cfs_rq->runnable_load_sum +=
2831 weight * scaled_delta_w;
2832 }
13962234 2833 }
36ee28e4 2834 if (running)
006cdf02 2835 sa->util_sum += scaled_delta_w * scale_cpu;
5b51f2f8
PT
2836
2837 delta -= delta_w;
2838
2839 /* Figure out how many additional periods this update spans */
2840 periods = delta / 1024;
2841 delta %= 1024;
2842
9d89c257 2843 sa->load_sum = decay_load(sa->load_sum, periods + 1);
13962234
YD
2844 if (cfs_rq) {
2845 cfs_rq->runnable_load_sum =
2846 decay_load(cfs_rq->runnable_load_sum, periods + 1);
2847 }
9d89c257 2848 sa->util_sum = decay_load((u64)(sa->util_sum), periods + 1);
5b51f2f8
PT
2849
2850 /* Efficiently calculate \sum (1..n_period) 1024*y^i */
9d89c257 2851 contrib = __compute_runnable_contrib(periods);
54a21385 2852 contrib = cap_scale(contrib, scale_freq);
13962234 2853 if (weight) {
9d89c257 2854 sa->load_sum += weight * contrib;
13962234
YD
2855 if (cfs_rq)
2856 cfs_rq->runnable_load_sum += weight * contrib;
2857 }
36ee28e4 2858 if (running)
006cdf02 2859 sa->util_sum += contrib * scale_cpu;
9d85f21c
PT
2860 }
2861
2862 /* Remainder of delta accrued against u_0` */
54a21385 2863 scaled_delta = cap_scale(delta, scale_freq);
13962234 2864 if (weight) {
e0f5f3af 2865 sa->load_sum += weight * scaled_delta;
13962234 2866 if (cfs_rq)
e0f5f3af 2867 cfs_rq->runnable_load_sum += weight * scaled_delta;
13962234 2868 }
36ee28e4 2869 if (running)
006cdf02 2870 sa->util_sum += scaled_delta * scale_cpu;
9ee474f5 2871
9d89c257 2872 sa->period_contrib += delta;
9ee474f5 2873
9d89c257
YD
2874 if (decayed) {
2875 sa->load_avg = div_u64(sa->load_sum, LOAD_AVG_MAX);
13962234
YD
2876 if (cfs_rq) {
2877 cfs_rq->runnable_load_avg =
2878 div_u64(cfs_rq->runnable_load_sum, LOAD_AVG_MAX);
2879 }
006cdf02 2880 sa->util_avg = sa->util_sum / LOAD_AVG_MAX;
9d89c257 2881 }
aff3e498 2882
9d89c257 2883 return decayed;
9ee474f5
PT
2884}
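/*
 * Walk-through of the accumulation above (illustrative numbers, ignoring
 * the frequency/CPU-capacity scaling): suppose sa->period_contrib == 300
 * and 2000 us have elapsed (delta == 2000 after the >> 10).
 *
 *   delta + delta_w = 2300 >= 1024, so a period boundary is crossed:
 *     - delta_w = 1024 - 300 = 724 us completes the current period,
 *     - delta becomes 1276, giving periods = 1 full period and a 252 us
 *       remainder,
 *     - the old sums are decayed by y^(periods + 1) = y^2 and the full
 *       period contributes __compute_runnable_contrib(1) = 1002,
 *     - the 252 us remainder is accrued against the new period and left
 *       in sa->period_contrib.
 *
 *   Only then (decayed == 1) are load_avg/util_avg recomputed from the sums.
 */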
2885
c566e8e9 2886#ifdef CONFIG_FAIR_GROUP_SCHED
7c3edd2c
PZ
2887/**
2888 * update_tg_load_avg - update the tg's load avg
2889 * @cfs_rq: the cfs_rq whose avg changed
2890 * @force: update regardless of how small the difference
2891 *
2892 * This function 'ensures': tg->load_avg := \Sum tg->cfs_rq[]->avg.load.
2893 * However, because tg->load_avg is a global value there are performance
2894 * considerations.
2895 *
2896 * In order to avoid having to look at the other cfs_rq's, we use a
2897 * differential update where we store the last value we propagated. This in
2898 * turn allows skipping updates if the differential is 'small'.
2899 *
2900 * Updating tg's load_avg is necessary before update_cfs_shares() (which is
2901 * done) and effective_load() (which is not done because it is too costly).
bb17f655 2902 */
9d89c257 2903static inline void update_tg_load_avg(struct cfs_rq *cfs_rq, int force)
bb17f655 2904{
9d89c257 2905 long delta = cfs_rq->avg.load_avg - cfs_rq->tg_load_avg_contrib;
bb17f655 2906
aa0b7ae0
WL
2907 /*
2908 * No need to update load_avg for root_task_group as it is not used.
2909 */
2910 if (cfs_rq->tg == &root_task_group)
2911 return;
2912
9d89c257
YD
2913 if (force || abs(delta) > cfs_rq->tg_load_avg_contrib / 64) {
2914 atomic_long_add(delta, &cfs_rq->tg->load_avg);
2915 cfs_rq->tg_load_avg_contrib = cfs_rq->avg.load_avg;
bb17f655 2916 }
8165e145 2917}
f5f9739d 2918
ad936d86
BP
2919/*
2920 * Called within set_task_rq() right before setting a task's cpu. The
2921 * caller only guarantees p->pi_lock is held; no other assumptions,
2922 * including the state of rq->lock, should be made.
2923 */
2924void set_task_rq_fair(struct sched_entity *se,
2925 struct cfs_rq *prev, struct cfs_rq *next)
2926{
2927 if (!sched_feat(ATTACH_AGE_LOAD))
2928 return;
2929
2930 /*
2931 * We are supposed to update the task to "current" time, so that it is up to
2932 * date and ready to go to the new CPU/cfs_rq. But we have difficulty in
2933 * getting what the current time is, so simply throw away the out-of-date
2934 * time. This will result in the wakee task being less decayed, but giving
2935 * the wakee a bit more load does not sound too bad.
2936 */
2937 if (se->avg.last_update_time && prev) {
2938 u64 p_last_update_time;
2939 u64 n_last_update_time;
2940
2941#ifndef CONFIG_64BIT
2942 u64 p_last_update_time_copy;
2943 u64 n_last_update_time_copy;
2944
2945 do {
2946 p_last_update_time_copy = prev->load_last_update_time_copy;
2947 n_last_update_time_copy = next->load_last_update_time_copy;
2948
2949 smp_rmb();
2950
2951 p_last_update_time = prev->avg.last_update_time;
2952 n_last_update_time = next->avg.last_update_time;
2953
2954 } while (p_last_update_time != p_last_update_time_copy ||
2955 n_last_update_time != n_last_update_time_copy);
2956#else
2957 p_last_update_time = prev->avg.last_update_time;
2958 n_last_update_time = next->avg.last_update_time;
2959#endif
2960 __update_load_avg(p_last_update_time, cpu_of(rq_of(prev)),
2961 &se->avg, 0, 0, NULL);
2962 se->avg.last_update_time = n_last_update_time;
2963 }
2964}
6e83125c 2965#else /* CONFIG_FAIR_GROUP_SCHED */
9d89c257 2966static inline void update_tg_load_avg(struct cfs_rq *cfs_rq, int force) {}
6e83125c 2967#endif /* CONFIG_FAIR_GROUP_SCHED */
c566e8e9 2968
a2c6c91f
SM
2969static inline void cfs_rq_util_change(struct cfs_rq *cfs_rq)
2970{
58919e83 2971 if (&this_rq()->cfs == cfs_rq) {
a2c6c91f
SM
2972 /*
2973 * There are a few boundary cases this might miss but it should
2974 * get called often enough that that should (hopefully) not be
2975 * a real problem -- added to that it only calls on the local
2976 * CPU, so if we enqueue remotely we'll miss an update, but
2977 * the next tick/schedule should update.
2978 *
2979 * It will not get called when we go idle, because the idle
2980 * thread is a different class (!fair), nor will the utilization
2981 * number include things like RT tasks.
2982 *
2983 * As is, the util number is not freq-invariant (we'd have to
2984 * implement arch_scale_freq_capacity() for that).
2985 *
2986 * See cpu_util().
2987 */
12bde33d 2988 cpufreq_update_util(rq_of(cfs_rq), 0);
a2c6c91f
SM
2989 }
2990}
2991
89741892
PZ
2992/*
2993 * Unsigned subtract and clamp on underflow.
2994 *
2995 * Explicitly do a load-store to ensure the intermediate value never hits
2996 * memory. This allows lockless observations without ever seeing the negative
2997 * values.
2998 */
2999#define sub_positive(_ptr, _val) do { \
3000 typeof(_ptr) ptr = (_ptr); \
3001 typeof(*ptr) val = (_val); \
3002 typeof(*ptr) res, var = READ_ONCE(*ptr); \
3003 res = var - val; \
3004 if (res > var) \
3005 res = 0; \
3006 WRITE_ONCE(*ptr, res); \
3007} while (0)
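/*
 * Usage sketch: sub_positive() is what keeps the removed-load accounting
 * below from wrapping around. For example, with
 *
 *   unsigned long util = 100;
 *   sub_positive(&util, 150);
 *
 * util ends up as 0 rather than a huge unsigned value, and the store is a
 * single WRITE_ONCE() so concurrent lockless readers never observe the
 * negative intermediate.
 */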
3008
3d30544f
PZ
3009/**
3010 * update_cfs_rq_load_avg - update the cfs_rq's load/util averages
3011 * @now: current time, as per cfs_rq_clock_task()
3012 * @cfs_rq: cfs_rq to update
3013 * @update_freq: should we call cfs_rq_util_change() or will the call do so
3014 *
3015 * The cfs_rq avg is the direct sum of all its entities (blocked and runnable)
3016 * avg. The immediate corollary is that all (fair) tasks must be attached, see
3017 * post_init_entity_util_avg().
3018 *
3019 * cfs_rq->avg is used for task_h_load() and update_cfs_shares(), for example.
3020 *
7c3edd2c
PZ
3021 * Returns true if the load decayed or we removed load.
3022 *
3023 * Since both these conditions indicate a changed cfs_rq->avg.load we should
3024 * call update_tg_load_avg() when this function returns true.
3d30544f 3025 */
a2c6c91f
SM
3026static inline int
3027update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq, bool update_freq)
2dac754e 3028{
9d89c257 3029 struct sched_avg *sa = &cfs_rq->avg;
41e0d37f 3030 int decayed, removed_load = 0, removed_util = 0;
2dac754e 3031
9d89c257 3032 if (atomic_long_read(&cfs_rq->removed_load_avg)) {
9e0e83a1 3033 s64 r = atomic_long_xchg(&cfs_rq->removed_load_avg, 0);
89741892
PZ
3034 sub_positive(&sa->load_avg, r);
3035 sub_positive(&sa->load_sum, r * LOAD_AVG_MAX);
41e0d37f 3036 removed_load = 1;
8165e145 3037 }
2dac754e 3038
9d89c257
YD
3039 if (atomic_long_read(&cfs_rq->removed_util_avg)) {
3040 long r = atomic_long_xchg(&cfs_rq->removed_util_avg, 0);
89741892
PZ
3041 sub_positive(&sa->util_avg, r);
3042 sub_positive(&sa->util_sum, r * LOAD_AVG_MAX);
41e0d37f 3043 removed_util = 1;
9d89c257 3044 }
36ee28e4 3045
a2c6c91f 3046 decayed = __update_load_avg(now, cpu_of(rq_of(cfs_rq)), sa,
13962234 3047 scale_load_down(cfs_rq->load.weight), cfs_rq->curr != NULL, cfs_rq);
36ee28e4 3048
9d89c257
YD
3049#ifndef CONFIG_64BIT
3050 smp_wmb();
3051 cfs_rq->load_last_update_time_copy = sa->last_update_time;
3052#endif
36ee28e4 3053
a2c6c91f
SM
3054 if (update_freq && (decayed || removed_util))
3055 cfs_rq_util_change(cfs_rq);
21e96f88 3056
41e0d37f 3057 return decayed || removed_load;
21e96f88
SM
3058}
3059
3060/* Update task and its cfs_rq load average */
3061static inline void update_load_avg(struct sched_entity *se, int update_tg)
3062{
3063 struct cfs_rq *cfs_rq = cfs_rq_of(se);
3064 u64 now = cfs_rq_clock_task(cfs_rq);
3065 struct rq *rq = rq_of(cfs_rq);
3066 int cpu = cpu_of(rq);
3067
3068 /*
3069 * Track the task's load average so it can be carried to a new CPU after migration,
3070 * and track the group sched_entity load average for the task_h_load calculation in migration
3071 */
3072 __update_load_avg(now, cpu, &se->avg,
3073 se->on_rq * scale_load_down(se->load.weight),
3074 cfs_rq->curr == se, NULL);
3075
a2c6c91f 3076 if (update_cfs_rq_load_avg(now, cfs_rq, true) && update_tg)
21e96f88 3077 update_tg_load_avg(cfs_rq, 0);
9ee474f5
PT
3078}
3079
3d30544f
PZ
3080/**
3081 * attach_entity_load_avg - attach this entity to its cfs_rq load avg
3082 * @cfs_rq: cfs_rq to attach to
3083 * @se: sched_entity to attach
3084 *
3085 * Must call update_cfs_rq_load_avg() before this, since we rely on
3086 * cfs_rq->avg.last_update_time being current.
3087 */
a05e8c51
BP
3088static void attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
3089{
a9280514
PZ
3090 if (!sched_feat(ATTACH_AGE_LOAD))
3091 goto skip_aging;
3092
6efdb105
BP
3093 /*
3094 * If we got migrated (either between CPUs or between cgroups) we'll
3095 * have aged the average right before clearing @last_update_time.
7dc603c9
PZ
3096 *
3097 * Or we're fresh through post_init_entity_util_avg().
6efdb105
BP
3098 */
3099 if (se->avg.last_update_time) {
3100 __update_load_avg(cfs_rq->avg.last_update_time, cpu_of(rq_of(cfs_rq)),
3101 &se->avg, 0, 0, NULL);
3102
3103 /*
3104 * XXX: we could have just aged the entire load away if we've been
3105 * absent from the fair class for too long.
3106 */
3107 }
3108
a9280514 3109skip_aging:
a05e8c51
BP
3110 se->avg.last_update_time = cfs_rq->avg.last_update_time;
3111 cfs_rq->avg.load_avg += se->avg.load_avg;
3112 cfs_rq->avg.load_sum += se->avg.load_sum;
3113 cfs_rq->avg.util_avg += se->avg.util_avg;
3114 cfs_rq->avg.util_sum += se->avg.util_sum;
a2c6c91f
SM
3115
3116 cfs_rq_util_change(cfs_rq);
a05e8c51
BP
3117}
3118
3d30544f
PZ
3119/**
3120 * detach_entity_load_avg - detach this entity from its cfs_rq load avg
3121 * @cfs_rq: cfs_rq to detach from
3122 * @se: sched_entity to detach
3123 *
3124 * Must call update_cfs_rq_load_avg() before this, since we rely on
3125 * cfs_rq->avg.last_update_time being current.
3126 */
a05e8c51
BP
3127static void detach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
3128{
3129 __update_load_avg(cfs_rq->avg.last_update_time, cpu_of(rq_of(cfs_rq)),
3130 &se->avg, se->on_rq * scale_load_down(se->load.weight),
3131 cfs_rq->curr == se, NULL);
3132
89741892
PZ
3133 sub_positive(&cfs_rq->avg.load_avg, se->avg.load_avg);
3134 sub_positive(&cfs_rq->avg.load_sum, se->avg.load_sum);
3135 sub_positive(&cfs_rq->avg.util_avg, se->avg.util_avg);
3136 sub_positive(&cfs_rq->avg.util_sum, se->avg.util_sum);
a2c6c91f
SM
3137
3138 cfs_rq_util_change(cfs_rq);
a05e8c51
BP
3139}
3140
9d89c257
YD
3141/* Add the load generated by se into cfs_rq's load average */
3142static inline void
3143enqueue_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
9ee474f5 3144{
9d89c257
YD
3145 struct sched_avg *sa = &se->avg;
3146 u64 now = cfs_rq_clock_task(cfs_rq);
a05e8c51 3147 int migrated, decayed;
9ee474f5 3148
a05e8c51
BP
3149 migrated = !sa->last_update_time;
3150 if (!migrated) {
9d89c257 3151 __update_load_avg(now, cpu_of(rq_of(cfs_rq)), sa,
13962234
YD
3152 se->on_rq * scale_load_down(se->load.weight),
3153 cfs_rq->curr == se, NULL);
aff3e498 3154 }
c566e8e9 3155
a2c6c91f 3156 decayed = update_cfs_rq_load_avg(now, cfs_rq, !migrated);
18bf2805 3157
13962234
YD
3158 cfs_rq->runnable_load_avg += sa->load_avg;
3159 cfs_rq->runnable_load_sum += sa->load_sum;
3160
a05e8c51
BP
3161 if (migrated)
3162 attach_entity_load_avg(cfs_rq, se);
9ee474f5 3163
9d89c257
YD
3164 if (decayed || migrated)
3165 update_tg_load_avg(cfs_rq, 0);
2dac754e
PT
3166}
3167
13962234
YD
3168/* Remove the runnable load generated by se from cfs_rq's runnable load average */
3169static inline void
3170dequeue_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
3171{
3172 update_load_avg(se, 1);
3173
3174 cfs_rq->runnable_load_avg =
3175 max_t(long, cfs_rq->runnable_load_avg - se->avg.load_avg, 0);
3176 cfs_rq->runnable_load_sum =
a05e8c51 3177 max_t(s64, cfs_rq->runnable_load_sum - se->avg.load_sum, 0);
13962234
YD
3178}
3179
9d89c257 3180#ifndef CONFIG_64BIT
0905f04e
YD
3181static inline u64 cfs_rq_last_update_time(struct cfs_rq *cfs_rq)
3182{
9d89c257 3183 u64 last_update_time_copy;
0905f04e 3184 u64 last_update_time;
9ee474f5 3185
9d89c257
YD
3186 do {
3187 last_update_time_copy = cfs_rq->load_last_update_time_copy;
3188 smp_rmb();
3189 last_update_time = cfs_rq->avg.last_update_time;
3190 } while (last_update_time != last_update_time_copy);
0905f04e
YD
3191
3192 return last_update_time;
3193}
9d89c257 3194#else
0905f04e
YD
3195static inline u64 cfs_rq_last_update_time(struct cfs_rq *cfs_rq)
3196{
3197 return cfs_rq->avg.last_update_time;
3198}
9d89c257
YD
3199#endif
3200
0905f04e
YD
3201/*
3202 * Task first catches up with cfs_rq, and then subtract
3203 * itself from the cfs_rq (task must be off the queue now).
3204 */
3205void remove_entity_load_avg(struct sched_entity *se)
3206{
3207 struct cfs_rq *cfs_rq = cfs_rq_of(se);
3208 u64 last_update_time;
3209
3210 /*
7dc603c9
PZ
3211 * tasks cannot exit without having gone through wake_up_new_task() ->
3212 * post_init_entity_util_avg() which will have added things to the
3213 * cfs_rq, so we can remove unconditionally.
3214 *
3215 * Similarly for groups, they will have passed through
3216 * post_init_entity_util_avg() before unregister_sched_fair_group()
3217 * calls this.
0905f04e 3218 */
0905f04e
YD
3219
3220 last_update_time = cfs_rq_last_update_time(cfs_rq);
3221
13962234 3222 __update_load_avg(last_update_time, cpu_of(rq_of(cfs_rq)), &se->avg, 0, 0, NULL);
9d89c257
YD
3223 atomic_long_add(se->avg.load_avg, &cfs_rq->removed_load_avg);
3224 atomic_long_add(se->avg.util_avg, &cfs_rq->removed_util_avg);
2dac754e 3225}
642dbc39 3226
7ea241af
YD
3227static inline unsigned long cfs_rq_runnable_load_avg(struct cfs_rq *cfs_rq)
3228{
3229 return cfs_rq->runnable_load_avg;
3230}
3231
3232static inline unsigned long cfs_rq_load_avg(struct cfs_rq *cfs_rq)
3233{
3234 return cfs_rq->avg.load_avg;
3235}
3236
6e83125c
PZ
3237static int idle_balance(struct rq *this_rq);
3238
38033c37
PZ
3239#else /* CONFIG_SMP */
3240
01011473
PZ
3241static inline int
3242update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq, bool update_freq)
3243{
3244 return 0;
3245}
3246
536bd00c
RW
3247static inline void update_load_avg(struct sched_entity *se, int not_used)
3248{
12bde33d 3249 cpufreq_update_util(rq_of(cfs_rq_of(se)), 0);
536bd00c
RW
3250}
3251
9d89c257
YD
3252static inline void
3253enqueue_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) {}
13962234
YD
3254static inline void
3255dequeue_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) {}
9d89c257 3256static inline void remove_entity_load_avg(struct sched_entity *se) {}
6e83125c 3257
a05e8c51
BP
3258static inline void
3259attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) {}
3260static inline void
3261detach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) {}
3262
6e83125c
PZ
3263static inline int idle_balance(struct rq *rq)
3264{
3265 return 0;
3266}
3267
38033c37 3268#endif /* CONFIG_SMP */
9d85f21c 3269
ddc97297
PZ
3270static void check_spread(struct cfs_rq *cfs_rq, struct sched_entity *se)
3271{
3272#ifdef CONFIG_SCHED_DEBUG
3273 s64 d = se->vruntime - cfs_rq->min_vruntime;
3274
3275 if (d < 0)
3276 d = -d;
3277
3278 if (d > 3*sysctl_sched_latency)
ae92882e 3279 schedstat_inc(cfs_rq->nr_spread_over);
ddc97297
PZ
3280#endif
3281}
3282
aeb73b04
PZ
3283static void
3284place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
3285{
1af5f730 3286 u64 vruntime = cfs_rq->min_vruntime;
94dfb5e7 3287
2cb8600e
PZ
3288 /*
3289 * The 'current' period is already promised to the current tasks,
3290 * however the extra weight of the new task will slow them down a
3291 * little, place the new task so that it fits in the slot that
3292 * stays open at the end.
3293 */
94dfb5e7 3294 if (initial && sched_feat(START_DEBIT))
f9c0b095 3295 vruntime += sched_vslice(cfs_rq, se);
aeb73b04 3296
a2e7a7eb 3297 /* sleeps up to a single latency don't count. */
5ca9880c 3298 if (!initial) {
a2e7a7eb 3299 unsigned long thresh = sysctl_sched_latency;
a7be37ac 3300
a2e7a7eb
MG
3301 /*
3302 * Halve their sleep time's effect, to allow
3303 * for a gentler effect of sleepers:
3304 */
3305 if (sched_feat(GENTLE_FAIR_SLEEPERS))
3306 thresh >>= 1;
51e0304c 3307
a2e7a7eb 3308 vruntime -= thresh;
aeb73b04
PZ
3309 }
3310
b5d9d734 3311 /* ensure we never gain time by being placed backwards. */
16c8f1c7 3312 se->vruntime = max_vruntime(se->vruntime, vruntime);
aeb73b04
PZ
3313}
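/*
 * Illustrative placement (assuming the default sysctl_sched_latency of
 * 6 ms and GENTLE_FAIR_SLEEPERS enabled): with cfs_rq->min_vruntime at
 * 100 ms,
 *
 * - a newly forked task with START_DEBIT is placed at
 *   100 ms + sched_vslice(), i.e. slightly in the future, so it does not
 *   immediately preempt existing tasks;
 * - a waking sleeper is placed at 100 ms - 3 ms (half the latency), but
 *   never behind its own old vruntime, so a long sleep earns at most
 *   about 3 ms of vruntime credit.
 */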
3314
d3d9dc33
PT
3315static void check_enqueue_throttle(struct cfs_rq *cfs_rq);
3316
cb251765
MG
3317static inline void check_schedstat_required(void)
3318{
3319#ifdef CONFIG_SCHEDSTATS
3320 if (schedstat_enabled())
3321 return;
3322
3323 /* Force schedstat enabled if a dependent tracepoint is active */
3324 if (trace_sched_stat_wait_enabled() ||
3325 trace_sched_stat_sleep_enabled() ||
3326 trace_sched_stat_iowait_enabled() ||
3327 trace_sched_stat_blocked_enabled() ||
3328 trace_sched_stat_runtime_enabled()) {
eda8dca5 3329 printk_deferred_once("Scheduler tracepoints stat_sleep, stat_iowait, "
cb251765
MG
3330 "stat_blocked and stat_runtime require the "
3331 "kernel parameter schedstats=enabled or "
3332 "kernel.sched_schedstats=1\n");
3333 }
3334#endif
3335}
3336
b5179ac7
PZ
3337
3338/*
3339 * MIGRATION
3340 *
3341 * dequeue
3342 * update_curr()
3343 * update_min_vruntime()
3344 * vruntime -= min_vruntime
3345 *
3346 * enqueue
3347 * update_curr()
3348 * update_min_vruntime()
3349 * vruntime += min_vruntime
3350 *
3351 * this way the vruntime transition between RQs is done when both
3352 * min_vruntime are up-to-date.
3353 *
3354 * WAKEUP (remote)
3355 *
59efa0ba 3356 * ->migrate_task_rq_fair() (p->state == TASK_WAKING)
b5179ac7
PZ
3357 * vruntime -= min_vruntime
3358 *
3359 * enqueue
3360 * update_curr()
3361 * update_min_vruntime()
3362 * vruntime += min_vruntime
3363 *
 3364 * this way we subtract a possibly stale min_vruntime on the originating
 3365 * CPU but add an up-to-date min_vruntime on the destination CPU.
3366 */
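/*
 * Editor's illustration (hypothetical numbers, not in the original
 * source): a task with vruntime 1005us is dequeued from CPU0 whose
 * min_vruntime is 1000us, leaving a relative lag of 5us. On enqueue at
 * CPU1, whose min_vruntime is 5000us, the destination's min_vruntime
 * is added back, giving 5005us: the task carries only its small
 * relative lag instead of CPU0's absolute virtual clock.
 */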
3367
bf0f6f24 3368static void
88ec22d3 3369enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
bf0f6f24 3370{
2f950354
PZ
3371 bool renorm = !(flags & ENQUEUE_WAKEUP) || (flags & ENQUEUE_MIGRATED);
3372 bool curr = cfs_rq->curr == se;
3373
88ec22d3 3374 /*
2f950354
PZ
3375 * If we're the current task, we must renormalise before calling
3376 * update_curr().
88ec22d3 3377 */
2f950354 3378 if (renorm && curr)
88ec22d3
PZ
3379 se->vruntime += cfs_rq->min_vruntime;
3380
2f950354
PZ
3381 update_curr(cfs_rq);
3382
bf0f6f24 3383 /*
2f950354
PZ
3384 * Otherwise, renormalise after, such that we're placed at the current
3385 * moment in time, instead of some random moment in the past. Being
3386 * placed in the past could significantly boost this task to the
3387 * fairness detriment of existing tasks.
bf0f6f24 3388 */
2f950354
PZ
3389 if (renorm && !curr)
3390 se->vruntime += cfs_rq->min_vruntime;
3391
9d89c257 3392 enqueue_entity_load_avg(cfs_rq, se);
17bc14b7
LT
3393 account_entity_enqueue(cfs_rq, se);
3394 update_cfs_shares(cfs_rq);
bf0f6f24 3395
1a3d027c 3396 if (flags & ENQUEUE_WAKEUP)
aeb73b04 3397 place_entity(cfs_rq, se, 0);
bf0f6f24 3398
cb251765 3399 check_schedstat_required();
4fa8d299
JP
3400 update_stats_enqueue(cfs_rq, se, flags);
3401 check_spread(cfs_rq, se);
2f950354 3402 if (!curr)
83b699ed 3403 __enqueue_entity(cfs_rq, se);
2069dd75 3404 se->on_rq = 1;
3d4b47b4 3405
d3d9dc33 3406 if (cfs_rq->nr_running == 1) {
3d4b47b4 3407 list_add_leaf_cfs_rq(cfs_rq);
d3d9dc33
PT
3408 check_enqueue_throttle(cfs_rq);
3409 }
bf0f6f24
IM
3410}
3411
2c13c919 3412static void __clear_buddies_last(struct sched_entity *se)
2002c695 3413{
2c13c919
RR
3414 for_each_sched_entity(se) {
3415 struct cfs_rq *cfs_rq = cfs_rq_of(se);
f1044799 3416 if (cfs_rq->last != se)
2c13c919 3417 break;
f1044799
PZ
3418
3419 cfs_rq->last = NULL;
2c13c919
RR
3420 }
3421}
2002c695 3422
2c13c919
RR
3423static void __clear_buddies_next(struct sched_entity *se)
3424{
3425 for_each_sched_entity(se) {
3426 struct cfs_rq *cfs_rq = cfs_rq_of(se);
f1044799 3427 if (cfs_rq->next != se)
2c13c919 3428 break;
f1044799
PZ
3429
3430 cfs_rq->next = NULL;
2c13c919 3431 }
2002c695
PZ
3432}
3433
ac53db59
RR
3434static void __clear_buddies_skip(struct sched_entity *se)
3435{
3436 for_each_sched_entity(se) {
3437 struct cfs_rq *cfs_rq = cfs_rq_of(se);
f1044799 3438 if (cfs_rq->skip != se)
ac53db59 3439 break;
f1044799
PZ
3440
3441 cfs_rq->skip = NULL;
ac53db59
RR
3442 }
3443}
3444
a571bbea
PZ
3445static void clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se)
3446{
2c13c919
RR
3447 if (cfs_rq->last == se)
3448 __clear_buddies_last(se);
3449
3450 if (cfs_rq->next == se)
3451 __clear_buddies_next(se);
ac53db59
RR
3452
3453 if (cfs_rq->skip == se)
3454 __clear_buddies_skip(se);
a571bbea
PZ
3455}
3456
6c16a6dc 3457static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq);
d8b4986d 3458
bf0f6f24 3459static void
371fd7e7 3460dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
bf0f6f24 3461{
a2a2d680
DA
3462 /*
3463 * Update run-time statistics of the 'current'.
3464 */
3465 update_curr(cfs_rq);
13962234 3466 dequeue_entity_load_avg(cfs_rq, se);
a2a2d680 3467
4fa8d299 3468 update_stats_dequeue(cfs_rq, se, flags);
67e9fb2a 3469
2002c695 3470 clear_buddies(cfs_rq, se);
4793241b 3471
83b699ed 3472 if (se != cfs_rq->curr)
30cfdcfc 3473 __dequeue_entity(cfs_rq, se);
17bc14b7 3474 se->on_rq = 0;
30cfdcfc 3475 account_entity_dequeue(cfs_rq, se);
88ec22d3
PZ
3476
3477 /*
b60205c7
PZ
3478 * Normalize after update_curr(); which will also have moved
3479 * min_vruntime if @se is the one holding it back. But before doing
3480 * update_min_vruntime() again, which will discount @se's position and
3481 * can move min_vruntime forward still more.
88ec22d3 3482 */
371fd7e7 3483 if (!(flags & DEQUEUE_SLEEP))
88ec22d3 3484 se->vruntime -= cfs_rq->min_vruntime;
1e876231 3485
d8b4986d
PT
3486 /* return excess runtime on last dequeue */
3487 return_cfs_rq_runtime(cfs_rq);
3488
17bc14b7 3489 update_cfs_shares(cfs_rq);
b60205c7
PZ
3490
3491 /*
3492 * Now advance min_vruntime if @se was the entity holding it back,
3493 * except when: DEQUEUE_SAVE && !DEQUEUE_MOVE, in this case we'll be
3494 * put back on, and if we advance min_vruntime, we'll be placed back
3495 * further than we started -- ie. we'll be penalized.
3496 */
3497 if ((flags & (DEQUEUE_SAVE | DEQUEUE_MOVE)) == DEQUEUE_SAVE)
3498 update_min_vruntime(cfs_rq);
bf0f6f24
IM
3499}
3500
3501/*
3502 * Preempt the current task with a newly woken task if needed:
3503 */
7c92e54f 3504static void
2e09bf55 3505check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
bf0f6f24 3506{
11697830 3507 unsigned long ideal_runtime, delta_exec;
f4cfb33e
WX
3508 struct sched_entity *se;
3509 s64 delta;
11697830 3510
6d0f0ebd 3511 ideal_runtime = sched_slice(cfs_rq, curr);
11697830 3512 delta_exec = curr->sum_exec_runtime - curr->prev_sum_exec_runtime;
a9f3e2b5 3513 if (delta_exec > ideal_runtime) {
8875125e 3514 resched_curr(rq_of(cfs_rq));
a9f3e2b5
MG
3515 /*
3516 * The current task ran long enough, ensure it doesn't get
3517 * re-elected due to buddy favours.
3518 */
3519 clear_buddies(cfs_rq, curr);
f685ceac
MG
3520 return;
3521 }
3522
3523 /*
3524 * Ensure that a task that missed wakeup preemption by a
3525 * narrow margin doesn't have to wait for a full slice.
3526 * This also mitigates buddy induced latencies under load.
3527 */
f685ceac
MG
3528 if (delta_exec < sysctl_sched_min_granularity)
3529 return;
3530
f4cfb33e
WX
3531 se = __pick_first_entity(cfs_rq);
3532 delta = curr->vruntime - se->vruntime;
f685ceac 3533
f4cfb33e
WX
3534 if (delta < 0)
3535 return;
d7d82944 3536
f4cfb33e 3537 if (delta > ideal_runtime)
8875125e 3538 resched_curr(rq_of(cfs_rq));
bf0f6f24
IM
3539}
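/*
 * Editor's illustration (default, unscaled tunables assumed): with
 * sysctl_sched_latency at 6ms and three equally weighted runnable
 * tasks, sched_slice() is roughly 2ms. The current task is then
 * rescheduled once it has run 2ms in this round; the vruntime-based
 * early preemption below is skipped until it has run at least the
 * 0.75ms sysctl_sched_min_granularity, and fires when its vruntime
 * runs more than one slice ahead of the leftmost waiter.
 */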
3540
83b699ed 3541static void
8494f412 3542set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
bf0f6f24 3543{
83b699ed
SV
3544 /* 'current' is not kept within the tree. */
3545 if (se->on_rq) {
3546 /*
 3547 * Any task has to be enqueued before it gets to execute on
3548 * a CPU. So account for the time it spent waiting on the
3549 * runqueue.
3550 */
4fa8d299 3551 update_stats_wait_end(cfs_rq, se);
83b699ed 3552 __dequeue_entity(cfs_rq, se);
9d89c257 3553 update_load_avg(se, 1);
83b699ed
SV
3554 }
3555
79303e9e 3556 update_stats_curr_start(cfs_rq, se);
429d43bc 3557 cfs_rq->curr = se;
4fa8d299 3558
eba1ed4b
IM
3559 /*
3560 * Track our maximum slice length, if the CPU's load is at
 3561 * least twice that of our own weight (i.e. don't track it
3562 * when there are only lesser-weight tasks around):
3563 */
cb251765 3564 if (schedstat_enabled() && rq_of(cfs_rq)->load.weight >= 2*se->load.weight) {
4fa8d299
JP
3565 schedstat_set(se->statistics.slice_max,
3566 max((u64)schedstat_val(se->statistics.slice_max),
3567 se->sum_exec_runtime - se->prev_sum_exec_runtime));
eba1ed4b 3568 }
4fa8d299 3569
4a55b450 3570 se->prev_sum_exec_runtime = se->sum_exec_runtime;
bf0f6f24
IM
3571}
3572
3f3a4904
PZ
3573static int
3574wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se);
3575
ac53db59
RR
3576/*
3577 * Pick the next process, keeping these things in mind, in this order:
3578 * 1) keep things fair between processes/task groups
3579 * 2) pick the "next" process, since someone really wants that to run
3580 * 3) pick the "last" process, for cache locality
3581 * 4) do not run the "skip" process, if something else is available
3582 */
678d5718
PZ
3583static struct sched_entity *
3584pick_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *curr)
aa2ac252 3585{
678d5718
PZ
3586 struct sched_entity *left = __pick_first_entity(cfs_rq);
3587 struct sched_entity *se;
3588
3589 /*
 3590 * If curr is set we have to see if it's left of the leftmost entity
3591 * still in the tree, provided there was anything in the tree at all.
3592 */
3593 if (!left || (curr && entity_before(curr, left)))
3594 left = curr;
3595
3596 se = left; /* ideally we run the leftmost entity */
f4b6755f 3597
ac53db59
RR
3598 /*
3599 * Avoid running the skip buddy, if running something else can
3600 * be done without getting too unfair.
3601 */
3602 if (cfs_rq->skip == se) {
678d5718
PZ
3603 struct sched_entity *second;
3604
3605 if (se == curr) {
3606 second = __pick_first_entity(cfs_rq);
3607 } else {
3608 second = __pick_next_entity(se);
3609 if (!second || (curr && entity_before(curr, second)))
3610 second = curr;
3611 }
3612
ac53db59
RR
3613 if (second && wakeup_preempt_entity(second, left) < 1)
3614 se = second;
3615 }
aa2ac252 3616
f685ceac
MG
3617 /*
3618 * Prefer last buddy, try to return the CPU to a preempted task.
3619 */
3620 if (cfs_rq->last && wakeup_preempt_entity(cfs_rq->last, left) < 1)
3621 se = cfs_rq->last;
3622
ac53db59
RR
3623 /*
3624 * Someone really wants this to run. If it's not unfair, run it.
3625 */
3626 if (cfs_rq->next && wakeup_preempt_entity(cfs_rq->next, left) < 1)
3627 se = cfs_rq->next;
3628
f685ceac 3629 clear_buddies(cfs_rq, se);
4793241b
PZ
3630
3631 return se;
aa2ac252
PZ
3632}
3633
678d5718 3634static bool check_cfs_rq_runtime(struct cfs_rq *cfs_rq);
d3d9dc33 3635
ab6cde26 3636static void put_prev_entity(struct cfs_rq *cfs_rq, struct sched_entity *prev)
bf0f6f24
IM
3637{
3638 /*
3639 * If still on the runqueue then deactivate_task()
3640 * was not called and update_curr() has to be done:
3641 */
3642 if (prev->on_rq)
b7cc0896 3643 update_curr(cfs_rq);
bf0f6f24 3644
d3d9dc33
PT
3645 /* throttle cfs_rqs exceeding runtime */
3646 check_cfs_rq_runtime(cfs_rq);
3647
4fa8d299 3648 check_spread(cfs_rq, prev);
cb251765 3649
30cfdcfc 3650 if (prev->on_rq) {
4fa8d299 3651 update_stats_wait_start(cfs_rq, prev);
30cfdcfc
DA
3652 /* Put 'current' back into the tree. */
3653 __enqueue_entity(cfs_rq, prev);
9d85f21c 3654 /* in !on_rq case, update occurred at dequeue */
9d89c257 3655 update_load_avg(prev, 0);
30cfdcfc 3656 }
429d43bc 3657 cfs_rq->curr = NULL;
bf0f6f24
IM
3658}
3659
8f4d37ec
PZ
3660static void
3661entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr, int queued)
bf0f6f24 3662{
bf0f6f24 3663 /*
30cfdcfc 3664 * Update run-time statistics of the 'current'.
bf0f6f24 3665 */
30cfdcfc 3666 update_curr(cfs_rq);
bf0f6f24 3667
9d85f21c
PT
3668 /*
3669 * Ensure that runnable average is periodically updated.
3670 */
9d89c257 3671 update_load_avg(curr, 1);
bf0bd948 3672 update_cfs_shares(cfs_rq);
9d85f21c 3673
8f4d37ec
PZ
3674#ifdef CONFIG_SCHED_HRTICK
3675 /*
3676 * queued ticks are scheduled to match the slice, so don't bother
3677 * validating it and just reschedule.
3678 */
983ed7a6 3679 if (queued) {
8875125e 3680 resched_curr(rq_of(cfs_rq));
983ed7a6
HH
3681 return;
3682 }
8f4d37ec
PZ
3683 /*
3684 * don't let the period tick interfere with the hrtick preemption
3685 */
3686 if (!sched_feat(DOUBLE_TICK) &&
3687 hrtimer_active(&rq_of(cfs_rq)->hrtick_timer))
3688 return;
3689#endif
3690
2c2efaed 3691 if (cfs_rq->nr_running > 1)
2e09bf55 3692 check_preempt_tick(cfs_rq, curr);
bf0f6f24
IM
3693}
3694
ab84d31e
PT
3695
3696/**************************************************
3697 * CFS bandwidth control machinery
3698 */
3699
3700#ifdef CONFIG_CFS_BANDWIDTH
029632fb
PZ
3701
3702#ifdef HAVE_JUMP_LABEL
c5905afb 3703static struct static_key __cfs_bandwidth_used;
029632fb
PZ
3704
3705static inline bool cfs_bandwidth_used(void)
3706{
c5905afb 3707 return static_key_false(&__cfs_bandwidth_used);
029632fb
PZ
3708}
3709
1ee14e6c 3710void cfs_bandwidth_usage_inc(void)
029632fb 3711{
1ee14e6c
BS
3712 static_key_slow_inc(&__cfs_bandwidth_used);
3713}
3714
3715void cfs_bandwidth_usage_dec(void)
3716{
3717 static_key_slow_dec(&__cfs_bandwidth_used);
029632fb
PZ
3718}
3719#else /* HAVE_JUMP_LABEL */
3720static bool cfs_bandwidth_used(void)
3721{
3722 return true;
3723}
3724
1ee14e6c
BS
3725void cfs_bandwidth_usage_inc(void) {}
3726void cfs_bandwidth_usage_dec(void) {}
029632fb
PZ
3727#endif /* HAVE_JUMP_LABEL */
3728
ab84d31e
PT
3729/*
3730 * default period for cfs group bandwidth.
3731 * default: 0.1s, units: nanoseconds
3732 */
3733static inline u64 default_cfs_period(void)
3734{
3735 return 100000000ULL;
3736}
ec12cb7f
PT
3737
3738static inline u64 sched_cfs_bandwidth_slice(void)
3739{
3740 return (u64)sysctl_sched_cfs_bandwidth_slice * NSEC_PER_USEC;
3741}
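/*
 * Editor's illustration (hypothetical cgroup configuration, not in the
 * original source): setting cpu.cfs_quota_us = 20000 with the default
 * cpu.cfs_period_us = 100000 caps the group at 20ms of CPU time per
 * 100ms period. With the default 5ms bandwidth slice, each cfs_rq in
 * the group pulls local runtime from the global pool in 5ms chunks, so
 * the pool covers at most four such refills before the hierarchy is
 * throttled until the next period refresh.
 */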
3742
a9cf55b2
PT
3743/*
3744 * Replenish runtime according to assigned quota and update expiration time.
3745 * We use sched_clock_cpu directly instead of rq->clock to avoid adding
3746 * additional synchronization around rq->lock.
3747 *
3748 * requires cfs_b->lock
3749 */
029632fb 3750void __refill_cfs_bandwidth_runtime(struct cfs_bandwidth *cfs_b)
a9cf55b2
PT
3751{
3752 u64 now;
3753
3754 if (cfs_b->quota == RUNTIME_INF)
3755 return;
3756
3757 now = sched_clock_cpu(smp_processor_id());
3758 cfs_b->runtime = cfs_b->quota;
3759 cfs_b->runtime_expires = now + ktime_to_ns(cfs_b->period);
3760}
3761
029632fb
PZ
3762static inline struct cfs_bandwidth *tg_cfs_bandwidth(struct task_group *tg)
3763{
3764 return &tg->cfs_bandwidth;
3765}
3766
f1b17280
PT
3767/* rq->clock_task normalized against any time this cfs_rq has spent throttled */
3768static inline u64 cfs_rq_clock_task(struct cfs_rq *cfs_rq)
3769{
3770 if (unlikely(cfs_rq->throttle_count))
1a99ae3f 3771 return cfs_rq->throttled_clock_task - cfs_rq->throttled_clock_task_time;
f1b17280 3772
78becc27 3773 return rq_clock_task(rq_of(cfs_rq)) - cfs_rq->throttled_clock_task_time;
f1b17280
PT
3774}
3775
85dac906
PT
3776/* returns 0 on failure to allocate runtime */
3777static int assign_cfs_rq_runtime(struct cfs_rq *cfs_rq)
ec12cb7f
PT
3778{
3779 struct task_group *tg = cfs_rq->tg;
3780 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(tg);
a9cf55b2 3781 u64 amount = 0, min_amount, expires;
ec12cb7f
PT
3782
3783 /* note: this is a positive sum as runtime_remaining <= 0 */
3784 min_amount = sched_cfs_bandwidth_slice() - cfs_rq->runtime_remaining;
3785
3786 raw_spin_lock(&cfs_b->lock);
3787 if (cfs_b->quota == RUNTIME_INF)
3788 amount = min_amount;
58088ad0 3789 else {
77a4d1a1 3790 start_cfs_bandwidth(cfs_b);
58088ad0
PT
3791
3792 if (cfs_b->runtime > 0) {
3793 amount = min(cfs_b->runtime, min_amount);
3794 cfs_b->runtime -= amount;
3795 cfs_b->idle = 0;
3796 }
ec12cb7f 3797 }
a9cf55b2 3798 expires = cfs_b->runtime_expires;
ec12cb7f
PT
3799 raw_spin_unlock(&cfs_b->lock);
3800
3801 cfs_rq->runtime_remaining += amount;
a9cf55b2
PT
3802 /*
3803 * we may have advanced our local expiration to account for allowed
3804 * spread between our sched_clock and the one on which runtime was
3805 * issued.
3806 */
3807 if ((s64)(expires - cfs_rq->runtime_expires) > 0)
3808 cfs_rq->runtime_expires = expires;
85dac906
PT
3809
3810 return cfs_rq->runtime_remaining > 0;
ec12cb7f
PT
3811}
3812
a9cf55b2
PT
3813/*
3814 * Note: This depends on the synchronization provided by sched_clock and the
3815 * fact that rq->clock snapshots this value.
3816 */
3817static void expire_cfs_rq_runtime(struct cfs_rq *cfs_rq)
ec12cb7f 3818{
a9cf55b2 3819 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
a9cf55b2
PT
3820
3821 /* if the deadline is ahead of our clock, nothing to do */
78becc27 3822 if (likely((s64)(rq_clock(rq_of(cfs_rq)) - cfs_rq->runtime_expires) < 0))
ec12cb7f
PT
3823 return;
3824
a9cf55b2
PT
3825 if (cfs_rq->runtime_remaining < 0)
3826 return;
3827
3828 /*
3829 * If the local deadline has passed we have to consider the
3830 * possibility that our sched_clock is 'fast' and the global deadline
3831 * has not truly expired.
3832 *
 3833 * Fortunately we can determine whether this is the case by checking
51f2176d
BS
3834 * whether the global deadline has advanced. It is valid to compare
3835 * cfs_b->runtime_expires without any locks since we only care about
3836 * exact equality, so a partial write will still work.
a9cf55b2
PT
3837 */
3838
51f2176d 3839 if (cfs_rq->runtime_expires != cfs_b->runtime_expires) {
a9cf55b2
PT
3840 /* extend local deadline, drift is bounded above by 2 ticks */
3841 cfs_rq->runtime_expires += TICK_NSEC;
3842 } else {
3843 /* global deadline is ahead, expiration has passed */
3844 cfs_rq->runtime_remaining = 0;
3845 }
3846}
3847
9dbdb155 3848static void __account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec)
a9cf55b2
PT
3849{
3850 /* dock delta_exec before expiring quota (as it could span periods) */
ec12cb7f 3851 cfs_rq->runtime_remaining -= delta_exec;
a9cf55b2
PT
3852 expire_cfs_rq_runtime(cfs_rq);
3853
3854 if (likely(cfs_rq->runtime_remaining > 0))
ec12cb7f
PT
3855 return;
3856
85dac906
PT
3857 /*
3858 * if we're unable to extend our runtime we resched so that the active
3859 * hierarchy can be throttled
3860 */
3861 if (!assign_cfs_rq_runtime(cfs_rq) && likely(cfs_rq->curr))
8875125e 3862 resched_curr(rq_of(cfs_rq));
ec12cb7f
PT
3863}
3864
6c16a6dc 3865static __always_inline
9dbdb155 3866void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec)
ec12cb7f 3867{
56f570e5 3868 if (!cfs_bandwidth_used() || !cfs_rq->runtime_enabled)
ec12cb7f
PT
3869 return;
3870
3871 __account_cfs_rq_runtime(cfs_rq, delta_exec);
3872}
3873
85dac906
PT
3874static inline int cfs_rq_throttled(struct cfs_rq *cfs_rq)
3875{
56f570e5 3876 return cfs_bandwidth_used() && cfs_rq->throttled;
85dac906
PT
3877}
3878
64660c86
PT
3879/* check whether cfs_rq, or any parent, is throttled */
3880static inline int throttled_hierarchy(struct cfs_rq *cfs_rq)
3881{
56f570e5 3882 return cfs_bandwidth_used() && cfs_rq->throttle_count;
64660c86
PT
3883}
3884
3885/*
3886 * Ensure that neither of the group entities corresponding to src_cpu or
3887 * dest_cpu are members of a throttled hierarchy when performing group
3888 * load-balance operations.
3889 */
3890static inline int throttled_lb_pair(struct task_group *tg,
3891 int src_cpu, int dest_cpu)
3892{
3893 struct cfs_rq *src_cfs_rq, *dest_cfs_rq;
3894
3895 src_cfs_rq = tg->cfs_rq[src_cpu];
3896 dest_cfs_rq = tg->cfs_rq[dest_cpu];
3897
3898 return throttled_hierarchy(src_cfs_rq) ||
3899 throttled_hierarchy(dest_cfs_rq);
3900}
3901
3902/* updated child weight may affect parent so we have to do this bottom up */
3903static int tg_unthrottle_up(struct task_group *tg, void *data)
3904{
3905 struct rq *rq = data;
3906 struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)];
3907
3908 cfs_rq->throttle_count--;
64660c86 3909 if (!cfs_rq->throttle_count) {
f1b17280 3910 /* adjust cfs_rq_clock_task() */
78becc27 3911 cfs_rq->throttled_clock_task_time += rq_clock_task(rq) -
f1b17280 3912 cfs_rq->throttled_clock_task;
64660c86 3913 }
64660c86
PT
3914
3915 return 0;
3916}
3917
3918static int tg_throttle_down(struct task_group *tg, void *data)
3919{
3920 struct rq *rq = data;
3921 struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)];
3922
82958366
PT
3923 /* group is entering throttled state, stop time */
3924 if (!cfs_rq->throttle_count)
78becc27 3925 cfs_rq->throttled_clock_task = rq_clock_task(rq);
64660c86
PT
3926 cfs_rq->throttle_count++;
3927
3928 return 0;
3929}
3930
d3d9dc33 3931static void throttle_cfs_rq(struct cfs_rq *cfs_rq)
85dac906
PT
3932{
3933 struct rq *rq = rq_of(cfs_rq);
3934 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
3935 struct sched_entity *se;
3936 long task_delta, dequeue = 1;
77a4d1a1 3937 bool empty;
85dac906
PT
3938
3939 se = cfs_rq->tg->se[cpu_of(rq_of(cfs_rq))];
3940
f1b17280 3941 /* freeze hierarchy runnable averages while throttled */
64660c86
PT
3942 rcu_read_lock();
3943 walk_tg_tree_from(cfs_rq->tg, tg_throttle_down, tg_nop, (void *)rq);
3944 rcu_read_unlock();
85dac906
PT
3945
3946 task_delta = cfs_rq->h_nr_running;
3947 for_each_sched_entity(se) {
3948 struct cfs_rq *qcfs_rq = cfs_rq_of(se);
3949 /* throttled entity or throttle-on-deactivate */
3950 if (!se->on_rq)
3951 break;
3952
3953 if (dequeue)
3954 dequeue_entity(qcfs_rq, se, DEQUEUE_SLEEP);
3955 qcfs_rq->h_nr_running -= task_delta;
3956
3957 if (qcfs_rq->load.weight)
3958 dequeue = 0;
3959 }
3960
3961 if (!se)
72465447 3962 sub_nr_running(rq, task_delta);
85dac906
PT
3963
3964 cfs_rq->throttled = 1;
78becc27 3965 cfs_rq->throttled_clock = rq_clock(rq);
85dac906 3966 raw_spin_lock(&cfs_b->lock);
d49db342 3967 empty = list_empty(&cfs_b->throttled_cfs_rq);
77a4d1a1 3968
c06f04c7
BS
3969 /*
3970 * Add to the _head_ of the list, so that an already-started
3971 * distribute_cfs_runtime will not see us
3972 */
3973 list_add_rcu(&cfs_rq->throttled_list, &cfs_b->throttled_cfs_rq);
77a4d1a1
PZ
3974
3975 /*
3976 * If we're the first throttled task, make sure the bandwidth
3977 * timer is running.
3978 */
3979 if (empty)
3980 start_cfs_bandwidth(cfs_b);
3981
85dac906
PT
3982 raw_spin_unlock(&cfs_b->lock);
3983}
3984
029632fb 3985void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
671fd9da
PT
3986{
3987 struct rq *rq = rq_of(cfs_rq);
3988 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
3989 struct sched_entity *se;
3990 int enqueue = 1;
3991 long task_delta;
3992
22b958d8 3993 se = cfs_rq->tg->se[cpu_of(rq)];
671fd9da
PT
3994
3995 cfs_rq->throttled = 0;
1a55af2e
FW
3996
3997 update_rq_clock(rq);
3998
671fd9da 3999 raw_spin_lock(&cfs_b->lock);
78becc27 4000 cfs_b->throttled_time += rq_clock(rq) - cfs_rq->throttled_clock;
671fd9da
PT
4001 list_del_rcu(&cfs_rq->throttled_list);
4002 raw_spin_unlock(&cfs_b->lock);
4003
64660c86
PT
4004 /* update hierarchical throttle state */
4005 walk_tg_tree_from(cfs_rq->tg, tg_nop, tg_unthrottle_up, (void *)rq);
4006
671fd9da
PT
4007 if (!cfs_rq->load.weight)
4008 return;
4009
4010 task_delta = cfs_rq->h_nr_running;
4011 for_each_sched_entity(se) {
4012 if (se->on_rq)
4013 enqueue = 0;
4014
4015 cfs_rq = cfs_rq_of(se);
4016 if (enqueue)
4017 enqueue_entity(cfs_rq, se, ENQUEUE_WAKEUP);
4018 cfs_rq->h_nr_running += task_delta;
4019
4020 if (cfs_rq_throttled(cfs_rq))
4021 break;
4022 }
4023
4024 if (!se)
72465447 4025 add_nr_running(rq, task_delta);
671fd9da
PT
4026
4027 /* determine whether we need to wake up potentially idle cpu */
4028 if (rq->curr == rq->idle && rq->cfs.nr_running)
8875125e 4029 resched_curr(rq);
671fd9da
PT
4030}
4031
4032static u64 distribute_cfs_runtime(struct cfs_bandwidth *cfs_b,
4033 u64 remaining, u64 expires)
4034{
4035 struct cfs_rq *cfs_rq;
c06f04c7
BS
4036 u64 runtime;
4037 u64 starting_runtime = remaining;
671fd9da
PT
4038
4039 rcu_read_lock();
4040 list_for_each_entry_rcu(cfs_rq, &cfs_b->throttled_cfs_rq,
4041 throttled_list) {
4042 struct rq *rq = rq_of(cfs_rq);
4043
4044 raw_spin_lock(&rq->lock);
4045 if (!cfs_rq_throttled(cfs_rq))
4046 goto next;
4047
4048 runtime = -cfs_rq->runtime_remaining + 1;
4049 if (runtime > remaining)
4050 runtime = remaining;
4051 remaining -= runtime;
4052
4053 cfs_rq->runtime_remaining += runtime;
4054 cfs_rq->runtime_expires = expires;
4055
4056 /* we check whether we're throttled above */
4057 if (cfs_rq->runtime_remaining > 0)
4058 unthrottle_cfs_rq(cfs_rq);
4059
4060next:
4061 raw_spin_unlock(&rq->lock);
4062
4063 if (!remaining)
4064 break;
4065 }
4066 rcu_read_unlock();
4067
c06f04c7 4068 return starting_runtime - remaining;
671fd9da
PT
4069}
4070
58088ad0
PT
4071/*
4072 * Responsible for refilling a task_group's bandwidth and unthrottling its
4073 * cfs_rqs as appropriate. If there has been no activity within the last
4074 * period the timer is deactivated until scheduling resumes; cfs_b->idle is
4075 * used to track this state.
4076 */
4077static int do_sched_cfs_period_timer(struct cfs_bandwidth *cfs_b, int overrun)
4078{
671fd9da 4079 u64 runtime, runtime_expires;
51f2176d 4080 int throttled;
58088ad0 4081
58088ad0
PT
4082 /* no need to continue the timer with no bandwidth constraint */
4083 if (cfs_b->quota == RUNTIME_INF)
51f2176d 4084 goto out_deactivate;
58088ad0 4085
671fd9da 4086 throttled = !list_empty(&cfs_b->throttled_cfs_rq);
e8da1b18 4087 cfs_b->nr_periods += overrun;
671fd9da 4088
51f2176d
BS
4089 /*
4090 * idle depends on !throttled (for the case of a large deficit), and if
4091 * we're going inactive then everything else can be deferred
4092 */
4093 if (cfs_b->idle && !throttled)
4094 goto out_deactivate;
a9cf55b2
PT
4095
4096 __refill_cfs_bandwidth_runtime(cfs_b);
4097
671fd9da
PT
4098 if (!throttled) {
4099 /* mark as potentially idle for the upcoming period */
4100 cfs_b->idle = 1;
51f2176d 4101 return 0;
671fd9da
PT
4102 }
4103
e8da1b18
NR
4104 /* account preceding periods in which throttling occurred */
4105 cfs_b->nr_throttled += overrun;
4106
671fd9da 4107 runtime_expires = cfs_b->runtime_expires;
671fd9da
PT
4108
4109 /*
c06f04c7
BS
4110 * This check is repeated as we are holding onto the new bandwidth while
4111 * we unthrottle. This can potentially race with an unthrottled group
4112 * trying to acquire new bandwidth from the global pool. This can result
4113 * in us over-using our runtime if it is all used during this loop, but
4114 * only by limited amounts in that extreme case.
671fd9da 4115 */
c06f04c7
BS
4116 while (throttled && cfs_b->runtime > 0) {
4117 runtime = cfs_b->runtime;
671fd9da
PT
4118 raw_spin_unlock(&cfs_b->lock);
4119 /* we can't nest cfs_b->lock while distributing bandwidth */
4120 runtime = distribute_cfs_runtime(cfs_b, runtime,
4121 runtime_expires);
4122 raw_spin_lock(&cfs_b->lock);
4123
4124 throttled = !list_empty(&cfs_b->throttled_cfs_rq);
c06f04c7
BS
4125
4126 cfs_b->runtime -= min(runtime, cfs_b->runtime);
671fd9da 4127 }
58088ad0 4128
671fd9da
PT
4129 /*
4130 * While we are ensured activity in the period following an
4131 * unthrottle, this also covers the case in which the new bandwidth is
4132 * insufficient to cover the existing bandwidth deficit. (Forcing the
4133 * timer to remain active while there are any throttled entities.)
4134 */
4135 cfs_b->idle = 0;
58088ad0 4136
51f2176d
BS
4137 return 0;
4138
4139out_deactivate:
51f2176d 4140 return 1;
58088ad0 4141}
d3d9dc33 4142
d8b4986d
PT
4143/* a cfs_rq won't donate quota below this amount */
4144static const u64 min_cfs_rq_runtime = 1 * NSEC_PER_MSEC;
4145/* minimum remaining period time to redistribute slack quota */
4146static const u64 min_bandwidth_expiration = 2 * NSEC_PER_MSEC;
4147/* how long we wait to gather additional slack before distributing */
4148static const u64 cfs_bandwidth_slack_period = 5 * NSEC_PER_MSEC;
4149
db06e78c
BS
4150/*
4151 * Are we near the end of the current quota period?
4152 *
4153 * Requires cfs_b->lock for hrtimer_expires_remaining to be safe against the
4961b6e1 4154 * hrtimer base being cleared by hrtimer_start. In the case of
db06e78c
BS
4155 * migrate_hrtimers, base is never cleared, so we are fine.
4156 */
d8b4986d
PT
4157static int runtime_refresh_within(struct cfs_bandwidth *cfs_b, u64 min_expire)
4158{
4159 struct hrtimer *refresh_timer = &cfs_b->period_timer;
4160 u64 remaining;
4161
4162 /* if the call-back is running a quota refresh is already occurring */
4163 if (hrtimer_callback_running(refresh_timer))
4164 return 1;
4165
4166 /* is a quota refresh about to occur? */
4167 remaining = ktime_to_ns(hrtimer_expires_remaining(refresh_timer));
4168 if (remaining < min_expire)
4169 return 1;
4170
4171 return 0;
4172}
4173
4174static void start_cfs_slack_bandwidth(struct cfs_bandwidth *cfs_b)
4175{
4176 u64 min_left = cfs_bandwidth_slack_period + min_bandwidth_expiration;
4177
4178 /* if there's a quota refresh soon don't bother with slack */
4179 if (runtime_refresh_within(cfs_b, min_left))
4180 return;
4181
4cfafd30
PZ
4182 hrtimer_start(&cfs_b->slack_timer,
4183 ns_to_ktime(cfs_bandwidth_slack_period),
4184 HRTIMER_MODE_REL);
d8b4986d
PT
4185}
4186
4187/* we know any runtime found here is valid as update_curr() precedes return */
4188static void __return_cfs_rq_runtime(struct cfs_rq *cfs_rq)
4189{
4190 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
4191 s64 slack_runtime = cfs_rq->runtime_remaining - min_cfs_rq_runtime;
4192
4193 if (slack_runtime <= 0)
4194 return;
4195
4196 raw_spin_lock(&cfs_b->lock);
4197 if (cfs_b->quota != RUNTIME_INF &&
4198 cfs_rq->runtime_expires == cfs_b->runtime_expires) {
4199 cfs_b->runtime += slack_runtime;
4200
4201 /* we are under rq->lock, defer unthrottling using a timer */
4202 if (cfs_b->runtime > sched_cfs_bandwidth_slice() &&
4203 !list_empty(&cfs_b->throttled_cfs_rq))
4204 start_cfs_slack_bandwidth(cfs_b);
4205 }
4206 raw_spin_unlock(&cfs_b->lock);
4207
4208 /* even if it's not valid for return we don't want to try again */
4209 cfs_rq->runtime_remaining -= slack_runtime;
4210}
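/*
 * Editor's illustration (hypothetical numbers, not in the original
 * source): if a cfs_rq still holds 4ms of local runtime when its last
 * task dequeues, it keeps min_cfs_rq_runtime (1ms) and returns 3ms to
 * the global pool. Should that leave the pool with more than one 5ms
 * slice while other cfs_rqs sit throttled, the slack timer is armed to
 * fire in 5ms, unless the regular period refresh is already due within
 * 7ms (cfs_bandwidth_slack_period + min_bandwidth_expiration).
 */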
4211
4212static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq)
4213{
56f570e5
PT
4214 if (!cfs_bandwidth_used())
4215 return;
4216
fccfdc6f 4217 if (!cfs_rq->runtime_enabled || cfs_rq->nr_running)
d8b4986d
PT
4218 return;
4219
4220 __return_cfs_rq_runtime(cfs_rq);
4221}
4222
4223/*
4224 * This is done with a timer (instead of inline with bandwidth return) since
4225 * it's necessary to juggle rq->locks to unthrottle their respective cfs_rqs.
4226 */
4227static void do_sched_cfs_slack_timer(struct cfs_bandwidth *cfs_b)
4228{
4229 u64 runtime = 0, slice = sched_cfs_bandwidth_slice();
4230 u64 expires;
4231
4232 /* confirm we're still not at a refresh boundary */
db06e78c
BS
4233 raw_spin_lock(&cfs_b->lock);
4234 if (runtime_refresh_within(cfs_b, min_bandwidth_expiration)) {
4235 raw_spin_unlock(&cfs_b->lock);
d8b4986d 4236 return;
db06e78c 4237 }
d8b4986d 4238
c06f04c7 4239 if (cfs_b->quota != RUNTIME_INF && cfs_b->runtime > slice)
d8b4986d 4240 runtime = cfs_b->runtime;
c06f04c7 4241
d8b4986d
PT
4242 expires = cfs_b->runtime_expires;
4243 raw_spin_unlock(&cfs_b->lock);
4244
4245 if (!runtime)
4246 return;
4247
4248 runtime = distribute_cfs_runtime(cfs_b, runtime, expires);
4249
4250 raw_spin_lock(&cfs_b->lock);
4251 if (expires == cfs_b->runtime_expires)
c06f04c7 4252 cfs_b->runtime -= min(runtime, cfs_b->runtime);
d8b4986d
PT
4253 raw_spin_unlock(&cfs_b->lock);
4254}
4255
d3d9dc33
PT
4256/*
4257 * When a group wakes up we want to make sure that its quota is not already
4258 * expired/exceeded, otherwise it may be allowed to steal additional ticks of
 4259 * runtime as update_curr() throttling cannot trigger until it's on-rq.
4260 */
4261static void check_enqueue_throttle(struct cfs_rq *cfs_rq)
4262{
56f570e5
PT
4263 if (!cfs_bandwidth_used())
4264 return;
4265
d3d9dc33
PT
4266 /* an active group must be handled by the update_curr()->put() path */
4267 if (!cfs_rq->runtime_enabled || cfs_rq->curr)
4268 return;
4269
4270 /* ensure the group is not already throttled */
4271 if (cfs_rq_throttled(cfs_rq))
4272 return;
4273
4274 /* update runtime allocation */
4275 account_cfs_rq_runtime(cfs_rq, 0);
4276 if (cfs_rq->runtime_remaining <= 0)
4277 throttle_cfs_rq(cfs_rq);
4278}
4279
55e16d30
PZ
4280static void sync_throttle(struct task_group *tg, int cpu)
4281{
4282 struct cfs_rq *pcfs_rq, *cfs_rq;
4283
4284 if (!cfs_bandwidth_used())
4285 return;
4286
4287 if (!tg->parent)
4288 return;
4289
4290 cfs_rq = tg->cfs_rq[cpu];
4291 pcfs_rq = tg->parent->cfs_rq[cpu];
4292
4293 cfs_rq->throttle_count = pcfs_rq->throttle_count;
b8922125 4294 cfs_rq->throttled_clock_task = rq_clock_task(cpu_rq(cpu));
55e16d30
PZ
4295}
4296
d3d9dc33 4297/* conditionally throttle active cfs_rq's from put_prev_entity() */
678d5718 4298static bool check_cfs_rq_runtime(struct cfs_rq *cfs_rq)
d3d9dc33 4299{
56f570e5 4300 if (!cfs_bandwidth_used())
678d5718 4301 return false;
56f570e5 4302
d3d9dc33 4303 if (likely(!cfs_rq->runtime_enabled || cfs_rq->runtime_remaining > 0))
678d5718 4304 return false;
d3d9dc33
PT
4305
4306 /*
4307 * it's possible for a throttled entity to be forced into a running
4308 * state (e.g. set_curr_task), in this case we're finished.
4309 */
4310 if (cfs_rq_throttled(cfs_rq))
678d5718 4311 return true;
d3d9dc33
PT
4312
4313 throttle_cfs_rq(cfs_rq);
678d5718 4314 return true;
d3d9dc33 4315}
029632fb 4316
029632fb
PZ
4317static enum hrtimer_restart sched_cfs_slack_timer(struct hrtimer *timer)
4318{
4319 struct cfs_bandwidth *cfs_b =
4320 container_of(timer, struct cfs_bandwidth, slack_timer);
77a4d1a1 4321
029632fb
PZ
4322 do_sched_cfs_slack_timer(cfs_b);
4323
4324 return HRTIMER_NORESTART;
4325}
4326
4327static enum hrtimer_restart sched_cfs_period_timer(struct hrtimer *timer)
4328{
4329 struct cfs_bandwidth *cfs_b =
4330 container_of(timer, struct cfs_bandwidth, period_timer);
029632fb
PZ
4331 int overrun;
4332 int idle = 0;
4333
51f2176d 4334 raw_spin_lock(&cfs_b->lock);
029632fb 4335 for (;;) {
77a4d1a1 4336 overrun = hrtimer_forward_now(timer, cfs_b->period);
029632fb
PZ
4337 if (!overrun)
4338 break;
4339
4340 idle = do_sched_cfs_period_timer(cfs_b, overrun);
4341 }
4cfafd30
PZ
4342 if (idle)
4343 cfs_b->period_active = 0;
51f2176d 4344 raw_spin_unlock(&cfs_b->lock);
029632fb
PZ
4345
4346 return idle ? HRTIMER_NORESTART : HRTIMER_RESTART;
4347}
4348
4349void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
4350{
4351 raw_spin_lock_init(&cfs_b->lock);
4352 cfs_b->runtime = 0;
4353 cfs_b->quota = RUNTIME_INF;
4354 cfs_b->period = ns_to_ktime(default_cfs_period());
4355
4356 INIT_LIST_HEAD(&cfs_b->throttled_cfs_rq);
4cfafd30 4357 hrtimer_init(&cfs_b->period_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED);
029632fb
PZ
4358 cfs_b->period_timer.function = sched_cfs_period_timer;
4359 hrtimer_init(&cfs_b->slack_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
4360 cfs_b->slack_timer.function = sched_cfs_slack_timer;
4361}
4362
4363static void init_cfs_rq_runtime(struct cfs_rq *cfs_rq)
4364{
4365 cfs_rq->runtime_enabled = 0;
4366 INIT_LIST_HEAD(&cfs_rq->throttled_list);
4367}
4368
77a4d1a1 4369void start_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
029632fb 4370{
4cfafd30 4371 lockdep_assert_held(&cfs_b->lock);
029632fb 4372
4cfafd30
PZ
4373 if (!cfs_b->period_active) {
4374 cfs_b->period_active = 1;
4375 hrtimer_forward_now(&cfs_b->period_timer, cfs_b->period);
4376 hrtimer_start_expires(&cfs_b->period_timer, HRTIMER_MODE_ABS_PINNED);
4377 }
029632fb
PZ
4378}
4379
4380static void destroy_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
4381{
7f1a169b
TH
4382 /* init_cfs_bandwidth() was not called */
4383 if (!cfs_b->throttled_cfs_rq.next)
4384 return;
4385
029632fb
PZ
4386 hrtimer_cancel(&cfs_b->period_timer);
4387 hrtimer_cancel(&cfs_b->slack_timer);
4388}
4389
0e59bdae
KT
4390static void __maybe_unused update_runtime_enabled(struct rq *rq)
4391{
4392 struct cfs_rq *cfs_rq;
4393
4394 for_each_leaf_cfs_rq(rq, cfs_rq) {
4395 struct cfs_bandwidth *cfs_b = &cfs_rq->tg->cfs_bandwidth;
4396
4397 raw_spin_lock(&cfs_b->lock);
4398 cfs_rq->runtime_enabled = cfs_b->quota != RUNTIME_INF;
4399 raw_spin_unlock(&cfs_b->lock);
4400 }
4401}
4402
38dc3348 4403static void __maybe_unused unthrottle_offline_cfs_rqs(struct rq *rq)
029632fb
PZ
4404{
4405 struct cfs_rq *cfs_rq;
4406
4407 for_each_leaf_cfs_rq(rq, cfs_rq) {
029632fb
PZ
4408 if (!cfs_rq->runtime_enabled)
4409 continue;
4410
4411 /*
4412 * clock_task is not advancing so we just need to make sure
4413 * there's some valid quota amount
4414 */
51f2176d 4415 cfs_rq->runtime_remaining = 1;
0e59bdae
KT
4416 /*
4417 * Offline rq is schedulable till cpu is completely disabled
4418 * in take_cpu_down(), so we prevent new cfs throttling here.
4419 */
4420 cfs_rq->runtime_enabled = 0;
4421
029632fb
PZ
4422 if (cfs_rq_throttled(cfs_rq))
4423 unthrottle_cfs_rq(cfs_rq);
4424 }
4425}
4426
4427#else /* CONFIG_CFS_BANDWIDTH */
f1b17280
PT
4428static inline u64 cfs_rq_clock_task(struct cfs_rq *cfs_rq)
4429{
78becc27 4430 return rq_clock_task(rq_of(cfs_rq));
f1b17280
PT
4431}
4432
9dbdb155 4433static void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec) {}
678d5718 4434static bool check_cfs_rq_runtime(struct cfs_rq *cfs_rq) { return false; }
d3d9dc33 4435static void check_enqueue_throttle(struct cfs_rq *cfs_rq) {}
55e16d30 4436static inline void sync_throttle(struct task_group *tg, int cpu) {}
6c16a6dc 4437static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq) {}
85dac906
PT
4438
4439static inline int cfs_rq_throttled(struct cfs_rq *cfs_rq)
4440{
4441 return 0;
4442}
64660c86
PT
4443
4444static inline int throttled_hierarchy(struct cfs_rq *cfs_rq)
4445{
4446 return 0;
4447}
4448
4449static inline int throttled_lb_pair(struct task_group *tg,
4450 int src_cpu, int dest_cpu)
4451{
4452 return 0;
4453}
029632fb
PZ
4454
4455void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b) {}
4456
4457#ifdef CONFIG_FAIR_GROUP_SCHED
4458static void init_cfs_rq_runtime(struct cfs_rq *cfs_rq) {}
ab84d31e
PT
4459#endif
4460
029632fb
PZ
4461static inline struct cfs_bandwidth *tg_cfs_bandwidth(struct task_group *tg)
4462{
4463 return NULL;
4464}
4465static inline void destroy_cfs_bandwidth(struct cfs_bandwidth *cfs_b) {}
0e59bdae 4466static inline void update_runtime_enabled(struct rq *rq) {}
a4c96ae3 4467static inline void unthrottle_offline_cfs_rqs(struct rq *rq) {}
029632fb
PZ
4468
4469#endif /* CONFIG_CFS_BANDWIDTH */
4470
bf0f6f24
IM
4471/**************************************************
4472 * CFS operations on tasks:
4473 */
4474
8f4d37ec
PZ
4475#ifdef CONFIG_SCHED_HRTICK
4476static void hrtick_start_fair(struct rq *rq, struct task_struct *p)
4477{
8f4d37ec
PZ
4478 struct sched_entity *se = &p->se;
4479 struct cfs_rq *cfs_rq = cfs_rq_of(se);
4480
9148a3a1 4481 SCHED_WARN_ON(task_rq(p) != rq);
8f4d37ec 4482
8bf46a39 4483 if (rq->cfs.h_nr_running > 1) {
8f4d37ec
PZ
4484 u64 slice = sched_slice(cfs_rq, se);
4485 u64 ran = se->sum_exec_runtime - se->prev_sum_exec_runtime;
4486 s64 delta = slice - ran;
4487
4488 if (delta < 0) {
4489 if (rq->curr == p)
8875125e 4490 resched_curr(rq);
8f4d37ec
PZ
4491 return;
4492 }
31656519 4493 hrtick_start(rq, delta);
8f4d37ec
PZ
4494 }
4495}
a4c2f00f
PZ
4496
4497/*
4498 * called from enqueue/dequeue and updates the hrtick when the
4499 * current task is from our class and nr_running is low enough
4500 * to matter.
4501 */
4502static void hrtick_update(struct rq *rq)
4503{
4504 struct task_struct *curr = rq->curr;
4505
b39e66ea 4506 if (!hrtick_enabled(rq) || curr->sched_class != &fair_sched_class)
a4c2f00f
PZ
4507 return;
4508
4509 if (cfs_rq_of(&curr->se)->nr_running < sched_nr_latency)
4510 hrtick_start_fair(rq, curr);
4511}
55e12e5e 4512#else /* !CONFIG_SCHED_HRTICK */
8f4d37ec
PZ
4513static inline void
4514hrtick_start_fair(struct rq *rq, struct task_struct *p)
4515{
4516}
a4c2f00f
PZ
4517
4518static inline void hrtick_update(struct rq *rq)
4519{
4520}
8f4d37ec
PZ
4521#endif
4522
bf0f6f24
IM
4523/*
4524 * The enqueue_task method is called before nr_running is
4525 * increased. Here we update the fair scheduling stats and
4526 * then put the task into the rbtree:
4527 */
ea87bb78 4528static void
371fd7e7 4529enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
bf0f6f24
IM
4530{
4531 struct cfs_rq *cfs_rq;
62fb1851 4532 struct sched_entity *se = &p->se;
bf0f6f24 4533
8c34ab19
RW
4534 /*
4535 * If in_iowait is set, the code below may not trigger any cpufreq
4536 * utilization updates, so do it here explicitly with the IOWAIT flag
4537 * passed.
4538 */
4539 if (p->in_iowait)
4540 cpufreq_update_this_cpu(rq, SCHED_CPUFREQ_IOWAIT);
4541
bf0f6f24 4542 for_each_sched_entity(se) {
62fb1851 4543 if (se->on_rq)
bf0f6f24
IM
4544 break;
4545 cfs_rq = cfs_rq_of(se);
88ec22d3 4546 enqueue_entity(cfs_rq, se, flags);
85dac906
PT
4547
4548 /*
4549 * end evaluation on encountering a throttled cfs_rq
4550 *
4551 * note: in the case of encountering a throttled cfs_rq we will
4552 * post the final h_nr_running increment below.
e210bffd 4553 */
85dac906
PT
4554 if (cfs_rq_throttled(cfs_rq))
4555 break;
953bfcd1 4556 cfs_rq->h_nr_running++;
85dac906 4557
88ec22d3 4558 flags = ENQUEUE_WAKEUP;
bf0f6f24 4559 }
8f4d37ec 4560
2069dd75 4561 for_each_sched_entity(se) {
0f317143 4562 cfs_rq = cfs_rq_of(se);
953bfcd1 4563 cfs_rq->h_nr_running++;
2069dd75 4564
85dac906
PT
4565 if (cfs_rq_throttled(cfs_rq))
4566 break;
4567
9d89c257 4568 update_load_avg(se, 1);
17bc14b7 4569 update_cfs_shares(cfs_rq);
2069dd75
PZ
4570 }
4571
cd126afe 4572 if (!se)
72465447 4573 add_nr_running(rq, 1);
cd126afe 4574
a4c2f00f 4575 hrtick_update(rq);
bf0f6f24
IM
4576}
4577
2f36825b
VP
4578static void set_next_buddy(struct sched_entity *se);
4579
bf0f6f24
IM
4580/*
4581 * The dequeue_task method is called before nr_running is
4582 * decreased. We remove the task from the rbtree and
4583 * update the fair scheduling stats:
4584 */
371fd7e7 4585static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
bf0f6f24
IM
4586{
4587 struct cfs_rq *cfs_rq;
62fb1851 4588 struct sched_entity *se = &p->se;
2f36825b 4589 int task_sleep = flags & DEQUEUE_SLEEP;
bf0f6f24
IM
4590
4591 for_each_sched_entity(se) {
4592 cfs_rq = cfs_rq_of(se);
371fd7e7 4593 dequeue_entity(cfs_rq, se, flags);
85dac906
PT
4594
4595 /*
4596 * end evaluation on encountering a throttled cfs_rq
4597 *
4598 * note: in the case of encountering a throttled cfs_rq we will
4599 * post the final h_nr_running decrement below.
4600 */
4601 if (cfs_rq_throttled(cfs_rq))
4602 break;
953bfcd1 4603 cfs_rq->h_nr_running--;
2069dd75 4604
bf0f6f24 4605 /* Don't dequeue parent if it has other entities besides us */
2f36825b 4606 if (cfs_rq->load.weight) {
754bd598
KK
4607 /* Avoid re-evaluating load for this entity: */
4608 se = parent_entity(se);
2f36825b
VP
4609 /*
4610 * Bias pick_next to pick a task from this cfs_rq, as
4611 * p is sleeping when it is within its sched_slice.
4612 */
754bd598
KK
4613 if (task_sleep && se && !throttled_hierarchy(cfs_rq))
4614 set_next_buddy(se);
bf0f6f24 4615 break;
2f36825b 4616 }
371fd7e7 4617 flags |= DEQUEUE_SLEEP;
bf0f6f24 4618 }
8f4d37ec 4619
2069dd75 4620 for_each_sched_entity(se) {
0f317143 4621 cfs_rq = cfs_rq_of(se);
953bfcd1 4622 cfs_rq->h_nr_running--;
2069dd75 4623
85dac906
PT
4624 if (cfs_rq_throttled(cfs_rq))
4625 break;
4626
9d89c257 4627 update_load_avg(se, 1);
17bc14b7 4628 update_cfs_shares(cfs_rq);
2069dd75
PZ
4629 }
4630
cd126afe 4631 if (!se)
72465447 4632 sub_nr_running(rq, 1);
cd126afe 4633
a4c2f00f 4634 hrtick_update(rq);
bf0f6f24
IM
4635}
4636
e7693a36 4637#ifdef CONFIG_SMP
10e2f1ac
PZ
4638
4639/* Working cpumask for: load_balance, load_balance_newidle. */
4640DEFINE_PER_CPU(cpumask_var_t, load_balance_mask);
4641DEFINE_PER_CPU(cpumask_var_t, select_idle_mask);
4642
9fd81dd5 4643#ifdef CONFIG_NO_HZ_COMMON
3289bdb4
PZ
4644/*
 4645 * per rq 'load' array crap; XXX kill this.
4646 */
4647
4648/*
d937cdc5 4649 * The exact cpuload calculated at every tick would be:
3289bdb4 4650 *
d937cdc5
PZ
4651 * load' = (1 - 1/2^i) * load + (1/2^i) * cur_load
4652 *
4653 * If a cpu misses updates for n ticks (as it was idle) and update gets
4654 * called on the n+1-th tick when cpu may be busy, then we have:
4655 *
4656 * load_n = (1 - 1/2^i)^n * load_0
4657 * load_n+1 = (1 - 1/2^i) * load_n + (1/2^i) * cur_load
3289bdb4
PZ
4658 *
4659 * decay_load_missed() below does efficient calculation of
3289bdb4 4660 *
d937cdc5
PZ
4661 * load' = (1 - 1/2^i)^n * load
4662 *
4663 * Because x^(n+m) := x^n * x^m we can decompose any x^n in power-of-2 factors.
4664 * This allows us to precompute the above in said factors, thereby allowing the
4665 * reduction of an arbitrary n in O(log_2 n) steps. (See also
4666 * fixed_power_int())
3289bdb4 4667 *
d937cdc5 4668 * The calculation is approximated on a 128 point scale.
3289bdb4
PZ
4669 */
4670#define DEGRADE_SHIFT 7
d937cdc5
PZ
4671
4672static const u8 degrade_zero_ticks[CPU_LOAD_IDX_MAX] = {0, 8, 32, 64, 128};
4673static const u8 degrade_factor[CPU_LOAD_IDX_MAX][DEGRADE_SHIFT + 1] = {
4674 { 0, 0, 0, 0, 0, 0, 0, 0 },
4675 { 64, 32, 8, 0, 0, 0, 0, 0 },
4676 { 96, 72, 40, 12, 1, 0, 0, 0 },
4677 { 112, 98, 75, 43, 15, 1, 0, 0 },
4678 { 120, 112, 98, 76, 45, 16, 2, 0 }
4679};
3289bdb4
PZ
4680
4681/*
4682 * Update cpu_load for any missed ticks, due to tickless idle. The backlog
4683 * would be when CPU is idle and so we just decay the old load without
4684 * adding any new load.
4685 */
4686static unsigned long
4687decay_load_missed(unsigned long load, unsigned long missed_updates, int idx)
4688{
4689 int j = 0;
4690
4691 if (!missed_updates)
4692 return load;
4693
4694 if (missed_updates >= degrade_zero_ticks[idx])
4695 return 0;
4696
4697 if (idx == 1)
4698 return load >> missed_updates;
4699
4700 while (missed_updates) {
4701 if (missed_updates % 2)
4702 load = (load * degrade_factor[idx][j]) >> DEGRADE_SHIFT;
4703
4704 missed_updates >>= 1;
4705 j++;
4706 }
4707 return load;
4708}
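/*
 * Editor's illustration (not part of the original source): for idx == 2
 * the per-tick decay factor is (1 - 1/2^2) = 3/4. Missing 3 ticks means
 * load' = load * (3/4)^3, about 0.42 * load. The loop walks the binary
 * digits of missed_updates (3 = 0b11) and multiplies by the precomputed
 * degrade_factor[2][0] = 96/128 = (3/4)^1 and degrade_factor[2][1] =
 * 72/128 = (3/4)^2, giving the same result in O(log n) multiplications.
 * For idx == 1 the factor is exactly 1/2, so a plain right shift works.
 */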
9fd81dd5 4709#endif /* CONFIG_NO_HZ_COMMON */
3289bdb4 4710
59543275 4711/**
cee1afce 4712 * __cpu_load_update - update the rq->cpu_load[] statistics
59543275
BP
4713 * @this_rq: The rq to update statistics for
4714 * @this_load: The current load
4715 * @pending_updates: The number of missed updates
59543275 4716 *
3289bdb4 4717 * Update rq->cpu_load[] statistics. This function is usually called every
59543275
BP
4718 * scheduler tick (TICK_NSEC).
4719 *
4720 * This function computes a decaying average:
4721 *
4722 * load[i]' = (1 - 1/2^i) * load[i] + (1/2^i) * load
4723 *
 4724 * Because of NOHZ it might not get called on every tick, hence the
 4725 * @pending_updates argument.
4726 *
4727 * load[i]_n = (1 - 1/2^i) * load[i]_n-1 + (1/2^i) * load_n-1
4728 * = A * load[i]_n-1 + B ; A := (1 - 1/2^i), B := (1/2^i) * load
4729 * = A * (A * load[i]_n-2 + B) + B
4730 * = A * (A * (A * load[i]_n-3 + B) + B) + B
4731 * = A^3 * load[i]_n-3 + (A^2 + A + 1) * B
4732 * = A^n * load[i]_0 + (A^(n-1) + A^(n-2) + ... + 1) * B
4733 * = A^n * load[i]_0 + ((1 - A^n) / (1 - A)) * B
4734 * = (1 - 1/2^i)^n * (load[i]_0 - load) + load
4735 *
4736 * In the above we've assumed load_n := load, which is true for NOHZ_FULL as
4737 * any change in load would have resulted in the tick being turned back on.
4738 *
4739 * For regular NOHZ, this reduces to:
4740 *
4741 * load[i]_n = (1 - 1/2^i)^n * load[i]_0
4742 *
 4743 * see decay_load_missed(). For NOHZ_FULL we get to subtract and add the extra
1f41906a 4744 * term.
3289bdb4 4745 */
1f41906a
FW
4746static void cpu_load_update(struct rq *this_rq, unsigned long this_load,
4747 unsigned long pending_updates)
3289bdb4 4748{
9fd81dd5 4749 unsigned long __maybe_unused tickless_load = this_rq->cpu_load[0];
3289bdb4
PZ
4750 int i, scale;
4751
4752 this_rq->nr_load_updates++;
4753
4754 /* Update our load: */
4755 this_rq->cpu_load[0] = this_load; /* Fasttrack for idx 0 */
4756 for (i = 1, scale = 2; i < CPU_LOAD_IDX_MAX; i++, scale += scale) {
4757 unsigned long old_load, new_load;
4758
4759 /* scale is effectively 1 << i now, and >> i divides by scale */
4760
7400d3bb 4761 old_load = this_rq->cpu_load[i];
9fd81dd5 4762#ifdef CONFIG_NO_HZ_COMMON
3289bdb4 4763 old_load = decay_load_missed(old_load, pending_updates - 1, i);
7400d3bb
BP
4764 if (tickless_load) {
4765 old_load -= decay_load_missed(tickless_load, pending_updates - 1, i);
4766 /*
4767 * old_load can never be a negative value because a
4768 * decayed tickless_load cannot be greater than the
4769 * original tickless_load.
4770 */
4771 old_load += tickless_load;
4772 }
9fd81dd5 4773#endif
3289bdb4
PZ
4774 new_load = this_load;
4775 /*
4776 * Round up the averaging division if load is increasing. This
4777 * prevents us from getting stuck on 9 if the load is 10, for
4778 * example.
4779 */
4780 if (new_load > old_load)
4781 new_load += scale - 1;
4782
4783 this_rq->cpu_load[i] = (old_load * (scale - 1) + new_load) >> i;
4784 }
4785
4786 sched_avg_update(this_rq);
4787}
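/*
 * Editor's illustration (hypothetical values, not in the original
 * source): for i == 1 the filter keeps half of the old value. With
 * cpu_load[1] == 10 and an instantaneous load of 20, new_load is
 * rounded up to 21 and cpu_load[1] becomes (10 * 1 + 21) >> 1 = 15.
 * The round-up also stops the average from stalling one below its
 * target: old 9 and load 10 yield (9 + 11) >> 1 = 10 rather than 9.
 */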
4788
7ea241af
YD
4789/* Used instead of source_load when we know the type == 0 */
4790static unsigned long weighted_cpuload(const int cpu)
4791{
4792 return cfs_rq_runnable_load_avg(&cpu_rq(cpu)->cfs);
4793}
4794
3289bdb4 4795#ifdef CONFIG_NO_HZ_COMMON
1f41906a
FW
4796/*
4797 * There is no sane way to deal with nohz on smp when using jiffies because the
4798 * cpu doing the jiffies update might drift wrt the cpu doing the jiffy reading
4799 * causing off-by-one errors in observed deltas; {0,2} instead of {1,1}.
4800 *
4801 * Therefore we need to avoid the delta approach from the regular tick when
4802 * possible since that would seriously skew the load calculation. This is why we
4803 * use cpu_load_update_periodic() for CPUs out of nohz. However we'll rely on
4804 * jiffies deltas for updates happening while in nohz mode (idle ticks, idle
4805 * loop exit, nohz_idle_balance, nohz full exit...)
4806 *
4807 * This means we might still be one tick off for nohz periods.
4808 */
4809
4810static void cpu_load_update_nohz(struct rq *this_rq,
4811 unsigned long curr_jiffies,
4812 unsigned long load)
be68a682
FW
4813{
4814 unsigned long pending_updates;
4815
4816 pending_updates = curr_jiffies - this_rq->last_load_update_tick;
4817 if (pending_updates) {
4818 this_rq->last_load_update_tick = curr_jiffies;
4819 /*
4820 * In the regular NOHZ case, we were idle, this means load 0.
4821 * In the NOHZ_FULL case, we were non-idle, we should consider
4822 * its weighted load.
4823 */
1f41906a 4824 cpu_load_update(this_rq, load, pending_updates);
be68a682
FW
4825 }
4826}
4827
3289bdb4
PZ
4828/*
4829 * Called from nohz_idle_balance() to update the load ratings before doing the
4830 * idle balance.
4831 */
cee1afce 4832static void cpu_load_update_idle(struct rq *this_rq)
3289bdb4 4833{
3289bdb4
PZ
4834 /*
4835 * bail if there's load or we're actually up-to-date.
4836 */
be68a682 4837 if (weighted_cpuload(cpu_of(this_rq)))
3289bdb4
PZ
4838 return;
4839
1f41906a 4840 cpu_load_update_nohz(this_rq, READ_ONCE(jiffies), 0);
3289bdb4
PZ
4841}
4842
4843/*
1f41906a
FW
4844 * Record CPU load on nohz entry so we know the tickless load to account
4845 * on nohz exit. cpu_load[0] happens then to be updated more frequently
4846 * than other cpu_load[idx] but it should be fine as cpu_load readers
 4847 * shouldn't rely on synchronized cpu_load[*] updates.
3289bdb4 4848 */
1f41906a 4849void cpu_load_update_nohz_start(void)
3289bdb4
PZ
4850{
4851 struct rq *this_rq = this_rq();
1f41906a
FW
4852
4853 /*
4854 * This is all lockless but should be fine. If weighted_cpuload changes
4855 * concurrently we'll exit nohz. And cpu_load write can race with
 4856 * cpu_load_update_idle(), but both updaters would be writing the same value.
4857 */
4858 this_rq->cpu_load[0] = weighted_cpuload(cpu_of(this_rq));
4859}
4860
4861/*
4862 * Account the tickless load in the end of a nohz frame.
4863 */
4864void cpu_load_update_nohz_stop(void)
4865{
316c1608 4866 unsigned long curr_jiffies = READ_ONCE(jiffies);
1f41906a
FW
4867 struct rq *this_rq = this_rq();
4868 unsigned long load;
3289bdb4
PZ
4869
4870 if (curr_jiffies == this_rq->last_load_update_tick)
4871 return;
4872
1f41906a 4873 load = weighted_cpuload(cpu_of(this_rq));
3289bdb4 4874 raw_spin_lock(&this_rq->lock);
b52fad2d 4875 update_rq_clock(this_rq);
1f41906a 4876 cpu_load_update_nohz(this_rq, curr_jiffies, load);
3289bdb4
PZ
4877 raw_spin_unlock(&this_rq->lock);
4878}
1f41906a
FW
4879#else /* !CONFIG_NO_HZ_COMMON */
4880static inline void cpu_load_update_nohz(struct rq *this_rq,
4881 unsigned long curr_jiffies,
4882 unsigned long load) { }
4883#endif /* CONFIG_NO_HZ_COMMON */
4884
4885static void cpu_load_update_periodic(struct rq *this_rq, unsigned long load)
4886{
9fd81dd5 4887#ifdef CONFIG_NO_HZ_COMMON
1f41906a
FW
4888 /* See the mess around cpu_load_update_nohz(). */
4889 this_rq->last_load_update_tick = READ_ONCE(jiffies);
9fd81dd5 4890#endif
1f41906a
FW
4891 cpu_load_update(this_rq, load, 1);
4892}
3289bdb4
PZ
4893
4894/*
4895 * Called from scheduler_tick()
4896 */
cee1afce 4897void cpu_load_update_active(struct rq *this_rq)
3289bdb4 4898{
7ea241af 4899 unsigned long load = weighted_cpuload(cpu_of(this_rq));
1f41906a
FW
4900
4901 if (tick_nohz_tick_stopped())
4902 cpu_load_update_nohz(this_rq, READ_ONCE(jiffies), load);
4903 else
4904 cpu_load_update_periodic(this_rq, load);
3289bdb4
PZ
4905}
4906
029632fb
PZ
4907/*
4908 * Return a low guess at the load of a migration-source cpu weighted
4909 * according to the scheduling class and "nice" value.
4910 *
4911 * We want to under-estimate the load of migration sources, to
4912 * balance conservatively.
4913 */
4914static unsigned long source_load(int cpu, int type)
4915{
4916 struct rq *rq = cpu_rq(cpu);
4917 unsigned long total = weighted_cpuload(cpu);
4918
4919 if (type == 0 || !sched_feat(LB_BIAS))
4920 return total;
4921
4922 return min(rq->cpu_load[type-1], total);
4923}
4924
4925/*
4926 * Return a high guess at the load of a migration-target cpu weighted
4927 * according to the scheduling class and "nice" value.
4928 */
4929static unsigned long target_load(int cpu, int type)
4930{
4931 struct rq *rq = cpu_rq(cpu);
4932 unsigned long total = weighted_cpuload(cpu);
4933
4934 if (type == 0 || !sched_feat(LB_BIAS))
4935 return total;
4936
4937 return max(rq->cpu_load[type-1], total);
4938}
4939
ced549fa 4940static unsigned long capacity_of(int cpu)
029632fb 4941{
ced549fa 4942 return cpu_rq(cpu)->cpu_capacity;
029632fb
PZ
4943}
4944
ca6d75e6
VG
4945static unsigned long capacity_orig_of(int cpu)
4946{
4947 return cpu_rq(cpu)->cpu_capacity_orig;
4948}
4949
029632fb
PZ
4950static unsigned long cpu_avg_load_per_task(int cpu)
4951{
4952 struct rq *rq = cpu_rq(cpu);
316c1608 4953 unsigned long nr_running = READ_ONCE(rq->cfs.h_nr_running);
7ea241af 4954 unsigned long load_avg = weighted_cpuload(cpu);
029632fb
PZ
4955
4956 if (nr_running)
b92486cb 4957 return load_avg / nr_running;
029632fb
PZ
4958
4959 return 0;
4960}
4961
bb3469ac 4962#ifdef CONFIG_FAIR_GROUP_SCHED
f5bfb7d9
PZ
4963/*
4964 * effective_load() calculates the load change as seen from the root_task_group
4965 *
4966 * Adding load to a group doesn't make a group heavier, but can cause movement
4967 * of group shares between cpus. Assuming the shares were perfectly aligned one
4968 * can calculate the shift in shares.
cf5f0acf
PZ
4969 *
4970 * Calculate the effective load difference if @wl is added (subtracted) to @tg
4971 * on this @cpu and results in a total addition (subtraction) of @wg to the
4972 * total group weight.
4973 *
4974 * Given a runqueue weight distribution (rw_i) we can compute a shares
4975 * distribution (s_i) using:
4976 *
4977 * s_i = rw_i / \Sum rw_j (1)
4978 *
4979 * Suppose we have 4 CPUs and our @tg is a direct child of the root group and
4980 * has 7 equal weight tasks, distributed as below (rw_i), with the resulting
4981 * shares distribution (s_i):
4982 *
4983 * rw_i = { 2, 4, 1, 0 }
4984 * s_i = { 2/7, 4/7, 1/7, 0 }
4985 *
4986 * As per wake_affine() we're interested in the load of two CPUs (the CPU the
4987 * task used to run on and the CPU the waker is running on), we need to
4988 * compute the effect of waking a task on either CPU and, in case of a sync
4989 * wakeup, compute the effect of the current task going to sleep.
4990 *
4991 * So for a change of @wl to the local @cpu with an overall group weight change
4992 * of @wl we can compute the new shares distribution (s'_i) using:
4993 *
4994 * s'_i = (rw_i + @wl) / (@wg + \Sum rw_j) (2)
4995 *
4996 * Suppose we're interested in CPUs 0 and 1, and want to compute the load
4997 * differences in waking a task to CPU 0. The additional task changes the
4998 * weight and shares distributions like:
4999 *
5000 * rw'_i = { 3, 4, 1, 0 }
5001 * s'_i = { 3/8, 4/8, 1/8, 0 }
5002 *
5003 * We can then compute the difference in effective weight by using:
5004 *
5005 * dw_i = S * (s'_i - s_i) (3)
5006 *
5007 * Where 'S' is the group weight as seen by its parent.
5008 *
5009 * Therefore the effective change in loads on CPU 0 would be 5/56 (3/8 - 2/7)
5010 * times the weight of the group. The effect on CPU 1 would be -4/56 (4/8 -
5011 * 4/7) times the weight of the group.
f5bfb7d9 5012 */
2069dd75 5013static long effective_load(struct task_group *tg, int cpu, long wl, long wg)
bb3469ac 5014{
4be9daaa 5015 struct sched_entity *se = tg->se[cpu];
f1d239f7 5016
9722c2da 5017 if (!tg->parent) /* the trivial, non-cgroup case */
f1d239f7
PZ
5018 return wl;
5019
4be9daaa 5020 for_each_sched_entity(se) {
7dd49125
PZ
5021 struct cfs_rq *cfs_rq = se->my_q;
5022 long W, w = cfs_rq_load_avg(cfs_rq);
4be9daaa 5023
7dd49125 5024 tg = cfs_rq->tg;
bb3469ac 5025
cf5f0acf
PZ
5026 /*
5027 * W = @wg + \Sum rw_j
5028 */
7dd49125
PZ
5029 W = wg + atomic_long_read(&tg->load_avg);
5030
5031 /* Ensure \Sum rw_j >= rw_i */
5032 W -= cfs_rq->tg_load_avg_contrib;
5033 W += w;
4be9daaa 5034
cf5f0acf
PZ
5035 /*
5036 * w = rw_i + @wl
5037 */
7dd49125 5038 w += wl;
940959e9 5039
cf5f0acf
PZ
5040 /*
5041 * wl = S * s'_i; see (2)
5042 */
5043 if (W > 0 && w < W)
ab522e33 5044 wl = (w * (long)scale_load_down(tg->shares)) / W;
977dda7c 5045 else
ab522e33 5046 wl = scale_load_down(tg->shares);
940959e9 5047
cf5f0acf
PZ
5048 /*
5049 * Per the above, wl is the new se->load.weight value; since
5050 * those are clipped to [MIN_SHARES, ...) do so now. See
5051 * calc_cfs_shares().
5052 */
977dda7c
PT
5053 if (wl < MIN_SHARES)
5054 wl = MIN_SHARES;
cf5f0acf
PZ
5055
5056 /*
5057 * wl = dw_i = S * (s'_i - s_i); see (3)
5058 */
9d89c257 5059 wl -= se->avg.load_avg;
cf5f0acf
PZ
5060
5061 /*
5062 * Recursively apply this logic to all parent groups to compute
5063 * the final effective load change on the root group. Since
5064 * only the @tg group gets extra weight, all parent groups can
5065 * only redistribute existing shares. @wl is the shift in shares
5066 * resulting from this level per the above.
5067 */
4be9daaa 5068 wg = 0;
4be9daaa 5069 }
bb3469ac 5070
4be9daaa 5071 return wl;
bb3469ac
PZ
5072}
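/*
 * Worked example (illustrative), continuing the rw_i = { 2, 4, 1, 0 } case
 * from the comment above with unit task weights: waking one extra task on
 * CPU 0 gives s'_0 = 3/8 versus s_0 = 2/7, so the group's weight on CPU 0
 * shifts by dw_0 = S * (3/8 - 2/7) = S * 5/56, while CPU 1 sees
 * dw_1 = S * (4/8 - 4/7) = -S * 4/56. Each loop iteration above computes
 * exactly that: with W = @wg + \Sum rw_j and w = rw_i + @wl, the new
 * per-cpu weight is w * tg->shares / W, from which the old contribution
 * (se->avg.load_avg) is subtracted to obtain dw_i for the next level up.
 */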
5073#else
4be9daaa 5074
58d081b5 5075static long effective_load(struct task_group *tg, int cpu, long wl, long wg)
4be9daaa 5076{
83378269 5077 return wl;
bb3469ac 5078}
4be9daaa 5079
bb3469ac
PZ
5080#endif
5081
c58d25f3
PZ
5082static void record_wakee(struct task_struct *p)
5083{
5084 /*
5085 * Only decay a single time; tasks that have less than 1 wakeup per
5086 * jiffy will not have built up many flips.
5087 */
5088 if (time_after(jiffies, current->wakee_flip_decay_ts + HZ)) {
5089 current->wakee_flips >>= 1;
5090 current->wakee_flip_decay_ts = jiffies;
5091 }
5092
5093 if (current->last_wakee != p) {
5094 current->last_wakee = p;
5095 current->wakee_flips++;
5096 }
5097}
5098
63b0e9ed
MG
5099/*
5100 * Detect M:N waker/wakee relationships via a switching-frequency heuristic.
c58d25f3 5101 *
63b0e9ed 5102 * A waker of many should wake a different task than the one last awakened
c58d25f3
PZ
5103 * at a frequency roughly N times higher than one of its wakees.
5104 *
5105 * In order to determine whether we should let the load spread vs consolidating
5106 * to shared cache, we look for a minimum 'flip' frequency of llc_size in one
5107 * partner, and a factor of llc_size higher frequency in the other.
5108 *
5109 * With both conditions met, we can be relatively sure that the relationship is
5110 * non-monogamous, with partner count exceeding socket size.
5111 *
5112 * Whether the waker/wakee pair is client/server, worker/dispatcher,
5113 * interrupt source or whatever is irrelevant; the spread criterion is
5114 * simply that the apparent partner count exceeds the socket size.
63b0e9ed 5115 */
62470419
MW
5116static int wake_wide(struct task_struct *p)
5117{
63b0e9ed
MG
5118 unsigned int master = current->wakee_flips;
5119 unsigned int slave = p->wakee_flips;
7d9ffa89 5120 int factor = this_cpu_read(sd_llc_size);
62470419 5121
63b0e9ed
MG
5122 if (master < slave)
5123 swap(master, slave);
5124 if (slave < factor || master < slave * factor)
5125 return 0;
5126 return 1;
62470419
MW
5127}
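/*
 * Illustrative numbers (hypothetical): on an LLC of 4 CPUs
 * (sd_llc_size == 4), a dispatcher with wakee_flips == 20 waking a worker
 * with wakee_flips == 5 gives master = 20, slave = 5. Since
 * slave >= factor (5 >= 4) and master >= slave * factor (20 >= 20),
 * wake_wide() returns 1 and the caller drops the wake-affine fast path so
 * the many partners can spread instead of piling onto the waker's cache
 * domain.
 */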
5128
772bd008
MR
5129static int wake_affine(struct sched_domain *sd, struct task_struct *p,
5130 int prev_cpu, int sync)
098fb9db 5131{
e37b6a7b 5132 s64 this_load, load;
bd61c98f 5133 s64 this_eff_load, prev_eff_load;
772bd008 5134 int idx, this_cpu;
c88d5910 5135 struct task_group *tg;
83378269 5136 unsigned long weight;
b3137bc8 5137 int balanced;
098fb9db 5138
c88d5910
PZ
5139 idx = sd->wake_idx;
5140 this_cpu = smp_processor_id();
c88d5910
PZ
5141 load = source_load(prev_cpu, idx);
5142 this_load = target_load(this_cpu, idx);
098fb9db 5143
b3137bc8
MG
5144 /*
5145 * If sync wakeup then subtract the (maximum possible)
5146 * effect of the currently running task from the load
5147 * of the current CPU:
5148 */
83378269
PZ
5149 if (sync) {
5150 tg = task_group(current);
9d89c257 5151 weight = current->se.avg.load_avg;
83378269 5152
c88d5910 5153 this_load += effective_load(tg, this_cpu, -weight, -weight);
83378269
PZ
5154 load += effective_load(tg, prev_cpu, 0, -weight);
5155 }
b3137bc8 5156
83378269 5157 tg = task_group(p);
9d89c257 5158 weight = p->se.avg.load_avg;
b3137bc8 5159
71a29aa7
PZ
5160 /*
5161 * In low-load situations, where prev_cpu is idle and this_cpu is idle
c88d5910
PZ
5162 * due to the sync cause above having dropped this_load to 0, we'll
5163 * always have an imbalance, but there's really nothing you can do
5164 * about that, so that's good too.
71a29aa7
PZ
5165 *
5166 * Otherwise check if the two cpus are near enough in load to allow this
5167 * task to be woken on this_cpu.
5168 */
bd61c98f
VG
5169 this_eff_load = 100;
5170 this_eff_load *= capacity_of(prev_cpu);
e51fd5e2 5171
bd61c98f
VG
5172 prev_eff_load = 100 + (sd->imbalance_pct - 100) / 2;
5173 prev_eff_load *= capacity_of(this_cpu);
e51fd5e2 5174
bd61c98f 5175 if (this_load > 0) {
e51fd5e2
PZ
5176 this_eff_load *= this_load +
5177 effective_load(tg, this_cpu, weight, weight);
5178
e51fd5e2 5179 prev_eff_load *= load + effective_load(tg, prev_cpu, 0, weight);
bd61c98f 5180 }
e51fd5e2 5181
bd61c98f 5182 balanced = this_eff_load <= prev_eff_load;
098fb9db 5183
ae92882e 5184 schedstat_inc(p->se.statistics.nr_wakeups_affine_attempts);
098fb9db 5185
05bfb65f
VG
5186 if (!balanced)
5187 return 0;
098fb9db 5188
ae92882e
JP
5189 schedstat_inc(sd->ttwu_move_affine);
5190 schedstat_inc(p->se.statistics.nr_wakeups_affine);
05bfb65f
VG
5191
5192 return 1;
098fb9db
IM
5193}
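/*
 * Rough numeric sketch (illustrative): with sd->imbalance_pct == 125 the
 * previous CPU's side is weighted by 100 + (125 - 100) / 2 = 112 against
 * 100 for this_cpu, each side additionally scaled by the other CPU's
 * capacity. Pulling the wakee is therefore considered balanced as long as
 * this_cpu's effective load including the wakee does not exceed
 * prev_cpu's by more than ~12%. A sync wakeup first subtracts the waker's
 * own load from this_cpu, since the waker is about to sleep.
 */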
5194
aaee1203
PZ
5195/*
5196 * find_idlest_group finds and returns the least busy CPU group within the
5197 * domain.
5198 */
5199static struct sched_group *
78e7ed53 5200find_idlest_group(struct sched_domain *sd, struct task_struct *p,
c44f2a02 5201 int this_cpu, int sd_flag)
e7693a36 5202{
b3bd3de6 5203 struct sched_group *idlest = NULL, *group = sd->groups;
aaee1203 5204 unsigned long min_load = ULONG_MAX, this_load = 0;
c44f2a02 5205 int load_idx = sd->forkexec_idx;
aaee1203 5206 int imbalance = 100 + (sd->imbalance_pct-100)/2;
e7693a36 5207
c44f2a02
VG
5208 if (sd_flag & SD_BALANCE_WAKE)
5209 load_idx = sd->wake_idx;
5210
aaee1203
PZ
5211 do {
5212 unsigned long load, avg_load;
5213 int local_group;
5214 int i;
e7693a36 5215
aaee1203
PZ
5216 /* Skip over this group if it has no CPUs allowed */
5217 if (!cpumask_intersects(sched_group_cpus(group),
fa17b507 5218 tsk_cpus_allowed(p)))
aaee1203
PZ
5219 continue;
5220
5221 local_group = cpumask_test_cpu(this_cpu,
5222 sched_group_cpus(group));
5223
5224 /* Tally up the load of all CPUs in the group */
5225 avg_load = 0;
5226
5227 for_each_cpu(i, sched_group_cpus(group)) {
5228 /* Bias balancing toward cpus of our domain */
5229 if (local_group)
5230 load = source_load(i, load_idx);
5231 else
5232 load = target_load(i, load_idx);
5233
5234 avg_load += load;
5235 }
5236
63b2ca30 5237 /* Adjust by relative CPU capacity of the group */
ca8ce3d0 5238 avg_load = (avg_load * SCHED_CAPACITY_SCALE) / group->sgc->capacity;
aaee1203
PZ
5239
5240 if (local_group) {
5241 this_load = avg_load;
aaee1203
PZ
5242 } else if (avg_load < min_load) {
5243 min_load = avg_load;
5244 idlest = group;
5245 }
5246 } while (group = group->next, group != sd->groups);
5247
5248 if (!idlest || 100*this_load < imbalance*min_load)
5249 return NULL;
5250 return idlest;
5251}
5252
5253/*
5254 * find_idlest_cpu - find the idlest cpu among the cpus in group.
5255 */
5256static int
5257find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu)
5258{
5259 unsigned long load, min_load = ULONG_MAX;
83a0a96a
NP
5260 unsigned int min_exit_latency = UINT_MAX;
5261 u64 latest_idle_timestamp = 0;
5262 int least_loaded_cpu = this_cpu;
5263 int shallowest_idle_cpu = -1;
aaee1203
PZ
5264 int i;
5265
eaecf41f
MR
5266 /* Check if we have any choice: */
5267 if (group->group_weight == 1)
5268 return cpumask_first(sched_group_cpus(group));
5269
aaee1203 5270 /* Traverse only the allowed CPUs */
fa17b507 5271 for_each_cpu_and(i, sched_group_cpus(group), tsk_cpus_allowed(p)) {
83a0a96a
NP
5272 if (idle_cpu(i)) {
5273 struct rq *rq = cpu_rq(i);
5274 struct cpuidle_state *idle = idle_get_state(rq);
5275 if (idle && idle->exit_latency < min_exit_latency) {
5276 /*
5277 * We give priority to a CPU whose idle state
5278 * has the smallest exit latency irrespective
5279 * of any idle timestamp.
5280 */
5281 min_exit_latency = idle->exit_latency;
5282 latest_idle_timestamp = rq->idle_stamp;
5283 shallowest_idle_cpu = i;
5284 } else if ((!idle || idle->exit_latency == min_exit_latency) &&
5285 rq->idle_stamp > latest_idle_timestamp) {
5286 /*
5287 * If equal or no active idle state, then
5288 * the most recently idled CPU might have
5289 * a warmer cache.
5290 */
5291 latest_idle_timestamp = rq->idle_stamp;
5292 shallowest_idle_cpu = i;
5293 }
9f96742a 5294 } else if (shallowest_idle_cpu == -1) {
83a0a96a
NP
5295 load = weighted_cpuload(i);
5296 if (load < min_load || (load == min_load && i == this_cpu)) {
5297 min_load = load;
5298 least_loaded_cpu = i;
5299 }
e7693a36
GH
5300 }
5301 }
5302
83a0a96a 5303 return shallowest_idle_cpu != -1 ? shallowest_idle_cpu : least_loaded_cpu;
aaee1203 5304}
e7693a36 5305
a50bde51 5306/*
10e2f1ac
PZ
5307 * Implement a for_each_cpu() variant that starts the scan at a given cpu
5308 * (@start), and wraps around.
5309 *
5310 * This is used to scan for idle CPUs; such that not all CPUs looking for an
5311 * idle CPU find the same CPU. The down-side is that tasks tend to cycle
5312 * through the LLC domain.
5313 *
5314 * tbench in particular has been found to be sensitive to this.
5315 */
5316
5317static int cpumask_next_wrap(int n, const struct cpumask *mask, int start, int *wrapped)
5318{
5319 int next;
5320
5321again:
5322 next = find_next_bit(cpumask_bits(mask), nr_cpumask_bits, n+1);
5323
5324 if (*wrapped) {
5325 if (next >= start)
5326 return nr_cpumask_bits;
5327 } else {
5328 if (next >= nr_cpumask_bits) {
5329 *wrapped = 1;
5330 n = -1;
5331 goto again;
5332 }
5333 }
5334
5335 return next;
5336}
5337
5338#define for_each_cpu_wrap(cpu, mask, start, wrap) \
5339 for ((wrap) = 0, (cpu) = (start)-1; \
5340 (cpu) = cpumask_next_wrap((cpu), (mask), (start), &(wrap)), \
5341 (cpu) < nr_cpumask_bits; )
5342
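/*
 * Usage sketch (mirrors select_idle_cpu() below): scan 'mask' starting at
 * 'target', wrapping around once, and stop at the first idle CPU:
 *
 *	int cpu, wrap;
 *
 *	for_each_cpu_wrap(cpu, mask, target, wrap) {
 *		if (idle_cpu(cpu))
 *			break;
 *	}
 *
 * If no CPU in the mask is idle, the loop terminates with
 * cpu == nr_cpumask_bits.
 */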
5343#ifdef CONFIG_SCHED_SMT
5344
5345static inline void set_idle_cores(int cpu, int val)
5346{
5347 struct sched_domain_shared *sds;
5348
5349 sds = rcu_dereference(per_cpu(sd_llc_shared, cpu));
5350 if (sds)
5351 WRITE_ONCE(sds->has_idle_cores, val);
5352}
5353
5354static inline bool test_idle_cores(int cpu, bool def)
5355{
5356 struct sched_domain_shared *sds;
5357
5358 sds = rcu_dereference(per_cpu(sd_llc_shared, cpu));
5359 if (sds)
5360 return READ_ONCE(sds->has_idle_cores);
5361
5362 return def;
5363}
5364
5365/*
5366 * Scans the local SMT mask to see if the entire core is idle, and records this
5367 * information in sd_llc_shared->has_idle_cores.
5368 *
5369 * Since SMT siblings share all cache levels, inspecting this limited remote
5370 * state should be fairly cheap.
5371 */
1b568f0a 5372void __update_idle_core(struct rq *rq)
10e2f1ac
PZ
5373{
5374 int core = cpu_of(rq);
5375 int cpu;
5376
5377 rcu_read_lock();
5378 if (test_idle_cores(core, true))
5379 goto unlock;
5380
5381 for_each_cpu(cpu, cpu_smt_mask(core)) {
5382 if (cpu == core)
5383 continue;
5384
5385 if (!idle_cpu(cpu))
5386 goto unlock;
5387 }
5388
5389 set_idle_cores(core, 1);
5390unlock:
5391 rcu_read_unlock();
5392}
5393
5394/*
5395 * Scan the entire LLC domain for idle cores; this dynamically switches off if
5396 * there are no idle cores left in the system; tracked through
5397 * sd_llc->shared->has_idle_cores and enabled through update_idle_core() above.
5398 */
5399static int select_idle_core(struct task_struct *p, struct sched_domain *sd, int target)
5400{
5401 struct cpumask *cpus = this_cpu_cpumask_var_ptr(select_idle_mask);
5402 int core, cpu, wrap;
5403
1b568f0a
PZ
5404 if (!static_branch_likely(&sched_smt_present))
5405 return -1;
5406
10e2f1ac
PZ
5407 if (!test_idle_cores(target, false))
5408 return -1;
5409
5410 cpumask_and(cpus, sched_domain_span(sd), tsk_cpus_allowed(p));
5411
5412 for_each_cpu_wrap(core, cpus, target, wrap) {
5413 bool idle = true;
5414
5415 for_each_cpu(cpu, cpu_smt_mask(core)) {
5416 cpumask_clear_cpu(cpu, cpus);
5417 if (!idle_cpu(cpu))
5418 idle = false;
5419 }
5420
5421 if (idle)
5422 return core;
5423 }
5424
5425 /*
5426 * Failed to find an idle core; stop looking for one.
5427 */
5428 set_idle_cores(target, 0);
5429
5430 return -1;
5431}
5432
5433/*
5434 * Scan the local SMT mask for idle CPUs.
5435 */
5436static int select_idle_smt(struct task_struct *p, struct sched_domain *sd, int target)
5437{
5438 int cpu;
5439
1b568f0a
PZ
5440 if (!static_branch_likely(&sched_smt_present))
5441 return -1;
5442
10e2f1ac
PZ
5443 for_each_cpu(cpu, cpu_smt_mask(target)) {
5444 if (!cpumask_test_cpu(cpu, tsk_cpus_allowed(p)))
5445 continue;
5446 if (idle_cpu(cpu))
5447 return cpu;
5448 }
5449
5450 return -1;
5451}
5452
5453#else /* CONFIG_SCHED_SMT */
5454
5455static inline int select_idle_core(struct task_struct *p, struct sched_domain *sd, int target)
5456{
5457 return -1;
5458}
5459
5460static inline int select_idle_smt(struct task_struct *p, struct sched_domain *sd, int target)
5461{
5462 return -1;
5463}
5464
5465#endif /* CONFIG_SCHED_SMT */
5466
5467/*
5468 * Scan the LLC domain for idle CPUs; this is dynamically regulated by
5469 * comparing the average scan cost (tracked in sd->avg_scan_cost) against the
5470 * average idle time for this rq (as found in rq->avg_idle).
a50bde51 5471 */
10e2f1ac
PZ
5472static int select_idle_cpu(struct task_struct *p, struct sched_domain *sd, int target)
5473{
9cfb38a7
WL
5474 struct sched_domain *this_sd;
5475 u64 avg_cost, avg_idle = this_rq()->avg_idle;
10e2f1ac
PZ
5476 u64 time, cost;
5477 s64 delta;
5478 int cpu, wrap;
5479
9cfb38a7
WL
5480 this_sd = rcu_dereference(*this_cpu_ptr(&sd_llc));
5481 if (!this_sd)
5482 return -1;
5483
5484 avg_cost = this_sd->avg_scan_cost;
5485
10e2f1ac
PZ
5486 /*
5487 * Due to large variance we need a large fuzz factor; hackbench in
5488 * particular is sensitive here.
5489 */
5490 if ((avg_idle / 512) < avg_cost)
5491 return -1;
5492
5493 time = local_clock();
5494
5495 for_each_cpu_wrap(cpu, sched_domain_span(sd), target, wrap) {
5496 if (!cpumask_test_cpu(cpu, tsk_cpus_allowed(p)))
5497 continue;
5498 if (idle_cpu(cpu))
5499 break;
5500 }
5501
5502 time = local_clock() - time;
5503 cost = this_sd->avg_scan_cost;
5504 delta = (s64)(time - cost) / 8;
5505 this_sd->avg_scan_cost += delta;
5506
5507 return cpu;
5508}
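/*
 * Illustrative numbers: if this rq's avg_idle is 500us while the LLC's
 * avg_scan_cost is 2us, then avg_idle / 512 (< 1us) is below avg_cost and
 * the scan is skipped -- the CPU is not expected to stay idle long enough
 * to amortize the search. When a scan does run, its measured duration is
 * folded back into avg_scan_cost with a 1/8 weight.
 */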
5509
5510/*
5511 * Try and locate an idle core/thread in the LLC cache domain.
a50bde51 5512 */
772bd008 5513static int select_idle_sibling(struct task_struct *p, int prev, int target)
a50bde51 5514{
99bd5e2f 5515 struct sched_domain *sd;
10e2f1ac 5516 int i;
a50bde51 5517
e0a79f52
MG
5518 if (idle_cpu(target))
5519 return target;
99bd5e2f
SS
5520
5521 /*
10e2f1ac 5522 * If the previous cpu is cache affine and idle, don't be stupid.
99bd5e2f 5523 */
772bd008
MR
5524 if (prev != target && cpus_share_cache(prev, target) && idle_cpu(prev))
5525 return prev;
a50bde51 5526
518cd623 5527 sd = rcu_dereference(per_cpu(sd_llc, target));
10e2f1ac
PZ
5528 if (!sd)
5529 return target;
772bd008 5530
10e2f1ac
PZ
5531 i = select_idle_core(p, sd, target);
5532 if ((unsigned)i < nr_cpumask_bits)
5533 return i;
37407ea7 5534
10e2f1ac
PZ
5535 i = select_idle_cpu(p, sd, target);
5536 if ((unsigned)i < nr_cpumask_bits)
5537 return i;
5538
5539 i = select_idle_smt(p, sd, target);
5540 if ((unsigned)i < nr_cpumask_bits)
5541 return i;
970e1789 5542
a50bde51
PZ
5543 return target;
5544}
231678b7 5545
8bb5b00c 5546/*
9e91d61d 5547 * cpu_util returns the amount of capacity of a CPU that is used by CFS
8bb5b00c 5548 * tasks. The unit of the return value must be the one of capacity so we can
9e91d61d
DE
5549 * compare the utilization with the capacity of the CPU that is available for
5550 * CFS task (ie cpu_capacity).
231678b7
DE
5551 *
5552 * cfs_rq.avg.util_avg is the sum of running time of runnable tasks plus the
5553 * recent utilization of currently non-runnable tasks on a CPU. It represents
5554 * the amount of utilization of a CPU in the range [0..capacity_orig] where
5555 * capacity_orig is the cpu_capacity available at the highest frequency
5556 * (arch_scale_freq_capacity()).
5557 * The utilization of a CPU converges towards a sum equal to or less than the
5558 * current capacity (capacity_curr <= capacity_orig) of the CPU because it is
5559 * the running time on this CPU scaled by capacity_curr.
5560 *
5561 * Nevertheless, cfs_rq.avg.util_avg can be higher than capacity_curr or even
5562 * higher than capacity_orig because of unfortunate rounding in
5563 * cfs.avg.util_avg or just after migrating tasks and new task wakeups until
5564 * the average stabilizes with the new running time. We need to check that the
5565 * utilization stays within the range of [0..capacity_orig] and cap it if
5566 * necessary. Without utilization capping, a group could be seen as overloaded
5567 * (CPU0 utilization at 121% + CPU1 utilization at 80%) whereas CPU1 has 20% of
5568 * available capacity. We allow utilization to overshoot capacity_curr (but not
5569 * capacity_orig) as it is useful for predicting the capacity required after task
5570 * migrations (scheduler-driven DVFS).
8bb5b00c 5571 */
9e91d61d 5572static int cpu_util(int cpu)
8bb5b00c 5573{
9e91d61d 5574 unsigned long util = cpu_rq(cpu)->cfs.avg.util_avg;
8bb5b00c
VG
5575 unsigned long capacity = capacity_orig_of(cpu);
5576
231678b7 5577 return (util >= capacity) ? capacity : util;
8bb5b00c 5578}
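/*
 * Example (illustrative): a CPU with capacity_orig_of() == 1024 whose
 * cfs.avg.util_avg momentarily reads 1100 -- e.g. right after a task
 * migrated in -- is reported as 1024 (fully utilized) rather than >100%,
 * per the capping described above.
 */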
a50bde51 5579
3273163c
MR
5580static inline int task_util(struct task_struct *p)
5581{
5582 return p->se.avg.util_avg;
5583}
5584
5585/*
5586 * Disable WAKE_AFFINE in the case where task @p doesn't fit in the
5587 * capacity of either the waking CPU @cpu or the previous CPU @prev_cpu.
5588 *
5589 * In that case WAKE_AFFINE doesn't make sense and we'll let
5590 * BALANCE_WAKE sort things out.
5591 */
5592static int wake_cap(struct task_struct *p, int cpu, int prev_cpu)
5593{
5594 long min_cap, max_cap;
5595
5596 min_cap = min(capacity_orig_of(prev_cpu), capacity_orig_of(cpu));
5597 max_cap = cpu_rq(cpu)->rd->max_cpu_capacity;
5598
5599 /* Minimum capacity is close to max, no need to abort wake_affine */
5600 if (max_cap - min_cap < max_cap >> 3)
5601 return 0;
5602
5603 return min_cap * 1024 < task_util(p) * capacity_margin;
5604}
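/*
 * Example (illustrative, assuming capacity_margin == 1280, i.e. ~20%
 * headroom): prev_cpu is a little CPU with capacity_orig 430, the waker
 * runs on a big CPU with capacity_orig 1024. min_cap = 430, max_cap = 1024
 * and the gap (594) exceeds max_cap >> 3 (128), so the utilization test
 * applies. A task with task_util(p) == 400 gives
 * 430 * 1024 = 440320 < 400 * 1280 = 512000, so wake_cap() returns 1 and
 * the wake-affine fast path is skipped in favour of a full
 * find_idlest_group() scan.
 */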
5605
aaee1203 5606/*
de91b9cb
MR
5607 * select_task_rq_fair: Select target runqueue for the waking task in domains
5608 * that have the 'sd_flag' flag set. In practice, this is SD_BALANCE_WAKE,
5609 * SD_BALANCE_FORK, or SD_BALANCE_EXEC.
aaee1203 5610 *
de91b9cb
MR
5611 * Balances load by selecting the idlest cpu in the idlest group, or under
5612 * certain conditions an idle sibling cpu if the domain has SD_WAKE_AFFINE set.
aaee1203 5613 *
de91b9cb 5614 * Returns the target cpu number.
aaee1203
PZ
5615 *
5616 * preempt must be disabled.
5617 */
0017d735 5618static int
ac66f547 5619select_task_rq_fair(struct task_struct *p, int prev_cpu, int sd_flag, int wake_flags)
aaee1203 5620{
29cd8bae 5621 struct sched_domain *tmp, *affine_sd = NULL, *sd = NULL;
c88d5910 5622 int cpu = smp_processor_id();
63b0e9ed 5623 int new_cpu = prev_cpu;
99bd5e2f 5624 int want_affine = 0;
5158f4e4 5625 int sync = wake_flags & WF_SYNC;
c88d5910 5626
c58d25f3
PZ
5627 if (sd_flag & SD_BALANCE_WAKE) {
5628 record_wakee(p);
3273163c
MR
5629 want_affine = !wake_wide(p) && !wake_cap(p, cpu, prev_cpu)
5630 && cpumask_test_cpu(cpu, tsk_cpus_allowed(p));
c58d25f3 5631 }
aaee1203 5632
dce840a0 5633 rcu_read_lock();
aaee1203 5634 for_each_domain(cpu, tmp) {
e4f42888 5635 if (!(tmp->flags & SD_LOAD_BALANCE))
63b0e9ed 5636 break;
e4f42888 5637
fe3bcfe1 5638 /*
99bd5e2f
SS
5639 * If both cpu and prev_cpu are part of this domain,
5640 * cpu is a valid SD_WAKE_AFFINE target.
fe3bcfe1 5641 */
99bd5e2f
SS
5642 if (want_affine && (tmp->flags & SD_WAKE_AFFINE) &&
5643 cpumask_test_cpu(prev_cpu, sched_domain_span(tmp))) {
5644 affine_sd = tmp;
29cd8bae 5645 break;
f03542a7 5646 }
29cd8bae 5647
f03542a7 5648 if (tmp->flags & sd_flag)
29cd8bae 5649 sd = tmp;
63b0e9ed
MG
5650 else if (!want_affine)
5651 break;
29cd8bae
PZ
5652 }
5653
63b0e9ed
MG
5654 if (affine_sd) {
5655 sd = NULL; /* Prefer wake_affine over balance flags */
772bd008 5656 if (cpu != prev_cpu && wake_affine(affine_sd, p, prev_cpu, sync))
63b0e9ed 5657 new_cpu = cpu;
8b911acd 5658 }
e7693a36 5659
63b0e9ed
MG
5660 if (!sd) {
5661 if (sd_flag & SD_BALANCE_WAKE) /* XXX always ? */
772bd008 5662 new_cpu = select_idle_sibling(p, prev_cpu, new_cpu);
63b0e9ed
MG
5663
5664 } else while (sd) {
aaee1203 5665 struct sched_group *group;
c88d5910 5666 int weight;
098fb9db 5667
0763a660 5668 if (!(sd->flags & sd_flag)) {
aaee1203
PZ
5669 sd = sd->child;
5670 continue;
5671 }
098fb9db 5672
c44f2a02 5673 group = find_idlest_group(sd, p, cpu, sd_flag);
aaee1203
PZ
5674 if (!group) {
5675 sd = sd->child;
5676 continue;
5677 }
4ae7d5ce 5678
d7c33c49 5679 new_cpu = find_idlest_cpu(group, p, cpu);
aaee1203
PZ
5680 if (new_cpu == -1 || new_cpu == cpu) {
5681 /* Now try balancing at a lower domain level of cpu */
5682 sd = sd->child;
5683 continue;
e7693a36 5684 }
aaee1203
PZ
5685
5686 /* Now try balancing at a lower domain level of new_cpu */
5687 cpu = new_cpu;
669c55e9 5688 weight = sd->span_weight;
aaee1203
PZ
5689 sd = NULL;
5690 for_each_domain(cpu, tmp) {
669c55e9 5691 if (weight <= tmp->span_weight)
aaee1203 5692 break;
0763a660 5693 if (tmp->flags & sd_flag)
aaee1203
PZ
5694 sd = tmp;
5695 }
5696 /* while loop will break here if sd == NULL */
e7693a36 5697 }
dce840a0 5698 rcu_read_unlock();
e7693a36 5699
c88d5910 5700 return new_cpu;
e7693a36 5701}
0a74bef8
PT
5702
5703/*
5704 * Called immediately before a task is migrated to a new cpu; task_cpu(p) and
5705 * cfs_rq_of(p) references at time of call are still valid and identify the
525628c7 5706 * previous cpu. The caller guarantees p->pi_lock or task_rq(p)->lock is held.
0a74bef8 5707 */
5a4fd036 5708static void migrate_task_rq_fair(struct task_struct *p)
0a74bef8 5709{
59efa0ba
PZ
5710 /*
5711 * As blocked tasks retain absolute vruntime the migration needs to
5712 * deal with this by subtracting the old and adding the new
5713 * min_vruntime -- the latter is done by enqueue_entity() when placing
5714 * the task on the new runqueue.
5715 */
5716 if (p->state == TASK_WAKING) {
5717 struct sched_entity *se = &p->se;
5718 struct cfs_rq *cfs_rq = cfs_rq_of(se);
5719 u64 min_vruntime;
5720
5721#ifndef CONFIG_64BIT
5722 u64 min_vruntime_copy;
5723
5724 do {
5725 min_vruntime_copy = cfs_rq->min_vruntime_copy;
5726 smp_rmb();
5727 min_vruntime = cfs_rq->min_vruntime;
5728 } while (min_vruntime != min_vruntime_copy);
5729#else
5730 min_vruntime = cfs_rq->min_vruntime;
5731#endif
5732
5733 se->vruntime -= min_vruntime;
5734 }
5735
aff3e498 5736 /*
9d89c257
YD
5737 * We are supposed to update the task to "current" time, so that it is up to
5738 * date and ready to go to the new CPU/cfs_rq. But it is hard to get hold of
5739 * what the current time is here, so simply throw away the out-of-date time.
5740 * This leaves the wakee task less decayed than it should be, but giving the
5741 * wakee a bit more load is an acceptable trade-off.
aff3e498 5742 */
9d89c257
YD
5743 remove_entity_load_avg(&p->se);
5744
5745 /* Tell new CPU we are migrated */
5746 p->se.avg.last_update_time = 0;
3944a927
BS
5747
5748 /* We have migrated, no longer consider this task hot */
9d89c257 5749 p->se.exec_start = 0;
0a74bef8 5750}
12695578
YD
5751
5752static void task_dead_fair(struct task_struct *p)
5753{
5754 remove_entity_load_avg(&p->se);
5755}
e7693a36
GH
5756#endif /* CONFIG_SMP */
5757
e52fb7c0
PZ
5758static unsigned long
5759wakeup_gran(struct sched_entity *curr, struct sched_entity *se)
0bbd3336
PZ
5760{
5761 unsigned long gran = sysctl_sched_wakeup_granularity;
5762
5763 /*
e52fb7c0
PZ
5764 * Since it's curr that is running now, convert the gran from real-time
5765 * to virtual-time in its units.
13814d42
MG
5766 *
5767 * By using 'se' instead of 'curr' we penalize light tasks, so
5768 * they get preempted easier. That is, if 'se' < 'curr' then
5769 * the resulting gran will be larger, therefore penalizing the
5770 * lighter, if otoh 'se' > 'curr' then the resulting gran will
5771 * be smaller, again penalizing the lighter task.
5772 *
5773 * This is especially important for buddies when the leftmost
5774 * task is higher priority than the buddy.
0bbd3336 5775 */
f4ad9bd2 5776 return calc_delta_fair(gran, se);
0bbd3336
PZ
5777}
5778
464b7527
PZ
5779/*
5780 * Should 'se' preempt 'curr'.
5781 *
5782 * Relative to curr's vruntime 'c' and the wakeup granularity 'g' (the
5783 * window |<--->| immediately to the left of c):
5784 *
5785 * s1 lies at or beyond c, s2 lies within one gran behind c, and
5786 * s3 lies more than one gran behind c, giving:
5787 *
5788 * w(c, s1) = -1
5789 * w(c, s2) = 0
5790 * w(c, s3) = 1
5791 *
5792 */
5793static int
5794wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se)
5795{
5796 s64 gran, vdiff = curr->vruntime - se->vruntime;
5797
5798 if (vdiff <= 0)
5799 return -1;
5800
e52fb7c0 5801 gran = wakeup_gran(curr, se);
464b7527
PZ
5802 if (vdiff > gran)
5803 return 1;
5804
5805 return 0;
5806}
5807
02479099
PZ
5808static void set_last_buddy(struct sched_entity *se)
5809{
69c80f3e
VP
5810 if (entity_is_task(se) && unlikely(task_of(se)->policy == SCHED_IDLE))
5811 return;
5812
5813 for_each_sched_entity(se)
5814 cfs_rq_of(se)->last = se;
02479099
PZ
5815}
5816
5817static void set_next_buddy(struct sched_entity *se)
5818{
69c80f3e
VP
5819 if (entity_is_task(se) && unlikely(task_of(se)->policy == SCHED_IDLE))
5820 return;
5821
5822 for_each_sched_entity(se)
5823 cfs_rq_of(se)->next = se;
02479099
PZ
5824}
5825
ac53db59
RR
5826static void set_skip_buddy(struct sched_entity *se)
5827{
69c80f3e
VP
5828 for_each_sched_entity(se)
5829 cfs_rq_of(se)->skip = se;
ac53db59
RR
5830}
5831
bf0f6f24
IM
5832/*
5833 * Preempt the current task with a newly woken task if needed:
5834 */
5a9b86f6 5835static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_flags)
bf0f6f24
IM
5836{
5837 struct task_struct *curr = rq->curr;
8651a86c 5838 struct sched_entity *se = &curr->se, *pse = &p->se;
03e89e45 5839 struct cfs_rq *cfs_rq = task_cfs_rq(curr);
f685ceac 5840 int scale = cfs_rq->nr_running >= sched_nr_latency;
2f36825b 5841 int next_buddy_marked = 0;
bf0f6f24 5842
4ae7d5ce
IM
5843 if (unlikely(se == pse))
5844 return;
5845
5238cdd3 5846 /*
163122b7 5847 * This is possible from callers such as attach_tasks(), in which we
5238cdd3
PT
5848 * unconditionally check_preempt_curr() after an enqueue (which may have
5849 * led to a throttle). This both saves work and prevents false
5850 * next-buddy nomination below.
5851 */
5852 if (unlikely(throttled_hierarchy(cfs_rq_of(pse))))
5853 return;
5854
2f36825b 5855 if (sched_feat(NEXT_BUDDY) && scale && !(wake_flags & WF_FORK)) {
3cb63d52 5856 set_next_buddy(pse);
2f36825b
VP
5857 next_buddy_marked = 1;
5858 }
57fdc26d 5859
aec0a514
BR
5860 /*
5861 * We can come here with TIF_NEED_RESCHED already set from new task
5862 * wake up path.
5238cdd3
PT
5863 *
5864 * Note: this also catches the edge-case of curr being in a throttled
5865 * group (e.g. via set_curr_task), since update_curr() (in the
5866 * enqueue of curr) will have resulted in resched being set. This
5867 * prevents us from potentially nominating it as a false LAST_BUDDY
5868 * below.
aec0a514
BR
5869 */
5870 if (test_tsk_need_resched(curr))
5871 return;
5872
a2f5c9ab
DH
5873 /* Idle tasks are by definition preempted by non-idle tasks. */
5874 if (unlikely(curr->policy == SCHED_IDLE) &&
5875 likely(p->policy != SCHED_IDLE))
5876 goto preempt;
5877
91c234b4 5878 /*
a2f5c9ab
DH
5879 * Batch and idle tasks do not preempt non-idle tasks (their preemption
5880 * is driven by the tick):
91c234b4 5881 */
8ed92e51 5882 if (unlikely(p->policy != SCHED_NORMAL) || !sched_feat(WAKEUP_PREEMPTION))
91c234b4 5883 return;
bf0f6f24 5884
464b7527 5885 find_matching_se(&se, &pse);
9bbd7374 5886 update_curr(cfs_rq_of(se));
002f128b 5887 BUG_ON(!pse);
2f36825b
VP
5888 if (wakeup_preempt_entity(se, pse) == 1) {
5889 /*
5890 * Bias pick_next to pick the sched entity that is
5891 * triggering this preemption.
5892 */
5893 if (!next_buddy_marked)
5894 set_next_buddy(pse);
3a7e73a2 5895 goto preempt;
2f36825b 5896 }
464b7527 5897
3a7e73a2 5898 return;
a65ac745 5899
3a7e73a2 5900preempt:
8875125e 5901 resched_curr(rq);
3a7e73a2
PZ
5902 /*
5903 * Only set the backward buddy when the current task is still
5904 * on the rq. This can happen when a wakeup gets interleaved
5905 * with schedule on the ->pre_schedule() or idle_balance()
5906 * point, either of which can drop the rq lock.
5907 *
5908 * Also, during early boot the idle thread is in the fair class;
5909 * for obvious reasons it's a bad idea to schedule back to it.
5910 */
5911 if (unlikely(!se->on_rq || curr == rq->idle))
5912 return;
5913
5914 if (sched_feat(LAST_BUDDY) && scale && entity_is_task(se))
5915 set_last_buddy(se);
bf0f6f24
IM
5916}
5917
606dba2e 5918static struct task_struct *
e7904a28 5919pick_next_task_fair(struct rq *rq, struct task_struct *prev, struct pin_cookie cookie)
bf0f6f24
IM
5920{
5921 struct cfs_rq *cfs_rq = &rq->cfs;
5922 struct sched_entity *se;
678d5718 5923 struct task_struct *p;
37e117c0 5924 int new_tasks;
678d5718 5925
6e83125c 5926again:
678d5718
PZ
5927#ifdef CONFIG_FAIR_GROUP_SCHED
5928 if (!cfs_rq->nr_running)
38033c37 5929 goto idle;
678d5718 5930
3f1d2a31 5931 if (prev->sched_class != &fair_sched_class)
678d5718
PZ
5932 goto simple;
5933
5934 /*
5935 * Because of the set_next_buddy() in dequeue_task_fair() it is rather
5936 * likely that a next task is from the same cgroup as the current.
5937 *
5938 * Therefore attempt to avoid putting and setting the entire cgroup
5939 * hierarchy, only change the part that actually changes.
5940 */
5941
5942 do {
5943 struct sched_entity *curr = cfs_rq->curr;
5944
5945 /*
5946 * Since we got here without doing put_prev_entity() we also
5947 * have to consider cfs_rq->curr. If it is still a runnable
5948 * entity, update_curr() will update its vruntime, otherwise
5949 * forget we've ever seen it.
5950 */
54d27365
BS
5951 if (curr) {
5952 if (curr->on_rq)
5953 update_curr(cfs_rq);
5954 else
5955 curr = NULL;
678d5718 5956
54d27365
BS
5957 /*
5958 * This call to check_cfs_rq_runtime() will do the
5959 * throttle and dequeue its entity in the parent(s).
5960 * Therefore the 'simple' nr_running test will indeed
5961 * be correct.
5962 */
5963 if (unlikely(check_cfs_rq_runtime(cfs_rq)))
5964 goto simple;
5965 }
678d5718
PZ
5966
5967 se = pick_next_entity(cfs_rq, curr);
5968 cfs_rq = group_cfs_rq(se);
5969 } while (cfs_rq);
5970
5971 p = task_of(se);
5972
5973 /*
5974 * Since we haven't yet done put_prev_entity and if the selected task
5975 * is a different task than we started out with, try and touch the
5976 * least amount of cfs_rqs.
5977 */
5978 if (prev != p) {
5979 struct sched_entity *pse = &prev->se;
5980
5981 while (!(cfs_rq = is_same_group(se, pse))) {
5982 int se_depth = se->depth;
5983 int pse_depth = pse->depth;
5984
5985 if (se_depth <= pse_depth) {
5986 put_prev_entity(cfs_rq_of(pse), pse);
5987 pse = parent_entity(pse);
5988 }
5989 if (se_depth >= pse_depth) {
5990 set_next_entity(cfs_rq_of(se), se);
5991 se = parent_entity(se);
5992 }
5993 }
5994
5995 put_prev_entity(cfs_rq, pse);
5996 set_next_entity(cfs_rq, se);
5997 }
5998
5999 if (hrtick_enabled(rq))
6000 hrtick_start_fair(rq, p);
6001
6002 return p;
6003simple:
6004 cfs_rq = &rq->cfs;
6005#endif
bf0f6f24 6006
36ace27e 6007 if (!cfs_rq->nr_running)
38033c37 6008 goto idle;
bf0f6f24 6009
3f1d2a31 6010 put_prev_task(rq, prev);
606dba2e 6011
bf0f6f24 6012 do {
678d5718 6013 se = pick_next_entity(cfs_rq, NULL);
f4b6755f 6014 set_next_entity(cfs_rq, se);
bf0f6f24
IM
6015 cfs_rq = group_cfs_rq(se);
6016 } while (cfs_rq);
6017
8f4d37ec 6018 p = task_of(se);
678d5718 6019
b39e66ea
MG
6020 if (hrtick_enabled(rq))
6021 hrtick_start_fair(rq, p);
8f4d37ec
PZ
6022
6023 return p;
38033c37
PZ
6024
6025idle:
cbce1a68
PZ
6026 /*
6027 * This is OK, because current is on_cpu, which avoids it being picked
6028 * for load-balance; preemption/IRQs are still disabled, avoiding
6029 * further scheduler activity on it; and we're being very careful to
6030 * re-start the picking loop.
6031 */
e7904a28 6032 lockdep_unpin_lock(&rq->lock, cookie);
e4aa358b 6033 new_tasks = idle_balance(rq);
e7904a28 6034 lockdep_repin_lock(&rq->lock, cookie);
37e117c0
PZ
6035 /*
6036 * Because idle_balance() releases (and re-acquires) rq->lock, it is
6037 * possible for any higher priority task to appear. In that case we
6038 * must re-start the pick_next_entity() loop.
6039 */
e4aa358b 6040 if (new_tasks < 0)
37e117c0
PZ
6041 return RETRY_TASK;
6042
e4aa358b 6043 if (new_tasks > 0)
38033c37 6044 goto again;
38033c37
PZ
6045
6046 return NULL;
bf0f6f24
IM
6047}
6048
6049/*
6050 * Account for a descheduled task:
6051 */
31ee529c 6052static void put_prev_task_fair(struct rq *rq, struct task_struct *prev)
bf0f6f24
IM
6053{
6054 struct sched_entity *se = &prev->se;
6055 struct cfs_rq *cfs_rq;
6056
6057 for_each_sched_entity(se) {
6058 cfs_rq = cfs_rq_of(se);
ab6cde26 6059 put_prev_entity(cfs_rq, se);
bf0f6f24
IM
6060 }
6061}
6062
ac53db59
RR
6063/*
6064 * sched_yield() is very simple
6065 *
6066 * The magic of dealing with the ->skip buddy is in pick_next_entity.
6067 */
6068static void yield_task_fair(struct rq *rq)
6069{
6070 struct task_struct *curr = rq->curr;
6071 struct cfs_rq *cfs_rq = task_cfs_rq(curr);
6072 struct sched_entity *se = &curr->se;
6073
6074 /*
6075 * Are we the only task in the tree?
6076 */
6077 if (unlikely(rq->nr_running == 1))
6078 return;
6079
6080 clear_buddies(cfs_rq, se);
6081
6082 if (curr->policy != SCHED_BATCH) {
6083 update_rq_clock(rq);
6084 /*
6085 * Update run-time statistics of the 'current'.
6086 */
6087 update_curr(cfs_rq);
916671c0
MG
6088 /*
6089 * Tell update_rq_clock() that we've just updated,
6090 * so we don't do microscopic update in schedule()
6091 * and double the fastpath cost.
6092 */
9edfbfed 6093 rq_clock_skip_update(rq, true);
ac53db59
RR
6094 }
6095
6096 set_skip_buddy(se);
6097}
6098
d95f4122
MG
6099static bool yield_to_task_fair(struct rq *rq, struct task_struct *p, bool preempt)
6100{
6101 struct sched_entity *se = &p->se;
6102
5238cdd3
PT
6103 /* throttled hierarchies are not runnable */
6104 if (!se->on_rq || throttled_hierarchy(cfs_rq_of(se)))
d95f4122
MG
6105 return false;
6106
6107 /* Tell the scheduler that we'd really like pse to run next. */
6108 set_next_buddy(se);
6109
d95f4122
MG
6110 yield_task_fair(rq);
6111
6112 return true;
6113}
6114
681f3e68 6115#ifdef CONFIG_SMP
bf0f6f24 6116/**************************************************
e9c84cb8
PZ
6117 * Fair scheduling class load-balancing methods.
6118 *
6119 * BASICS
6120 *
6121 * The purpose of load-balancing is to achieve the same basic fairness the
6122 * per-cpu scheduler provides, namely provide a proportional amount of compute
6123 * time to each task. This is expressed in the following equation:
6124 *
6125 * W_i,n/P_i == W_j,n/P_j for all i,j (1)
6126 *
6127 * Where W_i,n is the n-th weight average for cpu i. The instantaneous weight
6128 * W_i,0 is defined as:
6129 *
6130 * W_i,0 = \Sum_j w_i,j (2)
6131 *
6132 * Where w_i,j is the weight of the j-th runnable task on cpu i. This weight
1c3de5e1 6133 * is derived from the nice value as per sched_prio_to_weight[].
e9c84cb8
PZ
6134 *
6135 * The weight average is an exponential decay average of the instantaneous
6136 * weight:
6137 *
6138 * W'_i,n = (2^n - 1) / 2^n * W_i,n + 1 / 2^n * W_i,0 (3)
6139 *
ced549fa 6140 * C_i is the compute capacity of cpu i, typically it is the
e9c84cb8
PZ
6141 * fraction of 'recent' time available for SCHED_OTHER task execution. But it
6142 * can also include other factors [XXX].
6143 *
6144 * To achieve this balance we define a measure of imbalance which follows
6145 * directly from (1):
6146 *
ced549fa 6147 * imb_i,j = max{ avg(W/C), W_i/C_i } - min{ avg(W/C), W_j/C_j } (4)
e9c84cb8
PZ
6148 *
6149 * We then move tasks around to minimize the imbalance. In the continuous
6150 * function space it is obvious this converges, in the discrete case we get
6151 * a few fun cases generally called infeasible weight scenarios.
6152 *
6153 * [XXX expand on:
6154 * - infeasible weights;
6155 * - local vs global optima in the discrete case. ]
6156 *
6157 *
6158 * SCHED DOMAINS
6159 *
6160 * In order to solve the imbalance equation (4), and avoid the obvious O(n^2)
6161 * for all i,j solution, we create a tree of cpus that follows the hardware
6162 * topology where each level pairs two lower groups (or better). This results
6163 * in O(log n) layers. Furthermore we reduce the number of cpus going up the
6164 * tree to only the first of the previous level and we decrease the frequency
6165 * of load-balance at each level inv. proportional to the number of cpus in
6166 * the groups.
6167 *
6168 * This yields:
6169 *
6170 * log_2 n
6171 * \Sum { (1/2^i) * (n/2^i) * 2^i } = O(n) (5)
6172 * i = 0
6173 *
6174 * where, summing over all levels i: 1/2^i is the load-balance frequency
6175 * at that level, n/2^i is the number of cpus doing load-balance there,
6176 * and 2^i is the size of each group.
6177 *
6178 * Coupled with a limit on how many tasks we can migrate every balance pass,
6179 * this makes (5) the runtime complexity of the balancer.
6180 *
6181 * An important property here is that each CPU is still (indirectly) connected
6182 * to every other cpu in at most O(log n) steps:
6183 *
6184 * The adjacency matrix of the resulting graph is given by:
6185 *
97a7142f 6186 *
e9c84cb8
PZ
6187 * A_i,j = \Union_{k = 0 .. log_2 n} (i % 2^k == 0) && i / 2^(k+1) == j / 2^(k+1) (6)
6188 *
6189 *
6190 * And you'll find that:
6191 *
6192 * A^(log_2 n)_i,j != 0 for all i,j (7)
6193 *
6194 * Showing there's indeed a path between every cpu in at most O(log n) steps.
6195 * The task movement gives a factor of O(m), giving a convergence complexity
6196 * of:
6197 *
6198 * O(nm log n), n := nr_cpus, m := nr_tasks (8)
6199 *
6200 *
6201 * WORK CONSERVING
6202 *
6203 * In order to avoid CPUs going idle while there's still work to do, new idle
6204 * balancing is more aggressive and has the newly idle cpu iterate up the domain
6205 * tree itself instead of relying on other CPUs to bring it work.
6206 *
6207 * This adds some complexity to both (5) and (8) but it reduces the total idle
6208 * time.
6209 *
6210 * [XXX more?]
6211 *
6212 *
6213 * CGROUPS
6214 *
6215 * Cgroups make a horror show out of (2), instead of a simple sum we get:
6216 *
6217 *
6218 * W_i,0 = \Sum_j \Prod_k w_k * (s_k,i / S_k) (9)
6219 *
6220 *
6221 * Where
6222 *
6223 * s_k,i = \Sum_j w_i,j,k and S_k = \Sum_i s_k,i (10)
6224 *
6225 * w_i,j,k is the weight of the j-th runnable task in the k-th cgroup on cpu i.
6226 *
6227 * The big problem is S_k, it's a global sum needed to compute a local (W_i)
6228 * property.
6229 *
6230 * [XXX write more on how we solve this.. _after_ merging pjt's patches that
6231 * rewrite all of this once again.]
97a7142f 6232 */
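/*
 * Worked instance of (4) (illustrative): two CPUs of equal capacity
 * C_0 = C_1 = 1024 carrying weights W_0 = 3072 and W_1 = 1024 give
 * W_0/C_0 = 3, W_1/C_1 = 1 and avg(W/C) = 2, hence
 * imb_0,1 = max{2, 3} - min{2, 1} = 2. Moving weight from cpu 0 to cpu 1
 * across balance passes shrinks the imbalance until (1) holds, i.e. both
 * CPUs carry 2048 and imb_0,1 = 0.
 */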
bf0f6f24 6233
ed387b78
HS
6234static unsigned long __read_mostly max_load_balance_interval = HZ/10;
6235
0ec8aa00
PZ
6236enum fbq_type { regular, remote, all };
6237
ddcdf6e7 6238#define LBF_ALL_PINNED 0x01
367456c7 6239#define LBF_NEED_BREAK 0x02
6263322c
PZ
6240#define LBF_DST_PINNED 0x04
6241#define LBF_SOME_PINNED 0x08
ddcdf6e7
PZ
6242
6243struct lb_env {
6244 struct sched_domain *sd;
6245
ddcdf6e7 6246 struct rq *src_rq;
85c1e7da 6247 int src_cpu;
ddcdf6e7
PZ
6248
6249 int dst_cpu;
6250 struct rq *dst_rq;
6251
88b8dac0
SV
6252 struct cpumask *dst_grpmask;
6253 int new_dst_cpu;
ddcdf6e7 6254 enum cpu_idle_type idle;
bd939f45 6255 long imbalance;
b9403130
MW
6256 /* The set of CPUs under consideration for load-balancing */
6257 struct cpumask *cpus;
6258
ddcdf6e7 6259 unsigned int flags;
367456c7
PZ
6260
6261 unsigned int loop;
6262 unsigned int loop_break;
6263 unsigned int loop_max;
0ec8aa00
PZ
6264
6265 enum fbq_type fbq_type;
163122b7 6266 struct list_head tasks;
ddcdf6e7
PZ
6267};
6268
029632fb
PZ
6269/*
6270 * Is this task likely cache-hot:
6271 */
5d5e2b1b 6272static int task_hot(struct task_struct *p, struct lb_env *env)
029632fb
PZ
6273{
6274 s64 delta;
6275
e5673f28
KT
6276 lockdep_assert_held(&env->src_rq->lock);
6277
029632fb
PZ
6278 if (p->sched_class != &fair_sched_class)
6279 return 0;
6280
6281 if (unlikely(p->policy == SCHED_IDLE))
6282 return 0;
6283
6284 /*
6285 * Buddy candidates are cache hot:
6286 */
5d5e2b1b 6287 if (sched_feat(CACHE_HOT_BUDDY) && env->dst_rq->nr_running &&
029632fb
PZ
6288 (&p->se == cfs_rq_of(&p->se)->next ||
6289 &p->se == cfs_rq_of(&p->se)->last))
6290 return 1;
6291
6292 if (sysctl_sched_migration_cost == -1)
6293 return 1;
6294 if (sysctl_sched_migration_cost == 0)
6295 return 0;
6296
5d5e2b1b 6297 delta = rq_clock_task(env->src_rq) - p->se.exec_start;
029632fb
PZ
6298
6299 return delta < (s64)sysctl_sched_migration_cost;
6300}
6301
3a7053b3 6302#ifdef CONFIG_NUMA_BALANCING
c1ceac62 6303/*
2a1ed24c
SD
6304 * Returns 1, if task migration degrades locality
6305 * Returns 0, if task migration improves locality i.e migration preferred.
6306 * Returns -1, if task migration is not affected by locality.
c1ceac62 6307 */
2a1ed24c 6308static int migrate_degrades_locality(struct task_struct *p, struct lb_env *env)
3a7053b3 6309{
b1ad065e 6310 struct numa_group *numa_group = rcu_dereference(p->numa_group);
c1ceac62 6311 unsigned long src_faults, dst_faults;
3a7053b3
MG
6312 int src_nid, dst_nid;
6313
2a595721 6314 if (!static_branch_likely(&sched_numa_balancing))
2a1ed24c
SD
6315 return -1;
6316
c3b9bc5b 6317 if (!p->numa_faults || !(env->sd->flags & SD_NUMA))
2a1ed24c 6318 return -1;
7a0f3083
MG
6319
6320 src_nid = cpu_to_node(env->src_cpu);
6321 dst_nid = cpu_to_node(env->dst_cpu);
6322
83e1d2cd 6323 if (src_nid == dst_nid)
2a1ed24c 6324 return -1;
7a0f3083 6325
2a1ed24c
SD
6326 /* Migrating away from the preferred node is always bad. */
6327 if (src_nid == p->numa_preferred_nid) {
6328 if (env->src_rq->nr_running > env->src_rq->nr_preferred_running)
6329 return 1;
6330 else
6331 return -1;
6332 }
b1ad065e 6333
c1ceac62
RR
6334 /* Encourage migration to the preferred node. */
6335 if (dst_nid == p->numa_preferred_nid)
2a1ed24c 6336 return 0;
b1ad065e 6337
c1ceac62
RR
6338 if (numa_group) {
6339 src_faults = group_faults(p, src_nid);
6340 dst_faults = group_faults(p, dst_nid);
6341 } else {
6342 src_faults = task_faults(p, src_nid);
6343 dst_faults = task_faults(p, dst_nid);
b1ad065e
RR
6344 }
6345
c1ceac62 6346 return dst_faults < src_faults;
7a0f3083
MG
6347}
6348
3a7053b3 6349#else
2a1ed24c 6350static inline int migrate_degrades_locality(struct task_struct *p,
3a7053b3
MG
6351 struct lb_env *env)
6352{
2a1ed24c 6353 return -1;
7a0f3083 6354}
3a7053b3
MG
6355#endif
6356
1e3c88bd
PZ
6357/*
6358 * can_migrate_task - may task p from runqueue rq be migrated to this_cpu?
6359 */
6360static
8e45cb54 6361int can_migrate_task(struct task_struct *p, struct lb_env *env)
1e3c88bd 6362{
2a1ed24c 6363 int tsk_cache_hot;
e5673f28
KT
6364
6365 lockdep_assert_held(&env->src_rq->lock);
6366
1e3c88bd
PZ
6367 /*
6368 * We do not migrate tasks that are:
d3198084 6369 * 1) throttled_lb_pair, or
1e3c88bd 6370 * 2) cannot be migrated to this CPU due to cpus_allowed, or
d3198084
JK
6371 * 3) running (obviously), or
6372 * 4) are cache-hot on their current CPU.
1e3c88bd 6373 */
d3198084
JK
6374 if (throttled_lb_pair(task_group(p), env->src_cpu, env->dst_cpu))
6375 return 0;
6376
ddcdf6e7 6377 if (!cpumask_test_cpu(env->dst_cpu, tsk_cpus_allowed(p))) {
e02e60c1 6378 int cpu;
88b8dac0 6379
ae92882e 6380 schedstat_inc(p->se.statistics.nr_failed_migrations_affine);
88b8dac0 6381
6263322c
PZ
6382 env->flags |= LBF_SOME_PINNED;
6383
88b8dac0
SV
6384 /*
6385 * Remember if this task can be migrated to any other cpu in
6386 * our sched_group. We may want to revisit it if we couldn't
6387 * meet load balance goals by pulling other tasks on src_cpu.
6388 *
6389 * Also avoid computing new_dst_cpu if we have already computed
6390 * one in current iteration.
6391 */
6263322c 6392 if (!env->dst_grpmask || (env->flags & LBF_DST_PINNED))
88b8dac0
SV
6393 return 0;
6394
e02e60c1
JK
6395 /* Prevent to re-select dst_cpu via env's cpus */
6396 for_each_cpu_and(cpu, env->dst_grpmask, env->cpus) {
6397 if (cpumask_test_cpu(cpu, tsk_cpus_allowed(p))) {
6263322c 6398 env->flags |= LBF_DST_PINNED;
e02e60c1
JK
6399 env->new_dst_cpu = cpu;
6400 break;
6401 }
88b8dac0 6402 }
e02e60c1 6403
1e3c88bd
PZ
6404 return 0;
6405 }
88b8dac0
SV
6406
6407 /* Record that we found at least one task that could run on dst_cpu */
8e45cb54 6408 env->flags &= ~LBF_ALL_PINNED;
1e3c88bd 6409
ddcdf6e7 6410 if (task_running(env->src_rq, p)) {
ae92882e 6411 schedstat_inc(p->se.statistics.nr_failed_migrations_running);
1e3c88bd
PZ
6412 return 0;
6413 }
6414
6415 /*
6416 * Aggressive migration if:
3a7053b3
MG
6417 * 1) destination numa is preferred
6418 * 2) task is cache cold, or
6419 * 3) too many balance attempts have failed.
1e3c88bd 6420 */
2a1ed24c
SD
6421 tsk_cache_hot = migrate_degrades_locality(p, env);
6422 if (tsk_cache_hot == -1)
6423 tsk_cache_hot = task_hot(p, env);
3a7053b3 6424
2a1ed24c 6425 if (tsk_cache_hot <= 0 ||
7a96c231 6426 env->sd->nr_balance_failed > env->sd->cache_nice_tries) {
2a1ed24c 6427 if (tsk_cache_hot == 1) {
ae92882e
JP
6428 schedstat_inc(env->sd->lb_hot_gained[env->idle]);
6429 schedstat_inc(p->se.statistics.nr_forced_migrations);
3a7053b3 6430 }
1e3c88bd
PZ
6431 return 1;
6432 }
6433
ae92882e 6434 schedstat_inc(p->se.statistics.nr_failed_migrations_hot);
4e2dcb73 6435 return 0;
1e3c88bd
PZ
6436}
6437
897c395f 6438/*
163122b7
KT
6439 * detach_task() -- detach the task for the migration specified in env
6440 */
6441static void detach_task(struct task_struct *p, struct lb_env *env)
6442{
6443 lockdep_assert_held(&env->src_rq->lock);
6444
163122b7 6445 p->on_rq = TASK_ON_RQ_MIGRATING;
3ea94de1 6446 deactivate_task(env->src_rq, p, 0);
163122b7
KT
6447 set_task_cpu(p, env->dst_cpu);
6448}
6449
897c395f 6450/*
e5673f28 6451 * detach_one_task() -- tries to dequeue exactly one task from env->src_rq, as
897c395f 6452 * part of active balancing operations within "domain".
897c395f 6453 *
e5673f28 6454 * Returns a task if successful and NULL otherwise.
897c395f 6455 */
e5673f28 6456static struct task_struct *detach_one_task(struct lb_env *env)
897c395f
PZ
6457{
6458 struct task_struct *p, *n;
897c395f 6459
e5673f28
KT
6460 lockdep_assert_held(&env->src_rq->lock);
6461
367456c7 6462 list_for_each_entry_safe(p, n, &env->src_rq->cfs_tasks, se.group_node) {
367456c7
PZ
6463 if (!can_migrate_task(p, env))
6464 continue;
897c395f 6465
163122b7 6466 detach_task(p, env);
e5673f28 6467
367456c7 6468 /*
e5673f28 6469 * Right now, this is only the second place where
163122b7 6470 * lb_gained[env->idle] is updated (other is detach_tasks)
e5673f28 6471 * so we can safely collect stats here rather than
163122b7 6472 * inside detach_tasks().
367456c7 6473 */
ae92882e 6474 schedstat_inc(env->sd->lb_gained[env->idle]);
e5673f28 6475 return p;
897c395f 6476 }
e5673f28 6477 return NULL;
897c395f
PZ
6478}
6479
eb95308e
PZ
6480static const unsigned int sched_nr_migrate_break = 32;
6481
5d6523eb 6482/*
163122b7
KT
6483 * detach_tasks() -- tries to detach up to imbalance weighted load from
6484 * busiest_rq, as part of a balancing operation within domain "sd".
5d6523eb 6485 *
163122b7 6486 * Returns number of detached tasks if successful and 0 otherwise.
5d6523eb 6487 */
163122b7 6488static int detach_tasks(struct lb_env *env)
1e3c88bd 6489{
5d6523eb
PZ
6490 struct list_head *tasks = &env->src_rq->cfs_tasks;
6491 struct task_struct *p;
367456c7 6492 unsigned long load;
163122b7
KT
6493 int detached = 0;
6494
6495 lockdep_assert_held(&env->src_rq->lock);
1e3c88bd 6496
bd939f45 6497 if (env->imbalance <= 0)
5d6523eb 6498 return 0;
1e3c88bd 6499
5d6523eb 6500 while (!list_empty(tasks)) {
985d3a4c
YD
6501 /*
6502 * We don't want to steal all, otherwise we may be treated likewise,
6503 * which could at worst lead to a livelock crash.
6504 */
6505 if (env->idle != CPU_NOT_IDLE && env->src_rq->nr_running <= 1)
6506 break;
6507
5d6523eb 6508 p = list_first_entry(tasks, struct task_struct, se.group_node);
1e3c88bd 6509
367456c7
PZ
6510 env->loop++;
6511 /* We've more or less seen every task there is, call it quits */
5d6523eb 6512 if (env->loop > env->loop_max)
367456c7 6513 break;
5d6523eb
PZ
6514
6515 /* take a breather every nr_migrate tasks */
367456c7 6516 if (env->loop > env->loop_break) {
eb95308e 6517 env->loop_break += sched_nr_migrate_break;
8e45cb54 6518 env->flags |= LBF_NEED_BREAK;
ee00e66f 6519 break;
a195f004 6520 }
1e3c88bd 6521
d3198084 6522 if (!can_migrate_task(p, env))
367456c7
PZ
6523 goto next;
6524
6525 load = task_h_load(p);
5d6523eb 6526
eb95308e 6527 if (sched_feat(LB_MIN) && load < 16 && !env->sd->nr_balance_failed)
367456c7
PZ
6528 goto next;
6529
bd939f45 6530 if ((load / 2) > env->imbalance)
367456c7 6531 goto next;
1e3c88bd 6532
163122b7
KT
6533 detach_task(p, env);
6534 list_add(&p->se.group_node, &env->tasks);
6535
6536 detached++;
bd939f45 6537 env->imbalance -= load;
1e3c88bd
PZ
6538
6539#ifdef CONFIG_PREEMPT
ee00e66f
PZ
6540 /*
6541 * NEWIDLE balancing is a source of latency, so preemptible
163122b7 6542 * kernels will stop after the first task is detached to minimize
ee00e66f
PZ
6543 * the critical section.
6544 */
5d6523eb 6545 if (env->idle == CPU_NEWLY_IDLE)
ee00e66f 6546 break;
1e3c88bd
PZ
6547#endif
6548
ee00e66f
PZ
6549 /*
6550 * We only want to steal up to the prescribed amount of
6551 * weighted load.
6552 */
bd939f45 6553 if (env->imbalance <= 0)
ee00e66f 6554 break;
367456c7
PZ
6555
6556 continue;
6557next:
5d6523eb 6558 list_move_tail(&p->se.group_node, tasks);
1e3c88bd 6559 }
5d6523eb 6560
1e3c88bd 6561 /*
163122b7
KT
6562 * Right now, this is one of only two places we collect this stat
6563 * so we can safely collect detach_one_task() stats here rather
6564 * than inside detach_one_task().
1e3c88bd 6565 */
ae92882e 6566 schedstat_add(env->sd->lb_gained[env->idle], detached);
1e3c88bd 6567
163122b7
KT
6568 return detached;
6569}
6570
6571/*
6572 * attach_task() -- attach the task detached by detach_task() to its new rq.
6573 */
6574static void attach_task(struct rq *rq, struct task_struct *p)
6575{
6576 lockdep_assert_held(&rq->lock);
6577
6578 BUG_ON(task_rq(p) != rq);
163122b7 6579 activate_task(rq, p, 0);
3ea94de1 6580 p->on_rq = TASK_ON_RQ_QUEUED;
163122b7
KT
6581 check_preempt_curr(rq, p, 0);
6582}
6583
6584/*
6585 * attach_one_task() -- attaches the task returned from detach_one_task() to
6586 * its new rq.
6587 */
6588static void attach_one_task(struct rq *rq, struct task_struct *p)
6589{
6590 raw_spin_lock(&rq->lock);
6591 attach_task(rq, p);
6592 raw_spin_unlock(&rq->lock);
6593}
6594
6595/*
6596 * attach_tasks() -- attaches all tasks detached by detach_tasks() to their
6597 * new rq.
6598 */
6599static void attach_tasks(struct lb_env *env)
6600{
6601 struct list_head *tasks = &env->tasks;
6602 struct task_struct *p;
6603
6604 raw_spin_lock(&env->dst_rq->lock);
6605
6606 while (!list_empty(tasks)) {
6607 p = list_first_entry(tasks, struct task_struct, se.group_node);
6608 list_del_init(&p->se.group_node);
1e3c88bd 6609
163122b7
KT
6610 attach_task(env->dst_rq, p);
6611 }
6612
6613 raw_spin_unlock(&env->dst_rq->lock);
1e3c88bd
PZ
6614}
6615
230059de 6616#ifdef CONFIG_FAIR_GROUP_SCHED
48a16753 6617static void update_blocked_averages(int cpu)
9e3081ca 6618{
9e3081ca 6619 struct rq *rq = cpu_rq(cpu);
48a16753
PT
6620 struct cfs_rq *cfs_rq;
6621 unsigned long flags;
9e3081ca 6622
48a16753
PT
6623 raw_spin_lock_irqsave(&rq->lock, flags);
6624 update_rq_clock(rq);
9d89c257 6625
9763b67f
PZ
6626 /*
6627 * Iterates the task_group tree in a bottom up fashion, see
6628 * list_add_leaf_cfs_rq() for details.
6629 */
64660c86 6630 for_each_leaf_cfs_rq(rq, cfs_rq) {
9d89c257
YD
6631 /* throttled entities do not contribute to load */
6632 if (throttled_hierarchy(cfs_rq))
6633 continue;
48a16753 6634
a2c6c91f 6635 if (update_cfs_rq_load_avg(cfs_rq_clock_task(cfs_rq), cfs_rq, true))
9d89c257
YD
6636 update_tg_load_avg(cfs_rq, 0);
6637 }
48a16753 6638 raw_spin_unlock_irqrestore(&rq->lock, flags);
9e3081ca
PZ
6639}
6640
9763b67f 6641/*
68520796 6642 * Compute the hierarchical load factor for cfs_rq and all its ascendants.
9763b67f
PZ
6643 * This needs to be done in a top-down fashion because the load of a child
6644 * group is a fraction of its parent's load.
6645 */
68520796 6646static void update_cfs_rq_h_load(struct cfs_rq *cfs_rq)
9763b67f 6647{
68520796
VD
6648 struct rq *rq = rq_of(cfs_rq);
6649 struct sched_entity *se = cfs_rq->tg->se[cpu_of(rq)];
a35b6466 6650 unsigned long now = jiffies;
68520796 6651 unsigned long load;
a35b6466 6652
68520796 6653 if (cfs_rq->last_h_load_update == now)
a35b6466
PZ
6654 return;
6655
68520796
VD
6656 cfs_rq->h_load_next = NULL;
6657 for_each_sched_entity(se) {
6658 cfs_rq = cfs_rq_of(se);
6659 cfs_rq->h_load_next = se;
6660 if (cfs_rq->last_h_load_update == now)
6661 break;
6662 }
a35b6466 6663
68520796 6664 if (!se) {
7ea241af 6665 cfs_rq->h_load = cfs_rq_load_avg(cfs_rq);
68520796
VD
6666 cfs_rq->last_h_load_update = now;
6667 }
6668
6669 while ((se = cfs_rq->h_load_next) != NULL) {
6670 load = cfs_rq->h_load;
7ea241af
YD
6671 load = div64_ul(load * se->avg.load_avg,
6672 cfs_rq_load_avg(cfs_rq) + 1);
68520796
VD
6673 cfs_rq = group_cfs_rq(se);
6674 cfs_rq->h_load = load;
6675 cfs_rq->last_h_load_update = now;
6676 }
9763b67f
PZ
6677}
6678
367456c7 6679static unsigned long task_h_load(struct task_struct *p)
230059de 6680{
367456c7 6681 struct cfs_rq *cfs_rq = task_cfs_rq(p);
230059de 6682
68520796 6683 update_cfs_rq_h_load(cfs_rq);
9d89c257 6684 return div64_ul(p->se.avg.load_avg * cfs_rq->h_load,
7ea241af 6685 cfs_rq_load_avg(cfs_rq) + 1);
230059de
PZ
6686}
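/*
 * Worked example (illustrative): a task with se.avg.load_avg == 512 sits
 * on a group cfs_rq whose load average is 1024 and whose hierarchical
 * h_load, as propagated down by update_cfs_rq_h_load() above, is 300.
 * task_h_load() then reports 512 * 300 / (1024 + 1) ~= 149: the task owns
 * roughly half of the group's load, so it is charged roughly half of the
 * group's root-visible weight.
 */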
6687#else
48a16753 6688static inline void update_blocked_averages(int cpu)
9e3081ca 6689{
6c1d47c0
VG
6690 struct rq *rq = cpu_rq(cpu);
6691 struct cfs_rq *cfs_rq = &rq->cfs;
6692 unsigned long flags;
6693
6694 raw_spin_lock_irqsave(&rq->lock, flags);
6695 update_rq_clock(rq);
a2c6c91f 6696 update_cfs_rq_load_avg(cfs_rq_clock_task(cfs_rq), cfs_rq, true);
6c1d47c0 6697 raw_spin_unlock_irqrestore(&rq->lock, flags);
9e3081ca
PZ
6698}
6699
367456c7 6700static unsigned long task_h_load(struct task_struct *p)
1e3c88bd 6701{
9d89c257 6702 return p->se.avg.load_avg;
1e3c88bd 6703}
230059de 6704#endif
1e3c88bd 6705
1e3c88bd 6706/********** Helpers for find_busiest_group ************************/
caeb178c
RR
6707
6708enum group_type {
6709 group_other = 0,
6710 group_imbalanced,
6711 group_overloaded,
6712};
6713
1e3c88bd
PZ
6714/*
6715 * sg_lb_stats - stats of a sched_group required for load_balancing
6716 */
6717struct sg_lb_stats {
6718 unsigned long avg_load; /*Avg load across the CPUs of the group */
6719 unsigned long group_load; /* Total load over the CPUs of the group */
1e3c88bd 6720 unsigned long sum_weighted_load; /* Weighted load of group's tasks */
56cf515b 6721 unsigned long load_per_task;
63b2ca30 6722 unsigned long group_capacity;
9e91d61d 6723 unsigned long group_util; /* Total utilization of the group */
147c5fc2 6724 unsigned int sum_nr_running; /* Nr tasks running in the group */
147c5fc2
PZ
6725 unsigned int idle_cpus;
6726 unsigned int group_weight;
caeb178c 6727 enum group_type group_type;
ea67821b 6728 int group_no_capacity;
0ec8aa00
PZ
6729#ifdef CONFIG_NUMA_BALANCING
6730 unsigned int nr_numa_running;
6731 unsigned int nr_preferred_running;
6732#endif
1e3c88bd
PZ
6733};
6734
56cf515b
JK
6735/*
6736 * sd_lb_stats - Structure to store the statistics of a sched_domain
6737 * during load balancing.
6738 */
6739struct sd_lb_stats {
6740 struct sched_group *busiest; /* Busiest group in this sd */
6741 struct sched_group *local; /* Local group in this sd */
6742 unsigned long total_load; /* Total load of all groups in sd */
63b2ca30 6743 unsigned long total_capacity; /* Total capacity of all groups in sd */
56cf515b
JK
6744 unsigned long avg_load; /* Average load across all groups in sd */
6745
56cf515b 6746 struct sg_lb_stats busiest_stat;/* Statistics of the busiest group */
147c5fc2 6747 struct sg_lb_stats local_stat; /* Statistics of the local group */
56cf515b
JK
6748};
6749
147c5fc2
PZ
6750static inline void init_sd_lb_stats(struct sd_lb_stats *sds)
6751{
6752 /*
6753 * Skimp on the clearing to avoid duplicate work. We can avoid clearing
6754 * local_stat because update_sg_lb_stats() does a full clear/assignment.
6755 * We must however clear busiest_stat::avg_load because
6756 * update_sd_pick_busiest() reads this before assignment.
6757 */
6758 *sds = (struct sd_lb_stats){
6759 .busiest = NULL,
6760 .local = NULL,
6761 .total_load = 0UL,
63b2ca30 6762 .total_capacity = 0UL,
147c5fc2
PZ
6763 .busiest_stat = {
6764 .avg_load = 0UL,
caeb178c
RR
6765 .sum_nr_running = 0,
6766 .group_type = group_other,
147c5fc2
PZ
6767 },
6768 };
6769}
6770
1e3c88bd
PZ
6771/**
6772 * get_sd_load_idx - Obtain the load index for a given sched domain.
6773 * @sd: The sched_domain whose load_idx is to be obtained.
ed1b7732 6774 * @idle: The idle status of the CPU for whose sd load_idx is obtained.
e69f6186
YB
6775 *
6776 * Return: The load index.
1e3c88bd
PZ
6777 */
6778static inline int get_sd_load_idx(struct sched_domain *sd,
6779 enum cpu_idle_type idle)
6780{
6781 int load_idx;
6782
6783 switch (idle) {
6784 case CPU_NOT_IDLE:
6785 load_idx = sd->busy_idx;
6786 break;
6787
6788 case CPU_NEWLY_IDLE:
6789 load_idx = sd->newidle_idx;
6790 break;
6791 default:
6792 load_idx = sd->idle_idx;
6793 break;
6794 }
6795
6796 return load_idx;
6797}
6798
ced549fa 6799static unsigned long scale_rt_capacity(int cpu)
1e3c88bd
PZ
6800{
6801 struct rq *rq = cpu_rq(cpu);
b5b4860d 6802 u64 total, used, age_stamp, avg;
cadefd3d 6803 s64 delta;
1e3c88bd 6804
b654f7de
PZ
6805 /*
6806 * Since we're reading these variables without serialization make sure
6807 * we read them once before doing sanity checks on them.
6808 */
316c1608
JL
6809 age_stamp = READ_ONCE(rq->age_stamp);
6810 avg = READ_ONCE(rq->rt_avg);
cebde6d6 6811 delta = __rq_clock_broken(rq) - age_stamp;
b654f7de 6812
cadefd3d
PZ
6813 if (unlikely(delta < 0))
6814 delta = 0;
6815
6816 total = sched_avg_period() + delta;
aa483808 6817
b5b4860d 6818 used = div_u64(avg, total);
1e3c88bd 6819
b5b4860d
VG
6820 if (likely(used < SCHED_CAPACITY_SCALE))
6821 return SCHED_CAPACITY_SCALE - used;
1e3c88bd 6822
b5b4860d 6823 return 1;
1e3c88bd
PZ
6824}
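/*
 * Rough example with assumed numbers: rq->rt_avg accumulates time spent on
 * RT/IRQ work scaled by capacity, so if roughly a quarter of the averaging
 * window (sched_avg_period() + delta) was eaten by such work,
 *	used ~= SCHED_CAPACITY_SCALE / 4 = 256
 * and scale_rt_capacity() reports 1024 - 256 = 768 as the capacity left
 * over for CFS tasks on that cpu.
 */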
6825
ced549fa 6826static void update_cpu_capacity(struct sched_domain *sd, int cpu)
1e3c88bd 6827{
8cd5601c 6828 unsigned long capacity = arch_scale_cpu_capacity(sd, cpu);
1e3c88bd
PZ
6829 struct sched_group *sdg = sd->groups;
6830
ca6d75e6 6831 cpu_rq(cpu)->cpu_capacity_orig = capacity;
9d5efe05 6832
ced549fa 6833 capacity *= scale_rt_capacity(cpu);
ca8ce3d0 6834 capacity >>= SCHED_CAPACITY_SHIFT;
1e3c88bd 6835
ced549fa
NP
6836 if (!capacity)
6837 capacity = 1;
1e3c88bd 6838
ced549fa
NP
6839 cpu_rq(cpu)->cpu_capacity = capacity;
6840 sdg->sgc->capacity = capacity;
1e3c88bd
PZ
6841}
6842
63b2ca30 6843void update_group_capacity(struct sched_domain *sd, int cpu)
1e3c88bd
PZ
6844{
6845 struct sched_domain *child = sd->child;
6846 struct sched_group *group, *sdg = sd->groups;
dc7ff76e 6847 unsigned long capacity;
4ec4412e
VG
6848 unsigned long interval;
6849
6850 interval = msecs_to_jiffies(sd->balance_interval);
6851 interval = clamp(interval, 1UL, max_load_balance_interval);
63b2ca30 6852 sdg->sgc->next_update = jiffies + interval;
1e3c88bd
PZ
6853
6854 if (!child) {
ced549fa 6855 update_cpu_capacity(sd, cpu);
1e3c88bd
PZ
6856 return;
6857 }
6858
dc7ff76e 6859 capacity = 0;
1e3c88bd 6860
74a5ce20
PZ
6861 if (child->flags & SD_OVERLAP) {
6862 /*
6863 * SD_OVERLAP domains cannot assume that child groups
6864 * span the current group.
6865 */
6866
863bffc8 6867 for_each_cpu(cpu, sched_group_cpus(sdg)) {
63b2ca30 6868 struct sched_group_capacity *sgc;
9abf24d4 6869 struct rq *rq = cpu_rq(cpu);
863bffc8 6870
9abf24d4 6871 /*
63b2ca30 6872 * build_sched_domains() -> init_sched_groups_capacity()
9abf24d4
SD
6873 * gets here before we've attached the domains to the
6874 * runqueues.
6875 *
ced549fa
NP
6876 * Use capacity_of(), which is set irrespective of domains
6877 * in update_cpu_capacity().
9abf24d4 6878 *
dc7ff76e 6879 * This avoids capacity from being 0 and
9abf24d4 6880 * causing divide-by-zero issues on boot.
9abf24d4
SD
6881 */
6882 if (unlikely(!rq->sd)) {
ced549fa 6883 capacity += capacity_of(cpu);
9abf24d4
SD
6884 continue;
6885 }
863bffc8 6886
63b2ca30 6887 sgc = rq->sd->groups->sgc;
63b2ca30 6888 capacity += sgc->capacity;
863bffc8 6889 }
74a5ce20
PZ
6890 } else {
6891 /*
6892 * !SD_OVERLAP domains can assume that child groups
6893 * span the current group.
97a7142f 6894 */
74a5ce20
PZ
6895
6896 group = child->groups;
6897 do {
63b2ca30 6898 capacity += group->sgc->capacity;
74a5ce20
PZ
6899 group = group->next;
6900 } while (group != child->groups);
6901 }
1e3c88bd 6902
63b2ca30 6903 sdg->sgc->capacity = capacity;
1e3c88bd
PZ
6904}
6905
9d5efe05 6906/*
ea67821b
VG
6907 * Check whether the capacity of the rq has been noticeably reduced by side
6908 * activity. The imbalance_pct is used for the threshold.
6909 * Return true if the capacity is significantly reduced.
9d5efe05
SV
6910 */
6911static inline int
ea67821b 6912check_cpu_capacity(struct rq *rq, struct sched_domain *sd)
9d5efe05 6913{
ea67821b
VG
6914 return ((rq->cpu_capacity * sd->imbalance_pct) <
6915 (rq->cpu_capacity_orig * 100));
9d5efe05
SV
6916}
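/*
 * Numeric illustration, assuming sd->imbalance_pct == 125 and
 * cpu_capacity_orig == 1024: the check reads
 *	cpu_capacity * 125 < 1024 * 100
 * which becomes true once cpu_capacity drops below ~819, i.e. once more
 * than about 20% of the cpu has been stolen by RT/IRQ/other-class activity.
 */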
6917
30ce5dab
PZ
6918/*
6919 * Group imbalance indicates (and tries to solve) the problem where balancing
6920 * groups is inadequate due to tsk_cpus_allowed() constraints.
6921 *
6922 * Imagine a situation of two groups of 4 cpus each and 4 tasks each with a
6923 * cpumask covering 1 cpu of the first group and 3 cpus of the second group.
6924 * Something like:
6925 *
6926 * { 0 1 2 3 } { 4 5 6 7 }
6927 * * * * *
6928 *
6929 * If we were to balance group-wise we'd place two tasks in the first group and
6930 * two tasks in the second group. Clearly this is undesired as it will overload
6931 * cpu 3 and leave one of the cpus in the second group unused.
6932 *
6933 * The current solution to this issue is detecting the skew in the first group
6263322c
PZ
6934 * by noticing the lower domain failed to reach balance and had difficulty
6935 * moving tasks due to affinity constraints.
30ce5dab
PZ
6936 *
6937 * When this is so detected; this group becomes a candidate for busiest; see
ed1b7732 6938 * update_sd_pick_busiest(). And calculate_imbalance() and
6263322c 6939 * find_busiest_group() avoid some of the usual balance conditions to allow it
30ce5dab
PZ
6940 * to create an effective group imbalance.
6941 *
6942 * This is a somewhat tricky proposition since the next run might not find the
6943 * group imbalance and decide the groups need to be balanced again. A most
6944 * subtle and fragile situation.
6945 */
6946
6263322c 6947static inline int sg_imbalanced(struct sched_group *group)
30ce5dab 6948{
63b2ca30 6949 return group->sgc->imbalance;
30ce5dab
PZ
6950}
6951
b37d9316 6952/*
ea67821b
VG
6953 * group_has_capacity returns true if the group has spare capacity that could
6954 * be used by some tasks.
6955 * We consider that a group has spare capacity if the number of tasks is
9e91d61d
DE
6956 * smaller than the number of CPUs or if the utilization is lower than the
6957 * available capacity for CFS tasks.
ea67821b
VG
6958 * For the latter, we use a threshold to stabilize the state, to take into
6959 * account the variance of the tasks' load and to return true if the available
6960 * capacity is meaningful for the load balancer.
6961 * As an example, an available capacity of 1% can appear but it doesn't bring
6962 * any benefit to load balancing.
b37d9316 6963 */
ea67821b
VG
6964static inline bool
6965group_has_capacity(struct lb_env *env, struct sg_lb_stats *sgs)
b37d9316 6966{
ea67821b
VG
6967 if (sgs->sum_nr_running < sgs->group_weight)
6968 return true;
c61037e9 6969
ea67821b 6970 if ((sgs->group_capacity * 100) >
9e91d61d 6971 (sgs->group_util * env->sd->imbalance_pct))
ea67821b 6972 return true;
b37d9316 6973
ea67821b
VG
6974 return false;
6975}
6976
6977/*
6978 * group_is_overloaded returns true if the group has more tasks than it can
6979 * handle.
6980 * group_is_overloaded is not equivalent to !group_has_capacity because a group
6981 * with exactly the right number of tasks has no spare capacity left but is not
6982 * overloaded, so both group_has_capacity and group_is_overloaded return
6983 * false.
6984 */
6985static inline bool
6986group_is_overloaded(struct lb_env *env, struct sg_lb_stats *sgs)
6987{
6988 if (sgs->sum_nr_running <= sgs->group_weight)
6989 return false;
b37d9316 6990
ea67821b 6991 if ((sgs->group_capacity * 100) <
9e91d61d 6992 (sgs->group_util * env->sd->imbalance_pct))
ea67821b 6993 return true;
b37d9316 6994
ea67821b 6995 return false;
b37d9316
PZ
6996}
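/*
 * Example of the asymmetry described above, with assumed numbers: a group
 * with group_weight == 4, sum_nr_running == 4, group_capacity == 4096,
 * group_util == 3600 and imbalance_pct == 125 gives
 *	group_has_capacity():  4 < 4 is false, 4096 * 100 > 3600 * 125 is false
 *	group_is_overloaded(): 4 <= 4, so false
 * so the group has no spare capacity yet is not overloaded either.
 */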
6997
79a89f92
LY
6998static inline enum
6999group_type group_classify(struct sched_group *group,
7000 struct sg_lb_stats *sgs)
caeb178c 7001{
ea67821b 7002 if (sgs->group_no_capacity)
caeb178c
RR
7003 return group_overloaded;
7004
7005 if (sg_imbalanced(group))
7006 return group_imbalanced;
7007
7008 return group_other;
7009}
7010
1e3c88bd
PZ
7011/**
7012 * update_sg_lb_stats - Update sched_group's statistics for load balancing.
cd96891d 7013 * @env: The load balancing environment.
1e3c88bd 7014 * @group: sched_group whose statistics are to be updated.
1e3c88bd 7015 * @load_idx: Load index of sched_domain of this_cpu for load calc.
1e3c88bd 7016 * @local_group: Does group contain this_cpu.
1e3c88bd 7017 * @sgs: variable to hold the statistics for this group.
cd3bd4e6 7018 * @overload: Indicate more than one runnable task for any CPU.
1e3c88bd 7019 */
bd939f45
PZ
7020static inline void update_sg_lb_stats(struct lb_env *env,
7021 struct sched_group *group, int load_idx,
4486edd1
TC
7022 int local_group, struct sg_lb_stats *sgs,
7023 bool *overload)
1e3c88bd 7024{
30ce5dab 7025 unsigned long load;
a426f99c 7026 int i, nr_running;
1e3c88bd 7027
b72ff13c
PZ
7028 memset(sgs, 0, sizeof(*sgs));
7029
b9403130 7030 for_each_cpu_and(i, sched_group_cpus(group), env->cpus) {
1e3c88bd
PZ
7031 struct rq *rq = cpu_rq(i);
7032
1e3c88bd 7033 /* Bias balancing toward cpus of our domain */
6263322c 7034 if (local_group)
04f733b4 7035 load = target_load(i, load_idx);
6263322c 7036 else
1e3c88bd 7037 load = source_load(i, load_idx);
1e3c88bd
PZ
7038
7039 sgs->group_load += load;
9e91d61d 7040 sgs->group_util += cpu_util(i);
65fdac08 7041 sgs->sum_nr_running += rq->cfs.h_nr_running;
4486edd1 7042
a426f99c
WL
7043 nr_running = rq->nr_running;
7044 if (nr_running > 1)
4486edd1
TC
7045 *overload = true;
7046
0ec8aa00
PZ
7047#ifdef CONFIG_NUMA_BALANCING
7048 sgs->nr_numa_running += rq->nr_numa_running;
7049 sgs->nr_preferred_running += rq->nr_preferred_running;
7050#endif
1e3c88bd 7051 sgs->sum_weighted_load += weighted_cpuload(i);
a426f99c
WL
7052 /*
7053 * No need to call idle_cpu() if nr_running is not 0
7054 */
7055 if (!nr_running && idle_cpu(i))
aae6d3dd 7056 sgs->idle_cpus++;
1e3c88bd
PZ
7057 }
7058
63b2ca30
NP
7059 /* Adjust by relative CPU capacity of the group */
7060 sgs->group_capacity = group->sgc->capacity;
ca8ce3d0 7061 sgs->avg_load = (sgs->group_load*SCHED_CAPACITY_SCALE) / sgs->group_capacity;
1e3c88bd 7062
dd5feea1 7063 if (sgs->sum_nr_running)
38d0f770 7064 sgs->load_per_task = sgs->sum_weighted_load / sgs->sum_nr_running;
1e3c88bd 7065
aae6d3dd 7066 sgs->group_weight = group->group_weight;
b37d9316 7067
ea67821b 7068 sgs->group_no_capacity = group_is_overloaded(env, sgs);
79a89f92 7069 sgs->group_type = group_classify(group, sgs);
1e3c88bd
PZ
7070}
7071
532cb4c4
MN
7072/**
7073 * update_sd_pick_busiest - return 1 on busiest group
cd96891d 7074 * @env: The load balancing environment.
532cb4c4
MN
7075 * @sds: sched_domain statistics
7076 * @sg: sched_group candidate to be checked for being the busiest
b6b12294 7077 * @sgs: sched_group statistics
532cb4c4
MN
7078 *
7079 * Determine if @sg is a busier group than the previously selected
7080 * busiest group.
e69f6186
YB
7081 *
7082 * Return: %true if @sg is a busier group than the previously selected
7083 * busiest group. %false otherwise.
532cb4c4 7084 */
bd939f45 7085static bool update_sd_pick_busiest(struct lb_env *env,
532cb4c4
MN
7086 struct sd_lb_stats *sds,
7087 struct sched_group *sg,
bd939f45 7088 struct sg_lb_stats *sgs)
532cb4c4 7089{
caeb178c 7090 struct sg_lb_stats *busiest = &sds->busiest_stat;
532cb4c4 7091
caeb178c 7092 if (sgs->group_type > busiest->group_type)
532cb4c4
MN
7093 return true;
7094
caeb178c
RR
7095 if (sgs->group_type < busiest->group_type)
7096 return false;
7097
7098 if (sgs->avg_load <= busiest->avg_load)
7099 return false;
7100
7101 /* This is the busiest node in its class. */
7102 if (!(env->sd->flags & SD_ASYM_PACKING))
532cb4c4
MN
7103 return true;
7104
1f621e02
SD
7105 /* No ASYM_PACKING if target cpu is already busy */
7106 if (env->idle == CPU_NOT_IDLE)
7107 return true;
532cb4c4
MN
7108 /*
7109 * ASYM_PACKING needs to move all the work to the lowest
7110 * numbered CPUs in the group, therefore mark all groups
7111 * higher than ourself as busy.
7112 */
caeb178c 7113 if (sgs->sum_nr_running && env->dst_cpu < group_first_cpu(sg)) {
532cb4c4
MN
7114 if (!sds->busiest)
7115 return true;
7116
1f621e02
SD
7117 /* Prefer to move from highest possible cpu's work */
7118 if (group_first_cpu(sds->busiest) < group_first_cpu(sg))
532cb4c4
MN
7119 return true;
7120 }
7121
7122 return false;
7123}
7124
0ec8aa00
PZ
7125#ifdef CONFIG_NUMA_BALANCING
7126static inline enum fbq_type fbq_classify_group(struct sg_lb_stats *sgs)
7127{
7128 if (sgs->sum_nr_running > sgs->nr_numa_running)
7129 return regular;
7130 if (sgs->sum_nr_running > sgs->nr_preferred_running)
7131 return remote;
7132 return all;
7133}
7134
7135static inline enum fbq_type fbq_classify_rq(struct rq *rq)
7136{
7137 if (rq->nr_running > rq->nr_numa_running)
7138 return regular;
7139 if (rq->nr_running > rq->nr_preferred_running)
7140 return remote;
7141 return all;
7142}
7143#else
7144static inline enum fbq_type fbq_classify_group(struct sg_lb_stats *sgs)
7145{
7146 return all;
7147}
7148
7149static inline enum fbq_type fbq_classify_rq(struct rq *rq)
7150{
7151 return regular;
7152}
7153#endif /* CONFIG_NUMA_BALANCING */
7154
1e3c88bd 7155/**
461819ac 7156 * update_sd_lb_stats - Update sched_domain's statistics for load balancing.
cd96891d 7157 * @env: The load balancing environment.
1e3c88bd
PZ
7158 * @sds: variable to hold the statistics for this sched_domain.
7159 */
0ec8aa00 7160static inline void update_sd_lb_stats(struct lb_env *env, struct sd_lb_stats *sds)
1e3c88bd 7161{
bd939f45
PZ
7162 struct sched_domain *child = env->sd->child;
7163 struct sched_group *sg = env->sd->groups;
56cf515b 7164 struct sg_lb_stats tmp_sgs;
1e3c88bd 7165 int load_idx, prefer_sibling = 0;
4486edd1 7166 bool overload = false;
1e3c88bd
PZ
7167
7168 if (child && child->flags & SD_PREFER_SIBLING)
7169 prefer_sibling = 1;
7170
bd939f45 7171 load_idx = get_sd_load_idx(env->sd, env->idle);
1e3c88bd
PZ
7172
7173 do {
56cf515b 7174 struct sg_lb_stats *sgs = &tmp_sgs;
1e3c88bd
PZ
7175 int local_group;
7176
bd939f45 7177 local_group = cpumask_test_cpu(env->dst_cpu, sched_group_cpus(sg));
56cf515b
JK
7178 if (local_group) {
7179 sds->local = sg;
7180 sgs = &sds->local_stat;
b72ff13c
PZ
7181
7182 if (env->idle != CPU_NEWLY_IDLE ||
63b2ca30
NP
7183 time_after_eq(jiffies, sg->sgc->next_update))
7184 update_group_capacity(env->sd, env->dst_cpu);
56cf515b 7185 }
1e3c88bd 7186
4486edd1
TC
7187 update_sg_lb_stats(env, sg, load_idx, local_group, sgs,
7188 &overload);
1e3c88bd 7189
b72ff13c
PZ
7190 if (local_group)
7191 goto next_group;
7192
1e3c88bd
PZ
7193 /*
7194 * In case the child domain prefers tasks go to siblings
ea67821b 7195 * first, lower the sg capacity so that we'll try
75dd321d
NR
7196 * and move all the excess tasks away. We lower the capacity
7197 * of a group only if the local group has the capacity to fit
ea67821b
VG
7198 * these excess tasks. The extra check prevents the case where
7199 * you always pull from the heaviest group when it is already
7200 * under-utilized (possible with a large weight task outweighs
7201 * the tasks on the system).
1e3c88bd 7202 */
b72ff13c 7203 if (prefer_sibling && sds->local &&
ea67821b
VG
7204 group_has_capacity(env, &sds->local_stat) &&
7205 (sgs->sum_nr_running > 1)) {
7206 sgs->group_no_capacity = 1;
79a89f92 7207 sgs->group_type = group_classify(sg, sgs);
cb0b9f24 7208 }
1e3c88bd 7209
b72ff13c 7210 if (update_sd_pick_busiest(env, sds, sg, sgs)) {
532cb4c4 7211 sds->busiest = sg;
56cf515b 7212 sds->busiest_stat = *sgs;
1e3c88bd
PZ
7213 }
7214
b72ff13c
PZ
7215next_group:
7216 /* Now, start updating sd_lb_stats */
7217 sds->total_load += sgs->group_load;
63b2ca30 7218 sds->total_capacity += sgs->group_capacity;
b72ff13c 7219
532cb4c4 7220 sg = sg->next;
bd939f45 7221 } while (sg != env->sd->groups);
0ec8aa00
PZ
7222
7223 if (env->sd->flags & SD_NUMA)
7224 env->fbq_type = fbq_classify_group(&sds->busiest_stat);
4486edd1
TC
7225
7226 if (!env->sd->parent) {
7227 /* update overload indicator if we are at root domain */
7228 if (env->dst_rq->rd->overload != overload)
7229 env->dst_rq->rd->overload = overload;
7230 }
7231
532cb4c4
MN
7232}
7233
532cb4c4
MN
7234/**
7235 * check_asym_packing - Check to see if the group is packed into the
7236 * sched domain.
7237 *
7238 * This is primarily intended to be used at the sibling level. Some
7239 * cores like POWER7 prefer to use lower numbered SMT threads. In the
7240 * case of POWER7, it can move to lower SMT modes only when higher
7241 * threads are idle. When in lower SMT modes, the threads will
7242 * perform better since they share less core resources. Hence when we
7243 * have idle threads, we want them to be the higher ones.
7244 *
7245 * This packing function is run on idle threads. It checks to see if
7246 * the busiest CPU in this domain (core in the P7 case) has a higher
7247 * CPU number than the packing function is being run on. Here we are
7248 * assuming a lower CPU number will be equivalent to a lower SMT thread
7249 * number.
7250 *
e69f6186 7251 * Return: 1 when packing is required and a task should be moved to
b6b12294
MN
7252 * this CPU. The amount of the imbalance is returned in env->imbalance.
7253 *
cd96891d 7254 * @env: The load balancing environment.
532cb4c4 7255 * @sds: Statistics of the sched_domain which is to be packed
532cb4c4 7256 */
bd939f45 7257static int check_asym_packing(struct lb_env *env, struct sd_lb_stats *sds)
532cb4c4
MN
7258{
7259 int busiest_cpu;
7260
bd939f45 7261 if (!(env->sd->flags & SD_ASYM_PACKING))
532cb4c4
MN
7262 return 0;
7263
1f621e02
SD
7264 if (env->idle == CPU_NOT_IDLE)
7265 return 0;
7266
532cb4c4
MN
7267 if (!sds->busiest)
7268 return 0;
7269
7270 busiest_cpu = group_first_cpu(sds->busiest);
bd939f45 7271 if (env->dst_cpu > busiest_cpu)
532cb4c4
MN
7272 return 0;
7273
bd939f45 7274 env->imbalance = DIV_ROUND_CLOSEST(
63b2ca30 7275 sds->busiest_stat.avg_load * sds->busiest_stat.group_capacity,
ca8ce3d0 7276 SCHED_CAPACITY_SCALE);
bd939f45 7277
532cb4c4 7278 return 1;
1e3c88bd
PZ
7279}
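/*
 * Worked example with assumed numbers: if the busiest group carries a
 * group_load of 3072 spread over group_capacity == 2048, its avg_load is
 * 3072 * SCHED_CAPACITY_SCALE / 2048 == 1536, and the imbalance computed
 * above is DIV_ROUND_CLOSEST(1536 * 2048, 1024) == 3072, i.e. the whole
 * group load is offered for migration toward the lower-numbered cpu.
 */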
7280
7281/**
7282 * fix_small_imbalance - Calculate the minor imbalance that exists
7283 * amongst the groups of a sched_domain, during
7284 * load balancing.
cd96891d 7285 * @env: The load balancing environment.
1e3c88bd 7286 * @sds: Statistics of the sched_domain whose imbalance is to be calculated.
1e3c88bd 7287 */
bd939f45
PZ
7288static inline
7289void fix_small_imbalance(struct lb_env *env, struct sd_lb_stats *sds)
1e3c88bd 7290{
63b2ca30 7291 unsigned long tmp, capa_now = 0, capa_move = 0;
1e3c88bd 7292 unsigned int imbn = 2;
dd5feea1 7293 unsigned long scaled_busy_load_per_task;
56cf515b 7294 struct sg_lb_stats *local, *busiest;
1e3c88bd 7295
56cf515b
JK
7296 local = &sds->local_stat;
7297 busiest = &sds->busiest_stat;
1e3c88bd 7298
56cf515b
JK
7299 if (!local->sum_nr_running)
7300 local->load_per_task = cpu_avg_load_per_task(env->dst_cpu);
7301 else if (busiest->load_per_task > local->load_per_task)
7302 imbn = 1;
dd5feea1 7303
56cf515b 7304 scaled_busy_load_per_task =
ca8ce3d0 7305 (busiest->load_per_task * SCHED_CAPACITY_SCALE) /
63b2ca30 7306 busiest->group_capacity;
56cf515b 7307
3029ede3
VD
7308 if (busiest->avg_load + scaled_busy_load_per_task >=
7309 local->avg_load + (scaled_busy_load_per_task * imbn)) {
56cf515b 7310 env->imbalance = busiest->load_per_task;
1e3c88bd
PZ
7311 return;
7312 }
7313
7314 /*
7315 * OK, we don't have enough imbalance to justify moving tasks,
ced549fa 7316 * however we may be able to increase total CPU capacity used by
1e3c88bd
PZ
7317 * moving them.
7318 */
7319
63b2ca30 7320 capa_now += busiest->group_capacity *
56cf515b 7321 min(busiest->load_per_task, busiest->avg_load);
63b2ca30 7322 capa_now += local->group_capacity *
56cf515b 7323 min(local->load_per_task, local->avg_load);
ca8ce3d0 7324 capa_now /= SCHED_CAPACITY_SCALE;
1e3c88bd
PZ
7325
7326 /* Amount of load we'd subtract */
a2cd4260 7327 if (busiest->avg_load > scaled_busy_load_per_task) {
63b2ca30 7328 capa_move += busiest->group_capacity *
56cf515b 7329 min(busiest->load_per_task,
a2cd4260 7330 busiest->avg_load - scaled_busy_load_per_task);
56cf515b 7331 }
1e3c88bd
PZ
7332
7333 /* Amount of load we'd add */
63b2ca30 7334 if (busiest->avg_load * busiest->group_capacity <
ca8ce3d0 7335 busiest->load_per_task * SCHED_CAPACITY_SCALE) {
63b2ca30
NP
7336 tmp = (busiest->avg_load * busiest->group_capacity) /
7337 local->group_capacity;
56cf515b 7338 } else {
ca8ce3d0 7339 tmp = (busiest->load_per_task * SCHED_CAPACITY_SCALE) /
63b2ca30 7340 local->group_capacity;
56cf515b 7341 }
63b2ca30 7342 capa_move += local->group_capacity *
3ae11c90 7343 min(local->load_per_task, local->avg_load + tmp);
ca8ce3d0 7344 capa_move /= SCHED_CAPACITY_SCALE;
1e3c88bd
PZ
7345
7346 /* Move if we gain throughput */
63b2ca30 7347 if (capa_move > capa_now)
56cf515b 7348 env->imbalance = busiest->load_per_task;
1e3c88bd
PZ
7349}
7350
7351/**
7352 * calculate_imbalance - Calculate the amount of imbalance present within the
7353 * groups of a given sched_domain during load balance.
bd939f45 7354 * @env: load balance environment
1e3c88bd 7355 * @sds: statistics of the sched_domain whose imbalance is to be calculated.
1e3c88bd 7356 */
bd939f45 7357static inline void calculate_imbalance(struct lb_env *env, struct sd_lb_stats *sds)
1e3c88bd 7358{
dd5feea1 7359 unsigned long max_pull, load_above_capacity = ~0UL;
56cf515b
JK
7360 struct sg_lb_stats *local, *busiest;
7361
7362 local = &sds->local_stat;
56cf515b 7363 busiest = &sds->busiest_stat;
dd5feea1 7364
caeb178c 7365 if (busiest->group_type == group_imbalanced) {
30ce5dab
PZ
7366 /*
7367 * In the group_imb case we cannot rely on group-wide averages
7368 * to ensure cpu-load equilibrium, look at wider averages. XXX
7369 */
56cf515b
JK
7370 busiest->load_per_task =
7371 min(busiest->load_per_task, sds->avg_load);
dd5feea1
SS
7372 }
7373
1e3c88bd 7374 /*
885e542c
DE
7375 * Avg load of busiest sg can be less and avg load of local sg can
7376 * be greater than avg load across all sgs of sd because avg load
7377 * factors in sg capacity and sgs with smaller group_type are
7378 * skipped when updating the busiest sg:
1e3c88bd 7379 */
b1885550
VD
7380 if (busiest->avg_load <= sds->avg_load ||
7381 local->avg_load >= sds->avg_load) {
bd939f45
PZ
7382 env->imbalance = 0;
7383 return fix_small_imbalance(env, sds);
1e3c88bd
PZ
7384 }
7385
9a5d9ba6
PZ
7386 /*
7387 * If there aren't any idle cpus, avoid creating some.
7388 */
7389 if (busiest->group_type == group_overloaded &&
7390 local->group_type == group_overloaded) {
1be0eb2a 7391 load_above_capacity = busiest->sum_nr_running * SCHED_CAPACITY_SCALE;
cfa10334 7392 if (load_above_capacity > busiest->group_capacity) {
ea67821b 7393 load_above_capacity -= busiest->group_capacity;
26656215 7394 load_above_capacity *= scale_load_down(NICE_0_LOAD);
cfa10334
MR
7395 load_above_capacity /= busiest->group_capacity;
7396 } else
ea67821b 7397 load_above_capacity = ~0UL;
dd5feea1
SS
7398 }
7399
7400 /*
7401 * We're trying to get all the cpus to the average_load, so we don't
7402 * want to push ourselves above the average load, nor do we wish to
7403 * reduce the max loaded cpu below the average load. At the same time,
0a9b23ce
DE
7404 * we also don't want to reduce the group load below the group
7405 * capacity. Thus we look for the minimum possible imbalance.
dd5feea1 7406 */
30ce5dab 7407 max_pull = min(busiest->avg_load - sds->avg_load, load_above_capacity);
1e3c88bd
PZ
7408
7409 /* How much load to actually move to equalise the imbalance */
56cf515b 7410 env->imbalance = min(
63b2ca30
NP
7411 max_pull * busiest->group_capacity,
7412 (sds->avg_load - local->avg_load) * local->group_capacity
ca8ce3d0 7413 ) / SCHED_CAPACITY_SCALE;
1e3c88bd
PZ
7414
7415 /*
7416 * if *imbalance is less than the average load per runnable task
25985edc 7417 * there is no guarantee that any tasks will be moved so we'll have
1e3c88bd
PZ
7418 * a think about bumping its value to force at least one task to be
7419 * moved
7420 */
56cf515b 7421 if (env->imbalance < busiest->load_per_task)
bd939f45 7422 return fix_small_imbalance(env, sds);
1e3c88bd 7423}
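/*
 * Worked example with assumed numbers, both groups overloaded and each with
 * group_capacity == 1024: busiest has 3 tasks and avg_load == 1536, local
 * has avg_load == 512, and the domain-wide sds->avg_load is 1024.
 *	load_above_capacity = (3 * 1024 - 1024) * 1024 / 1024 = 2048
 *	max_pull = min(1536 - 1024, 2048) = 512
 *	imbalance = min(512 * 1024, (1024 - 512) * 1024) / 1024 = 512
 * so roughly half a cpu's worth of load is requested to move.
 */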
fab47622 7424
1e3c88bd
PZ
7425/******* find_busiest_group() helpers end here *********************/
7426
7427/**
7428 * find_busiest_group - Returns the busiest group within the sched_domain
0a9b23ce 7429 * if there is an imbalance.
1e3c88bd
PZ
7430 *
7431 * Also calculates the amount of weighted load which should be moved
7432 * to restore balance.
7433 *
cd96891d 7434 * @env: The load balancing environment.
1e3c88bd 7435 *
e69f6186 7436 * Return: - The busiest group if imbalance exists.
1e3c88bd 7437 */
56cf515b 7438static struct sched_group *find_busiest_group(struct lb_env *env)
1e3c88bd 7439{
56cf515b 7440 struct sg_lb_stats *local, *busiest;
1e3c88bd
PZ
7441 struct sd_lb_stats sds;
7442
147c5fc2 7443 init_sd_lb_stats(&sds);
1e3c88bd
PZ
7444
7445 /*
7446 * Compute the various statistics relevant for load balancing at
7447 * this level.
7448 */
23f0d209 7449 update_sd_lb_stats(env, &sds);
56cf515b
JK
7450 local = &sds.local_stat;
7451 busiest = &sds.busiest_stat;
1e3c88bd 7452
ea67821b 7453 /* ASYM feature bypasses nice load balance check */
1f621e02 7454 if (check_asym_packing(env, &sds))
532cb4c4
MN
7455 return sds.busiest;
7456
cc57aa8f 7457 /* There is no busy sibling group to pull tasks from */
56cf515b 7458 if (!sds.busiest || busiest->sum_nr_running == 0)
1e3c88bd
PZ
7459 goto out_balanced;
7460
ca8ce3d0
NP
7461 sds.avg_load = (SCHED_CAPACITY_SCALE * sds.total_load)
7462 / sds.total_capacity;
b0432d8f 7463
866ab43e
PZ
7464 /*
7465 * If the busiest group is imbalanced the below checks don't
30ce5dab 7466 * work because they assume all things are equal, which typically
866ab43e
PZ
7467 * isn't true due to cpus_allowed constraints and the like.
7468 */
caeb178c 7469 if (busiest->group_type == group_imbalanced)
866ab43e
PZ
7470 goto force_balance;
7471
cc57aa8f 7472 /* SD_BALANCE_NEWIDLE trumps SMP nice when underutilized */
ea67821b
VG
7473 if (env->idle == CPU_NEWLY_IDLE && group_has_capacity(env, local) &&
7474 busiest->group_no_capacity)
fab47622
NR
7475 goto force_balance;
7476
cc57aa8f 7477 /*
9c58c79a 7478 * If the local group is busier than the selected busiest group
cc57aa8f
PZ
7479 * don't try and pull any tasks.
7480 */
56cf515b 7481 if (local->avg_load >= busiest->avg_load)
1e3c88bd
PZ
7482 goto out_balanced;
7483
cc57aa8f
PZ
7484 /*
7485 * Don't pull any tasks if this group is already above the domain
7486 * average load.
7487 */
56cf515b 7488 if (local->avg_load >= sds.avg_load)
1e3c88bd
PZ
7489 goto out_balanced;
7490
bd939f45 7491 if (env->idle == CPU_IDLE) {
aae6d3dd 7492 /*
43f4d666
VG
7493 * This cpu is idle. If the busiest group is not overloaded
7494 * and there is no imbalance between this and the busiest group
7495 * wrt idle cpus, it is balanced. The imbalance becomes
7496 * significant if the diff is greater than 1; otherwise we
7497 * might end up just moving the imbalance to another group.
aae6d3dd 7498 */
43f4d666
VG
7499 if ((busiest->group_type != group_overloaded) &&
7500 (local->idle_cpus <= (busiest->idle_cpus + 1)))
aae6d3dd 7501 goto out_balanced;
c186fafe
PZ
7502 } else {
7503 /*
7504 * In the CPU_NEWLY_IDLE, CPU_NOT_IDLE cases, use
7505 * imbalance_pct to be conservative.
7506 */
56cf515b
JK
7507 if (100 * busiest->avg_load <=
7508 env->sd->imbalance_pct * local->avg_load)
c186fafe 7509 goto out_balanced;
aae6d3dd 7510 }
1e3c88bd 7511
fab47622 7512force_balance:
1e3c88bd 7513 /* Looks like there is an imbalance. Compute it */
bd939f45 7514 calculate_imbalance(env, &sds);
1e3c88bd
PZ
7515 return sds.busiest;
7516
7517out_balanced:
bd939f45 7518 env->imbalance = 0;
1e3c88bd
PZ
7519 return NULL;
7520}
7521
7522/*
7523 * find_busiest_queue - find the busiest runqueue among the cpus in group.
7524 */
bd939f45 7525static struct rq *find_busiest_queue(struct lb_env *env,
b9403130 7526 struct sched_group *group)
1e3c88bd
PZ
7527{
7528 struct rq *busiest = NULL, *rq;
ced549fa 7529 unsigned long busiest_load = 0, busiest_capacity = 1;
1e3c88bd
PZ
7530 int i;
7531
6906a408 7532 for_each_cpu_and(i, sched_group_cpus(group), env->cpus) {
ea67821b 7533 unsigned long capacity, wl;
0ec8aa00
PZ
7534 enum fbq_type rt;
7535
7536 rq = cpu_rq(i);
7537 rt = fbq_classify_rq(rq);
1e3c88bd 7538
0ec8aa00
PZ
7539 /*
7540 * We classify groups/runqueues into three groups:
7541 * - regular: there are !numa tasks
7542 * - remote: there are numa tasks that run on the 'wrong' node
7543 * - all: there is no distinction
7544 *
7545 * In order to avoid migrating ideally placed numa tasks,
7546 * ignore those when there's better options.
7547 *
7548 * If we ignore the actual busiest queue to migrate another
7549 * task, the next balance pass can still reduce the busiest
7550 * queue by moving tasks around inside the node.
7551 *
7552 * If we cannot move enough load due to this classification
7553 * the next pass will adjust the group classification and
7554 * allow migration of more tasks.
7555 *
7556 * Both cases only affect the total convergence complexity.
7557 */
7558 if (rt > env->fbq_type)
7559 continue;
7560
ced549fa 7561 capacity = capacity_of(i);
9d5efe05 7562
6e40f5bb 7563 wl = weighted_cpuload(i);
1e3c88bd 7564
6e40f5bb
TG
7565 /*
7566 * When comparing with imbalance, use weighted_cpuload()
ced549fa 7567 * which is not scaled with the cpu capacity.
6e40f5bb 7568 */
ea67821b
VG
7569
7570 if (rq->nr_running == 1 && wl > env->imbalance &&
7571 !check_cpu_capacity(rq, env->sd))
1e3c88bd
PZ
7572 continue;
7573
6e40f5bb
TG
7574 /*
7575 * For the load comparisons with the other cpu's, consider
ced549fa
NP
7576 * the weighted_cpuload() scaled with the cpu capacity, so
7577 * that the load can be moved away from the cpu that is
7578 * potentially running at a lower capacity.
95a79b80 7579 *
ced549fa 7580 * Thus we're looking for max(wl_i / capacity_i), crosswise
95a79b80 7581 * multiplication to rid ourselves of the division works out
ced549fa
NP
7582 * to: wl_i * capacity_j > wl_j * capacity_i; where j is
7583 * our previous maximum.
6e40f5bb 7584 */
ced549fa 7585 if (wl * busiest_capacity > busiest_load * capacity) {
95a79b80 7586 busiest_load = wl;
ced549fa 7587 busiest_capacity = capacity;
1e3c88bd
PZ
7588 busiest = rq;
7589 }
7590 }
7591
7592 return busiest;
7593}
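/*
 * Example of the crosswise comparison above, with assumed numbers: if the
 * current busiest rq has wl == 800 on capacity == 1024 (ratio ~0.78) and a
 * candidate rq has wl == 600 on capacity == 512 (ratio ~1.17), then
 *	600 * 1024 > 800 * 512
 * holds and the candidate replaces it, despite carrying less absolute load,
 * because it is relatively more loaded for its capacity.
 */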
7594
7595/*
7596 * Max backoff if we encounter pinned tasks. Pretty arbitrary value, but
7597 * so long as it is large enough.
7598 */
7599#define MAX_PINNED_INTERVAL 512
7600
bd939f45 7601static int need_active_balance(struct lb_env *env)
1af3ed3d 7602{
bd939f45
PZ
7603 struct sched_domain *sd = env->sd;
7604
7605 if (env->idle == CPU_NEWLY_IDLE) {
532cb4c4
MN
7606
7607 /*
7608 * ASYM_PACKING needs to force migrate tasks from busy but
7609 * higher numbered CPUs in order to pack all tasks in the
7610 * lowest numbered CPUs.
7611 */
bd939f45 7612 if ((sd->flags & SD_ASYM_PACKING) && env->src_cpu > env->dst_cpu)
532cb4c4 7613 return 1;
1af3ed3d
PZ
7614 }
7615
1aaf90a4
VG
7616 /*
7617 * The dst_cpu is idle and the src_cpu CPU has only 1 CFS task.
7618 * It's worth migrating the task if the src_cpu's capacity is reduced
7619 * because of other sched_class or IRQs if more capacity stays
7620 * available on dst_cpu.
7621 */
7622 if ((env->idle != CPU_NOT_IDLE) &&
7623 (env->src_rq->cfs.h_nr_running == 1)) {
7624 if ((check_cpu_capacity(env->src_rq, sd)) &&
7625 (capacity_of(env->src_cpu)*sd->imbalance_pct < capacity_of(env->dst_cpu)*100))
7626 return 1;
7627 }
7628
1af3ed3d
PZ
7629 return unlikely(sd->nr_balance_failed > sd->cache_nice_tries+2);
7630}
7631
969c7921
TH
7632static int active_load_balance_cpu_stop(void *data);
7633
23f0d209
JK
7634static int should_we_balance(struct lb_env *env)
7635{
7636 struct sched_group *sg = env->sd->groups;
7637 struct cpumask *sg_cpus, *sg_mask;
7638 int cpu, balance_cpu = -1;
7639
7640 /*
7641 * In the newly idle case, we will allow all the cpus
7642 * to do the newly idle load balance.
7643 */
7644 if (env->idle == CPU_NEWLY_IDLE)
7645 return 1;
7646
7647 sg_cpus = sched_group_cpus(sg);
7648 sg_mask = sched_group_mask(sg);
7649 /* Try to find first idle cpu */
7650 for_each_cpu_and(cpu, sg_cpus, env->cpus) {
7651 if (!cpumask_test_cpu(cpu, sg_mask) || !idle_cpu(cpu))
7652 continue;
7653
7654 balance_cpu = cpu;
7655 break;
7656 }
7657
7658 if (balance_cpu == -1)
7659 balance_cpu = group_balance_cpu(sg);
7660
7661 /*
7662 * First idle cpu or the first cpu(busiest) in this sched group
7663 * is eligible for doing load balancing at this and above domains.
7664 */
b0cff9d8 7665 return balance_cpu == env->dst_cpu;
23f0d209
JK
7666}
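/*
 * Example with assumed cpus: if this sched group spans cpus 8-15 and cpus
 * 10 and 13 are idle, the first eligible idle cpu (here cpu 10) becomes
 * balance_cpu, so only the load_balance() call running on cpu 10 proceeds
 * at this domain level; the same call on every other cpu of the group
 * bails out early.
 */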
7667
1e3c88bd
PZ
7668/*
7669 * Check this_cpu to ensure it is balanced within domain. Attempt to move
7670 * tasks if there is an imbalance.
7671 */
7672static int load_balance(int this_cpu, struct rq *this_rq,
7673 struct sched_domain *sd, enum cpu_idle_type idle,
23f0d209 7674 int *continue_balancing)
1e3c88bd 7675{
88b8dac0 7676 int ld_moved, cur_ld_moved, active_balance = 0;
6263322c 7677 struct sched_domain *sd_parent = sd->parent;
1e3c88bd 7678 struct sched_group *group;
1e3c88bd
PZ
7679 struct rq *busiest;
7680 unsigned long flags;
4ba29684 7681 struct cpumask *cpus = this_cpu_cpumask_var_ptr(load_balance_mask);
1e3c88bd 7682
8e45cb54
PZ
7683 struct lb_env env = {
7684 .sd = sd,
ddcdf6e7
PZ
7685 .dst_cpu = this_cpu,
7686 .dst_rq = this_rq,
88b8dac0 7687 .dst_grpmask = sched_group_cpus(sd->groups),
8e45cb54 7688 .idle = idle,
eb95308e 7689 .loop_break = sched_nr_migrate_break,
b9403130 7690 .cpus = cpus,
0ec8aa00 7691 .fbq_type = all,
163122b7 7692 .tasks = LIST_HEAD_INIT(env.tasks),
8e45cb54
PZ
7693 };
7694
cfc03118
JK
7695 /*
7696 * For NEWLY_IDLE load_balancing, we don't need to consider
7697 * other cpus in our group
7698 */
e02e60c1 7699 if (idle == CPU_NEWLY_IDLE)
cfc03118 7700 env.dst_grpmask = NULL;
cfc03118 7701
1e3c88bd
PZ
7702 cpumask_copy(cpus, cpu_active_mask);
7703
ae92882e 7704 schedstat_inc(sd->lb_count[idle]);
1e3c88bd
PZ
7705
7706redo:
23f0d209
JK
7707 if (!should_we_balance(&env)) {
7708 *continue_balancing = 0;
1e3c88bd 7709 goto out_balanced;
23f0d209 7710 }
1e3c88bd 7711
23f0d209 7712 group = find_busiest_group(&env);
1e3c88bd 7713 if (!group) {
ae92882e 7714 schedstat_inc(sd->lb_nobusyg[idle]);
1e3c88bd
PZ
7715 goto out_balanced;
7716 }
7717
b9403130 7718 busiest = find_busiest_queue(&env, group);
1e3c88bd 7719 if (!busiest) {
ae92882e 7720 schedstat_inc(sd->lb_nobusyq[idle]);
1e3c88bd
PZ
7721 goto out_balanced;
7722 }
7723
78feefc5 7724 BUG_ON(busiest == env.dst_rq);
1e3c88bd 7725
ae92882e 7726 schedstat_add(sd->lb_imbalance[idle], env.imbalance);
1e3c88bd 7727
1aaf90a4
VG
7728 env.src_cpu = busiest->cpu;
7729 env.src_rq = busiest;
7730
1e3c88bd
PZ
7731 ld_moved = 0;
7732 if (busiest->nr_running > 1) {
7733 /*
7734 * Attempt to move tasks. If find_busiest_group has found
7735 * an imbalance but busiest->nr_running <= 1, the group is
7736 * still unbalanced. ld_moved simply stays zero, so it is
7737 * correctly treated as an imbalance.
7738 */
8e45cb54 7739 env.flags |= LBF_ALL_PINNED;
c82513e5 7740 env.loop_max = min(sysctl_sched_nr_migrate, busiest->nr_running);
8e45cb54 7741
5d6523eb 7742more_balance:
163122b7 7743 raw_spin_lock_irqsave(&busiest->lock, flags);
88b8dac0
SV
7744
7745 /*
7746 * cur_ld_moved - load moved in current iteration
7747 * ld_moved - cumulative load moved across iterations
7748 */
163122b7 7749 cur_ld_moved = detach_tasks(&env);
1e3c88bd
PZ
7750
7751 /*
163122b7
KT
7752 * We've detached some tasks from busiest_rq. Every
7753 * task is masked "TASK_ON_RQ_MIGRATING", so we can safely
7754 * unlock busiest->lock, and we are able to be sure
7755 * that nobody can manipulate the tasks in parallel.
7756 * See task_rq_lock() family for the details.
1e3c88bd 7757 */
163122b7
KT
7758
7759 raw_spin_unlock(&busiest->lock);
7760
7761 if (cur_ld_moved) {
7762 attach_tasks(&env);
7763 ld_moved += cur_ld_moved;
7764 }
7765
1e3c88bd 7766 local_irq_restore(flags);
88b8dac0 7767
f1cd0858
JK
7768 if (env.flags & LBF_NEED_BREAK) {
7769 env.flags &= ~LBF_NEED_BREAK;
7770 goto more_balance;
7771 }
7772
88b8dac0
SV
7773 /*
7774 * Revisit (affine) tasks on src_cpu that couldn't be moved to
7775 * us and move them to an alternate dst_cpu in our sched_group
7776 * where they can run. The upper limit on how many times we
7777 * iterate on same src_cpu is dependent on number of cpus in our
7778 * sched_group.
7779 *
7780 * This changes load balance semantics a bit on who can move
7781 * load to a given_cpu. In addition to the given_cpu itself
7782 * (or an ilb_cpu acting on its behalf where given_cpu is
7783 * nohz-idle), we now have balance_cpu in a position to move
7784 * load to given_cpu. In rare situations, this may cause
7785 * conflicts (balance_cpu and given_cpu/ilb_cpu deciding
7786 * _independently_ and at _same_ time to move some load to
7787 * given_cpu) causing excess load to be moved to given_cpu.
7788 * This however should not happen so much in practice and
7789 * moreover subsequent load balance cycles should correct the
7790 * excess load moved.
7791 */
6263322c 7792 if ((env.flags & LBF_DST_PINNED) && env.imbalance > 0) {
88b8dac0 7793
7aff2e3a
VD
7794 /* Prevent to re-select dst_cpu via env's cpus */
7795 cpumask_clear_cpu(env.dst_cpu, env.cpus);
7796
78feefc5 7797 env.dst_rq = cpu_rq(env.new_dst_cpu);
88b8dac0 7798 env.dst_cpu = env.new_dst_cpu;
6263322c 7799 env.flags &= ~LBF_DST_PINNED;
88b8dac0
SV
7800 env.loop = 0;
7801 env.loop_break = sched_nr_migrate_break;
e02e60c1 7802
88b8dac0
SV
7803 /*
7804 * Go back to "more_balance" rather than "redo" since we
7805 * need to continue with same src_cpu.
7806 */
7807 goto more_balance;
7808 }
1e3c88bd 7809
6263322c
PZ
7810 /*
7811 * We failed to reach balance because of affinity.
7812 */
7813 if (sd_parent) {
63b2ca30 7814 int *group_imbalance = &sd_parent->groups->sgc->imbalance;
6263322c 7815
afdeee05 7816 if ((env.flags & LBF_SOME_PINNED) && env.imbalance > 0)
6263322c 7817 *group_imbalance = 1;
6263322c
PZ
7818 }
7819
1e3c88bd 7820 /* All tasks on this runqueue were pinned by CPU affinity */
8e45cb54 7821 if (unlikely(env.flags & LBF_ALL_PINNED)) {
1e3c88bd 7822 cpumask_clear_cpu(cpu_of(busiest), cpus);
bbf18b19
PN
7823 if (!cpumask_empty(cpus)) {
7824 env.loop = 0;
7825 env.loop_break = sched_nr_migrate_break;
1e3c88bd 7826 goto redo;
bbf18b19 7827 }
afdeee05 7828 goto out_all_pinned;
1e3c88bd
PZ
7829 }
7830 }
7831
7832 if (!ld_moved) {
ae92882e 7833 schedstat_inc(sd->lb_failed[idle]);
58b26c4c
VP
7834 /*
7835 * Increment the failure counter only on periodic balance.
7836 * We do not want newidle balance, which can be very
7837 * frequent, pollute the failure counter causing
7838 * excessive cache_hot migrations and active balances.
7839 */
7840 if (idle != CPU_NEWLY_IDLE)
7841 sd->nr_balance_failed++;
1e3c88bd 7842
bd939f45 7843 if (need_active_balance(&env)) {
1e3c88bd
PZ
7844 raw_spin_lock_irqsave(&busiest->lock, flags);
7845
969c7921
TH
7846 /* don't kick the active_load_balance_cpu_stop,
7847 * if the curr task on busiest cpu can't be
7848 * moved to this_cpu
1e3c88bd
PZ
7849 */
7850 if (!cpumask_test_cpu(this_cpu,
fa17b507 7851 tsk_cpus_allowed(busiest->curr))) {
1e3c88bd
PZ
7852 raw_spin_unlock_irqrestore(&busiest->lock,
7853 flags);
8e45cb54 7854 env.flags |= LBF_ALL_PINNED;
1e3c88bd
PZ
7855 goto out_one_pinned;
7856 }
7857
969c7921
TH
7858 /*
7859 * ->active_balance synchronizes accesses to
7860 * ->active_balance_work. Once set, it's cleared
7861 * only after active load balance is finished.
7862 */
1e3c88bd
PZ
7863 if (!busiest->active_balance) {
7864 busiest->active_balance = 1;
7865 busiest->push_cpu = this_cpu;
7866 active_balance = 1;
7867 }
7868 raw_spin_unlock_irqrestore(&busiest->lock, flags);
969c7921 7869
bd939f45 7870 if (active_balance) {
969c7921
TH
7871 stop_one_cpu_nowait(cpu_of(busiest),
7872 active_load_balance_cpu_stop, busiest,
7873 &busiest->active_balance_work);
bd939f45 7874 }
1e3c88bd 7875
d02c0711 7876 /* We've kicked active balancing, force task migration. */
1e3c88bd
PZ
7877 sd->nr_balance_failed = sd->cache_nice_tries+1;
7878 }
7879 } else
7880 sd->nr_balance_failed = 0;
7881
7882 if (likely(!active_balance)) {
7883 /* We were unbalanced, so reset the balancing interval */
7884 sd->balance_interval = sd->min_interval;
7885 } else {
7886 /*
7887 * If we've begun active balancing, start to back off. This
7888 * case may not be covered by the all_pinned logic if there
7889 * is only 1 task on the busy runqueue (because we don't call
163122b7 7890 * detach_tasks).
1e3c88bd
PZ
7891 */
7892 if (sd->balance_interval < sd->max_interval)
7893 sd->balance_interval *= 2;
7894 }
7895
1e3c88bd
PZ
7896 goto out;
7897
7898out_balanced:
afdeee05
VG
7899 /*
7900 * We reach balance although we may have faced some affinity
7901 * constraints. Clear the imbalance flag if it was set.
7902 */
7903 if (sd_parent) {
7904 int *group_imbalance = &sd_parent->groups->sgc->imbalance;
7905
7906 if (*group_imbalance)
7907 *group_imbalance = 0;
7908 }
7909
7910out_all_pinned:
7911 /*
7912 * We reach balance because all tasks are pinned at this level so
7913 * we can't migrate them. Let the imbalance flag set so parent level
7914 * can try to migrate them.
7915 */
ae92882e 7916 schedstat_inc(sd->lb_balanced[idle]);
1e3c88bd
PZ
7917
7918 sd->nr_balance_failed = 0;
7919
7920out_one_pinned:
7921 /* tune up the balancing interval */
8e45cb54 7922 if (((env.flags & LBF_ALL_PINNED) &&
5b54b56b 7923 sd->balance_interval < MAX_PINNED_INTERVAL) ||
1e3c88bd
PZ
7924 (sd->balance_interval < sd->max_interval))
7925 sd->balance_interval *= 2;
7926
46e49b38 7927 ld_moved = 0;
1e3c88bd 7928out:
1e3c88bd
PZ
7929 return ld_moved;
7930}
7931
52a08ef1
JL
7932static inline unsigned long
7933get_sd_balance_interval(struct sched_domain *sd, int cpu_busy)
7934{
7935 unsigned long interval = sd->balance_interval;
7936
7937 if (cpu_busy)
7938 interval *= sd->busy_factor;
7939
7940 /* scale ms to jiffies */
7941 interval = msecs_to_jiffies(interval);
7942 interval = clamp(interval, 1UL, max_load_balance_interval);
7943
7944 return interval;
7945}
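/*
 * Example with assumed values: sd->balance_interval == 64 (ms) and a
 * busy_factor of 32 give a busy interval of 2048 ms; with HZ == 1000 that
 * is 2048 jiffies, which on an 8-cpu box is clamped down to
 * max_load_balance_interval == HZ * 8 / 10 == 800 jiffies by the clamp()
 * above.
 */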
7946
7947static inline void
31851a98 7948update_next_balance(struct sched_domain *sd, unsigned long *next_balance)
52a08ef1
JL
7949{
7950 unsigned long interval, next;
7951
31851a98
LY
7952 /* used by idle balance, so cpu_busy = 0 */
7953 interval = get_sd_balance_interval(sd, 0);
52a08ef1
JL
7954 next = sd->last_balance + interval;
7955
7956 if (time_after(*next_balance, next))
7957 *next_balance = next;
7958}
7959
1e3c88bd
PZ
7960/*
7961 * idle_balance is called by schedule() if this_cpu is about to become
7962 * idle. Attempts to pull tasks from other CPUs.
7963 */
6e83125c 7964static int idle_balance(struct rq *this_rq)
1e3c88bd 7965{
52a08ef1
JL
7966 unsigned long next_balance = jiffies + HZ;
7967 int this_cpu = this_rq->cpu;
1e3c88bd
PZ
7968 struct sched_domain *sd;
7969 int pulled_task = 0;
9bd721c5 7970 u64 curr_cost = 0;
1e3c88bd 7971
6e83125c
PZ
7972 /*
7973 * We must set idle_stamp _before_ calling idle_balance(), such that we
7974 * measure the duration of idle_balance() as idle time.
7975 */
7976 this_rq->idle_stamp = rq_clock(this_rq);
7977
4486edd1
TC
7978 if (this_rq->avg_idle < sysctl_sched_migration_cost ||
7979 !this_rq->rd->overload) {
52a08ef1
JL
7980 rcu_read_lock();
7981 sd = rcu_dereference_check_sched_domain(this_rq->sd);
7982 if (sd)
31851a98 7983 update_next_balance(sd, &next_balance);
52a08ef1
JL
7984 rcu_read_unlock();
7985
6e83125c 7986 goto out;
52a08ef1 7987 }
1e3c88bd 7988
f492e12e
PZ
7989 raw_spin_unlock(&this_rq->lock);
7990
48a16753 7991 update_blocked_averages(this_cpu);
dce840a0 7992 rcu_read_lock();
1e3c88bd 7993 for_each_domain(this_cpu, sd) {
23f0d209 7994 int continue_balancing = 1;
9bd721c5 7995 u64 t0, domain_cost;
1e3c88bd
PZ
7996
7997 if (!(sd->flags & SD_LOAD_BALANCE))
7998 continue;
7999
52a08ef1 8000 if (this_rq->avg_idle < curr_cost + sd->max_newidle_lb_cost) {
31851a98 8001 update_next_balance(sd, &next_balance);
9bd721c5 8002 break;
52a08ef1 8003 }
9bd721c5 8004
f492e12e 8005 if (sd->flags & SD_BALANCE_NEWIDLE) {
9bd721c5
JL
8006 t0 = sched_clock_cpu(this_cpu);
8007
f492e12e 8008 pulled_task = load_balance(this_cpu, this_rq,
23f0d209
JK
8009 sd, CPU_NEWLY_IDLE,
8010 &continue_balancing);
9bd721c5
JL
8011
8012 domain_cost = sched_clock_cpu(this_cpu) - t0;
8013 if (domain_cost > sd->max_newidle_lb_cost)
8014 sd->max_newidle_lb_cost = domain_cost;
8015
8016 curr_cost += domain_cost;
f492e12e 8017 }
1e3c88bd 8018
31851a98 8019 update_next_balance(sd, &next_balance);
39a4d9ca
JL
8020
8021 /*
8022 * Stop searching for tasks to pull if there are
8023 * now runnable tasks on this rq.
8024 */
8025 if (pulled_task || this_rq->nr_running > 0)
1e3c88bd 8026 break;
1e3c88bd 8027 }
dce840a0 8028 rcu_read_unlock();
f492e12e
PZ
8029
8030 raw_spin_lock(&this_rq->lock);
8031
0e5b5337
JL
8032 if (curr_cost > this_rq->max_idle_balance_cost)
8033 this_rq->max_idle_balance_cost = curr_cost;
8034
e5fc6611 8035 /*
0e5b5337
JL
8036 * While browsing the domains, we released the rq lock, a task could
8037 * have been enqueued in the meantime. Since we're not going idle,
8038 * pretend we pulled a task.
e5fc6611 8039 */
0e5b5337 8040 if (this_rq->cfs.h_nr_running && !pulled_task)
6e83125c 8041 pulled_task = 1;
e5fc6611 8042
52a08ef1
JL
8043out:
8044 /* Move the next balance forward */
8045 if (time_after(this_rq->next_balance, next_balance))
1e3c88bd 8046 this_rq->next_balance = next_balance;
9bd721c5 8047
e4aa358b 8048 /* Is there a task of a high priority class? */
46383648 8049 if (this_rq->nr_running != this_rq->cfs.h_nr_running)
e4aa358b
KT
8050 pulled_task = -1;
8051
38c6ade2 8052 if (pulled_task)
6e83125c
PZ
8053 this_rq->idle_stamp = 0;
8054
3c4017c1 8055 return pulled_task;
1e3c88bd
PZ
8056}
8057
8058/*
969c7921
TH
8059 * active_load_balance_cpu_stop is run by cpu stopper. It pushes
8060 * running tasks off the busiest CPU onto idle CPUs. It requires at
8061 * least 1 task to be running on each physical CPU where possible, and
8062 * avoids physical / logical imbalances.
1e3c88bd 8063 */
969c7921 8064static int active_load_balance_cpu_stop(void *data)
1e3c88bd 8065{
969c7921
TH
8066 struct rq *busiest_rq = data;
8067 int busiest_cpu = cpu_of(busiest_rq);
1e3c88bd 8068 int target_cpu = busiest_rq->push_cpu;
969c7921 8069 struct rq *target_rq = cpu_rq(target_cpu);
1e3c88bd 8070 struct sched_domain *sd;
e5673f28 8071 struct task_struct *p = NULL;
969c7921
TH
8072
8073 raw_spin_lock_irq(&busiest_rq->lock);
8074
8075 /* make sure the requested cpu hasn't gone down in the meantime */
8076 if (unlikely(busiest_cpu != smp_processor_id() ||
8077 !busiest_rq->active_balance))
8078 goto out_unlock;
1e3c88bd
PZ
8079
8080 /* Is there any task to move? */
8081 if (busiest_rq->nr_running <= 1)
969c7921 8082 goto out_unlock;
1e3c88bd
PZ
8083
8084 /*
8085 * This condition is "impossible", if it occurs
8086 * we need to fix it. Originally reported by
8087 * Bjorn Helgaas on a 128-cpu setup.
8088 */
8089 BUG_ON(busiest_rq == target_rq);
8090
1e3c88bd 8091 /* Search for an sd spanning us and the target CPU. */
dce840a0 8092 rcu_read_lock();
1e3c88bd
PZ
8093 for_each_domain(target_cpu, sd) {
8094 if ((sd->flags & SD_LOAD_BALANCE) &&
8095 cpumask_test_cpu(busiest_cpu, sched_domain_span(sd)))
8096 break;
8097 }
8098
8099 if (likely(sd)) {
8e45cb54
PZ
8100 struct lb_env env = {
8101 .sd = sd,
ddcdf6e7
PZ
8102 .dst_cpu = target_cpu,
8103 .dst_rq = target_rq,
8104 .src_cpu = busiest_rq->cpu,
8105 .src_rq = busiest_rq,
8e45cb54
PZ
8106 .idle = CPU_IDLE,
8107 };
8108
ae92882e 8109 schedstat_inc(sd->alb_count);
1e3c88bd 8110
e5673f28 8111 p = detach_one_task(&env);
d02c0711 8112 if (p) {
ae92882e 8113 schedstat_inc(sd->alb_pushed);
d02c0711
SD
8114 /* Active balancing done, reset the failure counter. */
8115 sd->nr_balance_failed = 0;
8116 } else {
ae92882e 8117 schedstat_inc(sd->alb_failed);
d02c0711 8118 }
1e3c88bd 8119 }
dce840a0 8120 rcu_read_unlock();
969c7921
TH
8121out_unlock:
8122 busiest_rq->active_balance = 0;
e5673f28
KT
8123 raw_spin_unlock(&busiest_rq->lock);
8124
8125 if (p)
8126 attach_one_task(target_rq, p);
8127
8128 local_irq_enable();
8129
969c7921 8130 return 0;
1e3c88bd
PZ
8131}
8132
d987fc7f
MG
8133static inline int on_null_domain(struct rq *rq)
8134{
8135 return unlikely(!rcu_dereference_sched(rq->sd));
8136}
8137
3451d024 8138#ifdef CONFIG_NO_HZ_COMMON
83cd4fe2
VP
8139/*
8140 * idle load balancing details
83cd4fe2
VP
8141 * - When one of the busy CPUs notices that there may be an idle rebalancing
8142 * needed, it will kick the idle load balancer, which then does idle
8143 * load balancing for all the idle CPUs.
8144 */
1e3c88bd 8145static struct {
83cd4fe2 8146 cpumask_var_t idle_cpus_mask;
0b005cf5 8147 atomic_t nr_cpus;
83cd4fe2
VP
8148 unsigned long next_balance; /* in jiffy units */
8149} nohz ____cacheline_aligned;
1e3c88bd 8150
3dd0337d 8151static inline int find_new_ilb(void)
1e3c88bd 8152{
0b005cf5 8153 int ilb = cpumask_first(nohz.idle_cpus_mask);
1e3c88bd 8154
786d6dc7
SS
8155 if (ilb < nr_cpu_ids && idle_cpu(ilb))
8156 return ilb;
8157
8158 return nr_cpu_ids;
1e3c88bd 8159}
1e3c88bd 8160
83cd4fe2
VP
8161/*
8162 * Kick a CPU to do the nohz balancing, if it is time for it. We pick the
8163 * nohz_load_balancer CPU (if there is one) otherwise fallback to any idle
8164 * CPU (if there is one).
8165 */
0aeeeeba 8166static void nohz_balancer_kick(void)
83cd4fe2
VP
8167{
8168 int ilb_cpu;
8169
8170 nohz.next_balance++;
8171
3dd0337d 8172 ilb_cpu = find_new_ilb();
83cd4fe2 8173
0b005cf5
SS
8174 if (ilb_cpu >= nr_cpu_ids)
8175 return;
83cd4fe2 8176
cd490c5b 8177 if (test_and_set_bit(NOHZ_BALANCE_KICK, nohz_flags(ilb_cpu)))
1c792db7
SS
8178 return;
8179 /*
8180 * Use smp_send_reschedule() instead of resched_cpu().
8181 * This way we generate a sched IPI on the target cpu which
8182 * is idle. And the softirq performing nohz idle load balance
8183 * will be run before returning from the IPI.
8184 */
8185 smp_send_reschedule(ilb_cpu);
83cd4fe2
VP
8186 return;
8187}
8188
20a5c8cc 8189void nohz_balance_exit_idle(unsigned int cpu)
71325960
SS
8190{
8191 if (unlikely(test_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu)))) {
d987fc7f
MG
8192 /*
8193 * Completely isolated CPUs don't ever set, so we must test.
8194 */
8195 if (likely(cpumask_test_cpu(cpu, nohz.idle_cpus_mask))) {
8196 cpumask_clear_cpu(cpu, nohz.idle_cpus_mask);
8197 atomic_dec(&nohz.nr_cpus);
8198 }
71325960
SS
8199 clear_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu));
8200 }
8201}
8202
69e1e811
SS
8203static inline void set_cpu_sd_state_busy(void)
8204{
8205 struct sched_domain *sd;
37dc6b50 8206 int cpu = smp_processor_id();
69e1e811 8207
69e1e811 8208 rcu_read_lock();
0e369d75 8209 sd = rcu_dereference(per_cpu(sd_llc, cpu));
25f55d9d
VG
8210
8211 if (!sd || !sd->nohz_idle)
8212 goto unlock;
8213 sd->nohz_idle = 0;
8214
0e369d75 8215 atomic_inc(&sd->shared->nr_busy_cpus);
25f55d9d 8216unlock:
69e1e811
SS
8217 rcu_read_unlock();
8218}
8219
8220void set_cpu_sd_state_idle(void)
8221{
8222 struct sched_domain *sd;
37dc6b50 8223 int cpu = smp_processor_id();
69e1e811 8224
69e1e811 8225 rcu_read_lock();
0e369d75 8226 sd = rcu_dereference(per_cpu(sd_llc, cpu));
25f55d9d
VG
8227
8228 if (!sd || sd->nohz_idle)
8229 goto unlock;
8230 sd->nohz_idle = 1;
8231
0e369d75 8232 atomic_dec(&sd->shared->nr_busy_cpus);
25f55d9d 8233unlock:
69e1e811
SS
8234 rcu_read_unlock();
8235}
8236
1e3c88bd 8237/*
c1cc017c 8238 * This routine will record that the cpu is going idle with tick stopped.
0b005cf5 8239 * This info will be used in performing idle load balancing in the future.
1e3c88bd 8240 */
c1cc017c 8241void nohz_balance_enter_idle(int cpu)
1e3c88bd 8242{
71325960
SS
8243 /*
8244 * If this cpu is going down, then nothing needs to be done.
8245 */
8246 if (!cpu_active(cpu))
8247 return;
8248
c1cc017c
AS
8249 if (test_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu)))
8250 return;
1e3c88bd 8251
d987fc7f
MG
8252 /*
8253 * If we're a completely isolated CPU, we don't play.
8254 */
8255 if (on_null_domain(cpu_rq(cpu)))
8256 return;
8257
c1cc017c
AS
8258 cpumask_set_cpu(cpu, nohz.idle_cpus_mask);
8259 atomic_inc(&nohz.nr_cpus);
8260 set_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu));
1e3c88bd
PZ
8261}
8262#endif
8263
8264static DEFINE_SPINLOCK(balancing);
8265
49c022e6
PZ
8266/*
8267 * Scale the max load_balance interval with the number of CPUs in the system.
8268 * This trades load-balance latency on larger machines for less cross talk.
8269 */
029632fb 8270void update_max_interval(void)
49c022e6
PZ
8271{
8272 max_load_balance_interval = HZ*num_online_cpus()/10;
8273}
8274
1e3c88bd
PZ
8275/*
8276 * It checks each scheduling domain to see if it is due to be balanced,
8277 * and initiates a balancing operation if so.
8278 *
b9b0853a 8279 * Balancing parameters are set up in init_sched_domains.
1e3c88bd 8280 */
f7ed0a89 8281static void rebalance_domains(struct rq *rq, enum cpu_idle_type idle)
1e3c88bd 8282{
23f0d209 8283 int continue_balancing = 1;
f7ed0a89 8284 int cpu = rq->cpu;
1e3c88bd 8285 unsigned long interval;
04f733b4 8286 struct sched_domain *sd;
1e3c88bd
PZ
8287 /* Earliest time when we have to do rebalance again */
8288 unsigned long next_balance = jiffies + 60*HZ;
8289 int update_next_balance = 0;
f48627e6
JL
8290 int need_serialize, need_decay = 0;
8291 u64 max_cost = 0;
1e3c88bd 8292
48a16753 8293 update_blocked_averages(cpu);
2069dd75 8294
dce840a0 8295 rcu_read_lock();
1e3c88bd 8296 for_each_domain(cpu, sd) {
f48627e6
JL
8297 /*
8298 * Decay the newidle max times here because this is a regular
8299 * visit to all the domains. Decay ~1% per second.
8300 */
8301 if (time_after(jiffies, sd->next_decay_max_lb_cost)) {
8302 sd->max_newidle_lb_cost =
8303 (sd->max_newidle_lb_cost * 253) / 256;
8304 sd->next_decay_max_lb_cost = jiffies + HZ;
8305 need_decay = 1;
8306 }
8307 max_cost += sd->max_newidle_lb_cost;
8308
1e3c88bd
PZ
8309 if (!(sd->flags & SD_LOAD_BALANCE))
8310 continue;
8311
f48627e6
JL
8312 /*
8313 * Stop the load balance at this level. There is another
8314 * CPU in our sched group which is doing load balancing more
8315 * actively.
8316 */
8317 if (!continue_balancing) {
8318 if (need_decay)
8319 continue;
8320 break;
8321 }
8322
52a08ef1 8323 interval = get_sd_balance_interval(sd, idle != CPU_IDLE);
1e3c88bd
PZ
8324
8325 need_serialize = sd->flags & SD_SERIALIZE;
1e3c88bd
PZ
8326 if (need_serialize) {
8327 if (!spin_trylock(&balancing))
8328 goto out;
8329 }
8330
8331 if (time_after_eq(jiffies, sd->last_balance + interval)) {
23f0d209 8332 if (load_balance(cpu, rq, sd, idle, &continue_balancing)) {
1e3c88bd 8333 /*
6263322c 8334 * The LBF_DST_PINNED logic could have changed
de5eb2dd
JK
8335 * env->dst_cpu, so we can't know our idle
8336 * state even if we migrated tasks. Update it.
1e3c88bd 8337 */
de5eb2dd 8338 idle = idle_cpu(cpu) ? CPU_IDLE : CPU_NOT_IDLE;
1e3c88bd
PZ
8339 }
8340 sd->last_balance = jiffies;
52a08ef1 8341 interval = get_sd_balance_interval(sd, idle != CPU_IDLE);
1e3c88bd
PZ
8342 }
8343 if (need_serialize)
8344 spin_unlock(&balancing);
8345out:
8346 if (time_after(next_balance, sd->last_balance + interval)) {
8347 next_balance = sd->last_balance + interval;
8348 update_next_balance = 1;
8349 }
f48627e6
JL
8350 }
8351 if (need_decay) {
1e3c88bd 8352 /*
f48627e6
JL
8353 * Ensure the rq-wide value also decays but keep it at a
8354 * reasonable floor to avoid funnies with rq->avg_idle.
1e3c88bd 8355 */
f48627e6
JL
8356 rq->max_idle_balance_cost =
8357 max((u64)sysctl_sched_migration_cost, max_cost);
1e3c88bd 8358 }
dce840a0 8359 rcu_read_unlock();
1e3c88bd
PZ
8360
8361 /*
8362 * next_balance will be updated only when there is a need.
8364 * When the cpu is attached to a null domain, for example, it will not be
8364 * updated.
8365 */
c5afb6a8 8366 if (likely(update_next_balance)) {
1e3c88bd 8367 rq->next_balance = next_balance;
c5afb6a8
VG
8368
8369#ifdef CONFIG_NO_HZ_COMMON
8370 /*
8371	 * If this CPU has been elected to perform the nohz idle
8372	 * balance, then the other idle CPUs have already rebalanced with
8373	 * nohz_idle_balance() and nohz.next_balance has been
8374	 * updated accordingly. This CPU is now running the idle load
8375	 * balance for itself and we need to update
8376	 * nohz.next_balance accordingly.
8377 */
8378 if ((idle == CPU_IDLE) && time_after(nohz.next_balance, rq->next_balance))
8379 nohz.next_balance = rq->next_balance;
8380#endif
8381 }
1e3c88bd
PZ
8382}
8383
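The "~1% per second" decay above comes from the 253/256 factor applied once per second: each application keeps about 98.8% of max_newidle_lb_cost, so the value halves in roughly a minute. A small stand-alone sketch of that decay curve, using the same integer arithmetic (the starting cost is an invented figure):

#include <stdio.h>

int main(void)
{
	unsigned long long cost = 1000000;	/* assumed starting cost, in ns */
	int sec, i;

	for (sec = 0; sec <= 60; sec += 10) {
		printf("t=%2ds  max_newidle_lb_cost ~ %7llu ns\n", sec, cost);
		/* apply the per-second decay ten times to advance 10 seconds */
		for (i = 0; i < 10; i++)
			cost = cost * 253 / 256;
	}
	return 0;
}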
3451d024 8384#ifdef CONFIG_NO_HZ_COMMON
1e3c88bd 8385/*
3451d024 8386 * In CONFIG_NO_HZ_COMMON case, the idle balance kickee will do the
1e3c88bd
PZ
8387 * rebalancing for all the cpus for whom scheduler ticks are stopped.
8388 */
208cb16b 8389static void nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle)
83cd4fe2 8390{
208cb16b 8391 int this_cpu = this_rq->cpu;
83cd4fe2
VP
8392 struct rq *rq;
8393 int balance_cpu;
c5afb6a8
VG
8394 /* Earliest time when we have to do rebalance again */
8395 unsigned long next_balance = jiffies + 60*HZ;
8396 int update_next_balance = 0;
83cd4fe2 8397
1c792db7
SS
8398 if (idle != CPU_IDLE ||
8399 !test_bit(NOHZ_BALANCE_KICK, nohz_flags(this_cpu)))
8400 goto end;
83cd4fe2
VP
8401
8402 for_each_cpu(balance_cpu, nohz.idle_cpus_mask) {
8a6d42d1 8403 if (balance_cpu == this_cpu || !idle_cpu(balance_cpu))
83cd4fe2
VP
8404 continue;
8405
8406 /*
8407 * If this cpu gets work to do, stop the load balancing
8408 * work being done for other cpus. Next load
8409 * balancing owner will pick it up.
8410 */
1c792db7 8411 if (need_resched())
83cd4fe2 8412 break;
83cd4fe2 8413
5ed4f1d9
VG
8414 rq = cpu_rq(balance_cpu);
8415
ed61bbc6
TC
8416 /*
8417 * If time for next balance is due,
8418 * do the balance.
8419 */
8420 if (time_after_eq(jiffies, rq->next_balance)) {
8421 raw_spin_lock_irq(&rq->lock);
8422 update_rq_clock(rq);
cee1afce 8423 cpu_load_update_idle(rq);
ed61bbc6
TC
8424 raw_spin_unlock_irq(&rq->lock);
8425 rebalance_domains(rq, CPU_IDLE);
8426 }
83cd4fe2 8427
c5afb6a8
VG
8428 if (time_after(next_balance, rq->next_balance)) {
8429 next_balance = rq->next_balance;
8430 update_next_balance = 1;
8431 }
83cd4fe2 8432 }
c5afb6a8
VG
8433
8434 /*
8435 * next_balance will be updated only when there is a need.
8436	 * When the CPU is attached to the null domain, for example, it will
8437	 * not be updated.
8438 */
8439 if (likely(update_next_balance))
8440 nohz.next_balance = next_balance;
1c792db7
SS
8441end:
8442 clear_bit(NOHZ_BALANCE_KICK, nohz_flags(this_cpu));
83cd4fe2
VP
8443}
8444
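The nohz.next_balance update above is just a running minimum over rq->next_balance of every idle CPU that was balanced on behalf of. A toy user-space sketch of that aggregation (the per-CPU deadlines and the HZ value are made up for illustration):

#include <stdio.h>

/* Wrap-safe "a is later than b", mirroring the kernel's time_after(). */
#define time_after(a, b)	((long)((b) - (a)) < 0)

int main(void)
{
	unsigned long jiffies = 1000;
	/* invented rq->next_balance values for three idle CPUs */
	unsigned long rq_next_balance[] = { 1400, 1150, 1900 };
	unsigned long next_balance = jiffies + 60 * 250;	/* 60*HZ, HZ assumed 250 */
	int update_next_balance = 0;
	unsigned int cpu;

	for (cpu = 0; cpu < 3; cpu++) {
		if (time_after(next_balance, rq_next_balance[cpu])) {
			next_balance = rq_next_balance[cpu];
			update_next_balance = 1;
		}
	}
	if (update_next_balance)
		printf("nohz.next_balance <- %lu\n", next_balance);
	return 0;
}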
8445/*
0b005cf5 8446 * Current heuristic for kicking the idle load balancer in the presence
1aaf90a4 8447 * of an idle cpu in the system.
0b005cf5 8448 * - This rq has more than one task.
1aaf90a4
VG
8449 * - This rq has at least one CFS task and the capacity of the CPU is
8450 * significantly reduced because of RT tasks or IRQs.
8451	 * - At the parent of the LLC scheduler domain level, this cpu's scheduler
8452	 *   group has multiple busy cpus.
0b005cf5
SS
8453	 * - For SD_ASYM_PACKING, if the lower-numbered cpus in the scheduler
8454	 *   domain span are idle.
83cd4fe2 8455 */
1aaf90a4 8456static inline bool nohz_kick_needed(struct rq *rq)
83cd4fe2
VP
8457{
8458 unsigned long now = jiffies;
0e369d75 8459 struct sched_domain_shared *sds;
0b005cf5 8460 struct sched_domain *sd;
4a725627 8461 int nr_busy, cpu = rq->cpu;
1aaf90a4 8462 bool kick = false;
83cd4fe2 8463
4a725627 8464 if (unlikely(rq->idle_balance))
1aaf90a4 8465 return false;
83cd4fe2 8466
1c792db7
SS
8467 /*
8468	 * We may have recently been in ticked or tickless idle mode. At the
8469	 * first busy tick after returning from idle, we will update the busy stats.
8470 */
69e1e811 8471 set_cpu_sd_state_busy();
c1cc017c 8472 nohz_balance_exit_idle(cpu);
0b005cf5
SS
8473
8474 /*
8475 * None are in tickless mode and hence no need for NOHZ idle load
8476 * balancing.
8477 */
8478 if (likely(!atomic_read(&nohz.nr_cpus)))
1aaf90a4 8479 return false;
1c792db7
SS
8480
8481 if (time_before(now, nohz.next_balance))
1aaf90a4 8482 return false;
83cd4fe2 8483
0b005cf5 8484 if (rq->nr_running >= 2)
1aaf90a4 8485 return true;
83cd4fe2 8486
067491b7 8487 rcu_read_lock();
0e369d75
PZ
8488 sds = rcu_dereference(per_cpu(sd_llc_shared, cpu));
8489 if (sds) {
8490 /*
8491 * XXX: write a coherent comment on why we do this.
8492 * See also: http://lkml.kernel.org/r/20111202010832.602203411@sbsiddha-desk.sc.intel.com
8493 */
8494 nr_busy = atomic_read(&sds->nr_busy_cpus);
1aaf90a4
VG
8495 if (nr_busy > 1) {
8496 kick = true;
8497 goto unlock;
8498 }
8499
83cd4fe2 8500 }
37dc6b50 8501
1aaf90a4
VG
8502 sd = rcu_dereference(rq->sd);
8503 if (sd) {
8504 if ((rq->cfs.h_nr_running >= 1) &&
8505 check_cpu_capacity(rq, sd)) {
8506 kick = true;
8507 goto unlock;
8508 }
8509 }
37dc6b50 8510
1aaf90a4 8511 sd = rcu_dereference(per_cpu(sd_asym, cpu));
37dc6b50 8512 if (sd && (cpumask_first_and(nohz.idle_cpus_mask,
1aaf90a4
VG
8513 sched_domain_span(sd)) < cpu)) {
8514 kick = true;
8515 goto unlock;
8516 }
067491b7 8517
1aaf90a4 8518unlock:
067491b7 8519 rcu_read_unlock();
1aaf90a4 8520 return kick;
83cd4fe2
VP
8521}
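Condensed, the heuristic above kicks the idle load balancer when any of the listed conditions holds. A simplified, self-contained restatement of that decision (the struct and its fields are invented stand-ins for the rq and sched_domain state the real code reads under RCU):

#include <stdbool.h>
#include <stdio.h>

struct snapshot {			/* invented stand-in for rq/domain state */
	unsigned int nr_running;	/* rq->nr_running */
	unsigned int cfs_h_nr_running;	/* rq->cfs.h_nr_running */
	bool capacity_reduced;		/* outcome of check_cpu_capacity() */
	int llc_busy_cpus;		/* sds->nr_busy_cpus */
	bool asym_lower_cpu_idle;	/* the SD_ASYM_PACKING condition */
};

static bool nohz_kick_needed_model(const struct snapshot *s)
{
	if (s->nr_running >= 2)
		return true;
	if (s->llc_busy_cpus > 1)
		return true;
	if (s->cfs_h_nr_running >= 1 && s->capacity_reduced)
		return true;
	return s->asym_lower_cpu_idle;
}

int main(void)
{
	struct snapshot s = { .nr_running = 1, .cfs_h_nr_running = 1,
			      .capacity_reduced = true };

	printf("kick: %d\n", nohz_kick_needed_model(&s));
	return 0;
}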
8522#else
208cb16b 8523static void nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle) { }
83cd4fe2
VP
8524#endif
8525
8526/*
8527 * run_rebalance_domains is triggered when needed from the scheduler tick.
8528 * Also triggered for nohz idle balancing (with nohz_balancing_kick set).
8529 */
0766f788 8530static __latent_entropy void run_rebalance_domains(struct softirq_action *h)
1e3c88bd 8531{
208cb16b 8532 struct rq *this_rq = this_rq();
6eb57e0d 8533 enum cpu_idle_type idle = this_rq->idle_balance ?
1e3c88bd
PZ
8534 CPU_IDLE : CPU_NOT_IDLE;
8535
1e3c88bd 8536 /*
83cd4fe2 8537 * If this cpu has a pending nohz_balance_kick, then do the
1e3c88bd 8538 * balancing on behalf of the other idle cpus whose ticks are
d4573c3e
PM
8539 * stopped. Do nohz_idle_balance *before* rebalance_domains to
8540 * give the idle cpus a chance to load balance. Else we may
8541 * load balance only within the local sched_domain hierarchy
8542 * and abort nohz_idle_balance altogether if we pull some load.
1e3c88bd 8543 */
208cb16b 8544 nohz_idle_balance(this_rq, idle);
d4573c3e 8545 rebalance_domains(this_rq, idle);
1e3c88bd
PZ
8546}
8547
1e3c88bd
PZ
8548/*
8549 * Trigger the SCHED_SOFTIRQ if it is time to do periodic load balancing.
1e3c88bd 8550 */
7caff66f 8551void trigger_load_balance(struct rq *rq)
1e3c88bd 8552{
1e3c88bd 8553 /* Don't need to rebalance while attached to NULL domain */
c726099e
DL
8554 if (unlikely(on_null_domain(rq)))
8555 return;
8556
8557 if (time_after_eq(jiffies, rq->next_balance))
1e3c88bd 8558 raise_softirq(SCHED_SOFTIRQ);
3451d024 8559#ifdef CONFIG_NO_HZ_COMMON
c726099e 8560 if (nohz_kick_needed(rq))
0aeeeeba 8561 nohz_balancer_kick();
83cd4fe2 8562#endif
1e3c88bd
PZ
8563}
8564
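trigger_load_balance() leans on time_after_eq() so the rq->next_balance check stays correct even when jiffies wraps. A minimal demonstration of why the signed-difference form is wrap-safe while a naive comparison is not (the macro body mirrors the definition in include/linux/jiffies.h, minus the typecheck):

#include <stdio.h>
#include <limits.h>

#define time_after_eq(a, b)	((long)((a) - (b)) >= 0)

int main(void)
{
	/* next_balance was set just before the jiffies counter wrapped ... */
	unsigned long next_balance = ULONG_MAX - 5;
	/* ... and jiffies has since wrapped past zero. */
	unsigned long jiffies = 10;

	printf("naive  (jiffies >= next_balance): %d\n", jiffies >= next_balance);
	printf("wrap-safe time_after_eq():        %d\n",
	       time_after_eq(jiffies, next_balance));
	return 0;
}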
0bcdcf28
CE
8565static void rq_online_fair(struct rq *rq)
8566{
8567 update_sysctl();
0e59bdae
KT
8568
8569 update_runtime_enabled(rq);
0bcdcf28
CE
8570}
8571
8572static void rq_offline_fair(struct rq *rq)
8573{
8574 update_sysctl();
a4c96ae3
PB
8575
8576 /* Ensure any throttled groups are reachable by pick_next_task */
8577 unthrottle_offline_cfs_rqs(rq);
0bcdcf28
CE
8578}
8579
55e12e5e 8580#endif /* CONFIG_SMP */
e1d1484f 8581
bf0f6f24
IM
8582/*
8583 * scheduler tick hitting a task of our scheduling class:
8584 */
8f4d37ec 8585static void task_tick_fair(struct rq *rq, struct task_struct *curr, int queued)
bf0f6f24
IM
8586{
8587 struct cfs_rq *cfs_rq;
8588 struct sched_entity *se = &curr->se;
8589
8590 for_each_sched_entity(se) {
8591 cfs_rq = cfs_rq_of(se);
8f4d37ec 8592 entity_tick(cfs_rq, se, queued);
bf0f6f24 8593 }
18bf2805 8594
b52da86e 8595 if (static_branch_unlikely(&sched_numa_balancing))
cbee9f88 8596 task_tick_numa(rq, curr);
bf0f6f24
IM
8597}
8598
8599/*
cd29fe6f
PZ
8600 * called on fork with the child task as argument from the parent's context
8601 * - child not yet on the tasklist
8602 * - preemption disabled
bf0f6f24 8603 */
cd29fe6f 8604static void task_fork_fair(struct task_struct *p)
bf0f6f24 8605{
4fc420c9
DN
8606 struct cfs_rq *cfs_rq;
8607 struct sched_entity *se = &p->se, *curr;
cd29fe6f 8608 struct rq *rq = this_rq();
bf0f6f24 8609
e210bffd 8610 raw_spin_lock(&rq->lock);
861d034e
PZ
8611 update_rq_clock(rq);
8612
4fc420c9
DN
8613 cfs_rq = task_cfs_rq(current);
8614 curr = cfs_rq->curr;
e210bffd
PZ
8615 if (curr) {
8616 update_curr(cfs_rq);
b5d9d734 8617 se->vruntime = curr->vruntime;
e210bffd 8618 }
aeb73b04 8619 place_entity(cfs_rq, se, 1);
4d78e7b6 8620
cd29fe6f 8621 if (sysctl_sched_child_runs_first && curr && entity_before(curr, se)) {
87fefa38 8622 /*
edcb60a3
IM
8623 * Upon rescheduling, sched_class::put_prev_task() will place
8624 * 'current' within the tree based on its new key value.
8625 */
4d78e7b6 8626 swap(curr->vruntime, se->vruntime);
8875125e 8627 resched_curr(rq);
4d78e7b6 8628 }
bf0f6f24 8629
88ec22d3 8630 se->vruntime -= cfs_rq->min_vruntime;
e210bffd 8631 raw_spin_unlock(&rq->lock);
bf0f6f24
IM
8632}
8633
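The se->vruntime -= cfs_rq->min_vruntime at the end of task_fork_fair() stores the child's vruntime relative to the source runqueue; when the child is later enqueued, the destination runqueue's min_vruntime is added back, so the child keeps its relative lag even if it first runs on another CPU. A toy model of that rebasing (all figures invented):

#include <stdio.h>

int main(void)
{
	unsigned long long src_min_vruntime = 5000000;	/* rq the child was placed on */
	unsigned long long dst_min_vruntime = 9000000;	/* rq it is finally enqueued on */
	unsigned long long child_vruntime   = 5200000;	/* from place_entity() */

	child_vruntime -= src_min_vruntime;	/* task_fork_fair() */
	child_vruntime += dst_min_vruntime;	/* renormalized at enqueue time */

	printf("child still runs %llu ns behind the destination's leftmost task\n",
	       child_vruntime - dst_min_vruntime);
	return 0;
}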
cb469845
SR
8634/*
8635 * Priority of the task has changed. Check to see if we preempt
8636 * the current task.
8637 */
da7a735e
PZ
8638static void
8639prio_changed_fair(struct rq *rq, struct task_struct *p, int oldprio)
cb469845 8640{
da0c1e65 8641 if (!task_on_rq_queued(p))
da7a735e
PZ
8642 return;
8643
cb469845
SR
8644 /*
8645 * Reschedule if we are currently running on this runqueue and
8646 * our priority decreased, or if we are not currently running on
8647 * this runqueue and our priority is higher than the current's
8648 */
da7a735e 8649 if (rq->curr == p) {
cb469845 8650 if (p->prio > oldprio)
8875125e 8651 resched_curr(rq);
cb469845 8652 } else
15afe09b 8653 check_preempt_curr(rq, p, 0);
cb469845
SR
8654}
8655
daa59407 8656static inline bool vruntime_normalized(struct task_struct *p)
da7a735e
PZ
8657{
8658 struct sched_entity *se = &p->se;
da7a735e
PZ
8659
8660 /*
daa59407
BP
8661 * In both the TASK_ON_RQ_QUEUED and TASK_ON_RQ_MIGRATING cases,
8662 * the dequeue_entity(.flags=0) will already have normalized the
8663 * vruntime.
8664 */
8665 if (p->on_rq)
8666 return true;
8667
8668 /*
8669 * When !on_rq, vruntime of the task has usually NOT been normalized.
8670 * But there are some cases where it has already been normalized:
da7a735e 8671 *
daa59407
BP
8672	 * - A forked child which is waiting to be woken up by
8673	 *   wake_up_new_task().
8674	 * - A task which has been woken up by try_to_wake_up() and is
8675	 *   waiting to actually be woken up by sched_ttwu_pending().
da7a735e 8676 */
daa59407
BP
8677 if (!se->sum_exec_runtime || p->state == TASK_WAKING)
8678 return true;
8679
8680 return false;
8681}
8682
8683static void detach_task_cfs_rq(struct task_struct *p)
8684{
8685 struct sched_entity *se = &p->se;
8686 struct cfs_rq *cfs_rq = cfs_rq_of(se);
01011473 8687 u64 now = cfs_rq_clock_task(cfs_rq);
daa59407
BP
8688
8689 if (!vruntime_normalized(p)) {
da7a735e
PZ
8690 /*
8691 * Fix up our vruntime so that the current sleep doesn't
8692 * cause 'unlimited' sleep bonus.
8693 */
8694 place_entity(cfs_rq, se, 0);
8695 se->vruntime -= cfs_rq->min_vruntime;
8696 }
9ee474f5 8697
9d89c257 8698 /* Catch up with the cfs_rq and remove our load when we leave */
7c3edd2c 8699 update_cfs_rq_load_avg(now, cfs_rq, false);
a05e8c51 8700 detach_entity_load_avg(cfs_rq, se);
7c3edd2c 8701 update_tg_load_avg(cfs_rq, false);
da7a735e
PZ
8702}
8703
daa59407 8704static void attach_task_cfs_rq(struct task_struct *p)
cb469845 8705{
f36c019c 8706 struct sched_entity *se = &p->se;
daa59407 8707 struct cfs_rq *cfs_rq = cfs_rq_of(se);
01011473 8708 u64 now = cfs_rq_clock_task(cfs_rq);
7855a35a
BP
8709
8710#ifdef CONFIG_FAIR_GROUP_SCHED
eb7a59b2
M
8711 /*
8712	 * Since the real depth could have been changed (only the FAIR
8713	 * class maintains a depth value), reset the depth properly.
8714 */
8715 se->depth = se->parent ? se->parent->depth + 1 : 0;
8716#endif
7855a35a 8717
6efdb105 8718 /* Synchronize task with its cfs_rq */
7c3edd2c 8719 update_cfs_rq_load_avg(now, cfs_rq, false);
daa59407 8720 attach_entity_load_avg(cfs_rq, se);
7c3edd2c 8721 update_tg_load_avg(cfs_rq, false);
daa59407
BP
8722
8723 if (!vruntime_normalized(p))
8724 se->vruntime += cfs_rq->min_vruntime;
8725}
6efdb105 8726
daa59407
BP
8727static void switched_from_fair(struct rq *rq, struct task_struct *p)
8728{
8729 detach_task_cfs_rq(p);
8730}
8731
8732static void switched_to_fair(struct rq *rq, struct task_struct *p)
8733{
8734 attach_task_cfs_rq(p);
7855a35a 8735
daa59407 8736 if (task_on_rq_queued(p)) {
7855a35a 8737 /*
daa59407
BP
8738 * We were most likely switched from sched_rt, so
8739 * kick off the schedule if running, otherwise just see
8740 * if we can still preempt the current task.
7855a35a 8741 */
daa59407
BP
8742 if (rq->curr == p)
8743 resched_curr(rq);
8744 else
8745 check_preempt_curr(rq, p, 0);
7855a35a 8746 }
cb469845
SR
8747}
8748
83b699ed
SV
8749/* Account for a task changing its policy or group.
8750 *
8751 * This routine is mostly called to set cfs_rq->curr field when a task
8752 * migrates between groups/classes.
8753 */
8754static void set_curr_task_fair(struct rq *rq)
8755{
8756 struct sched_entity *se = &rq->curr->se;
8757
ec12cb7f
PT
8758 for_each_sched_entity(se) {
8759 struct cfs_rq *cfs_rq = cfs_rq_of(se);
8760
8761 set_next_entity(cfs_rq, se);
8762 /* ensure bandwidth has been allocated on our new cfs_rq */
8763 account_cfs_rq_runtime(cfs_rq, 0);
8764 }
83b699ed
SV
8765}
8766
029632fb
PZ
8767void init_cfs_rq(struct cfs_rq *cfs_rq)
8768{
8769 cfs_rq->tasks_timeline = RB_ROOT;
029632fb
PZ
8770 cfs_rq->min_vruntime = (u64)(-(1LL << 20));
8771#ifndef CONFIG_64BIT
8772 cfs_rq->min_vruntime_copy = cfs_rq->min_vruntime;
8773#endif
141965c7 8774#ifdef CONFIG_SMP
9d89c257
YD
8775 atomic_long_set(&cfs_rq->removed_load_avg, 0);
8776 atomic_long_set(&cfs_rq->removed_util_avg, 0);
9ee474f5 8777#endif
029632fb
PZ
8778}
8779
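The unusual initial min_vruntime above is (u64)(-(1LL << 20)), i.e. 2^64 - 2^20: the counter starts roughly one millisecond of vruntime below the wrap point, presumably so that wraparound is exercised soon after boot and any comparison that is not wrap-safe shows up quickly. A tiny sketch that just prints those numbers:

#include <stdio.h>
#include <inttypes.h>

int main(void)
{
	uint64_t min_vruntime = (uint64_t)(-(1LL << 20));

	/* 2^64 - 2^20: about 1 ms (in ns) short of wrapping back to 0. */
	printf("initial min_vruntime = %" PRIu64 "\n", min_vruntime);
	printf("wraps after another  %" PRIu64 " ns of vruntime\n",
	       (uint64_t)0 - min_vruntime);
	return 0;
}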
810b3817 8780#ifdef CONFIG_FAIR_GROUP_SCHED
ea86cb4b
VG
8781static void task_set_group_fair(struct task_struct *p)
8782{
8783 struct sched_entity *se = &p->se;
8784
8785 set_task_rq(p, task_cpu(p));
8786 se->depth = se->parent ? se->parent->depth + 1 : 0;
8787}
8788
bc54da21 8789static void task_move_group_fair(struct task_struct *p)
810b3817 8790{
daa59407 8791 detach_task_cfs_rq(p);
b2b5ce02 8792 set_task_rq(p, task_cpu(p));
6efdb105
BP
8793
8794#ifdef CONFIG_SMP
8795	/* Tell load tracking that se's cfs_rq has changed -- migrated */
8796 p->se.avg.last_update_time = 0;
8797#endif
daa59407 8798 attach_task_cfs_rq(p);
810b3817 8799}
029632fb 8800
ea86cb4b
VG
8801static void task_change_group_fair(struct task_struct *p, int type)
8802{
8803 switch (type) {
8804 case TASK_SET_GROUP:
8805 task_set_group_fair(p);
8806 break;
8807
8808 case TASK_MOVE_GROUP:
8809 task_move_group_fair(p);
8810 break;
8811 }
8812}
8813
029632fb
PZ
8814void free_fair_sched_group(struct task_group *tg)
8815{
8816 int i;
8817
8818 destroy_cfs_bandwidth(tg_cfs_bandwidth(tg));
8819
8820 for_each_possible_cpu(i) {
8821 if (tg->cfs_rq)
8822 kfree(tg->cfs_rq[i]);
6fe1f348 8823 if (tg->se)
029632fb
PZ
8824 kfree(tg->se[i]);
8825 }
8826
8827 kfree(tg->cfs_rq);
8828 kfree(tg->se);
8829}
8830
8831int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
8832{
029632fb 8833 struct sched_entity *se;
b7fa30c9 8834 struct cfs_rq *cfs_rq;
029632fb
PZ
8835 int i;
8836
8837 tg->cfs_rq = kzalloc(sizeof(cfs_rq) * nr_cpu_ids, GFP_KERNEL);
8838 if (!tg->cfs_rq)
8839 goto err;
8840 tg->se = kzalloc(sizeof(se) * nr_cpu_ids, GFP_KERNEL);
8841 if (!tg->se)
8842 goto err;
8843
8844 tg->shares = NICE_0_LOAD;
8845
8846 init_cfs_bandwidth(tg_cfs_bandwidth(tg));
8847
8848 for_each_possible_cpu(i) {
8849 cfs_rq = kzalloc_node(sizeof(struct cfs_rq),
8850 GFP_KERNEL, cpu_to_node(i));
8851 if (!cfs_rq)
8852 goto err;
8853
8854 se = kzalloc_node(sizeof(struct sched_entity),
8855 GFP_KERNEL, cpu_to_node(i));
8856 if (!se)
8857 goto err_free_rq;
8858
8859 init_cfs_rq(cfs_rq);
8860 init_tg_cfs_entry(tg, cfs_rq, se, i, parent->se[i]);
540247fb 8861 init_entity_runnable_average(se);
029632fb
PZ
8862 }
8863
8864 return 1;
8865
8866err_free_rq:
8867 kfree(cfs_rq);
8868err:
8869 return 0;
8870}
8871
8663e24d
PZ
8872void online_fair_sched_group(struct task_group *tg)
8873{
8874 struct sched_entity *se;
8875 struct rq *rq;
8876 int i;
8877
8878 for_each_possible_cpu(i) {
8879 rq = cpu_rq(i);
8880 se = tg->se[i];
8881
8882 raw_spin_lock_irq(&rq->lock);
8883 post_init_entity_util_avg(se);
55e16d30 8884 sync_throttle(tg, i);
8663e24d
PZ
8885 raw_spin_unlock_irq(&rq->lock);
8886 }
8887}
8888
6fe1f348 8889void unregister_fair_sched_group(struct task_group *tg)
029632fb 8890{
029632fb 8891 unsigned long flags;
6fe1f348
PZ
8892 struct rq *rq;
8893 int cpu;
029632fb 8894
6fe1f348
PZ
8895 for_each_possible_cpu(cpu) {
8896 if (tg->se[cpu])
8897 remove_entity_load_avg(tg->se[cpu]);
029632fb 8898
6fe1f348
PZ
8899 /*
8900 * Only empty task groups can be destroyed; so we can speculatively
8901 * check on_list without danger of it being re-added.
8902 */
8903 if (!tg->cfs_rq[cpu]->on_list)
8904 continue;
8905
8906 rq = cpu_rq(cpu);
8907
8908 raw_spin_lock_irqsave(&rq->lock, flags);
8909 list_del_leaf_cfs_rq(tg->cfs_rq[cpu]);
8910 raw_spin_unlock_irqrestore(&rq->lock, flags);
8911 }
029632fb
PZ
8912}
8913
8914void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq,
8915 struct sched_entity *se, int cpu,
8916 struct sched_entity *parent)
8917{
8918 struct rq *rq = cpu_rq(cpu);
8919
8920 cfs_rq->tg = tg;
8921 cfs_rq->rq = rq;
029632fb
PZ
8922 init_cfs_rq_runtime(cfs_rq);
8923
8924 tg->cfs_rq[cpu] = cfs_rq;
8925 tg->se[cpu] = se;
8926
8927 /* se could be NULL for root_task_group */
8928 if (!se)
8929 return;
8930
fed14d45 8931 if (!parent) {
029632fb 8932 se->cfs_rq = &rq->cfs;
fed14d45
PZ
8933 se->depth = 0;
8934 } else {
029632fb 8935 se->cfs_rq = parent->my_q;
fed14d45
PZ
8936 se->depth = parent->depth + 1;
8937 }
029632fb
PZ
8938
8939 se->my_q = cfs_rq;
0ac9b1c2
PT
8940 /* guarantee group entities always have weight */
8941 update_load_set(&se->load, NICE_0_LOAD);
029632fb
PZ
8942 se->parent = parent;
8943}
8944
8945static DEFINE_MUTEX(shares_mutex);
8946
8947int sched_group_set_shares(struct task_group *tg, unsigned long shares)
8948{
8949 int i;
8950 unsigned long flags;
8951
8952 /*
8953 * We can't change the weight of the root cgroup.
8954 */
8955 if (!tg->se[0])
8956 return -EINVAL;
8957
8958 shares = clamp(shares, scale_load(MIN_SHARES), scale_load(MAX_SHARES));
8959
8960 mutex_lock(&shares_mutex);
8961 if (tg->shares == shares)
8962 goto done;
8963
8964 tg->shares = shares;
8965 for_each_possible_cpu(i) {
8966 struct rq *rq = cpu_rq(i);
8967 struct sched_entity *se;
8968
8969 se = tg->se[i];
8970 /* Propagate contribution to hierarchy */
8971 raw_spin_lock_irqsave(&rq->lock, flags);
71b1da46
FW
8972
8973 /* Possible calls to update_curr() need rq clock */
8974 update_rq_clock(rq);
17bc14b7 8975 for_each_sched_entity(se)
029632fb
PZ
8976 update_cfs_shares(group_cfs_rq(se));
8977 raw_spin_unlock_irqrestore(&rq->lock, flags);
8978 }
8979
8980done:
8981 mutex_unlock(&shares_mutex);
8982 return 0;
8983}
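sched_group_set_shares() clamps the requested weight into [scale_load(MIN_SHARES), scale_load(MAX_SHARES)] before applying it. The sketch below shows only the clamping behaviour; it ignores scale_load(), and the MIN_SHARES/MAX_SHARES values (2 and 1 << 18) are assumptions quoted for illustration:

#include <stdio.h>

#define MIN_SHARES	2UL		/* assumed, see kernel/sched/sched.h */
#define MAX_SHARES	(1UL << 18)	/* assumed, see kernel/sched/sched.h */

#define clamp(val, lo, hi)	((val) < (lo) ? (lo) : (val) > (hi) ? (hi) : (val))

int main(void)
{
	unsigned long requested[] = { 0, 1024, 1UL << 20 };
	int i;

	for (i = 0; i < 3; i++)
		printf("requested %8lu -> effective %8lu\n", requested[i],
		       clamp(requested[i], MIN_SHARES, MAX_SHARES));
	return 0;
}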
8984#else /* CONFIG_FAIR_GROUP_SCHED */
8985
8986void free_fair_sched_group(struct task_group *tg) { }
8987
8988int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
8989{
8990 return 1;
8991}
8992
8663e24d
PZ
8993void online_fair_sched_group(struct task_group *tg) { }
8994
6fe1f348 8995void unregister_fair_sched_group(struct task_group *tg) { }
029632fb
PZ
8996
8997#endif /* CONFIG_FAIR_GROUP_SCHED */
8998
810b3817 8999
6d686f45 9000static unsigned int get_rr_interval_fair(struct rq *rq, struct task_struct *task)
0d721cea
PW
9001{
9002 struct sched_entity *se = &task->se;
0d721cea
PW
9003 unsigned int rr_interval = 0;
9004
9005 /*
9006 * Time slice is 0 for SCHED_OTHER tasks that are on an otherwise
9007 * idle runqueue:
9008 */
0d721cea 9009 if (rq->cfs.load.weight)
a59f4e07 9010 rr_interval = NS_TO_JIFFIES(sched_slice(cfs_rq_of(se), se));
0d721cea
PW
9011
9012 return rr_interval;
9013}
9014
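get_rr_interval_fair() is what backs sched_rr_get_interval() for SCHED_OTHER tasks, reporting the CFS slice computed by sched_slice() (and 0 on an otherwise idle runqueue, per the comment above). A small user-space usage example:

#include <stdio.h>
#include <time.h>
#include <sched.h>

int main(void)
{
	struct timespec ts;

	/* pid 0 means the calling task; for SCHED_OTHER this is the CFS slice. */
	if (sched_rr_get_interval(0, &ts) == 0)
		printf("slice: %ld.%09ld s\n", (long)ts.tv_sec, ts.tv_nsec);
	else
		perror("sched_rr_get_interval");
	return 0;
}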
bf0f6f24
IM
9015/*
9016 * All the scheduling class methods:
9017 */
029632fb 9018const struct sched_class fair_sched_class = {
5522d5d5 9019 .next = &idle_sched_class,
bf0f6f24
IM
9020 .enqueue_task = enqueue_task_fair,
9021 .dequeue_task = dequeue_task_fair,
9022 .yield_task = yield_task_fair,
d95f4122 9023 .yield_to_task = yield_to_task_fair,
bf0f6f24 9024
2e09bf55 9025 .check_preempt_curr = check_preempt_wakeup,
bf0f6f24
IM
9026
9027 .pick_next_task = pick_next_task_fair,
9028 .put_prev_task = put_prev_task_fair,
9029
681f3e68 9030#ifdef CONFIG_SMP
4ce72a2c 9031 .select_task_rq = select_task_rq_fair,
0a74bef8 9032 .migrate_task_rq = migrate_task_rq_fair,
141965c7 9033
0bcdcf28
CE
9034 .rq_online = rq_online_fair,
9035 .rq_offline = rq_offline_fair,
88ec22d3 9036
12695578 9037 .task_dead = task_dead_fair,
c5b28038 9038 .set_cpus_allowed = set_cpus_allowed_common,
681f3e68 9039#endif
bf0f6f24 9040
83b699ed 9041 .set_curr_task = set_curr_task_fair,
bf0f6f24 9042 .task_tick = task_tick_fair,
cd29fe6f 9043 .task_fork = task_fork_fair,
cb469845
SR
9044
9045 .prio_changed = prio_changed_fair,
da7a735e 9046 .switched_from = switched_from_fair,
cb469845 9047 .switched_to = switched_to_fair,
810b3817 9048
0d721cea
PW
9049 .get_rr_interval = get_rr_interval_fair,
9050
6e998916
SG
9051 .update_curr = update_curr_fair,
9052
810b3817 9053#ifdef CONFIG_FAIR_GROUP_SCHED
ea86cb4b 9054 .task_change_group = task_change_group_fair,
810b3817 9055#endif
bf0f6f24
IM
9056};
9057
9058#ifdef CONFIG_SCHED_DEBUG
029632fb 9059void print_cfs_stats(struct seq_file *m, int cpu)
bf0f6f24 9060{
bf0f6f24
IM
9061 struct cfs_rq *cfs_rq;
9062
5973e5b9 9063 rcu_read_lock();
c3b64f1e 9064 for_each_leaf_cfs_rq(cpu_rq(cpu), cfs_rq)
5cef9eca 9065 print_cfs_rq(m, cpu, cfs_rq);
5973e5b9 9066 rcu_read_unlock();
bf0f6f24 9067}
397f2378
SD
9068
9069#ifdef CONFIG_NUMA_BALANCING
9070void show_numa_stats(struct task_struct *p, struct seq_file *m)
9071{
9072 int node;
9073 unsigned long tsf = 0, tpf = 0, gsf = 0, gpf = 0;
9074
9075 for_each_online_node(node) {
9076 if (p->numa_faults) {
9077 tsf = p->numa_faults[task_faults_idx(NUMA_MEM, node, 0)];
9078 tpf = p->numa_faults[task_faults_idx(NUMA_MEM, node, 1)];
9079 }
9080 if (p->numa_group) {
9081 gsf = p->numa_group->faults[task_faults_idx(NUMA_MEM, node, 0)],
9082 gpf = p->numa_group->faults[task_faults_idx(NUMA_MEM, node, 1)];
9083 }
9084 print_numa_stats(m, node, tsf, tpf, gsf, gpf);
9085 }
9086}
9087#endif /* CONFIG_NUMA_BALANCING */
9088#endif /* CONFIG_SCHED_DEBUG */
029632fb
PZ
9089
9090__init void init_sched_fair_class(void)
9091{
9092#ifdef CONFIG_SMP
9093 open_softirq(SCHED_SOFTIRQ, run_rebalance_domains);
9094
3451d024 9095#ifdef CONFIG_NO_HZ_COMMON
554cecaf 9096 nohz.next_balance = jiffies;
029632fb 9097 zalloc_cpumask_var(&nohz.idle_cpus_mask, GFP_NOWAIT);
029632fb
PZ
9098#endif
9099#endif /* SMP */
9100
9101}