// SPDX-License-Identifier: GPL-2.0
/*
 * Completely Fair Scheduling (CFS) Class (SCHED_NORMAL/SCHED_BATCH)
 *
 * Copyright (C) 2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *
 * Interactivity improvements by Mike Galbraith
 * (C) 2007 Mike Galbraith <efault@gmx.de>
 *
 * Various enhancements by Dmitry Adamushko.
 * (C) 2007 Dmitry Adamushko <dmitry.adamushko@gmail.com>
 *
 * Group scheduling enhancements by Srivatsa Vaddagiri
 * Copyright IBM Corporation, 2007
 * Author: Srivatsa Vaddagiri <vatsa@linux.vnet.ibm.com>
 *
 * Scaled math optimizations by Thomas Gleixner
 * Copyright (C) 2007, Thomas Gleixner <tglx@linutronix.de>
 *
 * Adaptive scheduling granularity, math enhancements by Peter Zijlstra
 * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra
 */
#include "sched.h"

#include <trace/events/sched.h>

/*
 * Targeted preemption latency for CPU-bound tasks:
 *
 * NOTE: this latency value is not the same as the concept of
 * 'timeslice length' - timeslices in CFS are of variable length
 * and have no persistent notion like in traditional, time-slice
 * based scheduling concepts.
 *
 * (to see the precise effective timeslice length of your workload,
 *  run vmstat and monitor the context-switches (cs) field)
 *
 * (default: 6ms * (1 + ilog(ncpus)), units: nanoseconds)
 */
unsigned int sysctl_sched_latency = 6000000ULL;
unsigned int normalized_sysctl_sched_latency = 6000000ULL;

/*
 * The initial- and re-scaling of tunables is configurable
 *
 * Options are:
 *
 *   SCHED_TUNABLESCALING_NONE - unscaled, always *1
 *   SCHED_TUNABLESCALING_LOG - scaled logarithmically, *1+ilog(ncpus)
 *   SCHED_TUNABLESCALING_LINEAR - scaled linearly, *ncpus
 *
 * (default SCHED_TUNABLESCALING_LOG = *(1+ilog(ncpus)))
 */
enum sched_tunable_scaling sysctl_sched_tunable_scaling = SCHED_TUNABLESCALING_LOG;

/*
 * Minimal preemption granularity for CPU-bound tasks:
 *
 * (default: 0.75 msec * (1 + ilog(ncpus)), units: nanoseconds)
 */
unsigned int sysctl_sched_min_granularity = 750000ULL;
unsigned int normalized_sysctl_sched_min_granularity = 750000ULL;

/*
 * This value is kept at sysctl_sched_latency/sysctl_sched_min_granularity
 */
static unsigned int sched_nr_latency = 8;

/*
 * After fork, child runs first. If set to 0 (default) then
 * parent will (try to) run first.
 */
unsigned int sysctl_sched_child_runs_first __read_mostly;

/*
 * SCHED_OTHER wake-up granularity.
 *
 * This option delays the preemption effects of decoupled workloads
 * and reduces their over-scheduling. Synchronous workloads will still
 * have immediate wakeup/sleep latencies.
 *
 * (default: 1 msec * (1 + ilog(ncpus)), units: nanoseconds)
 */
unsigned int sysctl_sched_wakeup_granularity = 1000000UL;
unsigned int normalized_sysctl_sched_wakeup_granularity = 1000000UL;

const_debug unsigned int sysctl_sched_migration_cost = 500000UL;

#ifdef CONFIG_SMP
/*
 * For asym packing, by default the lower numbered CPU has higher priority.
 */
int __weak arch_asym_cpu_priority(int cpu)
{
	return -cpu;
}
#endif

#ifdef CONFIG_CFS_BANDWIDTH
/*
 * Amount of runtime to allocate from global (tg) to local (per-cfs_rq) pool
 * each time a cfs_rq requests quota.
 *
 * Note: in the case that the slice exceeds the runtime remaining (either due
 * to consumption or the quota being specified to be smaller than the slice)
 * we will always only issue the remaining available time.
 *
 * (default: 5 msec, units: microseconds)
 */
unsigned int sysctl_sched_cfs_bandwidth_slice = 5000UL;
#endif

/*
 * The margin used when comparing utilization with CPU capacity:
 * util * margin < capacity * 1024
 *
 * (default: ~20%)
 */
unsigned int capacity_margin = 1280;
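
/*
 * Illustrative note (not in the original source): with the default
 * capacity_margin of 1280, the test "util * margin < capacity * 1024"
 * requires util < capacity * 1024 / 1280, i.e. a CPU is only treated as
 * having spare capacity while utilization stays below ~80% of it.
 */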

static inline void update_load_add(struct load_weight *lw, unsigned long inc)
{
	lw->weight += inc;
	lw->inv_weight = 0;
}

static inline void update_load_sub(struct load_weight *lw, unsigned long dec)
{
	lw->weight -= dec;
	lw->inv_weight = 0;
}

static inline void update_load_set(struct load_weight *lw, unsigned long w)
{
	lw->weight = w;
	lw->inv_weight = 0;
}

/*
 * Increase the granularity value when there are more CPUs,
 * because with more CPUs the 'effective latency' as visible
 * to users decreases. But the relationship is not linear,
 * so pick a second-best guess by going with the log2 of the
 * number of CPUs.
 *
 * This idea comes from the SD scheduler of Con Kolivas:
 */
static unsigned int get_update_sysctl_factor(void)
{
	unsigned int cpus = min_t(unsigned int, num_online_cpus(), 8);
	unsigned int factor;

	switch (sysctl_sched_tunable_scaling) {
	case SCHED_TUNABLESCALING_NONE:
		factor = 1;
		break;
	case SCHED_TUNABLESCALING_LINEAR:
		factor = cpus;
		break;
	case SCHED_TUNABLESCALING_LOG:
	default:
		factor = 1 + ilog2(cpus);
		break;
	}

	return factor;
}

static void update_sysctl(void)
{
	unsigned int factor = get_update_sysctl_factor();

#define SET_SYSCTL(name) \
	(sysctl_##name = (factor) * normalized_sysctl_##name)
	SET_SYSCTL(sched_min_granularity);
	SET_SYSCTL(sched_latency);
	SET_SYSCTL(sched_wakeup_granularity);
#undef SET_SYSCTL
}

void sched_init_granularity(void)
{
	update_sysctl();
}
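
/*
 * Illustrative example (not in the original source): with the default
 * SCHED_TUNABLESCALING_LOG policy, a machine with 8 or more CPUs (the
 * CPU count is clamped to 8 above) gets factor = 1 + ilog2(8) = 4, so
 * sysctl_sched_latency becomes 4 * 6ms = 24ms and
 * sysctl_sched_min_granularity becomes 4 * 0.75ms = 3ms.
 */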

#define WMULT_CONST	(~0U)
#define WMULT_SHIFT	32

static void __update_inv_weight(struct load_weight *lw)
{
	unsigned long w;

	if (likely(lw->inv_weight))
		return;

	w = scale_load_down(lw->weight);

	if (BITS_PER_LONG > 32 && unlikely(w >= WMULT_CONST))
		lw->inv_weight = 1;
	else if (unlikely(!w))
		lw->inv_weight = WMULT_CONST;
	else
		lw->inv_weight = WMULT_CONST / w;
}

/*
 * delta_exec * weight / lw.weight
 *   OR
 * (delta_exec * (weight * lw->inv_weight)) >> WMULT_SHIFT
 *
 * Either weight := NICE_0_LOAD and lw \e sched_prio_to_wmult[], in which case
 * we're guaranteed shift stays positive because inv_weight is guaranteed to
 * fit 32 bits, and NICE_0_LOAD gives another 10 bits; therefore shift >= 22.
 *
 * Or, weight =< lw.weight (because lw.weight is the runqueue weight), thus
 * weight/lw.weight <= 1, and therefore our shift will also be positive.
 */
static u64 __calc_delta(u64 delta_exec, unsigned long weight, struct load_weight *lw)
{
	u64 fact = scale_load_down(weight);
	int shift = WMULT_SHIFT;

	__update_inv_weight(lw);

	if (unlikely(fact >> 32)) {
		while (fact >> 32) {
			fact >>= 1;
			shift--;
		}
	}

	/* hint to use a 32x32->64 mul */
	fact = (u64)(u32)fact * lw->inv_weight;

	while (fact >> 32) {
		fact >>= 1;
		shift--;
	}

	return mul_u64_u32_shr(delta_exec, fact, shift);
}
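
/*
 * Rough illustrative example (not in the original source): if weight is
 * NICE_0_LOAD (1024 after scale_load_down()) and the queue weight scales
 * down to 2048, then inv_weight is roughly 2^32 / 2048 = 2^21, fact becomes
 * roughly 1024 * 2^21 = 2^31, and mul_u64_u32_shr(delta_exec, fact, 32)
 * yields approximately delta_exec * 1024 / 2048, i.e. half of delta_exec.
 */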


const struct sched_class fair_sched_class;

/**************************************************************
 * CFS operations on generic schedulable entities:
 */

#ifdef CONFIG_FAIR_GROUP_SCHED

/* cpu runqueue to which this cfs_rq is attached */
static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
{
	return cfs_rq->rq;
}

static inline struct task_struct *task_of(struct sched_entity *se)
{
	SCHED_WARN_ON(!entity_is_task(se));
	return container_of(se, struct task_struct, se);
}

/* Walk up scheduling entities hierarchy */
#define for_each_sched_entity(se) \
		for (; se; se = se->parent)

static inline struct cfs_rq *task_cfs_rq(struct task_struct *p)
{
	return p->se.cfs_rq;
}

/* runqueue on which this entity is (to be) queued */
static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se)
{
	return se->cfs_rq;
}

/* runqueue "owned" by this group */
static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
{
	return grp->my_q;
}

static inline void list_add_leaf_cfs_rq(struct cfs_rq *cfs_rq)
{
	if (!cfs_rq->on_list) {
		struct rq *rq = rq_of(cfs_rq);
		int cpu = cpu_of(rq);
		/*
		 * Ensure we either appear before our parent (if already
		 * enqueued) or force our parent to appear after us when it is
		 * enqueued. The fact that we always enqueue bottom-up
		 * reduces this to two cases and a special case for the root
		 * cfs_rq. Furthermore, it also means that we will always reset
		 * tmp_alone_branch either when the branch is connected
		 * to a tree or when we reach the beginning of the tree.
		 */
		if (cfs_rq->tg->parent &&
		    cfs_rq->tg->parent->cfs_rq[cpu]->on_list) {
			/*
			 * If parent is already on the list, we add the child
			 * just before. Thanks to circular linked property of
			 * the list, this means to put the child at the tail
			 * of the list that starts by parent.
			 */
			list_add_tail_rcu(&cfs_rq->leaf_cfs_rq_list,
				&(cfs_rq->tg->parent->cfs_rq[cpu]->leaf_cfs_rq_list));
			/*
			 * The branch is now connected to its tree so we can
			 * reset tmp_alone_branch to the beginning of the
			 * list.
			 */
			rq->tmp_alone_branch = &rq->leaf_cfs_rq_list;
		} else if (!cfs_rq->tg->parent) {
			/*
			 * cfs rq without parent should be put
			 * at the tail of the list.
			 */
			list_add_tail_rcu(&cfs_rq->leaf_cfs_rq_list,
				&rq->leaf_cfs_rq_list);
			/*
			 * We have reached the beginning of a tree so we can
			 * reset tmp_alone_branch to the beginning of the list.
			 */
			rq->tmp_alone_branch = &rq->leaf_cfs_rq_list;
		} else {
			/*
			 * The parent has not already been added so we want to
			 * make sure that it will be put after us.
			 * tmp_alone_branch points to the beginning of the
			 * branch where we will add parent.
			 */
			list_add_rcu(&cfs_rq->leaf_cfs_rq_list,
				rq->tmp_alone_branch);
			/*
			 * Update tmp_alone_branch to point to the new
			 * beginning of the branch.
			 */
			rq->tmp_alone_branch = &cfs_rq->leaf_cfs_rq_list;
		}

		cfs_rq->on_list = 1;
	}
}

static inline void list_del_leaf_cfs_rq(struct cfs_rq *cfs_rq)
{
	if (cfs_rq->on_list) {
		list_del_rcu(&cfs_rq->leaf_cfs_rq_list);
		cfs_rq->on_list = 0;
	}
}

/* Iterate through all leaf cfs_rq's on a runqueue */
#define for_each_leaf_cfs_rq_safe(rq, cfs_rq, pos)			\
	list_for_each_entry_safe(cfs_rq, pos, &rq->leaf_cfs_rq_list,	\
				 leaf_cfs_rq_list)

/* Do the two (enqueued) entities belong to the same group ? */
static inline struct cfs_rq *
is_same_group(struct sched_entity *se, struct sched_entity *pse)
{
	if (se->cfs_rq == pse->cfs_rq)
		return se->cfs_rq;

	return NULL;
}

static inline struct sched_entity *parent_entity(struct sched_entity *se)
{
	return se->parent;
}

static void
find_matching_se(struct sched_entity **se, struct sched_entity **pse)
{
	int se_depth, pse_depth;

	/*
	 * preemption test can be made between sibling entities who are in the
	 * same cfs_rq, i.e. who have a common parent. Walk up the hierarchy of
	 * both tasks until we find their ancestors who are siblings of common
	 * parent.
	 */

	/* First walk up until both entities are at same depth */
	se_depth = (*se)->depth;
	pse_depth = (*pse)->depth;

	while (se_depth > pse_depth) {
		se_depth--;
		*se = parent_entity(*se);
	}

	while (pse_depth > se_depth) {
		pse_depth--;
		*pse = parent_entity(*pse);
	}

	while (!is_same_group(*se, *pse)) {
		*se = parent_entity(*se);
		*pse = parent_entity(*pse);
	}
}

#else /* !CONFIG_FAIR_GROUP_SCHED */

static inline struct task_struct *task_of(struct sched_entity *se)
{
	return container_of(se, struct task_struct, se);
}

static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
{
	return container_of(cfs_rq, struct rq, cfs);
}


#define for_each_sched_entity(se) \
		for (; se; se = NULL)

static inline struct cfs_rq *task_cfs_rq(struct task_struct *p)
{
	return &task_rq(p)->cfs;
}

static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se)
{
	struct task_struct *p = task_of(se);
	struct rq *rq = task_rq(p);

	return &rq->cfs;
}

/* runqueue "owned" by this group */
static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
{
	return NULL;
}

static inline void list_add_leaf_cfs_rq(struct cfs_rq *cfs_rq)
{
}

static inline void list_del_leaf_cfs_rq(struct cfs_rq *cfs_rq)
{
}

#define for_each_leaf_cfs_rq_safe(rq, cfs_rq, pos)	\
		for (cfs_rq = &rq->cfs, pos = NULL; cfs_rq; cfs_rq = pos)

static inline struct sched_entity *parent_entity(struct sched_entity *se)
{
	return NULL;
}

static inline void
find_matching_se(struct sched_entity **se, struct sched_entity **pse)
{
}

#endif /* CONFIG_FAIR_GROUP_SCHED */

static __always_inline
void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec);

/**************************************************************
 * Scheduling class tree data structure manipulation methods:
 */

static inline u64 max_vruntime(u64 max_vruntime, u64 vruntime)
{
	s64 delta = (s64)(vruntime - max_vruntime);
	if (delta > 0)
		max_vruntime = vruntime;

	return max_vruntime;
}

static inline u64 min_vruntime(u64 min_vruntime, u64 vruntime)
{
	s64 delta = (s64)(vruntime - min_vruntime);
	if (delta < 0)
		min_vruntime = vruntime;

	return min_vruntime;
}

static inline int entity_before(struct sched_entity *a,
				struct sched_entity *b)
{
	return (s64)(a->vruntime - b->vruntime) < 0;
}

static void update_min_vruntime(struct cfs_rq *cfs_rq)
{
	struct sched_entity *curr = cfs_rq->curr;
	struct rb_node *leftmost = rb_first_cached(&cfs_rq->tasks_timeline);

	u64 vruntime = cfs_rq->min_vruntime;

	if (curr) {
		if (curr->on_rq)
			vruntime = curr->vruntime;
		else
			curr = NULL;
	}

	if (leftmost) { /* non-empty tree */
		struct sched_entity *se;
		se = rb_entry(leftmost, struct sched_entity, run_node);

		if (!curr)
			vruntime = se->vruntime;
		else
			vruntime = min_vruntime(vruntime, se->vruntime);
	}

	/* ensure we never gain time by being placed backwards. */
	cfs_rq->min_vruntime = max_vruntime(cfs_rq->min_vruntime, vruntime);
#ifndef CONFIG_64BIT
	smp_wmb();
	cfs_rq->min_vruntime_copy = cfs_rq->min_vruntime;
#endif
}

/*
 * Enqueue an entity into the rb-tree:
 */
static void __enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	struct rb_node **link = &cfs_rq->tasks_timeline.rb_root.rb_node;
	struct rb_node *parent = NULL;
	struct sched_entity *entry;
	bool leftmost = true;

	/*
	 * Find the right place in the rbtree:
	 */
	while (*link) {
		parent = *link;
		entry = rb_entry(parent, struct sched_entity, run_node);
		/*
		 * We don't care about collisions. Nodes with
		 * the same key stay together.
		 */
		if (entity_before(se, entry)) {
			link = &parent->rb_left;
		} else {
			link = &parent->rb_right;
			leftmost = false;
		}
	}

	rb_link_node(&se->run_node, parent, link);
	rb_insert_color_cached(&se->run_node,
			       &cfs_rq->tasks_timeline, leftmost);
}

static void __dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	rb_erase_cached(&se->run_node, &cfs_rq->tasks_timeline);
}

struct sched_entity *__pick_first_entity(struct cfs_rq *cfs_rq)
{
	struct rb_node *left = rb_first_cached(&cfs_rq->tasks_timeline);

	if (!left)
		return NULL;

	return rb_entry(left, struct sched_entity, run_node);
}

static struct sched_entity *__pick_next_entity(struct sched_entity *se)
{
	struct rb_node *next = rb_next(&se->run_node);

	if (!next)
		return NULL;

	return rb_entry(next, struct sched_entity, run_node);
}

#ifdef CONFIG_SCHED_DEBUG
struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq)
{
	struct rb_node *last = rb_last(&cfs_rq->tasks_timeline.rb_root);

	if (!last)
		return NULL;

	return rb_entry(last, struct sched_entity, run_node);
}

/**************************************************************
 * Scheduling class statistics methods:
 */

int sched_proc_update_handler(struct ctl_table *table, int write,
		void __user *buffer, size_t *lenp,
		loff_t *ppos)
{
	int ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
	unsigned int factor = get_update_sysctl_factor();

	if (ret || !write)
		return ret;

	sched_nr_latency = DIV_ROUND_UP(sysctl_sched_latency,
					sysctl_sched_min_granularity);

#define WRT_SYSCTL(name) \
	(normalized_sysctl_##name = sysctl_##name / (factor))
	WRT_SYSCTL(sched_min_granularity);
	WRT_SYSCTL(sched_latency);
	WRT_SYSCTL(sched_wakeup_granularity);
#undef WRT_SYSCTL

	return 0;
}
#endif

/*
 * delta /= w
 */
static inline u64 calc_delta_fair(u64 delta, struct sched_entity *se)
{
	if (unlikely(se->load.weight != NICE_0_LOAD))
		delta = __calc_delta(delta, NICE_0_LOAD, &se->load);

	return delta;
}

/*
 * The idea is to set a period in which each task runs once.
 *
 * When there are too many tasks (sched_nr_latency) we have to stretch
 * this period because otherwise the slices get too small.
 *
 * p = (nr <= nl) ? l : l*nr/nl
 */
static u64 __sched_period(unsigned long nr_running)
{
	if (unlikely(nr_running > sched_nr_latency))
		return nr_running * sysctl_sched_min_granularity;
	else
		return sysctl_sched_latency;
}

/*
 * We calculate the wall-time slice from the period by taking a part
 * proportional to the weight.
 *
 * s = p*P[w/rw]
 */
static u64 sched_slice(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	u64 slice = __sched_period(cfs_rq->nr_running + !se->on_rq);

	for_each_sched_entity(se) {
		struct load_weight *load;
		struct load_weight lw;

		cfs_rq = cfs_rq_of(se);
		load = &cfs_rq->load;

		if (unlikely(!se->on_rq)) {
			lw = cfs_rq->load;

			update_load_add(&lw, se->load.weight);
			load = &lw;
		}
		slice = __calc_delta(slice, se->load.weight, load);
	}
	return slice;
}
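
/*
 * Illustrative example (not in the original source), using the unscaled
 * defaults (latency 6ms, min granularity 0.75ms, sched_nr_latency 8):
 * with 4 runnable nice-0 tasks the period is 6ms and each task's slice
 * is 6ms * 1024/4096 = 1.5ms; with 16 runnable nice-0 tasks the period
 * stretches to 16 * 0.75ms = 12ms and each slice is 12ms / 16 = 0.75ms.
 */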

/*
 * We calculate the vruntime slice of a to-be-inserted task.
 *
 * vs = s/w
 */
static u64 sched_vslice(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	return calc_delta_fair(sched_slice(cfs_rq, se), se);
}

#ifdef CONFIG_SMP
#include "pelt.h"
#include "sched-pelt.h"

static int select_idle_sibling(struct task_struct *p, int prev_cpu, int cpu);
static unsigned long task_h_load(struct task_struct *p);
static unsigned long capacity_of(int cpu);

/* Give a new sched_entity initial runnable values that make its load look heavy during its infancy */
void init_entity_runnable_average(struct sched_entity *se)
{
	struct sched_avg *sa = &se->avg;

	memset(sa, 0, sizeof(*sa));

	/*
	 * Tasks are initialized with full load to be seen as heavy tasks until
	 * they get a chance to stabilize to their real load level.
	 * Group entities are initialized with zero load to reflect the fact that
	 * nothing has been attached to the task group yet.
	 */
	if (entity_is_task(se))
		sa->runnable_load_avg = sa->load_avg = scale_load_down(se->load.weight);

	se->runnable_weight = se->load.weight;

	/* when this task is enqueued, it will contribute to its cfs_rq's load_avg */
}

static inline u64 cfs_rq_clock_task(struct cfs_rq *cfs_rq);
static void attach_entity_cfs_rq(struct sched_entity *se);

/*
 * With new tasks being created, their initial util_avgs are extrapolated
 * based on the cfs_rq's current util_avg:
 *
 *   util_avg = cfs_rq->util_avg / (cfs_rq->load_avg + 1) * se.load.weight
 *
 * However, in many cases, the above util_avg does not give a desired
 * value. Moreover, the sum of the util_avgs may be divergent, such
 * as when the series is a harmonic series.
 *
 * To solve this problem, we also cap the util_avg of successive tasks to
 * only 1/2 of the left utilization budget:
 *
 *   util_avg_cap = (cpu_scale - cfs_rq->avg.util_avg) / 2^n
 *
 * where n denotes the nth task and cpu_scale the CPU capacity.
 *
 * For example, for a CPU with 1024 of capacity, a simplest series from
 * the beginning would be like:
 *
 *   task  util_avg: 512, 256, 128,  64,  32,   16,    8, ...
 * cfs_rq  util_avg: 512, 768, 896, 960, 992, 1008, 1016, ...
 *
 * Finally, that extrapolated util_avg is clamped to the cap (util_avg_cap)
 * if util_avg > util_avg_cap.
 */
void post_init_entity_util_avg(struct sched_entity *se)
{
	struct cfs_rq *cfs_rq = cfs_rq_of(se);
	struct sched_avg *sa = &se->avg;
	long cpu_scale = arch_scale_cpu_capacity(NULL, cpu_of(rq_of(cfs_rq)));
	long cap = (long)(cpu_scale - cfs_rq->avg.util_avg) / 2;

	if (cap > 0) {
		if (cfs_rq->avg.util_avg != 0) {
			sa->util_avg  = cfs_rq->avg.util_avg * se->load.weight;
			sa->util_avg /= (cfs_rq->avg.load_avg + 1);

			if (sa->util_avg > cap)
				sa->util_avg = cap;
		} else {
			sa->util_avg = cap;
		}
	}

	if (entity_is_task(se)) {
		struct task_struct *p = task_of(se);
		if (p->sched_class != &fair_sched_class) {
			/*
			 * For !fair tasks do:
			 *
			update_cfs_rq_load_avg(now, cfs_rq);
			attach_entity_load_avg(cfs_rq, se, 0);
			switched_from_fair(rq, p);
			 *
			 * such that the next switched_to_fair() has the
			 * expected state.
			 */
			se->avg.last_update_time = cfs_rq_clock_task(cfs_rq);
			return;
		}
	}

	attach_entity_cfs_rq(se);
}

#else /* !CONFIG_SMP */
void init_entity_runnable_average(struct sched_entity *se)
{
}
void post_init_entity_util_avg(struct sched_entity *se)
{
}
static void update_tg_load_avg(struct cfs_rq *cfs_rq, int force)
{
}
#endif /* CONFIG_SMP */

/*
 * Update the current task's runtime statistics.
 */
static void update_curr(struct cfs_rq *cfs_rq)
{
	struct sched_entity *curr = cfs_rq->curr;
	u64 now = rq_clock_task(rq_of(cfs_rq));
	u64 delta_exec;

	if (unlikely(!curr))
		return;

	delta_exec = now - curr->exec_start;
	if (unlikely((s64)delta_exec <= 0))
		return;

	curr->exec_start = now;

	schedstat_set(curr->statistics.exec_max,
		      max(delta_exec, curr->statistics.exec_max));

	curr->sum_exec_runtime += delta_exec;
	schedstat_add(cfs_rq->exec_clock, delta_exec);

	curr->vruntime += calc_delta_fair(delta_exec, curr);
	update_min_vruntime(cfs_rq);

	if (entity_is_task(curr)) {
		struct task_struct *curtask = task_of(curr);

		trace_sched_stat_runtime(curtask, delta_exec, curr->vruntime);
		cgroup_account_cputime(curtask, delta_exec);
		account_group_exec_runtime(curtask, delta_exec);
	}

	account_cfs_rq_runtime(cfs_rq, delta_exec);
}

static void update_curr_fair(struct rq *rq)
{
	update_curr(cfs_rq_of(&rq->curr->se));
}

static inline void
update_stats_wait_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	u64 wait_start, prev_wait_start;

	if (!schedstat_enabled())
		return;

	wait_start = rq_clock(rq_of(cfs_rq));
	prev_wait_start = schedstat_val(se->statistics.wait_start);

	if (entity_is_task(se) && task_on_rq_migrating(task_of(se)) &&
	    likely(wait_start > prev_wait_start))
		wait_start -= prev_wait_start;

	__schedstat_set(se->statistics.wait_start, wait_start);
}

static inline void
update_stats_wait_end(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	struct task_struct *p;
	u64 delta;

	if (!schedstat_enabled())
		return;

	delta = rq_clock(rq_of(cfs_rq)) - schedstat_val(se->statistics.wait_start);

	if (entity_is_task(se)) {
		p = task_of(se);
		if (task_on_rq_migrating(p)) {
			/*
			 * Preserve migrating task's wait time so wait_start
			 * time stamp can be adjusted to accumulate wait time
			 * prior to migration.
			 */
			__schedstat_set(se->statistics.wait_start, delta);
			return;
		}
		trace_sched_stat_wait(p, delta);
	}

	__schedstat_set(se->statistics.wait_max,
			max(schedstat_val(se->statistics.wait_max), delta));
	__schedstat_inc(se->statistics.wait_count);
	__schedstat_add(se->statistics.wait_sum, delta);
	__schedstat_set(se->statistics.wait_start, 0);
}

static inline void
update_stats_enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	struct task_struct *tsk = NULL;
	u64 sleep_start, block_start;

	if (!schedstat_enabled())
		return;

	sleep_start = schedstat_val(se->statistics.sleep_start);
	block_start = schedstat_val(se->statistics.block_start);

	if (entity_is_task(se))
		tsk = task_of(se);

	if (sleep_start) {
		u64 delta = rq_clock(rq_of(cfs_rq)) - sleep_start;

		if ((s64)delta < 0)
			delta = 0;

		if (unlikely(delta > schedstat_val(se->statistics.sleep_max)))
			__schedstat_set(se->statistics.sleep_max, delta);

		__schedstat_set(se->statistics.sleep_start, 0);
		__schedstat_add(se->statistics.sum_sleep_runtime, delta);

		if (tsk) {
			account_scheduler_latency(tsk, delta >> 10, 1);
			trace_sched_stat_sleep(tsk, delta);
		}
	}
	if (block_start) {
		u64 delta = rq_clock(rq_of(cfs_rq)) - block_start;

		if ((s64)delta < 0)
			delta = 0;

		if (unlikely(delta > schedstat_val(se->statistics.block_max)))
			__schedstat_set(se->statistics.block_max, delta);

		__schedstat_set(se->statistics.block_start, 0);
		__schedstat_add(se->statistics.sum_sleep_runtime, delta);

		if (tsk) {
			if (tsk->in_iowait) {
				__schedstat_add(se->statistics.iowait_sum, delta);
				__schedstat_inc(se->statistics.iowait_count);
				trace_sched_stat_iowait(tsk, delta);
			}

			trace_sched_stat_blocked(tsk, delta);

			/*
			 * Blocking time is in units of nanosecs, so shift by
			 * 20 to get a milliseconds-range estimation of the
			 * amount of time that the task spent sleeping:
			 */
			if (unlikely(prof_on == SLEEP_PROFILING)) {
				profile_hits(SLEEP_PROFILING,
						(void *)get_wchan(tsk),
						delta >> 20);
			}
			account_scheduler_latency(tsk, delta >> 10, 0);
		}
	}
}

/*
 * Task is being enqueued - update stats:
 */
static inline void
update_stats_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
{
	if (!schedstat_enabled())
		return;

	/*
	 * Are we enqueueing a waiting task? (for current tasks
	 * a dequeue/enqueue event is a NOP)
	 */
	if (se != cfs_rq->curr)
		update_stats_wait_start(cfs_rq, se);

	if (flags & ENQUEUE_WAKEUP)
		update_stats_enqueue_sleeper(cfs_rq, se);
}

static inline void
update_stats_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
{

	if (!schedstat_enabled())
		return;

	/*
	 * Mark the end of the wait period if dequeueing a
	 * waiting task:
	 */
	if (se != cfs_rq->curr)
		update_stats_wait_end(cfs_rq, se);

	if ((flags & DEQUEUE_SLEEP) && entity_is_task(se)) {
		struct task_struct *tsk = task_of(se);

		if (tsk->state & TASK_INTERRUPTIBLE)
			__schedstat_set(se->statistics.sleep_start,
					rq_clock(rq_of(cfs_rq)));
		if (tsk->state & TASK_UNINTERRUPTIBLE)
			__schedstat_set(se->statistics.block_start,
					rq_clock(rq_of(cfs_rq)));
	}
}

/*
 * We are picking a new current task - update its stats:
 */
static inline void
update_stats_curr_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	/*
	 * We are starting a new run period:
	 */
	se->exec_start = rq_clock_task(rq_of(cfs_rq));
}

/**************************************************
 * Scheduling class queueing methods:
 */

#ifdef CONFIG_NUMA_BALANCING
/*
 * Approximate time to scan a full NUMA task in ms. The task scan period is
 * calculated based on the task's virtual memory size and
 * numa_balancing_scan_size.
 */
unsigned int sysctl_numa_balancing_scan_period_min = 1000;
unsigned int sysctl_numa_balancing_scan_period_max = 60000;

/* Portion of address space to scan in MB */
unsigned int sysctl_numa_balancing_scan_size = 256;

/* Scan @scan_size MB every @scan_period after an initial @scan_delay in ms */
unsigned int sysctl_numa_balancing_scan_delay = 1000;

struct numa_group {
	atomic_t refcount;

	spinlock_t lock; /* nr_tasks, tasks */
	int nr_tasks;
	pid_t gid;
	int active_nodes;

	struct rcu_head rcu;
	unsigned long total_faults;
	unsigned long max_faults_cpu;
	/*
	 * Faults_cpu is used to decide whether memory should move
	 * towards the CPU. As a consequence, these stats are weighted
	 * more by CPU use than by memory faults.
	 */
	unsigned long *faults_cpu;
	unsigned long faults[0];
};

static inline unsigned long group_faults_priv(struct numa_group *ng);
static inline unsigned long group_faults_shared(struct numa_group *ng);

static unsigned int task_nr_scan_windows(struct task_struct *p)
{
	unsigned long rss = 0;
	unsigned long nr_scan_pages;

	/*
	 * Calculations based on RSS as non-present and empty pages are skipped
	 * by the PTE scanner and NUMA hinting faults should be trapped based
	 * on resident pages.
	 */
	nr_scan_pages = sysctl_numa_balancing_scan_size << (20 - PAGE_SHIFT);
	rss = get_mm_rss(p->mm);
	if (!rss)
		rss = nr_scan_pages;

	rss = round_up(rss, nr_scan_pages);
	return rss / nr_scan_pages;
}

/* For sanity's sake, never scan more PTEs than MAX_SCAN_WINDOW MB/sec. */
#define MAX_SCAN_WINDOW 2560

static unsigned int task_scan_min(struct task_struct *p)
{
	unsigned int scan_size = READ_ONCE(sysctl_numa_balancing_scan_size);
	unsigned int scan, floor;
	unsigned int windows = 1;

	if (scan_size < MAX_SCAN_WINDOW)
		windows = MAX_SCAN_WINDOW / scan_size;
	floor = 1000 / windows;

	scan = sysctl_numa_balancing_scan_period_min / task_nr_scan_windows(p);
	return max_t(unsigned int, floor, scan);
}
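
/*
 * Illustrative example (not in the original source): with the default
 * scan_size of 256MB, windows = 2560/256 = 10 and floor = 100ms. With
 * 4KB pages, a task with 1GB of RSS spans four 256MB scan windows, so
 * scan = 1000ms / 4 = 250ms and task_scan_min() returns 250ms; a task
 * with a very large RSS (say 16GB, i.e. 64 windows and scan ~15ms) is
 * clamped up to the 100ms floor.
 */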

static unsigned int task_scan_start(struct task_struct *p)
{
	unsigned long smin = task_scan_min(p);
	unsigned long period = smin;

	/* Scale the maximum scan period with the amount of shared memory. */
	if (p->numa_group) {
		struct numa_group *ng = p->numa_group;
		unsigned long shared = group_faults_shared(ng);
		unsigned long private = group_faults_priv(ng);

		period *= atomic_read(&ng->refcount);
		period *= shared + 1;
		period /= private + shared + 1;
	}

	return max(smin, period);
}

static unsigned int task_scan_max(struct task_struct *p)
{
	unsigned long smin = task_scan_min(p);
	unsigned long smax;

	/* Watch for min being lower than max due to floor calculations */
	smax = sysctl_numa_balancing_scan_period_max / task_nr_scan_windows(p);

	/* Scale the maximum scan period with the amount of shared memory. */
	if (p->numa_group) {
		struct numa_group *ng = p->numa_group;
		unsigned long shared = group_faults_shared(ng);
		unsigned long private = group_faults_priv(ng);
		unsigned long period = smax;

		period *= atomic_read(&ng->refcount);
		period *= shared + 1;
		period /= private + shared + 1;

		smax = max(smax, period);
	}

	return max(smin, smax);
}

void init_numa_balancing(unsigned long clone_flags, struct task_struct *p)
{
	int mm_users = 0;
	struct mm_struct *mm = p->mm;

	if (mm) {
		mm_users = atomic_read(&mm->mm_users);
		if (mm_users == 1) {
			mm->numa_next_scan = jiffies + msecs_to_jiffies(sysctl_numa_balancing_scan_delay);
			mm->numa_scan_seq = 0;
		}
	}
	p->node_stamp = 0;
	p->numa_scan_seq = mm ? mm->numa_scan_seq : 0;
	p->numa_scan_period = sysctl_numa_balancing_scan_delay;
	p->numa_work.next = &p->numa_work;
	p->numa_faults = NULL;
	p->numa_group = NULL;
	p->last_task_numa_placement = 0;
	p->last_sum_exec_runtime = 0;

	/* New address space, reset the preferred nid */
	if (!(clone_flags & CLONE_VM)) {
		p->numa_preferred_nid = -1;
		return;
	}

	/*
	 * New thread, keep existing numa_preferred_nid which should be copied
	 * already by arch_dup_task_struct but stagger when scans start.
	 */
	if (mm) {
		unsigned int delay;

		delay = min_t(unsigned int, task_scan_max(current),
			current->numa_scan_period * mm_users * NSEC_PER_MSEC);
		delay += 2 * TICK_NSEC;
		p->node_stamp = delay;
	}
}

static void account_numa_enqueue(struct rq *rq, struct task_struct *p)
{
	rq->nr_numa_running += (p->numa_preferred_nid != -1);
	rq->nr_preferred_running += (p->numa_preferred_nid == task_node(p));
}

static void account_numa_dequeue(struct rq *rq, struct task_struct *p)
{
	rq->nr_numa_running -= (p->numa_preferred_nid != -1);
	rq->nr_preferred_running -= (p->numa_preferred_nid == task_node(p));
}

/* Shared or private faults. */
#define NR_NUMA_HINT_FAULT_TYPES 2

/* Memory and CPU locality */
#define NR_NUMA_HINT_FAULT_STATS (NR_NUMA_HINT_FAULT_TYPES * 2)

/* Averaged statistics, and temporary buffers. */
#define NR_NUMA_HINT_FAULT_BUCKETS (NR_NUMA_HINT_FAULT_STATS * 2)

pid_t task_numa_group_id(struct task_struct *p)
{
	return p->numa_group ? p->numa_group->gid : 0;
}

/*
 * The averaged statistics, shared & private, memory & CPU,
 * occupy the first half of the array. The second half of the
 * array is for current counters, which are averaged into the
 * first set by task_numa_placement.
 */
static inline int task_faults_idx(enum numa_faults_stats s, int nid, int priv)
{
	return NR_NUMA_HINT_FAULT_TYPES * (s * nr_node_ids + nid) + priv;
}

static inline unsigned long task_faults(struct task_struct *p, int nid)
{
	if (!p->numa_faults)
		return 0;

	return p->numa_faults[task_faults_idx(NUMA_MEM, nid, 0)] +
		p->numa_faults[task_faults_idx(NUMA_MEM, nid, 1)];
}

static inline unsigned long group_faults(struct task_struct *p, int nid)
{
	if (!p->numa_group)
		return 0;

	return p->numa_group->faults[task_faults_idx(NUMA_MEM, nid, 0)] +
		p->numa_group->faults[task_faults_idx(NUMA_MEM, nid, 1)];
}

static inline unsigned long group_faults_cpu(struct numa_group *group, int nid)
{
	return group->faults_cpu[task_faults_idx(NUMA_MEM, nid, 0)] +
		group->faults_cpu[task_faults_idx(NUMA_MEM, nid, 1)];
}

static inline unsigned long group_faults_priv(struct numa_group *ng)
{
	unsigned long faults = 0;
	int node;

	for_each_online_node(node) {
		faults += ng->faults[task_faults_idx(NUMA_MEM, node, 1)];
	}

	return faults;
}

static inline unsigned long group_faults_shared(struct numa_group *ng)
{
	unsigned long faults = 0;
	int node;

	for_each_online_node(node) {
		faults += ng->faults[task_faults_idx(NUMA_MEM, node, 0)];
	}

	return faults;
}

/*
 * A node triggering more than 1/3 as many NUMA faults as the maximum is
 * considered part of a numa group's pseudo-interleaving set. Migrations
 * between these nodes are slowed down, to allow things to settle down.
 */
#define ACTIVE_NODE_FRACTION 3

static bool numa_is_active_node(int nid, struct numa_group *ng)
{
	return group_faults_cpu(ng, nid) * ACTIVE_NODE_FRACTION > ng->max_faults_cpu;
}

/* Handle placement on systems where not all nodes are directly connected. */
static unsigned long score_nearby_nodes(struct task_struct *p, int nid,
					int maxdist, bool task)
{
	unsigned long score = 0;
	int node;

	/*
	 * All nodes are directly connected, and the same distance
	 * from each other. No need for fancy placement algorithms.
	 */
	if (sched_numa_topology_type == NUMA_DIRECT)
		return 0;

	/*
	 * This code is called for each node, introducing N^2 complexity,
	 * which should be ok given the number of nodes rarely exceeds 8.
	 */
	for_each_online_node(node) {
		unsigned long faults;
		int dist = node_distance(nid, node);

		/*
		 * The furthest away nodes in the system are not interesting
		 * for placement; nid was already counted.
		 */
		if (dist == sched_max_numa_distance || node == nid)
			continue;

		/*
		 * On systems with a backplane NUMA topology, compare groups
		 * of nodes, and move tasks towards the group with the most
		 * memory accesses. When comparing two nodes at distance
		 * "hoplimit", only nodes closer by than "hoplimit" are part
		 * of each group. Skip other nodes.
		 */
		if (sched_numa_topology_type == NUMA_BACKPLANE &&
					dist >= maxdist)
			continue;

		/* Add up the faults from nearby nodes. */
		if (task)
			faults = task_faults(p, node);
		else
			faults = group_faults(p, node);

		/*
		 * On systems with a glueless mesh NUMA topology, there are
		 * no fixed "groups of nodes". Instead, nodes that are not
		 * directly connected bounce traffic through intermediate
		 * nodes; a numa_group can occupy any set of nodes.
		 * The further away a node is, the less the faults count.
		 * This seems to result in good task placement.
		 */
		if (sched_numa_topology_type == NUMA_GLUELESS_MESH) {
			faults *= (sched_max_numa_distance - dist);
			faults /= (sched_max_numa_distance - LOCAL_DISTANCE);
		}

		score += faults;
	}

	return score;
}

/*
 * These return the fraction of accesses done by a particular task, or
 * task group, on a particular numa node. The group weight is given a
 * larger multiplier, in order to group tasks together that are almost
 * evenly spread out between numa nodes.
 */
static inline unsigned long task_weight(struct task_struct *p, int nid,
					int dist)
{
	unsigned long faults, total_faults;

	if (!p->numa_faults)
		return 0;

	total_faults = p->total_numa_faults;

	if (!total_faults)
		return 0;

	faults = task_faults(p, nid);
	faults += score_nearby_nodes(p, nid, dist, true);

	return 1000 * faults / total_faults;
}

static inline unsigned long group_weight(struct task_struct *p, int nid,
					 int dist)
{
	unsigned long faults, total_faults;

	if (!p->numa_group)
		return 0;

	total_faults = p->numa_group->total_faults;

	if (!total_faults)
		return 0;

	faults = group_faults(p, nid);
	faults += score_nearby_nodes(p, nid, dist, false);

	return 1000 * faults / total_faults;
}

bool should_numa_migrate_memory(struct task_struct *p, struct page * page,
				int src_nid, int dst_cpu)
{
	struct numa_group *ng = p->numa_group;
	int dst_nid = cpu_to_node(dst_cpu);
	int last_cpupid, this_cpupid;

	this_cpupid = cpu_pid_to_cpupid(dst_cpu, current->pid);
	last_cpupid = page_cpupid_xchg_last(page, this_cpupid);

	/*
	 * Allow first faults or private faults to migrate immediately early in
	 * the lifetime of a task. The magic number 4 is based on waiting for
	 * two full passes of the "multi-stage node selection" test that is
	 * executed below.
	 */
	if ((p->numa_preferred_nid == -1 || p->numa_scan_seq <= 4) &&
	    (cpupid_pid_unset(last_cpupid) || cpupid_match_pid(p, last_cpupid)))
		return true;

	/*
	 * Multi-stage node selection is used in conjunction with a periodic
	 * migration fault to build a temporal task<->page relation. By using
	 * a two-stage filter we remove short/unlikely relations.
	 *
	 * Using P(p) ~ n_p / n_t as per frequentist probability, we can equate
	 * a task's usage of a particular page (n_p) per total usage of this
	 * page (n_t) (in a given time-span) to a probability.
	 *
	 * Our periodic faults will sample this probability and getting the
	 * same result twice in a row, given these samples are fully
	 * independent, is then given by P(n)^2, provided our sample period
	 * is sufficiently short compared to the usage pattern.
	 *
	 * This quadric squishes small probabilities, making it less likely we
	 * act on an unlikely task<->page relation.
	 */
	if (!cpupid_pid_unset(last_cpupid) &&
				cpupid_to_nid(last_cpupid) != dst_nid)
		return false;

	/* Always allow migrate on private faults */
	if (cpupid_match_pid(p, last_cpupid))
		return true;

	/* A shared fault, but p->numa_group has not been set up yet. */
	if (!ng)
		return true;

	/*
	 * Destination node is much more heavily used than the source
	 * node? Allow migration.
	 */
	if (group_faults_cpu(ng, dst_nid) > group_faults_cpu(ng, src_nid) *
					ACTIVE_NODE_FRACTION)
		return true;

	/*
	 * Distribute memory according to CPU & memory use on each node,
	 * with 3/4 hysteresis to avoid unnecessary memory migrations:
	 *
	 * faults_cpu(dst)   3   faults_cpu(src)
	 * --------------- * - > ---------------
	 * faults_mem(dst)   4   faults_mem(src)
	 */
	return group_faults_cpu(ng, dst_nid) * group_faults(p, src_nid) * 3 >
	       group_faults_cpu(ng, src_nid) * group_faults(p, dst_nid) * 4;
}

static unsigned long weighted_cpuload(struct rq *rq);
static unsigned long source_load(int cpu, int type);
static unsigned long target_load(int cpu, int type);

/* Cached statistics for all CPUs within a node */
struct numa_stats {
	unsigned long load;

	/* Total compute capacity of CPUs on a node */
	unsigned long compute_capacity;
};

/*
 * XXX borrowed from update_sg_lb_stats
 */
static void update_numa_stats(struct numa_stats *ns, int nid)
{
	int cpu;

	memset(ns, 0, sizeof(*ns));
	for_each_cpu(cpu, cpumask_of_node(nid)) {
		struct rq *rq = cpu_rq(cpu);

		ns->load += weighted_cpuload(rq);
		ns->compute_capacity += capacity_of(cpu);
	}

}

struct task_numa_env {
	struct task_struct *p;

	int src_cpu, src_nid;
	int dst_cpu, dst_nid;

	struct numa_stats src_stats, dst_stats;

	int imbalance_pct;
	int dist;

	struct task_struct *best_task;
	long best_imp;
	int best_cpu;
};

static void task_numa_assign(struct task_numa_env *env,
			     struct task_struct *p, long imp)
{
	struct rq *rq = cpu_rq(env->dst_cpu);

	/* Bail out if run-queue part of active NUMA balance. */
	if (xchg(&rq->numa_migrate_on, 1))
		return;

	/*
	 * Clear previous best_cpu/rq numa-migrate flag, since task now
	 * found a better CPU to move/swap.
	 */
	if (env->best_cpu != -1) {
		rq = cpu_rq(env->best_cpu);
		WRITE_ONCE(rq->numa_migrate_on, 0);
	}

	if (env->best_task)
		put_task_struct(env->best_task);
	if (p)
		get_task_struct(p);

	env->best_task = p;
	env->best_imp = imp;
	env->best_cpu = env->dst_cpu;
}

static bool load_too_imbalanced(long src_load, long dst_load,
				struct task_numa_env *env)
{
	long imb, old_imb;
	long orig_src_load, orig_dst_load;
	long src_capacity, dst_capacity;

	/*
	 * The load is corrected for the CPU capacity available on each node.
	 *
	 * src_load        dst_load
	 * ------------ vs ---------
	 * src_capacity    dst_capacity
	 */
	src_capacity = env->src_stats.compute_capacity;
	dst_capacity = env->dst_stats.compute_capacity;

	imb = abs(dst_load * src_capacity - src_load * dst_capacity);

	orig_src_load = env->src_stats.load;
	orig_dst_load = env->dst_stats.load;

	old_imb = abs(orig_dst_load * src_capacity - orig_src_load * dst_capacity);

	/* Would this change make things worse? */
	return (imb > old_imb);
}
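
/*
 * Illustrative example (not in the original source): with equal node
 * capacities of 1024 and original loads of src 300 and dst 500 (old
 * imbalance 200*1024), moving a task with load 100 to dst would give
 * src_load 200 and dst_load 600, an imbalance of 400*1024. That is
 * larger than before, so load_too_imbalanced() returns true and the
 * move is rejected.
 */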

/*
 * Maximum NUMA importance can be 1998 (2*999);
 * SMALLIMP @ 30 would be close to 1998/64.
 * Used to deter task migration.
 */
#define SMALLIMP	30

/*
 * This checks if the overall compute and NUMA accesses of the system would
 * be improved if the source task was migrated to the target dst_cpu, taking
 * into account that it might be best if the task running on the dst_cpu
 * were exchanged with the source task.
 */
static void task_numa_compare(struct task_numa_env *env,
			      long taskimp, long groupimp, bool maymove)
{
	struct rq *dst_rq = cpu_rq(env->dst_cpu);
	struct task_struct *cur;
	long src_load, dst_load;
	long load;
	long imp = env->p->numa_group ? groupimp : taskimp;
	long moveimp = imp;
	int dist = env->dist;

	if (READ_ONCE(dst_rq->numa_migrate_on))
		return;

	rcu_read_lock();
	cur = task_rcu_dereference(&dst_rq->curr);
	if (cur && ((cur->flags & PF_EXITING) || is_idle_task(cur)))
		cur = NULL;

	/*
	 * Because we have preemption enabled we can get migrated around and
	 * end try selecting ourselves (current == env->p) as a swap candidate.
	 */
	if (cur == env->p)
		goto unlock;

	if (!cur) {
		if (maymove && moveimp >= env->best_imp)
			goto assign;
		else
			goto unlock;
	}

	/*
	 * "imp" is the fault differential for the source task between the
	 * source and destination node. Calculate the total differential for
	 * the source task and potential destination task. The more negative
	 * the value is, the more remote accesses that would be expected to
	 * be incurred if the tasks were swapped.
	 */
	/* Skip this swap candidate if it cannot move to the source cpu */
	if (!cpumask_test_cpu(env->src_cpu, &cur->cpus_allowed))
		goto unlock;

	/*
	 * If dst and source tasks are in the same NUMA group, or not
	 * in any group then look only at task weights.
	 */
	if (cur->numa_group == env->p->numa_group) {
		imp = taskimp + task_weight(cur, env->src_nid, dist) -
		      task_weight(cur, env->dst_nid, dist);
		/*
		 * Add some hysteresis to prevent swapping the
		 * tasks within a group over tiny differences.
		 */
		if (cur->numa_group)
			imp -= imp / 16;
	} else {
		/*
		 * Compare the group weights. If a task is all by itself
		 * (not part of a group), use the task weight instead.
		 */
		if (cur->numa_group && env->p->numa_group)
			imp += group_weight(cur, env->src_nid, dist) -
			       group_weight(cur, env->dst_nid, dist);
		else
			imp += task_weight(cur, env->src_nid, dist) -
			       task_weight(cur, env->dst_nid, dist);
	}

	if (maymove && moveimp > imp && moveimp > env->best_imp) {
		imp = moveimp;
		cur = NULL;
		goto assign;
	}

	/*
	 * If the NUMA importance is less than SMALLIMP,
	 * task migration might only result in ping pong
	 * of tasks and also hurt performance due to cache
	 * misses.
	 */
	if (imp < SMALLIMP || imp <= env->best_imp + SMALLIMP / 2)
		goto unlock;

	/*
	 * In the overloaded case, try and keep the load balanced.
	 */
	load = task_h_load(env->p) - task_h_load(cur);
	if (!load)
		goto assign;

	dst_load = env->dst_stats.load + load;
	src_load = env->src_stats.load - load;

	if (load_too_imbalanced(src_load, dst_load, env))
		goto unlock;

assign:
	/*
	 * One idle CPU per node is evaluated for a task numa move.
	 * Call select_idle_sibling to maybe find a better one.
	 */
	if (!cur) {
		/*
		 * select_idle_siblings() uses a per-CPU cpumask that
		 * can be used from IRQ context.
		 */
		local_irq_disable();
		env->dst_cpu = select_idle_sibling(env->p, env->src_cpu,
						   env->dst_cpu);
		local_irq_enable();
	}

	task_numa_assign(env, cur, imp);
unlock:
	rcu_read_unlock();
}

static void task_numa_find_cpu(struct task_numa_env *env,
				long taskimp, long groupimp)
{
	long src_load, dst_load, load;
	bool maymove = false;
	int cpu;

	load = task_h_load(env->p);
	dst_load = env->dst_stats.load + load;
	src_load = env->src_stats.load - load;

	/*
	 * If the improvement from just moving env->p direction is better
	 * than swapping tasks around, check if a move is possible.
	 */
	maymove = !load_too_imbalanced(src_load, dst_load, env);

	for_each_cpu(cpu, cpumask_of_node(env->dst_nid)) {
		/* Skip this CPU if the source task cannot migrate */
		if (!cpumask_test_cpu(cpu, &env->p->cpus_allowed))
			continue;

		env->dst_cpu = cpu;
		task_numa_compare(env, taskimp, groupimp, maymove);
	}
}

static int task_numa_migrate(struct task_struct *p)
{
	struct task_numa_env env = {
		.p = p,

		.src_cpu = task_cpu(p),
		.src_nid = task_node(p),

		.imbalance_pct = 112,

		.best_task = NULL,
		.best_imp = 0,
		.best_cpu = -1,
	};
	struct sched_domain *sd;
	struct rq *best_rq;
	unsigned long taskweight, groupweight;
	int nid, ret, dist;
	long taskimp, groupimp;

	/*
	 * Pick the lowest SD_NUMA domain, as that would have the smallest
	 * imbalance and would be the first to start moving tasks about.
	 *
	 * And we want to avoid any moving of tasks about, as that would create
	 * random movement of tasks -- counter the numa conditions we're trying
	 * to satisfy here.
	 */
	rcu_read_lock();
	sd = rcu_dereference(per_cpu(sd_numa, env.src_cpu));
	if (sd)
		env.imbalance_pct = 100 + (sd->imbalance_pct - 100) / 2;
	rcu_read_unlock();

	/*
	 * Cpusets can break the scheduler domain tree into smaller
	 * balance domains, some of which do not cross NUMA boundaries.
	 * Tasks that are "trapped" in such domains cannot be migrated
	 * elsewhere, so there is no point in (re)trying.
	 */
	if (unlikely(!sd)) {
		sched_setnuma(p, task_node(p));
		return -EINVAL;
	}

	env.dst_nid = p->numa_preferred_nid;
	dist = env.dist = node_distance(env.src_nid, env.dst_nid);
	taskweight = task_weight(p, env.src_nid, dist);
	groupweight = group_weight(p, env.src_nid, dist);
	update_numa_stats(&env.src_stats, env.src_nid);
	taskimp = task_weight(p, env.dst_nid, dist) - taskweight;
	groupimp = group_weight(p, env.dst_nid, dist) - groupweight;
	update_numa_stats(&env.dst_stats, env.dst_nid);

	/* Try to find a spot on the preferred nid. */
	task_numa_find_cpu(&env, taskimp, groupimp);

	/*
	 * Look at other nodes in these cases:
	 * - there is no space available on the preferred_nid
	 * - the task is part of a numa_group that is interleaved across
	 *   multiple NUMA nodes; in order to better consolidate the group,
	 *   we need to check other locations.
	 */
	if (env.best_cpu == -1 || (p->numa_group && p->numa_group->active_nodes > 1)) {
		for_each_online_node(nid) {
			if (nid == env.src_nid || nid == p->numa_preferred_nid)
				continue;

			dist = node_distance(env.src_nid, env.dst_nid);
			if (sched_numa_topology_type == NUMA_BACKPLANE &&
						dist != env.dist) {
				taskweight = task_weight(p, env.src_nid, dist);
				groupweight = group_weight(p, env.src_nid, dist);
			}

			/* Only consider nodes where both task and groups benefit */
			taskimp = task_weight(p, nid, dist) - taskweight;
			groupimp = group_weight(p, nid, dist) - groupweight;
			if (taskimp < 0 && groupimp < 0)
				continue;

			env.dist = dist;
			env.dst_nid = nid;
			update_numa_stats(&env.dst_stats, env.dst_nid);
			task_numa_find_cpu(&env, taskimp, groupimp);
		}
	}

	/*
	 * If the task is part of a workload that spans multiple NUMA nodes,
	 * and is migrating into one of the workload's active nodes, remember
	 * this node as the task's preferred numa node, so the workload can
	 * settle down.
	 * A task that migrated to a second choice node will be better off
	 * trying for a better one later. Do not set the preferred node here.
	 */
	if (p->numa_group) {
		if (env.best_cpu == -1)
			nid = env.src_nid;
		else
			nid = cpu_to_node(env.best_cpu);

		if (nid != p->numa_preferred_nid)
			sched_setnuma(p, nid);
	}

	/* No better CPU than the current one was found. */
	if (env.best_cpu == -1)
		return -EAGAIN;

	best_rq = cpu_rq(env.best_cpu);
	if (env.best_task == NULL) {
		ret = migrate_task_to(p, env.best_cpu);
		WRITE_ONCE(best_rq->numa_migrate_on, 0);
a4739eca 1831 WRITE_ONCE(best_rq->numa_migrate_on, 0);
286549dc
MG
1832 if (ret != 0)
1833 trace_sched_stick_numa(p, env.src_cpu, env.best_cpu);
fb13c7ee
MG
1834 return ret;
1835 }
1836
0ad4e3df 1837 ret = migrate_swap(p, env.best_task, env.best_cpu, env.src_cpu);
a4739eca 1838 WRITE_ONCE(best_rq->numa_migrate_on, 0);
0ad4e3df 1839
286549dc
MG
1840 if (ret != 0)
1841 trace_sched_stick_numa(p, env.src_cpu, task_cpu(env.best_task));
fb13c7ee
MG
1842 put_task_struct(env.best_task);
1843 return ret;
e6628d5b
MG
1844}
1845
6b9a7460
MG
1846/* Attempt to migrate a task to a CPU on the preferred node. */
1847static void numa_migrate_preferred(struct task_struct *p)
1848{
5085e2a3
RR
1849 unsigned long interval = HZ;
1850
2739d3ee 1851 /* This task has no NUMA fault statistics yet */
44dba3d5 1852 if (unlikely(p->numa_preferred_nid == -1 || !p->numa_faults))
6b9a7460
MG
1853 return;
1854
2739d3ee 1855 /* Periodically retry migrating the task to the preferred node */
5085e2a3 1856 interval = min(interval, msecs_to_jiffies(p->numa_scan_period) / 16);
789ba280 1857 p->numa_migrate_retry = jiffies + interval;
2739d3ee
RR
1858
1859 /* Success if task is already running on preferred CPU */
de1b301a 1860 if (task_node(p) == p->numa_preferred_nid)
6b9a7460
MG
1861 return;
1862
1863 /* Otherwise, try migrate to a CPU on the preferred node */
2739d3ee 1864 task_numa_migrate(p);
6b9a7460
MG
1865}
1866
20e07dea 1867/*
4142c3eb 1868 * Find out how many nodes the workload is actively running on. Do this by
20e07dea
RR
1869 * tracking the nodes from which NUMA hinting faults are triggered. This can
1870 * be different from the set of nodes where the workload's memory is currently
1871 * located.
20e07dea 1872 */
4142c3eb 1873static void numa_group_count_active_nodes(struct numa_group *numa_group)
20e07dea
RR
1874{
1875 unsigned long faults, max_faults = 0;
4142c3eb 1876 int nid, active_nodes = 0;
20e07dea
RR
1877
1878 for_each_online_node(nid) {
1879 faults = group_faults_cpu(numa_group, nid);
1880 if (faults > max_faults)
1881 max_faults = faults;
1882 }
1883
1884 for_each_online_node(nid) {
1885 faults = group_faults_cpu(numa_group, nid);
4142c3eb
RR
1886 if (faults * ACTIVE_NODE_FRACTION > max_faults)
1887 active_nodes++;
20e07dea 1888 }
4142c3eb
RR
1889
1890 numa_group->max_faults_cpu = max_faults;
1891 numa_group->active_nodes = active_nodes;
20e07dea
RR
1892}
1893
04bb2f94
RR
1894/*
1895 * When adapting the scan rate, the period is divided into NUMA_PERIOD_SLOTS
1896 * increments. The more local the fault statistics are, the higher the scan
a22b4b01
RR
1897 * period will be for the next scan window. If local/(local+remote) ratio is
1898 * below NUMA_PERIOD_THRESHOLD (where range of ratio is 1..NUMA_PERIOD_SLOTS)
1899 * the scan period will decrease. Aim for 70% local accesses.
04bb2f94
RR
1900 */
1901#define NUMA_PERIOD_SLOTS 10
a22b4b01 1902#define NUMA_PERIOD_THRESHOLD 7
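/*
 * Worked example of the slot arithmetic in update_task_scan_period()
 * below (illustrative numbers, not from the original source): with
 * NUMA_PERIOD_SLOTS = 10 and NUMA_PERIOD_THRESHOLD = 7, a scan period
 * of 1000ms gives period_slot = 100ms.  If 90% of the faults are
 * private (ps_ratio = 9), diff = (9 - 7) * 100ms and the period grows
 * to 1200ms (slower scanning).  If only 40% are local and 40% are
 * private (lr_ratio = ps_ratio = 4), diff = -(7 - 4) * 100ms and the
 * period shrinks to 700ms (faster scanning).
 */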
04bb2f94
RR
1903
1904/*
1905 * Increase the scan period (slow down scanning) if the majority of
1906 * our memory is already on our local node, or if the majority of
1907 * the page accesses are shared with other processes.
1908 * Otherwise, decrease the scan period.
1909 */
1910static void update_task_scan_period(struct task_struct *p,
1911 unsigned long shared, unsigned long private)
1912{
1913 unsigned int period_slot;
37ec97de 1914 int lr_ratio, ps_ratio;
04bb2f94
RR
1915 int diff;
1916
1917 unsigned long remote = p->numa_faults_locality[0];
1918 unsigned long local = p->numa_faults_locality[1];
1919
1920 /*
1921	 * If there were no recorded hinting faults then either the task is
1922	 * completely idle or all activity is in areas that are not of interest
074c2381
MG
1923	 * to automatic numa balancing. Related to that, if there were failed
1924	 * migrations then it implies we are migrating too quickly or the local
1925	 * node is overloaded. In either case, scan more slowly.
04bb2f94 1926 */
074c2381 1927 if (local + shared == 0 || p->numa_faults_locality[2]) {
04bb2f94
RR
1928 p->numa_scan_period = min(p->numa_scan_period_max,
1929 p->numa_scan_period << 1);
1930
1931 p->mm->numa_next_scan = jiffies +
1932 msecs_to_jiffies(p->numa_scan_period);
1933
1934 return;
1935 }
1936
1937 /*
1938 * Prepare to scale scan period relative to the current period.
1939 * == NUMA_PERIOD_THRESHOLD scan period stays the same
1940 * < NUMA_PERIOD_THRESHOLD scan period decreases (scan faster)
1941 * >= NUMA_PERIOD_THRESHOLD scan period increases (scan slower)
1942 */
1943 period_slot = DIV_ROUND_UP(p->numa_scan_period, NUMA_PERIOD_SLOTS);
37ec97de
RR
1944 lr_ratio = (local * NUMA_PERIOD_SLOTS) / (local + remote);
1945 ps_ratio = (private * NUMA_PERIOD_SLOTS) / (private + shared);
1946
1947 if (ps_ratio >= NUMA_PERIOD_THRESHOLD) {
1948 /*
1949 * Most memory accesses are local. There is no need to
1950 * do fast NUMA scanning, since memory is already local.
1951 */
1952 int slot = ps_ratio - NUMA_PERIOD_THRESHOLD;
1953 if (!slot)
1954 slot = 1;
1955 diff = slot * period_slot;
1956 } else if (lr_ratio >= NUMA_PERIOD_THRESHOLD) {
1957 /*
1958 * Most memory accesses are shared with other tasks.
1959 * There is no point in continuing fast NUMA scanning,
1960 * since other tasks may just move the memory elsewhere.
1961 */
1962 int slot = lr_ratio - NUMA_PERIOD_THRESHOLD;
04bb2f94
RR
1963 if (!slot)
1964 slot = 1;
1965 diff = slot * period_slot;
1966 } else {
04bb2f94 1967 /*
37ec97de
RR
1968 * Private memory faults exceed (SLOTS-THRESHOLD)/SLOTS,
1969 * yet they are not on the local NUMA node. Speed up
1970 * NUMA scanning to get the memory moved over.
04bb2f94 1971 */
37ec97de
RR
1972 int ratio = max(lr_ratio, ps_ratio);
1973 diff = -(NUMA_PERIOD_THRESHOLD - ratio) * period_slot;
04bb2f94
RR
1974 }
1975
1976 p->numa_scan_period = clamp(p->numa_scan_period + diff,
1977 task_scan_min(p), task_scan_max(p));
1978 memset(p->numa_faults_locality, 0, sizeof(p->numa_faults_locality));
1979}
1980
7e2703e6
RR
1981/*
1982 * Get the fraction of time the task has been running since the last
1983 * NUMA placement cycle. The scheduler keeps similar statistics, but
1984 * decays those on a 32ms period, which is orders of magnitude off
1985 * from the dozens-of-seconds NUMA balancing period. Use the scheduler
1986 * stats only if the task is so new there are no NUMA statistics yet.
1987 */
1988static u64 numa_get_avg_runtime(struct task_struct *p, u64 *period)
1989{
1990 u64 runtime, delta, now;
1991 /* Use the start of this time slice to avoid calculations. */
1992 now = p->se.exec_start;
1993 runtime = p->se.sum_exec_runtime;
1994
1995 if (p->last_task_numa_placement) {
1996 delta = runtime - p->last_sum_exec_runtime;
1997 *period = now - p->last_task_numa_placement;
1998 } else {
c7b50216 1999 delta = p->se.avg.load_sum;
9d89c257 2000 *period = LOAD_AVG_MAX;
7e2703e6
RR
2001 }
2002
2003 p->last_sum_exec_runtime = runtime;
2004 p->last_task_numa_placement = now;
2005
2006 return delta;
2007}
2008
54009416
RR
2009/*
2010 * Determine the preferred nid for a task in a numa_group. This needs to
2011 * be done in a way that produces consistent results with group_weight,
2012 * otherwise workloads might not converge.
2013 */
2014static int preferred_group_nid(struct task_struct *p, int nid)
2015{
2016 nodemask_t nodes;
2017 int dist;
2018
2019 /* Direct connections between all NUMA nodes. */
2020 if (sched_numa_topology_type == NUMA_DIRECT)
2021 return nid;
2022
2023 /*
2024 * On a system with glueless mesh NUMA topology, group_weight
2025 * scores nodes according to the number of NUMA hinting faults on
2026 * both the node itself, and on nearby nodes.
2027 */
2028 if (sched_numa_topology_type == NUMA_GLUELESS_MESH) {
2029 unsigned long score, max_score = 0;
2030 int node, max_node = nid;
2031
2032 dist = sched_max_numa_distance;
2033
2034 for_each_online_node(node) {
2035 score = group_weight(p, node, dist);
2036 if (score > max_score) {
2037 max_score = score;
2038 max_node = node;
2039 }
2040 }
2041 return max_node;
2042 }
2043
2044 /*
2045 * Finding the preferred nid in a system with NUMA backplane
2046 * interconnect topology is more involved. The goal is to locate
2047 * tasks from numa_groups near each other in the system, and
2048 * untangle workloads from different sides of the system. This requires
2049 * searching down the hierarchy of node groups, recursively searching
2050 * inside the highest scoring group of nodes. The nodemask tricks
2051 * keep the complexity of the search down.
2052 */
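	/*
	 * Illustrative sketch (hypothetical topology, not from the original
	 * source): on a 4-node backplane box where nodes {0,1} and {2,3} are
	 * distance 20 apart internally and 40 apart across the backplane, the
	 * first pass (dist = 40) groups the nodes into {0,1} and {2,3} and
	 * keeps whichever half has more group faults; the next matching pass
	 * (dist = 20) then splits that half into single nodes, and the node
	 * with the most faults becomes the preferred nid.
	 */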
2053 nodes = node_online_map;
2054 for (dist = sched_max_numa_distance; dist > LOCAL_DISTANCE; dist--) {
2055 unsigned long max_faults = 0;
81907478 2056 nodemask_t max_group = NODE_MASK_NONE;
54009416
RR
2057 int a, b;
2058
2059 /* Are there nodes at this distance from each other? */
2060 if (!find_numa_distance(dist))
2061 continue;
2062
2063 for_each_node_mask(a, nodes) {
2064 unsigned long faults = 0;
2065 nodemask_t this_group;
2066 nodes_clear(this_group);
2067
2068 /* Sum group's NUMA faults; includes a==b case. */
2069 for_each_node_mask(b, nodes) {
2070 if (node_distance(a, b) < dist) {
2071 faults += group_faults(p, b);
2072 node_set(b, this_group);
2073 node_clear(b, nodes);
2074 }
2075 }
2076
2077 /* Remember the top group. */
2078 if (faults > max_faults) {
2079 max_faults = faults;
2080 max_group = this_group;
2081 /*
2082 * subtle: at the smallest distance there is
2083 * just one node left in each "group", the
2084 * winner is the preferred nid.
2085 */
2086 nid = a;
2087 }
2088 }
2089 /* Next round, evaluate the nodes within max_group. */
890a5409
JB
2090 if (!max_faults)
2091 break;
54009416
RR
2092 nodes = max_group;
2093 }
2094 return nid;
2095}
2096
cbee9f88
PZ
2097static void task_numa_placement(struct task_struct *p)
2098{
f03bb676
SD
2099 int seq, nid, max_nid = -1;
2100 unsigned long max_faults = 0;
04bb2f94 2101 unsigned long fault_types[2] = { 0, 0 };
7e2703e6
RR
2102 unsigned long total_faults;
2103 u64 runtime, period;
7dbd13ed 2104 spinlock_t *group_lock = NULL;
cbee9f88 2105
7e5a2c17
JL
2106 /*
2107 * The p->mm->numa_scan_seq field gets updated without
2108 * exclusive access. Use READ_ONCE() here to ensure
2109 * that the field is read in a single access:
2110 */
316c1608 2111 seq = READ_ONCE(p->mm->numa_scan_seq);
cbee9f88
PZ
2112 if (p->numa_scan_seq == seq)
2113 return;
2114 p->numa_scan_seq = seq;
598f0ec0 2115 p->numa_scan_period_max = task_scan_max(p);
cbee9f88 2116
7e2703e6
RR
2117 total_faults = p->numa_faults_locality[0] +
2118 p->numa_faults_locality[1];
2119 runtime = numa_get_avg_runtime(p, &period);
2120
7dbd13ed
MG
2121 /* If the task is part of a group prevent parallel updates to group stats */
2122 if (p->numa_group) {
2123 group_lock = &p->numa_group->lock;
60e69eed 2124 spin_lock_irq(group_lock);
7dbd13ed
MG
2125 }
2126
688b7585
MG
2127 /* Find the node with the highest number of faults */
2128 for_each_online_node(nid) {
44dba3d5
IM
2129 /* Keep track of the offsets in numa_faults array */
2130 int mem_idx, membuf_idx, cpu_idx, cpubuf_idx;
83e1d2cd 2131 unsigned long faults = 0, group_faults = 0;
44dba3d5 2132 int priv;
745d6147 2133
be1e4e76 2134 for (priv = 0; priv < NR_NUMA_HINT_FAULT_TYPES; priv++) {
7e2703e6 2135 long diff, f_diff, f_weight;
8c8a743c 2136
44dba3d5
IM
2137 mem_idx = task_faults_idx(NUMA_MEM, nid, priv);
2138 membuf_idx = task_faults_idx(NUMA_MEMBUF, nid, priv);
2139 cpu_idx = task_faults_idx(NUMA_CPU, nid, priv);
2140 cpubuf_idx = task_faults_idx(NUMA_CPUBUF, nid, priv);
745d6147 2141
ac8e895b 2142 /* Decay existing window, copy faults since last scan */
44dba3d5
IM
2143 diff = p->numa_faults[membuf_idx] - p->numa_faults[mem_idx] / 2;
2144 fault_types[priv] += p->numa_faults[membuf_idx];
2145 p->numa_faults[membuf_idx] = 0;
fb13c7ee 2146
7e2703e6
RR
2147 /*
2148 * Normalize the faults_from, so all tasks in a group
2149 * count according to CPU use, instead of by the raw
2150 * number of faults. Tasks with little runtime have
2151 * little over-all impact on throughput, and thus their
2152 * faults are less important.
2153 */
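			/*
			 * For instance (illustrative numbers): a task that ran
			 * for half of the placement period has runtime/period
			 * ~= 0.5, so f_weight below is ~= 1 << 15 (with
			 * 1 << 16 representing 1.0); its buffered CPU faults
			 * are then weighted by that factor, relative to
			 * total_faults, before being folded into the decayed
			 * numa_faults[cpu_idx] value.
			 */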
2154 f_weight = div64_u64(runtime << 16, period + 1);
44dba3d5 2155 f_weight = (f_weight * p->numa_faults[cpubuf_idx]) /
7e2703e6 2156 (total_faults + 1);
44dba3d5
IM
2157 f_diff = f_weight - p->numa_faults[cpu_idx] / 2;
2158 p->numa_faults[cpubuf_idx] = 0;
50ec8a40 2159
44dba3d5
IM
2160 p->numa_faults[mem_idx] += diff;
2161 p->numa_faults[cpu_idx] += f_diff;
2162 faults += p->numa_faults[mem_idx];
83e1d2cd 2163 p->total_numa_faults += diff;
8c8a743c 2164 if (p->numa_group) {
44dba3d5
IM
2165 /*
2166 * safe because we can only change our own group
2167 *
2168 * mem_idx represents the offset for a given
2169 * nid and priv in a specific region because it
2170 * is at the beginning of the numa_faults array.
2171 */
2172 p->numa_group->faults[mem_idx] += diff;
2173 p->numa_group->faults_cpu[mem_idx] += f_diff;
989348b5 2174 p->numa_group->total_faults += diff;
44dba3d5 2175 group_faults += p->numa_group->faults[mem_idx];
8c8a743c 2176 }
ac8e895b
MG
2177 }
2178
f03bb676
SD
2179 if (!p->numa_group) {
2180 if (faults > max_faults) {
2181 max_faults = faults;
2182 max_nid = nid;
2183 }
2184 } else if (group_faults > max_faults) {
2185 max_faults = group_faults;
688b7585
MG
2186 max_nid = nid;
2187 }
83e1d2cd
MG
2188 }
2189
7dbd13ed 2190 if (p->numa_group) {
4142c3eb 2191 numa_group_count_active_nodes(p->numa_group);
60e69eed 2192 spin_unlock_irq(group_lock);
f03bb676 2193 max_nid = preferred_group_nid(p, max_nid);
688b7585
MG
2194 }
2195
bb97fc31
RR
2196 if (max_faults) {
2197 /* Set the new preferred node */
2198 if (max_nid != p->numa_preferred_nid)
2199 sched_setnuma(p, max_nid);
3a7053b3 2200 }
30619c89
SD
2201
2202 update_task_scan_period(p, fault_types[0], fault_types[1]);
cbee9f88
PZ
2203}
2204
8c8a743c
PZ
2205static inline int get_numa_group(struct numa_group *grp)
2206{
2207 return atomic_inc_not_zero(&grp->refcount);
2208}
2209
2210static inline void put_numa_group(struct numa_group *grp)
2211{
2212 if (atomic_dec_and_test(&grp->refcount))
2213 kfree_rcu(grp, rcu);
2214}
2215
3e6a9418
MG
2216static void task_numa_group(struct task_struct *p, int cpupid, int flags,
2217 int *priv)
8c8a743c
PZ
2218{
2219 struct numa_group *grp, *my_grp;
2220 struct task_struct *tsk;
2221 bool join = false;
2222 int cpu = cpupid_to_cpu(cpupid);
2223 int i;
2224
2225 if (unlikely(!p->numa_group)) {
2226 unsigned int size = sizeof(struct numa_group) +
50ec8a40 2227 4*nr_node_ids*sizeof(unsigned long);
8c8a743c
PZ
2228
2229 grp = kzalloc(size, GFP_KERNEL | __GFP_NOWARN);
2230 if (!grp)
2231 return;
2232
2233 atomic_set(&grp->refcount, 1);
4142c3eb
RR
2234 grp->active_nodes = 1;
2235 grp->max_faults_cpu = 0;
8c8a743c 2236 spin_lock_init(&grp->lock);
e29cf08b 2237 grp->gid = p->pid;
50ec8a40 2238 /* Second half of the array tracks nids where faults happen */
be1e4e76
RR
2239 grp->faults_cpu = grp->faults + NR_NUMA_HINT_FAULT_TYPES *
2240 nr_node_ids;
8c8a743c 2241
be1e4e76 2242 for (i = 0; i < NR_NUMA_HINT_FAULT_STATS * nr_node_ids; i++)
44dba3d5 2243 grp->faults[i] = p->numa_faults[i];
8c8a743c 2244
989348b5 2245 grp->total_faults = p->total_numa_faults;
83e1d2cd 2246
8c8a743c
PZ
2247 grp->nr_tasks++;
2248 rcu_assign_pointer(p->numa_group, grp);
2249 }
2250
2251 rcu_read_lock();
316c1608 2252 tsk = READ_ONCE(cpu_rq(cpu)->curr);
8c8a743c
PZ
2253
2254 if (!cpupid_match_pid(tsk, cpupid))
3354781a 2255 goto no_join;
8c8a743c
PZ
2256
2257 grp = rcu_dereference(tsk->numa_group);
2258 if (!grp)
3354781a 2259 goto no_join;
8c8a743c
PZ
2260
2261 my_grp = p->numa_group;
2262 if (grp == my_grp)
3354781a 2263 goto no_join;
8c8a743c
PZ
2264
2265 /*
2266 * Only join the other group if its bigger; if we're the bigger group,
2267 * the other task will join us.
2268 */
2269 if (my_grp->nr_tasks > grp->nr_tasks)
3354781a 2270 goto no_join;
8c8a743c
PZ
2271
2272 /*
2273 * Tie-break on the grp address.
2274 */
2275 if (my_grp->nr_tasks == grp->nr_tasks && my_grp > grp)
3354781a 2276 goto no_join;
8c8a743c 2277
dabe1d99
RR
2278 /* Always join threads in the same process. */
2279 if (tsk->mm == current->mm)
2280 join = true;
2281
2282 /* Simple filter to avoid false positives due to PID collisions */
2283 if (flags & TNF_SHARED)
2284 join = true;
8c8a743c 2285
3e6a9418
MG
2286 /* Update priv based on whether false sharing was detected */
2287 *priv = !join;
2288
dabe1d99 2289 if (join && !get_numa_group(grp))
3354781a 2290 goto no_join;
8c8a743c 2291
8c8a743c
PZ
2292 rcu_read_unlock();
2293
2294 if (!join)
2295 return;
2296
60e69eed
MG
2297 BUG_ON(irqs_disabled());
2298 double_lock_irq(&my_grp->lock, &grp->lock);
989348b5 2299
be1e4e76 2300 for (i = 0; i < NR_NUMA_HINT_FAULT_STATS * nr_node_ids; i++) {
44dba3d5
IM
2301 my_grp->faults[i] -= p->numa_faults[i];
2302 grp->faults[i] += p->numa_faults[i];
8c8a743c 2303 }
989348b5
MG
2304 my_grp->total_faults -= p->total_numa_faults;
2305 grp->total_faults += p->total_numa_faults;
8c8a743c 2306
8c8a743c
PZ
2307 my_grp->nr_tasks--;
2308 grp->nr_tasks++;
2309
2310 spin_unlock(&my_grp->lock);
60e69eed 2311 spin_unlock_irq(&grp->lock);
8c8a743c
PZ
2312
2313 rcu_assign_pointer(p->numa_group, grp);
2314
2315 put_numa_group(my_grp);
3354781a
PZ
2316 return;
2317
2318no_join:
2319 rcu_read_unlock();
2320 return;
8c8a743c
PZ
2321}
2322
2323void task_numa_free(struct task_struct *p)
2324{
2325 struct numa_group *grp = p->numa_group;
44dba3d5 2326 void *numa_faults = p->numa_faults;
e9dd685c
SR
2327 unsigned long flags;
2328 int i;
8c8a743c
PZ
2329
2330 if (grp) {
e9dd685c 2331 spin_lock_irqsave(&grp->lock, flags);
be1e4e76 2332 for (i = 0; i < NR_NUMA_HINT_FAULT_STATS * nr_node_ids; i++)
44dba3d5 2333 grp->faults[i] -= p->numa_faults[i];
989348b5 2334 grp->total_faults -= p->total_numa_faults;
83e1d2cd 2335
8c8a743c 2336 grp->nr_tasks--;
e9dd685c 2337 spin_unlock_irqrestore(&grp->lock, flags);
35b123e2 2338 RCU_INIT_POINTER(p->numa_group, NULL);
8c8a743c
PZ
2339 put_numa_group(grp);
2340 }
2341
44dba3d5 2342 p->numa_faults = NULL;
82727018 2343 kfree(numa_faults);
8c8a743c
PZ
2344}
2345
cbee9f88
PZ
2346/*
2347 * Got a PROT_NONE fault for a page on @node.
2348 */
58b46da3 2349void task_numa_fault(int last_cpupid, int mem_node, int pages, int flags)
cbee9f88
PZ
2350{
2351 struct task_struct *p = current;
6688cc05 2352 bool migrated = flags & TNF_MIGRATED;
58b46da3 2353 int cpu_node = task_node(current);
792568ec 2354 int local = !!(flags & TNF_FAULT_LOCAL);
4142c3eb 2355 struct numa_group *ng;
ac8e895b 2356 int priv;
cbee9f88 2357
2a595721 2358 if (!static_branch_likely(&sched_numa_balancing))
1a687c2e
MG
2359 return;
2360
9ff1d9ff
MG
2361 /* for example, ksmd faulting in a user's mm */
2362 if (!p->mm)
2363 return;
2364
f809ca9a 2365 /* Allocate buffer to track faults on a per-node basis */
44dba3d5
IM
2366 if (unlikely(!p->numa_faults)) {
2367 int size = sizeof(*p->numa_faults) *
be1e4e76 2368 NR_NUMA_HINT_FAULT_BUCKETS * nr_node_ids;
f809ca9a 2369
44dba3d5
IM
2370 p->numa_faults = kzalloc(size, GFP_KERNEL|__GFP_NOWARN);
2371 if (!p->numa_faults)
f809ca9a 2372 return;
745d6147 2373
83e1d2cd 2374 p->total_numa_faults = 0;
04bb2f94 2375 memset(p->numa_faults_locality, 0, sizeof(p->numa_faults_locality));
f809ca9a 2376 }
cbee9f88 2377
8c8a743c
PZ
2378 /*
2379 * First accesses are treated as private, otherwise consider accesses
2380 * to be private if the accessing pid has not changed
2381 */
2382 if (unlikely(last_cpupid == (-1 & LAST_CPUPID_MASK))) {
2383 priv = 1;
2384 } else {
2385 priv = cpupid_match_pid(p, last_cpupid);
6688cc05 2386 if (!priv && !(flags & TNF_NO_GROUP))
3e6a9418 2387 task_numa_group(p, last_cpupid, flags, &priv);
8c8a743c
PZ
2388 }
2389
792568ec
RR
2390 /*
2391 * If a workload spans multiple NUMA nodes, a shared fault that
2392 * occurs wholly within the set of nodes that the workload is
2393 * actively using should be counted as local. This allows the
2394 * scan rate to slow down when a workload has settled down.
2395 */
4142c3eb
RR
2396 ng = p->numa_group;
2397 if (!priv && !local && ng && ng->active_nodes > 1 &&
2398 numa_is_active_node(cpu_node, ng) &&
2399 numa_is_active_node(mem_node, ng))
792568ec
RR
2400 local = 1;
2401
2739d3ee 2402 /*
e1ff516a
YW
2403 * Retry to migrate task to preferred node periodically, in case it
2404 * previously failed, or the scheduler moved us.
2739d3ee 2405 */
b6a60cf3
SD
2406 if (time_after(jiffies, p->numa_migrate_retry)) {
2407 task_numa_placement(p);
6b9a7460 2408 numa_migrate_preferred(p);
b6a60cf3 2409 }
6b9a7460 2410
b32e86b4
IM
2411 if (migrated)
2412 p->numa_pages_migrated += pages;
074c2381
MG
2413 if (flags & TNF_MIGRATE_FAIL)
2414 p->numa_faults_locality[2] += pages;
b32e86b4 2415
44dba3d5
IM
2416 p->numa_faults[task_faults_idx(NUMA_MEMBUF, mem_node, priv)] += pages;
2417 p->numa_faults[task_faults_idx(NUMA_CPUBUF, cpu_node, priv)] += pages;
792568ec 2418 p->numa_faults_locality[local] += pages;
cbee9f88
PZ
2419}
2420
6e5fb223
PZ
2421static void reset_ptenuma_scan(struct task_struct *p)
2422{
7e5a2c17
JL
2423 /*
2424 * We only did a read acquisition of the mmap sem, so
2425 * p->mm->numa_scan_seq is written to without exclusive access
2426 * and the update is not guaranteed to be atomic. That's not
2427 * much of an issue though, since this is just used for
2428 * statistical sampling. Use READ_ONCE/WRITE_ONCE, which are not
2429 * expensive, to avoid any form of compiler optimizations:
2430 */
316c1608 2431 WRITE_ONCE(p->mm->numa_scan_seq, READ_ONCE(p->mm->numa_scan_seq) + 1);
6e5fb223
PZ
2432 p->mm->numa_scan_offset = 0;
2433}
2434
cbee9f88
PZ
2435/*
2436 * The expensive part of numa migration is done from task_work context.
2437 * Triggered from task_tick_numa().
2438 */
2439void task_numa_work(struct callback_head *work)
2440{
2441 unsigned long migrate, next_scan, now = jiffies;
2442 struct task_struct *p = current;
2443 struct mm_struct *mm = p->mm;
51170840 2444 u64 runtime = p->se.sum_exec_runtime;
6e5fb223 2445 struct vm_area_struct *vma;
9f40604c 2446 unsigned long start, end;
598f0ec0 2447 unsigned long nr_pte_updates = 0;
4620f8c1 2448 long pages, virtpages;
cbee9f88 2449
9148a3a1 2450 SCHED_WARN_ON(p != container_of(work, struct task_struct, numa_work));
cbee9f88
PZ
2451
2452 work->next = work; /* protect against double add */
2453 /*
2454 * Who cares about NUMA placement when they're dying.
2455 *
2456 * NOTE: make sure not to dereference p->mm before this check,
2457 * exit_task_work() happens _after_ exit_mm() so we could be called
2458 * without p->mm even though we still had it when we enqueued this
2459 * work.
2460 */
2461 if (p->flags & PF_EXITING)
2462 return;
2463
930aa174 2464 if (!mm->numa_next_scan) {
7e8d16b6
MG
2465 mm->numa_next_scan = now +
2466 msecs_to_jiffies(sysctl_numa_balancing_scan_delay);
b8593bfd
MG
2467 }
2468
cbee9f88
PZ
2469 /*
2470 * Enforce maximal scan/migration frequency..
2471 */
2472 migrate = mm->numa_next_scan;
2473 if (time_before(now, migrate))
2474 return;
2475
598f0ec0
MG
2476 if (p->numa_scan_period == 0) {
2477 p->numa_scan_period_max = task_scan_max(p);
b5dd77c8 2478 p->numa_scan_period = task_scan_start(p);
598f0ec0 2479 }
cbee9f88 2480
fb003b80 2481 next_scan = now + msecs_to_jiffies(p->numa_scan_period);
cbee9f88
PZ
2482 if (cmpxchg(&mm->numa_next_scan, migrate, next_scan) != migrate)
2483 return;
2484
19a78d11
PZ
2485 /*
2486 * Delay this task enough that another task of this mm will likely win
2487 * the next time around.
2488 */
2489 p->node_stamp += 2 * TICK_NSEC;
2490
9f40604c
MG
2491 start = mm->numa_scan_offset;
2492 pages = sysctl_numa_balancing_scan_size;
2493 pages <<= 20 - PAGE_SHIFT; /* MB in pages */
4620f8c1 2494 virtpages = pages * 8; /* Scan up to this much virtual space */
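	/*
	 * For example, with a 256MB scan size and 4K pages this works out to
	 * pages = 65536 base pages and virtpages = 524288, i.e. up to 2GB of
	 * virtual address space walked per pass (illustrative numbers).
	 */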
9f40604c
MG
2495 if (!pages)
2496 return;
cbee9f88 2497
4620f8c1 2498
8655d549
VB
2499 if (!down_read_trylock(&mm->mmap_sem))
2500 return;
9f40604c 2501 vma = find_vma(mm, start);
6e5fb223
PZ
2502 if (!vma) {
2503 reset_ptenuma_scan(p);
9f40604c 2504 start = 0;
6e5fb223
PZ
2505 vma = mm->mmap;
2506 }
9f40604c 2507 for (; vma; vma = vma->vm_next) {
6b79c57b 2508 if (!vma_migratable(vma) || !vma_policy_mof(vma) ||
8e76d4ee 2509 is_vm_hugetlb_page(vma) || (vma->vm_flags & VM_MIXEDMAP)) {
6e5fb223 2510 continue;
6b79c57b 2511 }
6e5fb223 2512
4591ce4f
MG
2513 /*
2514 * Shared library pages mapped by multiple processes are not
2515 * migrated as it is expected they are cache replicated. Avoid
2516 * hinting faults in read-only file-backed mappings or the vdso
2517 * as migrating the pages will be of marginal benefit.
2518 */
2519 if (!vma->vm_mm ||
2520 (vma->vm_file && (vma->vm_flags & (VM_READ|VM_WRITE)) == (VM_READ)))
2521 continue;
2522
3c67f474
MG
2523 /*
2524 * Skip inaccessible VMAs to avoid any confusion between
2525 * PROT_NONE and NUMA hinting ptes
2526 */
2527 if (!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)))
2528 continue;
4591ce4f 2529
9f40604c
MG
2530 do {
2531 start = max(start, vma->vm_start);
2532 end = ALIGN(start + (pages << PAGE_SHIFT), HPAGE_SIZE);
2533 end = min(end, vma->vm_end);
4620f8c1 2534 nr_pte_updates = change_prot_numa(vma, start, end);
598f0ec0
MG
2535
2536 /*
4620f8c1
RR
2537			 * Try to scan sysctl_numa_balancing_scan_size worth of
2538 * hpages that have at least one present PTE that
2539 * is not already pte-numa. If the VMA contains
2540 * areas that are unused or already full of prot_numa
2541 * PTEs, scan up to virtpages, to skip through those
2542 * areas faster.
598f0ec0
MG
2543 */
2544 if (nr_pte_updates)
2545 pages -= (end - start) >> PAGE_SHIFT;
4620f8c1 2546 virtpages -= (end - start) >> PAGE_SHIFT;
6e5fb223 2547
9f40604c 2548 start = end;
4620f8c1 2549 if (pages <= 0 || virtpages <= 0)
9f40604c 2550 goto out;
3cf1962c
RR
2551
2552 cond_resched();
9f40604c 2553 } while (end != vma->vm_end);
cbee9f88 2554 }
6e5fb223 2555
9f40604c 2556out:
6e5fb223 2557 /*
c69307d5
PZ
2558 * It is possible to reach the end of the VMA list but the last few
2559	 * VMAs are not guaranteed to be vma_migratable. If they are not, we
2560 * would find the !migratable VMA on the next scan but not reset the
2561 * scanner to the start so check it now.
6e5fb223
PZ
2562 */
2563 if (vma)
9f40604c 2564 mm->numa_scan_offset = start;
6e5fb223
PZ
2565 else
2566 reset_ptenuma_scan(p);
2567 up_read(&mm->mmap_sem);
51170840
RR
2568
2569 /*
2570 * Make sure tasks use at least 32x as much time to run other code
2571 * than they used here, to limit NUMA PTE scanning overhead to 3% max.
2572 * Usually update_task_scan_period slows down scanning enough; on an
2573 * overloaded system we need to limit overhead on a per task basis.
2574 */
2575 if (unlikely(p->se.sum_exec_runtime != runtime)) {
2576 u64 diff = p->se.sum_exec_runtime - runtime;
2577 p->node_stamp += 32 * diff;
2578 }
cbee9f88
PZ
2579}
2580
2581/*
2582 * Drive the periodic memory faults..
2583 */
2584void task_tick_numa(struct rq *rq, struct task_struct *curr)
2585{
2586 struct callback_head *work = &curr->numa_work;
2587 u64 period, now;
2588
2589 /*
2590 * We don't care about NUMA placement if we don't have memory.
2591 */
2592 if (!curr->mm || (curr->flags & PF_EXITING) || work->next != work)
2593 return;
2594
2595 /*
2596 * Using runtime rather than walltime has the dual advantage that
2597 * we (mostly) drive the selection from busy threads and that the
2598 * task needs to have done some actual work before we bother with
2599 * NUMA placement.
2600 */
2601 now = curr->se.sum_exec_runtime;
2602 period = (u64)curr->numa_scan_period * NSEC_PER_MSEC;
2603
25b3e5a3 2604 if (now > curr->node_stamp + period) {
4b96a29b 2605 if (!curr->node_stamp)
b5dd77c8 2606 curr->numa_scan_period = task_scan_start(curr);
19a78d11 2607 curr->node_stamp += period;
cbee9f88
PZ
2608
2609 if (!time_before(jiffies, curr->mm->numa_next_scan)) {
2610 init_task_work(work, task_numa_work); /* TODO: move this into sched_fork() */
2611 task_work_add(curr, work, true);
2612 }
2613 }
2614}
3fed382b 2615
3f9672ba
SD
2616static void update_scan_period(struct task_struct *p, int new_cpu)
2617{
2618 int src_nid = cpu_to_node(task_cpu(p));
2619 int dst_nid = cpu_to_node(new_cpu);
2620
05cbdf4f
MG
2621 if (!static_branch_likely(&sched_numa_balancing))
2622 return;
2623
3f9672ba
SD
2624 if (!p->mm || !p->numa_faults || (p->flags & PF_EXITING))
2625 return;
2626
05cbdf4f
MG
2627 if (src_nid == dst_nid)
2628 return;
2629
2630 /*
2631 * Allow resets if faults have been trapped before one scan
2632 * has completed. This is most likely due to a new task that
2633 * is pulled cross-node due to wakeups or load balancing.
2634 */
2635 if (p->numa_scan_seq) {
2636 /*
2637 * Avoid scan adjustments if moving to the preferred
2638 * node or if the task was not previously running on
2639 * the preferred node.
2640 */
2641 if (dst_nid == p->numa_preferred_nid ||
2642 (p->numa_preferred_nid != -1 && src_nid != p->numa_preferred_nid))
2643 return;
2644 }
2645
2646 p->numa_scan_period = task_scan_start(p);
3f9672ba
SD
2647}
2648
cbee9f88
PZ
2649#else
2650static void task_tick_numa(struct rq *rq, struct task_struct *curr)
2651{
2652}
0ec8aa00
PZ
2653
2654static inline void account_numa_enqueue(struct rq *rq, struct task_struct *p)
2655{
2656}
2657
2658static inline void account_numa_dequeue(struct rq *rq, struct task_struct *p)
2659{
2660}
3fed382b 2661
3f9672ba
SD
2662static inline void update_scan_period(struct task_struct *p, int new_cpu)
2663{
2664}
2665
cbee9f88
PZ
2666#endif /* CONFIG_NUMA_BALANCING */
2667
30cfdcfc
DA
2668static void
2669account_entity_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
2670{
2671 update_load_add(&cfs_rq->load, se->load.weight);
c09595f6 2672 if (!parent_entity(se))
029632fb 2673 update_load_add(&rq_of(cfs_rq)->load, se->load.weight);
367456c7 2674#ifdef CONFIG_SMP
0ec8aa00
PZ
2675 if (entity_is_task(se)) {
2676 struct rq *rq = rq_of(cfs_rq);
2677
2678 account_numa_enqueue(rq, task_of(se));
2679 list_add(&se->group_node, &rq->cfs_tasks);
2680 }
367456c7 2681#endif
30cfdcfc 2682 cfs_rq->nr_running++;
30cfdcfc
DA
2683}
2684
2685static void
2686account_entity_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
2687{
2688 update_load_sub(&cfs_rq->load, se->load.weight);
c09595f6 2689 if (!parent_entity(se))
029632fb 2690 update_load_sub(&rq_of(cfs_rq)->load, se->load.weight);
bfdb198c 2691#ifdef CONFIG_SMP
0ec8aa00
PZ
2692 if (entity_is_task(se)) {
2693 account_numa_dequeue(rq_of(cfs_rq), task_of(se));
b87f1724 2694 list_del_init(&se->group_node);
0ec8aa00 2695 }
bfdb198c 2696#endif
30cfdcfc 2697 cfs_rq->nr_running--;
30cfdcfc
DA
2698}
2699
8d5b9025
PZ
2700/*
2701 * Signed add and clamp on underflow.
2702 *
2703 * Explicitly do a load-store to ensure the intermediate value never hits
2704 * memory. This allows lockless observations without ever seeing the negative
2705 * values.
2706 */
2707#define add_positive(_ptr, _val) do { \
2708 typeof(_ptr) ptr = (_ptr); \
2709 typeof(_val) val = (_val); \
2710 typeof(*ptr) res, var = READ_ONCE(*ptr); \
2711 \
2712 res = var + val; \
2713 \
2714 if (val < 0 && res > var) \
2715 res = 0; \
2716 \
2717 WRITE_ONCE(*ptr, res); \
2718} while (0)
2719
2720/*
2721 * Unsigned subtract and clamp on underflow.
2722 *
2723 * Explicitly do a load-store to ensure the intermediate value never hits
2724 * memory. This allows lockless observations without ever seeing the negative
2725 * values.
2726 */
2727#define sub_positive(_ptr, _val) do { \
2728 typeof(_ptr) ptr = (_ptr); \
2729 typeof(*ptr) val = (_val); \
2730 typeof(*ptr) res, var = READ_ONCE(*ptr); \
2731 res = var - val; \
2732 if (res > var) \
2733 res = 0; \
2734 WRITE_ONCE(*ptr, res); \
2735} while (0)
2736
b5c0ce7b
PB
2737/*
2738 * Remove and clamp on negative, from a local variable.
2739 *
2740 * A variant of sub_positive(), which does not use explicit load-store
2741 * and is thus optimized for local variable updates.
2742 */
2743#define lsub_positive(_ptr, _val) do { \
2744 typeof(_ptr) ptr = (_ptr); \
2745 *ptr -= min_t(typeof(*ptr), *ptr, _val); \
2746} while (0)
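/*
 * Illustrative use of the helpers above (mirroring dequeue_load_avg()
 * below): when detaching an entity's contribution,
 *
 *	sub_positive(&cfs_rq->avg.load_avg, se->avg.load_avg);
 *
 * clamps at zero instead of wrapping to a huge unsigned value if the
 * removed amount happens to exceed the current (independently updated)
 * average, so lockless readers never observe a bogus spike.
 */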
2747
8d5b9025 2748#ifdef CONFIG_SMP
8d5b9025
PZ
2749static inline void
2750enqueue_runnable_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
2751{
1ea6c46a
PZ
2752 cfs_rq->runnable_weight += se->runnable_weight;
2753
2754 cfs_rq->avg.runnable_load_avg += se->avg.runnable_load_avg;
2755 cfs_rq->avg.runnable_load_sum += se_runnable(se) * se->avg.runnable_load_sum;
8d5b9025
PZ
2756}
2757
2758static inline void
2759dequeue_runnable_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
2760{
1ea6c46a
PZ
2761 cfs_rq->runnable_weight -= se->runnable_weight;
2762
2763 sub_positive(&cfs_rq->avg.runnable_load_avg, se->avg.runnable_load_avg);
2764 sub_positive(&cfs_rq->avg.runnable_load_sum,
2765 se_runnable(se) * se->avg.runnable_load_sum);
8d5b9025
PZ
2766}
2767
2768static inline void
2769enqueue_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
2770{
2771 cfs_rq->avg.load_avg += se->avg.load_avg;
2772 cfs_rq->avg.load_sum += se_weight(se) * se->avg.load_sum;
2773}
2774
2775static inline void
2776dequeue_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
2777{
2778 sub_positive(&cfs_rq->avg.load_avg, se->avg.load_avg);
2779 sub_positive(&cfs_rq->avg.load_sum, se_weight(se) * se->avg.load_sum);
2780}
2781#else
2782static inline void
2783enqueue_runnable_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) { }
2784static inline void
2785dequeue_runnable_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) { }
2786static inline void
2787enqueue_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) { }
2788static inline void
2789dequeue_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) { }
2790#endif
2791
9059393e 2792static void reweight_entity(struct cfs_rq *cfs_rq, struct sched_entity *se,
1ea6c46a 2793 unsigned long weight, unsigned long runnable)
9059393e
VG
2794{
2795 if (se->on_rq) {
2796 /* commit outstanding execution time */
2797 if (cfs_rq->curr == se)
2798 update_curr(cfs_rq);
2799 account_entity_dequeue(cfs_rq, se);
2800 dequeue_runnable_load_avg(cfs_rq, se);
2801 }
2802 dequeue_load_avg(cfs_rq, se);
2803
1ea6c46a 2804 se->runnable_weight = runnable;
9059393e
VG
2805 update_load_set(&se->load, weight);
2806
2807#ifdef CONFIG_SMP
1ea6c46a
PZ
2808 do {
2809 u32 divider = LOAD_AVG_MAX - 1024 + se->avg.period_contrib;
2810
2811 se->avg.load_avg = div_u64(se_weight(se) * se->avg.load_sum, divider);
2812 se->avg.runnable_load_avg =
2813 div_u64(se_runnable(se) * se->avg.runnable_load_sum, divider);
2814 } while (0);
9059393e
VG
2815#endif
2816
2817 enqueue_load_avg(cfs_rq, se);
2818 if (se->on_rq) {
2819 account_entity_enqueue(cfs_rq, se);
2820 enqueue_runnable_load_avg(cfs_rq, se);
2821 }
2822}
2823
2824void reweight_task(struct task_struct *p, int prio)
2825{
2826 struct sched_entity *se = &p->se;
2827 struct cfs_rq *cfs_rq = cfs_rq_of(se);
2828 struct load_weight *load = &se->load;
2829 unsigned long weight = scale_load(sched_prio_to_weight[prio]);
2830
1ea6c46a 2831 reweight_entity(cfs_rq, se, weight, weight);
9059393e
VG
2832 load->inv_weight = sched_prio_to_wmult[prio];
2833}
2834
3ff6dcac 2835#ifdef CONFIG_FAIR_GROUP_SCHED
387f77cc 2836#ifdef CONFIG_SMP
cef27403
PZ
2837/*
2838 * All this does is approximate the hierarchical proportion which includes that
2839 * global sum we all love to hate.
2840 *
2841 * That is, the weight of a group entity, is the proportional share of the
2842 * group weight based on the group runqueue weights. That is:
2843 *
2844 * tg->weight * grq->load.weight
2845 * ge->load.weight = ----------------------------- (1)
2846 * \Sum grq->load.weight
2847 *
2848 * Now, because computing that sum is prohibitively expensive to compute (been
2849 * there, done that) we approximate it with this average stuff. The average
2850 * moves slower and therefore the approximation is cheaper and more stable.
2851 *
2852 * So instead of the above, we substitute:
2853 *
2854 * grq->load.weight -> grq->avg.load_avg (2)
2855 *
2856 * which yields the following:
2857 *
2858 * tg->weight * grq->avg.load_avg
2859 * ge->load.weight = ------------------------------ (3)
2860 * tg->load_avg
2861 *
2862 * Where: tg->load_avg ~= \Sum grq->avg.load_avg
2863 *
2864 * That is shares_avg, and it is right (given the approximation (2)).
2865 *
2866 * The problem with it is that because the average is slow -- it was designed
2867 * to be exactly that of course -- this leads to transients in boundary
2868 * conditions. In specific, the case where the group was idle and we start the
2869 * one task. It takes time for our CPU's grq->avg.load_avg to build up,
2870 * yielding bad latency etc..
2871 *
2872 * Now, in that special case (1) reduces to:
2873 *
2874 * tg->weight * grq->load.weight
17de4ee0 2875 * ge->load.weight = ----------------------------- = tg->weight (4)
cef27403
PZ
2876 *				  grq->load.weight
2877 *
2878 * That is, the sum collapses because all other CPUs are idle; the UP scenario.
2879 *
2880 * So what we do is modify our approximation (3) to approach (4) in the (near)
2881 * UP case, like:
2882 *
2883 * ge->load.weight =
2884 *
2885 * tg->weight * grq->load.weight
2886 * --------------------------------------------------- (5)
2887 * tg->load_avg - grq->avg.load_avg + grq->load.weight
2888 *
17de4ee0
PZ
2889 * But because grq->load.weight can drop to 0, resulting in a divide by zero,
2890 * we need to use grq->avg.load_avg as its lower bound, which then gives:
2891 *
2892 *
2893 * tg->weight * grq->load.weight
2894 * ge->load.weight = ----------------------------- (6)
2895 * tg_load_avg'
2896 *
2897 * Where:
2898 *
2899 * tg_load_avg' = tg->load_avg - grq->avg.load_avg +
2900 * max(grq->load.weight, grq->avg.load_avg)
cef27403
PZ
2901 *
2902 * And that is shares_weight and is icky. In the (near) UP case it approaches
2903 * (4) while in the normal case it approaches (3). It consistently
2904 * overestimates the ge->load.weight and therefore:
2905 *
2906 * \Sum ge->load.weight >= tg->weight
2907 *
2908 * hence icky!
2909 */
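/*
 * Worked example of (6), with illustrative numbers (not from the original
 * source): tg->weight = 1024, this CPU's grq->load.weight = 1024 (one nice-0
 * task just woken) but grq->avg.load_avg still only 512, and another CPU
 * contributing 512 so that tg->load_avg = 1024.  Then:
 *
 *   tg_load_avg'     = 1024 - 512 + max(1024, 512) = 1536
 *   ge->load.weight  = 1024 * 1024 / 1536 ~= 682
 *
 * whereas plain shares_avg (3) would give only 1024 * 512 / 1024 = 512.
 * If the other CPU's contribution drops to ~0, (6) collapses to the full
 * tg->weight, matching the UP case (4).
 */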
2c8e4dce 2910static long calc_group_shares(struct cfs_rq *cfs_rq)
cf5f0acf 2911{
7c80cfc9
PZ
2912 long tg_weight, tg_shares, load, shares;
2913 struct task_group *tg = cfs_rq->tg;
2914
2915 tg_shares = READ_ONCE(tg->shares);
cf5f0acf 2916
3d4b60d3 2917 load = max(scale_load_down(cfs_rq->load.weight), cfs_rq->avg.load_avg);
cf5f0acf 2918
ea1dc6fc 2919 tg_weight = atomic_long_read(&tg->load_avg);
3ff6dcac 2920
ea1dc6fc
PZ
2921 /* Ensure tg_weight >= load */
2922 tg_weight -= cfs_rq->tg_load_avg_contrib;
2923 tg_weight += load;
3ff6dcac 2924
7c80cfc9 2925 shares = (tg_shares * load);
cf5f0acf
PZ
2926 if (tg_weight)
2927 shares /= tg_weight;
3ff6dcac 2928
b8fd8423
DE
2929 /*
2930 * MIN_SHARES has to be unscaled here to support per-CPU partitioning
2931 * of a group with small tg->shares value. It is a floor value which is
2932 * assigned as a minimum load.weight to the sched_entity representing
2933 * the group on a CPU.
2934 *
2935 * E.g. on 64-bit for a group with tg->shares of scale_load(15)=15*1024
2936 * on an 8-core system with 8 tasks each runnable on one CPU shares has
2937 * to be 15*1024*1/8=1920 instead of scale_load(MIN_SHARES)=2*1024. In
2938 * case no task is runnable on a CPU MIN_SHARES=2 should be returned
2939 * instead of 0.
2940 */
7c80cfc9 2941 return clamp_t(long, shares, MIN_SHARES, tg_shares);
3ff6dcac 2942}
2c8e4dce
JB
2943
2944/*
17de4ee0
PZ
2945 * This calculates the effective runnable weight for a group entity based on
2946 * the group entity weight calculated above.
2947 *
2948 * Because of the above approximation (2), our group entity weight is
2949 * a load_avg based ratio (3). This means that it includes blocked load and
2950 * does not represent the runnable weight.
2951 *
2952 * Approximate the group entity's runnable weight per ratio from the group
2953 * runqueue:
2954 *
2955 * grq->avg.runnable_load_avg
2956 * ge->runnable_weight = ge->load.weight * -------------------------- (7)
2957 * grq->avg.load_avg
2958 *
2959 * However, analogous to above, since the avg numbers are slow, this leads to
2960 * transients in the from-idle case. Instead we use:
2961 *
2962 * ge->runnable_weight = ge->load.weight *
2963 *
2964 * max(grq->avg.runnable_load_avg, grq->runnable_weight)
2965 * ----------------------------------------------------- (8)
2966 * max(grq->avg.load_avg, grq->load.weight)
2967 *
2968 * Where these max() serve both to use the 'instant' values to fix the slow
2969 * from-idle and avoid the /0 on to-idle, similar to (6).
2c8e4dce
JB
2970 */
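/*
 * Worked example of (8), with illustrative numbers (not from the original
 * source): one nice-0 task is enqueued (grq->load.weight and
 * grq->runnable_weight both 1024) but blocked load from recently-slept
 * tasks makes grq->avg.load_avg = 2048 while grq->avg.runnable_load_avg
 * = 1024.  Then:
 *
 *   ge->runnable_weight = shares * max(1024, 1024) / max(2048, 1024)
 *                       = shares / 2
 *
 * i.e. only half of the group entity's weight counts as runnable,
 * matching the half of the group's load that is actually blocked.
 */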
2971static long calc_group_runnable(struct cfs_rq *cfs_rq, long shares)
2972{
17de4ee0
PZ
2973 long runnable, load_avg;
2974
2975 load_avg = max(cfs_rq->avg.load_avg,
2976 scale_load_down(cfs_rq->load.weight));
2977
2978 runnable = max(cfs_rq->avg.runnable_load_avg,
2979 scale_load_down(cfs_rq->runnable_weight));
2c8e4dce
JB
2980
2981 runnable *= shares;
2982 if (load_avg)
2983 runnable /= load_avg;
17de4ee0 2984
2c8e4dce
JB
2985 return clamp_t(long, runnable, MIN_SHARES, shares);
2986}
387f77cc 2987#endif /* CONFIG_SMP */
ea1dc6fc 2988
82958366
PT
2989static inline int throttled_hierarchy(struct cfs_rq *cfs_rq);
2990
1ea6c46a
PZ
2991/*
2992 * Recomputes the group entity based on the current state of its group
2993 * runqueue.
2994 */
2995static void update_cfs_group(struct sched_entity *se)
2069dd75 2996{
1ea6c46a
PZ
2997 struct cfs_rq *gcfs_rq = group_cfs_rq(se);
2998 long shares, runnable;
2069dd75 2999
1ea6c46a 3000 if (!gcfs_rq)
89ee048f
VG
3001 return;
3002
1ea6c46a 3003 if (throttled_hierarchy(gcfs_rq))
2069dd75 3004 return;
89ee048f 3005
3ff6dcac 3006#ifndef CONFIG_SMP
1ea6c46a 3007 runnable = shares = READ_ONCE(gcfs_rq->tg->shares);
7c80cfc9
PZ
3008
3009 if (likely(se->load.weight == shares))
3ff6dcac 3010 return;
7c80cfc9 3011#else
2c8e4dce
JB
3012 shares = calc_group_shares(gcfs_rq);
3013 runnable = calc_group_runnable(gcfs_rq, shares);
3ff6dcac 3014#endif
2069dd75 3015
1ea6c46a 3016 reweight_entity(cfs_rq_of(se), se, shares, runnable);
2069dd75 3017}
89ee048f 3018
2069dd75 3019#else /* CONFIG_FAIR_GROUP_SCHED */
1ea6c46a 3020static inline void update_cfs_group(struct sched_entity *se)
2069dd75
PZ
3021{
3022}
3023#endif /* CONFIG_FAIR_GROUP_SCHED */
3024
ea14b57e 3025static inline void cfs_rq_util_change(struct cfs_rq *cfs_rq, int flags)
a030d738 3026{
43964409
LT
3027 struct rq *rq = rq_of(cfs_rq);
3028
ea14b57e 3029 if (&rq->cfs == cfs_rq || (flags & SCHED_CPUFREQ_MIGRATION)) {
a030d738
VK
3030 /*
3031 * There are a few boundary cases this might miss but it should
3032 * get called often enough that that should (hopefully) not be
9783be2c 3033 * a real problem.
a030d738
VK
3034 *
3035 * It will not get called when we go idle, because the idle
3036 * thread is a different class (!fair), nor will the utilization
3037 * number include things like RT tasks.
3038 *
3039 * As is, the util number is not freq-invariant (we'd have to
3040 * implement arch_scale_freq_capacity() for that).
3041 *
3042 * See cpu_util().
3043 */
ea14b57e 3044 cpufreq_update_util(rq, flags);
a030d738
VK
3045 }
3046}
3047
141965c7 3048#ifdef CONFIG_SMP
c566e8e9 3049#ifdef CONFIG_FAIR_GROUP_SCHED
7c3edd2c
PZ
3050/**
3051 * update_tg_load_avg - update the tg's load avg
3052 * @cfs_rq: the cfs_rq whose avg changed
3053 * @force: update regardless of how small the difference
3054 *
3055 * This function 'ensures': tg->load_avg := \Sum tg->cfs_rq[]->avg.load.
3056 * However, because tg->load_avg is a global value there are performance
3057 * considerations.
3058 *
3059 * In order to avoid having to look at the other cfs_rq's, we use a
3060 * differential update where we store the last value we propagated. This in
3061 * turn allows skipping updates if the differential is 'small'.
3062 *
815abf5a 3063 * Updating tg's load_avg is necessary before update_cfs_group().
bb17f655 3064 */
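/*
 * For example (illustrative numbers): with tg_load_avg_contrib = 6400,
 * deltas of 100 (1/64th) or less are not propagated and the global
 * tg->load_avg atomic is left untouched, which keeps the shared
 * cacheline from bouncing on every small PELT update.
 */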
9d89c257 3065static inline void update_tg_load_avg(struct cfs_rq *cfs_rq, int force)
bb17f655 3066{
9d89c257 3067 long delta = cfs_rq->avg.load_avg - cfs_rq->tg_load_avg_contrib;
bb17f655 3068
aa0b7ae0
WL
3069 /*
3070 * No need to update load_avg for root_task_group as it is not used.
3071 */
3072 if (cfs_rq->tg == &root_task_group)
3073 return;
3074
9d89c257
YD
3075 if (force || abs(delta) > cfs_rq->tg_load_avg_contrib / 64) {
3076 atomic_long_add(delta, &cfs_rq->tg->load_avg);
3077 cfs_rq->tg_load_avg_contrib = cfs_rq->avg.load_avg;
bb17f655 3078 }
8165e145 3079}
f5f9739d 3080
ad936d86 3081/*
97fb7a0a 3082 * Called within set_task_rq() right before setting a task's CPU. The
ad936d86
BP
3083 * caller only guarantees p->pi_lock is held; no other assumptions,
3084 * including the state of rq->lock, should be made.
3085 */
3086void set_task_rq_fair(struct sched_entity *se,
3087 struct cfs_rq *prev, struct cfs_rq *next)
3088{
0ccb977f
PZ
3089 u64 p_last_update_time;
3090 u64 n_last_update_time;
3091
ad936d86
BP
3092 if (!sched_feat(ATTACH_AGE_LOAD))
3093 return;
3094
3095 /*
3096	 * We are supposed to update the task to "current" time, then it's up to
3097	 * date and ready to go to a new CPU/cfs_rq. But we have difficulty in
3098	 * getting what the current time is, so simply throw away the out-of-date
3099	 * time. This will result in the wakee task being less decayed, but giving
3100	 * the wakee more load does not sound bad.
3101 */
0ccb977f
PZ
3102 if (!(se->avg.last_update_time && prev))
3103 return;
ad936d86
BP
3104
3105#ifndef CONFIG_64BIT
0ccb977f 3106 {
ad936d86
BP
3107 u64 p_last_update_time_copy;
3108 u64 n_last_update_time_copy;
3109
3110 do {
3111 p_last_update_time_copy = prev->load_last_update_time_copy;
3112 n_last_update_time_copy = next->load_last_update_time_copy;
3113
3114 smp_rmb();
3115
3116 p_last_update_time = prev->avg.last_update_time;
3117 n_last_update_time = next->avg.last_update_time;
3118
3119 } while (p_last_update_time != p_last_update_time_copy ||
3120 n_last_update_time != n_last_update_time_copy);
0ccb977f 3121 }
ad936d86 3122#else
0ccb977f
PZ
3123 p_last_update_time = prev->avg.last_update_time;
3124 n_last_update_time = next->avg.last_update_time;
ad936d86 3125#endif
0ccb977f
PZ
3126 __update_load_avg_blocked_se(p_last_update_time, cpu_of(rq_of(prev)), se);
3127 se->avg.last_update_time = n_last_update_time;
ad936d86 3128}
09a43ace 3129
0e2d2aaa
PZ
3130
3131/*
3132 * When on migration a sched_entity joins/leaves the PELT hierarchy, we need to
3133 * propagate its contribution. The key to this propagation is the invariant
3134 * that for each group:
3135 *
3136 * ge->avg == grq->avg (1)
3137 *
3138 * _IFF_ we look at the pure running and runnable sums. Because they
3139 * represent the very same entity, just at different points in the hierarchy.
3140 *
a4c3c049
VG
3141 * Per the above update_tg_cfs_util() is trivial and simply copies the running
3142 * sum over (but still wrong, because the group entity and group rq do not have
3143 * their PELT windows aligned).
0e2d2aaa
PZ
3144 *
3145 * However, update_tg_cfs_runnable() is more complex. So we have:
3146 *
3147 * ge->avg.load_avg = ge->load.weight * ge->avg.runnable_avg (2)
3148 *
3149 * And since, like util, the runnable part should be directly transferable,
3150 * the following would _appear_ to be the straightforward approach:
3151 *
a4c3c049 3152 * grq->avg.load_avg = grq->load.weight * grq->avg.runnable_avg (3)
0e2d2aaa
PZ
3153 *
3154 * And per (1) we have:
3155 *
a4c3c049 3156 * ge->avg.runnable_avg == grq->avg.runnable_avg
0e2d2aaa
PZ
3157 *
3158 * Which gives:
3159 *
3160 * ge->load.weight * grq->avg.load_avg
3161 * ge->avg.load_avg = ----------------------------------- (4)
3162 * grq->load.weight
3163 *
3164 * Except that is wrong!
3165 *
3166 * Because while for entities historical weight is not important and we
3167 * really only care about our future and therefore can consider a pure
3168 * runnable sum, runqueues can NOT do this.
3169 *
3170 * We specifically want runqueues to have a load_avg that includes
3171 * historical weights. Those represent the blocked load, the load we expect
3172 * to (shortly) return to us. This only works by keeping the weights as
3173 * integral part of the sum. We therefore cannot decompose as per (3).
3174 *
a4c3c049
VG
3175 * Another reason this doesn't work is that runnable isn't a 0-sum entity.
3176 * Imagine a rq with 2 tasks that each are runnable 2/3 of the time. Then the
3177 * rq itself is runnable anywhere between 2/3 and 1 depending on how the
3178 * runnable section of these tasks overlap (or not). If they were to perfectly
3179 * align the rq as a whole would be runnable 2/3 of the time. If however we
3180 * always have at least 1 runnable task, the rq as a whole is always runnable.
0e2d2aaa 3181 *
a4c3c049 3182 * So we'll have to approximate.. :/
0e2d2aaa 3183 *
a4c3c049 3184 * Given the constraint:
0e2d2aaa 3185 *
a4c3c049 3186 * ge->avg.running_sum <= ge->avg.runnable_sum <= LOAD_AVG_MAX
0e2d2aaa 3187 *
a4c3c049
VG
3188 * We can construct a rule that adds runnable to a rq by assuming minimal
3189 * overlap.
0e2d2aaa 3190 *
a4c3c049 3191 * On removal, we'll assume each task is equally runnable; which yields:
0e2d2aaa 3192 *
a4c3c049 3193 * grq->avg.runnable_sum = grq->avg.load_sum / grq->load.weight
0e2d2aaa 3194 *
a4c3c049 3195 * XXX: only do this for the part of runnable > running ?
0e2d2aaa 3196 *
0e2d2aaa
PZ
3197 */
3198
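/*
 * Illustrative reading of the two rules above (editorial sketch, not from
 * the original source): on attach, the child's runnable_sum is simply added
 * to the group entity's own load_sum and clipped at LOAD_AVG_MAX, i.e.
 * minimal overlap is assumed and the group entity can never appear more
 * than 100% runnable.  On detach, e.g. a group rq holding two nice-0 tasks
 * whose weighted load_sum corresponds to each being runnable roughly half
 * the time, load_sum / load.weight recovers ~"half the time" as the
 * unweighted runnable_sum removed for the departing entity.
 */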
09a43ace 3199static inline void
0e2d2aaa 3200update_tg_cfs_util(struct cfs_rq *cfs_rq, struct sched_entity *se, struct cfs_rq *gcfs_rq)
09a43ace 3201{
09a43ace
VG
3202 long delta = gcfs_rq->avg.util_avg - se->avg.util_avg;
3203
3204 /* Nothing to update */
3205 if (!delta)
3206 return;
3207
a4c3c049
VG
3208 /*
3209 * The relation between sum and avg is:
3210 *
3211 * LOAD_AVG_MAX - 1024 + sa->period_contrib
3212 *
3213 * however, the PELT windows are not aligned between grq and gse.
3214 */
3215
09a43ace
VG
3216 /* Set new sched_entity's utilization */
3217 se->avg.util_avg = gcfs_rq->avg.util_avg;
3218 se->avg.util_sum = se->avg.util_avg * LOAD_AVG_MAX;
3219
3220 /* Update parent cfs_rq utilization */
3221 add_positive(&cfs_rq->avg.util_avg, delta);
3222 cfs_rq->avg.util_sum = cfs_rq->avg.util_avg * LOAD_AVG_MAX;
3223}
3224
09a43ace 3225static inline void
0e2d2aaa 3226update_tg_cfs_runnable(struct cfs_rq *cfs_rq, struct sched_entity *se, struct cfs_rq *gcfs_rq)
09a43ace 3227{
a4c3c049
VG
3228 long delta_avg, running_sum, runnable_sum = gcfs_rq->prop_runnable_sum;
3229 unsigned long runnable_load_avg, load_avg;
3230 u64 runnable_load_sum, load_sum = 0;
3231 s64 delta_sum;
09a43ace 3232
0e2d2aaa
PZ
3233 if (!runnable_sum)
3234 return;
09a43ace 3235
0e2d2aaa 3236 gcfs_rq->prop_runnable_sum = 0;
09a43ace 3237
a4c3c049
VG
3238 if (runnable_sum >= 0) {
3239 /*
3240 * Add runnable; clip at LOAD_AVG_MAX. Reflects that until
3241 * the CPU is saturated running == runnable.
3242 */
3243 runnable_sum += se->avg.load_sum;
3244 runnable_sum = min(runnable_sum, (long)LOAD_AVG_MAX);
3245 } else {
3246 /*
3247 * Estimate the new unweighted runnable_sum of the gcfs_rq by
3248 * assuming all tasks are equally runnable.
3249 */
3250 if (scale_load_down(gcfs_rq->load.weight)) {
3251 load_sum = div_s64(gcfs_rq->avg.load_sum,
3252 scale_load_down(gcfs_rq->load.weight));
3253 }
3254
3255 /* But make sure to not inflate se's runnable */
3256 runnable_sum = min(se->avg.load_sum, load_sum);
3257 }
3258
3259 /*
3260 * runnable_sum can't be lower than running_sum
97fb7a0a 3261	 * As the running sum is scaled with CPU capacity whereas the runnable sum
a4c3c049
VG
3262	 * is not, we rescale running_sum first
3263 */
3264 running_sum = se->avg.util_sum /
3265 arch_scale_cpu_capacity(NULL, cpu_of(rq_of(cfs_rq)));
3266 runnable_sum = max(runnable_sum, running_sum);
3267
0e2d2aaa
PZ
3268 load_sum = (s64)se_weight(se) * runnable_sum;
3269 load_avg = div_s64(load_sum, LOAD_AVG_MAX);
09a43ace 3270
a4c3c049
VG
3271 delta_sum = load_sum - (s64)se_weight(se) * se->avg.load_sum;
3272 delta_avg = load_avg - se->avg.load_avg;
09a43ace 3273
a4c3c049
VG
3274 se->avg.load_sum = runnable_sum;
3275 se->avg.load_avg = load_avg;
3276 add_positive(&cfs_rq->avg.load_avg, delta_avg);
3277 add_positive(&cfs_rq->avg.load_sum, delta_sum);
09a43ace 3278
1ea6c46a
PZ
3279 runnable_load_sum = (s64)se_runnable(se) * runnable_sum;
3280 runnable_load_avg = div_s64(runnable_load_sum, LOAD_AVG_MAX);
a4c3c049
VG
3281 delta_sum = runnable_load_sum - se_weight(se) * se->avg.runnable_load_sum;
3282 delta_avg = runnable_load_avg - se->avg.runnable_load_avg;
1ea6c46a 3283
a4c3c049
VG
3284 se->avg.runnable_load_sum = runnable_sum;
3285 se->avg.runnable_load_avg = runnable_load_avg;
1ea6c46a 3286
09a43ace 3287 if (se->on_rq) {
a4c3c049
VG
3288 add_positive(&cfs_rq->avg.runnable_load_avg, delta_avg);
3289 add_positive(&cfs_rq->avg.runnable_load_sum, delta_sum);
09a43ace
VG
3290 }
3291}
3292
0e2d2aaa 3293static inline void add_tg_cfs_propagate(struct cfs_rq *cfs_rq, long runnable_sum)
09a43ace 3294{
0e2d2aaa
PZ
3295 cfs_rq->propagate = 1;
3296 cfs_rq->prop_runnable_sum += runnable_sum;
09a43ace
VG
3297}
3298
3299/* Update task and its cfs_rq load average */
3300static inline int propagate_entity_load_avg(struct sched_entity *se)
3301{
0e2d2aaa 3302 struct cfs_rq *cfs_rq, *gcfs_rq;
09a43ace
VG
3303
3304 if (entity_is_task(se))
3305 return 0;
3306
0e2d2aaa
PZ
3307 gcfs_rq = group_cfs_rq(se);
3308 if (!gcfs_rq->propagate)
09a43ace
VG
3309 return 0;
3310
0e2d2aaa
PZ
3311 gcfs_rq->propagate = 0;
3312
09a43ace
VG
3313 cfs_rq = cfs_rq_of(se);
3314
0e2d2aaa 3315 add_tg_cfs_propagate(cfs_rq, gcfs_rq->prop_runnable_sum);
09a43ace 3316
0e2d2aaa
PZ
3317 update_tg_cfs_util(cfs_rq, se, gcfs_rq);
3318 update_tg_cfs_runnable(cfs_rq, se, gcfs_rq);
09a43ace
VG
3319
3320 return 1;
3321}
3322
bc427898
VG
3323/*
3324 * Check if we need to update the load and the utilization of a blocked
3325 * group_entity:
3326 */
3327static inline bool skip_blocked_update(struct sched_entity *se)
3328{
3329 struct cfs_rq *gcfs_rq = group_cfs_rq(se);
3330
3331 /*
 3332	 * If the sched_entity still has a non-zero load or utilization, we have to
3333 * decay it:
3334 */
3335 if (se->avg.load_avg || se->avg.util_avg)
3336 return false;
3337
3338 /*
3339 * If there is a pending propagation, we have to update the load and
3340 * the utilization of the sched_entity:
3341 */
0e2d2aaa 3342 if (gcfs_rq->propagate)
bc427898
VG
3343 return false;
3344
3345 /*
 3346	 * Otherwise, the load and the utilization of the sched_entity are
3347 * already zero and there is no pending propagation, so it will be a
3348 * waste of time to try to decay it:
3349 */
3350 return true;
3351}
3352
6e83125c 3353#else /* CONFIG_FAIR_GROUP_SCHED */
09a43ace 3354
9d89c257 3355static inline void update_tg_load_avg(struct cfs_rq *cfs_rq, int force) {}
09a43ace
VG
3356
3357static inline int propagate_entity_load_avg(struct sched_entity *se)
3358{
3359 return 0;
3360}
3361
0e2d2aaa 3362static inline void add_tg_cfs_propagate(struct cfs_rq *cfs_rq, long runnable_sum) {}
09a43ace 3363
6e83125c 3364#endif /* CONFIG_FAIR_GROUP_SCHED */
c566e8e9 3365
3d30544f
PZ
3366/**
3367 * update_cfs_rq_load_avg - update the cfs_rq's load/util averages
3368 * @now: current time, as per cfs_rq_clock_task()
3369 * @cfs_rq: cfs_rq to update
3d30544f
PZ
3370 *
3371 * The cfs_rq avg is the direct sum of all its entities (blocked and runnable)
3372 * avg. The immediate corollary is that all (fair) tasks must be attached, see
3373 * post_init_entity_util_avg().
3374 *
3375 * cfs_rq->avg is used for task_h_load() and update_cfs_share() for example.
3376 *
7c3edd2c
PZ
3377 * Returns true if the load decayed or we removed load.
3378 *
3379 * Since both these conditions indicate a changed cfs_rq->avg.load we should
3380 * call update_tg_load_avg() when this function returns true.
3d30544f 3381 */
a2c6c91f 3382static inline int
3a123bbb 3383update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq)
2dac754e 3384{
0e2d2aaa 3385 unsigned long removed_load = 0, removed_util = 0, removed_runnable_sum = 0;
9d89c257 3386 struct sched_avg *sa = &cfs_rq->avg;
2a2f5d4e 3387 int decayed = 0;
2dac754e 3388
2a2f5d4e
PZ
3389 if (cfs_rq->removed.nr) {
3390 unsigned long r;
9a2dd585 3391 u32 divider = LOAD_AVG_MAX - 1024 + sa->period_contrib;
2a2f5d4e
PZ
3392
3393 raw_spin_lock(&cfs_rq->removed.lock);
3394 swap(cfs_rq->removed.util_avg, removed_util);
3395 swap(cfs_rq->removed.load_avg, removed_load);
0e2d2aaa 3396 swap(cfs_rq->removed.runnable_sum, removed_runnable_sum);
2a2f5d4e
PZ
3397 cfs_rq->removed.nr = 0;
3398 raw_spin_unlock(&cfs_rq->removed.lock);
3399
2a2f5d4e 3400 r = removed_load;
89741892 3401 sub_positive(&sa->load_avg, r);
9a2dd585 3402 sub_positive(&sa->load_sum, r * divider);
2dac754e 3403
2a2f5d4e 3404 r = removed_util;
89741892 3405 sub_positive(&sa->util_avg, r);
9a2dd585 3406 sub_positive(&sa->util_sum, r * divider);
2a2f5d4e 3407
0e2d2aaa 3408 add_tg_cfs_propagate(cfs_rq, -(long)removed_runnable_sum);
2a2f5d4e
PZ
3409
3410 decayed = 1;
9d89c257 3411 }
36ee28e4 3412
2a2f5d4e 3413 decayed |= __update_load_avg_cfs_rq(now, cpu_of(rq_of(cfs_rq)), cfs_rq);
36ee28e4 3414
9d89c257
YD
3415#ifndef CONFIG_64BIT
3416 smp_wmb();
3417 cfs_rq->load_last_update_time_copy = sa->last_update_time;
3418#endif
36ee28e4 3419
2a2f5d4e 3420 if (decayed)
ea14b57e 3421 cfs_rq_util_change(cfs_rq, 0);
21e96f88 3422
2a2f5d4e 3423 return decayed;
21e96f88
SM
3424}
3425
3d30544f
PZ
3426/**
3427 * attach_entity_load_avg - attach this entity to its cfs_rq load avg
3428 * @cfs_rq: cfs_rq to attach to
3429 * @se: sched_entity to attach
882a78a9 3430 * @flags: migration hints
3d30544f
PZ
3431 *
3432 * Must call update_cfs_rq_load_avg() before this, since we rely on
3433 * cfs_rq->avg.last_update_time being current.
3434 */
ea14b57e 3435static void attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
a05e8c51 3436{
f207934f
PZ
3437 u32 divider = LOAD_AVG_MAX - 1024 + cfs_rq->avg.period_contrib;
3438
3439 /*
3440 * When we attach the @se to the @cfs_rq, we must align the decay
3441 * window because without that, really weird and wonderful things can
3442 * happen.
3443 *
3444 * XXX illustrate
3445 */
a05e8c51 3446 se->avg.last_update_time = cfs_rq->avg.last_update_time;
f207934f
PZ
3447 se->avg.period_contrib = cfs_rq->avg.period_contrib;
3448
3449 /*
3450 * Hell(o) Nasty stuff.. we need to recompute _sum based on the new
3451 * period_contrib. This isn't strictly correct, but since we're
3452 * entirely outside of the PELT hierarchy, nobody cares if we truncate
3453 * _sum a little.
3454 */
3455 se->avg.util_sum = se->avg.util_avg * divider;
3456
3457 se->avg.load_sum = divider;
3458 if (se_weight(se)) {
3459 se->avg.load_sum =
3460 div_u64(se->avg.load_avg * se->avg.load_sum, se_weight(se));
3461 }
3462
3463 se->avg.runnable_load_sum = se->avg.load_sum;
3464
8d5b9025 3465 enqueue_load_avg(cfs_rq, se);
a05e8c51
BP
3466 cfs_rq->avg.util_avg += se->avg.util_avg;
3467 cfs_rq->avg.util_sum += se->avg.util_sum;
0e2d2aaa
PZ
3468
3469 add_tg_cfs_propagate(cfs_rq, se->avg.load_sum);
a2c6c91f 3470
ea14b57e 3471 cfs_rq_util_change(cfs_rq, flags);
a05e8c51
BP
3472}
3473
3d30544f
PZ
3474/**
3475 * detach_entity_load_avg - detach this entity from its cfs_rq load avg
3476 * @cfs_rq: cfs_rq to detach from
3477 * @se: sched_entity to detach
3478 *
3479 * Must call update_cfs_rq_load_avg() before this, since we rely on
3480 * cfs_rq->avg.last_update_time being current.
3481 */
a05e8c51
BP
3482static void detach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
3483{
8d5b9025 3484 dequeue_load_avg(cfs_rq, se);
89741892
PZ
3485 sub_positive(&cfs_rq->avg.util_avg, se->avg.util_avg);
3486 sub_positive(&cfs_rq->avg.util_sum, se->avg.util_sum);
0e2d2aaa
PZ
3487
3488 add_tg_cfs_propagate(cfs_rq, -se->avg.load_sum);
a2c6c91f 3489
ea14b57e 3490 cfs_rq_util_change(cfs_rq, 0);
a05e8c51
BP
3491}
3492
b382a531
PZ
3493/*
3494 * Optional action to be done while updating the load average
3495 */
3496#define UPDATE_TG 0x1
3497#define SKIP_AGE_LOAD 0x2
3498#define DO_ATTACH 0x4
3499
3500/* Update task and its cfs_rq load average */
3501static inline void update_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
3502{
3503 u64 now = cfs_rq_clock_task(cfs_rq);
3504 struct rq *rq = rq_of(cfs_rq);
3505 int cpu = cpu_of(rq);
3506 int decayed;
3507
3508 /*
 3509	 * Track task load average for carrying it to the new CPU after migration, and
 3510	 * track group sched_entity load average for the task_h_load calculation in migration
3511 */
3512 if (se->avg.last_update_time && !(flags & SKIP_AGE_LOAD))
3513 __update_load_avg_se(now, cpu, cfs_rq, se);
3514
3515 decayed = update_cfs_rq_load_avg(now, cfs_rq);
3516 decayed |= propagate_entity_load_avg(se);
3517
3518 if (!se->avg.last_update_time && (flags & DO_ATTACH)) {
3519
ea14b57e
PZ
3520 /*
3521 * DO_ATTACH means we're here from enqueue_entity().
3522 * !last_update_time means we've passed through
3523 * migrate_task_rq_fair() indicating we migrated.
3524 *
3525 * IOW we're enqueueing a task on a new CPU.
3526 */
3527 attach_entity_load_avg(cfs_rq, se, SCHED_CPUFREQ_MIGRATION);
b382a531
PZ
3528 update_tg_load_avg(cfs_rq, 0);
3529
3530 } else if (decayed && (flags & UPDATE_TG))
3531 update_tg_load_avg(cfs_rq, 0);
3532}
3533
9d89c257 3534#ifndef CONFIG_64BIT
0905f04e
YD
3535static inline u64 cfs_rq_last_update_time(struct cfs_rq *cfs_rq)
3536{
9d89c257 3537 u64 last_update_time_copy;
0905f04e 3538 u64 last_update_time;
9ee474f5 3539
9d89c257
YD
3540 do {
3541 last_update_time_copy = cfs_rq->load_last_update_time_copy;
3542 smp_rmb();
3543 last_update_time = cfs_rq->avg.last_update_time;
3544 } while (last_update_time != last_update_time_copy);
0905f04e
YD
3545
3546 return last_update_time;
3547}
9d89c257 3548#else
0905f04e
YD
3549static inline u64 cfs_rq_last_update_time(struct cfs_rq *cfs_rq)
3550{
3551 return cfs_rq->avg.last_update_time;
3552}
9d89c257
YD
3553#endif
3554
104cb16d
MR
3555/*
3556 * Synchronize entity load avg of dequeued entity without locking
3557 * the previous rq.
3558 */
3559void sync_entity_load_avg(struct sched_entity *se)
3560{
3561 struct cfs_rq *cfs_rq = cfs_rq_of(se);
3562 u64 last_update_time;
3563
3564 last_update_time = cfs_rq_last_update_time(cfs_rq);
0ccb977f 3565 __update_load_avg_blocked_se(last_update_time, cpu_of(rq_of(cfs_rq)), se);
104cb16d
MR
3566}
3567
0905f04e
YD
3568/*
 3569	 * Task first catches up with cfs_rq, and then subtracts
3570 * itself from the cfs_rq (task must be off the queue now).
3571 */
3572void remove_entity_load_avg(struct sched_entity *se)
3573{
3574 struct cfs_rq *cfs_rq = cfs_rq_of(se);
2a2f5d4e 3575 unsigned long flags;
0905f04e
YD
3576
3577 /*
7dc603c9
PZ
3578 * tasks cannot exit without having gone through wake_up_new_task() ->
3579 * post_init_entity_util_avg() which will have added things to the
3580 * cfs_rq, so we can remove unconditionally.
3581 *
3582 * Similarly for groups, they will have passed through
3583 * post_init_entity_util_avg() before unregister_sched_fair_group()
3584 * calls this.
0905f04e 3585 */
0905f04e 3586
104cb16d 3587 sync_entity_load_avg(se);
2a2f5d4e
PZ
3588
3589 raw_spin_lock_irqsave(&cfs_rq->removed.lock, flags);
3590 ++cfs_rq->removed.nr;
3591 cfs_rq->removed.util_avg += se->avg.util_avg;
3592 cfs_rq->removed.load_avg += se->avg.load_avg;
0e2d2aaa 3593 cfs_rq->removed.runnable_sum += se->avg.load_sum; /* == runnable_sum */
2a2f5d4e 3594 raw_spin_unlock_irqrestore(&cfs_rq->removed.lock, flags);
2dac754e 3595}
642dbc39 3596
7ea241af
YD
3597static inline unsigned long cfs_rq_runnable_load_avg(struct cfs_rq *cfs_rq)
3598{
1ea6c46a 3599 return cfs_rq->avg.runnable_load_avg;
7ea241af
YD
3600}
3601
3602static inline unsigned long cfs_rq_load_avg(struct cfs_rq *cfs_rq)
3603{
3604 return cfs_rq->avg.load_avg;
3605}
3606
46f69fa3 3607static int idle_balance(struct rq *this_rq, struct rq_flags *rf);
6e83125c 3608
7f65ea42
PB
3609static inline unsigned long task_util(struct task_struct *p)
3610{
3611 return READ_ONCE(p->se.avg.util_avg);
3612}
3613
3614static inline unsigned long _task_util_est(struct task_struct *p)
3615{
3616 struct util_est ue = READ_ONCE(p->se.avg.util_est);
3617
92a801e5 3618 return (max(ue.ewma, ue.enqueued) | UTIL_AVG_UNCHANGED);
7f65ea42
PB
3619}
3620
3621static inline unsigned long task_util_est(struct task_struct *p)
3622{
3623 return max(task_util(p), _task_util_est(p));
3624}
3625
3626static inline void util_est_enqueue(struct cfs_rq *cfs_rq,
3627 struct task_struct *p)
3628{
3629 unsigned int enqueued;
3630
3631 if (!sched_feat(UTIL_EST))
3632 return;
3633
3634 /* Update root cfs_rq's estimated utilization */
3635 enqueued = cfs_rq->avg.util_est.enqueued;
92a801e5 3636 enqueued += _task_util_est(p);
7f65ea42
PB
3637 WRITE_ONCE(cfs_rq->avg.util_est.enqueued, enqueued);
3638}
3639
3640/*
3641 * Check if a (signed) value is within a specified (unsigned) margin,
3642 * based on the observation that:
3643 *
3644 * abs(x) < y := (unsigned)(x + y - 1) < (2 * y - 1)
3645 *
 3646	 * NOTE: this only works when value + margin < INT_MAX.
3647 */
3648static inline bool within_margin(int value, int margin)
3649{
3650 return ((unsigned int)(value + margin - 1) < (2 * margin - 1));
3651}
3652
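/*
 * Illustrative aside (not part of the kernel source): a minimal user-space
 * sketch that checks the abs(x) < y identity used by within_margin() above
 * on a few made-up sample values. The helper name within_margin_demo() and
 * the sample values are assumptions for illustration only.
 */
#include <assert.h>
#include <stdlib.h>

static int within_margin_demo(int value, int margin)
{
	/* same expression as within_margin() above */
	return ((unsigned int)(value + margin - 1) < (2 * margin - 1));
}

int main(void)
{
	/* abs(x) < y  <=>  (unsigned)(x + y - 1) < 2*y - 1, while value + margin < INT_MAX */
	assert(within_margin_demo(  5, 10) == (abs(  5) < 10));
	assert(within_margin_demo( -9, 10) == (abs( -9) < 10));
	assert(within_margin_demo( 10, 10) == (abs( 10) < 10));
	assert(within_margin_demo(-10, 10) == (abs(-10) < 10));
	return 0;
}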
3653static void
3654util_est_dequeue(struct cfs_rq *cfs_rq, struct task_struct *p, bool task_sleep)
3655{
3656 long last_ewma_diff;
3657 struct util_est ue;
3658
3659 if (!sched_feat(UTIL_EST))
3660 return;
3661
3482d98b
VG
3662 /* Update root cfs_rq's estimated utilization */
3663 ue.enqueued = cfs_rq->avg.util_est.enqueued;
92a801e5 3664 ue.enqueued -= min_t(unsigned int, ue.enqueued, _task_util_est(p));
7f65ea42
PB
3665 WRITE_ONCE(cfs_rq->avg.util_est.enqueued, ue.enqueued);
3666
3667 /*
3668 * Skip update of task's estimated utilization when the task has not
3669 * yet completed an activation, e.g. being migrated.
3670 */
3671 if (!task_sleep)
3672 return;
3673
d519329f
PB
3674 /*
3675 * If the PELT values haven't changed since enqueue time,
3676 * skip the util_est update.
3677 */
3678 ue = p->se.avg.util_est;
3679 if (ue.enqueued & UTIL_AVG_UNCHANGED)
3680 return;
3681
7f65ea42
PB
3682 /*
3683 * Skip update of task's estimated utilization when its EWMA is
 3684	 * already within ~1% of its last activation value.
3685 */
d519329f 3686 ue.enqueued = (task_util(p) | UTIL_AVG_UNCHANGED);
7f65ea42
PB
3687 last_ewma_diff = ue.enqueued - ue.ewma;
3688 if (within_margin(last_ewma_diff, (SCHED_CAPACITY_SCALE / 100)))
3689 return;
3690
3691 /*
3692 * Update Task's estimated utilization
3693 *
3694 * When *p completes an activation we can consolidate another sample
3695 * of the task size. This is done by storing the current PELT value
3696 * as ue.enqueued and by using this value to update the Exponential
3697 * Weighted Moving Average (EWMA):
3698 *
3699 * ewma(t) = w * task_util(p) + (1-w) * ewma(t-1)
3700 * = w * task_util(p) + ewma(t-1) - w * ewma(t-1)
3701 * = w * (task_util(p) - ewma(t-1)) + ewma(t-1)
3702 * = w * ( last_ewma_diff ) + ewma(t-1)
3703 * = w * (last_ewma_diff + ewma(t-1) / w)
3704 *
3705 * Where 'w' is the weight of new samples, which is configured to be
3706 * 0.25, thus making w=1/4 ( >>= UTIL_EST_WEIGHT_SHIFT)
3707 */
3708 ue.ewma <<= UTIL_EST_WEIGHT_SHIFT;
3709 ue.ewma += last_ewma_diff;
3710 ue.ewma >>= UTIL_EST_WEIGHT_SHIFT;
3711 WRITE_ONCE(p->se.avg.util_est, ue);
3712}
3713
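/*
 * Illustrative aside (not kernel code): a user-space sketch of the EWMA
 * update performed in util_est_dequeue() above, with w = 1/4. The shift
 * value of 2 and the utilization samples are assumed numbers for
 * illustration only.
 */
#include <assert.h>

#define DEMO_UTIL_EST_WEIGHT_SHIFT	2

static unsigned long ewma_step(unsigned long ewma, unsigned long task_util)
{
	long last_ewma_diff = (long)task_util - (long)ewma;

	/* ewma(t) = w * task_util + (1 - w) * ewma(t-1), with w = 1/4 */
	ewma <<= DEMO_UTIL_EST_WEIGHT_SHIFT;
	ewma += last_ewma_diff;
	ewma >>= DEMO_UTIL_EST_WEIGHT_SHIFT;

	return ewma;
}

int main(void)
{
	/* 0.25 * 600 + 0.75 * 400 = 450 */
	assert(ewma_step(400, 600) == 450);
	/* a falling sample pulls the estimate down by a quarter of the gap */
	assert(ewma_step(450, 50) == 350);
	return 0;
}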
3b1baa64
MR
3714static inline int task_fits_capacity(struct task_struct *p, long capacity)
3715{
3716 return capacity * 1024 > task_util_est(p) * capacity_margin;
3717}
3718
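/*
 * Illustrative aside (not kernel code): a user-space sketch of the
 * fits-capacity test above. capacity_margin is assumed here to be 1280
 * (roughly 25% headroom in units of 1/1024); the utilization and capacity
 * numbers are made up.
 */
#include <assert.h>

#define DEMO_CAPACITY_MARGIN	1280	/* assumed value, units of 1/1024 */

static int task_fits_capacity_demo(unsigned long task_util_est, long capacity)
{
	/* fits if util * margin stays below capacity * 1024 */
	return capacity * 1024 > task_util_est * DEMO_CAPACITY_MARGIN;
}

int main(void)
{
	/* a ~300-util task fits a 430-capacity CPU: 440320 > 384000 */
	assert(task_fits_capacity_demo(300, 430));
	/* ...but not a 370-capacity CPU: 378880 < 384000, i.e. a misfit task */
	assert(!task_fits_capacity_demo(300, 370));
	return 0;
}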
3719static inline void update_misfit_status(struct task_struct *p, struct rq *rq)
3720{
3721 if (!static_branch_unlikely(&sched_asym_cpucapacity))
3722 return;
3723
3724 if (!p) {
3725 rq->misfit_task_load = 0;
3726 return;
3727 }
3728
3729 if (task_fits_capacity(p, capacity_of(cpu_of(rq)))) {
3730 rq->misfit_task_load = 0;
3731 return;
3732 }
3733
3734 rq->misfit_task_load = task_h_load(p);
3735}
3736
38033c37
PZ
3737#else /* CONFIG_SMP */
3738
d31b1a66
VG
3739#define UPDATE_TG 0x0
3740#define SKIP_AGE_LOAD 0x0
b382a531 3741#define DO_ATTACH 0x0
d31b1a66 3742
88c0616e 3743static inline void update_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se, int not_used1)
536bd00c 3744{
ea14b57e 3745 cfs_rq_util_change(cfs_rq, 0);
536bd00c
RW
3746}
3747
9d89c257 3748static inline void remove_entity_load_avg(struct sched_entity *se) {}
6e83125c 3749
a05e8c51 3750static inline void
ea14b57e 3751attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) {}
a05e8c51
BP
3752static inline void
3753detach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) {}
3754
46f69fa3 3755static inline int idle_balance(struct rq *rq, struct rq_flags *rf)
6e83125c
PZ
3756{
3757 return 0;
3758}
3759
7f65ea42
PB
3760static inline void
3761util_est_enqueue(struct cfs_rq *cfs_rq, struct task_struct *p) {}
3762
3763static inline void
3764util_est_dequeue(struct cfs_rq *cfs_rq, struct task_struct *p,
3765 bool task_sleep) {}
3b1baa64 3766static inline void update_misfit_status(struct task_struct *p, struct rq *rq) {}
7f65ea42 3767
38033c37 3768#endif /* CONFIG_SMP */
9d85f21c 3769
ddc97297
PZ
3770static void check_spread(struct cfs_rq *cfs_rq, struct sched_entity *se)
3771{
3772#ifdef CONFIG_SCHED_DEBUG
3773 s64 d = se->vruntime - cfs_rq->min_vruntime;
3774
3775 if (d < 0)
3776 d = -d;
3777
3778 if (d > 3*sysctl_sched_latency)
ae92882e 3779 schedstat_inc(cfs_rq->nr_spread_over);
ddc97297
PZ
3780#endif
3781}
3782
aeb73b04
PZ
3783static void
3784place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
3785{
1af5f730 3786 u64 vruntime = cfs_rq->min_vruntime;
94dfb5e7 3787
2cb8600e
PZ
3788 /*
3789 * The 'current' period is already promised to the current tasks,
3790 * however the extra weight of the new task will slow them down a
3791 * little, place the new task so that it fits in the slot that
3792 * stays open at the end.
3793 */
94dfb5e7 3794 if (initial && sched_feat(START_DEBIT))
f9c0b095 3795 vruntime += sched_vslice(cfs_rq, se);
aeb73b04 3796
a2e7a7eb 3797 /* sleeps up to a single latency don't count. */
5ca9880c 3798 if (!initial) {
a2e7a7eb 3799 unsigned long thresh = sysctl_sched_latency;
a7be37ac 3800
a2e7a7eb
MG
3801 /*
3802 * Halve their sleep time's effect, to allow
3803 * for a gentler effect of sleepers:
3804 */
3805 if (sched_feat(GENTLE_FAIR_SLEEPERS))
3806 thresh >>= 1;
51e0304c 3807
a2e7a7eb 3808 vruntime -= thresh;
aeb73b04
PZ
3809 }
3810
b5d9d734 3811 /* ensure we never gain time by being placed backwards. */
16c8f1c7 3812 se->vruntime = max_vruntime(se->vruntime, vruntime);
aeb73b04
PZ
3813}
3814
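/*
 * Illustrative aside (not kernel code): a user-space sketch of the
 * place_entity() policy above. The numbers (min_vruntime, a 6ms latency,
 * a 1.5ms vslice) are made up, and START_DEBIT plus GENTLE_FAIR_SLEEPERS
 * are assumed enabled.
 */
#include <assert.h>

typedef unsigned long long u64_demo;

static u64_demo max_vruntime_demo(u64_demo a, u64_demo b)
{
	return (long long)(a - b) > 0 ? a : b;
}

static u64_demo place_demo(u64_demo min_vruntime, u64_demo se_vruntime,
			   int initial, u64_demo vslice, u64_demo latency)
{
	u64_demo vruntime = min_vruntime;

	if (initial)			/* START_DEBIT */
		vruntime += vslice;
	else				/* GENTLE_FAIR_SLEEPERS: half the latency credit */
		vruntime -= latency / 2;

	/* never gain time by being placed backwards */
	return max_vruntime_demo(se_vruntime, vruntime);
}

int main(void)
{
	/* a forked task starts one vslice behind min_vruntime (debited) */
	assert(place_demo(100000000ULL, 0, 1, 1500000ULL, 6000000ULL) == 101500000ULL);
	/* a long sleeper gets at most half a latency of wakeup credit... */
	assert(place_demo(100000000ULL, 0, 0, 1500000ULL, 6000000ULL) == 97000000ULL);
	/* ...and a short sleeper keeps its own (later) vruntime */
	assert(place_demo(100000000ULL, 99000000ULL, 0, 1500000ULL, 6000000ULL) == 99000000ULL);
	return 0;
}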
d3d9dc33
PT
3815static void check_enqueue_throttle(struct cfs_rq *cfs_rq);
3816
cb251765
MG
3817static inline void check_schedstat_required(void)
3818{
3819#ifdef CONFIG_SCHEDSTATS
3820 if (schedstat_enabled())
3821 return;
3822
3823 /* Force schedstat enabled if a dependent tracepoint is active */
3824 if (trace_sched_stat_wait_enabled() ||
3825 trace_sched_stat_sleep_enabled() ||
3826 trace_sched_stat_iowait_enabled() ||
3827 trace_sched_stat_blocked_enabled() ||
3828 trace_sched_stat_runtime_enabled()) {
eda8dca5 3829 printk_deferred_once("Scheduler tracepoints stat_sleep, stat_iowait, "
cb251765 3830 "stat_blocked and stat_runtime require the "
f67abed5 3831 "kernel parameter schedstats=enable or "
cb251765
MG
3832 "kernel.sched_schedstats=1\n");
3833 }
3834#endif
3835}
3836
b5179ac7
PZ
3837
3838/*
3839 * MIGRATION
3840 *
3841 * dequeue
3842 * update_curr()
3843 * update_min_vruntime()
3844 * vruntime -= min_vruntime
3845 *
3846 * enqueue
3847 * update_curr()
3848 * update_min_vruntime()
3849 * vruntime += min_vruntime
3850 *
3851 * this way the vruntime transition between RQs is done when both
3852 * min_vruntime are up-to-date.
3853 *
3854 * WAKEUP (remote)
3855 *
59efa0ba 3856 * ->migrate_task_rq_fair() (p->state == TASK_WAKING)
b5179ac7
PZ
3857 * vruntime -= min_vruntime
3858 *
3859 * enqueue
3860 * update_curr()
3861 * update_min_vruntime()
3862 * vruntime += min_vruntime
3863 *
3864 * this way we don't have the most up-to-date min_vruntime on the originating
3865 * CPU and an up-to-date min_vruntime on the destination CPU.
3866 */
3867
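/*
 * Illustrative aside (not kernel code): a user-space sketch of the
 * dequeue/enqueue renormalisation described in the MIGRATION comment above.
 * The min_vruntime values of the two runqueues are made-up numbers.
 */
#include <assert.h>

int main(void)
{
	unsigned long long src_min_vruntime = 500000000ULL;	/* source rq */
	unsigned long long dst_min_vruntime =  20000000ULL;	/* destination rq */
	unsigned long long se_vruntime      = 502000000ULL;	/* 2ms ahead of src min */

	/* dequeue path: make vruntime relative, i.e. rq-independent */
	se_vruntime -= src_min_vruntime;

	/* enqueue path: rebase onto the destination runqueue */
	se_vruntime += dst_min_vruntime;

	/* the entity keeps its 2ms of lag relative to the new min_vruntime */
	assert(se_vruntime - dst_min_vruntime == 2000000ULL);
	return 0;
}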
bf0f6f24 3868static void
88ec22d3 3869enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
bf0f6f24 3870{
2f950354
PZ
3871 bool renorm = !(flags & ENQUEUE_WAKEUP) || (flags & ENQUEUE_MIGRATED);
3872 bool curr = cfs_rq->curr == se;
3873
88ec22d3 3874 /*
2f950354
PZ
3875 * If we're the current task, we must renormalise before calling
3876 * update_curr().
88ec22d3 3877 */
2f950354 3878 if (renorm && curr)
88ec22d3
PZ
3879 se->vruntime += cfs_rq->min_vruntime;
3880
2f950354
PZ
3881 update_curr(cfs_rq);
3882
bf0f6f24 3883 /*
2f950354
PZ
3884 * Otherwise, renormalise after, such that we're placed at the current
3885 * moment in time, instead of some random moment in the past. Being
3886 * placed in the past could significantly boost this task to the
3887 * fairness detriment of existing tasks.
bf0f6f24 3888 */
2f950354
PZ
3889 if (renorm && !curr)
3890 se->vruntime += cfs_rq->min_vruntime;
3891
89ee048f
VG
3892 /*
3893 * When enqueuing a sched_entity, we must:
3894 * - Update loads to have both entity and cfs_rq synced with now.
3895 * - Add its load to cfs_rq->runnable_avg
3896 * - For group_entity, update its weight to reflect the new share of
3897 * its group cfs_rq
3898 * - Add its new weight to cfs_rq->load.weight
3899 */
b382a531 3900 update_load_avg(cfs_rq, se, UPDATE_TG | DO_ATTACH);
1ea6c46a 3901 update_cfs_group(se);
b5b3e35f 3902 enqueue_runnable_load_avg(cfs_rq, se);
17bc14b7 3903 account_entity_enqueue(cfs_rq, se);
bf0f6f24 3904
1a3d027c 3905 if (flags & ENQUEUE_WAKEUP)
aeb73b04 3906 place_entity(cfs_rq, se, 0);
bf0f6f24 3907
cb251765 3908 check_schedstat_required();
4fa8d299
JP
3909 update_stats_enqueue(cfs_rq, se, flags);
3910 check_spread(cfs_rq, se);
2f950354 3911 if (!curr)
83b699ed 3912 __enqueue_entity(cfs_rq, se);
2069dd75 3913 se->on_rq = 1;
3d4b47b4 3914
d3d9dc33 3915 if (cfs_rq->nr_running == 1) {
3d4b47b4 3916 list_add_leaf_cfs_rq(cfs_rq);
d3d9dc33
PT
3917 check_enqueue_throttle(cfs_rq);
3918 }
bf0f6f24
IM
3919}
3920
2c13c919 3921static void __clear_buddies_last(struct sched_entity *se)
2002c695 3922{
2c13c919
RR
3923 for_each_sched_entity(se) {
3924 struct cfs_rq *cfs_rq = cfs_rq_of(se);
f1044799 3925 if (cfs_rq->last != se)
2c13c919 3926 break;
f1044799
PZ
3927
3928 cfs_rq->last = NULL;
2c13c919
RR
3929 }
3930}
2002c695 3931
2c13c919
RR
3932static void __clear_buddies_next(struct sched_entity *se)
3933{
3934 for_each_sched_entity(se) {
3935 struct cfs_rq *cfs_rq = cfs_rq_of(se);
f1044799 3936 if (cfs_rq->next != se)
2c13c919 3937 break;
f1044799
PZ
3938
3939 cfs_rq->next = NULL;
2c13c919 3940 }
2002c695
PZ
3941}
3942
ac53db59
RR
3943static void __clear_buddies_skip(struct sched_entity *se)
3944{
3945 for_each_sched_entity(se) {
3946 struct cfs_rq *cfs_rq = cfs_rq_of(se);
f1044799 3947 if (cfs_rq->skip != se)
ac53db59 3948 break;
f1044799
PZ
3949
3950 cfs_rq->skip = NULL;
ac53db59
RR
3951 }
3952}
3953
a571bbea
PZ
3954static void clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se)
3955{
2c13c919
RR
3956 if (cfs_rq->last == se)
3957 __clear_buddies_last(se);
3958
3959 if (cfs_rq->next == se)
3960 __clear_buddies_next(se);
ac53db59
RR
3961
3962 if (cfs_rq->skip == se)
3963 __clear_buddies_skip(se);
a571bbea
PZ
3964}
3965
6c16a6dc 3966static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq);
d8b4986d 3967
bf0f6f24 3968static void
371fd7e7 3969dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
bf0f6f24 3970{
a2a2d680
DA
3971 /*
3972 * Update run-time statistics of the 'current'.
3973 */
3974 update_curr(cfs_rq);
89ee048f
VG
3975
3976 /*
3977 * When dequeuing a sched_entity, we must:
3978 * - Update loads to have both entity and cfs_rq synced with now.
 3979	 * - Subtract its load from the cfs_rq->runnable_avg.
 3980	 * - Subtract its previous weight from cfs_rq->load.weight.
3981 * - For group entity, update its weight to reflect the new share
3982 * of its group cfs_rq.
3983 */
88c0616e 3984 update_load_avg(cfs_rq, se, UPDATE_TG);
b5b3e35f 3985 dequeue_runnable_load_avg(cfs_rq, se);
a2a2d680 3986
4fa8d299 3987 update_stats_dequeue(cfs_rq, se, flags);
67e9fb2a 3988
2002c695 3989 clear_buddies(cfs_rq, se);
4793241b 3990
83b699ed 3991 if (se != cfs_rq->curr)
30cfdcfc 3992 __dequeue_entity(cfs_rq, se);
17bc14b7 3993 se->on_rq = 0;
30cfdcfc 3994 account_entity_dequeue(cfs_rq, se);
88ec22d3
PZ
3995
3996 /*
b60205c7
PZ
3997 * Normalize after update_curr(); which will also have moved
3998 * min_vruntime if @se is the one holding it back. But before doing
3999 * update_min_vruntime() again, which will discount @se's position and
4000 * can move min_vruntime forward still more.
88ec22d3 4001 */
371fd7e7 4002 if (!(flags & DEQUEUE_SLEEP))
88ec22d3 4003 se->vruntime -= cfs_rq->min_vruntime;
1e876231 4004
d8b4986d
PT
4005 /* return excess runtime on last dequeue */
4006 return_cfs_rq_runtime(cfs_rq);
4007
1ea6c46a 4008 update_cfs_group(se);
b60205c7
PZ
4009
4010 /*
4011 * Now advance min_vruntime if @se was the entity holding it back,
4012 * except when: DEQUEUE_SAVE && !DEQUEUE_MOVE, in this case we'll be
4013 * put back on, and if we advance min_vruntime, we'll be placed back
4014 * further than we started -- ie. we'll be penalized.
4015 */
9845c49c 4016 if ((flags & (DEQUEUE_SAVE | DEQUEUE_MOVE)) != DEQUEUE_SAVE)
b60205c7 4017 update_min_vruntime(cfs_rq);
bf0f6f24
IM
4018}
4019
4020/*
4021 * Preempt the current task with a newly woken task if needed:
4022 */
7c92e54f 4023static void
2e09bf55 4024check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
bf0f6f24 4025{
11697830 4026 unsigned long ideal_runtime, delta_exec;
f4cfb33e
WX
4027 struct sched_entity *se;
4028 s64 delta;
11697830 4029
6d0f0ebd 4030 ideal_runtime = sched_slice(cfs_rq, curr);
11697830 4031 delta_exec = curr->sum_exec_runtime - curr->prev_sum_exec_runtime;
a9f3e2b5 4032 if (delta_exec > ideal_runtime) {
8875125e 4033 resched_curr(rq_of(cfs_rq));
a9f3e2b5
MG
4034 /*
4035 * The current task ran long enough, ensure it doesn't get
4036 * re-elected due to buddy favours.
4037 */
4038 clear_buddies(cfs_rq, curr);
f685ceac
MG
4039 return;
4040 }
4041
4042 /*
4043 * Ensure that a task that missed wakeup preemption by a
4044 * narrow margin doesn't have to wait for a full slice.
4045 * This also mitigates buddy induced latencies under load.
4046 */
f685ceac
MG
4047 if (delta_exec < sysctl_sched_min_granularity)
4048 return;
4049
f4cfb33e
WX
4050 se = __pick_first_entity(cfs_rq);
4051 delta = curr->vruntime - se->vruntime;
f685ceac 4052
f4cfb33e
WX
4053 if (delta < 0)
4054 return;
d7d82944 4055
f4cfb33e 4056 if (delta > ideal_runtime)
8875125e 4057 resched_curr(rq_of(cfs_rq));
bf0f6f24
IM
4058}
4059
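/*
 * Illustrative aside (not kernel code): a user-space sketch of the
 * preemption decisions made in check_preempt_tick() above. The 4ms slice
 * and 0.75ms minimum granularity are assumed values; the deltas are made up.
 */
#include <assert.h>

#define DEMO_MIN_GRANULARITY	750000LL	/* 0.75ms, assumed */

static int needs_resched_demo(long long delta_exec, long long ideal_runtime,
			      long long vruntime_delta)
{
	if (delta_exec > ideal_runtime)		/* ran past its slice */
		return 1;
	if (delta_exec < DEMO_MIN_GRANULARITY)	/* too early to bother */
		return 0;
	/* preempt if the leftmost entity has fallen a full slice behind */
	return vruntime_delta > ideal_runtime;
}

int main(void)
{
	/* 5ms of runtime against a 4ms slice: reschedule */
	assert(needs_resched_demo(5000000, 4000000, 0));
	/* 0.5ms of runtime: below the minimum granularity, keep running */
	assert(!needs_resched_demo(500000, 4000000, 10000000));
	/* 2ms of runtime but the leftmost task lags by 6ms > slice: reschedule */
	assert(needs_resched_demo(2000000, 4000000, 6000000));
	return 0;
}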
83b699ed 4060static void
8494f412 4061set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
bf0f6f24 4062{
83b699ed
SV
4063 /* 'current' is not kept within the tree. */
4064 if (se->on_rq) {
4065 /*
 4066	 * Any task has to be enqueued before it gets to execute on
4067 * a CPU. So account for the time it spent waiting on the
4068 * runqueue.
4069 */
4fa8d299 4070 update_stats_wait_end(cfs_rq, se);
83b699ed 4071 __dequeue_entity(cfs_rq, se);
88c0616e 4072 update_load_avg(cfs_rq, se, UPDATE_TG);
83b699ed
SV
4073 }
4074
79303e9e 4075 update_stats_curr_start(cfs_rq, se);
429d43bc 4076 cfs_rq->curr = se;
4fa8d299 4077
eba1ed4b
IM
4078 /*
4079 * Track our maximum slice length, if the CPU's load is at
 4080	 * least twice that of our own weight (i.e. don't track it
4081 * when there are only lesser-weight tasks around):
4082 */
cb251765 4083 if (schedstat_enabled() && rq_of(cfs_rq)->load.weight >= 2*se->load.weight) {
4fa8d299
JP
4084 schedstat_set(se->statistics.slice_max,
4085 max((u64)schedstat_val(se->statistics.slice_max),
4086 se->sum_exec_runtime - se->prev_sum_exec_runtime));
eba1ed4b 4087 }
4fa8d299 4088
4a55b450 4089 se->prev_sum_exec_runtime = se->sum_exec_runtime;
bf0f6f24
IM
4090}
4091
3f3a4904
PZ
4092static int
4093wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se);
4094
ac53db59
RR
4095/*
4096 * Pick the next process, keeping these things in mind, in this order:
4097 * 1) keep things fair between processes/task groups
4098 * 2) pick the "next" process, since someone really wants that to run
4099 * 3) pick the "last" process, for cache locality
4100 * 4) do not run the "skip" process, if something else is available
4101 */
678d5718
PZ
4102static struct sched_entity *
4103pick_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *curr)
aa2ac252 4104{
678d5718
PZ
4105 struct sched_entity *left = __pick_first_entity(cfs_rq);
4106 struct sched_entity *se;
4107
4108 /*
 4109	 * If curr is set we have to see if it's left of the leftmost entity
4110 * still in the tree, provided there was anything in the tree at all.
4111 */
4112 if (!left || (curr && entity_before(curr, left)))
4113 left = curr;
4114
4115 se = left; /* ideally we run the leftmost entity */
f4b6755f 4116
ac53db59
RR
4117 /*
4118 * Avoid running the skip buddy, if running something else can
4119 * be done without getting too unfair.
4120 */
4121 if (cfs_rq->skip == se) {
678d5718
PZ
4122 struct sched_entity *second;
4123
4124 if (se == curr) {
4125 second = __pick_first_entity(cfs_rq);
4126 } else {
4127 second = __pick_next_entity(se);
4128 if (!second || (curr && entity_before(curr, second)))
4129 second = curr;
4130 }
4131
ac53db59
RR
4132 if (second && wakeup_preempt_entity(second, left) < 1)
4133 se = second;
4134 }
aa2ac252 4135
f685ceac
MG
4136 /*
4137 * Prefer last buddy, try to return the CPU to a preempted task.
4138 */
4139 if (cfs_rq->last && wakeup_preempt_entity(cfs_rq->last, left) < 1)
4140 se = cfs_rq->last;
4141
ac53db59
RR
4142 /*
4143 * Someone really wants this to run. If it's not unfair, run it.
4144 */
4145 if (cfs_rq->next && wakeup_preempt_entity(cfs_rq->next, left) < 1)
4146 se = cfs_rq->next;
4147
f685ceac 4148 clear_buddies(cfs_rq, se);
4793241b
PZ
4149
4150 return se;
aa2ac252
PZ
4151}
4152
678d5718 4153static bool check_cfs_rq_runtime(struct cfs_rq *cfs_rq);
d3d9dc33 4154
ab6cde26 4155static void put_prev_entity(struct cfs_rq *cfs_rq, struct sched_entity *prev)
bf0f6f24
IM
4156{
4157 /*
4158 * If still on the runqueue then deactivate_task()
4159 * was not called and update_curr() has to be done:
4160 */
4161 if (prev->on_rq)
b7cc0896 4162 update_curr(cfs_rq);
bf0f6f24 4163
d3d9dc33
PT
4164 /* throttle cfs_rqs exceeding runtime */
4165 check_cfs_rq_runtime(cfs_rq);
4166
4fa8d299 4167 check_spread(cfs_rq, prev);
cb251765 4168
30cfdcfc 4169 if (prev->on_rq) {
4fa8d299 4170 update_stats_wait_start(cfs_rq, prev);
30cfdcfc
DA
4171 /* Put 'current' back into the tree. */
4172 __enqueue_entity(cfs_rq, prev);
9d85f21c 4173 /* in !on_rq case, update occurred at dequeue */
88c0616e 4174 update_load_avg(cfs_rq, prev, 0);
30cfdcfc 4175 }
429d43bc 4176 cfs_rq->curr = NULL;
bf0f6f24
IM
4177}
4178
8f4d37ec
PZ
4179static void
4180entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr, int queued)
bf0f6f24 4181{
bf0f6f24 4182 /*
30cfdcfc 4183 * Update run-time statistics of the 'current'.
bf0f6f24 4184 */
30cfdcfc 4185 update_curr(cfs_rq);
bf0f6f24 4186
9d85f21c
PT
4187 /*
4188 * Ensure that runnable average is periodically updated.
4189 */
88c0616e 4190 update_load_avg(cfs_rq, curr, UPDATE_TG);
1ea6c46a 4191 update_cfs_group(curr);
9d85f21c 4192
8f4d37ec
PZ
4193#ifdef CONFIG_SCHED_HRTICK
4194 /*
4195 * queued ticks are scheduled to match the slice, so don't bother
4196 * validating it and just reschedule.
4197 */
983ed7a6 4198 if (queued) {
8875125e 4199 resched_curr(rq_of(cfs_rq));
983ed7a6
HH
4200 return;
4201 }
8f4d37ec
PZ
4202 /*
4203 * don't let the period tick interfere with the hrtick preemption
4204 */
4205 if (!sched_feat(DOUBLE_TICK) &&
4206 hrtimer_active(&rq_of(cfs_rq)->hrtick_timer))
4207 return;
4208#endif
4209
2c2efaed 4210 if (cfs_rq->nr_running > 1)
2e09bf55 4211 check_preempt_tick(cfs_rq, curr);
bf0f6f24
IM
4212}
4213
ab84d31e
PT
4214
4215/**************************************************
4216 * CFS bandwidth control machinery
4217 */
4218
4219#ifdef CONFIG_CFS_BANDWIDTH
029632fb
PZ
4220
4221#ifdef HAVE_JUMP_LABEL
c5905afb 4222static struct static_key __cfs_bandwidth_used;
029632fb
PZ
4223
4224static inline bool cfs_bandwidth_used(void)
4225{
c5905afb 4226 return static_key_false(&__cfs_bandwidth_used);
029632fb
PZ
4227}
4228
1ee14e6c 4229void cfs_bandwidth_usage_inc(void)
029632fb 4230{
ce48c146 4231 static_key_slow_inc_cpuslocked(&__cfs_bandwidth_used);
1ee14e6c
BS
4232}
4233
4234void cfs_bandwidth_usage_dec(void)
4235{
ce48c146 4236 static_key_slow_dec_cpuslocked(&__cfs_bandwidth_used);
029632fb
PZ
4237}
4238#else /* HAVE_JUMP_LABEL */
4239static bool cfs_bandwidth_used(void)
4240{
4241 return true;
4242}
4243
1ee14e6c
BS
4244void cfs_bandwidth_usage_inc(void) {}
4245void cfs_bandwidth_usage_dec(void) {}
029632fb
PZ
4246#endif /* HAVE_JUMP_LABEL */
4247
ab84d31e
PT
4248/*
4249 * default period for cfs group bandwidth.
4250 * default: 0.1s, units: nanoseconds
4251 */
4252static inline u64 default_cfs_period(void)
4253{
4254 return 100000000ULL;
4255}
ec12cb7f
PT
4256
4257static inline u64 sched_cfs_bandwidth_slice(void)
4258{
4259 return (u64)sysctl_sched_cfs_bandwidth_slice * NSEC_PER_USEC;
4260}
4261
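/*
 * Illustrative aside (not kernel code): a user-space sketch of how the
 * 100ms default period, a group quota and the per-cfs_rq bandwidth slice
 * relate. The 5000us slice sysctl and the 20ms quota are assumed values
 * for illustration only.
 */
#include <assert.h>

#define DEMO_NSEC_PER_USEC	1000ULL

int main(void)
{
	unsigned long long period = 100000000ULL;		/* default_cfs_period(): 100ms */
	unsigned long long slice  = 5000 * DEMO_NSEC_PER_USEC;	/* assumed 5000us sysctl */
	unsigned long long quota  = 20000000ULL;		/* 20ms of CPU time per period */

	/* a 20ms quota per 100ms period caps the group at 20% of one CPU */
	assert(quota * 100 / period == 20);
	/* local runqueues draw that quota from the global pool in 5ms slices */
	assert(quota / slice == 4);
	return 0;
}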
a9cf55b2
PT
4262/*
4263 * Replenish runtime according to assigned quota and update expiration time.
4264 * We use sched_clock_cpu directly instead of rq->clock to avoid adding
4265 * additional synchronization around rq->lock.
4266 *
4267 * requires cfs_b->lock
4268 */
029632fb 4269void __refill_cfs_bandwidth_runtime(struct cfs_bandwidth *cfs_b)
a9cf55b2
PT
4270{
4271 u64 now;
4272
4273 if (cfs_b->quota == RUNTIME_INF)
4274 return;
4275
4276 now = sched_clock_cpu(smp_processor_id());
4277 cfs_b->runtime = cfs_b->quota;
4278 cfs_b->runtime_expires = now + ktime_to_ns(cfs_b->period);
512ac999 4279 cfs_b->expires_seq++;
a9cf55b2
PT
4280}
4281
029632fb
PZ
4282static inline struct cfs_bandwidth *tg_cfs_bandwidth(struct task_group *tg)
4283{
4284 return &tg->cfs_bandwidth;
4285}
4286
f1b17280
PT
4287/* rq->task_clock normalized against any time this cfs_rq has spent throttled */
4288static inline u64 cfs_rq_clock_task(struct cfs_rq *cfs_rq)
4289{
4290 if (unlikely(cfs_rq->throttle_count))
1a99ae3f 4291 return cfs_rq->throttled_clock_task - cfs_rq->throttled_clock_task_time;
f1b17280 4292
78becc27 4293 return rq_clock_task(rq_of(cfs_rq)) - cfs_rq->throttled_clock_task_time;
f1b17280
PT
4294}
4295
85dac906
PT
4296/* returns 0 on failure to allocate runtime */
4297static int assign_cfs_rq_runtime(struct cfs_rq *cfs_rq)
ec12cb7f
PT
4298{
4299 struct task_group *tg = cfs_rq->tg;
4300 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(tg);
a9cf55b2 4301 u64 amount = 0, min_amount, expires;
512ac999 4302 int expires_seq;
ec12cb7f
PT
4303
4304 /* note: this is a positive sum as runtime_remaining <= 0 */
4305 min_amount = sched_cfs_bandwidth_slice() - cfs_rq->runtime_remaining;
4306
4307 raw_spin_lock(&cfs_b->lock);
4308 if (cfs_b->quota == RUNTIME_INF)
4309 amount = min_amount;
58088ad0 4310 else {
77a4d1a1 4311 start_cfs_bandwidth(cfs_b);
58088ad0
PT
4312
4313 if (cfs_b->runtime > 0) {
4314 amount = min(cfs_b->runtime, min_amount);
4315 cfs_b->runtime -= amount;
4316 cfs_b->idle = 0;
4317 }
ec12cb7f 4318 }
512ac999 4319 expires_seq = cfs_b->expires_seq;
a9cf55b2 4320 expires = cfs_b->runtime_expires;
ec12cb7f
PT
4321 raw_spin_unlock(&cfs_b->lock);
4322
4323 cfs_rq->runtime_remaining += amount;
a9cf55b2
PT
4324 /*
4325 * we may have advanced our local expiration to account for allowed
4326 * spread between our sched_clock and the one on which runtime was
4327 * issued.
4328 */
512ac999
XP
4329 if (cfs_rq->expires_seq != expires_seq) {
4330 cfs_rq->expires_seq = expires_seq;
a9cf55b2 4331 cfs_rq->runtime_expires = expires;
512ac999 4332 }
85dac906
PT
4333
4334 return cfs_rq->runtime_remaining > 0;
ec12cb7f
PT
4335}
4336
a9cf55b2
PT
4337/*
4338 * Note: This depends on the synchronization provided by sched_clock and the
4339 * fact that rq->clock snapshots this value.
4340 */
4341static void expire_cfs_rq_runtime(struct cfs_rq *cfs_rq)
ec12cb7f 4342{
a9cf55b2 4343 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
a9cf55b2
PT
4344
4345 /* if the deadline is ahead of our clock, nothing to do */
78becc27 4346 if (likely((s64)(rq_clock(rq_of(cfs_rq)) - cfs_rq->runtime_expires) < 0))
ec12cb7f
PT
4347 return;
4348
a9cf55b2
PT
4349 if (cfs_rq->runtime_remaining < 0)
4350 return;
4351
4352 /*
4353 * If the local deadline has passed we have to consider the
4354 * possibility that our sched_clock is 'fast' and the global deadline
4355 * has not truly expired.
4356 *
 4357	 * Fortunately we can determine whether this is the case by checking
512ac999 4358	 * whether the global deadline (cfs_b->expires_seq) has advanced.
a9cf55b2 4359 */
512ac999 4360 if (cfs_rq->expires_seq == cfs_b->expires_seq) {
a9cf55b2
PT
4361 /* extend local deadline, drift is bounded above by 2 ticks */
4362 cfs_rq->runtime_expires += TICK_NSEC;
4363 } else {
4364 /* global deadline is ahead, expiration has passed */
4365 cfs_rq->runtime_remaining = 0;
4366 }
4367}
4368
9dbdb155 4369static void __account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec)
a9cf55b2
PT
4370{
4371 /* dock delta_exec before expiring quota (as it could span periods) */
ec12cb7f 4372 cfs_rq->runtime_remaining -= delta_exec;
a9cf55b2
PT
4373 expire_cfs_rq_runtime(cfs_rq);
4374
4375 if (likely(cfs_rq->runtime_remaining > 0))
ec12cb7f
PT
4376 return;
4377
85dac906
PT
4378 /*
4379 * if we're unable to extend our runtime we resched so that the active
4380 * hierarchy can be throttled
4381 */
4382 if (!assign_cfs_rq_runtime(cfs_rq) && likely(cfs_rq->curr))
8875125e 4383 resched_curr(rq_of(cfs_rq));
ec12cb7f
PT
4384}
4385
6c16a6dc 4386static __always_inline
9dbdb155 4387void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec)
ec12cb7f 4388{
56f570e5 4389 if (!cfs_bandwidth_used() || !cfs_rq->runtime_enabled)
ec12cb7f
PT
4390 return;
4391
4392 __account_cfs_rq_runtime(cfs_rq, delta_exec);
4393}
4394
85dac906
PT
4395static inline int cfs_rq_throttled(struct cfs_rq *cfs_rq)
4396{
56f570e5 4397 return cfs_bandwidth_used() && cfs_rq->throttled;
85dac906
PT
4398}
4399
64660c86
PT
4400/* check whether cfs_rq, or any parent, is throttled */
4401static inline int throttled_hierarchy(struct cfs_rq *cfs_rq)
4402{
56f570e5 4403 return cfs_bandwidth_used() && cfs_rq->throttle_count;
64660c86
PT
4404}
4405
4406/*
4407 * Ensure that neither of the group entities corresponding to src_cpu or
4408 * dest_cpu are members of a throttled hierarchy when performing group
4409 * load-balance operations.
4410 */
4411static inline int throttled_lb_pair(struct task_group *tg,
4412 int src_cpu, int dest_cpu)
4413{
4414 struct cfs_rq *src_cfs_rq, *dest_cfs_rq;
4415
4416 src_cfs_rq = tg->cfs_rq[src_cpu];
4417 dest_cfs_rq = tg->cfs_rq[dest_cpu];
4418
4419 return throttled_hierarchy(src_cfs_rq) ||
4420 throttled_hierarchy(dest_cfs_rq);
4421}
4422
64660c86
PT
4423static int tg_unthrottle_up(struct task_group *tg, void *data)
4424{
4425 struct rq *rq = data;
4426 struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)];
4427
4428 cfs_rq->throttle_count--;
64660c86 4429 if (!cfs_rq->throttle_count) {
f1b17280 4430 /* adjust cfs_rq_clock_task() */
78becc27 4431 cfs_rq->throttled_clock_task_time += rq_clock_task(rq) -
f1b17280 4432 cfs_rq->throttled_clock_task;
64660c86 4433 }
64660c86
PT
4434
4435 return 0;
4436}
4437
4438static int tg_throttle_down(struct task_group *tg, void *data)
4439{
4440 struct rq *rq = data;
4441 struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)];
4442
82958366
PT
4443 /* group is entering throttled state, stop time */
4444 if (!cfs_rq->throttle_count)
78becc27 4445 cfs_rq->throttled_clock_task = rq_clock_task(rq);
64660c86
PT
4446 cfs_rq->throttle_count++;
4447
4448 return 0;
4449}
4450
d3d9dc33 4451static void throttle_cfs_rq(struct cfs_rq *cfs_rq)
85dac906
PT
4452{
4453 struct rq *rq = rq_of(cfs_rq);
4454 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
4455 struct sched_entity *se;
4456 long task_delta, dequeue = 1;
77a4d1a1 4457 bool empty;
85dac906
PT
4458
4459 se = cfs_rq->tg->se[cpu_of(rq_of(cfs_rq))];
4460
f1b17280 4461 /* freeze hierarchy runnable averages while throttled */
64660c86
PT
4462 rcu_read_lock();
4463 walk_tg_tree_from(cfs_rq->tg, tg_throttle_down, tg_nop, (void *)rq);
4464 rcu_read_unlock();
85dac906
PT
4465
4466 task_delta = cfs_rq->h_nr_running;
4467 for_each_sched_entity(se) {
4468 struct cfs_rq *qcfs_rq = cfs_rq_of(se);
4469 /* throttled entity or throttle-on-deactivate */
4470 if (!se->on_rq)
4471 break;
4472
4473 if (dequeue)
4474 dequeue_entity(qcfs_rq, se, DEQUEUE_SLEEP);
4475 qcfs_rq->h_nr_running -= task_delta;
4476
4477 if (qcfs_rq->load.weight)
4478 dequeue = 0;
4479 }
4480
4481 if (!se)
72465447 4482 sub_nr_running(rq, task_delta);
85dac906
PT
4483
4484 cfs_rq->throttled = 1;
78becc27 4485 cfs_rq->throttled_clock = rq_clock(rq);
85dac906 4486 raw_spin_lock(&cfs_b->lock);
d49db342 4487 empty = list_empty(&cfs_b->throttled_cfs_rq);
77a4d1a1 4488
c06f04c7
BS
4489 /*
4490 * Add to the _head_ of the list, so that an already-started
baa9be4f
PA
 4491	 * distribute_cfs_runtime will not see us. If distribute_cfs_runtime is
 4492	 * not running, add to the tail so that later runqueues don't get starved.
c06f04c7 4493 */
baa9be4f
PA
4494 if (cfs_b->distribute_running)
4495 list_add_rcu(&cfs_rq->throttled_list, &cfs_b->throttled_cfs_rq);
4496 else
4497 list_add_tail_rcu(&cfs_rq->throttled_list, &cfs_b->throttled_cfs_rq);
77a4d1a1
PZ
4498
4499 /*
4500 * If we're the first throttled task, make sure the bandwidth
4501 * timer is running.
4502 */
4503 if (empty)
4504 start_cfs_bandwidth(cfs_b);
4505
85dac906
PT
4506 raw_spin_unlock(&cfs_b->lock);
4507}
4508
029632fb 4509void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
671fd9da
PT
4510{
4511 struct rq *rq = rq_of(cfs_rq);
4512 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
4513 struct sched_entity *se;
4514 int enqueue = 1;
4515 long task_delta;
4516
22b958d8 4517 se = cfs_rq->tg->se[cpu_of(rq)];
671fd9da
PT
4518
4519 cfs_rq->throttled = 0;
1a55af2e
FW
4520
4521 update_rq_clock(rq);
4522
671fd9da 4523 raw_spin_lock(&cfs_b->lock);
78becc27 4524 cfs_b->throttled_time += rq_clock(rq) - cfs_rq->throttled_clock;
671fd9da
PT
4525 list_del_rcu(&cfs_rq->throttled_list);
4526 raw_spin_unlock(&cfs_b->lock);
4527
64660c86
PT
4528 /* update hierarchical throttle state */
4529 walk_tg_tree_from(cfs_rq->tg, tg_nop, tg_unthrottle_up, (void *)rq);
4530
671fd9da
PT
4531 if (!cfs_rq->load.weight)
4532 return;
4533
4534 task_delta = cfs_rq->h_nr_running;
4535 for_each_sched_entity(se) {
4536 if (se->on_rq)
4537 enqueue = 0;
4538
4539 cfs_rq = cfs_rq_of(se);
4540 if (enqueue)
4541 enqueue_entity(cfs_rq, se, ENQUEUE_WAKEUP);
4542 cfs_rq->h_nr_running += task_delta;
4543
4544 if (cfs_rq_throttled(cfs_rq))
4545 break;
4546 }
4547
4548 if (!se)
72465447 4549 add_nr_running(rq, task_delta);
671fd9da 4550
97fb7a0a 4551	/* Determine whether we need to wake up a potentially idle CPU: */
671fd9da 4552 if (rq->curr == rq->idle && rq->cfs.nr_running)
8875125e 4553 resched_curr(rq);
671fd9da
PT
4554}
4555
4556static u64 distribute_cfs_runtime(struct cfs_bandwidth *cfs_b,
4557 u64 remaining, u64 expires)
4558{
4559 struct cfs_rq *cfs_rq;
c06f04c7
BS
4560 u64 runtime;
4561 u64 starting_runtime = remaining;
671fd9da
PT
4562
4563 rcu_read_lock();
4564 list_for_each_entry_rcu(cfs_rq, &cfs_b->throttled_cfs_rq,
4565 throttled_list) {
4566 struct rq *rq = rq_of(cfs_rq);
8a8c69c3 4567 struct rq_flags rf;
671fd9da 4568
8a8c69c3 4569 rq_lock(rq, &rf);
671fd9da
PT
4570 if (!cfs_rq_throttled(cfs_rq))
4571 goto next;
4572
4573 runtime = -cfs_rq->runtime_remaining + 1;
4574 if (runtime > remaining)
4575 runtime = remaining;
4576 remaining -= runtime;
4577
4578 cfs_rq->runtime_remaining += runtime;
4579 cfs_rq->runtime_expires = expires;
4580
4581 /* we check whether we're throttled above */
4582 if (cfs_rq->runtime_remaining > 0)
4583 unthrottle_cfs_rq(cfs_rq);
4584
4585next:
8a8c69c3 4586 rq_unlock(rq, &rf);
671fd9da
PT
4587
4588 if (!remaining)
4589 break;
4590 }
4591 rcu_read_unlock();
4592
c06f04c7 4593 return starting_runtime - remaining;
671fd9da
PT
4594}
4595
58088ad0
PT
4596/*
4597 * Responsible for refilling a task_group's bandwidth and unthrottling its
4598 * cfs_rqs as appropriate. If there has been no activity within the last
4599 * period the timer is deactivated until scheduling resumes; cfs_b->idle is
4600 * used to track this state.
4601 */
4602static int do_sched_cfs_period_timer(struct cfs_bandwidth *cfs_b, int overrun)
4603{
671fd9da 4604 u64 runtime, runtime_expires;
51f2176d 4605 int throttled;
58088ad0 4606
58088ad0
PT
4607 /* no need to continue the timer with no bandwidth constraint */
4608 if (cfs_b->quota == RUNTIME_INF)
51f2176d 4609 goto out_deactivate;
58088ad0 4610
671fd9da 4611 throttled = !list_empty(&cfs_b->throttled_cfs_rq);
e8da1b18 4612 cfs_b->nr_periods += overrun;
671fd9da 4613
51f2176d
BS
4614 /*
4615 * idle depends on !throttled (for the case of a large deficit), and if
4616 * we're going inactive then everything else can be deferred
4617 */
4618 if (cfs_b->idle && !throttled)
4619 goto out_deactivate;
a9cf55b2
PT
4620
4621 __refill_cfs_bandwidth_runtime(cfs_b);
4622
671fd9da
PT
4623 if (!throttled) {
4624 /* mark as potentially idle for the upcoming period */
4625 cfs_b->idle = 1;
51f2176d 4626 return 0;
671fd9da
PT
4627 }
4628
e8da1b18
NR
4629 /* account preceding periods in which throttling occurred */
4630 cfs_b->nr_throttled += overrun;
4631
671fd9da 4632 runtime_expires = cfs_b->runtime_expires;
671fd9da
PT
4633
4634 /*
c06f04c7
BS
4635 * This check is repeated as we are holding onto the new bandwidth while
4636 * we unthrottle. This can potentially race with an unthrottled group
4637 * trying to acquire new bandwidth from the global pool. This can result
4638 * in us over-using our runtime if it is all used during this loop, but
4639 * only by limited amounts in that extreme case.
671fd9da 4640 */
baa9be4f 4641 while (throttled && cfs_b->runtime > 0 && !cfs_b->distribute_running) {
c06f04c7 4642 runtime = cfs_b->runtime;
baa9be4f 4643 cfs_b->distribute_running = 1;
671fd9da
PT
4644 raw_spin_unlock(&cfs_b->lock);
4645 /* we can't nest cfs_b->lock while distributing bandwidth */
4646 runtime = distribute_cfs_runtime(cfs_b, runtime,
4647 runtime_expires);
4648 raw_spin_lock(&cfs_b->lock);
4649
baa9be4f 4650 cfs_b->distribute_running = 0;
671fd9da 4651 throttled = !list_empty(&cfs_b->throttled_cfs_rq);
c06f04c7 4652
b5c0ce7b 4653 lsub_positive(&cfs_b->runtime, runtime);
671fd9da 4654 }
58088ad0 4655
671fd9da
PT
4656 /*
4657 * While we are ensured activity in the period following an
4658 * unthrottle, this also covers the case in which the new bandwidth is
4659 * insufficient to cover the existing bandwidth deficit. (Forcing the
4660 * timer to remain active while there are any throttled entities.)
4661 */
4662 cfs_b->idle = 0;
58088ad0 4663
51f2176d
BS
4664 return 0;
4665
4666out_deactivate:
51f2176d 4667 return 1;
58088ad0 4668}
d3d9dc33 4669
d8b4986d
PT
4670/* a cfs_rq won't donate quota below this amount */
4671static const u64 min_cfs_rq_runtime = 1 * NSEC_PER_MSEC;
4672/* minimum remaining period time to redistribute slack quota */
4673static const u64 min_bandwidth_expiration = 2 * NSEC_PER_MSEC;
4674/* how long we wait to gather additional slack before distributing */
4675static const u64 cfs_bandwidth_slack_period = 5 * NSEC_PER_MSEC;
4676
db06e78c
BS
4677/*
4678 * Are we near the end of the current quota period?
4679 *
4680 * Requires cfs_b->lock for hrtimer_expires_remaining to be safe against the
4961b6e1 4681 * hrtimer base being cleared by hrtimer_start. In the case of
db06e78c
BS
4682 * migrate_hrtimers, base is never cleared, so we are fine.
4683 */
d8b4986d
PT
4684static int runtime_refresh_within(struct cfs_bandwidth *cfs_b, u64 min_expire)
4685{
4686 struct hrtimer *refresh_timer = &cfs_b->period_timer;
4687 u64 remaining;
4688
4689 /* if the call-back is running a quota refresh is already occurring */
4690 if (hrtimer_callback_running(refresh_timer))
4691 return 1;
4692
4693 /* is a quota refresh about to occur? */
4694 remaining = ktime_to_ns(hrtimer_expires_remaining(refresh_timer));
4695 if (remaining < min_expire)
4696 return 1;
4697
4698 return 0;
4699}
4700
4701static void start_cfs_slack_bandwidth(struct cfs_bandwidth *cfs_b)
4702{
4703 u64 min_left = cfs_bandwidth_slack_period + min_bandwidth_expiration;
4704
4705 /* if there's a quota refresh soon don't bother with slack */
4706 if (runtime_refresh_within(cfs_b, min_left))
4707 return;
4708
4cfafd30
PZ
4709 hrtimer_start(&cfs_b->slack_timer,
4710 ns_to_ktime(cfs_bandwidth_slack_period),
4711 HRTIMER_MODE_REL);
d8b4986d
PT
4712}
4713
4714/* we know any runtime found here is valid as update_curr() precedes return */
4715static void __return_cfs_rq_runtime(struct cfs_rq *cfs_rq)
4716{
4717 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
4718 s64 slack_runtime = cfs_rq->runtime_remaining - min_cfs_rq_runtime;
4719
4720 if (slack_runtime <= 0)
4721 return;
4722
4723 raw_spin_lock(&cfs_b->lock);
4724 if (cfs_b->quota != RUNTIME_INF &&
4725 cfs_rq->runtime_expires == cfs_b->runtime_expires) {
4726 cfs_b->runtime += slack_runtime;
4727
4728 /* we are under rq->lock, defer unthrottling using a timer */
4729 if (cfs_b->runtime > sched_cfs_bandwidth_slice() &&
4730 !list_empty(&cfs_b->throttled_cfs_rq))
4731 start_cfs_slack_bandwidth(cfs_b);
4732 }
4733 raw_spin_unlock(&cfs_b->lock);
4734
4735 /* even if it's not valid for return we don't want to try again */
4736 cfs_rq->runtime_remaining -= slack_runtime;
4737}
4738
4739static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq)
4740{
56f570e5
PT
4741 if (!cfs_bandwidth_used())
4742 return;
4743
fccfdc6f 4744 if (!cfs_rq->runtime_enabled || cfs_rq->nr_running)
d8b4986d
PT
4745 return;
4746
4747 __return_cfs_rq_runtime(cfs_rq);
4748}
4749
4750/*
4751 * This is done with a timer (instead of inline with bandwidth return) since
4752 * it's necessary to juggle rq->locks to unthrottle their respective cfs_rqs.
4753 */
4754static void do_sched_cfs_slack_timer(struct cfs_bandwidth *cfs_b)
4755{
4756 u64 runtime = 0, slice = sched_cfs_bandwidth_slice();
4757 u64 expires;
4758
4759 /* confirm we're still not at a refresh boundary */
db06e78c 4760 raw_spin_lock(&cfs_b->lock);
baa9be4f
PA
4761 if (cfs_b->distribute_running) {
4762 raw_spin_unlock(&cfs_b->lock);
4763 return;
4764 }
4765
db06e78c
BS
4766 if (runtime_refresh_within(cfs_b, min_bandwidth_expiration)) {
4767 raw_spin_unlock(&cfs_b->lock);
d8b4986d 4768 return;
db06e78c 4769 }
d8b4986d 4770
c06f04c7 4771 if (cfs_b->quota != RUNTIME_INF && cfs_b->runtime > slice)
d8b4986d 4772 runtime = cfs_b->runtime;
c06f04c7 4773
d8b4986d 4774 expires = cfs_b->runtime_expires;
baa9be4f
PA
4775 if (runtime)
4776 cfs_b->distribute_running = 1;
4777
d8b4986d
PT
4778 raw_spin_unlock(&cfs_b->lock);
4779
4780 if (!runtime)
4781 return;
4782
4783 runtime = distribute_cfs_runtime(cfs_b, runtime, expires);
4784
4785 raw_spin_lock(&cfs_b->lock);
4786 if (expires == cfs_b->runtime_expires)
b5c0ce7b 4787 lsub_positive(&cfs_b->runtime, runtime);
baa9be4f 4788 cfs_b->distribute_running = 0;
d8b4986d
PT
4789 raw_spin_unlock(&cfs_b->lock);
4790}
4791
d3d9dc33
PT
4792/*
4793 * When a group wakes up we want to make sure that its quota is not already
4794 * expired/exceeded, otherwise it may be allowed to steal additional ticks of
 4795	 * runtime as update_curr() throttling cannot trigger until it's on-rq.
4796 */
4797static void check_enqueue_throttle(struct cfs_rq *cfs_rq)
4798{
56f570e5
PT
4799 if (!cfs_bandwidth_used())
4800 return;
4801
d3d9dc33
PT
4802 /* an active group must be handled by the update_curr()->put() path */
4803 if (!cfs_rq->runtime_enabled || cfs_rq->curr)
4804 return;
4805
4806 /* ensure the group is not already throttled */
4807 if (cfs_rq_throttled(cfs_rq))
4808 return;
4809
4810 /* update runtime allocation */
4811 account_cfs_rq_runtime(cfs_rq, 0);
4812 if (cfs_rq->runtime_remaining <= 0)
4813 throttle_cfs_rq(cfs_rq);
4814}
4815
55e16d30
PZ
4816static void sync_throttle(struct task_group *tg, int cpu)
4817{
4818 struct cfs_rq *pcfs_rq, *cfs_rq;
4819
4820 if (!cfs_bandwidth_used())
4821 return;
4822
4823 if (!tg->parent)
4824 return;
4825
4826 cfs_rq = tg->cfs_rq[cpu];
4827 pcfs_rq = tg->parent->cfs_rq[cpu];
4828
4829 cfs_rq->throttle_count = pcfs_rq->throttle_count;
b8922125 4830 cfs_rq->throttled_clock_task = rq_clock_task(cpu_rq(cpu));
55e16d30
PZ
4831}
4832
d3d9dc33 4833/* conditionally throttle active cfs_rq's from put_prev_entity() */
678d5718 4834static bool check_cfs_rq_runtime(struct cfs_rq *cfs_rq)
d3d9dc33 4835{
56f570e5 4836 if (!cfs_bandwidth_used())
678d5718 4837 return false;
56f570e5 4838
d3d9dc33 4839 if (likely(!cfs_rq->runtime_enabled || cfs_rq->runtime_remaining > 0))
678d5718 4840 return false;
d3d9dc33
PT
4841
4842 /*
4843 * it's possible for a throttled entity to be forced into a running
4844 * state (e.g. set_curr_task), in this case we're finished.
4845 */
4846 if (cfs_rq_throttled(cfs_rq))
678d5718 4847 return true;
d3d9dc33
PT
4848
4849 throttle_cfs_rq(cfs_rq);
678d5718 4850 return true;
d3d9dc33 4851}
029632fb 4852
029632fb
PZ
4853static enum hrtimer_restart sched_cfs_slack_timer(struct hrtimer *timer)
4854{
4855 struct cfs_bandwidth *cfs_b =
4856 container_of(timer, struct cfs_bandwidth, slack_timer);
77a4d1a1 4857
029632fb
PZ
4858 do_sched_cfs_slack_timer(cfs_b);
4859
4860 return HRTIMER_NORESTART;
4861}
4862
4863static enum hrtimer_restart sched_cfs_period_timer(struct hrtimer *timer)
4864{
4865 struct cfs_bandwidth *cfs_b =
4866 container_of(timer, struct cfs_bandwidth, period_timer);
029632fb
PZ
4867 int overrun;
4868 int idle = 0;
4869
51f2176d 4870 raw_spin_lock(&cfs_b->lock);
029632fb 4871 for (;;) {
77a4d1a1 4872 overrun = hrtimer_forward_now(timer, cfs_b->period);
029632fb
PZ
4873 if (!overrun)
4874 break;
4875
4876 idle = do_sched_cfs_period_timer(cfs_b, overrun);
4877 }
4cfafd30
PZ
4878 if (idle)
4879 cfs_b->period_active = 0;
51f2176d 4880 raw_spin_unlock(&cfs_b->lock);
029632fb
PZ
4881
4882 return idle ? HRTIMER_NORESTART : HRTIMER_RESTART;
4883}
4884
4885void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
4886{
4887 raw_spin_lock_init(&cfs_b->lock);
4888 cfs_b->runtime = 0;
4889 cfs_b->quota = RUNTIME_INF;
4890 cfs_b->period = ns_to_ktime(default_cfs_period());
4891
4892 INIT_LIST_HEAD(&cfs_b->throttled_cfs_rq);
4cfafd30 4893 hrtimer_init(&cfs_b->period_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED);
029632fb
PZ
4894 cfs_b->period_timer.function = sched_cfs_period_timer;
4895 hrtimer_init(&cfs_b->slack_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
4896 cfs_b->slack_timer.function = sched_cfs_slack_timer;
baa9be4f 4897 cfs_b->distribute_running = 0;
029632fb
PZ
4898}
4899
4900static void init_cfs_rq_runtime(struct cfs_rq *cfs_rq)
4901{
4902 cfs_rq->runtime_enabled = 0;
4903 INIT_LIST_HEAD(&cfs_rq->throttled_list);
4904}
4905
77a4d1a1 4906void start_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
029632fb 4907{
f1d1be8a
XP
4908 u64 overrun;
4909
4cfafd30 4910 lockdep_assert_held(&cfs_b->lock);
029632fb 4911
f1d1be8a
XP
4912 if (cfs_b->period_active)
4913 return;
4914
4915 cfs_b->period_active = 1;
4916 overrun = hrtimer_forward_now(&cfs_b->period_timer, cfs_b->period);
4917 cfs_b->runtime_expires += (overrun + 1) * ktime_to_ns(cfs_b->period);
4918 cfs_b->expires_seq++;
4919 hrtimer_start_expires(&cfs_b->period_timer, HRTIMER_MODE_ABS_PINNED);
029632fb
PZ
4920}
4921
4922static void destroy_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
4923{
7f1a169b
TH
4924 /* init_cfs_bandwidth() was not called */
4925 if (!cfs_b->throttled_cfs_rq.next)
4926 return;
4927
029632fb
PZ
4928 hrtimer_cancel(&cfs_b->period_timer);
4929 hrtimer_cancel(&cfs_b->slack_timer);
4930}
4931
502ce005 4932/*
97fb7a0a 4933 * Both these CPU hotplug callbacks race against unregister_fair_sched_group()
502ce005
PZ
4934 *
4935 * The race is harmless, since modifying bandwidth settings of unhooked group
4936 * bits doesn't do much.
4937 */
4938
 4939/* cpu online callback */
0e59bdae
KT
4940static void __maybe_unused update_runtime_enabled(struct rq *rq)
4941{
502ce005 4942 struct task_group *tg;
0e59bdae 4943
502ce005
PZ
4944 lockdep_assert_held(&rq->lock);
4945
4946 rcu_read_lock();
4947 list_for_each_entry_rcu(tg, &task_groups, list) {
4948 struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth;
4949 struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)];
0e59bdae
KT
4950
4951 raw_spin_lock(&cfs_b->lock);
4952 cfs_rq->runtime_enabled = cfs_b->quota != RUNTIME_INF;
4953 raw_spin_unlock(&cfs_b->lock);
4954 }
502ce005 4955 rcu_read_unlock();
0e59bdae
KT
4956}
4957
502ce005 4958/* cpu offline callback */
38dc3348 4959static void __maybe_unused unthrottle_offline_cfs_rqs(struct rq *rq)
029632fb 4960{
502ce005
PZ
4961 struct task_group *tg;
4962
4963 lockdep_assert_held(&rq->lock);
4964
4965 rcu_read_lock();
4966 list_for_each_entry_rcu(tg, &task_groups, list) {
4967 struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)];
029632fb 4968
029632fb
PZ
4969 if (!cfs_rq->runtime_enabled)
4970 continue;
4971
4972 /*
4973 * clock_task is not advancing so we just need to make sure
4974 * there's some valid quota amount
4975 */
51f2176d 4976 cfs_rq->runtime_remaining = 1;
0e59bdae 4977 /*
97fb7a0a 4978 * Offline rq is schedulable till CPU is completely disabled
0e59bdae
KT
4979 * in take_cpu_down(), so we prevent new cfs throttling here.
4980 */
4981 cfs_rq->runtime_enabled = 0;
4982
029632fb
PZ
4983 if (cfs_rq_throttled(cfs_rq))
4984 unthrottle_cfs_rq(cfs_rq);
4985 }
502ce005 4986 rcu_read_unlock();
029632fb
PZ
4987}
4988
4989#else /* CONFIG_CFS_BANDWIDTH */
f1b17280
PT
4990static inline u64 cfs_rq_clock_task(struct cfs_rq *cfs_rq)
4991{
78becc27 4992 return rq_clock_task(rq_of(cfs_rq));
f1b17280
PT
4993}
4994
9dbdb155 4995static void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec) {}
678d5718 4996static bool check_cfs_rq_runtime(struct cfs_rq *cfs_rq) { return false; }
d3d9dc33 4997static void check_enqueue_throttle(struct cfs_rq *cfs_rq) {}
55e16d30 4998static inline void sync_throttle(struct task_group *tg, int cpu) {}
6c16a6dc 4999static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq) {}
85dac906
PT
5000
5001static inline int cfs_rq_throttled(struct cfs_rq *cfs_rq)
5002{
5003 return 0;
5004}
64660c86
PT
5005
5006static inline int throttled_hierarchy(struct cfs_rq *cfs_rq)
5007{
5008 return 0;
5009}
5010
5011static inline int throttled_lb_pair(struct task_group *tg,
5012 int src_cpu, int dest_cpu)
5013{
5014 return 0;
5015}
029632fb
PZ
5016
5017void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b) {}
5018
5019#ifdef CONFIG_FAIR_GROUP_SCHED
5020static void init_cfs_rq_runtime(struct cfs_rq *cfs_rq) {}
ab84d31e
PT
5021#endif
5022
029632fb
PZ
5023static inline struct cfs_bandwidth *tg_cfs_bandwidth(struct task_group *tg)
5024{
5025 return NULL;
5026}
5027static inline void destroy_cfs_bandwidth(struct cfs_bandwidth *cfs_b) {}
0e59bdae 5028static inline void update_runtime_enabled(struct rq *rq) {}
a4c96ae3 5029static inline void unthrottle_offline_cfs_rqs(struct rq *rq) {}
029632fb
PZ
5030
5031#endif /* CONFIG_CFS_BANDWIDTH */
5032
bf0f6f24
IM
5033/**************************************************
5034 * CFS operations on tasks:
5035 */
5036
8f4d37ec
PZ
5037#ifdef CONFIG_SCHED_HRTICK
5038static void hrtick_start_fair(struct rq *rq, struct task_struct *p)
5039{
8f4d37ec
PZ
5040 struct sched_entity *se = &p->se;
5041 struct cfs_rq *cfs_rq = cfs_rq_of(se);
5042
9148a3a1 5043 SCHED_WARN_ON(task_rq(p) != rq);
8f4d37ec 5044
8bf46a39 5045 if (rq->cfs.h_nr_running > 1) {
8f4d37ec
PZ
5046 u64 slice = sched_slice(cfs_rq, se);
5047 u64 ran = se->sum_exec_runtime - se->prev_sum_exec_runtime;
5048 s64 delta = slice - ran;
5049
5050 if (delta < 0) {
5051 if (rq->curr == p)
8875125e 5052 resched_curr(rq);
8f4d37ec
PZ
5053 return;
5054 }
31656519 5055 hrtick_start(rq, delta);
8f4d37ec
PZ
5056 }
5057}
a4c2f00f
PZ
5058
5059/*
5060 * called from enqueue/dequeue and updates the hrtick when the
5061 * current task is from our class and nr_running is low enough
5062 * to matter.
5063 */
5064static void hrtick_update(struct rq *rq)
5065{
5066 struct task_struct *curr = rq->curr;
5067
b39e66ea 5068 if (!hrtick_enabled(rq) || curr->sched_class != &fair_sched_class)
a4c2f00f
PZ
5069 return;
5070
5071 if (cfs_rq_of(&curr->se)->nr_running < sched_nr_latency)
5072 hrtick_start_fair(rq, curr);
5073}
55e12e5e 5074#else /* !CONFIG_SCHED_HRTICK */
8f4d37ec
PZ
5075static inline void
5076hrtick_start_fair(struct rq *rq, struct task_struct *p)
5077{
5078}
a4c2f00f
PZ
5079
5080static inline void hrtick_update(struct rq *rq)
5081{
5082}
8f4d37ec
PZ
5083#endif
5084
bf0f6f24
IM
5085/*
5086 * The enqueue_task method is called before nr_running is
5087 * increased. Here we update the fair scheduling stats and
5088 * then put the task into the rbtree:
5089 */
ea87bb78 5090static void
371fd7e7 5091enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
bf0f6f24
IM
5092{
5093 struct cfs_rq *cfs_rq;
62fb1851 5094 struct sched_entity *se = &p->se;
bf0f6f24 5095
2539fc82
PB
5096 /*
5097 * The code below (indirectly) updates schedutil which looks at
5098 * the cfs_rq utilization to select a frequency.
5099 * Let's add the task's estimated utilization to the cfs_rq's
5100 * estimated utilization, before we update schedutil.
5101 */
5102 util_est_enqueue(&rq->cfs, p);
5103
8c34ab19
RW
5104 /*
5105 * If in_iowait is set, the code below may not trigger any cpufreq
5106 * utilization updates, so do it here explicitly with the IOWAIT flag
5107 * passed.
5108 */
5109 if (p->in_iowait)
674e7541 5110 cpufreq_update_util(rq, SCHED_CPUFREQ_IOWAIT);
8c34ab19 5111
bf0f6f24 5112 for_each_sched_entity(se) {
62fb1851 5113 if (se->on_rq)
bf0f6f24
IM
5114 break;
5115 cfs_rq = cfs_rq_of(se);
88ec22d3 5116 enqueue_entity(cfs_rq, se, flags);
85dac906
PT
5117
5118 /*
5119 * end evaluation on encountering a throttled cfs_rq
5120 *
5121 * note: in the case of encountering a throttled cfs_rq we will
5122 * post the final h_nr_running increment below.
e210bffd 5123 */
85dac906
PT
5124 if (cfs_rq_throttled(cfs_rq))
5125 break;
953bfcd1 5126 cfs_rq->h_nr_running++;
85dac906 5127
88ec22d3 5128 flags = ENQUEUE_WAKEUP;
bf0f6f24 5129 }
8f4d37ec 5130
2069dd75 5131 for_each_sched_entity(se) {
0f317143 5132 cfs_rq = cfs_rq_of(se);
953bfcd1 5133 cfs_rq->h_nr_running++;
2069dd75 5134
85dac906
PT
5135 if (cfs_rq_throttled(cfs_rq))
5136 break;
5137
88c0616e 5138 update_load_avg(cfs_rq, se, UPDATE_TG);
1ea6c46a 5139 update_cfs_group(se);
2069dd75
PZ
5140 }
5141
cd126afe 5142 if (!se)
72465447 5143 add_nr_running(rq, 1);
cd126afe 5144
a4c2f00f 5145 hrtick_update(rq);
bf0f6f24
IM
5146}
5147
2f36825b
VP
5148static void set_next_buddy(struct sched_entity *se);
5149
bf0f6f24
IM
5150/*
5151 * The dequeue_task method is called before nr_running is
5152 * decreased. We remove the task from the rbtree and
5153 * update the fair scheduling stats:
5154 */
371fd7e7 5155static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
bf0f6f24
IM
5156{
5157 struct cfs_rq *cfs_rq;
62fb1851 5158 struct sched_entity *se = &p->se;
2f36825b 5159 int task_sleep = flags & DEQUEUE_SLEEP;
bf0f6f24
IM
5160
5161 for_each_sched_entity(se) {
5162 cfs_rq = cfs_rq_of(se);
371fd7e7 5163 dequeue_entity(cfs_rq, se, flags);
85dac906
PT
5164
5165 /*
5166 * end evaluation on encountering a throttled cfs_rq
5167 *
5168 * note: in the case of encountering a throttled cfs_rq we will
5169 * post the final h_nr_running decrement below.
5170 */
5171 if (cfs_rq_throttled(cfs_rq))
5172 break;
953bfcd1 5173 cfs_rq->h_nr_running--;
2069dd75 5174
bf0f6f24 5175 /* Don't dequeue parent if it has other entities besides us */
2f36825b 5176 if (cfs_rq->load.weight) {
754bd598
KK
5177 /* Avoid re-evaluating load for this entity: */
5178 se = parent_entity(se);
2f36825b
VP
5179 /*
5180 * Bias pick_next to pick a task from this cfs_rq, as
5181 * p is sleeping when it is within its sched_slice.
5182 */
754bd598
KK
5183 if (task_sleep && se && !throttled_hierarchy(cfs_rq))
5184 set_next_buddy(se);
bf0f6f24 5185 break;
2f36825b 5186 }
371fd7e7 5187 flags |= DEQUEUE_SLEEP;
bf0f6f24 5188 }
8f4d37ec 5189
2069dd75 5190 for_each_sched_entity(se) {
0f317143 5191 cfs_rq = cfs_rq_of(se);
953bfcd1 5192 cfs_rq->h_nr_running--;
2069dd75 5193
85dac906
PT
5194 if (cfs_rq_throttled(cfs_rq))
5195 break;
5196
88c0616e 5197 update_load_avg(cfs_rq, se, UPDATE_TG);
1ea6c46a 5198 update_cfs_group(se);
2069dd75
PZ
5199 }
5200
cd126afe 5201 if (!se)
72465447 5202 sub_nr_running(rq, 1);
cd126afe 5203
7f65ea42 5204 util_est_dequeue(&rq->cfs, p, task_sleep);
a4c2f00f 5205 hrtick_update(rq);
bf0f6f24
IM
5206}
5207
e7693a36 5208#ifdef CONFIG_SMP
10e2f1ac
PZ
5209
5210/* Working cpumask for: load_balance, load_balance_newidle. */
5211DEFINE_PER_CPU(cpumask_var_t, load_balance_mask);
5212DEFINE_PER_CPU(cpumask_var_t, select_idle_mask);
5213
9fd81dd5 5214#ifdef CONFIG_NO_HZ_COMMON
3289bdb4
PZ
5215/*
 5216 * per rq 'load' array crap; XXX kill this.
5217 */
5218
5219/*
d937cdc5 5220 * The exact cpuload calculated at every tick would be:
3289bdb4 5221 *
d937cdc5
PZ
5222 * load' = (1 - 1/2^i) * load + (1/2^i) * cur_load
5223 *
97fb7a0a
IM
5224 * If a CPU misses updates for n ticks (as it was idle) and update gets
5225 * called on the n+1-th tick when CPU may be busy, then we have:
d937cdc5
PZ
5226 *
5227 * load_n = (1 - 1/2^i)^n * load_0
5228 * load_n+1 = (1 - 1/2^i) * load_n + (1/2^i) * cur_load
3289bdb4
PZ
5229 *
5230 * decay_load_missed() below does efficient calculation of
3289bdb4 5231 *
d937cdc5
PZ
5232 * load' = (1 - 1/2^i)^n * load
5233 *
5234 * Because x^(n+m) := x^n * x^m we can decompose any x^n in power-of-2 factors.
5235 * This allows us to precompute the above in said factors, thereby allowing the
5236 * reduction of an arbitrary n in O(log_2 n) steps. (See also
5237 * fixed_power_int())
3289bdb4 5238 *
d937cdc5 5239 * The calculation is approximated on a 128 point scale.
3289bdb4
PZ
5240 */
5241#define DEGRADE_SHIFT 7
d937cdc5
PZ
5242
5243static const u8 degrade_zero_ticks[CPU_LOAD_IDX_MAX] = {0, 8, 32, 64, 128};
5244static const u8 degrade_factor[CPU_LOAD_IDX_MAX][DEGRADE_SHIFT + 1] = {
5245 { 0, 0, 0, 0, 0, 0, 0, 0 },
5246 { 64, 32, 8, 0, 0, 0, 0, 0 },
5247 { 96, 72, 40, 12, 1, 0, 0, 0 },
5248 { 112, 98, 75, 43, 15, 1, 0, 0 },
5249 { 120, 112, 98, 76, 45, 16, 2, 0 }
5250};
3289bdb4
PZ
5251
5252/*
5253 * Update cpu_load for any missed ticks, due to tickless idle. The backlog
5254 * would be when CPU is idle and so we just decay the old load without
5255 * adding any new load.
5256 */
5257static unsigned long
5258decay_load_missed(unsigned long load, unsigned long missed_updates, int idx)
5259{
5260 int j = 0;
5261
5262 if (!missed_updates)
5263 return load;
5264
5265 if (missed_updates >= degrade_zero_ticks[idx])
5266 return 0;
5267
5268 if (idx == 1)
5269 return load >> missed_updates;
5270
5271 while (missed_updates) {
5272 if (missed_updates % 2)
5273 load = (load * degrade_factor[idx][j]) >> DEGRADE_SHIFT;
5274
5275 missed_updates >>= 1;
5276 j++;
5277 }
5278 return load;
5279}
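/*
 * Illustrative userspace sketch, not part of fair.c: a worked example of the
 * power-of-2 decomposition described above.  For index idx and n missed
 * ticks, (1 - 1/2^idx)^n is approximated by multiplying together the
 * degrade_factor[] entries selected by the set bits of n, each on a
 * 128-point (DEGRADE_SHIFT) scale.  The ex_* names and the numbers are
 * invented; the double-precision value is only a cross-check.
 */
#include <stdio.h>
#include <math.h>

#define EX_DEGRADE_SHIFT 7

static const unsigned char ex_degrade_factor[5][EX_DEGRADE_SHIFT + 1] = {
	{   0,   0,  0,  0,  0,  0, 0, 0 },
	{  64,  32,  8,  0,  0,  0, 0, 0 },
	{  96,  72, 40, 12,  1,  0, 0, 0 },
	{ 112,  98, 75, 43, 15,  1, 0, 0 },
	{ 120, 112, 98, 76, 45, 16, 2, 0 },
};

int main(void)
{
	unsigned long load = 1024, missed = 5;	/* 5 = 4 + 1 -> factors j=0 and j=2 */
	int idx = 2, j = 0;
	unsigned long approx = load;

	for (unsigned long n = missed; n; n >>= 1, j++)
		if (n & 1)
			approx = (approx * ex_degrade_factor[idx][j]) >> EX_DEGRADE_SHIFT;

	printf("fixed point: %lu  exact: %.1f\n",
	       approx, load * pow(1.0 - 1.0 / (1 << idx), missed));
	return 0;
}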
e022e0d3
PZ
5280
5281static struct {
5282 cpumask_var_t idle_cpus_mask;
5283 atomic_t nr_cpus;
f643ea22 5284 int has_blocked; /* Idle CPUs have blocked load */
e022e0d3 5285 unsigned long next_balance; /* in jiffy units */
f643ea22 5286 unsigned long next_blocked; /* Next update of blocked load in jiffies */
e022e0d3
PZ
5287} nohz ____cacheline_aligned;
5288
9fd81dd5 5289#endif /* CONFIG_NO_HZ_COMMON */
3289bdb4 5290
59543275 5291/**
cee1afce 5292 * __cpu_load_update - update the rq->cpu_load[] statistics
59543275
BP
5293 * @this_rq: The rq to update statistics for
5294 * @this_load: The current load
5295 * @pending_updates: The number of missed updates
59543275 5296 *
3289bdb4 5297 * Update rq->cpu_load[] statistics. This function is usually called every
59543275
BP
5298 * scheduler tick (TICK_NSEC).
5299 *
5300 * This function computes a decaying average:
5301 *
5302 * load[i]' = (1 - 1/2^i) * load[i] + (1/2^i) * load
5303 *
5304 * Because of NOHZ it might not get called on every tick which gives need for
5305 * the @pending_updates argument.
5306 *
5307 * load[i]_n = (1 - 1/2^i) * load[i]_n-1 + (1/2^i) * load_n-1
5308 * = A * load[i]_n-1 + B ; A := (1 - 1/2^i), B := (1/2^i) * load
5309 * = A * (A * load[i]_n-2 + B) + B
5310 * = A * (A * (A * load[i]_n-3 + B) + B) + B
5311 * = A^3 * load[i]_n-3 + (A^2 + A + 1) * B
5312 * = A^n * load[i]_0 + (A^(n-1) + A^(n-2) + ... + 1) * B
5313 * = A^n * load[i]_0 + ((1 - A^n) / (1 - A)) * B
5314 * = (1 - 1/2^i)^n * (load[i]_0 - load) + load
5315 *
5316 * In the above we've assumed load_n := load, which is true for NOHZ_FULL as
5317 * any change in load would have resulted in the tick being turned back on.
5318 *
5319 * For regular NOHZ, this reduces to:
5320 *
5321 * load[i]_n = (1 - 1/2^i)^n * load[i]_0
5322 *
 5323 * see decay_load_missed(). For NOHZ_FULL we get to subtract and add the extra
1f41906a 5324 * term.
3289bdb4 5325 */
1f41906a
FW
5326static void cpu_load_update(struct rq *this_rq, unsigned long this_load,
5327 unsigned long pending_updates)
3289bdb4 5328{
9fd81dd5 5329 unsigned long __maybe_unused tickless_load = this_rq->cpu_load[0];
3289bdb4
PZ
5330 int i, scale;
5331
5332 this_rq->nr_load_updates++;
5333
5334 /* Update our load: */
5335 this_rq->cpu_load[0] = this_load; /* Fasttrack for idx 0 */
5336 for (i = 1, scale = 2; i < CPU_LOAD_IDX_MAX; i++, scale += scale) {
5337 unsigned long old_load, new_load;
5338
5339 /* scale is effectively 1 << i now, and >> i divides by scale */
5340
7400d3bb 5341 old_load = this_rq->cpu_load[i];
9fd81dd5 5342#ifdef CONFIG_NO_HZ_COMMON
3289bdb4 5343 old_load = decay_load_missed(old_load, pending_updates - 1, i);
7400d3bb
BP
5344 if (tickless_load) {
5345 old_load -= decay_load_missed(tickless_load, pending_updates - 1, i);
5346 /*
5347 * old_load can never be a negative value because a
5348 * decayed tickless_load cannot be greater than the
5349 * original tickless_load.
5350 */
5351 old_load += tickless_load;
5352 }
9fd81dd5 5353#endif
3289bdb4
PZ
5354 new_load = this_load;
5355 /*
5356 * Round up the averaging division if load is increasing. This
5357 * prevents us from getting stuck on 9 if the load is 10, for
5358 * example.
5359 */
5360 if (new_load > old_load)
5361 new_load += scale - 1;
5362
5363 this_rq->cpu_load[i] = (old_load * (scale - 1) + new_load) >> i;
5364 }
3289bdb4
PZ
5365}
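/*
 * Illustrative userspace sketch, not part of fair.c: verify the closed form
 * derived in the comment above, load[i]_n = A^n * (load[i]_0 - load) + load
 * with A = (1 - 1/2^i), against iterating the per-tick recurrence
 * load' = A * load + (1 - A) * load_n for a constant tickless load.
 * Plain doubles, made-up numbers.
 */
#include <stdio.h>
#include <math.h>

int main(void)
{
	const int i = 3, n = 10;
	const double A = 1.0 - 1.0 / (1 << i);
	const double load0 = 900.0, tickless = 100.0;
	double iter = load0;

	for (int k = 0; k < n; k++)
		iter = A * iter + (1.0 - A) * tickless;

	printf("iterated: %.3f  closed form: %.3f\n",
	       iter, pow(A, n) * (load0 - tickless) + tickless);
	return 0;
}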
5366
7ea241af 5367/* Used instead of source_load when we know the type == 0 */
c7132dd6 5368static unsigned long weighted_cpuload(struct rq *rq)
7ea241af 5369{
c7132dd6 5370 return cfs_rq_runnable_load_avg(&rq->cfs);
7ea241af
YD
5371}
5372
3289bdb4 5373#ifdef CONFIG_NO_HZ_COMMON
1f41906a
FW
5374/*
5375 * There is no sane way to deal with nohz on smp when using jiffies because the
97fb7a0a 5376 * CPU doing the jiffies update might drift wrt the CPU doing the jiffy reading
1f41906a
FW
5377 * causing off-by-one errors in observed deltas; {0,2} instead of {1,1}.
5378 *
5379 * Therefore we need to avoid the delta approach from the regular tick when
5380 * possible since that would seriously skew the load calculation. This is why we
5381 * use cpu_load_update_periodic() for CPUs out of nohz. However we'll rely on
5382 * jiffies deltas for updates happening while in nohz mode (idle ticks, idle
5383 * loop exit, nohz_idle_balance, nohz full exit...)
5384 *
5385 * This means we might still be one tick off for nohz periods.
5386 */
5387
5388static void cpu_load_update_nohz(struct rq *this_rq,
5389 unsigned long curr_jiffies,
5390 unsigned long load)
be68a682
FW
5391{
5392 unsigned long pending_updates;
5393
5394 pending_updates = curr_jiffies - this_rq->last_load_update_tick;
5395 if (pending_updates) {
5396 this_rq->last_load_update_tick = curr_jiffies;
5397 /*
5398 * In the regular NOHZ case, we were idle, this means load 0.
5399 * In the NOHZ_FULL case, we were non-idle, we should consider
5400 * its weighted load.
5401 */
1f41906a 5402 cpu_load_update(this_rq, load, pending_updates);
be68a682
FW
5403 }
5404}
5405
3289bdb4
PZ
5406/*
5407 * Called from nohz_idle_balance() to update the load ratings before doing the
5408 * idle balance.
5409 */
cee1afce 5410static void cpu_load_update_idle(struct rq *this_rq)
3289bdb4 5411{
3289bdb4
PZ
5412 /*
5413 * bail if there's load or we're actually up-to-date.
5414 */
c7132dd6 5415 if (weighted_cpuload(this_rq))
3289bdb4
PZ
5416 return;
5417
1f41906a 5418 cpu_load_update_nohz(this_rq, READ_ONCE(jiffies), 0);
3289bdb4
PZ
5419}
5420
5421/*
1f41906a
FW
5422 * Record CPU load on nohz entry so we know the tickless load to account
 5423 * on nohz exit. cpu_load[0] then happens to be updated more frequently
 5424 * than the other cpu_load[idx] entries, but that is fine because cpu_load
 5425 * readers shouldn't rely on synchronized cpu_load[*] updates.
3289bdb4 5426 */
1f41906a 5427void cpu_load_update_nohz_start(void)
3289bdb4
PZ
5428{
5429 struct rq *this_rq = this_rq();
1f41906a
FW
5430
5431 /*
5432 * This is all lockless but should be fine. If weighted_cpuload changes
 5433 * concurrently we'll exit nohz. And the cpu_load write can race with
 5434 * cpu_load_update_idle(), but both updaters would be writing the same value.
5435 */
c7132dd6 5436 this_rq->cpu_load[0] = weighted_cpuload(this_rq);
1f41906a
FW
5437}
5438
5439/*
5440 * Account the tickless load in the end of a nohz frame.
5441 */
5442void cpu_load_update_nohz_stop(void)
5443{
316c1608 5444 unsigned long curr_jiffies = READ_ONCE(jiffies);
1f41906a
FW
5445 struct rq *this_rq = this_rq();
5446 unsigned long load;
8a8c69c3 5447 struct rq_flags rf;
3289bdb4
PZ
5448
5449 if (curr_jiffies == this_rq->last_load_update_tick)
5450 return;
5451
c7132dd6 5452 load = weighted_cpuload(this_rq);
8a8c69c3 5453 rq_lock(this_rq, &rf);
b52fad2d 5454 update_rq_clock(this_rq);
1f41906a 5455 cpu_load_update_nohz(this_rq, curr_jiffies, load);
8a8c69c3 5456 rq_unlock(this_rq, &rf);
3289bdb4 5457}
1f41906a
FW
5458#else /* !CONFIG_NO_HZ_COMMON */
5459static inline void cpu_load_update_nohz(struct rq *this_rq,
5460 unsigned long curr_jiffies,
5461 unsigned long load) { }
5462#endif /* CONFIG_NO_HZ_COMMON */
5463
5464static void cpu_load_update_periodic(struct rq *this_rq, unsigned long load)
5465{
9fd81dd5 5466#ifdef CONFIG_NO_HZ_COMMON
1f41906a
FW
5467 /* See the mess around cpu_load_update_nohz(). */
5468 this_rq->last_load_update_tick = READ_ONCE(jiffies);
9fd81dd5 5469#endif
1f41906a
FW
5470 cpu_load_update(this_rq, load, 1);
5471}
3289bdb4
PZ
5472
5473/*
5474 * Called from scheduler_tick()
5475 */
cee1afce 5476void cpu_load_update_active(struct rq *this_rq)
3289bdb4 5477{
c7132dd6 5478 unsigned long load = weighted_cpuload(this_rq);
1f41906a
FW
5479
5480 if (tick_nohz_tick_stopped())
5481 cpu_load_update_nohz(this_rq, READ_ONCE(jiffies), load);
5482 else
5483 cpu_load_update_periodic(this_rq, load);
3289bdb4
PZ
5484}
5485
029632fb 5486/*
97fb7a0a 5487 * Return a low guess at the load of a migration-source CPU weighted
029632fb
PZ
5488 * according to the scheduling class and "nice" value.
5489 *
5490 * We want to under-estimate the load of migration sources, to
5491 * balance conservatively.
5492 */
5493static unsigned long source_load(int cpu, int type)
5494{
5495 struct rq *rq = cpu_rq(cpu);
c7132dd6 5496 unsigned long total = weighted_cpuload(rq);
029632fb
PZ
5497
5498 if (type == 0 || !sched_feat(LB_BIAS))
5499 return total;
5500
5501 return min(rq->cpu_load[type-1], total);
5502}
5503
5504/*
97fb7a0a 5505 * Return a high guess at the load of a migration-target CPU weighted
029632fb
PZ
5506 * according to the scheduling class and "nice" value.
5507 */
5508static unsigned long target_load(int cpu, int type)
5509{
5510 struct rq *rq = cpu_rq(cpu);
c7132dd6 5511 unsigned long total = weighted_cpuload(rq);
029632fb
PZ
5512
5513 if (type == 0 || !sched_feat(LB_BIAS))
5514 return total;
5515
5516 return max(rq->cpu_load[type-1], total);
5517}
5518
ced549fa 5519static unsigned long capacity_of(int cpu)
029632fb 5520{
ced549fa 5521 return cpu_rq(cpu)->cpu_capacity;
029632fb
PZ
5522}
5523
ca6d75e6
VG
5524static unsigned long capacity_orig_of(int cpu)
5525{
5526 return cpu_rq(cpu)->cpu_capacity_orig;
5527}
5528
029632fb
PZ
5529static unsigned long cpu_avg_load_per_task(int cpu)
5530{
5531 struct rq *rq = cpu_rq(cpu);
316c1608 5532 unsigned long nr_running = READ_ONCE(rq->cfs.h_nr_running);
c7132dd6 5533 unsigned long load_avg = weighted_cpuload(rq);
029632fb
PZ
5534
5535 if (nr_running)
b92486cb 5536 return load_avg / nr_running;
029632fb
PZ
5537
5538 return 0;
5539}
5540
c58d25f3
PZ
5541static void record_wakee(struct task_struct *p)
5542{
5543 /*
 5544 * Only decay a single time; tasks that have less than 1 wakeup per
5545 * jiffy will not have built up many flips.
5546 */
5547 if (time_after(jiffies, current->wakee_flip_decay_ts + HZ)) {
5548 current->wakee_flips >>= 1;
5549 current->wakee_flip_decay_ts = jiffies;
5550 }
5551
5552 if (current->last_wakee != p) {
5553 current->last_wakee = p;
5554 current->wakee_flips++;
5555 }
5556}
5557
63b0e9ed
MG
5558/*
5559 * Detect M:N waker/wakee relationships via a switching-frequency heuristic.
c58d25f3 5560 *
63b0e9ed 5561 * A waker of many should wake a different task than the one last awakened
c58d25f3
PZ
5562 * at a frequency roughly N times higher than one of its wakees.
5563 *
5564 * In order to determine whether we should let the load spread vs consolidating
5565 * to shared cache, we look for a minimum 'flip' frequency of llc_size in one
 5566 * partner, and a factor of llc_size higher frequency in the other.
5567 *
5568 * With both conditions met, we can be relatively sure that the relationship is
5569 * non-monogamous, with partner count exceeding socket size.
5570 *
5571 * Waker/wakee being client/server, worker/dispatcher, interrupt source or
 5572 * whatever is irrelevant; the spread criterion is that the apparent partner
 5573 * count exceeds the socket size.
63b0e9ed 5574 */
62470419
MW
5575static int wake_wide(struct task_struct *p)
5576{
63b0e9ed
MG
5577 unsigned int master = current->wakee_flips;
5578 unsigned int slave = p->wakee_flips;
7d9ffa89 5579 int factor = this_cpu_read(sd_llc_size);
62470419 5580
63b0e9ed
MG
5581 if (master < slave)
5582 swap(master, slave);
5583 if (slave < factor || master < slave * factor)
5584 return 0;
5585 return 1;
62470419
MW
5586}
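/*
 * Illustrative userspace sketch, not part of fair.c: the wake_wide()
 * decision with hypothetical flip counts and an LLC of 8 CPUs.  A
 * dispatcher that flipped 80 times waking a worker that flipped 9 times
 * is treated as a wide M:N relationship (spread), while 80 vs 5 is not,
 * because the wakee has not flipped at least llc_size times.  The ex_*
 * names and numbers are made up.
 */
#include <stdio.h>

static int ex_wake_wide(unsigned int master, unsigned int slave, unsigned int factor)
{
	if (master < slave) {		/* mirrors swap(master, slave) */
		unsigned int tmp = master;
		master = slave;
		slave = tmp;
	}
	if (slave < factor || master < slave * factor)
		return 0;		/* keep trying an affine wakeup */
	return 1;			/* spread: partner count exceeds LLC size */
}

int main(void)
{
	printf("%d %d\n", ex_wake_wide(80, 9, 8), ex_wake_wide(80, 5, 8));	/* 1 0 */
	return 0;
}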
5587
90001d67 5588/*
d153b153
PZ
5589 * The purpose of wake_affine() is to quickly determine on which CPU we can run
5590 * soonest. For the purpose of speed we only consider the waking and previous
5591 * CPU.
90001d67 5592 *
7332dec0
MG
 5593 * wake_affine_idle() - only considers 'now'; it checks whether the waking CPU is
5594 * cache-affine and is (or will be) idle.
f2cdd9cc
PZ
5595 *
5596 * wake_affine_weight() - considers the weight to reflect the average
5597 * scheduling latency of the CPUs. This seems to work
5598 * for the overloaded case.
90001d67 5599 */
3b76c4a3 5600static int
89a55f56 5601wake_affine_idle(int this_cpu, int prev_cpu, int sync)
90001d67 5602{
7332dec0
MG
5603 /*
5604 * If this_cpu is idle, it implies the wakeup is from interrupt
5605 * context. Only allow the move if cache is shared. Otherwise an
5606 * interrupt intensive workload could force all tasks onto one
5607 * node depending on the IO topology or IRQ affinity settings.
806486c3
MG
5608 *
5609 * If the prev_cpu is idle and cache affine then avoid a migration.
5610 * There is no guarantee that the cache hot data from an interrupt
5611 * is more important than cache hot data on the prev_cpu and from
5612 * a cpufreq perspective, it's better to have higher utilisation
5613 * on one CPU.
7332dec0 5614 */
943d355d
RJ
5615 if (available_idle_cpu(this_cpu) && cpus_share_cache(this_cpu, prev_cpu))
5616 return available_idle_cpu(prev_cpu) ? prev_cpu : this_cpu;
90001d67 5617
d153b153 5618 if (sync && cpu_rq(this_cpu)->nr_running == 1)
3b76c4a3 5619 return this_cpu;
90001d67 5620
3b76c4a3 5621 return nr_cpumask_bits;
90001d67
PZ
5622}
5623
3b76c4a3 5624static int
f2cdd9cc
PZ
5625wake_affine_weight(struct sched_domain *sd, struct task_struct *p,
5626 int this_cpu, int prev_cpu, int sync)
90001d67 5627{
90001d67
PZ
5628 s64 this_eff_load, prev_eff_load;
5629 unsigned long task_load;
5630
f2cdd9cc 5631 this_eff_load = target_load(this_cpu, sd->wake_idx);
90001d67 5632
90001d67
PZ
5633 if (sync) {
5634 unsigned long current_load = task_h_load(current);
5635
f2cdd9cc 5636 if (current_load > this_eff_load)
3b76c4a3 5637 return this_cpu;
90001d67 5638
f2cdd9cc 5639 this_eff_load -= current_load;
90001d67
PZ
5640 }
5641
90001d67
PZ
5642 task_load = task_h_load(p);
5643
f2cdd9cc
PZ
5644 this_eff_load += task_load;
5645 if (sched_feat(WA_BIAS))
5646 this_eff_load *= 100;
5647 this_eff_load *= capacity_of(prev_cpu);
90001d67 5648
eeb60398 5649 prev_eff_load = source_load(prev_cpu, sd->wake_idx);
f2cdd9cc
PZ
5650 prev_eff_load -= task_load;
5651 if (sched_feat(WA_BIAS))
5652 prev_eff_load *= 100 + (sd->imbalance_pct - 100) / 2;
5653 prev_eff_load *= capacity_of(this_cpu);
90001d67 5654
082f764a
MG
5655 /*
5656 * If sync, adjust the weight of prev_eff_load such that if
5657 * prev_eff == this_eff that select_idle_sibling() will consider
5658 * stacking the wakee on top of the waker if no other CPU is
5659 * idle.
5660 */
5661 if (sync)
5662 prev_eff_load += 1;
5663
5664 return this_eff_load < prev_eff_load ? this_cpu : nr_cpumask_bits;
90001d67
PZ
5665}
5666
772bd008 5667static int wake_affine(struct sched_domain *sd, struct task_struct *p,
7ebb66a1 5668 int this_cpu, int prev_cpu, int sync)
098fb9db 5669{
3b76c4a3 5670 int target = nr_cpumask_bits;
098fb9db 5671
89a55f56 5672 if (sched_feat(WA_IDLE))
3b76c4a3 5673 target = wake_affine_idle(this_cpu, prev_cpu, sync);
90001d67 5674
3b76c4a3
MG
5675 if (sched_feat(WA_WEIGHT) && target == nr_cpumask_bits)
5676 target = wake_affine_weight(sd, p, this_cpu, prev_cpu, sync);
098fb9db 5677
ae92882e 5678 schedstat_inc(p->se.statistics.nr_wakeups_affine_attempts);
3b76c4a3
MG
5679 if (target == nr_cpumask_bits)
5680 return prev_cpu;
098fb9db 5681
3b76c4a3
MG
5682 schedstat_inc(sd->ttwu_move_affine);
5683 schedstat_inc(p->se.statistics.nr_wakeups_affine);
5684 return target;
098fb9db
IM
5685}
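/*
 * Illustrative userspace sketch, not part of fair.c: the cross-multiplied
 * comparison done by wake_affine_weight() above, with invented loads and
 * capacities and the sync handling left out.  Each effective load is scaled
 * by the other CPU's capacity so that "load / capacity" can be compared
 * without a division, and the previous CPU gets the imbalance_pct bias;
 * 117 is used here as a plausible LLC-domain imbalance_pct.
 */
#include <stdio.h>

int main(void)
{
	long this_load = 800, prev_load = 600, task_load = 300;
	long this_cap = 1024, prev_cap = 512;	/* asymmetric capacities */
	int imbalance_pct = 117;

	long this_eff = (this_load + task_load) * 100 * prev_cap;
	long prev_eff = (prev_load - task_load) *
			(100 + (imbalance_pct - 100) / 2) * this_cap;

	printf("pull to the waking CPU: %s\n", this_eff < prev_eff ? "yes" : "no");
	return 0;
}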
5686
c469933e 5687static unsigned long cpu_util_without(int cpu, struct task_struct *p);
6a0b19c0 5688
c469933e 5689static unsigned long capacity_spare_without(int cpu, struct task_struct *p)
6a0b19c0 5690{
c469933e 5691 return max_t(long, capacity_of(cpu) - cpu_util_without(cpu, p), 0);
6a0b19c0
MR
5692}
5693
aaee1203
PZ
5694/*
5695 * find_idlest_group finds and returns the least busy CPU group within the
5696 * domain.
6fee85cc
BJ
5697 *
5698 * Assumes p is allowed on at least one CPU in sd.
aaee1203
PZ
5699 */
5700static struct sched_group *
78e7ed53 5701find_idlest_group(struct sched_domain *sd, struct task_struct *p,
c44f2a02 5702 int this_cpu, int sd_flag)
e7693a36 5703{
b3bd3de6 5704 struct sched_group *idlest = NULL, *group = sd->groups;
6a0b19c0 5705 struct sched_group *most_spare_sg = NULL;
0d10ab95
BJ
5706 unsigned long min_runnable_load = ULONG_MAX;
5707 unsigned long this_runnable_load = ULONG_MAX;
5708 unsigned long min_avg_load = ULONG_MAX, this_avg_load = ULONG_MAX;
6a0b19c0 5709 unsigned long most_spare = 0, this_spare = 0;
c44f2a02 5710 int load_idx = sd->forkexec_idx;
6b94780e
VG
5711 int imbalance_scale = 100 + (sd->imbalance_pct-100)/2;
5712 unsigned long imbalance = scale_load_down(NICE_0_LOAD) *
5713 (sd->imbalance_pct-100) / 100;
e7693a36 5714
c44f2a02
VG
5715 if (sd_flag & SD_BALANCE_WAKE)
5716 load_idx = sd->wake_idx;
5717
aaee1203 5718 do {
6b94780e
VG
5719 unsigned long load, avg_load, runnable_load;
5720 unsigned long spare_cap, max_spare_cap;
aaee1203
PZ
5721 int local_group;
5722 int i;
e7693a36 5723
aaee1203 5724 /* Skip over this group if it has no CPUs allowed */
ae4df9d6 5725 if (!cpumask_intersects(sched_group_span(group),
0c98d344 5726 &p->cpus_allowed))
aaee1203
PZ
5727 continue;
5728
5729 local_group = cpumask_test_cpu(this_cpu,
ae4df9d6 5730 sched_group_span(group));
aaee1203 5731
6a0b19c0
MR
5732 /*
5733 * Tally up the load of all CPUs in the group and find
5734 * the group containing the CPU with most spare capacity.
5735 */
aaee1203 5736 avg_load = 0;
6b94780e 5737 runnable_load = 0;
6a0b19c0 5738 max_spare_cap = 0;
aaee1203 5739
ae4df9d6 5740 for_each_cpu(i, sched_group_span(group)) {
97fb7a0a 5741 /* Bias balancing toward CPUs of our domain */
aaee1203
PZ
5742 if (local_group)
5743 load = source_load(i, load_idx);
5744 else
5745 load = target_load(i, load_idx);
5746
6b94780e
VG
5747 runnable_load += load;
5748
5749 avg_load += cfs_rq_load_avg(&cpu_rq(i)->cfs);
6a0b19c0 5750
c469933e 5751 spare_cap = capacity_spare_without(i, p);
6a0b19c0
MR
5752
5753 if (spare_cap > max_spare_cap)
5754 max_spare_cap = spare_cap;
aaee1203
PZ
5755 }
5756
63b2ca30 5757 /* Adjust by relative CPU capacity of the group */
6b94780e
VG
5758 avg_load = (avg_load * SCHED_CAPACITY_SCALE) /
5759 group->sgc->capacity;
5760 runnable_load = (runnable_load * SCHED_CAPACITY_SCALE) /
5761 group->sgc->capacity;
aaee1203
PZ
5762
5763 if (local_group) {
6b94780e
VG
5764 this_runnable_load = runnable_load;
5765 this_avg_load = avg_load;
6a0b19c0
MR
5766 this_spare = max_spare_cap;
5767 } else {
6b94780e
VG
5768 if (min_runnable_load > (runnable_load + imbalance)) {
5769 /*
5770 * The runnable load is significantly smaller
97fb7a0a 5771 * so we can pick this new CPU:
6b94780e
VG
5772 */
5773 min_runnable_load = runnable_load;
5774 min_avg_load = avg_load;
5775 idlest = group;
5776 } else if ((runnable_load < (min_runnable_load + imbalance)) &&
5777 (100*min_avg_load > imbalance_scale*avg_load)) {
5778 /*
5779 * The runnable loads are close so take the
97fb7a0a 5780 * blocked load into account through avg_load:
6b94780e
VG
5781 */
5782 min_avg_load = avg_load;
6a0b19c0
MR
5783 idlest = group;
5784 }
5785
5786 if (most_spare < max_spare_cap) {
5787 most_spare = max_spare_cap;
5788 most_spare_sg = group;
5789 }
aaee1203
PZ
5790 }
5791 } while (group = group->next, group != sd->groups);
5792
6a0b19c0
MR
5793 /*
5794 * The cross-over point between using spare capacity or least load
5795 * is too conservative for high utilization tasks on partially
5796 * utilized systems if we require spare_capacity > task_util(p),
5797 * so we allow for some task stuffing by using
5798 * spare_capacity > task_util(p)/2.
f519a3f1
VG
5799 *
5800 * Spare capacity can't be used for fork because the utilization has
5801 * not been set yet, we must first select a rq to compute the initial
5802 * utilization.
6a0b19c0 5803 */
f519a3f1
VG
5804 if (sd_flag & SD_BALANCE_FORK)
5805 goto skip_spare;
5806
6a0b19c0 5807 if (this_spare > task_util(p) / 2 &&
6b94780e 5808 imbalance_scale*this_spare > 100*most_spare)
6a0b19c0 5809 return NULL;
6b94780e
VG
5810
5811 if (most_spare > task_util(p) / 2)
6a0b19c0
MR
5812 return most_spare_sg;
5813
f519a3f1 5814skip_spare:
6b94780e
VG
5815 if (!idlest)
5816 return NULL;
5817
2c833627
MG
5818 /*
5819 * When comparing groups across NUMA domains, it's possible for the
5820 * local domain to be very lightly loaded relative to the remote
5821 * domains but "imbalance" skews the comparison making remote CPUs
5822 * look much more favourable. When considering cross-domain, add
5823 * imbalance to the runnable load on the remote node and consider
5824 * staying local.
5825 */
5826 if ((sd->flags & SD_NUMA) &&
5827 min_runnable_load + imbalance >= this_runnable_load)
5828 return NULL;
5829
6b94780e 5830 if (min_runnable_load > (this_runnable_load + imbalance))
aaee1203 5831 return NULL;
6b94780e
VG
5832
5833 if ((this_runnable_load < (min_runnable_load + imbalance)) &&
5834 (100*this_avg_load < imbalance_scale*min_avg_load))
5835 return NULL;
5836
aaee1203
PZ
5837 return idlest;
5838}
5839
5840/*
97fb7a0a 5841 * find_idlest_group_cpu - find the idlest CPU among the CPUs in the group.
aaee1203
PZ
5842 */
5843static int
18bd1b4b 5844find_idlest_group_cpu(struct sched_group *group, struct task_struct *p, int this_cpu)
aaee1203
PZ
5845{
5846 unsigned long load, min_load = ULONG_MAX;
83a0a96a
NP
5847 unsigned int min_exit_latency = UINT_MAX;
5848 u64 latest_idle_timestamp = 0;
5849 int least_loaded_cpu = this_cpu;
5850 int shallowest_idle_cpu = -1;
aaee1203
PZ
5851 int i;
5852
eaecf41f
MR
5853 /* Check if we have any choice: */
5854 if (group->group_weight == 1)
ae4df9d6 5855 return cpumask_first(sched_group_span(group));
eaecf41f 5856
aaee1203 5857 /* Traverse only the allowed CPUs */
ae4df9d6 5858 for_each_cpu_and(i, sched_group_span(group), &p->cpus_allowed) {
943d355d 5859 if (available_idle_cpu(i)) {
83a0a96a
NP
5860 struct rq *rq = cpu_rq(i);
5861 struct cpuidle_state *idle = idle_get_state(rq);
5862 if (idle && idle->exit_latency < min_exit_latency) {
5863 /*
5864 * We give priority to a CPU whose idle state
5865 * has the smallest exit latency irrespective
5866 * of any idle timestamp.
5867 */
5868 min_exit_latency = idle->exit_latency;
5869 latest_idle_timestamp = rq->idle_stamp;
5870 shallowest_idle_cpu = i;
5871 } else if ((!idle || idle->exit_latency == min_exit_latency) &&
5872 rq->idle_stamp > latest_idle_timestamp) {
5873 /*
5874 * If equal or no active idle state, then
5875 * the most recently idled CPU might have
5876 * a warmer cache.
5877 */
5878 latest_idle_timestamp = rq->idle_stamp;
5879 shallowest_idle_cpu = i;
5880 }
9f96742a 5881 } else if (shallowest_idle_cpu == -1) {
c7132dd6 5882 load = weighted_cpuload(cpu_rq(i));
18cec7e0 5883 if (load < min_load) {
83a0a96a
NP
5884 min_load = load;
5885 least_loaded_cpu = i;
5886 }
e7693a36
GH
5887 }
5888 }
5889
83a0a96a 5890 return shallowest_idle_cpu != -1 ? shallowest_idle_cpu : least_loaded_cpu;
aaee1203 5891}
e7693a36 5892
18bd1b4b
BJ
5893static inline int find_idlest_cpu(struct sched_domain *sd, struct task_struct *p,
5894 int cpu, int prev_cpu, int sd_flag)
5895{
93f50f90 5896 int new_cpu = cpu;
18bd1b4b 5897
6fee85cc
BJ
5898 if (!cpumask_intersects(sched_domain_span(sd), &p->cpus_allowed))
5899 return prev_cpu;
5900
c976a862 5901 /*
c469933e
PB
5902 * We need task's util for capacity_spare_without, sync it up to
5903 * prev_cpu's last_update_time.
c976a862
VK
5904 */
5905 if (!(sd_flag & SD_BALANCE_FORK))
5906 sync_entity_load_avg(&p->se);
5907
18bd1b4b
BJ
5908 while (sd) {
5909 struct sched_group *group;
5910 struct sched_domain *tmp;
5911 int weight;
5912
5913 if (!(sd->flags & sd_flag)) {
5914 sd = sd->child;
5915 continue;
5916 }
5917
5918 group = find_idlest_group(sd, p, cpu, sd_flag);
5919 if (!group) {
5920 sd = sd->child;
5921 continue;
5922 }
5923
5924 new_cpu = find_idlest_group_cpu(group, p, cpu);
e90381ea 5925 if (new_cpu == cpu) {
97fb7a0a 5926 /* Now try balancing at a lower domain level of 'cpu': */
18bd1b4b
BJ
5927 sd = sd->child;
5928 continue;
5929 }
5930
97fb7a0a 5931 /* Now try balancing at a lower domain level of 'new_cpu': */
18bd1b4b
BJ
5932 cpu = new_cpu;
5933 weight = sd->span_weight;
5934 sd = NULL;
5935 for_each_domain(cpu, tmp) {
5936 if (weight <= tmp->span_weight)
5937 break;
5938 if (tmp->flags & sd_flag)
5939 sd = tmp;
5940 }
18bd1b4b
BJ
5941 }
5942
5943 return new_cpu;
5944}
5945
10e2f1ac 5946#ifdef CONFIG_SCHED_SMT
ba2591a5 5947DEFINE_STATIC_KEY_FALSE(sched_smt_present);
10e2f1ac
PZ
5948
5949static inline void set_idle_cores(int cpu, int val)
5950{
5951 struct sched_domain_shared *sds;
5952
5953 sds = rcu_dereference(per_cpu(sd_llc_shared, cpu));
5954 if (sds)
5955 WRITE_ONCE(sds->has_idle_cores, val);
5956}
5957
5958static inline bool test_idle_cores(int cpu, bool def)
5959{
5960 struct sched_domain_shared *sds;
5961
5962 sds = rcu_dereference(per_cpu(sd_llc_shared, cpu));
5963 if (sds)
5964 return READ_ONCE(sds->has_idle_cores);
5965
5966 return def;
5967}
5968
5969/*
5970 * Scans the local SMT mask to see if the entire core is idle, and records this
5971 * information in sd_llc_shared->has_idle_cores.
5972 *
5973 * Since SMT siblings share all cache levels, inspecting this limited remote
5974 * state should be fairly cheap.
5975 */
1b568f0a 5976void __update_idle_core(struct rq *rq)
10e2f1ac
PZ
5977{
5978 int core = cpu_of(rq);
5979 int cpu;
5980
5981 rcu_read_lock();
5982 if (test_idle_cores(core, true))
5983 goto unlock;
5984
5985 for_each_cpu(cpu, cpu_smt_mask(core)) {
5986 if (cpu == core)
5987 continue;
5988
943d355d 5989 if (!available_idle_cpu(cpu))
10e2f1ac
PZ
5990 goto unlock;
5991 }
5992
5993 set_idle_cores(core, 1);
5994unlock:
5995 rcu_read_unlock();
5996}
5997
5998/*
5999 * Scan the entire LLC domain for idle cores; this dynamically switches off if
6000 * there are no idle cores left in the system; tracked through
6001 * sd_llc->shared->has_idle_cores and enabled through update_idle_core() above.
6002 */
6003static int select_idle_core(struct task_struct *p, struct sched_domain *sd, int target)
6004{
6005 struct cpumask *cpus = this_cpu_cpumask_var_ptr(select_idle_mask);
c743f0a5 6006 int core, cpu;
10e2f1ac 6007
1b568f0a
PZ
6008 if (!static_branch_likely(&sched_smt_present))
6009 return -1;
6010
10e2f1ac
PZ
6011 if (!test_idle_cores(target, false))
6012 return -1;
6013
0c98d344 6014 cpumask_and(cpus, sched_domain_span(sd), &p->cpus_allowed);
10e2f1ac 6015
c743f0a5 6016 for_each_cpu_wrap(core, cpus, target) {
10e2f1ac
PZ
6017 bool idle = true;
6018
6019 for_each_cpu(cpu, cpu_smt_mask(core)) {
6020 cpumask_clear_cpu(cpu, cpus);
943d355d 6021 if (!available_idle_cpu(cpu))
10e2f1ac
PZ
6022 idle = false;
6023 }
6024
6025 if (idle)
6026 return core;
6027 }
6028
6029 /*
6030 * Failed to find an idle core; stop looking for one.
6031 */
6032 set_idle_cores(target, 0);
6033
6034 return -1;
6035}
6036
6037/*
6038 * Scan the local SMT mask for idle CPUs.
6039 */
6040static int select_idle_smt(struct task_struct *p, struct sched_domain *sd, int target)
6041{
6042 int cpu;
6043
1b568f0a
PZ
6044 if (!static_branch_likely(&sched_smt_present))
6045 return -1;
6046
10e2f1ac 6047 for_each_cpu(cpu, cpu_smt_mask(target)) {
0c98d344 6048 if (!cpumask_test_cpu(cpu, &p->cpus_allowed))
10e2f1ac 6049 continue;
943d355d 6050 if (available_idle_cpu(cpu))
10e2f1ac
PZ
6051 return cpu;
6052 }
6053
6054 return -1;
6055}
6056
6057#else /* CONFIG_SCHED_SMT */
6058
6059static inline int select_idle_core(struct task_struct *p, struct sched_domain *sd, int target)
6060{
6061 return -1;
6062}
6063
6064static inline int select_idle_smt(struct task_struct *p, struct sched_domain *sd, int target)
6065{
6066 return -1;
6067}
6068
6069#endif /* CONFIG_SCHED_SMT */
6070
6071/*
6072 * Scan the LLC domain for idle CPUs; this is dynamically regulated by
6073 * comparing the average scan cost (tracked in sd->avg_scan_cost) against the
6074 * average idle time for this rq (as found in rq->avg_idle).
a50bde51 6075 */
10e2f1ac
PZ
6076static int select_idle_cpu(struct task_struct *p, struct sched_domain *sd, int target)
6077{
9cfb38a7 6078 struct sched_domain *this_sd;
1ad3aaf3 6079 u64 avg_cost, avg_idle;
10e2f1ac
PZ
6080 u64 time, cost;
6081 s64 delta;
1ad3aaf3 6082 int cpu, nr = INT_MAX;
10e2f1ac 6083
9cfb38a7
WL
6084 this_sd = rcu_dereference(*this_cpu_ptr(&sd_llc));
6085 if (!this_sd)
6086 return -1;
6087
10e2f1ac
PZ
6088 /*
6089 * Due to large variance we need a large fuzz factor; hackbench in
 6090 * particular is sensitive here.
6091 */
1ad3aaf3
PZ
6092 avg_idle = this_rq()->avg_idle / 512;
6093 avg_cost = this_sd->avg_scan_cost + 1;
6094
6095 if (sched_feat(SIS_AVG_CPU) && avg_idle < avg_cost)
10e2f1ac
PZ
6096 return -1;
6097
1ad3aaf3
PZ
6098 if (sched_feat(SIS_PROP)) {
6099 u64 span_avg = sd->span_weight * avg_idle;
6100 if (span_avg > 4*avg_cost)
6101 nr = div_u64(span_avg, avg_cost);
6102 else
6103 nr = 4;
6104 }
6105
10e2f1ac
PZ
6106 time = local_clock();
6107
c743f0a5 6108 for_each_cpu_wrap(cpu, sched_domain_span(sd), target) {
1ad3aaf3
PZ
6109 if (!--nr)
6110 return -1;
0c98d344 6111 if (!cpumask_test_cpu(cpu, &p->cpus_allowed))
10e2f1ac 6112 continue;
943d355d 6113 if (available_idle_cpu(cpu))
10e2f1ac
PZ
6114 break;
6115 }
6116
6117 time = local_clock() - time;
6118 cost = this_sd->avg_scan_cost;
6119 delta = (s64)(time - cost) / 8;
6120 this_sd->avg_scan_cost += delta;
6121
6122 return cpu;
6123}
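/*
 * Illustrative userspace sketch, not part of fair.c: how the SIS_PROP scan
 * budget computed above scales with the average idle time of the waking rq.
 * All inputs are hypothetical nanosecond figures; a mostly-busy rq clamps
 * the scan to 4 CPUs, while a fairly idle one is allowed to scan most of a
 * 16-CPU LLC span.
 */
#include <stdio.h>
#include <stdint.h>

static int ex_sis_prop_nr(uint64_t rq_avg_idle, uint64_t avg_scan_cost, int span_weight)
{
	uint64_t avg_idle = rq_avg_idle / 512;
	uint64_t avg_cost = avg_scan_cost + 1;
	uint64_t span_avg = (uint64_t)span_weight * avg_idle;

	if (span_avg > 4 * avg_cost)
		return (int)(span_avg / avg_cost);
	return 4;
}

int main(void)
{
	printf("busy rq: scan %d CPUs\n", ex_sis_prop_nr(20000, 4000, 16));	/* 4 */
	printf("idle rq: scan %d CPUs\n", ex_sis_prop_nr(2000000, 4000, 16));	/* 15 */
	return 0;
}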
6124
6125/*
6126 * Try and locate an idle core/thread in the LLC cache domain.
a50bde51 6127 */
772bd008 6128static int select_idle_sibling(struct task_struct *p, int prev, int target)
a50bde51 6129{
99bd5e2f 6130 struct sched_domain *sd;
32e839dd 6131 int i, recent_used_cpu;
a50bde51 6132
943d355d 6133 if (available_idle_cpu(target))
e0a79f52 6134 return target;
99bd5e2f
SS
6135
6136 /*
97fb7a0a 6137 * If the previous CPU is cache affine and idle, don't be stupid:
99bd5e2f 6138 */
943d355d 6139 if (prev != target && cpus_share_cache(prev, target) && available_idle_cpu(prev))
772bd008 6140 return prev;
a50bde51 6141
97fb7a0a 6142 /* Check a recently used CPU as a potential idle candidate: */
32e839dd
MG
6143 recent_used_cpu = p->recent_used_cpu;
6144 if (recent_used_cpu != prev &&
6145 recent_used_cpu != target &&
6146 cpus_share_cache(recent_used_cpu, target) &&
943d355d 6147 available_idle_cpu(recent_used_cpu) &&
32e839dd
MG
6148 cpumask_test_cpu(p->recent_used_cpu, &p->cpus_allowed)) {
6149 /*
6150 * Replace recent_used_cpu with prev as it is a potential
97fb7a0a 6151 * candidate for the next wake:
32e839dd
MG
6152 */
6153 p->recent_used_cpu = prev;
6154 return recent_used_cpu;
6155 }
6156
518cd623 6157 sd = rcu_dereference(per_cpu(sd_llc, target));
10e2f1ac
PZ
6158 if (!sd)
6159 return target;
772bd008 6160
10e2f1ac
PZ
6161 i = select_idle_core(p, sd, target);
6162 if ((unsigned)i < nr_cpumask_bits)
6163 return i;
37407ea7 6164
10e2f1ac
PZ
6165 i = select_idle_cpu(p, sd, target);
6166 if ((unsigned)i < nr_cpumask_bits)
6167 return i;
6168
6169 i = select_idle_smt(p, sd, target);
6170 if ((unsigned)i < nr_cpumask_bits)
6171 return i;
970e1789 6172
a50bde51
PZ
6173 return target;
6174}
231678b7 6175
f9be3e59
PB
6176/**
 6177 * cpu_util - Amount of capacity of a CPU that is (estimated to be) used by CFS tasks
6178 * @cpu: the CPU to get the utilization of
6179 *
6180 * The unit of the return value must be the one of capacity so we can compare
6181 * the utilization with the capacity of the CPU that is available for CFS task
6182 * (ie cpu_capacity).
231678b7
DE
6183 *
6184 * cfs_rq.avg.util_avg is the sum of running time of runnable tasks plus the
6185 * recent utilization of currently non-runnable tasks on a CPU. It represents
6186 * the amount of utilization of a CPU in the range [0..capacity_orig] where
6187 * capacity_orig is the cpu_capacity available at the highest frequency
6188 * (arch_scale_freq_capacity()).
6189 * The utilization of a CPU converges towards a sum equal to or less than the
6190 * current capacity (capacity_curr <= capacity_orig) of the CPU because it is
6191 * the running time on this CPU scaled by capacity_curr.
6192 *
f9be3e59
PB
6193 * The estimated utilization of a CPU is defined to be the maximum between its
6194 * cfs_rq.avg.util_avg and the sum of the estimated utilization of the tasks
6195 * currently RUNNABLE on that CPU.
 6196 * This allows us to properly represent the expected utilization of a CPU which
 6197 * has just had a big task start running after a long sleep period. At the same time
6198 * however it preserves the benefits of the "blocked utilization" in
6199 * describing the potential for other tasks waking up on the same CPU.
6200 *
231678b7
DE
6201 * Nevertheless, cfs_rq.avg.util_avg can be higher than capacity_curr or even
6202 * higher than capacity_orig because of unfortunate rounding in
6203 * cfs.avg.util_avg or just after migrating tasks and new task wakeups until
6204 * the average stabilizes with the new running time. We need to check that the
6205 * utilization stays within the range of [0..capacity_orig] and cap it if
6206 * necessary. Without utilization capping, a group could be seen as overloaded
6207 * (CPU0 utilization at 121% + CPU1 utilization at 80%) whereas CPU1 has 20% of
6208 * available capacity. We allow utilization to overshoot capacity_curr (but not
 6209 * capacity_orig) as it is useful for predicting the capacity required after task
6210 * migrations (scheduler-driven DVFS).
f9be3e59
PB
6211 *
6212 * Return: the (estimated) utilization for the specified CPU
8bb5b00c 6213 */
f9be3e59 6214static inline unsigned long cpu_util(int cpu)
8bb5b00c 6215{
f9be3e59
PB
6216 struct cfs_rq *cfs_rq;
6217 unsigned int util;
6218
6219 cfs_rq = &cpu_rq(cpu)->cfs;
6220 util = READ_ONCE(cfs_rq->avg.util_avg);
6221
6222 if (sched_feat(UTIL_EST))
6223 util = max(util, READ_ONCE(cfs_rq->avg.util_est.enqueued));
8bb5b00c 6224
f9be3e59 6225 return min_t(unsigned long, util, capacity_orig_of(cpu));
8bb5b00c 6226}
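/*
 * Illustrative userspace sketch, not part of fair.c: the max/clamp done by
 * cpu_util() above.  With UTIL_EST the enqueued estimate can exceed
 * util_avg right after a big task wakes up, and either value can
 * transiently exceed capacity_orig, so the result is capped.  The ex_*
 * names and numbers are invented; 1024 stands for capacity_orig at the
 * highest frequency.
 */
#include <stdio.h>

static unsigned long ex_cpu_util(unsigned long util_avg,
				 unsigned long util_est_enqueued,
				 unsigned long capacity_orig)
{
	unsigned long util = util_avg;

	if (util_est_enqueued > util)		/* sched_feat(UTIL_EST) assumed on */
		util = util_est_enqueued;

	return util < capacity_orig ? util : capacity_orig;
}

int main(void)
{
	printf("%lu\n", ex_cpu_util(300, 780, 1024));	/* 780: estimate wins */
	printf("%lu\n", ex_cpu_util(1100, 0, 1024));	/* 1024: capped */
	return 0;
}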
a50bde51 6227
104cb16d 6228/*
c469933e
PB
6229 * cpu_util_without: compute cpu utilization without any contributions from *p
6230 * @cpu: the CPU which utilization is requested
6231 * @p: the task which utilization should be discounted
6232 *
6233 * The utilization of a CPU is defined by the utilization of tasks currently
6234 * enqueued on that CPU as well as tasks which are currently sleeping after an
6235 * execution on that CPU.
6236 *
6237 * This method returns the utilization of the specified CPU by discounting the
6238 * utilization of the specified task, whenever the task is currently
6239 * contributing to the CPU utilization.
104cb16d 6240 */
c469933e 6241static unsigned long cpu_util_without(int cpu, struct task_struct *p)
104cb16d 6242{
f9be3e59
PB
6243 struct cfs_rq *cfs_rq;
6244 unsigned int util;
104cb16d
MR
6245
6246 /* Task has no contribution or is new */
f9be3e59 6247 if (cpu != task_cpu(p) || !READ_ONCE(p->se.avg.last_update_time))
104cb16d
MR
6248 return cpu_util(cpu);
6249
f9be3e59
PB
6250 cfs_rq = &cpu_rq(cpu)->cfs;
6251 util = READ_ONCE(cfs_rq->avg.util_avg);
6252
c469933e 6253 /* Discount task's util from CPU's util */
b5c0ce7b 6254 lsub_positive(&util, task_util(p));
104cb16d 6255
f9be3e59
PB
6256 /*
6257 * Covered cases:
6258 *
6259 * a) if *p is the only task sleeping on this CPU, then:
6260 * cpu_util (== task_util) > util_est (== 0)
6261 * and thus we return:
c469933e 6262 * cpu_util_without = (cpu_util - task_util) = 0
f9be3e59
PB
6263 *
6264 * b) if other tasks are SLEEPING on this CPU, which is now exiting
6265 * IDLE, then:
6266 * cpu_util >= task_util
6267 * cpu_util > util_est (== 0)
6268 * and thus we discount *p's blocked utilization to return:
c469933e 6269 * cpu_util_without = (cpu_util - task_util) >= 0
f9be3e59
PB
6270 *
6271 * c) if other tasks are RUNNABLE on that CPU and
6272 * util_est > cpu_util
6273 * then we use util_est since it returns a more restrictive
6274 * estimation of the spare capacity on that CPU, by just
6275 * considering the expected utilization of tasks already
6276 * runnable on that CPU.
6277 *
6278 * Cases a) and b) are covered by the above code, while case c) is
6279 * covered by the following code when estimated utilization is
6280 * enabled.
6281 */
c469933e
PB
6282 if (sched_feat(UTIL_EST)) {
6283 unsigned int estimated =
6284 READ_ONCE(cfs_rq->avg.util_est.enqueued);
6285
6286 /*
6287 * Despite the following checks we still have a small window
6288 * for a possible race, when an execl's select_task_rq_fair()
6289 * races with LB's detach_task():
6290 *
6291 * detach_task()
6292 * p->on_rq = TASK_ON_RQ_MIGRATING;
6293 * ---------------------------------- A
6294 * deactivate_task() \
6295 * dequeue_task() + RaceTime
6296 * util_est_dequeue() /
6297 * ---------------------------------- B
6298 *
 6299 * The additional check on "current == p" is required to
 6300 * properly fix the execl regression, and it helps further
 6301 * reduce the chances of the above race.
6302 */
b5c0ce7b
PB
6303 if (unlikely(task_on_rq_queued(p) || current == p))
6304 lsub_positive(&estimated, _task_util_est(p));
6305
c469933e
PB
6306 util = max(util, estimated);
6307 }
f9be3e59
PB
6308
6309 /*
6310 * Utilization (estimated) can exceed the CPU capacity, thus let's
6311 * clamp to the maximum CPU capacity to ensure consistency with
6312 * the cpu_util call.
6313 */
6314 return min_t(unsigned long, util, capacity_orig_of(cpu));
104cb16d
MR
6315}
6316
3273163c
MR
6317/*
6318 * Disable WAKE_AFFINE in the case where task @p doesn't fit in the
6319 * capacity of either the waking CPU @cpu or the previous CPU @prev_cpu.
6320 *
6321 * In that case WAKE_AFFINE doesn't make sense and we'll let
6322 * BALANCE_WAKE sort things out.
6323 */
6324static int wake_cap(struct task_struct *p, int cpu, int prev_cpu)
6325{
6326 long min_cap, max_cap;
6327
df054e84
MR
6328 if (!static_branch_unlikely(&sched_asym_cpucapacity))
6329 return 0;
6330
3273163c
MR
6331 min_cap = min(capacity_orig_of(prev_cpu), capacity_orig_of(cpu));
6332 max_cap = cpu_rq(cpu)->rd->max_cpu_capacity;
6333
6334 /* Minimum capacity is close to max, no need to abort wake_affine */
6335 if (max_cap - min_cap < max_cap >> 3)
6336 return 0;
6337
104cb16d
MR
6338 /* Bring task utilization in sync with prev_cpu */
6339 sync_entity_load_avg(&p->se);
6340
3b1baa64 6341 return !task_fits_capacity(p, min_cap);
3273163c
MR
6342}
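/*
 * Illustrative userspace sketch, not part of fair.c: the capacity-margin
 * test used by wake_cap() above, simplified to comparing just two CPU
 * capacities (the real code compares against rd->max_cpu_capacity).  The
 * fast path is kept whenever the gap is below 1/8th of the larger
 * capacity, e.g. 1024 vs 920 stays affine while 1024 vs 512 does not.
 */
#include <stdio.h>

static int ex_caps_are_close(long cap_a, long cap_b)
{
	long min_cap = cap_a < cap_b ? cap_a : cap_b;
	long max_cap = cap_a < cap_b ? cap_b : cap_a;

	return max_cap - min_cap < (max_cap >> 3);
}

int main(void)
{
	printf("%d %d\n", ex_caps_are_close(1024, 920), ex_caps_are_close(1024, 512));	/* 1 0 */
	return 0;
}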
6343
aaee1203 6344/*
de91b9cb
MR
6345 * select_task_rq_fair: Select target runqueue for the waking task in domains
6346 * that have the 'sd_flag' flag set. In practice, this is SD_BALANCE_WAKE,
6347 * SD_BALANCE_FORK, or SD_BALANCE_EXEC.
aaee1203 6348 *
97fb7a0a
IM
6349 * Balances load by selecting the idlest CPU in the idlest group, or under
6350 * certain conditions an idle sibling CPU if the domain has SD_WAKE_AFFINE set.
aaee1203 6351 *
97fb7a0a 6352 * Returns the target CPU number.
aaee1203
PZ
6353 *
6354 * preempt must be disabled.
6355 */
0017d735 6356static int
ac66f547 6357select_task_rq_fair(struct task_struct *p, int prev_cpu, int sd_flag, int wake_flags)
aaee1203 6358{
f1d88b44 6359 struct sched_domain *tmp, *sd = NULL;
c88d5910 6360 int cpu = smp_processor_id();
63b0e9ed 6361 int new_cpu = prev_cpu;
99bd5e2f 6362 int want_affine = 0;
24d0c1d6 6363 int sync = (wake_flags & WF_SYNC) && !(current->flags & PF_EXITING);
c88d5910 6364
c58d25f3
PZ
6365 if (sd_flag & SD_BALANCE_WAKE) {
6366 record_wakee(p);
3273163c 6367 want_affine = !wake_wide(p) && !wake_cap(p, cpu, prev_cpu)
0c98d344 6368 && cpumask_test_cpu(cpu, &p->cpus_allowed);
c58d25f3 6369 }
aaee1203 6370
dce840a0 6371 rcu_read_lock();
aaee1203 6372 for_each_domain(cpu, tmp) {
e4f42888 6373 if (!(tmp->flags & SD_LOAD_BALANCE))
63b0e9ed 6374 break;
e4f42888 6375
fe3bcfe1 6376 /*
97fb7a0a 6377 * If both 'cpu' and 'prev_cpu' are part of this domain,
99bd5e2f 6378 * cpu is a valid SD_WAKE_AFFINE target.
fe3bcfe1 6379 */
99bd5e2f
SS
6380 if (want_affine && (tmp->flags & SD_WAKE_AFFINE) &&
6381 cpumask_test_cpu(prev_cpu, sched_domain_span(tmp))) {
f1d88b44
VK
6382 if (cpu != prev_cpu)
6383 new_cpu = wake_affine(tmp, p, cpu, prev_cpu, sync);
6384
6385 sd = NULL; /* Prefer wake_affine over balance flags */
29cd8bae 6386 break;
f03542a7 6387 }
29cd8bae 6388
f03542a7 6389 if (tmp->flags & sd_flag)
29cd8bae 6390 sd = tmp;
63b0e9ed
MG
6391 else if (!want_affine)
6392 break;
29cd8bae
PZ
6393 }
6394
f1d88b44
VK
6395 if (unlikely(sd)) {
6396 /* Slow path */
18bd1b4b 6397 new_cpu = find_idlest_cpu(sd, p, cpu, prev_cpu, sd_flag);
f1d88b44
VK
6398 } else if (sd_flag & SD_BALANCE_WAKE) { /* XXX always ? */
6399 /* Fast path */
6400
6401 new_cpu = select_idle_sibling(p, prev_cpu, new_cpu);
6402
6403 if (want_affine)
6404 current->recent_used_cpu = cpu;
e7693a36 6405 }
dce840a0 6406 rcu_read_unlock();
e7693a36 6407
c88d5910 6408 return new_cpu;
e7693a36 6409}
0a74bef8 6410
144d8487
PZ
6411static void detach_entity_cfs_rq(struct sched_entity *se);
6412
0a74bef8 6413/*
97fb7a0a 6414 * Called immediately before a task is migrated to a new CPU; task_cpu(p) and
0a74bef8 6415 * cfs_rq_of(p) references at time of call are still valid and identify the
97fb7a0a 6416 * previous CPU. The caller guarantees p->pi_lock or task_rq(p)->lock is held.
0a74bef8 6417 */
3f9672ba 6418static void migrate_task_rq_fair(struct task_struct *p, int new_cpu)
0a74bef8 6419{
59efa0ba
PZ
6420 /*
6421 * As blocked tasks retain absolute vruntime the migration needs to
6422 * deal with this by subtracting the old and adding the new
6423 * min_vruntime -- the latter is done by enqueue_entity() when placing
6424 * the task on the new runqueue.
6425 */
6426 if (p->state == TASK_WAKING) {
6427 struct sched_entity *se = &p->se;
6428 struct cfs_rq *cfs_rq = cfs_rq_of(se);
6429 u64 min_vruntime;
6430
6431#ifndef CONFIG_64BIT
6432 u64 min_vruntime_copy;
6433
6434 do {
6435 min_vruntime_copy = cfs_rq->min_vruntime_copy;
6436 smp_rmb();
6437 min_vruntime = cfs_rq->min_vruntime;
6438 } while (min_vruntime != min_vruntime_copy);
6439#else
6440 min_vruntime = cfs_rq->min_vruntime;
6441#endif
6442
6443 se->vruntime -= min_vruntime;
6444 }
6445
144d8487
PZ
6446 if (p->on_rq == TASK_ON_RQ_MIGRATING) {
6447 /*
6448 * In case of TASK_ON_RQ_MIGRATING we in fact hold the 'old'
6449 * rq->lock and can modify state directly.
6450 */
6451 lockdep_assert_held(&task_rq(p)->lock);
6452 detach_entity_cfs_rq(&p->se);
6453
6454 } else {
6455 /*
6456 * We are supposed to update the task to "current" time, then
 6457 * it's up to date and ready to go to the new CPU/cfs_rq. But we
 6458 * have difficulty getting hold of the current time here, so simply
 6459 * throw away the out-of-date time. This results in the wakee task
 6460 * being less decayed, and giving the wakee a bit more load is not
 6461 * a bad thing.
6462 */
6463 remove_entity_load_avg(&p->se);
6464 }
9d89c257
YD
6465
6466 /* Tell new CPU we are migrated */
6467 p->se.avg.last_update_time = 0;
3944a927
BS
6468
6469 /* We have migrated, no longer consider this task hot */
9d89c257 6470 p->se.exec_start = 0;
3f9672ba
SD
6471
6472 update_scan_period(p, new_cpu);
0a74bef8 6473}
12695578
YD
6474
6475static void task_dead_fair(struct task_struct *p)
6476{
6477 remove_entity_load_avg(&p->se);
6478}
e7693a36
GH
6479#endif /* CONFIG_SMP */
6480
a555e9d8 6481static unsigned long wakeup_gran(struct sched_entity *se)
0bbd3336
PZ
6482{
6483 unsigned long gran = sysctl_sched_wakeup_granularity;
6484
6485 /*
e52fb7c0
PZ
 6486 * Since it's curr that is running now, convert the gran from real-time
 6487 * to virtual-time in its units.
13814d42
MG
6488 *
6489 * By using 'se' instead of 'curr' we penalize light tasks, so
6490 * they get preempted easier. That is, if 'se' < 'curr' then
6491 * the resulting gran will be larger, therefore penalizing the
6492 * lighter, if otoh 'se' > 'curr' then the resulting gran will
6493 * be smaller, again penalizing the lighter task.
6494 *
6495 * This is especially important for buddies when the leftmost
6496 * task is higher priority than the buddy.
0bbd3336 6497 */
f4ad9bd2 6498 return calc_delta_fair(gran, se);
0bbd3336
PZ
6499}
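/*
 * Illustrative userspace sketch, not part of fair.c: the weight scaling
 * that calc_delta_fair() applies to the wakeup granularity above, reduced
 * to a plain division (the kernel uses a pre-shifted inverse weight).
 * A nice-0 wakee sees the granularity unchanged; a lighter nice +5 wakee
 * (weight 335) sees a larger virtual granularity and therefore preempts
 * the running task less readily.  Numbers are illustrative only.
 */
#include <stdio.h>

#define EX_NICE_0_LOAD 1024UL

static unsigned long ex_calc_delta_fair(unsigned long delta, unsigned long weight)
{
	return delta * EX_NICE_0_LOAD / weight;
}

int main(void)
{
	unsigned long gran = 1000000UL;		/* 1 ms wakeup granularity, in ns */

	printf("nice  0 (weight 1024): %lu ns\n", ex_calc_delta_fair(gran, 1024));
	printf("nice +5 (weight  335): %lu ns\n", ex_calc_delta_fair(gran, 335));
	return 0;
}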
6500
464b7527
PZ
6501/*
6502 * Should 'se' preempt 'curr'.
6503 *
6504 * |s1
6505 * |s2
6506 * |s3
6507 * g
6508 * |<--->|c
6509 *
6510 * w(c, s1) = -1
6511 * w(c, s2) = 0
6512 * w(c, s3) = 1
6513 *
6514 */
6515static int
6516wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se)
6517{
6518 s64 gran, vdiff = curr->vruntime - se->vruntime;
6519
6520 if (vdiff <= 0)
6521 return -1;
6522
a555e9d8 6523 gran = wakeup_gran(se);
464b7527
PZ
6524 if (vdiff > gran)
6525 return 1;
6526
6527 return 0;
6528}
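/*
 * Illustrative userspace sketch, not part of fair.c: the three-way result
 * of wakeup_preempt_entity() for the s1/s2/s3 picture above.  vdiff is
 * curr->vruntime - se->vruntime and g the virtual wakeup granularity; only
 * a wakee lagging curr by more than g forces a preemption.  The ex_* names
 * and the vruntime values are made up.
 */
#include <stdio.h>

static int ex_wakeup_preempt(long long curr_vruntime, long long se_vruntime,
			     long long gran)
{
	long long vdiff = curr_vruntime - se_vruntime;

	if (vdiff <= 0)
		return -1;	/* s1: wakee is ahead of curr, no preemption */
	if (vdiff > gran)
		return 1;	/* s3: wakee lags by more than g, preempt */
	return 0;		/* s2: within the granularity, leave curr alone */
}

int main(void)
{
	long long g = 1000000;	/* 1 ms of virtual runtime */

	printf("%d %d %d\n",
	       ex_wakeup_preempt(5000000, 6000000, g),	/* s1 -> -1 */
	       ex_wakeup_preempt(5000000, 4500000, g),	/* s2 ->  0 */
	       ex_wakeup_preempt(5000000, 3000000, g));	/* s3 ->  1 */
	return 0;
}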
6529
02479099
PZ
6530static void set_last_buddy(struct sched_entity *se)
6531{
1da1843f 6532 if (entity_is_task(se) && unlikely(task_has_idle_policy(task_of(se))))
69c80f3e
VP
6533 return;
6534
c5ae366e
DA
6535 for_each_sched_entity(se) {
6536 if (SCHED_WARN_ON(!se->on_rq))
6537 return;
69c80f3e 6538 cfs_rq_of(se)->last = se;
c5ae366e 6539 }
02479099
PZ
6540}
6541
6542static void set_next_buddy(struct sched_entity *se)
6543{
1da1843f 6544 if (entity_is_task(se) && unlikely(task_has_idle_policy(task_of(se))))
69c80f3e
VP
6545 return;
6546
c5ae366e
DA
6547 for_each_sched_entity(se) {
6548 if (SCHED_WARN_ON(!se->on_rq))
6549 return;
69c80f3e 6550 cfs_rq_of(se)->next = se;
c5ae366e 6551 }
02479099
PZ
6552}
6553
ac53db59
RR
6554static void set_skip_buddy(struct sched_entity *se)
6555{
69c80f3e
VP
6556 for_each_sched_entity(se)
6557 cfs_rq_of(se)->skip = se;
ac53db59
RR
6558}
6559
bf0f6f24
IM
6560/*
6561 * Preempt the current task with a newly woken task if needed:
6562 */
5a9b86f6 6563static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_flags)
bf0f6f24
IM
6564{
6565 struct task_struct *curr = rq->curr;
8651a86c 6566 struct sched_entity *se = &curr->se, *pse = &p->se;
03e89e45 6567 struct cfs_rq *cfs_rq = task_cfs_rq(curr);
f685ceac 6568 int scale = cfs_rq->nr_running >= sched_nr_latency;
2f36825b 6569 int next_buddy_marked = 0;
bf0f6f24 6570
4ae7d5ce
IM
6571 if (unlikely(se == pse))
6572 return;
6573
5238cdd3 6574 /*
163122b7 6575 * This is possible from callers such as attach_tasks(), in which we
5238cdd3
PT
 6576 * unconditionally check_preempt_curr() after an enqueue (which may have
 6577 * led to a throttle). This both saves work and prevents false
6578 * next-buddy nomination below.
6579 */
6580 if (unlikely(throttled_hierarchy(cfs_rq_of(pse))))
6581 return;
6582
2f36825b 6583 if (sched_feat(NEXT_BUDDY) && scale && !(wake_flags & WF_FORK)) {
3cb63d52 6584 set_next_buddy(pse);
2f36825b
VP
6585 next_buddy_marked = 1;
6586 }
57fdc26d 6587
aec0a514
BR
6588 /*
6589 * We can come here with TIF_NEED_RESCHED already set from new task
6590 * wake up path.
5238cdd3
PT
6591 *
6592 * Note: this also catches the edge-case of curr being in a throttled
6593 * group (e.g. via set_curr_task), since update_curr() (in the
6594 * enqueue of curr) will have resulted in resched being set. This
6595 * prevents us from potentially nominating it as a false LAST_BUDDY
6596 * below.
aec0a514
BR
6597 */
6598 if (test_tsk_need_resched(curr))
6599 return;
6600
a2f5c9ab 6601 /* Idle tasks are by definition preempted by non-idle tasks. */
1da1843f
VK
6602 if (unlikely(task_has_idle_policy(curr)) &&
6603 likely(!task_has_idle_policy(p)))
a2f5c9ab
DH
6604 goto preempt;
6605
91c234b4 6606 /*
a2f5c9ab
DH
6607 * Batch and idle tasks do not preempt non-idle tasks (their preemption
6608 * is driven by the tick):
91c234b4 6609 */
8ed92e51 6610 if (unlikely(p->policy != SCHED_NORMAL) || !sched_feat(WAKEUP_PREEMPTION))
91c234b4 6611 return;
bf0f6f24 6612
464b7527 6613 find_matching_se(&se, &pse);
9bbd7374 6614 update_curr(cfs_rq_of(se));
002f128b 6615 BUG_ON(!pse);
2f36825b
VP
6616 if (wakeup_preempt_entity(se, pse) == 1) {
6617 /*
6618 * Bias pick_next to pick the sched entity that is
6619 * triggering this preemption.
6620 */
6621 if (!next_buddy_marked)
6622 set_next_buddy(pse);
3a7e73a2 6623 goto preempt;
2f36825b 6624 }
464b7527 6625
3a7e73a2 6626 return;
a65ac745 6627
3a7e73a2 6628preempt:
8875125e 6629 resched_curr(rq);
3a7e73a2
PZ
6630 /*
6631 * Only set the backward buddy when the current task is still
6632 * on the rq. This can happen when a wakeup gets interleaved
6633 * with schedule on the ->pre_schedule() or idle_balance()
 6634 * point, either of which can drop the rq lock.
6635 *
6636 * Also, during early boot the idle thread is in the fair class,
6637 * for obvious reasons it's a bad idea to schedule back to it.
6638 */
6639 if (unlikely(!se->on_rq || curr == rq->idle))
6640 return;
6641
6642 if (sched_feat(LAST_BUDDY) && scale && entity_is_task(se))
6643 set_last_buddy(se);
bf0f6f24
IM
6644}
6645
606dba2e 6646static struct task_struct *
d8ac8971 6647pick_next_task_fair(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
bf0f6f24
IM
6648{
6649 struct cfs_rq *cfs_rq = &rq->cfs;
6650 struct sched_entity *se;
678d5718 6651 struct task_struct *p;
37e117c0 6652 int new_tasks;
678d5718 6653
6e83125c 6654again:
678d5718 6655 if (!cfs_rq->nr_running)
38033c37 6656 goto idle;
678d5718 6657
9674f5ca 6658#ifdef CONFIG_FAIR_GROUP_SCHED
3f1d2a31 6659 if (prev->sched_class != &fair_sched_class)
678d5718
PZ
6660 goto simple;
6661
6662 /*
6663 * Because of the set_next_buddy() in dequeue_task_fair() it is rather
6664 * likely that a next task is from the same cgroup as the current.
6665 *
6666 * Therefore attempt to avoid putting and setting the entire cgroup
6667 * hierarchy, only change the part that actually changes.
6668 */
6669
6670 do {
6671 struct sched_entity *curr = cfs_rq->curr;
6672
6673 /*
6674 * Since we got here without doing put_prev_entity() we also
6675 * have to consider cfs_rq->curr. If it is still a runnable
6676 * entity, update_curr() will update its vruntime, otherwise
6677 * forget we've ever seen it.
6678 */
54d27365
BS
6679 if (curr) {
6680 if (curr->on_rq)
6681 update_curr(cfs_rq);
6682 else
6683 curr = NULL;
678d5718 6684
54d27365
BS
6685 /*
6686 * This call to check_cfs_rq_runtime() will do the
6687 * throttle and dequeue its entity in the parent(s).
9674f5ca 6688 * Therefore the nr_running test will indeed
54d27365
BS
6689 * be correct.
6690 */
9674f5ca
VK
6691 if (unlikely(check_cfs_rq_runtime(cfs_rq))) {
6692 cfs_rq = &rq->cfs;
6693
6694 if (!cfs_rq->nr_running)
6695 goto idle;
6696
54d27365 6697 goto simple;
9674f5ca 6698 }
54d27365 6699 }
678d5718
PZ
6700
6701 se = pick_next_entity(cfs_rq, curr);
6702 cfs_rq = group_cfs_rq(se);
6703 } while (cfs_rq);
6704
6705 p = task_of(se);
6706
6707 /*
6708 * Since we haven't yet done put_prev_entity(), if the selected task
6709 * is different from the one we started out with, try to touch the
6710 * fewest cfs_rqs possible.
6711 */
6712 if (prev != p) {
6713 struct sched_entity *pse = &prev->se;
6714
6715 while (!(cfs_rq = is_same_group(se, pse))) {
6716 int se_depth = se->depth;
6717 int pse_depth = pse->depth;
6718
6719 if (se_depth <= pse_depth) {
6720 put_prev_entity(cfs_rq_of(pse), pse);
6721 pse = parent_entity(pse);
6722 }
6723 if (se_depth >= pse_depth) {
6724 set_next_entity(cfs_rq_of(se), se);
6725 se = parent_entity(se);
6726 }
6727 }
6728
6729 put_prev_entity(cfs_rq, pse);
6730 set_next_entity(cfs_rq, se);
6731 }
6732
93824900 6733 goto done;
678d5718 6734simple:
678d5718 6735#endif
bf0f6f24 6736
3f1d2a31 6737 put_prev_task(rq, prev);
606dba2e 6738
bf0f6f24 6739 do {
678d5718 6740 se = pick_next_entity(cfs_rq, NULL);
f4b6755f 6741 set_next_entity(cfs_rq, se);
bf0f6f24
IM
6742 cfs_rq = group_cfs_rq(se);
6743 } while (cfs_rq);
6744
8f4d37ec 6745 p = task_of(se);
678d5718 6746
13a453c2 6747done: __maybe_unused;
93824900
UR
6748#ifdef CONFIG_SMP
6749 /*
6750 * Move the next running task to the front of
6751 * the list, so that our cfs_tasks list becomes an
6752 * MRU one.
6753 */
6754 list_move(&p->se.group_node, &rq->cfs_tasks);
6755#endif
6756
b39e66ea
MG
6757 if (hrtick_enabled(rq))
6758 hrtick_start_fair(rq, p);
8f4d37ec 6759
3b1baa64
MR
6760 update_misfit_status(p, rq);
6761
8f4d37ec 6762 return p;
38033c37
PZ
6763
6764idle:
3b1baa64 6765 update_misfit_status(NULL, rq);
46f69fa3
MF
6766 new_tasks = idle_balance(rq, rf);
6767
37e117c0
PZ
6768 /*
6769 * Because idle_balance() releases (and re-acquires) rq->lock, it is
6770 * possible for any higher priority task to appear. In that case we
6771 * must re-start the pick_next_entity() loop.
6772 */
e4aa358b 6773 if (new_tasks < 0)
37e117c0
PZ
6774 return RETRY_TASK;
6775
e4aa358b 6776 if (new_tasks > 0)
38033c37 6777 goto again;
38033c37
PZ
6778
6779 return NULL;
bf0f6f24
IM
6780}
6781
6782/*
6783 * Account for a descheduled task:
6784 */
31ee529c 6785static void put_prev_task_fair(struct rq *rq, struct task_struct *prev)
bf0f6f24
IM
6786{
6787 struct sched_entity *se = &prev->se;
6788 struct cfs_rq *cfs_rq;
6789
6790 for_each_sched_entity(se) {
6791 cfs_rq = cfs_rq_of(se);
ab6cde26 6792 put_prev_entity(cfs_rq, se);
bf0f6f24
IM
6793 }
6794}
6795
ac53db59
RR
6796/*
6797 * sched_yield() is very simple
6798 *
6799 * The magic of dealing with the ->skip buddy is in pick_next_entity.
6800 */
6801static void yield_task_fair(struct rq *rq)
6802{
6803 struct task_struct *curr = rq->curr;
6804 struct cfs_rq *cfs_rq = task_cfs_rq(curr);
6805 struct sched_entity *se = &curr->se;
6806
6807 /*
6808 * Are we the only task in the tree?
6809 */
6810 if (unlikely(rq->nr_running == 1))
6811 return;
6812
6813 clear_buddies(cfs_rq, se);
6814
6815 if (curr->policy != SCHED_BATCH) {
6816 update_rq_clock(rq);
6817 /*
6818 * Update run-time statistics of the 'current'.
6819 */
6820 update_curr(cfs_rq);
916671c0
MG
6821 /*
6822 * Tell update_rq_clock() that we've just updated,
6823 * so we don't do microscopic update in schedule()
6824 * and double the fastpath cost.
6825 */
adcc8da8 6826 rq_clock_skip_update(rq);
ac53db59
RR
6827 }
6828
6829 set_skip_buddy(se);
6830}
6831
d95f4122
MG
6832static bool yield_to_task_fair(struct rq *rq, struct task_struct *p, bool preempt)
6833{
6834 struct sched_entity *se = &p->se;
6835
5238cdd3
PT
6836 /* throttled hierarchies are not runnable */
6837 if (!se->on_rq || throttled_hierarchy(cfs_rq_of(se)))
d95f4122
MG
6838 return false;
6839
6840 /* Tell the scheduler that we'd really like se to run next. */
6841 set_next_buddy(se);
6842
d95f4122
MG
6843 yield_task_fair(rq);
6844
6845 return true;
6846}
6847
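/*
 * For reference, the path above is what a plain sched_yield() call from
 * userspace ends up in for a SCHED_OTHER task: the yielding entity is
 * marked as the ->skip buddy rather than being requeued.  A minimal,
 * standalone userspace usage sketch (illustrative only, not part of
 * fair.c):
 */
#include <sched.h>

static void spin_until(volatile int *flag)
{
	while (!*flag)
		sched_yield();	/* let other runnable tasks have a turn */
}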
681f3e68 6848#ifdef CONFIG_SMP
bf0f6f24 6849/**************************************************
e9c84cb8
PZ
6850 * Fair scheduling class load-balancing methods.
6851 *
6852 * BASICS
6853 *
6854 * The purpose of load-balancing is to achieve the same basic fairness the
97fb7a0a 6855 * per-CPU scheduler provides, namely provide a proportional amount of compute
e9c84cb8
PZ
6856 * time to each task. This is expressed in the following equation:
6857 *
6858 * W_i,n/P_i == W_j,n/P_j for all i,j (1)
6859 *
97fb7a0a 6860 * Where W_i,n is the n-th weight average for CPU i. The instantaneous weight
e9c84cb8
PZ
6861 * W_i,0 is defined as:
6862 *
6863 * W_i,0 = \Sum_j w_i,j (2)
6864 *
97fb7a0a 6865 * Where w_i,j is the weight of the j-th runnable task on CPU i. This weight
1c3de5e1 6866 * is derived from the nice value as per sched_prio_to_weight[].
e9c84cb8
PZ
6867 *
6868 * The weight average is an exponential decay average of the instantaneous
6869 * weight:
6870 *
6871 * W'_i,n = (2^n - 1) / 2^n * W_i,n + 1 / 2^n * W_i,0 (3)
6872 *
97fb7a0a 6873 * C_i is the compute capacity of CPU i, typically it is the
e9c84cb8
PZ
6874 * fraction of 'recent' time available for SCHED_OTHER task execution. But it
6875 * can also include other factors [XXX].
6876 *
6877 * To achieve this balance we define a measure of imbalance which follows
6878 * directly from (1):
6879 *
ced549fa 6880 * imb_i,j = max{ avg(W/C), W_i/C_i } - min{ avg(W/C), W_j/C_j } (4)
e9c84cb8
PZ
6881 *
6882 * We then move tasks around to minimize the imbalance. In the continuous
6883 * function space it is obvious this converges, in the discrete case we get
6884 * a few fun cases generally called infeasible weight scenarios.
6885 *
6886 * [XXX expand on:
6887 * - infeasible weights;
6888 * - local vs global optima in the discrete case. ]
6889 *
6890 *
6891 * SCHED DOMAINS
6892 *
6893 * In order to solve the imbalance equation (4), and avoid the obvious O(n^2)
97fb7a0a 6894 * for all i,j solution, we create a tree of CPUs that follows the hardware
e9c84cb8 6895 * topology where each level pairs two lower groups (or better). This results
97fb7a0a 6896 * in O(log n) layers. Furthermore we reduce the number of CPUs going up the
e9c84cb8 6897 * tree to only the first of the previous level and we decrease the frequency
97fb7a0a 6898 * of load-balance at each level inv. proportional to the number of CPUs in
e9c84cb8
PZ
6899 * the groups.
6900 *
6901 * This yields:
6902 *
6903 * \Sum_{i = 0}^{log_2 n} { (1/2^i) * (n/2^i) * 2^i } = O(n) (5)
6904 *
6905 * where, in each term, 1/2^i is the load-balance frequency at level i,
6906 * n/2^i is the number of CPUs doing load-balance at that level, and
97fb7a0a 6907 * 2^i is the size of each group at that level; the sum runs over
e9c84cb8
PZ
6908 * all levels of the domain tree.
6909 *
6910 *
6911 * Coupled with a limit on how many tasks we can migrate every balance pass,
6912 * this makes (5) the runtime complexity of the balancer.
6913 *
6914 * An important property here is that each CPU is still (indirectly) connected
97fb7a0a 6915 * to every other CPU in at most O(log n) steps:
e9c84cb8
PZ
6916 *
6917 * The adjacency matrix of the resulting graph is given by:
6918 *
97a7142f 6919 *
e9c84cb8
PZ
6920 * A_i,j = \Union_{k = 0}^{log_2 n} [ (i % 2^k == 0) && i / 2^(k+1) == j / 2^(k+1) ] (6)
6921 *
6922 *
6923 * And you'll find that:
6924 *
6925 * A^(log_2 n)_i,j != 0 for all i,j (7)
6926 *
97fb7a0a 6927 * Showing there's indeed a path between every CPU in at most O(log n) steps.
e9c84cb8
PZ
6928 * The task movement gives a factor of O(m), giving a convergence complexity
6929 * of:
6930 *
6931 * O(nm log n), n := nr_cpus, m := nr_tasks (8)
6932 *
6933 *
6934 * WORK CONSERVING
6935 *
6936 * In order to avoid CPUs going idle while there's still work to do, new idle
97fb7a0a 6937 * balancing is more aggressive and has the newly idle CPU iterate up the domain
e9c84cb8
PZ
6938 * tree itself instead of relying on other CPUs to bring it work.
6939 *
6940 * This adds some complexity to both (5) and (8) but it reduces the total idle
6941 * time.
6942 *
6943 * [XXX more?]
6944 *
6945 *
6946 * CGROUPS
6947 *
6948 * Cgroups make a horror show out of (2), instead of a simple sum we get:
6949 *
6950 *
6951 * W_i,0 = \Sum_j \Prod_k w_k * (s_k,i / S_k) (9)
6952 *
6953 *
6954 * Where
6955 *
6956 * s_k,i = \Sum_j w_i,j,k and S_k = \Sum_i s_k,i (10)
6957 *
97fb7a0a 6958 * w_i,j,k is the weight of the j-th runnable task in the k-th cgroup on CPU i.
e9c84cb8
PZ
6959 *
6960 * The big problem is S_k, it's a global sum needed to compute a local (W_i)
6961 * property.
6962 *
6963 * [XXX write more on how we solve this.. _after_ merging pjt's patches that
6964 * rewrite all of this once again.]
97a7142f 6965 */
bf0f6f24 6966
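/*
 * A minimal standalone sketch of the decay in equation (3) above: each
 * update keeps (2^n - 1)/2^n of the previous average and blends in
 * 1/2^n of the instantaneous weight W_i,0.  The helper name and the
 * choice of n are illustrative assumptions, not kernel code.
 */
static unsigned long decayed_weight_avg(unsigned long w_avg,
					unsigned long w_inst,
					unsigned int n)
{
	/* W'_i,n = (2^n - 1)/2^n * W_i,n + 1/2^n * W_i,0 */
	return (w_avg * ((1UL << n) - 1) + w_inst) >> n;
}
/*
 * E.g. with n = 3, a previous average of 1024 and an instantaneous
 * weight of 0 decays to 896, then 784, ... towards the new weight.
 */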
ed387b78
HS
6967static unsigned long __read_mostly max_load_balance_interval = HZ/10;
6968
0ec8aa00
PZ
6969enum fbq_type { regular, remote, all };
6970
3b1baa64
MR
6971enum group_type {
6972 group_other = 0,
6973 group_misfit_task,
6974 group_imbalanced,
6975 group_overloaded,
6976};
6977
ddcdf6e7 6978#define LBF_ALL_PINNED 0x01
367456c7 6979#define LBF_NEED_BREAK 0x02
6263322c
PZ
6980#define LBF_DST_PINNED 0x04
6981#define LBF_SOME_PINNED 0x08
e022e0d3 6982#define LBF_NOHZ_STATS 0x10
f643ea22 6983#define LBF_NOHZ_AGAIN 0x20
ddcdf6e7
PZ
6984
6985struct lb_env {
6986 struct sched_domain *sd;
6987
ddcdf6e7 6988 struct rq *src_rq;
85c1e7da 6989 int src_cpu;
ddcdf6e7
PZ
6990
6991 int dst_cpu;
6992 struct rq *dst_rq;
6993
88b8dac0
SV
6994 struct cpumask *dst_grpmask;
6995 int new_dst_cpu;
ddcdf6e7 6996 enum cpu_idle_type idle;
bd939f45 6997 long imbalance;
b9403130
MW
6998 /* The set of CPUs under consideration for load-balancing */
6999 struct cpumask *cpus;
7000
ddcdf6e7 7001 unsigned int flags;
367456c7
PZ
7002
7003 unsigned int loop;
7004 unsigned int loop_break;
7005 unsigned int loop_max;
0ec8aa00
PZ
7006
7007 enum fbq_type fbq_type;
cad68e55 7008 enum group_type src_grp_type;
163122b7 7009 struct list_head tasks;
ddcdf6e7
PZ
7010};
7011
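/*
 * Hypothetical sketch of how a balance pass might populate the
 * environment above before detaching/attaching tasks.  load_balance()
 * itself is not part of this extract, so the initialisation below is
 * an assumption for illustration, not the actual call site.
 */
static inline void lb_env_example_init(struct lb_env *env,
					struct sched_domain *sd,
					struct rq *busiest, struct rq *this_rq,
					int this_cpu, enum cpu_idle_type idle)
{
	*env = (struct lb_env){
		.sd		= sd,
		.src_rq		= busiest,
		.src_cpu	= cpu_of(busiest),
		.dst_cpu	= this_cpu,
		.dst_rq		= this_rq,
		.idle		= idle,
		.loop_break	= 32,	/* cf. sched_nr_migrate_break below */
		.tasks		= LIST_HEAD_INIT(env->tasks),
	};
}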
029632fb
PZ
7012/*
7013 * Is this task likely cache-hot:
7014 */
5d5e2b1b 7015static int task_hot(struct task_struct *p, struct lb_env *env)
029632fb
PZ
7016{
7017 s64 delta;
7018
e5673f28
KT
7019 lockdep_assert_held(&env->src_rq->lock);
7020
029632fb
PZ
7021 if (p->sched_class != &fair_sched_class)
7022 return 0;
7023
1da1843f 7024 if (unlikely(task_has_idle_policy(p)))
029632fb
PZ
7025 return 0;
7026
7027 /*
7028 * Buddy candidates are cache hot:
7029 */
5d5e2b1b 7030 if (sched_feat(CACHE_HOT_BUDDY) && env->dst_rq->nr_running &&
029632fb
PZ
7031 (&p->se == cfs_rq_of(&p->se)->next ||
7032 &p->se == cfs_rq_of(&p->se)->last))
7033 return 1;
7034
7035 if (sysctl_sched_migration_cost == -1)
7036 return 1;
7037 if (sysctl_sched_migration_cost == 0)
7038 return 0;
7039
5d5e2b1b 7040 delta = rq_clock_task(env->src_rq) - p->se.exec_start;
029632fb
PZ
7041
7042 return delta < (s64)sysctl_sched_migration_cost;
7043}
7044
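/*
 * The heart of the heuristic above, as a standalone sketch: a task is
 * treated as cache hot when it last started running less than a
 * migration-cost threshold ago, with -1 meaning "always hot" and 0
 * "never hot".  Function name and parameters are illustrative
 * assumptions, not kernel code.
 */
static int ran_recently(long long now_ns, long long exec_start_ns,
			long long migration_cost_ns)
{
	if (migration_cost_ns == -1)
		return 1;
	if (migration_cost_ns == 0)
		return 0;

	return (now_ns - exec_start_ns) < migration_cost_ns;
}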
3a7053b3 7045#ifdef CONFIG_NUMA_BALANCING
c1ceac62 7046/*
2a1ed24c
SD
7047 * Returns 1 if task migration degrades locality.
7048 * Returns 0 if task migration improves locality, i.e. migration is preferred.
7049 * Returns -1 if task migration is not affected by locality.
c1ceac62 7050 */
2a1ed24c 7051static int migrate_degrades_locality(struct task_struct *p, struct lb_env *env)
3a7053b3 7052{
b1ad065e 7053 struct numa_group *numa_group = rcu_dereference(p->numa_group);
f35678b6
SD
7054 unsigned long src_weight, dst_weight;
7055 int src_nid, dst_nid, dist;
3a7053b3 7056
2a595721 7057 if (!static_branch_likely(&sched_numa_balancing))
2a1ed24c
SD
7058 return -1;
7059
c3b9bc5b 7060 if (!p->numa_faults || !(env->sd->flags & SD_NUMA))
2a1ed24c 7061 return -1;
7a0f3083
MG
7062
7063 src_nid = cpu_to_node(env->src_cpu);
7064 dst_nid = cpu_to_node(env->dst_cpu);
7065
83e1d2cd 7066 if (src_nid == dst_nid)
2a1ed24c 7067 return -1;
7a0f3083 7068
2a1ed24c
SD
7069 /* Migrating away from the preferred node is always bad. */
7070 if (src_nid == p->numa_preferred_nid) {
7071 if (env->src_rq->nr_running > env->src_rq->nr_preferred_running)
7072 return 1;
7073 else
7074 return -1;
7075 }
b1ad065e 7076
c1ceac62
RR
7077 /* Encourage migration to the preferred node. */
7078 if (dst_nid == p->numa_preferred_nid)
2a1ed24c 7079 return 0;
b1ad065e 7080
739294fb 7081 /* Leaving a core idle is often worse than degrading locality. */
f35678b6 7082 if (env->idle == CPU_IDLE)
739294fb
RR
7083 return -1;
7084
f35678b6 7085 dist = node_distance(src_nid, dst_nid);
c1ceac62 7086 if (numa_group) {
f35678b6
SD
7087 src_weight = group_weight(p, src_nid, dist);
7088 dst_weight = group_weight(p, dst_nid, dist);
c1ceac62 7089 } else {
f35678b6
SD
7090 src_weight = task_weight(p, src_nid, dist);
7091 dst_weight = task_weight(p, dst_nid, dist);
b1ad065e
RR
7092 }
7093
f35678b6 7094 return dst_weight < src_weight;
7a0f3083
MG
7095}
7096
3a7053b3 7097#else
2a1ed24c 7098static inline int migrate_degrades_locality(struct task_struct *p,
3a7053b3
MG
7099 struct lb_env *env)
7100{
2a1ed24c 7101 return -1;
7a0f3083 7102}
3a7053b3
MG
7103#endif
7104
1e3c88bd
PZ
7105/*
7106 * can_migrate_task - may task p from runqueue rq be migrated to this_cpu?
7107 */
7108static
8e45cb54 7109int can_migrate_task(struct task_struct *p, struct lb_env *env)
1e3c88bd 7110{
2a1ed24c 7111 int tsk_cache_hot;
e5673f28
KT
7112
7113 lockdep_assert_held(&env->src_rq->lock);
7114
1e3c88bd
PZ
7115 /*
7116 * We do not migrate tasks that:
d3198084 7117 * 1) are throttled (see throttled_lb_pair()), or
1e3c88bd 7118 * 2) cannot be migrated to this CPU due to cpus_allowed, or
d3198084
JK
7119 * 3) are running (obviously), or
7120 * 4) are cache-hot on their current CPU.
1e3c88bd 7121 */
d3198084
JK
7122 if (throttled_lb_pair(task_group(p), env->src_cpu, env->dst_cpu))
7123 return 0;
7124
0c98d344 7125 if (!cpumask_test_cpu(env->dst_cpu, &p->cpus_allowed)) {
e02e60c1 7126 int cpu;
88b8dac0 7127
ae92882e 7128 schedstat_inc(p->se.statistics.nr_failed_migrations_affine);
88b8dac0 7129
6263322c
PZ
7130 env->flags |= LBF_SOME_PINNED;
7131
88b8dac0 7132 /*
97fb7a0a 7133 * Remember if this task can be migrated to any other CPU in
88b8dac0
SV
7134 * our sched_group. We may want to revisit it if we couldn't
7135 * meet load balance goals by pulling other tasks on src_cpu.
7136 *
65a4433a
JH
7137 * Avoid computing new_dst_cpu for NEWLY_IDLE or if we have
7138 * already computed one in current iteration.
88b8dac0 7139 */
65a4433a 7140 if (env->idle == CPU_NEWLY_IDLE || (env->flags & LBF_DST_PINNED))
88b8dac0
SV
7141 return 0;
7142
97fb7a0a 7143 /* Prevent re-selecting dst_cpu via env's CPUs: */
e02e60c1 7144 for_each_cpu_and(cpu, env->dst_grpmask, env->cpus) {
0c98d344 7145 if (cpumask_test_cpu(cpu, &p->cpus_allowed)) {
6263322c 7146 env->flags |= LBF_DST_PINNED;
e02e60c1
JK
7147 env->new_dst_cpu = cpu;
7148 break;
7149 }
88b8dac0 7150 }
e02e60c1 7151
1e3c88bd
PZ
7152 return 0;
7153 }
88b8dac0
SV
7154
7155 /* Record that we found at least one task that could run on dst_cpu */
8e45cb54 7156 env->flags &= ~LBF_ALL_PINNED;
1e3c88bd 7157
ddcdf6e7 7158 if (task_running(env->src_rq, p)) {
ae92882e 7159 schedstat_inc(p->se.statistics.nr_failed_migrations_running);
1e3c88bd
PZ
7160 return 0;
7161 }
7162
7163 /*
7164 * Aggressive migration if:
3a7053b3
MG
7165 * 1) the destination NUMA node is preferred, or
7166 * 2) the task is cache cold, or
7167 * 3) too many balance attempts have failed.
1e3c88bd 7168 */
2a1ed24c
SD
7169 tsk_cache_hot = migrate_degrades_locality(p, env);
7170 if (tsk_cache_hot == -1)
7171 tsk_cache_hot = task_hot(p, env);
3a7053b3 7172
2a1ed24c 7173 if (tsk_cache_hot <= 0 ||
7a96c231 7174 env->sd->nr_balance_failed > env->sd->cache_nice_tries) {
2a1ed24c 7175 if (tsk_cache_hot == 1) {
ae92882e
JP
7176 schedstat_inc(env->sd->lb_hot_gained[env->idle]);
7177 schedstat_inc(p->se.statistics.nr_forced_migrations);
3a7053b3 7178 }
1e3c88bd
PZ
7179 return 1;
7180 }
7181
ae92882e 7182 schedstat_inc(p->se.statistics.nr_failed_migrations_hot);
4e2dcb73 7183 return 0;
1e3c88bd
PZ
7184}
7185
897c395f 7186/*
163122b7
KT
7187 * detach_task() -- detach the task for the migration specified in env
7188 */
7189static void detach_task(struct task_struct *p, struct lb_env *env)
7190{
7191 lockdep_assert_held(&env->src_rq->lock);
7192
163122b7 7193 p->on_rq = TASK_ON_RQ_MIGRATING;
5704ac0a 7194 deactivate_task(env->src_rq, p, DEQUEUE_NOCLOCK);
163122b7
KT
7195 set_task_cpu(p, env->dst_cpu);
7196}
7197
897c395f 7198/*
e5673f28 7199 * detach_one_task() -- tries to dequeue exactly one task from env->src_rq, as
897c395f 7200 * part of active balancing operations within "domain".
897c395f 7201 *
e5673f28 7202 * Returns a task if successful and NULL otherwise.
897c395f 7203 */
e5673f28 7204static struct task_struct *detach_one_task(struct lb_env *env)
897c395f 7205{
93824900 7206 struct task_struct *p;
897c395f 7207
e5673f28
KT
7208 lockdep_assert_held(&env->src_rq->lock);
7209
93824900
UR
7210 list_for_each_entry_reverse(p,
7211 &env->src_rq->cfs_tasks, se.group_node) {
367456c7
PZ
7212 if (!can_migrate_task(p, env))
7213 continue;
897c395f 7214
163122b7 7215 detach_task(p, env);
e5673f28 7216
367456c7 7217 /*
e5673f28 7218 * Right now, this is only the second place where
163122b7 7219 * lb_gained[env->idle] is updated (the other is detach_tasks()),
e5673f28 7220 * so we can safely collect stats here rather than
163122b7 7221 * inside detach_tasks().
367456c7 7222 */
ae92882e 7223 schedstat_inc(env->sd->lb_gained[env->idle]);
e5673f28 7224 return p;
897c395f 7225 }
e5673f28 7226 return NULL;
897c395f
PZ
7227}
7228
eb95308e
PZ
7229static const unsigned int sched_nr_migrate_break = 32;
7230
5d6523eb 7231/*
163122b7
KT
7232 * detach_tasks() -- tries to detach up to imbalance weighted load from
7233 * busiest_rq, as part of a balancing operation within domain "sd".
5d6523eb 7234 *
163122b7 7235 * Returns number of detached tasks if successful and 0 otherwise.
5d6523eb 7236 */
163122b7 7237static int detach_tasks(struct lb_env *env)
1e3c88bd 7238{
5d6523eb
PZ
7239 struct list_head *tasks = &env->src_rq->cfs_tasks;
7240 struct task_struct *p;
367456c7 7241 unsigned long load;
163122b7
KT
7242 int detached = 0;
7243
7244 lockdep_assert_held(&env->src_rq->lock);
1e3c88bd 7245
bd939f45 7246 if (env->imbalance <= 0)
5d6523eb 7247 return 0;
1e3c88bd 7248
5d6523eb 7249 while (!list_empty(tasks)) {
985d3a4c
YD
7250 /*
7251 * We don't want to steal all the tasks, otherwise we may be treated
7252 * likewise, which could at worst lead to a livelock.
7253 */
7254 if (env->idle != CPU_NOT_IDLE && env->src_rq->nr_running <= 1)
7255 break;
7256
93824900 7257 p = list_last_entry(tasks, struct task_struct, se.group_node);
1e3c88bd 7258
367456c7
PZ
7259 env->loop++;
7260 /* We've more or less seen every task there is, call it quits */
5d6523eb 7261 if (env->loop > env->loop_max)
367456c7 7262 break;
5d6523eb
PZ
7263
7264 /* take a breather every nr_migrate tasks */
367456c7 7265 if (env->loop > env->loop_break) {
eb95308e 7266 env->loop_break += sched_nr_migrate_break;
8e45cb54 7267 env->flags |= LBF_NEED_BREAK;
ee00e66f 7268 break;
a195f004 7269 }
1e3c88bd 7270
d3198084 7271 if (!can_migrate_task(p, env))
367456c7
PZ
7272 goto next;
7273
7274 load = task_h_load(p);
5d6523eb 7275
eb95308e 7276 if (sched_feat(LB_MIN) && load < 16 && !env->sd->nr_balance_failed)
367456c7
PZ
7277 goto next;
7278
bd939f45 7279 if ((load / 2) > env->imbalance)
367456c7 7280 goto next;
1e3c88bd 7281
163122b7
KT
7282 detach_task(p, env);
7283 list_add(&p->se.group_node, &env->tasks);
7284
7285 detached++;
bd939f45 7286 env->imbalance -= load;
1e3c88bd
PZ
7287
7288#ifdef CONFIG_PREEMPT
ee00e66f
PZ
7289 /*
7290 * NEWIDLE balancing is a source of latency, so preemptible
163122b7 7291 * kernels will stop after the first task is detached to minimize
ee00e66f
PZ
7292 * the critical section.
7293 */
5d6523eb 7294 if (env->idle == CPU_NEWLY_IDLE)
ee00e66f 7295 break;
1e3c88bd
PZ
7296#endif
7297
ee00e66f
PZ
7298 /*
7299 * We only want to steal up to the prescribed amount of
7300 * weighted load.
7301 */
bd939f45 7302 if (env->imbalance <= 0)
ee00e66f 7303 break;
367456c7
PZ
7304
7305 continue;
7306next:
93824900 7307 list_move(&p->se.group_node, tasks);
1e3c88bd 7308 }
5d6523eb 7309
1e3c88bd 7310 /*
163122b7
KT
7311 * Right now, this is one of only two places we collect this stat
7312 * so we can safely collect detach_one_task() stats here rather
7313 * than inside detach_one_task().
1e3c88bd 7314 */
ae92882e 7315 schedstat_add(env->sd->lb_gained[env->idle], detached);
1e3c88bd 7316
163122b7
KT
7317 return detached;
7318}
7319
7320/*
7321 * attach_task() -- attach the task detached by detach_task() to its new rq.
7322 */
7323static void attach_task(struct rq *rq, struct task_struct *p)
7324{
7325 lockdep_assert_held(&rq->lock);
7326
7327 BUG_ON(task_rq(p) != rq);
5704ac0a 7328 activate_task(rq, p, ENQUEUE_NOCLOCK);
3ea94de1 7329 p->on_rq = TASK_ON_RQ_QUEUED;
163122b7
KT
7330 check_preempt_curr(rq, p, 0);
7331}
7332
7333/*
7334 * attach_one_task() -- attaches the task returned from detach_one_task() to
7335 * its new rq.
7336 */
7337static void attach_one_task(struct rq *rq, struct task_struct *p)
7338{
8a8c69c3
PZ
7339 struct rq_flags rf;
7340
7341 rq_lock(rq, &rf);
5704ac0a 7342 update_rq_clock(rq);
163122b7 7343 attach_task(rq, p);
8a8c69c3 7344 rq_unlock(rq, &rf);
163122b7
KT
7345}
7346
7347/*
7348 * attach_tasks() -- attaches all tasks detached by detach_tasks() to their
7349 * new rq.
7350 */
7351static void attach_tasks(struct lb_env *env)
7352{
7353 struct list_head *tasks = &env->tasks;
7354 struct task_struct *p;
8a8c69c3 7355 struct rq_flags rf;
163122b7 7356
8a8c69c3 7357 rq_lock(env->dst_rq, &rf);
5704ac0a 7358 update_rq_clock(env->dst_rq);
163122b7
KT
7359
7360 while (!list_empty(tasks)) {
7361 p = list_first_entry(tasks, struct task_struct, se.group_node);
7362 list_del_init(&p->se.group_node);
1e3c88bd 7363
163122b7
KT
7364 attach_task(env->dst_rq, p);
7365 }
7366
8a8c69c3 7367 rq_unlock(env->dst_rq, &rf);
1e3c88bd
PZ
7368}
7369
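/*
 * A minimal userspace analogue of the detach_tasks()/attach_tasks()
 * pattern above: entries are moved onto a private list under the
 * source lock only, then re-attached to the destination under its own
 * lock, so the two locks are never held at the same time.  Everything
 * below is an illustrative sketch, not kernel code.
 */
#include <pthread.h>
#include <stddef.h>

struct node {
	struct node *next;
};

struct queue {
	pthread_mutex_t lock;
	struct node *head;
};

static struct node *detach_up_to(struct queue *src, int nr)
{
	struct node *moved = NULL, *n;

	pthread_mutex_lock(&src->lock);
	while (nr-- > 0 && (n = src->head) != NULL) {
		src->head = n->next;
		n->next = moved;		/* collect on a private list */
		moved = n;
	}
	pthread_mutex_unlock(&src->lock);

	return moved;
}

static void attach_all(struct queue *dst, struct node *moved)
{
	pthread_mutex_lock(&dst->lock);
	while (moved) {
		struct node *next = moved->next;

		moved->next = dst->head;	/* re-attach at the head */
		dst->head = moved;
		moved = next;
	}
	pthread_mutex_unlock(&dst->lock);
}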
1936c53c
VG
7370static inline bool cfs_rq_has_blocked(struct cfs_rq *cfs_rq)
7371{
7372 if (cfs_rq->avg.load_avg)
7373 return true;
7374
7375 if (cfs_rq->avg.util_avg)
7376 return true;
7377
7378 return false;
7379}
7380
91c27493 7381static inline bool others_have_blocked(struct rq *rq)
371bf427
VG
7382{
7383 if (READ_ONCE(rq->avg_rt.util_avg))
7384 return true;
7385
3727e0e1
VG
7386 if (READ_ONCE(rq->avg_dl.util_avg))
7387 return true;
7388
11d4afd4 7389#ifdef CONFIG_HAVE_SCHED_AVG_IRQ
91c27493
VG
7390 if (READ_ONCE(rq->avg_irq.util_avg))
7391 return true;
7392#endif
7393
371bf427
VG
7394 return false;
7395}
7396
1936c53c
VG
7397#ifdef CONFIG_FAIR_GROUP_SCHED
7398
a9e7f654
TH
7399static inline bool cfs_rq_is_decayed(struct cfs_rq *cfs_rq)
7400{
7401 if (cfs_rq->load.weight)
7402 return false;
7403
7404 if (cfs_rq->avg.load_sum)
7405 return false;
7406
7407 if (cfs_rq->avg.util_sum)
7408 return false;
7409
1ea6c46a 7410 if (cfs_rq->avg.runnable_load_sum)
a9e7f654
TH
7411 return false;
7412
7413 return true;
7414}
7415
48a16753 7416static void update_blocked_averages(int cpu)
9e3081ca 7417{
9e3081ca 7418 struct rq *rq = cpu_rq(cpu);
a9e7f654 7419 struct cfs_rq *cfs_rq, *pos;
12b04875 7420 const struct sched_class *curr_class;
8a8c69c3 7421 struct rq_flags rf;
f643ea22 7422 bool done = true;
9e3081ca 7423
8a8c69c3 7424 rq_lock_irqsave(rq, &rf);
48a16753 7425 update_rq_clock(rq);
9d89c257 7426
9763b67f
PZ
7427 /*
7428 * Iterates the task_group tree in a bottom up fashion, see
7429 * list_add_leaf_cfs_rq() for details.
7430 */
a9e7f654 7431 for_each_leaf_cfs_rq_safe(rq, cfs_rq, pos) {
bc427898
VG
7432 struct sched_entity *se;
7433
9d89c257
YD
7434 /* throttled entities do not contribute to load */
7435 if (throttled_hierarchy(cfs_rq))
7436 continue;
48a16753 7437
3a123bbb 7438 if (update_cfs_rq_load_avg(cfs_rq_clock_task(cfs_rq), cfs_rq))
9d89c257 7439 update_tg_load_avg(cfs_rq, 0);
4e516076 7440
bc427898
VG
7441 /* Propagate pending load changes to the parent, if any: */
7442 se = cfs_rq->tg->se[cpu];
7443 if (se && !skip_blocked_update(se))
88c0616e 7444 update_load_avg(cfs_rq_of(se), se, 0);
a9e7f654
TH
7445
7446 /*
7447 * There can be a lot of idle CPU cgroups. Don't let fully
7448 * decayed cfs_rqs linger on the list.
7449 */
7450 if (cfs_rq_is_decayed(cfs_rq))
7451 list_del_leaf_cfs_rq(cfs_rq);
1936c53c
VG
7452
7453 /* Don't need periodic decay once load/util_avg are null */
7454 if (cfs_rq_has_blocked(cfs_rq))
f643ea22 7455 done = false;
9d89c257 7456 }
12b04875
VG
7457
7458 curr_class = rq->curr->sched_class;
7459 update_rt_rq_load_avg(rq_clock_task(rq), rq, curr_class == &rt_sched_class);
7460 update_dl_rq_load_avg(rq_clock_task(rq), rq, curr_class == &dl_sched_class);
91c27493 7461 update_irq_load_avg(rq, 0);
371bf427 7462 /* Don't need periodic decay once load/util_avg are null */
91c27493 7463 if (others_have_blocked(rq))
371bf427 7464 done = false;
e022e0d3
PZ
7465
7466#ifdef CONFIG_NO_HZ_COMMON
7467 rq->last_blocked_load_update_tick = jiffies;
f643ea22
VG
7468 if (done)
7469 rq->has_blocked_load = 0;
e022e0d3 7470#endif
8a8c69c3 7471 rq_unlock_irqrestore(rq, &rf);
9e3081ca
PZ
7472}
7473
9763b67f 7474/*
68520796 7475 * Compute the hierarchical load factor for cfs_rq and all its ancestors.
9763b67f
PZ
7476 * This needs to be done in a top-down fashion because the load of a child
7477 * group is a fraction of its parent's load.
7478 */
68520796 7479static void update_cfs_rq_h_load(struct cfs_rq *cfs_rq)
9763b67f 7480{
68520796
VD
7481 struct rq *rq = rq_of(cfs_rq);
7482 struct sched_entity *se = cfs_rq->tg->se[cpu_of(rq)];
a35b6466 7483 unsigned long now = jiffies;
68520796 7484 unsigned long load;
a35b6466 7485
68520796 7486 if (cfs_rq->last_h_load_update == now)
a35b6466
PZ
7487 return;
7488
68520796
VD
7489 cfs_rq->h_load_next = NULL;
7490 for_each_sched_entity(se) {
7491 cfs_rq = cfs_rq_of(se);
7492 cfs_rq->h_load_next = se;
7493 if (cfs_rq->last_h_load_update == now)
7494 break;
7495 }
a35b6466 7496
68520796 7497 if (!se) {
7ea241af 7498 cfs_rq->h_load = cfs_rq_load_avg(cfs_rq);
68520796
VD
7499 cfs_rq->last_h_load_update = now;
7500 }
7501
7502 while ((se = cfs_rq->h_load_next) != NULL) {
7503 load = cfs_rq->h_load;
7ea241af
YD
7504 load = div64_ul(load * se->avg.load_avg,
7505 cfs_rq_load_avg(cfs_rq) + 1);
68520796
VD
7506 cfs_rq = group_cfs_rq(se);
7507 cfs_rq->h_load = load;
7508 cfs_rq->last_h_load_update = now;
7509 }
9763b67f
PZ
7510}
7511
367456c7 7512static unsigned long task_h_load(struct task_struct *p)
230059de 7513{
367456c7 7514 struct cfs_rq *cfs_rq = task_cfs_rq(p);
230059de 7515
68520796 7516 update_cfs_rq_h_load(cfs_rq);
9d89c257 7517 return div64_ul(p->se.avg.load_avg * cfs_rq->h_load,
7ea241af 7518 cfs_rq_load_avg(cfs_rq) + 1);
230059de
PZ
7519}
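/*
 * Worked sketch of the hierarchy walk above, with made-up numbers: a
 * root cfs_rq with load_avg 2048 gives h_load 2048; a group entity
 * with load_avg 512 queued on it gets h_load 2048 * 512 / (2048 + 1)
 * ~= 511; a task with load_avg 256 on that group's cfs_rq (whose
 * load_avg is 512) then reports 511 * 256 / (512 + 1) ~= 255.
 */
static unsigned long task_h_load_example(void)
{
	unsigned long root_load = 2048, grp_se_load = 512;
	unsigned long grp_cfs_load = 512, task_load = 256;
	unsigned long grp_h_load;

	/* the group's share of the root runqueue load */
	grp_h_load = root_load * grp_se_load / (root_load + 1);

	/* the task's share of its group's h_load */
	return grp_h_load * task_load / (grp_cfs_load + 1);
}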
7520#else
48a16753 7521static inline void update_blocked_averages(int cpu)
9e3081ca 7522{
6c1d47c0
VG
7523 struct rq *rq = cpu_rq(cpu);
7524 struct cfs_rq *cfs_rq = &rq->cfs;
12b04875 7525 const struct sched_class *curr_class;
8a8c69c3 7526 struct rq_flags rf;
6c1d47c0 7527
8a8c69c3 7528 rq_lock_irqsave(rq, &rf);
6c1d47c0 7529 update_rq_clock(rq);
3a123bbb 7530 update_cfs_rq_load_avg(cfs_rq_clock_task(cfs_rq), cfs_rq);
12b04875
VG
7531
7532 curr_class = rq->curr->sched_class;
7533 update_rt_rq_load_avg(rq_clock_task(rq), rq, curr_class == &rt_sched_class);
7534 update_dl_rq_load_avg(rq_clock_task(rq), rq, curr_class == &dl_sched_class);
91c27493 7535 update_irq_load_avg(rq, 0);
e022e0d3
PZ
7536#ifdef CONFIG_NO_HZ_COMMON
7537 rq->last_blocked_load_update_tick = jiffies;
91c27493 7538 if (!cfs_rq_has_blocked(cfs_rq) && !others_have_blocked(rq))
f643ea22 7539 rq->has_blocked_load = 0;
e022e0d3 7540#endif
8a8c69c3 7541 rq_unlock_irqrestore(rq, &rf);
9e3081ca
PZ
7542}
7543
367456c7 7544static unsigned long task_h_load(struct task_struct *p)
1e3c88bd 7545{
9d89c257 7546 return p->se.avg.load_avg;
1e3c88bd 7547}
230059de 7548#endif
1e3c88bd 7549
1e3c88bd 7550/********** Helpers for find_busiest_group ************************/
caeb178c 7551
1e3c88bd
PZ
7552/*
7553 * sg_lb_stats - stats of a sched_group required for load_balancing
7554 */
7555struct sg_lb_stats {
7556 unsigned long avg_load; /*Avg load across the CPUs of the group */
7557 unsigned long group_load; /* Total load over the CPUs of the group */
1e3c88bd 7558 unsigned long sum_weighted_load; /* Weighted load of group's tasks */
56cf515b 7559 unsigned long load_per_task;
63b2ca30 7560 unsigned long group_capacity;
9e91d61d 7561 unsigned long group_util; /* Total utilization of the group */
147c5fc2 7562 unsigned int sum_nr_running; /* Nr tasks running in the group */
147c5fc2
PZ
7563 unsigned int idle_cpus;
7564 unsigned int group_weight;
caeb178c 7565 enum group_type group_type;
ea67821b 7566 int group_no_capacity;
3b1baa64 7567 unsigned long group_misfit_task_load; /* A CPU has a task too big for its capacity */
0ec8aa00
PZ
7568#ifdef CONFIG_NUMA_BALANCING
7569 unsigned int nr_numa_running;
7570 unsigned int nr_preferred_running;
7571#endif
1e3c88bd
PZ
7572};
7573
56cf515b
JK
7574/*
7575 * sd_lb_stats - Structure to store the statistics of a sched_domain
7576 * during load balancing.
7577 */
7578struct sd_lb_stats {
7579 struct sched_group *busiest; /* Busiest group in this sd */
7580 struct sched_group *local; /* Local group in this sd */
90001d67 7581 unsigned long total_running;
56cf515b 7582 unsigned long total_load; /* Total load of all groups in sd */
63b2ca30 7583 unsigned long total_capacity; /* Total capacity of all groups in sd */
56cf515b
JK
7584 unsigned long avg_load; /* Average load across all groups in sd */
7585
56cf515b 7586 struct sg_lb_stats busiest_stat;/* Statistics of the busiest group */
147c5fc2 7587 struct sg_lb_stats local_stat; /* Statistics of the local group */
56cf515b
JK
7588};
7589
147c5fc2
PZ
7590static inline void init_sd_lb_stats(struct sd_lb_stats *sds)
7591{
7592 /*
7593 * Skimp on the clearing to avoid duplicate work. We can avoid clearing
7594 * local_stat because update_sg_lb_stats() does a full clear/assignment.
7595 * We must however clear busiest_stat::avg_load because
7596 * update_sd_pick_busiest() reads this before assignment.
7597 */
7598 *sds = (struct sd_lb_stats){
7599 .busiest = NULL,
7600 .local = NULL,
90001d67 7601 .total_running = 0UL,
147c5fc2 7602 .total_load = 0UL,
63b2ca30 7603 .total_capacity = 0UL,
147c5fc2
PZ
7604 .busiest_stat = {
7605 .avg_load = 0UL,
caeb178c
RR
7606 .sum_nr_running = 0,
7607 .group_type = group_other,
147c5fc2
PZ
7608 },
7609 };
7610}
7611
1e3c88bd
PZ
7612/**
7613 * get_sd_load_idx - Obtain the load index for a given sched domain.
7614 * @sd: The sched_domain whose load_idx is to be obtained.
ed1b7732 7615 * @idle: The idle status of the CPU for whose sd load_idx is obtained.
e69f6186
YB
7616 *
7617 * Return: The load index.
1e3c88bd
PZ
7618 */
7619static inline int get_sd_load_idx(struct sched_domain *sd,
7620 enum cpu_idle_type idle)
7621{
7622 int load_idx;
7623
7624 switch (idle) {
7625 case CPU_NOT_IDLE:
7626 load_idx = sd->busy_idx;
7627 break;
7628
7629 case CPU_NEWLY_IDLE:
7630 load_idx = sd->newidle_idx;
7631 break;
7632 default:
7633 load_idx = sd->idle_idx;
7634 break;
7635 }
7636
7637 return load_idx;
7638}
7639
287cdaac 7640static unsigned long scale_rt_capacity(struct sched_domain *sd, int cpu)
1e3c88bd
PZ
7641{
7642 struct rq *rq = cpu_rq(cpu);
287cdaac 7643 unsigned long max = arch_scale_cpu_capacity(sd, cpu);
523e979d 7644 unsigned long used, free;
523e979d 7645 unsigned long irq;
b654f7de 7646
2e62c474 7647 irq = cpu_util_irq(rq);
cadefd3d 7648
523e979d
VG
7649 if (unlikely(irq >= max))
7650 return 1;
aa483808 7651
523e979d
VG
7652 used = READ_ONCE(rq->avg_rt.util_avg);
7653 used += READ_ONCE(rq->avg_dl.util_avg);
1e3c88bd 7654
523e979d
VG
7655 if (unlikely(used >= max))
7656 return 1;
1e3c88bd 7657
523e979d 7658 free = max - used;
2e62c474
VG
7659
7660 return scale_irq_capacity(free, irq, max);
1e3c88bd
PZ
7661}
7662
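/*
 * The arithmetic above in isolation: the RT and DL utilization is
 * subtracted from the original capacity and the remainder is scaled
 * down by the IRQ share, assuming scale_irq_capacity() computes
 * free * (max - irq) / max.  Illustrative sketch with made-up numbers,
 * not kernel code.
 */
static unsigned long cfs_capacity_left(unsigned long max, unsigned long rt,
					unsigned long dl, unsigned long irq)
{
	unsigned long free;

	if (irq >= max || rt + dl >= max)
		return 1;

	free = max - rt - dl;			/* e.g. 1024 - 200 - 56 = 768 */
	return free * (max - irq) / max;	/* e.g. 768 * 960 / 1024 = 720 */
}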
ced549fa 7663static void update_cpu_capacity(struct sched_domain *sd, int cpu)
1e3c88bd 7664{
287cdaac 7665 unsigned long capacity = scale_rt_capacity(sd, cpu);
1e3c88bd
PZ
7666 struct sched_group *sdg = sd->groups;
7667
523e979d 7668 cpu_rq(cpu)->cpu_capacity_orig = arch_scale_cpu_capacity(sd, cpu);
1e3c88bd 7669
ced549fa
NP
7670 if (!capacity)
7671 capacity = 1;
1e3c88bd 7672
ced549fa
NP
7673 cpu_rq(cpu)->cpu_capacity = capacity;
7674 sdg->sgc->capacity = capacity;
bf475ce0 7675 sdg->sgc->min_capacity = capacity;
e3d6d0cb 7676 sdg->sgc->max_capacity = capacity;
1e3c88bd
PZ
7677}
7678
63b2ca30 7679void update_group_capacity(struct sched_domain *sd, int cpu)
1e3c88bd
PZ
7680{
7681 struct sched_domain *child = sd->child;
7682 struct sched_group *group, *sdg = sd->groups;
e3d6d0cb 7683 unsigned long capacity, min_capacity, max_capacity;
4ec4412e
VG
7684 unsigned long interval;
7685
7686 interval = msecs_to_jiffies(sd->balance_interval);
7687 interval = clamp(interval, 1UL, max_load_balance_interval);
63b2ca30 7688 sdg->sgc->next_update = jiffies + interval;
1e3c88bd
PZ
7689
7690 if (!child) {
ced549fa 7691 update_cpu_capacity(sd, cpu);
1e3c88bd
PZ
7692 return;
7693 }
7694
dc7ff76e 7695 capacity = 0;
bf475ce0 7696 min_capacity = ULONG_MAX;
e3d6d0cb 7697 max_capacity = 0;
1e3c88bd 7698
74a5ce20
PZ
7699 if (child->flags & SD_OVERLAP) {
7700 /*
7701 * SD_OVERLAP domains cannot assume that child groups
7702 * span the current group.
7703 */
7704
ae4df9d6 7705 for_each_cpu(cpu, sched_group_span(sdg)) {
63b2ca30 7706 struct sched_group_capacity *sgc;
9abf24d4 7707 struct rq *rq = cpu_rq(cpu);
863bffc8 7708
9abf24d4 7709 /*
63b2ca30 7710 * build_sched_domains() -> init_sched_groups_capacity()
9abf24d4
SD
7711 * gets here before we've attached the domains to the
7712 * runqueues.
7713 *
ced549fa
NP
7714 * Use capacity_of(), which is set irrespective of domains
7715 * in update_cpu_capacity().
9abf24d4 7716 *
dc7ff76e 7717 * This prevents capacity from being 0 and
9abf24d4 7718 * causing divide-by-zero issues on boot.
9abf24d4
SD
7719 */
7720 if (unlikely(!rq->sd)) {
ced549fa 7721 capacity += capacity_of(cpu);
bf475ce0
MR
7722 } else {
7723 sgc = rq->sd->groups->sgc;
7724 capacity += sgc->capacity;
9abf24d4 7725 }
863bffc8 7726
bf475ce0 7727 min_capacity = min(capacity, min_capacity);
e3d6d0cb 7728 max_capacity = max(capacity, max_capacity);
863bffc8 7729 }
74a5ce20
PZ
7730 } else {
7731 /*
7732 * !SD_OVERLAP domains can assume that child groups
7733 * span the current group.
97a7142f 7734 */
74a5ce20
PZ
7735
7736 group = child->groups;
7737 do {
bf475ce0
MR
7738 struct sched_group_capacity *sgc = group->sgc;
7739
7740 capacity += sgc->capacity;
7741 min_capacity = min(sgc->min_capacity, min_capacity);
e3d6d0cb 7742 max_capacity = max(sgc->max_capacity, max_capacity);
74a5ce20
PZ
7743 group = group->next;
7744 } while (group != child->groups);
7745 }
1e3c88bd 7746
63b2ca30 7747 sdg->sgc->capacity = capacity;
bf475ce0 7748 sdg->sgc->min_capacity = min_capacity;
e3d6d0cb 7749 sdg->sgc->max_capacity = max_capacity;
1e3c88bd
PZ
7750}
7751
9d5efe05 7752/*
ea67821b
VG
7753 * Check whether the capacity of the rq has been noticeably reduced by side
7754 * activity. The imbalance_pct is used for the threshold.
7755 * Return true if the capacity is reduced.
9d5efe05
SV
7756 */
7757static inline int
ea67821b 7758check_cpu_capacity(struct rq *rq, struct sched_domain *sd)
9d5efe05 7759{
ea67821b
VG
7760 return ((rq->cpu_capacity * sd->imbalance_pct) <
7761 (rq->cpu_capacity_orig * 100));
9d5efe05
SV
7762}
7763
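/*
 * For instance, with illustrative numbers: cpu_capacity_orig = 1024,
 * side activity leaving cpu_capacity = 800 and imbalance_pct = 117
 * gives 800 * 117 = 93600 < 1024 * 100 = 102400, so the check above
 * reports the capacity as noticeably reduced.
 */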
30ce5dab
PZ
7764/*
7765 * Group imbalance indicates (and tries to solve) the problem where balancing
0c98d344 7766 * groups is inadequate due to ->cpus_allowed constraints.
30ce5dab 7767 *
97fb7a0a
IM
7768 * Imagine a situation of two groups of 4 CPUs each and 4 tasks each with a
7769 * cpumask covering 1 CPU of the first group and 3 CPUs of the second group.
30ce5dab
PZ
7770 * Something like:
7771 *
2b4d5b25
IM
7772 * { 0 1 2 3 } { 4 5 6 7 }
7773 * (the tasks' cpumask covers CPU 3 in the first group and three CPUs of the second)
30ce5dab
PZ
7774 *
7775 * If we were to balance group-wise we'd place two tasks in the first group and
7776 * two tasks in the second group. Clearly this is undesired as it will overload
97fb7a0a 7777 * cpu 3 and leave one of the CPUs in the second group unused.
30ce5dab
PZ
7778 *
7779 * The current solution to this issue is detecting the skew in the first group
6263322c
PZ
7780 * by noticing the lower domain failed to reach balance and had difficulty
7781 * moving tasks due to affinity constraints.
30ce5dab
PZ
7782 *
7783 * When this is so detected, this group becomes a candidate for busiest; see
ed1b7732 7784 * update_sd_pick_busiest(). And calculate_imbalance() and
6263322c 7785 * find_busiest_group() avoid some of the usual balance conditions to allow it
30ce5dab
PZ
7786 * to create an effective group imbalance.
7787 *
7788 * This is a somewhat tricky proposition since the next run might not find the
7789 * group imbalance and decide the groups need to be balanced again. A most
7790 * subtle and fragile situation.
7791 */
7792
6263322c 7793static inline int sg_imbalanced(struct sched_group *group)
30ce5dab 7794{
63b2ca30 7795 return group->sgc->imbalance;
30ce5dab
PZ
7796}
7797
b37d9316 7798/*
ea67821b
VG
7799 * group_has_capacity returns true if the group has spare capacity that could
7800 * be used by some tasks.
7801 * We consider that a group has spare capacity if the number of tasks is
9e91d61d
DE
7802 * smaller than the number of CPUs or if the utilization is lower than the
7803 * available capacity for CFS tasks.
ea67821b
VG
7804 * For the latter, we use a threshold to stabilize the state, to take into
7805 * account the variance of the tasks' load and to return true if the available
7806 * capacity is meaningful for the load balancer.
7807 * As an example, an available capacity of 1% can appear but it doesn't
7808 * bring any benefit to load balancing.
b37d9316 7809 */
ea67821b
VG
7810static inline bool
7811group_has_capacity(struct lb_env *env, struct sg_lb_stats *sgs)
b37d9316 7812{
ea67821b
VG
7813 if (sgs->sum_nr_running < sgs->group_weight)
7814 return true;
c61037e9 7815
ea67821b 7816 if ((sgs->group_capacity * 100) >
9e91d61d 7817 (sgs->group_util * env->sd->imbalance_pct))
ea67821b 7818 return true;
b37d9316 7819
ea67821b
VG
7820 return false;
7821}
7822
7823/*
7824 * group_is_overloaded returns true if the group has more tasks than it can
7825 * handle.
7826 * group_is_overloaded is not equal to !group_has_capacity because a group
7827 * with exactly the right number of tasks has no spare capacity left but is
7828 * not overloaded, so both group_has_capacity and group_is_overloaded return
7829 * false.
7830 */
7831static inline bool
7832group_is_overloaded(struct lb_env *env, struct sg_lb_stats *sgs)
7833{
7834 if (sgs->sum_nr_running <= sgs->group_weight)
7835 return false;
b37d9316 7836
ea67821b 7837 if ((sgs->group_capacity * 100) <
9e91d61d 7838 (sgs->group_util * env->sd->imbalance_pct))
ea67821b 7839 return true;
b37d9316 7840
ea67821b 7841 return false;
b37d9316
PZ
7842}
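/*
 * The two thresholds above in isolation, as an illustrative sketch:
 * with an assumed imbalance_pct of 125, a group is considered to have
 * spare capacity while its utilization stays below 100/125 = 80% of
 * its capacity, and overloaded once it exceeds that ratio with more
 * tasks than CPUs.  Names and numbers here are assumptions.
 */
static inline int capacity_threshold_ok(unsigned long capacity,
					unsigned long util,
					unsigned int imbalance_pct)
{
	return capacity * 100 > util * imbalance_pct;
}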
7843
9e0994c0 7844/*
e3d6d0cb 7845 * group_smaller_min_cpu_capacity: Returns true if sched_group sg has smaller
9e0994c0
MR
7846 * per-CPU capacity than sched_group ref.
7847 */
7848static inline bool
e3d6d0cb 7849group_smaller_min_cpu_capacity(struct sched_group *sg, struct sched_group *ref)
9e0994c0
MR
7850{
7851 return sg->sgc->min_capacity * capacity_margin <
7852 ref->sgc->min_capacity * 1024;
7853}
7854
e3d6d0cb
MR
7855/*
7856 * group_smaller_max_cpu_capacity: Returns true if sched_group sg has smaller
7857 * per-CPU capacity_orig than sched_group ref.
7858 */
7859static inline bool
7860group_smaller_max_cpu_capacity(struct sched_group *sg, struct sched_group *ref)
7861{
7862 return sg->sgc->max_capacity * capacity_margin <
7863 ref->sgc->max_capacity * 1024;
7864}
7865
79a89f92
LY
7866static inline enum
7867group_type group_classify(struct sched_group *group,
7868 struct sg_lb_stats *sgs)
caeb178c 7869{
ea67821b 7870 if (sgs->group_no_capacity)
caeb178c
RR
7871 return group_overloaded;
7872
7873 if (sg_imbalanced(group))
7874 return group_imbalanced;
7875
3b1baa64
MR
7876 if (sgs->group_misfit_task_load)
7877 return group_misfit_task;
7878
caeb178c
RR
7879 return group_other;
7880}
7881
63928384 7882static bool update_nohz_stats(struct rq *rq, bool force)
e022e0d3
PZ
7883{
7884#ifdef CONFIG_NO_HZ_COMMON
7885 unsigned int cpu = rq->cpu;
7886
f643ea22
VG
7887 if (!rq->has_blocked_load)
7888 return false;
7889
e022e0d3 7890 if (!cpumask_test_cpu(cpu, nohz.idle_cpus_mask))
f643ea22 7891 return false;
e022e0d3 7892
63928384 7893 if (!force && !time_after(jiffies, rq->last_blocked_load_update_tick))
f643ea22 7894 return true;
e022e0d3
PZ
7895
7896 update_blocked_averages(cpu);
f643ea22
VG
7897
7898 return rq->has_blocked_load;
7899#else
7900 return false;
e022e0d3
PZ
7901#endif
7902}
7903
1e3c88bd
PZ
7904/**
7905 * update_sg_lb_stats - Update sched_group's statistics for load balancing.
cd96891d 7906 * @env: The load balancing environment.
1e3c88bd 7907 * @group: sched_group whose statistics are to be updated.
1e3c88bd 7908 * @load_idx: Load index of sched_domain of this_cpu for load calc.
1e3c88bd 7909 * @local_group: Does group contain this_cpu.
1e3c88bd 7910 * @sgs: variable to hold the statistics for this group.
757ffdd7 7911 * @overload: Indicate pullable load (e.g. >1 runnable task).
1e3c88bd 7912 */
bd939f45
PZ
7913static inline void update_sg_lb_stats(struct lb_env *env,
7914 struct sched_group *group, int load_idx,
4486edd1
TC
7915 int local_group, struct sg_lb_stats *sgs,
7916 bool *overload)
1e3c88bd 7917{
30ce5dab 7918 unsigned long load;
a426f99c 7919 int i, nr_running;
1e3c88bd 7920
b72ff13c
PZ
7921 memset(sgs, 0, sizeof(*sgs));
7922
ae4df9d6 7923 for_each_cpu_and(i, sched_group_span(group), env->cpus) {
1e3c88bd
PZ
7924 struct rq *rq = cpu_rq(i);
7925
63928384 7926 if ((env->flags & LBF_NOHZ_STATS) && update_nohz_stats(rq, false))
f643ea22 7927 env->flags |= LBF_NOHZ_AGAIN;
e022e0d3 7928
97fb7a0a 7929 /* Bias balancing toward CPUs of our domain: */
6263322c 7930 if (local_group)
04f733b4 7931 load = target_load(i, load_idx);
6263322c 7932 else
1e3c88bd 7933 load = source_load(i, load_idx);
1e3c88bd
PZ
7934
7935 sgs->group_load += load;
9e91d61d 7936 sgs->group_util += cpu_util(i);
65fdac08 7937 sgs->sum_nr_running += rq->cfs.h_nr_running;
4486edd1 7938
a426f99c
WL
7939 nr_running = rq->nr_running;
7940 if (nr_running > 1)
4486edd1
TC
7941 *overload = true;
7942
0ec8aa00
PZ
7943#ifdef CONFIG_NUMA_BALANCING
7944 sgs->nr_numa_running += rq->nr_numa_running;
7945 sgs->nr_preferred_running += rq->nr_preferred_running;
7946#endif
c7132dd6 7947 sgs->sum_weighted_load += weighted_cpuload(rq);
a426f99c
WL
7948 /*
7949 * No need to call idle_cpu() if nr_running is not 0
7950 */
7951 if (!nr_running && idle_cpu(i))
aae6d3dd 7952 sgs->idle_cpus++;
3b1baa64
MR
7953
7954 if (env->sd->flags & SD_ASYM_CPUCAPACITY &&
757ffdd7 7955 sgs->group_misfit_task_load < rq->misfit_task_load) {
3b1baa64 7956 sgs->group_misfit_task_load = rq->misfit_task_load;
757ffdd7
VS
7957 *overload = 1;
7958 }
1e3c88bd
PZ
7959 }
7960
63b2ca30
NP
7961 /* Adjust by relative CPU capacity of the group */
7962 sgs->group_capacity = group->sgc->capacity;
ca8ce3d0 7963 sgs->avg_load = (sgs->group_load*SCHED_CAPACITY_SCALE) / sgs->group_capacity;
1e3c88bd 7964
dd5feea1 7965 if (sgs->sum_nr_running)
38d0f770 7966 sgs->load_per_task = sgs->sum_weighted_load / sgs->sum_nr_running;
1e3c88bd 7967
aae6d3dd 7968 sgs->group_weight = group->group_weight;
b37d9316 7969
ea67821b 7970 sgs->group_no_capacity = group_is_overloaded(env, sgs);
79a89f92 7971 sgs->group_type = group_classify(group, sgs);
1e3c88bd
PZ
7972}
7973
532cb4c4
MN
7974/**
7975 * update_sd_pick_busiest - return 1 on busiest group
cd96891d 7976 * @env: The load balancing environment.
532cb4c4
MN
7977 * @sds: sched_domain statistics
7978 * @sg: sched_group candidate to be checked for being the busiest
b6b12294 7979 * @sgs: sched_group statistics
532cb4c4
MN
7980 *
7981 * Determine if @sg is a busier group than the previously selected
7982 * busiest group.
e69f6186
YB
7983 *
7984 * Return: %true if @sg is a busier group than the previously selected
7985 * busiest group. %false otherwise.
532cb4c4 7986 */
bd939f45 7987static bool update_sd_pick_busiest(struct lb_env *env,
532cb4c4
MN
7988 struct sd_lb_stats *sds,
7989 struct sched_group *sg,
bd939f45 7990 struct sg_lb_stats *sgs)
532cb4c4 7991{
caeb178c 7992 struct sg_lb_stats *busiest = &sds->busiest_stat;
532cb4c4 7993
cad68e55
MR
7994 /*
7995 * Don't try to pull misfit tasks we can't help.
7996 * We can use max_capacity here as reduction in capacity on some
7997 * CPUs in the group should either be possible to resolve
7998 * internally or be covered by avg_load imbalance (eventually).
7999 */
8000 if (sgs->group_type == group_misfit_task &&
8001 (!group_smaller_max_cpu_capacity(sg, sds->local) ||
8002 !group_has_capacity(env, &sds->local_stat)))
8003 return false;
8004
caeb178c 8005 if (sgs->group_type > busiest->group_type)
532cb4c4
MN
8006 return true;
8007
caeb178c
RR
8008 if (sgs->group_type < busiest->group_type)
8009 return false;
8010
8011 if (sgs->avg_load <= busiest->avg_load)
8012 return false;
8013
9e0994c0
MR
8014 if (!(env->sd->flags & SD_ASYM_CPUCAPACITY))
8015 goto asym_packing;
8016
8017 /*
8018 * Candidate sg has no more than one task per CPU and
8019 * has higher per-CPU capacity. Migrating tasks to less
8020 * capable CPUs may harm throughput. Maximize throughput,
8021 * power/energy consequences are not considered.
8022 */
8023 if (sgs->sum_nr_running <= sgs->group_weight &&
e3d6d0cb 8024 group_smaller_min_cpu_capacity(sds->local, sg))
9e0994c0
MR
8025 return false;
8026
cad68e55
MR
8027 /*
8028 * If we have more than one misfit sg go with the biggest misfit.
8029 */
8030 if (sgs->group_type == group_misfit_task &&
8031 sgs->group_misfit_task_load < busiest->group_misfit_task_load)
9e0994c0
MR
8032 return false;
8033
8034asym_packing:
caeb178c
RR
8035 /* This is the busiest node in its class. */
8036 if (!(env->sd->flags & SD_ASYM_PACKING))
532cb4c4
MN
8037 return true;
8038
97fb7a0a 8039 /* No ASYM_PACKING if target CPU is already busy */
1f621e02
SD
8040 if (env->idle == CPU_NOT_IDLE)
8041 return true;
532cb4c4 8042 /*
afe06efd
TC
8043 * ASYM_PACKING needs to move all the work to the highest
8044 * priority CPUs in the group, therefore mark all groups
8045 * of lower priority than ourselves as busy.
532cb4c4 8046 */
afe06efd
TC
8047 if (sgs->sum_nr_running &&
8048 sched_asym_prefer(env->dst_cpu, sg->asym_prefer_cpu)) {
532cb4c4
MN
8049 if (!sds->busiest)
8050 return true;
8051
97fb7a0a 8052 /* Prefer to move from lowest priority CPU's work */
afe06efd
TC
8053 if (sched_asym_prefer(sds->busiest->asym_prefer_cpu,
8054 sg->asym_prefer_cpu))
532cb4c4
MN
8055 return true;
8056 }
8057
8058 return false;
8059}
8060
0ec8aa00
PZ
8061#ifdef CONFIG_NUMA_BALANCING
8062static inline enum fbq_type fbq_classify_group(struct sg_lb_stats *sgs)
8063{
8064 if (sgs->sum_nr_running > sgs->nr_numa_running)
8065 return regular;
8066 if (sgs->sum_nr_running > sgs->nr_preferred_running)
8067 return remote;
8068 return all;
8069}
8070
8071static inline enum fbq_type fbq_classify_rq(struct rq *rq)
8072{
8073 if (rq->nr_running > rq->nr_numa_running)
8074 return regular;
8075 if (rq->nr_running > rq->nr_preferred_running)
8076 return remote;
8077 return all;
8078}
8079#else
8080static inline enum fbq_type fbq_classify_group(struct sg_lb_stats *sgs)
8081{
8082 return all;
8083}
8084
8085static inline enum fbq_type fbq_classify_rq(struct rq *rq)
8086{
8087 return regular;
8088}
8089#endif /* CONFIG_NUMA_BALANCING */
8090
1e3c88bd 8091/**
461819ac 8092 * update_sd_lb_stats - Update sched_domain's statistics for load balancing.
cd96891d 8093 * @env: The load balancing environment.
1e3c88bd
PZ
8094 * @sds: variable to hold the statistics for this sched_domain.
8095 */
0ec8aa00 8096static inline void update_sd_lb_stats(struct lb_env *env, struct sd_lb_stats *sds)
1e3c88bd 8097{
bd939f45
PZ
8098 struct sched_domain *child = env->sd->child;
8099 struct sched_group *sg = env->sd->groups;
05b40e05 8100 struct sg_lb_stats *local = &sds->local_stat;
56cf515b 8101 struct sg_lb_stats tmp_sgs;
dbbad719 8102 int load_idx;
4486edd1 8103 bool overload = false;
dbbad719 8104 bool prefer_sibling = child && child->flags & SD_PREFER_SIBLING;
1e3c88bd 8105
e022e0d3 8106#ifdef CONFIG_NO_HZ_COMMON
f643ea22 8107 if (env->idle == CPU_NEWLY_IDLE && READ_ONCE(nohz.has_blocked))
e022e0d3 8108 env->flags |= LBF_NOHZ_STATS;
e022e0d3
PZ
8109#endif
8110
bd939f45 8111 load_idx = get_sd_load_idx(env->sd, env->idle);
1e3c88bd
PZ
8112
8113 do {
56cf515b 8114 struct sg_lb_stats *sgs = &tmp_sgs;
1e3c88bd
PZ
8115 int local_group;
8116
ae4df9d6 8117 local_group = cpumask_test_cpu(env->dst_cpu, sched_group_span(sg));
56cf515b
JK
8118 if (local_group) {
8119 sds->local = sg;
05b40e05 8120 sgs = local;
b72ff13c
PZ
8121
8122 if (env->idle != CPU_NEWLY_IDLE ||
63b2ca30
NP
8123 time_after_eq(jiffies, sg->sgc->next_update))
8124 update_group_capacity(env->sd, env->dst_cpu);
56cf515b 8125 }
1e3c88bd 8126
4486edd1
TC
8127 update_sg_lb_stats(env, sg, load_idx, local_group, sgs,
8128 &overload);
1e3c88bd 8129
b72ff13c
PZ
8130 if (local_group)
8131 goto next_group;
8132
1e3c88bd
PZ
8133 /*
8134 * In case the child domain prefers tasks go to siblings
ea67821b 8135 * first, lower the sg capacity so that we'll try
75dd321d
NR
8136 * and move all the excess tasks away. We lower the capacity
8137 * of a group only if the local group has the capacity to fit
ea67821b
VG
8138 * these excess tasks. The extra check prevents the case where
8139 * you always pull from the heaviest group when it is already
8140 * under-utilized (possible with a large weight task outweighs
8141 * the tasks on the system).
1e3c88bd 8142 */
b72ff13c 8143 if (prefer_sibling && sds->local &&
05b40e05
SD
8144 group_has_capacity(env, local) &&
8145 (sgs->sum_nr_running > local->sum_nr_running + 1)) {
ea67821b 8146 sgs->group_no_capacity = 1;
79a89f92 8147 sgs->group_type = group_classify(sg, sgs);
cb0b9f24 8148 }
1e3c88bd 8149
b72ff13c 8150 if (update_sd_pick_busiest(env, sds, sg, sgs)) {
532cb4c4 8151 sds->busiest = sg;
56cf515b 8152 sds->busiest_stat = *sgs;
1e3c88bd
PZ
8153 }
8154
b72ff13c
PZ
8155next_group:
8156 /* Now, start updating sd_lb_stats */
90001d67 8157 sds->total_running += sgs->sum_nr_running;
b72ff13c 8158 sds->total_load += sgs->group_load;
63b2ca30 8159 sds->total_capacity += sgs->group_capacity;
b72ff13c 8160
532cb4c4 8161 sg = sg->next;
bd939f45 8162 } while (sg != env->sd->groups);
0ec8aa00 8163
f643ea22
VG
8164#ifdef CONFIG_NO_HZ_COMMON
8165 if ((env->flags & LBF_NOHZ_AGAIN) &&
8166 cpumask_subset(nohz.idle_cpus_mask, sched_domain_span(env->sd))) {
8167
8168 WRITE_ONCE(nohz.next_blocked,
8169 jiffies + msecs_to_jiffies(LOAD_AVG_PERIOD));
8170 }
8171#endif
8172
0ec8aa00
PZ
8173 if (env->sd->flags & SD_NUMA)
8174 env->fbq_type = fbq_classify_group(&sds->busiest_stat);
4486edd1
TC
8175
8176 if (!env->sd->parent) {
8177 /* update overload indicator if we are at root domain */
e90c8fe1
VS
8178 if (READ_ONCE(env->dst_rq->rd->overload) != overload)
8179 WRITE_ONCE(env->dst_rq->rd->overload, overload);
4486edd1 8180 }
532cb4c4
MN
8181}
8182
532cb4c4
MN
8183/**
8184 * check_asym_packing - Check to see if the group is packed into the
0ba42a59 8185 * sched domain.
532cb4c4
MN
8186 *
8187 * This is primarily intended to be used at the sibling level. Some
8188 * cores like POWER7 prefer to use lower numbered SMT threads. In the
8189 * case of POWER7, it can move to lower SMT modes only when higher
8190 * threads are idle. When in lower SMT modes, the threads will
8191 * perform better since they share less core resources. Hence when we
8192 * have idle threads, we want them to be the higher ones.
8193 *
8194 * This packing function is run on idle threads. It checks to see if
8195 * the busiest CPU in this domain (core in the P7 case) has a higher
8196 * CPU number than the packing function is being run on. Here we are
8197 * assuming a lower CPU number will be equivalent to a lower SMT thread
8198 * number.
8199 *
e69f6186 8200 * Return: 1 when packing is required and a task should be moved to
46123355 8201 * this CPU. The amount of the imbalance is returned in env->imbalance.
b6b12294 8202 *
cd96891d 8203 * @env: The load balancing environment.
532cb4c4 8204 * @sds: Statistics of the sched_domain which is to be packed
532cb4c4 8205 */
bd939f45 8206static int check_asym_packing(struct lb_env *env, struct sd_lb_stats *sds)
532cb4c4
MN
8207{
8208 int busiest_cpu;
8209
bd939f45 8210 if (!(env->sd->flags & SD_ASYM_PACKING))
532cb4c4
MN
8211 return 0;
8212
1f621e02
SD
8213 if (env->idle == CPU_NOT_IDLE)
8214 return 0;
8215
532cb4c4
MN
8216 if (!sds->busiest)
8217 return 0;
8218
afe06efd
TC
8219 busiest_cpu = sds->busiest->asym_prefer_cpu;
8220 if (sched_asym_prefer(busiest_cpu, env->dst_cpu))
532cb4c4
MN
8221 return 0;
8222
bd939f45 8223 env->imbalance = DIV_ROUND_CLOSEST(
63b2ca30 8224 sds->busiest_stat.avg_load * sds->busiest_stat.group_capacity,
ca8ce3d0 8225 SCHED_CAPACITY_SCALE);
bd939f45 8226
532cb4c4 8227 return 1;
1e3c88bd
PZ
8228}
8229
8230/**
8231 * fix_small_imbalance - Calculate the minor imbalance that exists
8232 * amongst the groups of a sched_domain, during
8233 * load balancing.
cd96891d 8234 * @env: The load balancing environment.
1e3c88bd 8235 * @sds: Statistics of the sched_domain whose imbalance is to be calculated.
1e3c88bd 8236 */
bd939f45
PZ
8237static inline
8238void fix_small_imbalance(struct lb_env *env, struct sd_lb_stats *sds)
1e3c88bd 8239{
63b2ca30 8240 unsigned long tmp, capa_now = 0, capa_move = 0;
1e3c88bd 8241 unsigned int imbn = 2;
dd5feea1 8242 unsigned long scaled_busy_load_per_task;
56cf515b 8243 struct sg_lb_stats *local, *busiest;
1e3c88bd 8244
56cf515b
JK
8245 local = &sds->local_stat;
8246 busiest = &sds->busiest_stat;
1e3c88bd 8247
56cf515b
JK
8248 if (!local->sum_nr_running)
8249 local->load_per_task = cpu_avg_load_per_task(env->dst_cpu);
8250 else if (busiest->load_per_task > local->load_per_task)
8251 imbn = 1;
dd5feea1 8252
56cf515b 8253 scaled_busy_load_per_task =
ca8ce3d0 8254 (busiest->load_per_task * SCHED_CAPACITY_SCALE) /
63b2ca30 8255 busiest->group_capacity;
56cf515b 8256
3029ede3
VD
8257 if (busiest->avg_load + scaled_busy_load_per_task >=
8258 local->avg_load + (scaled_busy_load_per_task * imbn)) {
56cf515b 8259 env->imbalance = busiest->load_per_task;
1e3c88bd
PZ
8260 return;
8261 }
8262
8263 /*
8264 * OK, we don't have enough imbalance to justify moving tasks,
ced549fa 8265 * however we may be able to increase total CPU capacity used by
1e3c88bd
PZ
8266 * moving them.
8267 */
8268
63b2ca30 8269 capa_now += busiest->group_capacity *
56cf515b 8270 min(busiest->load_per_task, busiest->avg_load);
63b2ca30 8271 capa_now += local->group_capacity *
56cf515b 8272 min(local->load_per_task, local->avg_load);
ca8ce3d0 8273 capa_now /= SCHED_CAPACITY_SCALE;
1e3c88bd
PZ
8274
8275 /* Amount of load we'd subtract */
a2cd4260 8276 if (busiest->avg_load > scaled_busy_load_per_task) {
63b2ca30 8277 capa_move += busiest->group_capacity *
56cf515b 8278 min(busiest->load_per_task,
a2cd4260 8279 busiest->avg_load - scaled_busy_load_per_task);
56cf515b 8280 }
1e3c88bd
PZ
8281
8282 /* Amount of load we'd add */
63b2ca30 8283 if (busiest->avg_load * busiest->group_capacity <
ca8ce3d0 8284 busiest->load_per_task * SCHED_CAPACITY_SCALE) {
63b2ca30
NP
8285 tmp = (busiest->avg_load * busiest->group_capacity) /
8286 local->group_capacity;
56cf515b 8287 } else {
ca8ce3d0 8288 tmp = (busiest->load_per_task * SCHED_CAPACITY_SCALE) /
63b2ca30 8289 local->group_capacity;
56cf515b 8290 }
63b2ca30 8291 capa_move += local->group_capacity *
3ae11c90 8292 min(local->load_per_task, local->avg_load + tmp);
ca8ce3d0 8293 capa_move /= SCHED_CAPACITY_SCALE;
1e3c88bd
PZ
8294
8295 /* Move if we gain throughput */
63b2ca30 8296 if (capa_move > capa_now)
56cf515b 8297 env->imbalance = busiest->load_per_task;
1e3c88bd
PZ
8298}
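/*
 * Worked example of the capacity comparison above (illustrative values):
 * SCHED_CAPACITY_SCALE = 1024, both groups with group_capacity = 1024,
 * busiest: load_per_task = 300, avg_load = 600;
 * local:   load_per_task = 300, avg_load = 100.
 *
 *	capa_now  = (1024 * min(300, 600) + 1024 * min(300, 100)) / 1024 = 400
 *	capa_move = (1024 * min(300, 600 - 300)
 *		   + 1024 * min(300, 100 + 300)) / 1024 = 600
 *
 * capa_move > capa_now, so one task worth of load (300) is moved even
 * though the plain imbalance test above did not justify it.
 */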
8299
8300/**
8301 * calculate_imbalance - Calculate the amount of imbalance present within the
8302 * groups of a given sched_domain during load balance.
bd939f45 8303 * @env: load balance environment
1e3c88bd 8304 * @sds: statistics of the sched_domain whose imbalance is to be calculated.
1e3c88bd 8305 */
bd939f45 8306static inline void calculate_imbalance(struct lb_env *env, struct sd_lb_stats *sds)
1e3c88bd 8307{
dd5feea1 8308 unsigned long max_pull, load_above_capacity = ~0UL;
56cf515b
JK
8309 struct sg_lb_stats *local, *busiest;
8310
8311 local = &sds->local_stat;
56cf515b 8312 busiest = &sds->busiest_stat;
dd5feea1 8313
caeb178c 8314 if (busiest->group_type == group_imbalanced) {
30ce5dab
PZ
8315 /*
8316 * In the group_imb case we cannot rely on group-wide averages
97fb7a0a 8317 * to ensure CPU-load equilibrium, look at wider averages. XXX
30ce5dab 8318 */
56cf515b
JK
8319 busiest->load_per_task =
8320 min(busiest->load_per_task, sds->avg_load);
dd5feea1
SS
8321 }
8322
1e3c88bd 8323 /*
885e542c
DE
8324 * Avg load of busiest sg can be less and avg load of local sg can
8325 * be greater than avg load across all sgs of sd because avg load
8326 * factors in sg capacity and sgs with smaller group_type are
8327 * skipped when updating the busiest sg:
1e3c88bd 8328 */
cad68e55
MR
8329 if (busiest->group_type != group_misfit_task &&
8330 (busiest->avg_load <= sds->avg_load ||
8331 local->avg_load >= sds->avg_load)) {
bd939f45
PZ
8332 env->imbalance = 0;
8333 return fix_small_imbalance(env, sds);
1e3c88bd
PZ
8334 }
8335
9a5d9ba6 8336 /*
97fb7a0a 8337 * If there aren't any idle CPUs, avoid creating some.
9a5d9ba6
PZ
8338 */
8339 if (busiest->group_type == group_overloaded &&
8340 local->group_type == group_overloaded) {
1be0eb2a 8341 load_above_capacity = busiest->sum_nr_running * SCHED_CAPACITY_SCALE;
cfa10334 8342 if (load_above_capacity > busiest->group_capacity) {
ea67821b 8343 load_above_capacity -= busiest->group_capacity;
26656215 8344 load_above_capacity *= scale_load_down(NICE_0_LOAD);
cfa10334
MR
8345 load_above_capacity /= busiest->group_capacity;
8346 } else
ea67821b 8347 load_above_capacity = ~0UL;
dd5feea1
SS
8348 }
8349
8350 /*
97fb7a0a 8351 * We're trying to get all the CPUs to the average_load, so we don't
dd5feea1 8352 * want to push ourselves above the average load, nor do we wish to
97fb7a0a 8353 * reduce the max loaded CPU below the average load. At the same time,
0a9b23ce
DE
8354 * we also don't want to reduce the group load below the group
8355 * capacity. Thus we look for the minimum possible imbalance.
dd5feea1 8356 */
30ce5dab 8357 max_pull = min(busiest->avg_load - sds->avg_load, load_above_capacity);
1e3c88bd
PZ
8358
8359 /* How much load to actually move to equalise the imbalance */
56cf515b 8360 env->imbalance = min(
63b2ca30
NP
8361 max_pull * busiest->group_capacity,
8362 (sds->avg_load - local->avg_load) * local->group_capacity
ca8ce3d0 8363 ) / SCHED_CAPACITY_SCALE;
1e3c88bd 8364
cad68e55
MR
8365 /* Boost imbalance to allow misfit task to be balanced. */
8366 if (busiest->group_type == group_misfit_task) {
8367 env->imbalance = max_t(long, env->imbalance,
8368 busiest->group_misfit_task_load);
8369 }
8370
1e3c88bd
PZ
8371 /*
8372 * if *imbalance is less than the average load per runnable task
25985edc 8373 * there is no guarantee that any tasks will be moved so we'll have
1e3c88bd
PZ
8374 * a think about bumping its value to force at least one task to be
8375 * moved
8376 */
56cf515b 8377 if (env->imbalance < busiest->load_per_task)
bd939f45 8378 return fix_small_imbalance(env, sds);
1e3c88bd 8379}
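/*
 * Worked example of the imbalance above (illustrative values):
 * sds->avg_load = 500, busiest->avg_load = 800, local->avg_load = 300,
 * both groups with group_capacity = 1024 and neither group overloaded,
 * so load_above_capacity stays ~0UL and does not limit max_pull:
 *
 *	max_pull       = min(800 - 500, ~0UL) = 300
 *	env->imbalance = min(300 * 1024, (500 - 300) * 1024) / 1024 = 200
 *
 * Only enough load is pulled to bring the local group up to the domain
 * average, rather than dragging the busiest group below it.
 */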
fab47622 8380
1e3c88bd
PZ
8381/******* find_busiest_group() helpers end here *********************/
8382
8383/**
8384 * find_busiest_group - Returns the busiest group within the sched_domain
0a9b23ce 8385 * if there is an imbalance.
1e3c88bd
PZ
8386 *
8387 * Also calculates the amount of weighted load which should be moved
8388 * to restore balance.
8389 *
cd96891d 8390 * @env: The load balancing environment.
1e3c88bd 8391 *
e69f6186 8392 * Return: - The busiest group if imbalance exists.
1e3c88bd 8393 */
56cf515b 8394static struct sched_group *find_busiest_group(struct lb_env *env)
1e3c88bd 8395{
56cf515b 8396 struct sg_lb_stats *local, *busiest;
1e3c88bd
PZ
8397 struct sd_lb_stats sds;
8398
147c5fc2 8399 init_sd_lb_stats(&sds);
1e3c88bd
PZ
8400
8401 /*
8402 * Compute the various statistics relevant for load balancing at
8403 * this level.
8404 */
23f0d209 8405 update_sd_lb_stats(env, &sds);
56cf515b
JK
8406 local = &sds.local_stat;
8407 busiest = &sds.busiest_stat;
1e3c88bd 8408
ea67821b 8409 /* ASYM feature bypasses nice load balance check */
1f621e02 8410 if (check_asym_packing(env, &sds))
532cb4c4
MN
8411 return sds.busiest;
8412
cc57aa8f 8413 /* There is no busy sibling group to pull tasks from */
56cf515b 8414 if (!sds.busiest || busiest->sum_nr_running == 0)
1e3c88bd
PZ
8415 goto out_balanced;
8416
90001d67 8417 /* XXX broken for overlapping NUMA groups */
ca8ce3d0
NP
8418 sds.avg_load = (SCHED_CAPACITY_SCALE * sds.total_load)
8419 / sds.total_capacity;
b0432d8f 8420
866ab43e
PZ
8421 /*
8422 * If the busiest group is imbalanced the below checks don't
30ce5dab 8423 * work because they assume all things are equal, which typically
866ab43e
PZ
8424 * isn't true due to cpus_allowed constraints and the like.
8425 */
caeb178c 8426 if (busiest->group_type == group_imbalanced)
866ab43e
PZ
8427 goto force_balance;
8428
583ffd99
BJ
8429 /*
8430 * When dst_cpu is idle, prevent SMP nice and/or asymmetric group
8431 * capacities from resulting in underutilization due to avg_load.
8432 */
8433 if (env->idle != CPU_NOT_IDLE && group_has_capacity(env, local) &&
ea67821b 8434 busiest->group_no_capacity)
fab47622
NR
8435 goto force_balance;
8436
cad68e55
MR
8437 /* Misfit tasks should be dealt with regardless of the avg load */
8438 if (busiest->group_type == group_misfit_task)
8439 goto force_balance;
8440
cc57aa8f 8441 /*
9c58c79a 8442 * If the local group is busier than the selected busiest group
cc57aa8f
PZ
8443 * don't try and pull any tasks.
8444 */
56cf515b 8445 if (local->avg_load >= busiest->avg_load)
1e3c88bd
PZ
8446 goto out_balanced;
8447
cc57aa8f
PZ
8448 /*
8449 * Don't pull any tasks if this group is already above the domain
8450 * average load.
8451 */
56cf515b 8452 if (local->avg_load >= sds.avg_load)
1e3c88bd
PZ
8453 goto out_balanced;
8454
bd939f45 8455 if (env->idle == CPU_IDLE) {
aae6d3dd 8456 /*
97fb7a0a 8457 * This CPU is idle. If the busiest group is not overloaded
43f4d666 8458 * and there is no imbalance between this and busiest group
97fb7a0a 8459 * wrt idle CPUs, it is balanced. The imbalance becomes
43f4d666
VG
8460 * significant if the diff is greater than 1, otherwise we
8461 * might end up just moving the imbalance to another group
aae6d3dd 8462 */
43f4d666
VG
8463 if ((busiest->group_type != group_overloaded) &&
8464 (local->idle_cpus <= (busiest->idle_cpus + 1)))
aae6d3dd 8465 goto out_balanced;
c186fafe
PZ
8466 } else {
8467 /*
8468 * In the CPU_NEWLY_IDLE, CPU_NOT_IDLE cases, use
8469 * imbalance_pct to be conservative.
8470 */
56cf515b
JK
8471 if (100 * busiest->avg_load <=
8472 env->sd->imbalance_pct * local->avg_load)
c186fafe 8473 goto out_balanced;
aae6d3dd 8474 }
1e3c88bd 8475
fab47622 8476force_balance:
1e3c88bd 8477 /* Looks like there is an imbalance. Compute it */
cad68e55 8478 env->src_grp_type = busiest->group_type;
bd939f45 8479 calculate_imbalance(env, &sds);
bb3485c8 8480 return env->imbalance ? sds.busiest : NULL;
1e3c88bd
PZ
8481
8482out_balanced:
bd939f45 8483 env->imbalance = 0;
1e3c88bd
PZ
8484 return NULL;
8485}
8486
8487/*
97fb7a0a 8488 * find_busiest_queue - find the busiest runqueue among the CPUs in the group.
1e3c88bd 8489 */
bd939f45 8490static struct rq *find_busiest_queue(struct lb_env *env,
b9403130 8491 struct sched_group *group)
1e3c88bd
PZ
8492{
8493 struct rq *busiest = NULL, *rq;
ced549fa 8494 unsigned long busiest_load = 0, busiest_capacity = 1;
1e3c88bd
PZ
8495 int i;
8496
ae4df9d6 8497 for_each_cpu_and(i, sched_group_span(group), env->cpus) {
ea67821b 8498 unsigned long capacity, wl;
0ec8aa00
PZ
8499 enum fbq_type rt;
8500
8501 rq = cpu_rq(i);
8502 rt = fbq_classify_rq(rq);
1e3c88bd 8503
0ec8aa00
PZ
8504 /*
8505 * We classify groups/runqueues into three groups:
8506 * - regular: there are !numa tasks
8507 * - remote: there are numa tasks that run on the 'wrong' node
8508 * - all: there is no distinction
8509 *
8510 * In order to avoid migrating ideally placed numa tasks,
8511 * ignore those when there are better options.
8512 *
8513 * If we ignore the actual busiest queue to migrate another
8514 * task, the next balance pass can still reduce the busiest
8515 * queue by moving tasks around inside the node.
8516 *
8517 * If we cannot move enough load due to this classification
8518 * the next pass will adjust the group classification and
8519 * allow migration of more tasks.
8520 *
8521 * Both cases only affect the total convergence complexity.
8522 */
8523 if (rt > env->fbq_type)
8524 continue;
8525
cad68e55
MR
8526 /*
8527 * For ASYM_CPUCAPACITY domains with misfit tasks we simply
8528 * seek the "biggest" misfit task.
8529 */
8530 if (env->src_grp_type == group_misfit_task) {
8531 if (rq->misfit_task_load > busiest_load) {
8532 busiest_load = rq->misfit_task_load;
8533 busiest = rq;
8534 }
8535
8536 continue;
8537 }
8538
ced549fa 8539 capacity = capacity_of(i);
9d5efe05 8540
4ad3831a
CR
8541 /*
8542 * For ASYM_CPUCAPACITY domains, don't pick a CPU that could
8543 * eventually lead to active_balancing high->low capacity.
8544 * Higher per-CPU capacity is considered better than balancing
8545 * average load.
8546 */
8547 if (env->sd->flags & SD_ASYM_CPUCAPACITY &&
8548 capacity_of(env->dst_cpu) < capacity &&
8549 rq->nr_running == 1)
8550 continue;
8551
c7132dd6 8552 wl = weighted_cpuload(rq);
1e3c88bd 8553
6e40f5bb
TG
8554 /*
8555 * When comparing with imbalance, use weighted_cpuload()
97fb7a0a 8556 * which is not scaled with the CPU capacity.
6e40f5bb 8557 */
ea67821b
VG
8558
8559 if (rq->nr_running == 1 && wl > env->imbalance &&
8560 !check_cpu_capacity(rq, env->sd))
1e3c88bd
PZ
8561 continue;
8562
6e40f5bb 8563 /*
97fb7a0a
IM
8564 * For the load comparisons with the other CPUs, consider
8565 * the weighted_cpuload() scaled with the CPU capacity, so
8566 * that the load can be moved away from the CPU that is
ced549fa 8567 * potentially running at a lower capacity.
95a79b80 8568 *
ced549fa 8569 * Thus we're looking for max(wl_i / capacity_i), crosswise
95a79b80 8570 * multiplication to rid ourselves of the division works out
ced549fa
NP
8571 * to: wl_i * capacity_j > wl_j * capacity_i; where j is
8572 * our previous maximum.
6e40f5bb 8573 */
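/*
 * Illustrative numbers for the comparison below: a CPU with wl = 600 at
 * capacity = 512 checked against a current maximum of wl = 800 at
 * capacity = 1024 gives 600 * 1024 > 800 * 512, i.e. 600/512 > 800/1024,
 * so the half-capacity CPU is considered the busier of the two.
 */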
ced549fa 8574 if (wl * busiest_capacity > busiest_load * capacity) {
95a79b80 8575 busiest_load = wl;
ced549fa 8576 busiest_capacity = capacity;
1e3c88bd
PZ
8577 busiest = rq;
8578 }
8579 }
8580
8581 return busiest;
8582}
8583
8584/*
8585 * Max backoff if we encounter pinned tasks. Pretty arbitrary value, but
8586 * so long as it is large enough.
8587 */
8588#define MAX_PINNED_INTERVAL 512
8589
bd939f45 8590static int need_active_balance(struct lb_env *env)
1af3ed3d 8591{
bd939f45
PZ
8592 struct sched_domain *sd = env->sd;
8593
8594 if (env->idle == CPU_NEWLY_IDLE) {
532cb4c4
MN
8595
8596 /*
8597 * ASYM_PACKING needs to force migrate tasks from busy but
afe06efd
TC
8598 * lower priority CPUs in order to pack all tasks in the
8599 * highest priority CPUs.
532cb4c4 8600 */
afe06efd
TC
8601 if ((sd->flags & SD_ASYM_PACKING) &&
8602 sched_asym_prefer(env->dst_cpu, env->src_cpu))
532cb4c4 8603 return 1;
1af3ed3d
PZ
8604 }
8605
1aaf90a4
VG
8606 /*
8607 * The dst_cpu is idle and the src_cpu CPU has only 1 CFS task.
8608 * It's worth migrating the task if the src_cpu's capacity is reduced
8609 * because of other sched_class or IRQs if more capacity stays
8610 * available on dst_cpu.
8611 */
8612 if ((env->idle != CPU_NOT_IDLE) &&
8613 (env->src_rq->cfs.h_nr_running == 1)) {
8614 if ((check_cpu_capacity(env->src_rq, sd)) &&
8615 (capacity_of(env->src_cpu)*sd->imbalance_pct < capacity_of(env->dst_cpu)*100))
8616 return 1;
8617 }
8618
cad68e55
MR
8619 if (env->src_grp_type == group_misfit_task)
8620 return 1;
8621
1af3ed3d
PZ
8622 return unlikely(sd->nr_balance_failed > sd->cache_nice_tries+2);
8623}
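/*
 * Illustrative check of the capacity-reduction rule above: with an
 * assumed sd->imbalance_pct of 117, capacity_of(src_cpu) = 800 (reduced
 * by RT/IRQ pressure) and capacity_of(dst_cpu) = 1024:
 *
 *	800 * 117 = 93600  <  1024 * 100 = 102400
 *
 * so migrating the single CFS task from src_cpu to the idle dst_cpu is
 * considered worthwhile.
 */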
8624
969c7921
TH
8625static int active_load_balance_cpu_stop(void *data);
8626
23f0d209
JK
8627static int should_we_balance(struct lb_env *env)
8628{
8629 struct sched_group *sg = env->sd->groups;
23f0d209
JK
8630 int cpu, balance_cpu = -1;
8631
024c9d2f
PZ
8632 /*
8633 * Ensure the balancing environment is consistent; can happen
8634 * when the softirq triggers 'during' hotplug.
8635 */
8636 if (!cpumask_test_cpu(env->dst_cpu, env->cpus))
8637 return 0;
8638
23f0d209 8639 /*
97fb7a0a 8640 * In the newly idle case, we will allow all the CPUs
23f0d209
JK
8641 * to do the newly idle load balance.
8642 */
8643 if (env->idle == CPU_NEWLY_IDLE)
8644 return 1;
8645
97fb7a0a 8646 /* Try to find first idle CPU */
e5c14b1f 8647 for_each_cpu_and(cpu, group_balance_mask(sg), env->cpus) {
af218122 8648 if (!idle_cpu(cpu))
23f0d209
JK
8649 continue;
8650
8651 balance_cpu = cpu;
8652 break;
8653 }
8654
8655 if (balance_cpu == -1)
8656 balance_cpu = group_balance_cpu(sg);
8657
8658 /*
97fb7a0a 8659 * First idle CPU or the first CPU(busiest) in this sched group
23f0d209
JK
8660 * is eligible for doing load balancing at this and above domains.
8661 */
b0cff9d8 8662 return balance_cpu == env->dst_cpu;
23f0d209
JK
8663}
8664
1e3c88bd
PZ
8665/*
8666 * Check this_cpu to ensure it is balanced within domain. Attempt to move
8667 * tasks if there is an imbalance.
8668 */
8669static int load_balance(int this_cpu, struct rq *this_rq,
8670 struct sched_domain *sd, enum cpu_idle_type idle,
23f0d209 8671 int *continue_balancing)
1e3c88bd 8672{
88b8dac0 8673 int ld_moved, cur_ld_moved, active_balance = 0;
6263322c 8674 struct sched_domain *sd_parent = sd->parent;
1e3c88bd 8675 struct sched_group *group;
1e3c88bd 8676 struct rq *busiest;
8a8c69c3 8677 struct rq_flags rf;
4ba29684 8678 struct cpumask *cpus = this_cpu_cpumask_var_ptr(load_balance_mask);
1e3c88bd 8679
8e45cb54
PZ
8680 struct lb_env env = {
8681 .sd = sd,
ddcdf6e7
PZ
8682 .dst_cpu = this_cpu,
8683 .dst_rq = this_rq,
ae4df9d6 8684 .dst_grpmask = sched_group_span(sd->groups),
8e45cb54 8685 .idle = idle,
eb95308e 8686 .loop_break = sched_nr_migrate_break,
b9403130 8687 .cpus = cpus,
0ec8aa00 8688 .fbq_type = all,
163122b7 8689 .tasks = LIST_HEAD_INIT(env.tasks),
8e45cb54
PZ
8690 };
8691
65a4433a 8692 cpumask_and(cpus, sched_domain_span(sd), cpu_active_mask);
1e3c88bd 8693
ae92882e 8694 schedstat_inc(sd->lb_count[idle]);
1e3c88bd
PZ
8695
8696redo:
23f0d209
JK
8697 if (!should_we_balance(&env)) {
8698 *continue_balancing = 0;
1e3c88bd 8699 goto out_balanced;
23f0d209 8700 }
1e3c88bd 8701
23f0d209 8702 group = find_busiest_group(&env);
1e3c88bd 8703 if (!group) {
ae92882e 8704 schedstat_inc(sd->lb_nobusyg[idle]);
1e3c88bd
PZ
8705 goto out_balanced;
8706 }
8707
b9403130 8708 busiest = find_busiest_queue(&env, group);
1e3c88bd 8709 if (!busiest) {
ae92882e 8710 schedstat_inc(sd->lb_nobusyq[idle]);
1e3c88bd
PZ
8711 goto out_balanced;
8712 }
8713
78feefc5 8714 BUG_ON(busiest == env.dst_rq);
1e3c88bd 8715
ae92882e 8716 schedstat_add(sd->lb_imbalance[idle], env.imbalance);
1e3c88bd 8717
1aaf90a4
VG
8718 env.src_cpu = busiest->cpu;
8719 env.src_rq = busiest;
8720
1e3c88bd
PZ
8721 ld_moved = 0;
8722 if (busiest->nr_running > 1) {
8723 /*
8724 * Attempt to move tasks. If find_busiest_group has found
8725 * an imbalance but busiest->nr_running <= 1, the group is
8726 * still unbalanced. ld_moved simply stays zero, so it is
8727 * correctly treated as an imbalance.
8728 */
8e45cb54 8729 env.flags |= LBF_ALL_PINNED;
c82513e5 8730 env.loop_max = min(sysctl_sched_nr_migrate, busiest->nr_running);
8e45cb54 8731
5d6523eb 8732more_balance:
8a8c69c3 8733 rq_lock_irqsave(busiest, &rf);
3bed5e21 8734 update_rq_clock(busiest);
88b8dac0
SV
8735
8736 /*
8737 * cur_ld_moved - load moved in current iteration
8738 * ld_moved - cumulative load moved across iterations
8739 */
163122b7 8740 cur_ld_moved = detach_tasks(&env);
1e3c88bd
PZ
8741
8742 /*
163122b7
KT
8743 * We've detached some tasks from busiest_rq. Every
8744 * task is masked "TASK_ON_RQ_MIGRATING", so we can safely
8745 * unlock busiest->lock, and we are able to be sure
8746 * that nobody can manipulate the tasks in parallel.
8747 * See task_rq_lock() family for the details.
1e3c88bd 8748 */
163122b7 8749
8a8c69c3 8750 rq_unlock(busiest, &rf);
163122b7
KT
8751
8752 if (cur_ld_moved) {
8753 attach_tasks(&env);
8754 ld_moved += cur_ld_moved;
8755 }
8756
8a8c69c3 8757 local_irq_restore(rf.flags);
88b8dac0 8758
f1cd0858
JK
8759 if (env.flags & LBF_NEED_BREAK) {
8760 env.flags &= ~LBF_NEED_BREAK;
8761 goto more_balance;
8762 }
8763
88b8dac0
SV
8764 /*
8765 * Revisit (affine) tasks on src_cpu that couldn't be moved to
8766 * us and move them to an alternate dst_cpu in our sched_group
8767 * where they can run. The upper limit on how many times we
97fb7a0a 8768 * iterate on same src_cpu is dependent on number of CPUs in our
88b8dac0
SV
8769 * sched_group.
8770 *
8771 * This changes load balance semantics a bit on who can move
8772 * load to a given_cpu. In addition to the given_cpu itself
8773 * (or a ilb_cpu acting on its behalf where given_cpu is
8774 * nohz-idle), we now have balance_cpu in a position to move
8775 * load to given_cpu. In rare situations, this may cause
8776 * conflicts (balance_cpu and given_cpu/ilb_cpu deciding
8777 * _independently_ and at the _same_ time to move some load to
8778 * given_cpu), causing excess load to be moved to given_cpu.
8779 * This however should not happen so much in practice and
8780 * moreover subsequent load balance cycles should correct the
8781 * excess load moved.
8782 */
6263322c 8783 if ((env.flags & LBF_DST_PINNED) && env.imbalance > 0) {
88b8dac0 8784
97fb7a0a 8785 /* Prevent to re-select dst_cpu via env's CPUs */
7aff2e3a
VD
8786 cpumask_clear_cpu(env.dst_cpu, env.cpus);
8787
78feefc5 8788 env.dst_rq = cpu_rq(env.new_dst_cpu);
88b8dac0 8789 env.dst_cpu = env.new_dst_cpu;
6263322c 8790 env.flags &= ~LBF_DST_PINNED;
88b8dac0
SV
8791 env.loop = 0;
8792 env.loop_break = sched_nr_migrate_break;
e02e60c1 8793
88b8dac0
SV
8794 /*
8795 * Go back to "more_balance" rather than "redo" since we
8796 * need to continue with same src_cpu.
8797 */
8798 goto more_balance;
8799 }
1e3c88bd 8800
6263322c
PZ
8801 /*
8802 * We failed to reach balance because of affinity.
8803 */
8804 if (sd_parent) {
63b2ca30 8805 int *group_imbalance = &sd_parent->groups->sgc->imbalance;
6263322c 8806
afdeee05 8807 if ((env.flags & LBF_SOME_PINNED) && env.imbalance > 0)
6263322c 8808 *group_imbalance = 1;
6263322c
PZ
8809 }
8810
1e3c88bd 8811 /* All tasks on this runqueue were pinned by CPU affinity */
8e45cb54 8812 if (unlikely(env.flags & LBF_ALL_PINNED)) {
1e3c88bd 8813 cpumask_clear_cpu(cpu_of(busiest), cpus);
65a4433a
JH
8814 /*
8815 * Attempting to continue load balancing at the current
8816 * sched_domain level only makes sense if there are
8817 * active CPUs remaining as possible busiest CPUs to
8818 * pull load from which are not contained within the
8819 * destination group that is receiving any migrated
8820 * load.
8821 */
8822 if (!cpumask_subset(cpus, env.dst_grpmask)) {
bbf18b19
PN
8823 env.loop = 0;
8824 env.loop_break = sched_nr_migrate_break;
1e3c88bd 8825 goto redo;
bbf18b19 8826 }
afdeee05 8827 goto out_all_pinned;
1e3c88bd
PZ
8828 }
8829 }
8830
8831 if (!ld_moved) {
ae92882e 8832 schedstat_inc(sd->lb_failed[idle]);
58b26c4c
VP
8833 /*
8834 * Increment the failure counter only on periodic balance.
8835 * We do not want newidle balance, which can be very
8836 * frequent, pollute the failure counter causing
8837 * excessive cache_hot migrations and active balances.
8838 */
8839 if (idle != CPU_NEWLY_IDLE)
8840 sd->nr_balance_failed++;
1e3c88bd 8841
bd939f45 8842 if (need_active_balance(&env)) {
8a8c69c3
PZ
8843 unsigned long flags;
8844
1e3c88bd
PZ
8845 raw_spin_lock_irqsave(&busiest->lock, flags);
8846
97fb7a0a
IM
8847 /*
8848 * Don't kick the active_load_balance_cpu_stop,
8849 * if the curr task on busiest CPU can't be
8850 * moved to this_cpu:
1e3c88bd 8851 */
0c98d344 8852 if (!cpumask_test_cpu(this_cpu, &busiest->curr->cpus_allowed)) {
1e3c88bd
PZ
8853 raw_spin_unlock_irqrestore(&busiest->lock,
8854 flags);
8e45cb54 8855 env.flags |= LBF_ALL_PINNED;
1e3c88bd
PZ
8856 goto out_one_pinned;
8857 }
8858
969c7921
TH
8859 /*
8860 * ->active_balance synchronizes accesses to
8861 * ->active_balance_work. Once set, it's cleared
8862 * only after active load balance is finished.
8863 */
1e3c88bd
PZ
8864 if (!busiest->active_balance) {
8865 busiest->active_balance = 1;
8866 busiest->push_cpu = this_cpu;
8867 active_balance = 1;
8868 }
8869 raw_spin_unlock_irqrestore(&busiest->lock, flags);
969c7921 8870
bd939f45 8871 if (active_balance) {
969c7921
TH
8872 stop_one_cpu_nowait(cpu_of(busiest),
8873 active_load_balance_cpu_stop, busiest,
8874 &busiest->active_balance_work);
bd939f45 8875 }
1e3c88bd 8876
d02c0711 8877 /* We've kicked active balancing, force task migration. */
1e3c88bd
PZ
8878 sd->nr_balance_failed = sd->cache_nice_tries+1;
8879 }
8880 } else
8881 sd->nr_balance_failed = 0;
8882
8883 if (likely(!active_balance)) {
8884 /* We were unbalanced, so reset the balancing interval */
8885 sd->balance_interval = sd->min_interval;
8886 } else {
8887 /*
8888 * If we've begun active balancing, start to back off. This
8889 * case may not be covered by the all_pinned logic if there
8890 * is only 1 task on the busy runqueue (because we don't call
163122b7 8891 * detach_tasks).
1e3c88bd
PZ
8892 */
8893 if (sd->balance_interval < sd->max_interval)
8894 sd->balance_interval *= 2;
8895 }
8896
1e3c88bd
PZ
8897 goto out;
8898
8899out_balanced:
afdeee05
VG
8900 /*
8901 * We reach balance although we may have faced some affinity
8902 * constraints. Clear the imbalance flag if it was set.
8903 */
8904 if (sd_parent) {
8905 int *group_imbalance = &sd_parent->groups->sgc->imbalance;
8906
8907 if (*group_imbalance)
8908 *group_imbalance = 0;
8909 }
8910
8911out_all_pinned:
8912 /*
8913 * We reach balance because all tasks are pinned at this level so
8914 * we can't migrate them. Let the imbalance flag set so parent level
8915 * can try to migrate them.
8916 */
ae92882e 8917 schedstat_inc(sd->lb_balanced[idle]);
1e3c88bd
PZ
8918
8919 sd->nr_balance_failed = 0;
8920
8921out_one_pinned:
3f130a37
VS
8922 ld_moved = 0;
8923
8924 /*
8925 * idle_balance() disregards balance intervals, so we could repeatedly
8926 * reach this code, which would lead to balance_interval skyrocketing
8927 * in a short amount of time. Skip the balance_interval increase logic
8928 * to avoid that.
8929 */
8930 if (env.idle == CPU_NEWLY_IDLE)
8931 goto out;
8932
1e3c88bd 8933 /* tune up the balancing interval */
47b7aee1
VS
8934 if ((env.flags & LBF_ALL_PINNED &&
8935 sd->balance_interval < MAX_PINNED_INTERVAL) ||
8936 sd->balance_interval < sd->max_interval)
1e3c88bd 8937 sd->balance_interval *= 2;
1e3c88bd 8938out:
1e3c88bd
PZ
8939 return ld_moved;
8940}
8941
52a08ef1
JL
8942static inline unsigned long
8943get_sd_balance_interval(struct sched_domain *sd, int cpu_busy)
8944{
8945 unsigned long interval = sd->balance_interval;
8946
8947 if (cpu_busy)
8948 interval *= sd->busy_factor;
8949
8950 /* scale ms to jiffies */
8951 interval = msecs_to_jiffies(interval);
8952 interval = clamp(interval, 1UL, max_load_balance_interval);
8953
8954 return interval;
8955}
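/*
 * Illustrative values: with sd->balance_interval = 8 (ms) and an assumed
 * sd->busy_factor = 32, a busy CPU rebalances roughly every
 * msecs_to_jiffies(8 * 32) = msecs_to_jiffies(256) jiffies (clamped to
 * [1, max_load_balance_interval]), while an idle CPU keeps the 8 ms base
 * interval.
 */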
8956
8957static inline void
31851a98 8958update_next_balance(struct sched_domain *sd, unsigned long *next_balance)
52a08ef1
JL
8959{
8960 unsigned long interval, next;
8961
31851a98
LY
8962 /* used by idle balance, so cpu_busy = 0 */
8963 interval = get_sd_balance_interval(sd, 0);
52a08ef1
JL
8964 next = sd->last_balance + interval;
8965
8966 if (time_after(*next_balance, next))
8967 *next_balance = next;
8968}
8969
1e3c88bd 8970/*
97fb7a0a 8971 * active_load_balance_cpu_stop is run by the CPU stopper. It pushes
969c7921
TH
8972 * running tasks off the busiest CPU onto idle CPUs. It requires at
8973 * least 1 task to be running on each physical CPU where possible, and
8974 * avoids physical / logical imbalances.
1e3c88bd 8975 */
969c7921 8976static int active_load_balance_cpu_stop(void *data)
1e3c88bd 8977{
969c7921
TH
8978 struct rq *busiest_rq = data;
8979 int busiest_cpu = cpu_of(busiest_rq);
1e3c88bd 8980 int target_cpu = busiest_rq->push_cpu;
969c7921 8981 struct rq *target_rq = cpu_rq(target_cpu);
1e3c88bd 8982 struct sched_domain *sd;
e5673f28 8983 struct task_struct *p = NULL;
8a8c69c3 8984 struct rq_flags rf;
969c7921 8985
8a8c69c3 8986 rq_lock_irq(busiest_rq, &rf);
edd8e41d
PZ
8987 /*
8988 * Between queueing the stop-work and running it is a hole in which
8989 * CPUs can become inactive. We should not move tasks from or to
8990 * inactive CPUs.
8991 */
8992 if (!cpu_active(busiest_cpu) || !cpu_active(target_cpu))
8993 goto out_unlock;
969c7921 8994
97fb7a0a 8995 /* Make sure the requested CPU hasn't gone down in the meantime: */
969c7921
TH
8996 if (unlikely(busiest_cpu != smp_processor_id() ||
8997 !busiest_rq->active_balance))
8998 goto out_unlock;
1e3c88bd
PZ
8999
9000 /* Is there any task to move? */
9001 if (busiest_rq->nr_running <= 1)
969c7921 9002 goto out_unlock;
1e3c88bd
PZ
9003
9004 /*
9005 * This condition is "impossible", if it occurs
9006 * we need to fix it. Originally reported by
97fb7a0a 9007 * Bjorn Helgaas on a 128-CPU setup.
1e3c88bd
PZ
9008 */
9009 BUG_ON(busiest_rq == target_rq);
9010
1e3c88bd 9011 /* Search for an sd spanning us and the target CPU. */
dce840a0 9012 rcu_read_lock();
1e3c88bd
PZ
9013 for_each_domain(target_cpu, sd) {
9014 if ((sd->flags & SD_LOAD_BALANCE) &&
9015 cpumask_test_cpu(busiest_cpu, sched_domain_span(sd)))
9016 break;
9017 }
9018
9019 if (likely(sd)) {
8e45cb54
PZ
9020 struct lb_env env = {
9021 .sd = sd,
ddcdf6e7
PZ
9022 .dst_cpu = target_cpu,
9023 .dst_rq = target_rq,
9024 .src_cpu = busiest_rq->cpu,
9025 .src_rq = busiest_rq,
8e45cb54 9026 .idle = CPU_IDLE,
65a4433a
JH
9027 /*
9028 * can_migrate_task() doesn't need to compute new_dst_cpu
9029 * for active balancing. Since we have CPU_IDLE, but no
9030 * @dst_grpmask we need to make that test go away with lying
9031 * about DST_PINNED.
9032 */
9033 .flags = LBF_DST_PINNED,
8e45cb54
PZ
9034 };
9035
ae92882e 9036 schedstat_inc(sd->alb_count);
3bed5e21 9037 update_rq_clock(busiest_rq);
1e3c88bd 9038
e5673f28 9039 p = detach_one_task(&env);
d02c0711 9040 if (p) {
ae92882e 9041 schedstat_inc(sd->alb_pushed);
d02c0711
SD
9042 /* Active balancing done, reset the failure counter. */
9043 sd->nr_balance_failed = 0;
9044 } else {
ae92882e 9045 schedstat_inc(sd->alb_failed);
d02c0711 9046 }
1e3c88bd 9047 }
dce840a0 9048 rcu_read_unlock();
969c7921
TH
9049out_unlock:
9050 busiest_rq->active_balance = 0;
8a8c69c3 9051 rq_unlock(busiest_rq, &rf);
e5673f28
KT
9052
9053 if (p)
9054 attach_one_task(target_rq, p);
9055
9056 local_irq_enable();
9057
969c7921 9058 return 0;
1e3c88bd
PZ
9059}
9060
af3fe03c
PZ
9061static DEFINE_SPINLOCK(balancing);
9062
9063/*
9064 * Scale the max load_balance interval with the number of CPUs in the system.
9065 * This trades load-balance latency on larger machines for less cross talk.
9066 */
9067void update_max_interval(void)
9068{
9069 max_load_balance_interval = HZ*num_online_cpus()/10;
9070}
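/*
 * Example scaling (illustrative): with HZ = 1000, 8 online CPUs give a
 * cap of 1000 * 8 / 10 = 800 jiffies (~800 ms), while 64 CPUs give
 * 6400 jiffies (~6.4 s), so bigger machines accept longer per-domain
 * balance intervals in exchange for less cross-CPU traffic.
 */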
9071
9072/*
9073 * It checks each scheduling domain to see if it is due to be balanced,
9074 * and initiates a balancing operation if so.
9075 *
9076 * Balancing parameters are set up in init_sched_domains.
9077 */
9078static void rebalance_domains(struct rq *rq, enum cpu_idle_type idle)
9079{
9080 int continue_balancing = 1;
9081 int cpu = rq->cpu;
9082 unsigned long interval;
9083 struct sched_domain *sd;
9084 /* Earliest time when we have to do rebalance again */
9085 unsigned long next_balance = jiffies + 60*HZ;
9086 int update_next_balance = 0;
9087 int need_serialize, need_decay = 0;
9088 u64 max_cost = 0;
9089
9090 rcu_read_lock();
9091 for_each_domain(cpu, sd) {
9092 /*
9093 * Decay the newidle max times here because this is a regular
9094 * visit to all the domains. Decay ~1% per second.
9095 */
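/*
 * For instance (assumed starting value): a max_newidle_lb_cost of
 * 10000 ns multiplied by 253/256 once per second drops to ~9880 ns
 * after one second and to roughly 8900 ns after ten seconds.
 */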
9096 if (time_after(jiffies, sd->next_decay_max_lb_cost)) {
9097 sd->max_newidle_lb_cost =
9098 (sd->max_newidle_lb_cost * 253) / 256;
9099 sd->next_decay_max_lb_cost = jiffies + HZ;
9100 need_decay = 1;
9101 }
9102 max_cost += sd->max_newidle_lb_cost;
9103
9104 if (!(sd->flags & SD_LOAD_BALANCE))
9105 continue;
9106
9107 /*
9108 * Stop the load balance at this level. There is another
9109 * CPU in our sched group which is doing load balancing more
9110 * actively.
9111 */
9112 if (!continue_balancing) {
9113 if (need_decay)
9114 continue;
9115 break;
9116 }
9117
9118 interval = get_sd_balance_interval(sd, idle != CPU_IDLE);
9119
9120 need_serialize = sd->flags & SD_SERIALIZE;
9121 if (need_serialize) {
9122 if (!spin_trylock(&balancing))
9123 goto out;
9124 }
9125
9126 if (time_after_eq(jiffies, sd->last_balance + interval)) {
9127 if (load_balance(cpu, rq, sd, idle, &continue_balancing)) {
9128 /*
9129 * The LBF_DST_PINNED logic could have changed
9130 * env->dst_cpu, so we can't know our idle
9131 * state even if we migrated tasks. Update it.
9132 */
9133 idle = idle_cpu(cpu) ? CPU_IDLE : CPU_NOT_IDLE;
9134 }
9135 sd->last_balance = jiffies;
9136 interval = get_sd_balance_interval(sd, idle != CPU_IDLE);
9137 }
9138 if (need_serialize)
9139 spin_unlock(&balancing);
9140out:
9141 if (time_after(next_balance, sd->last_balance + interval)) {
9142 next_balance = sd->last_balance + interval;
9143 update_next_balance = 1;
9144 }
9145 }
9146 if (need_decay) {
9147 /*
9148 * Ensure the rq-wide value also decays but keep it at a
9149 * reasonable floor to avoid funnies with rq->avg_idle.
9150 */
9151 rq->max_idle_balance_cost =
9152 max((u64)sysctl_sched_migration_cost, max_cost);
9153 }
9154 rcu_read_unlock();
9155
9156 /*
9157 * next_balance will be updated only when there is a need.
9158 * When the cpu is attached to null domain for ex, it will not be
9159 * updated.
9160 */
9161 if (likely(update_next_balance)) {
9162 rq->next_balance = next_balance;
9163
9164#ifdef CONFIG_NO_HZ_COMMON
9165 /*
9166 * If this CPU has been elected to perform the nohz idle
9167 * balance. Other idle CPUs have already rebalanced with
9168 * nohz_idle_balance() and nohz.next_balance has been
9169 * updated accordingly. This CPU is now running the idle load
9170 * balance for itself and we need to update the
9171 * nohz.next_balance accordingly.
9172 */
9173 if ((idle == CPU_IDLE) && time_after(nohz.next_balance, rq->next_balance))
9174 nohz.next_balance = rq->next_balance;
9175#endif
9176 }
9177}
9178
d987fc7f
MG
9179static inline int on_null_domain(struct rq *rq)
9180{
9181 return unlikely(!rcu_dereference_sched(rq->sd));
9182}
9183
3451d024 9184#ifdef CONFIG_NO_HZ_COMMON
83cd4fe2
VP
9185/*
9186 * idle load balancing details
83cd4fe2
VP
9187 * - When one of the busy CPUs notices that idle rebalancing may be
9188 * needed, it will kick the idle load balancer, which then does idle
9189 * load balancing for all the idle CPUs.
9190 */
1e3c88bd 9191
3dd0337d 9192static inline int find_new_ilb(void)
1e3c88bd 9193{
0b005cf5 9194 int ilb = cpumask_first(nohz.idle_cpus_mask);
1e3c88bd 9195
786d6dc7
SS
9196 if (ilb < nr_cpu_ids && idle_cpu(ilb))
9197 return ilb;
9198
9199 return nr_cpu_ids;
1e3c88bd 9200}
1e3c88bd 9201
83cd4fe2
VP
9202/*
9203 * Kick a CPU to do the nohz balancing, if it is time for it. We pick the
9204 * nohz_load_balancer CPU (if there is one), otherwise fall back to any idle
9205 * CPU (if there is one).
9206 */
a4064fb6 9207static void kick_ilb(unsigned int flags)
83cd4fe2
VP
9208{
9209 int ilb_cpu;
9210
9211 nohz.next_balance++;
9212
3dd0337d 9213 ilb_cpu = find_new_ilb();
83cd4fe2 9214
0b005cf5
SS
9215 if (ilb_cpu >= nr_cpu_ids)
9216 return;
83cd4fe2 9217
a4064fb6 9218 flags = atomic_fetch_or(flags, nohz_flags(ilb_cpu));
b7031a02 9219 if (flags & NOHZ_KICK_MASK)
1c792db7 9220 return;
4550487a 9221
1c792db7
SS
9222 /*
9223 * Use smp_send_reschedule() instead of resched_cpu().
97fb7a0a 9224 * This way we generate a sched IPI on the target CPU which
1c792db7
SS
9225 * is idle. And the softirq performing nohz idle load balance
9226 * will be run before returning from the IPI.
9227 */
9228 smp_send_reschedule(ilb_cpu);
4550487a
PZ
9229}
9230
9231/*
9232 * Current heuristic for kicking the idle load balancer in the presence
9233 * of an idle CPU in the system.
9234 * - This rq has more than one task.
9235 * - This rq has at least one CFS task and the capacity of the CPU is
9236 * significantly reduced because of RT tasks or IRQs.
9237 * - At the parent of the LLC scheduler domain level, this CPU's scheduler group has
9238 * multiple busy CPUs.
9239 * - For SD_ASYM_PACKING, if the lower numbered CPUs in the scheduler
9240 * domain span are idle.
9241 */
9242static void nohz_balancer_kick(struct rq *rq)
9243{
9244 unsigned long now = jiffies;
9245 struct sched_domain_shared *sds;
9246 struct sched_domain *sd;
9247 int nr_busy, i, cpu = rq->cpu;
a4064fb6 9248 unsigned int flags = 0;
4550487a
PZ
9249
9250 if (unlikely(rq->idle_balance))
9251 return;
9252
9253 /*
9254 * We may be recently in ticked or tickless idle mode. At the first
9255 * busy tick after returning from idle, we will update the busy stats.
9256 */
00357f5e 9257 nohz_balance_exit_idle(rq);
4550487a
PZ
9258
9259 /*
9260 * None are in tickless mode and hence no need for NOHZ idle load
9261 * balancing.
9262 */
9263 if (likely(!atomic_read(&nohz.nr_cpus)))
9264 return;
9265
f643ea22
VG
9266 if (READ_ONCE(nohz.has_blocked) &&
9267 time_after(now, READ_ONCE(nohz.next_blocked)))
a4064fb6
PZ
9268 flags = NOHZ_STATS_KICK;
9269
4550487a 9270 if (time_before(now, nohz.next_balance))
a4064fb6 9271 goto out;
4550487a 9272
5fbdfae5 9273 if (rq->nr_running >= 2 || rq->misfit_task_load) {
a4064fb6 9274 flags = NOHZ_KICK_MASK;
4550487a
PZ
9275 goto out;
9276 }
9277
9278 rcu_read_lock();
9279 sds = rcu_dereference(per_cpu(sd_llc_shared, cpu));
9280 if (sds) {
9281 /*
9282 * XXX: write a coherent comment on why we do this.
9283 * See also: http://lkml.kernel.org/r/20111202010832.602203411@sbsiddha-desk.sc.intel.com
9284 */
9285 nr_busy = atomic_read(&sds->nr_busy_cpus);
9286 if (nr_busy > 1) {
a4064fb6 9287 flags = NOHZ_KICK_MASK;
4550487a
PZ
9288 goto unlock;
9289 }
9290
9291 }
9292
9293 sd = rcu_dereference(rq->sd);
9294 if (sd) {
9295 if ((rq->cfs.h_nr_running >= 1) &&
9296 check_cpu_capacity(rq, sd)) {
a4064fb6 9297 flags = NOHZ_KICK_MASK;
4550487a
PZ
9298 goto unlock;
9299 }
9300 }
9301
9302 sd = rcu_dereference(per_cpu(sd_asym, cpu));
9303 if (sd) {
9304 for_each_cpu(i, sched_domain_span(sd)) {
9305 if (i == cpu ||
9306 !cpumask_test_cpu(i, nohz.idle_cpus_mask))
9307 continue;
9308
9309 if (sched_asym_prefer(i, cpu)) {
a4064fb6 9310 flags = NOHZ_KICK_MASK;
4550487a
PZ
9311 goto unlock;
9312 }
9313 }
9314 }
9315unlock:
9316 rcu_read_unlock();
9317out:
a4064fb6
PZ
9318 if (flags)
9319 kick_ilb(flags);
83cd4fe2
VP
9320}
9321
00357f5e 9322static void set_cpu_sd_state_busy(int cpu)
71325960 9323{
00357f5e 9324 struct sched_domain *sd;
a22e47a4 9325
00357f5e
PZ
9326 rcu_read_lock();
9327 sd = rcu_dereference(per_cpu(sd_llc, cpu));
a22e47a4 9328
00357f5e
PZ
9329 if (!sd || !sd->nohz_idle)
9330 goto unlock;
9331 sd->nohz_idle = 0;
9332
9333 atomic_inc(&sd->shared->nr_busy_cpus);
9334unlock:
9335 rcu_read_unlock();
71325960
SS
9336}
9337
00357f5e
PZ
9338void nohz_balance_exit_idle(struct rq *rq)
9339{
9340 SCHED_WARN_ON(rq != this_rq());
9341
9342 if (likely(!rq->nohz_tick_stopped))
9343 return;
9344
9345 rq->nohz_tick_stopped = 0;
9346 cpumask_clear_cpu(rq->cpu, nohz.idle_cpus_mask);
9347 atomic_dec(&nohz.nr_cpus);
9348
9349 set_cpu_sd_state_busy(rq->cpu);
9350}
9351
9352static void set_cpu_sd_state_idle(int cpu)
69e1e811
SS
9353{
9354 struct sched_domain *sd;
69e1e811 9355
69e1e811 9356 rcu_read_lock();
0e369d75 9357 sd = rcu_dereference(per_cpu(sd_llc, cpu));
25f55d9d
VG
9358
9359 if (!sd || sd->nohz_idle)
9360 goto unlock;
9361 sd->nohz_idle = 1;
9362
0e369d75 9363 atomic_dec(&sd->shared->nr_busy_cpus);
25f55d9d 9364unlock:
69e1e811
SS
9365 rcu_read_unlock();
9366}
9367
1e3c88bd 9368/*
97fb7a0a 9369 * This routine will record that the CPU is going idle with tick stopped.
0b005cf5 9370 * This info will be used in performing idle load balancing in the future.
1e3c88bd 9371 */
c1cc017c 9372void nohz_balance_enter_idle(int cpu)
1e3c88bd 9373{
00357f5e
PZ
9374 struct rq *rq = cpu_rq(cpu);
9375
9376 SCHED_WARN_ON(cpu != smp_processor_id());
9377
97fb7a0a 9378 /* If this CPU is going down, then nothing needs to be done: */
71325960
SS
9379 if (!cpu_active(cpu))
9380 return;
9381
387bc8b5 9382 /* Spare idle load balancing on CPUs that don't want to be disturbed: */
de201559 9383 if (!housekeeping_cpu(cpu, HK_FLAG_SCHED))
387bc8b5
FW
9384 return;
9385
f643ea22
VG
9386 /*
9387 * Can be set safely without rq->lock held
9388 * If a clear happens, it will have evaluated last additions because
9389 * rq->lock is held during the check and the clear
9390 */
9391 rq->has_blocked_load = 1;
9392
9393 /*
9394 * The tick is still stopped but load could have been added in the
9395 * meantime. We set the nohz.has_blocked flag to trigger a check of the
9396 * *_avg. The CPU is already part of nohz.idle_cpus_mask so the clear
9397 * of nohz.has_blocked can only happen after checking the new load
9398 */
00357f5e 9399 if (rq->nohz_tick_stopped)
f643ea22 9400 goto out;
1e3c88bd 9401
97fb7a0a 9402 /* If we're a completely isolated CPU, we don't play: */
00357f5e 9403 if (on_null_domain(rq))
d987fc7f
MG
9404 return;
9405
00357f5e
PZ
9406 rq->nohz_tick_stopped = 1;
9407
c1cc017c
AS
9408 cpumask_set_cpu(cpu, nohz.idle_cpus_mask);
9409 atomic_inc(&nohz.nr_cpus);
00357f5e 9410
f643ea22
VG
9411 /*
9412 * Ensures that if nohz_idle_balance() fails to observe our
9413 * @idle_cpus_mask store, it must observe the @has_blocked
9414 * store.
9415 */
9416 smp_mb__after_atomic();
9417
00357f5e 9418 set_cpu_sd_state_idle(cpu);
f643ea22
VG
9419
9420out:
9421 /*
9422 * Each time a CPU enters idle, we assume that it has blocked load and
9423 * enable the periodic update of the load of idle CPUs.
9424 */
9425 WRITE_ONCE(nohz.has_blocked, 1);
1e3c88bd 9426}
1e3c88bd 9427
1e3c88bd 9428/*
31e77c93
VG
9429 * Internal function that runs load balance for all idle cpus. The load balance
9430 * can be a simple update of blocked load or a complete load balance with
9431 * tasks movement depending of flags.
9432 * The function returns false if the loop has stopped before running
9433 * through all idle CPUs.
1e3c88bd 9434 */
31e77c93
VG
9435static bool _nohz_idle_balance(struct rq *this_rq, unsigned int flags,
9436 enum cpu_idle_type idle)
83cd4fe2 9437{
c5afb6a8 9438 /* Earliest time when we have to do rebalance again */
a4064fb6
PZ
9439 unsigned long now = jiffies;
9440 unsigned long next_balance = now + 60*HZ;
f643ea22 9441 bool has_blocked_load = false;
c5afb6a8 9442 int update_next_balance = 0;
b7031a02 9443 int this_cpu = this_rq->cpu;
b7031a02 9444 int balance_cpu;
31e77c93 9445 int ret = false;
b7031a02 9446 struct rq *rq;
83cd4fe2 9447
b7031a02 9448 SCHED_WARN_ON((flags & NOHZ_KICK_MASK) == NOHZ_BALANCE_KICK);
83cd4fe2 9449
f643ea22
VG
9450 /*
9451 * We assume there will be no idle load after this update and clear
9452 * the has_blocked flag. If a CPU enters idle in the meantime, it will
9453 * set the has_blocked flag and trigger another update of idle load.
9454 * Because a CPU that becomes idle is added to idle_cpus_mask before
9455 * setting the flag, we are sure not to clear the state and not to
9456 * check the load of an idle CPU.
9457 */
9458 WRITE_ONCE(nohz.has_blocked, 0);
9459
9460 /*
9461 * Ensures that if we miss the CPU, we must see the has_blocked
9462 * store from nohz_balance_enter_idle().
9463 */
9464 smp_mb();
9465
83cd4fe2 9466 for_each_cpu(balance_cpu, nohz.idle_cpus_mask) {
8a6d42d1 9467 if (balance_cpu == this_cpu || !idle_cpu(balance_cpu))
83cd4fe2
VP
9468 continue;
9469
9470 /*
97fb7a0a
IM
9471 * If this CPU gets work to do, stop the load balancing
9472 * work being done for other CPUs. Next load
83cd4fe2
VP
9473 * balancing owner will pick it up.
9474 */
f643ea22
VG
9475 if (need_resched()) {
9476 has_blocked_load = true;
9477 goto abort;
9478 }
83cd4fe2 9479
5ed4f1d9
VG
9480 rq = cpu_rq(balance_cpu);
9481
63928384 9482 has_blocked_load |= update_nohz_stats(rq, true);
f643ea22 9483
ed61bbc6
TC
9484 /*
9485 * If time for next balance is due,
9486 * do the balance.
9487 */
9488 if (time_after_eq(jiffies, rq->next_balance)) {
8a8c69c3
PZ
9489 struct rq_flags rf;
9490
31e77c93 9491 rq_lock_irqsave(rq, &rf);
ed61bbc6 9492 update_rq_clock(rq);
cee1afce 9493 cpu_load_update_idle(rq);
31e77c93 9494 rq_unlock_irqrestore(rq, &rf);
8a8c69c3 9495
b7031a02
PZ
9496 if (flags & NOHZ_BALANCE_KICK)
9497 rebalance_domains(rq, CPU_IDLE);
ed61bbc6 9498 }
83cd4fe2 9499
c5afb6a8
VG
9500 if (time_after(next_balance, rq->next_balance)) {
9501 next_balance = rq->next_balance;
9502 update_next_balance = 1;
9503 }
83cd4fe2 9504 }
c5afb6a8 9505
31e77c93
VG
9506 /* Newly idle CPU doesn't need an update */
9507 if (idle != CPU_NEWLY_IDLE) {
9508 update_blocked_averages(this_cpu);
9509 has_blocked_load |= this_rq->has_blocked_load;
9510 }
9511
b7031a02
PZ
9512 if (flags & NOHZ_BALANCE_KICK)
9513 rebalance_domains(this_rq, CPU_IDLE);
9514
f643ea22
VG
9515 WRITE_ONCE(nohz.next_blocked,
9516 now + msecs_to_jiffies(LOAD_AVG_PERIOD));
9517
31e77c93
VG
9518 /* The full idle balance loop has been done */
9519 ret = true;
9520
f643ea22
VG
9521abort:
9522 /* There is still blocked load, enable periodic update */
9523 if (has_blocked_load)
9524 WRITE_ONCE(nohz.has_blocked, 1);
a4064fb6 9525
c5afb6a8
VG
9526 /*
9527 * next_balance will be updated only when there is a need.
9528 * When the CPU is attached to null domain for ex, it will not be
9529 * updated.
9530 */
9531 if (likely(update_next_balance))
9532 nohz.next_balance = next_balance;
b7031a02 9533
31e77c93
VG
9534 return ret;
9535}
9536
9537/*
9538 * In CONFIG_NO_HZ_COMMON case, the idle balance kickee will do the
9539 * rebalancing for all the cpus for whom scheduler ticks are stopped.
9540 */
9541static bool nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle)
9542{
9543 int this_cpu = this_rq->cpu;
9544 unsigned int flags;
9545
9546 if (!(atomic_read(nohz_flags(this_cpu)) & NOHZ_KICK_MASK))
9547 return false;
9548
9549 if (idle != CPU_IDLE) {
9550 atomic_andnot(NOHZ_KICK_MASK, nohz_flags(this_cpu));
9551 return false;
9552 }
9553
9554 /*
9555 * barrier, pairs with nohz_balance_enter_idle(), ensures ...
9556 */
9557 flags = atomic_fetch_andnot(NOHZ_KICK_MASK, nohz_flags(this_cpu));
9558 if (!(flags & NOHZ_KICK_MASK))
9559 return false;
9560
9561 _nohz_idle_balance(this_rq, flags, idle);
9562
b7031a02 9563 return true;
83cd4fe2 9564}
31e77c93
VG
9565
9566static void nohz_newidle_balance(struct rq *this_rq)
9567{
9568 int this_cpu = this_rq->cpu;
9569
9570 /*
9571 * This CPU doesn't want to be disturbed by scheduler
9572 * housekeeping
9573 */
9574 if (!housekeeping_cpu(this_cpu, HK_FLAG_SCHED))
9575 return;
9576
9577 /* Will wake up very soon. No time for doing anything else */
9578 if (this_rq->avg_idle < sysctl_sched_migration_cost)
9579 return;
9580
9581 /* Don't need to update blocked load of idle CPUs */
9582 if (!READ_ONCE(nohz.has_blocked) ||
9583 time_before(jiffies, READ_ONCE(nohz.next_blocked)))
9584 return;
9585
9586 raw_spin_unlock(&this_rq->lock);
9587 /*
9588 * This CPU is going to be idle and the blocked load of idle CPUs
9589 * needs to be updated. Run the ilb locally as it is a good
9590 * candidate for ilb instead of waking up another idle CPU.
9591 * Kick a normal ilb if we failed to do the update.
9592 */
9593 if (!_nohz_idle_balance(this_rq, NOHZ_STATS_KICK, CPU_NEWLY_IDLE))
9594 kick_ilb(NOHZ_STATS_KICK);
9595 raw_spin_lock(&this_rq->lock);
9596}
9597
dd707247
PZ
9598#else /* !CONFIG_NO_HZ_COMMON */
9599static inline void nohz_balancer_kick(struct rq *rq) { }
9600
31e77c93 9601static inline bool nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle)
b7031a02
PZ
9602{
9603 return false;
9604}
31e77c93
VG
9605
9606static inline void nohz_newidle_balance(struct rq *this_rq) { }
dd707247 9607#endif /* CONFIG_NO_HZ_COMMON */
83cd4fe2 9608
47ea5412
PZ
9609/*
9610 * idle_balance is called by schedule() if this_cpu is about to become
9611 * idle. Attempts to pull tasks from other CPUs.
9612 */
9613static int idle_balance(struct rq *this_rq, struct rq_flags *rf)
9614{
9615 unsigned long next_balance = jiffies + HZ;
9616 int this_cpu = this_rq->cpu;
9617 struct sched_domain *sd;
9618 int pulled_task = 0;
9619 u64 curr_cost = 0;
9620
9621 /*
9622 * We must set idle_stamp _before_ calling idle_balance(), such that we
9623 * measure the duration of idle_balance() as idle time.
9624 */
9625 this_rq->idle_stamp = rq_clock(this_rq);
9626
9627 /*
9628 * Do not pull tasks towards !active CPUs...
9629 */
9630 if (!cpu_active(this_cpu))
9631 return 0;
9632
9633 /*
9634 * This is OK, because current is on_cpu, which avoids it being picked
9635 * for load-balance and preemption/IRQs are still disabled avoiding
9636 * further scheduler activity on it and we're being very careful to
9637 * re-start the picking loop.
9638 */
9639 rq_unpin_lock(this_rq, rf);
9640
9641 if (this_rq->avg_idle < sysctl_sched_migration_cost ||
e90c8fe1 9642 !READ_ONCE(this_rq->rd->overload)) {
31e77c93 9643
47ea5412
PZ
9644 rcu_read_lock();
9645 sd = rcu_dereference_check_sched_domain(this_rq->sd);
9646 if (sd)
9647 update_next_balance(sd, &next_balance);
9648 rcu_read_unlock();
9649
31e77c93
VG
9650 nohz_newidle_balance(this_rq);
9651
47ea5412
PZ
9652 goto out;
9653 }
9654
9655 raw_spin_unlock(&this_rq->lock);
9656
9657 update_blocked_averages(this_cpu);
9658 rcu_read_lock();
9659 for_each_domain(this_cpu, sd) {
9660 int continue_balancing = 1;
9661 u64 t0, domain_cost;
9662
9663 if (!(sd->flags & SD_LOAD_BALANCE))
9664 continue;
9665
9666 if (this_rq->avg_idle < curr_cost + sd->max_newidle_lb_cost) {
9667 update_next_balance(sd, &next_balance);
9668 break;
9669 }
9670
9671 if (sd->flags & SD_BALANCE_NEWIDLE) {
9672 t0 = sched_clock_cpu(this_cpu);
9673
9674 pulled_task = load_balance(this_cpu, this_rq,
9675 sd, CPU_NEWLY_IDLE,
9676 &continue_balancing);
9677
9678 domain_cost = sched_clock_cpu(this_cpu) - t0;
9679 if (domain_cost > sd->max_newidle_lb_cost)
9680 sd->max_newidle_lb_cost = domain_cost;
9681
9682 curr_cost += domain_cost;
9683 }
9684
9685 update_next_balance(sd, &next_balance);
9686
9687 /*
9688 * Stop searching for tasks to pull if there are
9689 * now runnable tasks on this rq.
9690 */
9691 if (pulled_task || this_rq->nr_running > 0)
9692 break;
9693 }
9694 rcu_read_unlock();
9695
9696 raw_spin_lock(&this_rq->lock);
9697
9698 if (curr_cost > this_rq->max_idle_balance_cost)
9699 this_rq->max_idle_balance_cost = curr_cost;
9700
457be908 9701out:
47ea5412
PZ
9702 /*
9703 * While browsing the domains, we released the rq lock, a task could
9704 * have been enqueued in the meantime. Since we're not going idle,
9705 * pretend we pulled a task.
9706 */
9707 if (this_rq->cfs.h_nr_running && !pulled_task)
9708 pulled_task = 1;
9709
47ea5412
PZ
9710 /* Move the next balance forward */
9711 if (time_after(this_rq->next_balance, next_balance))
9712 this_rq->next_balance = next_balance;
9713
9714 /* Is there a task of a high priority class? */
9715 if (this_rq->nr_running != this_rq->cfs.h_nr_running)
9716 pulled_task = -1;
9717
9718 if (pulled_task)
9719 this_rq->idle_stamp = 0;
9720
9721 rq_repin_lock(this_rq, rf);
9722
9723 return pulled_task;
9724}
9725
83cd4fe2
VP
9726/*
9727 * run_rebalance_domains is triggered when needed from the scheduler tick.
9728 * Also triggered for nohz idle balancing (with nohz_balancing_kick set).
9729 */
0766f788 9730static __latent_entropy void run_rebalance_domains(struct softirq_action *h)
1e3c88bd 9731{
208cb16b 9732 struct rq *this_rq = this_rq();
6eb57e0d 9733 enum cpu_idle_type idle = this_rq->idle_balance ?
1e3c88bd
PZ
9734 CPU_IDLE : CPU_NOT_IDLE;
9735
1e3c88bd 9736 /*
97fb7a0a
IM
9737 * If this CPU has a pending nohz_balance_kick, then do the
9738 * balancing on behalf of the other idle CPUs whose ticks are
d4573c3e 9739 * stopped. Do nohz_idle_balance *before* rebalance_domains to
97fb7a0a 9740 * give the idle CPUs a chance to load balance. Else we may
d4573c3e
PM
9741 * load balance only within the local sched_domain hierarchy
9742 * and abort nohz_idle_balance altogether if we pull some load.
1e3c88bd 9743 */
b7031a02
PZ
9744 if (nohz_idle_balance(this_rq, idle))
9745 return;
9746
9747 /* normal load balance */
9748 update_blocked_averages(this_rq->cpu);
d4573c3e 9749 rebalance_domains(this_rq, idle);
1e3c88bd
PZ
9750}
9751
1e3c88bd
PZ
9752/*
9753 * Trigger the SCHED_SOFTIRQ if it is time to do periodic load balancing.
1e3c88bd 9754 */
7caff66f 9755void trigger_load_balance(struct rq *rq)
1e3c88bd 9756{
1e3c88bd 9757 /* Don't need to rebalance while attached to NULL domain */
c726099e
DL
9758 if (unlikely(on_null_domain(rq)))
9759 return;
9760
9761 if (time_after_eq(jiffies, rq->next_balance))
1e3c88bd 9762 raise_softirq(SCHED_SOFTIRQ);
4550487a
PZ
9763
9764 nohz_balancer_kick(rq);
1e3c88bd
PZ
9765}
9766
0bcdcf28
CE
9767static void rq_online_fair(struct rq *rq)
9768{
9769 update_sysctl();
0e59bdae
KT
9770
9771 update_runtime_enabled(rq);
0bcdcf28
CE
9772}
9773
9774static void rq_offline_fair(struct rq *rq)
9775{
9776 update_sysctl();
a4c96ae3
PB
9777
9778 /* Ensure any throttled groups are reachable by pick_next_task */
9779 unthrottle_offline_cfs_rqs(rq);
0bcdcf28
CE
9780}
9781
55e12e5e 9782#endif /* CONFIG_SMP */
e1d1484f 9783
bf0f6f24 9784/*
d84b3131
FW
9785 * scheduler tick hitting a task of our scheduling class.
9786 *
9787 * NOTE: This function can be called remotely by the tick offload that
9788 * goes along full dynticks. Therefore no local assumption can be made
9789 * and everything must be accessed through the @rq and @curr passed in
9790 * parameters.
bf0f6f24 9791 */
8f4d37ec 9792static void task_tick_fair(struct rq *rq, struct task_struct *curr, int queued)
bf0f6f24
IM
9793{
9794 struct cfs_rq *cfs_rq;
9795 struct sched_entity *se = &curr->se;
9796
9797 for_each_sched_entity(se) {
9798 cfs_rq = cfs_rq_of(se);
8f4d37ec 9799 entity_tick(cfs_rq, se, queued);
bf0f6f24 9800 }
18bf2805 9801
b52da86e 9802 if (static_branch_unlikely(&sched_numa_balancing))
cbee9f88 9803 task_tick_numa(rq, curr);
3b1baa64
MR
9804
9805 update_misfit_status(curr, rq);
bf0f6f24
IM
9806}
9807
9808/*
cd29fe6f
PZ
9809 * called on fork with the child task as argument from the parent's context
9810 * - child not yet on the tasklist
9811 * - preemption disabled
bf0f6f24 9812 */
cd29fe6f 9813static void task_fork_fair(struct task_struct *p)
bf0f6f24 9814{
4fc420c9
DN
9815 struct cfs_rq *cfs_rq;
9816 struct sched_entity *se = &p->se, *curr;
cd29fe6f 9817 struct rq *rq = this_rq();
8a8c69c3 9818 struct rq_flags rf;
bf0f6f24 9819
8a8c69c3 9820 rq_lock(rq, &rf);
861d034e
PZ
9821 update_rq_clock(rq);
9822
4fc420c9
DN
9823 cfs_rq = task_cfs_rq(current);
9824 curr = cfs_rq->curr;
e210bffd
PZ
9825 if (curr) {
9826 update_curr(cfs_rq);
b5d9d734 9827 se->vruntime = curr->vruntime;
e210bffd 9828 }
aeb73b04 9829 place_entity(cfs_rq, se, 1);
4d78e7b6 9830
cd29fe6f 9831 if (sysctl_sched_child_runs_first && curr && entity_before(curr, se)) {
87fefa38 9832 /*
edcb60a3
IM
9833 * Upon rescheduling, sched_class::put_prev_task() will place
9834 * 'current' within the tree based on its new key value.
9835 */
4d78e7b6 9836 swap(curr->vruntime, se->vruntime);
8875125e 9837 resched_curr(rq);
4d78e7b6 9838 }
bf0f6f24 9839
88ec22d3 9840 se->vruntime -= cfs_rq->min_vruntime;
8a8c69c3 9841 rq_unlock(rq, &rf);
bf0f6f24
IM
9842}
9843
cb469845
SR
9844/*
9845 * Priority of the task has changed. Check to see if we preempt
9846 * the current task.
9847 */
da7a735e
PZ
9848static void
9849prio_changed_fair(struct rq *rq, struct task_struct *p, int oldprio)
cb469845 9850{
da0c1e65 9851 if (!task_on_rq_queued(p))
da7a735e
PZ
9852 return;
9853
cb469845
SR
9854 /*
9855 * Reschedule if we are currently running on this runqueue and
9856 * our priority decreased, or if we are not currently running on
9857 * this runqueue and our priority is higher than the current's
9858 */
da7a735e 9859 if (rq->curr == p) {
cb469845 9860 if (p->prio > oldprio)
8875125e 9861 resched_curr(rq);
cb469845 9862 } else
15afe09b 9863 check_preempt_curr(rq, p, 0);
cb469845
SR
9864}
9865
daa59407 9866static inline bool vruntime_normalized(struct task_struct *p)
da7a735e
PZ
9867{
9868 struct sched_entity *se = &p->se;
da7a735e
PZ
9869
9870 /*
daa59407
BP
9871 * In both the TASK_ON_RQ_QUEUED and TASK_ON_RQ_MIGRATING cases,
9872 * the dequeue_entity(.flags=0) will already have normalized the
9873 * vruntime.
9874 */
9875 if (p->on_rq)
9876 return true;
9877
9878 /*
9879 * When !on_rq, vruntime of the task has usually NOT been normalized.
9880 * But there are some cases where it has already been normalized:
da7a735e 9881 *
daa59407
BP
9882 * - A forked child which is waiting for being woken up by
9883 * wake_up_new_task().
9884 * - A task which has been woken up by try_to_wake_up() and
9885 * waiting for actually being woken up by sched_ttwu_pending().
da7a735e 9886 */
d0cdb3ce
SM
9887 if (!se->sum_exec_runtime ||
9888 (p->state == TASK_WAKING && p->sched_remote_wakeup))
daa59407
BP
9889 return true;
9890
9891 return false;
9892}
9893
09a43ace
VG
9894#ifdef CONFIG_FAIR_GROUP_SCHED
9895/*
9896 * Propagate the changes of the sched_entity across the tg tree to make it
9897 * visible to the root
9898 */
9899static void propagate_entity_cfs_rq(struct sched_entity *se)
9900{
9901 struct cfs_rq *cfs_rq;
9902
9903 /* Start to propagate at parent */
9904 se = se->parent;
9905
9906 for_each_sched_entity(se) {
9907 cfs_rq = cfs_rq_of(se);
9908
9909 if (cfs_rq_throttled(cfs_rq))
9910 break;
9911
88c0616e 9912 update_load_avg(cfs_rq, se, UPDATE_TG);
09a43ace
VG
9913 }
9914}
9915#else
9916static void propagate_entity_cfs_rq(struct sched_entity *se) { }
9917#endif
9918
df217913 9919static void detach_entity_cfs_rq(struct sched_entity *se)
daa59407 9920{
daa59407
BP
9921 struct cfs_rq *cfs_rq = cfs_rq_of(se);
9922
9d89c257 9923 /* Catch up with the cfs_rq and remove our load when we leave */
88c0616e 9924 update_load_avg(cfs_rq, se, 0);
a05e8c51 9925 detach_entity_load_avg(cfs_rq, se);
7c3edd2c 9926 update_tg_load_avg(cfs_rq, false);
09a43ace 9927 propagate_entity_cfs_rq(se);
da7a735e
PZ
9928}
9929
df217913 9930static void attach_entity_cfs_rq(struct sched_entity *se)
cb469845 9931{
daa59407 9932 struct cfs_rq *cfs_rq = cfs_rq_of(se);
7855a35a
BP
9933
9934#ifdef CONFIG_FAIR_GROUP_SCHED
eb7a59b2
M
9935 /*
9936 * Since the real-depth could have been changed (only FAIR
9937 * class maintain depth value), reset depth properly.
9938 */
9939 se->depth = se->parent ? se->parent->depth + 1 : 0;
9940#endif
7855a35a 9941
df217913 9942 /* Synchronize entity with its cfs_rq */
88c0616e 9943 update_load_avg(cfs_rq, se, sched_feat(ATTACH_AGE_LOAD) ? 0 : SKIP_AGE_LOAD);
ea14b57e 9944 attach_entity_load_avg(cfs_rq, se, 0);
7c3edd2c 9945 update_tg_load_avg(cfs_rq, false);
09a43ace 9946 propagate_entity_cfs_rq(se);
df217913
VG
9947}
9948
9949static void detach_task_cfs_rq(struct task_struct *p)
9950{
9951 struct sched_entity *se = &p->se;
9952 struct cfs_rq *cfs_rq = cfs_rq_of(se);
9953
9954 if (!vruntime_normalized(p)) {
9955 /*
9956 * Fix up our vruntime so that the current sleep doesn't
9957 * cause 'unlimited' sleep bonus.
9958 */
9959 place_entity(cfs_rq, se, 0);
9960 se->vruntime -= cfs_rq->min_vruntime;
9961 }
9962
9963 detach_entity_cfs_rq(se);
9964}
9965
9966static void attach_task_cfs_rq(struct task_struct *p)
9967{
9968 struct sched_entity *se = &p->se;
9969 struct cfs_rq *cfs_rq = cfs_rq_of(se);
9970
9971 attach_entity_cfs_rq(se);
daa59407
BP
9972
9973 if (!vruntime_normalized(p))
9974 se->vruntime += cfs_rq->min_vruntime;
9975}
6efdb105 9976
daa59407
BP
9977static void switched_from_fair(struct rq *rq, struct task_struct *p)
9978{
9979 detach_task_cfs_rq(p);
9980}
9981
9982static void switched_to_fair(struct rq *rq, struct task_struct *p)
9983{
9984 attach_task_cfs_rq(p);
7855a35a 9985
daa59407 9986 if (task_on_rq_queued(p)) {
7855a35a 9987 /*
daa59407
BP
9988 * We were most likely switched from sched_rt, so
 9989	 * force a reschedule if we are running; otherwise just check
 9990	 * whether we can still preempt the current task.
7855a35a 9991 */
daa59407
BP
9992 if (rq->curr == p)
9993 resched_curr(rq);
9994 else
9995 check_preempt_curr(rq, p, 0);
7855a35a 9996 }
cb469845
SR
9997}
9998
83b699ed
SV
9999/* Account for a task changing its policy or group.
10000 *
 10001 * This routine is mostly called to set the cfs_rq->curr field when a task
10002 * migrates between groups/classes.
10003 */
10004static void set_curr_task_fair(struct rq *rq)
10005{
10006 struct sched_entity *se = &rq->curr->se;
10007
ec12cb7f
PT
10008 for_each_sched_entity(se) {
10009 struct cfs_rq *cfs_rq = cfs_rq_of(se);
10010
10011 set_next_entity(cfs_rq, se);
10012 /* ensure bandwidth has been allocated on our new cfs_rq */
10013 account_cfs_rq_runtime(cfs_rq, 0);
10014 }
83b699ed
SV
10015}
10016
029632fb
PZ
10017void init_cfs_rq(struct cfs_rq *cfs_rq)
10018{
bfb06889 10019 cfs_rq->tasks_timeline = RB_ROOT_CACHED;
029632fb
PZ
10020 cfs_rq->min_vruntime = (u64)(-(1LL << 20));
10021#ifndef CONFIG_64BIT
10022 cfs_rq->min_vruntime_copy = cfs_rq->min_vruntime;
10023#endif
141965c7 10024#ifdef CONFIG_SMP
2a2f5d4e 10025 raw_spin_lock_init(&cfs_rq->removed.lock);
9ee474f5 10026#endif
029632fb
PZ
10027}
10028
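/*
 * Editor's note -- not part of fair.c.  The initial min_vruntime above is
 * easier to read as a worked value:
 *
 *   -(1LL << 20)        = -1048576
 *   (u64)(-(1LL << 20)) = 2^64 - 2^20 = 0xFFFFFFFFFFF00000
 *
 * i.e. the per-runqueue clock starts roughly a millisecond's worth of
 * nanoseconds below the u64 wrap point, which (presumably by design)
 * exercises the wrap-safe signed-delta comparisons on vruntime soon after
 * the runqueue starts running.
 */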
810b3817 10029#ifdef CONFIG_FAIR_GROUP_SCHED
ea86cb4b
VG
10030static void task_set_group_fair(struct task_struct *p)
10031{
10032 struct sched_entity *se = &p->se;
10033
10034 set_task_rq(p, task_cpu(p));
10035 se->depth = se->parent ? se->parent->depth + 1 : 0;
10036}
10037
bc54da21 10038static void task_move_group_fair(struct task_struct *p)
810b3817 10039{
daa59407 10040 detach_task_cfs_rq(p);
b2b5ce02 10041 set_task_rq(p, task_cpu(p));
6efdb105
BP
10042
10043#ifdef CONFIG_SMP
 10044	/* Signal that se's cfs_rq has changed -- the task has migrated */
10045 p->se.avg.last_update_time = 0;
10046#endif
daa59407 10047 attach_task_cfs_rq(p);
810b3817 10048}
029632fb 10049
ea86cb4b
VG
10050static void task_change_group_fair(struct task_struct *p, int type)
10051{
10052 switch (type) {
10053 case TASK_SET_GROUP:
10054 task_set_group_fair(p);
10055 break;
10056
10057 case TASK_MOVE_GROUP:
10058 task_move_group_fair(p);
10059 break;
10060 }
10061}
10062
029632fb
PZ
10063void free_fair_sched_group(struct task_group *tg)
10064{
10065 int i;
10066
10067 destroy_cfs_bandwidth(tg_cfs_bandwidth(tg));
10068
10069 for_each_possible_cpu(i) {
10070 if (tg->cfs_rq)
10071 kfree(tg->cfs_rq[i]);
6fe1f348 10072 if (tg->se)
029632fb
PZ
10073 kfree(tg->se[i]);
10074 }
10075
10076 kfree(tg->cfs_rq);
10077 kfree(tg->se);
10078}
10079
10080int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
10081{
029632fb 10082 struct sched_entity *se;
b7fa30c9 10083 struct cfs_rq *cfs_rq;
029632fb
PZ
10084 int i;
10085
6396bb22 10086 tg->cfs_rq = kcalloc(nr_cpu_ids, sizeof(cfs_rq), GFP_KERNEL);
029632fb
PZ
10087 if (!tg->cfs_rq)
10088 goto err;
6396bb22 10089 tg->se = kcalloc(nr_cpu_ids, sizeof(se), GFP_KERNEL);
029632fb
PZ
10090 if (!tg->se)
10091 goto err;
10092
10093 tg->shares = NICE_0_LOAD;
10094
10095 init_cfs_bandwidth(tg_cfs_bandwidth(tg));
10096
10097 for_each_possible_cpu(i) {
10098 cfs_rq = kzalloc_node(sizeof(struct cfs_rq),
10099 GFP_KERNEL, cpu_to_node(i));
10100 if (!cfs_rq)
10101 goto err;
10102
10103 se = kzalloc_node(sizeof(struct sched_entity),
10104 GFP_KERNEL, cpu_to_node(i));
10105 if (!se)
10106 goto err_free_rq;
10107
10108 init_cfs_rq(cfs_rq);
10109 init_tg_cfs_entry(tg, cfs_rq, se, i, parent->se[i]);
540247fb 10110 init_entity_runnable_average(se);
029632fb
PZ
10111 }
10112
10113 return 1;
10114
10115err_free_rq:
10116 kfree(cfs_rq);
10117err:
10118 return 0;
10119}
10120
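/*
 * Editor's note -- not part of fair.c.  In the kcalloc() calls above,
 * sizeof(cfs_rq) and sizeof(se) are the sizes of the local *pointer*
 * variables, so tg->cfs_rq and tg->se become per-CPU arrays of pointers;
 * the actual struct cfs_rq / sched_entity objects are then allocated one
 * per CPU by kzalloc_node().  A tiny stand-alone illustration of the same
 * idiom (the helper name is made up):
 */
static inline int example_alloc_ptr_array(unsigned int nr)
{
	struct cfs_rq **table;

	/* sizeof(*table) == sizeof(struct cfs_rq *): an array of pointers */
	table = kcalloc(nr, sizeof(*table), GFP_KERNEL);
	if (!table)
		return -ENOMEM;

	kfree(table);
	return 0;
}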
8663e24d
PZ
10121void online_fair_sched_group(struct task_group *tg)
10122{
10123 struct sched_entity *se;
10124 struct rq *rq;
10125 int i;
10126
10127 for_each_possible_cpu(i) {
10128 rq = cpu_rq(i);
10129 se = tg->se[i];
10130
10131 raw_spin_lock_irq(&rq->lock);
4126bad6 10132 update_rq_clock(rq);
d0326691 10133 attach_entity_cfs_rq(se);
55e16d30 10134 sync_throttle(tg, i);
8663e24d
PZ
10135 raw_spin_unlock_irq(&rq->lock);
10136 }
10137}
10138
6fe1f348 10139void unregister_fair_sched_group(struct task_group *tg)
029632fb 10140{
029632fb 10141 unsigned long flags;
6fe1f348
PZ
10142 struct rq *rq;
10143 int cpu;
029632fb 10144
6fe1f348
PZ
10145 for_each_possible_cpu(cpu) {
10146 if (tg->se[cpu])
10147 remove_entity_load_avg(tg->se[cpu]);
029632fb 10148
6fe1f348
PZ
10149 /*
 10150	 * Only empty task groups can be destroyed, so we can speculatively
10151 * check on_list without danger of it being re-added.
10152 */
10153 if (!tg->cfs_rq[cpu]->on_list)
10154 continue;
10155
10156 rq = cpu_rq(cpu);
10157
10158 raw_spin_lock_irqsave(&rq->lock, flags);
10159 list_del_leaf_cfs_rq(tg->cfs_rq[cpu]);
10160 raw_spin_unlock_irqrestore(&rq->lock, flags);
10161 }
029632fb
PZ
10162}
10163
10164void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq,
10165 struct sched_entity *se, int cpu,
10166 struct sched_entity *parent)
10167{
10168 struct rq *rq = cpu_rq(cpu);
10169
10170 cfs_rq->tg = tg;
10171 cfs_rq->rq = rq;
029632fb
PZ
10172 init_cfs_rq_runtime(cfs_rq);
10173
10174 tg->cfs_rq[cpu] = cfs_rq;
10175 tg->se[cpu] = se;
10176
10177 /* se could be NULL for root_task_group */
10178 if (!se)
10179 return;
10180
fed14d45 10181 if (!parent) {
029632fb 10182 se->cfs_rq = &rq->cfs;
fed14d45
PZ
10183 se->depth = 0;
10184 } else {
029632fb 10185 se->cfs_rq = parent->my_q;
fed14d45
PZ
10186 se->depth = parent->depth + 1;
10187 }
029632fb
PZ
10188
10189 se->my_q = cfs_rq;
0ac9b1c2
PT
10190 /* guarantee group entities always have weight */
10191 update_load_set(&se->load, NICE_0_LOAD);
029632fb
PZ
10192 se->parent = parent;
10193}
10194
10195static DEFINE_MUTEX(shares_mutex);
10196
10197int sched_group_set_shares(struct task_group *tg, unsigned long shares)
10198{
10199 int i;
029632fb
PZ
10200
10201 /*
10202 * We can't change the weight of the root cgroup.
10203 */
10204 if (!tg->se[0])
10205 return -EINVAL;
10206
10207 shares = clamp(shares, scale_load(MIN_SHARES), scale_load(MAX_SHARES));
10208
10209 mutex_lock(&shares_mutex);
10210 if (tg->shares == shares)
10211 goto done;
10212
10213 tg->shares = shares;
10214 for_each_possible_cpu(i) {
10215 struct rq *rq = cpu_rq(i);
8a8c69c3
PZ
10216 struct sched_entity *se = tg->se[i];
10217 struct rq_flags rf;
029632fb 10218
029632fb 10219 /* Propagate contribution to hierarchy */
8a8c69c3 10220 rq_lock_irqsave(rq, &rf);
71b1da46 10221 update_rq_clock(rq);
89ee048f 10222 for_each_sched_entity(se) {
88c0616e 10223 update_load_avg(cfs_rq_of(se), se, UPDATE_TG);
1ea6c46a 10224 update_cfs_group(se);
89ee048f 10225 }
8a8c69c3 10226 rq_unlock_irqrestore(rq, &rf);
029632fb
PZ
10227 }
10228
10229done:
10230 mutex_unlock(&shares_mutex);
10231 return 0;
10232}
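/*
 * Editor's note -- not part of fair.c.  The clamp() above simply bounds the
 * requested weight before it is stored and propagated.  With hypothetical
 * bounds lo=2 and hi=262144 (the real limits are scale_load(MIN_SHARES)
 * and scale_load(MAX_SHARES)):
 *
 *   clamp(      1, 2, 262144) ->      2
 *   clamp(   1024, 2, 262144) ->   1024
 *   clamp(9999999, 2, 262144) -> 262144
 */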
10233#else /* CONFIG_FAIR_GROUP_SCHED */
10234
10235void free_fair_sched_group(struct task_group *tg) { }
10236
10237int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
10238{
10239 return 1;
10240}
10241
8663e24d
PZ
10242void online_fair_sched_group(struct task_group *tg) { }
10243
6fe1f348 10244void unregister_fair_sched_group(struct task_group *tg) { }
029632fb
PZ
10245
10246#endif /* CONFIG_FAIR_GROUP_SCHED */
10247
810b3817 10248
6d686f45 10249static unsigned int get_rr_interval_fair(struct rq *rq, struct task_struct *task)
0d721cea
PW
10250{
10251 struct sched_entity *se = &task->se;
0d721cea
PW
10252 unsigned int rr_interval = 0;
10253
10254 /*
10255 * Time slice is 0 for SCHED_OTHER tasks that are on an otherwise
10256 * idle runqueue:
10257 */
0d721cea 10258 if (rq->cfs.load.weight)
a59f4e07 10259 rr_interval = NS_TO_JIFFIES(sched_slice(cfs_rq_of(se), se));
0d721cea
PW
10260
10261 return rr_interval;
10262}
10263
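/*
 * Editor's note -- not part of fair.c.  The value returned above is the CFS
 * slice converted to jiffies, so it depends on HZ.  Assuming HZ=250 (4 ms
 * per jiffy) and a sched_slice() of 6 ms, NS_TO_JIFFIES() would report
 * 6000000 / 4000000 = 1 jiffy after integer division; on an otherwise idle
 * runqueue the interval is reported as 0, per the comment above.
 */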
bf0f6f24
IM
10264/*
10265 * All the scheduling class methods:
10266 */
029632fb 10267const struct sched_class fair_sched_class = {
5522d5d5 10268 .next = &idle_sched_class,
bf0f6f24
IM
10269 .enqueue_task = enqueue_task_fair,
10270 .dequeue_task = dequeue_task_fair,
10271 .yield_task = yield_task_fair,
d95f4122 10272 .yield_to_task = yield_to_task_fair,
bf0f6f24 10273
2e09bf55 10274 .check_preempt_curr = check_preempt_wakeup,
bf0f6f24
IM
10275
10276 .pick_next_task = pick_next_task_fair,
10277 .put_prev_task = put_prev_task_fair,
10278
681f3e68 10279#ifdef CONFIG_SMP
4ce72a2c 10280 .select_task_rq = select_task_rq_fair,
0a74bef8 10281 .migrate_task_rq = migrate_task_rq_fair,
141965c7 10282
0bcdcf28
CE
10283 .rq_online = rq_online_fair,
10284 .rq_offline = rq_offline_fair,
88ec22d3 10285
12695578 10286 .task_dead = task_dead_fair,
c5b28038 10287 .set_cpus_allowed = set_cpus_allowed_common,
681f3e68 10288#endif
bf0f6f24 10289
83b699ed 10290 .set_curr_task = set_curr_task_fair,
bf0f6f24 10291 .task_tick = task_tick_fair,
cd29fe6f 10292 .task_fork = task_fork_fair,
cb469845
SR
10293
10294 .prio_changed = prio_changed_fair,
da7a735e 10295 .switched_from = switched_from_fair,
cb469845 10296 .switched_to = switched_to_fair,
810b3817 10297
0d721cea
PW
10298 .get_rr_interval = get_rr_interval_fair,
10299
6e998916
SG
10300 .update_curr = update_curr_fair,
10301
810b3817 10302#ifdef CONFIG_FAIR_GROUP_SCHED
ea86cb4b 10303 .task_change_group = task_change_group_fair,
810b3817 10304#endif
bf0f6f24
IM
10305};
10306
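/*
 * Editor's illustrative sketch -- not part of fair.c.  The core scheduler
 * invokes these methods indirectly through the sched_class table rather
 * than via switch statements.  A minimal model of that dispatch pattern
 * (all names below are made up):
 */
struct example_ops {
	void (*enqueue)(int cpu);
	void (*dequeue)(int cpu);
};

static void example_enqueue(int cpu) { }
static void example_dequeue(int cpu) { }

static const struct example_ops example_fair_ops = {
	.enqueue = example_enqueue,
	.dequeue = example_dequeue,
};

static inline void example_dispatch(const struct example_ops *ops, int cpu)
{
	ops->enqueue(cpu);	/* indirect call, one class at a time */
}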
10307#ifdef CONFIG_SCHED_DEBUG
029632fb 10308void print_cfs_stats(struct seq_file *m, int cpu)
bf0f6f24 10309{
a9e7f654 10310 struct cfs_rq *cfs_rq, *pos;
bf0f6f24 10311
5973e5b9 10312 rcu_read_lock();
a9e7f654 10313 for_each_leaf_cfs_rq_safe(cpu_rq(cpu), cfs_rq, pos)
5cef9eca 10314 print_cfs_rq(m, cpu, cfs_rq);
5973e5b9 10315 rcu_read_unlock();
bf0f6f24 10316}
397f2378
SD
10317
10318#ifdef CONFIG_NUMA_BALANCING
10319void show_numa_stats(struct task_struct *p, struct seq_file *m)
10320{
10321 int node;
10322 unsigned long tsf = 0, tpf = 0, gsf = 0, gpf = 0;
10323
10324 for_each_online_node(node) {
10325 if (p->numa_faults) {
10326 tsf = p->numa_faults[task_faults_idx(NUMA_MEM, node, 0)];
10327 tpf = p->numa_faults[task_faults_idx(NUMA_MEM, node, 1)];
10328 }
10329 if (p->numa_group) {
10330 gsf = p->numa_group->faults[task_faults_idx(NUMA_MEM, node, 0)],
10331 gpf = p->numa_group->faults[task_faults_idx(NUMA_MEM, node, 1)];
10332 }
10333 print_numa_stats(m, node, tsf, tpf, gsf, gpf);
10334 }
10335}
10336#endif /* CONFIG_NUMA_BALANCING */
10337#endif /* CONFIG_SCHED_DEBUG */
029632fb
PZ
10338
10339__init void init_sched_fair_class(void)
10340{
10341#ifdef CONFIG_SMP
10342 open_softirq(SCHED_SOFTIRQ, run_rebalance_domains);
10343
3451d024 10344#ifdef CONFIG_NO_HZ_COMMON
554cecaf 10345 nohz.next_balance = jiffies;
f643ea22 10346 nohz.next_blocked = jiffies;
029632fb 10347 zalloc_cpumask_var(&nohz.idle_cpus_mask, GFP_NOWAIT);
029632fb
PZ
10348#endif
10349#endif /* SMP */
10350
10351}