/* kernel/sched/features.h — table of SCHED_FEAT() scheduler feature toggles. */
b2441318 1/* SPDX-License-Identifier: GPL-2.0 */
51e0304c
IM
2/*
3 * Only give sleepers 50% of their service deficit. This allows
4 * them to run sooner, but does not allow tons of sleepers to
5 * rip the spread apart.
6 */
f8b6d1cc 7SCHED_FEAT(GENTLE_FAIR_SLEEPERS, true)
e26af0e8 8
e26af0e8
PZ
9/*
10 * Place new tasks ahead so that they do not starve already running
11 * tasks
12 */
f8b6d1cc 13SCHED_FEAT(START_DEBIT, true)
e26af0e8 14
e26af0e8
PZ
15/*
16 * Prefer to schedule the task we woke last (assuming it failed
17 * wakeup-preemption), since its likely going to consume data we
18 * touched, increases cache locality.
19 */
f8b6d1cc 20SCHED_FEAT(NEXT_BUDDY, false)
e26af0e8
PZ
21
22/*
23 * Prefer to schedule the task that ran last (when we did
24 * wake-preempt) as that likely will touch the same data, increases
25 * cache locality.
26 */
f8b6d1cc 27SCHED_FEAT(LAST_BUDDY, true)
e26af0e8
PZ
28
29/*
3b03706f 30 * Consider buddies to be cache hot, decreases the likeliness of a
e26af0e8
PZ
31 * cache buddy being migrated away, increases cache locality.
32 */
f8b6d1cc 33SCHED_FEAT(CACHE_HOT_BUDDY, true)
e26af0e8 34
8ed92e51
IM
35/*
36 * Allow wakeup-time preemption of the current task:
37 */
38SCHED_FEAT(WAKEUP_PREEMPTION, true)
39
f8b6d1cc 40SCHED_FEAT(HRTICK, false)
e0ee463c 41SCHED_FEAT(HRTICK_DL, false)
f8b6d1cc 42SCHED_FEAT(DOUBLE_TICK, false)
e26af0e8 43
aa483808 44/*
5d4dfddd 45 * Decrement CPU capacity based on time not spent running tasks
aa483808 46 */
5d4dfddd 47SCHED_FEAT(NONTASK_CAPACITY, true)
317f3941
PZ
48
49/*
50 * Queue remote wakeups on the target CPU and process them
51 * using the scheduler IPI. Reduces rq->lock contention/bounces.
52 */
f8b6d1cc 53SCHED_FEAT(TTWU_QUEUE, true)
e3589f6c 54
4c77b18c
PZ
55/*
56 * When doing wakeups, attempt to limit superfluous scans of the LLC domain.
57 */
1ad3aaf3 58SCHED_FEAT(SIS_PROP, true)
4c77b18c 59
26ae58d2
PZ
60/*
61 * Issue a WARN when we do multiple update_rq_clock() calls
62 * in a single rq->lock section. Default disabled because the
63 * annotations are not complete.
64 */
65SCHED_FEAT(WARN_DOUBLE_CLOCK, false)
66
b6366f04
SR
67#ifdef HAVE_RT_PUSH_IPI
68/*
69 * In order to avoid a thundering herd attack of CPUs that are
70 * lowering their priorities at the same time, and there being
71 * a single CPU that has an RT task that can migrate and is waiting
72 * to run, where the other CPUs will try to take that CPUs
73 * rq lock and possibly create a large contention, sending an
74 * IPI to that CPU and let that CPU push the RT task to where
75 * it should go may be a better scenario.
76 */
77SCHED_FEAT(RT_PUSH_IPI, true)
78#endif
79
2586af1a 80SCHED_FEAT(RT_RUNTIME_SHARE, false)
eb95308e 81SCHED_FEAT(LB_MIN, false)
a9280514
PZ
82SCHED_FEAT(ATTACH_AGE_LOAD, true)
83
d153b153 84SCHED_FEAT(WA_IDLE, true)
f2cdd9cc
PZ
85SCHED_FEAT(WA_WEIGHT, true)
86SCHED_FEAT(WA_BIAS, true)
7f65ea42
PB
87
88/*
89 * UtilEstimation. Use estimated CPU utilization.
90 */
d519329f 91SCHED_FEAT(UTIL_EST, true)
b8c96361 92SCHED_FEAT(UTIL_EST_FASTUP, true)
0c2de3f0 93
c006fac5
PT
94SCHED_FEAT(LATENCY_WARN, false)
95
0c2de3f0
PZ
96SCHED_FEAT(ALT_PERIOD, true)
97SCHED_FEAT(BASE_SLICE, true)