Commit | Line | Data |
---|---|---|
aab03e05 DF |
1 | /* |
2 | * Deadline Scheduling Class (SCHED_DEADLINE) | |
3 | * | |
4 | * Earliest Deadline First (EDF) + Constant Bandwidth Server (CBS). | |
5 | * | |
6 | * Tasks that periodically execute their instances for less than their |
7 | * runtime won't miss any of their deadlines. |
8 | * Tasks that are not periodic or sporadic, or that try to execute more |
9 | * than their reserved bandwidth, will be slowed down (and may potentially |
10 | * miss some of their deadlines), and won't affect any other task. |
11 | * | |
12 | * Copyright (C) 2012 Dario Faggioli <raistlin@linux.it>, | |
1baca4ce | 13 | * Juri Lelli <juri.lelli@gmail.com>, |
aab03e05 DF |
14 | * Michael Trimarchi <michael@amarulasolutions.com>, |
15 | * Fabio Checconi <fchecconi@gmail.com> | |
16 | */ | |
17 | #include "sched.h" | |
18 | ||
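For context on how user space reaches this class: the sketch below creates a periodic reservation of 10ms every 100ms for the calling thread, assuming the sched_setattr() syscall interface that accompanies SCHED_DEADLINE. The locally defined struct sched_attr, the __NR_sched_setattr number taken from the installed headers, and the 10ms/100ms figures are illustrative assumptions, not part of this file; the call typically requires CAP_SYS_NICE.

```c
/*
 * Minimal user-space sketch (not kernel code): ask for 10ms of runtime
 * every 100ms, with the deadline equal to the period.
 */
#define _GNU_SOURCE
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>

#ifndef SCHED_DEADLINE
#define SCHED_DEADLINE	6
#endif

/* Mirrors the uapi struct sched_attr layout; defined here for brevity. */
struct sched_attr {
	uint32_t size;
	uint32_t sched_policy;
	uint64_t sched_flags;
	int32_t  sched_nice;
	uint32_t sched_priority;
	/* SCHED_DEADLINE parameters, in nanoseconds */
	uint64_t sched_runtime;
	uint64_t sched_deadline;
	uint64_t sched_period;
};

int main(void)
{
	struct sched_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.size	    = sizeof(attr);
	attr.sched_policy   = SCHED_DEADLINE;
	attr.sched_runtime  =  10 * 1000 * 1000;	/*  10ms */
	attr.sched_deadline = 100 * 1000 * 1000;	/* 100ms */
	attr.sched_period   = 100 * 1000 * 1000;	/* 100ms */

	if (syscall(__NR_sched_setattr, 0, &attr, 0)) {
		perror("sched_setattr");
		return 1;
	}

	/* From here on, this thread is handled by dl_sched_class. */
	return 0;
}
```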
aab03e05 DF |
19 | static inline struct task_struct *dl_task_of(struct sched_dl_entity *dl_se) |
20 | { | |
21 | return container_of(dl_se, struct task_struct, dl); | |
22 | } | |
23 | ||
24 | static inline struct rq *rq_of_dl_rq(struct dl_rq *dl_rq) | |
25 | { | |
26 | return container_of(dl_rq, struct rq, dl); | |
27 | } | |
28 | ||
29 | static inline struct dl_rq *dl_rq_of_se(struct sched_dl_entity *dl_se) | |
30 | { | |
31 | struct task_struct *p = dl_task_of(dl_se); | |
32 | struct rq *rq = task_rq(p); | |
33 | ||
34 | return &rq->dl; | |
35 | } | |
36 | ||
37 | static inline int on_dl_rq(struct sched_dl_entity *dl_se) | |
38 | { | |
39 | return !RB_EMPTY_NODE(&dl_se->rb_node); | |
40 | } | |
41 | ||
42 | static inline int is_leftmost(struct task_struct *p, struct dl_rq *dl_rq) | |
43 | { | |
44 | struct sched_dl_entity *dl_se = &p->dl; | |
45 | ||
46 | return dl_rq->rb_leftmost == &dl_se->rb_node; | |
47 | } | |
48 | ||
49 | void init_dl_rq(struct dl_rq *dl_rq, struct rq *rq) | |
50 | { | |
51 | dl_rq->rb_root = RB_ROOT; | |
1baca4ce JL |
52 | |
53 | #ifdef CONFIG_SMP | |
54 | /* zero means no -deadline tasks */ | |
55 | dl_rq->earliest_dl.curr = dl_rq->earliest_dl.next = 0; | |
56 | ||
57 | dl_rq->dl_nr_migratory = 0; | |
58 | dl_rq->overloaded = 0; | |
59 | dl_rq->pushable_dl_tasks_root = RB_ROOT; | |
60 | #endif | |
61 | } | |
62 | ||
63 | #ifdef CONFIG_SMP | |
64 | ||
65 | static inline int dl_overloaded(struct rq *rq) | |
66 | { | |
67 | return atomic_read(&rq->rd->dlo_count); | |
68 | } | |
69 | ||
70 | static inline void dl_set_overload(struct rq *rq) | |
71 | { | |
72 | if (!rq->online) | |
73 | return; | |
74 | ||
75 | cpumask_set_cpu(rq->cpu, rq->rd->dlo_mask); | |
76 | /* | |
77 | * Must be visible before the overload count is | |
78 | * set (as in sched_rt.c). | |
79 | * | |
80 | * Matched by the barrier in pull_dl_task(). | |
81 | */ | |
82 | smp_wmb(); | |
83 | atomic_inc(&rq->rd->dlo_count); | |
84 | } | |
85 | ||
86 | static inline void dl_clear_overload(struct rq *rq) | |
87 | { | |
88 | if (!rq->online) | |
89 | return; | |
90 | ||
91 | atomic_dec(&rq->rd->dlo_count); | |
92 | cpumask_clear_cpu(rq->cpu, rq->rd->dlo_mask); | |
93 | } | |
94 | ||
95 | static void update_dl_migration(struct dl_rq *dl_rq) | |
96 | { | |
97 | if (dl_rq->dl_nr_migratory && dl_rq->dl_nr_total > 1) { | |
98 | if (!dl_rq->overloaded) { | |
99 | dl_set_overload(rq_of_dl_rq(dl_rq)); | |
100 | dl_rq->overloaded = 1; | |
101 | } | |
102 | } else if (dl_rq->overloaded) { | |
103 | dl_clear_overload(rq_of_dl_rq(dl_rq)); | |
104 | dl_rq->overloaded = 0; | |
105 | } | |
106 | } | |
107 | ||
108 | static void inc_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq) | |
109 | { | |
110 | struct task_struct *p = dl_task_of(dl_se); | |
111 | dl_rq = &rq_of_dl_rq(dl_rq)->dl; | |
112 | ||
113 | dl_rq->dl_nr_total++; | |
114 | if (p->nr_cpus_allowed > 1) | |
115 | dl_rq->dl_nr_migratory++; | |
116 | ||
117 | update_dl_migration(dl_rq); | |
118 | } | |
119 | ||
120 | static void dec_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq) | |
121 | { | |
122 | struct task_struct *p = dl_task_of(dl_se); | |
123 | dl_rq = &rq_of_dl_rq(dl_rq)->dl; | |
124 | ||
125 | dl_rq->dl_nr_total--; | |
126 | if (p->nr_cpus_allowed > 1) | |
127 | dl_rq->dl_nr_migratory--; | |
128 | ||
129 | update_dl_migration(dl_rq); | |
130 | } | |
131 | ||
132 | /* | |
133 | * The list of pushable -deadline tasks is not a plist, like in |
134 | * sched_rt.c, it is an rb-tree with tasks ordered by deadline. | |
135 | */ | |
136 | static void enqueue_pushable_dl_task(struct rq *rq, struct task_struct *p) | |
137 | { | |
138 | struct dl_rq *dl_rq = &rq->dl; | |
139 | struct rb_node **link = &dl_rq->pushable_dl_tasks_root.rb_node; | |
140 | struct rb_node *parent = NULL; | |
141 | struct task_struct *entry; | |
142 | int leftmost = 1; | |
143 | ||
144 | BUG_ON(!RB_EMPTY_NODE(&p->pushable_dl_tasks)); | |
145 | ||
146 | while (*link) { | |
147 | parent = *link; | |
148 | entry = rb_entry(parent, struct task_struct, | |
149 | pushable_dl_tasks); | |
150 | if (dl_entity_preempt(&p->dl, &entry->dl)) | |
151 | link = &parent->rb_left; | |
152 | else { | |
153 | link = &parent->rb_right; | |
154 | leftmost = 0; | |
155 | } | |
156 | } | |
157 | ||
158 | if (leftmost) | |
159 | dl_rq->pushable_dl_tasks_leftmost = &p->pushable_dl_tasks; | |
160 | ||
161 | rb_link_node(&p->pushable_dl_tasks, parent, link); | |
162 | rb_insert_color(&p->pushable_dl_tasks, &dl_rq->pushable_dl_tasks_root); | |
aab03e05 DF |
163 | } |
164 | ||
1baca4ce JL |
165 | static void dequeue_pushable_dl_task(struct rq *rq, struct task_struct *p) |
166 | { | |
167 | struct dl_rq *dl_rq = &rq->dl; | |
168 | ||
169 | if (RB_EMPTY_NODE(&p->pushable_dl_tasks)) | |
170 | return; | |
171 | ||
172 | if (dl_rq->pushable_dl_tasks_leftmost == &p->pushable_dl_tasks) { | |
173 | struct rb_node *next_node; | |
174 | ||
175 | next_node = rb_next(&p->pushable_dl_tasks); | |
176 | dl_rq->pushable_dl_tasks_leftmost = next_node; | |
177 | } | |
178 | ||
179 | rb_erase(&p->pushable_dl_tasks, &dl_rq->pushable_dl_tasks_root); | |
180 | RB_CLEAR_NODE(&p->pushable_dl_tasks); | |
181 | } | |
182 | ||
183 | static inline int has_pushable_dl_tasks(struct rq *rq) | |
184 | { | |
185 | return !RB_EMPTY_ROOT(&rq->dl.pushable_dl_tasks_root); | |
186 | } | |
187 | ||
188 | static int push_dl_task(struct rq *rq); | |
189 | ||
190 | #else | |
191 | ||
192 | static inline | |
193 | void enqueue_pushable_dl_task(struct rq *rq, struct task_struct *p) | |
194 | { | |
195 | } | |
196 | ||
197 | static inline | |
198 | void dequeue_pushable_dl_task(struct rq *rq, struct task_struct *p) | |
199 | { | |
200 | } | |
201 | ||
202 | static inline | |
203 | void inc_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq) | |
204 | { | |
205 | } | |
206 | ||
207 | static inline | |
208 | void dec_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq) | |
209 | { | |
210 | } | |
211 | ||
212 | #endif /* CONFIG_SMP */ | |
213 | ||
aab03e05 DF |
214 | static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags); |
215 | static void __dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags); | |
216 | static void check_preempt_curr_dl(struct rq *rq, struct task_struct *p, | |
217 | int flags); | |
218 | ||
219 | /* | |
220 | * We are being explicitly informed that a new instance is starting, | |
221 | * and this means that: | |
222 | * - the absolute deadline of the entity has to be placed at | |
223 | * current time + relative deadline; | |
224 | * - the runtime of the entity has to be set to the maximum value. | |
225 | * | |
226 | * The capability of specifying such an event is useful whenever a -deadline |
227 | * entity wants to (try to!) synchronize its behaviour with the scheduler's | |
228 | * one, and to (try to!) reconcile itself with its own scheduling | |
229 | * parameters. | |
230 | */ | |
2d3d891d DF |
231 | static inline void setup_new_dl_entity(struct sched_dl_entity *dl_se, |
232 | struct sched_dl_entity *pi_se) | |
aab03e05 DF |
233 | { |
234 | struct dl_rq *dl_rq = dl_rq_of_se(dl_se); | |
235 | struct rq *rq = rq_of_dl_rq(dl_rq); | |
236 | ||
237 | WARN_ON(!dl_se->dl_new || dl_se->dl_throttled); | |
238 | ||
239 | /* | |
240 | * We use the regular wall clock time to set deadlines in the | |
241 | * future; in fact, we must consider execution overheads (time | |
242 | * spent in hardirq context, etc.). |
243 | */ | |
2d3d891d DF |
244 | dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline; |
245 | dl_se->runtime = pi_se->dl_runtime; | |
aab03e05 DF |
246 | dl_se->dl_new = 0; |
247 | } | |
248 | ||
249 | /* | |
250 | * Pure Earliest Deadline First (EDF) scheduling does not deal with the | |
251 | * possibility of an entity lasting more than what it declared, and thus |
252 | * exhausting its runtime. | |
253 | * | |
254 | * Here we are interested in making runtime overrun possible, but we do | |
255 | * not want an entity which is misbehaving to affect the scheduling of all |
256 | * other entities. | |
257 | * Therefore, a budgeting strategy called Constant Bandwidth Server (CBS) | |
258 | * is used, in order to confine each entity within its own bandwidth. | |
259 | * | |
260 | * This function deals exactly with that, and ensures that when the runtime | |
261 | * of an entity is replenished, its deadline is also postponed. That ensures |
262 | * the overrunning entity can't interfere with other entities in the system and |
263 | * can't make them miss their deadlines. Reasons why this kind of overrun |
264 | * could happen are, typically, an entity voluntarily trying to overcome its |
265 | * runtime, or it just underestimated it during sched_setscheduler_ex(). | |
266 | */ | |
2d3d891d DF |
267 | static void replenish_dl_entity(struct sched_dl_entity *dl_se, |
268 | struct sched_dl_entity *pi_se) | |
aab03e05 DF |
269 | { |
270 | struct dl_rq *dl_rq = dl_rq_of_se(dl_se); | |
271 | struct rq *rq = rq_of_dl_rq(dl_rq); | |
272 | ||
2d3d891d DF |
273 | BUG_ON(pi_se->dl_runtime <= 0); |
274 | ||
275 | /* | |
276 | * This could be the case for a !-dl task that is boosted. | |
277 | * Just go with full inherited parameters. | |
278 | */ | |
279 | if (dl_se->dl_deadline == 0) { | |
280 | dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline; | |
281 | dl_se->runtime = pi_se->dl_runtime; | |
282 | } | |
283 | ||
aab03e05 DF |
284 | /* |
285 | * We keep moving the deadline away until we get some | |
286 | * available runtime for the entity. This ensures correct | |
287 | * handling of situations where the runtime overrun is | |
288 | * arbitrarily large. |
289 | */ | |
290 | while (dl_se->runtime <= 0) { | |
2d3d891d DF |
291 | dl_se->deadline += pi_se->dl_period; |
292 | dl_se->runtime += pi_se->dl_runtime; | |
aab03e05 DF |
293 | } |
294 | ||
295 | /* | |
296 | * At this point, the deadline really should be "in | |
297 | * the future" with respect to rq->clock. If it's | |
298 | * not, we are, for some reason, lagging too much! | |
299 | * Anyway, after having warned userspace about that, |
300 | * we still try to keep things running by |
301 | * resetting the deadline and the budget of the | |
302 | * entity. | |
303 | */ | |
304 | if (dl_time_before(dl_se->deadline, rq_clock(rq))) { | |
305 | static bool lag_once = false; | |
306 | ||
307 | if (!lag_once) { | |
308 | lag_once = true; | |
309 | printk_sched("sched: DL replenish lagged to much\n"); | |
310 | } | |
2d3d891d DF |
311 | dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline; |
312 | dl_se->runtime = pi_se->dl_runtime; | |
aab03e05 DF |
313 | } |
314 | } | |
315 | ||
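To make the loop above concrete, here is a self-contained sketch with made-up numbers (plain C, not kernel code): a 3ms overrun against a 10ms/100ms reservation is absorbed by a single one-period postponement of the deadline.

```c
/*
 * Worked example of the CBS replenishment loop above, with made-up
 * values in nanoseconds (illustrative only, not kernel code).
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	int64_t  runtime  = -3000000;		/*  3ms overrun            */
	uint64_t deadline = 100000000;		/*  old absolute deadline  */
	const uint64_t dl_runtime = 10000000;	/* 10ms budget per period  */
	const uint64_t dl_period  = 100000000;	/* 100ms period            */

	/* Keep moving the deadline away until some runtime is available. */
	while (runtime <= 0) {
		deadline += dl_period;
		runtime  += dl_runtime;
	}

	/* One iteration: runtime = 7ms, deadline pushed out by one period. */
	printf("runtime=%lld deadline=%llu\n",
	       (long long)runtime, (unsigned long long)deadline);
	return 0;
}
```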
316 | /* | |
317 | * Here we check if --at time t-- an entity (which is probably being | |
318 | * [re]activated or, in general, enqueued) can use its remaining runtime | |
319 | * and its current deadline _without_ exceeding the bandwidth it is | |
320 | * assigned (function returns true if it can't). We are in fact applying | |
321 | * one of the CBS rules: when a task wakes up, if the residual runtime | |
322 | * over residual deadline fits within the allocated bandwidth, then we | |
323 | * can keep the current (absolute) deadline and residual budget without | |
324 | * disrupting the schedulability of the system. Otherwise, we should | |
325 | * refill the runtime and set the deadline a period in the future, | |
326 | * because keeping the current (absolute) deadline of the task would | |
327 | * result in breaking guarantees promised to other tasks. | |
328 | * | |
329 | * This function returns true if: | |
330 | * | |
755378a4 | 331 | * runtime / (deadline - t) > dl_runtime / dl_period , |
aab03e05 DF |
332 | * |
333 | * IOW we can't recycle current parameters. | |
755378a4 HG |
334 | * |
335 | * Notice that the bandwidth check is done against the period. For | |
336 | * tasks with deadline equal to period this is the same as using |
337 | * dl_deadline instead of dl_period in the equation above. | |
aab03e05 | 338 | */ |
2d3d891d DF |
339 | static bool dl_entity_overflow(struct sched_dl_entity *dl_se, |
340 | struct sched_dl_entity *pi_se, u64 t) | |
aab03e05 DF |
341 | { |
342 | u64 left, right; | |
343 | ||
344 | /* | |
345 | * left and right are the two sides of the equation above, | |
346 | * after a bit of shuffling to use multiplications instead | |
347 | * of divisions. | |
348 | * | |
349 | * Note that none of the time values involved in the two | |
350 | * multiplications are absolute: dl_deadline and dl_runtime | |
351 | * are the relative deadline and the maximum runtime of each | |
352 | * instance, runtime is the runtime left for the last instance | |
353 | * and (deadline - t), since t is rq->clock, is the time left | |
354 | * to the (absolute) deadline. Even if overflowing the u64 type | |
355 | * is very unlikely to occur in both cases, here we scale down | |
356 | * as we want to avoid that risk at all. Scaling down by 10 | |
357 | * means that we reduce granularity to 1us. We are fine with it, | |
358 | * since this is only a true/false check and, anyway, thinking | |
359 | * of anything below microseconds resolution is actually fiction | |
360 | * (but still we want to give the user that illusion >;). | |
361 | */ | |
2d3d891d DF |
362 | left = (pi_se->dl_period >> 10) * (dl_se->runtime >> 10); |
363 | right = ((dl_se->deadline - t) >> 10) * (pi_se->dl_runtime >> 10); | |
aab03e05 DF |
364 | |
365 | return dl_time_before(right, left); | |
366 | } | |
367 | ||
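A standalone numeric sketch of the check above (plain C with illustrative values, not kernel code): with 2ms of residual runtime, 10ms left to the deadline and a 5ms-every-20ms reservation, 2/10 <= 5/20, so the current deadline and budget can be kept.

```c
/*
 * Standalone sketch of the CBS wakeup check above, using the same
 * cross-multiplication and >>10 scaling; values are illustrative,
 * in nanoseconds (not kernel code).
 */
#include <stdio.h>
#include <stdint.h>

static int overflow_example(uint64_t dl_runtime, uint64_t dl_period,
			    uint64_t runtime, uint64_t time_to_deadline)
{
	uint64_t left  = (dl_period >> 10) * (runtime >> 10);
	uint64_t right = (time_to_deadline >> 10) * (dl_runtime >> 10);

	return right < left;	/* true: refill and push the deadline away */
}

int main(void)
{
	/* 5ms every 20ms reserved; 2ms budget left, 10ms to the deadline. */
	printf("%d\n", overflow_example(5000000ULL, 20000000ULL,
					2000000ULL, 10000000ULL));
	return 0;	/* prints 0: runtime/(deadline - t) fits the bandwidth */
}
```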
368 | /* | |
369 | * When a -deadline entity is queued back on the runqueue, its runtime and | |
370 | * deadline might need updating. | |
371 | * | |
372 | * The policy here is that we update the deadline of the entity only if: | |
373 | * - the current deadline is in the past, | |
374 | * - using the remaining runtime with the current deadline would make | |
375 | * the entity exceed its bandwidth. | |
376 | */ | |
2d3d891d DF |
377 | static void update_dl_entity(struct sched_dl_entity *dl_se, |
378 | struct sched_dl_entity *pi_se) | |
aab03e05 DF |
379 | { |
380 | struct dl_rq *dl_rq = dl_rq_of_se(dl_se); | |
381 | struct rq *rq = rq_of_dl_rq(dl_rq); | |
382 | ||
383 | /* | |
384 | * The arrival of a new instance needs special treatment, i.e., | |
385 | * the actual scheduling parameters have to be "renewed". | |
386 | */ | |
387 | if (dl_se->dl_new) { | |
2d3d891d | 388 | setup_new_dl_entity(dl_se, pi_se); |
aab03e05 DF |
389 | return; |
390 | } | |
391 | ||
392 | if (dl_time_before(dl_se->deadline, rq_clock(rq)) || | |
2d3d891d DF |
393 | dl_entity_overflow(dl_se, pi_se, rq_clock(rq))) { |
394 | dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline; | |
395 | dl_se->runtime = pi_se->dl_runtime; | |
aab03e05 DF |
396 | } |
397 | } | |
398 | ||
399 | /* | |
400 | * If the entity depleted all its runtime, and if we want it to sleep | |
401 | * while waiting for some new execution time to become available, we | |
402 | * set the bandwidth enforcement timer to the replenishment instant | |
403 | * and try to activate it. | |
404 | * | |
405 | * Notice that it is important for the caller to know if the timer | |
406 | * actually started or not (i.e., the replenishment instant is in | |
407 | * the future or in the past). | |
408 | */ | |
2d3d891d | 409 | static int start_dl_timer(struct sched_dl_entity *dl_se, bool boosted) |
aab03e05 DF |
410 | { |
411 | struct dl_rq *dl_rq = dl_rq_of_se(dl_se); | |
412 | struct rq *rq = rq_of_dl_rq(dl_rq); | |
413 | ktime_t now, act; | |
414 | ktime_t soft, hard; | |
415 | unsigned long range; | |
416 | s64 delta; | |
417 | ||
2d3d891d DF |
418 | if (boosted) |
419 | return 0; | |
aab03e05 DF |
420 | /* |
421 | * We want the timer to fire at the deadline, but consider |
422 | * that it is actually coming from rq->clock and not from |
423 | * hrtimer's time base reading. | |
424 | */ | |
425 | act = ns_to_ktime(dl_se->deadline); | |
426 | now = hrtimer_cb_get_time(&dl_se->dl_timer); | |
427 | delta = ktime_to_ns(now) - rq_clock(rq); | |
428 | act = ktime_add_ns(act, delta); | |
429 | ||
430 | /* | |
431 | * If the expiry time already passed, e.g., because the value | |
432 | * chosen as the deadline is too small, don't even try to | |
433 | * start the timer in the past! | |
434 | */ | |
435 | if (ktime_us_delta(act, now) < 0) | |
436 | return 0; | |
437 | ||
438 | hrtimer_set_expires(&dl_se->dl_timer, act); | |
439 | ||
440 | soft = hrtimer_get_softexpires(&dl_se->dl_timer); | |
441 | hard = hrtimer_get_expires(&dl_se->dl_timer); | |
442 | range = ktime_to_ns(ktime_sub(hard, soft)); | |
443 | __hrtimer_start_range_ns(&dl_se->dl_timer, soft, | |
444 | range, HRTIMER_MODE_ABS, 0); | |
445 | ||
446 | return hrtimer_active(&dl_se->dl_timer); | |
447 | } | |
448 | ||
449 | /* | |
450 | * This is the bandwidth enforcement timer callback. If here, we know | |
451 | * a task is not on its dl_rq, since the fact that the timer was running | |
452 | * means the task is throttled and needs a runtime replenishment. | |
453 | * | |
454 | * However, what we actually do depends on whether the task is active |
455 | * (it is on its rq) or has been removed from there by a call to |
456 | * dequeue_task_dl(). In the former case we must issue the runtime |
457 | * replenishment and add the task back to the dl_rq; in the latter, we just |
458 | * do nothing but clear dl_throttled, so that runtime and deadline |
459 | * updating (and the queueing back to dl_rq) will be done by the | |
460 | * next call to enqueue_task_dl(). | |
461 | */ | |
462 | static enum hrtimer_restart dl_task_timer(struct hrtimer *timer) | |
463 | { | |
464 | struct sched_dl_entity *dl_se = container_of(timer, | |
465 | struct sched_dl_entity, | |
466 | dl_timer); | |
467 | struct task_struct *p = dl_task_of(dl_se); | |
468 | struct rq *rq = task_rq(p); | |
469 | raw_spin_lock(&rq->lock); | |
470 | ||
471 | /* | |
472 | * We need to take care of possible races here. In fact, the |
473 | * task might have changed its scheduling policy to something | |
474 | * different from SCHED_DEADLINE or changed its reservation | |
475 | * parameters (through sched_setscheduler()). | |
476 | */ | |
477 | if (!dl_task(p) || dl_se->dl_new) | |
478 | goto unlock; | |
479 | ||
480 | sched_clock_tick(); | |
481 | update_rq_clock(rq); | |
482 | dl_se->dl_throttled = 0; | |
483 | if (p->on_rq) { | |
484 | enqueue_task_dl(rq, p, ENQUEUE_REPLENISH); | |
485 | if (task_has_dl_policy(rq->curr)) | |
486 | check_preempt_curr_dl(rq, p, 0); | |
487 | else | |
488 | resched_task(rq->curr); | |
1baca4ce JL |
489 | #ifdef CONFIG_SMP |
490 | /* | |
491 | * Queueing this task back might have overloaded rq, | |
492 | * check if we need to kick someone away. | |
493 | */ | |
494 | if (has_pushable_dl_tasks(rq)) | |
495 | push_dl_task(rq); | |
496 | #endif | |
aab03e05 DF |
497 | } |
498 | unlock: | |
499 | raw_spin_unlock(&rq->lock); | |
500 | ||
501 | return HRTIMER_NORESTART; | |
502 | } | |
503 | ||
504 | void init_dl_task_timer(struct sched_dl_entity *dl_se) | |
505 | { | |
506 | struct hrtimer *timer = &dl_se->dl_timer; | |
507 | ||
508 | if (hrtimer_active(timer)) { | |
509 | hrtimer_try_to_cancel(timer); | |
510 | return; | |
511 | } | |
512 | ||
513 | hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); | |
514 | timer->function = dl_task_timer; | |
515 | } | |
516 | ||
517 | static | |
518 | int dl_runtime_exceeded(struct rq *rq, struct sched_dl_entity *dl_se) | |
519 | { | |
520 | int dmiss = dl_time_before(dl_se->deadline, rq_clock(rq)); | |
521 | int rorun = dl_se->runtime <= 0; | |
522 | ||
523 | if (!rorun && !dmiss) | |
524 | return 0; | |
525 | ||
526 | /* | |
527 | * If we are beyond our current deadline and we are still | |
528 | * executing, then we have already used some of the runtime of | |
529 | * the next instance. Thus, if we do not account for that, we are |
530 | * stealing bandwidth from the system at each deadline miss! | |
531 | */ | |
532 | if (dmiss) { | |
533 | dl_se->runtime = rorun ? dl_se->runtime : 0; | |
534 | dl_se->runtime -= rq_clock(rq) - dl_se->deadline; | |
535 | } | |
536 | ||
537 | return 1; | |
538 | } | |
539 | ||
540 | /* | |
541 | * Update the current task's runtime statistics (provided it is still | |
542 | * a -deadline task and has not been removed from the dl_rq). | |
543 | */ | |
544 | static void update_curr_dl(struct rq *rq) | |
545 | { | |
546 | struct task_struct *curr = rq->curr; | |
547 | struct sched_dl_entity *dl_se = &curr->dl; | |
548 | u64 delta_exec; | |
549 | ||
550 | if (!dl_task(curr) || !on_dl_rq(dl_se)) | |
551 | return; | |
552 | ||
553 | /* | |
554 | * Consumed budget is computed considering the time as | |
555 | * observed by schedulable tasks (excluding time spent | |
556 | * in hardirq context, etc.). Deadlines are instead | |
557 | * computed using hard walltime. This seems to be the more | |
558 | * natural solution, but the full ramifications of this | |
559 | * approach need further study. | |
560 | */ | |
561 | delta_exec = rq_clock_task(rq) - curr->se.exec_start; | |
562 | if (unlikely((s64)delta_exec < 0)) | |
563 | delta_exec = 0; | |
564 | ||
565 | schedstat_set(curr->se.statistics.exec_max, | |
566 | max(curr->se.statistics.exec_max, delta_exec)); | |
567 | ||
568 | curr->se.sum_exec_runtime += delta_exec; | |
569 | account_group_exec_runtime(curr, delta_exec); | |
570 | ||
571 | curr->se.exec_start = rq_clock_task(rq); | |
572 | cpuacct_charge(curr, delta_exec); | |
573 | ||
239be4a9 DF |
574 | sched_rt_avg_update(rq, delta_exec); |
575 | ||
aab03e05 DF |
576 | dl_se->runtime -= delta_exec; |
577 | if (dl_runtime_exceeded(rq, dl_se)) { | |
578 | __dequeue_task_dl(rq, curr, 0); | |
2d3d891d | 579 | if (likely(start_dl_timer(dl_se, curr->dl.dl_boosted))) |
aab03e05 DF |
580 | dl_se->dl_throttled = 1; |
581 | else | |
582 | enqueue_task_dl(rq, curr, ENQUEUE_REPLENISH); | |
583 | ||
584 | if (!is_leftmost(curr, &rq->dl)) | |
585 | resched_task(curr); | |
586 | } | |
587 | } | |
588 | ||
1baca4ce JL |
589 | #ifdef CONFIG_SMP |
590 | ||
591 | static struct task_struct *pick_next_earliest_dl_task(struct rq *rq, int cpu); | |
592 | ||
593 | static inline u64 next_deadline(struct rq *rq) | |
594 | { | |
595 | struct task_struct *next = pick_next_earliest_dl_task(rq, rq->cpu); | |
596 | ||
597 | if (next && dl_prio(next->prio)) | |
598 | return next->dl.deadline; | |
599 | else | |
600 | return 0; | |
601 | } | |
602 | ||
603 | static void inc_dl_deadline(struct dl_rq *dl_rq, u64 deadline) | |
604 | { | |
605 | struct rq *rq = rq_of_dl_rq(dl_rq); | |
606 | ||
607 | if (dl_rq->earliest_dl.curr == 0 || | |
608 | dl_time_before(deadline, dl_rq->earliest_dl.curr)) { | |
609 | /* | |
610 | * If the dl_rq had no -deadline tasks, or if the new task | |
611 | * has a shorter deadline than the current one on dl_rq, we |
612 | * know that the previous earliest becomes our next earliest, | |
613 | * as the new task becomes the earliest itself. | |
614 | */ | |
615 | dl_rq->earliest_dl.next = dl_rq->earliest_dl.curr; | |
616 | dl_rq->earliest_dl.curr = deadline; | |
617 | } else if (dl_rq->earliest_dl.next == 0 || | |
618 | dl_time_before(deadline, dl_rq->earliest_dl.next)) { | |
619 | /* | |
620 | * On the other hand, if the new -deadline task has a | |
621 | * later deadline than the earliest one on dl_rq, but |
622 | * it is earlier than the next (if any), we must | |
623 | * recompute the next-earliest. | |
624 | */ | |
625 | dl_rq->earliest_dl.next = next_deadline(rq); | |
626 | } | |
627 | } | |
628 | ||
629 | static void dec_dl_deadline(struct dl_rq *dl_rq, u64 deadline) | |
630 | { | |
631 | struct rq *rq = rq_of_dl_rq(dl_rq); | |
632 | ||
633 | /* | |
634 | * Since we may have removed our earliest (and/or next earliest) | |
635 | * task we must recompute them. | |
636 | */ | |
637 | if (!dl_rq->dl_nr_running) { | |
638 | dl_rq->earliest_dl.curr = 0; | |
639 | dl_rq->earliest_dl.next = 0; | |
640 | } else { | |
641 | struct rb_node *leftmost = dl_rq->rb_leftmost; | |
642 | struct sched_dl_entity *entry; | |
643 | ||
644 | entry = rb_entry(leftmost, struct sched_dl_entity, rb_node); | |
645 | dl_rq->earliest_dl.curr = entry->deadline; | |
646 | dl_rq->earliest_dl.next = next_deadline(rq); | |
647 | } | |
648 | } | |
649 | ||
650 | #else | |
651 | ||
652 | static inline void inc_dl_deadline(struct dl_rq *dl_rq, u64 deadline) {} | |
653 | static inline void dec_dl_deadline(struct dl_rq *dl_rq, u64 deadline) {} | |
654 | ||
655 | #endif /* CONFIG_SMP */ | |
656 | ||
657 | static inline | |
658 | void inc_dl_tasks(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq) | |
659 | { | |
660 | int prio = dl_task_of(dl_se)->prio; | |
661 | u64 deadline = dl_se->deadline; | |
662 | ||
663 | WARN_ON(!dl_prio(prio)); | |
664 | dl_rq->dl_nr_running++; | |
665 | ||
666 | inc_dl_deadline(dl_rq, deadline); | |
667 | inc_dl_migration(dl_se, dl_rq); | |
668 | } | |
669 | ||
670 | static inline | |
671 | void dec_dl_tasks(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq) | |
672 | { | |
673 | int prio = dl_task_of(dl_se)->prio; | |
674 | ||
675 | WARN_ON(!dl_prio(prio)); | |
676 | WARN_ON(!dl_rq->dl_nr_running); | |
677 | dl_rq->dl_nr_running--; | |
678 | ||
679 | dec_dl_deadline(dl_rq, dl_se->deadline); | |
680 | dec_dl_migration(dl_se, dl_rq); | |
681 | } | |
682 | ||
aab03e05 DF |
683 | static void __enqueue_dl_entity(struct sched_dl_entity *dl_se) |
684 | { | |
685 | struct dl_rq *dl_rq = dl_rq_of_se(dl_se); | |
686 | struct rb_node **link = &dl_rq->rb_root.rb_node; | |
687 | struct rb_node *parent = NULL; | |
688 | struct sched_dl_entity *entry; | |
689 | int leftmost = 1; | |
690 | ||
691 | BUG_ON(!RB_EMPTY_NODE(&dl_se->rb_node)); | |
692 | ||
693 | while (*link) { | |
694 | parent = *link; | |
695 | entry = rb_entry(parent, struct sched_dl_entity, rb_node); | |
696 | if (dl_time_before(dl_se->deadline, entry->deadline)) | |
697 | link = &parent->rb_left; | |
698 | else { | |
699 | link = &parent->rb_right; | |
700 | leftmost = 0; | |
701 | } | |
702 | } | |
703 | ||
704 | if (leftmost) | |
705 | dl_rq->rb_leftmost = &dl_se->rb_node; | |
706 | ||
707 | rb_link_node(&dl_se->rb_node, parent, link); | |
708 | rb_insert_color(&dl_se->rb_node, &dl_rq->rb_root); | |
709 | ||
1baca4ce | 710 | inc_dl_tasks(dl_se, dl_rq); |
aab03e05 DF |
711 | } |
712 | ||
713 | static void __dequeue_dl_entity(struct sched_dl_entity *dl_se) | |
714 | { | |
715 | struct dl_rq *dl_rq = dl_rq_of_se(dl_se); | |
716 | ||
717 | if (RB_EMPTY_NODE(&dl_se->rb_node)) | |
718 | return; | |
719 | ||
720 | if (dl_rq->rb_leftmost == &dl_se->rb_node) { | |
721 | struct rb_node *next_node; | |
722 | ||
723 | next_node = rb_next(&dl_se->rb_node); | |
724 | dl_rq->rb_leftmost = next_node; | |
725 | } | |
726 | ||
727 | rb_erase(&dl_se->rb_node, &dl_rq->rb_root); | |
728 | RB_CLEAR_NODE(&dl_se->rb_node); | |
729 | ||
1baca4ce | 730 | dec_dl_tasks(dl_se, dl_rq); |
aab03e05 DF |
731 | } |
732 | ||
733 | static void | |
2d3d891d DF |
734 | enqueue_dl_entity(struct sched_dl_entity *dl_se, |
735 | struct sched_dl_entity *pi_se, int flags) | |
aab03e05 DF |
736 | { |
737 | BUG_ON(on_dl_rq(dl_se)); | |
738 | ||
739 | /* | |
740 | * If this is a wakeup or a new instance, the scheduling | |
741 | * parameters of the task might need updating. Otherwise, | |
742 | * we want a replenishment of its runtime. | |
743 | */ | |
744 | if (!dl_se->dl_new && flags & ENQUEUE_REPLENISH) | |
2d3d891d | 745 | replenish_dl_entity(dl_se, pi_se); |
aab03e05 | 746 | else |
2d3d891d | 747 | update_dl_entity(dl_se, pi_se); |
aab03e05 DF |
748 | |
749 | __enqueue_dl_entity(dl_se); | |
750 | } | |
751 | ||
752 | static void dequeue_dl_entity(struct sched_dl_entity *dl_se) | |
753 | { | |
754 | __dequeue_dl_entity(dl_se); | |
755 | } | |
756 | ||
757 | static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags) | |
758 | { | |
2d3d891d DF |
759 | struct task_struct *pi_task = rt_mutex_get_top_task(p); |
760 | struct sched_dl_entity *pi_se = &p->dl; | |
761 | ||
762 | /* | |
763 | * Use the scheduling parameters of the top pi-waiter | |
764 | * task if we have one and its (relative) deadline is | |
765 | * smaller than ours; otherwise we keep our runtime and |
766 | * deadline. | |
767 | */ | |
768 | if (pi_task && p->dl.dl_boosted && dl_prio(pi_task->normal_prio)) | |
769 | pi_se = &pi_task->dl; | |
770 | ||
aab03e05 DF |
771 | /* |
772 | * If p is throttled, we do nothing. In fact, if it exhausted | |
773 | * its budget it needs a replenishment and, since it now is on | |
774 | * its rq, the bandwidth timer callback (which clearly has not | |
775 | * run yet) will take care of this. | |
776 | */ | |
777 | if (p->dl.dl_throttled) | |
778 | return; | |
779 | ||
2d3d891d | 780 | enqueue_dl_entity(&p->dl, pi_se, flags); |
1baca4ce JL |
781 | |
782 | if (!task_current(rq, p) && p->nr_cpus_allowed > 1) | |
783 | enqueue_pushable_dl_task(rq, p); | |
784 | ||
aab03e05 DF |
785 | inc_nr_running(rq); |
786 | } | |
787 | ||
788 | static void __dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags) | |
789 | { | |
790 | dequeue_dl_entity(&p->dl); | |
1baca4ce | 791 | dequeue_pushable_dl_task(rq, p); |
aab03e05 DF |
792 | } |
793 | ||
794 | static void dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags) | |
795 | { | |
796 | update_curr_dl(rq); | |
797 | __dequeue_task_dl(rq, p, flags); | |
798 | ||
799 | dec_nr_running(rq); | |
800 | } | |
801 | ||
802 | /* | |
803 | * Yield task semantic for -deadline tasks is: | |
804 | * | |
805 | * get off the CPU until our next instance, with |
806 | * a new runtime. This is of little use now, since we | |
807 | * don't have a bandwidth reclaiming mechanism. Anyway, | |
808 | * bandwidth reclaiming is planned for the future, and | |
809 | * yield_task_dl will indicate that some spare budget | |
810 | * is available for other task instances to use. |
811 | */ | |
812 | static void yield_task_dl(struct rq *rq) | |
813 | { | |
814 | struct task_struct *p = rq->curr; | |
815 | ||
816 | /* | |
817 | * We make the task go to sleep until its current deadline by | |
818 | * forcing its runtime to zero. This way, update_curr_dl() stops | |
819 | * it and the bandwidth timer will wake it up and will give it | |
820 | * new scheduling parameters (thanks to dl_new=1). | |
821 | */ | |
822 | if (p->dl.runtime > 0) { | |
823 | rq->curr->dl.dl_new = 1; | |
824 | p->dl.runtime = 0; | |
825 | } | |
826 | update_curr_dl(rq); | |
827 | } | |
828 | ||
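From user space the intended pattern looks roughly like the sketch below (illustrative only; do_instance_work() is a placeholder name): a periodic -deadline task finishes the work of an instance early and calls sched_yield() to hand back the unused budget until the next instance.

```c
/*
 * User-space sketch of the yield semantic described above (illustrative
 * only, not kernel code). do_instance_work() stands in for the
 * application payload, expected to fit within the reserved runtime.
 */
#include <sched.h>

static void do_instance_work(void)
{
	/* application-specific work for one instance */
}

int main(void)
{
	/* Assumes the thread was already made SCHED_DEADLINE elsewhere. */
	for (;;) {
		do_instance_work();
		/*
		 * Done early: yield_task_dl() zeroes the remaining runtime,
		 * so the task sleeps until its next instance instead of
		 * burning the rest of the budget.
		 */
		sched_yield();
	}
	return 0;	/* never reached */
}
```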
1baca4ce JL |
829 | #ifdef CONFIG_SMP |
830 | ||
831 | static int find_later_rq(struct task_struct *task); | |
832 | static int latest_cpu_find(struct cpumask *span, | |
833 | struct task_struct *task, | |
834 | struct cpumask *later_mask); | |
835 | ||
836 | static int | |
837 | select_task_rq_dl(struct task_struct *p, int cpu, int sd_flag, int flags) | |
838 | { | |
839 | struct task_struct *curr; | |
840 | struct rq *rq; | |
841 | ||
842 | if (sd_flag != SD_BALANCE_WAKE && sd_flag != SD_BALANCE_FORK) | |
843 | goto out; | |
844 | ||
845 | rq = cpu_rq(cpu); | |
846 | ||
847 | rcu_read_lock(); | |
848 | curr = ACCESS_ONCE(rq->curr); /* unlocked access */ | |
849 | ||
850 | /* | |
851 | * If we are dealing with a -deadline task, we must | |
852 | * decide where to wake it up. | |
853 | * If it has a later deadline and the current task | |
854 | * on this rq can't move (provided the waking task | |
855 | * can!) we prefer to send it somewhere else. On the | |
856 | * other hand, if it has a shorter deadline, we | |
857 | * try to make it stay here, it might be important. | |
858 | */ | |
859 | if (unlikely(dl_task(curr)) && | |
860 | (curr->nr_cpus_allowed < 2 || | |
861 | !dl_entity_preempt(&p->dl, &curr->dl)) && | |
862 | (p->nr_cpus_allowed > 1)) { | |
863 | int target = find_later_rq(p); | |
864 | ||
865 | if (target != -1) | |
866 | cpu = target; | |
867 | } | |
868 | rcu_read_unlock(); | |
869 | ||
870 | out: | |
871 | return cpu; | |
872 | } | |
873 | ||
874 | static void check_preempt_equal_dl(struct rq *rq, struct task_struct *p) | |
875 | { | |
876 | /* | |
877 | * Current can't be migrated, useless to reschedule, | |
878 | * let's hope p can move out. | |
879 | */ | |
880 | if (rq->curr->nr_cpus_allowed == 1 || | |
881 | latest_cpu_find(rq->rd->span, rq->curr, NULL) == -1) | |
882 | return; | |
883 | ||
884 | /* | |
885 | * p is migratable, so let's not schedule it and | |
886 | * see if it is pushed or pulled somewhere else. | |
887 | */ | |
888 | if (p->nr_cpus_allowed != 1 && | |
889 | latest_cpu_find(rq->rd->span, p, NULL) != -1) | |
890 | return; | |
891 | ||
892 | resched_task(rq->curr); | |
893 | } | |
894 | ||
895 | #endif /* CONFIG_SMP */ | |
896 | ||
aab03e05 DF |
897 | /* |
898 | * Only called when both the current and waking task are -deadline | |
899 | * tasks. | |
900 | */ | |
901 | static void check_preempt_curr_dl(struct rq *rq, struct task_struct *p, | |
902 | int flags) | |
903 | { | |
1baca4ce | 904 | if (dl_entity_preempt(&p->dl, &rq->curr->dl)) { |
aab03e05 | 905 | resched_task(rq->curr); |
1baca4ce JL |
906 | return; |
907 | } | |
908 | ||
909 | #ifdef CONFIG_SMP | |
910 | /* | |
911 | * In the unlikely case current and p have the same deadline | |
912 | * let us try to decide what's the best thing to do... | |
913 | */ | |
914 | if ((s64)(p->dl.deadline - rq->curr->dl.deadline) == 0 && | |
915 | !need_resched()) | |
916 | check_preempt_equal_dl(rq, p); | |
917 | #endif /* CONFIG_SMP */ | |
aab03e05 DF |
918 | } |
919 | ||
920 | #ifdef CONFIG_SCHED_HRTICK | |
921 | static void start_hrtick_dl(struct rq *rq, struct task_struct *p) | |
922 | { | |
923 | s64 delta = p->dl.dl_runtime - p->dl.runtime; | |
924 | ||
925 | if (delta > 10000) | |
926 | hrtick_start(rq, p->dl.runtime); | |
927 | } | |
928 | #endif | |
929 | ||
930 | static struct sched_dl_entity *pick_next_dl_entity(struct rq *rq, | |
931 | struct dl_rq *dl_rq) | |
932 | { | |
933 | struct rb_node *left = dl_rq->rb_leftmost; | |
934 | ||
935 | if (!left) | |
936 | return NULL; | |
937 | ||
938 | return rb_entry(left, struct sched_dl_entity, rb_node); | |
939 | } | |
940 | ||
941 | struct task_struct *pick_next_task_dl(struct rq *rq) | |
942 | { | |
943 | struct sched_dl_entity *dl_se; | |
944 | struct task_struct *p; | |
945 | struct dl_rq *dl_rq; | |
946 | ||
947 | dl_rq = &rq->dl; | |
948 | ||
949 | if (unlikely(!dl_rq->dl_nr_running)) | |
950 | return NULL; | |
951 | ||
952 | dl_se = pick_next_dl_entity(rq, dl_rq); | |
953 | BUG_ON(!dl_se); | |
954 | ||
955 | p = dl_task_of(dl_se); | |
956 | p->se.exec_start = rq_clock_task(rq); | |
1baca4ce JL |
957 | |
958 | /* Running task will never be pushed. */ | |
959 | if (p) | |
960 | dequeue_pushable_dl_task(rq, p); | |
961 | ||
aab03e05 DF |
962 | #ifdef CONFIG_SCHED_HRTICK |
963 | if (hrtick_enabled(rq)) | |
964 | start_hrtick_dl(rq, p); | |
965 | #endif | |
1baca4ce JL |
966 | |
967 | #ifdef CONFIG_SMP | |
968 | rq->post_schedule = has_pushable_dl_tasks(rq); | |
969 | #endif /* CONFIG_SMP */ | |
970 | ||
aab03e05 DF |
971 | return p; |
972 | } | |
973 | ||
974 | static void put_prev_task_dl(struct rq *rq, struct task_struct *p) | |
975 | { | |
976 | update_curr_dl(rq); | |
1baca4ce JL |
977 | |
978 | if (on_dl_rq(&p->dl) && p->nr_cpus_allowed > 1) | |
979 | enqueue_pushable_dl_task(rq, p); | |
aab03e05 DF |
980 | } |
981 | ||
982 | static void task_tick_dl(struct rq *rq, struct task_struct *p, int queued) | |
983 | { | |
984 | update_curr_dl(rq); | |
985 | ||
986 | #ifdef CONFIG_SCHED_HRTICK | |
987 | if (hrtick_enabled(rq) && queued && p->dl.runtime > 0) | |
988 | start_hrtick_dl(rq, p); | |
989 | #endif | |
990 | } | |
991 | ||
992 | static void task_fork_dl(struct task_struct *p) | |
993 | { | |
994 | /* | |
995 | * SCHED_DEADLINE tasks cannot fork and this is achieved through | |
996 | * sched_fork() | |
997 | */ | |
998 | } | |
999 | ||
1000 | static void task_dead_dl(struct task_struct *p) | |
1001 | { | |
1002 | struct hrtimer *timer = &p->dl.dl_timer; | |
1003 | ||
2d3d891d | 1004 | hrtimer_cancel(timer); |
aab03e05 DF |
1005 | } |
1006 | ||
1007 | static void set_curr_task_dl(struct rq *rq) | |
1008 | { | |
1009 | struct task_struct *p = rq->curr; | |
1010 | ||
1011 | p->se.exec_start = rq_clock_task(rq); | |
1baca4ce JL |
1012 | |
1013 | /* You can't push away the running task */ | |
1014 | dequeue_pushable_dl_task(rq, p); | |
1015 | } | |
1016 | ||
1017 | #ifdef CONFIG_SMP | |
1018 | ||
1019 | /* Only try algorithms three times */ | |
1020 | #define DL_MAX_TRIES 3 | |
1021 | ||
1022 | static int pick_dl_task(struct rq *rq, struct task_struct *p, int cpu) | |
1023 | { | |
1024 | if (!task_running(rq, p) && | |
1025 | (cpu < 0 || cpumask_test_cpu(cpu, &p->cpus_allowed)) && | |
1026 | (p->nr_cpus_allowed > 1)) | |
1027 | return 1; | |
1028 | ||
1029 | return 0; | |
1030 | } | |
1031 | ||
1032 | /* Returns the second earliest -deadline task, NULL otherwise */ | |
1033 | static struct task_struct *pick_next_earliest_dl_task(struct rq *rq, int cpu) | |
1034 | { | |
1035 | struct rb_node *next_node = rq->dl.rb_leftmost; | |
1036 | struct sched_dl_entity *dl_se; | |
1037 | struct task_struct *p = NULL; | |
1038 | ||
1039 | next_node: | |
1040 | next_node = rb_next(next_node); | |
1041 | if (next_node) { | |
1042 | dl_se = rb_entry(next_node, struct sched_dl_entity, rb_node); | |
1043 | p = dl_task_of(dl_se); | |
1044 | ||
1045 | if (pick_dl_task(rq, p, cpu)) | |
1046 | return p; | |
1047 | ||
1048 | goto next_node; | |
1049 | } | |
1050 | ||
1051 | return NULL; | |
1052 | } | |
1053 | ||
1054 | static int latest_cpu_find(struct cpumask *span, | |
1055 | struct task_struct *task, | |
1056 | struct cpumask *later_mask) | |
1057 | { | |
1058 | const struct sched_dl_entity *dl_se = &task->dl; | |
1059 | int cpu, found = -1, best = 0; | |
1060 | u64 max_dl = 0; | |
1061 | ||
1062 | for_each_cpu(cpu, span) { | |
1063 | struct rq *rq = cpu_rq(cpu); | |
1064 | struct dl_rq *dl_rq = &rq->dl; | |
1065 | ||
1066 | if (cpumask_test_cpu(cpu, &task->cpus_allowed) && | |
1067 | (!dl_rq->dl_nr_running || dl_time_before(dl_se->deadline, | |
1068 | dl_rq->earliest_dl.curr))) { | |
1069 | if (later_mask) | |
1070 | cpumask_set_cpu(cpu, later_mask); | |
1071 | if (!best && !dl_rq->dl_nr_running) { | |
1072 | best = 1; | |
1073 | found = cpu; | |
1074 | } else if (!best && | |
1075 | dl_time_before(max_dl, | |
1076 | dl_rq->earliest_dl.curr)) { | |
1077 | max_dl = dl_rq->earliest_dl.curr; | |
1078 | found = cpu; | |
1079 | } | |
1080 | } else if (later_mask) | |
1081 | cpumask_clear_cpu(cpu, later_mask); | |
1082 | } | |
1083 | ||
1084 | return found; | |
1085 | } | |
1086 | ||
1087 | static DEFINE_PER_CPU(cpumask_var_t, local_cpu_mask_dl); | |
1088 | ||
1089 | static int find_later_rq(struct task_struct *task) | |
1090 | { | |
1091 | struct sched_domain *sd; | |
1092 | struct cpumask *later_mask = __get_cpu_var(local_cpu_mask_dl); | |
1093 | int this_cpu = smp_processor_id(); | |
1094 | int best_cpu, cpu = task_cpu(task); | |
1095 | ||
1096 | /* Make sure the mask is initialized first */ | |
1097 | if (unlikely(!later_mask)) | |
1098 | return -1; | |
1099 | ||
1100 | if (task->nr_cpus_allowed == 1) | |
1101 | return -1; | |
1102 | ||
1103 | best_cpu = latest_cpu_find(task_rq(task)->rd->span, task, later_mask); | |
1104 | if (best_cpu == -1) | |
1105 | return -1; | |
1106 | ||
1107 | /* | |
1108 | * If we are here, some target has been found; |
1109 | * the most suitable of them is cached in best_cpu. |
1110 | * Among the runqueues whose current tasks have later |
1111 | * deadlines than this task's, it is the one with the |
1112 | * latest possible deadline. |
1113 | * | |
1114 | * Now we check how well this matches with task's | |
1115 | * affinity and system topology. | |
1116 | * | |
1117 | * The last cpu where the task ran is our first |
1118 | * guess, since it is most likely cache-hot there. | |
1119 | */ | |
1120 | if (cpumask_test_cpu(cpu, later_mask)) | |
1121 | return cpu; | |
1122 | /* | |
1123 | * Check if this_cpu is to be skipped (i.e., it is | |
1124 | * not in the mask) or not. | |
1125 | */ | |
1126 | if (!cpumask_test_cpu(this_cpu, later_mask)) | |
1127 | this_cpu = -1; | |
1128 | ||
1129 | rcu_read_lock(); | |
1130 | for_each_domain(cpu, sd) { | |
1131 | if (sd->flags & SD_WAKE_AFFINE) { | |
1132 | ||
1133 | /* | |
1134 | * If possible, preempting this_cpu is | |
1135 | * cheaper than migrating. | |
1136 | */ | |
1137 | if (this_cpu != -1 && | |
1138 | cpumask_test_cpu(this_cpu, sched_domain_span(sd))) { | |
1139 | rcu_read_unlock(); | |
1140 | return this_cpu; | |
1141 | } | |
1142 | ||
1143 | /* | |
1144 | * Last chance: if best_cpu is valid and is | |
1145 | * in the mask, that becomes our choice. | |
1146 | */ | |
1147 | if (best_cpu < nr_cpu_ids && | |
1148 | cpumask_test_cpu(best_cpu, sched_domain_span(sd))) { | |
1149 | rcu_read_unlock(); | |
1150 | return best_cpu; | |
1151 | } | |
1152 | } | |
1153 | } | |
1154 | rcu_read_unlock(); | |
1155 | ||
1156 | /* | |
1157 | * At this point, all our guesses failed, we just return | |
1158 | * 'something', and let the caller sort things out. |
1159 | */ | |
1160 | if (this_cpu != -1) | |
1161 | return this_cpu; | |
1162 | ||
1163 | cpu = cpumask_any(later_mask); | |
1164 | if (cpu < nr_cpu_ids) | |
1165 | return cpu; | |
1166 | ||
1167 | return -1; | |
1168 | } | |
1169 | ||
1170 | /* Locks the rq it finds */ | |
1171 | static struct rq *find_lock_later_rq(struct task_struct *task, struct rq *rq) | |
1172 | { | |
1173 | struct rq *later_rq = NULL; | |
1174 | int tries; | |
1175 | int cpu; | |
1176 | ||
1177 | for (tries = 0; tries < DL_MAX_TRIES; tries++) { | |
1178 | cpu = find_later_rq(task); | |
1179 | ||
1180 | if ((cpu == -1) || (cpu == rq->cpu)) | |
1181 | break; | |
1182 | ||
1183 | later_rq = cpu_rq(cpu); | |
1184 | ||
1185 | /* Retry if something changed. */ | |
1186 | if (double_lock_balance(rq, later_rq)) { | |
1187 | if (unlikely(task_rq(task) != rq || | |
1188 | !cpumask_test_cpu(later_rq->cpu, | |
1189 | &task->cpus_allowed) || | |
1190 | task_running(rq, task) || !task->on_rq)) { | |
1191 | double_unlock_balance(rq, later_rq); | |
1192 | later_rq = NULL; | |
1193 | break; | |
1194 | } | |
1195 | } | |
1196 | ||
1197 | /* | |
1198 | * If the rq we found has no -deadline task, or | |
1199 | * its earliest one has a later deadline than our | |
1200 | * task, the rq is a good one. | |
1201 | */ | |
1202 | if (!later_rq->dl.dl_nr_running || | |
1203 | dl_time_before(task->dl.deadline, | |
1204 | later_rq->dl.earliest_dl.curr)) | |
1205 | break; | |
1206 | ||
1207 | /* Otherwise we try again. */ | |
1208 | double_unlock_balance(rq, later_rq); | |
1209 | later_rq = NULL; | |
1210 | } | |
1211 | ||
1212 | return later_rq; | |
1213 | } | |
1214 | ||
1215 | static struct task_struct *pick_next_pushable_dl_task(struct rq *rq) | |
1216 | { | |
1217 | struct task_struct *p; | |
1218 | ||
1219 | if (!has_pushable_dl_tasks(rq)) | |
1220 | return NULL; | |
1221 | ||
1222 | p = rb_entry(rq->dl.pushable_dl_tasks_leftmost, | |
1223 | struct task_struct, pushable_dl_tasks); | |
1224 | ||
1225 | BUG_ON(rq->cpu != task_cpu(p)); | |
1226 | BUG_ON(task_current(rq, p)); | |
1227 | BUG_ON(p->nr_cpus_allowed <= 1); | |
1228 | ||
1229 | BUG_ON(!p->se.on_rq); | |
1230 | BUG_ON(!dl_task(p)); | |
1231 | ||
1232 | return p; | |
1233 | } | |
1234 | ||
1235 | /* | |
1236 | * See if the non-running -deadline tasks on this rq |
1237 | * can be sent to some other CPU where they can preempt | |
1238 | * and start executing. | |
1239 | */ | |
1240 | static int push_dl_task(struct rq *rq) | |
1241 | { | |
1242 | struct task_struct *next_task; | |
1243 | struct rq *later_rq; | |
1244 | ||
1245 | if (!rq->dl.overloaded) | |
1246 | return 0; | |
1247 | ||
1248 | next_task = pick_next_pushable_dl_task(rq); | |
1249 | if (!next_task) | |
1250 | return 0; | |
1251 | ||
1252 | retry: | |
1253 | if (unlikely(next_task == rq->curr)) { | |
1254 | WARN_ON(1); | |
1255 | return 0; | |
1256 | } | |
1257 | ||
1258 | /* | |
1259 | * If next_task preempts rq->curr, and rq->curr | |
1260 | * can move away, it makes sense to just reschedule | |
1261 | * without going further in pushing next_task. | |
1262 | */ | |
1263 | if (dl_task(rq->curr) && | |
1264 | dl_time_before(next_task->dl.deadline, rq->curr->dl.deadline) && | |
1265 | rq->curr->nr_cpus_allowed > 1) { | |
1266 | resched_task(rq->curr); | |
1267 | return 0; | |
1268 | } | |
1269 | ||
1270 | /* We might release rq lock */ | |
1271 | get_task_struct(next_task); | |
1272 | ||
1273 | /* Will lock the rq it'll find */ | |
1274 | later_rq = find_lock_later_rq(next_task, rq); | |
1275 | if (!later_rq) { | |
1276 | struct task_struct *task; | |
1277 | ||
1278 | /* | |
1279 | * We must check all this again, since | |
1280 | * find_lock_later_rq releases rq->lock and it is | |
1281 | * then possible that next_task has migrated. | |
1282 | */ | |
1283 | task = pick_next_pushable_dl_task(rq); | |
1284 | if (task_cpu(next_task) == rq->cpu && task == next_task) { | |
1285 | /* | |
1286 | * The task is still there. We don't try | |
1287 | * again, some other cpu will pull it when ready. | |
1288 | */ | |
1289 | dequeue_pushable_dl_task(rq, next_task); | |
1290 | goto out; | |
1291 | } | |
1292 | ||
1293 | if (!task) | |
1294 | /* No more tasks */ | |
1295 | goto out; | |
1296 | ||
1297 | put_task_struct(next_task); | |
1298 | next_task = task; | |
1299 | goto retry; | |
1300 | } | |
1301 | ||
1302 | deactivate_task(rq, next_task, 0); | |
1303 | set_task_cpu(next_task, later_rq->cpu); | |
1304 | activate_task(later_rq, next_task, 0); | |
1305 | ||
1306 | resched_task(later_rq->curr); | |
1307 | ||
1308 | double_unlock_balance(rq, later_rq); | |
1309 | ||
1310 | out: | |
1311 | put_task_struct(next_task); | |
1312 | ||
1313 | return 1; | |
1314 | } | |
1315 | ||
1316 | static void push_dl_tasks(struct rq *rq) | |
1317 | { | |
1318 | /* Terminates as it moves a -deadline task */ | |
1319 | while (push_dl_task(rq)) | |
1320 | ; | |
aab03e05 DF |
1321 | } |
1322 | ||
1baca4ce JL |
1323 | static int pull_dl_task(struct rq *this_rq) |
1324 | { | |
1325 | int this_cpu = this_rq->cpu, ret = 0, cpu; | |
1326 | struct task_struct *p; | |
1327 | struct rq *src_rq; | |
1328 | u64 dmin = LONG_MAX; | |
1329 | ||
1330 | if (likely(!dl_overloaded(this_rq))) | |
1331 | return 0; | |
1332 | ||
1333 | /* | |
1334 | * Match the barrier from dl_set_overload(); this guarantees that if we |
1335 | * see overloaded we must also see the dlo_mask bit. | |
1336 | */ | |
1337 | smp_rmb(); | |
1338 | ||
1339 | for_each_cpu(cpu, this_rq->rd->dlo_mask) { | |
1340 | if (this_cpu == cpu) | |
1341 | continue; | |
1342 | ||
1343 | src_rq = cpu_rq(cpu); | |
1344 | ||
1345 | /* | |
1346 | * It looks racy, and it is! However, as in sched_rt.c, |
1347 | * we are fine with this. | |
1348 | */ | |
1349 | if (this_rq->dl.dl_nr_running && | |
1350 | dl_time_before(this_rq->dl.earliest_dl.curr, | |
1351 | src_rq->dl.earliest_dl.next)) | |
1352 | continue; | |
1353 | ||
1354 | /* Might drop this_rq->lock */ | |
1355 | double_lock_balance(this_rq, src_rq); | |
1356 | ||
1357 | /* | |
1358 | * If there are no more pullable tasks on the | |
1359 | * rq, we're done with it. | |
1360 | */ | |
1361 | if (src_rq->dl.dl_nr_running <= 1) | |
1362 | goto skip; | |
1363 | ||
1364 | p = pick_next_earliest_dl_task(src_rq, this_cpu); | |
1365 | ||
1366 | /* | |
1367 | * We found a task to be pulled if: | |
1368 | * - it preempts our current (if there's one), | |
1369 | * - it will preempt the last one we pulled (if any). | |
1370 | */ | |
1371 | if (p && dl_time_before(p->dl.deadline, dmin) && | |
1372 | (!this_rq->dl.dl_nr_running || | |
1373 | dl_time_before(p->dl.deadline, | |
1374 | this_rq->dl.earliest_dl.curr))) { | |
1375 | WARN_ON(p == src_rq->curr); | |
1376 | WARN_ON(!p->se.on_rq); | |
1377 | ||
1378 | /* | |
1379 | * Don't pull p if it has an earlier deadline than its rq's |
1380 | * current task: it is likely just waking up and will run there. |
1381 | */ | |
1382 | if (dl_time_before(p->dl.deadline, | |
1383 | src_rq->curr->dl.deadline)) | |
1384 | goto skip; | |
1385 | ||
1386 | ret = 1; | |
1387 | ||
1388 | deactivate_task(src_rq, p, 0); | |
1389 | set_task_cpu(p, this_cpu); | |
1390 | activate_task(this_rq, p, 0); | |
1391 | dmin = p->dl.deadline; | |
1392 | ||
1393 | /* Is there any other task even earlier? */ | |
1394 | } | |
1395 | skip: | |
1396 | double_unlock_balance(this_rq, src_rq); | |
1397 | } | |
1398 | ||
1399 | return ret; | |
1400 | } | |
1401 | ||
1402 | static void pre_schedule_dl(struct rq *rq, struct task_struct *prev) | |
1403 | { | |
1404 | /* Try to pull other tasks here */ | |
1405 | if (dl_task(prev)) | |
1406 | pull_dl_task(rq); | |
1407 | } | |
1408 | ||
1409 | static void post_schedule_dl(struct rq *rq) | |
1410 | { | |
1411 | push_dl_tasks(rq); | |
1412 | } | |
1413 | ||
1414 | /* | |
1415 | * Since the task is not running and a reschedule is not going to happen | |
1416 | * anytime soon on its runqueue, we try pushing it away now. | |
1417 | */ | |
1418 | static void task_woken_dl(struct rq *rq, struct task_struct *p) | |
1419 | { | |
1420 | if (!task_running(rq, p) && | |
1421 | !test_tsk_need_resched(rq->curr) && | |
1422 | has_pushable_dl_tasks(rq) && | |
1423 | p->nr_cpus_allowed > 1 && | |
1424 | dl_task(rq->curr) && | |
1425 | (rq->curr->nr_cpus_allowed < 2 || | |
1426 | dl_entity_preempt(&rq->curr->dl, &p->dl))) { | |
1427 | push_dl_tasks(rq); | |
1428 | } | |
1429 | } | |
1430 | ||
1431 | static void set_cpus_allowed_dl(struct task_struct *p, | |
1432 | const struct cpumask *new_mask) | |
1433 | { | |
1434 | struct rq *rq; | |
1435 | int weight; | |
1436 | ||
1437 | BUG_ON(!dl_task(p)); | |
1438 | ||
1439 | /* | |
1440 | * Update only if the task is actually running (i.e., | |
1441 | * it is on the rq AND it is not throttled). | |
1442 | */ | |
1443 | if (!on_dl_rq(&p->dl)) | |
1444 | return; | |
1445 | ||
1446 | weight = cpumask_weight(new_mask); | |
1447 | ||
1448 | /* | |
1449 | * Only update if the process changes whether or not it |
1450 | * can migrate. |
1451 | */ | |
1452 | if ((p->nr_cpus_allowed > 1) == (weight > 1)) | |
1453 | return; | |
1454 | ||
1455 | rq = task_rq(p); | |
1456 | ||
1457 | /* | |
1458 | * The process used to be able to migrate OR it can now migrate | |
1459 | */ | |
1460 | if (weight <= 1) { | |
1461 | if (!task_current(rq, p)) | |
1462 | dequeue_pushable_dl_task(rq, p); | |
1463 | BUG_ON(!rq->dl.dl_nr_migratory); | |
1464 | rq->dl.dl_nr_migratory--; | |
1465 | } else { | |
1466 | if (!task_current(rq, p)) | |
1467 | enqueue_pushable_dl_task(rq, p); | |
1468 | rq->dl.dl_nr_migratory++; | |
1469 | } | |
1470 | ||
1471 | update_dl_migration(&rq->dl); | |
1472 | } | |
1473 | ||
1474 | /* Assumes rq->lock is held */ | |
1475 | static void rq_online_dl(struct rq *rq) | |
1476 | { | |
1477 | if (rq->dl.overloaded) | |
1478 | dl_set_overload(rq); | |
1479 | } | |
1480 | ||
1481 | /* Assumes rq->lock is held */ | |
1482 | static void rq_offline_dl(struct rq *rq) | |
1483 | { | |
1484 | if (rq->dl.overloaded) | |
1485 | dl_clear_overload(rq); | |
1486 | } | |
1487 | ||
1488 | void init_sched_dl_class(void) | |
1489 | { | |
1490 | unsigned int i; | |
1491 | ||
1492 | for_each_possible_cpu(i) | |
1493 | zalloc_cpumask_var_node(&per_cpu(local_cpu_mask_dl, i), | |
1494 | GFP_KERNEL, cpu_to_node(i)); | |
1495 | } | |
1496 | ||
1497 | #endif /* CONFIG_SMP */ | |
1498 | ||
aab03e05 DF |
1499 | static void switched_from_dl(struct rq *rq, struct task_struct *p) |
1500 | { | |
1baca4ce | 1501 | if (hrtimer_active(&p->dl.dl_timer) && !dl_policy(p->policy)) |
aab03e05 | 1502 | hrtimer_try_to_cancel(&p->dl.dl_timer); |
1baca4ce JL |
1503 | |
1504 | #ifdef CONFIG_SMP | |
1505 | /* | |
1506 | * Since this might be the only -deadline task on the rq, | |
1507 | * this is the right place to try to pull some other one | |
1508 | * from an overloaded cpu, if any. | |
1509 | */ | |
1510 | if (!rq->dl.dl_nr_running) | |
1511 | pull_dl_task(rq); | |
1512 | #endif | |
aab03e05 DF |
1513 | } |
1514 | ||
1baca4ce JL |
1515 | /* |
1516 | * When switching to -deadline, we may overload the rq, then | |
1517 | * we try to push someone off, if possible. | |
1518 | */ | |
aab03e05 DF |
1519 | static void switched_to_dl(struct rq *rq, struct task_struct *p) |
1520 | { | |
1baca4ce JL |
1521 | int check_resched = 1; |
1522 | ||
aab03e05 DF |
1523 | /* |
1524 | * If p is throttled, don't consider the possibility | |
1525 | * of preempting rq->curr; the check will be done right |
1526 | * after its runtime gets replenished. |
1527 | */ | |
1528 | if (unlikely(p->dl.dl_throttled)) | |
1529 | return; | |
1530 | ||
1531 | if (p->on_rq || rq->curr != p) { | |
1baca4ce JL |
1532 | #ifdef CONFIG_SMP |
1533 | if (rq->dl.overloaded && push_dl_task(rq) && rq != task_rq(p)) | |
1534 | /* Only reschedule if pushing failed */ | |
1535 | check_resched = 0; | |
1536 | #endif /* CONFIG_SMP */ | |
1537 | if (check_resched && task_has_dl_policy(rq->curr)) | |
aab03e05 | 1538 | check_preempt_curr_dl(rq, p, 0); |
aab03e05 DF |
1539 | } |
1540 | } | |
1541 | ||
1baca4ce JL |
1542 | /* |
1543 | * If the scheduling parameters of a -deadline task changed, | |
1544 | * a push or pull operation might be needed. | |
1545 | */ | |
aab03e05 DF |
1546 | static void prio_changed_dl(struct rq *rq, struct task_struct *p, |
1547 | int oldprio) | |
1548 | { | |
1baca4ce | 1549 | if (p->on_rq || rq->curr == p) { |
aab03e05 | 1550 | #ifdef CONFIG_SMP |
1baca4ce JL |
1551 | /* |
1552 | * This might be too much, but unfortunately | |
1553 | * we don't have the old deadline value, and | |
1554 | * we can't tell if the task is increasing |
1555 | * or lowering its prio, so... | |
1556 | */ | |
1557 | if (!rq->dl.overloaded) | |
1558 | pull_dl_task(rq); | |
1559 | ||
1560 | /* | |
1561 | * If we now have an earlier deadline task than p, |
1562 | * then reschedule, provided p is still on this | |
1563 | * runqueue. | |
1564 | */ | |
1565 | if (dl_time_before(rq->dl.earliest_dl.curr, p->dl.deadline) && | |
1566 | rq->curr == p) | |
1567 | resched_task(p); | |
1568 | #else | |
1569 | /* | |
1570 | * Again, we don't know if p has an earlier |
1571 | * or later deadline, so let's blindly set a | |
1572 | * (maybe not needed) rescheduling point. | |
1573 | */ | |
1574 | resched_task(p); | |
1575 | #endif /* CONFIG_SMP */ | |
1576 | } else | |
1577 | switched_to_dl(rq, p); | |
aab03e05 | 1578 | } |
aab03e05 DF |
1579 | |
1580 | const struct sched_class dl_sched_class = { | |
1581 | .next = &rt_sched_class, | |
1582 | .enqueue_task = enqueue_task_dl, | |
1583 | .dequeue_task = dequeue_task_dl, | |
1584 | .yield_task = yield_task_dl, | |
1585 | ||
1586 | .check_preempt_curr = check_preempt_curr_dl, | |
1587 | ||
1588 | .pick_next_task = pick_next_task_dl, | |
1589 | .put_prev_task = put_prev_task_dl, | |
1590 | ||
1591 | #ifdef CONFIG_SMP | |
1592 | .select_task_rq = select_task_rq_dl, | |
1baca4ce JL |
1593 | .set_cpus_allowed = set_cpus_allowed_dl, |
1594 | .rq_online = rq_online_dl, | |
1595 | .rq_offline = rq_offline_dl, | |
1596 | .pre_schedule = pre_schedule_dl, | |
1597 | .post_schedule = post_schedule_dl, | |
1598 | .task_woken = task_woken_dl, | |
aab03e05 DF |
1599 | #endif |
1600 | ||
1601 | .set_curr_task = set_curr_task_dl, | |
1602 | .task_tick = task_tick_dl, | |
1603 | .task_fork = task_fork_dl, | |
1604 | .task_dead = task_dead_dl, | |
1605 | ||
1606 | .prio_changed = prio_changed_dl, | |
1607 | .switched_from = switched_from_dl, | |
1608 | .switched_to = switched_to_dl, | |
1609 | }; |