/* drivers/gpu/drm/scheduler/sched_main.c */

/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

/**
 * DOC: Overview
 *
 * The GPU scheduler provides entities which allow userspace to push jobs
 * into software queues which are then scheduled on a hardware run queue.
 * The software queues have a priority among them. The scheduler selects the
 * entities from the run queue using a FIFO. The scheduler provides dependency
 * handling features among jobs. The driver is supposed to provide callback
 * functions for backend operations to the scheduler, like submitting a job to
 * the hardware run queue, returning the dependencies of a job, etc.
 *
 * The organisation of the scheduler is the following:
 *
 * 1. Each hw run queue has one scheduler
 * 2. Each scheduler has multiple run queues with different priorities
 *    (e.g., HIGH_HW, HIGH_SW, KERNEL, NORMAL)
 * 3. Each scheduler run queue has a queue of entities to schedule
 * 4. Entities themselves maintain a queue of jobs that will be scheduled on
 *    the hardware.
 *
 * The jobs in an entity are always scheduled in the order in which they were
 * pushed.
 *
 * Note that once a job has been taken from the entity's queue and pushed to
 * the hardware, i.e. the pending queue, the entity must not be referenced
 * any more through the job's entity pointer.
 */

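/*
 * To make the overview concrete, here is a minimal sketch of the driver
 * side of the flow above. It is a hedged illustration, not scheduler code:
 * everything prefixed with foo_ (and the ctx/fjob variables) is hypothetical,
 * while the drm_sched_*() calls are the real entry points implemented in
 * this file and in drm_sched_entity.c.
 */
#if 0
/* 1. One scheduler per hardware run queue, created at device init: */
drm_sched_init(&foo->sched, &foo_sched_ops, FOO_HW_SUBMISSION_LIMIT,
	       FOO_HANG_LIMIT, msecs_to_jiffies(500), NULL, NULL,
	       "foo_ring0", foo->dev);

/* 2. One entity per userspace context, i.e. per software queue: */
struct drm_gpu_scheduler *sched_list[] = { &foo->sched };

drm_sched_entity_init(&ctx->entity, DRM_SCHED_PRIORITY_NORMAL,
		      sched_list, ARRAY_SIZE(sched_list), NULL);

/* 3. Jobs are pushed into the entity and flow to the hardware run queue: */
drm_sched_job_init(&fjob->base, &ctx->entity, ctx);
drm_sched_job_arm(&fjob->base);
drm_sched_entity_push_job(&fjob->base);
#endif
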
#include <linux/kthread.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/completion.h>
#include <linux/dma-resv.h>
#include <uapi/linux/sched/types.h>

#include <drm/drm_print.h>
#include <drm/drm_gem.h>
#include <drm/drm_syncobj.h>
#include <drm/gpu_scheduler.h>
#include <drm/spsc_queue.h>

#define CREATE_TRACE_POINTS
#include "gpu_scheduler_trace.h"

#define to_drm_sched_job(sched_job)		\
		container_of((sched_job), struct drm_sched_job, queue_node)

int drm_sched_policy = DRM_SCHED_POLICY_FIFO;

/**
 * DOC: sched_policy (int)
 * Used to override the default entity scheduling policy in a run queue.
 */
MODULE_PARM_DESC(sched_policy, "Specify the scheduling policy for entities on a run-queue, " __stringify(DRM_SCHED_POLICY_RR) " = Round Robin, " __stringify(DRM_SCHED_POLICY_FIFO) " = FIFO (default).");
module_param_named(sched_policy, drm_sched_policy, int, 0444);

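/*
 * For example, round-robin scheduling can be restored at module load time.
 * This assumes the scheduler is built as the gpu_sched module; adjust the
 * name if it is built in:
 *
 *   modprobe gpu_sched sched_policy=0
 *   (or on the kernel command line: gpu_sched.sched_policy=0)
 */
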
static __always_inline bool drm_sched_entity_compare_before(struct rb_node *a,
							    const struct rb_node *b)
{
	struct drm_sched_entity *ent_a = rb_entry((a), struct drm_sched_entity, rb_tree_node);
	struct drm_sched_entity *ent_b = rb_entry((b), struct drm_sched_entity, rb_tree_node);

	return ktime_before(ent_a->oldest_job_waiting, ent_b->oldest_job_waiting);
}

static inline void drm_sched_rq_remove_fifo_locked(struct drm_sched_entity *entity)
{
	struct drm_sched_rq *rq = entity->rq;

	if (!RB_EMPTY_NODE(&entity->rb_tree_node)) {
		rb_erase_cached(&entity->rb_tree_node, &rq->rb_tree_root);
		RB_CLEAR_NODE(&entity->rb_tree_node);
	}
}

void drm_sched_rq_update_fifo(struct drm_sched_entity *entity, ktime_t ts)
{
	/*
	 * Both locks need to be grabbed, one to protect from entity->rq change
	 * for entity from within concurrent drm_sched_entity_select_rq and the
	 * other to update the rb tree structure.
	 */
	spin_lock(&entity->rq_lock);
	spin_lock(&entity->rq->lock);

	drm_sched_rq_remove_fifo_locked(entity);

	entity->oldest_job_waiting = ts;

	rb_add_cached(&entity->rb_tree_node, &entity->rq->rb_tree_root,
		      drm_sched_entity_compare_before);

	spin_unlock(&entity->rq->lock);
	spin_unlock(&entity->rq_lock);
}

/**
 * drm_sched_rq_init - initialize a given run queue struct
 *
 * @sched: scheduler instance to associate with this run queue
 * @rq: scheduler run queue
 *
 * Initializes a scheduler runqueue.
 */
static void drm_sched_rq_init(struct drm_gpu_scheduler *sched,
			      struct drm_sched_rq *rq)
{
	spin_lock_init(&rq->lock);
	INIT_LIST_HEAD(&rq->entities);
	rq->rb_tree_root = RB_ROOT_CACHED;
	rq->current_entity = NULL;
	rq->sched = sched;
}

/**
 * drm_sched_rq_add_entity - add an entity
 *
 * @rq: scheduler run queue
 * @entity: scheduler entity
 *
 * Adds a scheduler entity to the run queue.
 */
void drm_sched_rq_add_entity(struct drm_sched_rq *rq,
			     struct drm_sched_entity *entity)
{
	if (!list_empty(&entity->list))
		return;

	spin_lock(&rq->lock);

	atomic_inc(rq->sched->score);
	list_add_tail(&entity->list, &rq->entities);

	spin_unlock(&rq->lock);
}

/**
 * drm_sched_rq_remove_entity - remove an entity
 *
 * @rq: scheduler run queue
 * @entity: scheduler entity
 *
 * Removes a scheduler entity from the run queue.
 */
void drm_sched_rq_remove_entity(struct drm_sched_rq *rq,
				struct drm_sched_entity *entity)
{
	if (list_empty(&entity->list))
		return;

	spin_lock(&rq->lock);

	atomic_dec(rq->sched->score);
	list_del_init(&entity->list);

	if (rq->current_entity == entity)
		rq->current_entity = NULL;

	if (drm_sched_policy == DRM_SCHED_POLICY_FIFO)
		drm_sched_rq_remove_fifo_locked(entity);

	spin_unlock(&rq->lock);
}

/**
 * drm_sched_rq_select_entity_rr - Select an entity which could provide a job to run
 *
 * @rq: scheduler run queue to check.
 *
 * Try to find a ready entity, returns NULL if none found.
 */
static struct drm_sched_entity *
drm_sched_rq_select_entity_rr(struct drm_sched_rq *rq)
{
	struct drm_sched_entity *entity;

	spin_lock(&rq->lock);

	entity = rq->current_entity;
	if (entity) {
		list_for_each_entry_continue(entity, &rq->entities, list) {
			if (drm_sched_entity_is_ready(entity)) {
				rq->current_entity = entity;
				reinit_completion(&entity->entity_idle);
				spin_unlock(&rq->lock);
				return entity;
			}
		}
	}

	list_for_each_entry(entity, &rq->entities, list) {

		if (drm_sched_entity_is_ready(entity)) {
			rq->current_entity = entity;
			reinit_completion(&entity->entity_idle);
			spin_unlock(&rq->lock);
			return entity;
		}

		if (entity == rq->current_entity)
			break;
	}

	spin_unlock(&rq->lock);

	return NULL;
}

/**
 * drm_sched_rq_select_entity_fifo - Select an entity which provides a job to run
 *
 * @rq: scheduler run queue to check.
 *
 * Find oldest waiting ready entity, returns NULL if none found.
 */
static struct drm_sched_entity *
drm_sched_rq_select_entity_fifo(struct drm_sched_rq *rq)
{
	struct rb_node *rb;

	spin_lock(&rq->lock);
	for (rb = rb_first_cached(&rq->rb_tree_root); rb; rb = rb_next(rb)) {
		struct drm_sched_entity *entity;

		entity = rb_entry(rb, struct drm_sched_entity, rb_tree_node);
		if (drm_sched_entity_is_ready(entity)) {
			rq->current_entity = entity;
			reinit_completion(&entity->entity_idle);
			break;
		}
	}
	spin_unlock(&rq->lock);

	return rb ? rb_entry(rb, struct drm_sched_entity, rb_tree_node) : NULL;
}

/**
 * drm_sched_job_done - complete a job
 * @s_job: pointer to the job which is done
 *
 * Finish the job's fence and wake up the worker thread.
 */
static void drm_sched_job_done(struct drm_sched_job *s_job, int result)
{
	struct drm_sched_fence *s_fence = s_job->s_fence;
	struct drm_gpu_scheduler *sched = s_fence->sched;

	atomic_dec(&sched->hw_rq_count);
	atomic_dec(sched->score);

	trace_drm_sched_process_job(s_fence);

	dma_fence_get(&s_fence->finished);
	drm_sched_fence_finished(s_fence, result);
	dma_fence_put(&s_fence->finished);
	wake_up_interruptible(&sched->wake_up_worker);
}

/**
 * drm_sched_job_done_cb - the callback for a done job
 * @f: fence
 * @cb: fence callbacks
 */
static void drm_sched_job_done_cb(struct dma_fence *f, struct dma_fence_cb *cb)
{
	struct drm_sched_job *s_job = container_of(cb, struct drm_sched_job, cb);

	drm_sched_job_done(s_job, f->error);
}

/**
 * drm_sched_start_timeout - start timeout for reset worker
 *
 * @sched: scheduler instance to start the worker for
 *
 * Start the timeout for the given scheduler.
 */
static void drm_sched_start_timeout(struct drm_gpu_scheduler *sched)
{
	if (sched->timeout != MAX_SCHEDULE_TIMEOUT &&
	    !list_empty(&sched->pending_list))
		queue_delayed_work(sched->timeout_wq, &sched->work_tdr, sched->timeout);
}

/**
 * drm_sched_fault - immediately start timeout handler
 *
 * @sched: scheduler where the timeout handling should be started.
 *
 * Start timeout handling immediately when the driver detects a hardware fault.
 */
void drm_sched_fault(struct drm_gpu_scheduler *sched)
{
	if (sched->timeout_wq)
		mod_delayed_work(sched->timeout_wq, &sched->work_tdr, 0);
}
EXPORT_SYMBOL(drm_sched_fault);

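/*
 * A hedged sketch of a typical drm_sched_fault() caller: a driver's error
 * interrupt kicks the timeout handler immediately instead of waiting for
 * the job timeout to expire. The foo_ names and FOO_STATUS_FAULT are
 * hypothetical.
 */
#if 0
static irqreturn_t foo_irq_handler(int irq, void *data)
{
	struct foo_device *foo = data;

	if (foo_read_status(foo) & FOO_STATUS_FAULT) {
		/* Queues sched->work_tdr with no delay. */
		drm_sched_fault(&foo->sched);
		return IRQ_HANDLED;
	}

	return IRQ_NONE;
}
#endif
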
/**
 * drm_sched_suspend_timeout - Suspend scheduler job timeout
 *
 * @sched: scheduler instance for which to suspend the timeout
 *
 * Suspend the delayed work timeout for the scheduler. This is done by
 * modifying the delayed work timeout to an arbitrarily large value,
 * MAX_SCHEDULE_TIMEOUT in this case.
 *
 * Returns the timeout remaining
 */
unsigned long drm_sched_suspend_timeout(struct drm_gpu_scheduler *sched)
{
	unsigned long sched_timeout, now = jiffies;

	sched_timeout = sched->work_tdr.timer.expires;

	/*
	 * Modify the timeout to an arbitrarily large value. This also prevents
	 * the timeout from being restarted when new submissions arrive.
	 */
	if (mod_delayed_work(sched->timeout_wq, &sched->work_tdr, MAX_SCHEDULE_TIMEOUT)
			&& time_after(sched_timeout, now))
		return sched_timeout - now;
	else
		return sched->timeout;
}
EXPORT_SYMBOL(drm_sched_suspend_timeout);

/**
 * drm_sched_resume_timeout - Resume scheduler job timeout
 *
 * @sched: scheduler instance for which to resume the timeout
 * @remaining: remaining timeout
 *
 * Resume the delayed work timeout for the scheduler.
 */
void drm_sched_resume_timeout(struct drm_gpu_scheduler *sched,
			      unsigned long remaining)
{
	spin_lock(&sched->job_list_lock);

	if (list_empty(&sched->pending_list))
		cancel_delayed_work(&sched->work_tdr);
	else
		mod_delayed_work(sched->timeout_wq, &sched->work_tdr, remaining);

	spin_unlock(&sched->job_list_lock);
}
EXPORT_SYMBOL(drm_sched_resume_timeout);

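/*
 * The two calls above are meant to be used as a pair around a window in
 * which the hardware intentionally makes no progress (e.g. a firmware
 * reload), so that the stalled jobs are not flagged as timed out. A hedged
 * sketch with hypothetical foo_ helpers:
 */
#if 0
static void foo_reload_firmware(struct foo_device *foo)
{
	unsigned long remaining;

	remaining = drm_sched_suspend_timeout(&foo->sched);
	foo_do_firmware_reload(foo);
	drm_sched_resume_timeout(&foo->sched, remaining);
}
#endif
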
static void drm_sched_job_begin(struct drm_sched_job *s_job)
{
	struct drm_gpu_scheduler *sched = s_job->sched;

	spin_lock(&sched->job_list_lock);
	list_add_tail(&s_job->list, &sched->pending_list);
	drm_sched_start_timeout(sched);
	spin_unlock(&sched->job_list_lock);
}

static void drm_sched_job_timedout(struct work_struct *work)
{
	struct drm_gpu_scheduler *sched;
	struct drm_sched_job *job;
	enum drm_gpu_sched_stat status = DRM_GPU_SCHED_STAT_NOMINAL;

	sched = container_of(work, struct drm_gpu_scheduler, work_tdr.work);

	/* Protects against concurrent deletion in drm_sched_get_cleanup_job */
	spin_lock(&sched->job_list_lock);
	job = list_first_entry_or_null(&sched->pending_list,
				       struct drm_sched_job, list);

	if (job) {
		/*
		 * Remove the bad job so it cannot be freed by a concurrent
		 * drm_sched_cleanup_jobs. It will be reinserted after
		 * sched->thread is parked, at which point it is safe.
		 */
		list_del_init(&job->list);
		spin_unlock(&sched->job_list_lock);

		status = job->sched->ops->timedout_job(job);

		/*
		 * Guilty job did complete and hence needs to be manually removed.
		 * See the drm_sched_stop documentation.
		 */
		if (sched->free_guilty) {
			job->sched->ops->free_job(job);
			sched->free_guilty = false;
		}
	} else {
		spin_unlock(&sched->job_list_lock);
	}

	if (status != DRM_GPU_SCHED_STAT_ENODEV) {
		spin_lock(&sched->job_list_lock);
		drm_sched_start_timeout(sched);
		spin_unlock(&sched->job_list_lock);
	}
}

/**
 * drm_sched_stop - stop the scheduler
 *
 * @sched: scheduler instance
 * @bad: job which caused the time out
 *
 * Stop the scheduler and also remove and free all completed jobs.
 * Note: the bad job will not be freed as it might be used later, so it is the
 * caller's responsibility to release it manually if it is not part of the
 * pending list any more.
 */
void drm_sched_stop(struct drm_gpu_scheduler *sched, struct drm_sched_job *bad)
{
	struct drm_sched_job *s_job, *tmp;

	kthread_park(sched->thread);

	/*
	 * Reinsert the bad job here - now it's safe as
	 * drm_sched_get_cleanup_job cannot race against us and release the
	 * bad job at this point - we parked (waited for) any in progress
	 * (earlier) cleanups and drm_sched_get_cleanup_job will not be called
	 * now until the scheduler thread is unparked.
	 */
	if (bad && bad->sched == sched)
		/*
		 * Add at the head of the queue to reflect it was the earliest
		 * job extracted.
		 */
		list_add(&bad->list, &sched->pending_list);

	/*
	 * Iterate the job list from later to earlier one and either deactivate
	 * their HW callbacks or remove them from the pending list if they have
	 * already signaled.
	 * This iteration is thread safe as the sched thread is stopped.
	 */
	list_for_each_entry_safe_reverse(s_job, tmp, &sched->pending_list,
					 list) {
		if (s_job->s_fence->parent &&
		    dma_fence_remove_callback(s_job->s_fence->parent,
					      &s_job->cb)) {
			dma_fence_put(s_job->s_fence->parent);
			s_job->s_fence->parent = NULL;
			atomic_dec(&sched->hw_rq_count);
		} else {
			/*
			 * Remove the job from the pending_list.
			 * Locking here is for concurrent resume timeout.
			 */
			spin_lock(&sched->job_list_lock);
			list_del_init(&s_job->list);
			spin_unlock(&sched->job_list_lock);

			/*
			 * Wait for the job's HW fence callback to finish using
			 * s_job before releasing it.
			 *
			 * The job is still alive, so the fence refcount is at
			 * least 1.
			 */
			dma_fence_wait(&s_job->s_fence->finished, false);

			/*
			 * We must keep the bad job alive for later use during
			 * recovery by some of the drivers but leave a hint
			 * that the guilty job must be released.
			 */
			if (bad != s_job)
				sched->ops->free_job(s_job);
			else
				sched->free_guilty = true;
		}
	}

	/*
	 * Stop the pending timer in flight as we rearm it in drm_sched_start.
	 * This avoids the pending timeout work in progress firing right away
	 * after this TDR finished and before the newly restarted jobs had a
	 * chance to complete.
	 */
	cancel_delayed_work(&sched->work_tdr);
}
EXPORT_SYMBOL(drm_sched_stop);

/**
 * drm_sched_start - recover jobs after a reset
 *
 * @sched: scheduler instance
 * @full_recovery: proceed with complete sched restart
 */
void drm_sched_start(struct drm_gpu_scheduler *sched, bool full_recovery)
{
	struct drm_sched_job *s_job, *tmp;
	int r;

	/*
	 * Locking the list is not required here as the sched thread is parked
	 * so no new jobs are being inserted or removed. Also concurrent
	 * GPU recovers can't run in parallel.
	 */
	list_for_each_entry_safe(s_job, tmp, &sched->pending_list, list) {
		struct dma_fence *fence = s_job->s_fence->parent;

		atomic_inc(&sched->hw_rq_count);

		if (!full_recovery)
			continue;

		if (fence) {
			r = dma_fence_add_callback(fence, &s_job->cb,
						   drm_sched_job_done_cb);
			if (r == -ENOENT)
				drm_sched_job_done(s_job, fence->error);
			else if (r)
				DRM_DEV_ERROR(sched->dev, "fence add callback failed (%d)\n",
					      r);
		} else
			drm_sched_job_done(s_job, -ECANCELED);
	}

	if (full_recovery) {
		spin_lock(&sched->job_list_lock);
		drm_sched_start_timeout(sched);
		spin_unlock(&sched->job_list_lock);
	}

	kthread_unpark(sched->thread);
}
EXPORT_SYMBOL(drm_sched_start);

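/*
 * drm_sched_stop() and drm_sched_start() bracket driver reset code in the
 * &drm_sched_backend_ops.timedout_job callback. The outline below is a
 * hedged sketch of that pattern, not a canonical implementation: the foo_
 * names are hypothetical and error handling is elided.
 */
#if 0
static enum drm_gpu_sched_stat foo_timedout_job(struct drm_sched_job *bad)
{
	struct foo_device *foo = to_foo_device(bad->sched);

	/* Park the scheduler thread and prune the pending list; @bad stays
	 * alive for use below. */
	drm_sched_stop(&foo->sched, bad);

	/* Record the hang against the offending entity. */
	drm_sched_increase_karma(bad);

	/* Driver specific hardware reset and state restore goes here. */
	foo_gpu_reset(foo);

	/* Re-add the HW fence callbacks, restart the timeout and unpark. */
	drm_sched_start(&foo->sched, true);

	return DRM_GPU_SCHED_STAT_NOMINAL;
}
#endif
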
/**
 * drm_sched_resubmit_jobs - Deprecated, don't use in new code!
 *
 * @sched: scheduler instance
 *
 * Re-submitting jobs was a concept AMD came up with as a cheap way to
 * implement recovery after a job timeout.
 *
 * This turned out to not work very well. First of all there are many
 * problems with the dma_fence implementation and requirements. Either the
 * implementation is risking deadlocks with core memory management or violating
 * documented implementation details of the dma_fence object.
 *
 * Drivers can still save and restore their state for recovery operations, but
 * we shouldn't make this a general scheduler feature around the dma_fence
 * interface.
 */
void drm_sched_resubmit_jobs(struct drm_gpu_scheduler *sched)
{
	struct drm_sched_job *s_job, *tmp;
	uint64_t guilty_context;
	bool found_guilty = false;
	struct dma_fence *fence;

	list_for_each_entry_safe(s_job, tmp, &sched->pending_list, list) {
		struct drm_sched_fence *s_fence = s_job->s_fence;

		if (!found_guilty && atomic_read(&s_job->karma) > sched->hang_limit) {
			found_guilty = true;
			guilty_context = s_job->s_fence->scheduled.context;
		}

		if (found_guilty && s_job->s_fence->scheduled.context == guilty_context)
			dma_fence_set_error(&s_fence->finished, -ECANCELED);

		fence = sched->ops->run_job(s_job);

		if (IS_ERR_OR_NULL(fence)) {
			if (IS_ERR(fence))
				dma_fence_set_error(&s_fence->finished, PTR_ERR(fence));

			s_job->s_fence->parent = NULL;
		} else {
			s_job->s_fence->parent = dma_fence_get(fence);

			/* Drop for original kref_init */
			dma_fence_put(fence);
		}
	}
}
EXPORT_SYMBOL(drm_sched_resubmit_jobs);

/**
 * drm_sched_job_init - init a scheduler job
 * @job: scheduler job to init
 * @entity: scheduler entity to use
 * @owner: job owner for debugging
 *
 * Refer to drm_sched_entity_push_job() documentation
 * for locking considerations.
 *
 * Drivers must make sure to call drm_sched_job_cleanup() if this function
 * returns successfully, even when @job is aborted before drm_sched_job_arm()
 * is called.
 *
 * WARNING: amdgpu abuses &drm_sched.ready to signal when the hardware
 * has died, which can mean that there's no valid runqueue for @entity.
 * This function returns -ENOENT in this case (which probably should be -EIO
 * as a more meaningful return value).
 *
 * Returns 0 for success, negative error code otherwise.
 */
int drm_sched_job_init(struct drm_sched_job *job,
		       struct drm_sched_entity *entity,
		       void *owner)
{
	if (!entity->rq)
		return -ENOENT;

	job->entity = entity;
	job->s_fence = drm_sched_fence_alloc(entity, owner);
	if (!job->s_fence)
		return -ENOMEM;

	INIT_LIST_HEAD(&job->list);

	xa_init_flags(&job->dependencies, XA_FLAGS_ALLOC);

	return 0;
}
EXPORT_SYMBOL(drm_sched_job_init);

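/*
 * The cleanup contract from the comment above in sketch form: once
 * drm_sched_job_init() has succeeded, every exit path must end up in
 * drm_sched_job_cleanup(), even when the submission is aborted before
 * drm_sched_job_arm(). Hedged example; the foo_ helpers are hypothetical.
 */
#if 0
static int foo_submit(struct foo_context *ctx, struct foo_job *fjob)
{
	int ret;

	ret = drm_sched_job_init(&fjob->base, &ctx->entity, ctx);
	if (ret)
		return ret;

	ret = foo_job_lookup_objects(fjob);
	if (ret)
		goto err_cleanup;	/* aborted before arm */

	drm_sched_job_arm(&fjob->base);
	drm_sched_entity_push_job(&fjob->base);
	return 0;

err_cleanup:
	drm_sched_job_cleanup(&fjob->base);
	return ret;
}
#endif
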
/**
 * drm_sched_job_arm - arm a scheduler job for execution
 * @job: scheduler job to arm
 *
 * This arms a scheduler job for execution. Specifically it initializes the
 * &drm_sched_job.s_fence of @job, so that it can be attached to struct dma_resv
 * or other places that need to track the completion of this job.
 *
 * Refer to drm_sched_entity_push_job() documentation for locking
 * considerations.
 *
 * This can only be called if drm_sched_job_init() succeeded.
 */
void drm_sched_job_arm(struct drm_sched_job *job)
{
	struct drm_gpu_scheduler *sched;
	struct drm_sched_entity *entity = job->entity;

	BUG_ON(!entity);
	drm_sched_entity_select_rq(entity);
	sched = entity->rq->sched;

	job->sched = sched;
	job->s_priority = entity->rq - sched->sched_rq;
	job->id = atomic64_inc_return(&sched->job_id_count);

	drm_sched_fence_init(job->s_fence, job->entity);
}
EXPORT_SYMBOL(drm_sched_job_arm);

/**
 * drm_sched_job_add_dependency - adds the fence as a job dependency
 * @job: scheduler job to add the dependencies to
 * @fence: the dma_fence to add to the list of dependencies.
 *
 * Note that @fence is consumed in both the success and error cases.
 *
 * Returns:
 * 0 on success, or an error on failing to expand the array.
 */
int drm_sched_job_add_dependency(struct drm_sched_job *job,
				 struct dma_fence *fence)
{
	struct dma_fence *entry;
	unsigned long index;
	u32 id = 0;
	int ret;

	if (!fence)
		return 0;

	/* Deduplicate if we already depend on a fence from the same context.
	 * This lets the size of the array of deps scale with the number of
	 * engines involved, rather than the number of BOs.
	 */
	xa_for_each(&job->dependencies, index, entry) {
		if (entry->context != fence->context)
			continue;

		if (dma_fence_is_later(fence, entry)) {
			dma_fence_put(entry);
			xa_store(&job->dependencies, index, fence, GFP_KERNEL);
		} else {
			dma_fence_put(fence);
		}
		return 0;
	}

	ret = xa_alloc(&job->dependencies, &id, fence, xa_limit_32b, GFP_KERNEL);
	if (ret != 0)
		dma_fence_put(fence);

	return ret;
}
EXPORT_SYMBOL(drm_sched_job_add_dependency);

/**
 * drm_sched_job_add_syncobj_dependency - adds a syncobj's fence as a job dependency
 * @job: scheduler job to add the dependencies to
 * @file: drm file private pointer
 * @handle: syncobj handle to lookup
 * @point: timeline point
 *
 * This adds the fence matching the given syncobj to @job.
 *
 * Returns:
 * 0 on success, or an error on failing to expand the array.
 */
int drm_sched_job_add_syncobj_dependency(struct drm_sched_job *job,
					 struct drm_file *file,
					 u32 handle,
					 u32 point)
{
	struct dma_fence *fence;
	int ret;

	ret = drm_syncobj_find_fence(file, handle, point, 0, &fence);
	if (ret)
		return ret;

	return drm_sched_job_add_dependency(job, fence);
}
EXPORT_SYMBOL(drm_sched_job_add_syncobj_dependency);

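/*
 * Typical use is resolving a userspace-provided "in fence" from an ioctl
 * argument. A hedged sketch; the drm_foo_submit args struct is hypothetical.
 */
#if 0
static int foo_add_in_fence(struct drm_sched_job *job, struct drm_file *file,
			    struct drm_foo_submit *args)
{
	if (!args->in_syncobj)
		return 0;

	return drm_sched_job_add_syncobj_dependency(job, file,
						    args->in_syncobj,
						    args->in_syncobj_point);
}
#endif
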
/**
 * drm_sched_job_add_resv_dependencies - add all fences from the resv to the job
 * @job: scheduler job to add the dependencies to
 * @resv: the dma_resv object to get the fences from
 * @usage: the dma_resv_usage to use to filter the fences
 *
 * This adds all fences matching the given usage from @resv to @job.
 * Must be called with the @resv lock held.
 *
 * Returns:
 * 0 on success, or an error on failing to expand the array.
 */
int drm_sched_job_add_resv_dependencies(struct drm_sched_job *job,
					struct dma_resv *resv,
					enum dma_resv_usage usage)
{
	struct dma_resv_iter cursor;
	struct dma_fence *fence;
	int ret;

	dma_resv_assert_held(resv);

	dma_resv_for_each_fence(&cursor, resv, usage, fence) {
		/* Make sure to grab an additional ref on the added fence */
		dma_fence_get(fence);
		ret = drm_sched_job_add_dependency(job, fence);
		if (ret) {
			dma_fence_put(fence);
			return ret;
		}
	}
	return 0;
}
EXPORT_SYMBOL(drm_sched_job_add_resv_dependencies);

/**
 * drm_sched_job_add_implicit_dependencies - adds implicit dependencies as job
 *   dependencies
 * @job: scheduler job to add the dependencies to
 * @obj: the gem object to add new dependencies from.
 * @write: whether the job might write the object (so we need to depend on
 * shared fences in the reservation object).
 *
 * This should be called after drm_gem_lock_reservations() on your array of
 * GEM objects used in the job but before updating the reservations with your
 * own fences.
 *
 * Returns:
 * 0 on success, or an error on failing to expand the array.
 */
int drm_sched_job_add_implicit_dependencies(struct drm_sched_job *job,
					    struct drm_gem_object *obj,
					    bool write)
{
	return drm_sched_job_add_resv_dependencies(job, obj->resv,
						   dma_resv_usage_rw(write));
}
EXPORT_SYMBOL(drm_sched_job_add_implicit_dependencies);

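/*
 * Wiring up implicit synchronization for a job's BO list, following the
 * comment above: lock all reservations first, then add the implicit
 * dependencies. Hedged sketch; the foo_job fields are hypothetical.
 */
#if 0
static int foo_job_add_deps(struct foo_job *fjob, struct ww_acquire_ctx *actx)
{
	int i, ret;

	ret = drm_gem_lock_reservations(fjob->bos, fjob->bo_count, actx);
	if (ret)
		return ret;

	for (i = 0; i < fjob->bo_count; i++) {
		ret = drm_sched_job_add_implicit_dependencies(&fjob->base,
							      fjob->bos[i],
							      fjob->bo_write[i]);
		if (ret)
			break;
	}

	/* Reservations stay locked until the finished fence is installed. */
	return ret;
}
#endif
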
/**
 * drm_sched_job_cleanup - clean up scheduler job resources
 * @job: scheduler job to clean up
 *
 * Cleans up the resources allocated with drm_sched_job_init().
 *
 * Drivers should call this from their error unwind code if @job is aborted
 * before drm_sched_job_arm() is called.
 *
 * After that point of no return @job is committed to be executed by the
 * scheduler, and this function should be called from the
 * &drm_sched_backend_ops.free_job callback.
 */
void drm_sched_job_cleanup(struct drm_sched_job *job)
{
	struct dma_fence *fence;
	unsigned long index;

	if (kref_read(&job->s_fence->finished.refcount)) {
		/* drm_sched_job_arm() has been called */
		dma_fence_put(&job->s_fence->finished);
	} else {
		/* aborted job before committing to run it */
		drm_sched_fence_free(job->s_fence);
	}

	job->s_fence = NULL;

	xa_for_each(&job->dependencies, index, fence) {
		dma_fence_put(fence);
	}
	xa_destroy(&job->dependencies);
}
EXPORT_SYMBOL(drm_sched_job_cleanup);

/**
 * drm_sched_can_queue -- Can we queue more to the hardware?
 * @sched: scheduler instance
 *
 * Return true if we can push more jobs to the hw, otherwise false.
 */
static bool drm_sched_can_queue(struct drm_gpu_scheduler *sched)
{
	return atomic_read(&sched->hw_rq_count) <
		sched->hw_submission_limit;
}

/**
 * drm_sched_wakeup_if_can_queue - Wake up the scheduler
 * @sched: scheduler instance
 *
 * Wake up the scheduler if we can queue jobs.
 */
void drm_sched_wakeup_if_can_queue(struct drm_gpu_scheduler *sched)
{
	if (drm_sched_can_queue(sched))
		wake_up_interruptible(&sched->wake_up_worker);
}

/**
 * drm_sched_select_entity - Select next entity to process
 *
 * @sched: scheduler instance
 *
 * Returns the entity to process or NULL if none are found.
 */
static struct drm_sched_entity *
drm_sched_select_entity(struct drm_gpu_scheduler *sched)
{
	struct drm_sched_entity *entity;
	int i;

	if (!drm_sched_can_queue(sched))
		return NULL;

	/* Kernel run queue has higher priority than normal run queue */
	for (i = DRM_SCHED_PRIORITY_COUNT - 1; i >= DRM_SCHED_PRIORITY_MIN; i--) {
		entity = drm_sched_policy == DRM_SCHED_POLICY_FIFO ?
			drm_sched_rq_select_entity_fifo(&sched->sched_rq[i]) :
			drm_sched_rq_select_entity_rr(&sched->sched_rq[i]);
		if (entity)
			break;
	}

	return entity;
}

/**
 * drm_sched_get_cleanup_job - fetch the next finished job to be destroyed
 *
 * @sched: scheduler instance
 *
 * Returns the next finished job from the pending list (if there is one)
 * ready for it to be destroyed.
 */
static struct drm_sched_job *
drm_sched_get_cleanup_job(struct drm_gpu_scheduler *sched)
{
	struct drm_sched_job *job, *next;

	spin_lock(&sched->job_list_lock);

	job = list_first_entry_or_null(&sched->pending_list,
				       struct drm_sched_job, list);

	if (job && dma_fence_is_signaled(&job->s_fence->finished)) {
		/* remove job from pending_list */
		list_del_init(&job->list);

		/* cancel this job's TO timer */
		cancel_delayed_work(&sched->work_tdr);
		/* make the scheduled timestamp more accurate */
		next = list_first_entry_or_null(&sched->pending_list,
						typeof(*next), list);

		if (next) {
			next->s_fence->scheduled.timestamp =
				dma_fence_timestamp(&job->s_fence->finished);
			/* start TO timer for next job */
			drm_sched_start_timeout(sched);
		}
	} else {
		job = NULL;
	}

	spin_unlock(&sched->job_list_lock);

	return job;
}

/**
 * drm_sched_pick_best - Get a drm sched from a sched_list with the least load
 * @sched_list: list of drm_gpu_schedulers
 * @num_sched_list: number of drm_gpu_schedulers in the sched_list
 *
 * Returns pointer of the sched with the least load or NULL if none of the
 * drm_gpu_schedulers are ready
 */
struct drm_gpu_scheduler *
drm_sched_pick_best(struct drm_gpu_scheduler **sched_list,
		    unsigned int num_sched_list)
{
	struct drm_gpu_scheduler *sched, *picked_sched = NULL;
	int i;
	unsigned int min_score = UINT_MAX, num_score;

	for (i = 0; i < num_sched_list; ++i) {
		sched = sched_list[i];

		if (!sched->ready) {
			DRM_WARN("scheduler %s is not ready, skipping",
				 sched->name);
			continue;
		}

		num_score = atomic_read(sched->score);
		if (num_score < min_score) {
			min_score = num_score;
			picked_sched = sched;
		}
	}

	return picked_sched;
}
EXPORT_SYMBOL(drm_sched_pick_best);

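/*
 * drm_sched_pick_best() backs the scheduler load balancing: an entity
 * initialized with more than one scheduler in its sched_list is moved to
 * the least loaded ready scheduler whenever it is idle (see
 * drm_sched_entity_select_rq()). Hedged driver-side sketch, assuming two
 * hypothetical copy engines:
 */
#if 0
struct drm_gpu_scheduler *copy_scheds[] = {
	&foo->copy_ring[0].sched,
	&foo->copy_ring[1].sched,
};

drm_sched_entity_init(&ctx->copy_entity, DRM_SCHED_PRIORITY_NORMAL,
		      copy_scheds, ARRAY_SIZE(copy_scheds), NULL);
#endif
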
/**
 * drm_sched_blocked - check if the scheduler is blocked
 *
 * @sched: scheduler instance
 *
 * Returns true if blocked, otherwise false.
 */
static bool drm_sched_blocked(struct drm_gpu_scheduler *sched)
{
	if (kthread_should_park()) {
		kthread_parkme();
		return true;
	}

	return false;
}

/**
 * drm_sched_main - main scheduler thread
 *
 * @param: scheduler instance
 *
 * Returns 0.
 */
static int drm_sched_main(void *param)
{
	struct drm_gpu_scheduler *sched = (struct drm_gpu_scheduler *)param;
	int r;

	sched_set_fifo_low(current);

	while (!kthread_should_stop()) {
		struct drm_sched_entity *entity = NULL;
		struct drm_sched_fence *s_fence;
		struct drm_sched_job *sched_job;
		struct dma_fence *fence;
		struct drm_sched_job *cleanup_job = NULL;

		wait_event_interruptible(sched->wake_up_worker,
					 (cleanup_job = drm_sched_get_cleanup_job(sched)) ||
					 (!drm_sched_blocked(sched) &&
					  (entity = drm_sched_select_entity(sched))) ||
					 kthread_should_stop());

		if (cleanup_job)
			sched->ops->free_job(cleanup_job);

		if (!entity)
			continue;

		sched_job = drm_sched_entity_pop_job(entity);

		if (!sched_job) {
			complete_all(&entity->entity_idle);
			continue;
		}

		s_fence = sched_job->s_fence;

		atomic_inc(&sched->hw_rq_count);
		drm_sched_job_begin(sched_job);

		trace_drm_run_job(sched_job, entity);
		fence = sched->ops->run_job(sched_job);
		complete_all(&entity->entity_idle);
		drm_sched_fence_scheduled(s_fence, fence);

		if (!IS_ERR_OR_NULL(fence)) {
			/* Drop for original kref_init of the fence */
			dma_fence_put(fence);

			r = dma_fence_add_callback(fence, &sched_job->cb,
						   drm_sched_job_done_cb);
			if (r == -ENOENT)
				drm_sched_job_done(sched_job, fence->error);
			else if (r)
				DRM_DEV_ERROR(sched->dev, "fence add callback failed (%d)\n",
					      r);
		} else {
			drm_sched_job_done(sched_job, IS_ERR(fence) ?
					   PTR_ERR(fence) : 0);
		}

		wake_up(&sched->job_scheduled);
	}
	return 0;
}

/**
 * drm_sched_init - Init a gpu scheduler instance
 *
 * @sched: scheduler instance
 * @ops: backend operations for this scheduler
 * @hw_submission: number of hw submissions that can be in flight
 * @hang_limit: number of times to allow a job to hang before dropping it
 * @timeout: timeout value in jiffies for the scheduler
 * @timeout_wq: workqueue to use for timeout work. If NULL, the system_wq is
 *		used
 * @score: optional score atomic shared with other schedulers
 * @name: name used for debugging
 * @dev: target &struct device
 *
 * Return 0 on success, otherwise error code.
 */
int drm_sched_init(struct drm_gpu_scheduler *sched,
		   const struct drm_sched_backend_ops *ops,
		   unsigned hw_submission, unsigned hang_limit,
		   long timeout, struct workqueue_struct *timeout_wq,
		   atomic_t *score, const char *name, struct device *dev)
{
	int i, ret;

	sched->ops = ops;
	sched->hw_submission_limit = hw_submission;
	sched->name = name;
	sched->timeout = timeout;
	sched->timeout_wq = timeout_wq ? : system_wq;
	sched->hang_limit = hang_limit;
	sched->score = score ? score : &sched->_score;
	sched->dev = dev;
	for (i = DRM_SCHED_PRIORITY_MIN; i < DRM_SCHED_PRIORITY_COUNT; i++)
		drm_sched_rq_init(sched, &sched->sched_rq[i]);

	init_waitqueue_head(&sched->wake_up_worker);
	init_waitqueue_head(&sched->job_scheduled);
	INIT_LIST_HEAD(&sched->pending_list);
	spin_lock_init(&sched->job_list_lock);
	atomic_set(&sched->hw_rq_count, 0);
	INIT_DELAYED_WORK(&sched->work_tdr, drm_sched_job_timedout);
	atomic_set(&sched->_score, 0);
	atomic64_set(&sched->job_id_count, 0);

	/* Each scheduler will run on a separate kernel thread */
	sched->thread = kthread_run(drm_sched_main, sched, sched->name);
	if (IS_ERR(sched->thread)) {
		ret = PTR_ERR(sched->thread);
		sched->thread = NULL;
		DRM_DEV_ERROR(sched->dev, "Failed to create scheduler for %s.\n", name);
		return ret;
	}

	sched->ready = true;
	return 0;
}
EXPORT_SYMBOL(drm_sched_init);

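/*
 * A minimal &drm_sched_backend_ops table to pass to drm_sched_init(), as a
 * hedged sketch with hypothetical foo_ callbacks: .run_job hands a job to
 * the hardware and returns the HW fence, .timedout_job implements recovery
 * (see drm_sched_stop()/drm_sched_start()) and .free_job releases the job
 * once the scheduler is done with it.
 */
#if 0
static const struct drm_sched_backend_ops foo_sched_ops = {
	.run_job	= foo_job_run,
	.timedout_job	= foo_timedout_job,
	.free_job	= foo_job_free,
};
#endif
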
/**
 * drm_sched_fini - Destroy a gpu scheduler
 *
 * @sched: scheduler instance
 *
 * Tears down and cleans up the scheduler.
 */
void drm_sched_fini(struct drm_gpu_scheduler *sched)
{
	struct drm_sched_entity *s_entity;
	int i;

	if (sched->thread)
		kthread_stop(sched->thread);

	for (i = DRM_SCHED_PRIORITY_COUNT - 1; i >= DRM_SCHED_PRIORITY_MIN; i--) {
		struct drm_sched_rq *rq = &sched->sched_rq[i];

		spin_lock(&rq->lock);
		list_for_each_entry(s_entity, &rq->entities, list)
			/*
			 * Prevents reinsertion and marks job_queue as idle.
			 * It will be removed from the rq in
			 * drm_sched_entity_fini() eventually.
			 */
			s_entity->stopped = true;
		spin_unlock(&rq->lock);
	}

	/* Wakeup everyone stuck in drm_sched_entity_flush for this scheduler */
	wake_up_all(&sched->job_scheduled);

	/* Confirm no work left behind accessing device structures */
	cancel_delayed_work_sync(&sched->work_tdr);

	sched->ready = false;
}
EXPORT_SYMBOL(drm_sched_fini);

/**
 * drm_sched_increase_karma - Update sched_entity guilty flag
 *
 * @bad: The job guilty of time out
 *
 * Increment on every hang caused by the 'bad' job. If this exceeds the hang
 * limit of the scheduler then the respective sched entity is marked guilty and
 * jobs from it will not be scheduled further
 */
void drm_sched_increase_karma(struct drm_sched_job *bad)
{
	int i;
	struct drm_sched_entity *tmp;
	struct drm_sched_entity *entity;
	struct drm_gpu_scheduler *sched = bad->sched;

	/* don't change @bad's karma if it's from KERNEL RQ,
	 * because a GPU hang can sometimes corrupt kernel jobs (like VM
	 * updating jobs), but keep in mind that kernel jobs are always
	 * considered good.
	 */
	if (bad->s_priority != DRM_SCHED_PRIORITY_KERNEL) {
		atomic_inc(&bad->karma);

		for (i = DRM_SCHED_PRIORITY_MIN; i < DRM_SCHED_PRIORITY_KERNEL;
		     i++) {
			struct drm_sched_rq *rq = &sched->sched_rq[i];

			spin_lock(&rq->lock);
			list_for_each_entry_safe(entity, tmp, &rq->entities, list) {
				if (bad->s_fence->scheduled.context ==
				    entity->fence_context) {
					if (entity->guilty)
						atomic_set(entity->guilty, 1);
					break;
				}
			}
			spin_unlock(&rq->lock);
			if (&entity->list != &rq->entities)
				break;
		}
	}
}
EXPORT_SYMBOL(drm_sched_increase_karma);