drm/scheduler: fix drm_sched_job_add_implicit_dependencies
drivers/gpu/drm/scheduler/sched_main.c
1 /*
2  * Copyright 2015 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  */
23
24 /**
25  * DOC: Overview
26  *
27  * The GPU scheduler provides entities which allow userspace to push jobs
28  * into software queues which are then scheduled on a hardware run queue.
 29  * The software queues have a priority among them. The scheduler selects entities
 30  * from a run queue in FIFO order. The scheduler provides dependency handling
 31  * features among jobs. The driver is supposed to provide callback functions for
 32  * backend operations to the scheduler, like submitting a job to the hardware run
 33  * queue, returning the dependencies of a job, etc.
34  *
35  * The organisation of the scheduler is the following:
36  *
37  * 1. Each hw run queue has one scheduler
38  * 2. Each scheduler has multiple run queues with different priorities
 39  *    (e.g., HIGH_HW, HIGH_SW, KERNEL, NORMAL)
40  * 3. Each scheduler run queue has a queue of entities to schedule
41  * 4. Entities themselves maintain a queue of jobs that will be scheduled on
42  *    the hardware.
43  *
 44  * The jobs in an entity are always scheduled in the order in which they were pushed.
45  */
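
/*
 * Example: a minimal sketch of the driver-side submission flow described
 * above. struct my_job and the my_*() names are hypothetical and only meant
 * to illustrate the call order; exact signatures of the entity helpers can
 * differ slightly between kernel versions.
 *
 *	struct my_job {
 *		struct drm_sched_job base;
 *		// driver-private payload describing the command submission
 *	};
 *
 *	static int my_submit(struct my_job *job, struct drm_sched_entity *entity,
 *			     void *owner)
 *	{
 *		int ret;
 *
 *		ret = drm_sched_job_init(&job->base, entity, owner);
 *		if (ret)
 *			return ret;
 *
 *		// add explicit/implicit dependencies here, then commit:
 *		drm_sched_job_arm(&job->base);
 *		drm_sched_entity_push_job(&job->base);
 *		return 0;
 *	}
 */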
46
47 #include <linux/kthread.h>
48 #include <linux/wait.h>
49 #include <linux/sched.h>
50 #include <linux/completion.h>
51 #include <linux/dma-resv.h>
52 #include <uapi/linux/sched/types.h>
53
54 #include <drm/drm_print.h>
55 #include <drm/drm_gem.h>
56 #include <drm/gpu_scheduler.h>
57 #include <drm/spsc_queue.h>
58
59 #define CREATE_TRACE_POINTS
60 #include "gpu_scheduler_trace.h"
61
62 #define to_drm_sched_job(sched_job)             \
63                 container_of((sched_job), struct drm_sched_job, queue_node)
64
65 /**
66  * drm_sched_rq_init - initialize a given run queue struct
67  *
68  * @sched: scheduler instance to associate with this run queue
69  * @rq: scheduler run queue
70  *
71  * Initializes a scheduler runqueue.
72  */
73 static void drm_sched_rq_init(struct drm_gpu_scheduler *sched,
74                               struct drm_sched_rq *rq)
75 {
76         spin_lock_init(&rq->lock);
77         INIT_LIST_HEAD(&rq->entities);
78         rq->current_entity = NULL;
79         rq->sched = sched;
80 }
81
82 /**
83  * drm_sched_rq_add_entity - add an entity
84  *
85  * @rq: scheduler run queue
86  * @entity: scheduler entity
87  *
88  * Adds a scheduler entity to the run queue.
89  */
90 void drm_sched_rq_add_entity(struct drm_sched_rq *rq,
91                              struct drm_sched_entity *entity)
92 {
93         if (!list_empty(&entity->list))
94                 return;
95         spin_lock(&rq->lock);
96         atomic_inc(rq->sched->score);
97         list_add_tail(&entity->list, &rq->entities);
98         spin_unlock(&rq->lock);
99 }
100
101 /**
102  * drm_sched_rq_remove_entity - remove an entity
103  *
104  * @rq: scheduler run queue
105  * @entity: scheduler entity
106  *
107  * Removes a scheduler entity from the run queue.
108  */
109 void drm_sched_rq_remove_entity(struct drm_sched_rq *rq,
110                                 struct drm_sched_entity *entity)
111 {
112         if (list_empty(&entity->list))
113                 return;
114         spin_lock(&rq->lock);
115         atomic_dec(rq->sched->score);
116         list_del_init(&entity->list);
117         if (rq->current_entity == entity)
118                 rq->current_entity = NULL;
119         spin_unlock(&rq->lock);
120 }
121
122 /**
123  * drm_sched_rq_select_entity - Select an entity which could provide a job to run
124  *
125  * @rq: scheduler run queue to check.
126  *
127  * Try to find a ready entity, returns NULL if none found.
128  */
129 static struct drm_sched_entity *
130 drm_sched_rq_select_entity(struct drm_sched_rq *rq)
131 {
132         struct drm_sched_entity *entity;
133
134         spin_lock(&rq->lock);
135
136         entity = rq->current_entity;
137         if (entity) {
138                 list_for_each_entry_continue(entity, &rq->entities, list) {
139                         if (drm_sched_entity_is_ready(entity)) {
140                                 rq->current_entity = entity;
141                                 reinit_completion(&entity->entity_idle);
142                                 spin_unlock(&rq->lock);
143                                 return entity;
144                         }
145                 }
146         }
147
148         list_for_each_entry(entity, &rq->entities, list) {
149
150                 if (drm_sched_entity_is_ready(entity)) {
151                         rq->current_entity = entity;
152                         reinit_completion(&entity->entity_idle);
153                         spin_unlock(&rq->lock);
154                         return entity;
155                 }
156
157                 if (entity == rq->current_entity)
158                         break;
159         }
160
161         spin_unlock(&rq->lock);
162
163         return NULL;
164 }
165
166 /**
167  * drm_sched_job_done - complete a job
168  * @s_job: pointer to the job which is done
169  *
170  * Finish the job's fence and wake up the worker thread.
171  */
172 static void drm_sched_job_done(struct drm_sched_job *s_job)
173 {
174         struct drm_sched_fence *s_fence = s_job->s_fence;
175         struct drm_gpu_scheduler *sched = s_fence->sched;
176
177         atomic_dec(&sched->hw_rq_count);
178         atomic_dec(sched->score);
179
180         trace_drm_sched_process_job(s_fence);
181
182         dma_fence_get(&s_fence->finished);
183         drm_sched_fence_finished(s_fence);
184         dma_fence_put(&s_fence->finished);
185         wake_up_interruptible(&sched->wake_up_worker);
186 }
187
188 /**
189  * drm_sched_job_done_cb - the callback for a done job
190  * @f: fence
191  * @cb: fence callbacks
192  */
193 static void drm_sched_job_done_cb(struct dma_fence *f, struct dma_fence_cb *cb)
194 {
195         struct drm_sched_job *s_job = container_of(cb, struct drm_sched_job, cb);
196
197         drm_sched_job_done(s_job);
198 }
199
200 /**
201  * drm_sched_dependency_optimized - check whether a dependency can be optimized
202  *
203  * @fence: the dependency fence
204  * @entity: the entity which depends on the above fence
205  *
206  * Returns true if the dependency can be optimized and false otherwise
207  */
208 bool drm_sched_dependency_optimized(struct dma_fence *fence,
209                                     struct drm_sched_entity *entity)
210 {
211         struct drm_gpu_scheduler *sched = entity->rq->sched;
212         struct drm_sched_fence *s_fence;
213
214         if (!fence || dma_fence_is_signaled(fence))
215                 return false;
216         if (fence->context == entity->fence_context)
217                 return true;
218         s_fence = to_drm_sched_fence(fence);
219         if (s_fence && s_fence->sched == sched)
220                 return true;
221
222         return false;
223 }
224 EXPORT_SYMBOL(drm_sched_dependency_optimized);
225
226 /**
227  * drm_sched_start_timeout - start timeout for reset worker
228  *
229  * @sched: scheduler instance to start the worker for
230  *
231  * Start the timeout for the given scheduler.
232  */
233 static void drm_sched_start_timeout(struct drm_gpu_scheduler *sched)
234 {
235         if (sched->timeout != MAX_SCHEDULE_TIMEOUT &&
236             !list_empty(&sched->pending_list))
237                 queue_delayed_work(sched->timeout_wq, &sched->work_tdr, sched->timeout);
238 }
239
240 /**
241  * drm_sched_fault - immediately start timeout handler
242  *
243  * @sched: scheduler where the timeout handling should be started.
244  *
245  * Start timeout handling immediately when the driver detects a hardware fault.
246  */
247 void drm_sched_fault(struct drm_gpu_scheduler *sched)
248 {
249         mod_delayed_work(sched->timeout_wq, &sched->work_tdr, 0);
250 }
251 EXPORT_SYMBOL(drm_sched_fault);
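
/*
 * Example (illustrative only): a driver with a dedicated hardware fault
 * interrupt could use drm_sched_fault() to kick timeout handling right away
 * instead of waiting for the timer. my_fault_irq() is a hypothetical handler.
 *
 *	static irqreturn_t my_fault_irq(int irq, void *data)
 *	{
 *		struct drm_gpu_scheduler *sched = data;
 *
 *		drm_sched_fault(sched);
 *		return IRQ_HANDLED;
 *	}
 */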
252
253 /**
254  * drm_sched_suspend_timeout - Suspend scheduler job timeout
255  *
256  * @sched: scheduler instance for which to suspend the timeout
257  *
258  * Suspend the delayed work timeout for the scheduler. This is done by
259  * modifying the delayed work timeout to an arbitrarily large value,
260  * MAX_SCHEDULE_TIMEOUT in this case.
261  *
262  * Returns the timeout remaining
263  *
264  */
265 unsigned long drm_sched_suspend_timeout(struct drm_gpu_scheduler *sched)
266 {
267         unsigned long sched_timeout, now = jiffies;
268
269         sched_timeout = sched->work_tdr.timer.expires;
270
271         /*
272          * Modify the timeout to an arbitrarily large value. This also prevents
273          * the timeout from being restarted when new submissions arrive.
274          */
275         if (mod_delayed_work(sched->timeout_wq, &sched->work_tdr, MAX_SCHEDULE_TIMEOUT)
276                         && time_after(sched_timeout, now))
277                 return sched_timeout - now;
278         else
279                 return sched->timeout;
280 }
281 EXPORT_SYMBOL(drm_sched_suspend_timeout);
282
283 /**
284  * drm_sched_resume_timeout - Resume scheduler job timeout
285  *
286  * @sched: scheduler instance for which to resume the timeout
287  * @remaining: remaining timeout
288  *
289  * Resume the delayed work timeout for the scheduler.
290  */
291 void drm_sched_resume_timeout(struct drm_gpu_scheduler *sched,
292                 unsigned long remaining)
293 {
294         spin_lock(&sched->job_list_lock);
295
296         if (list_empty(&sched->pending_list))
297                 cancel_delayed_work(&sched->work_tdr);
298         else
299                 mod_delayed_work(sched->timeout_wq, &sched->work_tdr, remaining);
300
301         spin_unlock(&sched->job_list_lock);
302 }
303 EXPORT_SYMBOL(drm_sched_resume_timeout);
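
/*
 * Example (illustrative only): suspend/resume of the timeout are meant to be
 * used as a pair around a driver phase during which jobs legitimately make no
 * progress, e.g. a power state transition; my_enter_low_power_state() and
 * my_dev are placeholders for the driver's own objects.
 *
 *	unsigned long remaining;
 *
 *	remaining = drm_sched_suspend_timeout(sched);
 *	my_enter_low_power_state(my_dev);
 *	drm_sched_resume_timeout(sched, remaining);
 */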
304
305 static void drm_sched_job_begin(struct drm_sched_job *s_job)
306 {
307         struct drm_gpu_scheduler *sched = s_job->sched;
308
309         spin_lock(&sched->job_list_lock);
310         list_add_tail(&s_job->list, &sched->pending_list);
311         drm_sched_start_timeout(sched);
312         spin_unlock(&sched->job_list_lock);
313 }
314
315 static void drm_sched_job_timedout(struct work_struct *work)
316 {
317         struct drm_gpu_scheduler *sched;
318         struct drm_sched_job *job;
319         enum drm_gpu_sched_stat status = DRM_GPU_SCHED_STAT_NOMINAL;
320
321         sched = container_of(work, struct drm_gpu_scheduler, work_tdr.work);
322
323         /* Protects against concurrent deletion in drm_sched_get_cleanup_job */
324         spin_lock(&sched->job_list_lock);
325         job = list_first_entry_or_null(&sched->pending_list,
326                                        struct drm_sched_job, list);
327
328         if (job) {
329                 /*
330                  * Remove the bad job so it cannot be freed by a concurrent
331                  * drm_sched_get_cleanup_job(). It will be reinserted after
332                  * sched->thread is parked, at which point it is safe.
333                  */
334                 list_del_init(&job->list);
335                 spin_unlock(&sched->job_list_lock);
336
337                 status = job->sched->ops->timedout_job(job);
338
339                 /*
340                  * The guilty job already completed and hence needs to be freed
341                  * manually; see the drm_sched_stop() documentation.
342                  */
343                 if (sched->free_guilty) {
344                         job->sched->ops->free_job(job);
345                         sched->free_guilty = false;
346                 }
347         } else {
348                 spin_unlock(&sched->job_list_lock);
349         }
350
351         if (status != DRM_GPU_SCHED_STAT_ENODEV) {
352                 spin_lock(&sched->job_list_lock);
353                 drm_sched_start_timeout(sched);
354                 spin_unlock(&sched->job_list_lock);
355         }
356 }
357
358 /**
359  * drm_sched_increase_karma - Update sched_entity guilty flag
360  *
361  * @bad: The job guilty of the timeout
362  *
363  * Increment on every hang caused by the 'bad' job. If this exceeds the hang
364  * limit of the scheduler then the respective sched entity is marked guilty and
365  * jobs from it will not be scheduled further.
366  */
367 void drm_sched_increase_karma(struct drm_sched_job *bad)
368 {
369         drm_sched_increase_karma_ext(bad, 1);
370 }
371 EXPORT_SYMBOL(drm_sched_increase_karma);
372
373 void drm_sched_reset_karma(struct drm_sched_job *bad)
374 {
375         drm_sched_increase_karma_ext(bad, 0);
376 }
377 EXPORT_SYMBOL(drm_sched_reset_karma);
378
379 /**
380  * drm_sched_stop - stop the scheduler
381  *
382  * @sched: scheduler instance
383  * @bad: job which caused the time out
384  *
385  * Stop the scheduler; also remove and free all completed jobs.
386  * Note: @bad will not be freed as it might be used later, so it is the
387  * caller's responsibility to release it manually if it is no longer part
388  * of the pending list.
389  *
390  */
391 void drm_sched_stop(struct drm_gpu_scheduler *sched, struct drm_sched_job *bad)
392 {
393         struct drm_sched_job *s_job, *tmp;
394
395         kthread_park(sched->thread);
396
397         /*
398          * Reinsert the bad job here - now it's safe as
399          * drm_sched_get_cleanup_job cannot race against us and release the
400          * bad job at this point - we parked (waited for) any in progress
401          * (earlier) cleanups and drm_sched_get_cleanup_job will not be called
402          * now until the scheduler thread is unparked.
403          */
404         if (bad && bad->sched == sched)
405                 /*
406                  * Add at the head of the queue to reflect it was the earliest
407                  * job extracted.
408                  */
409                 list_add(&bad->list, &sched->pending_list);
410
411         /*
412          * Iterate the job list from the last to the first job and either
413          * deactivate their HW callbacks or remove them from the pending list
414          * if they have already signaled.
415          * This iteration is thread safe as the sched thread is stopped.
416          */
417         list_for_each_entry_safe_reverse(s_job, tmp, &sched->pending_list,
418                                          list) {
419                 if (s_job->s_fence->parent &&
420                     dma_fence_remove_callback(s_job->s_fence->parent,
421                                               &s_job->cb)) {
422                         atomic_dec(&sched->hw_rq_count);
423                 } else {
424                         /*
425                          * remove job from pending_list.
426                          * Locking here is for concurrent resume timeout
427                          */
428                         spin_lock(&sched->job_list_lock);
429                         list_del_init(&s_job->list);
430                         spin_unlock(&sched->job_list_lock);
431
432                         /*
433                          * Wait for job's HW fence callback to finish using s_job
434                          * before releasing it.
435                          *
436                          * The job is still alive, so the fence refcount is at least 1.
437                          */
438                         dma_fence_wait(&s_job->s_fence->finished, false);
439
440                         /*
441                          * We must keep the bad job alive for later use during
442                          * recovery by some of the drivers but leave a hint
443                          * that the guilty job must be released.
444                          */
445                         if (bad != s_job)
446                                 sched->ops->free_job(s_job);
447                         else
448                                 sched->free_guilty = true;
449                 }
450         }
451
452         /*
453          * Stop the pending timer in flight as we rearm it in drm_sched_start. This
454          * prevents the timeout work in progress from firing right away after
455          * this TDR has finished and before the newly restarted jobs had a
456          * chance to complete.
457          */
458         cancel_delayed_work(&sched->work_tdr);
459 }
460
461 EXPORT_SYMBOL(drm_sched_stop);
462
463 /**
464  * drm_sched_start - recover jobs after a reset
465  *
466  * @sched: scheduler instance
467  * @full_recovery: proceed with complete sched restart
468  *
469  */
470 void drm_sched_start(struct drm_gpu_scheduler *sched, bool full_recovery)
471 {
472         struct drm_sched_job *s_job, *tmp;
473         int r;
474
475         /*
476          * Locking the list is not required here as the sched thread is parked
477          * so no new jobs are being inserted or removed. Also, concurrent
478          * GPU recoveries can't run in parallel.
479          */
480         list_for_each_entry_safe(s_job, tmp, &sched->pending_list, list) {
481                 struct dma_fence *fence = s_job->s_fence->parent;
482
483                 atomic_inc(&sched->hw_rq_count);
484
485                 if (!full_recovery)
486                         continue;
487
488                 if (fence) {
489                         r = dma_fence_add_callback(fence, &s_job->cb,
490                                                    drm_sched_job_done_cb);
491                         if (r == -ENOENT)
492                                 drm_sched_job_done(s_job);
493                         else if (r)
494                                 DRM_ERROR("fence add callback failed (%d)\n",
495                                           r);
496                 } else
497                         drm_sched_job_done(s_job);
498         }
499
500         if (full_recovery) {
501                 spin_lock(&sched->job_list_lock);
502                 drm_sched_start_timeout(sched);
503                 spin_unlock(&sched->job_list_lock);
504         }
505
506         kthread_unpark(sched->thread);
507 }
508 EXPORT_SYMBOL(drm_sched_start);
509
510 /**
511  * drm_sched_resubmit_jobs - helper to relaunch jobs from the pending list
512  *
513  * @sched: scheduler instance
514  *
515  */
516 void drm_sched_resubmit_jobs(struct drm_gpu_scheduler *sched)
517 {
518         drm_sched_resubmit_jobs_ext(sched, INT_MAX);
519 }
520 EXPORT_SYMBOL(drm_sched_resubmit_jobs);
521
522 /**
523  * drm_sched_resubmit_jobs_ext - helper to relaunch a certain number of jobs from the pending list
524  *
525  * @sched: scheduler instance
526  * @max: number of jobs to relaunch
527  *
528  */
529 void drm_sched_resubmit_jobs_ext(struct drm_gpu_scheduler *sched, int max)
530 {
531         struct drm_sched_job *s_job, *tmp;
532         uint64_t guilty_context;
533         bool found_guilty = false;
534         struct dma_fence *fence;
535         int i = 0;
536
537         list_for_each_entry_safe(s_job, tmp, &sched->pending_list, list) {
538                 struct drm_sched_fence *s_fence = s_job->s_fence;
539
540                 if (i >= max)
541                         break;
542
543                 if (!found_guilty && atomic_read(&s_job->karma) > sched->hang_limit) {
544                         found_guilty = true;
545                         guilty_context = s_job->s_fence->scheduled.context;
546                 }
547
548                 if (found_guilty && s_job->s_fence->scheduled.context == guilty_context)
549                         dma_fence_set_error(&s_fence->finished, -ECANCELED);
550
551                 dma_fence_put(s_job->s_fence->parent);
552                 fence = sched->ops->run_job(s_job);
553                 i++;
554
555                 if (IS_ERR_OR_NULL(fence)) {
556                         if (IS_ERR(fence))
557                                 dma_fence_set_error(&s_fence->finished, PTR_ERR(fence));
558
559                         s_job->s_fence->parent = NULL;
560                 } else {
561                         s_job->s_fence->parent = fence;
562                 }
563         }
564 }
565 EXPORT_SYMBOL(drm_sched_resubmit_jobs_ext);
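
/*
 * Example (illustrative only): a typical &drm_sched_backend_ops.timedout_job
 * callback builds its recovery sequence out of the helpers above; the order
 * stop -> karma -> hardware reset -> resubmit -> start is the common pattern.
 * my_reset_hw() stands in for the driver's own reset routine.
 *
 *	static enum drm_gpu_sched_stat my_timedout_job(struct drm_sched_job *job)
 *	{
 *		struct drm_gpu_scheduler *sched = job->sched;
 *
 *		drm_sched_stop(sched, job);
 *		drm_sched_increase_karma(job);
 *		my_reset_hw(sched);
 *		drm_sched_resubmit_jobs(sched);
 *		drm_sched_start(sched, true);
 *		return DRM_GPU_SCHED_STAT_NOMINAL;
 *	}
 */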
566
567 /**
568  * drm_sched_job_init - init a scheduler job
569  * @job: scheduler job to init
570  * @entity: scheduler entity to use
571  * @owner: job owner for debugging
572  *
573  * Refer to drm_sched_entity_push_job() documentation
574  * for locking considerations.
575  *
576  * Drivers must make sure to call drm_sched_job_cleanup() if this function returns
577  * successfully, even when @job is aborted before drm_sched_job_arm() is called.
578  *
579  * WARNING: amdgpu abuses &drm_sched.ready to signal when the hardware
580  * has died, which can mean that there's no valid runqueue for an @entity.
581  * This function returns -ENOENT in this case (which probably should be -EIO as
582  * a more meaningful return value).
583  *
584  * Returns 0 for success, negative error code otherwise.
585  */
586 int drm_sched_job_init(struct drm_sched_job *job,
587                        struct drm_sched_entity *entity,
588                        void *owner)
589 {
590         drm_sched_entity_select_rq(entity);
591         if (!entity->rq)
592                 return -ENOENT;
593
594         job->entity = entity;
595         job->s_fence = drm_sched_fence_alloc(entity, owner);
596         if (!job->s_fence)
597                 return -ENOMEM;
598
599         INIT_LIST_HEAD(&job->list);
600
601         xa_init_flags(&job->dependencies, XA_FLAGS_ALLOC);
602
603         return 0;
604 }
605 EXPORT_SYMBOL(drm_sched_job_init);
606
607 /**
608  * drm_sched_job_arm - arm a scheduler job for execution
609  * @job: scheduler job to arm
610  *
611  * This arms a scheduler job for execution. Specifically it initializes the
612  * &drm_sched_job.s_fence of @job, so that it can be attached to struct dma_resv
613  * or other places that need to track the completion of this job.
614  *
615  * Refer to drm_sched_entity_push_job() documentation for locking
616  * considerations.
617  *
618  * This can only be called if drm_sched_job_init() succeeded.
619  */
620 void drm_sched_job_arm(struct drm_sched_job *job)
621 {
622         struct drm_gpu_scheduler *sched;
623         struct drm_sched_entity *entity = job->entity;
624
625         BUG_ON(!entity);
626
627         sched = entity->rq->sched;
628
629         job->sched = sched;
630         job->s_priority = entity->rq - sched->sched_rq;
631         job->id = atomic64_inc_return(&sched->job_id_count);
632
633         drm_sched_fence_init(job->s_fence, job->entity);
634 }
635 EXPORT_SYMBOL(drm_sched_job_arm);
636
637 /**
638  * drm_sched_job_add_dependency - adds the fence as a job dependency
639  * @job: scheduler job to add the dependencies to
640  * @fence: the dma_fence to add to the list of dependencies.
641  *
642  * Note that @fence is consumed in both the success and error cases.
643  *
644  * Returns:
645  * 0 on success, or an error on failing to expand the array.
646  */
647 int drm_sched_job_add_dependency(struct drm_sched_job *job,
648                                  struct dma_fence *fence)
649 {
650         struct dma_fence *entry;
651         unsigned long index;
652         u32 id = 0;
653         int ret;
654
655         if (!fence)
656                 return 0;
657
658         /* Deduplicate if we already depend on a fence from the same context.
659          * This lets the size of the array of deps scale with the number of
660          * engines involved, rather than the number of BOs.
661          */
662         xa_for_each(&job->dependencies, index, entry) {
663                 if (entry->context != fence->context)
664                         continue;
665
666                 if (dma_fence_is_later(fence, entry)) {
667                         dma_fence_put(entry);
668                         xa_store(&job->dependencies, index, fence, GFP_KERNEL);
669                 } else {
670                         dma_fence_put(fence);
671                 }
672                 return 0;
673         }
674
675         ret = xa_alloc(&job->dependencies, &id, fence, xa_limit_32b, GFP_KERNEL);
676         if (ret != 0)
677                 dma_fence_put(fence);
678
679         return ret;
680 }
681 EXPORT_SYMBOL(drm_sched_job_add_dependency);
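
/*
 * Example (illustrative only): explicit "in fences" from userspace, e.g. a
 * sync_file fd, are handed over with drm_sched_job_add_dependency(). Because
 * the fence reference is consumed even on failure, no extra dma_fence_put()
 * is needed in the error path. in_fence_fd, job->base and the label are
 * hypothetical driver-side names.
 *
 *	struct dma_fence *in_fence = sync_file_get_fence(in_fence_fd);
 *
 *	if (in_fence) {
 *		ret = drm_sched_job_add_dependency(&job->base, in_fence);
 *		if (ret)
 *			goto err_cleanup;
 *	}
 */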
682
683 /**
684  * drm_sched_job_add_implicit_dependencies - adds implicit dependencies as job
685  *   dependencies
686  * @job: scheduler job to add the dependencies to
687  * @obj: the gem object to add new dependencies from.
688  * @write: whether the job might write the object (so we need to depend on
689  * shared fences in the reservation object).
690  *
691  * This should be called after drm_gem_lock_reservations() on your array of
692  * GEM objects used in the job but before updating the reservations with your
693  * own fences.
694  *
695  * Returns:
696  * 0 on success, or an error on failing to expand the array.
697  */
698 int drm_sched_job_add_implicit_dependencies(struct drm_sched_job *job,
699                                             struct drm_gem_object *obj,
700                                             bool write)
701 {
702         struct dma_resv_iter cursor;
703         struct dma_fence *fence;
704         int ret;
705
706         dma_resv_for_each_fence(&cursor, obj->resv, write, fence) {
707                 /* Make sure to grab an additional ref on the added fence */
708                 dma_fence_get(fence);
709
710                 ret = drm_sched_job_add_dependency(job, fence);
711                 if (ret)
712                         return ret;
713         }
714         return 0;
715 }
716 EXPORT_SYMBOL(drm_sched_job_add_implicit_dependencies);
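
/*
 * Example (illustrative only): implicit dependencies are usually added right
 * after drm_sched_job_init(), with the reservations of all GEM objects of the
 * submission locked, and before the job is armed; on failure the job still
 * has to be cleaned up with drm_sched_job_cleanup(). objs/num_objs, job->base
 * and the labels are hypothetical driver-side names.
 *
 *	ret = drm_gem_lock_reservations(objs, num_objs, &acquire_ctx);
 *	if (ret)
 *		goto err_cleanup_job;
 *
 *	for (i = 0; i < num_objs; i++) {
 *		ret = drm_sched_job_add_implicit_dependencies(&job->base,
 *							      objs[i], write);
 *		if (ret)
 *			goto err_unlock;
 *	}
 *
 *	drm_sched_job_arm(&job->base);
 *	// ... push the job, add its finished fence to the reservations, then
 *	// drm_gem_unlock_reservations(objs, num_objs, &acquire_ctx);
 */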
717
718
719 /**
720  * drm_sched_job_cleanup - clean up scheduler job resources
721  * @job: scheduler job to clean up
722  *
723  * Cleans up the resources allocated with drm_sched_job_init().
724  *
725  * Drivers should call this from their error unwind code if @job is aborted
726  * before drm_sched_job_arm() is called.
727  *
728  * After that point of no return @job is committed to be executed by the
729  * scheduler, and this function should be called from the
730  * &drm_sched_backend_ops.free_job callback.
731  */
732 void drm_sched_job_cleanup(struct drm_sched_job *job)
733 {
734         struct dma_fence *fence;
735         unsigned long index;
736
737         if (kref_read(&job->s_fence->finished.refcount)) {
738                 /* drm_sched_job_arm() has been called */
739                 dma_fence_put(&job->s_fence->finished);
740         } else {
741                 /* aborted job before committing to run it */
742                 drm_sched_fence_free(job->s_fence);
743         }
744
745         job->s_fence = NULL;
746
747         xa_for_each(&job->dependencies, index, fence) {
748                 dma_fence_put(fence);
749         }
750         xa_destroy(&job->dependencies);
751
752 }
753 EXPORT_SYMBOL(drm_sched_job_cleanup);
754
755 /**
756  * drm_sched_ready - is the scheduler ready
757  *
758  * @sched: scheduler instance
759  *
760  * Return true if we can push more jobs to the hw, otherwise false.
761  */
762 static bool drm_sched_ready(struct drm_gpu_scheduler *sched)
763 {
764         return atomic_read(&sched->hw_rq_count) <
765                 sched->hw_submission_limit;
766 }
767
768 /**
769  * drm_sched_wakeup - Wake up the scheduler when it is ready
770  *
771  * @sched: scheduler instance
772  *
773  */
774 void drm_sched_wakeup(struct drm_gpu_scheduler *sched)
775 {
776         if (drm_sched_ready(sched))
777                 wake_up_interruptible(&sched->wake_up_worker);
778 }
779
780 /**
781  * drm_sched_select_entity - Select next entity to process
782  *
783  * @sched: scheduler instance
784  *
785  * Returns the entity to process or NULL if none are found.
786  */
787 static struct drm_sched_entity *
788 drm_sched_select_entity(struct drm_gpu_scheduler *sched)
789 {
790         struct drm_sched_entity *entity;
791         int i;
792
793         if (!drm_sched_ready(sched))
794                 return NULL;
795
796         /* Kernel run queue has higher priority than normal run queue */
797         for (i = DRM_SCHED_PRIORITY_COUNT - 1; i >= DRM_SCHED_PRIORITY_MIN; i--) {
798                 entity = drm_sched_rq_select_entity(&sched->sched_rq[i]);
799                 if (entity)
800                         break;
801         }
802
803         return entity;
804 }
805
806 /**
807  * drm_sched_get_cleanup_job - fetch the next finished job to be destroyed
808  *
809  * @sched: scheduler instance
810  *
811  * Returns the next finished job from the pending list (if there is one)
812  * that is ready to be destroyed.
813  */
814 static struct drm_sched_job *
815 drm_sched_get_cleanup_job(struct drm_gpu_scheduler *sched)
816 {
817         struct drm_sched_job *job, *next;
818
819         spin_lock(&sched->job_list_lock);
820
821         job = list_first_entry_or_null(&sched->pending_list,
822                                        struct drm_sched_job, list);
823
824         if (job && dma_fence_is_signaled(&job->s_fence->finished)) {
825                 /* remove job from pending_list */
826                 list_del_init(&job->list);
827
828                 /* cancel this job's TO timer */
829                 cancel_delayed_work(&sched->work_tdr);
830                 /* make the scheduled timestamp more accurate */
831                 next = list_first_entry_or_null(&sched->pending_list,
832                                                 typeof(*next), list);
833
834                 if (next) {
835                         next->s_fence->scheduled.timestamp =
836                                 job->s_fence->finished.timestamp;
837                         /* start TO timer for next job */
838                         drm_sched_start_timeout(sched);
839                 }
840         } else {
841                 job = NULL;
842         }
843
844         spin_unlock(&sched->job_list_lock);
845
846         return job;
847 }
848
849 /**
850  * drm_sched_pick_best - Get a drm sched from a sched_list with the least load
851  * @sched_list: list of drm_gpu_schedulers
852  * @num_sched_list: number of drm_gpu_schedulers in the sched_list
853  *
854  * Returns a pointer to the sched with the least load, or NULL if none of the
855  * drm_gpu_schedulers are ready.
856  */
857 struct drm_gpu_scheduler *
858 drm_sched_pick_best(struct drm_gpu_scheduler **sched_list,
859                      unsigned int num_sched_list)
860 {
861         struct drm_gpu_scheduler *sched, *picked_sched = NULL;
862         int i;
863         unsigned int min_score = UINT_MAX, num_score;
864
865         for (i = 0; i < num_sched_list; ++i) {
866                 sched = sched_list[i];
867
868                 if (!sched->ready) {
869                         DRM_WARN("scheduler %s is not ready, skipping",
870                                  sched->name);
871                         continue;
872                 }
873
874                 num_score = atomic_read(sched->score);
875                 if (num_score < min_score) {
876                         min_score = num_score;
877                         picked_sched = sched;
878                 }
879         }
880
881         return picked_sched;
882 }
883 EXPORT_SYMBOL(drm_sched_pick_best);
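
/*
 * Example (illustrative only): drivers that expose several schedulers for the
 * same engine class can use drm_sched_pick_best() to place a new entity on
 * the least loaded one; sched_list/num_sched_list/entity are assumed to come
 * from the driver.
 *
 *	struct drm_gpu_scheduler *best;
 *
 *	best = drm_sched_pick_best(sched_list, num_sched_list);
 *	if (best)
 *		ret = drm_sched_entity_init(entity, DRM_SCHED_PRIORITY_NORMAL,
 *					    &best, 1, NULL);
 */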
884
885 /**
886  * drm_sched_blocked - check if the scheduler is blocked
887  *
888  * @sched: scheduler instance
889  *
890  * Returns true if blocked, otherwise false.
891  */
892 static bool drm_sched_blocked(struct drm_gpu_scheduler *sched)
893 {
894         if (kthread_should_park()) {
895                 kthread_parkme();
896                 return true;
897         }
898
899         return false;
900 }
901
902 /**
903  * drm_sched_main - main scheduler thread
904  *
905  * @param: scheduler instance
906  *
907  * Returns 0.
908  */
909 static int drm_sched_main(void *param)
910 {
911         struct drm_gpu_scheduler *sched = (struct drm_gpu_scheduler *)param;
912         int r;
913
914         sched_set_fifo_low(current);
915
916         while (!kthread_should_stop()) {
917                 struct drm_sched_entity *entity = NULL;
918                 struct drm_sched_fence *s_fence;
919                 struct drm_sched_job *sched_job;
920                 struct dma_fence *fence;
921                 struct drm_sched_job *cleanup_job = NULL;
922
923                 wait_event_interruptible(sched->wake_up_worker,
924                                          (cleanup_job = drm_sched_get_cleanup_job(sched)) ||
925                                          (!drm_sched_blocked(sched) &&
926                                           (entity = drm_sched_select_entity(sched))) ||
927                                          kthread_should_stop());
928
929                 if (cleanup_job)
930                         sched->ops->free_job(cleanup_job);
931
932                 if (!entity)
933                         continue;
934
935                 sched_job = drm_sched_entity_pop_job(entity);
936
937                 if (!sched_job) {
938                         complete(&entity->entity_idle);
939                         continue;
940                 }
941
942                 s_fence = sched_job->s_fence;
943
944                 atomic_inc(&sched->hw_rq_count);
945                 drm_sched_job_begin(sched_job);
946
947                 trace_drm_run_job(sched_job, entity);
948                 fence = sched->ops->run_job(sched_job);
949                 complete(&entity->entity_idle);
950                 drm_sched_fence_scheduled(s_fence);
951
952                 if (!IS_ERR_OR_NULL(fence)) {
953                         s_fence->parent = dma_fence_get(fence);
954                         r = dma_fence_add_callback(fence, &sched_job->cb,
955                                                    drm_sched_job_done_cb);
956                         if (r == -ENOENT)
957                                 drm_sched_job_done(sched_job);
958                         else if (r)
959                                 DRM_ERROR("fence add callback failed (%d)\n",
960                                           r);
961                         dma_fence_put(fence);
962                 } else {
963                         if (IS_ERR(fence))
964                                 dma_fence_set_error(&s_fence->finished, PTR_ERR(fence));
965
966                         drm_sched_job_done(sched_job);
967                 }
968
969                 wake_up(&sched->job_scheduled);
970         }
971         return 0;
972 }
973
974 /**
975  * drm_sched_init - Init a gpu scheduler instance
976  *
977  * @sched: scheduler instance
978  * @ops: backend operations for this scheduler
979  * @hw_submission: number of hw submissions that can be in flight
980  * @hang_limit: number of times to allow a job to hang before dropping it
981  * @timeout: timeout value in jiffies for the scheduler
982  * @timeout_wq: workqueue to use for timeout work. If NULL, the system_wq is
983  *              used
984  * @score: optional score atomic shared with other schedulers
985  * @name: name used for debugging
986  *
987  * Return 0 on success, otherwise error code.
988  */
989 int drm_sched_init(struct drm_gpu_scheduler *sched,
990                    const struct drm_sched_backend_ops *ops,
991                    unsigned hw_submission, unsigned hang_limit,
992                    long timeout, struct workqueue_struct *timeout_wq,
993                    atomic_t *score, const char *name)
994 {
995         int i, ret;
996         sched->ops = ops;
997         sched->hw_submission_limit = hw_submission;
998         sched->name = name;
999         sched->timeout = timeout;
1000         sched->timeout_wq = timeout_wq ? : system_wq;
1001         sched->hang_limit = hang_limit;
1002         sched->score = score ? score : &sched->_score;
1003         for (i = DRM_SCHED_PRIORITY_MIN; i < DRM_SCHED_PRIORITY_COUNT; i++)
1004                 drm_sched_rq_init(sched, &sched->sched_rq[i]);
1005
1006         init_waitqueue_head(&sched->wake_up_worker);
1007         init_waitqueue_head(&sched->job_scheduled);
1008         INIT_LIST_HEAD(&sched->pending_list);
1009         spin_lock_init(&sched->job_list_lock);
1010         atomic_set(&sched->hw_rq_count, 0);
1011         INIT_DELAYED_WORK(&sched->work_tdr, drm_sched_job_timedout);
1012         atomic_set(&sched->_score, 0);
1013         atomic64_set(&sched->job_id_count, 0);
1014
1015         /* Each scheduler will run on a separate kernel thread */
1016         sched->thread = kthread_run(drm_sched_main, sched, sched->name);
1017         if (IS_ERR(sched->thread)) {
1018                 ret = PTR_ERR(sched->thread);
1019                 sched->thread = NULL;
1020                 DRM_ERROR("Failed to create scheduler for %s.\n", name);
1021                 return ret;
1022         }
1023
1024         sched->ready = true;
1025         return 0;
1026 }
1027 EXPORT_SYMBOL(drm_sched_init);
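
/*
 * Example (illustrative only): a driver typically creates one scheduler per
 * hardware ring at device init time; the numeric values below are just
 * placeholders, and my_sched_ops/ring are hypothetical driver objects.
 *
 *	ret = drm_sched_init(&ring->sched, &my_sched_ops,
 *			     64,			// hw_submission
 *			     3,				// hang_limit
 *			     msecs_to_jiffies(10000),	// timeout
 *			     NULL,			// timeout_wq: use system_wq
 *			     NULL,			// score: use sched's own
 *			     ring->name);
 */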
1028
1029 /**
1030  * drm_sched_fini - Destroy a gpu scheduler
1031  *
1032  * @sched: scheduler instance
1033  *
1034  * Tears down and cleans up the scheduler.
1035  */
1036 void drm_sched_fini(struct drm_gpu_scheduler *sched)
1037 {
1038         struct drm_sched_entity *s_entity;
1039         int i;
1040
1041         if (sched->thread)
1042                 kthread_stop(sched->thread);
1043
1044         for (i = DRM_SCHED_PRIORITY_COUNT - 1; i >= DRM_SCHED_PRIORITY_MIN; i--) {
1045                 struct drm_sched_rq *rq = &sched->sched_rq[i];
1046
1047                 if (!rq)
1048                         continue;
1049
1050                 spin_lock(&rq->lock);
1051                 list_for_each_entry(s_entity, &rq->entities, list)
1052                         /*
1053                          * Prevents reinsertion and marks job_queue as idle,
1054                          * it will be removed from the rq in drm_sched_entity_fini()
1055                          * eventually.
1056                          */
1057                         s_entity->stopped = true;
1058                 spin_unlock(&rq->lock);
1059
1060         }
1061
1062         /* Wakeup everyone stuck in drm_sched_entity_flush for this scheduler */
1063         wake_up_all(&sched->job_scheduled);
1064
1065         /* Confirm no work left behind accessing device structures */
1066         cancel_delayed_work_sync(&sched->work_tdr);
1067
1068         sched->ready = false;
1069 }
1070 EXPORT_SYMBOL(drm_sched_fini);
1071
1072 /**
1073  * drm_sched_increase_karma_ext - Update sched_entity guilty flag
1074  *
1075  * @bad: The job guilty of time out
1076  * @type: 1 to increase karma, 0 to reset it
1077  *
1078  */
1079 void drm_sched_increase_karma_ext(struct drm_sched_job *bad, int type)
1080 {
1081         int i;
1082         struct drm_sched_entity *tmp;
1083         struct drm_sched_entity *entity;
1084         struct drm_gpu_scheduler *sched = bad->sched;
1085
1086         /* Don't change @bad's karma if it's from the KERNEL RQ, because a GPU
1087          * hang can sometimes corrupt kernel jobs (like VM updating jobs), but
1088          * keep in mind that kernel jobs are always considered good.
1089          */
1090         if (bad->s_priority != DRM_SCHED_PRIORITY_KERNEL) {
1091                 if (type == 0)
1092                         atomic_set(&bad->karma, 0);
1093                 else if (type == 1)
1094                         atomic_inc(&bad->karma);
1095
1096                 for (i = DRM_SCHED_PRIORITY_MIN; i < DRM_SCHED_PRIORITY_KERNEL;
1097                      i++) {
1098                         struct drm_sched_rq *rq = &sched->sched_rq[i];
1099
1100                         spin_lock(&rq->lock);
1101                         list_for_each_entry_safe(entity, tmp, &rq->entities, list) {
1102                                 if (bad->s_fence->scheduled.context ==
1103                                     entity->fence_context) {
1104                                         if (entity->guilty)
1105                                                 atomic_set(entity->guilty, type);
1106                                         break;
1107                                 }
1108                         }
1109                         spin_unlock(&rq->lock);
1110                         if (&entity->list != &rq->entities)
1111                                 break;
1112                 }
1113         }
1114 }
1115 EXPORT_SYMBOL(drm_sched_increase_karma_ext);