drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/kthread.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <drm/drmP.h>
#include "gpu_scheduler.h"

#define CREATE_TRACE_POINTS
#include "gpu_sched_trace.h"

static bool amd_sched_entity_is_ready(struct amd_sched_entity *entity);
static void amd_sched_wakeup(struct amd_gpu_scheduler *sched);

/*
 * Fence slab shared by all scheduler instances; created by the first
 * amd_sched_init() call and destroyed by the last amd_sched_fini().
 */
struct kmem_cache *sched_fence_slab;
atomic_t sched_fence_slab_ref = ATOMIC_INIT(0);

/* Initialize a given run queue struct */
static void amd_sched_rq_init(struct amd_sched_rq *rq)
{
	spin_lock_init(&rq->lock);
	INIT_LIST_HEAD(&rq->entities);
	rq->current_entity = NULL;
}

static void amd_sched_rq_add_entity(struct amd_sched_rq *rq,
				    struct amd_sched_entity *entity)
{
	if (!list_empty(&entity->list))
		return;
	spin_lock(&rq->lock);
	list_add_tail(&entity->list, &rq->entities);
	spin_unlock(&rq->lock);
}

static void amd_sched_rq_remove_entity(struct amd_sched_rq *rq,
				       struct amd_sched_entity *entity)
{
	if (list_empty(&entity->list))
		return;
	spin_lock(&rq->lock);
	list_del_init(&entity->list);
	if (rq->current_entity == entity)
		rq->current_entity = NULL;
	spin_unlock(&rq->lock);
}

/**
 * Select an entity which could provide a job to run
 *
 * @rq		The run queue to check.
 *
 * Try to find a ready entity; the search resumes after the entity picked
 * last time, so all entities on the run queue are served round-robin.
 * Returns NULL if none is found.
 */
static struct amd_sched_entity *
amd_sched_rq_select_entity(struct amd_sched_rq *rq)
{
	struct amd_sched_entity *entity;

	spin_lock(&rq->lock);

	entity = rq->current_entity;
	if (entity) {
		list_for_each_entry_continue(entity, &rq->entities, list) {
			if (amd_sched_entity_is_ready(entity)) {
				rq->current_entity = entity;
				spin_unlock(&rq->lock);
				return entity;
			}
		}
	}

	/* Wrap around and retry the entities we skipped above */
	list_for_each_entry(entity, &rq->entities, list) {
		if (amd_sched_entity_is_ready(entity)) {
			rq->current_entity = entity;
			spin_unlock(&rq->lock);
			return entity;
		}

		if (entity == rq->current_entity)
			break;
	}

	spin_unlock(&rq->lock);

	return NULL;
}

/**
 * Init a context entity used by scheduler when submit to HW ring.
 *
 * @sched	The pointer to the scheduler
 * @entity	The pointer to a valid amd_sched_entity
 * @rq		The run queue this entity belongs to
 * @jobs	The max number of jobs in the job queue
 *
 * Return 0 on success, negative error code on failure.
 */
int amd_sched_entity_init(struct amd_gpu_scheduler *sched,
			  struct amd_sched_entity *entity,
			  struct amd_sched_rq *rq,
			  uint32_t jobs)
{
	int r;

	if (!(sched && entity && rq))
		return -EINVAL;

	memset(entity, 0, sizeof(struct amd_sched_entity));
	INIT_LIST_HEAD(&entity->list);
	entity->rq = rq;
	entity->sched = sched;

	spin_lock_init(&entity->queue_lock);
	/* kfifo_alloc rounds the size up to a power of two internally */
	r = kfifo_alloc(&entity->job_queue, jobs * sizeof(void *), GFP_KERNEL);
	if (r)
		return r;

	atomic_set(&entity->fence_seq, 0);
	entity->fence_context = fence_context_alloc(1);

	return 0;
}
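
/*
 * Illustrative use (hypothetical driver code, not part of this file):
 * a context would typically own one entity per ring and initialize it
 * against one of the scheduler's run queues, e.g.
 *
 *	r = amd_sched_entity_init(&ring->sched, &ctx->entity,
 *				  &ring->sched.sched_rq[0], 32);
 *
 * "ring", "ctx" and the queue depth 32 are made-up names/values; run
 * queue index 0 is serviced first (see amd_sched_select_entity()).
 */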

/**
 * Query if entity is initialized
 *
 * @sched	Pointer to scheduler instance
 * @entity	The pointer to a valid scheduler entity
 *
 * Return true if the entity is initialized, false otherwise.
 */
static bool amd_sched_entity_is_initialized(struct amd_gpu_scheduler *sched,
					    struct amd_sched_entity *entity)
{
	return entity->sched == sched &&
		entity->rq != NULL;
}

/**
 * Check if entity is idle
 *
 * @entity	The pointer to a valid scheduler entity
 *
 * Return true if the entity has no unscheduled jobs left.
 */
static bool amd_sched_entity_is_idle(struct amd_sched_entity *entity)
{
	rmb(); /* make sure we see the most recent fifo state */
	return kfifo_is_empty(&entity->job_queue);
}

/**
 * Check if entity is ready
 *
 * @entity	The pointer to a valid scheduler entity
 *
 * Return true if the entity could provide a job.
 */
static bool amd_sched_entity_is_ready(struct amd_sched_entity *entity)
{
	if (kfifo_is_empty(&entity->job_queue))
		return false;

	/* An unresolved dependency blocks the whole entity */
	if (ACCESS_ONCE(entity->dependency))
		return false;

	return true;
}

/**
 * Destroy a context entity
 *
 * @sched	Pointer to scheduler instance
 * @entity	The pointer to a valid scheduler entity
 *
 * Cleanup and free the allocated resources.
 */
void amd_sched_entity_fini(struct amd_gpu_scheduler *sched,
			   struct amd_sched_entity *entity)
{
	struct amd_sched_rq *rq = entity->rq;

	if (!amd_sched_entity_is_initialized(sched, entity))
		return;

	/*
	 * The client will not queue more IBs during this fini, consume
	 * existing queued IBs; this relies on the scheduler thread still
	 * running to drain the fifo and signal job_scheduled.
	 */
	wait_event(sched->job_scheduled, amd_sched_entity_is_idle(entity));

	amd_sched_rq_remove_entity(rq, entity);
	kfifo_free(&entity->job_queue);
}

static void amd_sched_entity_wakeup(struct fence *f, struct fence_cb *cb)
{
	struct amd_sched_entity *entity =
		container_of(cb, struct amd_sched_entity, cb);
	entity->dependency = NULL;
	fence_put(f);
	amd_sched_wakeup(entity->sched);
}

static void amd_sched_entity_clear_dep(struct fence *f, struct fence_cb *cb)
{
	struct amd_sched_entity *entity =
		container_of(cb, struct amd_sched_entity, cb);
	entity->dependency = NULL;
	fence_put(f);
}

static bool amd_sched_entity_add_dependency_cb(struct amd_sched_entity *entity)
{
	struct amd_gpu_scheduler *sched = entity->sched;
	struct fence *fence = entity->dependency;
	struct amd_sched_fence *s_fence;

	if (fence->context == entity->fence_context) {
		/* We can ignore fences from ourself */
		fence_put(entity->dependency);
		return false;
	}

	s_fence = to_amd_sched_fence(fence);
	if (s_fence && s_fence->sched == sched) {
		/* Fence is from the same scheduler */
		if (test_bit(AMD_SCHED_FENCE_SCHEDULED_BIT, &fence->flags)) {
			/* Ignore it when it is already scheduled */
			fence_put(entity->dependency);
			return false;
		}

		/* Wait for fence to be scheduled */
		entity->cb.func = amd_sched_entity_clear_dep;
		list_add_tail(&entity->cb.node, &s_fence->scheduled_cb);
		return true;
	}

	if (!fence_add_callback(entity->dependency, &entity->cb,
				amd_sched_entity_wakeup))
		return true;

	fence_put(entity->dependency);
	return false;
}

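/*
 * Peek at the next runnable job of an entity. If the job still has
 * unsignaled dependencies, install a callback on the first one and bail
 * out; the callback wakes the scheduler again once the dependency is
 * resolved. The job itself stays in the fifo until amd_sched_main() has
 * handed it to the hardware.
 */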
static struct amd_sched_job *
amd_sched_entity_pop_job(struct amd_sched_entity *entity)
{
	struct amd_gpu_scheduler *sched = entity->sched;
	struct amd_sched_job *sched_job;

	if (!kfifo_out_peek(&entity->job_queue, &sched_job, sizeof(sched_job)))
		return NULL;

	while ((entity->dependency = sched->ops->dependency(sched_job)))
		if (amd_sched_entity_add_dependency_cb(entity))
			return NULL;

	return sched_job;
}

/**
 * Helper to submit a job to the job queue
 *
 * @sched_job	The pointer to job required to submit
 *
 * Returns true if the job was queued, false if the entity's fifo was full.
 */
static bool amd_sched_entity_in(struct amd_sched_job *sched_job)
{
	struct amd_gpu_scheduler *sched = sched_job->sched;
	struct amd_sched_entity *entity = sched_job->s_entity;
	bool added, first = false;

	spin_lock(&entity->queue_lock);
	added = kfifo_in(&entity->job_queue, &sched_job,
			sizeof(sched_job)) == sizeof(sched_job);

	if (added && kfifo_len(&entity->job_queue) == sizeof(sched_job))
		first = true;

	spin_unlock(&entity->queue_lock);

	/* first job wakes up scheduler */
	if (first) {
		/* Add the entity to the run queue */
		amd_sched_rq_add_entity(entity->rq, entity);
		amd_sched_wakeup(sched);
	}
	return added;
}

/**
 * Submit a job to the job queue
 *
 * @sched_job	The pointer to job required to submit
 *
 * Blocks until the job could be queued; the scheduler thread wakes us
 * through job_scheduled each time it consumes a job from the fifo.
 */
void amd_sched_entity_push_job(struct amd_sched_job *sched_job)
{
	struct amd_sched_entity *entity = sched_job->s_entity;

	trace_amd_sched_job(sched_job);
	wait_event(entity->sched->job_scheduled,
		   amd_sched_entity_in(sched_job));
}
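
/*
 * Illustrative use (hypothetical driver code): after filling in
 * sched_job->sched, sched_job->s_entity and sched_job->s_fence, a
 * driver simply calls amd_sched_entity_push_job(sched_job); the call
 * may block while the entity's fifo is full.
 */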

/**
 * Return true if we can push more jobs to the hw.
 */
static bool amd_sched_ready(struct amd_gpu_scheduler *sched)
{
	return atomic_read(&sched->hw_rq_count) <
		sched->hw_submission_limit;
}

/**
 * Wake up the scheduler when it is ready
 */
static void amd_sched_wakeup(struct amd_gpu_scheduler *sched)
{
	if (amd_sched_ready(sched))
		wake_up_interruptible(&sched->wake_up_worker);
}

/**
 * Select next entity to process
 */
static struct amd_sched_entity *
amd_sched_select_entity(struct amd_gpu_scheduler *sched)
{
	struct amd_sched_entity *entity;
	int i;

	if (!amd_sched_ready(sched))
		return NULL;

	/* Kernel run queue has higher priority than normal run queue */
	for (i = 0; i < AMD_SCHED_MAX_PRIORITY; i++) {
		entity = amd_sched_rq_select_entity(&sched->sched_rq[i]);
		if (entity)
			break;
	}

	return entity;
}

static void amd_sched_process_job(struct fence *f, struct fence_cb *cb)
{
	struct amd_sched_fence *s_fence =
		container_of(cb, struct amd_sched_fence, cb);
	struct amd_gpu_scheduler *sched = s_fence->sched;
	unsigned long flags;

	atomic_dec(&sched->hw_rq_count);
	amd_sched_fence_signal(s_fence);
	if (sched->timeout != MAX_SCHEDULE_TIMEOUT) {
		cancel_delayed_work(&s_fence->dwork);
		spin_lock_irqsave(&sched->fence_list_lock, flags);
		list_del_init(&s_fence->list);
		spin_unlock_irqrestore(&sched->fence_list_lock, flags);
	}
	trace_amd_sched_process_job(s_fence);
	fence_put(&s_fence->base);
	wake_up_interruptible(&sched->wake_up_worker);
}

static void amd_sched_fence_work_func(struct work_struct *work)
{
	struct amd_sched_fence *s_fence =
		container_of(work, struct amd_sched_fence, dwork.work);
	struct amd_gpu_scheduler *sched = s_fence->sched;
	struct amd_sched_fence *entity, *tmp;
	unsigned long flags;

	DRM_ERROR("[%s] scheduler timed out!\n", sched->name);

	/* Clean all pending fences */
	spin_lock_irqsave(&sched->fence_list_lock, flags);
	list_for_each_entry_safe(entity, tmp, &sched->fence_list, list) {
		DRM_ERROR("  fence no %u\n", entity->base.seqno);
		cancel_delayed_work(&entity->dwork);
		list_del_init(&entity->list);
		fence_put(&entity->base);
	}
	spin_unlock_irqrestore(&sched->fence_list_lock, flags);
}
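/*
 * Main scheduler thread: waits until an entity is ready and the hardware
 * queue has room, peeks the entity's head job, hands it to the backend
 * via ops->run_job(), then dequeues the job and wakes blocked pushers.
 */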
static int amd_sched_main(void *param)
{
	struct sched_param sparam = {.sched_priority = 1};
	struct amd_gpu_scheduler *sched = (struct amd_gpu_scheduler *)param;
	int r, count;

	spin_lock_init(&sched->fence_list_lock);
	INIT_LIST_HEAD(&sched->fence_list);
	sched_setscheduler(current, SCHED_FIFO, &sparam);

	while (!kthread_should_stop()) {
		struct amd_sched_entity *entity;
		struct amd_sched_fence *s_fence;
		struct amd_sched_job *sched_job;
		struct fence *fence;
		unsigned long flags;

		wait_event_interruptible(sched->wake_up_worker,
			(entity = amd_sched_select_entity(sched)) ||
			kthread_should_stop());

		if (!entity)
			continue;

		sched_job = amd_sched_entity_pop_job(entity);
		if (!sched_job)
			continue;

		s_fence = sched_job->s_fence;

		/* Arm the timeout handler before touching the hardware */
		if (sched->timeout != MAX_SCHEDULE_TIMEOUT) {
			INIT_DELAYED_WORK(&s_fence->dwork, amd_sched_fence_work_func);
			schedule_delayed_work(&s_fence->dwork, sched->timeout);
			spin_lock_irqsave(&sched->fence_list_lock, flags);
			list_add_tail(&s_fence->list, &sched->fence_list);
			spin_unlock_irqrestore(&sched->fence_list_lock, flags);
		}

		atomic_inc(&sched->hw_rq_count);
		fence = sched->ops->run_job(sched_job);
		amd_sched_fence_scheduled(s_fence);
		if (fence) {
			r = fence_add_callback(fence, &s_fence->cb,
					       amd_sched_process_job);
			if (r == -ENOENT)
				/* Fence already signaled, complete inline */
				amd_sched_process_job(fence, &s_fence->cb);
			else if (r)
				DRM_ERROR("fence add callback failed (%d)\n", r);
			fence_put(fence);
		} else {
			DRM_ERROR("Failed to run job!\n");
			amd_sched_process_job(NULL, &s_fence->cb);
		}

		/* Job handed over; drop it from the fifo and unblock pushers */
		count = kfifo_out(&entity->job_queue, &sched_job,
				sizeof(sched_job));
		WARN_ON(count != sizeof(sched_job));
		wake_up(&sched->job_scheduled);
	}
	return 0;
}

/**
 * Init a gpu scheduler instance
 *
 * @sched		The pointer to the scheduler
 * @ops			The backend operations for this scheduler.
 * @hw_submission	Max number of jobs submitted to the hardware at once.
 * @timeout		Job timeout in jiffies, MAX_SCHEDULE_TIMEOUT disables it.
 * @name		Name used for debugging
 *
 * Return 0 on success, otherwise error code.
 */
int amd_sched_init(struct amd_gpu_scheduler *sched,
		   struct amd_sched_backend_ops *ops,
		   unsigned hw_submission, long timeout, const char *name)
{
	int i;

	sched->ops = ops;
	sched->hw_submission_limit = hw_submission;
	sched->name = name;
	sched->timeout = timeout;
	for (i = 0; i < AMD_SCHED_MAX_PRIORITY; i++)
		amd_sched_rq_init(&sched->sched_rq[i]);

	init_waitqueue_head(&sched->wake_up_worker);
	init_waitqueue_head(&sched->job_scheduled);
	atomic_set(&sched->hw_rq_count, 0);
	if (atomic_inc_return(&sched_fence_slab_ref) == 1) {
		sched_fence_slab = kmem_cache_create(
			"amd_sched_fence", sizeof(struct amd_sched_fence), 0,
			SLAB_HWCACHE_ALIGN, NULL);
		if (!sched_fence_slab) {
			/* Drop the reference we just took */
			atomic_dec(&sched_fence_slab_ref);
			return -ENOMEM;
		}
	}

	/* Each scheduler will run on a separate kernel thread */
	sched->thread = kthread_run(amd_sched_main, sched, sched->name);
	if (IS_ERR(sched->thread)) {
		DRM_ERROR("Failed to create scheduler for %s.\n", name);
		return PTR_ERR(sched->thread);
	}

	return 0;
}
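
/*
 * Illustrative wiring (hypothetical driver code, not part of this file):
 *
 *	static struct fence *my_dep(struct amd_sched_job *sched_job)
 *	{ ... return the next fence the job must wait for, or NULL ... }
 *
 *	static struct fence *my_run(struct amd_sched_job *sched_job)
 *	{ ... push the job to the hardware ring, return its fence ... }
 *
 *	static struct amd_sched_backend_ops my_ops = {
 *		.dependency = my_dep,
 *		.run_job = my_run,
 *	};
 *
 *	r = amd_sched_init(&ring->sched, &my_ops, 16,
 *			   MAX_SCHEDULE_TIMEOUT, "my_ring");
 *
 * "my_*" and "ring" are made-up names; the callback signatures follow
 * how ops->dependency() and ops->run_job() are invoked above.
 */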

/**
 * Destroy a gpu scheduler
 *
 * @sched	The pointer to the scheduler
 */
void amd_sched_fini(struct amd_gpu_scheduler *sched)
{
	if (sched->thread)
		kthread_stop(sched->thread);
	if (atomic_dec_and_test(&sched_fence_slab_ref))
		kmem_cache_destroy(sched_fence_slab);
}