/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/kthread.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <drm/drmP.h>
#include "gpu_scheduler.h"

#define CREATE_TRACE_POINTS
#include "gpu_sched_trace.h"

static bool amd_sched_entity_is_ready(struct amd_sched_entity *entity);
static void amd_sched_wakeup(struct amd_gpu_scheduler *sched);
static void amd_sched_process_job(struct fence *f, struct fence_cb *cb);

/*
 * Fence slab shared by all scheduler instances; refcounted so that the
 * first amd_sched_init() creates it and the last amd_sched_fini() frees it.
 */
struct kmem_cache *sched_fence_slab;
atomic_t sched_fence_slab_ref = ATOMIC_INIT(0);

/* Initialize a given run queue struct */
static void amd_sched_rq_init(struct amd_sched_rq *rq)
{
        spin_lock_init(&rq->lock);
        INIT_LIST_HEAD(&rq->entities);
        rq->current_entity = NULL;
}

static void amd_sched_rq_add_entity(struct amd_sched_rq *rq,
                                    struct amd_sched_entity *entity)
{
        if (!list_empty(&entity->list))
                return;
        spin_lock(&rq->lock);
        list_add_tail(&entity->list, &rq->entities);
        spin_unlock(&rq->lock);
}

static void amd_sched_rq_remove_entity(struct amd_sched_rq *rq,
                                       struct amd_sched_entity *entity)
{
        if (list_empty(&entity->list))
                return;
        spin_lock(&rq->lock);
        list_del_init(&entity->list);
        if (rq->current_entity == entity)
                rq->current_entity = NULL;
        spin_unlock(&rq->lock);
}

/**
 * Select an entity which could provide a job to run
 *
 * @rq		The run queue to check.
 *
 * Try to find a ready entity, returns NULL if none found. Entities are
 * checked in round-robin order, continuing after the previously selected
 * one, so every entity on the run queue gets a fair share.
 */
static struct amd_sched_entity *
amd_sched_rq_select_entity(struct amd_sched_rq *rq)
{
        struct amd_sched_entity *entity;

        spin_lock(&rq->lock);

        entity = rq->current_entity;
        if (entity) {
                list_for_each_entry_continue(entity, &rq->entities, list) {
                        if (amd_sched_entity_is_ready(entity)) {
                                rq->current_entity = entity;
                                spin_unlock(&rq->lock);
                                return entity;
                        }
                }
        }

        list_for_each_entry(entity, &rq->entities, list) {

                if (amd_sched_entity_is_ready(entity)) {
                        rq->current_entity = entity;
                        spin_unlock(&rq->lock);
                        return entity;
                }

                if (entity == rq->current_entity)
                        break;
        }

        spin_unlock(&rq->lock);

        return NULL;
}

/**
 * Init a context entity used by the scheduler when submitting to a HW ring.
 *
 * @sched	The pointer to the scheduler
 * @entity	The pointer to a valid amd_sched_entity
 * @rq		The run queue this entity belongs to
 * @jobs	The max number of jobs in the job queue
 *
 * Returns 0 on success, negative error code on failure.
 */
int amd_sched_entity_init(struct amd_gpu_scheduler *sched,
                          struct amd_sched_entity *entity,
                          struct amd_sched_rq *rq,
                          uint32_t jobs)
{
        int r;

        if (!(sched && entity && rq))
                return -EINVAL;

        memset(entity, 0, sizeof(struct amd_sched_entity));
        INIT_LIST_HEAD(&entity->list);
        entity->rq = rq;
        entity->sched = sched;

        spin_lock_init(&entity->queue_lock);
        r = kfifo_alloc(&entity->job_queue, jobs * sizeof(void *), GFP_KERNEL);
        if (r)
                return r;

        atomic_set(&entity->fence_seq, 0);
        /* two fence contexts: one for the "scheduled" and one for the
         * "finished" fence of each job */
        entity->fence_context = fence_context_alloc(2);

        return 0;
}

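/*
 * Illustrative sketch only (not part of the scheduler): a driver such as
 * amdgpu would typically create one entity per context and ring, e.g.:
 *
 *	struct amd_sched_rq *rq =
 *		&ring->sched.sched_rq[AMD_SCHED_PRIORITY_NORMAL];
 *	r = amd_sched_entity_init(&ring->sched, &ctx->entity, rq,
 *				  amdgpu_sched_jobs);
 *
 * "ctx", "ring" and "amdgpu_sched_jobs" are assumed driver-side names.
 */
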
/**
 * Query if entity is initialized
 *
 * @sched	Pointer to scheduler instance
 * @entity	The pointer to a valid scheduler entity
 *
 * return true if entity is initialized, false otherwise
 */
static bool amd_sched_entity_is_initialized(struct amd_gpu_scheduler *sched,
                                            struct amd_sched_entity *entity)
{
        return entity->sched == sched &&
                entity->rq != NULL;
}

/**
 * Check if entity is idle
 *
 * @entity	The pointer to a valid scheduler entity
 *
 * Return true if the entity doesn't have any unscheduled jobs.
 */
static bool amd_sched_entity_is_idle(struct amd_sched_entity *entity)
{
        /* make sure we see an up-to-date job_queue state */
        rmb();
        if (kfifo_is_empty(&entity->job_queue))
                return true;

        return false;
}

/**
 * Check if entity is ready
 *
 * @entity	The pointer to a valid scheduler entity
 *
 * Return true if entity could provide a job.
 */
static bool amd_sched_entity_is_ready(struct amd_sched_entity *entity)
{
        if (kfifo_is_empty(&entity->job_queue))
                return false;

        if (ACCESS_ONCE(entity->dependency))
                return false;

        return true;
}

/**
 * Destroy a context entity
 *
 * @sched	Pointer to scheduler instance
 * @entity	The pointer to a valid scheduler entity
 *
 * Cleanup and free the allocated resources.
 */
void amd_sched_entity_fini(struct amd_gpu_scheduler *sched,
                           struct amd_sched_entity *entity)
{
        struct amd_sched_rq *rq = entity->rq;

        if (!amd_sched_entity_is_initialized(sched, entity))
                return;

        /*
         * The client will not queue more IBs during this fini, consume
         * existing queued IBs
         */
        wait_event(sched->job_scheduled, amd_sched_entity_is_idle(entity));

        amd_sched_rq_remove_entity(rq, entity);
        kfifo_free(&entity->job_queue);
}

static void amd_sched_entity_wakeup(struct fence *f, struct fence_cb *cb)
{
        struct amd_sched_entity *entity =
                container_of(cb, struct amd_sched_entity, cb);
        entity->dependency = NULL;
        fence_put(f);
        amd_sched_wakeup(entity->sched);
}

static void amd_sched_entity_clear_dep(struct fence *f, struct fence_cb *cb)
{
        struct amd_sched_entity *entity =
                container_of(cb, struct amd_sched_entity, cb);
        entity->dependency = NULL;
        fence_put(f);
}

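/*
 * Handle the entity's current dependency fence. Returns true if a callback
 * was installed and the caller must wait for it to fire, false if the
 * dependency is already resolved (or can be ignored) and the next one
 * should be fetched.
 */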
static bool amd_sched_entity_add_dependency_cb(struct amd_sched_entity *entity)
{
        struct amd_gpu_scheduler *sched = entity->sched;
        struct fence *fence = entity->dependency;
        struct amd_sched_fence *s_fence;

        if (fence->context == entity->fence_context) {
                /* We can ignore fences from ourself */
                fence_put(entity->dependency);
                return false;
        }

        s_fence = to_amd_sched_fence(fence);
        if (s_fence && s_fence->sched == sched) {

                /*
                 * Fence is from the same scheduler, only need to wait for
                 * it to be scheduled
                 */
                fence = fence_get(&s_fence->scheduled);
                fence_put(entity->dependency);
                entity->dependency = fence;
                if (!fence_add_callback(fence, &entity->cb,
                                        amd_sched_entity_clear_dep))
                        return true;

                /* Ignore it when it is already scheduled */
                fence_put(fence);
                return false;
        }

        if (!fence_add_callback(entity->dependency, &entity->cb,
                                amd_sched_entity_wakeup))
                return true;

        fence_put(entity->dependency);
        return false;
}

static struct amd_sched_job *
amd_sched_entity_pop_job(struct amd_sched_entity *entity)
{
        struct amd_gpu_scheduler *sched = entity->sched;
        struct amd_sched_job *sched_job;

        if (!kfifo_out_peek(&entity->job_queue, &sched_job, sizeof(sched_job)))
                return NULL;

        while ((entity->dependency = sched->ops->dependency(sched_job)))
                if (amd_sched_entity_add_dependency_cb(entity))
                        return NULL;

        return sched_job;
}

/**
 * Helper to submit a job to the job queue
 *
 * @sched_job	The pointer to the job to submit
 *
 * Returns true if we could submit the job.
 */
static bool amd_sched_entity_in(struct amd_sched_job *sched_job)
{
        struct amd_gpu_scheduler *sched = sched_job->sched;
        struct amd_sched_entity *entity = sched_job->s_entity;
        bool added, first = false;

        spin_lock(&entity->queue_lock);
        added = kfifo_in(&entity->job_queue, &sched_job,
                         sizeof(sched_job)) == sizeof(sched_job);

        if (added && kfifo_len(&entity->job_queue) == sizeof(sched_job))
                first = true;

        spin_unlock(&entity->queue_lock);

        /* first job wakes up scheduler */
        if (first) {
                /* Add the entity to the run queue */
                amd_sched_rq_add_entity(entity->rq, entity);
                amd_sched_wakeup(sched);
        }
        return added;
}

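/*
 * Time-out handling: only the head of the ring_mirror_list has its delayed
 * work (work_tdr) armed. amd_sched_job_begin() arms it when the first job
 * is added, and amd_sched_job_finish() re-arms it for the next pending job,
 * so at any time only the oldest in-flight job is being timed.
 */
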
/*
 * amd_sched_job_finish() runs in process context after the hw fence has
 * signaled; it removes the job from the ring_mirror_list, re-arms the
 * time-out handler for the next pending job and frees the job.
 */
static void amd_sched_job_finish(struct work_struct *work)
{
        struct amd_sched_job *s_job = container_of(work, struct amd_sched_job,
                                                   finish_work);
        struct amd_gpu_scheduler *sched = s_job->sched;

        /* remove job from ring_mirror_list */
        spin_lock(&sched->job_list_lock);
        list_del_init(&s_job->node);
        if (sched->timeout != MAX_SCHEDULE_TIMEOUT) {
                struct amd_sched_job *next;

                spin_unlock(&sched->job_list_lock);
                cancel_delayed_work_sync(&s_job->work_tdr);
                spin_lock(&sched->job_list_lock);

                /* queue TDR for next job */
                next = list_first_entry_or_null(&sched->ring_mirror_list,
                                                struct amd_sched_job, node);

                if (next)
                        schedule_delayed_work(&next->work_tdr, sched->timeout);
        }
        spin_unlock(&sched->job_list_lock);
        sched->ops->free_job(s_job);
}

/* fence callbacks may run in atomic context, so just punt to a work item */
static void amd_sched_job_finish_cb(struct fence *f, struct fence_cb *cb)
{
        struct amd_sched_job *job = container_of(cb, struct amd_sched_job,
                                                 finish_cb);
        schedule_work(&job->finish_work);
}

static void amd_sched_job_begin(struct amd_sched_job *s_job)
{
        struct amd_gpu_scheduler *sched = s_job->sched;

        spin_lock(&sched->job_list_lock);
        list_add_tail(&s_job->node, &sched->ring_mirror_list);
        if (sched->timeout != MAX_SCHEDULE_TIMEOUT &&
            list_first_entry_or_null(&sched->ring_mirror_list,
                                     struct amd_sched_job, node) == s_job)
                schedule_delayed_work(&s_job->work_tdr, sched->timeout);
        spin_unlock(&sched->job_list_lock);
}

static void amd_sched_job_timedout(struct work_struct *work)
{
        struct amd_sched_job *job = container_of(work, struct amd_sched_job,
                                                 work_tdr.work);

        job->sched->ops->timedout_job(job);
}

void amd_sched_hw_job_reset(struct amd_gpu_scheduler *sched)
{
        struct amd_sched_job *s_job;

        spin_lock(&sched->job_list_lock);
        list_for_each_entry_reverse(s_job, &sched->ring_mirror_list, node) {
                if (fence_remove_callback(s_job->s_fence->parent,
                                          &s_job->s_fence->cb)) {
                        fence_put(s_job->s_fence->parent);
                        s_job->s_fence->parent = NULL;
                }
        }
        spin_unlock(&sched->job_list_lock);
}

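/*
 * amd_sched_hw_job_reset() and amd_sched_job_recovery() are intended to be
 * used as a pair around a GPU reset (as the amdgpu driver does): the former
 * detaches the hw fence callbacks from all jobs on the mirror list, the
 * latter resubmits those jobs to the re-initialized hardware and re-arms
 * the time-out handler.
 */
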
void amd_sched_job_recovery(struct amd_gpu_scheduler *sched)
{
        struct amd_sched_job *s_job;
        int r;

        spin_lock(&sched->job_list_lock);
        s_job = list_first_entry_or_null(&sched->ring_mirror_list,
                                         struct amd_sched_job, node);
        if (s_job)
                schedule_delayed_work(&s_job->work_tdr, sched->timeout);

        list_for_each_entry(s_job, &sched->ring_mirror_list, node) {
                struct amd_sched_fence *s_fence = s_job->s_fence;
                struct fence *fence = sched->ops->run_job(s_job);

                if (fence) {
                        s_fence->parent = fence_get(fence);
                        r = fence_add_callback(fence, &s_fence->cb,
                                               amd_sched_process_job);
                        if (r == -ENOENT)
                                amd_sched_process_job(fence, &s_fence->cb);
                        else if (r)
                                DRM_ERROR("fence add callback failed (%d)\n",
                                          r);
                        fence_put(fence);
                } else {
                        DRM_ERROR("Failed to run job!\n");
                        amd_sched_process_job(NULL, &s_fence->cb);
                }
        }
        spin_unlock(&sched->job_list_lock);
}

/**
 * Submit a job to the job queue
 *
 * @sched_job	The pointer to the job to submit
 *
 * Blocks until the job can be pushed into the entity's queue.
 */
void amd_sched_entity_push_job(struct amd_sched_job *sched_job)
{
        struct amd_sched_entity *entity = sched_job->s_entity;

        trace_amd_sched_job(sched_job);
        fence_add_callback(&sched_job->s_fence->finished, &sched_job->finish_cb,
                           amd_sched_job_finish_cb);
        wait_event(entity->sched->job_scheduled,
                   amd_sched_entity_in(sched_job));
}

/* init a sched_job with basic fields */
int amd_sched_job_init(struct amd_sched_job *job,
                       struct amd_gpu_scheduler *sched,
                       struct amd_sched_entity *entity,
                       void *owner)
{
        job->sched = sched;
        job->s_entity = entity;
        job->s_fence = amd_sched_fence_create(entity, owner);
        if (!job->s_fence)
                return -ENOMEM;

        INIT_WORK(&job->finish_work, amd_sched_job_finish);
        INIT_LIST_HEAD(&job->node);
        INIT_DELAYED_WORK(&job->work_tdr, amd_sched_job_timedout);

        return 0;
}

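/*
 * Illustrative submission flow (a sketch, assuming a driver-side job struct
 * that embeds amd_sched_job as its "base" member):
 *
 *	r = amd_sched_job_init(&job->base, &ring->sched, &ctx->entity, owner);
 *	if (r)
 *		return r;
 *	... fill in the backend-specific part of the job ...
 *	amd_sched_entity_push_job(&job->base);
 */
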
/**
 * Return true if we can push more jobs to the hw.
 */
static bool amd_sched_ready(struct amd_gpu_scheduler *sched)
{
        return atomic_read(&sched->hw_rq_count) <
                sched->hw_submission_limit;
}

/**
 * Wake up the scheduler when it is ready
 */
static void amd_sched_wakeup(struct amd_gpu_scheduler *sched)
{
        if (amd_sched_ready(sched))
                wake_up_interruptible(&sched->wake_up_worker);
}

/**
 * Select next entity to process
 */
static struct amd_sched_entity *
amd_sched_select_entity(struct amd_gpu_scheduler *sched)
{
        struct amd_sched_entity *entity;
        int i;

        if (!amd_sched_ready(sched))
                return NULL;

        /* Kernel run queue has higher priority than normal run queue */
        for (i = 0; i < AMD_SCHED_MAX_PRIORITY; i++) {
                entity = amd_sched_rq_select_entity(&sched->sched_rq[i]);
                if (entity)
                        break;
        }

        return entity;
}

static void amd_sched_process_job(struct fence *f, struct fence_cb *cb)
{
        struct amd_sched_fence *s_fence =
                container_of(cb, struct amd_sched_fence, cb);
        struct amd_gpu_scheduler *sched = s_fence->sched;

        atomic_dec(&sched->hw_rq_count);
        amd_sched_fence_finished(s_fence);

        trace_amd_sched_process_job(s_fence);
        fence_put(&s_fence->finished);
        wake_up_interruptible(&sched->wake_up_worker);
}

static bool amd_sched_blocked(struct amd_gpu_scheduler *sched)
{
        if (kthread_should_park()) {
                kthread_parkme();
                return true;
        }

        return false;
}

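/*
 * Main scheduler thread: waits until an entity with a runnable job exists
 * (and the hw queue has room), pops the job, hands it to the backend via
 * ops->run_job() and arms amd_sched_process_job() on the returned hw fence,
 * which later drops hw_rq_count and wakes the worker again.
 */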
static int amd_sched_main(void *param)
{
        struct sched_param sparam = {.sched_priority = 1};
        struct amd_gpu_scheduler *sched = (struct amd_gpu_scheduler *)param;
        int r, count;

        sched_setscheduler(current, SCHED_FIFO, &sparam);

        while (!kthread_should_stop()) {
                struct amd_sched_entity *entity = NULL;
                struct amd_sched_fence *s_fence;
                struct amd_sched_job *sched_job;
                struct fence *fence;

                wait_event_interruptible(sched->wake_up_worker,
                                         (!amd_sched_blocked(sched) &&
                                          (entity = amd_sched_select_entity(sched))) ||
                                         kthread_should_stop());

                if (!entity)
                        continue;

                sched_job = amd_sched_entity_pop_job(entity);
                if (!sched_job)
                        continue;

                s_fence = sched_job->s_fence;

                atomic_inc(&sched->hw_rq_count);
                amd_sched_job_begin(sched_job);

                fence = sched->ops->run_job(sched_job);
                amd_sched_fence_scheduled(s_fence);
                if (fence) {
                        s_fence->parent = fence_get(fence);
                        r = fence_add_callback(fence, &s_fence->cb,
                                               amd_sched_process_job);
                        if (r == -ENOENT)
                                amd_sched_process_job(fence, &s_fence->cb);
                        else if (r)
                                DRM_ERROR("fence add callback failed (%d)\n",
                                          r);
                        fence_put(fence);
                } else {
                        DRM_ERROR("Failed to run job!\n");
                        amd_sched_process_job(NULL, &s_fence->cb);
                }

                count = kfifo_out(&entity->job_queue, &sched_job,
                                  sizeof(sched_job));
                WARN_ON(count != sizeof(sched_job));
                wake_up(&sched->job_scheduled);
        }
        return 0;
}

/**
 * Init a gpu scheduler instance
 *
 * @sched		The pointer to the scheduler
 * @ops			The backend operations for this scheduler.
 * @hw_submission	Number of hw submissions that can be in flight.
 * @timeout		Job timeout in jiffies, or MAX_SCHEDULE_TIMEOUT to
 *			disable the time-out handler.
 * @name		Name used for debugging
 *
 * Return 0 on success, otherwise error code.
 */
int amd_sched_init(struct amd_gpu_scheduler *sched,
                   const struct amd_sched_backend_ops *ops,
                   unsigned hw_submission, long timeout, const char *name)
{
        int i;

        sched->ops = ops;
        sched->hw_submission_limit = hw_submission;
        sched->name = name;
        sched->timeout = timeout;
        for (i = 0; i < AMD_SCHED_MAX_PRIORITY; i++)
                amd_sched_rq_init(&sched->sched_rq[i]);

        init_waitqueue_head(&sched->wake_up_worker);
        init_waitqueue_head(&sched->job_scheduled);
        INIT_LIST_HEAD(&sched->ring_mirror_list);
        spin_lock_init(&sched->job_list_lock);
        atomic_set(&sched->hw_rq_count, 0);
        if (atomic_inc_return(&sched_fence_slab_ref) == 1) {
                sched_fence_slab = kmem_cache_create(
                        "amd_sched_fence", sizeof(struct amd_sched_fence), 0,
                        SLAB_HWCACHE_ALIGN, NULL);
                if (!sched_fence_slab)
                        return -ENOMEM;
        }

        /* Each scheduler will run on a separate kernel thread */
        sched->thread = kthread_run(amd_sched_main, sched, sched->name);
        if (IS_ERR(sched->thread)) {
                DRM_ERROR("Failed to create scheduler for %s.\n", name);
                return PTR_ERR(sched->thread);
        }

        return 0;
}

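/*
 * Illustrative use (a sketch; "amdgpu_sched_ops" and the surrounding names
 * are driver-side assumptions, not defined in this file):
 *
 *	r = amd_sched_init(&ring->sched, &amdgpu_sched_ops,
 *			   num_hw_submission, msecs_to_jiffies(timeout_ms),
 *			   ring->name);
 */
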
/**
 * Destroy a gpu scheduler
 *
 * @sched	The pointer to the scheduler
 */
void amd_sched_fini(struct amd_gpu_scheduler *sched)
{
        if (sched->thread)
                kthread_stop(sched->thread);
        if (atomic_dec_and_test(&sched_fence_slab_ref))
                kmem_cache_destroy(sched_fence_slab);
}