/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/completion.h>

#include <drm/drm_print.h>
#include <drm/gpu_scheduler.h>

#include "gpu_scheduler_trace.h"

#define to_drm_sched_job(sched_job) \
        container_of((sched_job), struct drm_sched_job, queue_node)

/**
 * drm_sched_entity_init - Initialize a context entity used by the scheduler
 * when submitting jobs to the HW ring.
 *
 * @entity: scheduler entity to init
 * @priority: priority of the entity
 * @sched_list: the list of drm scheds on which jobs from this
 *              entity can be submitted
 * @num_sched_list: number of drm scheds in sched_list
 * @guilty: atomic_t set to 1 when a job on this queue
 *          is found to be guilty causing a timeout
 *
 * Note that the &sched_list must have at least one element to schedule the entity.
 *
 * For changing @priority later on at runtime see
 * drm_sched_entity_set_priority(). For changing the set of schedulers
 * @sched_list at runtime see drm_sched_entity_modify_sched().
 *
 * An entity is cleaned up by calling drm_sched_entity_fini(). See also
 * drm_sched_entity_destroy().
 *
 * Returns 0 on success or a negative error code on failure.
 */
int drm_sched_entity_init(struct drm_sched_entity *entity,
                          enum drm_sched_priority priority,
                          struct drm_gpu_scheduler **sched_list,
                          unsigned int num_sched_list,
                          atomic_t *guilty)
{
        if (!(entity && sched_list && (num_sched_list == 0 || sched_list[0])))
                return -EINVAL;

        memset(entity, 0, sizeof(struct drm_sched_entity));
        INIT_LIST_HEAD(&entity->list);
        entity->rq = NULL;
        entity->guilty = guilty;
        entity->num_sched_list = num_sched_list;
        entity->priority = priority;
        entity->sched_list = num_sched_list > 1 ? sched_list : NULL;
        entity->last_scheduled = NULL;
        RB_CLEAR_NODE(&entity->rb_tree_node);

        if (num_sched_list)
                entity->rq = &sched_list[0]->sched_rq[entity->priority];

        init_completion(&entity->entity_idle);

        /* We start in an idle state. */
        complete_all(&entity->entity_idle);

        spin_lock_init(&entity->rq_lock);
        spsc_queue_init(&entity->job_queue);

        atomic_set(&entity->fence_seq, 0);
        entity->fence_context = dma_fence_context_alloc(2);

        return 0;
}
EXPORT_SYMBOL(drm_sched_entity_init);
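
/*
 * A minimal usage sketch (hypothetical driver code, not part of this file):
 * a driver with a single scheduler instance would typically initialize one
 * entity per context. my_dev and my_ctx are made-up names; passing NULL for
 * the guilty pointer opts out of guilty-job tracking.
 *
 *      struct drm_gpu_scheduler *sched_list[] = { &my_dev->sched };
 *      int ret;
 *
 *      ret = drm_sched_entity_init(&my_ctx->entity,
 *                                  DRM_SCHED_PRIORITY_NORMAL,
 *                                  sched_list, ARRAY_SIZE(sched_list),
 *                                  NULL);
 *      if (ret)
 *              return ret;
 */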

/**
 * drm_sched_entity_modify_sched - Modify sched of an entity
 * @entity: scheduler entity to modify
 * @sched_list: the list of new drm scheds which will replace
 *              existing entity->sched_list
 * @num_sched_list: number of drm scheds in sched_list
 *
 * Note that this must be called under the same common lock for @entity as
 * drm_sched_job_arm() and drm_sched_entity_push_job(), or the driver needs to
 * guarantee through some other means that this is never called while new jobs
 * can be pushed to @entity.
 */
void drm_sched_entity_modify_sched(struct drm_sched_entity *entity,
                                   struct drm_gpu_scheduler **sched_list,
                                   unsigned int num_sched_list)
{
        WARN_ON(!num_sched_list || !sched_list);

        entity->sched_list = sched_list;
        entity->num_sched_list = num_sched_list;
}
EXPORT_SYMBOL(drm_sched_entity_modify_sched);
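
/*
 * Sketch of the expected calling pattern (hypothetical driver code): the
 * switch must be serialized against job submission on the entity, e.g. by
 * the same lock the driver takes around drm_sched_job_arm() and
 * drm_sched_entity_push_job(). my_ctx and its lock are made-up names.
 *
 *      mutex_lock(&my_ctx->lock);
 *      drm_sched_entity_modify_sched(&my_ctx->entity, new_sched_list,
 *                                    new_num_sched_list);
 *      mutex_unlock(&my_ctx->lock);
 */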

static bool drm_sched_entity_is_idle(struct drm_sched_entity *entity)
{
        rmb(); /* for list_empty to work without lock */

        if (list_empty(&entity->list) ||
            spsc_queue_count(&entity->job_queue) == 0 ||
            entity->stopped)
                return true;

        return false;
}

/* Return true if entity could provide a job. */
bool drm_sched_entity_is_ready(struct drm_sched_entity *entity)
{
        if (spsc_queue_peek(&entity->job_queue) == NULL)
                return false;

        if (READ_ONCE(entity->dependency))
                return false;

        return true;
}

static void drm_sched_entity_kill_jobs_work(struct work_struct *wrk)
{
        struct drm_sched_job *job = container_of(wrk, typeof(*job), work);

        drm_sched_fence_finished(job->s_fence);
        WARN_ON(job->s_fence->parent);
        job->sched->ops->free_job(job);
}

/* Signal the scheduler finished fence when the entity in question is killed. */
static void drm_sched_entity_kill_jobs_cb(struct dma_fence *f,
                                          struct dma_fence_cb *cb)
{
        struct drm_sched_job *job = container_of(cb, struct drm_sched_job,
                                                 finish_cb);
        int r;

        dma_fence_put(f);

        /* Wait for all dependencies to avoid data corruption */
        while (!xa_empty(&job->dependencies)) {
                f = xa_erase(&job->dependencies, job->last_dependency++);
                r = dma_fence_add_callback(f, &job->finish_cb,
                                           drm_sched_entity_kill_jobs_cb);
                if (!r)
                        return;

                dma_fence_put(f);
        }

        INIT_WORK(&job->work, drm_sched_entity_kill_jobs_work);
        schedule_work(&job->work);
}

/* Remove the entity from the scheduler and kill all pending jobs */
static void drm_sched_entity_kill(struct drm_sched_entity *entity)
{
        struct drm_sched_job *job;
        struct dma_fence *prev;

        if (!entity->rq)
                return;

        spin_lock(&entity->rq_lock);
        entity->stopped = true;
        drm_sched_rq_remove_entity(entity->rq, entity);
        spin_unlock(&entity->rq_lock);

        /* Make sure this entity is not used by the scheduler at the moment */
        wait_for_completion(&entity->entity_idle);

        prev = dma_fence_get(entity->last_scheduled);
        while ((job = to_drm_sched_job(spsc_queue_pop(&entity->job_queue)))) {
                struct drm_sched_fence *s_fence = job->s_fence;

                dma_fence_set_error(&s_fence->finished, -ESRCH);

                dma_fence_get(&s_fence->finished);
                if (!prev || dma_fence_add_callback(prev, &job->finish_cb,
                                                    drm_sched_entity_kill_jobs_cb))
                        drm_sched_entity_kill_jobs_cb(NULL, &job->finish_cb);

                prev = &s_fence->finished;
        }
        dma_fence_put(prev);
}

/**
 * drm_sched_entity_flush - Flush a context entity
 *
 * @entity: scheduler entity
 * @timeout: time in jiffies to wait for the queue to become empty
 *
 * Splitting drm_sched_entity_fini() into two functions: the first one does the
 * waiting, removes the entity from the runqueue and returns an error when the
 * process was killed.
 *
 * Returns the remaining time in jiffies left from the input timeout
 */
long drm_sched_entity_flush(struct drm_sched_entity *entity, long timeout)
{
        struct drm_gpu_scheduler *sched;
        struct task_struct *last_user;
        long ret = timeout;

        if (!entity->rq)
                return 0;

        sched = entity->rq->sched;
        /*
         * The client will not queue more IBs during this fini; consume
         * existing queued IBs or discard them on SIGKILL.
         */
        if (current->flags & PF_EXITING) {
                if (timeout)
                        ret = wait_event_timeout(
                                        sched->job_scheduled,
                                        drm_sched_entity_is_idle(entity),
                                        timeout);
        } else {
                wait_event_killable(sched->job_scheduled,
                                    drm_sched_entity_is_idle(entity));
        }

        /* For a killed process, disable any further IB enqueue right now */
        last_user = cmpxchg(&entity->last_user, current->group_leader, NULL);
        if ((!last_user || last_user == current->group_leader) &&
            (current->flags & PF_EXITING) && (current->exit_code == SIGKILL))
                drm_sched_entity_kill(entity);

        return ret;
}
EXPORT_SYMBOL(drm_sched_entity_flush);

/**
 * drm_sched_entity_fini - Destroy a context entity
 *
 * @entity: scheduler entity
 *
 * Cleans up @entity which has been initialized by drm_sched_entity_init().
 *
 * If there are potentially jobs still in flight or getting newly queued
 * drm_sched_entity_flush() must be called first. This function then goes over
 * the entity and signals all jobs with an error code if the process was killed.
 */
void drm_sched_entity_fini(struct drm_sched_entity *entity)
{
        /*
         * If consumption of existing IBs wasn't completed, forcefully remove
         * them here. This also makes sure that the scheduler won't touch this
         * entity any more.
         */
        drm_sched_entity_kill(entity);

        if (entity->dependency) {
                dma_fence_remove_callback(entity->dependency, &entity->cb);
                dma_fence_put(entity->dependency);
                entity->dependency = NULL;
        }

        dma_fence_put(entity->last_scheduled);
        entity->last_scheduled = NULL;
}
EXPORT_SYMBOL(drm_sched_entity_fini);

/**
 * drm_sched_entity_destroy - Destroy a context entity
 * @entity: scheduler entity
 *
 * Calls drm_sched_entity_flush() and drm_sched_entity_fini() as a
 * convenience wrapper.
 */
void drm_sched_entity_destroy(struct drm_sched_entity *entity)
{
        drm_sched_entity_flush(entity, MAX_WAIT_SCHED_ENTITY_Q_EMPTY);
        drm_sched_entity_fini(entity);
}
EXPORT_SYMBOL(drm_sched_entity_destroy);
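
/*
 * Teardown sketch (hypothetical driver code): a driver can either use the
 * convenience wrapper above or call the two steps itself, e.g. to pass a
 * custom timeout to the flush. my_ctx is a made-up name and the timeout
 * value is purely illustrative.
 *
 *      drm_sched_entity_flush(&my_ctx->entity, msecs_to_jiffies(1000));
 *      drm_sched_entity_fini(&my_ctx->entity);
 */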

/* drm_sched_entity_clear_dep - callback to clear the entity's dependency */
static void drm_sched_entity_clear_dep(struct dma_fence *f,
                                       struct dma_fence_cb *cb)
{
        struct drm_sched_entity *entity =
                container_of(cb, struct drm_sched_entity, cb);

        entity->dependency = NULL;
        dma_fence_put(f);
}

/*
 * drm_sched_entity_wakeup - callback to clear the entity's dependency and
 * wake up the scheduler
 */
static void drm_sched_entity_wakeup(struct dma_fence *f,
                                    struct dma_fence_cb *cb)
{
        struct drm_sched_entity *entity =
                container_of(cb, struct drm_sched_entity, cb);

        drm_sched_entity_clear_dep(f, cb);
        drm_sched_wakeup(entity->rq->sched);
}

/**
 * drm_sched_entity_set_priority - Sets priority of the entity
 *
 * @entity: scheduler entity
 * @priority: scheduler priority
 *
 * Update the priority of run queues used for the entity.
 */
void drm_sched_entity_set_priority(struct drm_sched_entity *entity,
                                   enum drm_sched_priority priority)
{
        spin_lock(&entity->rq_lock);
        entity->priority = priority;
        spin_unlock(&entity->rq_lock);
}
EXPORT_SYMBOL(drm_sched_entity_set_priority);
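
/*
 * Sketch (hypothetical driver code): a driver handling a per-context
 * priority ioctl could simply forward the new priority; it takes effect
 * the next time drm_sched_entity_select_rq() picks a run queue.
 *
 *      drm_sched_entity_set_priority(&my_ctx->entity,
 *                                    DRM_SCHED_PRIORITY_HIGH);
 */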

/*
 * Add a callback to the current dependency of the entity to wake up the
 * scheduler when the entity becomes available.
 */
static bool drm_sched_entity_add_dependency_cb(struct drm_sched_entity *entity)
{
        struct drm_gpu_scheduler *sched = entity->rq->sched;
        struct dma_fence *fence = entity->dependency;
        struct drm_sched_fence *s_fence;

        if (fence->context == entity->fence_context ||
            fence->context == entity->fence_context + 1) {
                /*
                 * Fence is a scheduled/finished fence from a job
                 * which belongs to the same entity, we can ignore
                 * fences from ourselves
                 */
                dma_fence_put(entity->dependency);
                return false;
        }

        s_fence = to_drm_sched_fence(fence);
        if (s_fence && s_fence->sched == sched &&
            !test_bit(DRM_SCHED_FENCE_DONT_PIPELINE, &fence->flags)) {

                /*
                 * Fence is from the same scheduler, only need to wait for
                 * it to be scheduled
                 */
                fence = dma_fence_get(&s_fence->scheduled);
                dma_fence_put(entity->dependency);
                entity->dependency = fence;
                if (!dma_fence_add_callback(fence, &entity->cb,
                                            drm_sched_entity_clear_dep))
                        return true;

                /* Ignore it when it is already scheduled */
                dma_fence_put(fence);
                return false;
        }

        if (!dma_fence_add_callback(entity->dependency, &entity->cb,
                                    drm_sched_entity_wakeup))
                return true;

        dma_fence_put(entity->dependency);
        return false;
}

static struct dma_fence *
drm_sched_job_dependency(struct drm_sched_job *job,
                         struct drm_sched_entity *entity)
{
        if (!xa_empty(&job->dependencies))
                return xa_erase(&job->dependencies, job->last_dependency++);

        if (job->sched->ops->prepare_job)
                return job->sched->ops->prepare_job(job, entity);

        return NULL;
}

struct drm_sched_job *drm_sched_entity_pop_job(struct drm_sched_entity *entity)
{
        struct drm_sched_job *sched_job;

        sched_job = to_drm_sched_job(spsc_queue_peek(&entity->job_queue));
        if (!sched_job)
                return NULL;

        while ((entity->dependency =
                        drm_sched_job_dependency(sched_job, entity))) {
                trace_drm_sched_job_wait_dep(sched_job, entity->dependency);

                if (drm_sched_entity_add_dependency_cb(entity))
                        return NULL;
        }

        /* skip jobs from an entity marked guilty */
        if (entity->guilty && atomic_read(entity->guilty))
                dma_fence_set_error(&sched_job->s_fence->finished, -ECANCELED);

        dma_fence_put(entity->last_scheduled);

        entity->last_scheduled = dma_fence_get(&sched_job->s_fence->finished);

        /*
         * If the queue is empty we allow drm_sched_entity_select_rq() to
         * locklessly access ->last_scheduled. This only works if we set the
         * pointer before we dequeue and if we add a write barrier here.
         */
        smp_wmb();

        spsc_queue_pop(&entity->job_queue);

        /*
         * Update the entity's location in the min heap according to
         * the timestamp of the next job, if any.
         */
        if (drm_sched_policy == DRM_SCHED_POLICY_FIFO) {
                struct drm_sched_job *next;

                next = to_drm_sched_job(spsc_queue_peek(&entity->job_queue));
                if (next)
                        drm_sched_rq_update_fifo(entity, next->submit_ts);
        }

        return sched_job;
}

void drm_sched_entity_select_rq(struct drm_sched_entity *entity)
{
        struct dma_fence *fence;
        struct drm_gpu_scheduler *sched;
        struct drm_sched_rq *rq;

        /* single possible engine and already selected */
        if (!entity->sched_list)
                return;

        /* queue non-empty, stay on the same engine */
        if (spsc_queue_count(&entity->job_queue))
                return;

        /*
         * Only when the queue is empty are we guaranteed that the scheduler
         * thread cannot change ->last_scheduled. To enforce ordering we need
         * a read barrier here. See drm_sched_entity_pop_job() for the other
         * side.
         */
        smp_rmb();

        fence = entity->last_scheduled;

        /* stay on the same engine if the previous job hasn't finished */
        if (fence && !dma_fence_is_signaled(fence))
                return;

        spin_lock(&entity->rq_lock);
        sched = drm_sched_pick_best(entity->sched_list, entity->num_sched_list);
        rq = sched ? &sched->sched_rq[entity->priority] : NULL;
        if (rq != entity->rq) {
                drm_sched_rq_remove_entity(entity->rq, entity);
                entity->rq = rq;
        }
        spin_unlock(&entity->rq_lock);

        if (entity->num_sched_list == 1)
                entity->sched_list = NULL;
}

/**
 * drm_sched_entity_push_job - Submit a job to the entity's job queue
 * @sched_job: job to submit
 *
 * Note: To guarantee that the order of insertion to queue matches the job's
 * fence sequence number this function should be called with drm_sched_job_arm()
 * under common lock for the struct drm_sched_entity that was set up for
 * @sched_job in drm_sched_job_init().
 */
void drm_sched_entity_push_job(struct drm_sched_job *sched_job)
{
        struct drm_sched_entity *entity = sched_job->entity;
        bool first;
        ktime_t submit_ts;

        trace_drm_sched_job(sched_job, entity);
        atomic_inc(entity->rq->sched->score);
        WRITE_ONCE(entity->last_user, current->group_leader);

        /*
         * After the sched_job is pushed into the entity queue, it may be
         * completed and freed up at any time. We can no longer access it.
         * Make sure to set the submit_ts first, to avoid a race.
         */
        sched_job->submit_ts = submit_ts = ktime_get();
        first = spsc_queue_push(&entity->job_queue, &sched_job->queue_node);

        /* first job wakes up scheduler */
        if (first) {
                /* Add the entity to the run queue */
                spin_lock(&entity->rq_lock);
                if (entity->stopped) {
                        spin_unlock(&entity->rq_lock);

                        DRM_ERROR("Trying to push to a killed entity\n");
                        return;
                }

                drm_sched_rq_add_entity(entity->rq, entity);
                spin_unlock(&entity->rq_lock);

                if (drm_sched_policy == DRM_SCHED_POLICY_FIFO)
                        drm_sched_rq_update_fifo(entity, submit_ts);

                drm_sched_wakeup(entity->rq->sched);
        }
}
EXPORT_SYMBOL(drm_sched_entity_push_job);
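
/*
 * Submission sketch (hypothetical driver code) showing the init/arm/push
 * sequence that the kernel-doc above refers to. drm_sched_job_arm() and
 * drm_sched_entity_push_job() run under the same driver-provided lock that
 * serializes submission on the entity; my_ctx and its lock are made-up
 * names, and "job" embeds a struct drm_sched_job as job->base.
 *
 *      ret = drm_sched_job_init(&job->base, &my_ctx->entity, my_ctx);
 *      if (ret)
 *              return ret;
 *
 *      mutex_lock(&my_ctx->lock);
 *      drm_sched_job_arm(&job->base);
 *      fence = dma_fence_get(&job->base.s_fence->finished);
 *      drm_sched_entity_push_job(&job->base);
 *      mutex_unlock(&my_ctx->lock);
 *
 * The finished fence reference is taken before the push because the job may
 * complete and be freed as soon as it is visible on the queue.
 */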