Commit | Line | Data |
---|---|---|
1b1f42d8 LS |
1 | /* |
2 | * Copyright 2015 Advanced Micro Devices, Inc. | |
3 | * | |
4 | * Permission is hereby granted, free of charge, to any person obtaining a | |
5 | * copy of this software and associated documentation files (the "Software"), | |
6 | * to deal in the Software without restriction, including without limitation | |
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | |
8 | * and/or sell copies of the Software, and to permit persons to whom the | |
9 | * Software is furnished to do so, subject to the following conditions: | |
10 | * | |
11 | * The above copyright notice and this permission notice shall be included in | |
12 | * all copies or substantial portions of the Software. | |
13 | * | |
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | |
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | |
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | |
17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | |
18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | |
19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | |
20 | * OTHER DEALINGS IN THE SOFTWARE. | |
21 | * | |
22 | */ | |
23 | ||
24 | #ifndef _DRM_GPU_SCHEDULER_H_ | |
25 | #define _DRM_GPU_SCHEDULER_H_ | |
26 | ||
27 | #include <drm/spsc_queue.h> | |
28 | #include <linux/dma-fence.h> | |
dc10218d | 29 | #include <linux/completion.h> |
ebd5f742 | 30 | #include <linux/xarray.h> |
7d64c40a | 31 | #include <linux/workqueue.h> |
1b1f42d8 | 32 | |
741f01e6 AG |
33 | #define MAX_WAIT_SCHED_ENTITY_Q_EMPTY msecs_to_jiffies(1000) |
34 | ||
7b476aff CK |
35 | /** |
36 | * DRM_SCHED_FENCE_DONT_PIPELINE - Prevent dependency pipelining | |
37 | * | |
38 | * Setting this flag on a scheduler fence prevents pipelining of jobs depending | |
39 | * on this fence. In other words, we always insert a full CPU round trip before | |
40 | * dependent jobs are pushed to the hw queue. | |
41 | */ | |
42 | #define DRM_SCHED_FENCE_DONT_PIPELINE DMA_FENCE_FLAG_USER_BITS | |
43 | ||
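As a usage sketch: the flag is a &dma_fence flag bit, so a driver sets it with set_bit() on the job's finished fence. The wrapper below is hypothetical; only the flag and the scheduler types come from this header.

```c
/* Minimal sketch: force a full CPU round trip before jobs that
 * depend on this job's scheduler fence reach the hw queue. "job"
 * is assumed to be an already-initialized &struct drm_sched_job.
 */
static void my_disable_pipelining(struct drm_sched_job *job)
{
	set_bit(DRM_SCHED_FENCE_DONT_PIPELINE,
		&job->s_fence->finished.flags);
}
```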
4d5230b5 CK |
44 | enum dma_resv_usage; |
45 | struct dma_resv; | |
ebd5f742 DV |
46 | struct drm_gem_object; |
47 | ||
1b1f42d8 LS |
48 | struct drm_gpu_scheduler; |
49 | struct drm_sched_rq; | |
50 | ||
e2d732fd LT |
51 | /* These are often used as an (initial) index |
52 | * to an array, and as such should start at 0. | |
53 | */ | |
1b1f42d8 LS |
54 | enum drm_sched_priority { |
55 | DRM_SCHED_PRIORITY_MIN, | |
1b1f42d8 | 56 | DRM_SCHED_PRIORITY_NORMAL, |
e2d732fd | 57 | DRM_SCHED_PRIORITY_HIGH, |
1b1f42d8 | 58 | DRM_SCHED_PRIORITY_KERNEL, |
e2d732fd LT |
59 | |
60 | DRM_SCHED_PRIORITY_COUNT, | |
1b1f42d8 LS |
61 | DRM_SCHED_PRIORITY_UNSET = -2 |
62 | }; | |
63 | ||
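Since the values start at 0 and end at DRM_SCHED_PRIORITY_COUNT, they can directly index per-priority arrays such as &drm_gpu_scheduler.sched_rq further down. A small sketch of the idiom (walk_rqs() is hypothetical):

```c
/* Sketch: iterate a scheduler's per-priority run queues, using the
 * priority values as array indices.
 */
static void walk_rqs(struct drm_gpu_scheduler *sched)
{
	enum drm_sched_priority prio;

	for (prio = DRM_SCHED_PRIORITY_MIN;
	     prio < DRM_SCHED_PRIORITY_COUNT; prio++) {
		struct drm_sched_rq *rq = &sched->sched_rq[prio];

		spin_lock(&rq->lock);
		/* inspect rq->entities, rq->current_entity, ... */
		spin_unlock(&rq->lock);
	}
}
```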
08fb97de AG |
64 | /* Used to choose between FIFO and RR job scheduling */
65 | extern int drm_sched_policy; | |
66 | ||
67 | #define DRM_SCHED_POLICY_RR 0 | |
68 | #define DRM_SCHED_POLICY_FIFO 1 | |
69 | ||
1b1f42d8 | 70 | /** |
2d33948e ND |
71 | * struct drm_sched_entity - A wrapper around a job queue (typically |
72 | * attached to the DRM file_priv). | |
73 | * | |
1a61ee07 EA |
74 | * Entities will emit jobs, in order, to their corresponding hardware | |
75 | * ring, and the scheduler will alternate between entities based on | |
76 | * scheduling policy. | |
2d33948e | 77 | */ |
1b1f42d8 | 78 | struct drm_sched_entity { |
981b04d9 DV |
79 | /** |
80 | * @list: | |
81 | * | |
82 | * Used to append this struct to the list of entities in the runqueue | |
83 | * @rq under &drm_sched_rq.entities. | |
84 | * | |
85 | * Protected by &drm_sched_rq.lock of @rq. | |
86 | */ | |
1b1f42d8 | 87 | struct list_head list; |
981b04d9 DV |
88 | |
89 | /** | |
90 | * @rq: | |
91 | * | |
92 | * Runqueue on which this entity is currently scheduled. | |
93 | * | |
94 | * FIXME: Locking is very unclear for this. Writers are protected by | |
95 | * @rq_lock, but readers are generally lockless and seem to just race | |
96 | * with not even a READ_ONCE. | |
97 | */ | |
1b1f42d8 | 98 | struct drm_sched_rq *rq; |
981b04d9 DV |
99 | |
100 | /** | |
101 | * @sched_list: | |
102 | * | |
103 | * A list of schedulers (struct drm_gpu_scheduler). Jobs from this entity can | |
104 | * be scheduled on any scheduler on this list. | |
105 | * | |
106 | * This can be modified by calling drm_sched_entity_modify_sched(). | |
107 | * Locking is entirely up to the driver, see the above function for more | |
108 | * details. | |
109 | * | |
110 | * This will be set to NULL if &num_sched_list equals 1 and @rq has been | |
111 | * set already. | |
112 | * | |
113 | * FIXME: This means priority changes through | |
114 | * drm_sched_entity_set_priority() will be lost henceforth in this case. | |
115 | */ | |
b3ac1766 | 116 | struct drm_gpu_scheduler **sched_list; |
981b04d9 DV |
117 | |
118 | /** | |
119 | * @num_sched_list: | |
120 | * | |
121 | * Number of drm_gpu_schedulers in the @sched_list. | |
122 | */ | |
9e3e90c5 | 123 | unsigned int num_sched_list; |
981b04d9 DV |
124 | |
125 | /** | |
126 | * @priority: | |
127 | * | |
128 | * Priority of the entity. This can be modified by calling | |
129 | * drm_sched_entity_set_priority(). Protected by &rq_lock. | |
130 | */ | |
b3ac1766 | 131 | enum drm_sched_priority priority; |
981b04d9 DV |
132 | |
133 | /** | |
134 | * @rq_lock: | |
135 | * | |
136 | * Lock to modify the runqueue to which this entity belongs. | |
137 | */ | |
1b1f42d8 | 138 | spinlock_t rq_lock; |
1b1f42d8 | 139 | |
981b04d9 DV |
140 | /** |
141 | * @job_queue: the list of jobs of this entity. | |
142 | */ | |
1b1f42d8 LS |
143 | struct spsc_queue job_queue; |
144 | ||
981b04d9 DV |
145 | /** |
146 | * @fence_seq: | |
147 | * | |
148 | * A linearly increasing seqno incremented with each new | |
149 | * &drm_sched_fence which is part of the entity. | |
150 | * | |
151 | * FIXME: Callers of drm_sched_job_arm() need to ensure correct locking, | |
152 | * this doesn't need to be atomic. | |
153 | */ | |
1b1f42d8 | 154 | atomic_t fence_seq; |
981b04d9 DV |
155 | |
156 | /** | |
157 | * @fence_context: | |
158 | * | |
159 | * A unique context for all the fences which belong to this entity. The | |
160 | * &drm_sched_fence.scheduled uses the fence_context but | |
161 | * &drm_sched_fence.finished uses fence_context + 1. | |
162 | */ | |
1b1f42d8 LS |
163 | uint64_t fence_context; |
164 | ||
981b04d9 DV |
165 | /** |
166 | * @dependency: | |
167 | * | |
168 | * The dependency fence of the job which is on the top of the job queue. | |
169 | */ | |
1b1f42d8 | 170 | struct dma_fence *dependency; |
981b04d9 DV |
171 | |
172 | /** | |
173 | * @cb: | |
174 | * | |
175 | * Callback for the dependency fence above. | |
176 | */ | |
1b1f42d8 | 177 | struct dma_fence_cb cb; |
981b04d9 DV |
178 | |
179 | /** | |
180 | * @guilty: | |
181 | * | |
182 | * Points to the entity's guilty flag, set once a job from this entity exceeds the scheduler's hang limit. | |
183 | */ | |
2d33948e | 184 | atomic_t *guilty; |
981b04d9 DV |
185 | |
186 | /** | |
187 | * @last_scheduled: | |
188 | * | |
189 | * Points to the finished fence of the last scheduled job. Only written | |
190 | * by the scheduler thread, can be accessed locklessly from | |
191 | * drm_sched_job_arm() iff the queue is empty. | |
192 | */ | |
2d33948e | 193 | struct dma_fence *last_scheduled; |
981b04d9 DV |
194 | |
195 | /** | |
196 | * @last_user: last group leader pushing a job into the entity. | |
197 | */ | |
43bce41c | 198 | struct task_struct *last_user; |
981b04d9 DV |
199 | |
200 | /** | |
201 | * @stopped: | |
202 | * | |
203 | * Marks the entity as removed from the rq and destined for | |
204 | * termination. This is set by calling drm_sched_entity_flush() and by | |
205 | * drm_sched_fini(). | |
206 | */ | |
62347a33 | 207 | bool stopped; |
981b04d9 DV |
208 | |
209 | /** | |
210 | * @entity_idle: | |
211 | * | |
212 | * Signals when entity is not in use, used to sequence entity cleanup in | |
213 | * drm_sched_entity_fini(). | |
214 | */ | |
83a7772b | 215 | struct completion entity_idle; |
08fb97de AG |
216 | |
217 | /** | |
218 | * @oldest_job_waiting: | |
219 | * | |
220 | * Marks the earliest job waiting in the SW queue. | |
221 | */ | |
222 | ktime_t oldest_job_waiting; | |
223 | ||
224 | /** | |
225 | * @rb_tree_node: | |
226 | * | |
227 | * The node used to insert this entity into the time-based priority queue. | |
228 | */ | |
229 | struct rb_node rb_tree_node; | |
230 | ||
df622729 LS |
231 | /** |
232 | * @elapsed_ns: | |
233 | * | |
234 | * Records the amount of time during which jobs from this entity were | |
235 | * active on the GPU. | |
236 | */ | |
237 | uint64_t elapsed_ns; | |
1b1f42d8 LS |
238 | }; |
239 | ||
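To illustrate the entity lifecycle end to end, a hedged sketch follows; my_file_priv is hypothetical driver state, and only the drm_sched_entity_*() calls (declared near the end of this header) are real API.

```c
/* Sketch: per-file entity feeding a single scheduler. */
struct my_file_priv {
	struct drm_sched_entity entity;
};

static int my_entity_setup(struct my_file_priv *fpriv,
			   struct drm_gpu_scheduler *sched)
{
	struct drm_gpu_scheduler *sched_list[] = { sched };

	/* With num_sched_list == 1 the list isn't retained (see
	 * @sched_list above), so a stack array is fine. No guilty
	 * tracking in this sketch.
	 */
	return drm_sched_entity_init(&fpriv->entity,
				     DRM_SCHED_PRIORITY_NORMAL,
				     sched_list, 1, NULL);
}

static void my_entity_teardown(struct my_file_priv *fpriv)
{
	/* Flushes outstanding jobs, then frees entity resources. */
	drm_sched_entity_destroy(&fpriv->entity);
}
```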
240 | /** | |
2d33948e ND |
241 | * struct drm_sched_rq - queue of entities to be scheduled. |
242 | * | |
243 | * @lock: to modify the entities list. | |
8dc9fbbf | 244 | * @sched: the scheduler to which this rq belongs. |
2d33948e ND |
245 | * @entities: list of the entities to be scheduled. |
246 | * @current_entity: the entity which is to be scheduled. | |
08fb97de | 247 | * @rb_tree_root: root of the time-based priority queue of entities for FIFO scheduling |
2d33948e | 248 | * |
1b1f42d8 LS |
249 | * Run queue is a set of entities scheduling command submissions for |
250 | * one specific ring. It implements the scheduling policy that selects | |
251 | * the next entity to emit commands from. | |
2d33948e | 252 | */ |
1b1f42d8 LS |
253 | struct drm_sched_rq { |
254 | spinlock_t lock; | |
8dc9fbbf | 255 | struct drm_gpu_scheduler *sched; |
1b1f42d8 LS |
256 | struct list_head entities; |
257 | struct drm_sched_entity *current_entity; | |
08fb97de | 258 | struct rb_root_cached rb_tree_root; |
1b1f42d8 LS |
259 | }; |
260 | ||
2d33948e ND |
261 | /** |
262 | * struct drm_sched_fence - fences corresponding to the scheduling of a job. | |
263 | */ | |
1b1f42d8 | 264 | struct drm_sched_fence { |
2d33948e ND |
265 | /** |
266 | * @scheduled: this fence is what will be signaled by the scheduler | |
267 | * when the job is scheduled. | |
268 | */ | |
1b1f42d8 | 269 | struct dma_fence scheduled; |
1a61ee07 | 270 | |
2d33948e ND |
271 | /** |
272 | * @finished: this fence is what will be signaled by the scheduler | |
273 | * when the job is completed. | |
274 | * | |
275 | * When setting up an out fence for the job, you should use | |
276 | * this, since it's available immediately upon | |
277 | * drm_sched_job_init(), and the fence returned by the driver | |
278 | * from run_job() won't be created until the dependencies have | |
279 | * resolved. | |
280 | */ | |
1b1f42d8 | 281 | struct dma_fence finished; |
1a61ee07 | 282 | |
2d33948e ND |
283 | /** |
284 | * @parent: the fence returned by &drm_sched_backend_ops.run_job | |
285 | * when scheduling the job on hardware. We signal the | |
286 | * &drm_sched_fence.finished fence once parent is signalled. | |
287 | */ | |
1b1f42d8 | 288 | struct dma_fence *parent; |
2d33948e ND |
289 | /** |
290 | * @sched: the scheduler instance to which the job having this struct | |
291 | * belongs. | |
292 | */ | |
1b1f42d8 | 293 | struct drm_gpu_scheduler *sched; |
2d33948e ND |
294 | /** |
295 | * @lock: the lock used by the scheduled and the finished fences. | |
296 | */ | |
1b1f42d8 | 297 | spinlock_t lock; |
2d33948e ND |
298 | /** |
299 | * @owner: job owner for debugging | |
300 | */ | |
1b1f42d8 LS |
301 | void *owner; |
302 | }; | |
303 | ||
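The @finished kernel-doc above recommends that fence as the job's out fence; a minimal wrapper sketch (my_job_out_fence() is hypothetical):

```c
/* Sketch: expose the job's finished fence as its out fence. It is
 * valid right after drm_sched_job_init(), long before run_job()
 * produces the hardware fence stored in @parent.
 */
static struct dma_fence *my_job_out_fence(struct drm_sched_job *job)
{
	return dma_fence_get(&job->s_fence->finished);
}
```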
304 | struct drm_sched_fence *to_drm_sched_fence(struct dma_fence *f); | |
305 | ||
1a61ee07 | 306 | /** |
2d33948e ND |
307 | * struct drm_sched_job - A job to be run by an entity. |
308 | * | |
309 | * @queue_node: used to append this struct to the queue of jobs in an entity. | |
c365d304 | 310 | * @list: a job participates in the "pending" and "done" lists. |
2d33948e ND |
311 | * @sched: the scheduler instance on which this job is scheduled. |
312 | * @s_fence: contains the fences for the scheduling of job. | |
313 | * @finish_cb: the callback for the finished fence. | |
e795df5b | 314 | * @work: Helper to reschedule the job kill to a different context. |
2d33948e ND |
315 | * @id: a unique id assigned to each job scheduled on the scheduler. |
316 | * @karma: increment on every hang caused by this job. If this exceeds the hang | |
317 | * limit of the scheduler then the job is marked guilty and will not | |
318 | * be scheduled further. | |
319 | * @s_priority: the priority of the job. | |
320 | * @entity: the entity to which this job belongs. | |
3741540e | 321 | * @cb: the callback for the parent fence in s_fence. |
1a61ee07 EA |
322 | * |
323 | * A job is created by the driver using drm_sched_job_init(), and | |
324 | * should call drm_sched_entity_push_job() once it wants the scheduler | |
325 | * to schedule the job. | |
326 | */ | |
1b1f42d8 LS |
327 | struct drm_sched_job { |
328 | struct spsc_node queue_node; | |
8935ff00 | 329 | struct list_head list; |
1b1f42d8 LS |
330 | struct drm_gpu_scheduler *sched; |
331 | struct drm_sched_fence *s_fence; | |
542cff78 AG |
332 | |
333 | /* | |
334 | * work is used only after finish_cb has been used and will not be | |
335 | * accessed anymore. | |
336 | */ | |
337 | union { | |
338 | struct dma_fence_cb finish_cb; | |
a82f30b0 | 339 | struct work_struct work; |
542cff78 AG |
340 | }; |
341 | ||
1b1f42d8 LS |
342 | uint64_t id; |
343 | atomic_t karma; | |
344 | enum drm_sched_priority s_priority; | |
8935ff00 | 345 | struct drm_sched_entity *entity; |
3741540e | 346 | struct dma_fence_cb cb; |
ebd5f742 DV |
347 | /** |
348 | * @dependencies: | |
349 | * | |
350 | * Contains the dependencies as struct dma_fence for this job, see | |
351 | * drm_sched_job_add_dependency() and | |
352 | * drm_sched_job_add_implicit_dependencies(). | |
353 | */ | |
354 | struct xarray dependencies; | |
355 | ||
356 | /** @last_dependency: tracks @dependencies as they signal */ | |
357 | unsigned long last_dependency; | |
08fb97de AG |
358 | |
359 | /** | |
360 | * @submit_ts: | |
361 | * | |
362 | * When the job was pushed into the entity queue. | |
363 | */ | |
364 | ktime_t submit_ts; | |
1b1f42d8 LS |
365 | }; |
366 | ||
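Putting the pieces together, here is a hedged sketch of the documented submission flow; my_submit() and the single GEM object are hypothetical, while the drm_sched_job_*() calls match the declarations later in this header.

```c
/* Sketch: init the job, gather dependencies, then arm and push.
 * Once armed, a job must be pushed, so all error handling happens
 * before drm_sched_job_arm().
 */
static int my_submit(struct drm_sched_entity *entity,
		     struct drm_sched_job *job,
		     struct drm_gem_object *obj, void *owner)
{
	int ret;

	ret = drm_sched_job_init(job, entity, owner);
	if (ret)
		return ret;

	/* e.g. implicit fences from the BO this job writes */
	ret = drm_sched_job_add_implicit_dependencies(job, obj, true);
	if (ret)
		goto err_cleanup;

	drm_sched_job_arm(job);
	drm_sched_entity_push_job(job);
	return 0;

err_cleanup:
	drm_sched_job_cleanup(job);
	return ret;
}
```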
367 | static inline bool drm_sched_invalidate_job(struct drm_sched_job *s_job, | |
368 | int threshold) | |
369 | { | |
6efa4b46 | 370 | return s_job && atomic_inc_return(&s_job->karma) > threshold; |
1b1f42d8 LS |
371 | } |
372 | ||
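For illustration only, a hedged sketch of how a driver reset path might use this helper together with &drm_gpu_scheduler.hang_limit (the wrapper is hypothetical; the scheduler core normally handles karma itself via drm_sched_increase_karma()):

```c
/* Sketch: bump the bad job's karma and check whether it has now
 * hung more often than the scheduler tolerates.
 */
static bool my_job_is_guilty(struct drm_sched_job *bad)
{
	return drm_sched_invalidate_job(bad, bad->sched->hang_limit);
}
```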
a6a1f036 LT |
373 | enum drm_gpu_sched_stat { |
374 | DRM_GPU_SCHED_STAT_NONE, /* Reserve 0 */ | |
375 | DRM_GPU_SCHED_STAT_NOMINAL, | |
376 | DRM_GPU_SCHED_STAT_ENODEV, | |
377 | }; | |
378 | ||
1b1f42d8 | 379 | /** |
f8ad757e RD |
380 | * struct drm_sched_backend_ops - Define the backend operations |
381 | * called by the scheduler | |
2d33948e | 382 | * |
f8ad757e | 383 | * These functions should be implemented by the driver. |
2d33948e | 384 | */ |
1b1f42d8 | 385 | struct drm_sched_backend_ops { |
2d33948e | 386 | /** |
a82f30b0 | 387 | * @prepare_job: |
ebd5f742 DV |
388 | * |
389 | * Called when the scheduler is considering scheduling this job next, to | |
390 | * get another struct dma_fence for this job to block on. Once it | |
391 | * returns NULL, run_job() may be called. | |
392 | * | |
a82f30b0 CK |
393 | * Can be NULL if no additional preparation of the dependencies is | |
394 | * necessary. Skipped when jobs are killed instead of run. | |
1a61ee07 | 395 | */ |
a82f30b0 CK |
396 | struct dma_fence *(*prepare_job)(struct drm_sched_job *sched_job, |
397 | struct drm_sched_entity *s_entity); | |
1a61ee07 | 398 | |
2d33948e ND |
399 | /** |
400 | * @run_job: Called to execute the job once all of the dependencies | |
401 | * have been resolved. This may be called multiple times, if | |
1a61ee07 EA |
402 | * timedout_job() has happened and drm_sched_resubmit_jobs() | |
403 | * resubmits it. | |
404 | */ | |
1b1f42d8 | 405 | struct dma_fence *(*run_job)(struct drm_sched_job *sched_job); |
1a61ee07 | 406 | |
2d33948e | 407 | /** |
a6a1f036 LT |
408 | * @timedout_job: Called when a job has taken too long to execute, |
409 | * to trigger GPU recovery. | |
410 | * | |
1fad1b7e BB |
411 | * This method is called in a workqueue context. |
412 | * | |
413 | * Drivers typically issue a reset to recover from GPU hangs, and this | |
414 | * procedure usually follows the following workflow: | |
415 | * | |
416 | * 1. Stop the scheduler using drm_sched_stop(). This will park the | |
417 | * scheduler thread and cancel the timeout work, guaranteeing that | |
418 | * nothing is queued while we reset the hardware queue | |
419 | * 2. Try to gracefully stop non-faulty jobs (optional) | |
420 | * 3. Issue a GPU reset (driver-specific) | |
421 | * 4. Re-submit jobs using drm_sched_resubmit_jobs() | |
422 | * 5. Restart the scheduler using drm_sched_start(). At that point, new | |
423 | * jobs can be queued, and the scheduler thread is unblocked | |
424 | * | |
78efe21b BB |
425 | * Note that some GPUs have distinct hardware queues but need to reset |
426 | * the GPU globally, which requires extra synchronization between the | |
427 | * timeout handler of the different &drm_gpu_scheduler. One way to | |
428 | * achieve this synchronization is to create an ordered workqueue | |
429 | * (using alloc_ordered_workqueue()) at the driver level, and pass this | |
430 | * queue to drm_sched_init(), to guarantee that timeout handlers are | |
431 | * executed sequentially. The above workflow needs to be slightly | |
432 | * adjusted in that case: | |
433 | * | |
434 | * 1. Stop all schedulers impacted by the reset using drm_sched_stop() | |
435 | * 2. Try to gracefully stop non-faulty jobs on all queues impacted by | |
436 | * the reset (optional) | |
437 | * 3. Issue a GPU reset on all faulty queues (driver-specific) | |
438 | * 4. Re-submit jobs on all schedulers impacted by the reset using | |
439 | * drm_sched_resubmit_jobs() | |
440 | * 5. Restart all schedulers that were stopped in step #1 using | |
441 | * drm_sched_start() | |
442 | * | |
a6a1f036 LT |
443 | * Return DRM_GPU_SCHED_STAT_NOMINAL, when all is normal, |
444 | * and the underlying driver has started or completed recovery. | |
445 | * | |
446 | * Return DRM_GPU_SCHED_STAT_ENODEV, if the device is no longer | |
447 | * available, i.e. has been unplugged. | |
1a61ee07 | 448 | */ |
a6a1f036 | 449 | enum drm_gpu_sched_stat (*timedout_job)(struct drm_sched_job *sched_job); |
1a61ee07 | 450 | |
2d33948e ND |
451 | /** |
452 | * @free_job: Called once the job's finished fence has been signaled | |
453 | * and it's time to clean it up. | |
1a61ee07 | 454 | */ |
1b1f42d8 LS |
455 | void (*free_job)(struct drm_sched_job *sched_job); |
456 | }; | |
457 | ||
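As a driver-side sketch of these hooks, the skeleton below wires up the documented single-scheduler recovery flow; all my_*() names are hypothetical stand-ins for real hardware handling.

```c
/* Hypothetical driver internals, kept as prototypes for brevity. */
static struct dma_fence *my_hw_submit(struct drm_sched_job *sched_job);
static void my_gpu_reset(struct drm_gpu_scheduler *sched);

static struct dma_fence *my_run_job(struct drm_sched_job *sched_job)
{
	/* Kick the hardware and return its completion fence. */
	return my_hw_submit(sched_job);
}

static enum drm_gpu_sched_stat
my_timedout_job(struct drm_sched_job *sched_job)
{
	struct drm_gpu_scheduler *sched = sched_job->sched;

	drm_sched_stop(sched, sched_job);	/* 1. park the scheduler */
	my_gpu_reset(sched);			/* 2./3. reset the HW    */
	drm_sched_resubmit_jobs(sched);		/* 4. replay jobs        */
	drm_sched_start(sched, true);		/* 5. unblock scheduler  */

	return DRM_GPU_SCHED_STAT_NOMINAL;
}

static void my_free_job(struct drm_sched_job *sched_job)
{
	drm_sched_job_cleanup(sched_job);
	/* ...then free the driver's job container. */
}

static const struct drm_sched_backend_ops my_sched_ops = {
	/* .prepare_job may stay NULL when dependencies need no prep */
	.run_job	= my_run_job,
	.timedout_job	= my_timedout_job,
	.free_job	= my_free_job,
};
```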
458 | /** | |
f8ad757e | 459 | * struct drm_gpu_scheduler - scheduler instance-specific data |
2d33948e ND |
460 | * |
461 | * @ops: backend operations provided by the driver. | |
462 | * @hw_submission_limit: the max size of the hardware queue. | |
463 | * @timeout: the time after which a job is removed from the scheduler. | |
464 | * @name: name of the ring for which this scheduler is being used. | |
465 | * @sched_rq: priority wise array of run queues. | |
466 | * @wake_up_worker: the wait queue on which the scheduler sleeps until a job | |
467 | * is ready to be scheduled. | |
468 | * @job_scheduled: once drm_sched_entity_flush() is called, the scheduler | |
469 | * waits on this wait queue until all the scheduled jobs are | |
470 | * finished. | |
471 | * @hw_rq_count: the number of jobs currently in the hardware queue. | |
472 | * @job_id_count: used to assign a unique id to each job. | |
78efe21b | 473 | * @timeout_wq: workqueue used to queue @work_tdr |
6a962430 ND |
474 | * @work_tdr: schedules a delayed call to @drm_sched_job_timedout after the |
475 | * timeout interval is over. | |
2d33948e | 476 | * @thread: the kthread on which the scheduler runs. |
6efa4b46 LT |
477 | * @pending_list: the list of jobs which are currently in the job queue. |
478 | * @job_list_lock: lock to protect the pending_list. | |
2d33948e | 479 | * @hang_limit: once the hangs by a job crosses this limit then it is marked |
95b2151f | 480 | * guilty and it will no longer be considered for scheduling. |
d41a39dd | 481 | * @score: score to help the load balancer pick an idle sched |
be318fd8 | 482 | * @_score: score used when the driver doesn't provide one |
faf6e1a8 | 483 | * @ready: marks if the underlying HW is ready to work |
a5343b8a | 484 | * @free_guilty: A hint to the timeout handler to free the guilty job. |
f8ad757e | 485 | * @dev: system &struct device |
2d33948e ND |
486 | * |
487 | * One scheduler is implemented for each hardware ring. | |
488 | */ | |
1b1f42d8 LS |
489 | struct drm_gpu_scheduler { |
490 | const struct drm_sched_backend_ops *ops; | |
491 | uint32_t hw_submission_limit; | |
492 | long timeout; | |
493 | const char *name; | |
e2d732fd | 494 | struct drm_sched_rq sched_rq[DRM_SCHED_PRIORITY_COUNT]; |
1b1f42d8 LS |
495 | wait_queue_head_t wake_up_worker; |
496 | wait_queue_head_t job_scheduled; | |
497 | atomic_t hw_rq_count; | |
498 | atomic64_t job_id_count; | |
78efe21b | 499 | struct workqueue_struct *timeout_wq; |
6a962430 | 500 | struct delayed_work work_tdr; |
1b1f42d8 | 501 | struct task_struct *thread; |
6efa4b46 | 502 | struct list_head pending_list; |
1b1f42d8 LS |
503 | spinlock_t job_list_lock; |
504 | int hang_limit; | |
f2f12eb9 CK |
505 | atomic_t *score; |
506 | atomic_t _score; | |
d41a39dd | 507 | bool ready; |
a5343b8a | 508 | bool free_guilty; |
8ab62eda | 509 | struct device *dev; |
1b1f42d8 LS |
510 | }; |
511 | ||
512 | int drm_sched_init(struct drm_gpu_scheduler *sched, | |
513 | const struct drm_sched_backend_ops *ops, | |
78efe21b BB |
514 | uint32_t hw_submission, unsigned hang_limit, |
515 | long timeout, struct workqueue_struct *timeout_wq, | |
8ab62eda | 516 | atomic_t *score, const char *name, struct device *dev); |
faf6e1a8 | 517 | |
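A hedged bring-up sketch for the declaration above; my_ring, my_sched_ops and the numeric limits are hypothetical, and an ordered timeout workqueue is only needed for the multi-queue reset case described under @timedout_job.

```c
struct my_ring {
	struct drm_gpu_scheduler sched;
	const char *name;
};

/* Sketch: one scheduler per hardware ring. A NULL timeout_wq is
 * expected to fall back to the system workqueue, and a NULL score
 * makes the scheduler use its internal counter (see @_score).
 */
static int my_ring_sched_init(struct my_ring *ring, struct device *dev)
{
	return drm_sched_init(&ring->sched, &my_sched_ops,
			      64,			/* hw_submission */
			      2,			/* hang_limit */
			      msecs_to_jiffies(500),	/* timeout */
			      NULL,			/* timeout_wq */
			      NULL,			/* score */
			      ring->name, dev);
}
```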
1b1f42d8 | 518 | void drm_sched_fini(struct drm_gpu_scheduler *sched); |
620e762f CK |
519 | int drm_sched_job_init(struct drm_sched_job *job, |
520 | struct drm_sched_entity *entity, | |
521 | void *owner); | |
dbe48d03 | 522 | void drm_sched_job_arm(struct drm_sched_job *job); |
ebd5f742 DV |
523 | int drm_sched_job_add_dependency(struct drm_sched_job *job, |
524 | struct dma_fence *fence); | |
4d5230b5 CK |
525 | int drm_sched_job_add_resv_dependencies(struct drm_sched_job *job, |
526 | struct dma_resv *resv, | |
527 | enum dma_resv_usage usage); | |
ebd5f742 DV |
528 | int drm_sched_job_add_implicit_dependencies(struct drm_sched_job *job, |
529 | struct drm_gem_object *obj, | |
530 | bool write); | |
531 | ||
532 | ||
b37aced3 ND |
533 | void drm_sched_entity_modify_sched(struct drm_sched_entity *entity, |
534 | struct drm_gpu_scheduler **sched_list, | |
535 | unsigned int num_sched_list); | |
536 | ||
26efecf9 | 537 | void drm_sched_job_cleanup(struct drm_sched_job *job); |
620e762f | 538 | void drm_sched_wakeup(struct drm_gpu_scheduler *sched); |
5918045c | 539 | void drm_sched_stop(struct drm_gpu_scheduler *sched, struct drm_sched_job *bad); |
222b5f04 AG |
540 | void drm_sched_start(struct drm_gpu_scheduler *sched, bool full_recovery); |
541 | void drm_sched_resubmit_jobs(struct drm_gpu_scheduler *sched); | |
542 | void drm_sched_increase_karma(struct drm_sched_job *bad); | |
e6c6338f JZ |
543 | void drm_sched_reset_karma(struct drm_sched_job *bad); |
544 | void drm_sched_increase_karma_ext(struct drm_sched_job *bad, int type); | |
620e762f CK |
545 | bool drm_sched_dependency_optimized(struct dma_fence* fence, |
546 | struct drm_sched_entity *entity); | |
8fe159b0 | 547 | void drm_sched_fault(struct drm_gpu_scheduler *sched); |
620e762f CK |
548 | |
549 | void drm_sched_rq_add_entity(struct drm_sched_rq *rq, | |
550 | struct drm_sched_entity *entity); | |
551 | void drm_sched_rq_remove_entity(struct drm_sched_rq *rq, | |
552 | struct drm_sched_entity *entity); | |
1b1f42d8 | 553 | |
08fb97de AG |
554 | void drm_sched_rq_update_fifo(struct drm_sched_entity *entity, ktime_t ts); |
555 | ||
aa16b6c6 | 556 | int drm_sched_entity_init(struct drm_sched_entity *entity, |
b3ac1766 ND |
557 | enum drm_sched_priority priority, |
558 | struct drm_gpu_scheduler **sched_list, | |
9e3e90c5 | 559 | unsigned int num_sched_list, |
8344c53f | 560 | atomic_t *guilty); |
cdc50176 ND |
561 | long drm_sched_entity_flush(struct drm_sched_entity *entity, long timeout); |
562 | void drm_sched_entity_fini(struct drm_sched_entity *entity); | |
563 | void drm_sched_entity_destroy(struct drm_sched_entity *entity); | |
620e762f CK |
564 | void drm_sched_entity_select_rq(struct drm_sched_entity *entity); |
565 | struct drm_sched_job *drm_sched_entity_pop_job(struct drm_sched_entity *entity); | |
0e10e9a1 | 566 | void drm_sched_entity_push_job(struct drm_sched_job *sched_job); |
7febe4bf CK |
567 | void drm_sched_entity_set_priority(struct drm_sched_entity *entity, |
568 | enum drm_sched_priority priority); | |
620e762f CK |
569 | bool drm_sched_entity_is_ready(struct drm_sched_entity *entity); |
570 | ||
dbe48d03 | 571 | struct drm_sched_fence *drm_sched_fence_alloc( |
1b1f42d8 | 572 | struct drm_sched_entity *s_entity, void *owner); |
dbe48d03 DV |
573 | void drm_sched_fence_init(struct drm_sched_fence *fence, |
574 | struct drm_sched_entity *entity); | |
d4c16733 | 575 | void drm_sched_fence_free(struct drm_sched_fence *fence); |
dbe48d03 | 576 | |
1b1f42d8 LS |
577 | void drm_sched_fence_scheduled(struct drm_sched_fence *fence); |
578 | void drm_sched_fence_finished(struct drm_sched_fence *fence); | |
1b1f42d8 | 579 | |
1db8c142 SM |
580 | unsigned long drm_sched_suspend_timeout(struct drm_gpu_scheduler *sched); |
581 | void drm_sched_resume_timeout(struct drm_gpu_scheduler *sched, | |
582 | unsigned long remaining); | |
ec2edcc2 ND |
583 | struct drm_gpu_scheduler * |
584 | drm_sched_pick_best(struct drm_gpu_scheduler **sched_list, | |
585 | unsigned int num_sched_list); | |
1db8c142 | 586 | |
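Finally, a load-balancing sketch tying drm_sched_pick_best() to drm_sched_entity_modify_sched(); my_rebalance() is hypothetical and relies on the @sched_list rule that a single-entry list is not retained by the entity.

```c
/* Sketch: steer an entity to the least-loaded ready scheduler. */
static void my_rebalance(struct drm_sched_entity *entity,
			 struct drm_gpu_scheduler **sched_list,
			 unsigned int num_sched_list)
{
	struct drm_gpu_scheduler *best;

	best = drm_sched_pick_best(sched_list, num_sched_list);
	if (best)
		drm_sched_entity_modify_sched(entity, &best, 1);
}
```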
1b1f42d8 | 587 | #endif |