/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#ifndef _DRM_GPU_SCHEDULER_H_
#define _DRM_GPU_SCHEDULER_H_

#include <drm/spsc_queue.h>
#include <linux/dma-fence.h>
#include <linux/completion.h>
#include <linux/xarray.h>
#include <linux/workqueue.h>

#define MAX_WAIT_SCHED_ENTITY_Q_EMPTY msecs_to_jiffies(1000)

/**
 * DRM_SCHED_FENCE_DONT_PIPELINE - Prevent dependency pipelining
 *
 * Setting this flag on a scheduler fence prevents pipelining of jobs depending
 * on this fence. In other words we always insert a full CPU round trip before
 * dependent jobs are pushed to the hw queue.
 */
#define DRM_SCHED_FENCE_DONT_PIPELINE	DMA_FENCE_FLAG_USER_BITS

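/*
 * A minimal usage sketch (the helper name is hypothetical): setting the bit
 * on a scheduler fence, e.g. &drm_sched_fence.finished, forces dependent
 * jobs to take the full CPU round trip.
 */
static inline void example_fence_dont_pipeline(struct dma_fence *fence)
{
	set_bit(DRM_SCHED_FENCE_DONT_PIPELINE, &fence->flags);
}
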
/**
 * DRM_SCHED_FENCE_FLAG_HAS_DEADLINE_BIT - A fence deadline hint has been set
 *
 * Because a deadline hint can be set before the backing hw fence is created,
 * we need to keep track of whether a deadline has already been set.
 */
#define DRM_SCHED_FENCE_FLAG_HAS_DEADLINE_BIT	(DMA_FENCE_FLAG_USER_BITS + 1)

enum dma_resv_usage;
struct dma_resv;
struct drm_gem_object;

struct drm_gpu_scheduler;
struct drm_sched_rq;

struct drm_file;

/* These are often used as an (initial) index
 * to an array, and as such should start at 0.
 */
enum drm_sched_priority {
	DRM_SCHED_PRIORITY_MIN,
	DRM_SCHED_PRIORITY_NORMAL,
	DRM_SCHED_PRIORITY_HIGH,
	DRM_SCHED_PRIORITY_KERNEL,

	DRM_SCHED_PRIORITY_COUNT,
	DRM_SCHED_PRIORITY_UNSET = -2
};

/* Used to choose between FIFO and RR job scheduling */
extern int drm_sched_policy;

#define DRM_SCHED_POLICY_RR    0
#define DRM_SCHED_POLICY_FIFO  1

/**
 * struct drm_sched_entity - A wrapper around a job queue (typically
 * attached to the DRM file_priv).
 *
 * Entities will emit jobs in order to their corresponding hardware
 * ring, and the scheduler will alternate between entities based on
 * scheduling policy.
 */
struct drm_sched_entity {
	/**
	 * @list:
	 *
	 * Used to append this struct to the list of entities in the runqueue
	 * @rq under &drm_sched_rq.entities.
	 *
	 * Protected by &drm_sched_rq.lock of @rq.
	 */
	struct list_head list;

	/**
	 * @rq:
	 *
	 * Runqueue on which this entity is currently scheduled.
	 *
	 * FIXME: Locking is very unclear for this. Writers are protected by
	 * @rq_lock, but readers are generally lockless and seem to just race
	 * with not even a READ_ONCE.
	 */
	struct drm_sched_rq *rq;

	/**
	 * @sched_list:
	 *
	 * A list of schedulers (struct drm_gpu_scheduler). Jobs from this
	 * entity can be scheduled on any scheduler on this list.
	 *
	 * This can be modified by calling drm_sched_entity_modify_sched().
	 * Locking is entirely up to the driver, see the above function for
	 * more details.
	 *
	 * This will be set to NULL if &num_sched_list equals 1 and @rq has
	 * been set already.
	 *
	 * FIXME: This means priority changes through
	 * drm_sched_entity_set_priority() will be lost henceforth in this
	 * case.
	 */
	struct drm_gpu_scheduler **sched_list;

	/**
	 * @num_sched_list:
	 *
	 * Number of drm_gpu_schedulers in the @sched_list.
	 */
	unsigned int num_sched_list;

	/**
	 * @priority:
	 *
	 * Priority of the entity. This can be modified by calling
	 * drm_sched_entity_set_priority(). Protected by &rq_lock.
	 */
	enum drm_sched_priority priority;

	/**
	 * @rq_lock:
	 *
	 * Lock to modify the runqueue to which this entity belongs.
	 */
	spinlock_t rq_lock;

	/**
	 * @job_queue: the list of jobs of this entity.
	 */
	struct spsc_queue job_queue;

	/**
	 * @fence_seq:
	 *
	 * A linearly increasing seqno incremented with each new
	 * &drm_sched_fence which is part of the entity.
	 *
	 * FIXME: Callers of drm_sched_job_arm() need to ensure correct
	 * locking, this doesn't need to be atomic.
	 */
	atomic_t fence_seq;

	/**
	 * @fence_context:
	 *
	 * A unique context for all the fences which belong to this entity. The
	 * &drm_sched_fence.scheduled uses the fence_context but
	 * &drm_sched_fence.finished uses fence_context + 1.
	 */
	uint64_t fence_context;

	/**
	 * @dependency:
	 *
	 * The dependency fence of the job which is on the top of the job
	 * queue.
	 */
	struct dma_fence *dependency;

	/**
	 * @cb:
	 *
	 * Callback for the dependency fence above.
	 */
	struct dma_fence_cb cb;

	/**
	 * @guilty:
	 *
	 * Points to the entity's guilty flag.
	 */
	atomic_t *guilty;

	/**
	 * @last_scheduled:
	 *
	 * Points to the finished fence of the last scheduled job. Only written
	 * by the scheduler thread, can be accessed locklessly from
	 * drm_sched_job_arm() iff the queue is empty.
	 */
	struct dma_fence *last_scheduled;

	/**
	 * @last_user: last group leader pushing a job into the entity.
	 */
	struct task_struct *last_user;

	/**
	 * @stopped:
	 *
	 * Marks the entity as removed from rq and destined for
	 * termination. This is set by calling drm_sched_entity_flush() and by
	 * drm_sched_fini().
	 */
	bool stopped;

	/**
	 * @entity_idle:
	 *
	 * Signals when the entity is not in use, used to sequence entity
	 * cleanup in drm_sched_entity_fini().
	 */
	struct completion entity_idle;

	/**
	 * @oldest_job_waiting:
	 *
	 * Marks the earliest job waiting in the SW queue.
	 */
	ktime_t oldest_job_waiting;

	/**
	 * @rb_tree_node:
	 *
	 * The node used to insert this entity into the time based priority
	 * queue.
	 */
	struct rb_node rb_tree_node;
};

/**
 * struct drm_sched_rq - queue of entities to be scheduled.
 *
 * @lock: to modify the entities list.
 * @sched: the scheduler to which this rq belongs.
 * @entities: list of the entities to be scheduled.
 * @current_entity: the entity which is to be scheduled.
 * @rb_tree_root: root of the time based priority queue of entities
 *                for FIFO scheduling
 *
 * Run queue is a set of entities scheduling command submissions for
 * one specific ring. It implements the scheduling policy that selects
 * the next entity to emit commands from.
 */
struct drm_sched_rq {
	spinlock_t lock;
	struct drm_gpu_scheduler *sched;
	struct list_head entities;
	struct drm_sched_entity *current_entity;
	struct rb_root_cached rb_tree_root;
};

/**
 * struct drm_sched_fence - fences corresponding to the scheduling of a job.
 */
struct drm_sched_fence {
	/**
	 * @scheduled: this fence is what will be signaled by the scheduler
	 * when the job is scheduled.
	 */
	struct dma_fence scheduled;

	/**
	 * @finished: this fence is what will be signaled by the scheduler
	 * when the job is completed.
	 *
	 * When setting up an out fence for the job, you should use
	 * this, since it's available immediately upon
	 * drm_sched_job_init(), and the fence returned by the driver
	 * from run_job() won't be created until the dependencies have
	 * resolved.
	 */
	struct dma_fence finished;

	/**
	 * @deadline: deadline set on &drm_sched_fence.finished which
	 * potentially needs to be propagated to &drm_sched_fence.parent
	 */
	ktime_t deadline;

	/**
	 * @parent: the fence returned by &drm_sched_backend_ops.run_job
	 * when scheduling the job on hardware. We signal the
	 * &drm_sched_fence.finished fence once parent is signalled.
	 */
	struct dma_fence *parent;
	/**
	 * @sched: the scheduler instance to which the job having this struct
	 * belongs.
	 */
	struct drm_gpu_scheduler *sched;
	/**
	 * @lock: the lock used by the scheduled and the finished fences.
	 */
	spinlock_t lock;
	/**
	 * @owner: job owner for debugging
	 */
	void *owner;
};
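
/*
 * A minimal sketch (the helper is hypothetical): hand out a reference to the
 * @finished fence as the job's out fence, since it exists from
 * drm_sched_job_init() on, long before run_job() produces @parent.
 */
static inline struct dma_fence *
example_job_out_fence(struct drm_sched_fence *s_fence)
{
	return dma_fence_get(&s_fence->finished);
}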

struct drm_sched_fence *to_drm_sched_fence(struct dma_fence *f);

/**
 * struct drm_sched_job - A job to be run by an entity.
 *
 * @queue_node: used to append this struct to the queue of jobs in an entity.
 * @list: a job participates in the "pending" and "done" lists.
 * @sched: the scheduler instance on which this job is scheduled.
 * @s_fence: contains the fences for the scheduling of the job.
 * @finish_cb: the callback for the finished fence.
 * @work: Helper to reschedule the job kill to a different context.
 * @id: a unique id assigned to each job scheduled on the scheduler.
 * @karma: incremented on every hang caused by this job. If this exceeds the
 *         hang limit of the scheduler then the job is marked guilty and will
 *         not be scheduled further.
 * @s_priority: the priority of the job.
 * @entity: the entity to which this job belongs.
 * @cb: the callback for the parent fence in s_fence.
 *
 * A job is created by the driver using drm_sched_job_init(), and the driver
 * should call drm_sched_entity_push_job() once it wants the scheduler to
 * schedule the job. A minimal submission sketch follows the entity helper
 * declarations below.
 */
struct drm_sched_job {
	struct spsc_node queue_node;
	struct list_head list;
	struct drm_gpu_scheduler *sched;
	struct drm_sched_fence *s_fence;

	/*
	 * work is used only after finish_cb has been used and will not be
	 * accessed anymore.
	 */
	union {
		struct dma_fence_cb finish_cb;
		struct work_struct work;
	};

	uint64_t id;
	atomic_t karma;
	enum drm_sched_priority s_priority;
	struct drm_sched_entity *entity;
	struct dma_fence_cb cb;

	/**
	 * @dependencies:
	 *
	 * Contains the dependencies as struct dma_fence for this job, see
	 * drm_sched_job_add_dependency() and
	 * drm_sched_job_add_implicit_dependencies().
	 */
	struct xarray dependencies;

	/** @last_dependency: tracks @dependencies as they signal */
	unsigned long last_dependency;

	/**
	 * @submit_ts:
	 *
	 * When the job was pushed into the entity queue.
	 */
	ktime_t submit_ts;
};

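/* Bumps the job's karma and reports whether it has crossed the hang limit. */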
static inline bool drm_sched_invalidate_job(struct drm_sched_job *s_job,
					    int threshold)
{
	return s_job && atomic_inc_return(&s_job->karma) > threshold;
}

enum drm_gpu_sched_stat {
	DRM_GPU_SCHED_STAT_NONE, /* Reserve 0 */
	DRM_GPU_SCHED_STAT_NOMINAL,
	DRM_GPU_SCHED_STAT_ENODEV,
};

/**
 * struct drm_sched_backend_ops - Define the backend operations
 *	called by the scheduler
 *
 * These functions should be implemented on the driver side.
 */
struct drm_sched_backend_ops {
	/**
	 * @prepare_job:
	 *
	 * Called when the scheduler is considering scheduling this job next, to
	 * get another struct dma_fence for this job to block on. Once it
	 * returns NULL, run_job() may be called.
	 *
	 * Can be NULL if no additional preparation of the dependencies is
	 * necessary. Skipped when jobs are killed instead of run.
	 */
	struct dma_fence *(*prepare_job)(struct drm_sched_job *sched_job,
					 struct drm_sched_entity *s_entity);

	/**
	 * @run_job: Called to execute the job once all of the dependencies
	 * have been resolved. This may be called multiple times, if
	 * timedout_job() has happened and drm_sched_job_recovery()
	 * decides to try it again.
	 */
	struct dma_fence *(*run_job)(struct drm_sched_job *sched_job);

	/**
	 * @timedout_job: Called when a job has taken too long to execute,
	 * to trigger GPU recovery.
	 *
	 * This method is called in a workqueue context.
	 *
	 * Drivers typically issue a reset to recover from GPU hangs, and this
	 * procedure usually follows the following workflow:
	 *
	 * 1. Stop the scheduler using drm_sched_stop(). This will park the
	 *    scheduler thread and cancel the timeout work, guaranteeing that
	 *    nothing is queued while we reset the hardware queue
	 * 2. Try to gracefully stop non-faulty jobs (optional)
	 * 3. Issue a GPU reset (driver-specific)
	 * 4. Re-submit jobs using drm_sched_resubmit_jobs()
	 * 5. Restart the scheduler using drm_sched_start(). At that point, new
	 *    jobs can be queued, and the scheduler thread is unblocked
	 *
	 * A minimal sketch of this single-queue flow is provided after the
	 * recovery helper declarations below.
	 *
	 * Note that some GPUs have distinct hardware queues but need to reset
	 * the GPU globally, which requires extra synchronization between the
	 * timeout handlers of the different &drm_gpu_scheduler instances. One
	 * way to achieve this synchronization is to create an ordered
	 * workqueue (using alloc_ordered_workqueue()) at the driver level, and
	 * pass this queue to drm_sched_init(), to guarantee that timeout
	 * handlers are executed sequentially. The above workflow needs to be
	 * slightly adjusted in that case:
	 *
	 * 1. Stop all schedulers impacted by the reset using drm_sched_stop()
	 * 2. Try to gracefully stop non-faulty jobs on all queues impacted by
	 *    the reset (optional)
	 * 3. Issue a GPU reset on all faulty queues (driver-specific)
	 * 4. Re-submit jobs on all schedulers impacted by the reset using
	 *    drm_sched_resubmit_jobs()
	 * 5. Restart all schedulers that were stopped in step #1 using
	 *    drm_sched_start()
	 *
	 * Return DRM_GPU_SCHED_STAT_NOMINAL, when all is normal,
	 * and the underlying driver has started or completed recovery.
	 *
	 * Return DRM_GPU_SCHED_STAT_ENODEV, if the device is no longer
	 * available, i.e. has been unplugged.
	 */
	enum drm_gpu_sched_stat (*timedout_job)(struct drm_sched_job *sched_job);

	/**
	 * @free_job: Called once the job's finished fence has been signaled
	 * and it's time to clean it up.
	 */
	void (*free_job)(struct drm_sched_job *sched_job);
};

/**
 * struct drm_gpu_scheduler - scheduler instance-specific data
 *
 * @ops: backend operations provided by the driver.
 * @hw_submission_limit: the max size of the hardware queue.
 * @timeout: the time after which a job is removed from the scheduler.
 * @name: name of the ring for which this scheduler is being used.
 * @sched_rq: priority wise array of run queues.
 * @wake_up_worker: the wait queue on which the scheduler sleeps until a job
 *                  is ready to be scheduled.
 * @job_scheduled: once @drm_sched_entity_do_release is called the scheduler
 *                 waits on this wait queue until all the scheduled jobs are
 *                 finished.
 * @hw_rq_count: the number of jobs currently in the hardware queue.
 * @job_id_count: used to assign a unique id to each job.
 * @timeout_wq: workqueue used to queue @work_tdr
 * @work_tdr: schedules a delayed call to @drm_sched_job_timedout after the
 *            timeout interval is over.
 * @thread: the kthread on which the scheduler runs.
 * @pending_list: the list of jobs which are currently in the job queue.
 * @job_list_lock: lock to protect the pending_list.
 * @hang_limit: once the number of hangs caused by a job crosses this limit,
 *              the job is marked guilty and will no longer be considered for
 *              scheduling.
 * @score: score to help the loadbalancer pick an idle sched
 * @_score: score used when the driver doesn't provide one
 * @ready: marks if the underlying HW is ready to work
 * @free_guilty: A hint to the timeout handler to free the guilty job.
 * @dev: system &struct device
 *
 * One scheduler is implemented for each hardware ring.
 */
struct drm_gpu_scheduler {
	const struct drm_sched_backend_ops *ops;
	uint32_t hw_submission_limit;
	long timeout;
	const char *name;
	struct drm_sched_rq sched_rq[DRM_SCHED_PRIORITY_COUNT];
	wait_queue_head_t wake_up_worker;
	wait_queue_head_t job_scheduled;
	atomic_t hw_rq_count;
	atomic64_t job_id_count;
	struct workqueue_struct *timeout_wq;
	struct delayed_work work_tdr;
	struct task_struct *thread;
	struct list_head pending_list;
	spinlock_t job_list_lock;
	int hang_limit;
	atomic_t *score;
	atomic_t _score;
	bool ready;
	bool free_guilty;
	struct device *dev;
};

int drm_sched_init(struct drm_gpu_scheduler *sched,
		   const struct drm_sched_backend_ops *ops,
		   uint32_t hw_submission, unsigned hang_limit,
		   long timeout, struct workqueue_struct *timeout_wq,
		   atomic_t *score, const char *name, struct device *dev);

void drm_sched_fini(struct drm_gpu_scheduler *sched);
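
/*
 * A minimal sketch of bringing up one scheduler instance per hardware ring;
 * the ring depth, timeout and name are hypothetical driver choices. A NULL
 * timeout_wq selects the system workqueue and a NULL score uses the
 * scheduler's internal @_score.
 */
static inline int example_sched_setup(struct drm_gpu_scheduler *sched,
				      const struct drm_sched_backend_ops *ops,
				      struct device *dev)
{
	return drm_sched_init(sched, ops,
			      64,                      /* hw_submission */
			      0,                       /* hang_limit */
			      msecs_to_jiffies(500),   /* timeout */
			      NULL,                    /* timeout_wq */
			      NULL,                    /* score */
			      "example-ring", dev);
}
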
int drm_sched_job_init(struct drm_sched_job *job,
		       struct drm_sched_entity *entity,
		       void *owner);
void drm_sched_job_arm(struct drm_sched_job *job);
int drm_sched_job_add_dependency(struct drm_sched_job *job,
				 struct dma_fence *fence);
int drm_sched_job_add_syncobj_dependency(struct drm_sched_job *job,
					 struct drm_file *file,
					 u32 handle,
					 u32 point);
int drm_sched_job_add_resv_dependencies(struct drm_sched_job *job,
					struct dma_resv *resv,
					enum dma_resv_usage usage);
int drm_sched_job_add_implicit_dependencies(struct drm_sched_job *job,
					    struct drm_gem_object *obj,
					    bool write);


void drm_sched_entity_modify_sched(struct drm_sched_entity *entity,
				   struct drm_gpu_scheduler **sched_list,
				   unsigned int num_sched_list);

void drm_sched_job_cleanup(struct drm_sched_job *job);
void drm_sched_wakeup(struct drm_gpu_scheduler *sched);
void drm_sched_stop(struct drm_gpu_scheduler *sched, struct drm_sched_job *bad);
void drm_sched_start(struct drm_gpu_scheduler *sched, bool full_recovery);
void drm_sched_resubmit_jobs(struct drm_gpu_scheduler *sched);
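
/*
 * A minimal sketch of the single-queue recovery flow documented at
 * &drm_sched_backend_ops.timedout_job; the hw_reset callback is a
 * hypothetical stand-in for the driver-specific reset step.
 */
static inline enum drm_gpu_sched_stat
example_timedout_job(struct drm_sched_job *sched_job,
		     void (*hw_reset)(struct drm_gpu_scheduler *sched))
{
	struct drm_gpu_scheduler *sched = sched_job->sched;

	drm_sched_stop(sched, sched_job);   /* 1. park thread, cancel tdr */
	hw_reset(sched);                    /* 3. driver-specific GPU reset */
	drm_sched_resubmit_jobs(sched);     /* 4. re-queue pending jobs */
	drm_sched_start(sched, true);       /* 5. unblock submission */

	return DRM_GPU_SCHED_STAT_NOMINAL;
}
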
void drm_sched_increase_karma(struct drm_sched_job *bad);
void drm_sched_reset_karma(struct drm_sched_job *bad);
void drm_sched_increase_karma_ext(struct drm_sched_job *bad, int type);
bool drm_sched_dependency_optimized(struct dma_fence *fence,
				    struct drm_sched_entity *entity);
void drm_sched_fault(struct drm_gpu_scheduler *sched);

void drm_sched_rq_add_entity(struct drm_sched_rq *rq,
			     struct drm_sched_entity *entity);
void drm_sched_rq_remove_entity(struct drm_sched_rq *rq,
				struct drm_sched_entity *entity);

void drm_sched_rq_update_fifo(struct drm_sched_entity *entity, ktime_t ts);

int drm_sched_entity_init(struct drm_sched_entity *entity,
			  enum drm_sched_priority priority,
			  struct drm_gpu_scheduler **sched_list,
			  unsigned int num_sched_list,
			  atomic_t *guilty);
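
/*
 * A minimal sketch of typical entity setup against a single scheduler;
 * the helper name is hypothetical and guilty tracking is skipped (NULL).
 */
static inline int example_entity_setup(struct drm_sched_entity *entity,
				       struct drm_gpu_scheduler *sched)
{
	struct drm_gpu_scheduler *sched_list[] = { sched };

	return drm_sched_entity_init(entity, DRM_SCHED_PRIORITY_NORMAL,
				     sched_list, 1, NULL);
}
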
long drm_sched_entity_flush(struct drm_sched_entity *entity, long timeout);
void drm_sched_entity_fini(struct drm_sched_entity *entity);
void drm_sched_entity_destroy(struct drm_sched_entity *entity);
void drm_sched_entity_select_rq(struct drm_sched_entity *entity);
struct drm_sched_job *drm_sched_entity_pop_job(struct drm_sched_entity *entity);
void drm_sched_entity_push_job(struct drm_sched_job *sched_job);
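
/*
 * A minimal sketch of the submission flow described at &struct drm_sched_job:
 * init against an entity, add an optional dependency, arm, then push. The
 * helper and its arguments are hypothetical.
 */
static inline int example_submit_job(struct drm_sched_job *job,
				     struct drm_sched_entity *entity,
				     struct dma_fence *dep, void *owner)
{
	int ret;

	ret = drm_sched_job_init(job, entity, owner);
	if (ret)
		return ret;

	if (dep) {
		/* The scheduler takes over the reference obtained here. */
		ret = drm_sched_job_add_dependency(job, dma_fence_get(dep));
		if (ret) {
			drm_sched_job_cleanup(job);
			return ret;
		}
	}

	/* Point of no return: the scheduler now owns the job. */
	drm_sched_job_arm(job);
	drm_sched_entity_push_job(job);
	return 0;
}
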
void drm_sched_entity_set_priority(struct drm_sched_entity *entity,
				   enum drm_sched_priority priority);
bool drm_sched_entity_is_ready(struct drm_sched_entity *entity);

void drm_sched_fence_set_parent(struct drm_sched_fence *s_fence,
				struct dma_fence *fence);
struct drm_sched_fence *drm_sched_fence_alloc(
	struct drm_sched_entity *s_entity, void *owner);
void drm_sched_fence_init(struct drm_sched_fence *fence,
			  struct drm_sched_entity *entity);
void drm_sched_fence_free(struct drm_sched_fence *fence);

void drm_sched_fence_scheduled(struct drm_sched_fence *fence);
void drm_sched_fence_finished(struct drm_sched_fence *fence);

unsigned long drm_sched_suspend_timeout(struct drm_gpu_scheduler *sched);
void drm_sched_resume_timeout(struct drm_gpu_scheduler *sched,
			      unsigned long remaining);
struct drm_gpu_scheduler *
drm_sched_pick_best(struct drm_gpu_scheduler **sched_list,
		    unsigned int num_sched_list);

#endif