/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#ifndef _DRM_GPU_SCHEDULER_H_
#define _DRM_GPU_SCHEDULER_H_

#include <drm/spsc_queue.h>
#include <linux/dma-fence.h>

#define MAX_WAIT_SCHED_ENTITY_Q_EMPTY msecs_to_jiffies(1000)
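
/*
 * A typical use (a hedged note, not enforced by this header): drivers pass
 * this value as the timeout when draining an entity's queue, e.g.
 * drm_sched_entity_flush(entity, MAX_WAIT_SCHED_ENTITY_Q_EMPTY).
 */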

struct drm_gpu_scheduler;
struct drm_sched_rq;

enum drm_sched_priority {
	DRM_SCHED_PRIORITY_MIN,
	DRM_SCHED_PRIORITY_LOW = DRM_SCHED_PRIORITY_MIN,
	DRM_SCHED_PRIORITY_NORMAL,
	DRM_SCHED_PRIORITY_HIGH_SW,
	DRM_SCHED_PRIORITY_HIGH_HW,
	DRM_SCHED_PRIORITY_KERNEL,
	DRM_SCHED_PRIORITY_MAX,
	DRM_SCHED_PRIORITY_INVALID = -1,
	DRM_SCHED_PRIORITY_UNSET = -2
};
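
/*
 * For illustration only: &drm_gpu_scheduler below keeps one run queue per
 * priority level in its sched_rq array, so a hypothetical driver wanting
 * the normal-priority run queue of a scheduler would take
 * &sched->sched_rq[DRM_SCHED_PRIORITY_NORMAL] and pass it to
 * drm_sched_entity_init() via its rq_list argument.
 */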

/**
 * struct drm_sched_entity - A wrapper around a job queue (typically
 * attached to the DRM file_priv).
 *
 * @list: used to append this struct to the list of entities in the
 *        runqueue.
 * @rq: runqueue on which this entity is currently scheduled.
 * @rq_list: a list of run queues on which jobs from this entity can
 *           be scheduled.
 * @num_rq_list: number of run queues in the rq_list.
 * @rq_lock: lock to modify the runqueue to which this entity belongs.
 * @job_queue: the list of jobs of this entity.
 * @fence_seq: a linearly increasing seqno incremented with each
 *             new &drm_sched_fence which is part of the entity.
 * @fence_context: a unique context for all the fences which belong
 *                 to this entity.
 *                 The &drm_sched_fence.scheduled uses the
 *                 fence_context but &drm_sched_fence.finished uses
 *                 fence_context + 1.
 * @dependency: the dependency fence of the job which is on the top
 *              of the job queue.
 * @cb: callback for the dependency fence above.
 * @guilty: points to the context's guilty flag.
 * @last_scheduled: points to the finished fence of the last scheduled job.
 * @last_user: last group leader pushing a job into the entity.
 * @stopped: Marks the entity as removed from the rq and destined for
 *           termination.
 *
 * Entities will emit jobs in order to their corresponding hardware
 * ring, and the scheduler will alternate between entities based on
 * scheduling policy.
 */
struct drm_sched_entity {
	struct list_head list;
	struct drm_sched_rq *rq;
	struct drm_sched_rq **rq_list;
	unsigned int num_rq_list;
	spinlock_t rq_lock;

	struct spsc_queue job_queue;

	atomic_t fence_seq;
	uint64_t fence_context;

	struct dma_fence *dependency;
	struct dma_fence_cb cb;
	atomic_t *guilty;
	struct dma_fence *last_scheduled;
	struct task_struct *last_user;
	bool stopped;
};
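
/*
 * A minimal setup sketch (hypothetical driver code; "ring", "ctx" and the
 * ctx->guilty atomic are assumptions, not part of this API):
 *
 *	struct drm_sched_rq *rq =
 *		&ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
 *	int r;
 *
 *	r = drm_sched_entity_init(&ctx->entity, &rq, 1, &ctx->guilty);
 *	if (r)
 *		return r;
 *
 * The entity is torn down again with drm_sched_entity_destroy(), declared
 * further below.
 */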

/**
 * struct drm_sched_rq - queue of entities to be scheduled.
 *
 * @lock: to modify the entities list.
 * @sched: the scheduler to which this rq belongs.
 * @entities: list of the entities to be scheduled.
 * @current_entity: the entity which is to be scheduled.
 *
 * Run queue is a set of entities scheduling command submissions for
 * one specific ring. It implements the scheduling policy that selects
 * the next entity to emit commands from.
 */
struct drm_sched_rq {
	spinlock_t lock;
	struct drm_gpu_scheduler *sched;
	struct list_head entities;
	struct drm_sched_entity *current_entity;
};

/**
 * struct drm_sched_fence - fences corresponding to the scheduling of a job.
 */
struct drm_sched_fence {
	/**
	 * @scheduled: this fence is what will be signaled by the scheduler
	 * when the job is scheduled.
	 */
	struct dma_fence scheduled;

	/**
	 * @finished: this fence is what will be signaled by the scheduler
	 * when the job is completed.
	 *
	 * When setting up an out fence for the job, you should use
	 * this, since it's available immediately upon
	 * drm_sched_job_init(), and the fence returned by the driver
	 * from run_job() won't be created until the dependencies have
	 * resolved.
	 */
	struct dma_fence finished;

	/**
	 * @cb: the callback for the parent fence below.
	 */
	struct dma_fence_cb cb;

	/**
	 * @parent: the fence returned by &drm_sched_backend_ops.run_job
	 * when scheduling the job on hardware. We signal the
	 * &drm_sched_fence.finished fence once parent is signalled.
	 */
	struct dma_fence *parent;

	/**
	 * @sched: the scheduler instance to which the job having this struct
	 * belongs.
	 */
	struct drm_gpu_scheduler *sched;

	/**
	 * @lock: the lock used by the scheduled and the finished fences.
	 */
	spinlock_t lock;

	/**
	 * @owner: job owner for debugging.
	 */
	void *owner;
};

struct drm_sched_fence *to_drm_sched_fence(struct dma_fence *f);
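
/*
 * As the @finished documentation above notes, &drm_sched_fence.finished is
 * the fence to expose as a job's out fence. A hedged sketch, assuming a
 * driver job struct that embeds &struct drm_sched_job as "base":
 *
 *	r = drm_sched_job_init(&job->base, entity, owner);
 *	if (r)
 *		return r;
 *	out_fence = dma_fence_get(&job->base.s_fence->finished);
 *
 * out_fence can then be handed to userspace (for example via a sync_file)
 * before the job is even pushed to the entity.
 */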

/**
 * struct drm_sched_job - A job to be run by an entity.
 *
 * @queue_node: used to append this struct to the queue of jobs in an entity.
 * @sched: the scheduler instance on which this job is scheduled.
 * @s_fence: contains the fences for the scheduling of the job.
 * @finish_cb: the callback for the finished fence.
 * @finish_work: schedules drm_sched_job_finish() once the job has finished,
 *               to remove the job from the
 *               &drm_gpu_scheduler.ring_mirror_list.
 * @node: used to append this struct to the &drm_gpu_scheduler.ring_mirror_list.
 * @id: a unique id assigned to each job scheduled on the scheduler.
 * @karma: increment on every hang caused by this job. If this exceeds the hang
 *         limit of the scheduler then the job is marked guilty and will not
 *         be scheduled further.
 * @s_priority: the priority of the job.
 * @entity: the entity to which this job belongs.
 *
 * A job is created by the driver using drm_sched_job_init(); the driver
 * then calls drm_sched_entity_push_job() once it wants the scheduler
 * to schedule the job.
 */
struct drm_sched_job {
	struct spsc_node queue_node;
	struct drm_gpu_scheduler *sched;
	struct drm_sched_fence *s_fence;
	struct dma_fence_cb finish_cb;
	struct work_struct finish_work;
	struct list_head node;
	uint64_t id;
	atomic_t karma;
	enum drm_sched_priority s_priority;
	struct drm_sched_entity *entity;
};
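
/*
 * Drivers typically embed &struct drm_sched_job in a driver-private job
 * structure and recover it in the backend ops; a hedged sketch (the "foo"
 * names are hypothetical):
 *
 *	struct foo_job {
 *		struct drm_sched_job base;
 *		// driver-private state: command buffers, ring, ...
 *	};
 *
 *	static inline struct foo_job *to_foo_job(struct drm_sched_job *job)
 *	{
 *		return container_of(job, struct foo_job, base);
 *	}
 */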

/**
 * drm_sched_invalidate_job - bump a job's karma and test it against a limit
 * @s_job: job to blame for a hang, may be NULL
 * @threshold: hang limit to compare the incremented karma against
 *
 * Returns true if @s_job is non-NULL and its karma now exceeds @threshold,
 * i.e. the job should be treated as guilty.
 */
static inline bool drm_sched_invalidate_job(struct drm_sched_job *s_job,
					    int threshold)
{
	return (s_job && atomic_inc_return(&s_job->karma) > threshold);
}

/**
 * struct drm_sched_backend_ops
 *
 * Define the backend operations called by the scheduler;
 * these functions should be implemented on the driver side.
 */
struct drm_sched_backend_ops {
	/**
	 * @dependency: Called when the scheduler is considering scheduling
	 * this job next, to get another struct dma_fence for this job to
	 * block on. Once it returns NULL, run_job() may be called.
	 */
	struct dma_fence *(*dependency)(struct drm_sched_job *sched_job,
					struct drm_sched_entity *s_entity);

	/**
	 * @run_job: Called to execute the job once all of the dependencies
	 * have been resolved. This may be called multiple times, if
	 * timedout_job() has happened and drm_sched_job_recovery()
	 * decides to try it again.
	 */
	struct dma_fence *(*run_job)(struct drm_sched_job *sched_job);

	/**
	 * @timedout_job: Called when a job has taken too long to execute,
	 * to trigger GPU recovery.
	 */
	void (*timedout_job)(struct drm_sched_job *sched_job);

	/**
	 * @free_job: Called once the job's finished fence has been signaled
	 * and it's time to clean it up.
	 */
	void (*free_job)(struct drm_sched_job *sched_job);
};
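
/*
 * A minimal, hedged sketch of driver-side ops; every "foo" helper is an
 * assumption and error handling is elided:
 *
 *	static struct dma_fence *
 *	foo_job_dependency(struct drm_sched_job *sched_job,
 *			   struct drm_sched_entity *s_entity)
 *	{
 *		return NULL;	// nothing to wait for beyond queue order
 *	}
 *
 *	static struct dma_fence *foo_run_job(struct drm_sched_job *sched_job)
 *	{
 *		struct foo_job *job = to_foo_job(sched_job);
 *
 *		foo_ring_emit(job);		// push to the hardware ring
 *		return foo_hw_fence_get(job);	// signalled on completion
 *	}
 *
 *	static void foo_timedout_job(struct drm_sched_job *sched_job)
 *	{
 *		foo_gpu_recover(to_foo_job(sched_job));
 *	}
 *
 *	static void foo_free_job(struct drm_sched_job *sched_job)
 *	{
 *		kfree(to_foo_job(sched_job));
 *	}
 *
 *	static const struct drm_sched_backend_ops foo_sched_ops = {
 *		.dependency	= foo_job_dependency,
 *		.run_job	= foo_run_job,
 *		.timedout_job	= foo_timedout_job,
 *		.free_job	= foo_free_job,
 *	};
 */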

/**
 * struct drm_gpu_scheduler
 *
 * @ops: backend operations provided by the driver.
 * @hw_submission_limit: the max size of the hardware queue.
 * @timeout: the time after which a job is removed from the scheduler.
 * @name: name of the ring for which this scheduler is being used.
 * @sched_rq: array of run queues, one per priority level.
 * @wake_up_worker: the wait queue on which the scheduler sleeps until a job
 *                  is ready to be scheduled.
 * @job_scheduled: once drm_sched_entity_flush() is called the scheduler
 *                 waits on this wait queue until all the scheduled jobs are
 *                 finished.
 * @hw_rq_count: the number of jobs currently in the hardware queue.
 * @job_id_count: used to assign a unique id to each job.
 * @work_tdr: schedules a delayed call to drm_sched_job_timedout() after the
 *            timeout interval is over.
 * @thread: the kthread on which the scheduler runs.
 * @ring_mirror_list: the list of jobs which are currently in the job queue.
 * @job_list_lock: lock to protect the ring_mirror_list.
 * @hang_limit: once the number of hangs caused by a job crosses this limit,
 *              the job is marked guilty and will not be scheduled further.
 * @num_jobs: the number of jobs currently queued in the scheduler.
 *
 * One scheduler is implemented for each hardware ring.
 */
struct drm_gpu_scheduler {
	const struct drm_sched_backend_ops *ops;
	uint32_t hw_submission_limit;
	long timeout;
	const char *name;
	struct drm_sched_rq sched_rq[DRM_SCHED_PRIORITY_MAX];
	wait_queue_head_t wake_up_worker;
	wait_queue_head_t job_scheduled;
	atomic_t hw_rq_count;
	atomic64_t job_id_count;
	struct delayed_work work_tdr;
	struct task_struct *thread;
	struct list_head ring_mirror_list;
	spinlock_t job_list_lock;
	int hang_limit;
	atomic_t num_jobs;
};

int drm_sched_init(struct drm_gpu_scheduler *sched,
		   const struct drm_sched_backend_ops *ops,
		   uint32_t hw_submission, unsigned hang_limit, long timeout,
		   const char *name);
void drm_sched_fini(struct drm_gpu_scheduler *sched);
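
/*
 * A hedged init sketch; the ops table from the sketch above and the
 * concrete numbers here are illustrative assumptions, not API requirements:
 *
 *	r = drm_sched_init(&ring->sched, &foo_sched_ops,
 *			   64,			  // hw_submission
 *			   2,			  // hang_limit
 *			   msecs_to_jiffies(500), // timeout
 *			   ring->name);
 *
 * drm_sched_fini() is its counterpart on the driver teardown path.
 */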
int drm_sched_job_init(struct drm_sched_job *job,
		       struct drm_sched_entity *entity,
		       void *owner);
void drm_sched_wakeup(struct drm_gpu_scheduler *sched);
void drm_sched_hw_job_reset(struct drm_gpu_scheduler *sched,
			    struct drm_sched_job *job);
void drm_sched_job_recovery(struct drm_gpu_scheduler *sched);
bool drm_sched_dependency_optimized(struct dma_fence *fence,
				    struct drm_sched_entity *entity);
void drm_sched_fault(struct drm_gpu_scheduler *sched);
void drm_sched_job_kickout(struct drm_sched_job *s_job);

void drm_sched_rq_add_entity(struct drm_sched_rq *rq,
			     struct drm_sched_entity *entity);
void drm_sched_rq_remove_entity(struct drm_sched_rq *rq,
				struct drm_sched_entity *entity);

int drm_sched_entity_init(struct drm_sched_entity *entity,
			  struct drm_sched_rq **rq_list,
			  unsigned int num_rq_list,
			  atomic_t *guilty);
long drm_sched_entity_flush(struct drm_sched_entity *entity, long timeout);
void drm_sched_entity_fini(struct drm_sched_entity *entity);
void drm_sched_entity_destroy(struct drm_sched_entity *entity);
void drm_sched_entity_select_rq(struct drm_sched_entity *entity);
struct drm_sched_job *drm_sched_entity_pop_job(struct drm_sched_entity *entity);
void drm_sched_entity_push_job(struct drm_sched_job *sched_job,
			       struct drm_sched_entity *entity);
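
/*
 * The usual submission flow, as described at &struct drm_sched_job; a
 * hedged sketch where foo_job_prepare() is an assumed driver helper:
 *
 *	r = drm_sched_job_init(&job->base, entity, owner);
 *	if (r)
 *		return r;
 *	foo_job_prepare(job);		// fill command buffers etc.
 *	drm_sched_entity_push_job(&job->base, entity);
 *
 * After drm_sched_entity_push_job() the scheduler takes over; the driver
 * sees the job again in the &drm_sched_backend_ops callbacks.
 */
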
void drm_sched_entity_set_priority(struct drm_sched_entity *entity,
				   enum drm_sched_priority priority);
bool drm_sched_entity_is_ready(struct drm_sched_entity *entity);

struct drm_sched_fence *drm_sched_fence_create(
	struct drm_sched_entity *s_entity, void *owner);
void drm_sched_fence_scheduled(struct drm_sched_fence *fence);
void drm_sched_fence_finished(struct drm_sched_fence *fence);

#endif