/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: monk liu <monk.liu@amd.com>
 */

#include <drm/drmP.h>
#include <drm/drm_auth.h>
#include "amdgpu.h"
#include "amdgpu_sched.h"
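
/*
 * amdgpu_ctx_priority_permit - check whether the caller may use @priority
 *
 * Priorities above NORMAL are privileged: they require either CAP_SYS_NICE
 * or DRM master status on @filp. Returns 0 if the priority is permitted,
 * -EACCES otherwise.
 */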
static int amdgpu_ctx_priority_permit(struct drm_file *filp,
				      enum drm_sched_priority priority)
{
	/* NORMAL and below are accessible by everyone */
	if (priority <= DRM_SCHED_PRIORITY_NORMAL)
		return 0;

	if (capable(CAP_SYS_NICE))
		return 0;

	if (drm_is_current_master(filp))
		return 0;

	return -EACCES;
}
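
/*
 * amdgpu_ctx_init - initialize a context
 *
 * Validates and permission-checks @priority, allocates the per-ring fence
 * slots (amdgpu_sched_jobs entries per ring), snapshots the GPU reset and
 * VRAM lost counters, and creates one scheduler entity per ring in the run
 * queue matching the requested priority. The KIQ ring is skipped, as it is
 * a kernel-internal queue.
 */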
static int amdgpu_ctx_init(struct amdgpu_device *adev,
			   enum drm_sched_priority priority,
			   struct drm_file *filp,
			   struct amdgpu_ctx *ctx)
{
	unsigned i, j;
	int r;

	if (priority < 0 || priority >= DRM_SCHED_PRIORITY_MAX)
		return -EINVAL;

	r = amdgpu_ctx_priority_permit(filp, priority);
	if (r)
		return r;

	memset(ctx, 0, sizeof(*ctx));
	ctx->adev = adev;
	kref_init(&ctx->refcount);
	spin_lock_init(&ctx->ring_lock);
	ctx->fences = kcalloc(amdgpu_sched_jobs * AMDGPU_MAX_RINGS,
			      sizeof(struct dma_fence *), GFP_KERNEL);
	if (!ctx->fences)
		return -ENOMEM;

	mutex_init(&ctx->lock);

	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
		ctx->rings[i].sequence = 1;
		ctx->rings[i].fences = &ctx->fences[amdgpu_sched_jobs * i];
	}

	ctx->reset_counter = atomic_read(&adev->gpu_reset_counter);
	ctx->reset_counter_query = ctx->reset_counter;
	ctx->vram_lost_counter = atomic_read(&adev->vram_lost_counter);
	ctx->init_priority = priority;
	ctx->override_priority = DRM_SCHED_PRIORITY_UNSET;

	/* create context entity for each ring */
	for (i = 0; i < adev->num_rings; i++) {
		struct amdgpu_ring *ring = adev->rings[i];
		struct drm_sched_rq *rq;

		rq = &ring->sched.sched_rq[priority];

		if (ring == &adev->gfx.kiq.ring)
			continue;

		r = drm_sched_entity_init(&ring->sched, &ctx->rings[i].entity,
					  rq, amdgpu_sched_jobs, &ctx->guilty);
		if (r)
			goto failed;
	}

	r = amdgpu_queue_mgr_init(adev, &ctx->queue_mgr);
	if (r)
		goto failed;

	return 0;

failed:
	for (j = 0; j < i; j++)
		drm_sched_entity_fini(&adev->rings[j]->sched,
				      &ctx->rings[j].entity);
	kfree(ctx->fences);
	ctx->fences = NULL;
	return r;
}
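
/*
 * amdgpu_ctx_fini - final kref release callback
 *
 * Drops every fence still held in the per-ring slots, frees the fence
 * array, tears down the queue manager and frees the context itself.
 */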
static void amdgpu_ctx_fini(struct kref *ref)
{
	struct amdgpu_ctx *ctx = container_of(ref, struct amdgpu_ctx, refcount);
	struct amdgpu_device *adev = ctx->adev;
	unsigned i, j;

	if (!adev)
		return;

	for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
		for (j = 0; j < amdgpu_sched_jobs; ++j)
			dma_fence_put(ctx->rings[i].fences[j]);
	kfree(ctx->fences);
	ctx->fences = NULL;

	amdgpu_queue_mgr_fini(adev, &ctx->queue_mgr);

	mutex_destroy(&ctx->lock);

	kfree(ctx);
}
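
/*
 * amdgpu_ctx_alloc - allocate a context and publish it in the handle IDR
 *
 * The IDR slot is reserved under mgr->lock before amdgpu_ctx_init() runs,
 * and is removed again on failure, so the handle returned in @id is only
 * meaningful when this returns 0.
 */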
static int amdgpu_ctx_alloc(struct amdgpu_device *adev,
			    struct amdgpu_fpriv *fpriv,
			    struct drm_file *filp,
			    enum drm_sched_priority priority,
			    uint32_t *id)
{
	struct amdgpu_ctx_mgr *mgr = &fpriv->ctx_mgr;
	struct amdgpu_ctx *ctx;
	int r;

	ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	mutex_lock(&mgr->lock);
	r = idr_alloc(&mgr->ctx_handles, ctx, 1, 0, GFP_KERNEL);
	if (r < 0) {
		mutex_unlock(&mgr->lock);
		kfree(ctx);
		return r;
	}

	*id = (uint32_t)r;
	r = amdgpu_ctx_init(adev, priority, filp, ctx);
	if (r) {
		idr_remove(&mgr->ctx_handles, *id);
		*id = 0;
		kfree(ctx);
	}
	mutex_unlock(&mgr->lock);
	return r;
}
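
/*
 * amdgpu_ctx_do_release - kref release callback for userspace references
 *
 * Tears down the per-ring scheduler entities, then hands off to
 * amdgpu_ctx_fini() for the remaining cleanup.
 */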
static void amdgpu_ctx_do_release(struct kref *ref)
{
	struct amdgpu_ctx *ctx;
	u32 i;

	ctx = container_of(ref, struct amdgpu_ctx, refcount);

	for (i = 0; i < ctx->adev->num_rings; i++)
		drm_sched_entity_fini(&ctx->adev->rings[i]->sched,
				      &ctx->rings[i].entity);

	amdgpu_ctx_fini(ref);
}
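
/*
 * amdgpu_ctx_free - remove a handle from the IDR and drop its reference
 */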
static int amdgpu_ctx_free(struct amdgpu_fpriv *fpriv, uint32_t id)
{
	struct amdgpu_ctx_mgr *mgr = &fpriv->ctx_mgr;
	struct amdgpu_ctx *ctx;

	mutex_lock(&mgr->lock);
	ctx = idr_remove(&mgr->ctx_handles, id);
	if (ctx)
		kref_put(&ctx->refcount, amdgpu_ctx_do_release);
	mutex_unlock(&mgr->lock);
	return ctx ? 0 : -EINVAL;
}
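
/*
 * amdgpu_ctx_query - legacy AMDGPU_CTX_OP_QUERY_STATE implementation
 *
 * Reports whether a GPU reset happened since the previous query by
 * comparing the per-context snapshot against the device reset counter.
 */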
static int amdgpu_ctx_query(struct amdgpu_device *adev,
			    struct amdgpu_fpriv *fpriv, uint32_t id,
			    union drm_amdgpu_ctx_out *out)
{
	struct amdgpu_ctx *ctx;
	struct amdgpu_ctx_mgr *mgr;
	unsigned reset_counter;

	if (!fpriv)
		return -EINVAL;

	mgr = &fpriv->ctx_mgr;
	mutex_lock(&mgr->lock);
	ctx = idr_find(&mgr->ctx_handles, id);
	if (!ctx) {
		mutex_unlock(&mgr->lock);
		return -EINVAL;
	}

	/* TODO: these two are always zero */
	out->state.flags = 0x0;
	out->state.hangs = 0x0;

	/* determine if a GPU reset has occurred since the last call */
	reset_counter = atomic_read(&adev->gpu_reset_counter);
	/* TODO: this should ideally return NO, GUILTY, or INNOCENT. */
	if (ctx->reset_counter_query == reset_counter)
		out->state.reset_status = AMDGPU_CTX_NO_RESET;
	else
		out->state.reset_status = AMDGPU_CTX_UNKNOWN_RESET;
	ctx->reset_counter_query = reset_counter;

	mutex_unlock(&mgr->lock);
	return 0;
}
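
/*
 * amdgpu_ctx_query2 - AMDGPU_CTX_OP_QUERY_STATE2 implementation
 *
 * Unlike amdgpu_ctx_query(), this reports reset state as flags: RESET if
 * any reset happened since context creation, VRAMLOST if VRAM contents
 * were lost, and GUILTY if a hang was attributed to this context.
 */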
static int amdgpu_ctx_query2(struct amdgpu_device *adev,
			     struct amdgpu_fpriv *fpriv, uint32_t id,
			     union drm_amdgpu_ctx_out *out)
{
	struct amdgpu_ctx *ctx;
	struct amdgpu_ctx_mgr *mgr;

	if (!fpriv)
		return -EINVAL;

	mgr = &fpriv->ctx_mgr;
	mutex_lock(&mgr->lock);
	ctx = idr_find(&mgr->ctx_handles, id);
	if (!ctx) {
		mutex_unlock(&mgr->lock);
		return -EINVAL;
	}

	out->state.flags = 0x0;
	out->state.hangs = 0x0;

	if (ctx->reset_counter != atomic_read(&adev->gpu_reset_counter))
		out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_RESET;

	if (ctx->vram_lost_counter != atomic_read(&adev->vram_lost_counter))
		out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_VRAMLOST;

	if (atomic_read(&ctx->guilty))
		out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_GUILTY;

	mutex_unlock(&mgr->lock);
	return 0;
}
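
/*
 * amdgpu_ctx_ioctl - dispatcher for the DRM_AMDGPU_CTX ioctl
 */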
int amdgpu_ctx_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *filp)
{
	int r;
	uint32_t id;
	enum drm_sched_priority priority;

	union drm_amdgpu_ctx *args = data;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_fpriv *fpriv = filp->driver_priv;

	r = 0;
	id = args->in.ctx_id;
	priority = amdgpu_to_sched_priority(args->in.priority);

	/* For backwards compatibility reasons, we need to accept
	 * ioctls with garbage in the priority field.
	 */
	if (priority == DRM_SCHED_PRIORITY_INVALID)
		priority = DRM_SCHED_PRIORITY_NORMAL;

	switch (args->in.op) {
	case AMDGPU_CTX_OP_ALLOC_CTX:
		r = amdgpu_ctx_alloc(adev, fpriv, filp, priority, &id);
		args->out.alloc.ctx_id = id;
		break;
	case AMDGPU_CTX_OP_FREE_CTX:
		r = amdgpu_ctx_free(fpriv, id);
		break;
	case AMDGPU_CTX_OP_QUERY_STATE:
		r = amdgpu_ctx_query(adev, fpriv, id, &args->out);
		break;
	case AMDGPU_CTX_OP_QUERY_STATE2:
		r = amdgpu_ctx_query2(adev, fpriv, id, &args->out);
		break;
	default:
		return -EINVAL;
	}

	return r;
}
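
/*
 * amdgpu_ctx_get - look up a context by handle and take a reference
 *
 * The caller must balance this with amdgpu_ctx_put().
 */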
struct amdgpu_ctx *amdgpu_ctx_get(struct amdgpu_fpriv *fpriv, uint32_t id)
{
	struct amdgpu_ctx *ctx;
	struct amdgpu_ctx_mgr *mgr;

	if (!fpriv)
		return NULL;

	mgr = &fpriv->ctx_mgr;

	mutex_lock(&mgr->lock);
	ctx = idr_find(&mgr->ctx_handles, id);
	if (ctx)
		kref_get(&ctx->refcount);
	mutex_unlock(&mgr->lock);
	return ctx;
}
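
/*
 * amdgpu_ctx_put - drop a reference taken with amdgpu_ctx_get()
 */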
int amdgpu_ctx_put(struct amdgpu_ctx *ctx)
{
	if (ctx == NULL)
		return -EINVAL;

	kref_put(&ctx->refcount, amdgpu_ctx_do_release);
	return 0;
}
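
/*
 * amdgpu_ctx_add_fence - remember a fence in the ring's sliding window
 *
 * Stores @fence at slot (sequence % amdgpu_sched_jobs) and returns the
 * sequence number through @handler. The previous occupant of the slot
 * must already be signaled (the BUG_ON guards this); its reference is
 * dropped here.
 */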
int amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx, struct amdgpu_ring *ring,
			 struct dma_fence *fence, uint64_t *handler)
{
	struct amdgpu_ctx_ring *cring = &ctx->rings[ring->idx];
	uint64_t seq = cring->sequence;
	unsigned idx = 0;
	struct dma_fence *other = NULL;

	idx = seq & (amdgpu_sched_jobs - 1);
	other = cring->fences[idx];
	if (other)
		BUG_ON(!dma_fence_is_signaled(other));

	dma_fence_get(fence);

	spin_lock(&ctx->ring_lock);
	cring->fences[idx] = fence;
	cring->sequence++;
	spin_unlock(&ctx->ring_lock);

	dma_fence_put(other);
	if (handler)
		*handler = seq;

	return 0;
}
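
/*
 * amdgpu_ctx_get_fence - look up a fence by ring and sequence number
 *
 * A seq of ~0ull means "the most recent submission". Returns ERR_PTR for
 * sequence numbers not yet submitted, NULL for ones that have aged out of
 * the amdgpu_sched_jobs-deep window (and so are presumably long signaled),
 * or a referenced fence otherwise.
 */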
struct dma_fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
				       struct amdgpu_ring *ring, uint64_t seq)
{
	struct amdgpu_ctx_ring *cring = &ctx->rings[ring->idx];
	struct dma_fence *fence;

	spin_lock(&ctx->ring_lock);

	if (seq == ~0ull)
		seq = ctx->rings[ring->idx].sequence - 1;

	if (seq >= cring->sequence) {
		spin_unlock(&ctx->ring_lock);
		return ERR_PTR(-EINVAL);
	}

	if (seq + amdgpu_sched_jobs < cring->sequence) {
		spin_unlock(&ctx->ring_lock);
		return NULL;
	}

	fence = dma_fence_get(cring->fences[seq & (amdgpu_sched_jobs - 1)]);
	spin_unlock(&ctx->ring_lock);

	return fence;
}
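
/*
 * amdgpu_ctx_priority_override - move the context's entities to new run
 * queues
 *
 * The effective priority is the override if one is set, otherwise the
 * priority the context was created with. KIQ rings are skipped, matching
 * amdgpu_ctx_init().
 */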
void amdgpu_ctx_priority_override(struct amdgpu_ctx *ctx,
				  enum drm_sched_priority priority)
{
	int i;
	struct amdgpu_device *adev = ctx->adev;
	struct drm_sched_rq *rq;
	struct drm_sched_entity *entity;
	struct amdgpu_ring *ring;
	enum drm_sched_priority ctx_prio;

	ctx->override_priority = priority;

	ctx_prio = (ctx->override_priority == DRM_SCHED_PRIORITY_UNSET) ?
			ctx->init_priority : ctx->override_priority;

	for (i = 0; i < adev->num_rings; i++) {
		ring = adev->rings[i];
		entity = &ctx->rings[i].entity;
		rq = &ring->sched.sched_rq[ctx_prio];

		if (ring->funcs->type == AMDGPU_RING_TYPE_KIQ)
			continue;

		drm_sched_entity_set_rq(entity, rq);
	}
}
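
/*
 * amdgpu_ctx_wait_prev_fence - wait for the fence occupying the next
 * sequence slot
 *
 * Blocks (interruptibly) until the amdgpu_sched_jobs-old submission has
 * signaled, so that amdgpu_ctx_add_fence() can safely reuse its slot.
 */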
int amdgpu_ctx_wait_prev_fence(struct amdgpu_ctx *ctx, unsigned ring_id)
{
	struct amdgpu_ctx_ring *cring = &ctx->rings[ring_id];
	unsigned idx = cring->sequence & (amdgpu_sched_jobs - 1);
	struct dma_fence *other = cring->fences[idx];

	if (other) {
		signed long r;

		r = dma_fence_wait(other, true);
		if (r < 0) {
			if (r != -ERESTARTSYS)
				DRM_ERROR("Error (%ld) waiting for fence!\n", r);

			return r;
		}
	}

	return 0;
}
void amdgpu_ctx_mgr_init(struct amdgpu_ctx_mgr *mgr)
{
	mutex_init(&mgr->lock);
	idr_init(&mgr->ctx_handles);
}
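
/*
 * amdgpu_ctx_mgr_entity_fini - begin teardown of all scheduler entities
 *
 * For every context whose only remaining reference is the handle IDR,
 * start releasing its entities; contexts still referenced elsewhere are
 * reported as leaked.
 */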
void amdgpu_ctx_mgr_entity_fini(struct amdgpu_ctx_mgr *mgr)
{
	struct amdgpu_ctx *ctx;
	struct idr *idp;
	uint32_t id, i;

	idp = &mgr->ctx_handles;

	idr_for_each_entry(idp, ctx, id) {
		if (!ctx->adev)
			return;

		for (i = 0; i < ctx->adev->num_rings; i++)
			if (kref_read(&ctx->refcount) == 1)
				drm_sched_entity_do_release(&ctx->adev->rings[i]->sched,
							    &ctx->rings[i].entity);
			else
				DRM_ERROR("ctx %p is still alive\n", ctx);
	}
}
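
/*
 * amdgpu_ctx_mgr_entity_cleanup - finish teardown of all scheduler entities
 *
 * Counterpart to amdgpu_ctx_mgr_entity_fini(): cleans up what remains of
 * each entity for contexts whose only reference is the handle IDR.
 */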
void amdgpu_ctx_mgr_entity_cleanup(struct amdgpu_ctx_mgr *mgr)
{
	struct amdgpu_ctx *ctx;
	struct idr *idp;
	uint32_t id, i;

	idp = &mgr->ctx_handles;

	idr_for_each_entry(idp, ctx, id) {
		if (!ctx->adev)
			return;

		for (i = 0; i < ctx->adev->num_rings; i++)
			if (kref_read(&ctx->refcount) == 1)
				drm_sched_entity_cleanup(&ctx->adev->rings[i]->sched,
							 &ctx->rings[i].entity);
			else
				DRM_ERROR("ctx %p is still alive\n", ctx);
	}
}
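
/*
 * amdgpu_ctx_mgr_fini - destroy the manager and any contexts left in it
 *
 * Contexts that user space failed to free are released here; a context
 * with outstanding references beyond the IDR's is reported as leaked.
 */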
void amdgpu_ctx_mgr_fini(struct amdgpu_ctx_mgr *mgr)
{
	struct amdgpu_ctx *ctx;
	struct idr *idp;
	uint32_t id;

	amdgpu_ctx_mgr_entity_cleanup(mgr);

	idp = &mgr->ctx_handles;

	idr_for_each_entry(idp, ctx, id) {
		if (kref_put(&ctx->refcount, amdgpu_ctx_fini) != 1)
			DRM_ERROR("ctx %p is still alive\n", ctx);
	}

	idr_destroy(&mgr->ctx_handles);
	mutex_destroy(&mgr->lock);
}