/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: monk liu <monk.liu@amd.com>
 */

#include <drm/drm_auth.h>
#include "amdgpu.h"
#include "amdgpu_sched.h"
#include "amdgpu_ras.h"

#define to_amdgpu_ctx_entity(e)	\
	container_of((e), struct amdgpu_ctx_entity, entity)

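/* Number of scheduler entities exposed to userspace per hardware IP type. */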
const unsigned int amdgpu_ctx_num_entities[AMDGPU_HW_IP_NUM] = {
	[AMDGPU_HW_IP_GFX]	=	1,
	[AMDGPU_HW_IP_COMPUTE]	=	4,
	[AMDGPU_HW_IP_DMA]	=	2,
	[AMDGPU_HW_IP_UVD]	=	1,
	[AMDGPU_HW_IP_VCE]	=	1,
	[AMDGPU_HW_IP_UVD_ENC]	=	1,
	[AMDGPU_HW_IP_VCN_DEC]	=	1,
	[AMDGPU_HW_IP_VCN_ENC]	=	1,
	[AMDGPU_HW_IP_VCN_JPEG]	=	1,
};

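/* Total number of context entities, summed over all hardware IP types. */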
static int amdgpu_ctx_total_num_entities(void)
{
	unsigned i, num_entities = 0;

	for (i = 0; i < AMDGPU_HW_IP_NUM; ++i)
		num_entities += amdgpu_ctx_num_entities[i];

	return num_entities;
}

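/*
 * Creating a context above NORMAL priority is a privileged operation,
 * reserved for CAP_SYS_NICE holders and the current DRM master.
 */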
static int amdgpu_ctx_priority_permit(struct drm_file *filp,
				      enum drm_sched_priority priority)
{
	/* NORMAL and below are accessible by everyone */
	if (priority <= DRM_SCHED_PRIORITY_NORMAL)
		return 0;

	if (capable(CAP_SYS_NICE))
		return 0;

	if (drm_is_current_master(filp))
		return 0;

	return -EACCES;
}

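/*
 * Initialize a freshly allocated context: set up the per-entity fence rings
 * and create one scheduler entity per ring exposed by each hardware IP type.
 */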
static int amdgpu_ctx_init(struct amdgpu_device *adev,
			   enum drm_sched_priority priority,
			   struct drm_file *filp,
			   struct amdgpu_ctx *ctx)
{
	unsigned num_entities = amdgpu_ctx_total_num_entities();
	unsigned i, j;
	int r;

	if (priority < 0 || priority >= DRM_SCHED_PRIORITY_MAX)
		return -EINVAL;

	r = amdgpu_ctx_priority_permit(filp, priority);
	if (r)
		return r;

	memset(ctx, 0, sizeof(*ctx));
	ctx->adev = adev;

	ctx->fences = kcalloc(amdgpu_sched_jobs * num_entities,
			      sizeof(struct dma_fence*), GFP_KERNEL);
	if (!ctx->fences)
		return -ENOMEM;

	ctx->entities[0] = kcalloc(num_entities,
				   sizeof(struct amdgpu_ctx_entity),
				   GFP_KERNEL);
	if (!ctx->entities[0]) {
		r = -ENOMEM;
		goto error_free_fences;
	}

	for (i = 0; i < num_entities; ++i) {
		struct amdgpu_ctx_entity *entity = &ctx->entities[0][i];

		entity->sequence = 1;
		entity->fences = &ctx->fences[amdgpu_sched_jobs * i];
	}
	for (i = 1; i < AMDGPU_HW_IP_NUM; ++i)
		ctx->entities[i] = ctx->entities[i - 1] +
			amdgpu_ctx_num_entities[i - 1];

	kref_init(&ctx->refcount);
	spin_lock_init(&ctx->ring_lock);
	mutex_init(&ctx->lock);

	ctx->reset_counter = atomic_read(&adev->gpu_reset_counter);
	ctx->reset_counter_query = ctx->reset_counter;
	ctx->vram_lost_counter = atomic_read(&adev->vram_lost_counter);
	ctx->init_priority = priority;
	ctx->override_priority = DRM_SCHED_PRIORITY_UNSET;

	for (i = 0; i < AMDGPU_HW_IP_NUM; ++i) {
		struct amdgpu_ring *rings[AMDGPU_MAX_RINGS];
		struct drm_sched_rq *rqs[AMDGPU_MAX_RINGS];
		unsigned num_rings;
		unsigned num_rqs = 0;

		switch (i) {
		case AMDGPU_HW_IP_GFX:
			rings[0] = &adev->gfx.gfx_ring[0];
			num_rings = 1;
			break;
		case AMDGPU_HW_IP_COMPUTE:
			for (j = 0; j < adev->gfx.num_compute_rings; ++j)
				rings[j] = &adev->gfx.compute_ring[j];
			num_rings = adev->gfx.num_compute_rings;
			break;
		case AMDGPU_HW_IP_DMA:
			for (j = 0; j < adev->sdma.num_instances; ++j)
				rings[j] = &adev->sdma.instance[j].ring;
			num_rings = adev->sdma.num_instances;
			break;
		case AMDGPU_HW_IP_UVD:
			rings[0] = &adev->uvd.inst[0].ring;
			num_rings = 1;
			break;
		case AMDGPU_HW_IP_VCE:
			rings[0] = &adev->vce.ring[0];
			num_rings = 1;
			break;
		case AMDGPU_HW_IP_UVD_ENC:
			rings[0] = &adev->uvd.inst[0].ring_enc[0];
			num_rings = 1;
			break;
		case AMDGPU_HW_IP_VCN_DEC:
			rings[0] = &adev->vcn.ring_dec;
			num_rings = 1;
			break;
		case AMDGPU_HW_IP_VCN_ENC:
			rings[0] = &adev->vcn.ring_enc[0];
			num_rings = 1;
			break;
		case AMDGPU_HW_IP_VCN_JPEG:
			rings[0] = &adev->vcn.ring_jpeg;
			num_rings = 1;
			break;
		}

		for (j = 0; j < num_rings; ++j) {
			if (!rings[j]->adev)
				continue;

			rqs[num_rqs++] = &rings[j]->sched.sched_rq[priority];
		}

		for (j = 0; j < amdgpu_ctx_num_entities[i]; ++j) {
			r = drm_sched_entity_init(&ctx->entities[i][j].entity,
						  rqs, num_rqs, &ctx->guilty);
			if (r)
				goto error_cleanup_entities;
		}
	}

	return 0;

error_cleanup_entities:
	for (i = 0; i < num_entities; ++i)
		drm_sched_entity_destroy(&ctx->entities[0][i].entity);
	kfree(ctx->entities[0]);

error_free_fences:
	kfree(ctx->fences);
	ctx->fences = NULL;
	return r;
}

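/* Final kref release: drop all cached fence references and free the context. */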
static void amdgpu_ctx_fini(struct kref *ref)
{
	struct amdgpu_ctx *ctx = container_of(ref, struct amdgpu_ctx, refcount);
	unsigned num_entities = amdgpu_ctx_total_num_entities();
	struct amdgpu_device *adev = ctx->adev;
	unsigned i, j;

	if (!adev)
		return;

	for (i = 0; i < num_entities; ++i)
		for (j = 0; j < amdgpu_sched_jobs; ++j)
			dma_fence_put(ctx->entities[0][i].fences[j]);
	kfree(ctx->fences);
	kfree(ctx->entities[0]);

	mutex_destroy(&ctx->lock);

	kfree(ctx);
}

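/*
 * Translate a userspace (hw_ip, instance, ring) triple into the scheduler
 * entity that submissions for it should be pushed to.
 */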
int amdgpu_ctx_get_entity(struct amdgpu_ctx *ctx, u32 hw_ip, u32 instance,
			  u32 ring, struct drm_sched_entity **entity)
{
	if (hw_ip >= AMDGPU_HW_IP_NUM) {
		DRM_ERROR("unknown HW IP type: %d\n", hw_ip);
		return -EINVAL;
	}

	/* Right now all IPs have only one instance - multiple rings. */
	if (instance != 0) {
		DRM_DEBUG("invalid ip instance: %d\n", instance);
		return -EINVAL;
	}

	if (ring >= amdgpu_ctx_num_entities[hw_ip]) {
		DRM_DEBUG("invalid ring: %d %d\n", hw_ip, ring);
		return -EINVAL;
	}

	*entity = &ctx->entities[hw_ip][ring].entity;
	return 0;
}

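/* Allocate and initialize a context and publish its handle in the IDR. */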
static int amdgpu_ctx_alloc(struct amdgpu_device *adev,
			    struct amdgpu_fpriv *fpriv,
			    struct drm_file *filp,
			    enum drm_sched_priority priority,
			    uint32_t *id)
{
	struct amdgpu_ctx_mgr *mgr = &fpriv->ctx_mgr;
	struct amdgpu_ctx *ctx;
	int r;

	ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	mutex_lock(&mgr->lock);
	r = idr_alloc(&mgr->ctx_handles, ctx, 1, AMDGPU_VM_MAX_NUM_CTX, GFP_KERNEL);
	if (r < 0) {
		mutex_unlock(&mgr->lock);
		kfree(ctx);
		return r;
	}

	*id = (uint32_t)r;
	r = amdgpu_ctx_init(adev, priority, filp, ctx);
	if (r) {
		idr_remove(&mgr->ctx_handles, *id);
		*id = 0;
		kfree(ctx);
	}
	mutex_unlock(&mgr->lock);
	return r;
}

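/* kref release callback: destroy all scheduler entities, then free the context. */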
static void amdgpu_ctx_do_release(struct kref *ref)
{
	struct amdgpu_ctx *ctx;
	unsigned num_entities;
	u32 i;

	ctx = container_of(ref, struct amdgpu_ctx, refcount);

	num_entities = amdgpu_ctx_total_num_entities();
	for (i = 0; i < num_entities; i++)
		drm_sched_entity_destroy(&ctx->entities[0][i].entity);

	amdgpu_ctx_fini(ref);
}

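/* Drop a context handle: remove it from the IDR and put its reference. */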
static int amdgpu_ctx_free(struct amdgpu_fpriv *fpriv, uint32_t id)
{
	struct amdgpu_ctx_mgr *mgr = &fpriv->ctx_mgr;
	struct amdgpu_ctx *ctx;

	mutex_lock(&mgr->lock);
	ctx = idr_remove(&mgr->ctx_handles, id);
	if (ctx)
		kref_put(&ctx->refcount, amdgpu_ctx_do_release);
	mutex_unlock(&mgr->lock);
	return ctx ? 0 : -EINVAL;
}

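/*
 * AMDGPU_CTX_OP_QUERY_STATE: report whether a GPU reset has occurred since
 * the previous query on this context.
 */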
static int amdgpu_ctx_query(struct amdgpu_device *adev,
			    struct amdgpu_fpriv *fpriv, uint32_t id,
			    union drm_amdgpu_ctx_out *out)
{
	struct amdgpu_ctx *ctx;
	struct amdgpu_ctx_mgr *mgr;
	unsigned reset_counter;

	if (!fpriv)
		return -EINVAL;

	mgr = &fpriv->ctx_mgr;
	mutex_lock(&mgr->lock);
	ctx = idr_find(&mgr->ctx_handles, id);
	if (!ctx) {
		mutex_unlock(&mgr->lock);
		return -EINVAL;
	}

	/* TODO: these two are always zero */
	out->state.flags = 0x0;
	out->state.hangs = 0x0;

	/* determine if a GPU reset has occurred since the last call */
	reset_counter = atomic_read(&adev->gpu_reset_counter);
	/* TODO: this should ideally return NO, GUILTY, or INNOCENT. */
	if (ctx->reset_counter_query == reset_counter)
		out->state.reset_status = AMDGPU_CTX_NO_RESET;
	else
		out->state.reset_status = AMDGPU_CTX_UNKNOWN_RESET;
	ctx->reset_counter_query = reset_counter;

	mutex_unlock(&mgr->lock);
	return 0;
}

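/*
 * AMDGPU_CTX_OP_QUERY_STATE2: report reset, VRAM-lost, guilty and RAS error
 * state as flags in a single query.
 */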
static int amdgpu_ctx_query2(struct amdgpu_device *adev,
			     struct amdgpu_fpriv *fpriv, uint32_t id,
			     union drm_amdgpu_ctx_out *out)
{
	struct amdgpu_ctx *ctx;
	struct amdgpu_ctx_mgr *mgr;
	uint32_t ras_counter;

	if (!fpriv)
		return -EINVAL;

	mgr = &fpriv->ctx_mgr;
	mutex_lock(&mgr->lock);
	ctx = idr_find(&mgr->ctx_handles, id);
	if (!ctx) {
		mutex_unlock(&mgr->lock);
		return -EINVAL;
	}

	out->state.flags = 0x0;
	out->state.hangs = 0x0;

	if (ctx->reset_counter != atomic_read(&adev->gpu_reset_counter))
		out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_RESET;

	if (ctx->vram_lost_counter != atomic_read(&adev->vram_lost_counter))
		out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_VRAMLOST;

	if (atomic_read(&ctx->guilty))
		out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_GUILTY;

	/* query the uncorrectable error count */
	ras_counter = amdgpu_ras_query_error_count(adev, false);
	/* the RAS counter is monotonically increasing */
	if (ras_counter != ctx->ras_counter_ue) {
		out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_RAS_UE;
		ctx->ras_counter_ue = ras_counter;
	}

	/* query the correctable error count */
	ras_counter = amdgpu_ras_query_error_count(adev, true);
	if (ras_counter != ctx->ras_counter_ce) {
		out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_RAS_CE;
		ctx->ras_counter_ce = ras_counter;
	}

	mutex_unlock(&mgr->lock);
	return 0;
}

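/*
 * Entry point for the context IOCTL. From userspace this is typically
 * reached through libdrm; a minimal sketch of allocating a context (assuming
 * an open render-node fd and libdrm's drmCommandWriteRead() wrapper, neither
 * of which is part of this file) looks like:
 *
 *	union drm_amdgpu_ctx args = {};
 *
 *	args.in.op = AMDGPU_CTX_OP_ALLOC_CTX;
 *	args.in.priority = AMDGPU_CTX_PRIORITY_NORMAL;
 *	if (drmCommandWriteRead(fd, DRM_AMDGPU_CTX, &args, sizeof(args)) == 0)
 *		ctx_id = args.out.alloc.ctx_id;
 */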
int amdgpu_ctx_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *filp)
{
	int r;
	uint32_t id;
	enum drm_sched_priority priority;

	union drm_amdgpu_ctx *args = data;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_fpriv *fpriv = filp->driver_priv;

	r = 0;
	id = args->in.ctx_id;
	priority = amdgpu_to_sched_priority(args->in.priority);

	/* For backwards compatibility reasons, we need to accept
	 * ioctls with garbage in the priority field.
	 */
	if (priority == DRM_SCHED_PRIORITY_INVALID)
		priority = DRM_SCHED_PRIORITY_NORMAL;

	switch (args->in.op) {
	case AMDGPU_CTX_OP_ALLOC_CTX:
		r = amdgpu_ctx_alloc(adev, fpriv, filp, priority, &id);
		args->out.alloc.ctx_id = id;
		break;
	case AMDGPU_CTX_OP_FREE_CTX:
		r = amdgpu_ctx_free(fpriv, id);
		break;
	case AMDGPU_CTX_OP_QUERY_STATE:
		r = amdgpu_ctx_query(adev, fpriv, id, &args->out);
		break;
	case AMDGPU_CTX_OP_QUERY_STATE2:
		r = amdgpu_ctx_query2(adev, fpriv, id, &args->out);
		break;
	default:
		return -EINVAL;
	}

	return r;
}

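/* Look up a context by handle and take a reference; paired with amdgpu_ctx_put(). */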
struct amdgpu_ctx *amdgpu_ctx_get(struct amdgpu_fpriv *fpriv, uint32_t id)
{
	struct amdgpu_ctx *ctx;
	struct amdgpu_ctx_mgr *mgr;

	if (!fpriv)
		return NULL;

	mgr = &fpriv->ctx_mgr;

	mutex_lock(&mgr->lock);
	ctx = idr_find(&mgr->ctx_handles, id);
	if (ctx)
		kref_get(&ctx->refcount);
	mutex_unlock(&mgr->lock);
	return ctx;
}

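/* Release a reference obtained from amdgpu_ctx_get(). */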
int amdgpu_ctx_put(struct amdgpu_ctx *ctx)
{
	if (ctx == NULL)
		return -EINVAL;

	kref_put(&ctx->refcount, amdgpu_ctx_do_release);
	return 0;
}

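/*
 * Store the fence of the latest submission on @entity in its fence ring and
 * advance the sequence number. The slot being recycled must already have
 * signaled. The sequence number is returned through @handle (if non-NULL)
 * for later lookup with amdgpu_ctx_get_fence().
 */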
void amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx,
			  struct drm_sched_entity *entity,
			  struct dma_fence *fence, uint64_t *handle)
{
	struct amdgpu_ctx_entity *centity = to_amdgpu_ctx_entity(entity);
	uint64_t seq = centity->sequence;
	struct dma_fence *other = NULL;
	unsigned idx = 0;

	idx = seq & (amdgpu_sched_jobs - 1);
	other = centity->fences[idx];
	if (other)
		BUG_ON(!dma_fence_is_signaled(other));

	dma_fence_get(fence);

	spin_lock(&ctx->ring_lock);
	centity->fences[idx] = fence;
	centity->sequence++;
	spin_unlock(&ctx->ring_lock);

	dma_fence_put(other);
	if (handle)
		*handle = seq;
}

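/*
 * Look up the fence for sequence number @seq on @entity: an ERR_PTR for
 * sequence numbers that have not been emitted yet, NULL for fences that
 * already left the ring (and are therefore known to have signaled), or a
 * new fence reference otherwise.
 */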
struct dma_fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
				       struct drm_sched_entity *entity,
				       uint64_t seq)
{
	struct amdgpu_ctx_entity *centity = to_amdgpu_ctx_entity(entity);
	struct dma_fence *fence;

	spin_lock(&ctx->ring_lock);

	if (seq == ~0ull)
		seq = centity->sequence - 1;

	if (seq >= centity->sequence) {
		spin_unlock(&ctx->ring_lock);
		return ERR_PTR(-EINVAL);
	}

	if (seq + amdgpu_sched_jobs < centity->sequence) {
		spin_unlock(&ctx->ring_lock);
		return NULL;
	}

	fence = dma_fence_get(centity->fences[seq & (amdgpu_sched_jobs - 1)]);
	spin_unlock(&ctx->ring_lock);

	return fence;
}

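/* Apply the override priority (or the initial one while UNSET) to all entities. */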
void amdgpu_ctx_priority_override(struct amdgpu_ctx *ctx,
				  enum drm_sched_priority priority)
{
	unsigned num_entities = amdgpu_ctx_total_num_entities();
	enum drm_sched_priority ctx_prio;
	unsigned i;

	ctx->override_priority = priority;

	ctx_prio = (ctx->override_priority == DRM_SCHED_PRIORITY_UNSET) ?
			ctx->init_priority : ctx->override_priority;

	for (i = 0; i < num_entities; i++) {
		struct drm_sched_entity *entity = &ctx->entities[0][i].entity;

		drm_sched_entity_set_priority(entity, ctx_prio);
	}
}

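/*
 * Wait for the fence in the ring slot that the next submission on @entity
 * will recycle, so an unsignaled fence is never overwritten.
 */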
int amdgpu_ctx_wait_prev_fence(struct amdgpu_ctx *ctx,
			       struct drm_sched_entity *entity)
{
	struct amdgpu_ctx_entity *centity = to_amdgpu_ctx_entity(entity);
	unsigned idx = centity->sequence & (amdgpu_sched_jobs - 1);
	struct dma_fence *other = centity->fences[idx];

	if (other) {
		signed long r;

		r = dma_fence_wait(other, true);
		if (r < 0) {
			if (r != -ERESTARTSYS)
				DRM_ERROR("Error (%ld) waiting for fence!\n", r);

			return r;
		}
	}

	return 0;
}

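/* Initialize the per-file context manager: its lock and handle IDR. */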
void amdgpu_ctx_mgr_init(struct amdgpu_ctx_mgr *mgr)
{
	mutex_init(&mgr->lock);
	idr_init(&mgr->ctx_handles);
}

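/*
 * Flush all entities of all contexts managed by @mgr, consuming @timeout
 * while waiting for their queued jobs; returns the remaining timeout.
 */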
long amdgpu_ctx_mgr_entity_flush(struct amdgpu_ctx_mgr *mgr, long timeout)
{
	unsigned num_entities = amdgpu_ctx_total_num_entities();
	struct amdgpu_ctx *ctx;
	struct idr *idp;
	uint32_t id, i;

	idp = &mgr->ctx_handles;

	mutex_lock(&mgr->lock);
	idr_for_each_entry(idp, ctx, id) {
		for (i = 0; i < num_entities; i++) {
			struct drm_sched_entity *entity;

			entity = &ctx->entities[0][i].entity;
			timeout = drm_sched_entity_flush(entity, timeout);
		}
	}
	mutex_unlock(&mgr->lock);
	return timeout;
}

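/* Finalize the scheduler entities of every context that is no longer in use. */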
void amdgpu_ctx_mgr_entity_fini(struct amdgpu_ctx_mgr *mgr)
{
	unsigned num_entities = amdgpu_ctx_total_num_entities();
	struct amdgpu_ctx *ctx;
	struct idr *idp;
	uint32_t id, i;

	idp = &mgr->ctx_handles;

	idr_for_each_entry(idp, ctx, id) {
		if (kref_read(&ctx->refcount) != 1) {
			DRM_ERROR("ctx %p is still alive\n", ctx);
			continue;
		}

		for (i = 0; i < num_entities; i++)
			drm_sched_entity_fini(&ctx->entities[0][i].entity);
	}
}

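/*
 * Tear down the context manager: finalize all entities, drop the remaining
 * context references and destroy the handle IDR.
 */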
void amdgpu_ctx_mgr_fini(struct amdgpu_ctx_mgr *mgr)
{
	struct amdgpu_ctx *ctx;
	struct idr *idp;
	uint32_t id;

	amdgpu_ctx_mgr_entity_fini(mgr);

	idp = &mgr->ctx_handles;

	idr_for_each_entry(idp, ctx, id) {
		if (kref_put(&ctx->refcount, amdgpu_ctx_fini) != 1)
			DRM_ERROR("ctx %p is still alive\n", ctx);
	}

	idr_destroy(&mgr->ctx_handles);
	mutex_destroy(&mgr->lock);
}