drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c

/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: monk liu <monk.liu@amd.com>
 */

#include <drm/drm_auth.h>
#include <drm/drm_drv.h>
#include "amdgpu.h"
#include "amdgpu_sched.h"
#include "amdgpu_ras.h"
#include <linux/nospec.h>

#define to_amdgpu_ctx_entity(e)	\
	container_of((e), struct amdgpu_ctx_entity, entity)

const unsigned int amdgpu_ctx_num_entities[AMDGPU_HW_IP_NUM] = {
	[AMDGPU_HW_IP_GFX] = 1,
	[AMDGPU_HW_IP_COMPUTE] = 4,
	[AMDGPU_HW_IP_DMA] = 2,
	[AMDGPU_HW_IP_UVD] = 1,
	[AMDGPU_HW_IP_VCE] = 1,
	[AMDGPU_HW_IP_UVD_ENC] = 1,
	[AMDGPU_HW_IP_VCN_DEC] = 1,
	[AMDGPU_HW_IP_VCN_ENC] = 1,
	[AMDGPU_HW_IP_VCN_JPEG] = 1,
};

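/*
 * Check that a context priority value coming from userspace is one of the
 * AMDGPU_CTX_PRIORITY_* levels defined in the UAPI.
 */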
bool amdgpu_ctx_priority_is_valid(int32_t ctx_prio)
{
	switch (ctx_prio) {
	case AMDGPU_CTX_PRIORITY_UNSET:
	case AMDGPU_CTX_PRIORITY_VERY_LOW:
	case AMDGPU_CTX_PRIORITY_LOW:
	case AMDGPU_CTX_PRIORITY_NORMAL:
	case AMDGPU_CTX_PRIORITY_HIGH:
	case AMDGPU_CTX_PRIORITY_VERY_HIGH:
		return true;
	default:
		return false;
	}
}

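/*
 * Map an AMDGPU_CTX_PRIORITY_* level to the DRM scheduler priority used for
 * software scheduling. VERY_LOW/LOW and HIGH/VERY_HIGH collapse to the same
 * scheduler priority here.
 */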
static enum drm_sched_priority
amdgpu_ctx_to_drm_sched_prio(int32_t ctx_prio)
{
	switch (ctx_prio) {
	case AMDGPU_CTX_PRIORITY_UNSET:
		return DRM_SCHED_PRIORITY_UNSET;

	case AMDGPU_CTX_PRIORITY_VERY_LOW:
		return DRM_SCHED_PRIORITY_MIN;

	case AMDGPU_CTX_PRIORITY_LOW:
		return DRM_SCHED_PRIORITY_MIN;

	case AMDGPU_CTX_PRIORITY_NORMAL:
		return DRM_SCHED_PRIORITY_NORMAL;

	case AMDGPU_CTX_PRIORITY_HIGH:
		return DRM_SCHED_PRIORITY_HIGH;

	case AMDGPU_CTX_PRIORITY_VERY_HIGH:
		return DRM_SCHED_PRIORITY_HIGH;

	/* This should not happen as we already sanitized the userspace-provided
	 * priority; WARN if it does.
	 */
	default:
		WARN(1, "Invalid context priority %d\n", ctx_prio);
		return DRM_SCHED_PRIORITY_NORMAL;
	}
}

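/*
 * Priorities above NORMAL require either CAP_SYS_NICE or DRM master status;
 * NORMAL and below are allowed for every client.
 */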
static int amdgpu_ctx_priority_permit(struct drm_file *filp,
				      int32_t priority)
{
	if (!amdgpu_ctx_priority_is_valid(priority))
		return -EINVAL;

	/* NORMAL and below are accessible by everyone */
	if (priority <= AMDGPU_CTX_PRIORITY_NORMAL)
		return 0;

	if (capable(CAP_SYS_NICE))
		return 0;

	if (drm_is_current_master(filp))
		return 0;

	return -EACCES;
}

static enum amdgpu_gfx_pipe_priority amdgpu_ctx_prio_to_gfx_pipe_prio(int32_t prio)
{
	switch (prio) {
	case AMDGPU_CTX_PRIORITY_HIGH:
	case AMDGPU_CTX_PRIORITY_VERY_HIGH:
		return AMDGPU_GFX_PIPE_PRIO_HIGH;
	default:
		return AMDGPU_GFX_PIPE_PRIO_NORMAL;
	}
}

static enum amdgpu_ring_priority_level amdgpu_ctx_sched_prio_to_ring_prio(int32_t prio)
{
	switch (prio) {
	case AMDGPU_CTX_PRIORITY_HIGH:
		return AMDGPU_RING_PRIO_1;
	case AMDGPU_CTX_PRIORITY_VERY_HIGH:
		return AMDGPU_RING_PRIO_2;
	default:
		return AMDGPU_RING_PRIO_0;
	}
}

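/*
 * Resolve the effective context priority (the override, if set, otherwise the
 * init priority) into a hardware ring priority for the given IP block. Falls
 * back to the default priority when no scheduler is set up for that level.
 */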
static unsigned int amdgpu_ctx_get_hw_prio(struct amdgpu_ctx *ctx, u32 hw_ip)
{
	struct amdgpu_device *adev = ctx->mgr->adev;
	unsigned int hw_prio;
	int32_t ctx_prio;

	ctx_prio = (ctx->override_priority == AMDGPU_CTX_PRIORITY_UNSET) ?
			ctx->init_priority : ctx->override_priority;

	switch (hw_ip) {
	case AMDGPU_HW_IP_GFX:
	case AMDGPU_HW_IP_COMPUTE:
		hw_prio = amdgpu_ctx_prio_to_gfx_pipe_prio(ctx_prio);
		break;
	case AMDGPU_HW_IP_VCE:
	case AMDGPU_HW_IP_VCN_ENC:
		hw_prio = amdgpu_ctx_sched_prio_to_ring_prio(ctx_prio);
		break;
	default:
		hw_prio = AMDGPU_RING_PRIO_DEFAULT;
		break;
	}

	hw_ip = array_index_nospec(hw_ip, AMDGPU_HW_IP_NUM);
	if (adev->gpu_sched[hw_ip][hw_prio].num_scheds == 0)
		hw_prio = AMDGPU_RING_PRIO_DEFAULT;

	return hw_prio;
}

/* Calculate the time spent on the hw */
static ktime_t amdgpu_ctx_fence_time(struct dma_fence *fence)
{
	struct drm_sched_fence *s_fence;

	if (!fence)
		return ns_to_ktime(0);

	/* When the fence is not even scheduled, it can't have spent time */
	s_fence = to_drm_sched_fence(fence);
	if (!test_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &s_fence->scheduled.flags))
		return ns_to_ktime(0);

	/* When it is still running, account how much time was already spent */
	if (!test_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &s_fence->finished.flags))
		return ktime_sub(ktime_get(), s_fence->scheduled.timestamp);

	return ktime_sub(s_fence->finished.timestamp,
			 s_fence->scheduled.timestamp);
}

static ktime_t amdgpu_ctx_entity_time(struct amdgpu_ctx *ctx,
				      struct amdgpu_ctx_entity *centity)
{
	ktime_t res = ns_to_ktime(0);
	uint32_t i;

	spin_lock(&ctx->ring_lock);
	for (i = 0; i < amdgpu_sched_jobs; i++) {
		res = ktime_add(res, amdgpu_ctx_fence_time(centity->fences[i]));
	}
	spin_unlock(&ctx->ring_lock);
	return res;
}

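/*
 * Lazily allocate and initialize the scheduler entity for one ring of an IP
 * block, using the scheduler list that matches the context priority.
 */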
static int amdgpu_ctx_init_entity(struct amdgpu_ctx *ctx, u32 hw_ip,
				  const u32 ring)
{
	struct drm_gpu_scheduler **scheds = NULL, *sched = NULL;
	struct amdgpu_device *adev = ctx->mgr->adev;
	struct amdgpu_ctx_entity *entity;
	enum drm_sched_priority drm_prio;
	unsigned int hw_prio, num_scheds;
	int32_t ctx_prio;
	int r;

	entity = kzalloc(struct_size(entity, fences, amdgpu_sched_jobs),
			 GFP_KERNEL);
	if (!entity)
		return -ENOMEM;

	ctx_prio = (ctx->override_priority == AMDGPU_CTX_PRIORITY_UNSET) ?
			ctx->init_priority : ctx->override_priority;
	entity->hw_ip = hw_ip;
	entity->sequence = 1;
	hw_prio = amdgpu_ctx_get_hw_prio(ctx, hw_ip);
	drm_prio = amdgpu_ctx_to_drm_sched_prio(ctx_prio);

	hw_ip = array_index_nospec(hw_ip, AMDGPU_HW_IP_NUM);
	scheds = adev->gpu_sched[hw_ip][hw_prio].sched;
	num_scheds = adev->gpu_sched[hw_ip][hw_prio].num_scheds;

	/* disable load balance if the hw engine retains context among dependent jobs */
	if (hw_ip == AMDGPU_HW_IP_VCN_ENC ||
	    hw_ip == AMDGPU_HW_IP_VCN_DEC ||
	    hw_ip == AMDGPU_HW_IP_UVD_ENC ||
	    hw_ip == AMDGPU_HW_IP_UVD) {
		sched = drm_sched_pick_best(scheds, num_scheds);
		scheds = &sched;
		num_scheds = 1;
	}

	r = drm_sched_entity_init(&entity->entity, drm_prio, scheds, num_scheds,
				  &ctx->guilty);
	if (r)
		goto error_free_entity;

	/* It's not an error if we fail to install the new entity */
	if (cmpxchg(&ctx->entities[hw_ip][ring], NULL, entity))
		goto cleanup_entity;

	return 0;

cleanup_entity:
	drm_sched_entity_fini(&entity->entity);

error_free_entity:
	kfree(entity);

	return r;
}

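/*
 * Tear down an entity and return the total time its remaining fences spent
 * on the hardware, so the caller can account it in the context manager.
 */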
static ktime_t amdgpu_ctx_fini_entity(struct amdgpu_ctx_entity *entity)
{
	ktime_t res = ns_to_ktime(0);
	int i;

	if (!entity)
		return res;

	for (i = 0; i < amdgpu_sched_jobs; ++i) {
		res = ktime_add(res, amdgpu_ctx_fence_time(entity->fences[i]));
		dma_fence_put(entity->fences[i]);
	}

	kfree(entity);
	return res;
}

static int amdgpu_ctx_get_stable_pstate(struct amdgpu_ctx *ctx,
					u32 *stable_pstate)
{
	struct amdgpu_device *adev = ctx->mgr->adev;
	enum amd_dpm_forced_level current_level;

	current_level = amdgpu_dpm_get_performance_level(adev);

	switch (current_level) {
	case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
		*stable_pstate = AMDGPU_CTX_STABLE_PSTATE_STANDARD;
		break;
	case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
		*stable_pstate = AMDGPU_CTX_STABLE_PSTATE_MIN_SCLK;
		break;
	case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK:
		*stable_pstate = AMDGPU_CTX_STABLE_PSTATE_MIN_MCLK;
		break;
	case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
		*stable_pstate = AMDGPU_CTX_STABLE_PSTATE_PEAK;
		break;
	default:
		*stable_pstate = AMDGPU_CTX_STABLE_PSTATE_NONE;
		break;
	}
	return 0;
}

static int amdgpu_ctx_init(struct amdgpu_ctx_mgr *mgr, int32_t priority,
			   struct drm_file *filp, struct amdgpu_ctx *ctx)
{
	u32 current_stable_pstate;
	int r;

	r = amdgpu_ctx_priority_permit(filp, priority);
	if (r)
		return r;

	memset(ctx, 0, sizeof(*ctx));

	kref_init(&ctx->refcount);
	ctx->mgr = mgr;
	spin_lock_init(&ctx->ring_lock);
	mutex_init(&ctx->lock);

	ctx->reset_counter = atomic_read(&mgr->adev->gpu_reset_counter);
	ctx->reset_counter_query = ctx->reset_counter;
	ctx->vram_lost_counter = atomic_read(&mgr->adev->vram_lost_counter);
	ctx->init_priority = priority;
	ctx->override_priority = AMDGPU_CTX_PRIORITY_UNSET;

	r = amdgpu_ctx_get_stable_pstate(ctx, &current_stable_pstate);
	if (r)
		return r;

	ctx->stable_pstate = current_stable_pstate;

	return 0;
}

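/*
 * Force a stable power state for profiling. Only one context may hold the
 * stable pstate at a time; requesting AMDGPU_CTX_STABLE_PSTATE_NONE releases
 * it again.
 */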
335static int amdgpu_ctx_set_stable_pstate(struct amdgpu_ctx *ctx,
336 u32 stable_pstate)
337{
69493c03 338 struct amdgpu_device *adev = ctx->mgr->adev;
8cda7a4f 339 enum amd_dpm_forced_level level;
505c170b 340 u32 current_stable_pstate;
8cda7a4f
AD
341 int r;
342
8cda7a4f
AD
343 mutex_lock(&adev->pm.stable_pstate_ctx_lock);
344 if (adev->pm.stable_pstate_ctx && adev->pm.stable_pstate_ctx != ctx) {
345 r = -EBUSY;
346 goto done;
347 }
348
505c170b
AD
349 r = amdgpu_ctx_get_stable_pstate(ctx, &current_stable_pstate);
350 if (r || (stable_pstate == current_stable_pstate))
351 goto done;
352
8cda7a4f
AD
353 switch (stable_pstate) {
354 case AMDGPU_CTX_STABLE_PSTATE_NONE:
355 level = AMD_DPM_FORCED_LEVEL_AUTO;
356 break;
357 case AMDGPU_CTX_STABLE_PSTATE_STANDARD:
358 level = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD;
359 break;
360 case AMDGPU_CTX_STABLE_PSTATE_MIN_SCLK:
361 level = AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK;
362 break;
363 case AMDGPU_CTX_STABLE_PSTATE_MIN_MCLK:
364 level = AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK;
365 break;
366 case AMDGPU_CTX_STABLE_PSTATE_PEAK:
367 level = AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;
368 break;
369 default:
370 r = -EINVAL;
371 goto done;
372 }
373
374 r = amdgpu_dpm_force_performance_level(adev, level);
375
376 if (level == AMD_DPM_FORCED_LEVEL_AUTO)
377 adev->pm.stable_pstate_ctx = NULL;
378 else
379 adev->pm.stable_pstate_ctx = ctx;
380done:
381 mutex_unlock(&adev->pm.stable_pstate_ctx_lock);
382
383 return r;
384}
385
8ee3a52e 386static void amdgpu_ctx_fini(struct kref *ref)
d38ceaf9 387{
8ee3a52e 388 struct amdgpu_ctx *ctx = container_of(ref, struct amdgpu_ctx, refcount);
69493c03
CK
389 struct amdgpu_ctx_mgr *mgr = ctx->mgr;
390 struct amdgpu_device *adev = mgr->adev;
57230f0c 391 unsigned i, j, idx;
47f38501 392
fe295b27
DA
393 if (!adev)
394 return;
395
977f7e10
ND
396 for (i = 0; i < AMDGPU_HW_IP_NUM; ++i) {
397 for (j = 0; j < AMDGPU_MAX_ENTITY_NUM; ++j) {
af0b5416
CK
398 ktime_t spend;
399
400 spend = amdgpu_ctx_fini_entity(ctx->entities[i][j]);
401 atomic64_add(ktime_to_ns(spend), &mgr->time_spend[i]);
977f7e10 402 }
63e3ab9a 403 }
57230f0c 404
a79f56d1 405 if (drm_dev_enter(adev_to_drm(adev), &idx)) {
958afce9 406 amdgpu_ctx_set_stable_pstate(ctx, ctx->stable_pstate);
57230f0c
AG
407 drm_dev_exit(idx);
408 }
409
94f4c496 410 mutex_destroy(&ctx->lock);
8ee3a52e 411 kfree(ctx);
47f38501
CK
412}
413
0d346a14
CK
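/*
 * Look up (and create on first use) the scheduler entity for the given IP
 * block, instance and ring of a context.
 */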
int amdgpu_ctx_get_entity(struct amdgpu_ctx *ctx, u32 hw_ip, u32 instance,
			  u32 ring, struct drm_sched_entity **entity)
{
	int r;

	if (hw_ip >= AMDGPU_HW_IP_NUM) {
		DRM_ERROR("unknown HW IP type: %d\n", hw_ip);
		return -EINVAL;
	}

	/* Right now all IPs have only one instance - multiple rings. */
	if (instance != 0) {
		DRM_DEBUG("invalid ip instance: %d\n", instance);
		return -EINVAL;
	}

	if (ring >= amdgpu_ctx_num_entities[hw_ip]) {
		DRM_DEBUG("invalid ring: %d %d\n", hw_ip, ring);
		return -EINVAL;
	}

	if (ctx->entities[hw_ip][ring] == NULL) {
		r = amdgpu_ctx_init_entity(ctx, hw_ip, ring);
		if (r)
			return r;
	}

	*entity = &ctx->entities[hw_ip][ring]->entity;
	return 0;
}

static int amdgpu_ctx_alloc(struct amdgpu_device *adev,
			    struct amdgpu_fpriv *fpriv,
			    struct drm_file *filp,
			    int32_t priority,
			    uint32_t *id)
{
	struct amdgpu_ctx_mgr *mgr = &fpriv->ctx_mgr;
	struct amdgpu_ctx *ctx;
	int r;

	ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	mutex_lock(&mgr->lock);
	r = idr_alloc(&mgr->ctx_handles, ctx, 1, AMDGPU_VM_MAX_NUM_CTX, GFP_KERNEL);
	if (r < 0) {
		mutex_unlock(&mgr->lock);
		kfree(ctx);
		return r;
	}

	*id = (uint32_t)r;
	r = amdgpu_ctx_init(mgr, priority, filp, ctx);
	if (r) {
		idr_remove(&mgr->ctx_handles, *id);
		*id = 0;
		kfree(ctx);
	}
	mutex_unlock(&mgr->lock);
	return r;
}

static void amdgpu_ctx_do_release(struct kref *ref)
{
	struct amdgpu_ctx *ctx;
	u32 i, j;

	ctx = container_of(ref, struct amdgpu_ctx, refcount);
	for (i = 0; i < AMDGPU_HW_IP_NUM; ++i) {
		for (j = 0; j < amdgpu_ctx_num_entities[i]; ++j) {
			if (!ctx->entities[i][j])
				continue;

			drm_sched_entity_destroy(&ctx->entities[i][j]->entity);
		}
	}

	amdgpu_ctx_fini(ref);
}

static int amdgpu_ctx_free(struct amdgpu_fpriv *fpriv, uint32_t id)
{
	struct amdgpu_ctx_mgr *mgr = &fpriv->ctx_mgr;
	struct amdgpu_ctx *ctx;

	mutex_lock(&mgr->lock);
	ctx = idr_remove(&mgr->ctx_handles, id);
	if (ctx)
		kref_put(&ctx->refcount, amdgpu_ctx_do_release);
	mutex_unlock(&mgr->lock);
	return ctx ? 0 : -EINVAL;
}

static int amdgpu_ctx_query(struct amdgpu_device *adev,
			    struct amdgpu_fpriv *fpriv, uint32_t id,
			    union drm_amdgpu_ctx_out *out)
{
	struct amdgpu_ctx *ctx;
	struct amdgpu_ctx_mgr *mgr;
	unsigned reset_counter;

	if (!fpriv)
		return -EINVAL;

	mgr = &fpriv->ctx_mgr;
	mutex_lock(&mgr->lock);
	ctx = idr_find(&mgr->ctx_handles, id);
	if (!ctx) {
		mutex_unlock(&mgr->lock);
		return -EINVAL;
	}

	/* TODO: these two are always zero */
	out->state.flags = 0x0;
	out->state.hangs = 0x0;

	/* determine if a GPU reset has occurred since the last call */
	reset_counter = atomic_read(&adev->gpu_reset_counter);
	/* TODO: this should ideally return NO, GUILTY, or INNOCENT. */
	if (ctx->reset_counter_query == reset_counter)
		out->state.reset_status = AMDGPU_CTX_NO_RESET;
	else
		out->state.reset_status = AMDGPU_CTX_UNKNOWN_RESET;
	ctx->reset_counter_query = reset_counter;

	mutex_unlock(&mgr->lock);
	return 0;
}

#define AMDGPU_RAS_COUNTE_DELAY_MS 3000

static int amdgpu_ctx_query2(struct amdgpu_device *adev,
			     struct amdgpu_fpriv *fpriv, uint32_t id,
			     union drm_amdgpu_ctx_out *out)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	struct amdgpu_ctx *ctx;
	struct amdgpu_ctx_mgr *mgr;

	if (!fpriv)
		return -EINVAL;

	mgr = &fpriv->ctx_mgr;
	mutex_lock(&mgr->lock);
	ctx = idr_find(&mgr->ctx_handles, id);
	if (!ctx) {
		mutex_unlock(&mgr->lock);
		return -EINVAL;
	}

	out->state.flags = 0x0;
	out->state.hangs = 0x0;

	if (ctx->reset_counter != atomic_read(&adev->gpu_reset_counter))
		out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_RESET;

	if (ctx->vram_lost_counter != atomic_read(&adev->vram_lost_counter))
		out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_VRAMLOST;

	if (atomic_read(&ctx->guilty))
		out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_GUILTY;

	if (adev->ras_enabled && con) {
		/* Return the cached values in O(1),
		 * and schedule delayed work to cache
		 * new values.
		 */
		int ce_count, ue_count;

		ce_count = atomic_read(&con->ras_ce_count);
		ue_count = atomic_read(&con->ras_ue_count);

		if (ce_count != ctx->ras_counter_ce) {
			ctx->ras_counter_ce = ce_count;
			out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_RAS_CE;
		}

		if (ue_count != ctx->ras_counter_ue) {
			ctx->ras_counter_ue = ue_count;
			out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_RAS_UE;
		}

		schedule_delayed_work(&con->ras_counte_delay_work,
				      msecs_to_jiffies(AMDGPU_RAS_COUNTE_DELAY_MS));
	}

	mutex_unlock(&mgr->lock);
	return 0;
}

static int amdgpu_ctx_stable_pstate(struct amdgpu_device *adev,
				    struct amdgpu_fpriv *fpriv, uint32_t id,
				    bool set, u32 *stable_pstate)
{
	struct amdgpu_ctx *ctx;
	struct amdgpu_ctx_mgr *mgr;
	int r;

	if (!fpriv)
		return -EINVAL;

	mgr = &fpriv->ctx_mgr;
	mutex_lock(&mgr->lock);
	ctx = idr_find(&mgr->ctx_handles, id);
	if (!ctx) {
		mutex_unlock(&mgr->lock);
		return -EINVAL;
	}

	if (set)
		r = amdgpu_ctx_set_stable_pstate(ctx, *stable_pstate);
	else
		r = amdgpu_ctx_get_stable_pstate(ctx, stable_pstate);

	mutex_unlock(&mgr->lock);
	return r;
}

int amdgpu_ctx_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *filp)
{
	int r;
	uint32_t id, stable_pstate;
	int32_t priority;

	union drm_amdgpu_ctx *args = data;
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct amdgpu_fpriv *fpriv = filp->driver_priv;

	id = args->in.ctx_id;
	priority = args->in.priority;

	/* For backwards compatibility reasons, we need to accept
	 * ioctls with garbage in the priority field */
	if (!amdgpu_ctx_priority_is_valid(priority))
		priority = AMDGPU_CTX_PRIORITY_NORMAL;

	switch (args->in.op) {
	case AMDGPU_CTX_OP_ALLOC_CTX:
		r = amdgpu_ctx_alloc(adev, fpriv, filp, priority, &id);
		args->out.alloc.ctx_id = id;
		break;
	case AMDGPU_CTX_OP_FREE_CTX:
		r = amdgpu_ctx_free(fpriv, id);
		break;
	case AMDGPU_CTX_OP_QUERY_STATE:
		r = amdgpu_ctx_query(adev, fpriv, id, &args->out);
		break;
	case AMDGPU_CTX_OP_QUERY_STATE2:
		r = amdgpu_ctx_query2(adev, fpriv, id, &args->out);
		break;
	case AMDGPU_CTX_OP_GET_STABLE_PSTATE:
		if (args->in.flags)
			return -EINVAL;
		r = amdgpu_ctx_stable_pstate(adev, fpriv, id, false, &stable_pstate);
		if (!r)
			args->out.pstate.flags = stable_pstate;
		break;
	case AMDGPU_CTX_OP_SET_STABLE_PSTATE:
		if (args->in.flags & ~AMDGPU_CTX_STABLE_PSTATE_FLAGS_MASK)
			return -EINVAL;
		stable_pstate = args->in.flags & AMDGPU_CTX_STABLE_PSTATE_FLAGS_MASK;
		if (stable_pstate > AMDGPU_CTX_STABLE_PSTATE_PEAK)
			return -EINVAL;
		r = amdgpu_ctx_stable_pstate(adev, fpriv, id, true, &stable_pstate);
		break;
	default:
		return -EINVAL;
	}

	return r;
}

struct amdgpu_ctx *amdgpu_ctx_get(struct amdgpu_fpriv *fpriv, uint32_t id)
{
	struct amdgpu_ctx *ctx;
	struct amdgpu_ctx_mgr *mgr;

	if (!fpriv)
		return NULL;

	mgr = &fpriv->ctx_mgr;

	mutex_lock(&mgr->lock);
	ctx = idr_find(&mgr->ctx_handles, id);
	if (ctx)
		kref_get(&ctx->refcount);
	mutex_unlock(&mgr->lock);
	return ctx;
}

int amdgpu_ctx_put(struct amdgpu_ctx *ctx)
{
	if (ctx == NULL)
		return -EINVAL;

	kref_put(&ctx->refcount, amdgpu_ctx_do_release);
	return 0;
}

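/*
 * Store a scheduled fence in the context's per-entity ring buffer and return
 * the sequence number userspace can later use to look it up. The slot being
 * replaced is expected to be signaled already; its runtime is added to the
 * per-IP usage statistics.
 */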
uint64_t amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx,
			      struct drm_sched_entity *entity,
			      struct dma_fence *fence)
{
	struct amdgpu_ctx_entity *centity = to_amdgpu_ctx_entity(entity);
	uint64_t seq = centity->sequence;
	struct dma_fence *other = NULL;
	unsigned idx = 0;

	idx = seq & (amdgpu_sched_jobs - 1);
	other = centity->fences[idx];
	WARN_ON(other && !dma_fence_is_signaled(other));

	dma_fence_get(fence);

	spin_lock(&ctx->ring_lock);
	centity->fences[idx] = fence;
	centity->sequence++;
	spin_unlock(&ctx->ring_lock);

	atomic64_add(ktime_to_ns(amdgpu_ctx_fence_time(other)),
		     &ctx->mgr->time_spend[centity->hw_ip]);

	dma_fence_put(other);
	return seq;
}

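/*
 * Look up a fence by sequence number. A seq of ~0ull means the most recent
 * submission. Sequence numbers that have not been submitted yet return
 * -EINVAL, while fences that already aged out of the ring buffer return NULL.
 */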
struct dma_fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
				       struct drm_sched_entity *entity,
				       uint64_t seq)
{
	struct amdgpu_ctx_entity *centity = to_amdgpu_ctx_entity(entity);
	struct dma_fence *fence;

	spin_lock(&ctx->ring_lock);

	if (seq == ~0ull)
		seq = centity->sequence - 1;

	if (seq >= centity->sequence) {
		spin_unlock(&ctx->ring_lock);
		return ERR_PTR(-EINVAL);
	}

	if (seq + amdgpu_sched_jobs < centity->sequence) {
		spin_unlock(&ctx->ring_lock);
		return NULL;
	}

	fence = dma_fence_get(centity->fences[seq & (amdgpu_sched_jobs - 1)]);
	spin_unlock(&ctx->ring_lock);

	return fence;
}

static void amdgpu_ctx_set_entity_priority(struct amdgpu_ctx *ctx,
					   struct amdgpu_ctx_entity *aentity,
					   int hw_ip,
					   int32_t priority)
{
	struct amdgpu_device *adev = ctx->mgr->adev;
	unsigned int hw_prio;
	struct drm_gpu_scheduler **scheds = NULL;
	unsigned num_scheds;

	/* set sw priority */
	drm_sched_entity_set_priority(&aentity->entity,
				      amdgpu_ctx_to_drm_sched_prio(priority));

	/* set hw priority */
	if (hw_ip == AMDGPU_HW_IP_COMPUTE || hw_ip == AMDGPU_HW_IP_GFX) {
		hw_prio = amdgpu_ctx_get_hw_prio(ctx, hw_ip);
		hw_prio = array_index_nospec(hw_prio, AMDGPU_RING_PRIO_MAX);
		scheds = adev->gpu_sched[hw_ip][hw_prio].sched;
		num_scheds = adev->gpu_sched[hw_ip][hw_prio].num_scheds;
		drm_sched_entity_modify_sched(&aentity->entity, scheds,
					      num_scheds);
	}
}

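/*
 * Apply a new priority override to every entity of the context, updating the
 * software scheduler priority and, for GFX/compute, the hardware queue
 * selection as well.
 */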
void amdgpu_ctx_priority_override(struct amdgpu_ctx *ctx,
				  int32_t priority)
{
	int32_t ctx_prio;
	unsigned i, j;

	ctx->override_priority = priority;

	ctx_prio = (ctx->override_priority == AMDGPU_CTX_PRIORITY_UNSET) ?
			ctx->init_priority : ctx->override_priority;
	for (i = 0; i < AMDGPU_HW_IP_NUM; ++i) {
		for (j = 0; j < amdgpu_ctx_num_entities[i]; ++j) {
			if (!ctx->entities[i][j])
				continue;

			amdgpu_ctx_set_entity_priority(ctx, ctx->entities[i][j],
						       i, ctx_prio);
		}
	}
}

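/*
 * Wait for the fence occupying the ring-buffer slot that the next submission
 * on this entity will reuse. This bounds the number of outstanding
 * submissions per entity to amdgpu_sched_jobs.
 */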
int amdgpu_ctx_wait_prev_fence(struct amdgpu_ctx *ctx,
			       struct drm_sched_entity *entity)
{
	struct amdgpu_ctx_entity *centity = to_amdgpu_ctx_entity(entity);
	struct dma_fence *other;
	unsigned idx;
	long r;

	spin_lock(&ctx->ring_lock);
	idx = centity->sequence & (amdgpu_sched_jobs - 1);
	other = dma_fence_get(centity->fences[idx]);
	spin_unlock(&ctx->ring_lock);

	if (!other)
		return 0;

	r = dma_fence_wait(other, true);
	if (r < 0 && r != -ERESTARTSYS)
		DRM_ERROR("Error (%ld) waiting for fence!\n", r);

	dma_fence_put(other);
	return r;
}

void amdgpu_ctx_mgr_init(struct amdgpu_ctx_mgr *mgr,
			 struct amdgpu_device *adev)
{
	unsigned int i;

	mgr->adev = adev;
	mutex_init(&mgr->lock);
	idr_init_base(&mgr->ctx_handles, 1);

	for (i = 0; i < AMDGPU_HW_IP_NUM; ++i)
		atomic64_set(&mgr->time_spend[i], 0);
}

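/*
 * Flush the entities of all contexts owned by this manager so their queued
 * jobs can reach the hardware, and return the remaining timeout.
 */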
long amdgpu_ctx_mgr_entity_flush(struct amdgpu_ctx_mgr *mgr, long timeout)
{
	struct amdgpu_ctx *ctx;
	struct idr *idp;
	uint32_t id, i, j;

	idp = &mgr->ctx_handles;

	mutex_lock(&mgr->lock);
	idr_for_each_entry(idp, ctx, id) {
		for (i = 0; i < AMDGPU_HW_IP_NUM; ++i) {
			for (j = 0; j < amdgpu_ctx_num_entities[i]; ++j) {
				struct drm_sched_entity *entity;

				if (!ctx->entities[i][j])
					continue;

				entity = &ctx->entities[i][j]->entity;
				timeout = drm_sched_entity_flush(entity, timeout);
			}
		}
	}
	mutex_unlock(&mgr->lock);
	return timeout;
}

void amdgpu_ctx_mgr_entity_fini(struct amdgpu_ctx_mgr *mgr)
{
	struct amdgpu_ctx *ctx;
	struct idr *idp;
	uint32_t id, i, j;

	idp = &mgr->ctx_handles;

	idr_for_each_entry(idp, ctx, id) {
		if (kref_read(&ctx->refcount) != 1) {
			DRM_ERROR("ctx %p is still alive\n", ctx);
			continue;
		}

		for (i = 0; i < AMDGPU_HW_IP_NUM; ++i) {
			for (j = 0; j < amdgpu_ctx_num_entities[i]; ++j) {
				struct drm_sched_entity *entity;

				if (!ctx->entities[i][j])
					continue;

				entity = &ctx->entities[i][j]->entity;
				drm_sched_entity_fini(entity);
			}
		}
	}
}

void amdgpu_ctx_mgr_fini(struct amdgpu_ctx_mgr *mgr)
{
	struct amdgpu_ctx *ctx;
	struct idr *idp;
	uint32_t id;

	amdgpu_ctx_mgr_entity_fini(mgr);

	idp = &mgr->ctx_handles;

	idr_for_each_entry(idp, ctx, id) {
		if (kref_put(&ctx->refcount, amdgpu_ctx_fini) != 1)
			DRM_ERROR("ctx %p is still alive\n", ctx);
	}

	idr_destroy(&mgr->ctx_handles);
	mutex_destroy(&mgr->lock);
}

930void amdgpu_ctx_mgr_usage(struct amdgpu_ctx_mgr *mgr,
931 ktime_t usage[AMDGPU_HW_IP_NUM])
87444254 932{
87444254 933 struct amdgpu_ctx *ctx;
af0b5416 934 unsigned int hw_ip, i;
87444254 935 uint32_t id;
87444254 936
af0b5416
CK
937 /*
938 * This is a little bit racy because it can be that a ctx or a fence are
939 * destroyed just in the moment we try to account them. But that is ok
940 * since exactly that case is explicitely allowed by the interface.
941 */
87444254 942 mutex_lock(&mgr->lock);
af0b5416
CK
943 for (hw_ip = 0; hw_ip < AMDGPU_HW_IP_NUM; ++hw_ip) {
944 uint64_t ns = atomic64_read(&mgr->time_spend[hw_ip]);
5c439c38 945
af0b5416
CK
946 usage[hw_ip] = ns_to_ktime(ns);
947 }
87444254 948
af0b5416
CK
949 idr_for_each_entry(&mgr->ctx_handles, ctx, id) {
950 for (hw_ip = 0; hw_ip < AMDGPU_HW_IP_NUM; ++hw_ip) {
951 for (i = 0; i < amdgpu_ctx_num_entities[hw_ip]; ++i) {
952 struct amdgpu_ctx_entity *centity;
953 ktime_t spend;
5c439c38 954
af0b5416
CK
955 centity = ctx->entities[hw_ip][i];
956 if (!centity)
957 continue;
958 spend = amdgpu_ctx_entity_time(ctx, centity);
959 usage[hw_ip] = ktime_add(usage[hw_ip], spend);
960 }
961 }
87444254 962 }
87444254 963 mutex_unlock(&mgr->lock);
87444254 964}