/*
 * Copyright 2008 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 */
#include <linux/file.h>
#include <linux/pagemap.h>
#include <linux/sync_file.h>
#include <linux/dma-buf.h>

#include <drm/amdgpu_drm.h>
#include <drm/drm_syncobj.h>
#include "amdgpu_cs.h"
#include "amdgpu_trace.h"
#include "amdgpu_gmc.h"
#include "amdgpu_gem.h"
#include "amdgpu_ras.h"
static int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p,
                                 struct amdgpu_device *adev,
                                 struct drm_file *filp,
                                 union drm_amdgpu_cs *cs)
{
        struct amdgpu_fpriv *fpriv = filp->driver_priv;

        if (cs->in.num_chunks == 0)
                return -EINVAL;

        memset(p, 0, sizeof(*p));

        p->ctx = amdgpu_ctx_get(fpriv, cs->in.ctx_id);

        if (atomic_read(&p->ctx->guilty)) {
                amdgpu_ctx_put(p->ctx);
static int amdgpu_cs_job_idx(struct amdgpu_cs_parser *p,
                             struct drm_amdgpu_cs_chunk_ib *chunk_ib)
{
        struct drm_sched_entity *entity;

        r = amdgpu_ctx_get_entity(p->ctx, chunk_ib->ip_type,
                                  chunk_ib->ip_instance,
                                  chunk_ib->ring, &entity);

        /*
         * Abort if there is no run queue associated with this entity.
         * Possibly because of disabled HW IP.
         */
        if (entity->rq == NULL)

        /* Check if we can add this IB to some existing job */
        for (i = 0; i < p->gang_size; ++i)
                if (p->entities[i] == entity)

        /* If not increase the gang size if possible */
        if (i == AMDGPU_CS_GANG_SIZE)
                return -EINVAL;

        p->entities[i] = entity;
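
/*
 * Descriptive note (not from the original source): the index handled above
 * doubles as the slot of the IB's job within the gang. An IB that targets an
 * entity already present in p->entities[] is merged into that existing job,
 * otherwise a new slot is taken as long as the gang stays within
 * AMDGPU_CS_GANG_SIZE.
 */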
static int amdgpu_cs_p1_ib(struct amdgpu_cs_parser *p,
                           struct drm_amdgpu_cs_chunk_ib *chunk_ib,
                           unsigned int *num_ibs)
{
        r = amdgpu_cs_job_idx(p, chunk_ib);

        p->gang_leader_idx = r;
static int amdgpu_cs_p1_user_fence(struct amdgpu_cs_parser *p,
                                   struct drm_amdgpu_cs_chunk_fence *data,
                                   uint32_t *offset)
{
        struct drm_gem_object *gobj;
        struct amdgpu_bo *bo;

        gobj = drm_gem_object_lookup(p->filp, data->handle);

        bo = amdgpu_bo_ref(gem_to_amdgpu_bo(gobj));
        p->uf_entry.priority = 0;
        p->uf_entry.tv.bo = &bo->tbo;
        /* One for TTM and two for the CS job */
        p->uf_entry.tv.num_shared = 3;

        drm_gem_object_put(gobj);

        size = amdgpu_bo_size(bo);
        if (size != PAGE_SIZE || (data->offset + 8) > size) {

        if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) {

        *offset = data->offset;

        amdgpu_bo_unref(&bo);
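
/*
 * For orientation, a minimal sketch (illustrative only, not part of this
 * file) of the userspace side of the fence chunk consumed above. The field
 * names follow struct drm_amdgpu_cs_chunk_fence; the handle value is made up:
 *
 *	struct drm_amdgpu_cs_chunk_fence fence_data = {
 *		.handle = uf_bo_handle,	// GEM handle of a PAGE_SIZE buffer
 *		.offset = 0,		// 64-bit fence value lands here, so offset + 8 must fit
 *	};
 */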
static int amdgpu_cs_p1_bo_handles(struct amdgpu_cs_parser *p,
                                   struct drm_amdgpu_bo_list_in *data)
{
        struct drm_amdgpu_bo_list_entry *info;

        r = amdgpu_bo_create_list_entry_array(data, &info);

        r = amdgpu_bo_list_create(p->adev, p->filp, info, data->bo_number,
/* Copy the data from userspace and go over it the first time */
static int amdgpu_cs_pass1(struct amdgpu_cs_parser *p,
                           union drm_amdgpu_cs *cs)
{
        struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
        unsigned int num_ibs[AMDGPU_CS_GANG_SIZE] = { };
        struct amdgpu_vm *vm = &fpriv->vm;
        uint64_t *chunk_array_user;
        uint64_t *chunk_array;
        uint32_t uf_offset = 0;

        chunk_array = kvmalloc_array(cs->in.num_chunks, sizeof(uint64_t),

        chunk_array_user = u64_to_user_ptr(cs->in.chunks);
        if (copy_from_user(chunk_array, chunk_array_user,
                           sizeof(uint64_t)*cs->in.num_chunks)) {

        p->nchunks = cs->in.num_chunks;
        p->chunks = kvmalloc_array(p->nchunks, sizeof(struct amdgpu_cs_chunk),

        for (i = 0; i < p->nchunks; i++) {
                struct drm_amdgpu_cs_chunk __user **chunk_ptr = NULL;
                struct drm_amdgpu_cs_chunk user_chunk;
                uint32_t __user *cdata;

                chunk_ptr = u64_to_user_ptr(chunk_array[i]);
                if (copy_from_user(&user_chunk, chunk_ptr,
                                   sizeof(struct drm_amdgpu_cs_chunk))) {

                        goto free_partial_kdata;

                p->chunks[i].chunk_id = user_chunk.chunk_id;
                p->chunks[i].length_dw = user_chunk.length_dw;

                size = p->chunks[i].length_dw;
                cdata = u64_to_user_ptr(user_chunk.chunk_data);

                p->chunks[i].kdata = kvmalloc_array(size, sizeof(uint32_t),
                if (p->chunks[i].kdata == NULL) {

                        goto free_partial_kdata;

                size *= sizeof(uint32_t);
                if (copy_from_user(p->chunks[i].kdata, cdata, size)) {

                        goto free_partial_kdata;

                /* Assume the worst on the following checks */

                switch (p->chunks[i].chunk_id) {
                case AMDGPU_CHUNK_ID_IB:
                        if (size < sizeof(struct drm_amdgpu_cs_chunk_ib))
                                goto free_partial_kdata;

                        ret = amdgpu_cs_p1_ib(p, p->chunks[i].kdata, num_ibs);
                                goto free_partial_kdata;

                case AMDGPU_CHUNK_ID_FENCE:
                        if (size < sizeof(struct drm_amdgpu_cs_chunk_fence))
                                goto free_partial_kdata;

                        ret = amdgpu_cs_p1_user_fence(p, p->chunks[i].kdata,
                                goto free_partial_kdata;

                case AMDGPU_CHUNK_ID_BO_HANDLES:
                        if (size < sizeof(struct drm_amdgpu_bo_list_in))
                                goto free_partial_kdata;

                        ret = amdgpu_cs_p1_bo_handles(p, p->chunks[i].kdata);
                                goto free_partial_kdata;

                case AMDGPU_CHUNK_ID_DEPENDENCIES:
                case AMDGPU_CHUNK_ID_SYNCOBJ_IN:
                case AMDGPU_CHUNK_ID_SYNCOBJ_OUT:
                case AMDGPU_CHUNK_ID_SCHEDULED_DEPENDENCIES:
                case AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_WAIT:
                case AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_SIGNAL:

                        goto free_partial_kdata;

                        goto free_partial_kdata;

        for (i = 0; i < p->gang_size; ++i) {
                ret = amdgpu_job_alloc(p->adev, vm, p->entities[i], vm,
                                       num_ibs[i], &p->jobs[i]);

        p->gang_leader = p->jobs[p->gang_leader_idx];

        if (p->ctx->vram_lost_counter != p->gang_leader->vram_lost_counter) {

        if (p->uf_entry.tv.bo)
                p->gang_leader->uf_addr = uf_offset;

        /* Use this opportunity to fill in task info for the vm */
        amdgpu_vm_set_task_info(vm);

        kvfree(p->chunks[i].kdata);
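
/*
 * For reference, a minimal sketch (illustrative only, not part of this file)
 * of the userspace layout that amdgpu_cs_pass1() walks: cs->in.chunks points
 * at an array of u64 pointers, each of which points at one
 * struct drm_amdgpu_cs_chunk. Variable names and values are made up:
 *
 *	struct drm_amdgpu_cs_chunk chunk = {
 *		.chunk_id   = AMDGPU_CHUNK_ID_IB,
 *		.length_dw  = sizeof(struct drm_amdgpu_cs_chunk_ib) / 4,
 *		.chunk_data = (__u64)(uintptr_t)&ib_info,
 *	};
 *	__u64 chunks[] = { (__u64)(uintptr_t)&chunk };
 *
 *	union drm_amdgpu_cs cs = { 0 };
 *	cs.in.ctx_id     = ctx_id;
 *	cs.in.num_chunks = 1;
 *	cs.in.chunks     = (__u64)(uintptr_t)chunks;
 */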
static int amdgpu_cs_p2_ib(struct amdgpu_cs_parser *p,
                           struct amdgpu_cs_chunk *chunk,
                           unsigned int *ce_preempt,
                           unsigned int *de_preempt)
{
        struct drm_amdgpu_cs_chunk_ib *chunk_ib = chunk->kdata;
        struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
        struct amdgpu_vm *vm = &fpriv->vm;
        struct amdgpu_ring *ring;
        struct amdgpu_job *job;
        struct amdgpu_ib *ib;

        r = amdgpu_cs_job_idx(p, chunk_ib);

        ring = amdgpu_job_ring(job);
        ib = &job->ibs[job->num_ibs++];

        /* MM engine doesn't support user fences */
        if (p->uf_entry.tv.bo && ring->funcs->no_user_fence)
                return -EINVAL;

        if (chunk_ib->ip_type == AMDGPU_HW_IP_GFX &&
            chunk_ib->flags & AMDGPU_IB_FLAG_PREEMPT) {
                if (chunk_ib->flags & AMDGPU_IB_FLAG_CE)

                /* Each GFX command submit allows only 1 IB max
                 * preemptible for CE & DE */
                if (*ce_preempt > 1 || *de_preempt > 1)
                        return -EINVAL;

        if (chunk_ib->flags & AMDGPU_IB_FLAG_PREAMBLE)
                job->preamble_status |= AMDGPU_PREAMBLE_IB_PRESENT;

        r = amdgpu_ib_get(p->adev, vm, ring->funcs->parse_cs ?
                          chunk_ib->ib_bytes : 0,
                          AMDGPU_IB_POOL_DELAYED, ib);

                DRM_ERROR("Failed to get ib !\n");

        ib->gpu_addr = chunk_ib->va_start;
        ib->length_dw = chunk_ib->ib_bytes / 4;
        ib->flags = chunk_ib->flags;
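
/*
 * Illustrative userspace fill (not part of this file) of the IB chunk that
 * amdgpu_cs_p2_ib() consumes; the GPU VA and dword count are made up:
 *
 *	struct drm_amdgpu_cs_chunk_ib ib_info = {
 *		.ip_type     = AMDGPU_HW_IP_GFX,
 *		.ip_instance = 0,
 *		.ring        = 0,
 *		.va_start    = ib_gpu_va,	// GPU VA of the command buffer
 *		.ib_bytes    = num_dw * 4,	// becomes ib->length_dw * 4 above
 *		.flags       = 0,
 *	};
 */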
static int amdgpu_cs_p2_dependencies(struct amdgpu_cs_parser *p,
                                     struct amdgpu_cs_chunk *chunk)
{
        struct drm_amdgpu_cs_chunk_dep *deps = chunk->kdata;
        struct amdgpu_fpriv *fpriv = p->filp->driver_priv;

        num_deps = chunk->length_dw * 4 /
                sizeof(struct drm_amdgpu_cs_chunk_dep);

        for (i = 0; i < num_deps; ++i) {
                struct amdgpu_ctx *ctx;
                struct drm_sched_entity *entity;
                struct dma_fence *fence;

                ctx = amdgpu_ctx_get(fpriv, deps[i].ctx_id);

                r = amdgpu_ctx_get_entity(ctx, deps[i].ip_type,
                                          deps[i].ring, &entity);

                fence = amdgpu_ctx_get_fence(ctx, entity, deps[i].handle);

                if (IS_ERR(fence))
                        return PTR_ERR(fence);

                if (chunk->chunk_id == AMDGPU_CHUNK_ID_SCHEDULED_DEPENDENCIES) {
                        struct drm_sched_fence *s_fence;
                        struct dma_fence *old = fence;

                        s_fence = to_drm_sched_fence(fence);
                        fence = dma_fence_get(&s_fence->scheduled);

                r = amdgpu_sync_fence(&p->sync, fence);
                dma_fence_put(fence);
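
/*
 * Illustrative sketch (not from this file) of one userspace dependency entry
 * handled above; it names the context, entity and sequence number of an
 * earlier submission, values are made up:
 *
 *	struct drm_amdgpu_cs_chunk_dep dep = {
 *		.ip_type     = AMDGPU_HW_IP_GFX,
 *		.ip_instance = 0,
 *		.ring        = 0,
 *		.ctx_id      = other_ctx_id,
 *		.handle      = other_seq_no,	// drm_amdgpu_cs out.handle of the earlier CS
 *	};
 */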
static int amdgpu_syncobj_lookup_and_add(struct amdgpu_cs_parser *p,
                                         uint32_t handle, u64 point,
                                         u64 flags)
{
        struct dma_fence *fence;

        r = drm_syncobj_find_fence(p->filp, handle, point, flags, &fence);

                DRM_ERROR("syncobj %u failed to find fence @ %llu (%d)!\n",

        r = amdgpu_sync_fence(&p->sync, fence);

        /*
         * When we have an explicit dependency it might be necessary to insert a
         * pipeline sync to make sure that all caches etc are flushed and the
         * next job actually sees the results from the previous one.
         */
        if (fence->context == p->gang_leader->base.entity->fence_context)
                r = amdgpu_sync_fence(&p->gang_leader->explicit_sync, fence);

        dma_fence_put(fence);
static int amdgpu_cs_p2_syncobj_in(struct amdgpu_cs_parser *p,
                                   struct amdgpu_cs_chunk *chunk)
{
        struct drm_amdgpu_cs_chunk_sem *deps = chunk->kdata;

        num_deps = chunk->length_dw * 4 /
                sizeof(struct drm_amdgpu_cs_chunk_sem);
        for (i = 0; i < num_deps; ++i) {
                r = amdgpu_syncobj_lookup_and_add(p, deps[i].handle, 0, 0);

static int amdgpu_cs_p2_syncobj_timeline_wait(struct amdgpu_cs_parser *p,
                                              struct amdgpu_cs_chunk *chunk)
{
        struct drm_amdgpu_cs_chunk_syncobj *syncobj_deps = chunk->kdata;

        num_deps = chunk->length_dw * 4 /
                sizeof(struct drm_amdgpu_cs_chunk_syncobj);
        for (i = 0; i < num_deps; ++i) {
                r = amdgpu_syncobj_lookup_and_add(p, syncobj_deps[i].handle,
                                                  syncobj_deps[i].point,
                                                  syncobj_deps[i].flags);
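
/*
 * Worked example of the num_deps arithmetic above (struct sizes per the
 * current amdgpu_drm.h layout; treat the exact byte counts as illustrative):
 * length_dw counts 32-bit words, so the chunk payload is length_dw * 4 bytes.
 * With the 16-byte struct drm_amdgpu_cs_chunk_syncobj (handle, flags, point),
 * a chunk with length_dw == 8 carries 8 * 4 / 16 == 2 timeline entries; with
 * the 4-byte struct drm_amdgpu_cs_chunk_sem (one handle) the same chunk would
 * carry 8 entries.
 */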
static int amdgpu_cs_p2_syncobj_out(struct amdgpu_cs_parser *p,
                                    struct amdgpu_cs_chunk *chunk)
{
        struct drm_amdgpu_cs_chunk_sem *deps = chunk->kdata;

        num_deps = chunk->length_dw * 4 /
                sizeof(struct drm_amdgpu_cs_chunk_sem);

        p->post_deps = kmalloc_array(num_deps, sizeof(*p->post_deps),

        p->num_post_deps = 0;

        for (i = 0; i < num_deps; ++i) {
                p->post_deps[i].syncobj =
                        drm_syncobj_find(p->filp, deps[i].handle);
                if (!p->post_deps[i].syncobj)
                        return -EINVAL;

                p->post_deps[i].chain = NULL;
                p->post_deps[i].point = 0;

static int amdgpu_cs_p2_syncobj_timeline_signal(struct amdgpu_cs_parser *p,
                                                struct amdgpu_cs_chunk *chunk)
{
        struct drm_amdgpu_cs_chunk_syncobj *syncobj_deps = chunk->kdata;

        num_deps = chunk->length_dw * 4 /
                sizeof(struct drm_amdgpu_cs_chunk_syncobj);

        p->post_deps = kmalloc_array(num_deps, sizeof(*p->post_deps),

        p->num_post_deps = 0;

        for (i = 0; i < num_deps; ++i) {
                struct amdgpu_cs_post_dep *dep = &p->post_deps[i];

                if (syncobj_deps[i].point) {
                        dep->chain = dma_fence_chain_alloc();

                dep->syncobj = drm_syncobj_find(p->filp,
                                                syncobj_deps[i].handle);

                        dma_fence_chain_free(dep->chain);

                dep->point = syncobj_deps[i].point;
static int amdgpu_cs_pass2(struct amdgpu_cs_parser *p)
{
        unsigned int ce_preempt = 0, de_preempt = 0;

        for (i = 0; i < p->nchunks; ++i) {
                struct amdgpu_cs_chunk *chunk;

                chunk = &p->chunks[i];

                switch (chunk->chunk_id) {
                case AMDGPU_CHUNK_ID_IB:
                        r = amdgpu_cs_p2_ib(p, chunk, &ce_preempt, &de_preempt);

                case AMDGPU_CHUNK_ID_DEPENDENCIES:
                case AMDGPU_CHUNK_ID_SCHEDULED_DEPENDENCIES:
                        r = amdgpu_cs_p2_dependencies(p, chunk);

                case AMDGPU_CHUNK_ID_SYNCOBJ_IN:
                        r = amdgpu_cs_p2_syncobj_in(p, chunk);

                case AMDGPU_CHUNK_ID_SYNCOBJ_OUT:
                        r = amdgpu_cs_p2_syncobj_out(p, chunk);

                case AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_WAIT:
                        r = amdgpu_cs_p2_syncobj_timeline_wait(p, chunk);

                case AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_SIGNAL:
                        r = amdgpu_cs_p2_syncobj_timeline_signal(p, chunk);
/* Convert microseconds to bytes. */
static u64 us_to_bytes(struct amdgpu_device *adev, s64 us)
{
        if (us <= 0 || !adev->mm_stats.log2_max_MBps)

        /* Since accum_us is incremented by a million per second, just
         * multiply it by the number of MB/s to get the number of bytes.
         */
        return us << adev->mm_stats.log2_max_MBps;

static s64 bytes_to_us(struct amdgpu_device *adev, u64 bytes)
{
        if (!adev->mm_stats.log2_max_MBps)

        return bytes >> adev->mm_stats.log2_max_MBps;
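
/*
 * Worked example of the conversion above (the 64 MB/s rate is purely
 * illustrative): with log2_max_MBps == 6 the driver can move 2^6 == 64 MB/s,
 * i.e. 64 bytes per accumulated microsecond. A budget of 200,000 us therefore
 * converts to 200000 << 6 == 12,800,000 bytes (about 12.2 MiB), and moving
 * that many bytes charges the same 200,000 us back via bytes_to_us().
 */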
/* Returns how many bytes TTM can move right now. If no bytes can be moved,
 * it returns 0. If it returns non-zero, it's OK to move at least one buffer,
 * which means it can go over the threshold once. If that happens, the driver
 * will be in debt and no other buffer migrations can be done until that debt
 * is repaid.
 *
 * This approach allows moving a buffer of any size (it's important to allow
 * that).
 *
 * The currency is simply time in microseconds and it increases as the clock
 * ticks. The accumulated microseconds (us) are converted to bytes and
 * returned.
 */
static void amdgpu_cs_get_threshold_for_moves(struct amdgpu_device *adev,
                                              u64 *max_bytes,
                                              u64 *max_vis_bytes)
{
        s64 time_us, increment_us;
        u64 free_vram, total_vram, used_vram;

        /* Allow a maximum of 200 accumulated ms. This is basically per-IB
         *
         * It means that in order to get full max MBps, at least 5 IBs per
         * second must be submitted and not more than 200ms apart from each
         * other.
         */
        const s64 us_upper_bound = 200000;

        if (!adev->mm_stats.log2_max_MBps) {

        total_vram = adev->gmc.real_vram_size - atomic64_read(&adev->vram_pin_size);
        used_vram = ttm_resource_manager_usage(&adev->mman.vram_mgr.manager);
        free_vram = used_vram >= total_vram ? 0 : total_vram - used_vram;

        spin_lock(&adev->mm_stats.lock);

        /* Increase the amount of accumulated us. */
        time_us = ktime_to_us(ktime_get());
        increment_us = time_us - adev->mm_stats.last_update_us;
        adev->mm_stats.last_update_us = time_us;
        adev->mm_stats.accum_us = min(adev->mm_stats.accum_us + increment_us,
                                      us_upper_bound);

        /* This prevents the short period of low performance when the VRAM
         * usage is low and the driver is in debt or doesn't have enough
         * accumulated us to fill VRAM quickly.
         *
         * The situation can occur in these cases:
         * - a lot of VRAM is freed by userspace
         * - the presence of a big buffer causes a lot of evictions
         *   (solution: split buffers into smaller ones)
         *
         * If 128 MB or 1/8th of VRAM is free, start filling it now by setting
         * accum_us to a positive number.
         */
        if (free_vram >= 128 * 1024 * 1024 || free_vram >= total_vram / 8) {

                /* Be more aggressive on dGPUs. Try to fill a portion of free
                 * VRAM now.
                 */
                if (!(adev->flags & AMD_IS_APU))
                        min_us = bytes_to_us(adev, free_vram / 4);
                else
                        min_us = 0; /* Reset accum_us on APUs. */

                adev->mm_stats.accum_us = max(min_us, adev->mm_stats.accum_us);

        /* This is set to 0 if the driver is in debt to disallow (optional)
         * buffer moves.
         */
        *max_bytes = us_to_bytes(adev, adev->mm_stats.accum_us);

        /* Do the same for visible VRAM if half of it is free */
        if (!amdgpu_gmc_vram_full_visible(&adev->gmc)) {
                u64 total_vis_vram = adev->gmc.visible_vram_size;
                u64 used_vis_vram =
                        amdgpu_vram_mgr_vis_usage(&adev->mman.vram_mgr);

                if (used_vis_vram < total_vis_vram) {
                        u64 free_vis_vram = total_vis_vram - used_vis_vram;

                        adev->mm_stats.accum_us_vis = min(adev->mm_stats.accum_us_vis +
                                                          increment_us, us_upper_bound);

                        if (free_vis_vram >= total_vis_vram / 2)
                                adev->mm_stats.accum_us_vis =
                                        max(bytes_to_us(adev, free_vis_vram / 2),
                                            adev->mm_stats.accum_us_vis);

        *max_vis_bytes = us_to_bytes(adev, adev->mm_stats.accum_us_vis);

        spin_unlock(&adev->mm_stats.lock);
/* Report how many bytes have really been moved for the last command
 * submission. This can result in a debt that can stop buffer migrations
 * temporarily.
 */
void amdgpu_cs_report_moved_bytes(struct amdgpu_device *adev, u64 num_bytes,
                                  u64 num_vis_bytes)
{
        spin_lock(&adev->mm_stats.lock);
        adev->mm_stats.accum_us -= bytes_to_us(adev, num_bytes);
        adev->mm_stats.accum_us_vis -= bytes_to_us(adev, num_vis_bytes);
        spin_unlock(&adev->mm_stats.lock);
static int amdgpu_cs_bo_validate(void *param, struct amdgpu_bo *bo)
{
        struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
        struct amdgpu_cs_parser *p = param;
        struct ttm_operation_ctx ctx = {
                .interruptible = true,
                .no_wait_gpu = false,
                .resv = bo->tbo.base.resv
        };

        if (bo->tbo.pin_count)

        /* Don't move this buffer if we have depleted our allowance
         * to move it. Don't move anything if the threshold is zero.
         */
        if (p->bytes_moved < p->bytes_moved_threshold &&
            (!bo->tbo.base.dma_buf ||
             list_empty(&bo->tbo.base.dma_buf->attachments))) {
                if (!amdgpu_gmc_vram_full_visible(&adev->gmc) &&
                    (bo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)) {
                        /* And don't move a CPU_ACCESS_REQUIRED BO to limited
                         * visible VRAM if we've depleted our allowance to do
                         * so.
                         */
                        if (p->bytes_moved_vis < p->bytes_moved_vis_threshold)
                                domain = bo->preferred_domains;
                        else
                                domain = bo->allowed_domains;
                } else {
                        domain = bo->preferred_domains;
                }
        } else {
                domain = bo->allowed_domains;
        }

        amdgpu_bo_placement_from_domain(bo, domain);
        r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);

        p->bytes_moved += ctx.bytes_moved;
        if (!amdgpu_gmc_vram_full_visible(&adev->gmc) &&
            amdgpu_bo_in_cpu_visible_vram(bo))
                p->bytes_moved_vis += ctx.bytes_moved;

        if (unlikely(r == -ENOMEM) && domain != bo->allowed_domains) {
                domain = bo->allowed_domains;
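
/*
 * Note on the fallback above (a reading of the visible logic, not original
 * source text): validation first targets bo->preferred_domains, dropping back
 * to bo->allowed_domains when the byte budget for moves, or for moves into
 * CPU-visible VRAM, has been used up. If ttm_bo_validate() then still fails
 * with -ENOMEM for that placement, the same buffer is re-validated with the
 * wider bo->allowed_domains before giving up.
 */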
static int amdgpu_cs_list_validate(struct amdgpu_cs_parser *p,
                                   struct list_head *validated)
{
        struct ttm_operation_ctx ctx = { true, false };
        struct amdgpu_bo_list_entry *lobj;

        list_for_each_entry(lobj, validated, tv.head) {
                struct amdgpu_bo *bo = ttm_to_amdgpu_bo(lobj->tv.bo);
                struct mm_struct *usermm;

                usermm = amdgpu_ttm_tt_get_usermm(bo->tbo.ttm);
                if (usermm && usermm != current->mm)

                if (amdgpu_ttm_tt_is_userptr(bo->tbo.ttm) &&
                    lobj->user_invalidated && lobj->user_pages) {
                        amdgpu_bo_placement_from_domain(bo,
                                                        AMDGPU_GEM_DOMAIN_CPU);
                        r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);

                        amdgpu_ttm_tt_set_user_pages(bo->tbo.ttm,

                r = amdgpu_cs_bo_validate(p, bo);

                kvfree(lobj->user_pages);
                lobj->user_pages = NULL;
static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
                                union drm_amdgpu_cs *cs)
{
        struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
        struct amdgpu_vm *vm = &fpriv->vm;
        struct amdgpu_bo_list_entry *e;
        struct list_head duplicates;

        INIT_LIST_HEAD(&p->validated);

        /* p->bo_list could already be assigned if AMDGPU_CHUNK_ID_BO_HANDLES is present */
        if (cs->in.bo_list_handle) {

                r = amdgpu_bo_list_get(fpriv, cs->in.bo_list_handle,

        } else if (!p->bo_list) {
                /* Create an empty bo_list when no handle is provided */
                r = amdgpu_bo_list_create(p->adev, p->filp, NULL, 0,
        mutex_lock(&p->bo_list->bo_list_mutex);

        /* One for TTM and one for the CS job */
        amdgpu_bo_list_for_each_entry(e, p->bo_list)
                e->tv.num_shared = 2;

        amdgpu_bo_list_get_list(p->bo_list, &p->validated);

        INIT_LIST_HEAD(&duplicates);
        amdgpu_vm_get_pd_bo(&fpriv->vm, &p->validated, &p->vm_pd);

        if (p->uf_entry.tv.bo && !ttm_to_amdgpu_bo(p->uf_entry.tv.bo)->parent)
                list_add(&p->uf_entry.tv.head, &p->validated);
        /* Get userptr backing pages. If pages are updated after being registered
         * in amdgpu_gem_userptr_ioctl(), amdgpu_cs_list_validate() will do
         * amdgpu_ttm_backend_bind() to flush and invalidate new pages.
         */
        amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) {
                struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);
                bool userpage_invalidated = false;

                e->user_pages = kvmalloc_array(bo->tbo.ttm->num_pages,
                                               sizeof(struct page *),
                                               GFP_KERNEL | __GFP_ZERO);
                if (!e->user_pages) {
                        DRM_ERROR("kvmalloc_array failure\n");

                        goto out_free_user_pages;

                r = amdgpu_ttm_tt_get_user_pages(bo, e->user_pages, &e->range);

                        kvfree(e->user_pages);
                        e->user_pages = NULL;
                        goto out_free_user_pages;

                for (i = 0; i < bo->tbo.ttm->num_pages; i++) {
                        if (bo->tbo.ttm->pages[i] != e->user_pages[i]) {
                                userpage_invalidated = true;

                e->user_invalidated = userpage_invalidated;

        r = ttm_eu_reserve_buffers(&p->ticket, &p->validated, true,
        if (unlikely(r != 0)) {
                if (r != -ERESTARTSYS)
                        DRM_ERROR("ttm_eu_reserve_buffers failed.\n");
                goto out_free_user_pages;

        amdgpu_bo_list_for_each_entry(e, p->bo_list) {
                struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);

                e->bo_va = amdgpu_vm_bo_find(vm, bo);

        amdgpu_cs_get_threshold_for_moves(p->adev, &p->bytes_moved_threshold,
                                          &p->bytes_moved_vis_threshold);

        p->bytes_moved_vis = 0;

        r = amdgpu_vm_validate_pt_bos(p->adev, &fpriv->vm,
                                      amdgpu_cs_bo_validate, p);

                DRM_ERROR("amdgpu_vm_validate_pt_bos() failed.\n");

        r = amdgpu_cs_list_validate(p, &duplicates);

        r = amdgpu_cs_list_validate(p, &p->validated);

        if (p->uf_entry.tv.bo) {
                struct amdgpu_bo *uf = ttm_to_amdgpu_bo(p->uf_entry.tv.bo);

                r = amdgpu_ttm_alloc_gart(&uf->tbo);

                p->gang_leader->uf_addr += amdgpu_bo_gpu_offset(uf);

        amdgpu_cs_report_moved_bytes(p->adev, p->bytes_moved,

        for (i = 0; i < p->gang_size; ++i)
                amdgpu_job_set_resources(p->jobs[i], p->bo_list->gds_obj,

        ttm_eu_backoff_reservation(&p->ticket, &p->validated);

        amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) {
                struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);

                amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm, e->range);
                kvfree(e->user_pages);
                e->user_pages = NULL;

        mutex_unlock(&p->bo_list->bo_list_mutex);
static void trace_amdgpu_cs_ibs(struct amdgpu_cs_parser *p)
{
        if (!trace_amdgpu_cs_enabled())
                return;

        for (i = 0; i < p->gang_size; ++i) {
                struct amdgpu_job *job = p->jobs[i];

                for (j = 0; j < job->num_ibs; ++j)
                        trace_amdgpu_cs(p, job, &job->ibs[j]);
static int amdgpu_cs_patch_ibs(struct amdgpu_cs_parser *p,
                               struct amdgpu_job *job)
{
        struct amdgpu_ring *ring = amdgpu_job_ring(job);

        /* Only for UVD/VCE VM emulation */
        if (!ring->funcs->parse_cs && !ring->funcs->patch_cs_in_place)
                return 0;

        for (i = 0; i < job->num_ibs; ++i) {
                struct amdgpu_ib *ib = &job->ibs[i];
                struct amdgpu_bo_va_mapping *m;
                struct amdgpu_bo *aobj;

                va_start = ib->gpu_addr & AMDGPU_GMC_HOLE_MASK;
                r = amdgpu_cs_find_mapping(p, va_start, &aobj, &m);

                        DRM_ERROR("IB va_start is invalid\n");

                if ((va_start + ib->length_dw * 4) >
                    (m->last + 1) * AMDGPU_GPU_PAGE_SIZE) {
                        DRM_ERROR("IB va_start+ib_bytes is invalid\n");

                /* the IB should be reserved at this point */
                r = amdgpu_bo_kmap(aobj, (void **)&kptr);

                kptr += va_start - (m->start * AMDGPU_GPU_PAGE_SIZE);

                if (ring->funcs->parse_cs) {
                        memcpy(ib->ptr, kptr, ib->length_dw * 4);
                        amdgpu_bo_kunmap(aobj);

                        r = amdgpu_ring_parse_cs(ring, p, job, ib);
                } else {
                        ib->ptr = (uint32_t *)kptr;
                        r = amdgpu_ring_patch_cs_in_place(ring, p, job, ib);
                        amdgpu_bo_kunmap(aobj);
static int amdgpu_cs_patch_jobs(struct amdgpu_cs_parser *p)
{
        for (i = 0; i < p->gang_size; ++i) {
                r = amdgpu_cs_patch_ibs(p, p->jobs[i]);
static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p)
{
        struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
        struct amdgpu_job *job = p->gang_leader;
        struct amdgpu_device *adev = p->adev;
        struct amdgpu_vm *vm = &fpriv->vm;
        struct amdgpu_bo_list_entry *e;
        struct amdgpu_bo_va *bo_va;
        struct amdgpu_bo *bo;

        r = amdgpu_vm_clear_freed(adev, vm, NULL);

        r = amdgpu_vm_bo_update(adev, fpriv->prt_va, false);

        r = amdgpu_sync_fence(&p->sync, fpriv->prt_va->last_pt_update);

        if (fpriv->csa_va) {
                bo_va = fpriv->csa_va;

                r = amdgpu_vm_bo_update(adev, bo_va, false);

                r = amdgpu_sync_fence(&p->sync, bo_va->last_pt_update);

        amdgpu_bo_list_for_each_entry(e, p->bo_list) {
                /* ignore duplicates */
                bo = ttm_to_amdgpu_bo(e->tv.bo);

                r = amdgpu_vm_bo_update(adev, bo_va, false);

                r = amdgpu_sync_fence(&p->sync, bo_va->last_pt_update);

        r = amdgpu_vm_handle_moved(adev, vm);

        r = amdgpu_vm_update_pdes(adev, vm, false);

        r = amdgpu_sync_fence(&p->sync, vm->last_update);

        for (i = 0; i < p->gang_size; ++i) {

        job->vm_pd_addr = amdgpu_gmc_pd_addr(vm->root.bo);

        if (amdgpu_vm_debug) {
                /* Invalidate all BOs to test for userspace bugs */
                amdgpu_bo_list_for_each_entry(e, p->bo_list) {
                        struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);

                        /* ignore duplicates */

                        amdgpu_vm_bo_invalidate(adev, bo, false);
static int amdgpu_cs_sync_rings(struct amdgpu_cs_parser *p)
{
        struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
        struct amdgpu_bo_list_entry *e;

        list_for_each_entry(e, &p->validated, tv.head) {
                struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);
                struct dma_resv *resv = bo->tbo.base.resv;
                enum amdgpu_sync_mode sync_mode;

                sync_mode = amdgpu_bo_explicit_sync(bo) ?
                        AMDGPU_SYNC_EXPLICIT : AMDGPU_SYNC_NE_OWNER;
                r = amdgpu_sync_resv(p->adev, &p->sync, resv, sync_mode,

        for (i = 0; i < p->gang_size; ++i) {
                r = amdgpu_sync_push_to_job(&p->sync, p->jobs[i]);

        r = amdgpu_ctx_wait_prev_fence(p->ctx, p->entities[p->gang_leader_idx]);
        if (r && r != -ERESTARTSYS)
                DRM_ERROR("amdgpu_ctx_wait_prev_fence failed.\n");
static void amdgpu_cs_post_dependencies(struct amdgpu_cs_parser *p)
{
        for (i = 0; i < p->num_post_deps; ++i) {
                if (p->post_deps[i].chain && p->post_deps[i].point) {
                        drm_syncobj_add_point(p->post_deps[i].syncobj,
                                              p->post_deps[i].chain,
                                              p->fence, p->post_deps[i].point);
                        p->post_deps[i].chain = NULL;
                } else {
                        drm_syncobj_replace_fence(p->post_deps[i].syncobj,
static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
                            union drm_amdgpu_cs *cs)
{
        struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
        struct amdgpu_job *leader = p->gang_leader;
        struct amdgpu_bo_list_entry *e;

        for (i = 0; i < p->gang_size; ++i)
                drm_sched_job_arm(&p->jobs[i]->base);

        for (i = 0; i < p->gang_size; ++i) {
                struct dma_fence *fence;

                if (p->jobs[i] == leader)
                        continue;

                fence = &p->jobs[i]->base.s_fence->scheduled;
                r = drm_sched_job_add_dependency(&leader->base, fence);

        if (p->gang_size > 1) {
                for (i = 0; i < p->gang_size; ++i)
                        amdgpu_job_set_gang_leader(p->jobs[i], leader);

        /* No memory allocation is allowed while holding the notifier lock.
         * The lock is held until amdgpu_cs_submit is finished and fence is
         * added to BOs.
         */
        mutex_lock(&p->adev->notifier_lock);

        /* If userptr are invalidated after amdgpu_cs_parser_bos(), return
         * -EAGAIN, drmIoctl in libdrm will restart the amdgpu_cs_ioctl.
         */
        amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) {
                struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);

                r |= !amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm, e->range);

        p->fence = dma_fence_get(&leader->base.s_fence->finished);
        list_for_each_entry(e, &p->validated, tv.head) {

                /* Everybody except for the gang leader uses READ */
                for (i = 0; i < p->gang_size; ++i) {
                        if (p->jobs[i] == leader)
                                continue;

                        dma_resv_add_fence(e->tv.bo->base.resv,
                                           &p->jobs[i]->base.s_fence->finished,
                                           DMA_RESV_USAGE_READ);

                /* The gang leader is remembered as writer */
                e->tv.num_shared = 0;

        seq = amdgpu_ctx_add_fence(p->ctx, p->entities[p->gang_leader_idx],

        amdgpu_cs_post_dependencies(p);

        if ((leader->preamble_status & AMDGPU_PREAMBLE_IB_PRESENT) &&
            !p->ctx->preamble_presented) {
                leader->preamble_status |= AMDGPU_PREAMBLE_IB_PRESENT_FIRST;
                p->ctx->preamble_presented = true;

        cs->out.handle = seq;
        leader->uf_sequence = seq;

        amdgpu_vm_bo_trace_cs(&fpriv->vm, &p->ticket);
        for (i = 0; i < p->gang_size; ++i) {
                amdgpu_job_free_resources(p->jobs[i]);
                trace_amdgpu_cs_ioctl(p->jobs[i]);
                drm_sched_entity_push_job(&p->jobs[i]->base);

        amdgpu_vm_move_to_lru_tail(p->adev, &fpriv->vm);
        ttm_eu_fence_buffer_objects(&p->ticket, &p->validated, p->fence);

        mutex_unlock(&p->adev->notifier_lock);
        mutex_unlock(&p->bo_list->bo_list_mutex);

        mutex_unlock(&p->adev->notifier_lock);

        for (i = 0; i < p->gang_size; ++i)
                drm_sched_job_cleanup(&p->jobs[i]->base);
/* Cleanup the parser structure */
static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser)
{
        for (i = 0; i < parser->num_post_deps; i++) {
                drm_syncobj_put(parser->post_deps[i].syncobj);
                kfree(parser->post_deps[i].chain);

        kfree(parser->post_deps);

        dma_fence_put(parser->fence);

        amdgpu_ctx_put(parser->ctx);
        if (parser->bo_list)
                amdgpu_bo_list_put(parser->bo_list);

        for (i = 0; i < parser->nchunks; i++)
                kvfree(parser->chunks[i].kdata);
        kvfree(parser->chunks);
        for (i = 0; i < parser->gang_size; ++i) {
                if (parser->jobs[i])
                        amdgpu_job_free(parser->jobs[i]);

        if (parser->uf_entry.tv.bo) {
                struct amdgpu_bo *uf = ttm_to_amdgpu_bo(parser->uf_entry.tv.bo);

                amdgpu_bo_unref(&uf);
int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
{
        struct amdgpu_device *adev = drm_to_adev(dev);
        struct amdgpu_cs_parser parser;

        if (amdgpu_ras_intr_triggered())

        if (!adev->accel_working)

        r = amdgpu_cs_parser_init(&parser, adev, filp, data);
        if (r) {
                if (printk_ratelimit())
                        DRM_ERROR("Failed to initialize parser %d!\n", r);

        r = amdgpu_cs_pass1(&parser, data);

        r = amdgpu_cs_pass2(&parser);

        r = amdgpu_cs_parser_bos(&parser, data);
        if (r) {
                if (r == -ENOMEM)
                        DRM_ERROR("Not enough memory for command submission!\n");
                else if (r != -ERESTARTSYS && r != -EAGAIN)
                        DRM_ERROR("Failed to process the buffer list %d!\n", r);

        r = amdgpu_cs_patch_jobs(&parser);

        r = amdgpu_cs_vm_handling(&parser);

        r = amdgpu_cs_sync_rings(&parser);

        trace_amdgpu_cs_ibs(&parser);

        r = amdgpu_cs_submit(&parser, data);

        amdgpu_cs_parser_fini(&parser);

        ttm_eu_backoff_reservation(&parser.ticket, &parser.validated);
        mutex_unlock(&parser.bo_list->bo_list_mutex);

        amdgpu_cs_parser_fini(&parser);
/**
 * amdgpu_cs_wait_ioctl - wait for a command submission to finish
 *
 * @data: data from userspace
 * @filp: file private
 *
 * Wait for the command submission identified by handle to finish.
 */
int amdgpu_cs_wait_ioctl(struct drm_device *dev, void *data,
                         struct drm_file *filp)
{
        union drm_amdgpu_wait_cs *wait = data;
        unsigned long timeout = amdgpu_gem_timeout(wait->in.timeout);
        struct drm_sched_entity *entity;
        struct amdgpu_ctx *ctx;
        struct dma_fence *fence;

        ctx = amdgpu_ctx_get(filp->driver_priv, wait->in.ctx_id);

        r = amdgpu_ctx_get_entity(ctx, wait->in.ip_type, wait->in.ip_instance,
                                  wait->in.ring, &entity);

                amdgpu_ctx_put(ctx);

        fence = amdgpu_ctx_get_fence(ctx, entity, wait->in.handle);

        r = dma_fence_wait_timeout(fence, true, timeout);
        if (r > 0 && fence->error)

        dma_fence_put(fence);

        amdgpu_ctx_put(ctx);

        memset(wait, 0, sizeof(*wait));
        wait->out.status = (r == 0);
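
/*
 * Minimal libdrm-style userspace sketch (illustrative only, values made up)
 * of waiting on a submission handle returned in drm_amdgpu_cs out.handle:
 *
 *	union drm_amdgpu_wait_cs wait = { 0 };
 *	wait.in.handle      = seq;		// out.handle from the CS ioctl
 *	wait.in.ctx_id      = ctx_id;
 *	wait.in.ip_type     = AMDGPU_HW_IP_GFX;
 *	wait.in.ip_instance = 0;
 *	wait.in.ring        = 0;
 *	wait.in.timeout     = AMDGPU_TIMEOUT_INFINITE;
 *	drmIoctl(fd, DRM_IOCTL_AMDGPU_WAIT_CS, &wait);
 *	// wait.out.status == 0 means the fence signaled within the timeout
 */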
/**
 * amdgpu_cs_get_fence - helper to get fence from drm_amdgpu_fence
 *
 * @adev: amdgpu device
 * @filp: file private
 * @user: drm_amdgpu_fence copied from user space
 */
static struct dma_fence *amdgpu_cs_get_fence(struct amdgpu_device *adev,
                                             struct drm_file *filp,
                                             struct drm_amdgpu_fence *user)
{
        struct drm_sched_entity *entity;
        struct amdgpu_ctx *ctx;
        struct dma_fence *fence;

        ctx = amdgpu_ctx_get(filp->driver_priv, user->ctx_id);

                return ERR_PTR(-EINVAL);

        r = amdgpu_ctx_get_entity(ctx, user->ip_type, user->ip_instance,
                                  user->ring, &entity);

                amdgpu_ctx_put(ctx);

        fence = amdgpu_ctx_get_fence(ctx, entity, user->seq_no);
        amdgpu_ctx_put(ctx);
int amdgpu_cs_fence_to_handle_ioctl(struct drm_device *dev, void *data,
                                    struct drm_file *filp)
{
        struct amdgpu_device *adev = drm_to_adev(dev);
        union drm_amdgpu_fence_to_handle *info = data;
        struct dma_fence *fence;
        struct drm_syncobj *syncobj;
        struct sync_file *sync_file;

        fence = amdgpu_cs_get_fence(adev, filp, &info->in.fence);

                return PTR_ERR(fence);

                fence = dma_fence_get_stub();

        switch (info->in.what) {
        case AMDGPU_FENCE_TO_HANDLE_GET_SYNCOBJ:
                r = drm_syncobj_create(&syncobj, 0, fence);
                dma_fence_put(fence);

                r = drm_syncobj_get_handle(filp, syncobj, &info->out.handle);
                drm_syncobj_put(syncobj);

        case AMDGPU_FENCE_TO_HANDLE_GET_SYNCOBJ_FD:
                r = drm_syncobj_create(&syncobj, 0, fence);
                dma_fence_put(fence);

                r = drm_syncobj_get_fd(syncobj, (int *)&info->out.handle);
                drm_syncobj_put(syncobj);

        case AMDGPU_FENCE_TO_HANDLE_GET_SYNC_FILE_FD:
                fd = get_unused_fd_flags(O_CLOEXEC);

                        dma_fence_put(fence);

                sync_file = sync_file_create(fence);
                dma_fence_put(fence);

                fd_install(fd, sync_file->file);
                info->out.handle = fd;

        dma_fence_put(fence);
/**
 * amdgpu_cs_wait_all_fences - wait on all fences to signal
 *
 * @adev: amdgpu device
 * @filp: file private
 * @wait: wait parameters
 * @fences: array of drm_amdgpu_fence
 */
static int amdgpu_cs_wait_all_fences(struct amdgpu_device *adev,
                                     struct drm_file *filp,
                                     union drm_amdgpu_wait_fences *wait,
                                     struct drm_amdgpu_fence *fences)
{
        uint32_t fence_count = wait->in.fence_count;

        for (i = 0; i < fence_count; i++) {
                struct dma_fence *fence;
                unsigned long timeout = amdgpu_gem_timeout(wait->in.timeout_ns);

                fence = amdgpu_cs_get_fence(adev, filp, &fences[i]);

                        return PTR_ERR(fence);

                r = dma_fence_wait_timeout(fence, true, timeout);
                dma_fence_put(fence);

                        return fence->error;

        memset(wait, 0, sizeof(*wait));
        wait->out.status = (r > 0);
/**
 * amdgpu_cs_wait_any_fence - wait on any fence to signal
 *
 * @adev: amdgpu device
 * @filp: file private
 * @wait: wait parameters
 * @fences: array of drm_amdgpu_fence
 */
static int amdgpu_cs_wait_any_fence(struct amdgpu_device *adev,
                                    struct drm_file *filp,
                                    union drm_amdgpu_wait_fences *wait,
                                    struct drm_amdgpu_fence *fences)
{
        unsigned long timeout = amdgpu_gem_timeout(wait->in.timeout_ns);
        uint32_t fence_count = wait->in.fence_count;
        uint32_t first = ~0;
        struct dma_fence **array;

        /* Prepare the fence array */
        array = kcalloc(fence_count, sizeof(struct dma_fence *), GFP_KERNEL);

        for (i = 0; i < fence_count; i++) {
                struct dma_fence *fence;

                fence = amdgpu_cs_get_fence(adev, filp, &fences[i]);
                if (IS_ERR(fence)) {

                        goto err_free_fence_array;

                } else { /* NULL, the fence has been already signaled */

        r = dma_fence_wait_any_timeout(array, fence_count, true, timeout,

                goto err_free_fence_array;

        memset(wait, 0, sizeof(*wait));
        wait->out.status = (r > 0);
        wait->out.first_signaled = first;

        if (first < fence_count && array[first])
                r = array[first]->error;

err_free_fence_array:
        for (i = 0; i < fence_count; i++)
                dma_fence_put(array[i]);
/**
 * amdgpu_cs_wait_fences_ioctl - wait for multiple command submissions to finish
 *
 * @data: data from userspace
 * @filp: file private
 */
int amdgpu_cs_wait_fences_ioctl(struct drm_device *dev, void *data,
                                struct drm_file *filp)
{
        struct amdgpu_device *adev = drm_to_adev(dev);
        union drm_amdgpu_wait_fences *wait = data;
        uint32_t fence_count = wait->in.fence_count;
        struct drm_amdgpu_fence *fences_user;
        struct drm_amdgpu_fence *fences;

        /* Get the fences from userspace */
        fences = kmalloc_array(fence_count, sizeof(struct drm_amdgpu_fence),

        fences_user = u64_to_user_ptr(wait->in.fences);
        if (copy_from_user(fences, fences_user,
                           sizeof(struct drm_amdgpu_fence) * fence_count)) {

                goto err_free_fences;

        if (wait->in.wait_all)
                r = amdgpu_cs_wait_all_fences(adev, filp, wait, fences);
        else
                r = amdgpu_cs_wait_any_fence(adev, filp, wait, fences);
/**
 * amdgpu_cs_find_mapping - find bo_va for VM address
 *
 * @parser: command submission parser context
 * @bo: resulting BO of the mapping found
 * @map: Placeholder to return found BO mapping
 *
 * Search the buffer objects in the command submission context for a certain
 * virtual memory address. Returns allocation structure when found, NULL
 * otherwise.
 */
int amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser,
                           uint64_t addr, struct amdgpu_bo **bo,
                           struct amdgpu_bo_va_mapping **map)
{
        struct amdgpu_fpriv *fpriv = parser->filp->driver_priv;
        struct ttm_operation_ctx ctx = { false, false };
        struct amdgpu_vm *vm = &fpriv->vm;
        struct amdgpu_bo_va_mapping *mapping;

        addr /= AMDGPU_GPU_PAGE_SIZE;

        mapping = amdgpu_vm_bo_lookup_mapping(vm, addr);
        if (!mapping || !mapping->bo_va || !mapping->bo_va->base.bo)
                return -EINVAL;

        *bo = mapping->bo_va->base.bo;

        /* Double check that the BO is reserved by this CS */
        if (dma_resv_locking_ctx((*bo)->tbo.base.resv) != &parser->ticket)
                return -EINVAL;

        if (!((*bo)->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS)) {
                (*bo)->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
                amdgpu_bo_placement_from_domain(*bo, (*bo)->allowed_domains);
                r = ttm_bo_validate(&(*bo)->tbo, &(*bo)->placement, &ctx);

        return amdgpu_ttm_alloc_gart(&(*bo)->tbo);
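
/*
 * Example of the address conversion above: with AMDGPU_GPU_PAGE_SIZE == 4096,
 * an IB GPU VA of 0x401000 becomes page number 0x401, which is the unit that
 * amdgpu_vm_bo_lookup_mapping() works in and that the mapping's m->start and
 * m->last indices (used by amdgpu_cs_patch_ibs()) are compared against.
 */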