drm/amdgpu: implement more ib pools (v2)
author xinhui pan <xinhui.pan@amd.com>
Thu, 26 Mar 2020 00:38:29 +0000 (08:38 +0800)
committer Alex Deucher <alexander.deucher@amd.com>
Wed, 1 Apr 2020 18:44:44 +0000 (14:44 -0400)
We now have three IB pools: NORMAL, VM and DIRECT.

Any job that schedules IBs without depending on the GPU scheduler should
use the DIRECT pool.

Any job that schedules direct VM update IBs should use the VM pool.

All other jobs use the NORMAL pool.

v2: squash in coding style fix

Signed-off-by: xinhui pan <xinhui.pan@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
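
For illustration only (not part of this patch): a minimal sketch of how a caller picks a pool with the updated amdgpu_job_alloc_with_ib() signature. The wrapper function and the IB size below are hypothetical; only the enum values and the allocator signature come from this change.

/* Hypothetical helper -- a scheduler-independent submission uses the DIRECT pool. */
static int example_alloc_direct_job(struct amdgpu_device *adev,
				    struct amdgpu_job **job)
{
	/* 16 dwords * 4 bytes is an arbitrary example IB size */
	return amdgpu_job_alloc_with_ib(adev, 16 * 4,
					AMDGPU_IB_POOL_DIRECT, job);
}

A direct VM update path would pass AMDGPU_IB_POOL_VM instead, and anything submitted through the GPU scheduler keeps AMDGPU_IB_POOL_NORMAL.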
25 files changed:
drivers/gpu/drm/amd/amdgpu/amdgpu.h
drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
drivers/gpu/drm/amd/amdgpu/amdgpu_job.h
drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c
drivers/gpu/drm/amd/amdgpu/cik_sdma.c
drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
drivers/gpu/drm/amd/amdgpu/si_dma.c
drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c

drivers/gpu/drm/amd/amdgpu/amdgpu.h
index b0597a84e137d3c81bd4d6add871342829982feb..7b3058fb5662b809f25970b2e88fd9e8c242d0a9 100644
@@ -388,6 +388,13 @@ struct amdgpu_sa_bo {
 int amdgpu_fence_slab_init(void);
 void amdgpu_fence_slab_fini(void);
 
+enum amdgpu_ib_pool_type {
+       AMDGPU_IB_POOL_NORMAL = 0,
+       AMDGPU_IB_POOL_VM,
+       AMDGPU_IB_POOL_DIRECT,
+
+       AMDGPU_IB_POOL_MAX
+};
 /*
  * IRQS.
  */
@@ -439,7 +446,9 @@ struct amdgpu_fpriv {
 int amdgpu_file_to_fpriv(struct file *filp, struct amdgpu_fpriv **fpriv);
 
 int amdgpu_ib_get(struct amdgpu_device *adev, struct amdgpu_vm *vm,
-                 unsigned size, struct amdgpu_ib *ib);
+                 unsigned size,
+                 enum amdgpu_ib_pool_type pool,
+                 struct amdgpu_ib *ib);
 void amdgpu_ib_free(struct amdgpu_device *adev, struct amdgpu_ib *ib,
                    struct dma_fence *f);
 int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
@@ -843,7 +852,7 @@ struct amdgpu_device {
        unsigned                        num_rings;
        struct amdgpu_ring              *rings[AMDGPU_MAX_RINGS];
        bool                            ib_pool_ready;
-       struct amdgpu_sa_manager        ring_tmp_bo;
+       struct amdgpu_sa_manager        ring_tmp_bo[AMDGPU_IB_POOL_MAX];
 
        /* interrupts */
        struct amdgpu_irq               irq;
drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index af91627b19b0c5dd42a924fc2169136fd5165934..3eee5c7d83e0372195faaf06e52bbb3210855572 100644
@@ -924,7 +924,7 @@ static int amdgpu_cs_ib_fill(struct amdgpu_device *adev,
 
                ring = to_amdgpu_ring(entity->rq->sched);
                r =  amdgpu_ib_get(adev, vm, ring->funcs->parse_cs ?
-                                  chunk_ib->ib_bytes : 0, ib);
+                                  chunk_ib->ib_bytes : 0, AMDGPU_IB_POOL_NORMAL, ib);
                if (r) {
                        DRM_ERROR("Failed to get ib !\n");
                        return r;
drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
index ccbd7acfc4cb1be94259e19528cc271823305010..1adaac972190daac59a6e4948931eb3e896fa870 100644
  * Returns 0 on success, error on failure.
  */
 int amdgpu_ib_get(struct amdgpu_device *adev, struct amdgpu_vm *vm,
-                 unsigned size, struct amdgpu_ib *ib)
+               unsigned size,
+               enum amdgpu_ib_pool_type pool_type,
+               struct amdgpu_ib *ib)
 {
        int r;
 
        if (size) {
-               r = amdgpu_sa_bo_new(&adev->ring_tmp_bo,
+               r = amdgpu_sa_bo_new(&adev->ring_tmp_bo[pool_type],
                                      &ib->sa_bo, size, 256);
                if (r) {
                        dev_err(adev->dev, "failed to get a new IB (%d)\n", r);
@@ -280,19 +282,27 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
  */
 int amdgpu_ib_pool_init(struct amdgpu_device *adev)
 {
-       int r;
+       int r, i;
+       unsigned size;
 
        if (adev->ib_pool_ready) {
                return 0;
        }
-       r = amdgpu_sa_bo_manager_init(adev, &adev->ring_tmp_bo,
-                                     AMDGPU_IB_POOL_SIZE*64*1024,
-                                     AMDGPU_GPU_PAGE_SIZE,
-                                     AMDGPU_GEM_DOMAIN_GTT);
-       if (r) {
-               return r;
+       for (i = 0; i < AMDGPU_IB_POOL_MAX; i++) {
+               if (i == AMDGPU_IB_POOL_DIRECT)
+                       size = PAGE_SIZE * 2;
+               else
+                       size = AMDGPU_IB_POOL_SIZE*64*1024;
+               r = amdgpu_sa_bo_manager_init(adev, &adev->ring_tmp_bo[i],
+                               size,
+                               AMDGPU_GPU_PAGE_SIZE,
+                               AMDGPU_GEM_DOMAIN_GTT);
+               if (r) {
+                       for (i--; i >= 0; i--)
+                               amdgpu_sa_bo_manager_fini(adev, &adev->ring_tmp_bo[i]);
+                       return r;
+               }
        }
-
        adev->ib_pool_ready = true;
 
        return 0;
@@ -308,8 +318,11 @@ int amdgpu_ib_pool_init(struct amdgpu_device *adev)
  */
 void amdgpu_ib_pool_fini(struct amdgpu_device *adev)
 {
+       int i;
+
        if (adev->ib_pool_ready) {
-               amdgpu_sa_bo_manager_fini(adev, &adev->ring_tmp_bo);
+               for (i = 0; i < AMDGPU_IB_POOL_MAX; i++)
+                       amdgpu_sa_bo_manager_fini(adev, &adev->ring_tmp_bo[i]);
                adev->ib_pool_ready = false;
        }
 }
@@ -406,7 +419,12 @@ static int amdgpu_debugfs_sa_info(struct seq_file *m, void *data)
        struct drm_device *dev = node->minor->dev;
        struct amdgpu_device *adev = dev->dev_private;
 
-       amdgpu_sa_bo_dump_debug_info(&adev->ring_tmp_bo, m);
+       seq_printf(m, "-------------------- NORMAL -------------------- \n");
+       amdgpu_sa_bo_dump_debug_info(&adev->ring_tmp_bo[AMDGPU_IB_POOL_NORMAL], m);
+       seq_printf(m, "---------------------- VM ---------------------- \n");
+       amdgpu_sa_bo_dump_debug_info(&adev->ring_tmp_bo[AMDGPU_IB_POOL_VM], m);
+       seq_printf(m, "-------------------- DIRECT--------------------- \n");
+       amdgpu_sa_bo_dump_debug_info(&adev->ring_tmp_bo[AMDGPU_IB_POOL_DIRECT], m);
 
        return 0;
 
drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
index 4981e443a88473050e27559e4f43acc1c5a13d58..2b99f5952375129030ba0605c86b60065a7dc4c4 100644
@@ -87,7 +87,8 @@ int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs,
 }
 
 int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev, unsigned size,
-                            struct amdgpu_job **job)
+               enum amdgpu_ib_pool_type pool_type,
+               struct amdgpu_job **job)
 {
        int r;
 
@@ -95,7 +96,7 @@ int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev, unsigned size,
        if (r)
                return r;
 
-       r = amdgpu_ib_get(adev, NULL, size, &(*job)->ibs[0]);
+       r = amdgpu_ib_get(adev, NULL, size, pool_type, &(*job)->ibs[0]);
        if (r)
                kfree(*job);
 
drivers/gpu/drm/amd/amdgpu/amdgpu_job.h
index 3f7b8433d17904fd8e535219108a47f0f54c9e3d..d4262069d501d374d878f41a949b5e19deb51edb 100644
@@ -38,6 +38,7 @@
 #define AMDGPU_JOB_GET_VMID(job) ((job) ? (job)->vmid : 0)
 
 struct amdgpu_fence;
+enum amdgpu_ib_pool_type;
 
 struct amdgpu_job {
        struct drm_sched_job    base;
@@ -67,8 +68,7 @@ struct amdgpu_job {
 int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs,
                     struct amdgpu_job **job, struct amdgpu_vm *vm);
 int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev, unsigned size,
-                            struct amdgpu_job **job);
-
+               enum amdgpu_ib_pool_type pool, struct amdgpu_job **job);
 void amdgpu_job_free_resources(struct amdgpu_job *job);
 void amdgpu_job_free(struct amdgpu_job *job);
 int amdgpu_job_submit(struct amdgpu_job *job, struct drm_sched_entity *entity,
drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c
index 5727f00afc8e6503647256cb171473eb0670aeda..d31d65e6b0398eaf6c20121f815b8fc4a88a1226 100644
@@ -144,7 +144,8 @@ static int amdgpu_jpeg_dec_set_reg(struct amdgpu_ring *ring, uint32_t handle,
        const unsigned ib_size_dw = 16;
        int i, r;
 
-       r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
+       r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4,
+                                       AMDGPU_IB_POOL_DIRECT, &job);
        if (r)
                return r;
 
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index 9f44ba7d9d972b5e2651761f11a65c102fd4abd1..1331b4c5bdca06341a2d2709ab4d670b9cccba56 100644
@@ -2043,7 +2043,8 @@ static int amdgpu_map_buffer(struct ttm_buffer_object *bo,
        num_dw = ALIGN(adev->mman.buffer_funcs->copy_num_dw, 8);
        num_bytes = num_pages * 8;
 
-       r = amdgpu_job_alloc_with_ib(adev, num_dw * 4 + num_bytes, &job);
+       r = amdgpu_job_alloc_with_ib(adev, num_dw * 4 + num_bytes,
+                                                                       AMDGPU_IB_POOL_NORMAL, &job);
        if (r)
                return r;
 
@@ -2102,7 +2103,8 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset,
        num_loops = DIV_ROUND_UP(byte_count, max_bytes);
        num_dw = ALIGN(num_loops * adev->mman.buffer_funcs->copy_num_dw, 8);
 
-       r = amdgpu_job_alloc_with_ib(adev, num_dw * 4, &job);
+       r = amdgpu_job_alloc_with_ib(adev, num_dw * 4,
+                       direct_submit ? AMDGPU_IB_POOL_DIRECT : AMDGPU_IB_POOL_NORMAL, &job);
        if (r)
                return r;
 
@@ -2191,7 +2193,7 @@ int amdgpu_fill_buffer(struct amdgpu_bo *bo,
        /* for IB padding */
        num_dw += 64;
 
-       r = amdgpu_job_alloc_with_ib(adev, num_dw * 4, &job);
+       r = amdgpu_job_alloc_with_ib(adev, num_dw * 4, AMDGPU_IB_POOL_NORMAL, &job);
        if (r)
                return r;
 
drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
index 5fd32ad1c5751e9b9ca0f11fda0c937ad0d798e3..550282d9c1fc7a9fd5382a29a1e6c7386ae37377 100644
@@ -1056,7 +1056,8 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
                        goto err;
        }
 
-       r = amdgpu_job_alloc_with_ib(adev, 64, &job);
+       r = amdgpu_job_alloc_with_ib(adev, 64,
+                       direct ? AMDGPU_IB_POOL_DIRECT : AMDGPU_IB_POOL_NORMAL, &job);
        if (r)
                goto err;
 
drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
index 59ddba137946bd5e5a345eab215fb5c823628d3c..d090455282e57c01cd87aa9b8ea6843fa90c6f38 100644
@@ -446,7 +446,8 @@ static int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
        uint64_t addr;
        int i, r;
 
-       r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
+       r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4,
+                                       AMDGPU_IB_POOL_DIRECT, &job);
        if (r)
                return r;
 
@@ -524,7 +525,8 @@ static int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
        struct dma_fence *f = NULL;
        int i, r;
 
-       r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
+       r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4,
+                       direct ? AMDGPU_IB_POOL_DIRECT : AMDGPU_IB_POOL_NORMAL, &job);
        if (r)
                return r;
 
drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
index a41272fbcba23ab7f87915b71b9d7ddbc4b32c03..f55e2410f9488b92d23b831f5fa1c18351d92e0a 100644
@@ -390,7 +390,8 @@ static int amdgpu_vcn_dec_send_msg(struct amdgpu_ring *ring,
        uint64_t addr;
        int i, r;
 
-       r = amdgpu_job_alloc_with_ib(adev, 64, &job);
+       r = amdgpu_job_alloc_with_ib(adev, 64,
+                                       AMDGPU_IB_POOL_DIRECT, &job);
        if (r)
                goto err;
 
@@ -557,7 +558,8 @@ static int amdgpu_vcn_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t hand
        uint64_t addr;
        int i, r;
 
-       r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
+       r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4,
+                                       AMDGPU_IB_POOL_DIRECT, &job);
        if (r)
                return r;
 
@@ -610,7 +612,8 @@ static int amdgpu_vcn_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t han
        uint64_t addr;
        int i, r;
 
-       r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
+       r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4,
+                                       AMDGPU_IB_POOL_DIRECT, &job);
        if (r)
                return r;
 
drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c
index cf96c335b258b479c37e10fadad3739a51862ad2..fbd451f3559ad752f84e81408c7a9fcc5339ea9a 100644
@@ -64,7 +64,8 @@ static int amdgpu_vm_sdma_prepare(struct amdgpu_vm_update_params *p,
        unsigned int ndw = AMDGPU_VM_SDMA_MIN_NUM_DW;
        int r;
 
-       r = amdgpu_job_alloc_with_ib(p->adev, ndw * 4, &p->job);
+       r = amdgpu_job_alloc_with_ib(p->adev, ndw * 4,
+                       p->direct ? AMDGPU_IB_POOL_VM : AMDGPU_IB_POOL_NORMAL, &p->job);
        if (r)
                return r;
 
@@ -223,7 +224,8 @@ static int amdgpu_vm_sdma_update(struct amdgpu_vm_update_params *p,
                        ndw = max(ndw, AMDGPU_VM_SDMA_MIN_NUM_DW);
                        ndw = min(ndw, AMDGPU_VM_SDMA_MAX_NUM_DW);
 
-                       r = amdgpu_job_alloc_with_ib(p->adev, ndw * 4, &p->job);
+                       r = amdgpu_job_alloc_with_ib(p->adev, ndw * 4,
+                                       p->direct ? AMDGPU_IB_POOL_VM : AMDGPU_IB_POOL_NORMAL, &p->job);
                        if (r)
                                return r;
 
drivers/gpu/drm/amd/amdgpu/cik_sdma.c
index 580d3f93d67093ec1925e671d8fea7d2a6f5e2ac..807a90cb71d954d632a7b64525b03c63a251ba60 100644
@@ -679,7 +679,8 @@ static int cik_sdma_ring_test_ib(struct amdgpu_ring *ring, long timeout)
        tmp = 0xCAFEDEAD;
        adev->wb.wb[index] = cpu_to_le32(tmp);
        memset(&ib, 0, sizeof(ib));
-       r = amdgpu_ib_get(adev, NULL, 256, &ib);
+       r = amdgpu_ib_get(adev, NULL, 256,
+                                       AMDGPU_IB_POOL_DIRECT, &ib);
        if (r)
                goto err0;
 
drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
index f6e3f59efa2f5e8dd811151a4bdc0299c4347c5b..22d6e2c60b54baa37aca6712f5f709cf42fcf1a0 100644
@@ -557,7 +557,8 @@ static int gfx_v10_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
        gpu_addr = adev->wb.gpu_addr + (index * 4);
        adev->wb.wb[index] = cpu_to_le32(0xCAFEDEAD);
        memset(&ib, 0, sizeof(ib));
-       r = amdgpu_ib_get(adev, NULL, 16, &ib);
+       r = amdgpu_ib_get(adev, NULL, 16,
+                                       AMDGPU_IB_POOL_DIRECT, &ib);
        if (r)
                goto err1;
 
drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
index 31f44d05e606d1e367da11ff176b4279c81dc2a4..8d01b8224e0bb652d92defdd9311aba6bfa44caa 100644
@@ -1914,7 +1914,8 @@ static int gfx_v6_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 
        WREG32(scratch, 0xCAFEDEAD);
        memset(&ib, 0, sizeof(ib));
-       r = amdgpu_ib_get(adev, NULL, 256, &ib);
+       r = amdgpu_ib_get(adev, NULL, 256,
+                                       AMDGPU_IB_POOL_DIRECT, &ib);
        if (r)
                goto err1;
 
drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
index 733d398c61ccb7b6b94c4b4384f4fd4f6f1d4ddb..a06239fdd79bbedf2afefd072bad2a13988286bd 100644
@@ -2364,7 +2364,8 @@ static int gfx_v7_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 
        WREG32(scratch, 0xCAFEDEAD);
        memset(&ib, 0, sizeof(ib));
-       r = amdgpu_ib_get(adev, NULL, 256, &ib);
+       r = amdgpu_ib_get(adev, NULL, 256,
+                                       AMDGPU_IB_POOL_DIRECT, &ib);
        if (r)
                goto err1;
 
drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index fc32586ef80b1a5c91117b5f469a17826f349408..6f84b85892ce9036fcf05df3069b3bdadaf8891a 100644
@@ -888,7 +888,8 @@ static int gfx_v8_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
        gpu_addr = adev->wb.gpu_addr + (index * 4);
        adev->wb.wb[index] = cpu_to_le32(0xCAFEDEAD);
        memset(&ib, 0, sizeof(ib));
-       r = amdgpu_ib_get(adev, NULL, 16, &ib);
+       r = amdgpu_ib_get(adev, NULL, 16,
+                                       AMDGPU_IB_POOL_DIRECT, &ib);
        if (r)
                goto err1;
 
@@ -1550,7 +1551,8 @@ static int gfx_v8_0_do_edc_gpr_workarounds(struct amdgpu_device *adev)
 
        /* allocate an indirect buffer to put the commands in */
        memset(&ib, 0, sizeof(ib));
-       r = amdgpu_ib_get(adev, NULL, total_size, &ib);
+       r = amdgpu_ib_get(adev, NULL, total_size,
+                                       AMDGPU_IB_POOL_DIRECT, &ib);
        if (r) {
                DRM_ERROR("amdgpu: failed to get ib (%d).\n", r);
                return r;
drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index 608ffe3b684e1e0d8e5c6783af50a6ccd5fad66b..7ed4456d90a774ed4912da3117ea1118c23f90a8 100644
@@ -1082,7 +1082,8 @@ static int gfx_v9_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
        gpu_addr = adev->wb.gpu_addr + (index * 4);
        adev->wb.wb[index] = cpu_to_le32(0xCAFEDEAD);
        memset(&ib, 0, sizeof(ib));
-       r = amdgpu_ib_get(adev, NULL, 16, &ib);
+       r = amdgpu_ib_get(adev, NULL, 16,
+                                       AMDGPU_IB_POOL_DIRECT, &ib);
        if (r)
                goto err1;
 
@@ -4485,7 +4486,8 @@ static int gfx_v9_0_do_edc_gpr_workarounds(struct amdgpu_device *adev)
 
        /* allocate an indirect buffer to put the commands in */
        memset(&ib, 0, sizeof(ib));
-       r = amdgpu_ib_get(adev, NULL, total_size, &ib);
+       r = amdgpu_ib_get(adev, NULL, total_size,
+                                       AMDGPU_IB_POOL_DIRECT, &ib);
        if (r) {
                DRM_ERROR("amdgpu: failed to get ib (%d).\n", r);
                return r;
drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
index 9775eca6fe434e044d21741eb5123ca16f2ec340..78d769e136433e27cb256822dc5371097e525867 100644
@@ -369,7 +369,7 @@ static void gmc_v10_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
         * translation. Avoid this by doing the invalidation from the SDMA
         * itself.
         */
-       r = amdgpu_job_alloc_with_ib(adev, 16 * 4, &job);
+       r = amdgpu_job_alloc_with_ib(adev, 16 * 4, AMDGPU_IB_POOL_VM, &job);
        if (r)
                goto error_alloc;
 
drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
index 7d509a40076fa021f3724a4258fa779afe005a64..96fafd18b8c478b44223d83da4256f6f07d2ecef 100644
@@ -614,7 +614,8 @@ static int sdma_v2_4_ring_test_ib(struct amdgpu_ring *ring, long timeout)
        tmp = 0xCAFEDEAD;
        adev->wb.wb[index] = cpu_to_le32(tmp);
        memset(&ib, 0, sizeof(ib));
-       r = amdgpu_ib_get(adev, NULL, 256, &ib);
+       r = amdgpu_ib_get(adev, NULL, 256,
+                                       AMDGPU_IB_POOL_DIRECT, &ib);
        if (r)
                goto err0;
 
drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
index b6109a99fc43c4e35d27de930b93756d5c612d8e..0d5dcc99ff74776079a78b087021d83eb65fa920 100644
@@ -886,7 +886,8 @@ static int sdma_v3_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
        tmp = 0xCAFEDEAD;
        adev->wb.wb[index] = cpu_to_le32(tmp);
        memset(&ib, 0, sizeof(ib));
-       r = amdgpu_ib_get(adev, NULL, 256, &ib);
+       r = amdgpu_ib_get(adev, NULL, 256,
+                                       AMDGPU_IB_POOL_DIRECT, &ib);
        if (r)
                goto err0;
 
drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
index 9159bd46482b1a0165f7820141e1a0f374bf68dd..ccb3e2ca2b80e42f807a4b8bfe71f86cd1a6988c 100644
@@ -1539,7 +1539,8 @@ static int sdma_v4_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
        tmp = 0xCAFEDEAD;
        adev->wb.wb[index] = cpu_to_le32(tmp);
        memset(&ib, 0, sizeof(ib));
-       r = amdgpu_ib_get(adev, NULL, 256, &ib);
+       r = amdgpu_ib_get(adev, NULL, 256,
+                                       AMDGPU_IB_POOL_DIRECT, &ib);
        if (r)
                goto err0;
 
drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
index ebfd2cdf4e651215bc5ee0bf6d54be8ec9f46762..e9c427b9efa976838c1107d3b075f9c6f6928e3d 100644
@@ -948,7 +948,8 @@ static int sdma_v5_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
        tmp = 0xCAFEDEAD;
        adev->wb.wb[index] = cpu_to_le32(tmp);
        memset(&ib, 0, sizeof(ib));
-       r = amdgpu_ib_get(adev, NULL, 256, &ib);
+       r = amdgpu_ib_get(adev, NULL, 256,
+                                       AMDGPU_IB_POOL_DIRECT, &ib);
        if (r) {
                DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r);
                goto err0;
drivers/gpu/drm/amd/amdgpu/si_dma.c
index 42d5601b6bf35233f7fbea9cad6181da0ce6198f..f85d9a72563e9654b53245bd2d9816fa9bb448b3 100644
@@ -267,7 +267,8 @@ static int si_dma_ring_test_ib(struct amdgpu_ring *ring, long timeout)
        tmp = 0xCAFEDEAD;
        adev->wb.wb[index] = cpu_to_le32(tmp);
        memset(&ib, 0, sizeof(ib));
-       r = amdgpu_ib_get(adev, NULL, 256, &ib);
+       r = amdgpu_ib_get(adev, NULL, 256,
+                                       AMDGPU_IB_POOL_DIRECT, &ib);
        if (r)
                goto err0;
 
drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
index e0aadcaf6c8b3cb5629ae5605de26d2fe4a53074..74b1eef5c04fb136699867cbe0257593f95cdde7 100644
@@ -216,7 +216,8 @@ static int uvd_v6_0_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t handle
        uint64_t addr;
        int i, r;
 
-       r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
+       r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4,
+                                       AMDGPU_IB_POOL_DIRECT, &job);
        if (r)
                return r;
 
@@ -279,7 +280,8 @@ static int uvd_v6_0_enc_get_destroy_msg(struct amdgpu_ring *ring,
        uint64_t addr;
        int i, r;
 
-       r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
+       r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4,
+                                       AMDGPU_IB_POOL_DIRECT, &job);
        if (r)
                return r;
 
drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
index 20f10a5617ca5cdd462c57ea23e8e6f3c764ddd1..9cdef6b1e1f9c03ffaeea4fb3953d708d46d7edc 100644
@@ -224,7 +224,8 @@ static int uvd_v7_0_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t handle
        uint64_t addr;
        int i, r;
 
-       r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
+       r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4,
+                                       AMDGPU_IB_POOL_DIRECT, &job);
        if (r)
                return r;
 
@@ -286,7 +287,8 @@ static int uvd_v7_0_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handl
        uint64_t addr;
        int i, r;
 
-       r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
+       r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4,
+                                       AMDGPU_IB_POOL_DIRECT, &job);
        if (r)
                return r;