drm/amdgpu: remove ring parameter from amdgpu_job_submit
author: Christian König <christian.koenig@amd.com>
Fri, 13 Jul 2018 11:54:56 +0000 (13:54 +0200)
committer: Alex Deucher <alexander.deucher@amd.com>
Mon, 16 Jul 2018 21:11:52 +0000 (16:11 -0500)
We know the ring through the entity anyway.

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Junwei Zhang <Jerry.Zhang@amd.com>
Acked-by: Chunming Zhou <david1.zhou@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
drivers/gpu/drm/amd/amdgpu/amdgpu_job.h
drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c

index 10e0a97c7c03ead2694c5dbe41430aa023699b8b..51ff751e093b9c7343a09979aa6ef47300486a1e 100644 (file)
@@ -117,21 +117,20 @@ void amdgpu_job_free(struct amdgpu_job *job)
        kfree(job);
 }
 
-int amdgpu_job_submit(struct amdgpu_job *job, struct amdgpu_ring *ring,
-                     struct drm_sched_entity *entity, void *owner,
-                     struct dma_fence **f)
+int amdgpu_job_submit(struct amdgpu_job *job, struct drm_sched_entity *entity,
+                     void *owner, struct dma_fence **f)
 {
        int r;
-       job->ring = ring;
 
        if (!f)
                return -EINVAL;
 
-       r = drm_sched_job_init(&job->base, &ring->sched, entity, owner);
+       r = drm_sched_job_init(&job->base, entity->sched, entity, owner);
        if (r)
                return r;
 
        job->owner = owner;
+       job->ring = to_amdgpu_ring(entity->sched);
        *f = dma_fence_get(&job->base.s_fence->finished);
        amdgpu_job_free_resources(job);
        amdgpu_ring_priority_get(job->ring, job->base.s_priority);
index 3151692312bdf6d9934bb3d63268c16c1eea1199..39f4230e1d37f3accbcac7f2fa7a4b1741b0b5e4 100644 (file)
@@ -67,7 +67,6 @@ int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev, unsigned size,
 
 void amdgpu_job_free_resources(struct amdgpu_job *job);
 void amdgpu_job_free(struct amdgpu_job *job);
-int amdgpu_job_submit(struct amdgpu_job *job, struct amdgpu_ring *ring,
-                     struct drm_sched_entity *entity, void *owner,
-                     struct dma_fence **f);
+int amdgpu_job_submit(struct amdgpu_job *job, struct drm_sched_entity *entity,
+                     void *owner, struct dma_fence **f);
 #endif
index a293f4e6760db15a0a93f5a2331d8ccd2e772ceb..5018c0b6bf1a4e0786cf494c9ce6cb4fafc71794 100644 (file)
@@ -44,6 +44,8 @@
 #define AMDGPU_FENCE_FLAG_INT           (1 << 1)
 #define AMDGPU_FENCE_FLAG_TC_WB_ONLY    (1 << 2)
 
+#define to_amdgpu_ring(s) container_of((s), struct amdgpu_ring, sched)
+
 enum amdgpu_ring_type {
        AMDGPU_RING_TYPE_GFX,
        AMDGPU_RING_TYPE_COMPUTE,
index 11a12483c995034dd563bee50bfcd84ca5cd5d89..9958e76d1c784e61fe322605f2b7b2c79ee78504 100644 (file)
@@ -2006,7 +2006,7 @@ static int amdgpu_map_buffer(struct ttm_buffer_object *bo,
        if (r)
                goto error_free;
 
-       r = amdgpu_job_submit(job, ring, &adev->mman.entity,
+       r = amdgpu_job_submit(job, &adev->mman.entity,
                              AMDGPU_FENCE_OWNER_UNDEFINED, &fence);
        if (r)
                goto error_free;
@@ -2083,7 +2083,7 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset,
                        DRM_ERROR("Error scheduling IBs (%d)\n", r);
                amdgpu_job_free(job);
        } else {
-               r = amdgpu_job_submit(job, ring, &adev->mman.entity,
+               r = amdgpu_job_submit(job, &adev->mman.entity,
                                      AMDGPU_FENCE_OWNER_UNDEFINED, fence);
                if (r)
                        goto error_free;
@@ -2175,7 +2175,7 @@ int amdgpu_fill_buffer(struct amdgpu_bo *bo,
 
        amdgpu_ring_pad_ib(ring, &job->ibs[0]);
        WARN_ON(job->ibs[0].length_dw > num_dw);
-       r = amdgpu_job_submit(job, ring, &adev->mman.entity,
+       r = amdgpu_job_submit(job, &adev->mman.entity,
                              AMDGPU_FENCE_OWNER_UNDEFINED, fence);
        if (r)
                goto error_free;
index a6c2cace4b9dcc133dbc1e5e21ce6da293f981c1..848b2e89881852a89c3c64f2b7ae6964188ef2c9 100644 (file)
@@ -1074,7 +1074,7 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
                if (r)
                        goto err_free;
 
-               r = amdgpu_job_submit(job, ring, &adev->uvd.inst[ring->me].entity,
+               r = amdgpu_job_submit(job, &adev->uvd.inst[ring->me].entity,
                                      AMDGPU_FENCE_OWNER_UNDEFINED, &f);
                if (r)
                        goto err_free;
index ffb0fcc9707ec2eb53ab075a4f42afb339563f70..7dfb4c4b19c5fead6e0999e49cb244c15c38a378 100644 (file)
@@ -539,7 +539,7 @@ int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
 
                amdgpu_job_free(job);
        } else {
-               r = amdgpu_job_submit(job, ring, &ring->adev->vce.entity,
+               r = amdgpu_job_submit(job, &ring->adev->vce.entity,
                                      AMDGPU_FENCE_OWNER_UNDEFINED, &f);
                if (r)
                        goto err;
index 484e2c19c02788d191ca24bd12080ab2d66089a3..5d3d783f2d7276fc5dad6d7884d92b9c30519d55 100644 (file)
@@ -425,8 +425,8 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
        if (r)
                goto error_free;
 
-       r = amdgpu_job_submit(job, ring, &vm->entity,
-                             AMDGPU_FENCE_OWNER_UNDEFINED, &fence);
+       r = amdgpu_job_submit(job, &vm->entity, AMDGPU_FENCE_OWNER_UNDEFINED,
+                             &fence);
        if (r)
                goto error_free;
 
@@ -1120,8 +1120,8 @@ restart:
                amdgpu_sync_resv(adev, &job->sync, root->tbo.resv,
                                 AMDGPU_FENCE_OWNER_VM, false);
                WARN_ON(params.ib->length_dw > ndw);
-               r = amdgpu_job_submit(job, ring, &vm->entity,
-                                     AMDGPU_FENCE_OWNER_VM, &fence);
+               r = amdgpu_job_submit(job, &vm->entity, AMDGPU_FENCE_OWNER_VM,
+                                     &fence);
                if (r)
                        goto error;
 
@@ -1485,8 +1485,7 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
 
        amdgpu_ring_pad_ib(ring, params.ib);
        WARN_ON(params.ib->length_dw > ndw);
-       r = amdgpu_job_submit(job, ring, &vm->entity,
-                             AMDGPU_FENCE_OWNER_VM, &f);
+       r = amdgpu_job_submit(job, &vm->entity, AMDGPU_FENCE_OWNER_VM, &f);
        if (r)
                goto error_free;
 
index 1c118c02e8cb06b3de1e7001e2b26441678f2fe0..591d1f21182393226e730a137b2a5452009cdf84 100644 (file)
@@ -320,7 +320,7 @@ static int uvd_v6_0_enc_get_destroy_msg(struct amdgpu_ring *ring,
 
                amdgpu_job_free(job);
        } else {
-               r = amdgpu_job_submit(job, ring, &ring->adev->vce.entity,
+               r = amdgpu_job_submit(job, &ring->adev->vce.entity,
                                      AMDGPU_FENCE_OWNER_UNDEFINED, &f);
                if (r)
                        goto err;
index d48bc3393545240bf52e4b355c14e191e935c5ea..ceb0a7037897ed34b079c352184b1f0607ba4713 100644 (file)
@@ -321,7 +321,7 @@ int uvd_v7_0_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
 
                amdgpu_job_free(job);
        } else {
-               r = amdgpu_job_submit(job, ring, &ring->adev->vce.entity,
+               r = amdgpu_job_submit(job, &ring->adev->vce.entity,
                                      AMDGPU_FENCE_OWNER_UNDEFINED, &f);
                if (r)
                        goto err;