drm/amdgpu: enable the static csa when mcbp enabled
author: Jack Xiao <Jack.Xiao@amd.com>
Thu, 10 Jan 2019 07:50:10 +0000 (15:50 +0800)
committer: Alex Deucher <alexander.deucher@amd.com>
Fri, 21 Jun 2019 14:36:54 +0000 (09:36 -0500)
CSA is the Context Save Area used for mid-command-buffer preemption (MCBP).
Previously the static CSA was only allocated and mapped for SR-IOV virtual
functions; this change also enables it on bare metal when MCBP is enabled.

Acked-by: Hawking Zhang <Hawking.Zhang@amd.com>
Signed-off-by: Jack Xiao <Jack.Xiao@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c

index 3e2da24cd17a009c954141cd2e2c992ca3528fa5..c25e1ebc76c36051a91925319ee695370314f358 100644 (file)
@@ -872,7 +872,7 @@ static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p)
        if (r)
                return r;
 
-       if (amdgpu_sriov_vf(adev)) {
+       if (amdgpu_mcbp || amdgpu_sriov_vf(adev)) {
                struct dma_fence *f;
 
                bo_va = fpriv->csa_va;
@@ -961,7 +961,8 @@ static int amdgpu_cs_ib_fill(struct amdgpu_device *adev,
                if (chunk->chunk_id != AMDGPU_CHUNK_ID_IB)
                        continue;
 
-               if (chunk_ib->ip_type == AMDGPU_HW_IP_GFX && amdgpu_sriov_vf(adev)) {
+               if (chunk_ib->ip_type == AMDGPU_HW_IP_GFX &&
+                   (amdgpu_mcbp || amdgpu_sriov_vf(adev))) {
                        if (chunk_ib->flags & AMDGPU_IB_FLAG_PREEMPT) {
                                if (chunk_ib->flags & AMDGPU_IB_FLAG_CE)
                                        ce_preempt++;
index f83b8aa9da78934b014cc1ea5ddfd73475bca3ef..dfb1cca869aa7f60725c7e7027ef9cff7c012c79 100644 (file)
@@ -1722,7 +1722,7 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev)
                        adev->ip_blocks[i].status.hw = true;
 
                        /* right after GMC hw init, we create CSA */
-                       if (amdgpu_sriov_vf(adev)) {
+                       if (amdgpu_mcbp || amdgpu_sriov_vf(adev)) {
                                r = amdgpu_allocate_static_csa(adev, &adev->virt.csa_obj,
                                                                AMDGPU_GEM_DOMAIN_VRAM,
                                                                AMDGPU_CSA_SIZE);
index fe393a46f8811dc452dc3db6d062d0aa850e6b47..c84c5b483e20074658dc2d89ee9e290078d5831f 100644 (file)
@@ -217,9 +217,10 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
 
                /* drop preamble IBs if we don't have a context switch */
                if ((ib->flags & AMDGPU_IB_FLAG_PREAMBLE) &&
-                       skip_preamble &&
-                       !(status & AMDGPU_PREAMBLE_IB_PRESENT_FIRST) &&
-                       !amdgpu_sriov_vf(adev)) /* for SRIOV preemption, Preamble CE ib must be inserted anyway */
+                   skip_preamble &&
+                   !(status & AMDGPU_PREAMBLE_IB_PRESENT_FIRST) &&
+                   !amdgpu_mcbp &&
+                   !amdgpu_sriov_vf(adev)) /* for SRIOV preemption, Preamble CE ib must be inserted anyway */
                        continue;
 
                amdgpu_ring_emit_ib(ring, job, ib, status);
index 0f7cc98961d532825d3e0ec4b9e26deaadd211bf..429d0b6c45ef3377cdb91b0b446d203682a50b43 100644 (file)
@@ -709,7 +709,7 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
                dev_info.ids_flags = 0;
                if (adev->flags & AMD_IS_APU)
                        dev_info.ids_flags |= AMDGPU_IDS_FLAGS_FUSION;
-               if (amdgpu_sriov_vf(adev))
+               if (amdgpu_mcbp || amdgpu_sriov_vf(adev))
                        dev_info.ids_flags |= AMDGPU_IDS_FLAGS_PREEMPTION;
 
                vm_size = adev->vm_manager.max_pfn * AMDGPU_GPU_PAGE_SIZE;
@@ -1003,7 +1003,7 @@ int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
                goto error_vm;
        }
 
-       if (amdgpu_sriov_vf(adev)) {
+       if (amdgpu_mcbp || amdgpu_sriov_vf(adev)) {
                uint64_t csa_addr = amdgpu_csa_vaddr(adev) & AMDGPU_GMC_HOLE_MASK;
 
                r = amdgpu_map_static_csa(adev, &fpriv->vm, adev->virt.csa_obj,
@@ -1066,7 +1066,7 @@ void amdgpu_driver_postclose_kms(struct drm_device *dev,
 
        amdgpu_vm_bo_rmv(adev, fpriv->prt_va);
 
-       if (amdgpu_sriov_vf(adev)) {
+       if (amdgpu_mcbp || amdgpu_sriov_vf(adev)) {
                /* TODO: how to handle reserve failure */
                BUG_ON(amdgpu_bo_reserve(adev->virt.csa_obj, true));
                amdgpu_vm_bo_rmv(adev, fpriv->csa_va);