Merge tag 'amd-drm-next-5.19-2022-04-15' of https://gitlab.freedesktop.org/agd5f...
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index a00022b6ee5b67aa600a8cd571e2f4ace672f614..5444515c1476e1f5f093497b156c0c62e3587a0a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -612,9 +612,8 @@ int amdgpu_bo_create(struct amdgpu_device *adev,
                if (unlikely(r))
                        goto fail_unreserve;
 
-               amdgpu_bo_fence(bo, fence, false);
-               dma_fence_put(bo->tbo.moving);
-               bo->tbo.moving = dma_fence_get(fence);
+               dma_resv_add_fence(bo->tbo.base.resv, fence,
+                                  DMA_RESV_USAGE_KERNEL);
                dma_fence_put(fence);
        }
        if (!bp->resv)
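
Note on the hunk above: the explicit bo->tbo.moving bookkeeping is gone; the clear/fill fence is now published directly into the buffer's reservation object with DMA_RESV_USAGE_KERNEL. Below is a minimal sketch of that pattern, not the driver code itself: publish_kernel_fence() is a hypothetical helper, and the caller is assumed to hold the reservation lock with a fence slot already reserved.

#include <linux/dma-fence.h>
#include <linux/dma-resv.h>

/* Hypothetical helper, not from the patch: publish a kernel-internal
 * fence (e.g. a buffer clear) in the reservation object.  The caller
 * is assumed to hold the reservation lock and to have reserved a
 * fence slot beforehand.
 */
static void publish_kernel_fence(struct dma_resv *resv,
				 struct dma_fence *fence)
{
	/* KERNEL usage: everyone, including kernel-internal moves,
	 * must wait for this fence before touching the buffer.
	 */
	dma_resv_add_fence(resv, fence, DMA_RESV_USAGE_KERNEL);
	dma_fence_put(fence);	/* the resv holds its own reference */
}
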
@@ -761,6 +760,11 @@ int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr)
        if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)
                return -EPERM;
 
+       r = dma_resv_wait_timeout(bo->tbo.base.resv, DMA_RESV_USAGE_KERNEL,
+                                 false, MAX_SCHEDULE_TIMEOUT);
+       if (r < 0)
+               return r;
+
        kptr = amdgpu_bo_kptr(bo);
        if (kptr) {
                if (ptr)
@@ -768,11 +772,6 @@ int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr)
                return 0;
        }
 
-       r = dma_resv_wait_timeout(bo->tbo.base.resv, false, false,
-                                 MAX_SCHEDULE_TIMEOUT);
-       if (r < 0)
-               return r;
-
        r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.resource->num_pages, &bo->kmap);
        if (r)
                return r;
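
Note on the two hunks above: the wait for in-flight kernel operations is moved ahead of the cached-kptr fast path, so even an already-mapped buffer is not handed to the CPU while a clear or move is still running, and dma_resv_wait_timeout() now names the usage class it waits for. A hedged illustration of that wait follows; wait_for_kernel_fences() is a made-up name, not the driver function.

#include <linux/dma-resv.h>
#include <linux/sched.h>

/* Illustration only: block until every DMA_RESV_USAGE_KERNEL fence
 * (clears, moves) has signalled before handing the buffer to the CPU.
 */
static int wait_for_kernel_fences(struct dma_resv *resv)
{
	long r;

	r = dma_resv_wait_timeout(resv, DMA_RESV_USAGE_KERNEL,
				  false, MAX_SCHEDULE_TIMEOUT);
	return r < 0 ? r : 0;
}
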
@@ -1390,11 +1389,17 @@ void amdgpu_bo_fence(struct amdgpu_bo *bo, struct dma_fence *fence,
                     bool shared)
 {
        struct dma_resv *resv = bo->tbo.base.resv;
+       int r;
 
-       if (shared)
-               dma_resv_add_shared_fence(resv, fence);
-       else
-               dma_resv_add_excl_fence(resv, fence);
+       r = dma_resv_reserve_fences(resv, 1);
+       if (r) {
+               /* As last resort on OOM we block for the fence */
+               dma_fence_wait(fence, false);
+               return;
+       }
+
+       dma_resv_add_fence(resv, fence, shared ? DMA_RESV_USAGE_READ :
+                          DMA_RESV_USAGE_WRITE);
 }
 
 /**
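
Note on the amdgpu_bo_fence() hunk above: the old exclusive/shared split is replaced by the single dma_resv_add_fence() entry point with an explicit usage, and a fence slot must be reserved first; if that reservation fails under memory pressure, the function falls back to waiting for the fence synchronously rather than dropping it. A hedged sketch of the reserve-then-add pattern from a caller's perspective; the names are illustrative, and the reservation lock is assumed to be held across both calls.

#include <linux/dma-resv.h>

/* Illustrative caller-side pattern, not taken from the patch: reserve
 * a slot, then add the fence with an explicit usage.
 */
static int track_access(struct dma_resv *resv, struct dma_fence *fence,
			bool write)
{
	int r;

	r = dma_resv_reserve_fences(resv, 1);
	if (r)
		return r;	/* typically -ENOMEM under memory pressure */

	dma_resv_add_fence(resv, fence,
			   write ? DMA_RESV_USAGE_WRITE : DMA_RESV_USAGE_READ);
	return 0;
}

Reserving the slot up front is what lets dma_resv_add_fence() itself never fail; the patch's OOM fallback simply blocks on the fence instead.
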