drm/radeon: fence BO_VAs manually
author Christian König <christian.koenig@amd.com>
Wed, 19 Nov 2014 13:01:26 +0000 (14:01 +0100)
committer Alex Deucher <alexander.deucher@amd.com>
Thu, 20 Nov 2014 18:00:18 +0000 (13:00 -0500)
This allows us to finally remove the VM fence and so allow
concurrent use of the VM from different engines.

Signed-off-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
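
Illustrative sketch (not part of the patch): the core idea is to move the
"last page table update" fence from the VM as a whole onto each BO_VA
mapping, so different engines no longer serialize on a single per-VM fence.
The small user-space C model below only mirrors that ownership change; the
names struct mapping, fence_get and fence_put are invented for the sketch,
the driver equivalents being struct radeon_bo_va with its new
last_pt_update member and radeon_fence_ref/radeon_fence_unref.

        #include <stdio.h>
        #include <stdlib.h>

        /* Stand-in for a refcounted radeon_fence. */
        struct fence {
                unsigned refcount;
                unsigned seq;           /* pretend PT update sequence number */
        };

        static struct fence *fence_get(struct fence *f)
        {
                if (f)
                        f->refcount++;
                return f;
        }

        static void fence_put(struct fence **f)
        {
                if (*f && --(*f)->refcount == 0)
                        free(*f);
                *f = NULL;
        }

        /* Stand-in for radeon_bo_va: each mapping keeps its own update fence. */
        struct mapping {
                struct fence *last_pt_update;
        };

        /* Analogue of the change in radeon_vm_bo_update(): remember the fence
         * per mapping instead of per VM, so only users of this mapping wait. */
        static void mapping_set_update(struct mapping *m, struct fence *f)
        {
                fence_put(&m->last_pt_update);
                m->last_pt_update = fence_get(f);
        }

        int main(void)
        {
                struct fence *f = calloc(1, sizeof(*f));
                struct mapping a = { 0 }, b = { 0 };

                f->refcount = 1;
                f->seq = 42;

                mapping_set_update(&a, f);  /* engine A syncs to a.last_pt_update */
                mapping_set_update(&b, f);  /* engine B tracks its own reference  */

                printf("a waits for %u, b waits for %u\n",
                       a.last_pt_update->seq, b.last_pt_update->seq);

                fence_put(&a.last_pt_update);
                fence_put(&b.last_pt_update);
                fence_put(&f);
                return 0;
        }

As in the patch below, whoever frees a mapping is now responsible for
dropping its last_pt_update reference, which is why every kfree(bo_va)
path gains a radeon_fence_unref().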
drivers/gpu/drm/radeon/radeon.h
drivers/gpu/drm/radeon/radeon_cs.c
drivers/gpu/drm/radeon/radeon_vm.c

diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 79f5f5bf4c0c2e43bf45212f38224d16f8a02589..3207bb60715e961b72f083a0fbbb034a3ee0b2a2 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -456,6 +456,7 @@ struct radeon_bo_va {
        struct list_head                bo_list;
        uint32_t                        flags;
        uint64_t                        addr;
+       struct radeon_fence             *last_pt_update;
        unsigned                        ref_count;
 
        /* protected by vm mutex */
@@ -915,6 +916,8 @@ struct radeon_vm_id {
 };
 
 struct radeon_vm {
+       struct mutex            mutex;
+
        struct rb_root          va;
 
        /* BOs moved, but not yet updated in the PT */
@@ -932,10 +935,6 @@ struct radeon_vm {
 
        struct radeon_bo_va     *ib_bo_va;
 
-       struct mutex            mutex;
-       /* last fence for cs using this vm */
-       struct radeon_fence     *fence;
-
        /* for id and flush management per ring */
        struct radeon_vm_id     ids[RADEON_NUM_RINGS];
 };
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index 30437aa00014f846c72fd001f6205348b8b2546f..75f22e5e999fc455565f9bad0a7e2c2322199a9f 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -505,6 +505,9 @@ static int radeon_bo_vm_update_pte(struct radeon_cs_parser *p,
        if (r)
                return r;
 
+       radeon_sync_resv(p->rdev, &p->ib.sync, vm->page_directory->tbo.resv,
+                        true);
+
        r = radeon_vm_clear_freed(rdev, vm);
        if (r)
                return r;
@@ -536,6 +539,8 @@ static int radeon_bo_vm_update_pte(struct radeon_cs_parser *p,
                r = radeon_vm_bo_update(rdev, bo_va, &bo->tbo.mem);
                if (r)
                        return r;
+
+               radeon_sync_fence(&p->ib.sync, bo_va->last_pt_update);
        }
 
        return radeon_vm_clear_invalids(rdev, vm);
@@ -580,7 +585,6 @@ static int radeon_cs_ib_vm_chunk(struct radeon_device *rdev,
                        DRM_ERROR("Failed to sync rings: %i\n", r);
                goto out;
        }
-       radeon_sync_fence(&parser->ib.sync, vm->fence);
 
        if ((rdev->family >= CHIP_TAHITI) &&
            (parser->chunk_const_ib_idx != -1)) {
diff --git a/drivers/gpu/drm/radeon/radeon_vm.c b/drivers/gpu/drm/radeon/radeon_vm.c
index e38efe4962f3411e4e98f4659b6aeb34d4457ed7..f45761469e9547bfa2f7f66a1509e327e0d65e9d 100644
--- a/drivers/gpu/drm/radeon/radeon_vm.c
+++ b/drivers/gpu/drm/radeon/radeon_vm.c
@@ -275,9 +275,6 @@ void radeon_vm_fence(struct radeon_device *rdev,
 {
        unsigned vm_id = vm->ids[fence->ring].id;
 
-       radeon_fence_unref(&vm->fence);
-       vm->fence = radeon_fence_ref(fence);
-
        radeon_fence_unref(&rdev->vm_manager.active[vm_id]);
        rdev->vm_manager.active[vm_id] = radeon_fence_ref(fence);
 
@@ -707,8 +704,6 @@ int radeon_vm_update_page_directory(struct radeon_device *rdev,
                }
                ib.fence->is_vm_update = true;
                radeon_bo_fence(pd, ib.fence, false);
-               radeon_fence_unref(&vm->fence);
-               vm->fence = radeon_fence_ref(ib.fence);
        }
        radeon_ib_free(rdev, &ib);
 
@@ -999,8 +994,8 @@ int radeon_vm_bo_update(struct radeon_device *rdev,
        }
        ib.fence->is_vm_update = true;
        radeon_vm_fence_pts(vm, bo_va->it.start, bo_va->it.last + 1, ib.fence);
-       radeon_fence_unref(&vm->fence);
-       vm->fence = radeon_fence_ref(ib.fence);
+       radeon_fence_unref(&bo_va->last_pt_update);
+       bo_va->last_pt_update = radeon_fence_ref(ib.fence);
        radeon_ib_free(rdev, &ib);
 
        return 0;
@@ -1026,6 +1021,7 @@ int radeon_vm_clear_freed(struct radeon_device *rdev,
        list_for_each_entry_safe(bo_va, tmp, &vm->freed, vm_status) {
                r = radeon_vm_bo_update(rdev, bo_va, NULL);
                radeon_bo_unref(&bo_va->bo);
+               radeon_fence_unref(&bo_va->last_pt_update);
                kfree(bo_va);
                if (r)
                        return r;
@@ -1084,6 +1080,7 @@ void radeon_vm_bo_rmv(struct radeon_device *rdev,
                bo_va->bo = radeon_bo_ref(bo_va->bo);
                list_add(&bo_va->vm_status, &vm->freed);
        } else {
+               radeon_fence_unref(&bo_va->last_pt_update);
                kfree(bo_va);
        }
 
@@ -1130,8 +1127,6 @@ int radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm)
        int i, r;
 
        vm->ib_bo_va = NULL;
-       vm->fence = NULL;
-
        for (i = 0; i < RADEON_NUM_RINGS; ++i) {
                vm->ids[i].id = 0;
                vm->ids[i].flushed_updates = NULL;
@@ -1192,11 +1187,13 @@ void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm)
                if (!r) {
                        list_del_init(&bo_va->bo_list);
                        radeon_bo_unreserve(bo_va->bo);
+                       radeon_fence_unref(&bo_va->last_pt_update);
                        kfree(bo_va);
                }
        }
        list_for_each_entry_safe(bo_va, tmp, &vm->freed, vm_status) {
                radeon_bo_unref(&bo_va->bo);
+               radeon_fence_unref(&bo_va->last_pt_update);
                kfree(bo_va);
        }
 
@@ -1206,8 +1203,6 @@ void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm)
 
        radeon_bo_unref(&vm->page_directory);
 
-       radeon_fence_unref(&vm->fence);
-
        for (i = 0; i < RADEON_NUM_RINGS; ++i) {
                radeon_fence_unref(&vm->ids[i].flushed_updates);
                radeon_fence_unref(&vm->ids[i].last_id_use);