drm/amdgpu: Revert "add mutex for ba_va->valids/invalids"
authorChristian König <christian.koenig@amd.com>
Tue, 8 Mar 2016 17:03:27 +0000 (18:03 +0100)
committerAlex Deucher <alexander.deucher@amd.com>
Wed, 9 Mar 2016 18:04:02 +0000 (13:04 -0500)
No longer needed because we have to protect the elements on the list anyway.

This reverts commit 38bf516c75b4ef0f5c716e05fa9baab7c52d6c39.

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Chunming Zhou <david1.zhou@amd.com>
Acked-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
drivers/gpu/drm/amd/amdgpu/amdgpu.h
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c

index 28b4088b2530f8fed8b723b1af115e46bd823a13..9a03d566bf6d939216395f64e7ddf03d57a832ba 100644 (file)
@@ -484,7 +484,6 @@ struct amdgpu_bo_va_mapping {
 
 /* bo virtual addresses in a specific vm */
 struct amdgpu_bo_va {
-       struct mutex                    mutex;
        /* protected by bo being reserved */
        struct list_head                bo_list;
        struct fence                    *last_pt_update;
index 0e6d0d1f4041c2e65c5ddb4121af942cb4919e88..b6c011b83641205b5a7466d499017a80b8d2275a 100644 (file)
@@ -1009,9 +1009,8 @@ int amdgpu_vm_clear_invalids(struct amdgpu_device *adev,
                bo_va = list_first_entry(&vm->invalidated,
                        struct amdgpu_bo_va, vm_status);
                spin_unlock(&vm->status_lock);
-               mutex_lock(&bo_va->mutex);
+
                r = amdgpu_vm_bo_update(adev, bo_va, NULL);
-               mutex_unlock(&bo_va->mutex);
                if (r)
                        return r;
 
@@ -1055,7 +1054,7 @@ struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
        INIT_LIST_HEAD(&bo_va->valids);
        INIT_LIST_HEAD(&bo_va->invalids);
        INIT_LIST_HEAD(&bo_va->vm_status);
-       mutex_init(&bo_va->mutex);
+
        list_add_tail(&bo_va->bo_list, &bo->va);
 
        return bo_va;
@@ -1131,9 +1130,7 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
        mapping->offset = offset;
        mapping->flags = flags;
 
-       mutex_lock(&bo_va->mutex);
        list_add(&mapping->list, &bo_va->invalids);
-       mutex_unlock(&bo_va->mutex);
        interval_tree_insert(&mapping->it, &vm->va);
 
        /* Make sure the page tables are allocated */
@@ -1215,7 +1212,7 @@ int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
        bool valid = true;
 
        saddr /= AMDGPU_GPU_PAGE_SIZE;
-       mutex_lock(&bo_va->mutex);
+
        list_for_each_entry(mapping, &bo_va->valids, list) {
                if (mapping->it.start == saddr)
                        break;
@@ -1229,12 +1226,10 @@ int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
                                break;
                }
 
-               if (&mapping->list == &bo_va->invalids) {
-                       mutex_unlock(&bo_va->mutex);
+               if (&mapping->list == &bo_va->invalids)
                        return -ENOENT;
-               }
        }
-       mutex_unlock(&bo_va->mutex);
+
        list_del(&mapping->list);
        interval_tree_remove(&mapping->it, &vm->va);
        trace_amdgpu_vm_bo_unmap(bo_va, mapping);
@@ -1280,8 +1275,8 @@ void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
                interval_tree_remove(&mapping->it, &vm->va);
                kfree(mapping);
        }
+
        fence_put(bo_va->last_pt_update);
-       mutex_destroy(&bo_va->mutex);
        kfree(bo_va);
 }