drm/radeon: define new SA interface v3
author Christian König <deathsimple@vodafone.de>
Wed, 9 May 2012 13:34:54 +0000 (15:34 +0200)
committer Dave Airlie <airlied@redhat.com>
Wed, 9 May 2012 16:22:37 +0000 (17:22 +0100)
Define the interface without modifying the allocation
algorithm in any way.

v2: rebase on top of fence new uint64 patch
v3: add ring to debugfs output

Signed-off-by: Jerome Glisse <jglisse@redhat.com>
Signed-off-by: Christian König <deathsimple@vodafone.de>
Signed-off-by: Dave Airlie <airlied@redhat.com>
drivers/gpu/drm/radeon/radeon.h
drivers/gpu/drm/radeon/radeon_gart.c
drivers/gpu/drm/radeon/radeon_object.h
drivers/gpu/drm/radeon/radeon_ring.c
drivers/gpu/drm/radeon/radeon_sa.c
drivers/gpu/drm/radeon/radeon_semaphore.c

index 9374ab1b42643e7a0bc585fb1624a1c8bf107ef9..ada70d157e13b3752bf582b8bd12402b4e8fe8aa 100644 (file)
@@ -398,6 +398,7 @@ struct radeon_sa_bo {
        struct radeon_sa_manager        *manager;
        unsigned                        soffset;
        unsigned                        eoffset;
+       struct radeon_fence             *fence;
 };
 
 /*
index c5789efb78a58544446ff9e3558441f1db764852..53dba8e5942f3d9e38decf8f90ad2fc38b312a1c 100644 (file)
@@ -326,7 +326,7 @@ static void radeon_vm_unbind_locked(struct radeon_device *rdev,
        rdev->vm_manager.use_bitmap &= ~(1 << vm->id);
        list_del_init(&vm->list);
        vm->id = -1;
-       radeon_sa_bo_free(rdev, &vm->sa_bo);
+       radeon_sa_bo_free(rdev, &vm->sa_bo, NULL);
        vm->pt = NULL;
 
        list_for_each_entry(bo_va, &vm->va, vm_list) {
@@ -395,7 +395,7 @@ int radeon_vm_bind(struct radeon_device *rdev, struct radeon_vm *vm)
 retry:
        r = radeon_sa_bo_new(rdev, &rdev->vm_manager.sa_manager, &vm->sa_bo,
                             RADEON_GPU_PAGE_ALIGN(vm->last_pfn * 8),
-                            RADEON_GPU_PAGE_SIZE);
+                            RADEON_GPU_PAGE_SIZE, false);
        if (r) {
                if (list_empty(&rdev->vm_manager.lru_vm)) {
                        return r;
@@ -426,7 +426,7 @@ retry_id:
        /* do hw bind */
        r = rdev->vm_manager.funcs->bind(rdev, vm, id);
        if (r) {
-               radeon_sa_bo_free(rdev, &vm->sa_bo);
+               radeon_sa_bo_free(rdev, &vm->sa_bo, NULL);
                return r;
        }
        rdev->vm_manager.use_bitmap |= 1 << id;
index 4fc7f07e06d18ef72b23a5b6abf9b9f1af08e08f..befec7d12132a131e58bb54773f80914e9bca761 100644 (file)
@@ -169,9 +169,10 @@ extern int radeon_sa_bo_manager_suspend(struct radeon_device *rdev,
 extern int radeon_sa_bo_new(struct radeon_device *rdev,
                            struct radeon_sa_manager *sa_manager,
                            struct radeon_sa_bo **sa_bo,
-                           unsigned size, unsigned align);
+                           unsigned size, unsigned align, bool block);
 extern void radeon_sa_bo_free(struct radeon_device *rdev,
-                             struct radeon_sa_bo **sa_bo);
+                             struct radeon_sa_bo **sa_bo,
+                             struct radeon_fence *fence);
 #if defined(CONFIG_DEBUG_FS)
 extern void radeon_sa_bo_dump_debug_info(struct radeon_sa_manager *sa_manager,
                                         struct seq_file *m);
index 45adb37152e170bbc2a04d67f2aeb0270a96b5d9..1748d939657cee206ef011b69f386d40602e7b2c 100644 (file)
@@ -85,7 +85,7 @@ bool radeon_ib_try_free(struct radeon_device *rdev, struct radeon_ib *ib)
        if (ib->fence && ib->fence->seq < RADEON_FENCE_NOTEMITED_SEQ) {
                if (radeon_fence_signaled(ib->fence)) {
                        radeon_fence_unref(&ib->fence);
-                       radeon_sa_bo_free(rdev, &ib->sa_bo);
+                       radeon_sa_bo_free(rdev, &ib->sa_bo, NULL);
                        done = true;
                }
        }
@@ -124,7 +124,7 @@ retry:
                if (rdev->ib_pool.ibs[idx].fence == NULL) {
                        r = radeon_sa_bo_new(rdev, &rdev->ib_pool.sa_manager,
                                             &rdev->ib_pool.ibs[idx].sa_bo,
-                                            size, 256);
+                                            size, 256, false);
                        if (!r) {
                                *ib = &rdev->ib_pool.ibs[idx];
                                (*ib)->ptr = radeon_sa_bo_cpu_addr((*ib)->sa_bo);
@@ -173,7 +173,7 @@ void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib **ib)
        }
        radeon_mutex_lock(&rdev->ib_pool.mutex);
        if (tmp->fence && tmp->fence->seq == RADEON_FENCE_NOTEMITED_SEQ) {
-               radeon_sa_bo_free(rdev, &tmp->sa_bo);
+               radeon_sa_bo_free(rdev, &tmp->sa_bo, NULL);
                radeon_fence_unref(&tmp->fence);
        }
        radeon_mutex_unlock(&rdev->ib_pool.mutex);
@@ -247,7 +247,7 @@ void radeon_ib_pool_fini(struct radeon_device *rdev)
        radeon_mutex_lock(&rdev->ib_pool.mutex);
        if (rdev->ib_pool.ready) {
                for (i = 0; i < RADEON_IB_POOL_SIZE; i++) {
-                       radeon_sa_bo_free(rdev, &rdev->ib_pool.ibs[i].sa_bo);
+                       radeon_sa_bo_free(rdev, &rdev->ib_pool.ibs[i].sa_bo, NULL);
                        radeon_fence_unref(&rdev->ib_pool.ibs[i].fence);
                }
                radeon_sa_bo_manager_fini(rdev, &rdev->ib_pool.sa_manager);
index 625f2d4f638a1a6fe25d1dd9e5a232772d8bbe71..90ee8add2443a838d3e23486b9ce417612825a9b 100644 (file)
@@ -129,20 +129,32 @@ int radeon_sa_bo_manager_suspend(struct radeon_device *rdev,
  *
  * Alignment can't be bigger than page size
  */
+
+static void radeon_sa_bo_remove_locked(struct radeon_sa_bo *sa_bo)
+{
+       list_del(&sa_bo->list);
+       radeon_fence_unref(&sa_bo->fence);
+       kfree(sa_bo);
+}
+
 int radeon_sa_bo_new(struct radeon_device *rdev,
                     struct radeon_sa_manager *sa_manager,
                     struct radeon_sa_bo **sa_bo,
-                    unsigned size, unsigned align)
+                    unsigned size, unsigned align, bool block)
 {
-       struct radeon_sa_bo *tmp;
+       struct radeon_fence *fence = NULL;
+       struct radeon_sa_bo *tmp, *next;
        struct list_head *head;
        unsigned offset = 0, wasted = 0;
+       int r;
 
        BUG_ON(align > RADEON_GPU_PAGE_SIZE);
        BUG_ON(size > sa_manager->size);
 
        *sa_bo = kmalloc(sizeof(struct radeon_sa_bo), GFP_KERNEL);
 
+retry:
+
        spin_lock(&sa_manager->lock);
 
        /* no one ? */
@@ -153,7 +165,17 @@ int radeon_sa_bo_new(struct radeon_device *rdev,
 
        /* look for a hole big enough */
        offset = 0;
-       list_for_each_entry(tmp, &sa_manager->sa_bo, list) {
+       list_for_each_entry_safe(tmp, next, &sa_manager->sa_bo, list) {
+               /* try to free this object */
+               if (tmp->fence) {
+                       if (radeon_fence_signaled(tmp->fence)) {
+                               radeon_sa_bo_remove_locked(tmp);
+                               continue;
+                       } else {
+                               fence = tmp->fence;
+                       }
+               }
+
                /* room before this object ? */
                if (offset < tmp->soffset && (tmp->soffset - offset) >= size) {
                        head = tmp->list.prev;
@@ -178,6 +200,13 @@ int radeon_sa_bo_new(struct radeon_device *rdev,
        if ((sa_manager->size - offset) < size) {
                /* failed to find somethings big enough */
                spin_unlock(&sa_manager->lock);
+               if (block && fence) {
+                       r = radeon_fence_wait(fence, false);
+                       if (r)
+                               return r;
+
+                       goto retry;
+               }
                kfree(*sa_bo);
                *sa_bo = NULL;
                return -ENOMEM;
@@ -192,15 +221,22 @@ out:
        return 0;
 }
 
-void radeon_sa_bo_free(struct radeon_device *rdev, struct radeon_sa_bo **sa_bo)
+void radeon_sa_bo_free(struct radeon_device *rdev, struct radeon_sa_bo **sa_bo,
+                      struct radeon_fence *fence)
 {
+       struct radeon_sa_manager *sa_manager;
+
        if (!sa_bo || !*sa_bo)
                return;
 
-       spin_lock(&(*sa_bo)->manager->lock);
-       list_del_init(&(*sa_bo)->list);
-       spin_unlock(&(*sa_bo)->manager->lock);
-       kfree(*sa_bo);
+       sa_manager = (*sa_bo)->manager;
+       spin_lock(&sa_manager->lock);
+       if (fence && fence->seq && fence->seq < RADEON_FENCE_NOTEMITED_SEQ) {
+               (*sa_bo)->fence = radeon_fence_ref(fence);
+       } else {
+               radeon_sa_bo_remove_locked(*sa_bo);
+       }
+       spin_unlock(&sa_manager->lock);
        *sa_bo = NULL;
 }
 
@@ -212,8 +248,14 @@ void radeon_sa_bo_dump_debug_info(struct radeon_sa_manager *sa_manager,
 
        spin_lock(&sa_manager->lock);
        list_for_each_entry(i, &sa_manager->sa_bo, list) {
-               seq_printf(m, "[%08x %08x] size %4d [%p]\n",
+               seq_printf(m, "[%08x %08x] size %4d (%p)",
                           i->soffset, i->eoffset, i->eoffset - i->soffset, i);
+               if (i->fence) {
+                       seq_printf(m, " protected by %Ld (%p) on ring %d\n",
+                                  i->fence->seq, i->fence, i->fence->ring);
+               } else {
+                       seq_printf(m, "\n");
+               }
        }
        spin_unlock(&sa_manager->lock);
 }
index d518d326235d30c71602255cfead04c80839505c..dbde874b2e5e537257f7de8fb12f99afcfc1d097 100644 (file)
@@ -72,7 +72,7 @@ static int radeon_semaphore_add_bo(struct radeon_device *rdev)
 static void radeon_semaphore_del_bo_locked(struct radeon_device *rdev,
                                           struct radeon_semaphore_bo *bo)
 {
-       radeon_sa_bo_free(rdev, &bo->ib->sa_bo);
+       radeon_sa_bo_free(rdev, &bo->ib->sa_bo, NULL);
        radeon_fence_unref(&bo->ib->fence);
        list_del(&bo->list);
        kfree(bo);