drm/radeon: allow UVD to use a second 256MB segment
author    Christian König <christian.koenig@amd.com>
          Thu, 21 Aug 2014 10:18:12 +0000 (12:18 +0200)
committer Alex Deucher <alexander.deucher@amd.com>
          Thu, 28 Aug 2014 02:46:08 +0000 (22:46 -0400)
Allowing UVD to place buffers in a second 256MB segment improves concurrent stream decoding.

Signed-off-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
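
For context, a minimal standalone sketch of the placement arithmetic this
patch applies (simplified stand-in types, PAGE_SHIFT of 12 assumed; not the
actual radeon/TTM structures):

#include <stdio.h>

#define PAGE_SHIFT 12				/* assumed 4K pages */
#define UVD_SEGMENT_SIZE (256u * 1024 * 1024)	/* one 256MB UVD segment */

/* simplified stand-in for a TTM placement entry */
struct placement {
	unsigned int fpfn;	/* first allowed page frame number */
	unsigned int lpfn;	/* last allowed page frame number */
};

int main(void)
{
	/* first 256MB segment, as the existing code already enforces */
	struct placement seg0 = {
		.fpfn = 0,
		.lpfn = UVD_SEGMENT_SIZE >> PAGE_SHIFT,
	};

	/* second 256MB segment added by this patch: the same window,
	 * shifted up by 256MB */
	struct placement seg1 = seg0;
	seg1.fpfn += UVD_SEGMENT_SIZE >> PAGE_SHIFT;
	seg1.lpfn += UVD_SEGMENT_SIZE >> PAGE_SHIFT;

	printf("segment 0: fpfn=%u lpfn=%u\n", seg0.fpfn, seg0.lpfn);
	printf("segment 1: fpfn=%u lpfn=%u\n", seg1.fpfn, seg1.lpfn);
	return 0;
}
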
drivers/gpu/drm/radeon/radeon.h
drivers/gpu/drm/radeon/radeon_object.c
drivers/gpu/drm/radeon/radeon_uvd.c

diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index a5ac95bf05ea6946a6a936cdfa2e5454e846afc8..83a24614138a07aeff2c1d2705775de65b1e5e30 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -1642,7 +1642,8 @@ int radeon_uvd_get_create_msg(struct radeon_device *rdev, int ring,
                              uint32_t handle, struct radeon_fence **fence);
 int radeon_uvd_get_destroy_msg(struct radeon_device *rdev, int ring,
                               uint32_t handle, struct radeon_fence **fence);
-void radeon_uvd_force_into_uvd_segment(struct radeon_bo *rbo);
+void radeon_uvd_force_into_uvd_segment(struct radeon_bo *rbo,
+                                      uint32_t allowed_domains);
 void radeon_uvd_free_handles(struct radeon_device *rdev,
                             struct drm_file *filp);
 int radeon_uvd_cs_parse(struct radeon_cs_parser *parser);
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index 0129c7efae3b929d854a9c8f6bd69c5f94586a5c..c97a42432e2b895dd65b478995ddbef004aacd1d 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -491,6 +491,7 @@ int radeon_bo_list_validate(struct radeon_device *rdev,
                bo = lobj->robj;
                if (!bo->pin_count) {
                        u32 domain = lobj->prefered_domains;
+                       u32 allowed = lobj->allowed_domains;
                        u32 current_domain =
                                radeon_mem_type_to_domain(bo->tbo.mem.mem_type);
 
@@ -502,7 +503,7 @@ int radeon_bo_list_validate(struct radeon_device *rdev,
                         * into account. We don't want to disallow buffer moves
                         * completely.
                         */
-                       if ((lobj->allowed_domains & current_domain) != 0 &&
+                       if ((allowed & current_domain) != 0 &&
                            (domain & current_domain) == 0 && /* will be moved */
                            bytes_moved > bytes_moved_threshold) {
                                /* don't move it */
@@ -512,7 +513,7 @@ int radeon_bo_list_validate(struct radeon_device *rdev,
                retry:
                        radeon_ttm_placement_from_domain(bo, domain);
                        if (ring == R600_RING_TYPE_UVD_INDEX)
-                               radeon_uvd_force_into_uvd_segment(bo);
+                               radeon_uvd_force_into_uvd_segment(bo, allowed);
 
                        initial_bytes_moved = atomic64_read(&rdev->num_bytes_moved);
                        r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
diff --git a/drivers/gpu/drm/radeon/radeon_uvd.c b/drivers/gpu/drm/radeon/radeon_uvd.c
index 464d80145dfec027f965eca010e08e6b2da50b9c..1dedadd8f5df33a6a57f19c47f50dd821350e49d 100644
--- a/drivers/gpu/drm/radeon/radeon_uvd.c
+++ b/drivers/gpu/drm/radeon/radeon_uvd.c
@@ -254,7 +254,8 @@ int radeon_uvd_resume(struct radeon_device *rdev)
        return 0;
 }
 
-void radeon_uvd_force_into_uvd_segment(struct radeon_bo *rbo)
+void radeon_uvd_force_into_uvd_segment(struct radeon_bo *rbo,
+                                      uint32_t allowed_domains)
 {
        int i;
 
@@ -262,6 +263,21 @@ void radeon_uvd_force_into_uvd_segment(struct radeon_bo *rbo)
                rbo->placements[i].fpfn = 0 >> PAGE_SHIFT;
                rbo->placements[i].lpfn = (256 * 1024 * 1024) >> PAGE_SHIFT;
        }
+
+       /* If it must be in VRAM it must be in the first segment as well */
+       if (allowed_domains == RADEON_GEM_DOMAIN_VRAM)
+               return;
+
+       /* abort if we already have more than one placement */
+       if (rbo->placement.num_placement > 1)
+               return;
+
+       /* add another 256MB segment */
+       rbo->placements[1] = rbo->placements[0];
+       rbo->placements[1].fpfn += (256 * 1024 * 1024) >> PAGE_SHIFT;
+       rbo->placements[1].lpfn += (256 * 1024 * 1024) >> PAGE_SHIFT;
+       rbo->placement.num_placement++;
+       rbo->placement.num_busy_placement++;
 }
 
 void radeon_uvd_free_handles(struct radeon_device *rdev, struct drm_file *filp)
@@ -652,7 +668,7 @@ static int radeon_uvd_send_msg(struct radeon_device *rdev,
                return r;
 
        radeon_ttm_placement_from_domain(bo, RADEON_GEM_DOMAIN_VRAM);
-       radeon_uvd_force_into_uvd_segment(bo);
+       radeon_uvd_force_into_uvd_segment(bo, RADEON_GEM_DOMAIN_VRAM);
 
        r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
        if (r)
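
As a rough illustration of the gating the updated helper performs, a
standalone sketch of its two early returns (may_add_second_segment() is a
hypothetical helper written for illustration; the domain flag values mirror
the radeon UAPI):

#include <stdbool.h>
#include <stdio.h>

/* domain flags as defined in include/uapi/drm/radeon_drm.h */
#define RADEON_GEM_DOMAIN_CPU	0x1
#define RADEON_GEM_DOMAIN_GTT	0x2
#define RADEON_GEM_DOMAIN_VRAM	0x4

/*
 * Mirror the two early returns in radeon_uvd_force_into_uvd_segment():
 * VRAM-only buffers must stay in the first 256MB segment, and buffers
 * that already have more than one placement are left untouched.
 */
static bool may_add_second_segment(unsigned int allowed_domains,
				   unsigned int num_placement)
{
	if (allowed_domains == RADEON_GEM_DOMAIN_VRAM)
		return false;
	if (num_placement > 1)
		return false;
	return true;
}

int main(void)
{
	/* message buffers are validated VRAM-only (radeon_uvd_send_msg) */
	printf("VRAM-only msg buffer: %d\n",
	       may_add_second_segment(RADEON_GEM_DOMAIN_VRAM, 1));

	/* a user buffer that is also allowed to fall back to GTT */
	printf("VRAM|GTT user buffer: %d\n",
	       may_add_second_segment(RADEON_GEM_DOMAIN_VRAM |
				      RADEON_GEM_DOMAIN_GTT, 1));
	return 0;
}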