drm/radeon/kms: set gart pages to invalid on unbind and point to dummy page
author    Dave Airlie <airlied@redhat.com>
          Fri, 5 Feb 2010 06:00:07 +0000 (16:00 +1000)
committer Dave Airlie <airlied@redhat.com>
          Thu, 11 Feb 2010 09:11:32 +0000 (19:11 +1000)
This uses a new entrypoint to invalidate GART entries instead of writing 0.
Rather than pointing empty entries at address 0, point them at a dummy
page. This might help avoid a hard lockup if, for some wrong reason,
the GPU tries to access an unmapped GART entry.

I'm not 100% sure this is going to work; we probably need to allocate
a dummy page and point all the GTT entries at it, similar to what AGP does,
but we can test this first I suppose.
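
For illustration, here is a minimal userspace sketch of the pattern the
patch applies. The names (gart_table, gart_set_page(), gart_unbind(),
NUM_ENTRIES) and the calloc() stand-in for alloc_page()/pci_map_page()
are hypothetical simplifications, not the driver's API; the point is that
an unbound slot is rewritten with the dummy page's address rather than 0,
so a stray GPU access lands on the dummy page instead of at bus address 0.

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    #define NUM_ENTRIES   8            /* hypothetical table size */
    #define GPU_PAGE_SIZE 4096u

    static uint64_t gart_table[NUM_ENTRIES]; /* simulated GART page table */
    static uint64_t dummy_addr;              /* address of the dummy page */

    /* Write one GART entry; radeon_gart_set_page() plays this role. */
    static void gart_set_page(unsigned int i, uint64_t addr)
    {
            gart_table[i] = addr;
    }

    /* Unbind a slot: point it back at the dummy page instead of writing 0. */
    static void gart_unbind(unsigned int i)
    {
            gart_set_page(i, dummy_addr);
    }

    int main(void)
    {
            /* stand-in for alloc_page(GFP_DMA32 | GFP_KERNEL | __GFP_ZERO) */
            void *dummy_page = calloc(1, GPU_PAGE_SIZE);
            unsigned int i;

            if (!dummy_page)
                    return 1;
            dummy_addr = (uint64_t)(uintptr_t)dummy_page;

            /* default every entry to the dummy page, as radeon_gart_init() now does */
            for (i = 0; i < NUM_ENTRIES; i++)
                    gart_set_page(i, dummy_addr);

            gart_set_page(3, 0xdeadb000);  /* bind one slot to a "real" page */
            gart_unbind(3);                /* unbinding redirects it to the dummy page */

            for (i = 0; i < NUM_ENTRIES; i++)
                    printf("entry %u -> %#llx\n", i, (unsigned long long)gart_table[i]);
            free(dummy_page);
            return 0;
    }

In the patch itself one CPU page covers PAGE_SIZE / RADEON_GPU_PAGE_SIZE
GART entries, so radeon_gart_unbind() and the new radeon_gart_restore()
step page_base by RADEON_GPU_PAGE_SIZE for each entry they write.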

Signed-off-by: Jerome Glisse <jglisse@redhat.com>
Signed-off-by: Dave Airlie <airlied@redhat.com>
drivers/gpu/drm/radeon/evergreen.c
drivers/gpu/drm/radeon/r100.c
drivers/gpu/drm/radeon/r300.c
drivers/gpu/drm/radeon/r600.c
drivers/gpu/drm/radeon/radeon.h
drivers/gpu/drm/radeon/radeon_device.c
drivers/gpu/drm/radeon/radeon_gart.c
drivers/gpu/drm/radeon/rs400.c
drivers/gpu/drm/radeon/rs600.c
drivers/gpu/drm/radeon/rv770.c

diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index c2f9752e4ee0e4744bfb7711d7435580a0611957..3368920df5f40fa1fff969786a9cd4bd9aeab133 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -93,6 +93,7 @@ int evergreen_pcie_gart_enable(struct radeon_device *rdev)
        r = radeon_gart_table_vram_pin(rdev);
        if (r)
                return r;
+       radeon_gart_restore(rdev);
        /* Setup L2 cache */
        WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
                                ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index 346ae3d7e0d4cbbb8511a37a4ffe60f687404729..bc7d9e9211c8733a2f6791e9de72b2499489a0c5 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -197,6 +197,7 @@ int r100_pci_gart_enable(struct radeon_device *rdev)
 {
        uint32_t tmp;
 
+       radeon_gart_restore(rdev);
        /* discard memory request outside of configured range */
        tmp = RREG32(RADEON_AIC_CNTL) | RADEON_DIS_OUT_OF_PCI_GART_ACCESS;
        WREG32(RADEON_AIC_CNTL, tmp);
diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c
index 5897bd00591e03776e07bb4761a861be660d8b08..654aca1cdf051250fc9f431ab5a50e72088f99f5 100644
--- a/drivers/gpu/drm/radeon/r300.c
+++ b/drivers/gpu/drm/radeon/r300.c
@@ -117,6 +117,7 @@ int rv370_pcie_gart_enable(struct radeon_device *rdev)
        r = radeon_gart_table_vram_pin(rdev);
        if (r)
                return r;
+       radeon_gart_restore(rdev);
        /* discard memory request outside of configured range */
        tmp = RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_DISCARD;
        WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp);
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index c9723daee3577aada3f4ca6ef967b158c13b16c7..4facbab2045660b0033ef8530a5d50a88b26d798 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -416,6 +416,7 @@ int r600_pcie_gart_enable(struct radeon_device *rdev)
        r = radeon_gart_table_vram_pin(rdev);
        if (r)
                return r;
+       radeon_gart_restore(rdev);
 
        /* Setup L2 cache */
        WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 9eeca6f07222479c1dff508184865454c0a957a3..993cdf20d8e6725d95b9aaa97b05dec9874e64e5 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -1145,6 +1145,7 @@ static inline void radeon_ring_write(struct radeon_device *rdev, uint32_t v)
 /* AGP */
 extern void radeon_agp_disable(struct radeon_device *rdev);
 extern int radeon_gart_table_vram_pin(struct radeon_device *rdev);
+extern void radeon_gart_restore(struct radeon_device *rdev);
 extern int radeon_modeset_init(struct radeon_device *rdev);
 extern void radeon_modeset_fini(struct radeon_device *rdev);
 extern bool radeon_card_posted(struct radeon_device *rdev);
@@ -1269,7 +1270,6 @@ extern void r600_ring_init(struct radeon_device *rdev, unsigned ring_size);
 extern int r600_cp_resume(struct radeon_device *rdev);
 extern void r600_cp_fini(struct radeon_device *rdev);
 extern int r600_count_pipe_bits(uint32_t val);
-extern int r600_gart_clear_page(struct radeon_device *rdev, int i);
 extern int r600_mc_wait_for_idle(struct radeon_device *rdev);
 extern int r600_pcie_gart_init(struct radeon_device *rdev);
 extern void r600_pcie_gart_tlb_flush(struct radeon_device *rdev);
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index c224c1d944ef9aa111e87a15e32c1d25ef2b8644..fb55faf8e284a8aaba0735acfe02104ab5030f79 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -238,6 +238,8 @@ bool radeon_boot_test_post_card(struct radeon_device *rdev)
 
 int radeon_dummy_page_init(struct radeon_device *rdev)
 {
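+       /* may have been allocated already (radeon_gart_init() calls this too) */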
+       if (rdev->dummy_page.page)
+               return 0;
        rdev->dummy_page.page = alloc_page(GFP_DMA32 | GFP_KERNEL | __GFP_ZERO);
        if (rdev->dummy_page.page == NULL)
                return -ENOMEM;
diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c
index e73d56e83fa68760e6a14dd70d74fcd1473ed0ea..1770d3c07fd03dc8563adce502763b62a3320c0d 100644
--- a/drivers/gpu/drm/radeon/radeon_gart.c
+++ b/drivers/gpu/drm/radeon/radeon_gart.c
@@ -139,6 +139,7 @@ void radeon_gart_unbind(struct radeon_device *rdev, unsigned offset,
        unsigned t;
        unsigned p;
        int i, j;
+       u64 page_base;
 
        if (!rdev->gart.ready) {
                WARN(1, "trying to unbind memory to unitialized GART !\n");
@@ -151,9 +152,11 @@ void radeon_gart_unbind(struct radeon_device *rdev, unsigned offset,
                        pci_unmap_page(rdev->pdev, rdev->gart.pages_addr[p],
                                       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
                        rdev->gart.pages[p] = NULL;
-                       rdev->gart.pages_addr[p] = 0;
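+                       /* point the freed slot at the dummy page, not bus address 0 */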
+                       rdev->gart.pages_addr[p] = rdev->dummy_page.addr;
+                       page_base = rdev->gart.pages_addr[p];
                        for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) {
-                               radeon_gart_set_page(rdev, t, 0);
+                               radeon_gart_set_page(rdev, t, page_base);
+                               page_base += RADEON_GPU_PAGE_SIZE;
                        }
                }
        }
@@ -199,8 +202,26 @@ int radeon_gart_bind(struct radeon_device *rdev, unsigned offset,
        return 0;
 }
 
+void radeon_gart_restore(struct radeon_device *rdev)
+{
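+       /*
+        * Rewrite the whole table from pages_addr[]; unbound slots already
+        * point at the dummy page.  Called from the *_gart_enable() paths
+        * before the aperture is re-enabled.
+        */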
+       int i, j, t;
+       u64 page_base;
+
+       for (i = 0, t = 0; i < rdev->gart.num_cpu_pages; i++) {
+               page_base = rdev->gart.pages_addr[i];
+               for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) {
+                       radeon_gart_set_page(rdev, t, page_base);
+                       page_base += RADEON_GPU_PAGE_SIZE;
+               }
+       }
+       mb();
+       radeon_gart_tlb_flush(rdev);
+}
+
 int radeon_gart_init(struct radeon_device *rdev)
 {
+       int r, i;
+
        if (rdev->gart.pages) {
                return 0;
        }
@@ -209,6 +230,9 @@ int radeon_gart_init(struct radeon_device *rdev)
                DRM_ERROR("Page size is smaller than GPU page size!\n");
                return -EINVAL;
        }
+       r = radeon_dummy_page_init(rdev);
+       if (r)
+               return r;
        /* Compute table size */
        rdev->gart.num_cpu_pages = rdev->mc.gtt_size / PAGE_SIZE;
        rdev->gart.num_gpu_pages = rdev->mc.gtt_size / RADEON_GPU_PAGE_SIZE;
@@ -227,6 +251,10 @@ int radeon_gart_init(struct radeon_device *rdev)
                radeon_gart_fini(rdev);
                return -ENOMEM;
        }
+       /* set GART entry to point to the dummy page by default */
+       for (i = 0; i < rdev->gart.num_cpu_pages; i++) {
+               rdev->gart.pages_addr[i] = rdev->dummy_page.addr;
+       }
        return 0;
 }
 
diff --git a/drivers/gpu/drm/radeon/rs400.c b/drivers/gpu/drm/radeon/rs400.c
index 287fcebfb4e67733ff27aef939380b5b92f3a742..1e4582e27c143d5ece3588fc875d96460f69be80 100644
--- a/drivers/gpu/drm/radeon/rs400.c
+++ b/drivers/gpu/drm/radeon/rs400.c
@@ -113,6 +113,7 @@ int rs400_gart_enable(struct radeon_device *rdev)
        uint32_t size_reg;
        uint32_t tmp;
 
+       radeon_gart_restore(rdev);
        tmp = RREG32_MC(RS690_AIC_CTRL_SCRATCH);
        tmp |= RS690_DIS_OUT_OF_PCI_GART_ACCESS;
        WREG32_MC(RS690_AIC_CTRL_SCRATCH, tmp);
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c
index 979b00034de91490fbabecd66f00477d24247364..28c8690c7a35711aeb06ce221aef75b101866371 100644
--- a/drivers/gpu/drm/radeon/rs600.c
+++ b/drivers/gpu/drm/radeon/rs600.c
@@ -213,6 +213,7 @@ int rs600_gart_enable(struct radeon_device *rdev)
        r = radeon_gart_table_vram_pin(rdev);
        if (r)
                return r;
+       radeon_gart_restore(rdev);
        /* Enable bus master */
        tmp = RREG32(R_00004C_BUS_CNTL) & C_00004C_BUS_MASTER_DIS;
        WREG32(R_00004C_BUS_CNTL, tmp);
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
index 3ecd138815d879cc5ace5228ab1c1e77b77bb558..6f1f4abbe88c567f5568607e5dff719bde5a8f68 100644
--- a/drivers/gpu/drm/radeon/rv770.c
+++ b/drivers/gpu/drm/radeon/rv770.c
@@ -56,6 +56,7 @@ int rv770_pcie_gart_enable(struct radeon_device *rdev)
        r = radeon_gart_table_vram_pin(rdev);
        if (r)
                return r;
+       radeon_gart_restore(rdev);
        /* Setup L2 cache */
        WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
                                ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |