Merge branches 'device-properties' and 'acpi-misc'
[linux-2.6-block.git] / drivers / gpu / drm / amd / amdgpu / amdgpu_device.c
index 6e920086af468c4428be208007fefcfb1dfd3f0d..39c01b942ee483ef11e350aa43a44f1d7b5786d9 100644 (file)
@@ -25,6 +25,7 @@
  *          Alex Deucher
  *          Jerome Glisse
  */
+#include <linux/kthread.h>
 #include <linux/console.h>
 #include <linux/slab.h>
 #include <linux/debugfs.h>
@@ -35,6 +36,7 @@
 #include <linux/vga_switcheroo.h>
 #include <linux/efi.h>
 #include "amdgpu.h"
+#include "amdgpu_trace.h"
 #include "amdgpu_i2c.h"
 #include "atom.h"
 #include "amdgpu_atombios.h"
@@ -79,24 +81,27 @@ bool amdgpu_device_is_px(struct drm_device *dev)
 uint32_t amdgpu_mm_rreg(struct amdgpu_device *adev, uint32_t reg,
                        bool always_indirect)
 {
+       /* MMIO register read: use a direct readl() when the register offset
+        * lies inside the mapped MMIO aperture and the caller did not force
+        * indirect access; otherwise go through the MM_INDEX/MM_DATA pair
+        * under mmio_idx_lock (irqsave, so it is safe from any context).
+        */
+       uint32_t ret;
+
        if ((reg * 4) < adev->rmmio_size && !always_indirect)
-               return readl(((void __iomem *)adev->rmmio) + (reg * 4));
+               ret = readl(((void __iomem *)adev->rmmio) + (reg * 4));
        else {
                unsigned long flags;
-               uint32_t ret;
 
                spin_lock_irqsave(&adev->mmio_idx_lock, flags);
                writel((reg * 4), ((void __iomem *)adev->rmmio) + (mmMM_INDEX * 4));
                ret = readl(((void __iomem *)adev->rmmio) + (mmMM_DATA * 4));
                spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
-
-               return ret;
        }
+       /* single exit point so every read (direct or indirect) is traced */
+       trace_amdgpu_mm_rreg(adev->pdev->device, reg, ret);
+       return ret;
 }
 
 void amdgpu_mm_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v,
                    bool always_indirect)
 {
+       trace_amdgpu_mm_wreg(adev->pdev->device, reg, v);
+       
        if ((reg * 4) < adev->rmmio_size && !always_indirect)
                writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
        else {
@@ -1070,11 +1075,14 @@ int amdgpu_set_clockgating_state(struct amdgpu_device *adev,
        int i, r = 0;
 
        for (i = 0; i < adev->num_ip_blocks; i++) {
+               if (!adev->ip_block_status[i].valid)
+                       continue;
                if (adev->ip_blocks[i].type == block_type) {
                        r = adev->ip_blocks[i].funcs->set_clockgating_state((void *)adev,
                                                                            state);
                        if (r)
                                return r;
+                       break;
                }
        }
        return r;
@@ -1087,16 +1095,53 @@ int amdgpu_set_powergating_state(struct amdgpu_device *adev,
        int i, r = 0;
 
        for (i = 0; i < adev->num_ip_blocks; i++) {
+               if (!adev->ip_block_status[i].valid)
+                       continue;
                if (adev->ip_blocks[i].type == block_type) {
                        r = adev->ip_blocks[i].funcs->set_powergating_state((void *)adev,
                                                                            state);
                        if (r)
                                return r;
+                       break;
                }
        }
        return r;
 }
 
+/**
+ * amdgpu_wait_for_idle - wait for a specific IP block to become idle
+ * @adev: amdgpu device pointer
+ * @block_type: the IP block type to wait on
+ *
+ * Walks the device's IP block list, skipping blocks whose status was never
+ * marked valid, and invokes the matching block's wait_for_idle hook.
+ *
+ * Returns the hook's error code on failure, 0 on success or when no
+ * matching valid block exists.
+ */
+int amdgpu_wait_for_idle(struct amdgpu_device *adev,
+                        enum amd_ip_block_type block_type)
+{
+       int i, r;
+
+       for (i = 0; i < adev->num_ip_blocks; i++) {
+               if (!adev->ip_block_status[i].valid)
+                       continue;
+               if (adev->ip_blocks[i].type == block_type) {
+                       r = adev->ip_blocks[i].funcs->wait_for_idle((void *)adev);
+                       if (r)
+                               return r;
+                       /* at most one block of each type is expected */
+                       break;
+               }
+       }
+       return 0;
+
+}
+
+/**
+ * amdgpu_is_idle - poll whether a specific IP block is currently idle
+ * @adev: amdgpu device pointer
+ * @block_type: the IP block type to query
+ *
+ * Skips IP blocks whose status was never marked valid and returns the
+ * result of the first matching block's is_idle hook.  Returns true when
+ * no matching valid block exists (nothing there to be busy).
+ */
+bool amdgpu_is_idle(struct amdgpu_device *adev,
+                   enum amd_ip_block_type block_type)
+{
+       int i;
+
+       for (i = 0; i < adev->num_ip_blocks; i++) {
+               if (!adev->ip_block_status[i].valid)
+                       continue;
+               if (adev->ip_blocks[i].type == block_type)
+                       return adev->ip_blocks[i].funcs->is_idle((void *)adev);
+       }
+       return true;
+
+}
+
 const struct amdgpu_ip_block_version * amdgpu_get_ip_block(
                                        struct amdgpu_device *adev,
                                        enum amd_ip_block_type type)
@@ -1209,6 +1254,9 @@ static int amdgpu_early_init(struct amdgpu_device *adev)
                }
        }
 
+       adev->cg_flags &= amdgpu_cg_mask;
+       adev->pg_flags &= amdgpu_pg_mask;
+
        return 0;
 }
 
@@ -1440,9 +1488,12 @@ int amdgpu_device_init(struct amdgpu_device *adev,
        adev->uvd_ctx_wreg = &amdgpu_invalid_wreg;
        adev->didt_rreg = &amdgpu_invalid_rreg;
        adev->didt_wreg = &amdgpu_invalid_wreg;
+       adev->gc_cac_rreg = &amdgpu_invalid_rreg;
+       adev->gc_cac_wreg = &amdgpu_invalid_wreg;
        adev->audio_endpt_rreg = &amdgpu_block_invalid_rreg;
        adev->audio_endpt_wreg = &amdgpu_block_invalid_wreg;
 
+
        DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X 0x%02X).\n",
                 amdgpu_asic_name[adev->asic_type], pdev->vendor, pdev->device,
                 pdev->subsystem_vendor, pdev->subsystem_device, pdev->revision);
@@ -1467,6 +1518,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
        spin_lock_init(&adev->pcie_idx_lock);
        spin_lock_init(&adev->uvd_ctx_idx_lock);
        spin_lock_init(&adev->didt_idx_lock);
+       spin_lock_init(&adev->gc_cac_idx_lock);
        spin_lock_init(&adev->audio_endpt_idx_lock);
 
        adev->rmmio_base = pci_resource_start(adev->pdev, 5);
@@ -1511,17 +1563,20 @@ int amdgpu_device_init(struct amdgpu_device *adev,
                vga_switcheroo_init_domain_pm_ops(adev->dev, &adev->vga_pm_domain);
 
        /* Read BIOS */
-       if (!amdgpu_get_bios(adev))
-               return -EINVAL;
+       if (!amdgpu_get_bios(adev)) {
+               r = -EINVAL;
+               goto failed;
+       }
        /* Must be an ATOMBIOS */
        if (!adev->is_atom_bios) {
                dev_err(adev->dev, "Expecting atombios for GPU\n");
-               return -EINVAL;
+               r = -EINVAL;
+               goto failed;
        }
        r = amdgpu_atombios_init(adev);
        if (r) {
                dev_err(adev->dev, "amdgpu_atombios_init failed\n");
-               return r;
+               goto failed;
        }
 
        /* See if the asic supports SR-IOV */
@@ -1538,7 +1593,8 @@ int amdgpu_device_init(struct amdgpu_device *adev,
             !(adev->virtualization.caps & AMDGPU_VIRT_CAPS_SRIOV_EN))) {
                if (!adev->bios) {
                        dev_err(adev->dev, "Card not posted and no BIOS - ignoring\n");
-                       return -EINVAL;
+                       r = -EINVAL;
+                       goto failed;
                }
                DRM_INFO("GPU not posted. posting now...\n");
                amdgpu_atom_asic_init(adev->mode_info.atom_context);
@@ -1548,7 +1604,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
        r = amdgpu_atombios_get_clock_info(adev);
        if (r) {
                dev_err(adev->dev, "amdgpu_atombios_get_clock_info failed\n");
-               return r;
+               goto failed;
        }
        /* init i2c buses */
        amdgpu_atombios_i2c_init(adev);
@@ -1557,7 +1613,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
        r = amdgpu_fence_driver_init(adev);
        if (r) {
                dev_err(adev->dev, "amdgpu_fence_driver_init failed\n");
-               return r;
+               goto failed;
        }
 
        /* init the mode config */
@@ -1567,7 +1623,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
        if (r) {
                dev_err(adev->dev, "amdgpu_init failed\n");
                amdgpu_fini(adev);
-               return r;
+               goto failed;
        }
 
        adev->accel_working = true;
@@ -1577,7 +1633,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
        r = amdgpu_ib_pool_init(adev);
        if (r) {
                dev_err(adev->dev, "IB initialization failed (%d).\n", r);
-               return r;
+               goto failed;
        }
 
        r = amdgpu_ib_ring_tests(adev);
@@ -1594,6 +1650,12 @@ int amdgpu_device_init(struct amdgpu_device *adev,
                DRM_ERROR("registering register debugfs failed (%d).\n", r);
        }
 
+       r = amdgpu_debugfs_firmware_init(adev);
+       if (r) {
+               DRM_ERROR("registering firmware debugfs failed (%d).\n", r);
+               return r;
+       }
+
        if ((amdgpu_testing & 1)) {
                if (adev->accel_working)
                        amdgpu_test_moves(adev);
@@ -1619,10 +1681,15 @@ int amdgpu_device_init(struct amdgpu_device *adev,
        r = amdgpu_late_init(adev);
        if (r) {
                dev_err(adev->dev, "amdgpu_late_init failed\n");
-               return r;
+               goto failed;
        }
 
        return 0;
+
+failed:
+       if (runtime)
+               vga_switcheroo_fini_domain_pm_ops(adev->dev);
+       return r;
 }
 
 static void amdgpu_debugfs_remove_files(struct amdgpu_device *adev);
@@ -1641,6 +1708,7 @@ void amdgpu_device_fini(struct amdgpu_device *adev)
 
        DRM_INFO("amdgpu: finishing device.\n");
        adev->shutdown = true;
+       drm_crtc_force_disable_all(adev->ddev);
        /* evict vram memory */
        amdgpu_bo_evict_vram(adev);
        amdgpu_ib_pool_fini(adev);
@@ -1656,6 +1724,8 @@ void amdgpu_device_fini(struct amdgpu_device *adev)
        kfree(adev->bios);
        adev->bios = NULL;
        vga_switcheroo_unregister_client(adev->pdev);
+       if (adev->flags & AMD_IS_PX)
+               vga_switcheroo_fini_domain_pm_ops(adev->dev);
        vga_client_register(adev->pdev, NULL, NULL, NULL);
        if (adev->rio_mem)
                pci_iounmap(adev->pdev, adev->rio_mem);
@@ -1841,7 +1911,23 @@ int amdgpu_resume_kms(struct drm_device *dev, bool resume, bool fbcon)
        }
 
        drm_kms_helper_poll_enable(dev);
+
+       /*
+        * Most of the connector probing functions try to acquire runtime pm
+        * refs to ensure that the GPU is powered on when connector polling is
+        * performed. Since we're calling this from a runtime PM callback,
+        * trying to acquire rpm refs will cause us to deadlock.
+        *
+        * Since we're guaranteed to be holding the rpm lock, it's safe to
+        * temporarily disable the rpm helpers so this doesn't deadlock us.
+        */
+#ifdef CONFIG_PM
+       dev->dev->power.disable_depth++;
+#endif
        drm_helper_hpd_irq_event(dev);
+#ifdef CONFIG_PM
+       dev->dev->power.disable_depth--;
+#endif
 
        if (fbcon) {
                amdgpu_fbdev_set_suspend(adev, 0);
@@ -1861,11 +1947,6 @@ int amdgpu_resume_kms(struct drm_device *dev, bool resume, bool fbcon)
  */
 int amdgpu_gpu_reset(struct amdgpu_device *adev)
 {
-       unsigned ring_sizes[AMDGPU_MAX_RINGS];
-       uint32_t *ring_data[AMDGPU_MAX_RINGS];
-
-       bool saved = false;
-
        int i, r;
        int resched;
 
@@ -1874,22 +1955,30 @@ int amdgpu_gpu_reset(struct amdgpu_device *adev)
        /* block TTM */
        resched = ttm_bo_lock_delayed_workqueue(&adev->mman.bdev);
 
-       r = amdgpu_suspend(adev);
-
+       /* block scheduler */
        for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
                struct amdgpu_ring *ring = adev->rings[i];
+
                if (!ring)
                        continue;
-
-               ring_sizes[i] = amdgpu_ring_backup(ring, &ring_data[i]);
-               if (ring_sizes[i]) {
-                       saved = true;
-                       dev_info(adev->dev, "Saved %d dwords of commands "
-                                "on ring %d.\n", ring_sizes[i], i);
-               }
+               kthread_park(ring->sched.thread);
+               amd_sched_hw_job_reset(&ring->sched);
        }
+       /* after all hw jobs are reset, hw fence is meaningless, so force_completion */
+       amdgpu_fence_driver_force_completion(adev);
+
+       /* save scratch */
+       amdgpu_atombios_scratch_regs_save(adev);
+       r = amdgpu_suspend(adev);
 
 retry:
+       /* Disable fb access */
+       if (adev->mode_info.num_crtc) {
+               struct amdgpu_mode_mc_save save;
+               amdgpu_display_stop_mc_access(adev, &save);
+               amdgpu_wait_for_idle(adev, AMD_IP_BLOCK_TYPE_GMC);
+       }
+
        r = amdgpu_asic_reset(adev);
        /* post card */
        amdgpu_atom_asic_init(adev->mode_info.atom_context);
@@ -1898,32 +1987,29 @@ retry:
                dev_info(adev->dev, "GPU reset succeeded, trying to resume\n");
                r = amdgpu_resume(adev);
        }
-
+       /* restore scratch */
+       amdgpu_atombios_scratch_regs_restore(adev);
        if (!r) {
+               r = amdgpu_ib_ring_tests(adev);
+               if (r) {
+                       dev_err(adev->dev, "ib ring test failed (%d).\n", r);
+                       r = amdgpu_suspend(adev);
+                       goto retry;
+               }
+
                for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
                        struct amdgpu_ring *ring = adev->rings[i];
                        if (!ring)
                                continue;
-
-                       amdgpu_ring_restore(ring, ring_sizes[i], ring_data[i]);
-                       ring_sizes[i] = 0;
-                       ring_data[i] = NULL;
-               }
-
-               r = amdgpu_ib_ring_tests(adev);
-               if (r) {
-                       dev_err(adev->dev, "ib ring test failed (%d).\n", r);
-                       if (saved) {
-                               saved = false;
-                               r = amdgpu_suspend(adev);
-                               goto retry;
-                       }
+                       amd_sched_job_recovery(&ring->sched);
+                       kthread_unpark(ring->sched.thread);
                }
        } else {
-               amdgpu_fence_driver_force_completion(adev);
+               dev_err(adev->dev, "asic resume failed (%d).\n", r);
                for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
-                       if (adev->rings[i])
-                               kfree(ring_data[i]);
+                       if (adev->rings[i]) {
+                               kthread_unpark(adev->rings[i]->sched.thread);
+                       }
                }
        }
 
@@ -1934,13 +2020,11 @@ retry:
                /* bad news, how to tell it to userspace ? */
                dev_info(adev->dev, "GPU reset failed\n");
        }
+       amdgpu_irq_gpu_reset_resume_helper(adev);
 
        return r;
 }
 
-#define AMDGPU_DEFAULT_PCIE_GEN_MASK 0x30007  /* gen: chipset 1/2, asic 1/2/3 */
-#define AMDGPU_DEFAULT_PCIE_MLW_MASK 0x2f0000 /* 1/2/4/8/16 lanes */
-
 void amdgpu_get_pcie_info(struct amdgpu_device *adev)
 {
        u32 mask;
@@ -2094,20 +2178,43 @@ static ssize_t amdgpu_debugfs_regs_read(struct file *f, char __user *buf,
        struct amdgpu_device *adev = f->f_inode->i_private;
        ssize_t result = 0;
        int r;
+       bool use_bank;
+       unsigned instance_bank, sh_bank, se_bank;
 
        if (size & 0x3 || *pos & 0x3)
                return -EINVAL;
 
+       if (*pos & (1ULL << 62)) {
+               se_bank = (*pos >> 24) & 0x3FF;
+               sh_bank = (*pos >> 34) & 0x3FF;
+               instance_bank = (*pos >> 44) & 0x3FF;
+               use_bank = 1;
+               *pos &= 0xFFFFFF;
+       } else {
+               use_bank = 0;
+       }
+
+       if (use_bank) {
+               if (sh_bank >= adev->gfx.config.max_sh_per_se ||
+                   se_bank >= adev->gfx.config.max_shader_engines)
+                       return -EINVAL;
+               mutex_lock(&adev->grbm_idx_mutex);
+               amdgpu_gfx_select_se_sh(adev, se_bank,
+                                       sh_bank, instance_bank);
+       }
+
        while (size) {
                uint32_t value;
 
                if (*pos > adev->rmmio_size)
-                       return result;
+                       goto end;
 
                value = RREG32(*pos >> 2);
                r = put_user(value, (uint32_t *)buf);
-               if (r)
-                       return r;
+               if (r) {
+                       result = r;
+                       goto end;
+               }
 
                result += 4;
                buf += 4;
@@ -2115,6 +2222,12 @@ static ssize_t amdgpu_debugfs_regs_read(struct file *f, char __user *buf,
                size -= 4;
        }
 
+end:
+       if (use_bank) {
+               amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+               mutex_unlock(&adev->grbm_idx_mutex);
+       }
+
        return result;
 }
 
@@ -2314,6 +2427,68 @@ static ssize_t amdgpu_debugfs_regs_smc_write(struct file *f, const char __user *
        return result;
 }
 
+/* debugfs read handler: dump a versioned snapshot of the GCA (gfx) hardware
+ * configuration to userspace as a stream of 32-bit words.  Supports partial
+ * reads via *pos; both size and offset must be 4-byte aligned.
+ */
+static ssize_t amdgpu_debugfs_gca_config_read(struct file *f, char __user *buf,
+                                       size_t size, loff_t *pos)
+{
+       struct amdgpu_device *adev = f->f_inode->i_private;
+       ssize_t result = 0;
+       int r;
+       uint32_t *config, no_regs = 0;
+
+       if (size & 0x3 || *pos & 0x3)
+               return -EINVAL;
+
+       /* 256 words leaves headroom; no_regs counts the entries actually used */
+       config = kmalloc(256 * sizeof(*config), GFP_KERNEL);
+       if (!config)
+               return -ENOMEM;
+
+       /* version, increment each time something is added */
+       config[no_regs++] = 0;
+       config[no_regs++] = adev->gfx.config.max_shader_engines;
+       config[no_regs++] = adev->gfx.config.max_tile_pipes;
+       config[no_regs++] = adev->gfx.config.max_cu_per_sh;
+       config[no_regs++] = adev->gfx.config.max_sh_per_se;
+       config[no_regs++] = adev->gfx.config.max_backends_per_se;
+       config[no_regs++] = adev->gfx.config.max_texture_channel_caches;
+       config[no_regs++] = adev->gfx.config.max_gprs;
+       config[no_regs++] = adev->gfx.config.max_gs_threads;
+       config[no_regs++] = adev->gfx.config.max_hw_contexts;
+       config[no_regs++] = adev->gfx.config.sc_prim_fifo_size_frontend;
+       config[no_regs++] = adev->gfx.config.sc_prim_fifo_size_backend;
+       config[no_regs++] = adev->gfx.config.sc_hiz_tile_fifo_size;
+       config[no_regs++] = adev->gfx.config.sc_earlyz_tile_fifo_size;
+       config[no_regs++] = adev->gfx.config.num_tile_pipes;
+       config[no_regs++] = adev->gfx.config.backend_enable_mask;
+       config[no_regs++] = adev->gfx.config.mem_max_burst_length_bytes;
+       config[no_regs++] = adev->gfx.config.mem_row_size_in_kb;
+       config[no_regs++] = adev->gfx.config.shader_engine_tile_size;
+       config[no_regs++] = adev->gfx.config.num_gpus;
+       config[no_regs++] = adev->gfx.config.multi_gpu_tile_size;
+       config[no_regs++] = adev->gfx.config.mc_arb_ramcfg;
+       config[no_regs++] = adev->gfx.config.gb_addr_config;
+       config[no_regs++] = adev->gfx.config.num_rbs;
+
+       /* copy out one word at a time until the caller's buffer or the
+        * snapshot is exhausted; *pos indexes into the snapshot */
+       while (size && (*pos < no_regs * 4)) {
+               uint32_t value;
+
+               value = config[*pos >> 2];
+               r = put_user(value, (uint32_t *)buf);
+               if (r) {
+                       kfree(config);
+                       return r;
+               }
+
+               result += 4;
+               buf += 4;
+               *pos += 4;
+               size -= 4;
+       }
+
+       kfree(config);
+       return result;
+}
+
+
+
+
 static const struct file_operations amdgpu_debugfs_regs_fops = {
        .owner = THIS_MODULE,
        .read = amdgpu_debugfs_regs_read,
@@ -2339,11 +2514,18 @@ static const struct file_operations amdgpu_debugfs_regs_smc_fops = {
        .llseek = default_llseek
 };
 
+static const struct file_operations amdgpu_debugfs_gca_config_fops = {
+       .owner = THIS_MODULE,
+       /* read-only: the gfx config dump is informational, no .write */
+       .read = amdgpu_debugfs_gca_config_read,
+       .llseek = default_llseek
+};
+
 static const struct file_operations *debugfs_regs[] = {
        &amdgpu_debugfs_regs_fops,
        &amdgpu_debugfs_regs_didt_fops,
        &amdgpu_debugfs_regs_pcie_fops,
        &amdgpu_debugfs_regs_smc_fops,
+       /* order must stay in sync with debugfs_regs_names below */
+       &amdgpu_debugfs_gca_config_fops,
 };
 
 static const char *debugfs_regs_names[] = {
@@ -2351,6 +2533,7 @@ static const char *debugfs_regs_names[] = {
        "amdgpu_regs_didt",
        "amdgpu_regs_pcie",
        "amdgpu_regs_smc",
+       "amdgpu_gca_config",
 };
 
 static int amdgpu_debugfs_regs_init(struct amdgpu_device *adev)