drm/etnaviv: implement cooling support for new GPU cores
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
index b78d9239e48fb0fc3b02129fe97795a6b94bed70..ada45fdd0eaeead886cd7cac4068ad5f97c53add 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
@@ -18,6 +18,7 @@
 #include <linux/dma-fence.h>
 #include <linux/moduleparam.h>
 #include <linux/of_device.h>
+#include <linux/thermal.h>
 
 #include "etnaviv_cmdbuf.h"
 #include "etnaviv_dump.h"
@@ -409,6 +410,23 @@ static void etnaviv_gpu_load_clock(struct etnaviv_gpu *gpu, u32 clock)
        gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, clock);
 }
 
+static void etnaviv_gpu_update_clock(struct etnaviv_gpu *gpu)
+{
+       if (gpu->identity.minor_features2 &
+           chipMinorFeatures2_DYNAMIC_FREQUENCY_SCALING) {
+               clk_set_rate(gpu->clk_core,
+                            gpu->base_rate_core >> gpu->freq_scale);
+               clk_set_rate(gpu->clk_shader,
+                            gpu->base_rate_shader >> gpu->freq_scale);
+       } else {
+               unsigned int fscale = 1 << (6 - gpu->freq_scale);
+               u32 clock = VIVS_HI_CLOCK_CONTROL_DISABLE_DEBUG_REGISTERS |
+                           VIVS_HI_CLOCK_CONTROL_FSCALE_VAL(fscale);
+
+               etnaviv_gpu_load_clock(gpu, clock);
+       }
+}
+
 static int etnaviv_hw_reset(struct etnaviv_gpu *gpu)
 {
        u32 control, idle;
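
For reference, a quick worked example of the two scaling paths added above (illustrative only, not part of the patch; assumes <linux/kernel.h> for pr_info and a made-up helper name): cores advertising dynamic frequency scaling get the base clock shifted right by freq_scale, while older cores program the on-chip FSCALE divider, so both paths end up dividing the clock by the same power of two.

/*
 * Sketch, not patch code: dump what each freq_scale value means.  freq_scale
 * is expected to range from 0 (full speed) to 6 (slowest), matching the
 * cooling device's max state further down.
 */
static void etnaviv_example_dump_scaling(unsigned long base_rate)
{
	unsigned int scale;

	for (scale = 0; scale <= 6; scale++)
		pr_info("scale %u: DFS rate %lu Hz, FSCALE value 0x%02x\n",
			scale, base_rate >> scale, 1 << (6 - scale));
}

With a 600 MHz base rate this prints 600 MHz down to roughly 9.4 MHz, and FSCALE values from 0x40 (the full-speed value the old hardcoded path used) down to 0x01.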
@@ -426,11 +444,10 @@ static int etnaviv_hw_reset(struct etnaviv_gpu *gpu)
        timeout = jiffies + msecs_to_jiffies(1000);
 
        while (time_is_after_jiffies(timeout)) {
-               control = VIVS_HI_CLOCK_CONTROL_DISABLE_DEBUG_REGISTERS |
-                         VIVS_HI_CLOCK_CONTROL_FSCALE_VAL(0x40);
-
                /* enable clock */
-               etnaviv_gpu_load_clock(gpu, control);
+               etnaviv_gpu_update_clock(gpu);
+
+               control = gpu_read(gpu, VIVS_HI_CLOCK_CONTROL);
 
                /* Wait for stable clock.  Vivante's code waited for 1ms */
                usleep_range(1000, 10000);
@@ -490,11 +507,7 @@ static int etnaviv_hw_reset(struct etnaviv_gpu *gpu)
        }
 
        /* We rely on the GPU running, so program the clock */
-       control = VIVS_HI_CLOCK_CONTROL_DISABLE_DEBUG_REGISTERS |
-                 VIVS_HI_CLOCK_CONTROL_FSCALE_VAL(0x40);
-
-       /* enable clock */
-       etnaviv_gpu_load_clock(gpu, control);
+       etnaviv_gpu_update_clock(gpu);
 
        return 0;
 }
@@ -516,9 +529,10 @@ static void etnaviv_gpu_enable_mlcg(struct etnaviv_gpu *gpu)
 
        pmc = gpu_read(gpu, VIVS_PM_MODULE_CONTROLS);
 
-       /* Disable PA clock gating for GC400+ except for GC420 */
+       /* Disable PA clock gating for GC400+ cores without the bug fix, except for GC420 */
        if (gpu->identity.model >= chipModel_GC400 &&
-           gpu->identity.model != chipModel_GC420)
+           gpu->identity.model != chipModel_GC420 &&
+           !(gpu->identity.minor_features3 & chipMinorFeatures3_BUG_FIXES12))
                pmc |= VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_PA;
 
        /*
@@ -534,6 +548,11 @@ static void etnaviv_gpu_enable_mlcg(struct etnaviv_gpu *gpu)
        if (gpu->identity.revision < 0x5422)
                pmc |= BIT(15); /* Unknown bit */
 
+       /* Disable TX clock gating on affected core revisions. */
+       if (etnaviv_is_model_rev(gpu, GC4000, 0x5222) ||
+           etnaviv_is_model_rev(gpu, GC2000, 0x5108))
+               pmc |= VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_TX;
+
        pmc |= VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_RA_HZ;
        pmc |= VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_RA_EZ;
 
@@ -1051,6 +1070,12 @@ static struct dma_fence *etnaviv_gpu_fence_alloc(struct etnaviv_gpu *gpu)
 {
        struct etnaviv_fence *f;
 
+       /*
+        * GPU lock must already be held, otherwise fence completion order might
+        * not match the seqno order assigned here.
+        */
+       lockdep_assert_held(&gpu->lock);
+
        f = kzalloc(sizeof(*f), GFP_KERNEL);
        if (!f)
                return NULL;
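
A minimal sketch of the calling convention the new lockdep assertion enforces (the function name below is made up; in the driver the real caller is the submit path, which already holds gpu->lock):

static struct dma_fence *example_queue_locked(struct etnaviv_gpu *gpu)
{
	struct dma_fence *fence;

	mutex_lock(&gpu->lock);
	fence = etnaviv_gpu_fence_alloc(gpu);	/* seqno assigned in lock order */
	if (fence) {
		/*
		 * Queue the command stream that will signal this fence while
		 * still holding gpu->lock, so completion order matches the
		 * seqno order handed out above.
		 */
	}
	mutex_unlock(&gpu->lock);

	return fence;
}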
@@ -1064,7 +1089,7 @@ static struct dma_fence *etnaviv_gpu_fence_alloc(struct etnaviv_gpu *gpu)
 }
 
 int etnaviv_gpu_fence_sync_obj(struct etnaviv_gem_object *etnaviv_obj,
-       unsigned int context, bool exclusive)
+       unsigned int context, bool exclusive, bool explicit)
 {
        struct reservation_object *robj = etnaviv_obj->resv;
        struct reservation_object_list *fobj;
@@ -1077,6 +1102,9 @@ int etnaviv_gpu_fence_sync_obj(struct etnaviv_gem_object *etnaviv_obj,
                        return ret;
        }
 
+       if (explicit)
+               return 0;
+
        /*
         * If we have any shared fences, then the exclusive fence
         * should be ignored as it will already have been signalled.
@@ -1321,8 +1349,8 @@ int etnaviv_gpu_submit(struct etnaviv_gpu *gpu,
        }
 
        gpu->event[event].fence = fence;
-       submit->fence = fence->seqno;
-       gpu->active_fence = submit->fence;
+       submit->fence = dma_fence_get(fence);
+       gpu->active_fence = submit->fence->seqno;
 
        if (gpu->lastctx != cmdbuf->ctx) {
                gpu->mmu->need_flush = true;
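With this change submit->fence carries a reference taken via dma_fence_get(); the matching dma_fence_put() is assumed to sit in the submit cleanup path, which this hunk does not show. A sketch of the pairing (the cleanup function name is hypothetical):

static void example_submit_cleanup(struct etnaviv_gem_submit *submit)
{
	/* drops the reference taken with dma_fence_get() at submit time */
	dma_fence_put(submit->fence);
	submit->fence = NULL;
}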
@@ -1527,17 +1555,13 @@ static int etnaviv_gpu_hw_suspend(struct etnaviv_gpu *gpu)
 #ifdef CONFIG_PM
 static int etnaviv_gpu_hw_resume(struct etnaviv_gpu *gpu)
 {
-       u32 clock;
        int ret;
 
        ret = mutex_lock_killable(&gpu->lock);
        if (ret)
                return ret;
 
-       clock = VIVS_HI_CLOCK_CONTROL_DISABLE_DEBUG_REGISTERS |
-               VIVS_HI_CLOCK_CONTROL_FSCALE_VAL(0x40);
-
-       etnaviv_gpu_load_clock(gpu, clock);
+       etnaviv_gpu_update_clock(gpu);
        etnaviv_gpu_hw_init(gpu);
 
        gpu->switch_context = true;
@@ -1549,6 +1573,47 @@ static int etnaviv_gpu_hw_resume(struct etnaviv_gpu *gpu)
 }
 #endif
 
+static int
+etnaviv_gpu_cooling_get_max_state(struct thermal_cooling_device *cdev,
+                                 unsigned long *state)
+{
+       *state = 6;
+
+       return 0;
+}
+
+static int
+etnaviv_gpu_cooling_get_cur_state(struct thermal_cooling_device *cdev,
+                                 unsigned long *state)
+{
+       struct etnaviv_gpu *gpu = cdev->devdata;
+
+       *state = gpu->freq_scale;
+
+       return 0;
+}
+
+static int
+etnaviv_gpu_cooling_set_cur_state(struct thermal_cooling_device *cdev,
+                                 unsigned long state)
+{
+       struct etnaviv_gpu *gpu = cdev->devdata;
+
+       mutex_lock(&gpu->lock);
+       gpu->freq_scale = state;
+       if (!pm_runtime_suspended(gpu->dev))
+               etnaviv_gpu_update_clock(gpu);
+       mutex_unlock(&gpu->lock);
+
+       return 0;
+}
+
+static struct thermal_cooling_device_ops cooling_ops = {
+       .get_max_state = etnaviv_gpu_cooling_get_max_state,
+       .get_cur_state = etnaviv_gpu_cooling_get_cur_state,
+       .set_cur_state = etnaviv_gpu_cooling_set_cur_state,
+};
+
 static int etnaviv_gpu_bind(struct device *dev, struct device *master,
        void *data)
 {
@@ -1557,13 +1622,20 @@ static int etnaviv_gpu_bind(struct device *dev, struct device *master,
        struct etnaviv_gpu *gpu = dev_get_drvdata(dev);
        int ret;
 
+       gpu->cooling = thermal_of_cooling_device_register(dev->of_node,
+                               (char *)dev_name(dev), gpu, &cooling_ops);
+       if (IS_ERR(gpu->cooling))
+               return PTR_ERR(gpu->cooling);
+
 #ifdef CONFIG_PM
        ret = pm_runtime_get_sync(gpu->dev);
 #else
        ret = etnaviv_gpu_clk_enable(gpu);
 #endif
-       if (ret < 0)
+       if (ret < 0) {
+               thermal_cooling_device_unregister(gpu->cooling);
                return ret;
+       }
 
        gpu->drm = drm;
        gpu->fence_context = dma_fence_context_alloc(1);
@@ -1617,6 +1689,9 @@ static void etnaviv_gpu_unbind(struct device *dev, struct device *master,
        }
 
        gpu->drm = NULL;
+
+       thermal_cooling_device_unregister(gpu->cooling);
+       gpu->cooling = NULL;
 }
 
 static const struct component_ops gpu_ops = {
@@ -1673,11 +1748,13 @@ static int etnaviv_gpu_platform_probe(struct platform_device *pdev)
        DBG("clk_core: %p", gpu->clk_core);
        if (IS_ERR(gpu->clk_core))
                gpu->clk_core = NULL;
+       gpu->base_rate_core = clk_get_rate(gpu->clk_core);
 
        gpu->clk_shader = devm_clk_get(&pdev->dev, "shader");
        DBG("clk_shader: %p", gpu->clk_shader);
        if (IS_ERR(gpu->clk_shader))
                gpu->clk_shader = NULL;
+       gpu->base_rate_shader = clk_get_rate(gpu->clk_shader);
 
        /* TODO: figure out max mapped size */
        dev_set_drvdata(dev, gpu);