drm/msm/gpu: Move BO allocation out of hw_init
authorRob Clark <robdclark@chromium.org>
Mon, 20 Mar 2023 14:43:35 +0000 (07:43 -0700)
committerRob Clark <robdclark@chromium.org>
Sat, 25 Mar 2023 23:31:45 +0000 (16:31 -0700)
These allocations are only done the first (successful) time through
hw_init() so they won't actually happen in the job_run() path.  But
lockdep doesn't know this.  So disentangle them from the hw_init()
path.

Signed-off-by: Rob Clark <robdclark@chromium.org>
Patchwork: https://patchwork.freedesktop.org/patch/527852/
Link: https://lore.kernel.org/r/20230320144356.803762-14-robdclark@gmail.com
drivers/gpu/drm/msm/adreno/a5xx_gpu.c
drivers/gpu/drm/msm/adreno/a6xx_gpu.c
drivers/gpu/drm/msm/adreno/adreno_device.c
drivers/gpu/drm/msm/msm_gpu.h

index 0372f89082022d6acddbcac4d27228daf8672d74..d6c1c3ab19a38595d99f6aedb01dc2caacc19035 100644 (file)
@@ -567,7 +567,7 @@ static void a5xx_ucode_check_version(struct a5xx_gpu *a5xx_gpu,
        msm_gem_put_vaddr(obj);
 }
 
-static int a5xx_ucode_init(struct msm_gpu *gpu)
+static int a5xx_ucode_load(struct msm_gpu *gpu)
 {
        struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
        struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
@@ -605,9 +605,24 @@ static int a5xx_ucode_init(struct msm_gpu *gpu)
                a5xx_ucode_check_version(a5xx_gpu, a5xx_gpu->pfp_bo);
        }
 
-       gpu_write64(gpu, REG_A5XX_CP_ME_INSTR_BASE_LO, a5xx_gpu->pm4_iova);
+       if (a5xx_gpu->has_whereami) {
+               if (!a5xx_gpu->shadow_bo) {
+                       a5xx_gpu->shadow = msm_gem_kernel_new(gpu->dev,
+                               sizeof(u32) * gpu->nr_rings,
+                               MSM_BO_WC | MSM_BO_MAP_PRIV,
+                               gpu->aspace, &a5xx_gpu->shadow_bo,
+                               &a5xx_gpu->shadow_iova);
 
-       gpu_write64(gpu, REG_A5XX_CP_PFP_INSTR_BASE_LO, a5xx_gpu->pfp_iova);
+                       if (IS_ERR(a5xx_gpu->shadow))
+                               return PTR_ERR(a5xx_gpu->shadow);
+
+                       msm_gem_object_set_name(a5xx_gpu->shadow_bo, "shadow");
+               }
+       } else if (gpu->nr_rings > 1) {
+               /* Disable preemption if WHERE_AM_I isn't available */
+               a5xx_preempt_fini(gpu);
+               gpu->nr_rings = 1;
+       }
 
        return 0;
 }
@@ -900,9 +915,8 @@ static int a5xx_hw_init(struct msm_gpu *gpu)
        if (adreno_is_a530(adreno_gpu) || adreno_is_a540(adreno_gpu))
                a5xx_gpmu_ucode_init(gpu);
 
-       ret = a5xx_ucode_init(gpu);
-       if (ret)
-               return ret;
+       gpu_write64(gpu, REG_A5XX_CP_ME_INSTR_BASE_LO, a5xx_gpu->pm4_iova);
+       gpu_write64(gpu, REG_A5XX_CP_PFP_INSTR_BASE_LO, a5xx_gpu->pfp_iova);
 
        /* Set the ringbuffer address */
        gpu_write64(gpu, REG_A5XX_CP_RB_BASE, gpu->rb[0]->iova);
@@ -916,27 +930,10 @@ static int a5xx_hw_init(struct msm_gpu *gpu)
        gpu_write(gpu, REG_A5XX_CP_RB_CNTL,
                MSM_GPU_RB_CNTL_DEFAULT | AXXX_CP_RB_CNTL_NO_UPDATE);
 
-       /* Create a privileged buffer for the RPTR shadow */
-       if (a5xx_gpu->has_whereami) {
-               if (!a5xx_gpu->shadow_bo) {
-                       a5xx_gpu->shadow = msm_gem_kernel_new(gpu->dev,
-                               sizeof(u32) * gpu->nr_rings,
-                               MSM_BO_WC | MSM_BO_MAP_PRIV,
-                               gpu->aspace, &a5xx_gpu->shadow_bo,
-                               &a5xx_gpu->shadow_iova);
-
-                       if (IS_ERR(a5xx_gpu->shadow))
-                               return PTR_ERR(a5xx_gpu->shadow);
-
-                       msm_gem_object_set_name(a5xx_gpu->shadow_bo, "shadow");
-               }
-
+       /* Configure the RPTR shadow if needed: */
+       if (a5xx_gpu->shadow_bo) {
                gpu_write64(gpu, REG_A5XX_CP_RB_RPTR_ADDR,
                            shadowptr(a5xx_gpu, gpu->rb[0]));
-       } else if (gpu->nr_rings > 1) {
-               /* Disable preemption if WHERE_AM_I isn't available */
-               a5xx_preempt_fini(gpu);
-               gpu->nr_rings = 1;
        }
 
        a5xx_preempt_hw_init(gpu);
@@ -1682,6 +1679,7 @@ static const struct adreno_gpu_funcs funcs = {
                .get_param = adreno_get_param,
                .set_param = adreno_set_param,
                .hw_init = a5xx_hw_init,
+               .ucode_load = a5xx_ucode_load,
                .pm_suspend = a5xx_pm_suspend,
                .pm_resume = a5xx_pm_resume,
                .recover = a5xx_recover,
index 1e09777cce3f7d01e6bc84385037a6dd294b535f..0f6ed7a3f71211be8c3b84ff4769ab0344cdfb2e 100644 (file)
@@ -917,7 +917,7 @@ out:
        return ret;
 }
 
-static int a6xx_ucode_init(struct msm_gpu *gpu)
+static int a6xx_ucode_load(struct msm_gpu *gpu)
 {
        struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
        struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
@@ -946,7 +946,23 @@ static int a6xx_ucode_init(struct msm_gpu *gpu)
                }
        }
 
-       gpu_write64(gpu, REG_A6XX_CP_SQE_INSTR_BASE, a6xx_gpu->sqe_iova);
+       /*
+        * Expanded APRIV and targets that support WHERE_AM_I both need a
+        * privileged buffer to store the RPTR shadow
+        */
+       if ((adreno_gpu->base.hw_apriv || a6xx_gpu->has_whereami) &&
+           !a6xx_gpu->shadow_bo) {
+               a6xx_gpu->shadow = msm_gem_kernel_new(gpu->dev,
+                                                     sizeof(u32) * gpu->nr_rings,
+                                                     MSM_BO_WC | MSM_BO_MAP_PRIV,
+                                                     gpu->aspace, &a6xx_gpu->shadow_bo,
+                                                     &a6xx_gpu->shadow_iova);
+
+               if (IS_ERR(a6xx_gpu->shadow))
+                       return PTR_ERR(a6xx_gpu->shadow);
+
+               msm_gem_object_set_name(a6xx_gpu->shadow_bo, "shadow");
+       }
 
        return 0;
 }
@@ -1132,9 +1148,7 @@ static int hw_init(struct msm_gpu *gpu)
        if (ret)
                goto out;
 
-       ret = a6xx_ucode_init(gpu);
-       if (ret)
-               goto out;
+       gpu_write64(gpu, REG_A6XX_CP_SQE_INSTR_BASE, a6xx_gpu->sqe_iova);
 
        /* Set the ringbuffer address */
        gpu_write64(gpu, REG_A6XX_CP_RB_BASE, gpu->rb[0]->iova);
@@ -1149,25 +1163,8 @@ static int hw_init(struct msm_gpu *gpu)
                gpu_write(gpu, REG_A6XX_CP_RB_CNTL,
                        MSM_GPU_RB_CNTL_DEFAULT | AXXX_CP_RB_CNTL_NO_UPDATE);
 
-       /*
-        * Expanded APRIV and targets that support WHERE_AM_I both need a
-        * privileged buffer to store the RPTR shadow
-        */
-
-       if (adreno_gpu->base.hw_apriv || a6xx_gpu->has_whereami) {
-               if (!a6xx_gpu->shadow_bo) {
-                       a6xx_gpu->shadow = msm_gem_kernel_new(gpu->dev,
-                               sizeof(u32) * gpu->nr_rings,
-                               MSM_BO_WC | MSM_BO_MAP_PRIV,
-                               gpu->aspace, &a6xx_gpu->shadow_bo,
-                               &a6xx_gpu->shadow_iova);
-
-                       if (IS_ERR(a6xx_gpu->shadow))
-                               return PTR_ERR(a6xx_gpu->shadow);
-
-                       msm_gem_object_set_name(a6xx_gpu->shadow_bo, "shadow");
-               }
-
+       /* Configure the RPTR shadow if needed: */
+       if (a6xx_gpu->shadow_bo) {
                gpu_write64(gpu, REG_A6XX_CP_RB_RPTR_ADDR,
                        shadowptr(a6xx_gpu, gpu->rb[0]));
        }
@@ -1958,6 +1955,7 @@ static const struct adreno_gpu_funcs funcs = {
                .get_param = adreno_get_param,
                .set_param = adreno_set_param,
                .hw_init = a6xx_hw_init,
+               .ucode_load = a6xx_ucode_load,
                .pm_suspend = a6xx_pm_suspend,
                .pm_resume = a6xx_pm_resume,
                .recover = a6xx_recover,
index 745f596827372f99300ab8454b27c6a60d49c04c..4d1448714285b185546b8c4919e15bcea8453653 100644 (file)
@@ -432,6 +432,12 @@ struct msm_gpu *adreno_load_gpu(struct drm_device *dev)
        if (ret)
                return NULL;
 
+       if (gpu->funcs->ucode_load) {
+               ret = gpu->funcs->ucode_load(gpu);
+               if (ret)
+                       return NULL;
+       }
+
        /*
         * Now that we have firmware loaded, and are ready to begin
         * booting the gpu, go ahead and enable runpm:
index 84c616b1ebc0e6705a12747f55b7e5f3702d11c2..7a4fa1b8655b38a80c39a236b62adadfbf497e52 100644 (file)
@@ -49,6 +49,12 @@ struct msm_gpu_funcs {
        int (*set_param)(struct msm_gpu *gpu, struct msm_file_private *ctx,
                         uint32_t param, uint64_t value, uint32_t len);
        int (*hw_init)(struct msm_gpu *gpu);
+
+       /**
+        * @ucode_load: Optional hook to upload fw to GEM objs
+        */
+       int (*ucode_load)(struct msm_gpu *gpu);
+
        int (*pm_suspend)(struct msm_gpu *gpu);
        int (*pm_resume)(struct msm_gpu *gpu);
        void (*submit)(struct msm_gpu *gpu, struct msm_gem_submit *submit);