drm/amd/display: Enable copying of bounding box data from VBIOS DMUB
authorAurabindo Pillai <aurabindo.pillai@amd.com>
Tue, 21 May 2024 19:46:31 +0000 (19:46 +0000)
committerAlex Deucher <alexander.deucher@amd.com>
Wed, 29 May 2024 18:40:40 +0000 (14:40 -0400)
Allocate some memory, send the address in chunks to DMUB, and finally
ask it to copy the bounding box data into the newly allocated memory.

Signed-off-by: Aurabindo Pillai <aurabindo.pillai@amd.com>
Acked-by: Harry Wentland <harry.wentland@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
drivers/gpu/drm/amd/display/dc/core/dc.c
drivers/gpu/drm/amd/display/dc/dc.h
drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.c
drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.h

index 6688b13d28e10645324b9b5a94db9c2a98671407..011981bee58c6f969ed452863624a84ad208b4a8 100644 (file)
@@ -1627,6 +1627,117 @@ static void retrieve_dmi_info(struct amdgpu_display_manager *dm)
        }
 }
 
+/**
+ * dm_allocate_gpu_mem - allocate a kernel BO and track it for teardown
+ * @adev: amdgpu device
+ * @type: DC_MEM_ALLOC_TYPE_GART selects the GTT domain, anything else VRAM
+ * @size: allocation size in bytes (rounded up to PAGE_SIZE alignment)
+ * @addr: out-parameter receiving the GPU virtual address of the buffer;
+ *        left untouched on failure
+ *
+ * The allocation is recorded on adev->dm.da_list so it can be released
+ * during driver teardown. Returns the CPU-visible pointer to the buffer,
+ * or NULL on allocation failure.
+ */
+void*
+dm_allocate_gpu_mem(
+               struct amdgpu_device *adev,
+               enum dc_gpu_mem_alloc_type type,
+               size_t size,
+               long long *addr)
+{
+       struct dal_allocation *da;
+       u32 domain = (type == DC_MEM_ALLOC_TYPE_GART) ?
+               AMDGPU_GEM_DOMAIN_GTT : AMDGPU_GEM_DOMAIN_VRAM;
+       int ret;
+
+       da = kzalloc(sizeof(*da), GFP_KERNEL);
+       if (!da)
+               return NULL;
+
+       ret = amdgpu_bo_create_kernel(adev, size, PAGE_SIZE,
+                                     domain, &da->bo,
+                                     &da->gpu_addr, &da->cpu_ptr);
+       if (ret) {
+               kfree(da);
+               return NULL;
+       }
+
+       /* Only publish the GPU address once the BO is known to be valid. */
+       *addr = da->gpu_addr;
+
+       /* add da to list in dm so the BO is freed on teardown */
+       list_add(&da->list, &adev->dm.da_list);
+
+       return da->cpu_ptr;
+}
+
+/*
+ * Synchronously send one GPINT command to the VBIOS-owned DMUB and busy-poll,
+ * for up to @timeout_us microseconds, until the firmware acknowledges it.
+ *
+ * Ack protocol: we write the command with the status bit set; DMUB clears
+ * the status bit while leaving command_code/param intact.  A readback equal
+ * to the written value with status cleared therefore means the command was
+ * accepted.
+ *
+ * NOTE(review): 0x34c0 + 0x01f8 appears to be the DMUB GPINT data scratch
+ * register offset for this ASIC family — confirm against the register
+ * headers before reuse on other IP versions.
+ */
+static enum dmub_status
+dm_dmub_send_vbios_gpint_command(struct amdgpu_device *adev,
+                                enum dmub_gpint_command command_code,
+                                uint16_t param,
+                                uint32_t timeout_us)
+{
+       union dmub_gpint_data_register reg, test;
+       uint32_t i;
+
+       /* Assume that VBIOS DMUB is ready to take commands */
+
+       reg.bits.status = 1;
+       reg.bits.command_code = command_code;
+       reg.bits.param = param;
+
+       /* Ring the doorbell: write command with status bit set. */
+       cgs_write_register(adev->dm.cgs_device, 0x34c0 + 0x01f8, reg.all);
+
+       for (i = 0; i < timeout_us; ++i) {
+               udelay(1);
+
+               /* Check if our GPINT got acked */
+               reg.bits.status = 0;
+               test = (union dmub_gpint_data_register)
+                       cgs_read_register(adev->dm.cgs_device, 0x34c0 + 0x01f8);
+
+               if (test.all == reg.all)
+                       return DMUB_STATUS_OK;
+       }
+
+       return DMUB_STATUS_TIMEOUT;
+}
+
+/*
+ * dm_dmub_get_vbios_bounding_box - copy SoC bounding box data from the
+ * VBIOS DMUB into a driver-owned GART buffer.
+ *
+ * Only DCN 4.0.1 carries a bounding box in the VBIOS DMUB.  The buffer is
+ * allocated through dm_allocate_gpu_mem() and is therefore BO memory
+ * tracked on adev->dm.da_list — it must be released through that list,
+ * not with kfree() (NOTE(review): dm_sw_fini currently kfree()s this
+ * pointer — verify the teardown path).
+ *
+ * Returns the CPU pointer to the populated bounding box, or NULL if the
+ * ASIC has none or the DMUB handshake failed.
+ */
+static struct dml2_soc_bb *dm_dmub_get_vbios_bounding_box(struct amdgpu_device *adev)
+{
+       struct dml2_soc_bb *bb;
+       long long addr;
+       unsigned int i;
+       uint16_t chunk;
+       static const enum dmub_gpint_command send_addrs[] = {
+               DMUB_GPINT__SET_BB_ADDR_WORD0,
+               DMUB_GPINT__SET_BB_ADDR_WORD1,
+               DMUB_GPINT__SET_BB_ADDR_WORD2,
+               DMUB_GPINT__SET_BB_ADDR_WORD3,
+       };
+       enum dmub_status ret;
+
+       switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) {
+       case IP_VERSION(4, 0, 1):
+               break;
+       default:
+               /* No VBIOS bounding box on other DCN generations. */
+               return NULL;
+       }
+
+       bb = dm_allocate_gpu_mem(adev,
+                                DC_MEM_ALLOC_TYPE_GART,
+                                sizeof(struct dml2_soc_bb),
+                                &addr);
+       if (!bb)
+               return NULL;
+
+       /* Hand the 64-bit GPU address to DMUB, 16 bits at a time. */
+       for (i = 0; i < ARRAY_SIZE(send_addrs); i++) {
+               chunk = ((uint64_t)addr >> (i * 16)) & 0xFFFF;
+               ret = dm_dmub_send_vbios_gpint_command(adev, send_addrs[i], chunk, 30000);
+               if (ret != DMUB_STATUS_OK)
+                       /* bb stays on da_list and is freed during teardown. */
+                       return NULL;
+       }
+
+       /* Now ask DMUB to copy the bounding box into the buffer. */
+       ret = dm_dmub_send_vbios_gpint_command(adev, DMUB_GPINT__BB_COPY, 1, 200000);
+       if (ret != DMUB_STATUS_OK)
+               return NULL;
+
+       return bb;
+}
+
 static int amdgpu_dm_init(struct amdgpu_device *adev)
 {
        struct dc_init_data init_data;
@@ -1748,6 +1859,11 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
 
        retrieve_dmi_info(&adev->dm);
 
+       if (adev->dm.bb_from_dmub)
+               init_data.bb_from_dmub = adev->dm.bb_from_dmub;
+       else
+               init_data.bb_from_dmub = NULL;
+
        /* Display Core create. */
        adev->dm.dc = dc_create(&init_data);
 
@@ -2305,6 +2421,8 @@ static int dm_dmub_sw_init(struct amdgpu_device *adev)
                return -EINVAL;
        }
 
+       adev->dm.bb_from_dmub = dm_dmub_get_vbios_bounding_box(adev);
+
        return 0;
 }
 
@@ -2334,6 +2452,9 @@ static int dm_sw_fini(void *handle)
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
+       kfree(adev->dm.bb_from_dmub);
+       adev->dm.bb_from_dmub = NULL;
+
        kfree(adev->dm.dmub_fb_info);
        adev->dm.dmub_fb_info = NULL;
 
index a01f3f5bf2c00bddb5eaba99e440c5861790418c..94fc4c15d2dbc0e94beb1802e650ddc55cd26e70 100644 (file)
@@ -578,6 +578,11 @@ struct amdgpu_display_manager {
         * Guards access to DPIA AUX
         */
        struct mutex dpia_aux_lock;
+
+       /*
+        * Bounding box data read from dmub during early initialization for DCN4+
+        */
+       struct dml2_soc_bb *bb_from_dmub;
 };
 
 enum dsc_clock_force_state {
@@ -964,4 +969,9 @@ amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
 
 int convert_dc_color_depth_into_bpc(enum dc_color_depth display_color_depth);
 struct idle_workqueue *idle_create_workqueue(struct amdgpu_device *adev);
+
+void *dm_allocate_gpu_mem(struct amdgpu_device *adev,
+                                                 enum dc_gpu_mem_alloc_type type,
+                                                 size_t size,
+                                                 long long *addr);
 #endif /* __AMDGPU_DM_H__ */
index 6d0f78b9ec0c2a5febed8ce4ffe9e4d45ba93dc4..8eb2f10f2c38bff121d2c20dd3da0e81dd737a4e 100644 (file)
@@ -1045,30 +1045,8 @@ void *dm_helpers_allocate_gpu_mem(
                long long *addr)
 {
        struct amdgpu_device *adev = ctx->driver_context;
-       struct dal_allocation *da;
-       u32 domain = (type == DC_MEM_ALLOC_TYPE_GART) ?
-               AMDGPU_GEM_DOMAIN_GTT : AMDGPU_GEM_DOMAIN_VRAM;
-       int ret;
-
-       da = kzalloc(sizeof(struct dal_allocation), GFP_KERNEL);
-       if (!da)
-               return NULL;
-
-       ret = amdgpu_bo_create_kernel(adev, size, PAGE_SIZE,
-                                     domain, &da->bo,
-                                     &da->gpu_addr, &da->cpu_ptr);
-
-       *addr = da->gpu_addr;
-
-       if (ret) {
-               kfree(da);
-               return NULL;
-       }
-
-       /* add da to list in dm */
-       list_add(&da->list, &adev->dm.da_list);
 
-       return da->cpu_ptr;
+       return dm_allocate_gpu_mem(adev, type, size, addr);
 }
 
 void dm_helpers_free_gpu_mem(
index 4612c60edebd1dcba1557e46cd1ba2d971b415c9..0d97611c4817435c90f30f2ab22f0a4104efeb10 100644 (file)
@@ -1014,6 +1014,11 @@ static bool dc_construct(struct dc *dc,
 
        dc->dcn_ip = dcn_ip;
 
+       if (init_params->bb_from_dmub)
+               dc->dml2_options.bb_from_dmub = init_params->bb_from_dmub;
+       else
+               dc->dml2_options.bb_from_dmub = NULL;
+
        if (!dc_construct_ctx(dc, init_params)) {
                dm_error("%s: failed to create ctx\n", __func__);
                goto fail;
index 31e3371b1b2ee31d4d7bd4635e2239c8a7fe0095..d0ed01ac460d3ea49d3fa80c86548c916bc189cc 100644 (file)
@@ -1067,6 +1067,8 @@ struct dchub_init_data {
        bool dchub_info_valid;
 };
 
+struct dml2_soc_bb;
+
 struct dc_init_data {
        struct hw_asic_id asic_id;
        void *driver; /* ctx */
@@ -1099,6 +1101,7 @@ struct dc_init_data {
        uint32_t *dcn_reg_offsets;
        uint32_t *nbio_reg_offsets;
        uint32_t *clk_reg_offsets;
+       struct dml2_soc_bb *bb_from_dmub;
 };
 
 struct dc_callback_init {
index 37998f2c0b14cb83b36fe6ebd924a78082cb80ad..9f641ffdc924fbcc95f1a13aaf5c4b9121686e5a 100644 (file)
@@ -26,7 +26,11 @@ static void dml21_init_socbb_params(struct dml2_initialize_instance_in_out *dml_
                break;
        case DCN_VERSION_4_01:
        default:
-               soc_bb = &dml2_socbb_dcn401;
+               if (config->bb_from_dmub)
+                       soc_bb = config->bb_from_dmub;
+               else
+                       soc_bb = &dml2_socbb_dcn401;
+
                qos_params = &dml_dcn401_soc_qos_params;
        }
 
index dcb4e6f4d9162779366cd0342e0aab1c51c64afa..20b3970c085719d531db4cb535105ecdbf9e4c93 100644 (file)
@@ -236,6 +236,7 @@ struct dml2_configuration_options {
 
        bool use_clock_dc_limits;
        bool gpuvm_enable;
+       struct dml2_soc_bb *bb_from_dmub;
 };
 
 /*