From: Revalla Hari Krishna Date: Thu, 18 Apr 2024 12:46:40 +0000 (+0530) Subject: drm/amd/display: Refactor HUBBUB into component folder X-Git-Tag: io_uring-6.11-20240722~49^2~25^2~534 X-Git-Url: https://git.kernel.dk/?a=commitdiff_plain;h=f9c7818c9d653e40dbd3c7e9c857e5b00dfca622;p=linux-block.git drm/amd/display: Refactor HUBBUB into component folder [why] cleaning up the code refactor requires hubbub to be in its own component. [how] Move all files under newly created hubbub folder and fix the makefiles. Reviewed-by: Martin Leung Acked-by: Wayne Lin Signed-off-by: Revalla Hari Krishna Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- diff --git a/drivers/gpu/drm/amd/display/Makefile b/drivers/gpu/drm/amd/display/Makefile index 9a5bcafbf730..839e71aa7d0c 100644 --- a/drivers/gpu/drm/amd/display/Makefile +++ b/drivers/gpu/drm/amd/display/Makefile @@ -34,6 +34,7 @@ subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/resource subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/dsc subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/optc subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/dpp +subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/hubbub subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/modules/inc subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/modules/freesync subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/modules/color diff --git a/drivers/gpu/drm/amd/display/dc/Makefile b/drivers/gpu/drm/amd/display/dc/Makefile index 8d963befc756..f1b0b1f66fb0 100644 --- a/drivers/gpu/drm/amd/display/dc/Makefile +++ b/drivers/gpu/drm/amd/display/dc/Makefile @@ -22,7 +22,7 @@ # # Makefile for Display Core (dc) component. -DC_LIBS = basics bios dml clk_mgr dce gpio hwss irq link virtual dsc resource optc dpp +DC_LIBS = basics bios dml clk_mgr dce gpio hwss irq link virtual dsc resource optc dpp hubbub ifdef CONFIG_DRM_AMD_DC_FP diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/Makefile b/drivers/gpu/drm/amd/display/dc/dcn10/Makefile index 8dc7938c36d8..508306baa65a 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/Makefile +++ b/drivers/gpu/drm/amd/display/dc/dcn10/Makefile @@ -27,7 +27,7 @@ DCN10 = dcn10_ipp.o \ dcn10_opp.o \ dcn10_hubp.o dcn10_mpc.o \ dcn10_cm_common.o \ - dcn10_hubbub.o dcn10_stream_encoder.o dcn10_link_encoder.o + dcn10_stream_encoder.o dcn10_link_encoder.o AMD_DAL_DCN10 = $(addprefix $(AMDDALPATH)/dc/dcn10/,$(DCN10)) diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c deleted file mode 100644 index 6dd355a03033..000000000000 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c +++ /dev/null @@ -1,964 +0,0 @@ -/* - * Copyright 2016 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - * Authors: AMD - * - */ - -#include "dm_services.h" -#include "dcn10_hubp.h" -#include "dcn10_hubbub.h" -#include "reg_helper.h" - -#define CTX \ - hubbub1->base.ctx -#define DC_LOGGER \ - hubbub1->base.ctx->logger -#define REG(reg)\ - hubbub1->regs->reg - -#undef FN -#define FN(reg_name, field_name) \ - hubbub1->shifts->field_name, hubbub1->masks->field_name - -void hubbub1_wm_read_state(struct hubbub *hubbub, - struct dcn_hubbub_wm *wm) -{ - struct dcn10_hubbub *hubbub1 = TO_DCN10_HUBBUB(hubbub); - struct dcn_hubbub_wm_set *s; - - memset(wm, 0, sizeof(struct dcn_hubbub_wm)); - - s = &wm->sets[0]; - s->wm_set = 0; - s->data_urgent = REG_READ(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A); - s->pte_meta_urgent = REG_READ(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_A); - if (REG(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A)) { - s->sr_enter = REG_READ(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A); - s->sr_exit = REG_READ(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A); - } - s->dram_clk_change = REG_READ(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A); - - s = &wm->sets[1]; - s->wm_set = 1; - s->data_urgent = REG_READ(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B); - s->pte_meta_urgent = REG_READ(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_B); - if (REG(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B)) { - s->sr_enter = REG_READ(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B); - s->sr_exit = REG_READ(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B); - } - s->dram_clk_change = REG_READ(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B); - - s = &wm->sets[2]; - s->wm_set = 2; - s->data_urgent = REG_READ(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C); - s->pte_meta_urgent = REG_READ(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_C); - if (REG(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C)) { - s->sr_enter = REG_READ(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C); - s->sr_exit = REG_READ(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C); - } - s->dram_clk_change = REG_READ(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C); - - s = &wm->sets[3]; - s->wm_set = 3; - s->data_urgent = REG_READ(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D); - s->pte_meta_urgent = REG_READ(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_D); - if (REG(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D)) { - s->sr_enter = REG_READ(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D); - s->sr_exit = REG_READ(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D); - } - s->dram_clk_change = REG_READ(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D); -} - -void hubbub1_allow_self_refresh_control(struct hubbub *hubbub, bool allow) -{ - struct dcn10_hubbub *hubbub1 = TO_DCN10_HUBBUB(hubbub); - /* - * DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_ENABLE = 1 means do not allow stutter - * DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_ENABLE = 0 means allow stutter - */ - - REG_UPDATE_2(DCHUBBUB_ARB_DRAM_STATE_CNTL, - DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_VALUE, 0, - DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_ENABLE, !allow); -} - -bool hubbub1_is_allow_self_refresh_enabled(struct hubbub *hubbub) -{ - struct dcn10_hubbub *hubbub1 = TO_DCN10_HUBBUB(hubbub); - uint32_t enable = 0; - - REG_GET(DCHUBBUB_ARB_DRAM_STATE_CNTL, - DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_ENABLE, &enable); - - return enable ? 
true : false; -} - - -bool hubbub1_verify_allow_pstate_change_high( - struct hubbub *hubbub) -{ - struct dcn10_hubbub *hubbub1 = TO_DCN10_HUBBUB(hubbub); - - /* pstate latency is ~20us so if we wait over 40us and pstate allow - * still not asserted, we are probably stuck and going to hang - * - * TODO: Figure out why it takes ~100us on linux - * pstate takes around ~100us (up to 200us) on linux. Unknown currently - * as to why it takes that long on linux - */ - const unsigned int pstate_wait_timeout_us = 200; - const unsigned int pstate_wait_expected_timeout_us = 180; - static unsigned int max_sampled_pstate_wait_us; /* data collection */ - static bool forced_pstate_allow; /* help with revert wa */ - - unsigned int debug_data = 0; - unsigned int i; - - if (forced_pstate_allow) { - /* we hacked to force pstate allow to prevent hang last time - * we verify_allow_pstate_change_high. so disable force - * here so we can check status - */ - REG_UPDATE_2(DCHUBBUB_ARB_DRAM_STATE_CNTL, - DCHUBBUB_ARB_ALLOW_PSTATE_CHANGE_FORCE_VALUE, 0, - DCHUBBUB_ARB_ALLOW_PSTATE_CHANGE_FORCE_ENABLE, 0); - forced_pstate_allow = false; - } - - /* The following table only applies to DCN1 and DCN2, - * for newer DCNs, need to consult with HW IP folks to read RTL - * HUBBUB:DCHUBBUB_TEST_ARB_DEBUG10 DCHUBBUBDEBUGIND:0xB - * description - * 0: Pipe0 Plane0 Allow Pstate Change - * 1: Pipe0 Plane1 Allow Pstate Change - * 2: Pipe0 Cursor0 Allow Pstate Change - * 3: Pipe0 Cursor1 Allow Pstate Change - * 4: Pipe1 Plane0 Allow Pstate Change - * 5: Pipe1 Plane1 Allow Pstate Change - * 6: Pipe1 Cursor0 Allow Pstate Change - * 7: Pipe1 Cursor1 Allow Pstate Change - * 8: Pipe2 Plane0 Allow Pstate Change - * 9: Pipe2 Plane1 Allow Pstate Change - * 10: Pipe2 Cursor0 Allow Pstate Change - * 11: Pipe2 Cursor1 Allow Pstate Change - * 12: Pipe3 Plane0 Allow Pstate Change - * 13: Pipe3 Plane1 Allow Pstate Change - * 14: Pipe3 Cursor0 Allow Pstate Change - * 15: Pipe3 Cursor1 Allow Pstate Change - * 16: Pipe4 Plane0 Allow Pstate Change - * 17: Pipe4 Plane1 Allow Pstate Change - * 18: Pipe4 Cursor0 Allow Pstate Change - * 19: Pipe4 Cursor1 Allow Pstate Change - * 20: Pipe5 Plane0 Allow Pstate Change - * 21: Pipe5 Plane1 Allow Pstate Change - * 22: Pipe5 Cursor0 Allow Pstate Change - * 23: Pipe5 Cursor1 Allow Pstate Change - * 24: Pipe6 Plane0 Allow Pstate Change - * 25: Pipe6 Plane1 Allow Pstate Change - * 26: Pipe6 Cursor0 Allow Pstate Change - * 27: Pipe6 Cursor1 Allow Pstate Change - * 28: WB0 Allow Pstate Change - * 29: WB1 Allow Pstate Change - * 30: Arbiter's allow_pstate_change - * 31: SOC pstate change request - */ - - REG_WRITE(DCHUBBUB_TEST_DEBUG_INDEX, hubbub1->debug_test_index_pstate); - - for (i = 0; i < pstate_wait_timeout_us; i++) { - debug_data = REG_READ(DCHUBBUB_TEST_DEBUG_DATA); - - if (debug_data & (1 << 30)) { - - if (i > pstate_wait_expected_timeout_us) - DC_LOG_WARNING("pstate took longer than expected ~%dus\n", - i); - - return true; - } - if (max_sampled_pstate_wait_us < i) - max_sampled_pstate_wait_us = i; - - udelay(1); - } - - /* force pstate allow to prevent system hang - * and break to debugger to investigate - */ - REG_UPDATE_2(DCHUBBUB_ARB_DRAM_STATE_CNTL, - DCHUBBUB_ARB_ALLOW_PSTATE_CHANGE_FORCE_VALUE, 1, - DCHUBBUB_ARB_ALLOW_PSTATE_CHANGE_FORCE_ENABLE, 1); - forced_pstate_allow = true; - - DC_LOG_WARNING("pstate TEST_DEBUG_DATA: 0x%X\n", - debug_data); - - return false; -} - -static uint32_t convert_and_clamp( - uint32_t wm_ns, - uint32_t refclk_mhz, - uint32_t clamp_value) -{ - uint32_t ret_val = 0; - 
ret_val = wm_ns * refclk_mhz; - ret_val /= 1000; - - if (ret_val > clamp_value) - ret_val = clamp_value; - - return ret_val; -} - - -void hubbub1_wm_change_req_wa(struct hubbub *hubbub) -{ - struct dcn10_hubbub *hubbub1 = TO_DCN10_HUBBUB(hubbub); - - REG_UPDATE_SEQ_2(DCHUBBUB_ARB_WATERMARK_CHANGE_CNTL, - DCHUBBUB_ARB_WATERMARK_CHANGE_REQUEST, 0, - DCHUBBUB_ARB_WATERMARK_CHANGE_REQUEST, 1); -} - -bool hubbub1_program_urgent_watermarks( - struct hubbub *hubbub, - union dcn_watermark_set *watermarks, - unsigned int refclk_mhz, - bool safe_to_lower) -{ - struct dcn10_hubbub *hubbub1 = TO_DCN10_HUBBUB(hubbub); - uint32_t prog_wm_value; - bool wm_pending = false; - - /* Repeat for water mark set A, B, C and D. */ - /* clock state A */ - if (safe_to_lower || watermarks->a.urgent_ns > hubbub1->watermarks.a.urgent_ns) { - hubbub1->watermarks.a.urgent_ns = watermarks->a.urgent_ns; - prog_wm_value = convert_and_clamp(watermarks->a.urgent_ns, - refclk_mhz, 0x1fffff); - REG_SET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A, 0, - DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A, prog_wm_value); - - DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_A calculated =%d\n" - "HW register value = 0x%x\n", - watermarks->a.urgent_ns, prog_wm_value); - } else if (watermarks->a.urgent_ns < hubbub1->watermarks.a.urgent_ns) - wm_pending = true; - - if (safe_to_lower || watermarks->a.pte_meta_urgent_ns > hubbub1->watermarks.a.pte_meta_urgent_ns) { - hubbub1->watermarks.a.pte_meta_urgent_ns = watermarks->a.pte_meta_urgent_ns; - prog_wm_value = convert_and_clamp(watermarks->a.pte_meta_urgent_ns, - refclk_mhz, 0x1fffff); - REG_WRITE(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_A, prog_wm_value); - DC_LOG_BANDWIDTH_CALCS("PTE_META_URGENCY_WATERMARK_A calculated =%d\n" - "HW register value = 0x%x\n", - watermarks->a.pte_meta_urgent_ns, prog_wm_value); - } else if (watermarks->a.pte_meta_urgent_ns < hubbub1->watermarks.a.pte_meta_urgent_ns) - wm_pending = true; - - /* clock state B */ - if (safe_to_lower || watermarks->b.urgent_ns > hubbub1->watermarks.b.urgent_ns) { - hubbub1->watermarks.b.urgent_ns = watermarks->b.urgent_ns; - prog_wm_value = convert_and_clamp(watermarks->b.urgent_ns, - refclk_mhz, 0x1fffff); - REG_SET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B, 0, - DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B, prog_wm_value); - - DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_B calculated =%d\n" - "HW register value = 0x%x\n", - watermarks->b.urgent_ns, prog_wm_value); - } else if (watermarks->b.urgent_ns < hubbub1->watermarks.b.urgent_ns) - wm_pending = true; - - if (safe_to_lower || watermarks->b.pte_meta_urgent_ns > hubbub1->watermarks.b.pte_meta_urgent_ns) { - hubbub1->watermarks.b.pte_meta_urgent_ns = watermarks->b.pte_meta_urgent_ns; - prog_wm_value = convert_and_clamp(watermarks->b.pte_meta_urgent_ns, - refclk_mhz, 0x1fffff); - REG_WRITE(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_B, prog_wm_value); - DC_LOG_BANDWIDTH_CALCS("PTE_META_URGENCY_WATERMARK_B calculated =%d\n" - "HW register value = 0x%x\n", - watermarks->b.pte_meta_urgent_ns, prog_wm_value); - } else if (watermarks->b.pte_meta_urgent_ns < hubbub1->watermarks.b.pte_meta_urgent_ns) - wm_pending = true; - - /* clock state C */ - if (safe_to_lower || watermarks->c.urgent_ns > hubbub1->watermarks.c.urgent_ns) { - hubbub1->watermarks.c.urgent_ns = watermarks->c.urgent_ns; - prog_wm_value = convert_and_clamp(watermarks->c.urgent_ns, - refclk_mhz, 0x1fffff); - REG_SET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C, 0, - DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C, prog_wm_value); - - 
DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_C calculated =%d\n" - "HW register value = 0x%x\n", - watermarks->c.urgent_ns, prog_wm_value); - } else if (watermarks->c.urgent_ns < hubbub1->watermarks.c.urgent_ns) - wm_pending = true; - - if (safe_to_lower || watermarks->c.pte_meta_urgent_ns > hubbub1->watermarks.c.pte_meta_urgent_ns) { - hubbub1->watermarks.c.pte_meta_urgent_ns = watermarks->c.pte_meta_urgent_ns; - prog_wm_value = convert_and_clamp(watermarks->c.pte_meta_urgent_ns, - refclk_mhz, 0x1fffff); - REG_WRITE(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_C, prog_wm_value); - DC_LOG_BANDWIDTH_CALCS("PTE_META_URGENCY_WATERMARK_C calculated =%d\n" - "HW register value = 0x%x\n", - watermarks->c.pte_meta_urgent_ns, prog_wm_value); - } else if (watermarks->c.pte_meta_urgent_ns < hubbub1->watermarks.c.pte_meta_urgent_ns) - wm_pending = true; - - /* clock state D */ - if (safe_to_lower || watermarks->d.urgent_ns > hubbub1->watermarks.d.urgent_ns) { - hubbub1->watermarks.d.urgent_ns = watermarks->d.urgent_ns; - prog_wm_value = convert_and_clamp(watermarks->d.urgent_ns, - refclk_mhz, 0x1fffff); - REG_SET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D, 0, - DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D, prog_wm_value); - - DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_D calculated =%d\n" - "HW register value = 0x%x\n", - watermarks->d.urgent_ns, prog_wm_value); - } else if (watermarks->d.urgent_ns < hubbub1->watermarks.d.urgent_ns) - wm_pending = true; - - if (safe_to_lower || watermarks->d.pte_meta_urgent_ns > hubbub1->watermarks.d.pte_meta_urgent_ns) { - hubbub1->watermarks.d.pte_meta_urgent_ns = watermarks->d.pte_meta_urgent_ns; - prog_wm_value = convert_and_clamp(watermarks->d.pte_meta_urgent_ns, - refclk_mhz, 0x1fffff); - REG_WRITE(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_D, prog_wm_value); - DC_LOG_BANDWIDTH_CALCS("PTE_META_URGENCY_WATERMARK_D calculated =%d\n" - "HW register value = 0x%x\n", - watermarks->d.pte_meta_urgent_ns, prog_wm_value); - } else if (watermarks->d.pte_meta_urgent_ns < hubbub1->watermarks.d.pte_meta_urgent_ns) - wm_pending = true; - - return wm_pending; -} - -bool hubbub1_program_stutter_watermarks( - struct hubbub *hubbub, - union dcn_watermark_set *watermarks, - unsigned int refclk_mhz, - bool safe_to_lower) -{ - struct dcn10_hubbub *hubbub1 = TO_DCN10_HUBBUB(hubbub); - uint32_t prog_wm_value; - bool wm_pending = false; - - /* clock state A */ - if (safe_to_lower || watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns - > hubbub1->watermarks.a.cstate_pstate.cstate_enter_plus_exit_ns) { - hubbub1->watermarks.a.cstate_pstate.cstate_enter_plus_exit_ns = - watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns; - prog_wm_value = convert_and_clamp( - watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns, - refclk_mhz, 0x1fffff); - REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A, 0, - DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A, prog_wm_value); - DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_A calculated =%d\n" - "HW register value = 0x%x\n", - watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value); - } else if (watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns - < hubbub1->watermarks.a.cstate_pstate.cstate_enter_plus_exit_ns) - wm_pending = true; - - if (safe_to_lower || watermarks->a.cstate_pstate.cstate_exit_ns - > hubbub1->watermarks.a.cstate_pstate.cstate_exit_ns) { - hubbub1->watermarks.a.cstate_pstate.cstate_exit_ns = - watermarks->a.cstate_pstate.cstate_exit_ns; - prog_wm_value = convert_and_clamp( - watermarks->a.cstate_pstate.cstate_exit_ns, - refclk_mhz, 0x1fffff); - 
REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A, 0, - DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A, prog_wm_value); - DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_A calculated =%d\n" - "HW register value = 0x%x\n", - watermarks->a.cstate_pstate.cstate_exit_ns, prog_wm_value); - } else if (watermarks->a.cstate_pstate.cstate_exit_ns - < hubbub1->watermarks.a.cstate_pstate.cstate_exit_ns) - wm_pending = true; - - /* clock state B */ - if (safe_to_lower || watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns - > hubbub1->watermarks.b.cstate_pstate.cstate_enter_plus_exit_ns) { - hubbub1->watermarks.b.cstate_pstate.cstate_enter_plus_exit_ns = - watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns; - prog_wm_value = convert_and_clamp( - watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns, - refclk_mhz, 0x1fffff); - REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B, 0, - DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B, prog_wm_value); - DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_B calculated =%d\n" - "HW register value = 0x%x\n", - watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value); - } else if (watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns - < hubbub1->watermarks.b.cstate_pstate.cstate_enter_plus_exit_ns) - wm_pending = true; - - if (safe_to_lower || watermarks->b.cstate_pstate.cstate_exit_ns - > hubbub1->watermarks.b.cstate_pstate.cstate_exit_ns) { - hubbub1->watermarks.b.cstate_pstate.cstate_exit_ns = - watermarks->b.cstate_pstate.cstate_exit_ns; - prog_wm_value = convert_and_clamp( - watermarks->b.cstate_pstate.cstate_exit_ns, - refclk_mhz, 0x1fffff); - REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B, 0, - DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B, prog_wm_value); - DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_B calculated =%d\n" - "HW register value = 0x%x\n", - watermarks->b.cstate_pstate.cstate_exit_ns, prog_wm_value); - } else if (watermarks->b.cstate_pstate.cstate_exit_ns - < hubbub1->watermarks.b.cstate_pstate.cstate_exit_ns) - wm_pending = true; - - /* clock state C */ - if (safe_to_lower || watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns - > hubbub1->watermarks.c.cstate_pstate.cstate_enter_plus_exit_ns) { - hubbub1->watermarks.c.cstate_pstate.cstate_enter_plus_exit_ns = - watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns; - prog_wm_value = convert_and_clamp( - watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns, - refclk_mhz, 0x1fffff); - REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C, 0, - DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C, prog_wm_value); - DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_C calculated =%d\n" - "HW register value = 0x%x\n", - watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value); - } else if (watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns - < hubbub1->watermarks.c.cstate_pstate.cstate_enter_plus_exit_ns) - wm_pending = true; - - if (safe_to_lower || watermarks->c.cstate_pstate.cstate_exit_ns - > hubbub1->watermarks.c.cstate_pstate.cstate_exit_ns) { - hubbub1->watermarks.c.cstate_pstate.cstate_exit_ns = - watermarks->c.cstate_pstate.cstate_exit_ns; - prog_wm_value = convert_and_clamp( - watermarks->c.cstate_pstate.cstate_exit_ns, - refclk_mhz, 0x1fffff); - REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C, 0, - DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C, prog_wm_value); - DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_C calculated =%d\n" - "HW register value = 0x%x\n", - watermarks->c.cstate_pstate.cstate_exit_ns, prog_wm_value); - } else if (watermarks->c.cstate_pstate.cstate_exit_ns - < 
hubbub1->watermarks.c.cstate_pstate.cstate_exit_ns) - wm_pending = true; - - /* clock state D */ - if (safe_to_lower || watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns - > hubbub1->watermarks.d.cstate_pstate.cstate_enter_plus_exit_ns) { - hubbub1->watermarks.d.cstate_pstate.cstate_enter_plus_exit_ns = - watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns; - prog_wm_value = convert_and_clamp( - watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns, - refclk_mhz, 0x1fffff); - REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D, 0, - DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D, prog_wm_value); - DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_D calculated =%d\n" - "HW register value = 0x%x\n", - watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value); - } else if (watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns - < hubbub1->watermarks.d.cstate_pstate.cstate_enter_plus_exit_ns) - wm_pending = true; - - if (safe_to_lower || watermarks->d.cstate_pstate.cstate_exit_ns - > hubbub1->watermarks.d.cstate_pstate.cstate_exit_ns) { - hubbub1->watermarks.d.cstate_pstate.cstate_exit_ns = - watermarks->d.cstate_pstate.cstate_exit_ns; - prog_wm_value = convert_and_clamp( - watermarks->d.cstate_pstate.cstate_exit_ns, - refclk_mhz, 0x1fffff); - REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D, 0, - DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D, prog_wm_value); - DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_D calculated =%d\n" - "HW register value = 0x%x\n", - watermarks->d.cstate_pstate.cstate_exit_ns, prog_wm_value); - } else if (watermarks->d.cstate_pstate.cstate_exit_ns - < hubbub1->watermarks.d.cstate_pstate.cstate_exit_ns) - wm_pending = true; - - return wm_pending; -} - -bool hubbub1_program_pstate_watermarks( - struct hubbub *hubbub, - union dcn_watermark_set *watermarks, - unsigned int refclk_mhz, - bool safe_to_lower) -{ - struct dcn10_hubbub *hubbub1 = TO_DCN10_HUBBUB(hubbub); - uint32_t prog_wm_value; - bool wm_pending = false; - - /* clock state A */ - if (safe_to_lower || watermarks->a.cstate_pstate.pstate_change_ns - > hubbub1->watermarks.a.cstate_pstate.pstate_change_ns) { - hubbub1->watermarks.a.cstate_pstate.pstate_change_ns = - watermarks->a.cstate_pstate.pstate_change_ns; - prog_wm_value = convert_and_clamp( - watermarks->a.cstate_pstate.pstate_change_ns, - refclk_mhz, 0x1fffff); - REG_SET(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A, 0, - DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A, prog_wm_value); - DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_A calculated =%d\n" - "HW register value = 0x%x\n\n", - watermarks->a.cstate_pstate.pstate_change_ns, prog_wm_value); - } else if (watermarks->a.cstate_pstate.pstate_change_ns - < hubbub1->watermarks.a.cstate_pstate.pstate_change_ns) - wm_pending = true; - - /* clock state B */ - if (safe_to_lower || watermarks->b.cstate_pstate.pstate_change_ns - > hubbub1->watermarks.b.cstate_pstate.pstate_change_ns) { - hubbub1->watermarks.b.cstate_pstate.pstate_change_ns = - watermarks->b.cstate_pstate.pstate_change_ns; - prog_wm_value = convert_and_clamp( - watermarks->b.cstate_pstate.pstate_change_ns, - refclk_mhz, 0x1fffff); - REG_SET(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B, 0, - DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B, prog_wm_value); - DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_B calculated =%d\n" - "HW register value = 0x%x\n\n", - watermarks->b.cstate_pstate.pstate_change_ns, prog_wm_value); - } else if (watermarks->b.cstate_pstate.pstate_change_ns - < hubbub1->watermarks.b.cstate_pstate.pstate_change_ns) - wm_pending 
= true; - - /* clock state C */ - if (safe_to_lower || watermarks->c.cstate_pstate.pstate_change_ns - > hubbub1->watermarks.c.cstate_pstate.pstate_change_ns) { - hubbub1->watermarks.c.cstate_pstate.pstate_change_ns = - watermarks->c.cstate_pstate.pstate_change_ns; - prog_wm_value = convert_and_clamp( - watermarks->c.cstate_pstate.pstate_change_ns, - refclk_mhz, 0x1fffff); - REG_SET(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C, 0, - DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C, prog_wm_value); - DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_C calculated =%d\n" - "HW register value = 0x%x\n\n", - watermarks->c.cstate_pstate.pstate_change_ns, prog_wm_value); - } else if (watermarks->c.cstate_pstate.pstate_change_ns - < hubbub1->watermarks.c.cstate_pstate.pstate_change_ns) - wm_pending = true; - - /* clock state D */ - if (safe_to_lower || watermarks->d.cstate_pstate.pstate_change_ns - > hubbub1->watermarks.d.cstate_pstate.pstate_change_ns) { - hubbub1->watermarks.d.cstate_pstate.pstate_change_ns = - watermarks->d.cstate_pstate.pstate_change_ns; - prog_wm_value = convert_and_clamp( - watermarks->d.cstate_pstate.pstate_change_ns, - refclk_mhz, 0x1fffff); - REG_SET(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D, 0, - DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D, prog_wm_value); - DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_D calculated =%d\n" - "HW register value = 0x%x\n\n", - watermarks->d.cstate_pstate.pstate_change_ns, prog_wm_value); - } else if (watermarks->d.cstate_pstate.pstate_change_ns - < hubbub1->watermarks.d.cstate_pstate.pstate_change_ns) - wm_pending = true; - - return wm_pending; -} - -bool hubbub1_program_watermarks( - struct hubbub *hubbub, - union dcn_watermark_set *watermarks, - unsigned int refclk_mhz, - bool safe_to_lower) -{ - struct dcn10_hubbub *hubbub1 = TO_DCN10_HUBBUB(hubbub); - bool wm_pending = false; - /* - * Need to clamp to max of the register values (i.e. 
no wrap) - * for dcn1, all wm registers are 21-bit wide - */ - if (hubbub1_program_urgent_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower)) - wm_pending = true; - - if (hubbub1_program_stutter_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower)) - wm_pending = true; - - if (hubbub1_program_pstate_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower)) - wm_pending = true; - - REG_UPDATE(DCHUBBUB_ARB_SAT_LEVEL, - DCHUBBUB_ARB_SAT_LEVEL, 60 * refclk_mhz); - REG_UPDATE(DCHUBBUB_ARB_DF_REQ_OUTSTAND, - DCHUBBUB_ARB_MIN_REQ_OUTSTAND, 68); - - hubbub1_allow_self_refresh_control(hubbub, !hubbub->ctx->dc->debug.disable_stutter); - -#if 0 - REG_UPDATE_2(DCHUBBUB_ARB_WATERMARK_CHANGE_CNTL, - DCHUBBUB_ARB_WATERMARK_CHANGE_DONE_INTERRUPT_DISABLE, 1, - DCHUBBUB_ARB_WATERMARK_CHANGE_REQUEST, 1); -#endif - return wm_pending; -} - -void hubbub1_update_dchub( - struct hubbub *hubbub, - struct dchub_init_data *dh_data) -{ - struct dcn10_hubbub *hubbub1 = TO_DCN10_HUBBUB(hubbub); - - if (REG(DCHUBBUB_SDPIF_FB_TOP) == 0) { - ASSERT(false); - /*should not come here*/ - return; - } - /* TODO: port code from dal2 */ - switch (dh_data->fb_mode) { - case FRAME_BUFFER_MODE_ZFB_ONLY: - /*For ZFB case need to put DCHUB FB BASE and TOP upside down to indicate ZFB mode*/ - REG_UPDATE(DCHUBBUB_SDPIF_FB_TOP, - SDPIF_FB_TOP, 0); - - REG_UPDATE(DCHUBBUB_SDPIF_FB_BASE, - SDPIF_FB_BASE, 0x0FFFF); - - REG_UPDATE(DCHUBBUB_SDPIF_AGP_BASE, - SDPIF_AGP_BASE, dh_data->zfb_phys_addr_base >> 22); - - REG_UPDATE(DCHUBBUB_SDPIF_AGP_BOT, - SDPIF_AGP_BOT, dh_data->zfb_mc_base_addr >> 22); - - REG_UPDATE(DCHUBBUB_SDPIF_AGP_TOP, - SDPIF_AGP_TOP, (dh_data->zfb_mc_base_addr + - dh_data->zfb_size_in_byte - 1) >> 22); - break; - case FRAME_BUFFER_MODE_MIXED_ZFB_AND_LOCAL: - /*Should not touch FB LOCATION (done by VBIOS on AsicInit table)*/ - - REG_UPDATE(DCHUBBUB_SDPIF_AGP_BASE, - SDPIF_AGP_BASE, dh_data->zfb_phys_addr_base >> 22); - - REG_UPDATE(DCHUBBUB_SDPIF_AGP_BOT, - SDPIF_AGP_BOT, dh_data->zfb_mc_base_addr >> 22); - - REG_UPDATE(DCHUBBUB_SDPIF_AGP_TOP, - SDPIF_AGP_TOP, (dh_data->zfb_mc_base_addr + - dh_data->zfb_size_in_byte - 1) >> 22); - break; - case FRAME_BUFFER_MODE_LOCAL_ONLY: - /*Should not touch FB LOCATION (done by VBIOS on AsicInit table)*/ - REG_UPDATE(DCHUBBUB_SDPIF_AGP_BASE, - SDPIF_AGP_BASE, 0); - - REG_UPDATE(DCHUBBUB_SDPIF_AGP_BOT, - SDPIF_AGP_BOT, 0X03FFFF); - - REG_UPDATE(DCHUBBUB_SDPIF_AGP_TOP, - SDPIF_AGP_TOP, 0); - break; - default: - break; - } - - dh_data->dchub_initialzied = true; - dh_data->dchub_info_valid = false; -} - -void hubbub1_toggle_watermark_change_req(struct hubbub *hubbub) -{ - struct dcn10_hubbub *hubbub1 = TO_DCN10_HUBBUB(hubbub); - - uint32_t watermark_change_req; - - REG_GET(DCHUBBUB_ARB_WATERMARK_CHANGE_CNTL, - DCHUBBUB_ARB_WATERMARK_CHANGE_REQUEST, &watermark_change_req); - - if (watermark_change_req) - watermark_change_req = 0; - else - watermark_change_req = 1; - - REG_UPDATE(DCHUBBUB_ARB_WATERMARK_CHANGE_CNTL, - DCHUBBUB_ARB_WATERMARK_CHANGE_REQUEST, watermark_change_req); -} - -void hubbub1_soft_reset(struct hubbub *hubbub, bool reset) -{ - struct dcn10_hubbub *hubbub1 = TO_DCN10_HUBBUB(hubbub); - - uint32_t reset_en = reset ? 
1 : 0; - - REG_UPDATE(DCHUBBUB_SOFT_RESET, - DCHUBBUB_GLOBAL_SOFT_RESET, reset_en); -} - -static bool hubbub1_dcc_support_swizzle( - enum swizzle_mode_values swizzle, - unsigned int bytes_per_element, - enum segment_order *segment_order_horz, - enum segment_order *segment_order_vert) -{ - bool standard_swizzle = false; - bool display_swizzle = false; - - switch (swizzle) { - case DC_SW_4KB_S: - case DC_SW_64KB_S: - case DC_SW_VAR_S: - case DC_SW_4KB_S_X: - case DC_SW_64KB_S_X: - case DC_SW_VAR_S_X: - standard_swizzle = true; - break; - case DC_SW_4KB_D: - case DC_SW_64KB_D: - case DC_SW_VAR_D: - case DC_SW_4KB_D_X: - case DC_SW_64KB_D_X: - case DC_SW_VAR_D_X: - display_swizzle = true; - break; - default: - break; - } - - if (bytes_per_element == 1 && standard_swizzle) { - *segment_order_horz = segment_order__contiguous; - *segment_order_vert = segment_order__na; - return true; - } - if (bytes_per_element == 2 && standard_swizzle) { - *segment_order_horz = segment_order__non_contiguous; - *segment_order_vert = segment_order__contiguous; - return true; - } - if (bytes_per_element == 4 && standard_swizzle) { - *segment_order_horz = segment_order__non_contiguous; - *segment_order_vert = segment_order__contiguous; - return true; - } - if (bytes_per_element == 8 && standard_swizzle) { - *segment_order_horz = segment_order__na; - *segment_order_vert = segment_order__contiguous; - return true; - } - if (bytes_per_element == 8 && display_swizzle) { - *segment_order_horz = segment_order__contiguous; - *segment_order_vert = segment_order__non_contiguous; - return true; - } - - return false; -} - -static bool hubbub1_dcc_support_pixel_format( - enum surface_pixel_format format, - unsigned int *bytes_per_element) -{ - /* DML: get_bytes_per_element */ - switch (format) { - case SURFACE_PIXEL_FORMAT_GRPH_ARGB1555: - case SURFACE_PIXEL_FORMAT_GRPH_RGB565: - *bytes_per_element = 2; - return true; - case SURFACE_PIXEL_FORMAT_GRPH_ARGB8888: - case SURFACE_PIXEL_FORMAT_GRPH_ABGR8888: - case SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010: - case SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010: - *bytes_per_element = 4; - return true; - case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616: - case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616: - case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F: - case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F: - *bytes_per_element = 8; - return true; - default: - return false; - } -} - -static void hubbub1_get_blk256_size(unsigned int *blk256_width, unsigned int *blk256_height, - unsigned int bytes_per_element) -{ - /* copied from DML. 
might want to refactor DML to leverage from DML */ - /* DML : get_blk256_size */ - if (bytes_per_element == 1) { - *blk256_width = 16; - *blk256_height = 16; - } else if (bytes_per_element == 2) { - *blk256_width = 16; - *blk256_height = 8; - } else if (bytes_per_element == 4) { - *blk256_width = 8; - *blk256_height = 8; - } else if (bytes_per_element == 8) { - *blk256_width = 8; - *blk256_height = 4; - } -} - -static void hubbub1_det_request_size( - unsigned int height, - unsigned int width, - unsigned int bpe, - bool *req128_horz_wc, - bool *req128_vert_wc) -{ - unsigned int detile_buf_size = 164 * 1024; /* 164KB for DCN1.0 */ - - unsigned int blk256_height = 0; - unsigned int blk256_width = 0; - unsigned int swath_bytes_horz_wc, swath_bytes_vert_wc; - - hubbub1_get_blk256_size(&blk256_width, &blk256_height, bpe); - - swath_bytes_horz_wc = width * blk256_height * bpe; - swath_bytes_vert_wc = height * blk256_width * bpe; - - *req128_horz_wc = (2 * swath_bytes_horz_wc <= detile_buf_size) ? - false : /* full 256B request */ - true; /* half 128b request */ - - *req128_vert_wc = (2 * swath_bytes_vert_wc <= detile_buf_size) ? - false : /* full 256B request */ - true; /* half 128b request */ -} - -static bool hubbub1_get_dcc_compression_cap(struct hubbub *hubbub, - const struct dc_dcc_surface_param *input, - struct dc_surface_dcc_cap *output) -{ - struct dcn10_hubbub *hubbub1 = TO_DCN10_HUBBUB(hubbub); - struct dc *dc = hubbub1->base.ctx->dc; - - /* implement section 1.6.2.1 of DCN1_Programming_Guide.docx */ - enum dcc_control dcc_control; - unsigned int bpe; - enum segment_order segment_order_horz, segment_order_vert; - bool req128_horz_wc, req128_vert_wc; - - memset(output, 0, sizeof(*output)); - - if (dc->debug.disable_dcc == DCC_DISABLE) - return false; - - if (!hubbub1->base.funcs->dcc_support_pixel_format(input->format, &bpe)) - return false; - - if (!hubbub1->base.funcs->dcc_support_swizzle(input->swizzle_mode, bpe, - &segment_order_horz, &segment_order_vert)) - return false; - - hubbub1_det_request_size(input->surface_size.height, input->surface_size.width, - bpe, &req128_horz_wc, &req128_vert_wc); - - if (!req128_horz_wc && !req128_vert_wc) { - dcc_control = dcc_control__256_256_xxx; - } else if (input->scan == SCAN_DIRECTION_HORIZONTAL) { - if (!req128_horz_wc) - dcc_control = dcc_control__256_256_xxx; - else if (segment_order_horz == segment_order__contiguous) - dcc_control = dcc_control__128_128_xxx; - else - dcc_control = dcc_control__256_64_64; - } else if (input->scan == SCAN_DIRECTION_VERTICAL) { - if (!req128_vert_wc) - dcc_control = dcc_control__256_256_xxx; - else if (segment_order_vert == segment_order__contiguous) - dcc_control = dcc_control__128_128_xxx; - else - dcc_control = dcc_control__256_64_64; - } else { - if ((req128_horz_wc && - segment_order_horz == segment_order__non_contiguous) || - (req128_vert_wc && - segment_order_vert == segment_order__non_contiguous)) - /* access_dir not known, must use most constraining */ - dcc_control = dcc_control__256_64_64; - else - /* reg128 is true for either horz and vert - * but segment_order is contiguous - */ - dcc_control = dcc_control__128_128_xxx; - } - - if (dc->debug.disable_dcc == DCC_HALF_REQ_DISALBE && - dcc_control != dcc_control__256_256_xxx) - return false; - - switch (dcc_control) { - case dcc_control__256_256_xxx: - output->grph.rgb.max_uncompressed_blk_size = 256; - output->grph.rgb.max_compressed_blk_size = 256; - output->grph.rgb.independent_64b_blks = false; - break; - case dcc_control__128_128_xxx: - 
output->grph.rgb.max_uncompressed_blk_size = 128; - output->grph.rgb.max_compressed_blk_size = 128; - output->grph.rgb.independent_64b_blks = false; - break; - case dcc_control__256_64_64: - output->grph.rgb.max_uncompressed_blk_size = 256; - output->grph.rgb.max_compressed_blk_size = 64; - output->grph.rgb.independent_64b_blks = true; - break; - default: - ASSERT(false); - break; - } - - output->capable = true; - output->const_color_support = false; - - return true; -} - -static const struct hubbub_funcs hubbub1_funcs = { - .update_dchub = hubbub1_update_dchub, - .dcc_support_swizzle = hubbub1_dcc_support_swizzle, - .dcc_support_pixel_format = hubbub1_dcc_support_pixel_format, - .get_dcc_compression_cap = hubbub1_get_dcc_compression_cap, - .wm_read_state = hubbub1_wm_read_state, - .program_watermarks = hubbub1_program_watermarks, - .is_allow_self_refresh_enabled = hubbub1_is_allow_self_refresh_enabled, - .allow_self_refresh_control = hubbub1_allow_self_refresh_control, - .verify_allow_pstate_change_high = hubbub1_verify_allow_pstate_change_high, -}; - -void hubbub1_construct(struct hubbub *hubbub, - struct dc_context *ctx, - const struct dcn_hubbub_registers *hubbub_regs, - const struct dcn_hubbub_shift *hubbub_shift, - const struct dcn_hubbub_mask *hubbub_mask) -{ - struct dcn10_hubbub *hubbub1 = TO_DCN10_HUBBUB(hubbub); - - hubbub1->base.ctx = ctx; - - hubbub1->base.funcs = &hubbub1_funcs; - - hubbub1->regs = hubbub_regs; - hubbub1->shifts = hubbub_shift; - hubbub1->masks = hubbub_mask; - - hubbub1->debug_test_index_pstate = 0x7; - if (ctx->dce_version == DCN_VERSION_1_01) - hubbub1->debug_test_index_pstate = 0xB; -} - diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.h deleted file mode 100644 index a1e2cde9c4cc..000000000000 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.h +++ /dev/null @@ -1,508 +0,0 @@ -/* - * Copyright 2016 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. 
- * - * Authors: AMD - * - */ - -#ifndef __DC_HUBBUB_DCN10_H__ -#define __DC_HUBBUB_DCN10_H__ - -#include "core_types.h" -#include "dchubbub.h" - -#define TO_DCN10_HUBBUB(hubbub)\ - container_of(hubbub, struct dcn10_hubbub, base) - -#define HUBBUB_REG_LIST_DCN_COMMON()\ - SR(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A),\ - SR(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A),\ - SR(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B),\ - SR(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B),\ - SR(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C),\ - SR(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C),\ - SR(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D),\ - SR(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D),\ - SR(DCHUBBUB_ARB_WATERMARK_CHANGE_CNTL),\ - SR(DCHUBBUB_ARB_DRAM_STATE_CNTL),\ - SR(DCHUBBUB_ARB_SAT_LEVEL),\ - SR(DCHUBBUB_ARB_DF_REQ_OUTSTAND),\ - SR(DCHUBBUB_GLOBAL_TIMER_CNTL), \ - SR(DCHUBBUB_TEST_DEBUG_INDEX), \ - SR(DCHUBBUB_TEST_DEBUG_DATA),\ - SR(DCHUBBUB_SOFT_RESET) - -#define HUBBUB_VM_REG_LIST() \ - SR(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_A),\ - SR(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_B),\ - SR(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_C),\ - SR(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_D) - -#define HUBBUB_SR_WATERMARK_REG_LIST()\ - SR(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A),\ - SR(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A),\ - SR(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B),\ - SR(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B),\ - SR(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C),\ - SR(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C),\ - SR(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D),\ - SR(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D) - -#define HUBBUB_REG_LIST_DCN10(id)\ - HUBBUB_REG_LIST_DCN_COMMON(), \ - HUBBUB_VM_REG_LIST(), \ - HUBBUB_SR_WATERMARK_REG_LIST(), \ - SR(DCHUBBUB_SDPIF_FB_TOP),\ - SR(DCHUBBUB_SDPIF_FB_BASE),\ - SR(DCHUBBUB_SDPIF_FB_OFFSET),\ - SR(DCHUBBUB_SDPIF_AGP_BASE),\ - SR(DCHUBBUB_SDPIF_AGP_BOT),\ - SR(DCHUBBUB_SDPIF_AGP_TOP) - -struct dcn_hubbub_registers { - uint32_t DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A; - uint32_t DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_A; - uint32_t DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A; - uint32_t DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A; - uint32_t DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A; - uint32_t DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B; - uint32_t DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_B; - uint32_t DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B; - uint32_t DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B; - uint32_t DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B; - uint32_t DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C; - uint32_t DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_C; - uint32_t DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C; - uint32_t DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C; - uint32_t DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C; - uint32_t DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D; - uint32_t DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_D; - uint32_t DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D; - uint32_t DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D; - uint32_t DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D; - uint32_t DCHUBBUB_ARB_WATERMARK_CHANGE_CNTL; - uint32_t DCHUBBUB_ARB_SAT_LEVEL; - uint32_t DCHUBBUB_ARB_DF_REQ_OUTSTAND; - uint32_t DCHUBBUB_GLOBAL_TIMER_CNTL; - uint32_t DCHUBBUB_ARB_DRAM_STATE_CNTL; - uint32_t DCHUBBUB_TEST_DEBUG_INDEX; - uint32_t DCHUBBUB_TEST_DEBUG_DATA; - uint32_t DCHUBBUB_SDPIF_FB_TOP; - uint32_t DCHUBBUB_SDPIF_FB_BASE; - uint32_t DCHUBBUB_SDPIF_FB_OFFSET; - uint32_t DCHUBBUB_SDPIF_AGP_BASE; - uint32_t DCHUBBUB_SDPIF_AGP_BOT; - uint32_t DCHUBBUB_SDPIF_AGP_TOP; - uint32_t DCHUBBUB_CRC_CTRL; - uint32_t 
DCHUBBUB_SOFT_RESET; - uint32_t DCN_VM_FB_LOCATION_BASE; - uint32_t DCN_VM_FB_LOCATION_TOP; - uint32_t DCN_VM_FB_OFFSET; - uint32_t DCN_VM_AGP_BOT; - uint32_t DCN_VM_AGP_TOP; - uint32_t DCN_VM_AGP_BASE; - uint32_t DCN_VM_PROTECTION_FAULT_DEFAULT_ADDR_MSB; - uint32_t DCN_VM_PROTECTION_FAULT_DEFAULT_ADDR_LSB; - uint32_t DCN_VM_FAULT_ADDR_MSB; - uint32_t DCN_VM_FAULT_ADDR_LSB; - uint32_t DCN_VM_FAULT_CNTL; - uint32_t DCN_VM_FAULT_STATUS; - uint32_t DCHUBBUB_ARB_FRAC_URG_BW_NOM_A; - uint32_t DCHUBBUB_ARB_FRAC_URG_BW_NOM_B; - uint32_t DCHUBBUB_ARB_FRAC_URG_BW_NOM_C; - uint32_t DCHUBBUB_ARB_FRAC_URG_BW_NOM_D; - uint32_t DCHUBBUB_ARB_FRAC_URG_BW_FLIP_A; - uint32_t DCHUBBUB_ARB_FRAC_URG_BW_FLIP_B; - uint32_t DCHUBBUB_ARB_FRAC_URG_BW_FLIP_C; - uint32_t DCHUBBUB_ARB_FRAC_URG_BW_FLIP_D; - uint32_t DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_A; - uint32_t DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_B; - uint32_t DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_C; - uint32_t DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_D; - uint32_t DCHUBBUB_ARB_HOSTVM_CNTL; - uint32_t DCHVM_CTRL0; - uint32_t DCHVM_MEM_CTRL; - uint32_t DCHVM_CLK_CTRL; - uint32_t DCHVM_RIOMMU_CTRL0; - uint32_t DCHVM_RIOMMU_STAT0; - uint32_t DCHUBBUB_DET0_CTRL; - uint32_t DCHUBBUB_DET1_CTRL; - uint32_t DCHUBBUB_DET2_CTRL; - uint32_t DCHUBBUB_DET3_CTRL; - uint32_t DCHUBBUB_COMPBUF_CTRL; - uint32_t COMPBUF_RESERVED_SPACE; - uint32_t DCHUBBUB_DEBUG_CTRL_0; - uint32_t DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_A; - uint32_t DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_A; - uint32_t DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_B; - uint32_t DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_B; - uint32_t DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_C; - uint32_t DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_C; - uint32_t DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_D; - uint32_t DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_D; - uint32_t DCHUBBUB_ARB_USR_RETRAINING_CNTL; - uint32_t DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_A; - uint32_t DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_B; - uint32_t DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_C; - uint32_t DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_D; - uint32_t DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_A; - uint32_t DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_B; - uint32_t DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_C; - uint32_t DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_D; - uint32_t DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_A; - uint32_t DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_B; - uint32_t DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_C; - uint32_t DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_D; - uint32_t DCHUBBUB_ARB_MALL_CNTL; - uint32_t SDPIF_REQUEST_RATE_LIMIT; - uint32_t DCHUBBUB_SDPIF_CFG0; - uint32_t DCHUBBUB_SDPIF_CFG1; - uint32_t DCHUBBUB_CLOCK_CNTL; - uint32_t DCHUBBUB_MEM_PWR_MODE_CTRL; - uint32_t DCHUBBUB_ARB_QOS_FORCE; - uint32_t DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK1_A; - uint32_t DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK1_A; - uint32_t DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK1_B; - uint32_t DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK1_B; - uint32_t DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK2_A; - uint32_t DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK2_A; - uint32_t DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK2_B; - uint32_t DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK2_B; - uint32_t DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK3_A; - uint32_t DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK3_A; - uint32_t DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK3_B; - uint32_t DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK3_B; - uint32_t DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK1_A; - uint32_t DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK1_B; - uint32_t 
DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK1_A; - uint32_t DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK1_B; - uint32_t DCHUBBUB_ARB_REFCYC_PER_META_TRIP_A; - uint32_t DCHUBBUB_ARB_REFCYC_PER_META_TRIP_B; - uint32_t DCHUBBUB_ARB_FRAC_URG_BW_MALL_A; - uint32_t DCHUBBUB_ARB_FRAC_URG_BW_MALL_B; -}; - -#define HUBBUB_REG_FIELD_LIST_DCN32(type) \ - type DCHUBBUB_ARB_ALLOW_USR_RETRAINING_FORCE_VALUE;\ - type DCHUBBUB_ARB_ALLOW_USR_RETRAINING_FORCE_ENABLE;\ - type DCHUBBUB_ARB_DO_NOT_FORCE_ALLOW_USR_RETRAINING_DURING_PSTATE_CHANGE_REQUEST;\ - type DCHUBBUB_ARB_DO_NOT_FORCE_ALLOW_USR_RETRAINING_DURING_PRE_CSTATE;\ - type DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_A;\ - type DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_B;\ - type DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_C;\ - type DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_D;\ - type DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_A;\ - type DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_B;\ - type DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_C;\ - type DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_D;\ - type DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_A;\ - type DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_B;\ - type DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_C;\ - type DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_D;\ - type MALL_PREFETCH_COMPLETE;\ - type MALL_IN_USE - -#define HUBBUB_REG_FIELD_LIST_DCN35(type) \ - type DCHUBBUB_FGCG_REP_DIS;\ - type DCHUBBUB_ARB_ALLOW_CSTATE_DEEPSLEEP_LEGACY_MODE - -/* set field name */ -#define HUBBUB_SF(reg_name, field_name, post_fix)\ - .field_name = reg_name ## __ ## field_name ## post_fix - -#define HUBBUB_MASK_SH_LIST_DCN_COMMON(mask_sh)\ - HUBBUB_SF(DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_ENABLE, mask_sh), \ - HUBBUB_SF(DCHUBBUB_SOFT_RESET, DCHUBBUB_GLOBAL_SOFT_RESET, mask_sh), \ - HUBBUB_SF(DCHUBBUB_ARB_WATERMARK_CHANGE_CNTL, DCHUBBUB_ARB_WATERMARK_CHANGE_REQUEST, mask_sh), \ - HUBBUB_SF(DCHUBBUB_ARB_WATERMARK_CHANGE_CNTL, DCHUBBUB_ARB_WATERMARK_CHANGE_DONE_INTERRUPT_DISABLE, mask_sh), \ - HUBBUB_SF(DCHUBBUB_ARB_DRAM_STATE_CNTL, DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_VALUE, mask_sh), \ - HUBBUB_SF(DCHUBBUB_ARB_DRAM_STATE_CNTL, DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_ENABLE, mask_sh), \ - HUBBUB_SF(DCHUBBUB_ARB_DRAM_STATE_CNTL, DCHUBBUB_ARB_ALLOW_PSTATE_CHANGE_FORCE_VALUE, mask_sh), \ - HUBBUB_SF(DCHUBBUB_ARB_DRAM_STATE_CNTL, DCHUBBUB_ARB_ALLOW_PSTATE_CHANGE_FORCE_ENABLE, mask_sh), \ - HUBBUB_SF(DCHUBBUB_ARB_SAT_LEVEL, DCHUBBUB_ARB_SAT_LEVEL, mask_sh), \ - HUBBUB_SF(DCHUBBUB_ARB_DF_REQ_OUTSTAND, DCHUBBUB_ARB_MIN_REQ_OUTSTAND, mask_sh), \ - HUBBUB_SF(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A, DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A, mask_sh), \ - HUBBUB_SF(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B, DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B, mask_sh), \ - HUBBUB_SF(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C, DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C, mask_sh), \ - HUBBUB_SF(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D, DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D, mask_sh), \ - HUBBUB_SF(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A, DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A, mask_sh), \ - HUBBUB_SF(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B, DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B, mask_sh), \ - HUBBUB_SF(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C, DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C, mask_sh), \ - HUBBUB_SF(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D, DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D, mask_sh) - -#define HUBBUB_MASK_SH_LIST_STUTTER(mask_sh) \ - HUBBUB_SF(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A, DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A, 
mask_sh), \ - HUBBUB_SF(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B, DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B, mask_sh), \ - HUBBUB_SF(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C, DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C, mask_sh), \ - HUBBUB_SF(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D, DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D, mask_sh), \ - HUBBUB_SF(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A, DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A, mask_sh), \ - HUBBUB_SF(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B, DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B, mask_sh), \ - HUBBUB_SF(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C, DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C, mask_sh), \ - HUBBUB_SF(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D, DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D, mask_sh) - -#define HUBBUB_MASK_SH_LIST_DCN10(mask_sh)\ - HUBBUB_MASK_SH_LIST_DCN_COMMON(mask_sh), \ - HUBBUB_MASK_SH_LIST_STUTTER(mask_sh), \ - HUBBUB_SF(DCHUBBUB_SDPIF_FB_TOP, SDPIF_FB_TOP, mask_sh), \ - HUBBUB_SF(DCHUBBUB_SDPIF_FB_BASE, SDPIF_FB_BASE, mask_sh), \ - HUBBUB_SF(DCHUBBUB_SDPIF_FB_OFFSET, SDPIF_FB_OFFSET, mask_sh), \ - HUBBUB_SF(DCHUBBUB_SDPIF_AGP_BASE, SDPIF_AGP_BASE, mask_sh), \ - HUBBUB_SF(DCHUBBUB_SDPIF_AGP_BOT, SDPIF_AGP_BOT, mask_sh), \ - HUBBUB_SF(DCHUBBUB_SDPIF_AGP_TOP, SDPIF_AGP_TOP, mask_sh) - -#define DCN_HUBBUB_REG_FIELD_LIST(type) \ - type DCHUBBUB_GLOBAL_TIMER_ENABLE; \ - type DCHUBBUB_ARB_WATERMARK_CHANGE_REQUEST;\ - type DCHUBBUB_ARB_WATERMARK_CHANGE_DONE_INTERRUPT_DISABLE;\ - type DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_VALUE;\ - type DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_ENABLE;\ - type DCHUBBUB_ARB_ALLOW_PSTATE_CHANGE_FORCE_VALUE;\ - type DCHUBBUB_ARB_ALLOW_PSTATE_CHANGE_FORCE_ENABLE;\ - type DCHUBBUB_ARB_SAT_LEVEL;\ - type DCHUBBUB_ARB_MIN_REQ_OUTSTAND;\ - type DCHUBBUB_GLOBAL_TIMER_REFDIV;\ - type DCHUBBUB_GLOBAL_SOFT_RESET; \ - type SDPIF_FB_TOP;\ - type SDPIF_FB_BASE;\ - type SDPIF_FB_OFFSET;\ - type SDPIF_AGP_BASE;\ - type SDPIF_AGP_BOT;\ - type SDPIF_AGP_TOP;\ - type FB_BASE;\ - type FB_TOP;\ - type FB_OFFSET;\ - type AGP_BOT;\ - type AGP_TOP;\ - type AGP_BASE;\ - type DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A;\ - type DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B;\ - type DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C;\ - type DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D;\ - type DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A;\ - type DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B;\ - type DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C;\ - type DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D;\ - type DCN_VM_PROTECTION_FAULT_DEFAULT_ADDR_MSB;\ - type DCN_VM_PROTECTION_FAULT_DEFAULT_ADDR_LSB;\ - type DCN_VM_FAULT_ADDR_MSB;\ - type DCN_VM_FAULT_ADDR_LSB;\ - type DCN_VM_ERROR_STATUS_CLEAR;\ - type DCN_VM_ERROR_STATUS_MODE;\ - type DCN_VM_ERROR_INTERRUPT_ENABLE;\ - type DCN_VM_RANGE_FAULT_DISABLE;\ - type DCN_VM_PRQ_FAULT_DISABLE;\ - type DCN_VM_ERROR_STATUS;\ - type DCN_VM_ERROR_VMID;\ - type DCN_VM_ERROR_TABLE_LEVEL;\ - type DCN_VM_ERROR_PIPE;\ - type DCN_VM_ERROR_INTERRUPT_STATUS - -#define HUBBUB_STUTTER_REG_FIELD_LIST(type) \ - type DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A;\ - type DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B;\ - type DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C;\ - type DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D;\ - type DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A;\ - type DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B;\ - type DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C;\ - type DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D - - -#define HUBBUB_HVM_REG_FIELD_LIST(type) \ - type DCHUBBUB_ARB_MIN_REQ_OUTSTAND_COMMIT_THRESHOLD;\ - type DCHUBBUB_ARB_VM_ROW_URGENCY_WATERMARK_A;\ - type 
DCHUBBUB_ARB_VM_ROW_URGENCY_WATERMARK_B;\ - type DCHUBBUB_ARB_VM_ROW_URGENCY_WATERMARK_C;\ - type DCHUBBUB_ARB_VM_ROW_URGENCY_WATERMARK_D;\ - type DCHUBBUB_ARB_VM_ROW_ALLOW_SR_ENTER_WATERMARK_A;\ - type DCHUBBUB_ARB_VM_ROW_ALLOW_SR_ENTER_WATERMARK_B;\ - type DCHUBBUB_ARB_VM_ROW_ALLOW_SR_ENTER_WATERMARK_C;\ - type DCHUBBUB_ARB_VM_ROW_ALLOW_SR_ENTER_WATERMARK_D;\ - type DCHUBBUB_ARB_VM_ROW_ALLOW_SR_EXIT_WATERMARK_A;\ - type DCHUBBUB_ARB_VM_ROW_ALLOW_SR_EXIT_WATERMARK_B;\ - type DCHUBBUB_ARB_VM_ROW_ALLOW_SR_EXIT_WATERMARK_C;\ - type DCHUBBUB_ARB_VM_ROW_ALLOW_SR_EXIT_WATERMARK_D;\ - type DCHUBBUB_ARB_VM_ROW_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A;\ - type DCHUBBUB_ARB_VM_ROW_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B;\ - type DCHUBBUB_ARB_VM_ROW_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C;\ - type DCHUBBUB_ARB_VM_ROW_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D;\ - type DCHUBBUB_ARB_FRAC_URG_BW_NOM_A;\ - type DCHUBBUB_ARB_FRAC_URG_BW_NOM_B;\ - type DCHUBBUB_ARB_FRAC_URG_BW_NOM_C;\ - type DCHUBBUB_ARB_FRAC_URG_BW_NOM_D;\ - type DCHUBBUB_ARB_FRAC_URG_BW_FLIP_A;\ - type DCHUBBUB_ARB_FRAC_URG_BW_FLIP_B;\ - type DCHUBBUB_ARB_FRAC_URG_BW_FLIP_C;\ - type DCHUBBUB_ARB_FRAC_URG_BW_FLIP_D;\ - type DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_A;\ - type DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_B;\ - type DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_C;\ - type DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_D;\ - type DCHUBBUB_ARB_MAX_QOS_COMMIT_THRESHOLD;\ - type HOSTVM_INIT_REQ; \ - type HVM_GPUVMRET_PWR_REQ_DIS; \ - type HVM_GPUVMRET_FORCE_REQ; \ - type HVM_GPUVMRET_POWER_STATUS; \ - type HVM_DISPCLK_R_GATE_DIS; \ - type HVM_DISPCLK_G_GATE_DIS; \ - type HVM_DCFCLK_R_GATE_DIS; \ - type HVM_DCFCLK_G_GATE_DIS; \ - type TR_REQ_REQCLKREQ_MODE; \ - type TW_RSP_COMPCLKREQ_MODE; \ - type HOSTVM_PREFETCH_REQ; \ - type HOSTVM_POWERSTATUS; \ - type RIOMMU_ACTIVE; \ - type HOSTVM_PREFETCH_DONE - -#define HUBBUB_RET_REG_FIELD_LIST(type) \ - type DET_DEPTH;\ - type DET0_SIZE;\ - type DET1_SIZE;\ - type DET2_SIZE;\ - type DET3_SIZE;\ - type DET0_SIZE_CURRENT;\ - type DET1_SIZE_CURRENT;\ - type DET2_SIZE_CURRENT;\ - type DET3_SIZE_CURRENT;\ - type COMPBUF_SIZE;\ - type COMPBUF_SIZE_CURRENT;\ - type CONFIG_ERROR;\ - type COMPBUF_RESERVED_SPACE_64B;\ - type COMPBUF_RESERVED_SPACE_ZS;\ - type DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_A;\ - type DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_A;\ - type DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_B;\ - type DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_B;\ - type DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_C;\ - type DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_C;\ - type DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_D;\ - type DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_D;\ - type SDPIF_REQUEST_RATE_LIMIT;\ - type DISPCLK_R_DCHUBBUB_GATE_DIS;\ - type DCFCLK_R_DCHUBBUB_GATE_DIS;\ - type SDPIF_MAX_NUM_OUTSTANDING;\ - type DCHUBBUB_ARB_MAX_REQ_OUTSTAND;\ - type SDPIF_PORT_CONTROL;\ - type DET_MEM_PWR_LS_MODE - - -#define HUBBUB_REG_FIELD_LIST_DCN4_01(type) \ - type DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK1_A;\ - type DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK1_A;\ - type DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK1_B;\ - type DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK1_B;\ - type DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK2_A;\ - type DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK2_A;\ - type DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK2_B;\ - type DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK2_B;\ - type DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK3_A;\ - type DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK3_A;\ - type DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK3_B;\ - type DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK3_B;\ - type 
DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK1_A;\ - type DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK1_B;\ - type DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK1_A;\ - type DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK1_B;\ - type DCHUBBUB_ARB_REFCYC_PER_META_TRIP_A;\ - type DCHUBBUB_ARB_REFCYC_PER_META_TRIP_B;\ - type DCHUBBUB_ARB_FRAC_URG_BW_MALL_A;\ - type DCHUBBUB_ARB_FRAC_URG_BW_MALL_B - -struct dcn_hubbub_shift { - DCN_HUBBUB_REG_FIELD_LIST(uint8_t); - HUBBUB_STUTTER_REG_FIELD_LIST(uint8_t); - HUBBUB_HVM_REG_FIELD_LIST(uint8_t); - HUBBUB_RET_REG_FIELD_LIST(uint8_t); - HUBBUB_REG_FIELD_LIST_DCN32(uint8_t); - HUBBUB_REG_FIELD_LIST_DCN35(uint8_t); - HUBBUB_REG_FIELD_LIST_DCN4_01(uint8_t); -}; - -struct dcn_hubbub_mask { - DCN_HUBBUB_REG_FIELD_LIST(uint32_t); - HUBBUB_STUTTER_REG_FIELD_LIST(uint32_t); - HUBBUB_HVM_REG_FIELD_LIST(uint32_t); - HUBBUB_RET_REG_FIELD_LIST(uint32_t); - HUBBUB_REG_FIELD_LIST_DCN32(uint32_t); - HUBBUB_REG_FIELD_LIST_DCN35(uint32_t); - HUBBUB_REG_FIELD_LIST_DCN4_01(uint32_t); -}; - -struct dc; - -struct dcn10_hubbub { - struct hubbub base; - const struct dcn_hubbub_registers *regs; - const struct dcn_hubbub_shift *shifts; - const struct dcn_hubbub_mask *masks; - unsigned int debug_test_index_pstate; - union dcn_watermark_set watermarks; -}; - -void hubbub1_update_dchub( - struct hubbub *hubbub, - struct dchub_init_data *dh_data); - -bool hubbub1_verify_allow_pstate_change_high( - struct hubbub *hubbub); - -void hubbub1_wm_change_req_wa(struct hubbub *hubbub); - -bool hubbub1_program_watermarks( - struct hubbub *hubbub, - union dcn_watermark_set *watermarks, - unsigned int refclk_mhz, - bool safe_to_lower); - -void hubbub1_allow_self_refresh_control(struct hubbub *hubbub, bool allow); - -bool hubbub1_is_allow_self_refresh_enabled(struct hubbub *hubub); - -void hubbub1_toggle_watermark_change_req( - struct hubbub *hubbub); - -void hubbub1_wm_read_state(struct hubbub *hubbub, - struct dcn_hubbub_wm *wm); - -void hubbub1_soft_reset(struct hubbub *hubbub, bool reset); -void hubbub1_construct(struct hubbub *hubbub, - struct dc_context *ctx, - const struct dcn_hubbub_registers *hubbub_regs, - const struct dcn_hubbub_shift *hubbub_shift, - const struct dcn_hubbub_mask *hubbub_mask); - -bool hubbub1_program_urgent_watermarks( - struct hubbub *hubbub, - union dcn_watermark_set *watermarks, - unsigned int refclk_mhz, - bool safe_to_lower); -bool hubbub1_program_stutter_watermarks( - struct hubbub *hubbub, - union dcn_watermark_set *watermarks, - unsigned int refclk_mhz, - bool safe_to_lower); -bool hubbub1_program_pstate_watermarks( - struct hubbub *hubbub, - union dcn_watermark_set *watermarks, - unsigned int refclk_mhz, - bool safe_to_lower); - -#endif diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.c index c51b717e5622..3adef474ed26 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.c @@ -41,7 +41,7 @@ #include "mpc.h" #include "reg_helper.h" #include "dcn10_hubp.h" -#include "dcn10_hubbub.h" +#include "dcn10/dcn10_hubbub.h" #include "dcn10_cm_common.h" #include "clk_mgr.h" diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/Makefile b/drivers/gpu/drm/amd/display/dc/dcn20/Makefile index 9b6070c99794..6e5b7fcf8dbd 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/Makefile +++ b/drivers/gpu/drm/amd/display/dc/dcn20/Makefile @@ -3,7 +3,7 @@ # Makefile for DCN. 
DCN20 = dcn20_hubp.o \ - dcn20_mpc.o dcn20_opp.o dcn20_hubbub.o dcn20_mmhubbub.o \ + dcn20_mpc.o dcn20_opp.o dcn20_mmhubbub.o \ dcn20_stream_encoder.o dcn20_link_encoder.o dcn20_dccg.o \ dcn20_vmid.o dcn20_dwb.o dcn20_dwb_scl.o diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.c deleted file mode 100644 index c6f859871d11..000000000000 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.c +++ /dev/null @@ -1,670 +0,0 @@ -/* - * Copyright 2016 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - * Authors: AMD - * - */ - - -#include "dcn20_hubbub.h" -#include "reg_helper.h" -#include "clk_mgr.h" - -#define REG(reg)\ - hubbub1->regs->reg - -#define CTX \ - hubbub1->base.ctx - -#undef FN -#define FN(reg_name, field_name) \ - hubbub1->shifts->field_name, hubbub1->masks->field_name - -#define REG(reg)\ - hubbub1->regs->reg - -#define CTX \ - hubbub1->base.ctx - -#undef FN -#define FN(reg_name, field_name) \ - hubbub1->shifts->field_name, hubbub1->masks->field_name - -#ifdef NUM_VMID -#undef NUM_VMID -#endif -#define NUM_VMID 16 - -bool hubbub2_dcc_support_swizzle( - enum swizzle_mode_values swizzle, - unsigned int bytes_per_element, - enum segment_order *segment_order_horz, - enum segment_order *segment_order_vert) -{ - bool standard_swizzle = false; - bool display_swizzle = false; - bool render_swizzle = false; - - switch (swizzle) { - case DC_SW_4KB_S: - case DC_SW_64KB_S: - case DC_SW_VAR_S: - case DC_SW_4KB_S_X: - case DC_SW_64KB_S_X: - case DC_SW_VAR_S_X: - standard_swizzle = true; - break; - case DC_SW_64KB_R_X: - render_swizzle = true; - break; - case DC_SW_4KB_D: - case DC_SW_64KB_D: - case DC_SW_VAR_D: - case DC_SW_4KB_D_X: - case DC_SW_64KB_D_X: - case DC_SW_VAR_D_X: - display_swizzle = true; - break; - default: - break; - } - - if (standard_swizzle) { - if (bytes_per_element == 1) { - *segment_order_horz = segment_order__contiguous; - *segment_order_vert = segment_order__na; - return true; - } - if (bytes_per_element == 2) { - *segment_order_horz = segment_order__non_contiguous; - *segment_order_vert = segment_order__contiguous; - return true; - } - if (bytes_per_element == 4) { - *segment_order_horz = segment_order__non_contiguous; - *segment_order_vert = segment_order__contiguous; - return true; - } - if (bytes_per_element == 8) { - *segment_order_horz = segment_order__na; - *segment_order_vert = segment_order__contiguous; - return true; - } - } - if 
(render_swizzle) { - if (bytes_per_element == 2) { - *segment_order_horz = segment_order__contiguous; - *segment_order_vert = segment_order__contiguous; - return true; - } - if (bytes_per_element == 4) { - *segment_order_horz = segment_order__non_contiguous; - *segment_order_vert = segment_order__contiguous; - return true; - } - if (bytes_per_element == 8) { - *segment_order_horz = segment_order__contiguous; - *segment_order_vert = segment_order__non_contiguous; - return true; - } - } - if (display_swizzle && bytes_per_element == 8) { - *segment_order_horz = segment_order__contiguous; - *segment_order_vert = segment_order__non_contiguous; - return true; - } - - return false; -} - -bool hubbub2_dcc_support_pixel_format( - enum surface_pixel_format format, - unsigned int *bytes_per_element) -{ - /* DML: get_bytes_per_element */ - switch (format) { - case SURFACE_PIXEL_FORMAT_GRPH_ARGB1555: - case SURFACE_PIXEL_FORMAT_GRPH_RGB565: - *bytes_per_element = 2; - return true; - case SURFACE_PIXEL_FORMAT_GRPH_ARGB8888: - case SURFACE_PIXEL_FORMAT_GRPH_ABGR8888: - case SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010: - case SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010: - case SURFACE_PIXEL_FORMAT_GRPH_RGB111110_FIX: - case SURFACE_PIXEL_FORMAT_GRPH_BGR101111_FIX: - case SURFACE_PIXEL_FORMAT_GRPH_RGB111110_FLOAT: - case SURFACE_PIXEL_FORMAT_GRPH_BGR101111_FLOAT: - case SURFACE_PIXEL_FORMAT_GRPH_RGBE: - case SURFACE_PIXEL_FORMAT_GRPH_RGBE_ALPHA: - *bytes_per_element = 4; - return true; - case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616: - case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616: - case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F: - case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F: - *bytes_per_element = 8; - return true; - default: - return false; - } -} - -static void hubbub2_get_blk256_size(unsigned int *blk256_width, unsigned int *blk256_height, - unsigned int bytes_per_element) -{ - /* copied from DML. might want to refactor DML to leverage from DML */ - /* DML : get_blk256_size */ - if (bytes_per_element == 1) { - *blk256_width = 16; - *blk256_height = 16; - } else if (bytes_per_element == 2) { - *blk256_width = 16; - *blk256_height = 8; - } else if (bytes_per_element == 4) { - *blk256_width = 8; - *blk256_height = 8; - } else if (bytes_per_element == 8) { - *blk256_width = 8; - *blk256_height = 4; - } -} - -static void hubbub2_det_request_size( - unsigned int detile_buf_size, - unsigned int height, - unsigned int width, - unsigned int bpe, - bool *req128_horz_wc, - bool *req128_vert_wc) -{ - unsigned int blk256_height = 0; - unsigned int blk256_width = 0; - unsigned int swath_bytes_horz_wc, swath_bytes_vert_wc; - - hubbub2_get_blk256_size(&blk256_width, &blk256_height, bpe); - - swath_bytes_horz_wc = width * blk256_height * bpe; - swath_bytes_vert_wc = height * blk256_width * bpe; - - *req128_horz_wc = (2 * swath_bytes_horz_wc <= detile_buf_size) ? - false : /* full 256B request */ - true; /* half 128b request */ - - *req128_vert_wc = (2 * swath_bytes_vert_wc <= detile_buf_size) ? 
- false : /* full 256B request */ - true; /* half 128b request */ -} - -bool hubbub2_get_dcc_compression_cap(struct hubbub *hubbub, - const struct dc_dcc_surface_param *input, - struct dc_surface_dcc_cap *output) -{ - struct dc *dc = hubbub->ctx->dc; - /* implement section 1.6.2.1 of DCN1_Programming_Guide.docx */ - enum dcc_control dcc_control; - unsigned int bpe; - enum segment_order segment_order_horz, segment_order_vert; - bool req128_horz_wc, req128_vert_wc; - - memset(output, 0, sizeof(*output)); - - if (dc->debug.disable_dcc == DCC_DISABLE) - return false; - - if (!hubbub->funcs->dcc_support_pixel_format(input->format, - &bpe)) - return false; - - if (!hubbub->funcs->dcc_support_swizzle(input->swizzle_mode, bpe, - &segment_order_horz, &segment_order_vert)) - return false; - - hubbub2_det_request_size(TO_DCN20_HUBBUB(hubbub)->detile_buf_size, - input->surface_size.height, input->surface_size.width, - bpe, &req128_horz_wc, &req128_vert_wc); - - if (!req128_horz_wc && !req128_vert_wc) { - dcc_control = dcc_control__256_256_xxx; - } else if (input->scan == SCAN_DIRECTION_HORIZONTAL) { - if (!req128_horz_wc) - dcc_control = dcc_control__256_256_xxx; - else if (segment_order_horz == segment_order__contiguous) - dcc_control = dcc_control__128_128_xxx; - else - dcc_control = dcc_control__256_64_64; - } else if (input->scan == SCAN_DIRECTION_VERTICAL) { - if (!req128_vert_wc) - dcc_control = dcc_control__256_256_xxx; - else if (segment_order_vert == segment_order__contiguous) - dcc_control = dcc_control__128_128_xxx; - else - dcc_control = dcc_control__256_64_64; - } else { - if ((req128_horz_wc && - segment_order_horz == segment_order__non_contiguous) || - (req128_vert_wc && - segment_order_vert == segment_order__non_contiguous)) - /* access_dir not known, must use most constraining */ - dcc_control = dcc_control__256_64_64; - else - /* reg128 is true for either horz and vert - * but segment_order is contiguous - */ - dcc_control = dcc_control__128_128_xxx; - } - - /* Exception for 64KB_R_X */ - if ((bpe == 2) && (input->swizzle_mode == DC_SW_64KB_R_X)) - dcc_control = dcc_control__128_128_xxx; - - if (dc->debug.disable_dcc == DCC_HALF_REQ_DISALBE && - dcc_control != dcc_control__256_256_xxx) - return false; - - switch (dcc_control) { - case dcc_control__256_256_xxx: - output->grph.rgb.max_uncompressed_blk_size = 256; - output->grph.rgb.max_compressed_blk_size = 256; - output->grph.rgb.independent_64b_blks = false; - break; - case dcc_control__128_128_xxx: - output->grph.rgb.max_uncompressed_blk_size = 128; - output->grph.rgb.max_compressed_blk_size = 128; - output->grph.rgb.independent_64b_blks = false; - break; - case dcc_control__256_64_64: - output->grph.rgb.max_uncompressed_blk_size = 256; - output->grph.rgb.max_compressed_blk_size = 64; - output->grph.rgb.independent_64b_blks = true; - break; - default: - ASSERT(false); - break; - } - output->capable = true; - output->const_color_support = true; - - return true; -} - -static enum dcn_hubbub_page_table_depth page_table_depth_to_hw(unsigned int page_table_depth) -{ - enum dcn_hubbub_page_table_depth depth = 0; - - switch (page_table_depth) { - case 1: - depth = DCN_PAGE_TABLE_DEPTH_1_LEVEL; - break; - case 2: - depth = DCN_PAGE_TABLE_DEPTH_2_LEVEL; - break; - case 3: - depth = DCN_PAGE_TABLE_DEPTH_3_LEVEL; - break; - case 4: - depth = DCN_PAGE_TABLE_DEPTH_4_LEVEL; - break; - default: - ASSERT(false); - break; - } - - return depth; -} - -static enum dcn_hubbub_page_table_block_size page_table_block_size_to_hw(unsigned int 
page_table_block_size) -{ - enum dcn_hubbub_page_table_block_size block_size = 0; - - switch (page_table_block_size) { - case 4096: - block_size = DCN_PAGE_TABLE_BLOCK_SIZE_4KB; - break; - case 65536: - block_size = DCN_PAGE_TABLE_BLOCK_SIZE_64KB; - break; - case 32768: - block_size = DCN_PAGE_TABLE_BLOCK_SIZE_32KB; - break; - default: - ASSERT(false); - block_size = page_table_block_size; - break; - } - - return block_size; -} - -void hubbub2_init_vm_ctx(struct hubbub *hubbub, - struct dcn_hubbub_virt_addr_config *va_config, - int vmid) -{ - struct dcn20_hubbub *hubbub1 = TO_DCN20_HUBBUB(hubbub); - struct dcn_vmid_page_table_config virt_config; - - virt_config.page_table_start_addr = va_config->page_table_start_addr >> 12; - virt_config.page_table_end_addr = va_config->page_table_end_addr >> 12; - virt_config.depth = page_table_depth_to_hw(va_config->page_table_depth); - virt_config.block_size = page_table_block_size_to_hw(va_config->page_table_block_size); - virt_config.page_table_base_addr = va_config->page_table_base_addr; - - dcn20_vmid_setup(&hubbub1->vmid[vmid], &virt_config); -} - -int hubbub2_init_dchub_sys_ctx(struct hubbub *hubbub, - struct dcn_hubbub_phys_addr_config *pa_config) -{ - struct dcn20_hubbub *hubbub1 = TO_DCN20_HUBBUB(hubbub); - struct dcn_vmid_page_table_config phys_config; - - REG_SET(DCN_VM_FB_LOCATION_BASE, 0, - FB_BASE, pa_config->system_aperture.fb_base >> 24); - REG_SET(DCN_VM_FB_LOCATION_TOP, 0, - FB_TOP, pa_config->system_aperture.fb_top >> 24); - REG_SET(DCN_VM_FB_OFFSET, 0, - FB_OFFSET, pa_config->system_aperture.fb_offset >> 24); - REG_SET(DCN_VM_AGP_BOT, 0, - AGP_BOT, pa_config->system_aperture.agp_bot >> 24); - REG_SET(DCN_VM_AGP_TOP, 0, - AGP_TOP, pa_config->system_aperture.agp_top >> 24); - REG_SET(DCN_VM_AGP_BASE, 0, - AGP_BASE, pa_config->system_aperture.agp_base >> 24); - - REG_SET(DCN_VM_PROTECTION_FAULT_DEFAULT_ADDR_MSB, 0, - DCN_VM_PROTECTION_FAULT_DEFAULT_ADDR_MSB, (pa_config->page_table_default_page_addr >> 44) & 0xF); - REG_SET(DCN_VM_PROTECTION_FAULT_DEFAULT_ADDR_LSB, 0, - DCN_VM_PROTECTION_FAULT_DEFAULT_ADDR_LSB, (pa_config->page_table_default_page_addr >> 12) & 0xFFFFFFFF); - - if (pa_config->gart_config.page_table_start_addr != pa_config->gart_config.page_table_end_addr) { - phys_config.page_table_start_addr = pa_config->gart_config.page_table_start_addr >> 12; - phys_config.page_table_end_addr = pa_config->gart_config.page_table_end_addr >> 12; - phys_config.page_table_base_addr = pa_config->gart_config.page_table_base_addr; - phys_config.depth = 0; - phys_config.block_size = 0; - // Init VMID 0 based on PA config - dcn20_vmid_setup(&hubbub1->vmid[0], &phys_config); - } - - return NUM_VMID; -} - -void hubbub2_update_dchub(struct hubbub *hubbub, - struct dchub_init_data *dh_data) -{ - struct dcn20_hubbub *hubbub1 = TO_DCN20_HUBBUB(hubbub); - - if (REG(DCN_VM_FB_LOCATION_TOP) == 0) - return; - - switch (dh_data->fb_mode) { - case FRAME_BUFFER_MODE_ZFB_ONLY: - /*For ZFB case need to put DCHUB FB BASE and TOP upside down to indicate ZFB mode*/ - REG_UPDATE(DCN_VM_FB_LOCATION_TOP, - FB_TOP, 0); - - REG_UPDATE(DCN_VM_FB_LOCATION_BASE, - FB_BASE, 0xFFFFFF); - - /*This field defines the 24 MSBs, bits [47:24] of the 48 bit AGP Base*/ - REG_UPDATE(DCN_VM_AGP_BASE, - AGP_BASE, dh_data->zfb_phys_addr_base >> 24); - - /*This field defines the bottom range of the AGP aperture and represents the 24*/ - /*MSBs, bits [47:24] of the 48 address bits*/ - REG_UPDATE(DCN_VM_AGP_BOT, - AGP_BOT, dh_data->zfb_mc_base_addr >> 24); - - /*This field defines the 
top range of the AGP aperture and represents the 24*/ - /*MSBs, bits [47:24] of the 48 address bits*/ - REG_UPDATE(DCN_VM_AGP_TOP, - AGP_TOP, (dh_data->zfb_mc_base_addr + - dh_data->zfb_size_in_byte - 1) >> 24); - break; - case FRAME_BUFFER_MODE_MIXED_ZFB_AND_LOCAL: - /*Should not touch FB LOCATION (done by VBIOS on AsicInit table)*/ - - /*This field defines the 24 MSBs, bits [47:24] of the 48 bit AGP Base*/ - REG_UPDATE(DCN_VM_AGP_BASE, - AGP_BASE, dh_data->zfb_phys_addr_base >> 24); - - /*This field defines the bottom range of the AGP aperture and represents the 24*/ - /*MSBs, bits [47:24] of the 48 address bits*/ - REG_UPDATE(DCN_VM_AGP_BOT, - AGP_BOT, dh_data->zfb_mc_base_addr >> 24); - - /*This field defines the top range of the AGP aperture and represents the 24*/ - /*MSBs, bits [47:24] of the 48 address bits*/ - REG_UPDATE(DCN_VM_AGP_TOP, - AGP_TOP, (dh_data->zfb_mc_base_addr + - dh_data->zfb_size_in_byte - 1) >> 24); - break; - case FRAME_BUFFER_MODE_LOCAL_ONLY: - /*Should not touch FB LOCATION (should be done by VBIOS)*/ - - /*This field defines the 24 MSBs, bits [47:24] of the 48 bit AGP Base*/ - REG_UPDATE(DCN_VM_AGP_BASE, - AGP_BASE, 0); - - /*This field defines the bottom range of the AGP aperture and represents the 24*/ - /*MSBs, bits [47:24] of the 48 address bits*/ - REG_UPDATE(DCN_VM_AGP_BOT, - AGP_BOT, 0xFFFFFF); - - /*This field defines the top range of the AGP aperture and represents the 24*/ - /*MSBs, bits [47:24] of the 48 address bits*/ - REG_UPDATE(DCN_VM_AGP_TOP, - AGP_TOP, 0); - break; - default: - break; - } - - dh_data->dchub_initialzied = true; - dh_data->dchub_info_valid = false; -} - -void hubbub2_wm_read_state(struct hubbub *hubbub, - struct dcn_hubbub_wm *wm) -{ - struct dcn20_hubbub *hubbub1 = TO_DCN20_HUBBUB(hubbub); - - struct dcn_hubbub_wm_set *s; - - memset(wm, 0, sizeof(struct dcn_hubbub_wm)); - - s = &wm->sets[0]; - s->wm_set = 0; - s->data_urgent = REG_READ(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A); - if (REG(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_A)) - s->pte_meta_urgent = REG_READ(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_A); - if (REG(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A)) { - s->sr_enter = REG_READ(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A); - s->sr_exit = REG_READ(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A); - } - s->dram_clk_change = REG_READ(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A); - - s = &wm->sets[1]; - s->wm_set = 1; - s->data_urgent = REG_READ(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B); - if (REG(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_B)) - s->pte_meta_urgent = REG_READ(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_B); - if (REG(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B)) { - s->sr_enter = REG_READ(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B); - s->sr_exit = REG_READ(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B); - } - s->dram_clk_change = REG_READ(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B); - - s = &wm->sets[2]; - s->wm_set = 2; - s->data_urgent = REG_READ(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C); - if (REG(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_C)) - s->pte_meta_urgent = REG_READ(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_C); - if (REG(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C)) { - s->sr_enter = REG_READ(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C); - s->sr_exit = REG_READ(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C); - } - s->dram_clk_change = REG_READ(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C); - - s = &wm->sets[3]; - s->wm_set = 3; - s->data_urgent = REG_READ(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D); - if (REG(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_D)) - 
s->pte_meta_urgent = REG_READ(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_D); - if (REG(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D)) { - s->sr_enter = REG_READ(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D); - s->sr_exit = REG_READ(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D); - } - s->dram_clk_change = REG_READ(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D); -} - -void hubbub2_get_dchub_ref_freq(struct hubbub *hubbub, - unsigned int dccg_ref_freq_inKhz, - unsigned int *dchub_ref_freq_inKhz) -{ - struct dcn20_hubbub *hubbub1 = TO_DCN20_HUBBUB(hubbub); - uint32_t ref_div = 0; - uint32_t ref_en = 0; - - REG_GET_2(DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_REFDIV, &ref_div, - DCHUBBUB_GLOBAL_TIMER_ENABLE, &ref_en); - - if (ref_en) { - if (ref_div == 2) - *dchub_ref_freq_inKhz = dccg_ref_freq_inKhz / 2; - else - *dchub_ref_freq_inKhz = dccg_ref_freq_inKhz; - - // DC hub reference frequency must be around 50Mhz, otherwise there may be - // overflow/underflow issues when doing HUBBUB programming - if (*dchub_ref_freq_inKhz < 40000 || *dchub_ref_freq_inKhz > 60000) - ASSERT_CRITICAL(false); - - return; - } else { - *dchub_ref_freq_inKhz = dccg_ref_freq_inKhz; - - // HUBBUB global timer must be enabled. - ASSERT_CRITICAL(false); - return; - } -} - -static bool hubbub2_program_watermarks( - struct hubbub *hubbub, - union dcn_watermark_set *watermarks, - unsigned int refclk_mhz, - bool safe_to_lower) -{ - struct dcn20_hubbub *hubbub1 = TO_DCN20_HUBBUB(hubbub); - bool wm_pending = false; - /* - * Need to clamp to max of the register values (i.e. no wrap) - * for dcn1, all wm registers are 21-bit wide - */ - if (hubbub1_program_urgent_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower)) - wm_pending = true; - - if (hubbub1_program_stutter_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower)) - wm_pending = true; - - /* - * There's a special case when going from p-state support to p-state unsupported - * here we are going to LOWER watermarks to go to dummy p-state only, but this has - * to be done prepare_bandwidth, not optimize - */ - if (hubbub1->base.ctx->dc->clk_mgr->clks.prev_p_state_change_support == true && - hubbub1->base.ctx->dc->clk_mgr->clks.p_state_change_support == false) - safe_to_lower = true; - - hubbub1_program_pstate_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower); - - REG_SET(DCHUBBUB_ARB_SAT_LEVEL, 0, - DCHUBBUB_ARB_SAT_LEVEL, 60 * refclk_mhz); - REG_UPDATE(DCHUBBUB_ARB_DF_REQ_OUTSTAND, DCHUBBUB_ARB_MIN_REQ_OUTSTAND, 180); - - hubbub->funcs->allow_self_refresh_control(hubbub, !hubbub->ctx->dc->debug.disable_stutter); - return wm_pending; -} - -void hubbub2_read_state(struct hubbub *hubbub, struct dcn_hubbub_state *hubbub_state) -{ - struct dcn20_hubbub *hubbub1 = TO_DCN20_HUBBUB(hubbub); - - if (REG(DCN_VM_FAULT_ADDR_MSB)) - hubbub_state->vm_fault_addr_msb = REG_READ(DCN_VM_FAULT_ADDR_MSB); - - if (REG(DCN_VM_FAULT_ADDR_LSB)) - hubbub_state->vm_fault_addr_msb = REG_READ(DCN_VM_FAULT_ADDR_LSB); - - if (REG(DCN_VM_FAULT_CNTL)) - REG_GET(DCN_VM_FAULT_CNTL, DCN_VM_ERROR_STATUS_MODE, &hubbub_state->vm_error_mode); - - if (REG(DCN_VM_FAULT_STATUS)) { - REG_GET(DCN_VM_FAULT_STATUS, DCN_VM_ERROR_STATUS, &hubbub_state->vm_error_status); - REG_GET(DCN_VM_FAULT_STATUS, DCN_VM_ERROR_VMID, &hubbub_state->vm_error_vmid); - REG_GET(DCN_VM_FAULT_STATUS, DCN_VM_ERROR_PIPE, &hubbub_state->vm_error_pipe); - } - - if (REG(DCHUBBUB_TEST_DEBUG_INDEX) && REG(DCHUBBUB_TEST_DEBUG_DATA)) { - REG_WRITE(DCHUBBUB_TEST_DEBUG_INDEX, 0x6); - hubbub_state->test_debug_data = 
REG_READ(DCHUBBUB_TEST_DEBUG_DATA); - } - - if (REG(DCHUBBUB_ARB_WATERMARK_CHANGE_CNTL)) - hubbub_state->watermark_change_cntl = REG_READ(DCHUBBUB_ARB_WATERMARK_CHANGE_CNTL); - - if (REG(DCHUBBUB_ARB_DRAM_STATE_CNTL)) - hubbub_state->dram_state_cntl = REG_READ(DCHUBBUB_ARB_DRAM_STATE_CNTL); -} - -static const struct hubbub_funcs hubbub2_funcs = { - .update_dchub = hubbub2_update_dchub, - .init_dchub_sys_ctx = hubbub2_init_dchub_sys_ctx, - .init_vm_ctx = hubbub2_init_vm_ctx, - .dcc_support_swizzle = hubbub2_dcc_support_swizzle, - .dcc_support_pixel_format = hubbub2_dcc_support_pixel_format, - .get_dcc_compression_cap = hubbub2_get_dcc_compression_cap, - .wm_read_state = hubbub2_wm_read_state, - .get_dchub_ref_freq = hubbub2_get_dchub_ref_freq, - .program_watermarks = hubbub2_program_watermarks, - .is_allow_self_refresh_enabled = hubbub1_is_allow_self_refresh_enabled, - .allow_self_refresh_control = hubbub1_allow_self_refresh_control, - .hubbub_read_state = hubbub2_read_state, -}; - -void hubbub2_construct(struct dcn20_hubbub *hubbub, - struct dc_context *ctx, - const struct dcn_hubbub_registers *hubbub_regs, - const struct dcn_hubbub_shift *hubbub_shift, - const struct dcn_hubbub_mask *hubbub_mask) -{ - hubbub->base.ctx = ctx; - - hubbub->base.funcs = &hubbub2_funcs; - - hubbub->regs = hubbub_regs; - hubbub->shifts = hubbub_shift; - hubbub->masks = hubbub_mask; - - hubbub->debug_test_index_pstate = 0xB; - hubbub->detile_buf_size = 164 * 1024; /* 164KB for DCN2.0 */ -} diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.h deleted file mode 100644 index 24a9c45988ed..000000000000 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.h +++ /dev/null @@ -1,143 +0,0 @@ -/* - * Copyright 2016 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. 
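For readers tracing hubbub2_get_dchub_ref_freq() in the dcn20 file moved above: the DCHUB timer clock is the DCCG reference, halved when the global timer REFDIV is programmed to 2, and it is expected to land in the 40-60 MHz window. A rough, freestanding sketch of that derivation follows, with invented demo_* names and without the register access or the disabled-timer ASSERT_CRITICAL path.

#include <assert.h>
#include <stdbool.h>

/* Sketch of the ref-clock derivation: halve the DCCG reference when the
 * global timer REFDIV is 2, then sanity-check the roughly-50 MHz target. */
static unsigned int demo_dchub_ref_freq_khz(unsigned int dccg_ref_khz,
                                            unsigned int refdiv,
                                            bool timer_enabled)
{
        unsigned int ref_khz = dccg_ref_khz;

        if (timer_enabled && refdiv == 2)
                ref_khz = dccg_ref_khz / 2;

        /* HUBBUB programming expects ~40-60 MHz to avoid over/underflow. */
        assert(ref_khz >= 40000 && ref_khz <= 60000);
        return ref_khz;
}

int main(void)
{
        /* 100 MHz DCCG reference with REFDIV = 2 -> 50 MHz DCHUB timer clock. */
        return demo_dchub_ref_freq_khz(100000, 2, true) == 50000 ? 0 : 1;
}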
- * - * Authors: AMD - * - */ - -#ifndef __DC_HUBBUB_DCN20_H__ -#define __DC_HUBBUB_DCN20_H__ - -#include "dcn10/dcn10_hubbub.h" -#include "dcn20_vmid.h" - -#define TO_DCN20_HUBBUB(hubbub)\ - container_of(hubbub, struct dcn20_hubbub, base) - -#define HUBBUB_REG_LIST_DCN20_COMMON()\ - HUBBUB_REG_LIST_DCN_COMMON(), \ - SR(DCHUBBUB_CRC_CTRL), \ - SR(DCN_VM_FB_LOCATION_BASE),\ - SR(DCN_VM_FB_LOCATION_TOP),\ - SR(DCN_VM_FB_OFFSET),\ - SR(DCN_VM_AGP_BOT),\ - SR(DCN_VM_AGP_TOP),\ - SR(DCN_VM_AGP_BASE),\ - SR(DCN_VM_FAULT_ADDR_MSB), \ - SR(DCN_VM_FAULT_ADDR_LSB), \ - SR(DCN_VM_FAULT_CNTL), \ - SR(DCN_VM_FAULT_STATUS) - -#define HUBBUB_REG_LIST_DCN20(id)\ - HUBBUB_REG_LIST_DCN20_COMMON(), \ - HUBBUB_SR_WATERMARK_REG_LIST(), \ - HUBBUB_VM_REG_LIST(),\ - SR(DCN_VM_PROTECTION_FAULT_DEFAULT_ADDR_MSB),\ - SR(DCN_VM_PROTECTION_FAULT_DEFAULT_ADDR_LSB) - - -#define HUBBUB_MASK_SH_LIST_DCN20(mask_sh)\ - HUBBUB_MASK_SH_LIST_DCN_COMMON(mask_sh), \ - HUBBUB_MASK_SH_LIST_STUTTER(mask_sh), \ - HUBBUB_SF(DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_REFDIV, mask_sh), \ - HUBBUB_SF(DCN_VM_FB_LOCATION_BASE, FB_BASE, mask_sh), \ - HUBBUB_SF(DCN_VM_FB_LOCATION_TOP, FB_TOP, mask_sh), \ - HUBBUB_SF(DCN_VM_FB_OFFSET, FB_OFFSET, mask_sh), \ - HUBBUB_SF(DCN_VM_AGP_BOT, AGP_BOT, mask_sh), \ - HUBBUB_SF(DCN_VM_AGP_TOP, AGP_TOP, mask_sh), \ - HUBBUB_SF(DCN_VM_AGP_BASE, AGP_BASE, mask_sh), \ - HUBBUB_SF(DCN_VM_PROTECTION_FAULT_DEFAULT_ADDR_MSB, DCN_VM_PROTECTION_FAULT_DEFAULT_ADDR_MSB, mask_sh), \ - HUBBUB_SF(DCN_VM_PROTECTION_FAULT_DEFAULT_ADDR_LSB, DCN_VM_PROTECTION_FAULT_DEFAULT_ADDR_LSB, mask_sh), \ - HUBBUB_SF(DCN_VM_FAULT_ADDR_MSB, DCN_VM_FAULT_ADDR_MSB, mask_sh), \ - HUBBUB_SF(DCN_VM_FAULT_ADDR_LSB, DCN_VM_FAULT_ADDR_LSB, mask_sh), \ - HUBBUB_SF(DCN_VM_FAULT_CNTL, DCN_VM_ERROR_STATUS_CLEAR, mask_sh), \ - HUBBUB_SF(DCN_VM_FAULT_CNTL, DCN_VM_ERROR_STATUS_MODE, mask_sh), \ - HUBBUB_SF(DCN_VM_FAULT_CNTL, DCN_VM_ERROR_INTERRUPT_ENABLE, mask_sh), \ - HUBBUB_SF(DCN_VM_FAULT_CNTL, DCN_VM_RANGE_FAULT_DISABLE, mask_sh), \ - HUBBUB_SF(DCN_VM_FAULT_CNTL, DCN_VM_PRQ_FAULT_DISABLE, mask_sh), \ - HUBBUB_SF(DCN_VM_FAULT_STATUS, DCN_VM_ERROR_STATUS, mask_sh), \ - HUBBUB_SF(DCN_VM_FAULT_STATUS, DCN_VM_ERROR_VMID, mask_sh), \ - HUBBUB_SF(DCN_VM_FAULT_STATUS, DCN_VM_ERROR_TABLE_LEVEL, mask_sh), \ - HUBBUB_SF(DCN_VM_FAULT_STATUS, DCN_VM_ERROR_PIPE, mask_sh), \ - HUBBUB_SF(DCN_VM_FAULT_STATUS, DCN_VM_ERROR_INTERRUPT_STATUS, mask_sh) - -struct dcn20_hubbub { - struct hubbub base; - const struct dcn_hubbub_registers *regs; - const struct dcn_hubbub_shift *shifts; - const struct dcn_hubbub_mask *masks; - unsigned int debug_test_index_pstate; - union dcn_watermark_set watermarks; - int num_vmid; - struct dcn20_vmid vmid[16]; - unsigned int detile_buf_size; - unsigned int crb_size_segs; - unsigned int compbuf_size_segments; - unsigned int pixel_chunk_size; - unsigned int det0_size; - unsigned int det1_size; - unsigned int det2_size; - unsigned int det3_size; -}; - -void hubbub2_construct(struct dcn20_hubbub *hubbub, - struct dc_context *ctx, - const struct dcn_hubbub_registers *hubbub_regs, - const struct dcn_hubbub_shift *hubbub_shift, - const struct dcn_hubbub_mask *hubbub_mask); - -bool hubbub2_dcc_support_swizzle( - enum swizzle_mode_values swizzle, - unsigned int bytes_per_element, - enum segment_order *segment_order_horz, - enum segment_order *segment_order_vert); - -bool hubbub2_dcc_support_pixel_format( - enum surface_pixel_format format, - unsigned int *bytes_per_element); - -bool hubbub2_get_dcc_compression_cap(struct hubbub 
*hubbub, - const struct dc_dcc_surface_param *input, - struct dc_surface_dcc_cap *output); - -bool hubbub2_initialize_vmids(struct hubbub *hubbub, - const struct dc_dcc_surface_param *input, - struct dc_surface_dcc_cap *output); - -int hubbub2_init_dchub_sys_ctx(struct hubbub *hubbub, - struct dcn_hubbub_phys_addr_config *pa_config); -void hubbub2_init_vm_ctx(struct hubbub *hubbub, - struct dcn_hubbub_virt_addr_config *va_config, - int vmid); -void hubbub2_update_dchub(struct hubbub *hubbub, - struct dchub_init_data *dh_data); - -void hubbub2_get_dchub_ref_freq(struct hubbub *hubbub, - unsigned int dccg_ref_freq_inKhz, - unsigned int *dchub_ref_freq_inKhz); - -void hubbub2_wm_read_state(struct hubbub *hubbub, - struct dcn_hubbub_wm *wm); - -void hubbub2_read_state(struct hubbub *hubbub, - struct dcn_hubbub_state *hubbub_state); - -#endif diff --git a/drivers/gpu/drm/amd/display/dc/dcn201/Makefile b/drivers/gpu/drm/amd/display/dc/dcn201/Makefile index 3880db59e457..c5716ea5886a 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn201/Makefile +++ b/drivers/gpu/drm/amd/display/dc/dcn201/Makefile @@ -1,8 +1,7 @@ # SPDX-License-Identifier: MIT # # Makefile for DCN. -DCN201 = dcn201_hubbub.o\ - dcn201_mpc.o dcn201_hubp.o dcn201_opp.o \ +DCN201 = dcn201_mpc.o dcn201_hubp.o dcn201_opp.o \ dcn201_dccg.o dcn201_link_encoder.o AMD_DAL_DCN201 = $(addprefix $(AMDDALPATH)/dc/dcn201/,$(DCN201)) diff --git a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_hubbub.c deleted file mode 100644 index 63798132ed95..000000000000 --- a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_hubbub.c +++ /dev/null @@ -1,107 +0,0 @@ -/* -* Copyright 2018 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. 
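The TO_DCN20_HUBBUB() macro in the dcn20 header above is the usual container_of() downcast: struct dcn20_hubbub embeds struct hubbub as its first member, so a base pointer can be mapped back to the wrapper that carries the VMID table and detile buffer size. A toy, self-contained illustration of the pattern follows; the demo_* types are invented and are not the DC structs.

#include <stddef.h>
#include <stdio.h>

#define demo_container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct demo_hubbub {
        int ctx_id;
};

struct demo_dcn20_hubbub {
        struct demo_hubbub base;      /* embedded base object */
        unsigned int detile_buf_size; /* wrapper-only state */
};

int main(void)
{
        struct demo_dcn20_hubbub hub2 = {
                .base = { .ctx_id = 7 },
                .detile_buf_size = 164 * 1024,
        };
        struct demo_hubbub *base = &hub2.base;

        /* Recover the wrapper from the base pointer, in the spirit of
         * TO_DCN20_HUBBUB()'s container_of(). */
        struct demo_dcn20_hubbub *wrapper =
                demo_container_of(base, struct demo_dcn20_hubbub, base);

        printf("detile buffer: %u bytes\n", wrapper->detile_buf_size);
        return 0;
}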
- * - * Authors: AMD - * - */ -#include "dm_services.h" -#include "dcn20/dcn20_hubbub.h" -#include "dcn201_hubbub.h" -#include "reg_helper.h" - -#define REG(reg)\ - hubbub1->regs->reg - -#define DC_LOGGER \ - hubbub1->base.ctx->logger - -#define CTX \ - hubbub1->base.ctx - -#undef FN -#define FN(reg_name, field_name) \ - hubbub1->shifts->field_name, hubbub1->masks->field_name - -#define REG(reg)\ - hubbub1->regs->reg - -#define CTX \ - hubbub1->base.ctx - -#undef FN -#define FN(reg_name, field_name) \ - hubbub1->shifts->field_name, hubbub1->masks->field_name - -static bool hubbub201_program_watermarks( - struct hubbub *hubbub, - union dcn_watermark_set *watermarks, - unsigned int refclk_mhz, - bool safe_to_lower) -{ - struct dcn20_hubbub *hubbub1 = TO_DCN20_HUBBUB(hubbub); - bool wm_pending = false; - - if (hubbub1_program_urgent_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower)) - wm_pending = true; - - if (hubbub1_program_pstate_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower)) - wm_pending = true; - - REG_SET(DCHUBBUB_ARB_SAT_LEVEL, 0, - DCHUBBUB_ARB_SAT_LEVEL, 60 * refclk_mhz); - REG_UPDATE(DCHUBBUB_ARB_DF_REQ_OUTSTAND, - DCHUBBUB_ARB_MIN_REQ_OUTSTAND, 68); - - hubbub1_allow_self_refresh_control(hubbub, !hubbub->ctx->dc->debug.disable_stutter); - - return wm_pending; -} - -static const struct hubbub_funcs hubbub201_funcs = { - .update_dchub = hubbub2_update_dchub, - .init_dchub_sys_ctx = NULL, - .init_vm_ctx = NULL, - .dcc_support_swizzle = hubbub2_dcc_support_swizzle, - .dcc_support_pixel_format = hubbub2_dcc_support_pixel_format, - .get_dcc_compression_cap = hubbub2_get_dcc_compression_cap, - .wm_read_state = hubbub2_wm_read_state, - .get_dchub_ref_freq = hubbub2_get_dchub_ref_freq, - .program_watermarks = hubbub201_program_watermarks, - .hubbub_read_state = hubbub2_read_state, -}; - -void hubbub201_construct(struct dcn20_hubbub *hubbub, - struct dc_context *ctx, - const struct dcn_hubbub_registers *hubbub_regs, - const struct dcn_hubbub_shift *hubbub_shift, - const struct dcn_hubbub_mask *hubbub_mask) -{ - hubbub->base.ctx = ctx; - - hubbub->base.funcs = &hubbub201_funcs; - - hubbub->regs = hubbub_regs; - hubbub->shifts = hubbub_shift; - hubbub->masks = hubbub_mask; - - hubbub->debug_test_index_pstate = 0xB; - hubbub->detile_buf_size = 164 * 1024; /* 164KB for DCN2.0 */ -} diff --git a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_hubbub.h b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_hubbub.h deleted file mode 100644 index 5aeca0be3e15..000000000000 --- a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_hubbub.h +++ /dev/null @@ -1,45 +0,0 @@ -/* - * Copyright 2018 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - * Authors: AMD - * - */ -#ifndef DAL_DC_DCN201_DCN201_HUBBUB_H_ -#define DAL_DC_DCN201_DCN201_HUBBUB_H_ - -#include "dcn20/dcn20_hubbub.h" - -#define HUBBUB_REG_LIST_DCN201(id)\ - HUBBUB_REG_LIST_DCN_COMMON(), \ - HUBBUB_VM_REG_LIST(), \ - SR(DCHUBBUB_CRC_CTRL) - -#define HUBBUB_MASK_SH_LIST_DCN201(mask_sh)\ - HUBBUB_MASK_SH_LIST_DCN_COMMON(mask_sh), \ - HUBBUB_SF(DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_REFDIV, mask_sh) - -void hubbub201_construct(struct dcn20_hubbub *hubbub, - struct dc_context *ctx, - const struct dcn_hubbub_registers *hubbub_regs, - const struct dcn_hubbub_shift *hubbub_shift, - const struct dcn_hubbub_mask *hubbub_mask); - -#endif /* DAL_DC_DCN201_DCN201_HUBBUB_H_ */ diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/Makefile b/drivers/gpu/drm/amd/display/dc/dcn21/Makefile index ca92f5c8e7fb..b0803403fe23 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn21/Makefile +++ b/drivers/gpu/drm/amd/display/dc/dcn21/Makefile @@ -2,7 +2,7 @@ # # Makefile for DCN21. -DCN21 = dcn21_hubp.o dcn21_hubbub.o \ +DCN21 = dcn21_hubp.o \ dcn21_link_encoder.o dcn21_dccg.o AMD_DAL_DCN21 = $(addprefix $(AMDDALPATH)/dc/dcn21/,$(DCN21)) diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.c deleted file mode 100644 index 2546224b326a..000000000000 --- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.c +++ /dev/null @@ -1,723 +0,0 @@ -/* -* Copyright 2018 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. 
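The watermark programmers gathered in these files share the same arithmetic, spelled out by the convert_and_clamp() helper in the dcn21 code that follows: nanosecond watermarks become DLG refclk cycles via wm_ns * refclk_mhz / 1000, clamped to the 21-bit register width (0x1fffff), while the arbiter saturation level is written as 60 * refclk_mhz, i.e. a 60 us interval expressed in refclk cycles. A small standalone sketch of both conversions; the demo_* helpers and the example clock values are illustrative only.

#include <assert.h>
#include <stdint.h>

/* ns -> DLG refclk cycles, clamped to the watermark register width. */
static uint32_t demo_wm_to_refclk_cycles(uint32_t wm_ns, uint32_t refclk_mhz,
                                         uint32_t clamp)
{
        uint32_t cycles = wm_ns * refclk_mhz / 1000;

        return cycles > clamp ? clamp : cycles;
}

/* Arbiter saturation level: a 60 us interval expressed in refclk cycles. */
static uint32_t demo_arb_sat_level(uint32_t refclk_mhz)
{
        return 60u * refclk_mhz;
}

int main(void)
{
        /* A 9 us urgent watermark at a 48 MHz refclk programs 432 cycles. */
        assert(demo_wm_to_refclk_cycles(9000, 48, 0x1fffff) == 432);
        /* At the same refclk the saturation level is 2880 cycles. */
        assert(demo_arb_sat_level(48) == 2880);
        return 0;
}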
- * - * Authors: AMD - * - */ -#include -#include "dm_services.h" -#include "dcn20/dcn20_hubbub.h" -#include "dcn21_hubbub.h" -#include "reg_helper.h" - -#define REG(reg)\ - hubbub1->regs->reg -#define DC_LOGGER \ - hubbub1->base.ctx->logger -#define CTX \ - hubbub1->base.ctx - -#undef FN -#define FN(reg_name, field_name) \ - hubbub1->shifts->field_name, hubbub1->masks->field_name - -#define REG(reg)\ - hubbub1->regs->reg - -#define CTX \ - hubbub1->base.ctx - -#undef FN -#define FN(reg_name, field_name) \ - hubbub1->shifts->field_name, hubbub1->masks->field_name - -static uint32_t convert_and_clamp( - uint32_t wm_ns, - uint32_t refclk_mhz, - uint32_t clamp_value) -{ - uint32_t ret_val = 0; - ret_val = wm_ns * refclk_mhz; - ret_val /= 1000; - - if (ret_val > clamp_value) - ret_val = clamp_value; - - return ret_val; -} - -void dcn21_dchvm_init(struct hubbub *hubbub) -{ - struct dcn20_hubbub *hubbub1 = TO_DCN20_HUBBUB(hubbub); - uint32_t riommu_active; - int i; - - //Init DCHVM block - REG_UPDATE(DCHVM_CTRL0, HOSTVM_INIT_REQ, 1); - - //Poll until RIOMMU_ACTIVE = 1 - for (i = 0; i < 100; i++) { - REG_GET(DCHVM_RIOMMU_STAT0, RIOMMU_ACTIVE, &riommu_active); - - if (riommu_active) - break; - else - udelay(5); - } - - if (riommu_active) { - //Reflect the power status of DCHUBBUB - REG_UPDATE(DCHVM_RIOMMU_CTRL0, HOSTVM_POWERSTATUS, 1); - - //Start rIOMMU prefetching - REG_UPDATE(DCHVM_RIOMMU_CTRL0, HOSTVM_PREFETCH_REQ, 1); - - // Enable dynamic clock gating - REG_UPDATE_4(DCHVM_CLK_CTRL, - HVM_DISPCLK_R_GATE_DIS, 0, - HVM_DISPCLK_G_GATE_DIS, 0, - HVM_DCFCLK_R_GATE_DIS, 0, - HVM_DCFCLK_G_GATE_DIS, 0); - - //Poll until HOSTVM_PREFETCH_DONE = 1 - REG_WAIT(DCHVM_RIOMMU_STAT0, HOSTVM_PREFETCH_DONE, 1, 5, 100); - - hubbub->riommu_active = true; - } -} - -int hubbub21_init_dchub(struct hubbub *hubbub, - struct dcn_hubbub_phys_addr_config *pa_config) -{ - struct dcn20_hubbub *hubbub1 = TO_DCN20_HUBBUB(hubbub); - struct dcn_vmid_page_table_config phys_config; - - REG_SET(DCN_VM_FB_LOCATION_BASE, 0, - FB_BASE, pa_config->system_aperture.fb_base >> 24); - REG_SET(DCN_VM_FB_LOCATION_TOP, 0, - FB_TOP, pa_config->system_aperture.fb_top >> 24); - REG_SET(DCN_VM_FB_OFFSET, 0, - FB_OFFSET, pa_config->system_aperture.fb_offset >> 24); - REG_SET(DCN_VM_AGP_BOT, 0, - AGP_BOT, pa_config->system_aperture.agp_bot >> 24); - REG_SET(DCN_VM_AGP_TOP, 0, - AGP_TOP, pa_config->system_aperture.agp_top >> 24); - REG_SET(DCN_VM_AGP_BASE, 0, - AGP_BASE, pa_config->system_aperture.agp_base >> 24); - - if (pa_config->gart_config.page_table_start_addr != pa_config->gart_config.page_table_end_addr) { - phys_config.page_table_start_addr = pa_config->gart_config.page_table_start_addr >> 12; - phys_config.page_table_end_addr = pa_config->gart_config.page_table_end_addr >> 12; - phys_config.page_table_base_addr = pa_config->gart_config.page_table_base_addr | 1; //Note: hack - phys_config.depth = 0; - phys_config.block_size = 0; - // Init VMID 0 based on PA config - dcn20_vmid_setup(&hubbub1->vmid[0], &phys_config); - } - - dcn21_dchvm_init(hubbub); - - return hubbub1->num_vmid; -} - -bool hubbub21_program_urgent_watermarks( - struct hubbub *hubbub, - union dcn_watermark_set *watermarks, - unsigned int refclk_mhz, - bool safe_to_lower) -{ - struct dcn20_hubbub *hubbub1 = TO_DCN20_HUBBUB(hubbub); - uint32_t prog_wm_value; - bool wm_pending = false; - - /* Repeat for water mark set A, B, C and D. 
*/ - /* clock state A */ - if (safe_to_lower || watermarks->a.urgent_ns > hubbub1->watermarks.a.urgent_ns) { - hubbub1->watermarks.a.urgent_ns = watermarks->a.urgent_ns; - prog_wm_value = convert_and_clamp(watermarks->a.urgent_ns, - refclk_mhz, 0x1fffff); - REG_SET_2(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A, 0, - DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A, prog_wm_value, - DCHUBBUB_ARB_VM_ROW_URGENCY_WATERMARK_A, prog_wm_value); - - DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_A calculated =%d\n" - "HW register value = 0x%x\n", - watermarks->a.urgent_ns, prog_wm_value); - } else if (watermarks->a.urgent_ns < hubbub1->watermarks.a.urgent_ns) - wm_pending = true; - - /* determine the transfer time for a quantity of data for a particular requestor.*/ - if (safe_to_lower || watermarks->a.frac_urg_bw_flip - > hubbub1->watermarks.a.frac_urg_bw_flip) { - hubbub1->watermarks.a.frac_urg_bw_flip = watermarks->a.frac_urg_bw_flip; - - REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_A, 0, - DCHUBBUB_ARB_FRAC_URG_BW_FLIP_A, watermarks->a.frac_urg_bw_flip); - } else if (watermarks->a.frac_urg_bw_flip - < hubbub1->watermarks.a.frac_urg_bw_flip) - wm_pending = true; - - if (safe_to_lower || watermarks->a.frac_urg_bw_nom - > hubbub1->watermarks.a.frac_urg_bw_nom) { - hubbub1->watermarks.a.frac_urg_bw_nom = watermarks->a.frac_urg_bw_nom; - - REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_NOM_A, 0, - DCHUBBUB_ARB_FRAC_URG_BW_NOM_A, watermarks->a.frac_urg_bw_nom); - } else if (watermarks->a.frac_urg_bw_nom - < hubbub1->watermarks.a.frac_urg_bw_nom) - wm_pending = true; - - if (safe_to_lower || watermarks->a.urgent_latency_ns > hubbub1->watermarks.a.urgent_latency_ns) { - hubbub1->watermarks.a.urgent_latency_ns = watermarks->a.urgent_latency_ns; - prog_wm_value = convert_and_clamp(watermarks->a.urgent_latency_ns, - refclk_mhz, 0x1fffff); - REG_SET(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_A, 0, - DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_A, prog_wm_value); - } else if (watermarks->a.urgent_latency_ns < hubbub1->watermarks.a.urgent_latency_ns) - wm_pending = true; - - /* clock state B */ - if (safe_to_lower || watermarks->b.urgent_ns > hubbub1->watermarks.b.urgent_ns) { - hubbub1->watermarks.b.urgent_ns = watermarks->b.urgent_ns; - prog_wm_value = convert_and_clamp(watermarks->b.urgent_ns, - refclk_mhz, 0x1fffff); - REG_SET_2(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B, 0, - DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B, prog_wm_value, - DCHUBBUB_ARB_VM_ROW_URGENCY_WATERMARK_B, prog_wm_value); - - DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_B calculated =%d\n" - "HW register value = 0x%x\n", - watermarks->b.urgent_ns, prog_wm_value); - } else if (watermarks->b.urgent_ns < hubbub1->watermarks.b.urgent_ns) - wm_pending = true; - - /* determine the transfer time for a quantity of data for a particular requestor.*/ - if (safe_to_lower || watermarks->a.frac_urg_bw_flip - > hubbub1->watermarks.a.frac_urg_bw_flip) { - hubbub1->watermarks.a.frac_urg_bw_flip = watermarks->a.frac_urg_bw_flip; - - REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_B, 0, - DCHUBBUB_ARB_FRAC_URG_BW_FLIP_B, watermarks->a.frac_urg_bw_flip); - } else if (watermarks->a.frac_urg_bw_flip - < hubbub1->watermarks.a.frac_urg_bw_flip) - wm_pending = true; - - if (safe_to_lower || watermarks->a.frac_urg_bw_nom - > hubbub1->watermarks.a.frac_urg_bw_nom) { - hubbub1->watermarks.a.frac_urg_bw_nom = watermarks->a.frac_urg_bw_nom; - - REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_NOM_B, 0, - DCHUBBUB_ARB_FRAC_URG_BW_NOM_B, watermarks->a.frac_urg_bw_nom); - } else if (watermarks->a.frac_urg_bw_nom - < hubbub1->watermarks.a.frac_urg_bw_nom) 
- wm_pending = true; - - if (safe_to_lower || watermarks->b.urgent_latency_ns > hubbub1->watermarks.b.urgent_latency_ns) { - hubbub1->watermarks.b.urgent_latency_ns = watermarks->b.urgent_latency_ns; - prog_wm_value = convert_and_clamp(watermarks->b.urgent_latency_ns, - refclk_mhz, 0x1fffff); - REG_SET(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_B, 0, - DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_B, prog_wm_value); - } else if (watermarks->b.urgent_latency_ns < hubbub1->watermarks.b.urgent_latency_ns) - wm_pending = true; - - /* clock state C */ - if (safe_to_lower || watermarks->c.urgent_ns > hubbub1->watermarks.c.urgent_ns) { - hubbub1->watermarks.c.urgent_ns = watermarks->c.urgent_ns; - prog_wm_value = convert_and_clamp(watermarks->c.urgent_ns, - refclk_mhz, 0x1fffff); - REG_SET_2(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C, 0, - DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C, prog_wm_value, - DCHUBBUB_ARB_VM_ROW_URGENCY_WATERMARK_C, prog_wm_value); - - DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_C calculated =%d\n" - "HW register value = 0x%x\n", - watermarks->c.urgent_ns, prog_wm_value); - } else if (watermarks->c.urgent_ns < hubbub1->watermarks.c.urgent_ns) - wm_pending = true; - - /* determine the transfer time for a quantity of data for a particular requestor.*/ - if (safe_to_lower || watermarks->a.frac_urg_bw_flip - > hubbub1->watermarks.a.frac_urg_bw_flip) { - hubbub1->watermarks.a.frac_urg_bw_flip = watermarks->a.frac_urg_bw_flip; - - REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_C, 0, - DCHUBBUB_ARB_FRAC_URG_BW_FLIP_C, watermarks->a.frac_urg_bw_flip); - } else if (watermarks->a.frac_urg_bw_flip - < hubbub1->watermarks.a.frac_urg_bw_flip) - wm_pending = true; - - if (safe_to_lower || watermarks->a.frac_urg_bw_nom - > hubbub1->watermarks.a.frac_urg_bw_nom) { - hubbub1->watermarks.a.frac_urg_bw_nom = watermarks->a.frac_urg_bw_nom; - - REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_NOM_C, 0, - DCHUBBUB_ARB_FRAC_URG_BW_NOM_C, watermarks->a.frac_urg_bw_nom); - } else if (watermarks->a.frac_urg_bw_nom - < hubbub1->watermarks.a.frac_urg_bw_nom) - wm_pending = true; - - if (safe_to_lower || watermarks->c.urgent_latency_ns > hubbub1->watermarks.c.urgent_latency_ns) { - hubbub1->watermarks.c.urgent_latency_ns = watermarks->c.urgent_latency_ns; - prog_wm_value = convert_and_clamp(watermarks->c.urgent_latency_ns, - refclk_mhz, 0x1fffff); - REG_SET(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_C, 0, - DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_C, prog_wm_value); - } else if (watermarks->c.urgent_latency_ns < hubbub1->watermarks.c.urgent_latency_ns) - wm_pending = true; - - /* clock state D */ - if (safe_to_lower || watermarks->d.urgent_ns > hubbub1->watermarks.d.urgent_ns) { - hubbub1->watermarks.d.urgent_ns = watermarks->d.urgent_ns; - prog_wm_value = convert_and_clamp(watermarks->d.urgent_ns, - refclk_mhz, 0x1fffff); - REG_SET_2(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D, 0, - DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D, prog_wm_value, - DCHUBBUB_ARB_VM_ROW_URGENCY_WATERMARK_D, prog_wm_value); - - DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_D calculated =%d\n" - "HW register value = 0x%x\n", - watermarks->d.urgent_ns, prog_wm_value); - } else if (watermarks->d.urgent_ns < hubbub1->watermarks.d.urgent_ns) - wm_pending = true; - - /* determine the transfer time for a quantity of data for a particular requestor.*/ - if (safe_to_lower || watermarks->a.frac_urg_bw_flip - > hubbub1->watermarks.a.frac_urg_bw_flip) { - hubbub1->watermarks.a.frac_urg_bw_flip = watermarks->a.frac_urg_bw_flip; - - REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_D, 0, - 
DCHUBBUB_ARB_FRAC_URG_BW_FLIP_D, watermarks->a.frac_urg_bw_flip); - } else if (watermarks->a.frac_urg_bw_flip - < hubbub1->watermarks.a.frac_urg_bw_flip) - wm_pending = true; - - if (safe_to_lower || watermarks->a.frac_urg_bw_nom - > hubbub1->watermarks.a.frac_urg_bw_nom) { - hubbub1->watermarks.a.frac_urg_bw_nom = watermarks->a.frac_urg_bw_nom; - - REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_NOM_D, 0, - DCHUBBUB_ARB_FRAC_URG_BW_NOM_D, watermarks->a.frac_urg_bw_nom); - } else if (watermarks->a.frac_urg_bw_nom - < hubbub1->watermarks.a.frac_urg_bw_nom) - wm_pending = true; - - if (safe_to_lower || watermarks->d.urgent_latency_ns > hubbub1->watermarks.d.urgent_latency_ns) { - hubbub1->watermarks.d.urgent_latency_ns = watermarks->d.urgent_latency_ns; - prog_wm_value = convert_and_clamp(watermarks->d.urgent_latency_ns, - refclk_mhz, 0x1fffff); - REG_SET(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_D, 0, - DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_D, prog_wm_value); - } else if (watermarks->d.urgent_latency_ns < hubbub1->watermarks.d.urgent_latency_ns) - wm_pending = true; - - return wm_pending; -} - -bool hubbub21_program_stutter_watermarks( - struct hubbub *hubbub, - union dcn_watermark_set *watermarks, - unsigned int refclk_mhz, - bool safe_to_lower) -{ - struct dcn20_hubbub *hubbub1 = TO_DCN20_HUBBUB(hubbub); - uint32_t prog_wm_value; - bool wm_pending = false; - - /* clock state A */ - if (safe_to_lower || watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns - > hubbub1->watermarks.a.cstate_pstate.cstate_enter_plus_exit_ns) { - hubbub1->watermarks.a.cstate_pstate.cstate_enter_plus_exit_ns = - watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns; - prog_wm_value = convert_and_clamp( - watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns, - refclk_mhz, 0x1fffff); - REG_SET_2(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A, 0, - DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A, prog_wm_value, - DCHUBBUB_ARB_VM_ROW_ALLOW_SR_ENTER_WATERMARK_A, prog_wm_value); - DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_A calculated =%d\n" - "HW register value = 0x%x\n", - watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value); - } else if (watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns - < hubbub1->watermarks.a.cstate_pstate.cstate_enter_plus_exit_ns) - wm_pending = true; - - if (safe_to_lower || watermarks->a.cstate_pstate.cstate_exit_ns - > hubbub1->watermarks.a.cstate_pstate.cstate_exit_ns) { - hubbub1->watermarks.a.cstate_pstate.cstate_exit_ns = - watermarks->a.cstate_pstate.cstate_exit_ns; - prog_wm_value = convert_and_clamp( - watermarks->a.cstate_pstate.cstate_exit_ns, - refclk_mhz, 0x1fffff); - REG_SET_2(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A, 0, - DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A, prog_wm_value, - DCHUBBUB_ARB_VM_ROW_ALLOW_SR_EXIT_WATERMARK_A, prog_wm_value); - DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_A calculated =%d\n" - "HW register value = 0x%x\n", - watermarks->a.cstate_pstate.cstate_exit_ns, prog_wm_value); - } else if (watermarks->a.cstate_pstate.cstate_exit_ns - < hubbub1->watermarks.a.cstate_pstate.cstate_exit_ns) - wm_pending = true; - - /* clock state B */ - if (safe_to_lower || watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns - > hubbub1->watermarks.b.cstate_pstate.cstate_enter_plus_exit_ns) { - hubbub1->watermarks.b.cstate_pstate.cstate_enter_plus_exit_ns = - watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns; - prog_wm_value = convert_and_clamp( - watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns, - refclk_mhz, 0x1fffff); - REG_SET_2(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B, 
0, - DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B, prog_wm_value, - DCHUBBUB_ARB_VM_ROW_ALLOW_SR_ENTER_WATERMARK_B, prog_wm_value); - DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_B calculated =%d\n" - "HW register value = 0x%x\n", - watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value); - } else if (watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns - < hubbub1->watermarks.b.cstate_pstate.cstate_enter_plus_exit_ns) - wm_pending = true; - - if (safe_to_lower || watermarks->b.cstate_pstate.cstate_exit_ns - > hubbub1->watermarks.b.cstate_pstate.cstate_exit_ns) { - hubbub1->watermarks.b.cstate_pstate.cstate_exit_ns = - watermarks->b.cstate_pstate.cstate_exit_ns; - prog_wm_value = convert_and_clamp( - watermarks->b.cstate_pstate.cstate_exit_ns, - refclk_mhz, 0x1fffff); - REG_SET_2(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B, 0, - DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B, prog_wm_value, - DCHUBBUB_ARB_VM_ROW_ALLOW_SR_EXIT_WATERMARK_A, prog_wm_value); - DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_B calculated =%d\n" - "HW register value = 0x%x\n", - watermarks->b.cstate_pstate.cstate_exit_ns, prog_wm_value); - } else if (watermarks->b.cstate_pstate.cstate_exit_ns - < hubbub1->watermarks.b.cstate_pstate.cstate_exit_ns) - wm_pending = true; - - /* clock state C */ - if (safe_to_lower || watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns - > hubbub1->watermarks.c.cstate_pstate.cstate_enter_plus_exit_ns) { - hubbub1->watermarks.c.cstate_pstate.cstate_enter_plus_exit_ns = - watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns; - prog_wm_value = convert_and_clamp( - watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns, - refclk_mhz, 0x1fffff); - REG_SET_2(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C, 0, - DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C, prog_wm_value, - DCHUBBUB_ARB_VM_ROW_ALLOW_SR_ENTER_WATERMARK_C, prog_wm_value); - DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_C calculated =%d\n" - "HW register value = 0x%x\n", - watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value); - } else if (watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns - < hubbub1->watermarks.c.cstate_pstate.cstate_enter_plus_exit_ns) - wm_pending = true; - - if (safe_to_lower || watermarks->c.cstate_pstate.cstate_exit_ns - > hubbub1->watermarks.c.cstate_pstate.cstate_exit_ns) { - hubbub1->watermarks.c.cstate_pstate.cstate_exit_ns = - watermarks->c.cstate_pstate.cstate_exit_ns; - prog_wm_value = convert_and_clamp( - watermarks->c.cstate_pstate.cstate_exit_ns, - refclk_mhz, 0x1fffff); - REG_SET_2(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C, 0, - DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C, prog_wm_value, - DCHUBBUB_ARB_VM_ROW_ALLOW_SR_EXIT_WATERMARK_A, prog_wm_value); - DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_C calculated =%d\n" - "HW register value = 0x%x\n", - watermarks->c.cstate_pstate.cstate_exit_ns, prog_wm_value); - } else if (watermarks->c.cstate_pstate.cstate_exit_ns - < hubbub1->watermarks.c.cstate_pstate.cstate_exit_ns) - wm_pending = true; - - /* clock state D */ - if (safe_to_lower || watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns - > hubbub1->watermarks.d.cstate_pstate.cstate_enter_plus_exit_ns) { - hubbub1->watermarks.d.cstate_pstate.cstate_enter_plus_exit_ns = - watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns; - prog_wm_value = convert_and_clamp( - watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns, - refclk_mhz, 0x1fffff); - REG_SET_2(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D, 0, - DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D, prog_wm_value, - 
DCHUBBUB_ARB_VM_ROW_ALLOW_SR_ENTER_WATERMARK_D, prog_wm_value); - DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_D calculated =%d\n" - "HW register value = 0x%x\n", - watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value); - } else if (watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns - < hubbub1->watermarks.d.cstate_pstate.cstate_enter_plus_exit_ns) - wm_pending = true; - - if (safe_to_lower || watermarks->d.cstate_pstate.cstate_exit_ns - > hubbub1->watermarks.d.cstate_pstate.cstate_exit_ns) { - hubbub1->watermarks.d.cstate_pstate.cstate_exit_ns = - watermarks->d.cstate_pstate.cstate_exit_ns; - prog_wm_value = convert_and_clamp( - watermarks->d.cstate_pstate.cstate_exit_ns, - refclk_mhz, 0x1fffff); - REG_SET_2(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D, 0, - DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D, prog_wm_value, - DCHUBBUB_ARB_VM_ROW_ALLOW_SR_EXIT_WATERMARK_A, prog_wm_value); - DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_D calculated =%d\n" - "HW register value = 0x%x\n", - watermarks->d.cstate_pstate.cstate_exit_ns, prog_wm_value); - } else if (watermarks->d.cstate_pstate.cstate_exit_ns - < hubbub1->watermarks.d.cstate_pstate.cstate_exit_ns) - wm_pending = true; - - return wm_pending; -} - -bool hubbub21_program_pstate_watermarks( - struct hubbub *hubbub, - union dcn_watermark_set *watermarks, - unsigned int refclk_mhz, - bool safe_to_lower) -{ - struct dcn20_hubbub *hubbub1 = TO_DCN20_HUBBUB(hubbub); - uint32_t prog_wm_value; - - bool wm_pending = false; - - /* clock state A */ - if (safe_to_lower || watermarks->a.cstate_pstate.pstate_change_ns - > hubbub1->watermarks.a.cstate_pstate.pstate_change_ns) { - hubbub1->watermarks.a.cstate_pstate.pstate_change_ns = - watermarks->a.cstate_pstate.pstate_change_ns; - prog_wm_value = convert_and_clamp( - watermarks->a.cstate_pstate.pstate_change_ns, - refclk_mhz, 0x1fffff); - REG_SET_2(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A, 0, - DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A, prog_wm_value, - DCHUBBUB_ARB_VM_ROW_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A, prog_wm_value); - DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_A calculated =%d\n" - "HW register value = 0x%x\n\n", - watermarks->a.cstate_pstate.pstate_change_ns, prog_wm_value); - } else if (watermarks->a.cstate_pstate.pstate_change_ns - < hubbub1->watermarks.a.cstate_pstate.pstate_change_ns) - wm_pending = true; - - /* clock state B */ - if (safe_to_lower || watermarks->b.cstate_pstate.pstate_change_ns - > hubbub1->watermarks.b.cstate_pstate.pstate_change_ns) { - hubbub1->watermarks.b.cstate_pstate.pstate_change_ns = - watermarks->b.cstate_pstate.pstate_change_ns; - prog_wm_value = convert_and_clamp( - watermarks->b.cstate_pstate.pstate_change_ns, - refclk_mhz, 0x1fffff); - REG_SET_2(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B, 0, - DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B, prog_wm_value, - DCHUBBUB_ARB_VM_ROW_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B, prog_wm_value); - DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_B calculated =%d\n" - "HW register value = 0x%x\n\n", - watermarks->b.cstate_pstate.pstate_change_ns, prog_wm_value); - } else if (watermarks->b.cstate_pstate.pstate_change_ns - < hubbub1->watermarks.b.cstate_pstate.pstate_change_ns) - wm_pending = false; - - /* clock state C */ - if (safe_to_lower || watermarks->c.cstate_pstate.pstate_change_ns - > hubbub1->watermarks.c.cstate_pstate.pstate_change_ns) { - hubbub1->watermarks.c.cstate_pstate.pstate_change_ns = - watermarks->c.cstate_pstate.pstate_change_ns; - prog_wm_value = convert_and_clamp( - 
watermarks->c.cstate_pstate.pstate_change_ns, - refclk_mhz, 0x1fffff); - REG_SET_2(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C, 0, - DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C, prog_wm_value, - DCHUBBUB_ARB_VM_ROW_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C, prog_wm_value); - DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_C calculated =%d\n" - "HW register value = 0x%x\n\n", - watermarks->c.cstate_pstate.pstate_change_ns, prog_wm_value); - } else if (watermarks->c.cstate_pstate.pstate_change_ns - < hubbub1->watermarks.c.cstate_pstate.pstate_change_ns) - wm_pending = true; - - /* clock state D */ - if (safe_to_lower || watermarks->d.cstate_pstate.pstate_change_ns - > hubbub1->watermarks.d.cstate_pstate.pstate_change_ns) { - hubbub1->watermarks.d.cstate_pstate.pstate_change_ns = - watermarks->d.cstate_pstate.pstate_change_ns; - prog_wm_value = convert_and_clamp( - watermarks->d.cstate_pstate.pstate_change_ns, - refclk_mhz, 0x1fffff); - REG_SET_2(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D, 0, - DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D, prog_wm_value, - DCHUBBUB_ARB_VM_ROW_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D, prog_wm_value); - DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_D calculated =%d\n" - "HW register value = 0x%x\n\n", - watermarks->d.cstate_pstate.pstate_change_ns, prog_wm_value); - } else if (watermarks->d.cstate_pstate.pstate_change_ns - < hubbub1->watermarks.d.cstate_pstate.pstate_change_ns) - wm_pending = true; - - return wm_pending; -} - -bool hubbub21_program_watermarks( - struct hubbub *hubbub, - union dcn_watermark_set *watermarks, - unsigned int refclk_mhz, - bool safe_to_lower) -{ - struct dcn20_hubbub *hubbub1 = TO_DCN20_HUBBUB(hubbub); - bool wm_pending = false; - - if (hubbub21_program_urgent_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower)) - wm_pending = true; - - if (hubbub21_program_stutter_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower)) - wm_pending = true; - - if (hubbub21_program_pstate_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower)) - wm_pending = true; - - /* - * The DCHub arbiter has a mechanism to dynamically rate limit the DCHub request stream to the fabric. - * If the memory controller is fully utilized and the DCHub requestors are - * well ahead of their amortized schedule, then it is safe to prevent the next winner - * from being committed and sent to the fabric. - * The utilization of the memory controller is approximated by ensuring that - * the number of outstanding requests is greater than a threshold specified - * by the ARB_MIN_REQ_OUTSTANDING. To determine that the DCHub requestors are well ahead of the amortized schedule, - * the slack of the next winner is compared with the ARB_SAT_LEVEL in DLG RefClk cycles. - * - * TODO: Revisit request limit after figure out right number. request limit for Renoir isn't decided yet, set maximum value (0x1FF) - * to turn off it for now. 
- */ - REG_SET(DCHUBBUB_ARB_SAT_LEVEL, 0, - DCHUBBUB_ARB_SAT_LEVEL, 60 * refclk_mhz); - REG_UPDATE_2(DCHUBBUB_ARB_DF_REQ_OUTSTAND, - DCHUBBUB_ARB_MIN_REQ_OUTSTAND, 0x1FF, - DCHUBBUB_ARB_MIN_REQ_OUTSTAND_COMMIT_THRESHOLD, 0xA); - REG_UPDATE(DCHUBBUB_ARB_HOSTVM_CNTL, - DCHUBBUB_ARB_MAX_QOS_COMMIT_THRESHOLD, 0xF); - - hubbub1_allow_self_refresh_control(hubbub, !hubbub->ctx->dc->debug.disable_stutter); - - return wm_pending; -} - -void hubbub21_wm_read_state(struct hubbub *hubbub, - struct dcn_hubbub_wm *wm) -{ - struct dcn20_hubbub *hubbub1 = TO_DCN20_HUBBUB(hubbub); - struct dcn_hubbub_wm_set *s; - - memset(wm, 0, sizeof(struct dcn_hubbub_wm)); - - s = &wm->sets[0]; - s->wm_set = 0; - REG_GET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A, - DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A, &s->data_urgent); - - REG_GET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A, - DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A, &s->sr_enter); - - REG_GET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A, - DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A, &s->sr_exit); - - REG_GET(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A, - DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A, &s->dram_clk_change); - - s = &wm->sets[1]; - s->wm_set = 1; - REG_GET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B, - DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B, &s->data_urgent); - - REG_GET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B, - DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B, &s->sr_enter); - - REG_GET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B, - DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B, &s->sr_exit); - - REG_GET(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B, - DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B, &s->dram_clk_change); - - s = &wm->sets[2]; - s->wm_set = 2; - REG_GET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C, - DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C, &s->data_urgent); - - REG_GET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C, - DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C, &s->sr_enter); - - REG_GET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C, - DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C, &s->sr_exit); - - REG_GET(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C, - DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C, &s->dram_clk_change); - - s = &wm->sets[3]; - s->wm_set = 3; - REG_GET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D, - DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D, &s->data_urgent); - - REG_GET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D, - DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D, &s->sr_enter); - - REG_GET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D, - DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D, &s->sr_exit); - - REG_GET(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D, - DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D, &s->dram_clk_change); -} - -static void hubbub21_apply_DEDCN21_147_wa(struct hubbub *hubbub) -{ - struct dcn20_hubbub *hubbub1 = TO_DCN20_HUBBUB(hubbub); - uint32_t prog_wm_value; - - prog_wm_value = REG_READ(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A); - REG_WRITE(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A, prog_wm_value); -} - -static const struct hubbub_funcs hubbub21_funcs = { - .update_dchub = hubbub2_update_dchub, - .init_dchub_sys_ctx = hubbub21_init_dchub, - .init_vm_ctx = hubbub2_init_vm_ctx, - .dcc_support_swizzle = hubbub2_dcc_support_swizzle, - .dcc_support_pixel_format = hubbub2_dcc_support_pixel_format, - .get_dcc_compression_cap = hubbub2_get_dcc_compression_cap, - .wm_read_state = hubbub21_wm_read_state, - .get_dchub_ref_freq = hubbub2_get_dchub_ref_freq, - .program_watermarks = hubbub21_program_watermarks, - .allow_self_refresh_control = hubbub1_allow_self_refresh_control, - .apply_DEDCN21_147_wa = 
hubbub21_apply_DEDCN21_147_wa, - .hubbub_read_state = hubbub2_read_state, -}; - -void hubbub21_construct(struct dcn20_hubbub *hubbub, - struct dc_context *ctx, - const struct dcn_hubbub_registers *hubbub_regs, - const struct dcn_hubbub_shift *hubbub_shift, - const struct dcn_hubbub_mask *hubbub_mask) -{ - hubbub->base.ctx = ctx; - - hubbub->base.funcs = &hubbub21_funcs; - - hubbub->regs = hubbub_regs; - hubbub->shifts = hubbub_shift; - hubbub->masks = hubbub_mask; - - hubbub->debug_test_index_pstate = 0xB; - hubbub->detile_buf_size = 164 * 1024; /* 164KB for DCN2.0 */ -} diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.h b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.h deleted file mode 100644 index ab2ce0313529..000000000000 --- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.h +++ /dev/null @@ -1,158 +0,0 @@ -/* -* Copyright 2018 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. 
- * - * Authors: AMD - * - */ -#ifndef DAL_DC_DCN21_DCN21_HUBBUB_H_ -#define DAL_DC_DCN21_DCN21_HUBBUB_H_ - -#include "dcn20/dcn20_hubbub.h" - -#define HUBBUB_HVM_REG_LIST() \ - SR(DCHUBBUB_ARB_FRAC_URG_BW_NOM_A),\ - SR(DCHUBBUB_ARB_FRAC_URG_BW_NOM_B),\ - SR(DCHUBBUB_ARB_FRAC_URG_BW_NOM_C),\ - SR(DCHUBBUB_ARB_FRAC_URG_BW_NOM_D),\ - SR(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_A),\ - SR(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_B),\ - SR(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_C),\ - SR(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_D),\ - SR(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_A),\ - SR(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_B),\ - SR(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_C),\ - SR(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_D),\ - SR(DCHUBBUB_ARB_HOSTVM_CNTL), \ - SR(DCHVM_CTRL0), \ - SR(DCHVM_MEM_CTRL), \ - SR(DCHVM_CLK_CTRL), \ - SR(DCHVM_RIOMMU_CTRL0), \ - SR(DCHVM_RIOMMU_STAT0) - -#define HUBBUB_REG_LIST_DCN21()\ - HUBBUB_REG_LIST_DCN20_COMMON(), \ - HUBBUB_SR_WATERMARK_REG_LIST(), \ - HUBBUB_HVM_REG_LIST() - -#define HUBBUB_MASK_SH_LIST_HVM(mask_sh) \ - HUBBUB_SF(DCHUBBUB_ARB_DF_REQ_OUTSTAND, DCHUBBUB_ARB_MIN_REQ_OUTSTAND_COMMIT_THRESHOLD, mask_sh), \ - HUBBUB_SF(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_A, DCHUBBUB_ARB_FRAC_URG_BW_FLIP_A, mask_sh), \ - HUBBUB_SF(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_B, DCHUBBUB_ARB_FRAC_URG_BW_FLIP_B, mask_sh), \ - HUBBUB_SF(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_C, DCHUBBUB_ARB_FRAC_URG_BW_FLIP_C, mask_sh), \ - HUBBUB_SF(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_D, DCHUBBUB_ARB_FRAC_URG_BW_FLIP_D, mask_sh), \ - HUBBUB_SF(DCHUBBUB_ARB_FRAC_URG_BW_NOM_A, DCHUBBUB_ARB_FRAC_URG_BW_NOM_A, mask_sh), \ - HUBBUB_SF(DCHUBBUB_ARB_FRAC_URG_BW_NOM_B, DCHUBBUB_ARB_FRAC_URG_BW_NOM_B, mask_sh), \ - HUBBUB_SF(DCHUBBUB_ARB_FRAC_URG_BW_NOM_C, DCHUBBUB_ARB_FRAC_URG_BW_NOM_C, mask_sh), \ - HUBBUB_SF(DCHUBBUB_ARB_FRAC_URG_BW_NOM_D, DCHUBBUB_ARB_FRAC_URG_BW_NOM_D, mask_sh), \ - HUBBUB_SF(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A, DCHUBBUB_ARB_VM_ROW_URGENCY_WATERMARK_A, mask_sh), \ - HUBBUB_SF(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B, DCHUBBUB_ARB_VM_ROW_URGENCY_WATERMARK_B, mask_sh), \ - HUBBUB_SF(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C, DCHUBBUB_ARB_VM_ROW_URGENCY_WATERMARK_C, mask_sh), \ - HUBBUB_SF(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D, DCHUBBUB_ARB_VM_ROW_URGENCY_WATERMARK_D, mask_sh), \ - HUBBUB_SF(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A, DCHUBBUB_ARB_VM_ROW_ALLOW_SR_ENTER_WATERMARK_A, mask_sh), \ - HUBBUB_SF(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B, DCHUBBUB_ARB_VM_ROW_ALLOW_SR_ENTER_WATERMARK_B, mask_sh), \ - HUBBUB_SF(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C, DCHUBBUB_ARB_VM_ROW_ALLOW_SR_ENTER_WATERMARK_C, mask_sh), \ - HUBBUB_SF(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D, DCHUBBUB_ARB_VM_ROW_ALLOW_SR_ENTER_WATERMARK_D, mask_sh), \ - HUBBUB_SF(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A, DCHUBBUB_ARB_VM_ROW_ALLOW_SR_EXIT_WATERMARK_A, mask_sh), \ - HUBBUB_SF(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B, DCHUBBUB_ARB_VM_ROW_ALLOW_SR_EXIT_WATERMARK_B, mask_sh), \ - HUBBUB_SF(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C, DCHUBBUB_ARB_VM_ROW_ALLOW_SR_EXIT_WATERMARK_C, mask_sh), \ - HUBBUB_SF(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D, DCHUBBUB_ARB_VM_ROW_ALLOW_SR_EXIT_WATERMARK_D, mask_sh), \ - HUBBUB_SF(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A, DCHUBBUB_ARB_VM_ROW_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A, mask_sh), \ - HUBBUB_SF(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B, DCHUBBUB_ARB_VM_ROW_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B, mask_sh), \ - HUBBUB_SF(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C, DCHUBBUB_ARB_VM_ROW_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C, mask_sh), \ - 
HUBBUB_SF(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D, DCHUBBUB_ARB_VM_ROW_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D, mask_sh), \ - HUBBUB_SF(DCHUBBUB_ARB_HOSTVM_CNTL, DCHUBBUB_ARB_MAX_QOS_COMMIT_THRESHOLD, mask_sh), \ - HUBBUB_SF(DCHVM_CTRL0, HOSTVM_INIT_REQ, mask_sh), \ - HUBBUB_SF(DCHVM_MEM_CTRL, HVM_GPUVMRET_PWR_REQ_DIS, mask_sh), \ - HUBBUB_SF(DCHVM_MEM_CTRL, HVM_GPUVMRET_FORCE_REQ, mask_sh), \ - HUBBUB_SF(DCHVM_MEM_CTRL, HVM_GPUVMRET_POWER_STATUS, mask_sh), \ - HUBBUB_SF(DCHVM_CLK_CTRL, HVM_DISPCLK_R_GATE_DIS, mask_sh), \ - HUBBUB_SF(DCHVM_CLK_CTRL, HVM_DISPCLK_G_GATE_DIS, mask_sh), \ - HUBBUB_SF(DCHVM_CLK_CTRL, HVM_DCFCLK_R_GATE_DIS, mask_sh), \ - HUBBUB_SF(DCHVM_CLK_CTRL, HVM_DCFCLK_G_GATE_DIS, mask_sh), \ - HUBBUB_SF(DCHVM_CLK_CTRL, TR_REQ_REQCLKREQ_MODE, mask_sh), \ - HUBBUB_SF(DCHVM_CLK_CTRL, TW_RSP_COMPCLKREQ_MODE, mask_sh), \ - HUBBUB_SF(DCHVM_RIOMMU_CTRL0, HOSTVM_PREFETCH_REQ, mask_sh), \ - HUBBUB_SF(DCHVM_RIOMMU_CTRL0, HOSTVM_POWERSTATUS, mask_sh), \ - HUBBUB_SF(DCHVM_RIOMMU_STAT0, RIOMMU_ACTIVE, mask_sh), \ - HUBBUB_SF(DCHVM_RIOMMU_STAT0, HOSTVM_PREFETCH_DONE, mask_sh), \ - HUBBUB_SF(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_A, DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_A, mask_sh), \ - HUBBUB_SF(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_B, DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_B, mask_sh), \ - HUBBUB_SF(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_C, DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_C, mask_sh), \ - HUBBUB_SF(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_D, DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_D, mask_sh) - -#define HUBBUB_MASK_SH_LIST_DCN21(mask_sh)\ - HUBBUB_MASK_SH_LIST_HVM(mask_sh), \ - HUBBUB_MASK_SH_LIST_DCN_COMMON(mask_sh), \ - HUBBUB_MASK_SH_LIST_STUTTER(mask_sh), \ - HUBBUB_SF(DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_REFDIV, mask_sh), \ - HUBBUB_SF(DCN_VM_FB_LOCATION_BASE, FB_BASE, mask_sh), \ - HUBBUB_SF(DCN_VM_FB_LOCATION_TOP, FB_TOP, mask_sh), \ - HUBBUB_SF(DCN_VM_FB_OFFSET, FB_OFFSET, mask_sh), \ - HUBBUB_SF(DCN_VM_AGP_BOT, AGP_BOT, mask_sh), \ - HUBBUB_SF(DCN_VM_AGP_TOP, AGP_TOP, mask_sh), \ - HUBBUB_SF(DCN_VM_AGP_BASE, AGP_BASE, mask_sh), \ - HUBBUB_SF(DCN_VM_FAULT_ADDR_MSB, DCN_VM_FAULT_ADDR_MSB, mask_sh), \ - HUBBUB_SF(DCN_VM_FAULT_ADDR_LSB, DCN_VM_FAULT_ADDR_LSB, mask_sh), \ - HUBBUB_SF(DCN_VM_FAULT_CNTL, DCN_VM_ERROR_STATUS_CLEAR, mask_sh), \ - HUBBUB_SF(DCN_VM_FAULT_CNTL, DCN_VM_ERROR_STATUS_MODE, mask_sh), \ - HUBBUB_SF(DCN_VM_FAULT_CNTL, DCN_VM_ERROR_INTERRUPT_ENABLE, mask_sh), \ - HUBBUB_SF(DCN_VM_FAULT_CNTL, DCN_VM_RANGE_FAULT_DISABLE, mask_sh), \ - HUBBUB_SF(DCN_VM_FAULT_CNTL, DCN_VM_PRQ_FAULT_DISABLE, mask_sh), \ - HUBBUB_SF(DCN_VM_FAULT_STATUS, DCN_VM_ERROR_STATUS, mask_sh), \ - HUBBUB_SF(DCN_VM_FAULT_STATUS, DCN_VM_ERROR_VMID, mask_sh), \ - HUBBUB_SF(DCN_VM_FAULT_STATUS, DCN_VM_ERROR_TABLE_LEVEL, mask_sh), \ - HUBBUB_SF(DCN_VM_FAULT_STATUS, DCN_VM_ERROR_PIPE, mask_sh), \ - HUBBUB_SF(DCN_VM_FAULT_STATUS, DCN_VM_ERROR_INTERRUPT_STATUS, mask_sh) - -void dcn21_dchvm_init(struct hubbub *hubbub); -int hubbub21_init_dchub(struct hubbub *hubbub, - struct dcn_hubbub_phys_addr_config *pa_config); -bool hubbub21_program_watermarks( - struct hubbub *hubbub, - union dcn_watermark_set *watermarks, - unsigned int refclk_mhz, - bool safe_to_lower); -bool hubbub21_program_urgent_watermarks( - struct hubbub *hubbub, - union dcn_watermark_set *watermarks, - unsigned int refclk_mhz, - bool safe_to_lower); -bool hubbub21_program_stutter_watermarks( - struct hubbub *hubbub, - union dcn_watermark_set *watermarks, - unsigned int refclk_mhz, - bool safe_to_lower); -bool 
hubbub21_program_pstate_watermarks( - struct hubbub *hubbub, - union dcn_watermark_set *watermarks, - unsigned int refclk_mhz, - bool safe_to_lower); - -void hubbub21_wm_read_state(struct hubbub *hubbub, - struct dcn_hubbub_wm *wm); - -void hubbub21_construct(struct dcn20_hubbub *hubbub, - struct dc_context *ctx, - const struct dcn_hubbub_registers *hubbub_regs, - const struct dcn_hubbub_shift *hubbub_shift, - const struct dcn_hubbub_mask *hubbub_mask); - -#endif /* DAL_DC_DCN21_DCN21_HUBBUB_H_ */ diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/Makefile b/drivers/gpu/drm/amd/display/dc/dcn30/Makefile index c6ca70f3c061..435979febb79 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/Makefile +++ b/drivers/gpu/drm/amd/display/dc/dcn30/Makefile @@ -23,8 +23,7 @@ # # -DCN30 := dcn30_hubbub.o \ - dcn30_hubp.o \ +DCN30 := dcn30_hubp.o \ dcn30_dccg.o \ dcn30_mpc.o dcn30_vpg.o \ dcn30_afmt.o \ diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubbub.c deleted file mode 100644 index 6a5af3da4b45..000000000000 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubbub.c +++ /dev/null @@ -1,473 +0,0 @@ -/* - * Copyright 2020 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. 
- * - * Authors: AMD - * - */ - - -#include "dm_services.h" -#include "reg_helper.h" -#include "dcn30_hubbub.h" - - -#define CTX \ - hubbub1->base.ctx -#define DC_LOGGER \ - hubbub1->base.ctx->logger -#define REG(reg)\ - hubbub1->regs->reg - -#undef FN -#define FN(reg_name, field_name) \ - hubbub1->shifts->field_name, hubbub1->masks->field_name - -#ifdef NUM_VMID -#undef NUM_VMID -#endif -#define NUM_VMID 16 - - -static uint32_t convert_and_clamp( - uint32_t wm_ns, - uint32_t refclk_mhz, - uint32_t clamp_value) -{ - uint32_t ret_val = 0; - ret_val = wm_ns * refclk_mhz; - ret_val /= 1000; - - if (ret_val > clamp_value) - ret_val = clamp_value; - - return ret_val; -} - -int hubbub3_init_dchub_sys_ctx(struct hubbub *hubbub, - struct dcn_hubbub_phys_addr_config *pa_config) -{ - struct dcn20_hubbub *hubbub1 = TO_DCN20_HUBBUB(hubbub); - struct dcn_vmid_page_table_config phys_config; - - REG_SET(DCN_VM_FB_LOCATION_BASE, 0, - FB_BASE, pa_config->system_aperture.fb_base >> 24); - REG_SET(DCN_VM_FB_LOCATION_TOP, 0, - FB_TOP, pa_config->system_aperture.fb_top >> 24); - REG_SET(DCN_VM_FB_OFFSET, 0, - FB_OFFSET, pa_config->system_aperture.fb_offset >> 24); - REG_SET(DCN_VM_AGP_BOT, 0, - AGP_BOT, pa_config->system_aperture.agp_bot >> 24); - REG_SET(DCN_VM_AGP_TOP, 0, - AGP_TOP, pa_config->system_aperture.agp_top >> 24); - REG_SET(DCN_VM_AGP_BASE, 0, - AGP_BASE, pa_config->system_aperture.agp_base >> 24); - - if (pa_config->gart_config.page_table_start_addr != pa_config->gart_config.page_table_end_addr) { - phys_config.page_table_start_addr = pa_config->gart_config.page_table_start_addr >> 12; - phys_config.page_table_end_addr = pa_config->gart_config.page_table_end_addr >> 12; - phys_config.page_table_base_addr = pa_config->gart_config.page_table_base_addr; - phys_config.depth = 0; - phys_config.block_size = 0; - // Init VMID 0 based on PA config - dcn20_vmid_setup(&hubbub1->vmid[0], &phys_config); - } - - return NUM_VMID; -} - -bool hubbub3_program_watermarks( - struct hubbub *hubbub, - union dcn_watermark_set *watermarks, - unsigned int refclk_mhz, - bool safe_to_lower) -{ - struct dcn20_hubbub *hubbub1 = TO_DCN20_HUBBUB(hubbub); - bool wm_pending = false; - - if (hubbub21_program_urgent_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower)) - wm_pending = true; - - if (hubbub21_program_stutter_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower)) - wm_pending = true; - - if (hubbub21_program_pstate_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower)) - wm_pending = true; - - /* - * The DCHub arbiter has a mechanism to dynamically rate limit the DCHub request stream to the fabric. - * If the memory controller is fully utilized and the DCHub requestors are - * well ahead of their amortized schedule, then it is safe to prevent the next winner - * from being committed and sent to the fabric. - * The utilization of the memory controller is approximated by ensuring that - * the number of outstanding requests is greater than a threshold specified - * by the ARB_MIN_REQ_OUTSTANDING. To determine that the DCHub requestors are well ahead of the amortized schedule, - * the slack of the next winner is compared with the ARB_SAT_LEVEL in DLG RefClk cycles. - * - * TODO: Revisit request limit after figure out right number. request limit for Renoir isn't decided yet, set maximum value (0x1FF) - * to turn off it for now. 
- */ - REG_SET(DCHUBBUB_ARB_SAT_LEVEL, 0, - DCHUBBUB_ARB_SAT_LEVEL, 60 * refclk_mhz); - REG_UPDATE(DCHUBBUB_ARB_DF_REQ_OUTSTAND, - DCHUBBUB_ARB_MIN_REQ_OUTSTAND, 0x1FF); - - hubbub1_allow_self_refresh_control(hubbub, !hubbub->ctx->dc->debug.disable_stutter); - - return wm_pending; -} - -bool hubbub3_dcc_support_swizzle( - enum swizzle_mode_values swizzle, - unsigned int bytes_per_element, - enum segment_order *segment_order_horz, - enum segment_order *segment_order_vert) -{ - bool standard_swizzle = false; - bool display_swizzle = false; - bool render_swizzle = false; - - switch (swizzle) { - case DC_SW_4KB_S: - case DC_SW_64KB_S: - case DC_SW_VAR_S: - case DC_SW_4KB_S_X: - case DC_SW_64KB_S_X: - case DC_SW_VAR_S_X: - standard_swizzle = true; - break; - case DC_SW_4KB_R: - case DC_SW_64KB_R: - case DC_SW_VAR_R: - case DC_SW_4KB_R_X: - case DC_SW_64KB_R_X: - case DC_SW_VAR_R_X: - render_swizzle = true; - break; - case DC_SW_4KB_D: - case DC_SW_64KB_D: - case DC_SW_VAR_D: - case DC_SW_4KB_D_X: - case DC_SW_64KB_D_X: - case DC_SW_VAR_D_X: - display_swizzle = true; - break; - default: - break; - } - - if (standard_swizzle) { - if (bytes_per_element == 1) { - *segment_order_horz = segment_order__contiguous; - *segment_order_vert = segment_order__na; - return true; - } - if (bytes_per_element == 2) { - *segment_order_horz = segment_order__non_contiguous; - *segment_order_vert = segment_order__contiguous; - return true; - } - if (bytes_per_element == 4) { - *segment_order_horz = segment_order__non_contiguous; - *segment_order_vert = segment_order__contiguous; - return true; - } - if (bytes_per_element == 8) { - *segment_order_horz = segment_order__na; - *segment_order_vert = segment_order__contiguous; - return true; - } - } - if (render_swizzle) { - if (bytes_per_element == 1) { - *segment_order_horz = segment_order__contiguous; - *segment_order_vert = segment_order__na; - return true; - } - if (bytes_per_element == 2) { - *segment_order_horz = segment_order__non_contiguous; - *segment_order_vert = segment_order__contiguous; - return true; - } - if (bytes_per_element == 4) { - *segment_order_horz = segment_order__contiguous; - *segment_order_vert = segment_order__non_contiguous; - return true; - } - if (bytes_per_element == 8) { - *segment_order_horz = segment_order__contiguous; - *segment_order_vert = segment_order__non_contiguous; - return true; - } - } - if (display_swizzle && bytes_per_element == 8) { - *segment_order_horz = segment_order__contiguous; - *segment_order_vert = segment_order__non_contiguous; - return true; - } - - return false; -} - -static void hubbub3_get_blk256_size(unsigned int *blk256_width, unsigned int *blk256_height, - unsigned int bytes_per_element) -{ - /* copied from DML. 
might want to refactor DML to leverage from DML */ - /* DML : get_blk256_size */ - if (bytes_per_element == 1) { - *blk256_width = 16; - *blk256_height = 16; - } else if (bytes_per_element == 2) { - *blk256_width = 16; - *blk256_height = 8; - } else if (bytes_per_element == 4) { - *blk256_width = 8; - *blk256_height = 8; - } else if (bytes_per_element == 8) { - *blk256_width = 8; - *blk256_height = 4; - } -} - -static void hubbub3_det_request_size( - unsigned int detile_buf_size, - unsigned int height, - unsigned int width, - unsigned int bpe, - bool *req128_horz_wc, - bool *req128_vert_wc) -{ - unsigned int blk256_height = 0; - unsigned int blk256_width = 0; - unsigned int swath_bytes_horz_wc, swath_bytes_vert_wc; - - hubbub3_get_blk256_size(&blk256_width, &blk256_height, bpe); - - swath_bytes_horz_wc = width * blk256_height * bpe; - swath_bytes_vert_wc = height * blk256_width * bpe; - - *req128_horz_wc = (2 * swath_bytes_horz_wc <= detile_buf_size) ? - false : /* full 256B request */ - true; /* half 128b request */ - - *req128_vert_wc = (2 * swath_bytes_vert_wc <= detile_buf_size) ? - false : /* full 256B request */ - true; /* half 128b request */ -} - -bool hubbub3_get_dcc_compression_cap(struct hubbub *hubbub, - const struct dc_dcc_surface_param *input, - struct dc_surface_dcc_cap *output) -{ - struct dc *dc = hubbub->ctx->dc; - /* implement section 1.6.2.1 of DCN1_Programming_Guide.docx */ - enum dcc_control dcc_control; - unsigned int bpe; - enum segment_order segment_order_horz, segment_order_vert; - bool req128_horz_wc, req128_vert_wc; - - memset(output, 0, sizeof(*output)); - - if (dc->debug.disable_dcc == DCC_DISABLE) - return false; - - if (!hubbub->funcs->dcc_support_pixel_format(input->format, - &bpe)) - return false; - - if (!hubbub->funcs->dcc_support_swizzle(input->swizzle_mode, bpe, - &segment_order_horz, &segment_order_vert)) - return false; - - hubbub3_det_request_size(TO_DCN20_HUBBUB(hubbub)->detile_buf_size, - input->surface_size.height, input->surface_size.width, - bpe, &req128_horz_wc, &req128_vert_wc); - - if (!req128_horz_wc && !req128_vert_wc) { - dcc_control = dcc_control__256_256_xxx; - } else if (input->scan == SCAN_DIRECTION_HORIZONTAL) { - if (!req128_horz_wc) - dcc_control = dcc_control__256_256_xxx; - else if (segment_order_horz == segment_order__contiguous) - dcc_control = dcc_control__128_128_xxx; - else - dcc_control = dcc_control__256_64_64; - } else if (input->scan == SCAN_DIRECTION_VERTICAL) { - if (!req128_vert_wc) - dcc_control = dcc_control__256_256_xxx; - else if (segment_order_vert == segment_order__contiguous) - dcc_control = dcc_control__128_128_xxx; - else - dcc_control = dcc_control__256_64_64; - } else { - if ((req128_horz_wc && - segment_order_horz == segment_order__non_contiguous) || - (req128_vert_wc && - segment_order_vert == segment_order__non_contiguous)) - /* access_dir not known, must use most constraining */ - dcc_control = dcc_control__256_64_64; - else - /* reg128 is true for either horz and vert - * but segment_order is contiguous - */ - dcc_control = dcc_control__128_128_xxx; - } - - /* Exception for 64KB_R_X */ - if ((bpe == 2) && (input->swizzle_mode == DC_SW_64KB_R_X)) - dcc_control = dcc_control__128_128_xxx; - - if (dc->debug.disable_dcc == DCC_HALF_REQ_DISALBE && - dcc_control != dcc_control__256_256_xxx) - return false; - - switch (dcc_control) { - case dcc_control__256_256_xxx: - output->grph.rgb.max_uncompressed_blk_size = 256; - output->grph.rgb.max_compressed_blk_size = 256; - output->grph.rgb.independent_64b_blks = 
false; - output->grph.rgb.dcc_controls.dcc_256_256_unconstrained = 1; - output->grph.rgb.dcc_controls.dcc_256_128_128 = 1; - break; - case dcc_control__128_128_xxx: - output->grph.rgb.max_uncompressed_blk_size = 128; - output->grph.rgb.max_compressed_blk_size = 128; - output->grph.rgb.independent_64b_blks = false; - output->grph.rgb.dcc_controls.dcc_128_128_uncontrained = 1; - output->grph.rgb.dcc_controls.dcc_256_128_128 = 1; - break; - case dcc_control__256_64_64: - output->grph.rgb.max_uncompressed_blk_size = 256; - output->grph.rgb.max_compressed_blk_size = 64; - output->grph.rgb.independent_64b_blks = true; - output->grph.rgb.dcc_controls.dcc_256_64_64 = 1; - break; - case dcc_control__256_128_128: - output->grph.rgb.max_uncompressed_blk_size = 256; - output->grph.rgb.max_compressed_blk_size = 128; - output->grph.rgb.independent_64b_blks = false; - output->grph.rgb.dcc_controls.dcc_256_128_128 = 1; - break; - } - output->capable = true; - output->const_color_support = true; - - return true; -} - -void hubbub3_force_wm_propagate_to_pipes(struct hubbub *hubbub) -{ - struct dcn20_hubbub *hubbub1 = TO_DCN20_HUBBUB(hubbub); - uint32_t refclk_mhz = hubbub->ctx->dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000; - uint32_t prog_wm_value = convert_and_clamp(hubbub1->watermarks.a.urgent_ns, - refclk_mhz, 0x1fffff); - - REG_SET_2(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A, 0, - DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A, prog_wm_value, - DCHUBBUB_ARB_VM_ROW_URGENCY_WATERMARK_A, prog_wm_value); -} - -void hubbub3_force_pstate_change_control(struct hubbub *hubbub, - bool force, bool allow) -{ - struct dcn20_hubbub *hubbub1 = TO_DCN20_HUBBUB(hubbub); - - REG_UPDATE_2(DCHUBBUB_ARB_DRAM_STATE_CNTL, - DCHUBBUB_ARB_ALLOW_PSTATE_CHANGE_FORCE_VALUE, allow, - DCHUBBUB_ARB_ALLOW_PSTATE_CHANGE_FORCE_ENABLE, force); -} - -/* Copy values from WM set A to all other sets */ -void hubbub3_init_watermarks(struct hubbub *hubbub) -{ - struct dcn20_hubbub *hubbub1 = TO_DCN20_HUBBUB(hubbub); - uint32_t reg; - - reg = REG_READ(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A); - REG_WRITE(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B, reg); - REG_WRITE(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C, reg); - REG_WRITE(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D, reg); - - reg = REG_READ(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_A); - REG_WRITE(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_B, reg); - REG_WRITE(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_C, reg); - REG_WRITE(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_D, reg); - - reg = REG_READ(DCHUBBUB_ARB_FRAC_URG_BW_NOM_A); - REG_WRITE(DCHUBBUB_ARB_FRAC_URG_BW_NOM_B, reg); - REG_WRITE(DCHUBBUB_ARB_FRAC_URG_BW_NOM_C, reg); - REG_WRITE(DCHUBBUB_ARB_FRAC_URG_BW_NOM_D, reg); - - reg = REG_READ(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_A); - REG_WRITE(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_B, reg); - REG_WRITE(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_C, reg); - REG_WRITE(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_D, reg); - - reg = REG_READ(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A); - REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B, reg); - REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C, reg); - REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D, reg); - - reg = REG_READ(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A); - REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B, reg); - REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C, reg); - REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D, reg); - - reg = REG_READ(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A); - REG_WRITE(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B, reg); - 
REG_WRITE(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C, reg); - REG_WRITE(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D, reg); -} - -static const struct hubbub_funcs hubbub30_funcs = { - .update_dchub = hubbub2_update_dchub, - .init_dchub_sys_ctx = hubbub3_init_dchub_sys_ctx, - .init_vm_ctx = hubbub2_init_vm_ctx, - .dcc_support_swizzle = hubbub3_dcc_support_swizzle, - .dcc_support_pixel_format = hubbub2_dcc_support_pixel_format, - .get_dcc_compression_cap = hubbub3_get_dcc_compression_cap, - .wm_read_state = hubbub21_wm_read_state, - .get_dchub_ref_freq = hubbub2_get_dchub_ref_freq, - .program_watermarks = hubbub3_program_watermarks, - .allow_self_refresh_control = hubbub1_allow_self_refresh_control, - .is_allow_self_refresh_enabled = hubbub1_is_allow_self_refresh_enabled, - .verify_allow_pstate_change_high = hubbub1_verify_allow_pstate_change_high, - .force_wm_propagate_to_pipes = hubbub3_force_wm_propagate_to_pipes, - .force_pstate_change_control = hubbub3_force_pstate_change_control, - .init_watermarks = hubbub3_init_watermarks, - .hubbub_read_state = hubbub2_read_state, -}; - -void hubbub3_construct(struct dcn20_hubbub *hubbub3, - struct dc_context *ctx, - const struct dcn_hubbub_registers *hubbub_regs, - const struct dcn_hubbub_shift *hubbub_shift, - const struct dcn_hubbub_mask *hubbub_mask) -{ - hubbub3->base.ctx = ctx; - hubbub3->base.funcs = &hubbub30_funcs; - hubbub3->regs = hubbub_regs; - hubbub3->shifts = hubbub_shift; - hubbub3->masks = hubbub_mask; - - hubbub3->debug_test_index_pstate = 0xB; - hubbub3->detile_buf_size = 184 * 1024; /* 184KB for DCN3 */ -} - diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubbub.h b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubbub.h deleted file mode 100644 index ca6233e8f1f4..000000000000 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubbub.h +++ /dev/null @@ -1,136 +0,0 @@ -/* - * Copyright 2020 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. 
- * - * Authors: AMD - * - */ - -#ifndef __DC_HUBBUB_DCN30_H__ -#define __DC_HUBBUB_DCN30_H__ - -#include "dcn21/dcn21_hubbub.h" - -#define HUBBUB_REG_LIST_DCN3AG(id)\ - HUBBUB_REG_LIST_DCN21() - -#define HUBBUB_MASK_SH_LIST_DCN3AG(mask_sh)\ - HUBBUB_MASK_SH_LIST_DCN21(mask_sh) - -#define HUBBUB_REG_LIST_DCN30(id)\ - HUBBUB_REG_LIST_DCN20_COMMON(), \ - HUBBUB_SR_WATERMARK_REG_LIST(), \ - SR(DCHUBBUB_ARB_FRAC_URG_BW_NOM_A),\ - SR(DCHUBBUB_ARB_FRAC_URG_BW_NOM_B),\ - SR(DCHUBBUB_ARB_FRAC_URG_BW_NOM_C),\ - SR(DCHUBBUB_ARB_FRAC_URG_BW_NOM_D),\ - SR(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_A),\ - SR(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_B),\ - SR(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_C),\ - SR(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_D),\ - SR(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_A),\ - SR(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_B),\ - SR(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_C),\ - SR(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_D) - -#define HUBBUB_MASK_SH_LIST_DCN30(mask_sh)\ - HUBBUB_MASK_SH_LIST_DCN_COMMON(mask_sh), \ - HUBBUB_MASK_SH_LIST_STUTTER(mask_sh), \ - HUBBUB_SF(DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_REFDIV, mask_sh), \ - HUBBUB_SF(DCN_VM_FB_LOCATION_BASE, FB_BASE, mask_sh), \ - HUBBUB_SF(DCN_VM_FB_LOCATION_TOP, FB_TOP, mask_sh), \ - HUBBUB_SF(DCN_VM_FB_OFFSET, FB_OFFSET, mask_sh), \ - HUBBUB_SF(DCN_VM_AGP_BOT, AGP_BOT, mask_sh), \ - HUBBUB_SF(DCN_VM_AGP_TOP, AGP_TOP, mask_sh), \ - HUBBUB_SF(DCN_VM_AGP_BASE, AGP_BASE, mask_sh), \ - HUBBUB_SF(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_A, DCHUBBUB_ARB_FRAC_URG_BW_FLIP_A, mask_sh), \ - HUBBUB_SF(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_B, DCHUBBUB_ARB_FRAC_URG_BW_FLIP_B, mask_sh), \ - HUBBUB_SF(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_C, DCHUBBUB_ARB_FRAC_URG_BW_FLIP_C, mask_sh), \ - HUBBUB_SF(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_D, DCHUBBUB_ARB_FRAC_URG_BW_FLIP_D, mask_sh), \ - HUBBUB_SF(DCHUBBUB_ARB_FRAC_URG_BW_NOM_A, DCHUBBUB_ARB_FRAC_URG_BW_NOM_A, mask_sh), \ - HUBBUB_SF(DCHUBBUB_ARB_FRAC_URG_BW_NOM_B, DCHUBBUB_ARB_FRAC_URG_BW_NOM_B, mask_sh), \ - HUBBUB_SF(DCHUBBUB_ARB_FRAC_URG_BW_NOM_C, DCHUBBUB_ARB_FRAC_URG_BW_NOM_C, mask_sh), \ - HUBBUB_SF(DCHUBBUB_ARB_FRAC_URG_BW_NOM_D, DCHUBBUB_ARB_FRAC_URG_BW_NOM_D, mask_sh), \ - HUBBUB_SF(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_A, DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_A, mask_sh), \ - HUBBUB_SF(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_B, DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_B, mask_sh), \ - HUBBUB_SF(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_C, DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_C, mask_sh), \ - HUBBUB_SF(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_D, DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_D, mask_sh), \ - HUBBUB_SF(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A, DCHUBBUB_ARB_VM_ROW_URGENCY_WATERMARK_A, mask_sh), \ - HUBBUB_SF(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B, DCHUBBUB_ARB_VM_ROW_URGENCY_WATERMARK_B, mask_sh), \ - HUBBUB_SF(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C, DCHUBBUB_ARB_VM_ROW_URGENCY_WATERMARK_C, mask_sh), \ - HUBBUB_SF(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D, DCHUBBUB_ARB_VM_ROW_URGENCY_WATERMARK_D, mask_sh), \ - HUBBUB_SF(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A, DCHUBBUB_ARB_VM_ROW_ALLOW_SR_ENTER_WATERMARK_A, mask_sh), \ - HUBBUB_SF(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B, DCHUBBUB_ARB_VM_ROW_ALLOW_SR_ENTER_WATERMARK_B, mask_sh), \ - HUBBUB_SF(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C, DCHUBBUB_ARB_VM_ROW_ALLOW_SR_ENTER_WATERMARK_C, mask_sh), \ - HUBBUB_SF(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D, DCHUBBUB_ARB_VM_ROW_ALLOW_SR_ENTER_WATERMARK_D, mask_sh), \ - HUBBUB_SF(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A, DCHUBBUB_ARB_VM_ROW_ALLOW_SR_EXIT_WATERMARK_A, mask_sh), \ 
- HUBBUB_SF(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B, DCHUBBUB_ARB_VM_ROW_ALLOW_SR_EXIT_WATERMARK_B, mask_sh), \ - HUBBUB_SF(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C, DCHUBBUB_ARB_VM_ROW_ALLOW_SR_EXIT_WATERMARK_C, mask_sh), \ - HUBBUB_SF(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D, DCHUBBUB_ARB_VM_ROW_ALLOW_SR_EXIT_WATERMARK_D, mask_sh), \ - HUBBUB_SF(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A, DCHUBBUB_ARB_VM_ROW_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A, mask_sh), \ - HUBBUB_SF(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B, DCHUBBUB_ARB_VM_ROW_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B, mask_sh), \ - HUBBUB_SF(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C, DCHUBBUB_ARB_VM_ROW_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C, mask_sh), \ - HUBBUB_SF(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D, DCHUBBUB_ARB_VM_ROW_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D, mask_sh), \ - HUBBUB_SF(DCN_VM_FAULT_ADDR_MSB, DCN_VM_FAULT_ADDR_MSB, mask_sh), \ - HUBBUB_SF(DCN_VM_FAULT_ADDR_LSB, DCN_VM_FAULT_ADDR_LSB, mask_sh), \ - HUBBUB_SF(DCN_VM_FAULT_CNTL, DCN_VM_ERROR_STATUS_CLEAR, mask_sh), \ - HUBBUB_SF(DCN_VM_FAULT_CNTL, DCN_VM_ERROR_STATUS_MODE, mask_sh), \ - HUBBUB_SF(DCN_VM_FAULT_CNTL, DCN_VM_ERROR_INTERRUPT_ENABLE, mask_sh), \ - HUBBUB_SF(DCN_VM_FAULT_CNTL, DCN_VM_RANGE_FAULT_DISABLE, mask_sh), \ - HUBBUB_SF(DCN_VM_FAULT_CNTL, DCN_VM_PRQ_FAULT_DISABLE, mask_sh), \ - HUBBUB_SF(DCN_VM_FAULT_STATUS, DCN_VM_ERROR_STATUS, mask_sh), \ - HUBBUB_SF(DCN_VM_FAULT_STATUS, DCN_VM_ERROR_VMID, mask_sh), \ - HUBBUB_SF(DCN_VM_FAULT_STATUS, DCN_VM_ERROR_TABLE_LEVEL, mask_sh), \ - HUBBUB_SF(DCN_VM_FAULT_STATUS, DCN_VM_ERROR_PIPE, mask_sh), \ - HUBBUB_SF(DCN_VM_FAULT_STATUS, DCN_VM_ERROR_INTERRUPT_STATUS, mask_sh) - -void hubbub3_construct(struct dcn20_hubbub *hubbub3, - struct dc_context *ctx, - const struct dcn_hubbub_registers *hubbub_regs, - const struct dcn_hubbub_shift *hubbub_shift, - const struct dcn_hubbub_mask *hubbub_mask); - -int hubbub3_init_dchub_sys_ctx(struct hubbub *hubbub, - struct dcn_hubbub_phys_addr_config *pa_config); - -bool hubbub3_dcc_support_swizzle( - enum swizzle_mode_values swizzle, - unsigned int bytes_per_element, - enum segment_order *segment_order_horz, - enum segment_order *segment_order_vert); - -void hubbub3_force_wm_propagate_to_pipes(struct hubbub *hubbub); - -bool hubbub3_get_dcc_compression_cap(struct hubbub *hubbub, - const struct dc_dcc_surface_param *input, - struct dc_surface_dcc_cap *output); - -bool hubbub3_program_watermarks( - struct hubbub *hubbub, - union dcn_watermark_set *watermarks, - unsigned int refclk_mhz, - bool safe_to_lower); - -void hubbub3_force_pstate_change_control(struct hubbub *hubbub, - bool force, bool allow); - -void hubbub3_init_watermarks(struct hubbub *hubbub); - -#endif diff --git a/drivers/gpu/drm/amd/display/dc/dcn301/Makefile b/drivers/gpu/drm/amd/display/dc/dcn301/Makefile index d241f665e40a..bfda72fa4f42 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn301/Makefile +++ b/drivers/gpu/drm/amd/display/dc/dcn301/Makefile @@ -11,7 +11,7 @@ # Makefile for dcn30. DCN301 = dcn301_dccg.o \ - dcn301_dio_link_encoder.o dcn301_panel_cntl.o dcn301_hubbub.o + dcn301_dio_link_encoder.o dcn301_panel_cntl.o AMD_DAL_DCN301 = $(addprefix $(AMDDALPATH)/dc/dcn301/,$(DCN301)) diff --git a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_hubbub.c deleted file mode 100644 index c1959672df50..000000000000 --- a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_hubbub.c +++ /dev/null @@ -1,84 +0,0 @@ -/* -* Copyright 2020 Advanced Micro Devices, Inc. 
- * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - * Authors: AMD - * - */ -#include "dm_services.h" -#include "dcn301_hubbub.h" -#include "reg_helper.h" - -#define REG(reg)\ - hubbub1->regs->reg -#define DC_LOGGER \ - hubbub1->base.ctx->logger -#define CTX \ - hubbub1->base.ctx - -#undef FN -#define FN(reg_name, field_name) \ - hubbub1->shifts->field_name, hubbub1->masks->field_name - -#define REG(reg)\ - hubbub1->regs->reg - -#define CTX \ - hubbub1->base.ctx - -#undef FN -#define FN(reg_name, field_name) \ - hubbub1->shifts->field_name, hubbub1->masks->field_name - - -static const struct hubbub_funcs hubbub301_funcs = { - .update_dchub = hubbub2_update_dchub, - .init_dchub_sys_ctx = hubbub21_init_dchub, - .init_vm_ctx = hubbub2_init_vm_ctx, - .dcc_support_swizzle = hubbub3_dcc_support_swizzle, - .dcc_support_pixel_format = hubbub2_dcc_support_pixel_format, - .get_dcc_compression_cap = hubbub3_get_dcc_compression_cap, - .wm_read_state = hubbub21_wm_read_state, - .get_dchub_ref_freq = hubbub2_get_dchub_ref_freq, - .program_watermarks = hubbub3_program_watermarks, - .allow_self_refresh_control = hubbub1_allow_self_refresh_control, - .is_allow_self_refresh_enabled = hubbub1_is_allow_self_refresh_enabled, - .verify_allow_pstate_change_high = hubbub1_verify_allow_pstate_change_high, - .force_wm_propagate_to_pipes = hubbub3_force_wm_propagate_to_pipes, - .force_pstate_change_control = hubbub3_force_pstate_change_control, - .init_watermarks = hubbub3_init_watermarks, - .hubbub_read_state = hubbub2_read_state, -}; - -void hubbub301_construct(struct dcn20_hubbub *hubbub3, - struct dc_context *ctx, - const struct dcn_hubbub_registers *hubbub_regs, - const struct dcn_hubbub_shift *hubbub_shift, - const struct dcn_hubbub_mask *hubbub_mask) -{ - hubbub3->base.ctx = ctx; - hubbub3->base.funcs = &hubbub301_funcs; - hubbub3->regs = hubbub_regs; - hubbub3->shifts = hubbub_shift; - hubbub3->masks = hubbub_mask; - - hubbub3->debug_test_index_pstate = 0xB; - hubbub3->detile_buf_size = 184 * 1024; /* 184KB for DCN3 */ -} diff --git a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_hubbub.h b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_hubbub.h deleted file mode 100644 index b599f4475479..000000000000 --- a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_hubbub.h +++ /dev/null @@ -1,60 +0,0 @@ -/* - * Copyright 2020 Advanced Micro Devices, Inc. 
- * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - * Authors: AMD - * - */ -#ifndef DAL_DC_DCN301_DCN301_HUBBUB_H_ -#define DAL_DC_DCN301_DCN301_HUBBUB_H_ - -#include "dcn30/dcn30_hubbub.h" - - -#define HUBBUB_REG_LIST_DCN301(id)\ - HUBBUB_REG_LIST_DCN30(id), \ - HUBBUB_HVM_REG_LIST() - - -#define HUBBUB_MASK_SH_LIST_DCN301(mask_sh)\ - HUBBUB_MASK_SH_LIST_DCN30(mask_sh), \ - HUBBUB_SF(DCHVM_CTRL0, HOSTVM_INIT_REQ, mask_sh), \ - HUBBUB_SF(DCHVM_MEM_CTRL, HVM_GPUVMRET_PWR_REQ_DIS, mask_sh), \ - HUBBUB_SF(DCHVM_MEM_CTRL, HVM_GPUVMRET_FORCE_REQ, mask_sh), \ - HUBBUB_SF(DCHVM_MEM_CTRL, HVM_GPUVMRET_POWER_STATUS, mask_sh), \ - HUBBUB_SF(DCHVM_CLK_CTRL, HVM_DISPCLK_R_GATE_DIS, mask_sh), \ - HUBBUB_SF(DCHVM_CLK_CTRL, HVM_DISPCLK_G_GATE_DIS, mask_sh), \ - HUBBUB_SF(DCHVM_CLK_CTRL, HVM_DCFCLK_R_GATE_DIS, mask_sh), \ - HUBBUB_SF(DCHVM_CLK_CTRL, HVM_DCFCLK_G_GATE_DIS, mask_sh), \ - HUBBUB_SF(DCHVM_CLK_CTRL, TR_REQ_REQCLKREQ_MODE, mask_sh), \ - HUBBUB_SF(DCHVM_CLK_CTRL, TW_RSP_COMPCLKREQ_MODE, mask_sh), \ - HUBBUB_SF(DCHVM_RIOMMU_CTRL0, HOSTVM_PREFETCH_REQ, mask_sh), \ - HUBBUB_SF(DCHVM_RIOMMU_CTRL0, HOSTVM_POWERSTATUS, mask_sh), \ - HUBBUB_SF(DCHVM_RIOMMU_STAT0, RIOMMU_ACTIVE, mask_sh), \ - HUBBUB_SF(DCHVM_RIOMMU_STAT0, HOSTVM_PREFETCH_DONE, mask_sh) - -void hubbub301_construct(struct dcn20_hubbub *hubbub3, - struct dc_context *ctx, - const struct dcn_hubbub_registers *hubbub_regs, - const struct dcn_hubbub_shift *hubbub_shift, - const struct dcn_hubbub_mask *hubbub_mask); - - -#endif /* DAL_DC_DCN301_DCN301_HUBBUB_H_ */ diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/Makefile b/drivers/gpu/drm/amd/display/dc/dcn31/Makefile index 5d93ac16c03a..9608c1f418ab 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn31/Makefile +++ b/drivers/gpu/drm/amd/display/dc/dcn31/Makefile @@ -10,7 +10,7 @@ # # Makefile for dcn31. -DCN31 = dcn31_hubbub.o dcn31_hubp.o \ +DCN31 = dcn31_hubp.o \ dcn31_dccg.o dcn31_dio_link_encoder.o dcn31_panel_cntl.o \ dcn31_apg.o dcn31_hpo_dp_stream_encoder.o dcn31_hpo_dp_link_encoder.o \ dcn31_afmt.o dcn31_vpg.o diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubbub.c deleted file mode 100644 index b906db6e7355..000000000000 --- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubbub.c +++ /dev/null @@ -1,1090 +0,0 @@ -/* - * Copyright 2016 Advanced Micro Devices, Inc. 
- * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - * Authors: AMD - * - */ - - -#include "dcn30/dcn30_hubbub.h" -#include "dcn31_hubbub.h" -#include "dm_services.h" -#include "reg_helper.h" - - -#define CTX \ - hubbub2->base.ctx -#define DC_LOGGER \ - hubbub2->base.ctx->logger -#define REG(reg)\ - hubbub2->regs->reg - -#undef FN -#define FN(reg_name, field_name) \ - hubbub2->shifts->field_name, hubbub2->masks->field_name - -#ifdef NUM_VMID -#undef NUM_VMID -#endif -#define NUM_VMID 16 - -#define DCN31_CRB_SEGMENT_SIZE_KB 64 - -static void dcn31_init_crb(struct hubbub *hubbub) -{ - struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub); - - REG_GET(DCHUBBUB_DET0_CTRL, DET0_SIZE_CURRENT, - &hubbub2->det0_size); - - REG_GET(DCHUBBUB_DET1_CTRL, DET1_SIZE_CURRENT, - &hubbub2->det1_size); - - REG_GET(DCHUBBUB_DET2_CTRL, DET2_SIZE_CURRENT, - &hubbub2->det2_size); - - REG_GET(DCHUBBUB_DET3_CTRL, DET3_SIZE_CURRENT, - &hubbub2->det3_size); - - REG_GET(DCHUBBUB_COMPBUF_CTRL, COMPBUF_SIZE_CURRENT, - &hubbub2->compbuf_size_segments); - - REG_SET_2(COMPBUF_RESERVED_SPACE, 0, - COMPBUF_RESERVED_SPACE_64B, hubbub2->pixel_chunk_size / 32, - COMPBUF_RESERVED_SPACE_ZS, hubbub2->pixel_chunk_size / 128); - REG_UPDATE(DCHUBBUB_DEBUG_CTRL_0, DET_DEPTH, 0x17F); -} - -static void dcn31_program_det_size(struct hubbub *hubbub, int hubp_inst, unsigned int det_buffer_size_in_kbyte) -{ - struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub); - - unsigned int det_size_segments = (det_buffer_size_in_kbyte + DCN31_CRB_SEGMENT_SIZE_KB - 1) / DCN31_CRB_SEGMENT_SIZE_KB; - - switch (hubp_inst) { - case 0: - REG_UPDATE(DCHUBBUB_DET0_CTRL, - DET0_SIZE, det_size_segments); - hubbub2->det0_size = det_size_segments; - break; - case 1: - REG_UPDATE(DCHUBBUB_DET1_CTRL, - DET1_SIZE, det_size_segments); - hubbub2->det1_size = det_size_segments; - break; - case 2: - REG_UPDATE(DCHUBBUB_DET2_CTRL, - DET2_SIZE, det_size_segments); - hubbub2->det2_size = det_size_segments; - break; - case 3: - REG_UPDATE(DCHUBBUB_DET3_CTRL, - DET3_SIZE, det_size_segments); - hubbub2->det3_size = det_size_segments; - break; - default: - break; - } - DC_LOG_DEBUG("Set DET%d to %d segments\n", hubp_inst, det_size_segments); - /* Should never be hit, if it is we have an erroneous hw config*/ - ASSERT(hubbub2->det0_size + hubbub2->det1_size + hubbub2->det2_size - + hubbub2->det3_size + hubbub2->compbuf_size_segments <= hubbub2->crb_size_segs); -} - -static void dcn31_wait_for_det_apply(struct hubbub *hubbub, int hubp_inst) -{ - 
struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub); - - switch (hubp_inst) { - case 0: - REG_WAIT(DCHUBBUB_DET0_CTRL, DET0_SIZE_CURRENT, hubbub2->det0_size, 1000, 30); - break; - case 1: - REG_WAIT(DCHUBBUB_DET1_CTRL, DET1_SIZE_CURRENT, hubbub2->det1_size, 1000, 30); - break; - case 2: - REG_WAIT(DCHUBBUB_DET2_CTRL, DET2_SIZE_CURRENT, hubbub2->det2_size, 1000, 30); - break; - case 3: - REG_WAIT(DCHUBBUB_DET3_CTRL, DET3_SIZE_CURRENT, hubbub2->det3_size, 1000, 30); - break; - default: - break; - } -} - -static void dcn31_program_compbuf_size(struct hubbub *hubbub, unsigned int compbuf_size_kb, bool safe_to_increase) -{ - struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub); - unsigned int compbuf_size_segments = (compbuf_size_kb + DCN31_CRB_SEGMENT_SIZE_KB - 1) / DCN31_CRB_SEGMENT_SIZE_KB; - - if (safe_to_increase || compbuf_size_segments <= hubbub2->compbuf_size_segments) { - if (compbuf_size_segments > hubbub2->compbuf_size_segments) { - REG_WAIT(DCHUBBUB_DET0_CTRL, DET0_SIZE_CURRENT, hubbub2->det0_size, 1, 100); - REG_WAIT(DCHUBBUB_DET1_CTRL, DET1_SIZE_CURRENT, hubbub2->det1_size, 1, 100); - REG_WAIT(DCHUBBUB_DET2_CTRL, DET2_SIZE_CURRENT, hubbub2->det2_size, 1, 100); - REG_WAIT(DCHUBBUB_DET3_CTRL, DET3_SIZE_CURRENT, hubbub2->det3_size, 1, 100); - } - /* Should never be hit, if it is we have an erroneous hw config*/ - ASSERT(hubbub2->det0_size + hubbub2->det1_size + hubbub2->det2_size - + hubbub2->det3_size + compbuf_size_segments <= hubbub2->crb_size_segs); - REG_UPDATE(DCHUBBUB_COMPBUF_CTRL, COMPBUF_SIZE, compbuf_size_segments); - hubbub2->compbuf_size_segments = compbuf_size_segments; - ASSERT(REG_GET(DCHUBBUB_COMPBUF_CTRL, CONFIG_ERROR, &compbuf_size_segments) && !compbuf_size_segments); - } -} - -static uint32_t convert_and_clamp( - uint32_t wm_ns, - uint32_t refclk_mhz, - uint32_t clamp_value) -{ - uint32_t ret_val = 0; - ret_val = wm_ns * refclk_mhz; - ret_val /= 1000; - - if (ret_val > clamp_value) { - /* clamping WMs is abnormal, unexpected and may lead to underflow*/ - ASSERT(0); - ret_val = clamp_value; - } - - return ret_val; -} - -static bool hubbub31_program_urgent_watermarks( - struct hubbub *hubbub, - union dcn_watermark_set *watermarks, - unsigned int refclk_mhz, - bool safe_to_lower) -{ - struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub); - uint32_t prog_wm_value; - bool wm_pending = false; - - /* Repeat for water mark set A, B, C and D. 
*/ - /* clock state A */ - if (safe_to_lower || watermarks->a.urgent_ns > hubbub2->watermarks.a.urgent_ns) { - hubbub2->watermarks.a.urgent_ns = watermarks->a.urgent_ns; - prog_wm_value = convert_and_clamp(watermarks->a.urgent_ns, - refclk_mhz, 0x3fff); - REG_SET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A, 0, - DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A, prog_wm_value); - - DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_A calculated =%d\n" - "HW register value = 0x%x\n", - watermarks->a.urgent_ns, prog_wm_value); - } else if (watermarks->a.urgent_ns < hubbub2->watermarks.a.urgent_ns) - wm_pending = true; - - /* determine the transfer time for a quantity of data for a particular requestor.*/ - if (safe_to_lower || watermarks->a.frac_urg_bw_flip - > hubbub2->watermarks.a.frac_urg_bw_flip) { - hubbub2->watermarks.a.frac_urg_bw_flip = watermarks->a.frac_urg_bw_flip; - - REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_A, 0, - DCHUBBUB_ARB_FRAC_URG_BW_FLIP_A, watermarks->a.frac_urg_bw_flip); - } else if (watermarks->a.frac_urg_bw_flip - < hubbub2->watermarks.a.frac_urg_bw_flip) - wm_pending = true; - - if (safe_to_lower || watermarks->a.frac_urg_bw_nom - > hubbub2->watermarks.a.frac_urg_bw_nom) { - hubbub2->watermarks.a.frac_urg_bw_nom = watermarks->a.frac_urg_bw_nom; - - REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_NOM_A, 0, - DCHUBBUB_ARB_FRAC_URG_BW_NOM_A, watermarks->a.frac_urg_bw_nom); - } else if (watermarks->a.frac_urg_bw_nom - < hubbub2->watermarks.a.frac_urg_bw_nom) - wm_pending = true; - - if (safe_to_lower || watermarks->a.urgent_latency_ns > hubbub2->watermarks.a.urgent_latency_ns) { - hubbub2->watermarks.a.urgent_latency_ns = watermarks->a.urgent_latency_ns; - prog_wm_value = convert_and_clamp(watermarks->a.urgent_latency_ns, - refclk_mhz, 0x3fff); - REG_SET(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_A, 0, - DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_A, prog_wm_value); - } else if (watermarks->a.urgent_latency_ns < hubbub2->watermarks.a.urgent_latency_ns) - wm_pending = true; - - /* clock state B */ - if (safe_to_lower || watermarks->b.urgent_ns > hubbub2->watermarks.b.urgent_ns) { - hubbub2->watermarks.b.urgent_ns = watermarks->b.urgent_ns; - prog_wm_value = convert_and_clamp(watermarks->b.urgent_ns, - refclk_mhz, 0x3fff); - REG_SET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B, 0, - DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B, prog_wm_value); - - DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_B calculated =%d\n" - "HW register value = 0x%x\n", - watermarks->b.urgent_ns, prog_wm_value); - } else if (watermarks->b.urgent_ns < hubbub2->watermarks.b.urgent_ns) - wm_pending = true; - - /* determine the transfer time for a quantity of data for a particular requestor.*/ - if (safe_to_lower || watermarks->b.frac_urg_bw_flip - > hubbub2->watermarks.b.frac_urg_bw_flip) { - hubbub2->watermarks.b.frac_urg_bw_flip = watermarks->b.frac_urg_bw_flip; - - REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_B, 0, - DCHUBBUB_ARB_FRAC_URG_BW_FLIP_B, watermarks->b.frac_urg_bw_flip); - } else if (watermarks->b.frac_urg_bw_flip - < hubbub2->watermarks.b.frac_urg_bw_flip) - wm_pending = true; - - if (safe_to_lower || watermarks->b.frac_urg_bw_nom - > hubbub2->watermarks.b.frac_urg_bw_nom) { - hubbub2->watermarks.b.frac_urg_bw_nom = watermarks->b.frac_urg_bw_nom; - - REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_NOM_B, 0, - DCHUBBUB_ARB_FRAC_URG_BW_NOM_B, watermarks->b.frac_urg_bw_nom); - } else if (watermarks->b.frac_urg_bw_nom - < hubbub2->watermarks.b.frac_urg_bw_nom) - wm_pending = true; - - if (safe_to_lower || watermarks->b.urgent_latency_ns > hubbub2->watermarks.b.urgent_latency_ns) { - 
hubbub2->watermarks.b.urgent_latency_ns = watermarks->b.urgent_latency_ns; - prog_wm_value = convert_and_clamp(watermarks->b.urgent_latency_ns, - refclk_mhz, 0x3fff); - REG_SET(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_B, 0, - DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_B, prog_wm_value); - } else if (watermarks->b.urgent_latency_ns < hubbub2->watermarks.b.urgent_latency_ns) - wm_pending = true; - - /* clock state C */ - if (safe_to_lower || watermarks->c.urgent_ns > hubbub2->watermarks.c.urgent_ns) { - hubbub2->watermarks.c.urgent_ns = watermarks->c.urgent_ns; - prog_wm_value = convert_and_clamp(watermarks->c.urgent_ns, - refclk_mhz, 0x3fff); - REG_SET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C, 0, - DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C, prog_wm_value); - - DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_C calculated =%d\n" - "HW register value = 0x%x\n", - watermarks->c.urgent_ns, prog_wm_value); - } else if (watermarks->c.urgent_ns < hubbub2->watermarks.c.urgent_ns) - wm_pending = true; - - /* determine the transfer time for a quantity of data for a particular requestor.*/ - if (safe_to_lower || watermarks->c.frac_urg_bw_flip - > hubbub2->watermarks.c.frac_urg_bw_flip) { - hubbub2->watermarks.c.frac_urg_bw_flip = watermarks->c.frac_urg_bw_flip; - - REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_C, 0, - DCHUBBUB_ARB_FRAC_URG_BW_FLIP_C, watermarks->c.frac_urg_bw_flip); - } else if (watermarks->c.frac_urg_bw_flip - < hubbub2->watermarks.c.frac_urg_bw_flip) - wm_pending = true; - - if (safe_to_lower || watermarks->c.frac_urg_bw_nom - > hubbub2->watermarks.c.frac_urg_bw_nom) { - hubbub2->watermarks.c.frac_urg_bw_nom = watermarks->c.frac_urg_bw_nom; - - REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_NOM_C, 0, - DCHUBBUB_ARB_FRAC_URG_BW_NOM_C, watermarks->c.frac_urg_bw_nom); - } else if (watermarks->c.frac_urg_bw_nom - < hubbub2->watermarks.c.frac_urg_bw_nom) - wm_pending = true; - - if (safe_to_lower || watermarks->c.urgent_latency_ns > hubbub2->watermarks.c.urgent_latency_ns) { - hubbub2->watermarks.c.urgent_latency_ns = watermarks->c.urgent_latency_ns; - prog_wm_value = convert_and_clamp(watermarks->c.urgent_latency_ns, - refclk_mhz, 0x3fff); - REG_SET(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_C, 0, - DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_C, prog_wm_value); - } else if (watermarks->c.urgent_latency_ns < hubbub2->watermarks.c.urgent_latency_ns) - wm_pending = true; - - /* clock state D */ - if (safe_to_lower || watermarks->d.urgent_ns > hubbub2->watermarks.d.urgent_ns) { - hubbub2->watermarks.d.urgent_ns = watermarks->d.urgent_ns; - prog_wm_value = convert_and_clamp(watermarks->d.urgent_ns, - refclk_mhz, 0x3fff); - REG_SET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D, 0, - DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D, prog_wm_value); - - DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_D calculated =%d\n" - "HW register value = 0x%x\n", - watermarks->d.urgent_ns, prog_wm_value); - } else if (watermarks->d.urgent_ns < hubbub2->watermarks.d.urgent_ns) - wm_pending = true; - - /* determine the transfer time for a quantity of data for a particular requestor.*/ - if (safe_to_lower || watermarks->d.frac_urg_bw_flip - > hubbub2->watermarks.d.frac_urg_bw_flip) { - hubbub2->watermarks.d.frac_urg_bw_flip = watermarks->d.frac_urg_bw_flip; - - REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_D, 0, - DCHUBBUB_ARB_FRAC_URG_BW_FLIP_D, watermarks->d.frac_urg_bw_flip); - } else if (watermarks->d.frac_urg_bw_flip - < hubbub2->watermarks.d.frac_urg_bw_flip) - wm_pending = true; - - if (safe_to_lower || watermarks->d.frac_urg_bw_nom - > hubbub2->watermarks.d.frac_urg_bw_nom) { - 
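/*
 * Sketch of the safe_to_lower policy repeated for every field in these
 * functions (hypothetical helper, assumed names): a watermark is written
 * immediately when it is being raised, or when the caller has signalled
 * that lowering is safe; a requested decrease is otherwise left in the
 * hardware and reported back through the wm_pending return value so it
 * can be re-applied later.
 */
static bool wm_update_or_defer(uint32_t *cached, uint32_t requested,
			       bool safe_to_lower, bool *wm_pending)
{
	if (safe_to_lower || requested > *cached) {
		*cached = requested;
		return true;		/* caller programs the register now */
	}
	if (requested < *cached)
		*wm_pending = true;	/* lowering deferred until safe */
	return false;
}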
hubbub2->watermarks.d.frac_urg_bw_nom = watermarks->d.frac_urg_bw_nom; - - REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_NOM_D, 0, - DCHUBBUB_ARB_FRAC_URG_BW_NOM_D, watermarks->d.frac_urg_bw_nom); - } else if (watermarks->d.frac_urg_bw_nom - < hubbub2->watermarks.d.frac_urg_bw_nom) - wm_pending = true; - - if (safe_to_lower || watermarks->d.urgent_latency_ns > hubbub2->watermarks.d.urgent_latency_ns) { - hubbub2->watermarks.d.urgent_latency_ns = watermarks->d.urgent_latency_ns; - prog_wm_value = convert_and_clamp(watermarks->d.urgent_latency_ns, - refclk_mhz, 0x3fff); - REG_SET(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_D, 0, - DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_D, prog_wm_value); - } else if (watermarks->d.urgent_latency_ns < hubbub2->watermarks.d.urgent_latency_ns) - wm_pending = true; - - return wm_pending; -} - -static bool hubbub31_program_stutter_watermarks( - struct hubbub *hubbub, - union dcn_watermark_set *watermarks, - unsigned int refclk_mhz, - bool safe_to_lower) -{ - struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub); - uint32_t prog_wm_value; - bool wm_pending = false; - - /* clock state A */ - if (safe_to_lower || watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns - > hubbub2->watermarks.a.cstate_pstate.cstate_enter_plus_exit_ns) { - hubbub2->watermarks.a.cstate_pstate.cstate_enter_plus_exit_ns = - watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns; - prog_wm_value = convert_and_clamp( - watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns, - refclk_mhz, 0xfffff); - REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A, 0, - DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A, prog_wm_value); - DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_A calculated =%d\n" - "HW register value = 0x%x\n", - watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value); - } else if (watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns - < hubbub2->watermarks.a.cstate_pstate.cstate_enter_plus_exit_ns) - wm_pending = true; - - if (safe_to_lower || watermarks->a.cstate_pstate.cstate_exit_ns - > hubbub2->watermarks.a.cstate_pstate.cstate_exit_ns) { - hubbub2->watermarks.a.cstate_pstate.cstate_exit_ns = - watermarks->a.cstate_pstate.cstate_exit_ns; - prog_wm_value = convert_and_clamp( - watermarks->a.cstate_pstate.cstate_exit_ns, - refclk_mhz, 0xfffff); - REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A, 0, - DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A, prog_wm_value); - DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_A calculated =%d\n" - "HW register value = 0x%x\n", - watermarks->a.cstate_pstate.cstate_exit_ns, prog_wm_value); - } else if (watermarks->a.cstate_pstate.cstate_exit_ns - < hubbub2->watermarks.a.cstate_pstate.cstate_exit_ns) - wm_pending = true; - - if (safe_to_lower || watermarks->a.cstate_pstate.cstate_enter_plus_exit_z8_ns - > hubbub2->watermarks.a.cstate_pstate.cstate_enter_plus_exit_z8_ns) { - hubbub2->watermarks.a.cstate_pstate.cstate_enter_plus_exit_z8_ns = - watermarks->a.cstate_pstate.cstate_enter_plus_exit_z8_ns; - prog_wm_value = convert_and_clamp( - watermarks->a.cstate_pstate.cstate_enter_plus_exit_z8_ns, - refclk_mhz, 0xfffff); - REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_A, 0, - DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_A, prog_wm_value); - DC_LOG_BANDWIDTH_CALCS("SR_ENTER_WATERMARK_Z8_A calculated =%d\n" - "HW register value = 0x%x\n", - watermarks->a.cstate_pstate.cstate_enter_plus_exit_z8_ns, prog_wm_value); - } else if (watermarks->a.cstate_pstate.cstate_enter_plus_exit_z8_ns - < hubbub2->watermarks.a.cstate_pstate.cstate_enter_plus_exit_z8_ns) - wm_pending = true; - - if 
(safe_to_lower || watermarks->a.cstate_pstate.cstate_exit_z8_ns - > hubbub2->watermarks.a.cstate_pstate.cstate_exit_z8_ns) { - hubbub2->watermarks.a.cstate_pstate.cstate_exit_z8_ns = - watermarks->a.cstate_pstate.cstate_exit_z8_ns; - prog_wm_value = convert_and_clamp( - watermarks->a.cstate_pstate.cstate_exit_z8_ns, - refclk_mhz, 0xfffff); - REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_A, 0, - DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_A, prog_wm_value); - DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_Z8_A calculated =%d\n" - "HW register value = 0x%x\n", - watermarks->a.cstate_pstate.cstate_exit_z8_ns, prog_wm_value); - } else if (watermarks->a.cstate_pstate.cstate_exit_z8_ns - < hubbub2->watermarks.a.cstate_pstate.cstate_exit_z8_ns) - wm_pending = true; - - /* clock state B */ - if (safe_to_lower || watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns - > hubbub2->watermarks.b.cstate_pstate.cstate_enter_plus_exit_ns) { - hubbub2->watermarks.b.cstate_pstate.cstate_enter_plus_exit_ns = - watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns; - prog_wm_value = convert_and_clamp( - watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns, - refclk_mhz, 0xfffff); - REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B, 0, - DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B, prog_wm_value); - DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_B calculated =%d\n" - "HW register value = 0x%x\n", - watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value); - } else if (watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns - < hubbub2->watermarks.b.cstate_pstate.cstate_enter_plus_exit_ns) - wm_pending = true; - - if (safe_to_lower || watermarks->b.cstate_pstate.cstate_exit_ns - > hubbub2->watermarks.b.cstate_pstate.cstate_exit_ns) { - hubbub2->watermarks.b.cstate_pstate.cstate_exit_ns = - watermarks->b.cstate_pstate.cstate_exit_ns; - prog_wm_value = convert_and_clamp( - watermarks->b.cstate_pstate.cstate_exit_ns, - refclk_mhz, 0xfffff); - REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B, 0, - DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B, prog_wm_value); - DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_B calculated =%d\n" - "HW register value = 0x%x\n", - watermarks->b.cstate_pstate.cstate_exit_ns, prog_wm_value); - } else if (watermarks->b.cstate_pstate.cstate_exit_ns - < hubbub2->watermarks.b.cstate_pstate.cstate_exit_ns) - wm_pending = true; - - if (safe_to_lower || watermarks->b.cstate_pstate.cstate_enter_plus_exit_z8_ns - > hubbub2->watermarks.b.cstate_pstate.cstate_enter_plus_exit_z8_ns) { - hubbub2->watermarks.b.cstate_pstate.cstate_enter_plus_exit_z8_ns = - watermarks->b.cstate_pstate.cstate_enter_plus_exit_z8_ns; - prog_wm_value = convert_and_clamp( - watermarks->b.cstate_pstate.cstate_enter_plus_exit_z8_ns, - refclk_mhz, 0xfffff); - REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_B, 0, - DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_B, prog_wm_value); - DC_LOG_BANDWIDTH_CALCS("SR_ENTER_WATERMARK_Z8_B calculated =%d\n" - "HW register value = 0x%x\n", - watermarks->b.cstate_pstate.cstate_enter_plus_exit_z8_ns, prog_wm_value); - } else if (watermarks->b.cstate_pstate.cstate_enter_plus_exit_z8_ns - < hubbub2->watermarks.b.cstate_pstate.cstate_enter_plus_exit_z8_ns) - wm_pending = true; - - if (safe_to_lower || watermarks->b.cstate_pstate.cstate_exit_z8_ns - > hubbub2->watermarks.b.cstate_pstate.cstate_exit_z8_ns) { - hubbub2->watermarks.b.cstate_pstate.cstate_exit_z8_ns = - watermarks->b.cstate_pstate.cstate_exit_z8_ns; - prog_wm_value = convert_and_clamp( - watermarks->b.cstate_pstate.cstate_exit_z8_ns, - refclk_mhz, 
0xfffff); - REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_B, 0, - DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_B, prog_wm_value); - DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_Z8_B calculated =%d\n" - "HW register value = 0x%x\n", - watermarks->b.cstate_pstate.cstate_exit_z8_ns, prog_wm_value); - } else if (watermarks->b.cstate_pstate.cstate_exit_z8_ns - < hubbub2->watermarks.b.cstate_pstate.cstate_exit_z8_ns) - wm_pending = true; - - /* clock state C */ - if (safe_to_lower || watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns - > hubbub2->watermarks.c.cstate_pstate.cstate_enter_plus_exit_ns) { - hubbub2->watermarks.c.cstate_pstate.cstate_enter_plus_exit_ns = - watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns; - prog_wm_value = convert_and_clamp( - watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns, - refclk_mhz, 0xfffff); - REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C, 0, - DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C, prog_wm_value); - DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_C calculated =%d\n" - "HW register value = 0x%x\n", - watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value); - } else if (watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns - < hubbub2->watermarks.c.cstate_pstate.cstate_enter_plus_exit_ns) - wm_pending = true; - - if (safe_to_lower || watermarks->c.cstate_pstate.cstate_exit_ns - > hubbub2->watermarks.c.cstate_pstate.cstate_exit_ns) { - hubbub2->watermarks.c.cstate_pstate.cstate_exit_ns = - watermarks->c.cstate_pstate.cstate_exit_ns; - prog_wm_value = convert_and_clamp( - watermarks->c.cstate_pstate.cstate_exit_ns, - refclk_mhz, 0xfffff); - REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C, 0, - DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C, prog_wm_value); - DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_C calculated =%d\n" - "HW register value = 0x%x\n", - watermarks->c.cstate_pstate.cstate_exit_ns, prog_wm_value); - } else if (watermarks->c.cstate_pstate.cstate_exit_ns - < hubbub2->watermarks.c.cstate_pstate.cstate_exit_ns) - wm_pending = true; - - if (safe_to_lower || watermarks->c.cstate_pstate.cstate_enter_plus_exit_z8_ns - > hubbub2->watermarks.c.cstate_pstate.cstate_enter_plus_exit_z8_ns) { - hubbub2->watermarks.c.cstate_pstate.cstate_enter_plus_exit_z8_ns = - watermarks->c.cstate_pstate.cstate_enter_plus_exit_z8_ns; - prog_wm_value = convert_and_clamp( - watermarks->c.cstate_pstate.cstate_enter_plus_exit_z8_ns, - refclk_mhz, 0xfffff); - REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_C, 0, - DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_C, prog_wm_value); - DC_LOG_BANDWIDTH_CALCS("SR_ENTER_WATERMARK_Z8_C calculated =%d\n" - "HW register value = 0x%x\n", - watermarks->c.cstate_pstate.cstate_enter_plus_exit_z8_ns, prog_wm_value); - } else if (watermarks->c.cstate_pstate.cstate_enter_plus_exit_z8_ns - < hubbub2->watermarks.c.cstate_pstate.cstate_enter_plus_exit_z8_ns) - wm_pending = true; - - if (safe_to_lower || watermarks->c.cstate_pstate.cstate_exit_z8_ns - > hubbub2->watermarks.c.cstate_pstate.cstate_exit_z8_ns) { - hubbub2->watermarks.c.cstate_pstate.cstate_exit_z8_ns = - watermarks->c.cstate_pstate.cstate_exit_z8_ns; - prog_wm_value = convert_and_clamp( - watermarks->c.cstate_pstate.cstate_exit_z8_ns, - refclk_mhz, 0xfffff); - REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_C, 0, - DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_C, prog_wm_value); - DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_Z8_C calculated =%d\n" - "HW register value = 0x%x\n", - watermarks->c.cstate_pstate.cstate_exit_z8_ns, prog_wm_value); - } else if 
(watermarks->c.cstate_pstate.cstate_exit_z8_ns - < hubbub2->watermarks.c.cstate_pstate.cstate_exit_z8_ns) - wm_pending = true; - - /* clock state D */ - if (safe_to_lower || watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns - > hubbub2->watermarks.d.cstate_pstate.cstate_enter_plus_exit_ns) { - hubbub2->watermarks.d.cstate_pstate.cstate_enter_plus_exit_ns = - watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns; - prog_wm_value = convert_and_clamp( - watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns, - refclk_mhz, 0xfffff); - REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D, 0, - DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D, prog_wm_value); - DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_D calculated =%d\n" - "HW register value = 0x%x\n", - watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value); - } else if (watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns - < hubbub2->watermarks.d.cstate_pstate.cstate_enter_plus_exit_ns) - wm_pending = true; - - if (safe_to_lower || watermarks->d.cstate_pstate.cstate_exit_ns - > hubbub2->watermarks.d.cstate_pstate.cstate_exit_ns) { - hubbub2->watermarks.d.cstate_pstate.cstate_exit_ns = - watermarks->d.cstate_pstate.cstate_exit_ns; - prog_wm_value = convert_and_clamp( - watermarks->d.cstate_pstate.cstate_exit_ns, - refclk_mhz, 0xfffff); - REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D, 0, - DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D, prog_wm_value); - DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_D calculated =%d\n" - "HW register value = 0x%x\n", - watermarks->d.cstate_pstate.cstate_exit_ns, prog_wm_value); - } else if (watermarks->d.cstate_pstate.cstate_exit_ns - < hubbub2->watermarks.d.cstate_pstate.cstate_exit_ns) - wm_pending = true; - - if (safe_to_lower || watermarks->d.cstate_pstate.cstate_enter_plus_exit_z8_ns - > hubbub2->watermarks.d.cstate_pstate.cstate_enter_plus_exit_z8_ns) { - hubbub2->watermarks.d.cstate_pstate.cstate_enter_plus_exit_z8_ns = - watermarks->d.cstate_pstate.cstate_enter_plus_exit_z8_ns; - prog_wm_value = convert_and_clamp( - watermarks->d.cstate_pstate.cstate_enter_plus_exit_z8_ns, - refclk_mhz, 0xfffff); - REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_D, 0, - DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_D, prog_wm_value); - DC_LOG_BANDWIDTH_CALCS("SR_ENTER_WATERMARK_Z8_D calculated =%d\n" - "HW register value = 0x%x\n", - watermarks->d.cstate_pstate.cstate_enter_plus_exit_z8_ns, prog_wm_value); - } else if (watermarks->d.cstate_pstate.cstate_enter_plus_exit_z8_ns - < hubbub2->watermarks.d.cstate_pstate.cstate_enter_plus_exit_z8_ns) - wm_pending = true; - - if (safe_to_lower || watermarks->d.cstate_pstate.cstate_exit_z8_ns - > hubbub2->watermarks.d.cstate_pstate.cstate_exit_z8_ns) { - hubbub2->watermarks.d.cstate_pstate.cstate_exit_z8_ns = - watermarks->d.cstate_pstate.cstate_exit_z8_ns; - prog_wm_value = convert_and_clamp( - watermarks->d.cstate_pstate.cstate_exit_z8_ns, - refclk_mhz, 0xfffff); - REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_D, 0, - DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_D, prog_wm_value); - DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_Z8_D calculated =%d\n" - "HW register value = 0x%x\n", - watermarks->d.cstate_pstate.cstate_exit_z8_ns, prog_wm_value); - } else if (watermarks->d.cstate_pstate.cstate_exit_z8_ns - < hubbub2->watermarks.d.cstate_pstate.cstate_exit_z8_ns) - wm_pending = true; - - return wm_pending; -} - -static bool hubbub31_program_pstate_watermarks( - struct hubbub *hubbub, - union dcn_watermark_set *watermarks, - unsigned int refclk_mhz, - bool safe_to_lower) -{ - struct 
dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub); - uint32_t prog_wm_value; - - bool wm_pending = false; - - /* clock state A */ - if (safe_to_lower || watermarks->a.cstate_pstate.pstate_change_ns - > hubbub2->watermarks.a.cstate_pstate.pstate_change_ns) { - hubbub2->watermarks.a.cstate_pstate.pstate_change_ns = - watermarks->a.cstate_pstate.pstate_change_ns; - prog_wm_value = convert_and_clamp( - watermarks->a.cstate_pstate.pstate_change_ns, - refclk_mhz, 0xffff); - REG_SET(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A, 0, - DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A, prog_wm_value); - DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_A calculated =%d\n" - "HW register value = 0x%x\n\n", - watermarks->a.cstate_pstate.pstate_change_ns, prog_wm_value); - } else if (watermarks->a.cstate_pstate.pstate_change_ns - < hubbub2->watermarks.a.cstate_pstate.pstate_change_ns) - wm_pending = true; - - /* clock state B */ - if (safe_to_lower || watermarks->b.cstate_pstate.pstate_change_ns - > hubbub2->watermarks.b.cstate_pstate.pstate_change_ns) { - hubbub2->watermarks.b.cstate_pstate.pstate_change_ns = - watermarks->b.cstate_pstate.pstate_change_ns; - prog_wm_value = convert_and_clamp( - watermarks->b.cstate_pstate.pstate_change_ns, - refclk_mhz, 0xffff); - REG_SET(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B, 0, - DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B, prog_wm_value); - DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_B calculated =%d\n" - "HW register value = 0x%x\n\n", - watermarks->b.cstate_pstate.pstate_change_ns, prog_wm_value); - } else if (watermarks->b.cstate_pstate.pstate_change_ns - < hubbub2->watermarks.b.cstate_pstate.pstate_change_ns) - wm_pending = false; - - /* clock state C */ - if (safe_to_lower || watermarks->c.cstate_pstate.pstate_change_ns - > hubbub2->watermarks.c.cstate_pstate.pstate_change_ns) { - hubbub2->watermarks.c.cstate_pstate.pstate_change_ns = - watermarks->c.cstate_pstate.pstate_change_ns; - prog_wm_value = convert_and_clamp( - watermarks->c.cstate_pstate.pstate_change_ns, - refclk_mhz, 0xffff); - REG_SET(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C, 0, - DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C, prog_wm_value); - DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_C calculated =%d\n" - "HW register value = 0x%x\n\n", - watermarks->c.cstate_pstate.pstate_change_ns, prog_wm_value); - } else if (watermarks->c.cstate_pstate.pstate_change_ns - < hubbub2->watermarks.c.cstate_pstate.pstate_change_ns) - wm_pending = true; - - /* clock state D */ - if (safe_to_lower || watermarks->d.cstate_pstate.pstate_change_ns - > hubbub2->watermarks.d.cstate_pstate.pstate_change_ns) { - hubbub2->watermarks.d.cstate_pstate.pstate_change_ns = - watermarks->d.cstate_pstate.pstate_change_ns; - prog_wm_value = convert_and_clamp( - watermarks->d.cstate_pstate.pstate_change_ns, - refclk_mhz, 0xffff); - REG_SET(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D, 0, - DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D, prog_wm_value); - DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_D calculated =%d\n" - "HW register value = 0x%x\n\n", - watermarks->d.cstate_pstate.pstate_change_ns, prog_wm_value); - } else if (watermarks->d.cstate_pstate.pstate_change_ns - < hubbub2->watermarks.d.cstate_pstate.pstate_change_ns) - wm_pending = true; - - return wm_pending; -} - -static bool hubbub31_program_watermarks( - struct hubbub *hubbub, - union dcn_watermark_set *watermarks, - unsigned int refclk_mhz, - bool safe_to_lower) -{ - bool wm_pending = false; - - if 
(hubbub31_program_urgent_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower)) - wm_pending = true; - - if (hubbub31_program_stutter_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower)) - wm_pending = true; - - if (hubbub31_program_pstate_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower)) - wm_pending = true; - - /* - * The DCHub arbiter has a mechanism to dynamically rate limit the DCHub request stream to the fabric. - * If the memory controller is fully utilized and the DCHub requestors are - * well ahead of their amortized schedule, then it is safe to prevent the next winner - * from being committed and sent to the fabric. - * The utilization of the memory controller is approximated by ensuring that - * the number of outstanding requests is greater than a threshold specified - * by the ARB_MIN_REQ_OUTSTANDING. To determine that the DCHub requestors are well ahead of the amortized schedule, - * the slack of the next winner is compared with the ARB_SAT_LEVEL in DLG RefClk cycles. - * - * TODO: Revisit request limit after figure out right number. request limit for RM isn't decided yet, set maximum value (0x1FF) - * to turn off it for now. - */ - /*REG_SET(DCHUBBUB_ARB_SAT_LEVEL, 0, - DCHUBBUB_ARB_SAT_LEVEL, 60 * refclk_mhz); - REG_UPDATE(DCHUBBUB_ARB_DF_REQ_OUTSTAND, - DCHUBBUB_ARB_MIN_REQ_OUTSTAND, 0x1FF);*/ - - hubbub1_allow_self_refresh_control(hubbub, !hubbub->ctx->dc->debug.disable_stutter); - return wm_pending; -} - -static void hubbub3_get_blk256_size(unsigned int *blk256_width, unsigned int *blk256_height, - unsigned int bytes_per_element) -{ - /* copied from DML. might want to refactor DML to leverage from DML */ - /* DML : get_blk256_size */ - if (bytes_per_element == 1) { - *blk256_width = 16; - *blk256_height = 16; - } else if (bytes_per_element == 2) { - *blk256_width = 16; - *blk256_height = 8; - } else if (bytes_per_element == 4) { - *blk256_width = 8; - *blk256_height = 8; - } else if (bytes_per_element == 8) { - *blk256_width = 8; - *blk256_height = 4; - } -} - -static void hubbub31_det_request_size( - unsigned int detile_buf_size, - unsigned int height, - unsigned int width, - unsigned int bpe, - bool *req128_horz_wc, - bool *req128_vert_wc) -{ - unsigned int blk256_height = 0; - unsigned int blk256_width = 0; - unsigned int swath_bytes_horz_wc, swath_bytes_vert_wc; - - hubbub3_get_blk256_size(&blk256_width, &blk256_height, bpe); - - swath_bytes_horz_wc = width * blk256_height * bpe; - swath_bytes_vert_wc = height * blk256_width * bpe; - - *req128_horz_wc = (2 * swath_bytes_horz_wc <= detile_buf_size) ? - false : /* full 256B request */ - true; /* half 128b request */ - - *req128_vert_wc = (2 * swath_bytes_vert_wc <= detile_buf_size) ? 
- false : /* full 256B request */ - true; /* half 128b request */ -} - -static bool hubbub31_get_dcc_compression_cap(struct hubbub *hubbub, - const struct dc_dcc_surface_param *input, - struct dc_surface_dcc_cap *output) -{ - struct dc *dc = hubbub->ctx->dc; - enum dcc_control dcc_control; - unsigned int bpe; - enum segment_order segment_order_horz, segment_order_vert; - bool req128_horz_wc, req128_vert_wc; - - memset(output, 0, sizeof(*output)); - - if (dc->debug.disable_dcc == DCC_DISABLE) - return false; - - if (!hubbub->funcs->dcc_support_pixel_format(input->format, - &bpe)) - return false; - - if (!hubbub->funcs->dcc_support_swizzle(input->swizzle_mode, bpe, - &segment_order_horz, &segment_order_vert)) - return false; - - hubbub31_det_request_size(TO_DCN20_HUBBUB(hubbub)->detile_buf_size, - input->surface_size.height, input->surface_size.width, - bpe, &req128_horz_wc, &req128_vert_wc); - - if (!req128_horz_wc && !req128_vert_wc) { - dcc_control = dcc_control__256_256_xxx; - } else if (input->scan == SCAN_DIRECTION_HORIZONTAL) { - if (!req128_horz_wc) - dcc_control = dcc_control__256_256_xxx; - else if (segment_order_horz == segment_order__contiguous) - dcc_control = dcc_control__128_128_xxx; - else - dcc_control = dcc_control__256_64_64; - } else if (input->scan == SCAN_DIRECTION_VERTICAL) { - if (!req128_vert_wc) - dcc_control = dcc_control__256_256_xxx; - else if (segment_order_vert == segment_order__contiguous) - dcc_control = dcc_control__128_128_xxx; - else - dcc_control = dcc_control__256_64_64; - } else { - if ((req128_horz_wc && - segment_order_horz == segment_order__non_contiguous) || - (req128_vert_wc && - segment_order_vert == segment_order__non_contiguous)) - /* access_dir not known, must use most constraining */ - dcc_control = dcc_control__256_64_64; - else - /* reg128 is true for either horz and vert - * but segment_order is contiguous - */ - dcc_control = dcc_control__128_128_xxx; - } - - /* Exception for 64KB_R_X */ - if ((bpe == 2) && (input->swizzle_mode == DC_SW_64KB_R_X)) - dcc_control = dcc_control__128_128_xxx; - - if (dc->debug.disable_dcc == DCC_HALF_REQ_DISALBE && - dcc_control != dcc_control__256_256_xxx) - return false; - - switch (dcc_control) { - case dcc_control__256_256_xxx: - output->grph.rgb.max_uncompressed_blk_size = 256; - output->grph.rgb.max_compressed_blk_size = 256; - output->grph.rgb.independent_64b_blks = false; - output->grph.rgb.dcc_controls.dcc_256_256_unconstrained = 1; - output->grph.rgb.dcc_controls.dcc_256_128_128 = 1; - break; - case dcc_control__128_128_xxx: - output->grph.rgb.max_uncompressed_blk_size = 128; - output->grph.rgb.max_compressed_blk_size = 128; - output->grph.rgb.independent_64b_blks = false; - output->grph.rgb.dcc_controls.dcc_128_128_uncontrained = 1; - output->grph.rgb.dcc_controls.dcc_256_128_128 = 1; - break; - case dcc_control__256_64_64: - output->grph.rgb.max_uncompressed_blk_size = 256; - output->grph.rgb.max_compressed_blk_size = 64; - output->grph.rgb.independent_64b_blks = true; - output->grph.rgb.dcc_controls.dcc_256_64_64 = 1; - break; - case dcc_control__256_128_128: - output->grph.rgb.max_uncompressed_blk_size = 256; - output->grph.rgb.max_compressed_blk_size = 128; - output->grph.rgb.independent_64b_blks = false; - output->grph.rgb.dcc_controls.dcc_256_128_128 = 1; - break; - } - output->capable = true; - output->const_color_support = true; - - return true; -} - -int hubbub31_init_dchub_sys_ctx(struct hubbub *hubbub, - struct dcn_hubbub_phys_addr_config *pa_config) -{ - struct dcn20_hubbub *hubbub2 = 
TO_DCN20_HUBBUB(hubbub); - struct dcn_vmid_page_table_config phys_config; - - REG_SET(DCN_VM_FB_LOCATION_BASE, 0, - FB_BASE, pa_config->system_aperture.fb_base >> 24); - REG_SET(DCN_VM_FB_LOCATION_TOP, 0, - FB_TOP, pa_config->system_aperture.fb_top >> 24); - REG_SET(DCN_VM_FB_OFFSET, 0, - FB_OFFSET, pa_config->system_aperture.fb_offset >> 24); - REG_SET(DCN_VM_AGP_BOT, 0, - AGP_BOT, pa_config->system_aperture.agp_bot >> 24); - REG_SET(DCN_VM_AGP_TOP, 0, - AGP_TOP, pa_config->system_aperture.agp_top >> 24); - REG_SET(DCN_VM_AGP_BASE, 0, - AGP_BASE, pa_config->system_aperture.agp_base >> 24); - - if (pa_config->gart_config.page_table_start_addr != pa_config->gart_config.page_table_end_addr) { - phys_config.page_table_start_addr = pa_config->gart_config.page_table_start_addr >> 12; - phys_config.page_table_end_addr = pa_config->gart_config.page_table_end_addr >> 12; - phys_config.page_table_base_addr = pa_config->gart_config.page_table_base_addr; - phys_config.depth = 0; - phys_config.block_size = 0; - // Init VMID 0 based on PA config - dcn20_vmid_setup(&hubbub2->vmid[0], &phys_config); - - dcn20_vmid_setup(&hubbub2->vmid[15], &phys_config); - } - - dcn21_dchvm_init(hubbub); - - return NUM_VMID; -} - -static void hubbub31_get_dchub_ref_freq(struct hubbub *hubbub, - unsigned int dccg_ref_freq_inKhz, - unsigned int *dchub_ref_freq_inKhz) -{ - struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub); - uint32_t ref_div = 0; - uint32_t ref_en = 0; - unsigned int dc_refclk_khz = 24000; - - REG_GET_2(DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_REFDIV, &ref_div, - DCHUBBUB_GLOBAL_TIMER_ENABLE, &ref_en); - - if (ref_en) { - if (ref_div == 2) - *dchub_ref_freq_inKhz = dc_refclk_khz / 2; - else - *dchub_ref_freq_inKhz = dc_refclk_khz; - - /* - * The external Reference Clock may change based on the board or - * platform requirements and the programmable integer divide must - * be programmed to provide a suitable DLG RefClk frequency between - * a minimum of 20MHz and maximum of 50MHz - */ - if (*dchub_ref_freq_inKhz < 20000 || *dchub_ref_freq_inKhz > 50000) - ASSERT_CRITICAL(false); - - return; - } else { - *dchub_ref_freq_inKhz = dc_refclk_khz; - - // HUBBUB global timer must be enabled. - ASSERT_CRITICAL(false); - return; - } -} - -static bool hubbub31_verify_allow_pstate_change_high(struct hubbub *hubbub) -{ - struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub); - - /* - * Pstate latency is ~20us so if we wait over 40us and pstate allow - * still not asserted, we are probably stuck and going to hang - */ - const unsigned int pstate_wait_timeout_us = 100; - const unsigned int pstate_wait_expected_timeout_us = 40; - - static unsigned int max_sampled_pstate_wait_us; /* data collection */ - static bool forced_pstate_allow; /* help with revert wa */ - - unsigned int debug_data = 0; - unsigned int i; - - if (forced_pstate_allow) { - /* we hacked to force pstate allow to prevent hang last time - * we verify_allow_pstate_change_high. so disable force - * here so we can check status - */ - REG_UPDATE_2(DCHUBBUB_ARB_DRAM_STATE_CNTL, - DCHUBBUB_ARB_ALLOW_PSTATE_CHANGE_FORCE_VALUE, 0, - DCHUBBUB_ARB_ALLOW_PSTATE_CHANGE_FORCE_ENABLE, 0); - forced_pstate_allow = false; - } - - REG_WRITE(DCHUBBUB_TEST_DEBUG_INDEX, hubbub2->debug_test_index_pstate); - - for (i = 0; i < pstate_wait_timeout_us; i++) { - debug_data = REG_READ(DCHUBBUB_TEST_DEBUG_DATA); - - /* Debug bit is specific to ASIC. 
*/ - if (debug_data & (1 << 26)) { - if (i > pstate_wait_expected_timeout_us) - DC_LOG_WARNING("pstate took longer than expected ~%dus\n", i); - return true; - } - if (max_sampled_pstate_wait_us < i) - max_sampled_pstate_wait_us = i; - - udelay(1); - } - - /* force pstate allow to prevent system hang - * and break to debugger to investigate - */ - REG_UPDATE_2(DCHUBBUB_ARB_DRAM_STATE_CNTL, - DCHUBBUB_ARB_ALLOW_PSTATE_CHANGE_FORCE_VALUE, 1, - DCHUBBUB_ARB_ALLOW_PSTATE_CHANGE_FORCE_ENABLE, 1); - forced_pstate_allow = true; - - DC_LOG_WARNING("pstate TEST_DEBUG_DATA: 0x%X\n", - debug_data); - - return false; -} - -void hubbub31_init(struct hubbub *hubbub) -{ - struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub); - - /*Enable clock gate*/ - if (hubbub->ctx->dc->debug.disable_clock_gate) { - /*done in hwseq*/ - /*REG_UPDATE(DCFCLK_CNTL, DCFCLK_GATE_DIS, 0);*/ - REG_UPDATE_2(DCHUBBUB_CLOCK_CNTL, - DISPCLK_R_DCHUBBUB_GATE_DIS, 1, - DCFCLK_R_DCHUBBUB_GATE_DIS, 1); - } - - /* - only the DCN will determine when to connect the SDP port - */ - REG_UPDATE(DCHUBBUB_SDPIF_CFG0, SDPIF_PORT_CONTROL, 1); -} -static const struct hubbub_funcs hubbub31_funcs = { - .update_dchub = hubbub2_update_dchub, - .init_dchub_sys_ctx = hubbub31_init_dchub_sys_ctx, - .init_vm_ctx = hubbub2_init_vm_ctx, - .dcc_support_swizzle = hubbub3_dcc_support_swizzle, - .dcc_support_pixel_format = hubbub2_dcc_support_pixel_format, - .get_dcc_compression_cap = hubbub31_get_dcc_compression_cap, - .wm_read_state = hubbub21_wm_read_state, - .get_dchub_ref_freq = hubbub31_get_dchub_ref_freq, - .program_watermarks = hubbub31_program_watermarks, - .allow_self_refresh_control = hubbub1_allow_self_refresh_control, - .is_allow_self_refresh_enabled = hubbub1_is_allow_self_refresh_enabled, - .verify_allow_pstate_change_high = hubbub31_verify_allow_pstate_change_high, - .program_det_size = dcn31_program_det_size, - .wait_for_det_apply = dcn31_wait_for_det_apply, - .program_compbuf_size = dcn31_program_compbuf_size, - .init_crb = dcn31_init_crb, - .hubbub_read_state = hubbub2_read_state, -}; - -void hubbub31_construct(struct dcn20_hubbub *hubbub31, - struct dc_context *ctx, - const struct dcn_hubbub_registers *hubbub_regs, - const struct dcn_hubbub_shift *hubbub_shift, - const struct dcn_hubbub_mask *hubbub_mask, - int det_size_kb, - int pixel_chunk_size_kb, - int config_return_buffer_size_kb) -{ - - hubbub3_construct(hubbub31, ctx, hubbub_regs, hubbub_shift, hubbub_mask); - hubbub31->base.funcs = &hubbub31_funcs; - hubbub31->detile_buf_size = det_size_kb * 1024; - hubbub31->pixel_chunk_size = pixel_chunk_size_kb * 1024; - hubbub31->crb_size_segs = config_return_buffer_size_kb / DCN31_CRB_SEGMENT_SIZE_KB; - - hubbub31->debug_test_index_pstate = 0x6; -} - diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubbub.h b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubbub.h deleted file mode 100644 index 89d6208287b5..000000000000 --- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubbub.h +++ /dev/null @@ -1,147 +0,0 @@ -/* - * Copyright 2016 Advanced Micro Devices, Inc. 
- * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - * Authors: AMD - * - */ - -#ifndef __DC_HUBBUB_DCN31_H__ -#define __DC_HUBBUB_DCN31_H__ - -#include "dcn21/dcn21_hubbub.h" - -#define HUBBUB_REG_LIST_DCN31(id)\ - HUBBUB_REG_LIST_DCN30(id),\ - SR(DCHVM_CTRL0),\ - SR(DCHVM_MEM_CTRL),\ - SR(DCHVM_CLK_CTRL),\ - SR(DCHVM_RIOMMU_CTRL0),\ - SR(DCHVM_RIOMMU_STAT0),\ - SR(DCHUBBUB_DET0_CTRL),\ - SR(DCHUBBUB_DET1_CTRL),\ - SR(DCHUBBUB_DET2_CTRL),\ - SR(DCHUBBUB_DET3_CTRL),\ - SR(DCHUBBUB_COMPBUF_CTRL),\ - SR(COMPBUF_RESERVED_SPACE),\ - SR(DCHUBBUB_DEBUG_CTRL_0),\ - SR(DCHUBBUB_CLOCK_CNTL),\ - SR(DCHUBBUB_SDPIF_CFG0),\ - SR(DCHUBBUB_SDPIF_CFG1),\ - SR(DCHUBBUB_MEM_PWR_MODE_CTRL),\ - SR(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_A),\ - SR(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_A),\ - SR(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_B),\ - SR(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_B),\ - SR(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_C),\ - SR(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_C),\ - SR(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_D),\ - SR(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_D) - -#define HUBBUB_MASK_SH_LIST_DCN31(mask_sh)\ - HUBBUB_MASK_SH_LIST_DCN_COMMON(mask_sh), \ - HUBBUB_MASK_SH_LIST_STUTTER(mask_sh), \ - HUBBUB_SF(DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_REFDIV, mask_sh), \ - HUBBUB_SF(DCN_VM_FB_LOCATION_BASE, FB_BASE, mask_sh), \ - HUBBUB_SF(DCN_VM_FB_LOCATION_TOP, FB_TOP, mask_sh), \ - HUBBUB_SF(DCN_VM_FB_OFFSET, FB_OFFSET, mask_sh), \ - HUBBUB_SF(DCN_VM_AGP_BOT, AGP_BOT, mask_sh), \ - HUBBUB_SF(DCN_VM_AGP_TOP, AGP_TOP, mask_sh), \ - HUBBUB_SF(DCN_VM_AGP_BASE, AGP_BASE, mask_sh), \ - HUBBUB_SF(DCHVM_CTRL0, HOSTVM_INIT_REQ, mask_sh),\ - HUBBUB_SF(DCHVM_MEM_CTRL, HVM_GPUVMRET_PWR_REQ_DIS, mask_sh),\ - HUBBUB_SF(DCHVM_MEM_CTRL, HVM_GPUVMRET_FORCE_REQ, mask_sh),\ - HUBBUB_SF(DCHVM_MEM_CTRL, HVM_GPUVMRET_POWER_STATUS, mask_sh),\ - HUBBUB_SF(DCHVM_CLK_CTRL, HVM_DISPCLK_R_GATE_DIS, mask_sh),\ - HUBBUB_SF(DCHVM_CLK_CTRL, HVM_DISPCLK_G_GATE_DIS, mask_sh),\ - HUBBUB_SF(DCHVM_CLK_CTRL, HVM_DCFCLK_R_GATE_DIS, mask_sh),\ - HUBBUB_SF(DCHVM_CLK_CTRL, HVM_DCFCLK_G_GATE_DIS, mask_sh),\ - HUBBUB_SF(DCHVM_CLK_CTRL, TR_REQ_REQCLKREQ_MODE, mask_sh),\ - HUBBUB_SF(DCHVM_CLK_CTRL, TW_RSP_COMPCLKREQ_MODE, mask_sh),\ - HUBBUB_SF(DCHVM_RIOMMU_CTRL0, HOSTVM_PREFETCH_REQ, mask_sh),\ - HUBBUB_SF(DCHVM_RIOMMU_CTRL0, HOSTVM_POWERSTATUS, mask_sh),\ - HUBBUB_SF(DCHVM_RIOMMU_STAT0, RIOMMU_ACTIVE, mask_sh),\ - HUBBUB_SF(DCHVM_RIOMMU_STAT0, HOSTVM_PREFETCH_DONE, mask_sh),\ - 
HUBBUB_SF(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_A, DCHUBBUB_ARB_FRAC_URG_BW_FLIP_A, mask_sh), \ - HUBBUB_SF(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_B, DCHUBBUB_ARB_FRAC_URG_BW_FLIP_B, mask_sh), \ - HUBBUB_SF(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_C, DCHUBBUB_ARB_FRAC_URG_BW_FLIP_C, mask_sh), \ - HUBBUB_SF(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_D, DCHUBBUB_ARB_FRAC_URG_BW_FLIP_D, mask_sh), \ - HUBBUB_SF(DCHUBBUB_ARB_FRAC_URG_BW_NOM_A, DCHUBBUB_ARB_FRAC_URG_BW_NOM_A, mask_sh), \ - HUBBUB_SF(DCHUBBUB_ARB_FRAC_URG_BW_NOM_B, DCHUBBUB_ARB_FRAC_URG_BW_NOM_B, mask_sh), \ - HUBBUB_SF(DCHUBBUB_ARB_FRAC_URG_BW_NOM_C, DCHUBBUB_ARB_FRAC_URG_BW_NOM_C, mask_sh), \ - HUBBUB_SF(DCHUBBUB_ARB_FRAC_URG_BW_NOM_D, DCHUBBUB_ARB_FRAC_URG_BW_NOM_D, mask_sh), \ - HUBBUB_SF(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_A, DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_A, mask_sh), \ - HUBBUB_SF(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_B, DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_B, mask_sh), \ - HUBBUB_SF(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_C, DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_C, mask_sh), \ - HUBBUB_SF(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_D, DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_D, mask_sh),\ - HUBBUB_SF(DCHUBBUB_DEBUG_CTRL_0, DET_DEPTH, mask_sh),\ - HUBBUB_SF(DCHUBBUB_DET0_CTRL, DET0_SIZE, mask_sh),\ - HUBBUB_SF(DCHUBBUB_DET0_CTRL, DET0_SIZE_CURRENT, mask_sh),\ - HUBBUB_SF(DCHUBBUB_DET1_CTRL, DET1_SIZE, mask_sh),\ - HUBBUB_SF(DCHUBBUB_DET1_CTRL, DET1_SIZE_CURRENT, mask_sh),\ - HUBBUB_SF(DCHUBBUB_DET2_CTRL, DET2_SIZE, mask_sh),\ - HUBBUB_SF(DCHUBBUB_DET2_CTRL, DET2_SIZE_CURRENT, mask_sh),\ - HUBBUB_SF(DCHUBBUB_DET3_CTRL, DET3_SIZE, mask_sh),\ - HUBBUB_SF(DCHUBBUB_DET3_CTRL, DET3_SIZE_CURRENT, mask_sh),\ - HUBBUB_SF(DCHUBBUB_COMPBUF_CTRL, COMPBUF_SIZE, mask_sh),\ - HUBBUB_SF(DCHUBBUB_COMPBUF_CTRL, COMPBUF_SIZE_CURRENT, mask_sh),\ - HUBBUB_SF(DCHUBBUB_COMPBUF_CTRL, CONFIG_ERROR, mask_sh),\ - HUBBUB_SF(COMPBUF_RESERVED_SPACE, COMPBUF_RESERVED_SPACE_64B, mask_sh),\ - HUBBUB_SF(COMPBUF_RESERVED_SPACE, COMPBUF_RESERVED_SPACE_ZS, mask_sh),\ - HUBBUB_SF(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_A, DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_A, mask_sh), \ - HUBBUB_SF(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_B, DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_B, mask_sh), \ - HUBBUB_SF(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_C, DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_C, mask_sh), \ - HUBBUB_SF(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_D, DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_D, mask_sh), \ - HUBBUB_SF(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_A, DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_A, mask_sh), \ - HUBBUB_SF(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_B, DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_B, mask_sh), \ - HUBBUB_SF(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_C, DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_C, mask_sh), \ - HUBBUB_SF(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_D, DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_D, mask_sh), \ - HUBBUB_SF(DCN_VM_FAULT_ADDR_MSB, DCN_VM_FAULT_ADDR_MSB, mask_sh), \ - HUBBUB_SF(DCN_VM_FAULT_ADDR_LSB, DCN_VM_FAULT_ADDR_LSB, mask_sh), \ - HUBBUB_SF(DCN_VM_FAULT_CNTL, DCN_VM_ERROR_STATUS_CLEAR, mask_sh), \ - HUBBUB_SF(DCN_VM_FAULT_CNTL, DCN_VM_ERROR_STATUS_MODE, mask_sh), \ - HUBBUB_SF(DCN_VM_FAULT_CNTL, DCN_VM_ERROR_INTERRUPT_ENABLE, mask_sh), \ - HUBBUB_SF(DCN_VM_FAULT_CNTL, DCN_VM_RANGE_FAULT_DISABLE, mask_sh), \ - HUBBUB_SF(DCN_VM_FAULT_CNTL, DCN_VM_PRQ_FAULT_DISABLE, mask_sh), \ - HUBBUB_SF(DCN_VM_FAULT_STATUS, DCN_VM_ERROR_STATUS, mask_sh), \ - HUBBUB_SF(DCN_VM_FAULT_STATUS, DCN_VM_ERROR_VMID, mask_sh), \ - HUBBUB_SF(DCN_VM_FAULT_STATUS, 
DCN_VM_ERROR_TABLE_LEVEL, mask_sh), \ - HUBBUB_SF(DCN_VM_FAULT_STATUS, DCN_VM_ERROR_PIPE, mask_sh), \ - HUBBUB_SF(DCN_VM_FAULT_STATUS, DCN_VM_ERROR_INTERRUPT_STATUS, mask_sh),\ - HUBBUB_SF(DCHUBBUB_CLOCK_CNTL, DISPCLK_R_DCHUBBUB_GATE_DIS, mask_sh),\ - HUBBUB_SF(DCHUBBUB_CLOCK_CNTL, DCFCLK_R_DCHUBBUB_GATE_DIS, mask_sh),\ - HUBBUB_SF(DCHUBBUB_SDPIF_CFG0, SDPIF_PORT_CONTROL, mask_sh),\ - HUBBUB_SF(DCHUBBUB_MEM_PWR_MODE_CTRL, DET_MEM_PWR_LS_MODE, mask_sh) - -int hubbub31_init_dchub_sys_ctx(struct hubbub *hubbub, - struct dcn_hubbub_phys_addr_config *pa_config); - -void hubbub31_init(struct hubbub *hubbub); - -void hubbub31_construct(struct dcn20_hubbub *hubbub3, - struct dc_context *ctx, - const struct dcn_hubbub_registers *hubbub_regs, - const struct dcn_hubbub_shift *hubbub_shift, - const struct dcn_hubbub_mask *hubbub_mask, - int det_size_kb, - int pixel_chunk_size_kb, - int config_return_buffer_size_kb); - -#endif diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/Makefile b/drivers/gpu/drm/amd/display/dc/dcn32/Makefile index a58c37165f5a..8a6bc529f376 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn32/Makefile +++ b/drivers/gpu/drm/amd/display/dc/dcn32/Makefile @@ -10,7 +10,7 @@ # # Makefile for dcn32. -DCN32 = dcn32_hubbub.o dcn32_dccg.o \ +DCN32 = dcn32_dccg.o \ dcn32_mmhubbub.o dcn32_hubp.o dcn32_mpc.o \ dcn32_dio_stream_encoder.o dcn32_dio_link_encoder.o dcn32_resource_helpers.o \ dcn32_hpo_dp_link_encoder.o diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubbub.c deleted file mode 100644 index 5264dc26cce1..000000000000 --- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubbub.c +++ /dev/null @@ -1,1032 +0,0 @@ -/* - * Copyright 2021 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. 
- * - * Authors: AMD - * - */ - - -#include "dcn30/dcn30_hubbub.h" -#include "dcn32_hubbub.h" -#include "dm_services.h" -#include "reg_helper.h" - - -#define CTX \ - hubbub2->base.ctx -#define DC_LOGGER \ - hubbub2->base.ctx->logger -#define REG(reg)\ - hubbub2->regs->reg - -#undef FN -#define FN(reg_name, field_name) \ - hubbub2->shifts->field_name, hubbub2->masks->field_name - -/** - * DCN32_CRB_SEGMENT_SIZE_KB: Maximum Configurable Return Buffer size for - * DCN32 - */ -#define DCN32_CRB_SEGMENT_SIZE_KB 64 - -static void dcn32_init_crb(struct hubbub *hubbub) -{ - struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub); - - REG_GET(DCHUBBUB_DET0_CTRL, DET0_SIZE_CURRENT, - &hubbub2->det0_size); - - REG_GET(DCHUBBUB_DET1_CTRL, DET1_SIZE_CURRENT, - &hubbub2->det1_size); - - REG_GET(DCHUBBUB_DET2_CTRL, DET2_SIZE_CURRENT, - &hubbub2->det2_size); - - REG_GET(DCHUBBUB_DET3_CTRL, DET3_SIZE_CURRENT, - &hubbub2->det3_size); - - REG_GET(DCHUBBUB_COMPBUF_CTRL, COMPBUF_SIZE_CURRENT, - &hubbub2->compbuf_size_segments); - - REG_SET_2(COMPBUF_RESERVED_SPACE, 0, - COMPBUF_RESERVED_SPACE_64B, hubbub2->pixel_chunk_size / 32, - COMPBUF_RESERVED_SPACE_ZS, hubbub2->pixel_chunk_size / 128); - REG_UPDATE(DCHUBBUB_DEBUG_CTRL_0, DET_DEPTH, 0x47F); -} - -void hubbub32_set_request_limit(struct hubbub *hubbub, int memory_channel_count, int words_per_channel) -{ - struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub); - - uint32_t request_limit = 3 * memory_channel_count * words_per_channel / 4; - - ASSERT((request_limit & (~0xFFF)) == 0); //field is only 24 bits long - ASSERT(request_limit > 0); //field is only 24 bits long - - if (request_limit > 0xFFF) - request_limit = 0xFFF; - - if (request_limit > 0) - REG_UPDATE(SDPIF_REQUEST_RATE_LIMIT, SDPIF_REQUEST_RATE_LIMIT, request_limit); -} - - -void dcn32_program_det_size(struct hubbub *hubbub, int hubp_inst, unsigned int det_buffer_size_in_kbyte) -{ - struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub); - - unsigned int det_size_segments = (det_buffer_size_in_kbyte + DCN32_CRB_SEGMENT_SIZE_KB - 1) / DCN32_CRB_SEGMENT_SIZE_KB; - - switch (hubp_inst) { - case 0: - REG_UPDATE(DCHUBBUB_DET0_CTRL, - DET0_SIZE, det_size_segments); - hubbub2->det0_size = det_size_segments; - break; - case 1: - REG_UPDATE(DCHUBBUB_DET1_CTRL, - DET1_SIZE, det_size_segments); - hubbub2->det1_size = det_size_segments; - break; - case 2: - REG_UPDATE(DCHUBBUB_DET2_CTRL, - DET2_SIZE, det_size_segments); - hubbub2->det2_size = det_size_segments; - break; - case 3: - REG_UPDATE(DCHUBBUB_DET3_CTRL, - DET3_SIZE, det_size_segments); - hubbub2->det3_size = det_size_segments; - break; - default: - break; - } - if (hubbub2->det0_size + hubbub2->det1_size + hubbub2->det2_size - + hubbub2->det3_size + hubbub2->compbuf_size_segments > hubbub2->crb_size_segs) { - /* This may happen during seamless transition from ODM 2:1 to ODM4:1 */ - DC_LOG_WARNING("CRB Config Warning: DET size (%d,%d,%d,%d) + Compbuf size (%d) > CRB segments (%d)\n", - hubbub2->det0_size, hubbub2->det1_size, hubbub2->det2_size, hubbub2->det3_size, - hubbub2->compbuf_size_segments, hubbub2->crb_size_segs); - } -} - -void dcn32_program_compbuf_size(struct hubbub *hubbub, unsigned int compbuf_size_kb, bool safe_to_increase) -{ - struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub); - unsigned int compbuf_size_segments = (compbuf_size_kb + DCN32_CRB_SEGMENT_SIZE_KB - 1) / DCN32_CRB_SEGMENT_SIZE_KB; - - if (safe_to_increase || compbuf_size_segments <= hubbub2->compbuf_size_segments) { - if (compbuf_size_segments > 
hubbub2->compbuf_size_segments) { - REG_WAIT(DCHUBBUB_DET0_CTRL, DET0_SIZE_CURRENT, hubbub2->det0_size, 1, 100); - REG_WAIT(DCHUBBUB_DET1_CTRL, DET1_SIZE_CURRENT, hubbub2->det1_size, 1, 100); - REG_WAIT(DCHUBBUB_DET2_CTRL, DET2_SIZE_CURRENT, hubbub2->det2_size, 1, 100); - REG_WAIT(DCHUBBUB_DET3_CTRL, DET3_SIZE_CURRENT, hubbub2->det3_size, 1, 100); - } - /* Should never be hit, if it is we have an erroneous hw config*/ - ASSERT(hubbub2->det0_size + hubbub2->det1_size + hubbub2->det2_size - + hubbub2->det3_size + compbuf_size_segments <= hubbub2->crb_size_segs); - REG_UPDATE(DCHUBBUB_COMPBUF_CTRL, COMPBUF_SIZE, compbuf_size_segments); - hubbub2->compbuf_size_segments = compbuf_size_segments; - ASSERT(REG_GET(DCHUBBUB_COMPBUF_CTRL, CONFIG_ERROR, &compbuf_size_segments) && !compbuf_size_segments); - } -} - -static uint32_t convert_and_clamp( - uint32_t wm_ns, - uint32_t refclk_mhz, - uint32_t clamp_value) -{ - uint32_t ret_val = 0; - ret_val = wm_ns * refclk_mhz; - - ret_val /= 1000; - - if (ret_val > clamp_value) - ret_val = clamp_value; - - return ret_val; -} - -bool hubbub32_program_urgent_watermarks( - struct hubbub *hubbub, - union dcn_watermark_set *watermarks, - unsigned int refclk_mhz, - bool safe_to_lower) -{ - struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub); - uint32_t prog_wm_value; - bool wm_pending = false; - - /* Repeat for water mark set A, B, C and D. */ - /* clock state A */ - if (safe_to_lower || watermarks->a.urgent_ns > hubbub2->watermarks.a.urgent_ns) { - hubbub2->watermarks.a.urgent_ns = watermarks->a.urgent_ns; - prog_wm_value = convert_and_clamp(watermarks->a.urgent_ns, - refclk_mhz, 0x3fff); - REG_SET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A, 0, - DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A, prog_wm_value); - - DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_A calculated =%d\n" - "HW register value = 0x%x\n", - watermarks->a.urgent_ns, prog_wm_value); - } else if (watermarks->a.urgent_ns < hubbub2->watermarks.a.urgent_ns) - wm_pending = true; - - /* determine the transfer time for a quantity of data for a particular requestor.*/ - if (safe_to_lower || watermarks->a.frac_urg_bw_flip - > hubbub2->watermarks.a.frac_urg_bw_flip) { - hubbub2->watermarks.a.frac_urg_bw_flip = watermarks->a.frac_urg_bw_flip; - - REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_A, 0, - DCHUBBUB_ARB_FRAC_URG_BW_FLIP_A, watermarks->a.frac_urg_bw_flip); - } else if (watermarks->a.frac_urg_bw_flip - < hubbub2->watermarks.a.frac_urg_bw_flip) - wm_pending = true; - - if (safe_to_lower || watermarks->a.frac_urg_bw_nom - > hubbub2->watermarks.a.frac_urg_bw_nom) { - hubbub2->watermarks.a.frac_urg_bw_nom = watermarks->a.frac_urg_bw_nom; - - REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_NOM_A, 0, - DCHUBBUB_ARB_FRAC_URG_BW_NOM_A, watermarks->a.frac_urg_bw_nom); - } else if (watermarks->a.frac_urg_bw_nom - < hubbub2->watermarks.a.frac_urg_bw_nom) - wm_pending = true; - - if (safe_to_lower || watermarks->a.urgent_latency_ns > hubbub2->watermarks.a.urgent_latency_ns) { - hubbub2->watermarks.a.urgent_latency_ns = watermarks->a.urgent_latency_ns; - prog_wm_value = convert_and_clamp(watermarks->a.urgent_latency_ns, - refclk_mhz, 0x3fff); - REG_SET(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_A, 0, - DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_A, prog_wm_value); - } else if (watermarks->a.urgent_latency_ns < hubbub2->watermarks.a.urgent_latency_ns) - wm_pending = true; - - /* clock state B */ - if (safe_to_lower || watermarks->b.urgent_ns > hubbub2->watermarks.b.urgent_ns) { - hubbub2->watermarks.b.urgent_ns = watermarks->b.urgent_ns; - prog_wm_value = 
convert_and_clamp(watermarks->b.urgent_ns, - refclk_mhz, 0x3fff); - REG_SET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B, 0, - DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B, prog_wm_value); - - DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_B calculated =%d\n" - "HW register value = 0x%x\n", - watermarks->b.urgent_ns, prog_wm_value); - } else if (watermarks->b.urgent_ns < hubbub2->watermarks.b.urgent_ns) - wm_pending = true; - - /* determine the transfer time for a quantity of data for a particular requestor.*/ - if (safe_to_lower || watermarks->b.frac_urg_bw_flip - > hubbub2->watermarks.b.frac_urg_bw_flip) { - hubbub2->watermarks.b.frac_urg_bw_flip = watermarks->b.frac_urg_bw_flip; - - REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_B, 0, - DCHUBBUB_ARB_FRAC_URG_BW_FLIP_B, watermarks->b.frac_urg_bw_flip); - } else if (watermarks->b.frac_urg_bw_flip - < hubbub2->watermarks.b.frac_urg_bw_flip) - wm_pending = true; - - if (safe_to_lower || watermarks->b.frac_urg_bw_nom - > hubbub2->watermarks.b.frac_urg_bw_nom) { - hubbub2->watermarks.b.frac_urg_bw_nom = watermarks->b.frac_urg_bw_nom; - - REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_NOM_B, 0, - DCHUBBUB_ARB_FRAC_URG_BW_NOM_B, watermarks->b.frac_urg_bw_nom); - } else if (watermarks->b.frac_urg_bw_nom - < hubbub2->watermarks.b.frac_urg_bw_nom) - wm_pending = true; - - if (safe_to_lower || watermarks->b.urgent_latency_ns > hubbub2->watermarks.b.urgent_latency_ns) { - hubbub2->watermarks.b.urgent_latency_ns = watermarks->b.urgent_latency_ns; - prog_wm_value = convert_and_clamp(watermarks->b.urgent_latency_ns, - refclk_mhz, 0x3fff); - REG_SET(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_B, 0, - DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_B, prog_wm_value); - } else if (watermarks->b.urgent_latency_ns < hubbub2->watermarks.b.urgent_latency_ns) - wm_pending = true; - - /* clock state C */ - if (safe_to_lower || watermarks->c.urgent_ns > hubbub2->watermarks.c.urgent_ns) { - hubbub2->watermarks.c.urgent_ns = watermarks->c.urgent_ns; - prog_wm_value = convert_and_clamp(watermarks->c.urgent_ns, - refclk_mhz, 0x3fff); - REG_SET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C, 0, - DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C, prog_wm_value); - - DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_C calculated =%d\n" - "HW register value = 0x%x\n", - watermarks->c.urgent_ns, prog_wm_value); - } else if (watermarks->c.urgent_ns < hubbub2->watermarks.c.urgent_ns) - wm_pending = true; - - /* determine the transfer time for a quantity of data for a particular requestor.*/ - if (safe_to_lower || watermarks->c.frac_urg_bw_flip - > hubbub2->watermarks.c.frac_urg_bw_flip) { - hubbub2->watermarks.c.frac_urg_bw_flip = watermarks->c.frac_urg_bw_flip; - - REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_C, 0, - DCHUBBUB_ARB_FRAC_URG_BW_FLIP_C, watermarks->c.frac_urg_bw_flip); - } else if (watermarks->c.frac_urg_bw_flip - < hubbub2->watermarks.c.frac_urg_bw_flip) - wm_pending = true; - - if (safe_to_lower || watermarks->c.frac_urg_bw_nom - > hubbub2->watermarks.c.frac_urg_bw_nom) { - hubbub2->watermarks.c.frac_urg_bw_nom = watermarks->c.frac_urg_bw_nom; - - REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_NOM_C, 0, - DCHUBBUB_ARB_FRAC_URG_BW_NOM_C, watermarks->c.frac_urg_bw_nom); - } else if (watermarks->c.frac_urg_bw_nom - < hubbub2->watermarks.c.frac_urg_bw_nom) - wm_pending = true; - - if (safe_to_lower || watermarks->c.urgent_latency_ns > hubbub2->watermarks.c.urgent_latency_ns) { - hubbub2->watermarks.c.urgent_latency_ns = watermarks->c.urgent_latency_ns; - prog_wm_value = convert_and_clamp(watermarks->c.urgent_latency_ns, - refclk_mhz, 0x3fff); - 
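/*
 * Worked example with illustrative numbers only: assuming the 24 MHz DCHUB
 * reference clock used by hubbub31_get_dchub_ref_freq() earlier in this
 * patch, a 4000 ns urgent watermark programs 4000 * 24 / 1000 = 96 refclk
 * cycles, and the 0x3fff clamp caps the field at 16383 cycles (~683 us).
 */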
REG_SET(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_C, 0, - DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_C, prog_wm_value); - } else if (watermarks->c.urgent_latency_ns < hubbub2->watermarks.c.urgent_latency_ns) - wm_pending = true; - - /* clock state D */ - if (safe_to_lower || watermarks->d.urgent_ns > hubbub2->watermarks.d.urgent_ns) { - hubbub2->watermarks.d.urgent_ns = watermarks->d.urgent_ns; - prog_wm_value = convert_and_clamp(watermarks->d.urgent_ns, - refclk_mhz, 0x3fff); - REG_SET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D, 0, - DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D, prog_wm_value); - - DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_D calculated =%d\n" - "HW register value = 0x%x\n", - watermarks->d.urgent_ns, prog_wm_value); - } else if (watermarks->d.urgent_ns < hubbub2->watermarks.d.urgent_ns) - wm_pending = true; - - /* determine the transfer time for a quantity of data for a particular requestor.*/ - if (safe_to_lower || watermarks->d.frac_urg_bw_flip - > hubbub2->watermarks.d.frac_urg_bw_flip) { - hubbub2->watermarks.d.frac_urg_bw_flip = watermarks->d.frac_urg_bw_flip; - - REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_D, 0, - DCHUBBUB_ARB_FRAC_URG_BW_FLIP_D, watermarks->d.frac_urg_bw_flip); - } else if (watermarks->d.frac_urg_bw_flip - < hubbub2->watermarks.d.frac_urg_bw_flip) - wm_pending = true; - - if (safe_to_lower || watermarks->d.frac_urg_bw_nom - > hubbub2->watermarks.d.frac_urg_bw_nom) { - hubbub2->watermarks.d.frac_urg_bw_nom = watermarks->d.frac_urg_bw_nom; - - REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_NOM_D, 0, - DCHUBBUB_ARB_FRAC_URG_BW_NOM_D, watermarks->d.frac_urg_bw_nom); - } else if (watermarks->d.frac_urg_bw_nom - < hubbub2->watermarks.d.frac_urg_bw_nom) - wm_pending = true; - - if (safe_to_lower || watermarks->d.urgent_latency_ns > hubbub2->watermarks.d.urgent_latency_ns) { - hubbub2->watermarks.d.urgent_latency_ns = watermarks->d.urgent_latency_ns; - prog_wm_value = convert_and_clamp(watermarks->d.urgent_latency_ns, - refclk_mhz, 0x3fff); - REG_SET(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_D, 0, - DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_D, prog_wm_value); - } else if (watermarks->d.urgent_latency_ns < hubbub2->watermarks.d.urgent_latency_ns) - wm_pending = true; - - return wm_pending; -} - -bool hubbub32_program_stutter_watermarks( - struct hubbub *hubbub, - union dcn_watermark_set *watermarks, - unsigned int refclk_mhz, - bool safe_to_lower) -{ - struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub); - uint32_t prog_wm_value; - bool wm_pending = false; - - /* clock state A */ - if (safe_to_lower || watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns - > hubbub2->watermarks.a.cstate_pstate.cstate_enter_plus_exit_ns) { - hubbub2->watermarks.a.cstate_pstate.cstate_enter_plus_exit_ns = - watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns; - prog_wm_value = convert_and_clamp( - watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns, - refclk_mhz, 0xffff); - REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A, 0, - DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A, prog_wm_value); - DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_A calculated =%d\n" - "HW register value = 0x%x\n", - watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value); - } else if (watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns - < hubbub2->watermarks.a.cstate_pstate.cstate_enter_plus_exit_ns) - wm_pending = true; - - if (safe_to_lower || watermarks->a.cstate_pstate.cstate_exit_ns - > hubbub2->watermarks.a.cstate_pstate.cstate_exit_ns) { - hubbub2->watermarks.a.cstate_pstate.cstate_exit_ns = - 
watermarks->a.cstate_pstate.cstate_exit_ns; - prog_wm_value = convert_and_clamp( - watermarks->a.cstate_pstate.cstate_exit_ns, - refclk_mhz, 0xffff); - REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A, 0, - DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A, prog_wm_value); - DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_A calculated =%d\n" - "HW register value = 0x%x\n", - watermarks->a.cstate_pstate.cstate_exit_ns, prog_wm_value); - } else if (watermarks->a.cstate_pstate.cstate_exit_ns - < hubbub2->watermarks.a.cstate_pstate.cstate_exit_ns) - wm_pending = true; - - /* clock state B */ - if (safe_to_lower || watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns - > hubbub2->watermarks.b.cstate_pstate.cstate_enter_plus_exit_ns) { - hubbub2->watermarks.b.cstate_pstate.cstate_enter_plus_exit_ns = - watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns; - prog_wm_value = convert_and_clamp( - watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns, - refclk_mhz, 0xffff); - REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B, 0, - DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B, prog_wm_value); - DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_B calculated =%d\n" - "HW register value = 0x%x\n", - watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value); - } else if (watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns - < hubbub2->watermarks.b.cstate_pstate.cstate_enter_plus_exit_ns) - wm_pending = true; - - if (safe_to_lower || watermarks->b.cstate_pstate.cstate_exit_ns - > hubbub2->watermarks.b.cstate_pstate.cstate_exit_ns) { - hubbub2->watermarks.b.cstate_pstate.cstate_exit_ns = - watermarks->b.cstate_pstate.cstate_exit_ns; - prog_wm_value = convert_and_clamp( - watermarks->b.cstate_pstate.cstate_exit_ns, - refclk_mhz, 0xffff); - REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B, 0, - DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B, prog_wm_value); - DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_B calculated =%d\n" - "HW register value = 0x%x\n", - watermarks->b.cstate_pstate.cstate_exit_ns, prog_wm_value); - } else if (watermarks->b.cstate_pstate.cstate_exit_ns - < hubbub2->watermarks.b.cstate_pstate.cstate_exit_ns) - wm_pending = true; - - /* clock state C */ - if (safe_to_lower || watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns - > hubbub2->watermarks.c.cstate_pstate.cstate_enter_plus_exit_ns) { - hubbub2->watermarks.c.cstate_pstate.cstate_enter_plus_exit_ns = - watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns; - prog_wm_value = convert_and_clamp( - watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns, - refclk_mhz, 0xffff); - REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C, 0, - DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C, prog_wm_value); - DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_C calculated =%d\n" - "HW register value = 0x%x\n", - watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value); - } else if (watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns - < hubbub2->watermarks.c.cstate_pstate.cstate_enter_plus_exit_ns) - wm_pending = true; - - if (safe_to_lower || watermarks->c.cstate_pstate.cstate_exit_ns - > hubbub2->watermarks.c.cstate_pstate.cstate_exit_ns) { - hubbub2->watermarks.c.cstate_pstate.cstate_exit_ns = - watermarks->c.cstate_pstate.cstate_exit_ns; - prog_wm_value = convert_and_clamp( - watermarks->c.cstate_pstate.cstate_exit_ns, - refclk_mhz, 0xffff); - REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C, 0, - DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C, prog_wm_value); - DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_C calculated =%d\n" - "HW register value = 0x%x\n", - 
watermarks->c.cstate_pstate.cstate_exit_ns, prog_wm_value); - } else if (watermarks->c.cstate_pstate.cstate_exit_ns - < hubbub2->watermarks.c.cstate_pstate.cstate_exit_ns) - wm_pending = true; - - /* clock state D */ - if (safe_to_lower || watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns - > hubbub2->watermarks.d.cstate_pstate.cstate_enter_plus_exit_ns) { - hubbub2->watermarks.d.cstate_pstate.cstate_enter_plus_exit_ns = - watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns; - prog_wm_value = convert_and_clamp( - watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns, - refclk_mhz, 0xffff); - REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D, 0, - DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D, prog_wm_value); - DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_D calculated =%d\n" - "HW register value = 0x%x\n", - watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value); - } else if (watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns - < hubbub2->watermarks.d.cstate_pstate.cstate_enter_plus_exit_ns) - wm_pending = true; - - if (safe_to_lower || watermarks->d.cstate_pstate.cstate_exit_ns - > hubbub2->watermarks.d.cstate_pstate.cstate_exit_ns) { - hubbub2->watermarks.d.cstate_pstate.cstate_exit_ns = - watermarks->d.cstate_pstate.cstate_exit_ns; - prog_wm_value = convert_and_clamp( - watermarks->d.cstate_pstate.cstate_exit_ns, - refclk_mhz, 0xffff); - REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D, 0, - DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D, prog_wm_value); - DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_D calculated =%d\n" - "HW register value = 0x%x\n", - watermarks->d.cstate_pstate.cstate_exit_ns, prog_wm_value); - } else if (watermarks->d.cstate_pstate.cstate_exit_ns - < hubbub2->watermarks.d.cstate_pstate.cstate_exit_ns) - wm_pending = true; - - return wm_pending; -} - - -bool hubbub32_program_pstate_watermarks( - struct hubbub *hubbub, - union dcn_watermark_set *watermarks, - unsigned int refclk_mhz, - bool safe_to_lower) -{ - struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub); - uint32_t prog_wm_value; - - bool wm_pending = false; - - /* Section for UCLK_PSTATE_CHANGE_WATERMARKS */ - /* clock state A */ - if (safe_to_lower || watermarks->a.cstate_pstate.pstate_change_ns - > hubbub2->watermarks.a.cstate_pstate.pstate_change_ns) { - hubbub2->watermarks.a.cstate_pstate.pstate_change_ns = - watermarks->a.cstate_pstate.pstate_change_ns; - prog_wm_value = convert_and_clamp( - watermarks->a.cstate_pstate.pstate_change_ns, - refclk_mhz, 0xffff); - REG_SET(DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_A, 0, - DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_A, prog_wm_value); - DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_A calculated =%d\n" - "HW register value = 0x%x\n\n", - watermarks->a.cstate_pstate.pstate_change_ns, prog_wm_value); - } else if (watermarks->a.cstate_pstate.pstate_change_ns - < hubbub2->watermarks.a.cstate_pstate.pstate_change_ns) - wm_pending = true; - - /* clock state B */ - if (safe_to_lower || watermarks->b.cstate_pstate.pstate_change_ns - > hubbub2->watermarks.b.cstate_pstate.pstate_change_ns) { - hubbub2->watermarks.b.cstate_pstate.pstate_change_ns = - watermarks->b.cstate_pstate.pstate_change_ns; - prog_wm_value = convert_and_clamp( - watermarks->b.cstate_pstate.pstate_change_ns, - refclk_mhz, 0xffff); - REG_SET(DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_B, 0, - DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_B, prog_wm_value); - DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_B calculated =%d\n" - "HW register value = 0x%x\n\n", - 
watermarks->b.cstate_pstate.pstate_change_ns, prog_wm_value); - } else if (watermarks->b.cstate_pstate.pstate_change_ns - < hubbub2->watermarks.b.cstate_pstate.pstate_change_ns) - wm_pending = true; - - /* clock state C */ - if (safe_to_lower || watermarks->c.cstate_pstate.pstate_change_ns - > hubbub2->watermarks.c.cstate_pstate.pstate_change_ns) { - hubbub2->watermarks.c.cstate_pstate.pstate_change_ns = - watermarks->c.cstate_pstate.pstate_change_ns; - prog_wm_value = convert_and_clamp( - watermarks->c.cstate_pstate.pstate_change_ns, - refclk_mhz, 0xffff); - REG_SET(DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_C, 0, - DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_C, prog_wm_value); - DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_C calculated =%d\n" - "HW register value = 0x%x\n\n", - watermarks->c.cstate_pstate.pstate_change_ns, prog_wm_value); - } else if (watermarks->c.cstate_pstate.pstate_change_ns - < hubbub2->watermarks.c.cstate_pstate.pstate_change_ns) - wm_pending = true; - - /* clock state D */ - if (safe_to_lower || watermarks->d.cstate_pstate.pstate_change_ns - > hubbub2->watermarks.d.cstate_pstate.pstate_change_ns) { - hubbub2->watermarks.d.cstate_pstate.pstate_change_ns = - watermarks->d.cstate_pstate.pstate_change_ns; - prog_wm_value = convert_and_clamp( - watermarks->d.cstate_pstate.pstate_change_ns, - refclk_mhz, 0xffff); - REG_SET(DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_D, 0, - DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_D, prog_wm_value); - DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_D calculated =%d\n" - "HW register value = 0x%x\n\n", - watermarks->d.cstate_pstate.pstate_change_ns, prog_wm_value); - } else if (watermarks->d.cstate_pstate.pstate_change_ns - < hubbub2->watermarks.d.cstate_pstate.pstate_change_ns) - wm_pending = true; - - /* Section for FCLK_PSTATE_CHANGE_WATERMARKS */ - /* clock state A */ - if (safe_to_lower || watermarks->a.cstate_pstate.fclk_pstate_change_ns - > hubbub2->watermarks.a.cstate_pstate.fclk_pstate_change_ns) { - hubbub2->watermarks.a.cstate_pstate.fclk_pstate_change_ns = - watermarks->a.cstate_pstate.fclk_pstate_change_ns; - prog_wm_value = convert_and_clamp( - watermarks->a.cstate_pstate.fclk_pstate_change_ns, - refclk_mhz, 0xffff); - REG_SET(DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_A, 0, - DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_A, prog_wm_value); - DC_LOG_BANDWIDTH_CALCS("FCLK_CHANGE_WATERMARK_A calculated =%d\n" - "HW register value = 0x%x\n\n", - watermarks->a.cstate_pstate.fclk_pstate_change_ns, prog_wm_value); - } else if (watermarks->a.cstate_pstate.fclk_pstate_change_ns - < hubbub2->watermarks.a.cstate_pstate.fclk_pstate_change_ns) - wm_pending = true; - - /* clock state B */ - if (safe_to_lower || watermarks->b.cstate_pstate.fclk_pstate_change_ns - > hubbub2->watermarks.b.cstate_pstate.fclk_pstate_change_ns) { - hubbub2->watermarks.b.cstate_pstate.fclk_pstate_change_ns = - watermarks->b.cstate_pstate.fclk_pstate_change_ns; - prog_wm_value = convert_and_clamp( - watermarks->b.cstate_pstate.fclk_pstate_change_ns, - refclk_mhz, 0xffff); - REG_SET(DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_B, 0, - DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_B, prog_wm_value); - DC_LOG_BANDWIDTH_CALCS("FCLK_CHANGE_WATERMARK_B calculated =%d\n" - "HW register value = 0x%x\n\n", - watermarks->b.cstate_pstate.fclk_pstate_change_ns, prog_wm_value); - } else if (watermarks->b.cstate_pstate.fclk_pstate_change_ns - < hubbub2->watermarks.b.cstate_pstate.fclk_pstate_change_ns) - wm_pending = true; - - /* clock state C */ - if (safe_to_lower || 
watermarks->c.cstate_pstate.fclk_pstate_change_ns - > hubbub2->watermarks.c.cstate_pstate.fclk_pstate_change_ns) { - hubbub2->watermarks.c.cstate_pstate.fclk_pstate_change_ns = - watermarks->c.cstate_pstate.fclk_pstate_change_ns; - prog_wm_value = convert_and_clamp( - watermarks->c.cstate_pstate.fclk_pstate_change_ns, - refclk_mhz, 0xffff); - REG_SET(DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_C, 0, - DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_C, prog_wm_value); - DC_LOG_BANDWIDTH_CALCS("FCLK_CHANGE_WATERMARK_C calculated =%d\n" - "HW register value = 0x%x\n\n", - watermarks->c.cstate_pstate.fclk_pstate_change_ns, prog_wm_value); - } else if (watermarks->c.cstate_pstate.fclk_pstate_change_ns - < hubbub2->watermarks.c.cstate_pstate.fclk_pstate_change_ns) - wm_pending = true; - - /* clock state D */ - if (safe_to_lower || watermarks->d.cstate_pstate.fclk_pstate_change_ns - > hubbub2->watermarks.d.cstate_pstate.fclk_pstate_change_ns) { - hubbub2->watermarks.d.cstate_pstate.fclk_pstate_change_ns = - watermarks->d.cstate_pstate.fclk_pstate_change_ns; - prog_wm_value = convert_and_clamp( - watermarks->d.cstate_pstate.fclk_pstate_change_ns, - refclk_mhz, 0xffff); - REG_SET(DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_D, 0, - DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_D, prog_wm_value); - DC_LOG_BANDWIDTH_CALCS("FCLK_CHANGE_WATERMARK_D calculated =%d\n" - "HW register value = 0x%x\n\n", - watermarks->d.cstate_pstate.fclk_pstate_change_ns, prog_wm_value); - } else if (watermarks->d.cstate_pstate.fclk_pstate_change_ns - < hubbub2->watermarks.d.cstate_pstate.fclk_pstate_change_ns) - wm_pending = true; - - return wm_pending; -} - - -bool hubbub32_program_usr_watermarks( - struct hubbub *hubbub, - union dcn_watermark_set *watermarks, - unsigned int refclk_mhz, - bool safe_to_lower) -{ - struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub); - uint32_t prog_wm_value; - - bool wm_pending = false; - - /* clock state A */ - if (safe_to_lower || watermarks->a.usr_retraining_ns - > hubbub2->watermarks.a.usr_retraining_ns) { - hubbub2->watermarks.a.usr_retraining_ns = watermarks->a.usr_retraining_ns; - prog_wm_value = convert_and_clamp( - watermarks->a.usr_retraining_ns, - refclk_mhz, 0x3fff); - REG_SET(DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_A, 0, - DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_A, prog_wm_value); - DC_LOG_BANDWIDTH_CALCS("USR_RETRAINING_WATERMARK_A calculated =%d\n" - "HW register value = 0x%x\n\n", - watermarks->a.usr_retraining_ns, prog_wm_value); - } else if (watermarks->a.usr_retraining_ns - < hubbub2->watermarks.a.usr_retraining_ns) - wm_pending = true; - - /* clock state B */ - if (safe_to_lower || watermarks->b.usr_retraining_ns - > hubbub2->watermarks.b.usr_retraining_ns) { - hubbub2->watermarks.b.usr_retraining_ns = watermarks->b.usr_retraining_ns; - prog_wm_value = convert_and_clamp( - watermarks->b.usr_retraining_ns, - refclk_mhz, 0x3fff); - REG_SET(DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_B, 0, - DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_B, prog_wm_value); - DC_LOG_BANDWIDTH_CALCS("USR_RETRAINING_WATERMARK_B calculated =%d\n" - "HW register value = 0x%x\n\n", - watermarks->b.usr_retraining_ns, prog_wm_value); - } else if (watermarks->b.usr_retraining_ns - < hubbub2->watermarks.b.usr_retraining_ns) - wm_pending = true; - - /* clock state C */ - if (safe_to_lower || watermarks->c.usr_retraining_ns - > hubbub2->watermarks.c.usr_retraining_ns) { - hubbub2->watermarks.c.usr_retraining_ns = - watermarks->c.usr_retraining_ns; - prog_wm_value = convert_and_clamp( - watermarks->c.usr_retraining_ns, - 
refclk_mhz, 0x3fff); - REG_SET(DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_C, 0, - DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_C, prog_wm_value); - DC_LOG_BANDWIDTH_CALCS("USR_RETRAINING_WATERMARK_C calculated =%d\n" - "HW register value = 0x%x\n\n", - watermarks->c.usr_retraining_ns, prog_wm_value); - } else if (watermarks->c.usr_retraining_ns - < hubbub2->watermarks.c.usr_retraining_ns) - wm_pending = true; - - /* clock state D */ - if (safe_to_lower || watermarks->d.usr_retraining_ns - > hubbub2->watermarks.d.usr_retraining_ns) { - hubbub2->watermarks.d.usr_retraining_ns = - watermarks->d.usr_retraining_ns; - prog_wm_value = convert_and_clamp( - watermarks->d.usr_retraining_ns, - refclk_mhz, 0x3fff); - REG_SET(DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_D, 0, - DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_D, prog_wm_value); - DC_LOG_BANDWIDTH_CALCS("USR_RETRAINING_WATERMARK_D calculated =%d\n" - "HW register value = 0x%x\n\n", - watermarks->d.usr_retraining_ns, prog_wm_value); - } else if (watermarks->d.usr_retraining_ns - < hubbub2->watermarks.d.usr_retraining_ns) - wm_pending = true; - - return wm_pending; -} - -void hubbub32_force_usr_retraining_allow(struct hubbub *hubbub, bool allow) -{ - struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub); - - /* - * DCHUBBUB_ARB_ALLOW_USR_RETRAINING_FORCE_ENABLE = 1 means enabling forcing value - * DCHUBBUB_ARB_ALLOW_USR_RETRAINING_FORCE_VALUE = 1 or 0, means value to be forced when force enable - */ - - REG_UPDATE_2(DCHUBBUB_ARB_USR_RETRAINING_CNTL, - DCHUBBUB_ARB_ALLOW_USR_RETRAINING_FORCE_VALUE, allow, - DCHUBBUB_ARB_ALLOW_USR_RETRAINING_FORCE_ENABLE, allow); -} - -static bool hubbub32_program_watermarks( - struct hubbub *hubbub, - union dcn_watermark_set *watermarks, - unsigned int refclk_mhz, - bool safe_to_lower) -{ - bool wm_pending = false; - - if (hubbub32_program_urgent_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower)) - wm_pending = true; - - if (hubbub32_program_stutter_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower)) - wm_pending = true; - - if (hubbub32_program_pstate_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower)) - wm_pending = true; - - if (hubbub32_program_usr_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower)) - wm_pending = true; - - /* - * The DCHub arbiter has a mechanism to dynamically rate limit the DCHub request stream to the fabric. - * If the memory controller is fully utilized and the DCHub requestors are - * well ahead of their amortized schedule, then it is safe to prevent the next winner - * from being committed and sent to the fabric. - * The utilization of the memory controller is approximated by ensuring that - * the number of outstanding requests is greater than a threshold specified - * by the ARB_MIN_REQ_OUTSTANDING. To determine that the DCHub requestors are well ahead of the amortized schedule, - * the slack of the next winner is compared with the ARB_SAT_LEVEL in DLG RefClk cycles. - * - * TODO: Revisit request limit after figure out right number. request limit for RM isn't decided yet, set maximum value (0x1FF) - * to turn off it for now. 
- */ - /*REG_SET(DCHUBBUB_ARB_SAT_LEVEL, 0, - DCHUBBUB_ARB_SAT_LEVEL, 60 * refclk_mhz); - REG_UPDATE(DCHUBBUB_ARB_DF_REQ_OUTSTAND, - DCHUBBUB_ARB_MIN_REQ_OUTSTAND, 0x1FF);*/ - - hubbub1_allow_self_refresh_control(hubbub, !hubbub->ctx->dc->debug.disable_stutter); - - hubbub32_force_usr_retraining_allow(hubbub, hubbub->ctx->dc->debug.force_usr_allow); - - return wm_pending; -} - -/* Copy values from WM set A to all other sets */ -static void hubbub32_init_watermarks(struct hubbub *hubbub) -{ - struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub); - uint32_t reg; - - reg = REG_READ(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A); - REG_WRITE(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B, reg); - REG_WRITE(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C, reg); - REG_WRITE(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D, reg); - - reg = REG_READ(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_A); - REG_WRITE(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_B, reg); - REG_WRITE(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_C, reg); - REG_WRITE(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_D, reg); - - reg = REG_READ(DCHUBBUB_ARB_FRAC_URG_BW_NOM_A); - REG_WRITE(DCHUBBUB_ARB_FRAC_URG_BW_NOM_B, reg); - REG_WRITE(DCHUBBUB_ARB_FRAC_URG_BW_NOM_C, reg); - REG_WRITE(DCHUBBUB_ARB_FRAC_URG_BW_NOM_D, reg); - - reg = REG_READ(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_A); - REG_WRITE(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_B, reg); - REG_WRITE(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_C, reg); - REG_WRITE(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_D, reg); - - reg = REG_READ(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A); - REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B, reg); - REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C, reg); - REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D, reg); - - reg = REG_READ(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A); - REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B, reg); - REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C, reg); - REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D, reg); - - reg = REG_READ(DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_A); - REG_WRITE(DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_B, reg); - REG_WRITE(DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_C, reg); - REG_WRITE(DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_D, reg); - - reg = REG_READ(DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_A); - REG_WRITE(DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_B, reg); - REG_WRITE(DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_C, reg); - REG_WRITE(DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_D, reg); - - reg = REG_READ(DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_A); - REG_WRITE(DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_B, reg); - REG_WRITE(DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_C, reg); - REG_WRITE(DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_D, reg); -} - -static void hubbub32_wm_read_state(struct hubbub *hubbub, - struct dcn_hubbub_wm *wm) -{ - struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub); - struct dcn_hubbub_wm_set *s; - - memset(wm, 0, sizeof(struct dcn_hubbub_wm)); - - s = &wm->sets[0]; - s->wm_set = 0; - REG_GET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A, - DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A, &s->data_urgent); - - REG_GET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A, - DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A, &s->sr_enter); - - REG_GET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A, - DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A, &s->sr_exit); - - REG_GET(DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_A, - DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_A, &s->dram_clk_change); - - REG_GET(DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_A, - DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_A, &s->usr_retrain); - - 
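One note on the commented-out ARB_SAT_LEVEL programming above (illustrative arithmetic only, not part of this patch): the saturation level is expressed in DLG RefClk cycles, so a value of 60 * refclk_mhz is simply about 60 microseconds of slack regardless of the actual reference frequency:

	/* Illustrative arithmetic only -- not in the driver. */
	static uint32_t sat_level_cycles(uint32_t slack_us, uint32_t refclk_mhz)
	{
		/* cycles = time_us * freq_MHz; e.g. 60 us * 24 MHz = 1440 cycles */
		return slack_us * refclk_mhz;
	}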
REG_GET(DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_A, - DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_A, &s->fclk_pstate_change); - - s = &wm->sets[1]; - s->wm_set = 1; - REG_GET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B, - DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B, &s->data_urgent); - - REG_GET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B, - DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B, &s->sr_enter); - - REG_GET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B, - DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B, &s->sr_exit); - - REG_GET(DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_B, - DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_B, &s->dram_clk_change); - - REG_GET(DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_B, - DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_B, &s->usr_retrain); - - REG_GET(DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_B, - DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_B, &s->fclk_pstate_change); - - s = &wm->sets[2]; - s->wm_set = 2; - REG_GET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C, - DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C, &s->data_urgent); - - REG_GET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C, - DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C, &s->sr_enter); - - REG_GET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C, - DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C, &s->sr_exit); - - REG_GET(DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_C, - DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_C, &s->dram_clk_change); - - REG_GET(DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_C, - DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_C, &s->usr_retrain); - - REG_GET(DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_C, - DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_C, &s->fclk_pstate_change); - - s = &wm->sets[3]; - s->wm_set = 3; - REG_GET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D, - DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D, &s->data_urgent); - - REG_GET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D, - DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D, &s->sr_enter); - - REG_GET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D, - DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D, &s->sr_exit); - - REG_GET(DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_D, - DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_D, &s->dram_clk_change); - - REG_GET(DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_D, - DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_D, &s->usr_retrain); - - REG_GET(DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_D, - DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_D, &s->fclk_pstate_change); -} - -void hubbub32_force_wm_propagate_to_pipes(struct hubbub *hubbub) -{ - struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub); - uint32_t refclk_mhz = hubbub->ctx->dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000; - uint32_t prog_wm_value = convert_and_clamp(hubbub2->watermarks.a.urgent_ns, - refclk_mhz, 0x3fff); - - REG_SET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A, 0, - DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A, prog_wm_value); -} - -void hubbub32_get_mall_en(struct hubbub *hubbub, unsigned int *mall_in_use) -{ - struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub); - uint32_t prefetch_complete, mall_en; - - REG_GET_2(DCHUBBUB_ARB_MALL_CNTL, MALL_IN_USE, &mall_en, - MALL_PREFETCH_COMPLETE, &prefetch_complete); - - *mall_in_use = prefetch_complete && mall_en; -} - -void hubbub32_init(struct hubbub *hubbub) -{ - struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub); - - /* Enable clock gate*/ - if (hubbub->ctx->dc->debug.disable_clock_gate) { - /*done in hwseq*/ - /*REG_UPDATE(DCFCLK_CNTL, DCFCLK_GATE_DIS, 0);*/ - - REG_UPDATE_2(DCHUBBUB_CLOCK_CNTL, - DISPCLK_R_DCHUBBUB_GATE_DIS, 1, - DCFCLK_R_DCHUBBUB_GATE_DIS, 1); - } - /* - ignore the "df_pre_cstate_req" from the SDP port control. 
- only the DCN will determine when to connect the SDP port - */ - REG_UPDATE(DCHUBBUB_SDPIF_CFG0, - SDPIF_PORT_CONTROL, 1); - /*Set SDP's max outstanding request to 512 - must set the register back to 0 (max outstanding = 256) in zero frame buffer mode*/ - REG_UPDATE(DCHUBBUB_SDPIF_CFG1, - SDPIF_MAX_NUM_OUTSTANDING, 1); - /*must set the registers back to 256 in zero frame buffer mode*/ - REG_UPDATE_2(DCHUBBUB_ARB_DF_REQ_OUTSTAND, - DCHUBBUB_ARB_MAX_REQ_OUTSTAND, 512, - DCHUBBUB_ARB_MIN_REQ_OUTSTAND, 512); -} - -static const struct hubbub_funcs hubbub32_funcs = { - .update_dchub = hubbub2_update_dchub, - .init_dchub_sys_ctx = hubbub3_init_dchub_sys_ctx, - .init_vm_ctx = hubbub2_init_vm_ctx, - .dcc_support_swizzle = hubbub3_dcc_support_swizzle, - .dcc_support_pixel_format = hubbub2_dcc_support_pixel_format, - .get_dcc_compression_cap = hubbub3_get_dcc_compression_cap, - .wm_read_state = hubbub32_wm_read_state, - .get_dchub_ref_freq = hubbub2_get_dchub_ref_freq, - .program_watermarks = hubbub32_program_watermarks, - .allow_self_refresh_control = hubbub1_allow_self_refresh_control, - .is_allow_self_refresh_enabled = hubbub1_is_allow_self_refresh_enabled, - .verify_allow_pstate_change_high = hubbub1_verify_allow_pstate_change_high, - .force_wm_propagate_to_pipes = hubbub32_force_wm_propagate_to_pipes, - .force_pstate_change_control = hubbub3_force_pstate_change_control, - .init_watermarks = hubbub32_init_watermarks, - .program_det_size = dcn32_program_det_size, - .program_compbuf_size = dcn32_program_compbuf_size, - .init_crb = dcn32_init_crb, - .hubbub_read_state = hubbub2_read_state, - .force_usr_retraining_allow = hubbub32_force_usr_retraining_allow, - .set_request_limit = hubbub32_set_request_limit, - .get_mall_en = hubbub32_get_mall_en, -}; - -void hubbub32_construct(struct dcn20_hubbub *hubbub2, - struct dc_context *ctx, - const struct dcn_hubbub_registers *hubbub_regs, - const struct dcn_hubbub_shift *hubbub_shift, - const struct dcn_hubbub_mask *hubbub_mask, - int det_size_kb, - int pixel_chunk_size_kb, - int config_return_buffer_size_kb) -{ - hubbub2->base.ctx = ctx; - hubbub2->base.funcs = &hubbub32_funcs; - hubbub2->regs = hubbub_regs; - hubbub2->shifts = hubbub_shift; - hubbub2->masks = hubbub_mask; - - hubbub2->debug_test_index_pstate = 0xB; - hubbub2->detile_buf_size = det_size_kb * 1024; - hubbub2->pixel_chunk_size = pixel_chunk_size_kb * 1024; - hubbub2->crb_size_segs = config_return_buffer_size_kb / DCN32_CRB_SEGMENT_SIZE_KB; -} diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubbub.h b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubbub.h deleted file mode 100644 index bfc55dbbad1f..000000000000 --- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubbub.h +++ /dev/null @@ -1,166 +0,0 @@ -/* - * Copyright 2016 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. 
- * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - * Authors: AMD - * - */ - -#ifndef __DC_HUBBUB_DCN32_H__ -#define __DC_HUBBUB_DCN32_H__ - -#include "dcn21/dcn21_hubbub.h" - -#define HUBBUB_MASK_SH_LIST_DCN32(mask_sh)\ - HUBBUB_SF(DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_ENABLE, mask_sh), \ - HUBBUB_SF(DCHUBBUB_SOFT_RESET, DCHUBBUB_GLOBAL_SOFT_RESET, mask_sh), \ - HUBBUB_SF(DCHUBBUB_ARB_WATERMARK_CHANGE_CNTL, DCHUBBUB_ARB_WATERMARK_CHANGE_REQUEST, mask_sh), \ - HUBBUB_SF(DCHUBBUB_ARB_WATERMARK_CHANGE_CNTL, DCHUBBUB_ARB_WATERMARK_CHANGE_DONE_INTERRUPT_DISABLE, mask_sh), \ - HUBBUB_SF(DCHUBBUB_ARB_DRAM_STATE_CNTL, DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_VALUE, mask_sh), \ - HUBBUB_SF(DCHUBBUB_ARB_DRAM_STATE_CNTL, DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_ENABLE, mask_sh), \ - HUBBUB_SF(DCHUBBUB_ARB_DRAM_STATE_CNTL, DCHUBBUB_ARB_ALLOW_PSTATE_CHANGE_FORCE_VALUE, mask_sh), \ - HUBBUB_SF(DCHUBBUB_ARB_DRAM_STATE_CNTL, DCHUBBUB_ARB_ALLOW_PSTATE_CHANGE_FORCE_ENABLE, mask_sh), \ - HUBBUB_SF(DCHUBBUB_ARB_SAT_LEVEL, DCHUBBUB_ARB_SAT_LEVEL, mask_sh), \ - HUBBUB_SF(DCHUBBUB_ARB_DF_REQ_OUTSTAND, DCHUBBUB_ARB_MIN_REQ_OUTSTAND, mask_sh), \ - HUBBUB_SF(DCHUBBUB_ARB_DF_REQ_OUTSTAND, DCHUBBUB_ARB_MAX_REQ_OUTSTAND, mask_sh), \ - HUBBUB_SF(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A, DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A, mask_sh), \ - HUBBUB_SF(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B, DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B, mask_sh), \ - HUBBUB_SF(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C, DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C, mask_sh), \ - HUBBUB_SF(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D, DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D, mask_sh), \ - HUBBUB_MASK_SH_LIST_STUTTER(mask_sh), \ - HUBBUB_SF(DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_REFDIV, mask_sh), \ - HUBBUB_SF(DCN_VM_FB_LOCATION_BASE, FB_BASE, mask_sh), \ - HUBBUB_SF(DCN_VM_FB_LOCATION_TOP, FB_TOP, mask_sh), \ - HUBBUB_SF(DCN_VM_FB_OFFSET, FB_OFFSET, mask_sh), \ - HUBBUB_SF(DCN_VM_AGP_BOT, AGP_BOT, mask_sh), \ - HUBBUB_SF(DCN_VM_AGP_TOP, AGP_TOP, mask_sh), \ - HUBBUB_SF(DCN_VM_AGP_BASE, AGP_BASE, mask_sh), \ - HUBBUB_SF(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_A, DCHUBBUB_ARB_FRAC_URG_BW_FLIP_A, mask_sh), \ - HUBBUB_SF(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_B, DCHUBBUB_ARB_FRAC_URG_BW_FLIP_B, mask_sh), \ - HUBBUB_SF(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_C, DCHUBBUB_ARB_FRAC_URG_BW_FLIP_C, mask_sh), \ - HUBBUB_SF(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_D, DCHUBBUB_ARB_FRAC_URG_BW_FLIP_D, mask_sh), \ - HUBBUB_SF(DCHUBBUB_ARB_FRAC_URG_BW_NOM_A, DCHUBBUB_ARB_FRAC_URG_BW_NOM_A, mask_sh), \ - HUBBUB_SF(DCHUBBUB_ARB_FRAC_URG_BW_NOM_B, DCHUBBUB_ARB_FRAC_URG_BW_NOM_B, mask_sh), \ - HUBBUB_SF(DCHUBBUB_ARB_FRAC_URG_BW_NOM_C, DCHUBBUB_ARB_FRAC_URG_BW_NOM_C, mask_sh), \ - HUBBUB_SF(DCHUBBUB_ARB_FRAC_URG_BW_NOM_D, DCHUBBUB_ARB_FRAC_URG_BW_NOM_D, mask_sh), \ - HUBBUB_SF(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_A, DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_A, mask_sh), \ - HUBBUB_SF(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_B, DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_B, mask_sh), \ - HUBBUB_SF(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_C, DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_C, 
mask_sh), \ - HUBBUB_SF(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_D, DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_D, mask_sh),\ - HUBBUB_SF(DCHUBBUB_DEBUG_CTRL_0, DET_DEPTH, mask_sh),\ - HUBBUB_SF(DCHUBBUB_DET0_CTRL, DET0_SIZE, mask_sh),\ - HUBBUB_SF(DCHUBBUB_DET0_CTRL, DET0_SIZE_CURRENT, mask_sh),\ - HUBBUB_SF(DCHUBBUB_DET1_CTRL, DET1_SIZE, mask_sh),\ - HUBBUB_SF(DCHUBBUB_DET1_CTRL, DET1_SIZE_CURRENT, mask_sh),\ - HUBBUB_SF(DCHUBBUB_DET2_CTRL, DET2_SIZE, mask_sh),\ - HUBBUB_SF(DCHUBBUB_DET2_CTRL, DET2_SIZE_CURRENT, mask_sh),\ - HUBBUB_SF(DCHUBBUB_DET3_CTRL, DET3_SIZE, mask_sh),\ - HUBBUB_SF(DCHUBBUB_DET3_CTRL, DET3_SIZE_CURRENT, mask_sh),\ - HUBBUB_SF(DCHUBBUB_COMPBUF_CTRL, COMPBUF_SIZE, mask_sh),\ - HUBBUB_SF(DCHUBBUB_COMPBUF_CTRL, COMPBUF_SIZE_CURRENT, mask_sh),\ - HUBBUB_SF(COMPBUF_RESERVED_SPACE, COMPBUF_RESERVED_SPACE_64B, mask_sh),\ - HUBBUB_SF(COMPBUF_RESERVED_SPACE, COMPBUF_RESERVED_SPACE_ZS, mask_sh),\ - HUBBUB_SF(DCHUBBUB_ARB_USR_RETRAINING_CNTL, DCHUBBUB_ARB_ALLOW_USR_RETRAINING_FORCE_VALUE, mask_sh),\ - HUBBUB_SF(DCHUBBUB_ARB_USR_RETRAINING_CNTL, DCHUBBUB_ARB_ALLOW_USR_RETRAINING_FORCE_ENABLE, mask_sh),\ - HUBBUB_SF(DCHUBBUB_ARB_USR_RETRAINING_CNTL, DCHUBBUB_ARB_DO_NOT_FORCE_ALLOW_USR_RETRAINING_DURING_PSTATE_CHANGE_REQUEST, mask_sh),\ - HUBBUB_SF(DCHUBBUB_ARB_USR_RETRAINING_CNTL, DCHUBBUB_ARB_DO_NOT_FORCE_ALLOW_USR_RETRAINING_DURING_PRE_CSTATE, mask_sh),\ - HUBBUB_SF(DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_A, DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_A, mask_sh), \ - HUBBUB_SF(DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_B, DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_B, mask_sh), \ - HUBBUB_SF(DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_C, DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_C, mask_sh), \ - HUBBUB_SF(DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_D, DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_D, mask_sh),\ - HUBBUB_SF(DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_A, DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_A, mask_sh),\ - HUBBUB_SF(DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_B, DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_B, mask_sh),\ - HUBBUB_SF(DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_C, DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_C, mask_sh),\ - HUBBUB_SF(DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_D, DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_D, mask_sh),\ - HUBBUB_SF(DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_A, DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_A, mask_sh),\ - HUBBUB_SF(DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_B, DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_B, mask_sh),\ - HUBBUB_SF(DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_C, DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_C, mask_sh),\ - HUBBUB_SF(DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_D, DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_D, mask_sh), \ - HUBBUB_SF(DCN_VM_FAULT_ADDR_MSB, DCN_VM_FAULT_ADDR_MSB, mask_sh), \ - HUBBUB_SF(DCN_VM_FAULT_ADDR_LSB, DCN_VM_FAULT_ADDR_LSB, mask_sh), \ - HUBBUB_SF(DCN_VM_FAULT_CNTL, DCN_VM_ERROR_STATUS_CLEAR, mask_sh), \ - HUBBUB_SF(DCN_VM_FAULT_CNTL, DCN_VM_ERROR_STATUS_MODE, mask_sh), \ - HUBBUB_SF(DCN_VM_FAULT_CNTL, DCN_VM_ERROR_INTERRUPT_ENABLE, mask_sh), \ - HUBBUB_SF(DCN_VM_FAULT_CNTL, DCN_VM_RANGE_FAULT_DISABLE, mask_sh), \ - HUBBUB_SF(DCN_VM_FAULT_CNTL, DCN_VM_PRQ_FAULT_DISABLE, mask_sh), \ - HUBBUB_SF(DCN_VM_FAULT_STATUS, DCN_VM_ERROR_STATUS, mask_sh), \ - HUBBUB_SF(DCN_VM_FAULT_STATUS, DCN_VM_ERROR_VMID, mask_sh), \ - HUBBUB_SF(DCN_VM_FAULT_STATUS, DCN_VM_ERROR_TABLE_LEVEL, mask_sh), \ - HUBBUB_SF(DCN_VM_FAULT_STATUS, DCN_VM_ERROR_PIPE, mask_sh), \ - HUBBUB_SF(DCN_VM_FAULT_STATUS, DCN_VM_ERROR_INTERRUPT_STATUS, mask_sh),\ - 
HUBBUB_SF(SDPIF_REQUEST_RATE_LIMIT, SDPIF_REQUEST_RATE_LIMIT, mask_sh),\ - HUBBUB_SF(DCHUBBUB_CLOCK_CNTL, DISPCLK_R_DCHUBBUB_GATE_DIS, mask_sh),\ - HUBBUB_SF(DCHUBBUB_CLOCK_CNTL, DCFCLK_R_DCHUBBUB_GATE_DIS, mask_sh),\ - HUBBUB_SF(DCHUBBUB_SDPIF_CFG0, SDPIF_PORT_CONTROL, mask_sh),\ - HUBBUB_SF(DCHUBBUB_SDPIF_CFG1, SDPIF_MAX_NUM_OUTSTANDING, mask_sh),\ - HUBBUB_SF(DCHUBBUB_MEM_PWR_MODE_CTRL, DET_MEM_PWR_LS_MODE, mask_sh),\ - HUBBUB_SF(DCHUBBUB_ARB_MALL_CNTL, MALL_PREFETCH_COMPLETE, mask_sh),\ - HUBBUB_SF(DCHUBBUB_ARB_MALL_CNTL, MALL_IN_USE, mask_sh) - - - -bool hubbub32_program_urgent_watermarks( - struct hubbub *hubbub, - union dcn_watermark_set *watermarks, - unsigned int refclk_mhz, - bool safe_to_lower); - -bool hubbub32_program_stutter_watermarks( - struct hubbub *hubbub, - union dcn_watermark_set *watermarks, - unsigned int refclk_mhz, - bool safe_to_lower); - -bool hubbub32_program_pstate_watermarks( - struct hubbub *hubbub, - union dcn_watermark_set *watermarks, - unsigned int refclk_mhz, - bool safe_to_lower); - -bool hubbub32_program_usr_watermarks( - struct hubbub *hubbub, - union dcn_watermark_set *watermarks, - unsigned int refclk_mhz, - bool safe_to_lower); - -void hubbub32_force_usr_retraining_allow(struct hubbub *hubbub, bool allow); - -void hubbub32_force_wm_propagate_to_pipes(struct hubbub *hubbub); - -void hubbub32_init(struct hubbub *hubbub); - -void dcn32_program_det_size(struct hubbub *hubbub, int hubp_inst, unsigned int det_buffer_size_in_kbyte); - -void hubbub32_construct(struct dcn20_hubbub *hubbub2, - struct dc_context *ctx, - const struct dcn_hubbub_registers *hubbub_regs, - const struct dcn_hubbub_shift *hubbub_shift, - const struct dcn_hubbub_mask *hubbub_mask, - int det_size_kb, - int pixel_chunk_size_kb, - int config_return_buffer_size_kb); - -void hubbub32_set_request_limit(struct hubbub *hubbub, int umc_count, int words_per_umc); - -void hubbub32_get_mall_en(struct hubbub *hubbub, unsigned int *mall_in_use); - -void dcn32_program_compbuf_size(struct hubbub *hubbub, unsigned int compbuf_size_kb, bool safe_to_increase); - -#endif diff --git a/drivers/gpu/drm/amd/display/dc/dcn35/Makefile b/drivers/gpu/drm/amd/display/dc/dcn35/Makefile index d5b4533d2f62..09fd994ae158 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn35/Makefile +++ b/drivers/gpu/drm/amd/display/dc/dcn35/Makefile @@ -12,7 +12,7 @@ DCN35 = dcn35_dio_stream_encoder.o \ dcn35_dio_link_encoder.o dcn35_dccg.o \ - dcn35_hubp.o dcn35_hubbub.o \ + dcn35_hubp.o \ dcn35_mmhubbub.o dcn35_opp.o dcn35_pg_cntl.o dcn35_dwb.o AMD_DAL_DCN35 = $(addprefix $(AMDDALPATH)/dc/dcn35/,$(DCN35)) diff --git a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_hubbub.c deleted file mode 100644 index 6293173ba2b9..000000000000 --- a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_hubbub.c +++ /dev/null @@ -1,611 +0,0 @@ -/* SPDX-License-Identifier: MIT */ -/* - * Copyright 2023 Advanced Micro Devices, Inc. 
- * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - * Authors: AMD - * - */ - - -#include "dcn30/dcn30_hubbub.h" -#include "dcn31/dcn31_hubbub.h" -#include "dcn32/dcn32_hubbub.h" -#include "dcn35_hubbub.h" -#include "dm_services.h" -#include "reg_helper.h" - - -#define CTX \ - hubbub2->base.ctx -#define DC_LOGGER \ - hubbub2->base.ctx->logger -#define REG(reg)\ - hubbub2->regs->reg - -#undef FN -#define FN(reg_name, field_name) \ - hubbub2->shifts->field_name, hubbub2->masks->field_name - -#define DCN35_CRB_SEGMENT_SIZE_KB 64 - -static void dcn35_init_crb(struct hubbub *hubbub) -{ - struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub); - - REG_GET(DCHUBBUB_DET0_CTRL, DET0_SIZE_CURRENT, - &hubbub2->det0_size); - - REG_GET(DCHUBBUB_DET1_CTRL, DET1_SIZE_CURRENT, - &hubbub2->det1_size); - - REG_GET(DCHUBBUB_DET2_CTRL, DET2_SIZE_CURRENT, - &hubbub2->det2_size); - - REG_GET(DCHUBBUB_DET3_CTRL, DET3_SIZE_CURRENT, - &hubbub2->det3_size); - - REG_GET(DCHUBBUB_COMPBUF_CTRL, COMPBUF_SIZE_CURRENT, - &hubbub2->compbuf_size_segments); - - REG_SET_2(COMPBUF_RESERVED_SPACE, 0, - COMPBUF_RESERVED_SPACE_64B, hubbub2->pixel_chunk_size / 32, - COMPBUF_RESERVED_SPACE_ZS, hubbub2->pixel_chunk_size / 128); - REG_UPDATE(DCHUBBUB_DEBUG_CTRL_0, DET_DEPTH, 0x5FF); -} - -static void dcn35_program_compbuf_size(struct hubbub *hubbub, unsigned int compbuf_size_kb, bool safe_to_increase) -{ - struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub); - unsigned int compbuf_size_segments = (compbuf_size_kb + DCN35_CRB_SEGMENT_SIZE_KB - 1) / DCN35_CRB_SEGMENT_SIZE_KB; - - if (safe_to_increase || compbuf_size_segments <= hubbub2->compbuf_size_segments) { - if (compbuf_size_segments > hubbub2->compbuf_size_segments) { - REG_WAIT(DCHUBBUB_DET0_CTRL, DET0_SIZE_CURRENT, hubbub2->det0_size, 1, 100); - REG_WAIT(DCHUBBUB_DET1_CTRL, DET1_SIZE_CURRENT, hubbub2->det1_size, 1, 100); - REG_WAIT(DCHUBBUB_DET2_CTRL, DET2_SIZE_CURRENT, hubbub2->det2_size, 1, 100); - REG_WAIT(DCHUBBUB_DET3_CTRL, DET3_SIZE_CURRENT, hubbub2->det3_size, 1, 100); - } - /* Should never be hit, if it is we have an erroneous hw config*/ - ASSERT(hubbub2->det0_size + hubbub2->det1_size + hubbub2->det2_size - + hubbub2->det3_size + compbuf_size_segments <= hubbub2->crb_size_segs); - REG_UPDATE(DCHUBBUB_COMPBUF_CTRL, COMPBUF_SIZE, compbuf_size_segments); - hubbub2->compbuf_size_segments = compbuf_size_segments; - ASSERT(REG_GET(DCHUBBUB_COMPBUF_CTRL, CONFIG_ERROR, &compbuf_size_segments) && !compbuf_size_segments); - } -} - -static 
uint32_t convert_and_clamp( - uint32_t wm_ns, - uint32_t refclk_mhz, - uint32_t clamp_value) -{ - uint32_t ret_val = 0; - - ret_val = wm_ns * refclk_mhz; - - ret_val /= 1000; - - if (ret_val > clamp_value) - ret_val = clamp_value; - - return ret_val; -} - -static bool hubbub35_program_stutter_z8_watermarks( - struct hubbub *hubbub, - union dcn_watermark_set *watermarks, - unsigned int refclk_mhz, - bool safe_to_lower) -{ - struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub); - uint32_t prog_wm_value; - bool wm_pending = false; - - /* clock state A */ - if (watermarks->a.cstate_pstate.cstate_enter_plus_exit_z8_ns - > hubbub2->watermarks.a.cstate_pstate.cstate_enter_plus_exit_z8_ns) { - hubbub2->watermarks.a.cstate_pstate.cstate_enter_plus_exit_z8_ns = - watermarks->a.cstate_pstate.cstate_enter_plus_exit_z8_ns; - prog_wm_value = convert_and_clamp( - watermarks->a.cstate_pstate.cstate_enter_plus_exit_z8_ns, - refclk_mhz, 0xfffff); - REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_A, 0, - DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_A, prog_wm_value); - DC_LOG_BANDWIDTH_CALCS("SR_ENTER_WATERMARK_Z8_A calculated =%d\n" - "HW register value = 0x%x\n", - watermarks->a.cstate_pstate.cstate_enter_plus_exit_z8_ns, prog_wm_value); - } else if (watermarks->a.cstate_pstate.cstate_enter_plus_exit_z8_ns - < hubbub2->watermarks.a.cstate_pstate.cstate_enter_plus_exit_z8_ns) - wm_pending = true; - - if (safe_to_lower || watermarks->a.cstate_pstate.cstate_exit_z8_ns - > hubbub2->watermarks.a.cstate_pstate.cstate_exit_z8_ns) { - hubbub2->watermarks.a.cstate_pstate.cstate_exit_z8_ns = - watermarks->a.cstate_pstate.cstate_exit_z8_ns; - prog_wm_value = convert_and_clamp( - watermarks->a.cstate_pstate.cstate_exit_z8_ns, - refclk_mhz, 0xfffff); - REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_A, 0, - DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_A, prog_wm_value); - DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_Z8_A calculated =%d\n" - "HW register value = 0x%x\n", - watermarks->a.cstate_pstate.cstate_exit_z8_ns, prog_wm_value); - } else if (watermarks->a.cstate_pstate.cstate_exit_z8_ns - < hubbub2->watermarks.a.cstate_pstate.cstate_exit_z8_ns) - wm_pending = true; - - /* clock state B */ - - if (safe_to_lower || watermarks->b.cstate_pstate.cstate_enter_plus_exit_z8_ns - > hubbub2->watermarks.b.cstate_pstate.cstate_enter_plus_exit_z8_ns) { - hubbub2->watermarks.b.cstate_pstate.cstate_enter_plus_exit_z8_ns = - watermarks->b.cstate_pstate.cstate_enter_plus_exit_z8_ns; - prog_wm_value = convert_and_clamp( - watermarks->b.cstate_pstate.cstate_enter_plus_exit_z8_ns, - refclk_mhz, 0xfffff); - REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_B, 0, - DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_B, prog_wm_value); - DC_LOG_BANDWIDTH_CALCS("SR_ENTER_WATERMARK_Z8_B calculated =%d\n" - "HW register value = 0x%x\n", - watermarks->b.cstate_pstate.cstate_enter_plus_exit_z8_ns, prog_wm_value); - } else if (watermarks->b.cstate_pstate.cstate_enter_plus_exit_z8_ns - < hubbub2->watermarks.b.cstate_pstate.cstate_enter_plus_exit_z8_ns) - wm_pending = true; - - if (safe_to_lower || watermarks->b.cstate_pstate.cstate_exit_z8_ns - > hubbub2->watermarks.b.cstate_pstate.cstate_exit_z8_ns) { - hubbub2->watermarks.b.cstate_pstate.cstate_exit_z8_ns = - watermarks->b.cstate_pstate.cstate_exit_z8_ns; - prog_wm_value = convert_and_clamp( - watermarks->b.cstate_pstate.cstate_exit_z8_ns, - refclk_mhz, 0xfffff); - REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_B, 0, - DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_B, prog_wm_value); - 
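For reference (worked example only, not part of this patch), convert_and_clamp() above is a nanoseconds-to-RefClk-cycles conversion followed by a clamp to the width of the destination register field:

	/* Worked example with hypothetical values -- not in the driver. */
	static uint32_t convert_and_clamp_example(void)
	{
		uint32_t wm_ns = 9000;		/* a hypothetical 9 us watermark */
		uint32_t refclk_mhz = 24;	/* example DLG RefClk frequency */
		uint32_t clamp_value = 0xfffff;	/* width of the Z8 watermark fields */
		uint32_t cycles = wm_ns * refclk_mhz / 1000;	/* 9000 * 24 / 1000 = 216 */

		return cycles > clamp_value ? clamp_value : cycles;	/* 216, no clamp */
	}

The non-Z8 watermarks use the same conversion with smaller clamps (0x3fff or 0xffff), matching their narrower register fields.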
DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_Z8_B calculated =%d\n" - "HW register value = 0x%x\n", - watermarks->b.cstate_pstate.cstate_exit_z8_ns, prog_wm_value); - } else if (watermarks->b.cstate_pstate.cstate_exit_z8_ns - < hubbub2->watermarks.b.cstate_pstate.cstate_exit_z8_ns) - wm_pending = true; - - /* clock state C */ - if (safe_to_lower || watermarks->c.cstate_pstate.cstate_enter_plus_exit_z8_ns - > hubbub2->watermarks.c.cstate_pstate.cstate_enter_plus_exit_z8_ns) { - hubbub2->watermarks.c.cstate_pstate.cstate_enter_plus_exit_z8_ns = - watermarks->c.cstate_pstate.cstate_enter_plus_exit_z8_ns; - prog_wm_value = convert_and_clamp( - watermarks->c.cstate_pstate.cstate_enter_plus_exit_z8_ns, - refclk_mhz, 0xfffff); - REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_C, 0, - DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_C, prog_wm_value); - DC_LOG_BANDWIDTH_CALCS("SR_ENTER_WATERMARK_Z8_C calculated =%d\n" - "HW register value = 0x%x\n", - watermarks->c.cstate_pstate.cstate_enter_plus_exit_z8_ns, prog_wm_value); - } else if (watermarks->c.cstate_pstate.cstate_enter_plus_exit_z8_ns - < hubbub2->watermarks.c.cstate_pstate.cstate_enter_plus_exit_z8_ns) - wm_pending = true; - - if (safe_to_lower || watermarks->c.cstate_pstate.cstate_exit_z8_ns - > hubbub2->watermarks.c.cstate_pstate.cstate_exit_z8_ns) { - hubbub2->watermarks.c.cstate_pstate.cstate_exit_z8_ns = - watermarks->c.cstate_pstate.cstate_exit_z8_ns; - prog_wm_value = convert_and_clamp( - watermarks->c.cstate_pstate.cstate_exit_z8_ns, - refclk_mhz, 0xfffff); - REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_C, 0, - DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_C, prog_wm_value); - DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_Z8_C calculated =%d\n" - "HW register value = 0x%x\n", - watermarks->c.cstate_pstate.cstate_exit_z8_ns, prog_wm_value); - } else if (watermarks->c.cstate_pstate.cstate_exit_z8_ns - < hubbub2->watermarks.c.cstate_pstate.cstate_exit_z8_ns) - wm_pending = true; - - /* clock state D */ - if (safe_to_lower || watermarks->d.cstate_pstate.cstate_enter_plus_exit_z8_ns - > hubbub2->watermarks.d.cstate_pstate.cstate_enter_plus_exit_z8_ns) { - hubbub2->watermarks.d.cstate_pstate.cstate_enter_plus_exit_z8_ns = - watermarks->d.cstate_pstate.cstate_enter_plus_exit_z8_ns; - prog_wm_value = convert_and_clamp( - watermarks->d.cstate_pstate.cstate_enter_plus_exit_z8_ns, - refclk_mhz, 0xfffff); - REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_D, 0, - DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_D, prog_wm_value); - DC_LOG_BANDWIDTH_CALCS("SR_ENTER_WATERMARK_Z8_D calculated =%d\n" - "HW register value = 0x%x\n", - watermarks->d.cstate_pstate.cstate_enter_plus_exit_z8_ns, prog_wm_value); - } else if (watermarks->d.cstate_pstate.cstate_enter_plus_exit_z8_ns - < hubbub2->watermarks.d.cstate_pstate.cstate_enter_plus_exit_z8_ns) - wm_pending = true; - - if (safe_to_lower || watermarks->d.cstate_pstate.cstate_exit_z8_ns - > hubbub2->watermarks.d.cstate_pstate.cstate_exit_z8_ns) { - hubbub2->watermarks.d.cstate_pstate.cstate_exit_z8_ns = - watermarks->d.cstate_pstate.cstate_exit_z8_ns; - prog_wm_value = convert_and_clamp( - watermarks->d.cstate_pstate.cstate_exit_z8_ns, - refclk_mhz, 0xfffff); - REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_D, 0, - DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_D, prog_wm_value); - DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_Z8_D calculated =%d\n" - "HW register value = 0x%x\n", - watermarks->d.cstate_pstate.cstate_exit_z8_ns, prog_wm_value); - } else if (watermarks->d.cstate_pstate.cstate_exit_z8_ns - < 
hubbub2->watermarks.d.cstate_pstate.cstate_exit_z8_ns) - wm_pending = true; - - return wm_pending; -} - -static void hubbub35_get_dchub_ref_freq(struct hubbub *hubbub, - unsigned int dccg_ref_freq_inKhz, - unsigned int *dchub_ref_freq_inKhz) -{ - struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub); - uint32_t ref_div = 0; - uint32_t ref_en = 0; - unsigned int dc_refclk_khz = 24000; - - REG_GET_2(DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_REFDIV, &ref_div, - DCHUBBUB_GLOBAL_TIMER_ENABLE, &ref_en); - - if (ref_en) { - if (ref_div == 2) - *dchub_ref_freq_inKhz = dc_refclk_khz / 2; - else - *dchub_ref_freq_inKhz = dc_refclk_khz; - - /* - * The external Reference Clock may change based on the board or - * platform requirements and the programmable integer divide must - * be programmed to provide a suitable DLG RefClk frequency between - * a minimum of 20MHz and maximum of 50MHz - */ - if (*dchub_ref_freq_inKhz < 20000 || *dchub_ref_freq_inKhz > 50000) - ASSERT_CRITICAL(false); - - return; - } else { - *dchub_ref_freq_inKhz = dc_refclk_khz; - /*init sequence issue on bringup patch*/ - REG_UPDATE_2(DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_REFDIV, 1, - DCHUBBUB_GLOBAL_TIMER_ENABLE, 1); - // HUBBUB global timer must be enabled. - ASSERT_CRITICAL(false); - return; - } -} - - -static bool hubbub35_program_watermarks( - struct hubbub *hubbub, - union dcn_watermark_set *watermarks, - unsigned int refclk_mhz, - bool safe_to_lower) -{ - bool wm_pending = false; - struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub); - - if (hubbub32_program_urgent_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower)) - wm_pending = true; - - if (hubbub32_program_stutter_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower)) - wm_pending = true; - - if (hubbub32_program_pstate_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower)) - wm_pending = true; - - if (hubbub32_program_usr_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower)) - wm_pending = true; - - if (hubbub35_program_stutter_z8_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower)) - wm_pending = true; - - REG_SET(DCHUBBUB_ARB_SAT_LEVEL, 0, - DCHUBBUB_ARB_SAT_LEVEL, 60 * refclk_mhz); - REG_UPDATE_2(DCHUBBUB_ARB_DF_REQ_OUTSTAND, - DCHUBBUB_ARB_MIN_REQ_OUTSTAND, 0xFF, - DCHUBBUB_ARB_MIN_REQ_OUTSTAND_COMMIT_THRESHOLD, 0xA);/*hw delta*/ - REG_UPDATE(DCHUBBUB_ARB_HOSTVM_CNTL, DCHUBBUB_ARB_MAX_QOS_COMMIT_THRESHOLD, 0xF); - - hubbub1_allow_self_refresh_control(hubbub, !hubbub->ctx->dc->debug.disable_stutter); - - hubbub32_force_usr_retraining_allow(hubbub, hubbub->ctx->dc->debug.force_usr_allow); - - return wm_pending; -} - -/* Copy values from WM set A to all other sets */ -static void hubbub35_init_watermarks(struct hubbub *hubbub) -{ - struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub); - uint32_t reg; - - reg = REG_READ(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A); - REG_WRITE(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B, reg); - REG_WRITE(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C, reg); - REG_WRITE(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D, reg); - - reg = REG_READ(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_A); - REG_WRITE(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_B, reg); - REG_WRITE(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_C, reg); - REG_WRITE(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_D, reg); - - reg = REG_READ(DCHUBBUB_ARB_FRAC_URG_BW_NOM_A); - REG_WRITE(DCHUBBUB_ARB_FRAC_URG_BW_NOM_B, reg); - REG_WRITE(DCHUBBUB_ARB_FRAC_URG_BW_NOM_C, reg); - REG_WRITE(DCHUBBUB_ARB_FRAC_URG_BW_NOM_D, reg); - - reg = REG_READ(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_A); - 
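A brief sketch of the reference selection in hubbub35_get_dchub_ref_freq() above (illustrative only, not part of this patch): the timer reference is the fixed 24 MHz clock, optionally halved by the programmed REFDIV, and the 20-50 MHz window from the comment is the sanity check applied to the result:

	/* Illustrative only -- not in the driver; the real code also enables the
	 * global timer when it was left disabled and asserts on an out-of-range
	 * result instead of returning 0.
	 */
	static unsigned int dlg_refclk_khz(bool timer_en, uint32_t refdiv)
	{
		const unsigned int dc_refclk_khz = 24000;
		unsigned int khz = dc_refclk_khz;

		if (timer_en && refdiv == 2)
			khz /= 2;	/* a divider of 2 halves the reference */

		/* documented DLG RefClk window: 20 MHz .. 50 MHz */
		return (khz >= 20000 && khz <= 50000) ? khz : 0;
	}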
REG_WRITE(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_B, reg); - REG_WRITE(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_C, reg); - REG_WRITE(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_D, reg); - - reg = REG_READ(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A); - REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B, reg); - REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C, reg); - REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D, reg); - - reg = REG_READ(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A); - REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B, reg); - REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C, reg); - REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D, reg); - - reg = REG_READ(DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_A); - REG_WRITE(DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_B, reg); - REG_WRITE(DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_C, reg); - REG_WRITE(DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_D, reg); - - reg = REG_READ(DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_A); - REG_WRITE(DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_B, reg); - REG_WRITE(DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_C, reg); - REG_WRITE(DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_D, reg); - - reg = REG_READ(DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_A); - REG_WRITE(DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_B, reg); - REG_WRITE(DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_C, reg); - REG_WRITE(DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_D, reg); - - reg = REG_READ(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_A); - REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_B, reg); - REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_C, reg); - REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_D, reg); - - reg = REG_READ(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_A); - REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_B, reg); - REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_C, reg); - REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_D, reg); - -} - -static void hubbub35_wm_read_state(struct hubbub *hubbub, - struct dcn_hubbub_wm *wm) -{ - struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub); - struct dcn_hubbub_wm_set *s; - - memset(wm, 0, sizeof(struct dcn_hubbub_wm)); - - s = &wm->sets[0]; - s->wm_set = 0; - REG_GET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A, - DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A, &s->data_urgent); - - REG_GET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A, - DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A, &s->sr_enter); - - REG_GET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A, - DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A, &s->sr_exit); - - REG_GET(DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_A, - DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_A, &s->dram_clk_change); - - REG_GET(DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_A, - DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_A, &s->usr_retrain); - - REG_GET(DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_A, - DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_A, &s->fclk_pstate_change); - - REG_GET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_A, - DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_A, &s->sr_enter_exit_Z8); - - REG_GET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_A, - DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_A, &s->sr_enter_Z8); - s = &wm->sets[1]; - s->wm_set = 1; - REG_GET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B, - DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B, &s->data_urgent); - - REG_GET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B, - DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B, &s->sr_enter); - - REG_GET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B, - DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B, &s->sr_exit); - - REG_GET(DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_B, - 
DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_B, &s->dram_clk_change); - - REG_GET(DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_B, - DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_B, &s->usr_retrain); - - REG_GET(DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_B, - DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_B, &s->fclk_pstate_change); - - REG_GET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_B, - DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_B, &s->sr_enter_exit_Z8); - - REG_GET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_B, - DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_B, &s->sr_enter_Z8); - - s = &wm->sets[2]; - s->wm_set = 2; - REG_GET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C, - DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C, &s->data_urgent); - - REG_GET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C, - DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C, &s->sr_enter); - - REG_GET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C, - DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C, &s->sr_exit); - - REG_GET(DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_C, - DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_C, &s->dram_clk_change); - - REG_GET(DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_C, - DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_C, &s->usr_retrain); - - REG_GET(DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_C, - DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_C, &s->fclk_pstate_change); - - REG_GET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_C, - DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_C, &s->sr_enter_exit_Z8); - - REG_GET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_C, - DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_C, &s->sr_enter_Z8); - - s = &wm->sets[3]; - s->wm_set = 3; - REG_GET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D, - DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D, &s->data_urgent); - - REG_GET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D, - DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D, &s->sr_enter); - - REG_GET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D, - DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D, &s->sr_exit); - - REG_GET(DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_D, - DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_D, &s->dram_clk_change); - - REG_GET(DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_D, - DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_D, &s->usr_retrain); - - REG_GET(DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_D, - DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_D, &s->fclk_pstate_change); - - REG_GET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_D, - DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_D, &s->sr_enter_exit_Z8); - - REG_GET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_D, - DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_D, &s->sr_enter_Z8); -} - -static void hubbub35_set_fgcg(struct dcn20_hubbub *hubbub2, bool enable) -{ - REG_UPDATE(DCHUBBUB_CLOCK_CNTL, DCHUBBUB_FGCG_REP_DIS, !enable); -} - -static void hubbub35_init(struct hubbub *hubbub) -{ - struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub); - /*Enable clock gaters*/ - if (hubbub->ctx->dc->debug.disable_clock_gate) { - /*done in hwseq*/ - /*REG_UPDATE(DCFCLK_CNTL, DCFCLK_GATE_DIS, 0);*/ - - REG_UPDATE_2(DCHUBBUB_CLOCK_CNTL, - DISPCLK_R_DCHUBBUB_GATE_DIS, 1, - DCFCLK_R_DCHUBBUB_GATE_DIS, 1); - } - hubbub35_set_fgcg(hubbub2, - hubbub->ctx->dc->debug.enable_fine_grain_clock_gating - .bits.dchubbub); - /* - ignore the "df_pre_cstate_req" from the SDP port control. 
- only the DCN will determine when to connect the SDP port - */ - REG_UPDATE(DCHUBBUB_SDPIF_CFG0, - SDPIF_PORT_CONTROL, 1); - /*Set SDP's max outstanding request - When set to 1: Max outstanding is 512 - When set to 0: Max outstanding is 256 - must set the register back to 0 (max outstanding = 256) in zero frame buffer mode*/ - REG_UPDATE(DCHUBBUB_SDPIF_CFG1, - SDPIF_MAX_NUM_OUTSTANDING, 0); - - REG_UPDATE_2(DCHUBBUB_ARB_DF_REQ_OUTSTAND, - DCHUBBUB_ARB_MAX_REQ_OUTSTAND, 256, - DCHUBBUB_ARB_MIN_REQ_OUTSTAND, 256); - -} - -/*static void hubbub35_set_request_limit(struct hubbub *hubbub, - int memory_channel_count, - int words_per_channel) -{ - struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub); - - uint32_t request_limit = 3 * memory_channel_count * words_per_channel / 4; - - ASSERT((request_limit & (~0xFFF)) == 0); //field is only 24 bits long - ASSERT(request_limit > 0); //field is only 24 bits long - - if (request_limit > 0xFFF) - request_limit = 0xFFF; - - if (request_limit > 0) - REG_UPDATE(SDPIF_REQUEST_RATE_LIMIT, SDPIF_REQUEST_RATE_LIMIT, request_limit); -}*/ - -static const struct hubbub_funcs hubbub35_funcs = { - .update_dchub = hubbub2_update_dchub, - .init_dchub_sys_ctx = hubbub31_init_dchub_sys_ctx, - .init_vm_ctx = hubbub2_init_vm_ctx, - .dcc_support_swizzle = hubbub3_dcc_support_swizzle, - .dcc_support_pixel_format = hubbub2_dcc_support_pixel_format, - .get_dcc_compression_cap = hubbub3_get_dcc_compression_cap, - .wm_read_state = hubbub35_wm_read_state, - .get_dchub_ref_freq = hubbub35_get_dchub_ref_freq, - .program_watermarks = hubbub35_program_watermarks, - .allow_self_refresh_control = hubbub1_allow_self_refresh_control, - .is_allow_self_refresh_enabled = hubbub1_is_allow_self_refresh_enabled, - .verify_allow_pstate_change_high = hubbub1_verify_allow_pstate_change_high, - .force_wm_propagate_to_pipes = hubbub32_force_wm_propagate_to_pipes, - .force_pstate_change_control = hubbub3_force_pstate_change_control, - .init_watermarks = hubbub35_init_watermarks, - .program_det_size = dcn32_program_det_size, - .program_compbuf_size = dcn35_program_compbuf_size, - .init_crb = dcn35_init_crb, - .hubbub_read_state = hubbub2_read_state, - .force_usr_retraining_allow = hubbub32_force_usr_retraining_allow, - .dchubbub_init = hubbub35_init, -}; - -void hubbub35_construct(struct dcn20_hubbub *hubbub2, - struct dc_context *ctx, - const struct dcn_hubbub_registers *hubbub_regs, - const struct dcn_hubbub_shift *hubbub_shift, - const struct dcn_hubbub_mask *hubbub_mask, - int det_size_kb, - int pixel_chunk_size_kb, - int config_return_buffer_size_kb) -{ - hubbub2->base.ctx = ctx; - hubbub2->base.funcs = &hubbub35_funcs; - hubbub2->regs = hubbub_regs; - hubbub2->shifts = hubbub_shift; - hubbub2->masks = hubbub_mask; - - hubbub2->debug_test_index_pstate = 0xB; - hubbub2->detile_buf_size = det_size_kb * 1024; - hubbub2->pixel_chunk_size = pixel_chunk_size_kb * 1024; - hubbub2->crb_size_segs = config_return_buffer_size_kb / DCN35_CRB_SEGMENT_SIZE_KB; /*todo*/ -} diff --git a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_hubbub.h b/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_hubbub.h deleted file mode 100644 index 54cf00ffceb8..000000000000 --- a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_hubbub.h +++ /dev/null @@ -1,155 +0,0 @@ -/* SPDX-License-Identifier: MIT */ -/* - * Copyright 2023 Advanced Micro Devices, Inc. 
- * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - * Authors: AMD - * - */ - -#ifndef __DC_HUBBUB_DCN35_H__ -#define __DC_HUBBUB_DCN35_H__ - -#include "dcn32/dcn32_hubbub.h" - -#define HUBBUB_REG_LIST_DCN35(id)\ - SR(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A),\ - SR(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B),\ - SR(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C),\ - SR(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D),\ - SR(DCHUBBUB_ARB_WATERMARK_CHANGE_CNTL),\ - SR(DCHUBBUB_ARB_DRAM_STATE_CNTL),\ - SR(DCHUBBUB_ARB_SAT_LEVEL),\ - SR(DCHUBBUB_ARB_DF_REQ_OUTSTAND),\ - SR(DCHUBBUB_GLOBAL_TIMER_CNTL), \ - SR(DCHUBBUB_SOFT_RESET),\ - SR(DCHUBBUB_CRC_CTRL), \ - SR(DCN_VM_FB_LOCATION_BASE),\ - SR(DCN_VM_FB_LOCATION_TOP),\ - SR(DCN_VM_FB_OFFSET),\ - SR(DCN_VM_AGP_BOT),\ - SR(DCN_VM_AGP_TOP),\ - SR(DCN_VM_AGP_BASE),\ - HUBBUB_SR_WATERMARK_REG_LIST(), \ - SR(DCHUBBUB_ARB_FRAC_URG_BW_NOM_A),\ - SR(DCHUBBUB_ARB_FRAC_URG_BW_NOM_B),\ - SR(DCHUBBUB_ARB_FRAC_URG_BW_NOM_C),\ - SR(DCHUBBUB_ARB_FRAC_URG_BW_NOM_D),\ - SR(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_A),\ - SR(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_B),\ - SR(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_C),\ - SR(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_D),\ - SR(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_A),\ - SR(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_B),\ - SR(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_C),\ - SR(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_D),\ - SR(DCHUBBUB_DET0_CTRL),\ - SR(DCHUBBUB_DET1_CTRL),\ - SR(DCHUBBUB_DET2_CTRL),\ - SR(DCHUBBUB_DET3_CTRL),\ - SR(DCHUBBUB_COMPBUF_CTRL),\ - SR(COMPBUF_RESERVED_SPACE),\ - SR(DCHUBBUB_DEBUG_CTRL_0),\ - SR(DCHUBBUB_ARB_USR_RETRAINING_CNTL),\ - SR(DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_A),\ - SR(DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_B),\ - SR(DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_C),\ - SR(DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_D),\ - SR(DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_A),\ - SR(DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_B),\ - SR(DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_C),\ - SR(DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_D),\ - SR(DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_A),\ - SR(DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_B),\ - SR(DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_C),\ - SR(DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_D),\ - SR(DCN_VM_FAULT_ADDR_MSB),\ - SR(DCN_VM_FAULT_ADDR_LSB),\ - SR(DCN_VM_FAULT_CNTL),\ - SR(DCN_VM_FAULT_STATUS),\ - SR(SDPIF_REQUEST_RATE_LIMIT),\ - SR(DCHUBBUB_CLOCK_CNTL),\ - SR(DCHUBBUB_SDPIF_CFG0),\ - SR(DCHUBBUB_SDPIF_CFG1),\ - SR(DCHUBBUB_MEM_PWR_MODE_CTRL),\ - SR(DCHUBBUB_ARB_HOSTVM_CNTL),\ - 
SR(DCHVM_CTRL0),\ - SR(DCHVM_MEM_CTRL),\ - SR(DCHVM_CLK_CTRL),\ - SR(DCHVM_RIOMMU_CTRL0),\ - SR(DCHVM_RIOMMU_STAT0),\ - SR(DCHUBBUB_COMPBUF_CTRL),\ - SR(COMPBUF_RESERVED_SPACE),\ - SR(DCHUBBUB_DEBUG_CTRL_0),\ - SR(DCHUBBUB_CLOCK_CNTL),\ - SR(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_A),\ - SR(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_A),\ - SR(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_B),\ - SR(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_B),\ - SR(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_C),\ - SR(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_C),\ - SR(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_D),\ - SR(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_D),\ - SR(DCHUBBUB_ARB_QOS_FORCE) - - -#define HUBBUB_MASK_SH_LIST_DCN35(mask_sh)\ - HUBBUB_MASK_SH_LIST_DCN32(mask_sh), \ - HUBBUB_SF(DCHVM_CTRL0, HOSTVM_INIT_REQ, mask_sh),\ - HUBBUB_SF(DCHVM_MEM_CTRL, HVM_GPUVMRET_PWR_REQ_DIS, mask_sh),\ - HUBBUB_SF(DCHVM_MEM_CTRL, HVM_GPUVMRET_FORCE_REQ, mask_sh),\ - HUBBUB_SF(DCHVM_MEM_CTRL, HVM_GPUVMRET_POWER_STATUS, mask_sh),\ - HUBBUB_SF(DCHVM_CLK_CTRL, HVM_DISPCLK_R_GATE_DIS, mask_sh),\ - HUBBUB_SF(DCHVM_CLK_CTRL, HVM_DISPCLK_G_GATE_DIS, mask_sh),\ - HUBBUB_SF(DCHVM_CLK_CTRL, HVM_DCFCLK_R_GATE_DIS, mask_sh),\ - HUBBUB_SF(DCHVM_CLK_CTRL, HVM_DCFCLK_G_GATE_DIS, mask_sh),\ - HUBBUB_SF(DCHVM_CLK_CTRL, TR_REQ_REQCLKREQ_MODE, mask_sh),\ - HUBBUB_SF(DCHVM_CLK_CTRL, TW_RSP_COMPCLKREQ_MODE, mask_sh),\ - HUBBUB_SF(DCHVM_RIOMMU_CTRL0, HOSTVM_PREFETCH_REQ, mask_sh),\ - HUBBUB_SF(DCHVM_RIOMMU_CTRL0, HOSTVM_POWERSTATUS, mask_sh),\ - HUBBUB_SF(DCHVM_RIOMMU_STAT0, RIOMMU_ACTIVE, mask_sh),\ - HUBBUB_SF(DCHVM_RIOMMU_STAT0, HOSTVM_PREFETCH_DONE, mask_sh),\ - HUBBUB_SF(DCHUBBUB_COMPBUF_CTRL, COMPBUF_SIZE, mask_sh),\ - HUBBUB_SF(DCHUBBUB_COMPBUF_CTRL, COMPBUF_SIZE_CURRENT, mask_sh),\ - HUBBUB_SF(DCHUBBUB_COMPBUF_CTRL, CONFIG_ERROR, mask_sh),\ - HUBBUB_SF(COMPBUF_RESERVED_SPACE, COMPBUF_RESERVED_SPACE_64B, mask_sh),\ - HUBBUB_SF(COMPBUF_RESERVED_SPACE, COMPBUF_RESERVED_SPACE_ZS, mask_sh),\ - HUBBUB_SF(DCHUBBUB_CLOCK_CNTL, DCHUBBUB_FGCG_REP_DIS, mask_sh),\ - HUBBUB_SF(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_A, DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_A, mask_sh), \ - HUBBUB_SF(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_B, DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_B, mask_sh), \ - HUBBUB_SF(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_C, DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_C, mask_sh), \ - HUBBUB_SF(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_D, DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_D, mask_sh), \ - HUBBUB_SF(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_A, DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_A, mask_sh), \ - HUBBUB_SF(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_B, DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_B, mask_sh), \ - HUBBUB_SF(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_C, DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_C, mask_sh), \ - HUBBUB_SF(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_D, DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_D, mask_sh), \ - HUBBUB_SF(DCHUBBUB_ARB_DRAM_STATE_CNTL, DCHUBBUB_ARB_ALLOW_CSTATE_DEEPSLEEP_LEGACY_MODE, mask_sh), \ - HUBBUB_SF(DCHUBBUB_ARB_HOSTVM_CNTL, DCHUBBUB_ARB_MAX_QOS_COMMIT_THRESHOLD, mask_sh),\ - HUBBUB_SF(DCHUBBUB_ARB_DF_REQ_OUTSTAND, DCHUBBUB_ARB_MIN_REQ_OUTSTAND_COMMIT_THRESHOLD, mask_sh) - -void hubbub35_construct(struct dcn20_hubbub *hubbub2, - struct dc_context *ctx, - const struct dcn_hubbub_registers *hubbub_regs, - const struct dcn_hubbub_shift *hubbub_shift, - const struct dcn_hubbub_mask *hubbub_mask, - int det_size_kb, - int pixel_chunk_size_kb, - int config_return_buffer_size_kb); -#endif diff --git 
a/drivers/gpu/drm/amd/display/dc/hubbub/Makefile b/drivers/gpu/drm/amd/display/dc/hubbub/Makefile new file mode 100644 index 000000000000..ab2fddc4a858 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/hubbub/Makefile @@ -0,0 +1,100 @@ + +# Copyright 2022 Advanced Micro Devices, Inc. +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the "Software"), +# to deal in the Software without restriction, including without limitation +# the rights to use, copy, modify, merge, publish, distribute, sublicense, +# and/or sell copies of the Software, and to permit persons to whom the +# Software is furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL +# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR +# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, +# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR +# OTHER DEALINGS IN THE SOFTWARE. +# +# Makefile for the 'hubbub' sub-component of DAL. +# +ifdef CONFIG_DRM_AMD_DC_FP +############################################################################### +# DCN +############################################################################### + +HUBBUB_DCN10 = dcn10_hubbub.o + +AMD_DAL_HUBBUB_DCN10 = $(addprefix $(AMDDALPATH)/dc/hubbub/dcn10/,$(HUBBUB_DCN10)) + +AMD_DISPLAY_FILES += $(AMD_DAL_HUBBUB_DCN10) + +############################################################################### + +HUBBUB_DCN20 = dcn20_hubbub.o + +AMD_DAL_HUBBUB_DCN20 = $(addprefix $(AMDDALPATH)/dc/hubbub/dcn20/,$(HUBBUB_DCN20)) + +AMD_DISPLAY_FILES += $(AMD_DAL_HUBBUB_DCN20) + +############################################################################### + +HUBBUB_DCN201 = dcn201_hubbub.o + +AMD_DAL_HUBBUB_DCN201 = $(addprefix $(AMDDALPATH)/dc/hubbub/dcn201/,$(HUBBUB_DCN201)) + +AMD_DISPLAY_FILES += $(AMD_DAL_HUBBUB_DCN201) + +############################################################################### + +HUBBUB_DCN21 = dcn21_hubbub.o + +AMD_DAL_HUBBUB_DCN21 = $(addprefix $(AMDDALPATH)/dc/hubbub/dcn21/,$(HUBBUB_DCN21)) + +AMD_DISPLAY_FILES += $(AMD_DAL_HUBBUB_DCN21) + +############################################################################### +HUBBUB_DCN30 = dcn30_hubbub.o + +AMD_DAL_HUBBUB_DCN30 = $(addprefix $(AMDDALPATH)/dc/hubbub/dcn30/,$(HUBBUB_DCN30)) + +AMD_DISPLAY_FILES += $(AMD_DAL_HUBBUB_DCN30) + +############################################################################### +HUBBUB_DCN301 = dcn301_hubbub.o + +AMD_DAL_HUBBUB_DCN301 = $(addprefix $(AMDDALPATH)/dc/hubbub/dcn301/,$(HUBBUB_DCN301)) + +AMD_DISPLAY_FILES += $(AMD_DAL_HUBBUB_DCN301) + +############################################################################### + +HUBBUB_DCN31 = dcn31_hubbub.o + +AMD_DAL_HUBBUB_DCN31 = $(addprefix $(AMDDALPATH)/dc/hubbub/dcn31/,$(HUBBUB_DCN31)) + +AMD_DISPLAY_FILES += $(AMD_DAL_HUBBUB_DCN31) + +############################################################################### +HUBBUB_DCN32 = dcn32_hubbub.o + +AMD_DAL_HUBBUB_DCN32 = $(addprefix $(AMDDALPATH)/dc/hubbub/dcn32/,$(HUBBUB_DCN32)) + +AMD_DISPLAY_FILES += $(AMD_DAL_HUBBUB_DCN32) + 
+############################################################################### + +HUBBUB_DCN35 = dcn35_hubbub.o + +AMD_DAL_HUBBUB_DCN35 = $(addprefix $(AMDDALPATH)/dc/hubbub/dcn35/,$(HUBBUB_DCN35)) + +AMD_DISPLAY_FILES += $(AMD_DAL_HUBBUB_DCN35) + +############################################################################### + + +############################################################################### +endif \ No newline at end of file diff --git a/drivers/gpu/drm/amd/display/dc/hubbub/dcn10/dcn10_hubbub.c b/drivers/gpu/drm/amd/display/dc/hubbub/dcn10/dcn10_hubbub.c new file mode 100644 index 000000000000..d738a36f2132 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/hubbub/dcn10/dcn10_hubbub.c @@ -0,0 +1,964 @@ +/* + * Copyright 2016 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: AMD + * + */ + +#include "dm_services.h" +#include "dcn10/dcn10_hubp.h" +#include "dcn10_hubbub.h" +#include "reg_helper.h" + +#define CTX \ + hubbub1->base.ctx +#define DC_LOGGER \ + hubbub1->base.ctx->logger +#define REG(reg)\ + hubbub1->regs->reg + +#undef FN +#define FN(reg_name, field_name) \ + hubbub1->shifts->field_name, hubbub1->masks->field_name + +void hubbub1_wm_read_state(struct hubbub *hubbub, + struct dcn_hubbub_wm *wm) +{ + struct dcn10_hubbub *hubbub1 = TO_DCN10_HUBBUB(hubbub); + struct dcn_hubbub_wm_set *s; + + memset(wm, 0, sizeof(struct dcn_hubbub_wm)); + + s = &wm->sets[0]; + s->wm_set = 0; + s->data_urgent = REG_READ(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A); + s->pte_meta_urgent = REG_READ(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_A); + if (REG(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A)) { + s->sr_enter = REG_READ(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A); + s->sr_exit = REG_READ(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A); + } + s->dram_clk_change = REG_READ(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A); + + s = &wm->sets[1]; + s->wm_set = 1; + s->data_urgent = REG_READ(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B); + s->pte_meta_urgent = REG_READ(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_B); + if (REG(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B)) { + s->sr_enter = REG_READ(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B); + s->sr_exit = REG_READ(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B); + } + s->dram_clk_change = REG_READ(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B); + + s = &wm->sets[2]; + s->wm_set = 2; + s->data_urgent = REG_READ(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C); + s->pte_meta_urgent = REG_READ(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_C); + if (REG(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C)) { + s->sr_enter = REG_READ(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C); + s->sr_exit = REG_READ(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C); + } + s->dram_clk_change = REG_READ(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C); + + s = &wm->sets[3]; + s->wm_set = 3; + s->data_urgent = REG_READ(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D); + s->pte_meta_urgent = REG_READ(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_D); + if (REG(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D)) { + s->sr_enter = REG_READ(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D); + s->sr_exit = REG_READ(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D); + } + s->dram_clk_change = REG_READ(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D); +} + +void hubbub1_allow_self_refresh_control(struct hubbub *hubbub, bool allow) +{ + struct dcn10_hubbub *hubbub1 = TO_DCN10_HUBBUB(hubbub); + /* + * DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_ENABLE = 1 means do not allow stutter + * DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_ENABLE = 0 means allow stutter + */ + + REG_UPDATE_2(DCHUBBUB_ARB_DRAM_STATE_CNTL, + DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_VALUE, 0, + DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_ENABLE, !allow); +} + +bool hubbub1_is_allow_self_refresh_enabled(struct hubbub *hubbub) +{ + struct dcn10_hubbub *hubbub1 = TO_DCN10_HUBBUB(hubbub); + uint32_t enable = 0; + + REG_GET(DCHUBBUB_ARB_DRAM_STATE_CNTL, + DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_ENABLE, &enable); + + return enable ? true : false; +} + + +bool hubbub1_verify_allow_pstate_change_high( + struct hubbub *hubbub) +{ + struct dcn10_hubbub *hubbub1 = TO_DCN10_HUBBUB(hubbub); + + /* pstate latency is ~20us so if we wait over 40us and pstate allow + * still not asserted, we are probably stuck and going to hang + * + * TODO: Figure out why it takes ~100us on linux + * pstate takes around ~100us (up to 200us) on linux. 
Unknown currently + * as to why it takes that long on linux + */ + const unsigned int pstate_wait_timeout_us = 200; + const unsigned int pstate_wait_expected_timeout_us = 180; + static unsigned int max_sampled_pstate_wait_us; /* data collection */ + static bool forced_pstate_allow; /* help with revert wa */ + + unsigned int debug_data = 0; + unsigned int i; + + if (forced_pstate_allow) { + /* we hacked to force pstate allow to prevent hang last time + * we verify_allow_pstate_change_high. so disable force + * here so we can check status + */ + REG_UPDATE_2(DCHUBBUB_ARB_DRAM_STATE_CNTL, + DCHUBBUB_ARB_ALLOW_PSTATE_CHANGE_FORCE_VALUE, 0, + DCHUBBUB_ARB_ALLOW_PSTATE_CHANGE_FORCE_ENABLE, 0); + forced_pstate_allow = false; + } + + /* The following table only applies to DCN1 and DCN2, + * for newer DCNs, need to consult with HW IP folks to read RTL + * HUBBUB:DCHUBBUB_TEST_ARB_DEBUG10 DCHUBBUBDEBUGIND:0xB + * description + * 0: Pipe0 Plane0 Allow Pstate Change + * 1: Pipe0 Plane1 Allow Pstate Change + * 2: Pipe0 Cursor0 Allow Pstate Change + * 3: Pipe0 Cursor1 Allow Pstate Change + * 4: Pipe1 Plane0 Allow Pstate Change + * 5: Pipe1 Plane1 Allow Pstate Change + * 6: Pipe1 Cursor0 Allow Pstate Change + * 7: Pipe1 Cursor1 Allow Pstate Change + * 8: Pipe2 Plane0 Allow Pstate Change + * 9: Pipe2 Plane1 Allow Pstate Change + * 10: Pipe2 Cursor0 Allow Pstate Change + * 11: Pipe2 Cursor1 Allow Pstate Change + * 12: Pipe3 Plane0 Allow Pstate Change + * 13: Pipe3 Plane1 Allow Pstate Change + * 14: Pipe3 Cursor0 Allow Pstate Change + * 15: Pipe3 Cursor1 Allow Pstate Change + * 16: Pipe4 Plane0 Allow Pstate Change + * 17: Pipe4 Plane1 Allow Pstate Change + * 18: Pipe4 Cursor0 Allow Pstate Change + * 19: Pipe4 Cursor1 Allow Pstate Change + * 20: Pipe5 Plane0 Allow Pstate Change + * 21: Pipe5 Plane1 Allow Pstate Change + * 22: Pipe5 Cursor0 Allow Pstate Change + * 23: Pipe5 Cursor1 Allow Pstate Change + * 24: Pipe6 Plane0 Allow Pstate Change + * 25: Pipe6 Plane1 Allow Pstate Change + * 26: Pipe6 Cursor0 Allow Pstate Change + * 27: Pipe6 Cursor1 Allow Pstate Change + * 28: WB0 Allow Pstate Change + * 29: WB1 Allow Pstate Change + * 30: Arbiter's allow_pstate_change + * 31: SOC pstate change request + */ + + REG_WRITE(DCHUBBUB_TEST_DEBUG_INDEX, hubbub1->debug_test_index_pstate); + + for (i = 0; i < pstate_wait_timeout_us; i++) { + debug_data = REG_READ(DCHUBBUB_TEST_DEBUG_DATA); + + if (debug_data & (1 << 30)) { + + if (i > pstate_wait_expected_timeout_us) + DC_LOG_WARNING("pstate took longer than expected ~%dus\n", + i); + + return true; + } + if (max_sampled_pstate_wait_us < i) + max_sampled_pstate_wait_us = i; + + udelay(1); + } + + /* force pstate allow to prevent system hang + * and break to debugger to investigate + */ + REG_UPDATE_2(DCHUBBUB_ARB_DRAM_STATE_CNTL, + DCHUBBUB_ARB_ALLOW_PSTATE_CHANGE_FORCE_VALUE, 1, + DCHUBBUB_ARB_ALLOW_PSTATE_CHANGE_FORCE_ENABLE, 1); + forced_pstate_allow = true; + + DC_LOG_WARNING("pstate TEST_DEBUG_DATA: 0x%X\n", + debug_data); + + return false; +} + +static uint32_t convert_and_clamp( + uint32_t wm_ns, + uint32_t refclk_mhz, + uint32_t clamp_value) +{ + uint32_t ret_val = 0; + ret_val = wm_ns * refclk_mhz; + ret_val /= 1000; + + if (ret_val > clamp_value) + ret_val = clamp_value; + + return ret_val; +} + + +void hubbub1_wm_change_req_wa(struct hubbub *hubbub) +{ + struct dcn10_hubbub *hubbub1 = TO_DCN10_HUBBUB(hubbub); + + REG_UPDATE_SEQ_2(DCHUBBUB_ARB_WATERMARK_CHANGE_CNTL, + DCHUBBUB_ARB_WATERMARK_CHANGE_REQUEST, 0, + DCHUBBUB_ARB_WATERMARK_CHANGE_REQUEST, 1); +} + 
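
convert_and_clamp() above is the conversion every watermark helper below feeds its register writes through: a watermark given in nanoseconds is scaled by the DCHUB reference clock in MHz and the result is clamped to the width of the register field (0x1fffff, i.e. 21 bits, on DCN1) so the programmed value cannot wrap. A minimal standalone sketch of that math follows; the helper name and the sample numbers are illustrative only, and it assumes plain userspace C rather than the kernel register macros.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/*
 * Sketch of the convert_and_clamp() math: a watermark in nanoseconds
 * times the refclk in MHz gives clock cycles once divided by 1000,
 * and the result is clamped so it cannot exceed the register field.
 * The helper name and the sample values below are illustrative only.
 */
static uint32_t wm_ns_to_refclk_cycles(uint32_t wm_ns, uint32_t refclk_mhz,
                                       uint32_t clamp_value)
{
        uint32_t cycles = wm_ns * refclk_mhz / 1000;

        return cycles > clamp_value ? clamp_value : cycles;
}

int main(void)
{
        /* 9000 ns at a 300 MHz DCHUB refclk -> 2700 cycles. */
        printf("%" PRIu32 "\n", wm_ns_to_refclk_cycles(9000, 300, 0x1fffff));

        /* 8 ms at 300 MHz would need 2400000 cycles, which does not fit
         * in the 21-bit DCN1 field, so it saturates at 0x1fffff.
         */
        printf("%" PRIu32 "\n", wm_ns_to_refclk_cycles(8000000, 300, 0x1fffff));

        return 0;
}

The safe_to_lower flag used by the programming helpers that follow means a watermark is only lowered when the caller says it is safe to do so; otherwise the new, lower value is left unprogrammed and reported back through wm_pending.
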
+bool hubbub1_program_urgent_watermarks( + struct hubbub *hubbub, + union dcn_watermark_set *watermarks, + unsigned int refclk_mhz, + bool safe_to_lower) +{ + struct dcn10_hubbub *hubbub1 = TO_DCN10_HUBBUB(hubbub); + uint32_t prog_wm_value; + bool wm_pending = false; + + /* Repeat for water mark set A, B, C and D. */ + /* clock state A */ + if (safe_to_lower || watermarks->a.urgent_ns > hubbub1->watermarks.a.urgent_ns) { + hubbub1->watermarks.a.urgent_ns = watermarks->a.urgent_ns; + prog_wm_value = convert_and_clamp(watermarks->a.urgent_ns, + refclk_mhz, 0x1fffff); + REG_SET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A, 0, + DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A, prog_wm_value); + + DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_A calculated =%d\n" + "HW register value = 0x%x\n", + watermarks->a.urgent_ns, prog_wm_value); + } else if (watermarks->a.urgent_ns < hubbub1->watermarks.a.urgent_ns) + wm_pending = true; + + if (safe_to_lower || watermarks->a.pte_meta_urgent_ns > hubbub1->watermarks.a.pte_meta_urgent_ns) { + hubbub1->watermarks.a.pte_meta_urgent_ns = watermarks->a.pte_meta_urgent_ns; + prog_wm_value = convert_and_clamp(watermarks->a.pte_meta_urgent_ns, + refclk_mhz, 0x1fffff); + REG_WRITE(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_A, prog_wm_value); + DC_LOG_BANDWIDTH_CALCS("PTE_META_URGENCY_WATERMARK_A calculated =%d\n" + "HW register value = 0x%x\n", + watermarks->a.pte_meta_urgent_ns, prog_wm_value); + } else if (watermarks->a.pte_meta_urgent_ns < hubbub1->watermarks.a.pte_meta_urgent_ns) + wm_pending = true; + + /* clock state B */ + if (safe_to_lower || watermarks->b.urgent_ns > hubbub1->watermarks.b.urgent_ns) { + hubbub1->watermarks.b.urgent_ns = watermarks->b.urgent_ns; + prog_wm_value = convert_and_clamp(watermarks->b.urgent_ns, + refclk_mhz, 0x1fffff); + REG_SET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B, 0, + DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B, prog_wm_value); + + DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_B calculated =%d\n" + "HW register value = 0x%x\n", + watermarks->b.urgent_ns, prog_wm_value); + } else if (watermarks->b.urgent_ns < hubbub1->watermarks.b.urgent_ns) + wm_pending = true; + + if (safe_to_lower || watermarks->b.pte_meta_urgent_ns > hubbub1->watermarks.b.pte_meta_urgent_ns) { + hubbub1->watermarks.b.pte_meta_urgent_ns = watermarks->b.pte_meta_urgent_ns; + prog_wm_value = convert_and_clamp(watermarks->b.pte_meta_urgent_ns, + refclk_mhz, 0x1fffff); + REG_WRITE(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_B, prog_wm_value); + DC_LOG_BANDWIDTH_CALCS("PTE_META_URGENCY_WATERMARK_B calculated =%d\n" + "HW register value = 0x%x\n", + watermarks->b.pte_meta_urgent_ns, prog_wm_value); + } else if (watermarks->b.pte_meta_urgent_ns < hubbub1->watermarks.b.pte_meta_urgent_ns) + wm_pending = true; + + /* clock state C */ + if (safe_to_lower || watermarks->c.urgent_ns > hubbub1->watermarks.c.urgent_ns) { + hubbub1->watermarks.c.urgent_ns = watermarks->c.urgent_ns; + prog_wm_value = convert_and_clamp(watermarks->c.urgent_ns, + refclk_mhz, 0x1fffff); + REG_SET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C, 0, + DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C, prog_wm_value); + + DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_C calculated =%d\n" + "HW register value = 0x%x\n", + watermarks->c.urgent_ns, prog_wm_value); + } else if (watermarks->c.urgent_ns < hubbub1->watermarks.c.urgent_ns) + wm_pending = true; + + if (safe_to_lower || watermarks->c.pte_meta_urgent_ns > hubbub1->watermarks.c.pte_meta_urgent_ns) { + hubbub1->watermarks.c.pte_meta_urgent_ns = watermarks->c.pte_meta_urgent_ns; + prog_wm_value = 
convert_and_clamp(watermarks->c.pte_meta_urgent_ns, + refclk_mhz, 0x1fffff); + REG_WRITE(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_C, prog_wm_value); + DC_LOG_BANDWIDTH_CALCS("PTE_META_URGENCY_WATERMARK_C calculated =%d\n" + "HW register value = 0x%x\n", + watermarks->c.pte_meta_urgent_ns, prog_wm_value); + } else if (watermarks->c.pte_meta_urgent_ns < hubbub1->watermarks.c.pte_meta_urgent_ns) + wm_pending = true; + + /* clock state D */ + if (safe_to_lower || watermarks->d.urgent_ns > hubbub1->watermarks.d.urgent_ns) { + hubbub1->watermarks.d.urgent_ns = watermarks->d.urgent_ns; + prog_wm_value = convert_and_clamp(watermarks->d.urgent_ns, + refclk_mhz, 0x1fffff); + REG_SET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D, 0, + DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D, prog_wm_value); + + DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_D calculated =%d\n" + "HW register value = 0x%x\n", + watermarks->d.urgent_ns, prog_wm_value); + } else if (watermarks->d.urgent_ns < hubbub1->watermarks.d.urgent_ns) + wm_pending = true; + + if (safe_to_lower || watermarks->d.pte_meta_urgent_ns > hubbub1->watermarks.d.pte_meta_urgent_ns) { + hubbub1->watermarks.d.pte_meta_urgent_ns = watermarks->d.pte_meta_urgent_ns; + prog_wm_value = convert_and_clamp(watermarks->d.pte_meta_urgent_ns, + refclk_mhz, 0x1fffff); + REG_WRITE(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_D, prog_wm_value); + DC_LOG_BANDWIDTH_CALCS("PTE_META_URGENCY_WATERMARK_D calculated =%d\n" + "HW register value = 0x%x\n", + watermarks->d.pte_meta_urgent_ns, prog_wm_value); + } else if (watermarks->d.pte_meta_urgent_ns < hubbub1->watermarks.d.pte_meta_urgent_ns) + wm_pending = true; + + return wm_pending; +} + +bool hubbub1_program_stutter_watermarks( + struct hubbub *hubbub, + union dcn_watermark_set *watermarks, + unsigned int refclk_mhz, + bool safe_to_lower) +{ + struct dcn10_hubbub *hubbub1 = TO_DCN10_HUBBUB(hubbub); + uint32_t prog_wm_value; + bool wm_pending = false; + + /* clock state A */ + if (safe_to_lower || watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns + > hubbub1->watermarks.a.cstate_pstate.cstate_enter_plus_exit_ns) { + hubbub1->watermarks.a.cstate_pstate.cstate_enter_plus_exit_ns = + watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns; + prog_wm_value = convert_and_clamp( + watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns, + refclk_mhz, 0x1fffff); + REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A, 0, + DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A, prog_wm_value); + DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_A calculated =%d\n" + "HW register value = 0x%x\n", + watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value); + } else if (watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns + < hubbub1->watermarks.a.cstate_pstate.cstate_enter_plus_exit_ns) + wm_pending = true; + + if (safe_to_lower || watermarks->a.cstate_pstate.cstate_exit_ns + > hubbub1->watermarks.a.cstate_pstate.cstate_exit_ns) { + hubbub1->watermarks.a.cstate_pstate.cstate_exit_ns = + watermarks->a.cstate_pstate.cstate_exit_ns; + prog_wm_value = convert_and_clamp( + watermarks->a.cstate_pstate.cstate_exit_ns, + refclk_mhz, 0x1fffff); + REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A, 0, + DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A, prog_wm_value); + DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_A calculated =%d\n" + "HW register value = 0x%x\n", + watermarks->a.cstate_pstate.cstate_exit_ns, prog_wm_value); + } else if (watermarks->a.cstate_pstate.cstate_exit_ns + < hubbub1->watermarks.a.cstate_pstate.cstate_exit_ns) + wm_pending = true; + + /* clock state B */ + if 
(safe_to_lower || watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns + > hubbub1->watermarks.b.cstate_pstate.cstate_enter_plus_exit_ns) { + hubbub1->watermarks.b.cstate_pstate.cstate_enter_plus_exit_ns = + watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns; + prog_wm_value = convert_and_clamp( + watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns, + refclk_mhz, 0x1fffff); + REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B, 0, + DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B, prog_wm_value); + DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_B calculated =%d\n" + "HW register value = 0x%x\n", + watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value); + } else if (watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns + < hubbub1->watermarks.b.cstate_pstate.cstate_enter_plus_exit_ns) + wm_pending = true; + + if (safe_to_lower || watermarks->b.cstate_pstate.cstate_exit_ns + > hubbub1->watermarks.b.cstate_pstate.cstate_exit_ns) { + hubbub1->watermarks.b.cstate_pstate.cstate_exit_ns = + watermarks->b.cstate_pstate.cstate_exit_ns; + prog_wm_value = convert_and_clamp( + watermarks->b.cstate_pstate.cstate_exit_ns, + refclk_mhz, 0x1fffff); + REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B, 0, + DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B, prog_wm_value); + DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_B calculated =%d\n" + "HW register value = 0x%x\n", + watermarks->b.cstate_pstate.cstate_exit_ns, prog_wm_value); + } else if (watermarks->b.cstate_pstate.cstate_exit_ns + < hubbub1->watermarks.b.cstate_pstate.cstate_exit_ns) + wm_pending = true; + + /* clock state C */ + if (safe_to_lower || watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns + > hubbub1->watermarks.c.cstate_pstate.cstate_enter_plus_exit_ns) { + hubbub1->watermarks.c.cstate_pstate.cstate_enter_plus_exit_ns = + watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns; + prog_wm_value = convert_and_clamp( + watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns, + refclk_mhz, 0x1fffff); + REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C, 0, + DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C, prog_wm_value); + DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_C calculated =%d\n" + "HW register value = 0x%x\n", + watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value); + } else if (watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns + < hubbub1->watermarks.c.cstate_pstate.cstate_enter_plus_exit_ns) + wm_pending = true; + + if (safe_to_lower || watermarks->c.cstate_pstate.cstate_exit_ns + > hubbub1->watermarks.c.cstate_pstate.cstate_exit_ns) { + hubbub1->watermarks.c.cstate_pstate.cstate_exit_ns = + watermarks->c.cstate_pstate.cstate_exit_ns; + prog_wm_value = convert_and_clamp( + watermarks->c.cstate_pstate.cstate_exit_ns, + refclk_mhz, 0x1fffff); + REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C, 0, + DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C, prog_wm_value); + DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_C calculated =%d\n" + "HW register value = 0x%x\n", + watermarks->c.cstate_pstate.cstate_exit_ns, prog_wm_value); + } else if (watermarks->c.cstate_pstate.cstate_exit_ns + < hubbub1->watermarks.c.cstate_pstate.cstate_exit_ns) + wm_pending = true; + + /* clock state D */ + if (safe_to_lower || watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns + > hubbub1->watermarks.d.cstate_pstate.cstate_enter_plus_exit_ns) { + hubbub1->watermarks.d.cstate_pstate.cstate_enter_plus_exit_ns = + watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns; + prog_wm_value = convert_and_clamp( + watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns, + 
refclk_mhz, 0x1fffff); + REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D, 0, + DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D, prog_wm_value); + DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_D calculated =%d\n" + "HW register value = 0x%x\n", + watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value); + } else if (watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns + < hubbub1->watermarks.d.cstate_pstate.cstate_enter_plus_exit_ns) + wm_pending = true; + + if (safe_to_lower || watermarks->d.cstate_pstate.cstate_exit_ns + > hubbub1->watermarks.d.cstate_pstate.cstate_exit_ns) { + hubbub1->watermarks.d.cstate_pstate.cstate_exit_ns = + watermarks->d.cstate_pstate.cstate_exit_ns; + prog_wm_value = convert_and_clamp( + watermarks->d.cstate_pstate.cstate_exit_ns, + refclk_mhz, 0x1fffff); + REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D, 0, + DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D, prog_wm_value); + DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_D calculated =%d\n" + "HW register value = 0x%x\n", + watermarks->d.cstate_pstate.cstate_exit_ns, prog_wm_value); + } else if (watermarks->d.cstate_pstate.cstate_exit_ns + < hubbub1->watermarks.d.cstate_pstate.cstate_exit_ns) + wm_pending = true; + + return wm_pending; +} + +bool hubbub1_program_pstate_watermarks( + struct hubbub *hubbub, + union dcn_watermark_set *watermarks, + unsigned int refclk_mhz, + bool safe_to_lower) +{ + struct dcn10_hubbub *hubbub1 = TO_DCN10_HUBBUB(hubbub); + uint32_t prog_wm_value; + bool wm_pending = false; + + /* clock state A */ + if (safe_to_lower || watermarks->a.cstate_pstate.pstate_change_ns + > hubbub1->watermarks.a.cstate_pstate.pstate_change_ns) { + hubbub1->watermarks.a.cstate_pstate.pstate_change_ns = + watermarks->a.cstate_pstate.pstate_change_ns; + prog_wm_value = convert_and_clamp( + watermarks->a.cstate_pstate.pstate_change_ns, + refclk_mhz, 0x1fffff); + REG_SET(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A, 0, + DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A, prog_wm_value); + DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_A calculated =%d\n" + "HW register value = 0x%x\n\n", + watermarks->a.cstate_pstate.pstate_change_ns, prog_wm_value); + } else if (watermarks->a.cstate_pstate.pstate_change_ns + < hubbub1->watermarks.a.cstate_pstate.pstate_change_ns) + wm_pending = true; + + /* clock state B */ + if (safe_to_lower || watermarks->b.cstate_pstate.pstate_change_ns + > hubbub1->watermarks.b.cstate_pstate.pstate_change_ns) { + hubbub1->watermarks.b.cstate_pstate.pstate_change_ns = + watermarks->b.cstate_pstate.pstate_change_ns; + prog_wm_value = convert_and_clamp( + watermarks->b.cstate_pstate.pstate_change_ns, + refclk_mhz, 0x1fffff); + REG_SET(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B, 0, + DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B, prog_wm_value); + DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_B calculated =%d\n" + "HW register value = 0x%x\n\n", + watermarks->b.cstate_pstate.pstate_change_ns, prog_wm_value); + } else if (watermarks->b.cstate_pstate.pstate_change_ns + < hubbub1->watermarks.b.cstate_pstate.pstate_change_ns) + wm_pending = true; + + /* clock state C */ + if (safe_to_lower || watermarks->c.cstate_pstate.pstate_change_ns + > hubbub1->watermarks.c.cstate_pstate.pstate_change_ns) { + hubbub1->watermarks.c.cstate_pstate.pstate_change_ns = + watermarks->c.cstate_pstate.pstate_change_ns; + prog_wm_value = convert_and_clamp( + watermarks->c.cstate_pstate.pstate_change_ns, + refclk_mhz, 0x1fffff); + REG_SET(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C, 0, + 
DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C, prog_wm_value); + DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_C calculated =%d\n" + "HW register value = 0x%x\n\n", + watermarks->c.cstate_pstate.pstate_change_ns, prog_wm_value); + } else if (watermarks->c.cstate_pstate.pstate_change_ns + < hubbub1->watermarks.c.cstate_pstate.pstate_change_ns) + wm_pending = true; + + /* clock state D */ + if (safe_to_lower || watermarks->d.cstate_pstate.pstate_change_ns + > hubbub1->watermarks.d.cstate_pstate.pstate_change_ns) { + hubbub1->watermarks.d.cstate_pstate.pstate_change_ns = + watermarks->d.cstate_pstate.pstate_change_ns; + prog_wm_value = convert_and_clamp( + watermarks->d.cstate_pstate.pstate_change_ns, + refclk_mhz, 0x1fffff); + REG_SET(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D, 0, + DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D, prog_wm_value); + DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_D calculated =%d\n" + "HW register value = 0x%x\n\n", + watermarks->d.cstate_pstate.pstate_change_ns, prog_wm_value); + } else if (watermarks->d.cstate_pstate.pstate_change_ns + < hubbub1->watermarks.d.cstate_pstate.pstate_change_ns) + wm_pending = true; + + return wm_pending; +} + +bool hubbub1_program_watermarks( + struct hubbub *hubbub, + union dcn_watermark_set *watermarks, + unsigned int refclk_mhz, + bool safe_to_lower) +{ + struct dcn10_hubbub *hubbub1 = TO_DCN10_HUBBUB(hubbub); + bool wm_pending = false; + /* + * Need to clamp to max of the register values (i.e. no wrap) + * for dcn1, all wm registers are 21-bit wide + */ + if (hubbub1_program_urgent_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower)) + wm_pending = true; + + if (hubbub1_program_stutter_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower)) + wm_pending = true; + + if (hubbub1_program_pstate_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower)) + wm_pending = true; + + REG_UPDATE(DCHUBBUB_ARB_SAT_LEVEL, + DCHUBBUB_ARB_SAT_LEVEL, 60 * refclk_mhz); + REG_UPDATE(DCHUBBUB_ARB_DF_REQ_OUTSTAND, + DCHUBBUB_ARB_MIN_REQ_OUTSTAND, 68); + + hubbub1_allow_self_refresh_control(hubbub, !hubbub->ctx->dc->debug.disable_stutter); + +#if 0 + REG_UPDATE_2(DCHUBBUB_ARB_WATERMARK_CHANGE_CNTL, + DCHUBBUB_ARB_WATERMARK_CHANGE_DONE_INTERRUPT_DISABLE, 1, + DCHUBBUB_ARB_WATERMARK_CHANGE_REQUEST, 1); +#endif + return wm_pending; +} + +void hubbub1_update_dchub( + struct hubbub *hubbub, + struct dchub_init_data *dh_data) +{ + struct dcn10_hubbub *hubbub1 = TO_DCN10_HUBBUB(hubbub); + + if (REG(DCHUBBUB_SDPIF_FB_TOP) == 0) { + ASSERT(false); + /*should not come here*/ + return; + } + /* TODO: port code from dal2 */ + switch (dh_data->fb_mode) { + case FRAME_BUFFER_MODE_ZFB_ONLY: + /*For ZFB case need to put DCHUB FB BASE and TOP upside down to indicate ZFB mode*/ + REG_UPDATE(DCHUBBUB_SDPIF_FB_TOP, + SDPIF_FB_TOP, 0); + + REG_UPDATE(DCHUBBUB_SDPIF_FB_BASE, + SDPIF_FB_BASE, 0x0FFFF); + + REG_UPDATE(DCHUBBUB_SDPIF_AGP_BASE, + SDPIF_AGP_BASE, dh_data->zfb_phys_addr_base >> 22); + + REG_UPDATE(DCHUBBUB_SDPIF_AGP_BOT, + SDPIF_AGP_BOT, dh_data->zfb_mc_base_addr >> 22); + + REG_UPDATE(DCHUBBUB_SDPIF_AGP_TOP, + SDPIF_AGP_TOP, (dh_data->zfb_mc_base_addr + + dh_data->zfb_size_in_byte - 1) >> 22); + break; + case FRAME_BUFFER_MODE_MIXED_ZFB_AND_LOCAL: + /*Should not touch FB LOCATION (done by VBIOS on AsicInit table)*/ + + REG_UPDATE(DCHUBBUB_SDPIF_AGP_BASE, + SDPIF_AGP_BASE, dh_data->zfb_phys_addr_base >> 22); + + REG_UPDATE(DCHUBBUB_SDPIF_AGP_BOT, + SDPIF_AGP_BOT, dh_data->zfb_mc_base_addr >> 22); + + REG_UPDATE(DCHUBBUB_SDPIF_AGP_TOP, + 
SDPIF_AGP_TOP, (dh_data->zfb_mc_base_addr + + dh_data->zfb_size_in_byte - 1) >> 22); + break; + case FRAME_BUFFER_MODE_LOCAL_ONLY: + /*Should not touch FB LOCATION (done by VBIOS on AsicInit table)*/ + REG_UPDATE(DCHUBBUB_SDPIF_AGP_BASE, + SDPIF_AGP_BASE, 0); + + REG_UPDATE(DCHUBBUB_SDPIF_AGP_BOT, + SDPIF_AGP_BOT, 0X03FFFF); + + REG_UPDATE(DCHUBBUB_SDPIF_AGP_TOP, + SDPIF_AGP_TOP, 0); + break; + default: + break; + } + + dh_data->dchub_initialzied = true; + dh_data->dchub_info_valid = false; +} + +void hubbub1_toggle_watermark_change_req(struct hubbub *hubbub) +{ + struct dcn10_hubbub *hubbub1 = TO_DCN10_HUBBUB(hubbub); + + uint32_t watermark_change_req; + + REG_GET(DCHUBBUB_ARB_WATERMARK_CHANGE_CNTL, + DCHUBBUB_ARB_WATERMARK_CHANGE_REQUEST, &watermark_change_req); + + if (watermark_change_req) + watermark_change_req = 0; + else + watermark_change_req = 1; + + REG_UPDATE(DCHUBBUB_ARB_WATERMARK_CHANGE_CNTL, + DCHUBBUB_ARB_WATERMARK_CHANGE_REQUEST, watermark_change_req); +} + +void hubbub1_soft_reset(struct hubbub *hubbub, bool reset) +{ + struct dcn10_hubbub *hubbub1 = TO_DCN10_HUBBUB(hubbub); + + uint32_t reset_en = reset ? 1 : 0; + + REG_UPDATE(DCHUBBUB_SOFT_RESET, + DCHUBBUB_GLOBAL_SOFT_RESET, reset_en); +} + +static bool hubbub1_dcc_support_swizzle( + enum swizzle_mode_values swizzle, + unsigned int bytes_per_element, + enum segment_order *segment_order_horz, + enum segment_order *segment_order_vert) +{ + bool standard_swizzle = false; + bool display_swizzle = false; + + switch (swizzle) { + case DC_SW_4KB_S: + case DC_SW_64KB_S: + case DC_SW_VAR_S: + case DC_SW_4KB_S_X: + case DC_SW_64KB_S_X: + case DC_SW_VAR_S_X: + standard_swizzle = true; + break; + case DC_SW_4KB_D: + case DC_SW_64KB_D: + case DC_SW_VAR_D: + case DC_SW_4KB_D_X: + case DC_SW_64KB_D_X: + case DC_SW_VAR_D_X: + display_swizzle = true; + break; + default: + break; + } + + if (bytes_per_element == 1 && standard_swizzle) { + *segment_order_horz = segment_order__contiguous; + *segment_order_vert = segment_order__na; + return true; + } + if (bytes_per_element == 2 && standard_swizzle) { + *segment_order_horz = segment_order__non_contiguous; + *segment_order_vert = segment_order__contiguous; + return true; + } + if (bytes_per_element == 4 && standard_swizzle) { + *segment_order_horz = segment_order__non_contiguous; + *segment_order_vert = segment_order__contiguous; + return true; + } + if (bytes_per_element == 8 && standard_swizzle) { + *segment_order_horz = segment_order__na; + *segment_order_vert = segment_order__contiguous; + return true; + } + if (bytes_per_element == 8 && display_swizzle) { + *segment_order_horz = segment_order__contiguous; + *segment_order_vert = segment_order__non_contiguous; + return true; + } + + return false; +} + +static bool hubbub1_dcc_support_pixel_format( + enum surface_pixel_format format, + unsigned int *bytes_per_element) +{ + /* DML: get_bytes_per_element */ + switch (format) { + case SURFACE_PIXEL_FORMAT_GRPH_ARGB1555: + case SURFACE_PIXEL_FORMAT_GRPH_RGB565: + *bytes_per_element = 2; + return true; + case SURFACE_PIXEL_FORMAT_GRPH_ARGB8888: + case SURFACE_PIXEL_FORMAT_GRPH_ABGR8888: + case SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010: + case SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010: + *bytes_per_element = 4; + return true; + case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616: + case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616: + case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F: + case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F: + *bytes_per_element = 8; + return true; + default: + return false; + } +} + +static void 
hubbub1_get_blk256_size(unsigned int *blk256_width, unsigned int *blk256_height, + unsigned int bytes_per_element) +{ + /* copied from DML. might want to refactor DML to leverage from DML */ + /* DML : get_blk256_size */ + if (bytes_per_element == 1) { + *blk256_width = 16; + *blk256_height = 16; + } else if (bytes_per_element == 2) { + *blk256_width = 16; + *blk256_height = 8; + } else if (bytes_per_element == 4) { + *blk256_width = 8; + *blk256_height = 8; + } else if (bytes_per_element == 8) { + *blk256_width = 8; + *blk256_height = 4; + } +} + +static void hubbub1_det_request_size( + unsigned int height, + unsigned int width, + unsigned int bpe, + bool *req128_horz_wc, + bool *req128_vert_wc) +{ + unsigned int detile_buf_size = 164 * 1024; /* 164KB for DCN1.0 */ + + unsigned int blk256_height = 0; + unsigned int blk256_width = 0; + unsigned int swath_bytes_horz_wc, swath_bytes_vert_wc; + + hubbub1_get_blk256_size(&blk256_width, &blk256_height, bpe); + + swath_bytes_horz_wc = width * blk256_height * bpe; + swath_bytes_vert_wc = height * blk256_width * bpe; + + *req128_horz_wc = (2 * swath_bytes_horz_wc <= detile_buf_size) ? + false : /* full 256B request */ + true; /* half 128b request */ + + *req128_vert_wc = (2 * swath_bytes_vert_wc <= detile_buf_size) ? + false : /* full 256B request */ + true; /* half 128b request */ +} + +static bool hubbub1_get_dcc_compression_cap(struct hubbub *hubbub, + const struct dc_dcc_surface_param *input, + struct dc_surface_dcc_cap *output) +{ + struct dcn10_hubbub *hubbub1 = TO_DCN10_HUBBUB(hubbub); + struct dc *dc = hubbub1->base.ctx->dc; + + /* implement section 1.6.2.1 of DCN1_Programming_Guide.docx */ + enum dcc_control dcc_control; + unsigned int bpe; + enum segment_order segment_order_horz, segment_order_vert; + bool req128_horz_wc, req128_vert_wc; + + memset(output, 0, sizeof(*output)); + + if (dc->debug.disable_dcc == DCC_DISABLE) + return false; + + if (!hubbub1->base.funcs->dcc_support_pixel_format(input->format, &bpe)) + return false; + + if (!hubbub1->base.funcs->dcc_support_swizzle(input->swizzle_mode, bpe, + &segment_order_horz, &segment_order_vert)) + return false; + + hubbub1_det_request_size(input->surface_size.height, input->surface_size.width, + bpe, &req128_horz_wc, &req128_vert_wc); + + if (!req128_horz_wc && !req128_vert_wc) { + dcc_control = dcc_control__256_256_xxx; + } else if (input->scan == SCAN_DIRECTION_HORIZONTAL) { + if (!req128_horz_wc) + dcc_control = dcc_control__256_256_xxx; + else if (segment_order_horz == segment_order__contiguous) + dcc_control = dcc_control__128_128_xxx; + else + dcc_control = dcc_control__256_64_64; + } else if (input->scan == SCAN_DIRECTION_VERTICAL) { + if (!req128_vert_wc) + dcc_control = dcc_control__256_256_xxx; + else if (segment_order_vert == segment_order__contiguous) + dcc_control = dcc_control__128_128_xxx; + else + dcc_control = dcc_control__256_64_64; + } else { + if ((req128_horz_wc && + segment_order_horz == segment_order__non_contiguous) || + (req128_vert_wc && + segment_order_vert == segment_order__non_contiguous)) + /* access_dir not known, must use most constraining */ + dcc_control = dcc_control__256_64_64; + else + /* reg128 is true for either horz and vert + * but segment_order is contiguous + */ + dcc_control = dcc_control__128_128_xxx; + } + + if (dc->debug.disable_dcc == DCC_HALF_REQ_DISALBE && + dcc_control != dcc_control__256_256_xxx) + return false; + + switch (dcc_control) { + case dcc_control__256_256_xxx: + output->grph.rgb.max_uncompressed_blk_size = 256; + 
output->grph.rgb.max_compressed_blk_size = 256; + output->grph.rgb.independent_64b_blks = false; + break; + case dcc_control__128_128_xxx: + output->grph.rgb.max_uncompressed_blk_size = 128; + output->grph.rgb.max_compressed_blk_size = 128; + output->grph.rgb.independent_64b_blks = false; + break; + case dcc_control__256_64_64: + output->grph.rgb.max_uncompressed_blk_size = 256; + output->grph.rgb.max_compressed_blk_size = 64; + output->grph.rgb.independent_64b_blks = true; + break; + default: + ASSERT(false); + break; + } + + output->capable = true; + output->const_color_support = false; + + return true; +} + +static const struct hubbub_funcs hubbub1_funcs = { + .update_dchub = hubbub1_update_dchub, + .dcc_support_swizzle = hubbub1_dcc_support_swizzle, + .dcc_support_pixel_format = hubbub1_dcc_support_pixel_format, + .get_dcc_compression_cap = hubbub1_get_dcc_compression_cap, + .wm_read_state = hubbub1_wm_read_state, + .program_watermarks = hubbub1_program_watermarks, + .is_allow_self_refresh_enabled = hubbub1_is_allow_self_refresh_enabled, + .allow_self_refresh_control = hubbub1_allow_self_refresh_control, + .verify_allow_pstate_change_high = hubbub1_verify_allow_pstate_change_high, +}; + +void hubbub1_construct(struct hubbub *hubbub, + struct dc_context *ctx, + const struct dcn_hubbub_registers *hubbub_regs, + const struct dcn_hubbub_shift *hubbub_shift, + const struct dcn_hubbub_mask *hubbub_mask) +{ + struct dcn10_hubbub *hubbub1 = TO_DCN10_HUBBUB(hubbub); + + hubbub1->base.ctx = ctx; + + hubbub1->base.funcs = &hubbub1_funcs; + + hubbub1->regs = hubbub_regs; + hubbub1->shifts = hubbub_shift; + hubbub1->masks = hubbub_mask; + + hubbub1->debug_test_index_pstate = 0x7; + if (ctx->dce_version == DCN_VERSION_1_01) + hubbub1->debug_test_index_pstate = 0xB; +} + diff --git a/drivers/gpu/drm/amd/display/dc/hubbub/dcn10/dcn10_hubbub.h b/drivers/gpu/drm/amd/display/dc/hubbub/dcn10/dcn10_hubbub.h new file mode 100644 index 000000000000..a1e2cde9c4cc --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/hubbub/dcn10/dcn10_hubbub.h @@ -0,0 +1,508 @@ +/* + * Copyright 2016 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: AMD + * + */ + +#ifndef __DC_HUBBUB_DCN10_H__ +#define __DC_HUBBUB_DCN10_H__ + +#include "core_types.h" +#include "dchubbub.h" + +#define TO_DCN10_HUBBUB(hubbub)\ + container_of(hubbub, struct dcn10_hubbub, base) + +#define HUBBUB_REG_LIST_DCN_COMMON()\ + SR(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A),\ + SR(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A),\ + SR(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B),\ + SR(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B),\ + SR(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C),\ + SR(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C),\ + SR(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D),\ + SR(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D),\ + SR(DCHUBBUB_ARB_WATERMARK_CHANGE_CNTL),\ + SR(DCHUBBUB_ARB_DRAM_STATE_CNTL),\ + SR(DCHUBBUB_ARB_SAT_LEVEL),\ + SR(DCHUBBUB_ARB_DF_REQ_OUTSTAND),\ + SR(DCHUBBUB_GLOBAL_TIMER_CNTL), \ + SR(DCHUBBUB_TEST_DEBUG_INDEX), \ + SR(DCHUBBUB_TEST_DEBUG_DATA),\ + SR(DCHUBBUB_SOFT_RESET) + +#define HUBBUB_VM_REG_LIST() \ + SR(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_A),\ + SR(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_B),\ + SR(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_C),\ + SR(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_D) + +#define HUBBUB_SR_WATERMARK_REG_LIST()\ + SR(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A),\ + SR(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A),\ + SR(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B),\ + SR(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B),\ + SR(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C),\ + SR(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C),\ + SR(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D),\ + SR(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D) + +#define HUBBUB_REG_LIST_DCN10(id)\ + HUBBUB_REG_LIST_DCN_COMMON(), \ + HUBBUB_VM_REG_LIST(), \ + HUBBUB_SR_WATERMARK_REG_LIST(), \ + SR(DCHUBBUB_SDPIF_FB_TOP),\ + SR(DCHUBBUB_SDPIF_FB_BASE),\ + SR(DCHUBBUB_SDPIF_FB_OFFSET),\ + SR(DCHUBBUB_SDPIF_AGP_BASE),\ + SR(DCHUBBUB_SDPIF_AGP_BOT),\ + SR(DCHUBBUB_SDPIF_AGP_TOP) + +struct dcn_hubbub_registers { + uint32_t DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A; + uint32_t DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_A; + uint32_t DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A; + uint32_t DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A; + uint32_t DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A; + uint32_t DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B; + uint32_t DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_B; + uint32_t DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B; + uint32_t DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B; + uint32_t DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B; + uint32_t DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C; + uint32_t DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_C; + uint32_t DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C; + uint32_t DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C; + uint32_t DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C; + uint32_t DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D; + uint32_t DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_D; + uint32_t DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D; + uint32_t DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D; + uint32_t DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D; + uint32_t DCHUBBUB_ARB_WATERMARK_CHANGE_CNTL; + uint32_t DCHUBBUB_ARB_SAT_LEVEL; + uint32_t DCHUBBUB_ARB_DF_REQ_OUTSTAND; + uint32_t DCHUBBUB_GLOBAL_TIMER_CNTL; + uint32_t DCHUBBUB_ARB_DRAM_STATE_CNTL; + uint32_t DCHUBBUB_TEST_DEBUG_INDEX; + uint32_t DCHUBBUB_TEST_DEBUG_DATA; + uint32_t DCHUBBUB_SDPIF_FB_TOP; + uint32_t DCHUBBUB_SDPIF_FB_BASE; + uint32_t DCHUBBUB_SDPIF_FB_OFFSET; + uint32_t DCHUBBUB_SDPIF_AGP_BASE; + uint32_t DCHUBBUB_SDPIF_AGP_BOT; + uint32_t DCHUBBUB_SDPIF_AGP_TOP; + uint32_t DCHUBBUB_CRC_CTRL; + uint32_t 
DCHUBBUB_SOFT_RESET; + uint32_t DCN_VM_FB_LOCATION_BASE; + uint32_t DCN_VM_FB_LOCATION_TOP; + uint32_t DCN_VM_FB_OFFSET; + uint32_t DCN_VM_AGP_BOT; + uint32_t DCN_VM_AGP_TOP; + uint32_t DCN_VM_AGP_BASE; + uint32_t DCN_VM_PROTECTION_FAULT_DEFAULT_ADDR_MSB; + uint32_t DCN_VM_PROTECTION_FAULT_DEFAULT_ADDR_LSB; + uint32_t DCN_VM_FAULT_ADDR_MSB; + uint32_t DCN_VM_FAULT_ADDR_LSB; + uint32_t DCN_VM_FAULT_CNTL; + uint32_t DCN_VM_FAULT_STATUS; + uint32_t DCHUBBUB_ARB_FRAC_URG_BW_NOM_A; + uint32_t DCHUBBUB_ARB_FRAC_URG_BW_NOM_B; + uint32_t DCHUBBUB_ARB_FRAC_URG_BW_NOM_C; + uint32_t DCHUBBUB_ARB_FRAC_URG_BW_NOM_D; + uint32_t DCHUBBUB_ARB_FRAC_URG_BW_FLIP_A; + uint32_t DCHUBBUB_ARB_FRAC_URG_BW_FLIP_B; + uint32_t DCHUBBUB_ARB_FRAC_URG_BW_FLIP_C; + uint32_t DCHUBBUB_ARB_FRAC_URG_BW_FLIP_D; + uint32_t DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_A; + uint32_t DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_B; + uint32_t DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_C; + uint32_t DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_D; + uint32_t DCHUBBUB_ARB_HOSTVM_CNTL; + uint32_t DCHVM_CTRL0; + uint32_t DCHVM_MEM_CTRL; + uint32_t DCHVM_CLK_CTRL; + uint32_t DCHVM_RIOMMU_CTRL0; + uint32_t DCHVM_RIOMMU_STAT0; + uint32_t DCHUBBUB_DET0_CTRL; + uint32_t DCHUBBUB_DET1_CTRL; + uint32_t DCHUBBUB_DET2_CTRL; + uint32_t DCHUBBUB_DET3_CTRL; + uint32_t DCHUBBUB_COMPBUF_CTRL; + uint32_t COMPBUF_RESERVED_SPACE; + uint32_t DCHUBBUB_DEBUG_CTRL_0; + uint32_t DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_A; + uint32_t DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_A; + uint32_t DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_B; + uint32_t DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_B; + uint32_t DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_C; + uint32_t DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_C; + uint32_t DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_D; + uint32_t DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_D; + uint32_t DCHUBBUB_ARB_USR_RETRAINING_CNTL; + uint32_t DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_A; + uint32_t DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_B; + uint32_t DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_C; + uint32_t DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_D; + uint32_t DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_A; + uint32_t DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_B; + uint32_t DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_C; + uint32_t DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_D; + uint32_t DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_A; + uint32_t DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_B; + uint32_t DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_C; + uint32_t DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_D; + uint32_t DCHUBBUB_ARB_MALL_CNTL; + uint32_t SDPIF_REQUEST_RATE_LIMIT; + uint32_t DCHUBBUB_SDPIF_CFG0; + uint32_t DCHUBBUB_SDPIF_CFG1; + uint32_t DCHUBBUB_CLOCK_CNTL; + uint32_t DCHUBBUB_MEM_PWR_MODE_CTRL; + uint32_t DCHUBBUB_ARB_QOS_FORCE; + uint32_t DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK1_A; + uint32_t DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK1_A; + uint32_t DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK1_B; + uint32_t DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK1_B; + uint32_t DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK2_A; + uint32_t DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK2_A; + uint32_t DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK2_B; + uint32_t DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK2_B; + uint32_t DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK3_A; + uint32_t DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK3_A; + uint32_t DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK3_B; + uint32_t DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK3_B; + uint32_t DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK1_A; + uint32_t DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK1_B; + uint32_t 
DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK1_A; + uint32_t DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK1_B; + uint32_t DCHUBBUB_ARB_REFCYC_PER_META_TRIP_A; + uint32_t DCHUBBUB_ARB_REFCYC_PER_META_TRIP_B; + uint32_t DCHUBBUB_ARB_FRAC_URG_BW_MALL_A; + uint32_t DCHUBBUB_ARB_FRAC_URG_BW_MALL_B; +}; + +#define HUBBUB_REG_FIELD_LIST_DCN32(type) \ + type DCHUBBUB_ARB_ALLOW_USR_RETRAINING_FORCE_VALUE;\ + type DCHUBBUB_ARB_ALLOW_USR_RETRAINING_FORCE_ENABLE;\ + type DCHUBBUB_ARB_DO_NOT_FORCE_ALLOW_USR_RETRAINING_DURING_PSTATE_CHANGE_REQUEST;\ + type DCHUBBUB_ARB_DO_NOT_FORCE_ALLOW_USR_RETRAINING_DURING_PRE_CSTATE;\ + type DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_A;\ + type DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_B;\ + type DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_C;\ + type DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_D;\ + type DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_A;\ + type DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_B;\ + type DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_C;\ + type DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_D;\ + type DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_A;\ + type DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_B;\ + type DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_C;\ + type DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_D;\ + type MALL_PREFETCH_COMPLETE;\ + type MALL_IN_USE + +#define HUBBUB_REG_FIELD_LIST_DCN35(type) \ + type DCHUBBUB_FGCG_REP_DIS;\ + type DCHUBBUB_ARB_ALLOW_CSTATE_DEEPSLEEP_LEGACY_MODE + +/* set field name */ +#define HUBBUB_SF(reg_name, field_name, post_fix)\ + .field_name = reg_name ## __ ## field_name ## post_fix + +#define HUBBUB_MASK_SH_LIST_DCN_COMMON(mask_sh)\ + HUBBUB_SF(DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_ENABLE, mask_sh), \ + HUBBUB_SF(DCHUBBUB_SOFT_RESET, DCHUBBUB_GLOBAL_SOFT_RESET, mask_sh), \ + HUBBUB_SF(DCHUBBUB_ARB_WATERMARK_CHANGE_CNTL, DCHUBBUB_ARB_WATERMARK_CHANGE_REQUEST, mask_sh), \ + HUBBUB_SF(DCHUBBUB_ARB_WATERMARK_CHANGE_CNTL, DCHUBBUB_ARB_WATERMARK_CHANGE_DONE_INTERRUPT_DISABLE, mask_sh), \ + HUBBUB_SF(DCHUBBUB_ARB_DRAM_STATE_CNTL, DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_VALUE, mask_sh), \ + HUBBUB_SF(DCHUBBUB_ARB_DRAM_STATE_CNTL, DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_ENABLE, mask_sh), \ + HUBBUB_SF(DCHUBBUB_ARB_DRAM_STATE_CNTL, DCHUBBUB_ARB_ALLOW_PSTATE_CHANGE_FORCE_VALUE, mask_sh), \ + HUBBUB_SF(DCHUBBUB_ARB_DRAM_STATE_CNTL, DCHUBBUB_ARB_ALLOW_PSTATE_CHANGE_FORCE_ENABLE, mask_sh), \ + HUBBUB_SF(DCHUBBUB_ARB_SAT_LEVEL, DCHUBBUB_ARB_SAT_LEVEL, mask_sh), \ + HUBBUB_SF(DCHUBBUB_ARB_DF_REQ_OUTSTAND, DCHUBBUB_ARB_MIN_REQ_OUTSTAND, mask_sh), \ + HUBBUB_SF(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A, DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A, mask_sh), \ + HUBBUB_SF(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B, DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B, mask_sh), \ + HUBBUB_SF(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C, DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C, mask_sh), \ + HUBBUB_SF(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D, DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D, mask_sh), \ + HUBBUB_SF(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A, DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A, mask_sh), \ + HUBBUB_SF(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B, DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B, mask_sh), \ + HUBBUB_SF(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C, DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C, mask_sh), \ + HUBBUB_SF(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D, DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D, mask_sh) + +#define HUBBUB_MASK_SH_LIST_STUTTER(mask_sh) \ + HUBBUB_SF(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A, DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A, 
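/* the mask_sh argument is __SHIFT or _MASK at instantiation; HUBBUB_SF pastes reg ## __ ## field ## mask_sh, so this entry expands to .DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A = DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A__DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A__SHIFT (or _MASK) */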
mask_sh), \ + HUBBUB_SF(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B, DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B, mask_sh), \ + HUBBUB_SF(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C, DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C, mask_sh), \ + HUBBUB_SF(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D, DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D, mask_sh), \ + HUBBUB_SF(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A, DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A, mask_sh), \ + HUBBUB_SF(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B, DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B, mask_sh), \ + HUBBUB_SF(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C, DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C, mask_sh), \ + HUBBUB_SF(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D, DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D, mask_sh) + +#define HUBBUB_MASK_SH_LIST_DCN10(mask_sh)\ + HUBBUB_MASK_SH_LIST_DCN_COMMON(mask_sh), \ + HUBBUB_MASK_SH_LIST_STUTTER(mask_sh), \ + HUBBUB_SF(DCHUBBUB_SDPIF_FB_TOP, SDPIF_FB_TOP, mask_sh), \ + HUBBUB_SF(DCHUBBUB_SDPIF_FB_BASE, SDPIF_FB_BASE, mask_sh), \ + HUBBUB_SF(DCHUBBUB_SDPIF_FB_OFFSET, SDPIF_FB_OFFSET, mask_sh), \ + HUBBUB_SF(DCHUBBUB_SDPIF_AGP_BASE, SDPIF_AGP_BASE, mask_sh), \ + HUBBUB_SF(DCHUBBUB_SDPIF_AGP_BOT, SDPIF_AGP_BOT, mask_sh), \ + HUBBUB_SF(DCHUBBUB_SDPIF_AGP_TOP, SDPIF_AGP_TOP, mask_sh) + +#define DCN_HUBBUB_REG_FIELD_LIST(type) \ + type DCHUBBUB_GLOBAL_TIMER_ENABLE; \ + type DCHUBBUB_ARB_WATERMARK_CHANGE_REQUEST;\ + type DCHUBBUB_ARB_WATERMARK_CHANGE_DONE_INTERRUPT_DISABLE;\ + type DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_VALUE;\ + type DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_ENABLE;\ + type DCHUBBUB_ARB_ALLOW_PSTATE_CHANGE_FORCE_VALUE;\ + type DCHUBBUB_ARB_ALLOW_PSTATE_CHANGE_FORCE_ENABLE;\ + type DCHUBBUB_ARB_SAT_LEVEL;\ + type DCHUBBUB_ARB_MIN_REQ_OUTSTAND;\ + type DCHUBBUB_GLOBAL_TIMER_REFDIV;\ + type DCHUBBUB_GLOBAL_SOFT_RESET; \ + type SDPIF_FB_TOP;\ + type SDPIF_FB_BASE;\ + type SDPIF_FB_OFFSET;\ + type SDPIF_AGP_BASE;\ + type SDPIF_AGP_BOT;\ + type SDPIF_AGP_TOP;\ + type FB_BASE;\ + type FB_TOP;\ + type FB_OFFSET;\ + type AGP_BOT;\ + type AGP_TOP;\ + type AGP_BASE;\ + type DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A;\ + type DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B;\ + type DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C;\ + type DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D;\ + type DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A;\ + type DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B;\ + type DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C;\ + type DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D;\ + type DCN_VM_PROTECTION_FAULT_DEFAULT_ADDR_MSB;\ + type DCN_VM_PROTECTION_FAULT_DEFAULT_ADDR_LSB;\ + type DCN_VM_FAULT_ADDR_MSB;\ + type DCN_VM_FAULT_ADDR_LSB;\ + type DCN_VM_ERROR_STATUS_CLEAR;\ + type DCN_VM_ERROR_STATUS_MODE;\ + type DCN_VM_ERROR_INTERRUPT_ENABLE;\ + type DCN_VM_RANGE_FAULT_DISABLE;\ + type DCN_VM_PRQ_FAULT_DISABLE;\ + type DCN_VM_ERROR_STATUS;\ + type DCN_VM_ERROR_VMID;\ + type DCN_VM_ERROR_TABLE_LEVEL;\ + type DCN_VM_ERROR_PIPE;\ + type DCN_VM_ERROR_INTERRUPT_STATUS + +#define HUBBUB_STUTTER_REG_FIELD_LIST(type) \ + type DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A;\ + type DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B;\ + type DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C;\ + type DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D;\ + type DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A;\ + type DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B;\ + type DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C;\ + type DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D + + +#define HUBBUB_HVM_REG_FIELD_LIST(type) \ + type DCHUBBUB_ARB_MIN_REQ_OUTSTAND_COMMIT_THRESHOLD;\ + type DCHUBBUB_ARB_VM_ROW_URGENCY_WATERMARK_A;\ + type 
DCHUBBUB_ARB_VM_ROW_URGENCY_WATERMARK_B;\ + type DCHUBBUB_ARB_VM_ROW_URGENCY_WATERMARK_C;\ + type DCHUBBUB_ARB_VM_ROW_URGENCY_WATERMARK_D;\ + type DCHUBBUB_ARB_VM_ROW_ALLOW_SR_ENTER_WATERMARK_A;\ + type DCHUBBUB_ARB_VM_ROW_ALLOW_SR_ENTER_WATERMARK_B;\ + type DCHUBBUB_ARB_VM_ROW_ALLOW_SR_ENTER_WATERMARK_C;\ + type DCHUBBUB_ARB_VM_ROW_ALLOW_SR_ENTER_WATERMARK_D;\ + type DCHUBBUB_ARB_VM_ROW_ALLOW_SR_EXIT_WATERMARK_A;\ + type DCHUBBUB_ARB_VM_ROW_ALLOW_SR_EXIT_WATERMARK_B;\ + type DCHUBBUB_ARB_VM_ROW_ALLOW_SR_EXIT_WATERMARK_C;\ + type DCHUBBUB_ARB_VM_ROW_ALLOW_SR_EXIT_WATERMARK_D;\ + type DCHUBBUB_ARB_VM_ROW_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A;\ + type DCHUBBUB_ARB_VM_ROW_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B;\ + type DCHUBBUB_ARB_VM_ROW_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C;\ + type DCHUBBUB_ARB_VM_ROW_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D;\ + type DCHUBBUB_ARB_FRAC_URG_BW_NOM_A;\ + type DCHUBBUB_ARB_FRAC_URG_BW_NOM_B;\ + type DCHUBBUB_ARB_FRAC_URG_BW_NOM_C;\ + type DCHUBBUB_ARB_FRAC_URG_BW_NOM_D;\ + type DCHUBBUB_ARB_FRAC_URG_BW_FLIP_A;\ + type DCHUBBUB_ARB_FRAC_URG_BW_FLIP_B;\ + type DCHUBBUB_ARB_FRAC_URG_BW_FLIP_C;\ + type DCHUBBUB_ARB_FRAC_URG_BW_FLIP_D;\ + type DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_A;\ + type DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_B;\ + type DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_C;\ + type DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_D;\ + type DCHUBBUB_ARB_MAX_QOS_COMMIT_THRESHOLD;\ + type HOSTVM_INIT_REQ; \ + type HVM_GPUVMRET_PWR_REQ_DIS; \ + type HVM_GPUVMRET_FORCE_REQ; \ + type HVM_GPUVMRET_POWER_STATUS; \ + type HVM_DISPCLK_R_GATE_DIS; \ + type HVM_DISPCLK_G_GATE_DIS; \ + type HVM_DCFCLK_R_GATE_DIS; \ + type HVM_DCFCLK_G_GATE_DIS; \ + type TR_REQ_REQCLKREQ_MODE; \ + type TW_RSP_COMPCLKREQ_MODE; \ + type HOSTVM_PREFETCH_REQ; \ + type HOSTVM_POWERSTATUS; \ + type RIOMMU_ACTIVE; \ + type HOSTVM_PREFETCH_DONE + +#define HUBBUB_RET_REG_FIELD_LIST(type) \ + type DET_DEPTH;\ + type DET0_SIZE;\ + type DET1_SIZE;\ + type DET2_SIZE;\ + type DET3_SIZE;\ + type DET0_SIZE_CURRENT;\ + type DET1_SIZE_CURRENT;\ + type DET2_SIZE_CURRENT;\ + type DET3_SIZE_CURRENT;\ + type COMPBUF_SIZE;\ + type COMPBUF_SIZE_CURRENT;\ + type CONFIG_ERROR;\ + type COMPBUF_RESERVED_SPACE_64B;\ + type COMPBUF_RESERVED_SPACE_ZS;\ + type DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_A;\ + type DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_A;\ + type DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_B;\ + type DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_B;\ + type DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_C;\ + type DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_C;\ + type DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_D;\ + type DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_D;\ + type SDPIF_REQUEST_RATE_LIMIT;\ + type DISPCLK_R_DCHUBBUB_GATE_DIS;\ + type DCFCLK_R_DCHUBBUB_GATE_DIS;\ + type SDPIF_MAX_NUM_OUTSTANDING;\ + type DCHUBBUB_ARB_MAX_REQ_OUTSTAND;\ + type SDPIF_PORT_CONTROL;\ + type DET_MEM_PWR_LS_MODE + + +#define HUBBUB_REG_FIELD_LIST_DCN4_01(type) \ + type DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK1_A;\ + type DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK1_A;\ + type DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK1_B;\ + type DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK1_B;\ + type DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK2_A;\ + type DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK2_A;\ + type DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK2_B;\ + type DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK2_B;\ + type DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK3_A;\ + type DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK3_A;\ + type DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK3_B;\ + type DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK3_B;\ + type 
DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK1_A;\ + type DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK1_B;\ + type DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK1_A;\ + type DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK1_B;\ + type DCHUBBUB_ARB_REFCYC_PER_META_TRIP_A;\ + type DCHUBBUB_ARB_REFCYC_PER_META_TRIP_B;\ + type DCHUBBUB_ARB_FRAC_URG_BW_MALL_A;\ + type DCHUBBUB_ARB_FRAC_URG_BW_MALL_B + +struct dcn_hubbub_shift { + DCN_HUBBUB_REG_FIELD_LIST(uint8_t); + HUBBUB_STUTTER_REG_FIELD_LIST(uint8_t); + HUBBUB_HVM_REG_FIELD_LIST(uint8_t); + HUBBUB_RET_REG_FIELD_LIST(uint8_t); + HUBBUB_REG_FIELD_LIST_DCN32(uint8_t); + HUBBUB_REG_FIELD_LIST_DCN35(uint8_t); + HUBBUB_REG_FIELD_LIST_DCN4_01(uint8_t); +}; + +struct dcn_hubbub_mask { + DCN_HUBBUB_REG_FIELD_LIST(uint32_t); + HUBBUB_STUTTER_REG_FIELD_LIST(uint32_t); + HUBBUB_HVM_REG_FIELD_LIST(uint32_t); + HUBBUB_RET_REG_FIELD_LIST(uint32_t); + HUBBUB_REG_FIELD_LIST_DCN32(uint32_t); + HUBBUB_REG_FIELD_LIST_DCN35(uint32_t); + HUBBUB_REG_FIELD_LIST_DCN4_01(uint32_t); +}; + +struct dc; + +struct dcn10_hubbub { + struct hubbub base; + const struct dcn_hubbub_registers *regs; + const struct dcn_hubbub_shift *shifts; + const struct dcn_hubbub_mask *masks; + unsigned int debug_test_index_pstate; + union dcn_watermark_set watermarks; +}; + +void hubbub1_update_dchub( + struct hubbub *hubbub, + struct dchub_init_data *dh_data); + +bool hubbub1_verify_allow_pstate_change_high( + struct hubbub *hubbub); + +void hubbub1_wm_change_req_wa(struct hubbub *hubbub); + +bool hubbub1_program_watermarks( + struct hubbub *hubbub, + union dcn_watermark_set *watermarks, + unsigned int refclk_mhz, + bool safe_to_lower); + +void hubbub1_allow_self_refresh_control(struct hubbub *hubbub, bool allow); + +bool hubbub1_is_allow_self_refresh_enabled(struct hubbub *hubub); + +void hubbub1_toggle_watermark_change_req( + struct hubbub *hubbub); + +void hubbub1_wm_read_state(struct hubbub *hubbub, + struct dcn_hubbub_wm *wm); + +void hubbub1_soft_reset(struct hubbub *hubbub, bool reset); +void hubbub1_construct(struct hubbub *hubbub, + struct dc_context *ctx, + const struct dcn_hubbub_registers *hubbub_regs, + const struct dcn_hubbub_shift *hubbub_shift, + const struct dcn_hubbub_mask *hubbub_mask); + +bool hubbub1_program_urgent_watermarks( + struct hubbub *hubbub, + union dcn_watermark_set *watermarks, + unsigned int refclk_mhz, + bool safe_to_lower); +bool hubbub1_program_stutter_watermarks( + struct hubbub *hubbub, + union dcn_watermark_set *watermarks, + unsigned int refclk_mhz, + bool safe_to_lower); +bool hubbub1_program_pstate_watermarks( + struct hubbub *hubbub, + union dcn_watermark_set *watermarks, + unsigned int refclk_mhz, + bool safe_to_lower); + +#endif diff --git a/drivers/gpu/drm/amd/display/dc/hubbub/dcn20/dcn20_hubbub.c b/drivers/gpu/drm/amd/display/dc/hubbub/dcn20/dcn20_hubbub.c new file mode 100644 index 000000000000..c6f859871d11 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/hubbub/dcn20/dcn20_hubbub.c @@ -0,0 +1,670 @@ +/* + * Copyright 2016 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + + +#include "dcn20_hubbub.h" +#include "reg_helper.h" +#include "clk_mgr.h" + +#define REG(reg)\ + hubbub1->regs->reg + +#define CTX \ + hubbub1->base.ctx + +#undef FN +#define FN(reg_name, field_name) \ + hubbub1->shifts->field_name, hubbub1->masks->field_name + +#define REG(reg)\ + hubbub1->regs->reg + +#define CTX \ + hubbub1->base.ctx + +#undef FN +#define FN(reg_name, field_name) \ + hubbub1->shifts->field_name, hubbub1->masks->field_name + +#ifdef NUM_VMID +#undef NUM_VMID +#endif +#define NUM_VMID 16 + +bool hubbub2_dcc_support_swizzle( + enum swizzle_mode_values swizzle, + unsigned int bytes_per_element, + enum segment_order *segment_order_horz, + enum segment_order *segment_order_vert) +{ + bool standard_swizzle = false; + bool display_swizzle = false; + bool render_swizzle = false; + + switch (swizzle) { + case DC_SW_4KB_S: + case DC_SW_64KB_S: + case DC_SW_VAR_S: + case DC_SW_4KB_S_X: + case DC_SW_64KB_S_X: + case DC_SW_VAR_S_X: + standard_swizzle = true; + break; + case DC_SW_64KB_R_X: + render_swizzle = true; + break; + case DC_SW_4KB_D: + case DC_SW_64KB_D: + case DC_SW_VAR_D: + case DC_SW_4KB_D_X: + case DC_SW_64KB_D_X: + case DC_SW_VAR_D_X: + display_swizzle = true; + break; + default: + break; + } + + if (standard_swizzle) { + if (bytes_per_element == 1) { + *segment_order_horz = segment_order__contiguous; + *segment_order_vert = segment_order__na; + return true; + } + if (bytes_per_element == 2) { + *segment_order_horz = segment_order__non_contiguous; + *segment_order_vert = segment_order__contiguous; + return true; + } + if (bytes_per_element == 4) { + *segment_order_horz = segment_order__non_contiguous; + *segment_order_vert = segment_order__contiguous; + return true; + } + if (bytes_per_element == 8) { + *segment_order_horz = segment_order__na; + *segment_order_vert = segment_order__contiguous; + return true; + } + } + if (render_swizzle) { + if (bytes_per_element == 2) { + *segment_order_horz = segment_order__contiguous; + *segment_order_vert = segment_order__contiguous; + return true; + } + if (bytes_per_element == 4) { + *segment_order_horz = segment_order__non_contiguous; + *segment_order_vert = segment_order__contiguous; + return true; + } + if (bytes_per_element == 8) { + *segment_order_horz = segment_order__contiguous; + *segment_order_vert = segment_order__non_contiguous; + return true; + } + } + if (display_swizzle && bytes_per_element == 8) { + 
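/* only the 8 bytes-per-element case of the display swizzles gets DCC support here: horizontal access order is contiguous, vertical is not */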
*segment_order_horz = segment_order__contiguous; + *segment_order_vert = segment_order__non_contiguous; + return true; + } + + return false; +} + +bool hubbub2_dcc_support_pixel_format( + enum surface_pixel_format format, + unsigned int *bytes_per_element) +{ + /* DML: get_bytes_per_element */ + switch (format) { + case SURFACE_PIXEL_FORMAT_GRPH_ARGB1555: + case SURFACE_PIXEL_FORMAT_GRPH_RGB565: + *bytes_per_element = 2; + return true; + case SURFACE_PIXEL_FORMAT_GRPH_ARGB8888: + case SURFACE_PIXEL_FORMAT_GRPH_ABGR8888: + case SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010: + case SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010: + case SURFACE_PIXEL_FORMAT_GRPH_RGB111110_FIX: + case SURFACE_PIXEL_FORMAT_GRPH_BGR101111_FIX: + case SURFACE_PIXEL_FORMAT_GRPH_RGB111110_FLOAT: + case SURFACE_PIXEL_FORMAT_GRPH_BGR101111_FLOAT: + case SURFACE_PIXEL_FORMAT_GRPH_RGBE: + case SURFACE_PIXEL_FORMAT_GRPH_RGBE_ALPHA: + *bytes_per_element = 4; + return true; + case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616: + case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616: + case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F: + case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F: + *bytes_per_element = 8; + return true; + default: + return false; + } +} + +static void hubbub2_get_blk256_size(unsigned int *blk256_width, unsigned int *blk256_height, + unsigned int bytes_per_element) +{ + /* copied from DML. might want to refactor DML to leverage from DML */ + /* DML : get_blk256_size */ + if (bytes_per_element == 1) { + *blk256_width = 16; + *blk256_height = 16; + } else if (bytes_per_element == 2) { + *blk256_width = 16; + *blk256_height = 8; + } else if (bytes_per_element == 4) { + *blk256_width = 8; + *blk256_height = 8; + } else if (bytes_per_element == 8) { + *blk256_width = 8; + *blk256_height = 4; + } +} + +static void hubbub2_det_request_size( + unsigned int detile_buf_size, + unsigned int height, + unsigned int width, + unsigned int bpe, + bool *req128_horz_wc, + bool *req128_vert_wc) +{ + unsigned int blk256_height = 0; + unsigned int blk256_width = 0; + unsigned int swath_bytes_horz_wc, swath_bytes_vert_wc; + + hubbub2_get_blk256_size(&blk256_width, &blk256_height, bpe); + + swath_bytes_horz_wc = width * blk256_height * bpe; + swath_bytes_vert_wc = height * blk256_width * bpe; + + *req128_horz_wc = (2 * swath_bytes_horz_wc <= detile_buf_size) ? + false : /* full 256B request */ + true; /* half 128b request */ + + *req128_vert_wc = (2 * swath_bytes_vert_wc <= detile_buf_size) ? 
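/* two vertical-access swaths must fit in the detile buffer to keep full 256B requests */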
+ false : /* full 256B request */ + true; /* half 128b request */ +} + +bool hubbub2_get_dcc_compression_cap(struct hubbub *hubbub, + const struct dc_dcc_surface_param *input, + struct dc_surface_dcc_cap *output) +{ + struct dc *dc = hubbub->ctx->dc; + /* implement section 1.6.2.1 of DCN1_Programming_Guide.docx */ + enum dcc_control dcc_control; + unsigned int bpe; + enum segment_order segment_order_horz, segment_order_vert; + bool req128_horz_wc, req128_vert_wc; + + memset(output, 0, sizeof(*output)); + + if (dc->debug.disable_dcc == DCC_DISABLE) + return false; + + if (!hubbub->funcs->dcc_support_pixel_format(input->format, + &bpe)) + return false; + + if (!hubbub->funcs->dcc_support_swizzle(input->swizzle_mode, bpe, + &segment_order_horz, &segment_order_vert)) + return false; + + hubbub2_det_request_size(TO_DCN20_HUBBUB(hubbub)->detile_buf_size, + input->surface_size.height, input->surface_size.width, + bpe, &req128_horz_wc, &req128_vert_wc); + + if (!req128_horz_wc && !req128_vert_wc) { + dcc_control = dcc_control__256_256_xxx; + } else if (input->scan == SCAN_DIRECTION_HORIZONTAL) { + if (!req128_horz_wc) + dcc_control = dcc_control__256_256_xxx; + else if (segment_order_horz == segment_order__contiguous) + dcc_control = dcc_control__128_128_xxx; + else + dcc_control = dcc_control__256_64_64; + } else if (input->scan == SCAN_DIRECTION_VERTICAL) { + if (!req128_vert_wc) + dcc_control = dcc_control__256_256_xxx; + else if (segment_order_vert == segment_order__contiguous) + dcc_control = dcc_control__128_128_xxx; + else + dcc_control = dcc_control__256_64_64; + } else { + if ((req128_horz_wc && + segment_order_horz == segment_order__non_contiguous) || + (req128_vert_wc && + segment_order_vert == segment_order__non_contiguous)) + /* access_dir not known, must use most constraining */ + dcc_control = dcc_control__256_64_64; + else + /* reg128 is true for either horz and vert + * but segment_order is contiguous + */ + dcc_control = dcc_control__128_128_xxx; + } + + /* Exception for 64KB_R_X */ + if ((bpe == 2) && (input->swizzle_mode == DC_SW_64KB_R_X)) + dcc_control = dcc_control__128_128_xxx; + + if (dc->debug.disable_dcc == DCC_HALF_REQ_DISALBE && + dcc_control != dcc_control__256_256_xxx) + return false; + + switch (dcc_control) { + case dcc_control__256_256_xxx: + output->grph.rgb.max_uncompressed_blk_size = 256; + output->grph.rgb.max_compressed_blk_size = 256; + output->grph.rgb.independent_64b_blks = false; + break; + case dcc_control__128_128_xxx: + output->grph.rgb.max_uncompressed_blk_size = 128; + output->grph.rgb.max_compressed_blk_size = 128; + output->grph.rgb.independent_64b_blks = false; + break; + case dcc_control__256_64_64: + output->grph.rgb.max_uncompressed_blk_size = 256; + output->grph.rgb.max_compressed_blk_size = 64; + output->grph.rgb.independent_64b_blks = true; + break; + default: + ASSERT(false); + break; + } + output->capable = true; + output->const_color_support = true; + + return true; +} + +static enum dcn_hubbub_page_table_depth page_table_depth_to_hw(unsigned int page_table_depth) +{ + enum dcn_hubbub_page_table_depth depth = 0; + + switch (page_table_depth) { + case 1: + depth = DCN_PAGE_TABLE_DEPTH_1_LEVEL; + break; + case 2: + depth = DCN_PAGE_TABLE_DEPTH_2_LEVEL; + break; + case 3: + depth = DCN_PAGE_TABLE_DEPTH_3_LEVEL; + break; + case 4: + depth = DCN_PAGE_TABLE_DEPTH_4_LEVEL; + break; + default: + ASSERT(false); + break; + } + + return depth; +} + +static enum dcn_hubbub_page_table_block_size page_table_block_size_to_hw(unsigned int 
page_table_block_size) +{ + enum dcn_hubbub_page_table_block_size block_size = 0; + + switch (page_table_block_size) { + case 4096: + block_size = DCN_PAGE_TABLE_BLOCK_SIZE_4KB; + break; + case 65536: + block_size = DCN_PAGE_TABLE_BLOCK_SIZE_64KB; + break; + case 32768: + block_size = DCN_PAGE_TABLE_BLOCK_SIZE_32KB; + break; + default: + ASSERT(false); + block_size = page_table_block_size; + break; + } + + return block_size; +} + +void hubbub2_init_vm_ctx(struct hubbub *hubbub, + struct dcn_hubbub_virt_addr_config *va_config, + int vmid) +{ + struct dcn20_hubbub *hubbub1 = TO_DCN20_HUBBUB(hubbub); + struct dcn_vmid_page_table_config virt_config; + + virt_config.page_table_start_addr = va_config->page_table_start_addr >> 12; + virt_config.page_table_end_addr = va_config->page_table_end_addr >> 12; + virt_config.depth = page_table_depth_to_hw(va_config->page_table_depth); + virt_config.block_size = page_table_block_size_to_hw(va_config->page_table_block_size); + virt_config.page_table_base_addr = va_config->page_table_base_addr; + + dcn20_vmid_setup(&hubbub1->vmid[vmid], &virt_config); +} + +int hubbub2_init_dchub_sys_ctx(struct hubbub *hubbub, + struct dcn_hubbub_phys_addr_config *pa_config) +{ + struct dcn20_hubbub *hubbub1 = TO_DCN20_HUBBUB(hubbub); + struct dcn_vmid_page_table_config phys_config; + + REG_SET(DCN_VM_FB_LOCATION_BASE, 0, + FB_BASE, pa_config->system_aperture.fb_base >> 24); + REG_SET(DCN_VM_FB_LOCATION_TOP, 0, + FB_TOP, pa_config->system_aperture.fb_top >> 24); + REG_SET(DCN_VM_FB_OFFSET, 0, + FB_OFFSET, pa_config->system_aperture.fb_offset >> 24); + REG_SET(DCN_VM_AGP_BOT, 0, + AGP_BOT, pa_config->system_aperture.agp_bot >> 24); + REG_SET(DCN_VM_AGP_TOP, 0, + AGP_TOP, pa_config->system_aperture.agp_top >> 24); + REG_SET(DCN_VM_AGP_BASE, 0, + AGP_BASE, pa_config->system_aperture.agp_base >> 24); + + REG_SET(DCN_VM_PROTECTION_FAULT_DEFAULT_ADDR_MSB, 0, + DCN_VM_PROTECTION_FAULT_DEFAULT_ADDR_MSB, (pa_config->page_table_default_page_addr >> 44) & 0xF); + REG_SET(DCN_VM_PROTECTION_FAULT_DEFAULT_ADDR_LSB, 0, + DCN_VM_PROTECTION_FAULT_DEFAULT_ADDR_LSB, (pa_config->page_table_default_page_addr >> 12) & 0xFFFFFFFF); + + if (pa_config->gart_config.page_table_start_addr != pa_config->gart_config.page_table_end_addr) { + phys_config.page_table_start_addr = pa_config->gart_config.page_table_start_addr >> 12; + phys_config.page_table_end_addr = pa_config->gart_config.page_table_end_addr >> 12; + phys_config.page_table_base_addr = pa_config->gart_config.page_table_base_addr; + phys_config.depth = 0; + phys_config.block_size = 0; + // Init VMID 0 based on PA config + dcn20_vmid_setup(&hubbub1->vmid[0], &phys_config); + } + + return NUM_VMID; +} + +void hubbub2_update_dchub(struct hubbub *hubbub, + struct dchub_init_data *dh_data) +{ + struct dcn20_hubbub *hubbub1 = TO_DCN20_HUBBUB(hubbub); + + if (REG(DCN_VM_FB_LOCATION_TOP) == 0) + return; + + switch (dh_data->fb_mode) { + case FRAME_BUFFER_MODE_ZFB_ONLY: + /*For ZFB case need to put DCHUB FB BASE and TOP upside down to indicate ZFB mode*/ + REG_UPDATE(DCN_VM_FB_LOCATION_TOP, + FB_TOP, 0); + + REG_UPDATE(DCN_VM_FB_LOCATION_BASE, + FB_BASE, 0xFFFFFF); + + /*This field defines the 24 MSBs, bits [47:24] of the 48 bit AGP Base*/ + REG_UPDATE(DCN_VM_AGP_BASE, + AGP_BASE, dh_data->zfb_phys_addr_base >> 24); + + /*This field defines the bottom range of the AGP aperture and represents the 24*/ + /*MSBs, bits [47:24] of the 48 address bits*/ + REG_UPDATE(DCN_VM_AGP_BOT, + AGP_BOT, dh_data->zfb_mc_base_addr >> 24); + + /*This field defines the 
top range of the AGP aperture and represents the 24*/ + /*MSBs, bits [47:24] of the 48 address bits*/ + REG_UPDATE(DCN_VM_AGP_TOP, + AGP_TOP, (dh_data->zfb_mc_base_addr + + dh_data->zfb_size_in_byte - 1) >> 24); + break; + case FRAME_BUFFER_MODE_MIXED_ZFB_AND_LOCAL: + /*Should not touch FB LOCATION (done by VBIOS on AsicInit table)*/ + + /*This field defines the 24 MSBs, bits [47:24] of the 48 bit AGP Base*/ + REG_UPDATE(DCN_VM_AGP_BASE, + AGP_BASE, dh_data->zfb_phys_addr_base >> 24); + + /*This field defines the bottom range of the AGP aperture and represents the 24*/ + /*MSBs, bits [47:24] of the 48 address bits*/ + REG_UPDATE(DCN_VM_AGP_BOT, + AGP_BOT, dh_data->zfb_mc_base_addr >> 24); + + /*This field defines the top range of the AGP aperture and represents the 24*/ + /*MSBs, bits [47:24] of the 48 address bits*/ + REG_UPDATE(DCN_VM_AGP_TOP, + AGP_TOP, (dh_data->zfb_mc_base_addr + + dh_data->zfb_size_in_byte - 1) >> 24); + break; + case FRAME_BUFFER_MODE_LOCAL_ONLY: + /*Should not touch FB LOCATION (should be done by VBIOS)*/ + + /*This field defines the 24 MSBs, bits [47:24] of the 48 bit AGP Base*/ + REG_UPDATE(DCN_VM_AGP_BASE, + AGP_BASE, 0); + + /*This field defines the bottom range of the AGP aperture and represents the 24*/ + /*MSBs, bits [47:24] of the 48 address bits*/ + REG_UPDATE(DCN_VM_AGP_BOT, + AGP_BOT, 0xFFFFFF); + + /*This field defines the top range of the AGP aperture and represents the 24*/ + /*MSBs, bits [47:24] of the 48 address bits*/ + REG_UPDATE(DCN_VM_AGP_TOP, + AGP_TOP, 0); + break; + default: + break; + } + + dh_data->dchub_initialzied = true; + dh_data->dchub_info_valid = false; +} + +void hubbub2_wm_read_state(struct hubbub *hubbub, + struct dcn_hubbub_wm *wm) +{ + struct dcn20_hubbub *hubbub1 = TO_DCN20_HUBBUB(hubbub); + + struct dcn_hubbub_wm_set *s; + + memset(wm, 0, sizeof(struct dcn_hubbub_wm)); + + s = &wm->sets[0]; + s->wm_set = 0; + s->data_urgent = REG_READ(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A); + if (REG(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_A)) + s->pte_meta_urgent = REG_READ(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_A); + if (REG(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A)) { + s->sr_enter = REG_READ(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A); + s->sr_exit = REG_READ(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A); + } + s->dram_clk_change = REG_READ(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A); + + s = &wm->sets[1]; + s->wm_set = 1; + s->data_urgent = REG_READ(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B); + if (REG(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_B)) + s->pte_meta_urgent = REG_READ(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_B); + if (REG(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B)) { + s->sr_enter = REG_READ(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B); + s->sr_exit = REG_READ(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B); + } + s->dram_clk_change = REG_READ(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B); + + s = &wm->sets[2]; + s->wm_set = 2; + s->data_urgent = REG_READ(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C); + if (REG(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_C)) + s->pte_meta_urgent = REG_READ(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_C); + if (REG(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C)) { + s->sr_enter = REG_READ(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C); + s->sr_exit = REG_READ(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C); + } + s->dram_clk_change = REG_READ(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C); + + s = &wm->sets[3]; + s->wm_set = 3; + s->data_urgent = REG_READ(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D); + if (REG(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_D)) + 
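/* the PTE/meta urgency watermark register is not present on every DCN generation, hence the register-existence check */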
s->pte_meta_urgent = REG_READ(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_D); + if (REG(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D)) { + s->sr_enter = REG_READ(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D); + s->sr_exit = REG_READ(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D); + } + s->dram_clk_change = REG_READ(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D); +} + +void hubbub2_get_dchub_ref_freq(struct hubbub *hubbub, + unsigned int dccg_ref_freq_inKhz, + unsigned int *dchub_ref_freq_inKhz) +{ + struct dcn20_hubbub *hubbub1 = TO_DCN20_HUBBUB(hubbub); + uint32_t ref_div = 0; + uint32_t ref_en = 0; + + REG_GET_2(DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_REFDIV, &ref_div, + DCHUBBUB_GLOBAL_TIMER_ENABLE, &ref_en); + + if (ref_en) { + if (ref_div == 2) + *dchub_ref_freq_inKhz = dccg_ref_freq_inKhz / 2; + else + *dchub_ref_freq_inKhz = dccg_ref_freq_inKhz; + + // DC hub reference frequency must be around 50Mhz, otherwise there may be + // overflow/underflow issues when doing HUBBUB programming + if (*dchub_ref_freq_inKhz < 40000 || *dchub_ref_freq_inKhz > 60000) + ASSERT_CRITICAL(false); + + return; + } else { + *dchub_ref_freq_inKhz = dccg_ref_freq_inKhz; + + // HUBBUB global timer must be enabled. + ASSERT_CRITICAL(false); + return; + } +} + +static bool hubbub2_program_watermarks( + struct hubbub *hubbub, + union dcn_watermark_set *watermarks, + unsigned int refclk_mhz, + bool safe_to_lower) +{ + struct dcn20_hubbub *hubbub1 = TO_DCN20_HUBBUB(hubbub); + bool wm_pending = false; + /* + * Need to clamp to max of the register values (i.e. no wrap) + * for dcn1, all wm registers are 21-bit wide + */ + if (hubbub1_program_urgent_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower)) + wm_pending = true; + + if (hubbub1_program_stutter_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower)) + wm_pending = true; + + /* + * There's a special case when going from p-state support to p-state unsupported + * here we are going to LOWER watermarks to go to dummy p-state only, but this has + * to be done prepare_bandwidth, not optimize + */ + if (hubbub1->base.ctx->dc->clk_mgr->clks.prev_p_state_change_support == true && + hubbub1->base.ctx->dc->clk_mgr->clks.p_state_change_support == false) + safe_to_lower = true; + + hubbub1_program_pstate_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower); + + REG_SET(DCHUBBUB_ARB_SAT_LEVEL, 0, + DCHUBBUB_ARB_SAT_LEVEL, 60 * refclk_mhz); + REG_UPDATE(DCHUBBUB_ARB_DF_REQ_OUTSTAND, DCHUBBUB_ARB_MIN_REQ_OUTSTAND, 180); + + hubbub->funcs->allow_self_refresh_control(hubbub, !hubbub->ctx->dc->debug.disable_stutter); + return wm_pending; +} + +void hubbub2_read_state(struct hubbub *hubbub, struct dcn_hubbub_state *hubbub_state) +{ + struct dcn20_hubbub *hubbub1 = TO_DCN20_HUBBUB(hubbub); + + if (REG(DCN_VM_FAULT_ADDR_MSB)) + hubbub_state->vm_fault_addr_msb = REG_READ(DCN_VM_FAULT_ADDR_MSB); + + if (REG(DCN_VM_FAULT_ADDR_LSB)) + hubbub_state->vm_fault_addr_msb = REG_READ(DCN_VM_FAULT_ADDR_LSB); + + if (REG(DCN_VM_FAULT_CNTL)) + REG_GET(DCN_VM_FAULT_CNTL, DCN_VM_ERROR_STATUS_MODE, &hubbub_state->vm_error_mode); + + if (REG(DCN_VM_FAULT_STATUS)) { + REG_GET(DCN_VM_FAULT_STATUS, DCN_VM_ERROR_STATUS, &hubbub_state->vm_error_status); + REG_GET(DCN_VM_FAULT_STATUS, DCN_VM_ERROR_VMID, &hubbub_state->vm_error_vmid); + REG_GET(DCN_VM_FAULT_STATUS, DCN_VM_ERROR_PIPE, &hubbub_state->vm_error_pipe); + } + + if (REG(DCHUBBUB_TEST_DEBUG_INDEX) && REG(DCHUBBUB_TEST_DEBUG_DATA)) { + REG_WRITE(DCHUBBUB_TEST_DEBUG_INDEX, 0x6); + hubbub_state->test_debug_data = 
REG_READ(DCHUBBUB_TEST_DEBUG_DATA); + } + + if (REG(DCHUBBUB_ARB_WATERMARK_CHANGE_CNTL)) + hubbub_state->watermark_change_cntl = REG_READ(DCHUBBUB_ARB_WATERMARK_CHANGE_CNTL); + + if (REG(DCHUBBUB_ARB_DRAM_STATE_CNTL)) + hubbub_state->dram_state_cntl = REG_READ(DCHUBBUB_ARB_DRAM_STATE_CNTL); +} + +static const struct hubbub_funcs hubbub2_funcs = { + .update_dchub = hubbub2_update_dchub, + .init_dchub_sys_ctx = hubbub2_init_dchub_sys_ctx, + .init_vm_ctx = hubbub2_init_vm_ctx, + .dcc_support_swizzle = hubbub2_dcc_support_swizzle, + .dcc_support_pixel_format = hubbub2_dcc_support_pixel_format, + .get_dcc_compression_cap = hubbub2_get_dcc_compression_cap, + .wm_read_state = hubbub2_wm_read_state, + .get_dchub_ref_freq = hubbub2_get_dchub_ref_freq, + .program_watermarks = hubbub2_program_watermarks, + .is_allow_self_refresh_enabled = hubbub1_is_allow_self_refresh_enabled, + .allow_self_refresh_control = hubbub1_allow_self_refresh_control, + .hubbub_read_state = hubbub2_read_state, +}; + +void hubbub2_construct(struct dcn20_hubbub *hubbub, + struct dc_context *ctx, + const struct dcn_hubbub_registers *hubbub_regs, + const struct dcn_hubbub_shift *hubbub_shift, + const struct dcn_hubbub_mask *hubbub_mask) +{ + hubbub->base.ctx = ctx; + + hubbub->base.funcs = &hubbub2_funcs; + + hubbub->regs = hubbub_regs; + hubbub->shifts = hubbub_shift; + hubbub->masks = hubbub_mask; + + hubbub->debug_test_index_pstate = 0xB; + hubbub->detile_buf_size = 164 * 1024; /* 164KB for DCN2.0 */ +} diff --git a/drivers/gpu/drm/amd/display/dc/hubbub/dcn20/dcn20_hubbub.h b/drivers/gpu/drm/amd/display/dc/hubbub/dcn20/dcn20_hubbub.h new file mode 100644 index 000000000000..036bb3e6c957 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/hubbub/dcn20/dcn20_hubbub.h @@ -0,0 +1,143 @@ +/* + * Copyright 2016 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: AMD + * + */ + +#ifndef __DC_HUBBUB_DCN20_H__ +#define __DC_HUBBUB_DCN20_H__ + +#include "dcn10/dcn10_hubbub.h" +#include "dcn20/dcn20_vmid.h" + +#define TO_DCN20_HUBBUB(hubbub)\ + container_of(hubbub, struct dcn20_hubbub, base) + +#define HUBBUB_REG_LIST_DCN20_COMMON()\ + HUBBUB_REG_LIST_DCN_COMMON(), \ + SR(DCHUBBUB_CRC_CTRL), \ + SR(DCN_VM_FB_LOCATION_BASE),\ + SR(DCN_VM_FB_LOCATION_TOP),\ + SR(DCN_VM_FB_OFFSET),\ + SR(DCN_VM_AGP_BOT),\ + SR(DCN_VM_AGP_TOP),\ + SR(DCN_VM_AGP_BASE),\ + SR(DCN_VM_FAULT_ADDR_MSB), \ + SR(DCN_VM_FAULT_ADDR_LSB), \ + SR(DCN_VM_FAULT_CNTL), \ + SR(DCN_VM_FAULT_STATUS) + +#define HUBBUB_REG_LIST_DCN20(id)\ + HUBBUB_REG_LIST_DCN20_COMMON(), \ + HUBBUB_SR_WATERMARK_REG_LIST(), \ + HUBBUB_VM_REG_LIST(),\ + SR(DCN_VM_PROTECTION_FAULT_DEFAULT_ADDR_MSB),\ + SR(DCN_VM_PROTECTION_FAULT_DEFAULT_ADDR_LSB) + + +#define HUBBUB_MASK_SH_LIST_DCN20(mask_sh)\ + HUBBUB_MASK_SH_LIST_DCN_COMMON(mask_sh), \ + HUBBUB_MASK_SH_LIST_STUTTER(mask_sh), \ + HUBBUB_SF(DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_REFDIV, mask_sh), \ + HUBBUB_SF(DCN_VM_FB_LOCATION_BASE, FB_BASE, mask_sh), \ + HUBBUB_SF(DCN_VM_FB_LOCATION_TOP, FB_TOP, mask_sh), \ + HUBBUB_SF(DCN_VM_FB_OFFSET, FB_OFFSET, mask_sh), \ + HUBBUB_SF(DCN_VM_AGP_BOT, AGP_BOT, mask_sh), \ + HUBBUB_SF(DCN_VM_AGP_TOP, AGP_TOP, mask_sh), \ + HUBBUB_SF(DCN_VM_AGP_BASE, AGP_BASE, mask_sh), \ + HUBBUB_SF(DCN_VM_PROTECTION_FAULT_DEFAULT_ADDR_MSB, DCN_VM_PROTECTION_FAULT_DEFAULT_ADDR_MSB, mask_sh), \ + HUBBUB_SF(DCN_VM_PROTECTION_FAULT_DEFAULT_ADDR_LSB, DCN_VM_PROTECTION_FAULT_DEFAULT_ADDR_LSB, mask_sh), \ + HUBBUB_SF(DCN_VM_FAULT_ADDR_MSB, DCN_VM_FAULT_ADDR_MSB, mask_sh), \ + HUBBUB_SF(DCN_VM_FAULT_ADDR_LSB, DCN_VM_FAULT_ADDR_LSB, mask_sh), \ + HUBBUB_SF(DCN_VM_FAULT_CNTL, DCN_VM_ERROR_STATUS_CLEAR, mask_sh), \ + HUBBUB_SF(DCN_VM_FAULT_CNTL, DCN_VM_ERROR_STATUS_MODE, mask_sh), \ + HUBBUB_SF(DCN_VM_FAULT_CNTL, DCN_VM_ERROR_INTERRUPT_ENABLE, mask_sh), \ + HUBBUB_SF(DCN_VM_FAULT_CNTL, DCN_VM_RANGE_FAULT_DISABLE, mask_sh), \ + HUBBUB_SF(DCN_VM_FAULT_CNTL, DCN_VM_PRQ_FAULT_DISABLE, mask_sh), \ + HUBBUB_SF(DCN_VM_FAULT_STATUS, DCN_VM_ERROR_STATUS, mask_sh), \ + HUBBUB_SF(DCN_VM_FAULT_STATUS, DCN_VM_ERROR_VMID, mask_sh), \ + HUBBUB_SF(DCN_VM_FAULT_STATUS, DCN_VM_ERROR_TABLE_LEVEL, mask_sh), \ + HUBBUB_SF(DCN_VM_FAULT_STATUS, DCN_VM_ERROR_PIPE, mask_sh), \ + HUBBUB_SF(DCN_VM_FAULT_STATUS, DCN_VM_ERROR_INTERRUPT_STATUS, mask_sh) + +struct dcn20_hubbub { + struct hubbub base; + const struct dcn_hubbub_registers *regs; + const struct dcn_hubbub_shift *shifts; + const struct dcn_hubbub_mask *masks; + unsigned int debug_test_index_pstate; + union dcn_watermark_set watermarks; + int num_vmid; + struct dcn20_vmid vmid[16]; + unsigned int detile_buf_size; + unsigned int crb_size_segs; + unsigned int compbuf_size_segments; + unsigned int pixel_chunk_size; + unsigned int det0_size; + unsigned int det1_size; + unsigned int det2_size; + unsigned int det3_size; +}; + +void hubbub2_construct(struct dcn20_hubbub *hubbub, + struct dc_context *ctx, + const struct dcn_hubbub_registers *hubbub_regs, + const struct dcn_hubbub_shift *hubbub_shift, + const struct dcn_hubbub_mask *hubbub_mask); + +bool hubbub2_dcc_support_swizzle( + enum swizzle_mode_values swizzle, + unsigned int bytes_per_element, + enum segment_order *segment_order_horz, + enum segment_order *segment_order_vert); + +bool hubbub2_dcc_support_pixel_format( + enum surface_pixel_format format, + unsigned int *bytes_per_element); + +bool hubbub2_get_dcc_compression_cap(struct hubbub 
*hubbub, + const struct dc_dcc_surface_param *input, + struct dc_surface_dcc_cap *output); + +bool hubbub2_initialize_vmids(struct hubbub *hubbub, + const struct dc_dcc_surface_param *input, + struct dc_surface_dcc_cap *output); + +int hubbub2_init_dchub_sys_ctx(struct hubbub *hubbub, + struct dcn_hubbub_phys_addr_config *pa_config); +void hubbub2_init_vm_ctx(struct hubbub *hubbub, + struct dcn_hubbub_virt_addr_config *va_config, + int vmid); +void hubbub2_update_dchub(struct hubbub *hubbub, + struct dchub_init_data *dh_data); + +void hubbub2_get_dchub_ref_freq(struct hubbub *hubbub, + unsigned int dccg_ref_freq_inKhz, + unsigned int *dchub_ref_freq_inKhz); + +void hubbub2_wm_read_state(struct hubbub *hubbub, + struct dcn_hubbub_wm *wm); + +void hubbub2_read_state(struct hubbub *hubbub, + struct dcn_hubbub_state *hubbub_state); + +#endif diff --git a/drivers/gpu/drm/amd/display/dc/hubbub/dcn201/dcn201_hubbub.c b/drivers/gpu/drm/amd/display/dc/hubbub/dcn201/dcn201_hubbub.c new file mode 100644 index 000000000000..63798132ed95 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/hubbub/dcn201/dcn201_hubbub.c @@ -0,0 +1,107 @@ +/* +* Copyright 2018 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: AMD + * + */ +#include "dm_services.h" +#include "dcn20/dcn20_hubbub.h" +#include "dcn201_hubbub.h" +#include "reg_helper.h" + +#define REG(reg)\ + hubbub1->regs->reg + +#define DC_LOGGER \ + hubbub1->base.ctx->logger + +#define CTX \ + hubbub1->base.ctx + +#undef FN +#define FN(reg_name, field_name) \ + hubbub1->shifts->field_name, hubbub1->masks->field_name + +#define REG(reg)\ + hubbub1->regs->reg + +#define CTX \ + hubbub1->base.ctx + +#undef FN +#define FN(reg_name, field_name) \ + hubbub1->shifts->field_name, hubbub1->masks->field_name + +static bool hubbub201_program_watermarks( + struct hubbub *hubbub, + union dcn_watermark_set *watermarks, + unsigned int refclk_mhz, + bool safe_to_lower) +{ + struct dcn20_hubbub *hubbub1 = TO_DCN20_HUBBUB(hubbub); + bool wm_pending = false; + + if (hubbub1_program_urgent_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower)) + wm_pending = true; + + if (hubbub1_program_pstate_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower)) + wm_pending = true; + + REG_SET(DCHUBBUB_ARB_SAT_LEVEL, 0, + DCHUBBUB_ARB_SAT_LEVEL, 60 * refclk_mhz); + REG_UPDATE(DCHUBBUB_ARB_DF_REQ_OUTSTAND, + DCHUBBUB_ARB_MIN_REQ_OUTSTAND, 68); + + hubbub1_allow_self_refresh_control(hubbub, !hubbub->ctx->dc->debug.disable_stutter); + + return wm_pending; +} + +static const struct hubbub_funcs hubbub201_funcs = { + .update_dchub = hubbub2_update_dchub, + .init_dchub_sys_ctx = NULL, + .init_vm_ctx = NULL, + .dcc_support_swizzle = hubbub2_dcc_support_swizzle, + .dcc_support_pixel_format = hubbub2_dcc_support_pixel_format, + .get_dcc_compression_cap = hubbub2_get_dcc_compression_cap, + .wm_read_state = hubbub2_wm_read_state, + .get_dchub_ref_freq = hubbub2_get_dchub_ref_freq, + .program_watermarks = hubbub201_program_watermarks, + .hubbub_read_state = hubbub2_read_state, +}; + +void hubbub201_construct(struct dcn20_hubbub *hubbub, + struct dc_context *ctx, + const struct dcn_hubbub_registers *hubbub_regs, + const struct dcn_hubbub_shift *hubbub_shift, + const struct dcn_hubbub_mask *hubbub_mask) +{ + hubbub->base.ctx = ctx; + + hubbub->base.funcs = &hubbub201_funcs; + + hubbub->regs = hubbub_regs; + hubbub->shifts = hubbub_shift; + hubbub->masks = hubbub_mask; + + hubbub->debug_test_index_pstate = 0xB; + hubbub->detile_buf_size = 164 * 1024; /* 164KB for DCN2.0 */ +} diff --git a/drivers/gpu/drm/amd/display/dc/hubbub/dcn201/dcn201_hubbub.h b/drivers/gpu/drm/amd/display/dc/hubbub/dcn201/dcn201_hubbub.h new file mode 100644 index 000000000000..5aeca0be3e15 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/hubbub/dcn201/dcn201_hubbub.h @@ -0,0 +1,45 @@ +/* + * Copyright 2018 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ +#ifndef DAL_DC_DCN201_DCN201_HUBBUB_H_ +#define DAL_DC_DCN201_DCN201_HUBBUB_H_ + +#include "dcn20/dcn20_hubbub.h" + +#define HUBBUB_REG_LIST_DCN201(id)\ + HUBBUB_REG_LIST_DCN_COMMON(), \ + HUBBUB_VM_REG_LIST(), \ + SR(DCHUBBUB_CRC_CTRL) + +#define HUBBUB_MASK_SH_LIST_DCN201(mask_sh)\ + HUBBUB_MASK_SH_LIST_DCN_COMMON(mask_sh), \ + HUBBUB_SF(DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_REFDIV, mask_sh) + +void hubbub201_construct(struct dcn20_hubbub *hubbub, + struct dc_context *ctx, + const struct dcn_hubbub_registers *hubbub_regs, + const struct dcn_hubbub_shift *hubbub_shift, + const struct dcn_hubbub_mask *hubbub_mask); + +#endif /* DAL_DC_DCN201_DCN201_HUBBUB_H_ */ diff --git a/drivers/gpu/drm/amd/display/dc/hubbub/dcn21/dcn21_hubbub.c b/drivers/gpu/drm/amd/display/dc/hubbub/dcn21/dcn21_hubbub.c new file mode 100644 index 000000000000..2546224b326a --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/hubbub/dcn21/dcn21_hubbub.c @@ -0,0 +1,723 @@ +/* +* Copyright 2018 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: AMD + * + */ +#include <linux/delay.h> +#include "dm_services.h" +#include "dcn20/dcn20_hubbub.h" +#include "dcn21_hubbub.h" +#include "reg_helper.h" + +#define REG(reg)\ + hubbub1->regs->reg +#define DC_LOGGER \ + hubbub1->base.ctx->logger +#define CTX \ + hubbub1->base.ctx + +#undef FN +#define FN(reg_name, field_name) \ + hubbub1->shifts->field_name, hubbub1->masks->field_name + +#define REG(reg)\ + hubbub1->regs->reg + +#define CTX \ + hubbub1->base.ctx + +#undef FN +#define FN(reg_name, field_name) \ + hubbub1->shifts->field_name, hubbub1->masks->field_name + +static uint32_t convert_and_clamp( + uint32_t wm_ns, + uint32_t refclk_mhz, + uint32_t clamp_value) +{ + uint32_t ret_val = 0; + ret_val = wm_ns * refclk_mhz; + ret_val /= 1000; + + if (ret_val > clamp_value) + ret_val = clamp_value; + + return ret_val; +} + +void dcn21_dchvm_init(struct hubbub *hubbub) +{ + struct dcn20_hubbub *hubbub1 = TO_DCN20_HUBBUB(hubbub); + uint32_t riommu_active; + int i; + + //Init DCHVM block + REG_UPDATE(DCHVM_CTRL0, HOSTVM_INIT_REQ, 1); + + //Poll until RIOMMU_ACTIVE = 1 + for (i = 0; i < 100; i++) { + REG_GET(DCHVM_RIOMMU_STAT0, RIOMMU_ACTIVE, &riommu_active); + + if (riommu_active) + break; + else + udelay(5); + } + + if (riommu_active) { + //Reflect the power status of DCHUBBUB + REG_UPDATE(DCHVM_RIOMMU_CTRL0, HOSTVM_POWERSTATUS, 1); + + //Start rIOMMU prefetching + REG_UPDATE(DCHVM_RIOMMU_CTRL0, HOSTVM_PREFETCH_REQ, 1); + + // Enable dynamic clock gating + REG_UPDATE_4(DCHVM_CLK_CTRL, + HVM_DISPCLK_R_GATE_DIS, 0, + HVM_DISPCLK_G_GATE_DIS, 0, + HVM_DCFCLK_R_GATE_DIS, 0, + HVM_DCFCLK_G_GATE_DIS, 0); + + //Poll until HOSTVM_PREFETCH_DONE = 1 + REG_WAIT(DCHVM_RIOMMU_STAT0, HOSTVM_PREFETCH_DONE, 1, 5, 100); + + hubbub->riommu_active = true; + } +} + +int hubbub21_init_dchub(struct hubbub *hubbub, + struct dcn_hubbub_phys_addr_config *pa_config) +{ + struct dcn20_hubbub *hubbub1 = TO_DCN20_HUBBUB(hubbub); + struct dcn_vmid_page_table_config phys_config; + + REG_SET(DCN_VM_FB_LOCATION_BASE, 0, + FB_BASE, pa_config->system_aperture.fb_base >> 24); + REG_SET(DCN_VM_FB_LOCATION_TOP, 0, + FB_TOP, pa_config->system_aperture.fb_top >> 24); + REG_SET(DCN_VM_FB_OFFSET, 0, + FB_OFFSET, pa_config->system_aperture.fb_offset >> 24); + REG_SET(DCN_VM_AGP_BOT, 0, + AGP_BOT, pa_config->system_aperture.agp_bot >> 24); + REG_SET(DCN_VM_AGP_TOP, 0, + AGP_TOP, pa_config->system_aperture.agp_top >> 24); + REG_SET(DCN_VM_AGP_BASE, 0, + AGP_BASE, pa_config->system_aperture.agp_base >> 24); + + if (pa_config->gart_config.page_table_start_addr != pa_config->gart_config.page_table_end_addr) { + phys_config.page_table_start_addr = pa_config->gart_config.page_table_start_addr >> 12; + phys_config.page_table_end_addr = pa_config->gart_config.page_table_end_addr >> 12; + phys_config.page_table_base_addr = pa_config->gart_config.page_table_base_addr | 1; //Note: hack + phys_config.depth = 0; + phys_config.block_size = 0; + // Init VMID 0 based on PA config + dcn20_vmid_setup(&hubbub1->vmid[0], &phys_config); + } + + dcn21_dchvm_init(hubbub); + + return hubbub1->num_vmid; +} + +bool hubbub21_program_urgent_watermarks( + struct hubbub *hubbub, + union dcn_watermark_set *watermarks, + unsigned int refclk_mhz, + bool safe_to_lower) +{ + struct dcn20_hubbub *hubbub1 = TO_DCN20_HUBBUB(hubbub); + uint32_t prog_wm_value; + bool wm_pending = false; + + /* Repeat for water mark set A, B, C and D. 
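Each watermark is given in nanoseconds and converted to dchub refclk cycles by convert_and_clamp(), i.e. wm_ns * refclk_mhz / 1000 clamped to the 21-bit register range (0x1fffff); for example 800 ns at a 48 MHz refclk programs 800 * 48 / 1000 = 38 cycles. A value is only written when it rises, or when safe_to_lower permits lowering it; a lower value that cannot be applied yet is reported through wm_pending.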
*/ + /* clock state A */ + if (safe_to_lower || watermarks->a.urgent_ns > hubbub1->watermarks.a.urgent_ns) { + hubbub1->watermarks.a.urgent_ns = watermarks->a.urgent_ns; + prog_wm_value = convert_and_clamp(watermarks->a.urgent_ns, + refclk_mhz, 0x1fffff); + REG_SET_2(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A, 0, + DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A, prog_wm_value, + DCHUBBUB_ARB_VM_ROW_URGENCY_WATERMARK_A, prog_wm_value); + + DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_A calculated =%d\n" + "HW register value = 0x%x\n", + watermarks->a.urgent_ns, prog_wm_value); + } else if (watermarks->a.urgent_ns < hubbub1->watermarks.a.urgent_ns) + wm_pending = true; + + /* determine the transfer time for a quantity of data for a particular requestor.*/ + if (safe_to_lower || watermarks->a.frac_urg_bw_flip + > hubbub1->watermarks.a.frac_urg_bw_flip) { + hubbub1->watermarks.a.frac_urg_bw_flip = watermarks->a.frac_urg_bw_flip; + + REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_A, 0, + DCHUBBUB_ARB_FRAC_URG_BW_FLIP_A, watermarks->a.frac_urg_bw_flip); + } else if (watermarks->a.frac_urg_bw_flip + < hubbub1->watermarks.a.frac_urg_bw_flip) + wm_pending = true; + + if (safe_to_lower || watermarks->a.frac_urg_bw_nom + > hubbub1->watermarks.a.frac_urg_bw_nom) { + hubbub1->watermarks.a.frac_urg_bw_nom = watermarks->a.frac_urg_bw_nom; + + REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_NOM_A, 0, + DCHUBBUB_ARB_FRAC_URG_BW_NOM_A, watermarks->a.frac_urg_bw_nom); + } else if (watermarks->a.frac_urg_bw_nom + < hubbub1->watermarks.a.frac_urg_bw_nom) + wm_pending = true; + + if (safe_to_lower || watermarks->a.urgent_latency_ns > hubbub1->watermarks.a.urgent_latency_ns) { + hubbub1->watermarks.a.urgent_latency_ns = watermarks->a.urgent_latency_ns; + prog_wm_value = convert_and_clamp(watermarks->a.urgent_latency_ns, + refclk_mhz, 0x1fffff); + REG_SET(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_A, 0, + DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_A, prog_wm_value); + } else if (watermarks->a.urgent_latency_ns < hubbub1->watermarks.a.urgent_latency_ns) + wm_pending = true; + + /* clock state B */ + if (safe_to_lower || watermarks->b.urgent_ns > hubbub1->watermarks.b.urgent_ns) { + hubbub1->watermarks.b.urgent_ns = watermarks->b.urgent_ns; + prog_wm_value = convert_and_clamp(watermarks->b.urgent_ns, + refclk_mhz, 0x1fffff); + REG_SET_2(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B, 0, + DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B, prog_wm_value, + DCHUBBUB_ARB_VM_ROW_URGENCY_WATERMARK_B, prog_wm_value); + + DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_B calculated =%d\n" + "HW register value = 0x%x\n", + watermarks->b.urgent_ns, prog_wm_value); + } else if (watermarks->b.urgent_ns < hubbub1->watermarks.b.urgent_ns) + wm_pending = true; + + /* determine the transfer time for a quantity of data for a particular requestor.*/ + if (safe_to_lower || watermarks->a.frac_urg_bw_flip + > hubbub1->watermarks.a.frac_urg_bw_flip) { + hubbub1->watermarks.a.frac_urg_bw_flip = watermarks->a.frac_urg_bw_flip; + + REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_B, 0, + DCHUBBUB_ARB_FRAC_URG_BW_FLIP_B, watermarks->a.frac_urg_bw_flip); + } else if (watermarks->a.frac_urg_bw_flip + < hubbub1->watermarks.a.frac_urg_bw_flip) + wm_pending = true; + + if (safe_to_lower || watermarks->a.frac_urg_bw_nom + > hubbub1->watermarks.a.frac_urg_bw_nom) { + hubbub1->watermarks.a.frac_urg_bw_nom = watermarks->a.frac_urg_bw_nom; + + REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_NOM_B, 0, + DCHUBBUB_ARB_FRAC_URG_BW_NOM_B, watermarks->a.frac_urg_bw_nom); + } else if (watermarks->a.frac_urg_bw_nom + < hubbub1->watermarks.a.frac_urg_bw_nom) 
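/* lowering is not safe yet; keep the currently programmed value and report the change as pending */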
+ wm_pending = true; + + if (safe_to_lower || watermarks->b.urgent_latency_ns > hubbub1->watermarks.b.urgent_latency_ns) { + hubbub1->watermarks.b.urgent_latency_ns = watermarks->b.urgent_latency_ns; + prog_wm_value = convert_and_clamp(watermarks->b.urgent_latency_ns, + refclk_mhz, 0x1fffff); + REG_SET(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_B, 0, + DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_B, prog_wm_value); + } else if (watermarks->b.urgent_latency_ns < hubbub1->watermarks.b.urgent_latency_ns) + wm_pending = true; + + /* clock state C */ + if (safe_to_lower || watermarks->c.urgent_ns > hubbub1->watermarks.c.urgent_ns) { + hubbub1->watermarks.c.urgent_ns = watermarks->c.urgent_ns; + prog_wm_value = convert_and_clamp(watermarks->c.urgent_ns, + refclk_mhz, 0x1fffff); + REG_SET_2(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C, 0, + DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C, prog_wm_value, + DCHUBBUB_ARB_VM_ROW_URGENCY_WATERMARK_C, prog_wm_value); + + DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_C calculated =%d\n" + "HW register value = 0x%x\n", + watermarks->c.urgent_ns, prog_wm_value); + } else if (watermarks->c.urgent_ns < hubbub1->watermarks.c.urgent_ns) + wm_pending = true; + + /* determine the transfer time for a quantity of data for a particular requestor.*/ + if (safe_to_lower || watermarks->a.frac_urg_bw_flip + > hubbub1->watermarks.a.frac_urg_bw_flip) { + hubbub1->watermarks.a.frac_urg_bw_flip = watermarks->a.frac_urg_bw_flip; + + REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_C, 0, + DCHUBBUB_ARB_FRAC_URG_BW_FLIP_C, watermarks->a.frac_urg_bw_flip); + } else if (watermarks->a.frac_urg_bw_flip + < hubbub1->watermarks.a.frac_urg_bw_flip) + wm_pending = true; + + if (safe_to_lower || watermarks->a.frac_urg_bw_nom + > hubbub1->watermarks.a.frac_urg_bw_nom) { + hubbub1->watermarks.a.frac_urg_bw_nom = watermarks->a.frac_urg_bw_nom; + + REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_NOM_C, 0, + DCHUBBUB_ARB_FRAC_URG_BW_NOM_C, watermarks->a.frac_urg_bw_nom); + } else if (watermarks->a.frac_urg_bw_nom + < hubbub1->watermarks.a.frac_urg_bw_nom) + wm_pending = true; + + if (safe_to_lower || watermarks->c.urgent_latency_ns > hubbub1->watermarks.c.urgent_latency_ns) { + hubbub1->watermarks.c.urgent_latency_ns = watermarks->c.urgent_latency_ns; + prog_wm_value = convert_and_clamp(watermarks->c.urgent_latency_ns, + refclk_mhz, 0x1fffff); + REG_SET(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_C, 0, + DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_C, prog_wm_value); + } else if (watermarks->c.urgent_latency_ns < hubbub1->watermarks.c.urgent_latency_ns) + wm_pending = true; + + /* clock state D */ + if (safe_to_lower || watermarks->d.urgent_ns > hubbub1->watermarks.d.urgent_ns) { + hubbub1->watermarks.d.urgent_ns = watermarks->d.urgent_ns; + prog_wm_value = convert_and_clamp(watermarks->d.urgent_ns, + refclk_mhz, 0x1fffff); + REG_SET_2(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D, 0, + DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D, prog_wm_value, + DCHUBBUB_ARB_VM_ROW_URGENCY_WATERMARK_D, prog_wm_value); + + DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_D calculated =%d\n" + "HW register value = 0x%x\n", + watermarks->d.urgent_ns, prog_wm_value); + } else if (watermarks->d.urgent_ns < hubbub1->watermarks.d.urgent_ns) + wm_pending = true; + + /* determine the transfer time for a quantity of data for a particular requestor.*/ + if (safe_to_lower || watermarks->a.frac_urg_bw_flip + > hubbub1->watermarks.a.frac_urg_bw_flip) { + hubbub1->watermarks.a.frac_urg_bw_flip = watermarks->a.frac_urg_bw_flip; + + REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_D, 0, + 
DCHUBBUB_ARB_FRAC_URG_BW_FLIP_D, watermarks->a.frac_urg_bw_flip); + } else if (watermarks->a.frac_urg_bw_flip + < hubbub1->watermarks.a.frac_urg_bw_flip) + wm_pending = true; + + if (safe_to_lower || watermarks->a.frac_urg_bw_nom + > hubbub1->watermarks.a.frac_urg_bw_nom) { + hubbub1->watermarks.a.frac_urg_bw_nom = watermarks->a.frac_urg_bw_nom; + + REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_NOM_D, 0, + DCHUBBUB_ARB_FRAC_URG_BW_NOM_D, watermarks->a.frac_urg_bw_nom); + } else if (watermarks->a.frac_urg_bw_nom + < hubbub1->watermarks.a.frac_urg_bw_nom) + wm_pending = true; + + if (safe_to_lower || watermarks->d.urgent_latency_ns > hubbub1->watermarks.d.urgent_latency_ns) { + hubbub1->watermarks.d.urgent_latency_ns = watermarks->d.urgent_latency_ns; + prog_wm_value = convert_and_clamp(watermarks->d.urgent_latency_ns, + refclk_mhz, 0x1fffff); + REG_SET(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_D, 0, + DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_D, prog_wm_value); + } else if (watermarks->d.urgent_latency_ns < hubbub1->watermarks.d.urgent_latency_ns) + wm_pending = true; + + return wm_pending; +} + +bool hubbub21_program_stutter_watermarks( + struct hubbub *hubbub, + union dcn_watermark_set *watermarks, + unsigned int refclk_mhz, + bool safe_to_lower) +{ + struct dcn20_hubbub *hubbub1 = TO_DCN20_HUBBUB(hubbub); + uint32_t prog_wm_value; + bool wm_pending = false; + + /* clock state A */ + if (safe_to_lower || watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns + > hubbub1->watermarks.a.cstate_pstate.cstate_enter_plus_exit_ns) { + hubbub1->watermarks.a.cstate_pstate.cstate_enter_plus_exit_ns = + watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns; + prog_wm_value = convert_and_clamp( + watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns, + refclk_mhz, 0x1fffff); + REG_SET_2(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A, 0, + DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A, prog_wm_value, + DCHUBBUB_ARB_VM_ROW_ALLOW_SR_ENTER_WATERMARK_A, prog_wm_value); + DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_A calculated =%d\n" + "HW register value = 0x%x\n", + watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value); + } else if (watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns + < hubbub1->watermarks.a.cstate_pstate.cstate_enter_plus_exit_ns) + wm_pending = true; + + if (safe_to_lower || watermarks->a.cstate_pstate.cstate_exit_ns + > hubbub1->watermarks.a.cstate_pstate.cstate_exit_ns) { + hubbub1->watermarks.a.cstate_pstate.cstate_exit_ns = + watermarks->a.cstate_pstate.cstate_exit_ns; + prog_wm_value = convert_and_clamp( + watermarks->a.cstate_pstate.cstate_exit_ns, + refclk_mhz, 0x1fffff); + REG_SET_2(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A, 0, + DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A, prog_wm_value, + DCHUBBUB_ARB_VM_ROW_ALLOW_SR_EXIT_WATERMARK_A, prog_wm_value); + DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_A calculated =%d\n" + "HW register value = 0x%x\n", + watermarks->a.cstate_pstate.cstate_exit_ns, prog_wm_value); + } else if (watermarks->a.cstate_pstate.cstate_exit_ns + < hubbub1->watermarks.a.cstate_pstate.cstate_exit_ns) + wm_pending = true; + + /* clock state B */ + if (safe_to_lower || watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns + > hubbub1->watermarks.b.cstate_pstate.cstate_enter_plus_exit_ns) { + hubbub1->watermarks.b.cstate_pstate.cstate_enter_plus_exit_ns = + watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns; + prog_wm_value = convert_and_clamp( + watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns, + refclk_mhz, 0x1fffff); + REG_SET_2(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B, 
0, + DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B, prog_wm_value, + DCHUBBUB_ARB_VM_ROW_ALLOW_SR_ENTER_WATERMARK_B, prog_wm_value); + DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_B calculated =%d\n" + "HW register value = 0x%x\n", + watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value); + } else if (watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns + < hubbub1->watermarks.b.cstate_pstate.cstate_enter_plus_exit_ns) + wm_pending = true; + + if (safe_to_lower || watermarks->b.cstate_pstate.cstate_exit_ns + > hubbub1->watermarks.b.cstate_pstate.cstate_exit_ns) { + hubbub1->watermarks.b.cstate_pstate.cstate_exit_ns = + watermarks->b.cstate_pstate.cstate_exit_ns; + prog_wm_value = convert_and_clamp( + watermarks->b.cstate_pstate.cstate_exit_ns, + refclk_mhz, 0x1fffff); + REG_SET_2(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B, 0, + DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B, prog_wm_value, + DCHUBBUB_ARB_VM_ROW_ALLOW_SR_EXIT_WATERMARK_A, prog_wm_value); + DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_B calculated =%d\n" + "HW register value = 0x%x\n", + watermarks->b.cstate_pstate.cstate_exit_ns, prog_wm_value); + } else if (watermarks->b.cstate_pstate.cstate_exit_ns + < hubbub1->watermarks.b.cstate_pstate.cstate_exit_ns) + wm_pending = true; + + /* clock state C */ + if (safe_to_lower || watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns + > hubbub1->watermarks.c.cstate_pstate.cstate_enter_plus_exit_ns) { + hubbub1->watermarks.c.cstate_pstate.cstate_enter_plus_exit_ns = + watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns; + prog_wm_value = convert_and_clamp( + watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns, + refclk_mhz, 0x1fffff); + REG_SET_2(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C, 0, + DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C, prog_wm_value, + DCHUBBUB_ARB_VM_ROW_ALLOW_SR_ENTER_WATERMARK_C, prog_wm_value); + DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_C calculated =%d\n" + "HW register value = 0x%x\n", + watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value); + } else if (watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns + < hubbub1->watermarks.c.cstate_pstate.cstate_enter_plus_exit_ns) + wm_pending = true; + + if (safe_to_lower || watermarks->c.cstate_pstate.cstate_exit_ns + > hubbub1->watermarks.c.cstate_pstate.cstate_exit_ns) { + hubbub1->watermarks.c.cstate_pstate.cstate_exit_ns = + watermarks->c.cstate_pstate.cstate_exit_ns; + prog_wm_value = convert_and_clamp( + watermarks->c.cstate_pstate.cstate_exit_ns, + refclk_mhz, 0x1fffff); + REG_SET_2(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C, 0, + DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C, prog_wm_value, + DCHUBBUB_ARB_VM_ROW_ALLOW_SR_EXIT_WATERMARK_A, prog_wm_value); + DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_C calculated =%d\n" + "HW register value = 0x%x\n", + watermarks->c.cstate_pstate.cstate_exit_ns, prog_wm_value); + } else if (watermarks->c.cstate_pstate.cstate_exit_ns + < hubbub1->watermarks.c.cstate_pstate.cstate_exit_ns) + wm_pending = true; + + /* clock state D */ + if (safe_to_lower || watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns + > hubbub1->watermarks.d.cstate_pstate.cstate_enter_plus_exit_ns) { + hubbub1->watermarks.d.cstate_pstate.cstate_enter_plus_exit_ns = + watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns; + prog_wm_value = convert_and_clamp( + watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns, + refclk_mhz, 0x1fffff); + REG_SET_2(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D, 0, + DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D, prog_wm_value, + 
DCHUBBUB_ARB_VM_ROW_ALLOW_SR_ENTER_WATERMARK_D, prog_wm_value); + DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_D calculated =%d\n" + "HW register value = 0x%x\n", + watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value); + } else if (watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns + < hubbub1->watermarks.d.cstate_pstate.cstate_enter_plus_exit_ns) + wm_pending = true; + + if (safe_to_lower || watermarks->d.cstate_pstate.cstate_exit_ns + > hubbub1->watermarks.d.cstate_pstate.cstate_exit_ns) { + hubbub1->watermarks.d.cstate_pstate.cstate_exit_ns = + watermarks->d.cstate_pstate.cstate_exit_ns; + prog_wm_value = convert_and_clamp( + watermarks->d.cstate_pstate.cstate_exit_ns, + refclk_mhz, 0x1fffff); + REG_SET_2(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D, 0, + DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D, prog_wm_value, + DCHUBBUB_ARB_VM_ROW_ALLOW_SR_EXIT_WATERMARK_A, prog_wm_value); + DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_D calculated =%d\n" + "HW register value = 0x%x\n", + watermarks->d.cstate_pstate.cstate_exit_ns, prog_wm_value); + } else if (watermarks->d.cstate_pstate.cstate_exit_ns + < hubbub1->watermarks.d.cstate_pstate.cstate_exit_ns) + wm_pending = true; + + return wm_pending; +} + +bool hubbub21_program_pstate_watermarks( + struct hubbub *hubbub, + union dcn_watermark_set *watermarks, + unsigned int refclk_mhz, + bool safe_to_lower) +{ + struct dcn20_hubbub *hubbub1 = TO_DCN20_HUBBUB(hubbub); + uint32_t prog_wm_value; + + bool wm_pending = false; + + /* clock state A */ + if (safe_to_lower || watermarks->a.cstate_pstate.pstate_change_ns + > hubbub1->watermarks.a.cstate_pstate.pstate_change_ns) { + hubbub1->watermarks.a.cstate_pstate.pstate_change_ns = + watermarks->a.cstate_pstate.pstate_change_ns; + prog_wm_value = convert_and_clamp( + watermarks->a.cstate_pstate.pstate_change_ns, + refclk_mhz, 0x1fffff); + REG_SET_2(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A, 0, + DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A, prog_wm_value, + DCHUBBUB_ARB_VM_ROW_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A, prog_wm_value); + DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_A calculated =%d\n" + "HW register value = 0x%x\n\n", + watermarks->a.cstate_pstate.pstate_change_ns, prog_wm_value); + } else if (watermarks->a.cstate_pstate.pstate_change_ns + < hubbub1->watermarks.a.cstate_pstate.pstate_change_ns) + wm_pending = true; + + /* clock state B */ + if (safe_to_lower || watermarks->b.cstate_pstate.pstate_change_ns + > hubbub1->watermarks.b.cstate_pstate.pstate_change_ns) { + hubbub1->watermarks.b.cstate_pstate.pstate_change_ns = + watermarks->b.cstate_pstate.pstate_change_ns; + prog_wm_value = convert_and_clamp( + watermarks->b.cstate_pstate.pstate_change_ns, + refclk_mhz, 0x1fffff); + REG_SET_2(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B, 0, + DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B, prog_wm_value, + DCHUBBUB_ARB_VM_ROW_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B, prog_wm_value); + DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_B calculated =%d\n" + "HW register value = 0x%x\n\n", + watermarks->b.cstate_pstate.pstate_change_ns, prog_wm_value); + } else if (watermarks->b.cstate_pstate.pstate_change_ns + < hubbub1->watermarks.b.cstate_pstate.pstate_change_ns) + wm_pending = false; + + /* clock state C */ + if (safe_to_lower || watermarks->c.cstate_pstate.pstate_change_ns + > hubbub1->watermarks.c.cstate_pstate.pstate_change_ns) { + hubbub1->watermarks.c.cstate_pstate.pstate_change_ns = + watermarks->c.cstate_pstate.pstate_change_ns; + prog_wm_value = convert_and_clamp( + 
watermarks->c.cstate_pstate.pstate_change_ns, + refclk_mhz, 0x1fffff); + REG_SET_2(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C, 0, + DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C, prog_wm_value, + DCHUBBUB_ARB_VM_ROW_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C, prog_wm_value); + DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_C calculated =%d\n" + "HW register value = 0x%x\n\n", + watermarks->c.cstate_pstate.pstate_change_ns, prog_wm_value); + } else if (watermarks->c.cstate_pstate.pstate_change_ns + < hubbub1->watermarks.c.cstate_pstate.pstate_change_ns) + wm_pending = true; + + /* clock state D */ + if (safe_to_lower || watermarks->d.cstate_pstate.pstate_change_ns + > hubbub1->watermarks.d.cstate_pstate.pstate_change_ns) { + hubbub1->watermarks.d.cstate_pstate.pstate_change_ns = + watermarks->d.cstate_pstate.pstate_change_ns; + prog_wm_value = convert_and_clamp( + watermarks->d.cstate_pstate.pstate_change_ns, + refclk_mhz, 0x1fffff); + REG_SET_2(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D, 0, + DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D, prog_wm_value, + DCHUBBUB_ARB_VM_ROW_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D, prog_wm_value); + DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_D calculated =%d\n" + "HW register value = 0x%x\n\n", + watermarks->d.cstate_pstate.pstate_change_ns, prog_wm_value); + } else if (watermarks->d.cstate_pstate.pstate_change_ns + < hubbub1->watermarks.d.cstate_pstate.pstate_change_ns) + wm_pending = true; + + return wm_pending; +} + +bool hubbub21_program_watermarks( + struct hubbub *hubbub, + union dcn_watermark_set *watermarks, + unsigned int refclk_mhz, + bool safe_to_lower) +{ + struct dcn20_hubbub *hubbub1 = TO_DCN20_HUBBUB(hubbub); + bool wm_pending = false; + + if (hubbub21_program_urgent_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower)) + wm_pending = true; + + if (hubbub21_program_stutter_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower)) + wm_pending = true; + + if (hubbub21_program_pstate_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower)) + wm_pending = true; + + /* + * The DCHub arbiter has a mechanism to dynamically rate limit the DCHub request stream to the fabric. + * If the memory controller is fully utilized and the DCHub requestors are + * well ahead of their amortized schedule, then it is safe to prevent the next winner + * from being committed and sent to the fabric. + * The utilization of the memory controller is approximated by ensuring that + * the number of outstanding requests is greater than a threshold specified + * by the ARB_MIN_REQ_OUTSTANDING. To determine that the DCHub requestors are well ahead of the amortized schedule, + * the slack of the next winner is compared with the ARB_SAT_LEVEL in DLG RefClk cycles. + * + * TODO: Revisit request limit after figure out right number. request limit for Renoir isn't decided yet, set maximum value (0x1FF) + * to turn off it for now. 
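Illustrative aside (the helper name and the 100 MHz figure below are assumptions): ARB_SAT_LEVEL is programmed in DLG RefClk cycles, so the "60 * refclk_mhz" written right after this comment is simply 60 microseconds of slack expressed in cycles, while 0x1FF is the maximum value the MIN_REQ_OUTSTAND field can hold.

#include <stdint.h>

/* 60 us of arbiter slack expressed in DLG RefClk cycles:
 * 6000 cycles at an assumed 100 MHz RefClk. */
static inline uint32_t arb_sat_level_cycles(uint32_t refclk_mhz)
{
	return 60 * refclk_mhz;
}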
+ */ + REG_SET(DCHUBBUB_ARB_SAT_LEVEL, 0, + DCHUBBUB_ARB_SAT_LEVEL, 60 * refclk_mhz); + REG_UPDATE_2(DCHUBBUB_ARB_DF_REQ_OUTSTAND, + DCHUBBUB_ARB_MIN_REQ_OUTSTAND, 0x1FF, + DCHUBBUB_ARB_MIN_REQ_OUTSTAND_COMMIT_THRESHOLD, 0xA); + REG_UPDATE(DCHUBBUB_ARB_HOSTVM_CNTL, + DCHUBBUB_ARB_MAX_QOS_COMMIT_THRESHOLD, 0xF); + + hubbub1_allow_self_refresh_control(hubbub, !hubbub->ctx->dc->debug.disable_stutter); + + return wm_pending; +} + +void hubbub21_wm_read_state(struct hubbub *hubbub, + struct dcn_hubbub_wm *wm) +{ + struct dcn20_hubbub *hubbub1 = TO_DCN20_HUBBUB(hubbub); + struct dcn_hubbub_wm_set *s; + + memset(wm, 0, sizeof(struct dcn_hubbub_wm)); + + s = &wm->sets[0]; + s->wm_set = 0; + REG_GET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A, + DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A, &s->data_urgent); + + REG_GET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A, + DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A, &s->sr_enter); + + REG_GET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A, + DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A, &s->sr_exit); + + REG_GET(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A, + DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A, &s->dram_clk_change); + + s = &wm->sets[1]; + s->wm_set = 1; + REG_GET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B, + DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B, &s->data_urgent); + + REG_GET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B, + DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B, &s->sr_enter); + + REG_GET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B, + DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B, &s->sr_exit); + + REG_GET(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B, + DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B, &s->dram_clk_change); + + s = &wm->sets[2]; + s->wm_set = 2; + REG_GET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C, + DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C, &s->data_urgent); + + REG_GET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C, + DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C, &s->sr_enter); + + REG_GET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C, + DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C, &s->sr_exit); + + REG_GET(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C, + DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C, &s->dram_clk_change); + + s = &wm->sets[3]; + s->wm_set = 3; + REG_GET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D, + DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D, &s->data_urgent); + + REG_GET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D, + DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D, &s->sr_enter); + + REG_GET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D, + DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D, &s->sr_exit); + + REG_GET(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D, + DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D, &s->dram_clk_change); +} + +static void hubbub21_apply_DEDCN21_147_wa(struct hubbub *hubbub) +{ + struct dcn20_hubbub *hubbub1 = TO_DCN20_HUBBUB(hubbub); + uint32_t prog_wm_value; + + prog_wm_value = REG_READ(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A); + REG_WRITE(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A, prog_wm_value); +} + +static const struct hubbub_funcs hubbub21_funcs = { + .update_dchub = hubbub2_update_dchub, + .init_dchub_sys_ctx = hubbub21_init_dchub, + .init_vm_ctx = hubbub2_init_vm_ctx, + .dcc_support_swizzle = hubbub2_dcc_support_swizzle, + .dcc_support_pixel_format = hubbub2_dcc_support_pixel_format, + .get_dcc_compression_cap = hubbub2_get_dcc_compression_cap, + .wm_read_state = hubbub21_wm_read_state, + .get_dchub_ref_freq = hubbub2_get_dchub_ref_freq, + .program_watermarks = hubbub21_program_watermarks, + .allow_self_refresh_control = hubbub1_allow_self_refresh_control, + .apply_DEDCN21_147_wa = 
hubbub21_apply_DEDCN21_147_wa, + .hubbub_read_state = hubbub2_read_state, +}; + +void hubbub21_construct(struct dcn20_hubbub *hubbub, + struct dc_context *ctx, + const struct dcn_hubbub_registers *hubbub_regs, + const struct dcn_hubbub_shift *hubbub_shift, + const struct dcn_hubbub_mask *hubbub_mask) +{ + hubbub->base.ctx = ctx; + + hubbub->base.funcs = &hubbub21_funcs; + + hubbub->regs = hubbub_regs; + hubbub->shifts = hubbub_shift; + hubbub->masks = hubbub_mask; + + hubbub->debug_test_index_pstate = 0xB; + hubbub->detile_buf_size = 164 * 1024; /* 164KB for DCN2.0 */ +} diff --git a/drivers/gpu/drm/amd/display/dc/hubbub/dcn21/dcn21_hubbub.h b/drivers/gpu/drm/amd/display/dc/hubbub/dcn21/dcn21_hubbub.h new file mode 100644 index 000000000000..ab2ce0313529 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/hubbub/dcn21/dcn21_hubbub.h @@ -0,0 +1,158 @@ +/* +* Copyright 2018 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: AMD + * + */ +#ifndef DAL_DC_DCN21_DCN21_HUBBUB_H_ +#define DAL_DC_DCN21_DCN21_HUBBUB_H_ + +#include "dcn20/dcn20_hubbub.h" + +#define HUBBUB_HVM_REG_LIST() \ + SR(DCHUBBUB_ARB_FRAC_URG_BW_NOM_A),\ + SR(DCHUBBUB_ARB_FRAC_URG_BW_NOM_B),\ + SR(DCHUBBUB_ARB_FRAC_URG_BW_NOM_C),\ + SR(DCHUBBUB_ARB_FRAC_URG_BW_NOM_D),\ + SR(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_A),\ + SR(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_B),\ + SR(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_C),\ + SR(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_D),\ + SR(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_A),\ + SR(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_B),\ + SR(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_C),\ + SR(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_D),\ + SR(DCHUBBUB_ARB_HOSTVM_CNTL), \ + SR(DCHVM_CTRL0), \ + SR(DCHVM_MEM_CTRL), \ + SR(DCHVM_CLK_CTRL), \ + SR(DCHVM_RIOMMU_CTRL0), \ + SR(DCHVM_RIOMMU_STAT0) + +#define HUBBUB_REG_LIST_DCN21()\ + HUBBUB_REG_LIST_DCN20_COMMON(), \ + HUBBUB_SR_WATERMARK_REG_LIST(), \ + HUBBUB_HVM_REG_LIST() + +#define HUBBUB_MASK_SH_LIST_HVM(mask_sh) \ + HUBBUB_SF(DCHUBBUB_ARB_DF_REQ_OUTSTAND, DCHUBBUB_ARB_MIN_REQ_OUTSTAND_COMMIT_THRESHOLD, mask_sh), \ + HUBBUB_SF(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_A, DCHUBBUB_ARB_FRAC_URG_BW_FLIP_A, mask_sh), \ + HUBBUB_SF(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_B, DCHUBBUB_ARB_FRAC_URG_BW_FLIP_B, mask_sh), \ + HUBBUB_SF(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_C, DCHUBBUB_ARB_FRAC_URG_BW_FLIP_C, mask_sh), \ + HUBBUB_SF(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_D, DCHUBBUB_ARB_FRAC_URG_BW_FLIP_D, mask_sh), \ + HUBBUB_SF(DCHUBBUB_ARB_FRAC_URG_BW_NOM_A, DCHUBBUB_ARB_FRAC_URG_BW_NOM_A, mask_sh), \ + HUBBUB_SF(DCHUBBUB_ARB_FRAC_URG_BW_NOM_B, DCHUBBUB_ARB_FRAC_URG_BW_NOM_B, mask_sh), \ + HUBBUB_SF(DCHUBBUB_ARB_FRAC_URG_BW_NOM_C, DCHUBBUB_ARB_FRAC_URG_BW_NOM_C, mask_sh), \ + HUBBUB_SF(DCHUBBUB_ARB_FRAC_URG_BW_NOM_D, DCHUBBUB_ARB_FRAC_URG_BW_NOM_D, mask_sh), \ + HUBBUB_SF(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A, DCHUBBUB_ARB_VM_ROW_URGENCY_WATERMARK_A, mask_sh), \ + HUBBUB_SF(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B, DCHUBBUB_ARB_VM_ROW_URGENCY_WATERMARK_B, mask_sh), \ + HUBBUB_SF(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C, DCHUBBUB_ARB_VM_ROW_URGENCY_WATERMARK_C, mask_sh), \ + HUBBUB_SF(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D, DCHUBBUB_ARB_VM_ROW_URGENCY_WATERMARK_D, mask_sh), \ + HUBBUB_SF(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A, DCHUBBUB_ARB_VM_ROW_ALLOW_SR_ENTER_WATERMARK_A, mask_sh), \ + HUBBUB_SF(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B, DCHUBBUB_ARB_VM_ROW_ALLOW_SR_ENTER_WATERMARK_B, mask_sh), \ + HUBBUB_SF(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C, DCHUBBUB_ARB_VM_ROW_ALLOW_SR_ENTER_WATERMARK_C, mask_sh), \ + HUBBUB_SF(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D, DCHUBBUB_ARB_VM_ROW_ALLOW_SR_ENTER_WATERMARK_D, mask_sh), \ + HUBBUB_SF(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A, DCHUBBUB_ARB_VM_ROW_ALLOW_SR_EXIT_WATERMARK_A, mask_sh), \ + HUBBUB_SF(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B, DCHUBBUB_ARB_VM_ROW_ALLOW_SR_EXIT_WATERMARK_B, mask_sh), \ + HUBBUB_SF(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C, DCHUBBUB_ARB_VM_ROW_ALLOW_SR_EXIT_WATERMARK_C, mask_sh), \ + HUBBUB_SF(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D, DCHUBBUB_ARB_VM_ROW_ALLOW_SR_EXIT_WATERMARK_D, mask_sh), \ + HUBBUB_SF(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A, DCHUBBUB_ARB_VM_ROW_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A, mask_sh), \ + HUBBUB_SF(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B, DCHUBBUB_ARB_VM_ROW_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B, mask_sh), \ + HUBBUB_SF(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C, DCHUBBUB_ARB_VM_ROW_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C, mask_sh), \ + 
HUBBUB_SF(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D, DCHUBBUB_ARB_VM_ROW_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D, mask_sh), \ + HUBBUB_SF(DCHUBBUB_ARB_HOSTVM_CNTL, DCHUBBUB_ARB_MAX_QOS_COMMIT_THRESHOLD, mask_sh), \ + HUBBUB_SF(DCHVM_CTRL0, HOSTVM_INIT_REQ, mask_sh), \ + HUBBUB_SF(DCHVM_MEM_CTRL, HVM_GPUVMRET_PWR_REQ_DIS, mask_sh), \ + HUBBUB_SF(DCHVM_MEM_CTRL, HVM_GPUVMRET_FORCE_REQ, mask_sh), \ + HUBBUB_SF(DCHVM_MEM_CTRL, HVM_GPUVMRET_POWER_STATUS, mask_sh), \ + HUBBUB_SF(DCHVM_CLK_CTRL, HVM_DISPCLK_R_GATE_DIS, mask_sh), \ + HUBBUB_SF(DCHVM_CLK_CTRL, HVM_DISPCLK_G_GATE_DIS, mask_sh), \ + HUBBUB_SF(DCHVM_CLK_CTRL, HVM_DCFCLK_R_GATE_DIS, mask_sh), \ + HUBBUB_SF(DCHVM_CLK_CTRL, HVM_DCFCLK_G_GATE_DIS, mask_sh), \ + HUBBUB_SF(DCHVM_CLK_CTRL, TR_REQ_REQCLKREQ_MODE, mask_sh), \ + HUBBUB_SF(DCHVM_CLK_CTRL, TW_RSP_COMPCLKREQ_MODE, mask_sh), \ + HUBBUB_SF(DCHVM_RIOMMU_CTRL0, HOSTVM_PREFETCH_REQ, mask_sh), \ + HUBBUB_SF(DCHVM_RIOMMU_CTRL0, HOSTVM_POWERSTATUS, mask_sh), \ + HUBBUB_SF(DCHVM_RIOMMU_STAT0, RIOMMU_ACTIVE, mask_sh), \ + HUBBUB_SF(DCHVM_RIOMMU_STAT0, HOSTVM_PREFETCH_DONE, mask_sh), \ + HUBBUB_SF(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_A, DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_A, mask_sh), \ + HUBBUB_SF(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_B, DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_B, mask_sh), \ + HUBBUB_SF(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_C, DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_C, mask_sh), \ + HUBBUB_SF(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_D, DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_D, mask_sh) + +#define HUBBUB_MASK_SH_LIST_DCN21(mask_sh)\ + HUBBUB_MASK_SH_LIST_HVM(mask_sh), \ + HUBBUB_MASK_SH_LIST_DCN_COMMON(mask_sh), \ + HUBBUB_MASK_SH_LIST_STUTTER(mask_sh), \ + HUBBUB_SF(DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_REFDIV, mask_sh), \ + HUBBUB_SF(DCN_VM_FB_LOCATION_BASE, FB_BASE, mask_sh), \ + HUBBUB_SF(DCN_VM_FB_LOCATION_TOP, FB_TOP, mask_sh), \ + HUBBUB_SF(DCN_VM_FB_OFFSET, FB_OFFSET, mask_sh), \ + HUBBUB_SF(DCN_VM_AGP_BOT, AGP_BOT, mask_sh), \ + HUBBUB_SF(DCN_VM_AGP_TOP, AGP_TOP, mask_sh), \ + HUBBUB_SF(DCN_VM_AGP_BASE, AGP_BASE, mask_sh), \ + HUBBUB_SF(DCN_VM_FAULT_ADDR_MSB, DCN_VM_FAULT_ADDR_MSB, mask_sh), \ + HUBBUB_SF(DCN_VM_FAULT_ADDR_LSB, DCN_VM_FAULT_ADDR_LSB, mask_sh), \ + HUBBUB_SF(DCN_VM_FAULT_CNTL, DCN_VM_ERROR_STATUS_CLEAR, mask_sh), \ + HUBBUB_SF(DCN_VM_FAULT_CNTL, DCN_VM_ERROR_STATUS_MODE, mask_sh), \ + HUBBUB_SF(DCN_VM_FAULT_CNTL, DCN_VM_ERROR_INTERRUPT_ENABLE, mask_sh), \ + HUBBUB_SF(DCN_VM_FAULT_CNTL, DCN_VM_RANGE_FAULT_DISABLE, mask_sh), \ + HUBBUB_SF(DCN_VM_FAULT_CNTL, DCN_VM_PRQ_FAULT_DISABLE, mask_sh), \ + HUBBUB_SF(DCN_VM_FAULT_STATUS, DCN_VM_ERROR_STATUS, mask_sh), \ + HUBBUB_SF(DCN_VM_FAULT_STATUS, DCN_VM_ERROR_VMID, mask_sh), \ + HUBBUB_SF(DCN_VM_FAULT_STATUS, DCN_VM_ERROR_TABLE_LEVEL, mask_sh), \ + HUBBUB_SF(DCN_VM_FAULT_STATUS, DCN_VM_ERROR_PIPE, mask_sh), \ + HUBBUB_SF(DCN_VM_FAULT_STATUS, DCN_VM_ERROR_INTERRUPT_STATUS, mask_sh) + +void dcn21_dchvm_init(struct hubbub *hubbub); +int hubbub21_init_dchub(struct hubbub *hubbub, + struct dcn_hubbub_phys_addr_config *pa_config); +bool hubbub21_program_watermarks( + struct hubbub *hubbub, + union dcn_watermark_set *watermarks, + unsigned int refclk_mhz, + bool safe_to_lower); +bool hubbub21_program_urgent_watermarks( + struct hubbub *hubbub, + union dcn_watermark_set *watermarks, + unsigned int refclk_mhz, + bool safe_to_lower); +bool hubbub21_program_stutter_watermarks( + struct hubbub *hubbub, + union dcn_watermark_set *watermarks, + unsigned int refclk_mhz, + bool safe_to_lower); +bool 
hubbub21_program_pstate_watermarks( + struct hubbub *hubbub, + union dcn_watermark_set *watermarks, + unsigned int refclk_mhz, + bool safe_to_lower); + +void hubbub21_wm_read_state(struct hubbub *hubbub, + struct dcn_hubbub_wm *wm); + +void hubbub21_construct(struct dcn20_hubbub *hubbub, + struct dc_context *ctx, + const struct dcn_hubbub_registers *hubbub_regs, + const struct dcn_hubbub_shift *hubbub_shift, + const struct dcn_hubbub_mask *hubbub_mask); + +#endif /* DAL_DC_DCN21_DCN21_HUBBUB_H_ */ diff --git a/drivers/gpu/drm/amd/display/dc/hubbub/dcn30/dcn30_hubbub.c b/drivers/gpu/drm/amd/display/dc/hubbub/dcn30/dcn30_hubbub.c new file mode 100644 index 000000000000..6a5af3da4b45 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/hubbub/dcn30/dcn30_hubbub.c @@ -0,0 +1,473 @@ +/* + * Copyright 2020 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: AMD + * + */ + + +#include "dm_services.h" +#include "reg_helper.h" +#include "dcn30_hubbub.h" + + +#define CTX \ + hubbub1->base.ctx +#define DC_LOGGER \ + hubbub1->base.ctx->logger +#define REG(reg)\ + hubbub1->regs->reg + +#undef FN +#define FN(reg_name, field_name) \ + hubbub1->shifts->field_name, hubbub1->masks->field_name + +#ifdef NUM_VMID +#undef NUM_VMID +#endif +#define NUM_VMID 16 + + +static uint32_t convert_and_clamp( + uint32_t wm_ns, + uint32_t refclk_mhz, + uint32_t clamp_value) +{ + uint32_t ret_val = 0; + ret_val = wm_ns * refclk_mhz; + ret_val /= 1000; + + if (ret_val > clamp_value) + ret_val = clamp_value; + + return ret_val; +} + +int hubbub3_init_dchub_sys_ctx(struct hubbub *hubbub, + struct dcn_hubbub_phys_addr_config *pa_config) +{ + struct dcn20_hubbub *hubbub1 = TO_DCN20_HUBBUB(hubbub); + struct dcn_vmid_page_table_config phys_config; + + REG_SET(DCN_VM_FB_LOCATION_BASE, 0, + FB_BASE, pa_config->system_aperture.fb_base >> 24); + REG_SET(DCN_VM_FB_LOCATION_TOP, 0, + FB_TOP, pa_config->system_aperture.fb_top >> 24); + REG_SET(DCN_VM_FB_OFFSET, 0, + FB_OFFSET, pa_config->system_aperture.fb_offset >> 24); + REG_SET(DCN_VM_AGP_BOT, 0, + AGP_BOT, pa_config->system_aperture.agp_bot >> 24); + REG_SET(DCN_VM_AGP_TOP, 0, + AGP_TOP, pa_config->system_aperture.agp_top >> 24); + REG_SET(DCN_VM_AGP_BASE, 0, + AGP_BASE, pa_config->system_aperture.agp_base >> 24); + + if (pa_config->gart_config.page_table_start_addr != pa_config->gart_config.page_table_end_addr) { + phys_config.page_table_start_addr = pa_config->gart_config.page_table_start_addr >> 12; + phys_config.page_table_end_addr = pa_config->gart_config.page_table_end_addr >> 12; + phys_config.page_table_base_addr = pa_config->gart_config.page_table_base_addr; + phys_config.depth = 0; + phys_config.block_size = 0; + // Init VMID 0 based on PA config + dcn20_vmid_setup(&hubbub1->vmid[0], &phys_config); + } + + return NUM_VMID; +} + +bool hubbub3_program_watermarks( + struct hubbub *hubbub, + union dcn_watermark_set *watermarks, + unsigned int refclk_mhz, + bool safe_to_lower) +{ + struct dcn20_hubbub *hubbub1 = TO_DCN20_HUBBUB(hubbub); + bool wm_pending = false; + + if (hubbub21_program_urgent_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower)) + wm_pending = true; + + if (hubbub21_program_stutter_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower)) + wm_pending = true; + + if (hubbub21_program_pstate_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower)) + wm_pending = true; + + /* + * The DCHub arbiter has a mechanism to dynamically rate limit the DCHub request stream to the fabric. + * If the memory controller is fully utilized and the DCHub requestors are + * well ahead of their amortized schedule, then it is safe to prevent the next winner + * from being committed and sent to the fabric. + * The utilization of the memory controller is approximated by ensuring that + * the number of outstanding requests is greater than a threshold specified + * by the ARB_MIN_REQ_OUTSTANDING. To determine that the DCHub requestors are well ahead of the amortized schedule, + * the slack of the next winner is compared with the ARB_SAT_LEVEL in DLG RefClk cycles. + * + * TODO: Revisit request limit after figure out right number. request limit for Renoir isn't decided yet, set maximum value (0x1FF) + * to turn off it for now. 
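Illustrative aside: the ARB_SAT_LEVEL programmed just below is expressed in the same DLG RefClk cycles that convert_and_clamp() above produces for the watermark values. A minimal standalone sketch of that conversion, with an assumed 24 MHz RefClk as the worked example (the function name is a placeholder, not from this file):

#include <stdint.h>

/* Scale a nanosecond watermark to DLG RefClk cycles and clamp it to the
 * register width, mirroring convert_and_clamp() above. */
static uint32_t wm_ns_to_refclk_cycles(uint32_t wm_ns, uint32_t refclk_mhz,
				       uint32_t clamp_value)
{
	uint32_t cycles = (wm_ns * refclk_mhz) / 1000;	/* 9000 ns at 24 MHz -> 216 cycles */

	return cycles > clamp_value ? clamp_value : cycles;
}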
+ */ + REG_SET(DCHUBBUB_ARB_SAT_LEVEL, 0, + DCHUBBUB_ARB_SAT_LEVEL, 60 * refclk_mhz); + REG_UPDATE(DCHUBBUB_ARB_DF_REQ_OUTSTAND, + DCHUBBUB_ARB_MIN_REQ_OUTSTAND, 0x1FF); + + hubbub1_allow_self_refresh_control(hubbub, !hubbub->ctx->dc->debug.disable_stutter); + + return wm_pending; +} + +bool hubbub3_dcc_support_swizzle( + enum swizzle_mode_values swizzle, + unsigned int bytes_per_element, + enum segment_order *segment_order_horz, + enum segment_order *segment_order_vert) +{ + bool standard_swizzle = false; + bool display_swizzle = false; + bool render_swizzle = false; + + switch (swizzle) { + case DC_SW_4KB_S: + case DC_SW_64KB_S: + case DC_SW_VAR_S: + case DC_SW_4KB_S_X: + case DC_SW_64KB_S_X: + case DC_SW_VAR_S_X: + standard_swizzle = true; + break; + case DC_SW_4KB_R: + case DC_SW_64KB_R: + case DC_SW_VAR_R: + case DC_SW_4KB_R_X: + case DC_SW_64KB_R_X: + case DC_SW_VAR_R_X: + render_swizzle = true; + break; + case DC_SW_4KB_D: + case DC_SW_64KB_D: + case DC_SW_VAR_D: + case DC_SW_4KB_D_X: + case DC_SW_64KB_D_X: + case DC_SW_VAR_D_X: + display_swizzle = true; + break; + default: + break; + } + + if (standard_swizzle) { + if (bytes_per_element == 1) { + *segment_order_horz = segment_order__contiguous; + *segment_order_vert = segment_order__na; + return true; + } + if (bytes_per_element == 2) { + *segment_order_horz = segment_order__non_contiguous; + *segment_order_vert = segment_order__contiguous; + return true; + } + if (bytes_per_element == 4) { + *segment_order_horz = segment_order__non_contiguous; + *segment_order_vert = segment_order__contiguous; + return true; + } + if (bytes_per_element == 8) { + *segment_order_horz = segment_order__na; + *segment_order_vert = segment_order__contiguous; + return true; + } + } + if (render_swizzle) { + if (bytes_per_element == 1) { + *segment_order_horz = segment_order__contiguous; + *segment_order_vert = segment_order__na; + return true; + } + if (bytes_per_element == 2) { + *segment_order_horz = segment_order__non_contiguous; + *segment_order_vert = segment_order__contiguous; + return true; + } + if (bytes_per_element == 4) { + *segment_order_horz = segment_order__contiguous; + *segment_order_vert = segment_order__non_contiguous; + return true; + } + if (bytes_per_element == 8) { + *segment_order_horz = segment_order__contiguous; + *segment_order_vert = segment_order__non_contiguous; + return true; + } + } + if (display_swizzle && bytes_per_element == 8) { + *segment_order_horz = segment_order__contiguous; + *segment_order_vert = segment_order__non_contiguous; + return true; + } + + return false; +} + +static void hubbub3_get_blk256_size(unsigned int *blk256_width, unsigned int *blk256_height, + unsigned int bytes_per_element) +{ + /* copied from DML. 
might want to refactor DML to leverage from DML */ + /* DML : get_blk256_size */ + if (bytes_per_element == 1) { + *blk256_width = 16; + *blk256_height = 16; + } else if (bytes_per_element == 2) { + *blk256_width = 16; + *blk256_height = 8; + } else if (bytes_per_element == 4) { + *blk256_width = 8; + *blk256_height = 8; + } else if (bytes_per_element == 8) { + *blk256_width = 8; + *blk256_height = 4; + } +} + +static void hubbub3_det_request_size( + unsigned int detile_buf_size, + unsigned int height, + unsigned int width, + unsigned int bpe, + bool *req128_horz_wc, + bool *req128_vert_wc) +{ + unsigned int blk256_height = 0; + unsigned int blk256_width = 0; + unsigned int swath_bytes_horz_wc, swath_bytes_vert_wc; + + hubbub3_get_blk256_size(&blk256_width, &blk256_height, bpe); + + swath_bytes_horz_wc = width * blk256_height * bpe; + swath_bytes_vert_wc = height * blk256_width * bpe; + + *req128_horz_wc = (2 * swath_bytes_horz_wc <= detile_buf_size) ? + false : /* full 256B request */ + true; /* half 128b request */ + + *req128_vert_wc = (2 * swath_bytes_vert_wc <= detile_buf_size) ? + false : /* full 256B request */ + true; /* half 128b request */ +} + +bool hubbub3_get_dcc_compression_cap(struct hubbub *hubbub, + const struct dc_dcc_surface_param *input, + struct dc_surface_dcc_cap *output) +{ + struct dc *dc = hubbub->ctx->dc; + /* implement section 1.6.2.1 of DCN1_Programming_Guide.docx */ + enum dcc_control dcc_control; + unsigned int bpe; + enum segment_order segment_order_horz, segment_order_vert; + bool req128_horz_wc, req128_vert_wc; + + memset(output, 0, sizeof(*output)); + + if (dc->debug.disable_dcc == DCC_DISABLE) + return false; + + if (!hubbub->funcs->dcc_support_pixel_format(input->format, + &bpe)) + return false; + + if (!hubbub->funcs->dcc_support_swizzle(input->swizzle_mode, bpe, + &segment_order_horz, &segment_order_vert)) + return false; + + hubbub3_det_request_size(TO_DCN20_HUBBUB(hubbub)->detile_buf_size, + input->surface_size.height, input->surface_size.width, + bpe, &req128_horz_wc, &req128_vert_wc); + + if (!req128_horz_wc && !req128_vert_wc) { + dcc_control = dcc_control__256_256_xxx; + } else if (input->scan == SCAN_DIRECTION_HORIZONTAL) { + if (!req128_horz_wc) + dcc_control = dcc_control__256_256_xxx; + else if (segment_order_horz == segment_order__contiguous) + dcc_control = dcc_control__128_128_xxx; + else + dcc_control = dcc_control__256_64_64; + } else if (input->scan == SCAN_DIRECTION_VERTICAL) { + if (!req128_vert_wc) + dcc_control = dcc_control__256_256_xxx; + else if (segment_order_vert == segment_order__contiguous) + dcc_control = dcc_control__128_128_xxx; + else + dcc_control = dcc_control__256_64_64; + } else { + if ((req128_horz_wc && + segment_order_horz == segment_order__non_contiguous) || + (req128_vert_wc && + segment_order_vert == segment_order__non_contiguous)) + /* access_dir not known, must use most constraining */ + dcc_control = dcc_control__256_64_64; + else + /* reg128 is true for either horz and vert + * but segment_order is contiguous + */ + dcc_control = dcc_control__128_128_xxx; + } + + /* Exception for 64KB_R_X */ + if ((bpe == 2) && (input->swizzle_mode == DC_SW_64KB_R_X)) + dcc_control = dcc_control__128_128_xxx; + + if (dc->debug.disable_dcc == DCC_HALF_REQ_DISALBE && + dcc_control != dcc_control__256_256_xxx) + return false; + + switch (dcc_control) { + case dcc_control__256_256_xxx: + output->grph.rgb.max_uncompressed_blk_size = 256; + output->grph.rgb.max_compressed_blk_size = 256; + output->grph.rgb.independent_64b_blks = 
false; + output->grph.rgb.dcc_controls.dcc_256_256_unconstrained = 1; + output->grph.rgb.dcc_controls.dcc_256_128_128 = 1; + break; + case dcc_control__128_128_xxx: + output->grph.rgb.max_uncompressed_blk_size = 128; + output->grph.rgb.max_compressed_blk_size = 128; + output->grph.rgb.independent_64b_blks = false; + output->grph.rgb.dcc_controls.dcc_128_128_uncontrained = 1; + output->grph.rgb.dcc_controls.dcc_256_128_128 = 1; + break; + case dcc_control__256_64_64: + output->grph.rgb.max_uncompressed_blk_size = 256; + output->grph.rgb.max_compressed_blk_size = 64; + output->grph.rgb.independent_64b_blks = true; + output->grph.rgb.dcc_controls.dcc_256_64_64 = 1; + break; + case dcc_control__256_128_128: + output->grph.rgb.max_uncompressed_blk_size = 256; + output->grph.rgb.max_compressed_blk_size = 128; + output->grph.rgb.independent_64b_blks = false; + output->grph.rgb.dcc_controls.dcc_256_128_128 = 1; + break; + } + output->capable = true; + output->const_color_support = true; + + return true; +} + +void hubbub3_force_wm_propagate_to_pipes(struct hubbub *hubbub) +{ + struct dcn20_hubbub *hubbub1 = TO_DCN20_HUBBUB(hubbub); + uint32_t refclk_mhz = hubbub->ctx->dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000; + uint32_t prog_wm_value = convert_and_clamp(hubbub1->watermarks.a.urgent_ns, + refclk_mhz, 0x1fffff); + + REG_SET_2(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A, 0, + DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A, prog_wm_value, + DCHUBBUB_ARB_VM_ROW_URGENCY_WATERMARK_A, prog_wm_value); +} + +void hubbub3_force_pstate_change_control(struct hubbub *hubbub, + bool force, bool allow) +{ + struct dcn20_hubbub *hubbub1 = TO_DCN20_HUBBUB(hubbub); + + REG_UPDATE_2(DCHUBBUB_ARB_DRAM_STATE_CNTL, + DCHUBBUB_ARB_ALLOW_PSTATE_CHANGE_FORCE_VALUE, allow, + DCHUBBUB_ARB_ALLOW_PSTATE_CHANGE_FORCE_ENABLE, force); +} + +/* Copy values from WM set A to all other sets */ +void hubbub3_init_watermarks(struct hubbub *hubbub) +{ + struct dcn20_hubbub *hubbub1 = TO_DCN20_HUBBUB(hubbub); + uint32_t reg; + + reg = REG_READ(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A); + REG_WRITE(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B, reg); + REG_WRITE(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C, reg); + REG_WRITE(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D, reg); + + reg = REG_READ(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_A); + REG_WRITE(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_B, reg); + REG_WRITE(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_C, reg); + REG_WRITE(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_D, reg); + + reg = REG_READ(DCHUBBUB_ARB_FRAC_URG_BW_NOM_A); + REG_WRITE(DCHUBBUB_ARB_FRAC_URG_BW_NOM_B, reg); + REG_WRITE(DCHUBBUB_ARB_FRAC_URG_BW_NOM_C, reg); + REG_WRITE(DCHUBBUB_ARB_FRAC_URG_BW_NOM_D, reg); + + reg = REG_READ(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_A); + REG_WRITE(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_B, reg); + REG_WRITE(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_C, reg); + REG_WRITE(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_D, reg); + + reg = REG_READ(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A); + REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B, reg); + REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C, reg); + REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D, reg); + + reg = REG_READ(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A); + REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B, reg); + REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C, reg); + REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D, reg); + + reg = REG_READ(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A); + REG_WRITE(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B, reg); + 
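Illustrative aside on the request sizing used a little earlier in this file: hubbub3_det_request_size() falls back from full 256-byte to half 128-byte requests once two write-combined swaths no longer fit in the detile buffer. A standalone sketch of that check, with a worked example assuming 4 bytes per element (8x8 256B blocks per hubbub3_get_blk256_size()) and the 184 KB DCN3 detile buffer:

#include <stdbool.h>

/* Half-size (128B) requests are needed once two write-combined swaths
 * exceed the detile buffer, as in hubbub3_det_request_size(). */
static bool use_half_size_request(unsigned int detile_buf_size,
				  unsigned int swath_bytes_wc)
{
	return (2 * swath_bytes_wc) > detile_buf_size;
}

/* Worked example: width 1920, blk256_height 8, bpe 4
 *   swath_bytes_horz_wc = 1920 * 8 * 4 = 61440 bytes
 *   2 * 61440 = 122880 <= 184 * 1024 = 188416, so full 256B requests. */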
REG_WRITE(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C, reg); + REG_WRITE(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D, reg); +} + +static const struct hubbub_funcs hubbub30_funcs = { + .update_dchub = hubbub2_update_dchub, + .init_dchub_sys_ctx = hubbub3_init_dchub_sys_ctx, + .init_vm_ctx = hubbub2_init_vm_ctx, + .dcc_support_swizzle = hubbub3_dcc_support_swizzle, + .dcc_support_pixel_format = hubbub2_dcc_support_pixel_format, + .get_dcc_compression_cap = hubbub3_get_dcc_compression_cap, + .wm_read_state = hubbub21_wm_read_state, + .get_dchub_ref_freq = hubbub2_get_dchub_ref_freq, + .program_watermarks = hubbub3_program_watermarks, + .allow_self_refresh_control = hubbub1_allow_self_refresh_control, + .is_allow_self_refresh_enabled = hubbub1_is_allow_self_refresh_enabled, + .verify_allow_pstate_change_high = hubbub1_verify_allow_pstate_change_high, + .force_wm_propagate_to_pipes = hubbub3_force_wm_propagate_to_pipes, + .force_pstate_change_control = hubbub3_force_pstate_change_control, + .init_watermarks = hubbub3_init_watermarks, + .hubbub_read_state = hubbub2_read_state, +}; + +void hubbub3_construct(struct dcn20_hubbub *hubbub3, + struct dc_context *ctx, + const struct dcn_hubbub_registers *hubbub_regs, + const struct dcn_hubbub_shift *hubbub_shift, + const struct dcn_hubbub_mask *hubbub_mask) +{ + hubbub3->base.ctx = ctx; + hubbub3->base.funcs = &hubbub30_funcs; + hubbub3->regs = hubbub_regs; + hubbub3->shifts = hubbub_shift; + hubbub3->masks = hubbub_mask; + + hubbub3->debug_test_index_pstate = 0xB; + hubbub3->detile_buf_size = 184 * 1024; /* 184KB for DCN3 */ +} + diff --git a/drivers/gpu/drm/amd/display/dc/hubbub/dcn30/dcn30_hubbub.h b/drivers/gpu/drm/amd/display/dc/hubbub/dcn30/dcn30_hubbub.h new file mode 100644 index 000000000000..ca6233e8f1f4 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/hubbub/dcn30/dcn30_hubbub.h @@ -0,0 +1,136 @@ +/* + * Copyright 2020 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: AMD + * + */ + +#ifndef __DC_HUBBUB_DCN30_H__ +#define __DC_HUBBUB_DCN30_H__ + +#include "dcn21/dcn21_hubbub.h" + +#define HUBBUB_REG_LIST_DCN3AG(id)\ + HUBBUB_REG_LIST_DCN21() + +#define HUBBUB_MASK_SH_LIST_DCN3AG(mask_sh)\ + HUBBUB_MASK_SH_LIST_DCN21(mask_sh) + +#define HUBBUB_REG_LIST_DCN30(id)\ + HUBBUB_REG_LIST_DCN20_COMMON(), \ + HUBBUB_SR_WATERMARK_REG_LIST(), \ + SR(DCHUBBUB_ARB_FRAC_URG_BW_NOM_A),\ + SR(DCHUBBUB_ARB_FRAC_URG_BW_NOM_B),\ + SR(DCHUBBUB_ARB_FRAC_URG_BW_NOM_C),\ + SR(DCHUBBUB_ARB_FRAC_URG_BW_NOM_D),\ + SR(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_A),\ + SR(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_B),\ + SR(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_C),\ + SR(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_D),\ + SR(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_A),\ + SR(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_B),\ + SR(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_C),\ + SR(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_D) + +#define HUBBUB_MASK_SH_LIST_DCN30(mask_sh)\ + HUBBUB_MASK_SH_LIST_DCN_COMMON(mask_sh), \ + HUBBUB_MASK_SH_LIST_STUTTER(mask_sh), \ + HUBBUB_SF(DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_REFDIV, mask_sh), \ + HUBBUB_SF(DCN_VM_FB_LOCATION_BASE, FB_BASE, mask_sh), \ + HUBBUB_SF(DCN_VM_FB_LOCATION_TOP, FB_TOP, mask_sh), \ + HUBBUB_SF(DCN_VM_FB_OFFSET, FB_OFFSET, mask_sh), \ + HUBBUB_SF(DCN_VM_AGP_BOT, AGP_BOT, mask_sh), \ + HUBBUB_SF(DCN_VM_AGP_TOP, AGP_TOP, mask_sh), \ + HUBBUB_SF(DCN_VM_AGP_BASE, AGP_BASE, mask_sh), \ + HUBBUB_SF(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_A, DCHUBBUB_ARB_FRAC_URG_BW_FLIP_A, mask_sh), \ + HUBBUB_SF(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_B, DCHUBBUB_ARB_FRAC_URG_BW_FLIP_B, mask_sh), \ + HUBBUB_SF(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_C, DCHUBBUB_ARB_FRAC_URG_BW_FLIP_C, mask_sh), \ + HUBBUB_SF(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_D, DCHUBBUB_ARB_FRAC_URG_BW_FLIP_D, mask_sh), \ + HUBBUB_SF(DCHUBBUB_ARB_FRAC_URG_BW_NOM_A, DCHUBBUB_ARB_FRAC_URG_BW_NOM_A, mask_sh), \ + HUBBUB_SF(DCHUBBUB_ARB_FRAC_URG_BW_NOM_B, DCHUBBUB_ARB_FRAC_URG_BW_NOM_B, mask_sh), \ + HUBBUB_SF(DCHUBBUB_ARB_FRAC_URG_BW_NOM_C, DCHUBBUB_ARB_FRAC_URG_BW_NOM_C, mask_sh), \ + HUBBUB_SF(DCHUBBUB_ARB_FRAC_URG_BW_NOM_D, DCHUBBUB_ARB_FRAC_URG_BW_NOM_D, mask_sh), \ + HUBBUB_SF(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_A, DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_A, mask_sh), \ + HUBBUB_SF(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_B, DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_B, mask_sh), \ + HUBBUB_SF(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_C, DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_C, mask_sh), \ + HUBBUB_SF(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_D, DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_D, mask_sh), \ + HUBBUB_SF(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A, DCHUBBUB_ARB_VM_ROW_URGENCY_WATERMARK_A, mask_sh), \ + HUBBUB_SF(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B, DCHUBBUB_ARB_VM_ROW_URGENCY_WATERMARK_B, mask_sh), \ + HUBBUB_SF(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C, DCHUBBUB_ARB_VM_ROW_URGENCY_WATERMARK_C, mask_sh), \ + HUBBUB_SF(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D, DCHUBBUB_ARB_VM_ROW_URGENCY_WATERMARK_D, mask_sh), \ + HUBBUB_SF(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A, DCHUBBUB_ARB_VM_ROW_ALLOW_SR_ENTER_WATERMARK_A, mask_sh), \ + HUBBUB_SF(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B, DCHUBBUB_ARB_VM_ROW_ALLOW_SR_ENTER_WATERMARK_B, mask_sh), \ + HUBBUB_SF(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C, DCHUBBUB_ARB_VM_ROW_ALLOW_SR_ENTER_WATERMARK_C, mask_sh), \ + HUBBUB_SF(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D, DCHUBBUB_ARB_VM_ROW_ALLOW_SR_ENTER_WATERMARK_D, mask_sh), \ + HUBBUB_SF(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A, DCHUBBUB_ARB_VM_ROW_ALLOW_SR_EXIT_WATERMARK_A, mask_sh), \ 
+ HUBBUB_SF(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B, DCHUBBUB_ARB_VM_ROW_ALLOW_SR_EXIT_WATERMARK_B, mask_sh), \ + HUBBUB_SF(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C, DCHUBBUB_ARB_VM_ROW_ALLOW_SR_EXIT_WATERMARK_C, mask_sh), \ + HUBBUB_SF(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D, DCHUBBUB_ARB_VM_ROW_ALLOW_SR_EXIT_WATERMARK_D, mask_sh), \ + HUBBUB_SF(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A, DCHUBBUB_ARB_VM_ROW_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A, mask_sh), \ + HUBBUB_SF(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B, DCHUBBUB_ARB_VM_ROW_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B, mask_sh), \ + HUBBUB_SF(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C, DCHUBBUB_ARB_VM_ROW_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C, mask_sh), \ + HUBBUB_SF(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D, DCHUBBUB_ARB_VM_ROW_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D, mask_sh), \ + HUBBUB_SF(DCN_VM_FAULT_ADDR_MSB, DCN_VM_FAULT_ADDR_MSB, mask_sh), \ + HUBBUB_SF(DCN_VM_FAULT_ADDR_LSB, DCN_VM_FAULT_ADDR_LSB, mask_sh), \ + HUBBUB_SF(DCN_VM_FAULT_CNTL, DCN_VM_ERROR_STATUS_CLEAR, mask_sh), \ + HUBBUB_SF(DCN_VM_FAULT_CNTL, DCN_VM_ERROR_STATUS_MODE, mask_sh), \ + HUBBUB_SF(DCN_VM_FAULT_CNTL, DCN_VM_ERROR_INTERRUPT_ENABLE, mask_sh), \ + HUBBUB_SF(DCN_VM_FAULT_CNTL, DCN_VM_RANGE_FAULT_DISABLE, mask_sh), \ + HUBBUB_SF(DCN_VM_FAULT_CNTL, DCN_VM_PRQ_FAULT_DISABLE, mask_sh), \ + HUBBUB_SF(DCN_VM_FAULT_STATUS, DCN_VM_ERROR_STATUS, mask_sh), \ + HUBBUB_SF(DCN_VM_FAULT_STATUS, DCN_VM_ERROR_VMID, mask_sh), \ + HUBBUB_SF(DCN_VM_FAULT_STATUS, DCN_VM_ERROR_TABLE_LEVEL, mask_sh), \ + HUBBUB_SF(DCN_VM_FAULT_STATUS, DCN_VM_ERROR_PIPE, mask_sh), \ + HUBBUB_SF(DCN_VM_FAULT_STATUS, DCN_VM_ERROR_INTERRUPT_STATUS, mask_sh) + +void hubbub3_construct(struct dcn20_hubbub *hubbub3, + struct dc_context *ctx, + const struct dcn_hubbub_registers *hubbub_regs, + const struct dcn_hubbub_shift *hubbub_shift, + const struct dcn_hubbub_mask *hubbub_mask); + +int hubbub3_init_dchub_sys_ctx(struct hubbub *hubbub, + struct dcn_hubbub_phys_addr_config *pa_config); + +bool hubbub3_dcc_support_swizzle( + enum swizzle_mode_values swizzle, + unsigned int bytes_per_element, + enum segment_order *segment_order_horz, + enum segment_order *segment_order_vert); + +void hubbub3_force_wm_propagate_to_pipes(struct hubbub *hubbub); + +bool hubbub3_get_dcc_compression_cap(struct hubbub *hubbub, + const struct dc_dcc_surface_param *input, + struct dc_surface_dcc_cap *output); + +bool hubbub3_program_watermarks( + struct hubbub *hubbub, + union dcn_watermark_set *watermarks, + unsigned int refclk_mhz, + bool safe_to_lower); + +void hubbub3_force_pstate_change_control(struct hubbub *hubbub, + bool force, bool allow); + +void hubbub3_init_watermarks(struct hubbub *hubbub); + +#endif diff --git a/drivers/gpu/drm/amd/display/dc/hubbub/dcn301/dcn301_hubbub.c b/drivers/gpu/drm/amd/display/dc/hubbub/dcn301/dcn301_hubbub.c new file mode 100644 index 000000000000..c1959672df50 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/hubbub/dcn301/dcn301_hubbub.c @@ -0,0 +1,84 @@ +/* +* Copyright 2020 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ +#include "dm_services.h" +#include "dcn301_hubbub.h" +#include "reg_helper.h" + +#define REG(reg)\ + hubbub1->regs->reg +#define DC_LOGGER \ + hubbub1->base.ctx->logger +#define CTX \ + hubbub1->base.ctx + +#undef FN +#define FN(reg_name, field_name) \ + hubbub1->shifts->field_name, hubbub1->masks->field_name + +#define REG(reg)\ + hubbub1->regs->reg + +#define CTX \ + hubbub1->base.ctx + +#undef FN +#define FN(reg_name, field_name) \ + hubbub1->shifts->field_name, hubbub1->masks->field_name + + +static const struct hubbub_funcs hubbub301_funcs = { + .update_dchub = hubbub2_update_dchub, + .init_dchub_sys_ctx = hubbub21_init_dchub, + .init_vm_ctx = hubbub2_init_vm_ctx, + .dcc_support_swizzle = hubbub3_dcc_support_swizzle, + .dcc_support_pixel_format = hubbub2_dcc_support_pixel_format, + .get_dcc_compression_cap = hubbub3_get_dcc_compression_cap, + .wm_read_state = hubbub21_wm_read_state, + .get_dchub_ref_freq = hubbub2_get_dchub_ref_freq, + .program_watermarks = hubbub3_program_watermarks, + .allow_self_refresh_control = hubbub1_allow_self_refresh_control, + .is_allow_self_refresh_enabled = hubbub1_is_allow_self_refresh_enabled, + .verify_allow_pstate_change_high = hubbub1_verify_allow_pstate_change_high, + .force_wm_propagate_to_pipes = hubbub3_force_wm_propagate_to_pipes, + .force_pstate_change_control = hubbub3_force_pstate_change_control, + .init_watermarks = hubbub3_init_watermarks, + .hubbub_read_state = hubbub2_read_state, +}; + +void hubbub301_construct(struct dcn20_hubbub *hubbub3, + struct dc_context *ctx, + const struct dcn_hubbub_registers *hubbub_regs, + const struct dcn_hubbub_shift *hubbub_shift, + const struct dcn_hubbub_mask *hubbub_mask) +{ + hubbub3->base.ctx = ctx; + hubbub3->base.funcs = &hubbub301_funcs; + hubbub3->regs = hubbub_regs; + hubbub3->shifts = hubbub_shift; + hubbub3->masks = hubbub_mask; + + hubbub3->debug_test_index_pstate = 0xB; + hubbub3->detile_buf_size = 184 * 1024; /* 184KB for DCN3 */ +} diff --git a/drivers/gpu/drm/amd/display/dc/hubbub/dcn301/dcn301_hubbub.h b/drivers/gpu/drm/amd/display/dc/hubbub/dcn301/dcn301_hubbub.h new file mode 100644 index 000000000000..b599f4475479 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/hubbub/dcn301/dcn301_hubbub.h @@ -0,0 +1,60 @@ +/* + * Copyright 2020 Advanced Micro Devices, Inc. 
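Usage aside (hypothetical caller; the register, shift and mask table names are placeholders): constructors such as hubbub301_construct() are normally invoked from the ASIC resource file, which allocates the shared dcn20_hubbub wrapper and passes in its static register tables.

#include <linux/slab.h>

static struct hubbub *example_hubbub301_create(struct dc_context *ctx)
{
	/* DCN3.01 reuses the dcn20_hubbub state; only the funcs table and
	 * construct helper differ. */
	struct dcn20_hubbub *hubbub3 = kzalloc(sizeof(*hubbub3), GFP_KERNEL);

	if (!hubbub3)
		return NULL;

	hubbub301_construct(hubbub3, ctx,
			    &hubbub_reg, &hubbub_shift, &hubbub_mask);

	return &hubbub3->base;
}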
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ +#ifndef DAL_DC_DCN301_DCN301_HUBBUB_H_ +#define DAL_DC_DCN301_DCN301_HUBBUB_H_ + +#include "dcn30/dcn30_hubbub.h" + + +#define HUBBUB_REG_LIST_DCN301(id)\ + HUBBUB_REG_LIST_DCN30(id), \ + HUBBUB_HVM_REG_LIST() + + +#define HUBBUB_MASK_SH_LIST_DCN301(mask_sh)\ + HUBBUB_MASK_SH_LIST_DCN30(mask_sh), \ + HUBBUB_SF(DCHVM_CTRL0, HOSTVM_INIT_REQ, mask_sh), \ + HUBBUB_SF(DCHVM_MEM_CTRL, HVM_GPUVMRET_PWR_REQ_DIS, mask_sh), \ + HUBBUB_SF(DCHVM_MEM_CTRL, HVM_GPUVMRET_FORCE_REQ, mask_sh), \ + HUBBUB_SF(DCHVM_MEM_CTRL, HVM_GPUVMRET_POWER_STATUS, mask_sh), \ + HUBBUB_SF(DCHVM_CLK_CTRL, HVM_DISPCLK_R_GATE_DIS, mask_sh), \ + HUBBUB_SF(DCHVM_CLK_CTRL, HVM_DISPCLK_G_GATE_DIS, mask_sh), \ + HUBBUB_SF(DCHVM_CLK_CTRL, HVM_DCFCLK_R_GATE_DIS, mask_sh), \ + HUBBUB_SF(DCHVM_CLK_CTRL, HVM_DCFCLK_G_GATE_DIS, mask_sh), \ + HUBBUB_SF(DCHVM_CLK_CTRL, TR_REQ_REQCLKREQ_MODE, mask_sh), \ + HUBBUB_SF(DCHVM_CLK_CTRL, TW_RSP_COMPCLKREQ_MODE, mask_sh), \ + HUBBUB_SF(DCHVM_RIOMMU_CTRL0, HOSTVM_PREFETCH_REQ, mask_sh), \ + HUBBUB_SF(DCHVM_RIOMMU_CTRL0, HOSTVM_POWERSTATUS, mask_sh), \ + HUBBUB_SF(DCHVM_RIOMMU_STAT0, RIOMMU_ACTIVE, mask_sh), \ + HUBBUB_SF(DCHVM_RIOMMU_STAT0, HOSTVM_PREFETCH_DONE, mask_sh) + +void hubbub301_construct(struct dcn20_hubbub *hubbub3, + struct dc_context *ctx, + const struct dcn_hubbub_registers *hubbub_regs, + const struct dcn_hubbub_shift *hubbub_shift, + const struct dcn_hubbub_mask *hubbub_mask); + + +#endif /* DAL_DC_DCN301_DCN301_HUBBUB_H_ */ diff --git a/drivers/gpu/drm/amd/display/dc/hubbub/dcn31/dcn31_hubbub.c b/drivers/gpu/drm/amd/display/dc/hubbub/dcn31/dcn31_hubbub.c new file mode 100644 index 000000000000..b906db6e7355 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/hubbub/dcn31/dcn31_hubbub.c @@ -0,0 +1,1090 @@ +/* + * Copyright 2016 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + + +#include "dcn30/dcn30_hubbub.h" +#include "dcn31_hubbub.h" +#include "dm_services.h" +#include "reg_helper.h" + + +#define CTX \ + hubbub2->base.ctx +#define DC_LOGGER \ + hubbub2->base.ctx->logger +#define REG(reg)\ + hubbub2->regs->reg + +#undef FN +#define FN(reg_name, field_name) \ + hubbub2->shifts->field_name, hubbub2->masks->field_name + +#ifdef NUM_VMID +#undef NUM_VMID +#endif +#define NUM_VMID 16 + +#define DCN31_CRB_SEGMENT_SIZE_KB 64 + +static void dcn31_init_crb(struct hubbub *hubbub) +{ + struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub); + + REG_GET(DCHUBBUB_DET0_CTRL, DET0_SIZE_CURRENT, + &hubbub2->det0_size); + + REG_GET(DCHUBBUB_DET1_CTRL, DET1_SIZE_CURRENT, + &hubbub2->det1_size); + + REG_GET(DCHUBBUB_DET2_CTRL, DET2_SIZE_CURRENT, + &hubbub2->det2_size); + + REG_GET(DCHUBBUB_DET3_CTRL, DET3_SIZE_CURRENT, + &hubbub2->det3_size); + + REG_GET(DCHUBBUB_COMPBUF_CTRL, COMPBUF_SIZE_CURRENT, + &hubbub2->compbuf_size_segments); + + REG_SET_2(COMPBUF_RESERVED_SPACE, 0, + COMPBUF_RESERVED_SPACE_64B, hubbub2->pixel_chunk_size / 32, + COMPBUF_RESERVED_SPACE_ZS, hubbub2->pixel_chunk_size / 128); + REG_UPDATE(DCHUBBUB_DEBUG_CTRL_0, DET_DEPTH, 0x17F); +} + +static void dcn31_program_det_size(struct hubbub *hubbub, int hubp_inst, unsigned int det_buffer_size_in_kbyte) +{ + struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub); + + unsigned int det_size_segments = (det_buffer_size_in_kbyte + DCN31_CRB_SEGMENT_SIZE_KB - 1) / DCN31_CRB_SEGMENT_SIZE_KB; + + switch (hubp_inst) { + case 0: + REG_UPDATE(DCHUBBUB_DET0_CTRL, + DET0_SIZE, det_size_segments); + hubbub2->det0_size = det_size_segments; + break; + case 1: + REG_UPDATE(DCHUBBUB_DET1_CTRL, + DET1_SIZE, det_size_segments); + hubbub2->det1_size = det_size_segments; + break; + case 2: + REG_UPDATE(DCHUBBUB_DET2_CTRL, + DET2_SIZE, det_size_segments); + hubbub2->det2_size = det_size_segments; + break; + case 3: + REG_UPDATE(DCHUBBUB_DET3_CTRL, + DET3_SIZE, det_size_segments); + hubbub2->det3_size = det_size_segments; + break; + default: + break; + } + DC_LOG_DEBUG("Set DET%d to %d segments\n", hubp_inst, det_size_segments); + /* Should never be hit, if it is we have an erroneous hw config*/ + ASSERT(hubbub2->det0_size + hubbub2->det1_size + hubbub2->det2_size + + hubbub2->det3_size + hubbub2->compbuf_size_segments <= hubbub2->crb_size_segs); +} + +static void dcn31_wait_for_det_apply(struct hubbub *hubbub, int hubp_inst) +{ + struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub); + + switch (hubp_inst) { + case 0: + REG_WAIT(DCHUBBUB_DET0_CTRL, DET0_SIZE_CURRENT, hubbub2->det0_size, 1000, 30); + break; + case 1: + REG_WAIT(DCHUBBUB_DET1_CTRL, DET1_SIZE_CURRENT, hubbub2->det1_size, 1000, 30); + break; + case 2: + REG_WAIT(DCHUBBUB_DET2_CTRL, DET2_SIZE_CURRENT, hubbub2->det2_size, 1000, 30); + break; + case 3: + REG_WAIT(DCHUBBUB_DET3_CTRL, DET3_SIZE_CURRENT, hubbub2->det3_size, 1000, 30); + break; + default: + break; + } +} + +static void dcn31_program_compbuf_size(struct hubbub *hubbub, unsigned int 
compbuf_size_kb, bool safe_to_increase) +{ + struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub); + unsigned int compbuf_size_segments = (compbuf_size_kb + DCN31_CRB_SEGMENT_SIZE_KB - 1) / DCN31_CRB_SEGMENT_SIZE_KB; + + if (safe_to_increase || compbuf_size_segments <= hubbub2->compbuf_size_segments) { + if (compbuf_size_segments > hubbub2->compbuf_size_segments) { + REG_WAIT(DCHUBBUB_DET0_CTRL, DET0_SIZE_CURRENT, hubbub2->det0_size, 1, 100); + REG_WAIT(DCHUBBUB_DET1_CTRL, DET1_SIZE_CURRENT, hubbub2->det1_size, 1, 100); + REG_WAIT(DCHUBBUB_DET2_CTRL, DET2_SIZE_CURRENT, hubbub2->det2_size, 1, 100); + REG_WAIT(DCHUBBUB_DET3_CTRL, DET3_SIZE_CURRENT, hubbub2->det3_size, 1, 100); + } + /* Should never be hit, if it is we have an erroneous hw config*/ + ASSERT(hubbub2->det0_size + hubbub2->det1_size + hubbub2->det2_size + + hubbub2->det3_size + compbuf_size_segments <= hubbub2->crb_size_segs); + REG_UPDATE(DCHUBBUB_COMPBUF_CTRL, COMPBUF_SIZE, compbuf_size_segments); + hubbub2->compbuf_size_segments = compbuf_size_segments; + ASSERT(REG_GET(DCHUBBUB_COMPBUF_CTRL, CONFIG_ERROR, &compbuf_size_segments) && !compbuf_size_segments); + } +} + +static uint32_t convert_and_clamp( + uint32_t wm_ns, + uint32_t refclk_mhz, + uint32_t clamp_value) +{ + uint32_t ret_val = 0; + ret_val = wm_ns * refclk_mhz; + ret_val /= 1000; + + if (ret_val > clamp_value) { + /* clamping WMs is abnormal, unexpected and may lead to underflow*/ + ASSERT(0); + ret_val = clamp_value; + } + + return ret_val; +} + +static bool hubbub31_program_urgent_watermarks( + struct hubbub *hubbub, + union dcn_watermark_set *watermarks, + unsigned int refclk_mhz, + bool safe_to_lower) +{ + struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub); + uint32_t prog_wm_value; + bool wm_pending = false; + + /* Repeat for water mark set A, B, C and D. 
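Each nanosecond-valued watermark below is converted to DLG RefClk cycles by convert_and_clamp(), i.e. wm_ns * refclk_mhz / 1000, clamped to the width of the destination register field (0x3fff for the urgency and trip-to-memory fields).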
*/ + /* clock state A */ + if (safe_to_lower || watermarks->a.urgent_ns > hubbub2->watermarks.a.urgent_ns) { + hubbub2->watermarks.a.urgent_ns = watermarks->a.urgent_ns; + prog_wm_value = convert_and_clamp(watermarks->a.urgent_ns, + refclk_mhz, 0x3fff); + REG_SET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A, 0, + DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A, prog_wm_value); + + DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_A calculated =%d\n" + "HW register value = 0x%x\n", + watermarks->a.urgent_ns, prog_wm_value); + } else if (watermarks->a.urgent_ns < hubbub2->watermarks.a.urgent_ns) + wm_pending = true; + + /* determine the transfer time for a quantity of data for a particular requestor.*/ + if (safe_to_lower || watermarks->a.frac_urg_bw_flip + > hubbub2->watermarks.a.frac_urg_bw_flip) { + hubbub2->watermarks.a.frac_urg_bw_flip = watermarks->a.frac_urg_bw_flip; + + REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_A, 0, + DCHUBBUB_ARB_FRAC_URG_BW_FLIP_A, watermarks->a.frac_urg_bw_flip); + } else if (watermarks->a.frac_urg_bw_flip + < hubbub2->watermarks.a.frac_urg_bw_flip) + wm_pending = true; + + if (safe_to_lower || watermarks->a.frac_urg_bw_nom + > hubbub2->watermarks.a.frac_urg_bw_nom) { + hubbub2->watermarks.a.frac_urg_bw_nom = watermarks->a.frac_urg_bw_nom; + + REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_NOM_A, 0, + DCHUBBUB_ARB_FRAC_URG_BW_NOM_A, watermarks->a.frac_urg_bw_nom); + } else if (watermarks->a.frac_urg_bw_nom + < hubbub2->watermarks.a.frac_urg_bw_nom) + wm_pending = true; + + if (safe_to_lower || watermarks->a.urgent_latency_ns > hubbub2->watermarks.a.urgent_latency_ns) { + hubbub2->watermarks.a.urgent_latency_ns = watermarks->a.urgent_latency_ns; + prog_wm_value = convert_and_clamp(watermarks->a.urgent_latency_ns, + refclk_mhz, 0x3fff); + REG_SET(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_A, 0, + DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_A, prog_wm_value); + } else if (watermarks->a.urgent_latency_ns < hubbub2->watermarks.a.urgent_latency_ns) + wm_pending = true; + + /* clock state B */ + if (safe_to_lower || watermarks->b.urgent_ns > hubbub2->watermarks.b.urgent_ns) { + hubbub2->watermarks.b.urgent_ns = watermarks->b.urgent_ns; + prog_wm_value = convert_and_clamp(watermarks->b.urgent_ns, + refclk_mhz, 0x3fff); + REG_SET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B, 0, + DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B, prog_wm_value); + + DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_B calculated =%d\n" + "HW register value = 0x%x\n", + watermarks->b.urgent_ns, prog_wm_value); + } else if (watermarks->b.urgent_ns < hubbub2->watermarks.b.urgent_ns) + wm_pending = true; + + /* determine the transfer time for a quantity of data for a particular requestor.*/ + if (safe_to_lower || watermarks->b.frac_urg_bw_flip + > hubbub2->watermarks.b.frac_urg_bw_flip) { + hubbub2->watermarks.b.frac_urg_bw_flip = watermarks->b.frac_urg_bw_flip; + + REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_B, 0, + DCHUBBUB_ARB_FRAC_URG_BW_FLIP_B, watermarks->b.frac_urg_bw_flip); + } else if (watermarks->b.frac_urg_bw_flip + < hubbub2->watermarks.b.frac_urg_bw_flip) + wm_pending = true; + + if (safe_to_lower || watermarks->b.frac_urg_bw_nom + > hubbub2->watermarks.b.frac_urg_bw_nom) { + hubbub2->watermarks.b.frac_urg_bw_nom = watermarks->b.frac_urg_bw_nom; + + REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_NOM_B, 0, + DCHUBBUB_ARB_FRAC_URG_BW_NOM_B, watermarks->b.frac_urg_bw_nom); + } else if (watermarks->b.frac_urg_bw_nom + < hubbub2->watermarks.b.frac_urg_bw_nom) + wm_pending = true; + + if (safe_to_lower || watermarks->b.urgent_latency_ns > hubbub2->watermarks.b.urgent_latency_ns) { + 
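/* e.g. an 800 ns urgent latency with a 24 MHz DLG RefClk (see hubbub31_get_dchub_ref_freq()) programs 800 * 24 / 1000 = 19 refclk cycles (integer math), well under the 0x3fff clamp */ +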
hubbub2->watermarks.b.urgent_latency_ns = watermarks->b.urgent_latency_ns; + prog_wm_value = convert_and_clamp(watermarks->b.urgent_latency_ns, + refclk_mhz, 0x3fff); + REG_SET(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_B, 0, + DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_B, prog_wm_value); + } else if (watermarks->b.urgent_latency_ns < hubbub2->watermarks.b.urgent_latency_ns) + wm_pending = true; + + /* clock state C */ + if (safe_to_lower || watermarks->c.urgent_ns > hubbub2->watermarks.c.urgent_ns) { + hubbub2->watermarks.c.urgent_ns = watermarks->c.urgent_ns; + prog_wm_value = convert_and_clamp(watermarks->c.urgent_ns, + refclk_mhz, 0x3fff); + REG_SET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C, 0, + DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C, prog_wm_value); + + DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_C calculated =%d\n" + "HW register value = 0x%x\n", + watermarks->c.urgent_ns, prog_wm_value); + } else if (watermarks->c.urgent_ns < hubbub2->watermarks.c.urgent_ns) + wm_pending = true; + + /* determine the transfer time for a quantity of data for a particular requestor.*/ + if (safe_to_lower || watermarks->c.frac_urg_bw_flip + > hubbub2->watermarks.c.frac_urg_bw_flip) { + hubbub2->watermarks.c.frac_urg_bw_flip = watermarks->c.frac_urg_bw_flip; + + REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_C, 0, + DCHUBBUB_ARB_FRAC_URG_BW_FLIP_C, watermarks->c.frac_urg_bw_flip); + } else if (watermarks->c.frac_urg_bw_flip + < hubbub2->watermarks.c.frac_urg_bw_flip) + wm_pending = true; + + if (safe_to_lower || watermarks->c.frac_urg_bw_nom + > hubbub2->watermarks.c.frac_urg_bw_nom) { + hubbub2->watermarks.c.frac_urg_bw_nom = watermarks->c.frac_urg_bw_nom; + + REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_NOM_C, 0, + DCHUBBUB_ARB_FRAC_URG_BW_NOM_C, watermarks->c.frac_urg_bw_nom); + } else if (watermarks->c.frac_urg_bw_nom + < hubbub2->watermarks.c.frac_urg_bw_nom) + wm_pending = true; + + if (safe_to_lower || watermarks->c.urgent_latency_ns > hubbub2->watermarks.c.urgent_latency_ns) { + hubbub2->watermarks.c.urgent_latency_ns = watermarks->c.urgent_latency_ns; + prog_wm_value = convert_and_clamp(watermarks->c.urgent_latency_ns, + refclk_mhz, 0x3fff); + REG_SET(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_C, 0, + DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_C, prog_wm_value); + } else if (watermarks->c.urgent_latency_ns < hubbub2->watermarks.c.urgent_latency_ns) + wm_pending = true; + + /* clock state D */ + if (safe_to_lower || watermarks->d.urgent_ns > hubbub2->watermarks.d.urgent_ns) { + hubbub2->watermarks.d.urgent_ns = watermarks->d.urgent_ns; + prog_wm_value = convert_and_clamp(watermarks->d.urgent_ns, + refclk_mhz, 0x3fff); + REG_SET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D, 0, + DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D, prog_wm_value); + + DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_D calculated =%d\n" + "HW register value = 0x%x\n", + watermarks->d.urgent_ns, prog_wm_value); + } else if (watermarks->d.urgent_ns < hubbub2->watermarks.d.urgent_ns) + wm_pending = true; + + /* determine the transfer time for a quantity of data for a particular requestor.*/ + if (safe_to_lower || watermarks->d.frac_urg_bw_flip + > hubbub2->watermarks.d.frac_urg_bw_flip) { + hubbub2->watermarks.d.frac_urg_bw_flip = watermarks->d.frac_urg_bw_flip; + + REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_D, 0, + DCHUBBUB_ARB_FRAC_URG_BW_FLIP_D, watermarks->d.frac_urg_bw_flip); + } else if (watermarks->d.frac_urg_bw_flip + < hubbub2->watermarks.d.frac_urg_bw_flip) + wm_pending = true; + + if (safe_to_lower || watermarks->d.frac_urg_bw_nom + > hubbub2->watermarks.d.frac_urg_bw_nom) { + 
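/* the frac_urg_bw_* values are programmed into the register directly; unlike the ns-based watermarks they need no convert_and_clamp() refclk conversion */ +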
hubbub2->watermarks.d.frac_urg_bw_nom = watermarks->d.frac_urg_bw_nom; + + REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_NOM_D, 0, + DCHUBBUB_ARB_FRAC_URG_BW_NOM_D, watermarks->d.frac_urg_bw_nom); + } else if (watermarks->d.frac_urg_bw_nom + < hubbub2->watermarks.d.frac_urg_bw_nom) + wm_pending = true; + + if (safe_to_lower || watermarks->d.urgent_latency_ns > hubbub2->watermarks.d.urgent_latency_ns) { + hubbub2->watermarks.d.urgent_latency_ns = watermarks->d.urgent_latency_ns; + prog_wm_value = convert_and_clamp(watermarks->d.urgent_latency_ns, + refclk_mhz, 0x3fff); + REG_SET(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_D, 0, + DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_D, prog_wm_value); + } else if (watermarks->d.urgent_latency_ns < hubbub2->watermarks.d.urgent_latency_ns) + wm_pending = true; + + return wm_pending; +} + +static bool hubbub31_program_stutter_watermarks( + struct hubbub *hubbub, + union dcn_watermark_set *watermarks, + unsigned int refclk_mhz, + bool safe_to_lower) +{ + struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub); + uint32_t prog_wm_value; + bool wm_pending = false; + + /* clock state A */ + if (safe_to_lower || watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns + > hubbub2->watermarks.a.cstate_pstate.cstate_enter_plus_exit_ns) { + hubbub2->watermarks.a.cstate_pstate.cstate_enter_plus_exit_ns = + watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns; + prog_wm_value = convert_and_clamp( + watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns, + refclk_mhz, 0xfffff); + REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A, 0, + DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A, prog_wm_value); + DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_A calculated =%d\n" + "HW register value = 0x%x\n", + watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value); + } else if (watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns + < hubbub2->watermarks.a.cstate_pstate.cstate_enter_plus_exit_ns) + wm_pending = true; + + if (safe_to_lower || watermarks->a.cstate_pstate.cstate_exit_ns + > hubbub2->watermarks.a.cstate_pstate.cstate_exit_ns) { + hubbub2->watermarks.a.cstate_pstate.cstate_exit_ns = + watermarks->a.cstate_pstate.cstate_exit_ns; + prog_wm_value = convert_and_clamp( + watermarks->a.cstate_pstate.cstate_exit_ns, + refclk_mhz, 0xfffff); + REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A, 0, + DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A, prog_wm_value); + DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_A calculated =%d\n" + "HW register value = 0x%x\n", + watermarks->a.cstate_pstate.cstate_exit_ns, prog_wm_value); + } else if (watermarks->a.cstate_pstate.cstate_exit_ns + < hubbub2->watermarks.a.cstate_pstate.cstate_exit_ns) + wm_pending = true; + + if (safe_to_lower || watermarks->a.cstate_pstate.cstate_enter_plus_exit_z8_ns + > hubbub2->watermarks.a.cstate_pstate.cstate_enter_plus_exit_z8_ns) { + hubbub2->watermarks.a.cstate_pstate.cstate_enter_plus_exit_z8_ns = + watermarks->a.cstate_pstate.cstate_enter_plus_exit_z8_ns; + prog_wm_value = convert_and_clamp( + watermarks->a.cstate_pstate.cstate_enter_plus_exit_z8_ns, + refclk_mhz, 0xfffff); + REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_A, 0, + DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_A, prog_wm_value); + DC_LOG_BANDWIDTH_CALCS("SR_ENTER_WATERMARK_Z8_A calculated =%d\n" + "HW register value = 0x%x\n", + watermarks->a.cstate_pstate.cstate_enter_plus_exit_z8_ns, prog_wm_value); + } else if (watermarks->a.cstate_pstate.cstate_enter_plus_exit_z8_ns + < hubbub2->watermarks.a.cstate_pstate.cstate_enter_plus_exit_z8_ns) + wm_pending = true; + + if 
(safe_to_lower || watermarks->a.cstate_pstate.cstate_exit_z8_ns + > hubbub2->watermarks.a.cstate_pstate.cstate_exit_z8_ns) { + hubbub2->watermarks.a.cstate_pstate.cstate_exit_z8_ns = + watermarks->a.cstate_pstate.cstate_exit_z8_ns; + prog_wm_value = convert_and_clamp( + watermarks->a.cstate_pstate.cstate_exit_z8_ns, + refclk_mhz, 0xfffff); + REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_A, 0, + DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_A, prog_wm_value); + DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_Z8_A calculated =%d\n" + "HW register value = 0x%x\n", + watermarks->a.cstate_pstate.cstate_exit_z8_ns, prog_wm_value); + } else if (watermarks->a.cstate_pstate.cstate_exit_z8_ns + < hubbub2->watermarks.a.cstate_pstate.cstate_exit_z8_ns) + wm_pending = true; + + /* clock state B */ + if (safe_to_lower || watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns + > hubbub2->watermarks.b.cstate_pstate.cstate_enter_plus_exit_ns) { + hubbub2->watermarks.b.cstate_pstate.cstate_enter_plus_exit_ns = + watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns; + prog_wm_value = convert_and_clamp( + watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns, + refclk_mhz, 0xfffff); + REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B, 0, + DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B, prog_wm_value); + DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_B calculated =%d\n" + "HW register value = 0x%x\n", + watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value); + } else if (watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns + < hubbub2->watermarks.b.cstate_pstate.cstate_enter_plus_exit_ns) + wm_pending = true; + + if (safe_to_lower || watermarks->b.cstate_pstate.cstate_exit_ns + > hubbub2->watermarks.b.cstate_pstate.cstate_exit_ns) { + hubbub2->watermarks.b.cstate_pstate.cstate_exit_ns = + watermarks->b.cstate_pstate.cstate_exit_ns; + prog_wm_value = convert_and_clamp( + watermarks->b.cstate_pstate.cstate_exit_ns, + refclk_mhz, 0xfffff); + REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B, 0, + DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B, prog_wm_value); + DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_B calculated =%d\n" + "HW register value = 0x%x\n", + watermarks->b.cstate_pstate.cstate_exit_ns, prog_wm_value); + } else if (watermarks->b.cstate_pstate.cstate_exit_ns + < hubbub2->watermarks.b.cstate_pstate.cstate_exit_ns) + wm_pending = true; + + if (safe_to_lower || watermarks->b.cstate_pstate.cstate_enter_plus_exit_z8_ns + > hubbub2->watermarks.b.cstate_pstate.cstate_enter_plus_exit_z8_ns) { + hubbub2->watermarks.b.cstate_pstate.cstate_enter_plus_exit_z8_ns = + watermarks->b.cstate_pstate.cstate_enter_plus_exit_z8_ns; + prog_wm_value = convert_and_clamp( + watermarks->b.cstate_pstate.cstate_enter_plus_exit_z8_ns, + refclk_mhz, 0xfffff); + REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_B, 0, + DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_B, prog_wm_value); + DC_LOG_BANDWIDTH_CALCS("SR_ENTER_WATERMARK_Z8_B calculated =%d\n" + "HW register value = 0x%x\n", + watermarks->b.cstate_pstate.cstate_enter_plus_exit_z8_ns, prog_wm_value); + } else if (watermarks->b.cstate_pstate.cstate_enter_plus_exit_z8_ns + < hubbub2->watermarks.b.cstate_pstate.cstate_enter_plus_exit_z8_ns) + wm_pending = true; + + if (safe_to_lower || watermarks->b.cstate_pstate.cstate_exit_z8_ns + > hubbub2->watermarks.b.cstate_pstate.cstate_exit_z8_ns) { + hubbub2->watermarks.b.cstate_pstate.cstate_exit_z8_ns = + watermarks->b.cstate_pstate.cstate_exit_z8_ns; + prog_wm_value = convert_and_clamp( + watermarks->b.cstate_pstate.cstate_exit_z8_ns, + refclk_mhz, 
0xfffff); + REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_B, 0, + DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_B, prog_wm_value); + DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_Z8_B calculated =%d\n" + "HW register value = 0x%x\n", + watermarks->b.cstate_pstate.cstate_exit_z8_ns, prog_wm_value); + } else if (watermarks->b.cstate_pstate.cstate_exit_z8_ns + < hubbub2->watermarks.b.cstate_pstate.cstate_exit_z8_ns) + wm_pending = true; + + /* clock state C */ + if (safe_to_lower || watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns + > hubbub2->watermarks.c.cstate_pstate.cstate_enter_plus_exit_ns) { + hubbub2->watermarks.c.cstate_pstate.cstate_enter_plus_exit_ns = + watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns; + prog_wm_value = convert_and_clamp( + watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns, + refclk_mhz, 0xfffff); + REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C, 0, + DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C, prog_wm_value); + DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_C calculated =%d\n" + "HW register value = 0x%x\n", + watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value); + } else if (watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns + < hubbub2->watermarks.c.cstate_pstate.cstate_enter_plus_exit_ns) + wm_pending = true; + + if (safe_to_lower || watermarks->c.cstate_pstate.cstate_exit_ns + > hubbub2->watermarks.c.cstate_pstate.cstate_exit_ns) { + hubbub2->watermarks.c.cstate_pstate.cstate_exit_ns = + watermarks->c.cstate_pstate.cstate_exit_ns; + prog_wm_value = convert_and_clamp( + watermarks->c.cstate_pstate.cstate_exit_ns, + refclk_mhz, 0xfffff); + REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C, 0, + DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C, prog_wm_value); + DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_C calculated =%d\n" + "HW register value = 0x%x\n", + watermarks->c.cstate_pstate.cstate_exit_ns, prog_wm_value); + } else if (watermarks->c.cstate_pstate.cstate_exit_ns + < hubbub2->watermarks.c.cstate_pstate.cstate_exit_ns) + wm_pending = true; + + if (safe_to_lower || watermarks->c.cstate_pstate.cstate_enter_plus_exit_z8_ns + > hubbub2->watermarks.c.cstate_pstate.cstate_enter_plus_exit_z8_ns) { + hubbub2->watermarks.c.cstate_pstate.cstate_enter_plus_exit_z8_ns = + watermarks->c.cstate_pstate.cstate_enter_plus_exit_z8_ns; + prog_wm_value = convert_and_clamp( + watermarks->c.cstate_pstate.cstate_enter_plus_exit_z8_ns, + refclk_mhz, 0xfffff); + REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_C, 0, + DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_C, prog_wm_value); + DC_LOG_BANDWIDTH_CALCS("SR_ENTER_WATERMARK_Z8_C calculated =%d\n" + "HW register value = 0x%x\n", + watermarks->c.cstate_pstate.cstate_enter_plus_exit_z8_ns, prog_wm_value); + } else if (watermarks->c.cstate_pstate.cstate_enter_plus_exit_z8_ns + < hubbub2->watermarks.c.cstate_pstate.cstate_enter_plus_exit_z8_ns) + wm_pending = true; + + if (safe_to_lower || watermarks->c.cstate_pstate.cstate_exit_z8_ns + > hubbub2->watermarks.c.cstate_pstate.cstate_exit_z8_ns) { + hubbub2->watermarks.c.cstate_pstate.cstate_exit_z8_ns = + watermarks->c.cstate_pstate.cstate_exit_z8_ns; + prog_wm_value = convert_and_clamp( + watermarks->c.cstate_pstate.cstate_exit_z8_ns, + refclk_mhz, 0xfffff); + REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_C, 0, + DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_C, prog_wm_value); + DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_Z8_C calculated =%d\n" + "HW register value = 0x%x\n", + watermarks->c.cstate_pstate.cstate_exit_z8_ns, prog_wm_value); + } else if 
(watermarks->c.cstate_pstate.cstate_exit_z8_ns + < hubbub2->watermarks.c.cstate_pstate.cstate_exit_z8_ns) + wm_pending = true; + + /* clock state D */ + if (safe_to_lower || watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns + > hubbub2->watermarks.d.cstate_pstate.cstate_enter_plus_exit_ns) { + hubbub2->watermarks.d.cstate_pstate.cstate_enter_plus_exit_ns = + watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns; + prog_wm_value = convert_and_clamp( + watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns, + refclk_mhz, 0xfffff); + REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D, 0, + DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D, prog_wm_value); + DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_D calculated =%d\n" + "HW register value = 0x%x\n", + watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value); + } else if (watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns + < hubbub2->watermarks.d.cstate_pstate.cstate_enter_plus_exit_ns) + wm_pending = true; + + if (safe_to_lower || watermarks->d.cstate_pstate.cstate_exit_ns + > hubbub2->watermarks.d.cstate_pstate.cstate_exit_ns) { + hubbub2->watermarks.d.cstate_pstate.cstate_exit_ns = + watermarks->d.cstate_pstate.cstate_exit_ns; + prog_wm_value = convert_and_clamp( + watermarks->d.cstate_pstate.cstate_exit_ns, + refclk_mhz, 0xfffff); + REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D, 0, + DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D, prog_wm_value); + DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_D calculated =%d\n" + "HW register value = 0x%x\n", + watermarks->d.cstate_pstate.cstate_exit_ns, prog_wm_value); + } else if (watermarks->d.cstate_pstate.cstate_exit_ns + < hubbub2->watermarks.d.cstate_pstate.cstate_exit_ns) + wm_pending = true; + + if (safe_to_lower || watermarks->d.cstate_pstate.cstate_enter_plus_exit_z8_ns + > hubbub2->watermarks.d.cstate_pstate.cstate_enter_plus_exit_z8_ns) { + hubbub2->watermarks.d.cstate_pstate.cstate_enter_plus_exit_z8_ns = + watermarks->d.cstate_pstate.cstate_enter_plus_exit_z8_ns; + prog_wm_value = convert_and_clamp( + watermarks->d.cstate_pstate.cstate_enter_plus_exit_z8_ns, + refclk_mhz, 0xfffff); + REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_D, 0, + DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_D, prog_wm_value); + DC_LOG_BANDWIDTH_CALCS("SR_ENTER_WATERMARK_Z8_D calculated =%d\n" + "HW register value = 0x%x\n", + watermarks->d.cstate_pstate.cstate_enter_plus_exit_z8_ns, prog_wm_value); + } else if (watermarks->d.cstate_pstate.cstate_enter_plus_exit_z8_ns + < hubbub2->watermarks.d.cstate_pstate.cstate_enter_plus_exit_z8_ns) + wm_pending = true; + + if (safe_to_lower || watermarks->d.cstate_pstate.cstate_exit_z8_ns + > hubbub2->watermarks.d.cstate_pstate.cstate_exit_z8_ns) { + hubbub2->watermarks.d.cstate_pstate.cstate_exit_z8_ns = + watermarks->d.cstate_pstate.cstate_exit_z8_ns; + prog_wm_value = convert_and_clamp( + watermarks->d.cstate_pstate.cstate_exit_z8_ns, + refclk_mhz, 0xfffff); + REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_D, 0, + DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_D, prog_wm_value); + DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_Z8_D calculated =%d\n" + "HW register value = 0x%x\n", + watermarks->d.cstate_pstate.cstate_exit_z8_ns, prog_wm_value); + } else if (watermarks->d.cstate_pstate.cstate_exit_z8_ns + < hubbub2->watermarks.d.cstate_pstate.cstate_exit_z8_ns) + wm_pending = true; + + return wm_pending; +} + +static bool hubbub31_program_pstate_watermarks( + struct hubbub *hubbub, + union dcn_watermark_set *watermarks, + unsigned int refclk_mhz, + bool safe_to_lower) +{ + struct 
dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub); + uint32_t prog_wm_value; + + bool wm_pending = false; + + /* clock state A */ + if (safe_to_lower || watermarks->a.cstate_pstate.pstate_change_ns + > hubbub2->watermarks.a.cstate_pstate.pstate_change_ns) { + hubbub2->watermarks.a.cstate_pstate.pstate_change_ns = + watermarks->a.cstate_pstate.pstate_change_ns; + prog_wm_value = convert_and_clamp( + watermarks->a.cstate_pstate.pstate_change_ns, + refclk_mhz, 0xffff); + REG_SET(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A, 0, + DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A, prog_wm_value); + DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_A calculated =%d\n" + "HW register value = 0x%x\n\n", + watermarks->a.cstate_pstate.pstate_change_ns, prog_wm_value); + } else if (watermarks->a.cstate_pstate.pstate_change_ns + < hubbub2->watermarks.a.cstate_pstate.pstate_change_ns) + wm_pending = true; + + /* clock state B */ + if (safe_to_lower || watermarks->b.cstate_pstate.pstate_change_ns + > hubbub2->watermarks.b.cstate_pstate.pstate_change_ns) { + hubbub2->watermarks.b.cstate_pstate.pstate_change_ns = + watermarks->b.cstate_pstate.pstate_change_ns; + prog_wm_value = convert_and_clamp( + watermarks->b.cstate_pstate.pstate_change_ns, + refclk_mhz, 0xffff); + REG_SET(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B, 0, + DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B, prog_wm_value); + DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_B calculated =%d\n" + "HW register value = 0x%x\n\n", + watermarks->b.cstate_pstate.pstate_change_ns, prog_wm_value); + } else if (watermarks->b.cstate_pstate.pstate_change_ns + < hubbub2->watermarks.b.cstate_pstate.pstate_change_ns) + wm_pending = false; + + /* clock state C */ + if (safe_to_lower || watermarks->c.cstate_pstate.pstate_change_ns + > hubbub2->watermarks.c.cstate_pstate.pstate_change_ns) { + hubbub2->watermarks.c.cstate_pstate.pstate_change_ns = + watermarks->c.cstate_pstate.pstate_change_ns; + prog_wm_value = convert_and_clamp( + watermarks->c.cstate_pstate.pstate_change_ns, + refclk_mhz, 0xffff); + REG_SET(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C, 0, + DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C, prog_wm_value); + DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_C calculated =%d\n" + "HW register value = 0x%x\n\n", + watermarks->c.cstate_pstate.pstate_change_ns, prog_wm_value); + } else if (watermarks->c.cstate_pstate.pstate_change_ns + < hubbub2->watermarks.c.cstate_pstate.pstate_change_ns) + wm_pending = true; + + /* clock state D */ + if (safe_to_lower || watermarks->d.cstate_pstate.pstate_change_ns + > hubbub2->watermarks.d.cstate_pstate.pstate_change_ns) { + hubbub2->watermarks.d.cstate_pstate.pstate_change_ns = + watermarks->d.cstate_pstate.pstate_change_ns; + prog_wm_value = convert_and_clamp( + watermarks->d.cstate_pstate.pstate_change_ns, + refclk_mhz, 0xffff); + REG_SET(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D, 0, + DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D, prog_wm_value); + DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_D calculated =%d\n" + "HW register value = 0x%x\n\n", + watermarks->d.cstate_pstate.pstate_change_ns, prog_wm_value); + } else if (watermarks->d.cstate_pstate.pstate_change_ns + < hubbub2->watermarks.d.cstate_pstate.pstate_change_ns) + wm_pending = true; + + return wm_pending; +} + +static bool hubbub31_program_watermarks( + struct hubbub *hubbub, + union dcn_watermark_set *watermarks, + unsigned int refclk_mhz, + bool safe_to_lower) +{ + bool wm_pending = false; + + if 
(hubbub31_program_urgent_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower)) + wm_pending = true; + + if (hubbub31_program_stutter_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower)) + wm_pending = true; + + if (hubbub31_program_pstate_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower)) + wm_pending = true; + + /* + * The DCHub arbiter has a mechanism to dynamically rate limit the DCHub request stream to the fabric. + * If the memory controller is fully utilized and the DCHub requestors are + * well ahead of their amortized schedule, then it is safe to prevent the next winner + * from being committed and sent to the fabric. + * The utilization of the memory controller is approximated by ensuring that + * the number of outstanding requests is greater than a threshold specified + * by the ARB_MIN_REQ_OUTSTANDING. To determine that the DCHub requestors are well ahead of the amortized schedule, + * the slack of the next winner is compared with the ARB_SAT_LEVEL in DLG RefClk cycles. + * + * TODO: Revisit request limit after figure out right number. request limit for RM isn't decided yet, set maximum value (0x1FF) + * to turn off it for now. + */ + /*REG_SET(DCHUBBUB_ARB_SAT_LEVEL, 0, + DCHUBBUB_ARB_SAT_LEVEL, 60 * refclk_mhz); + REG_UPDATE(DCHUBBUB_ARB_DF_REQ_OUTSTAND, + DCHUBBUB_ARB_MIN_REQ_OUTSTAND, 0x1FF);*/ + + hubbub1_allow_self_refresh_control(hubbub, !hubbub->ctx->dc->debug.disable_stutter); + return wm_pending; +} + +static void hubbub3_get_blk256_size(unsigned int *blk256_width, unsigned int *blk256_height, + unsigned int bytes_per_element) +{ + /* copied from DML. might want to refactor DML to leverage from DML */ + /* DML : get_blk256_size */ + if (bytes_per_element == 1) { + *blk256_width = 16; + *blk256_height = 16; + } else if (bytes_per_element == 2) { + *blk256_width = 16; + *blk256_height = 8; + } else if (bytes_per_element == 4) { + *blk256_width = 8; + *blk256_height = 8; + } else if (bytes_per_element == 8) { + *blk256_width = 8; + *blk256_height = 4; + } +} + +static void hubbub31_det_request_size( + unsigned int detile_buf_size, + unsigned int height, + unsigned int width, + unsigned int bpe, + bool *req128_horz_wc, + bool *req128_vert_wc) +{ + unsigned int blk256_height = 0; + unsigned int blk256_width = 0; + unsigned int swath_bytes_horz_wc, swath_bytes_vert_wc; + + hubbub3_get_blk256_size(&blk256_width, &blk256_height, bpe); + + swath_bytes_horz_wc = width * blk256_height * bpe; + swath_bytes_vert_wc = height * blk256_width * bpe; + + *req128_horz_wc = (2 * swath_bytes_horz_wc <= detile_buf_size) ? + false : /* full 256B request */ + true; /* half 128b request */ + + *req128_vert_wc = (2 * swath_bytes_vert_wc <= detile_buf_size) ? 
+ false : /* full 256B request */ + true; /* half 128b request */ +} + +static bool hubbub31_get_dcc_compression_cap(struct hubbub *hubbub, + const struct dc_dcc_surface_param *input, + struct dc_surface_dcc_cap *output) +{ + struct dc *dc = hubbub->ctx->dc; + enum dcc_control dcc_control; + unsigned int bpe; + enum segment_order segment_order_horz, segment_order_vert; + bool req128_horz_wc, req128_vert_wc; + + memset(output, 0, sizeof(*output)); + + if (dc->debug.disable_dcc == DCC_DISABLE) + return false; + + if (!hubbub->funcs->dcc_support_pixel_format(input->format, + &bpe)) + return false; + + if (!hubbub->funcs->dcc_support_swizzle(input->swizzle_mode, bpe, + &segment_order_horz, &segment_order_vert)) + return false; + + hubbub31_det_request_size(TO_DCN20_HUBBUB(hubbub)->detile_buf_size, + input->surface_size.height, input->surface_size.width, + bpe, &req128_horz_wc, &req128_vert_wc); + + if (!req128_horz_wc && !req128_vert_wc) { + dcc_control = dcc_control__256_256_xxx; + } else if (input->scan == SCAN_DIRECTION_HORIZONTAL) { + if (!req128_horz_wc) + dcc_control = dcc_control__256_256_xxx; + else if (segment_order_horz == segment_order__contiguous) + dcc_control = dcc_control__128_128_xxx; + else + dcc_control = dcc_control__256_64_64; + } else if (input->scan == SCAN_DIRECTION_VERTICAL) { + if (!req128_vert_wc) + dcc_control = dcc_control__256_256_xxx; + else if (segment_order_vert == segment_order__contiguous) + dcc_control = dcc_control__128_128_xxx; + else + dcc_control = dcc_control__256_64_64; + } else { + if ((req128_horz_wc && + segment_order_horz == segment_order__non_contiguous) || + (req128_vert_wc && + segment_order_vert == segment_order__non_contiguous)) + /* access_dir not known, must use most constraining */ + dcc_control = dcc_control__256_64_64; + else + /* reg128 is true for either horz and vert + * but segment_order is contiguous + */ + dcc_control = dcc_control__128_128_xxx; + } + + /* Exception for 64KB_R_X */ + if ((bpe == 2) && (input->swizzle_mode == DC_SW_64KB_R_X)) + dcc_control = dcc_control__128_128_xxx; + + if (dc->debug.disable_dcc == DCC_HALF_REQ_DISALBE && + dcc_control != dcc_control__256_256_xxx) + return false; + + switch (dcc_control) { + case dcc_control__256_256_xxx: + output->grph.rgb.max_uncompressed_blk_size = 256; + output->grph.rgb.max_compressed_blk_size = 256; + output->grph.rgb.independent_64b_blks = false; + output->grph.rgb.dcc_controls.dcc_256_256_unconstrained = 1; + output->grph.rgb.dcc_controls.dcc_256_128_128 = 1; + break; + case dcc_control__128_128_xxx: + output->grph.rgb.max_uncompressed_blk_size = 128; + output->grph.rgb.max_compressed_blk_size = 128; + output->grph.rgb.independent_64b_blks = false; + output->grph.rgb.dcc_controls.dcc_128_128_uncontrained = 1; + output->grph.rgb.dcc_controls.dcc_256_128_128 = 1; + break; + case dcc_control__256_64_64: + output->grph.rgb.max_uncompressed_blk_size = 256; + output->grph.rgb.max_compressed_blk_size = 64; + output->grph.rgb.independent_64b_blks = true; + output->grph.rgb.dcc_controls.dcc_256_64_64 = 1; + break; + case dcc_control__256_128_128: + output->grph.rgb.max_uncompressed_blk_size = 256; + output->grph.rgb.max_compressed_blk_size = 128; + output->grph.rgb.independent_64b_blks = false; + output->grph.rgb.dcc_controls.dcc_256_128_128 = 1; + break; + } + output->capable = true; + output->const_color_support = true; + + return true; +} + +int hubbub31_init_dchub_sys_ctx(struct hubbub *hubbub, + struct dcn_hubbub_phys_addr_config *pa_config) +{ + struct dcn20_hubbub *hubbub2 = 
TO_DCN20_HUBBUB(hubbub); + struct dcn_vmid_page_table_config phys_config; + + REG_SET(DCN_VM_FB_LOCATION_BASE, 0, + FB_BASE, pa_config->system_aperture.fb_base >> 24); + REG_SET(DCN_VM_FB_LOCATION_TOP, 0, + FB_TOP, pa_config->system_aperture.fb_top >> 24); + REG_SET(DCN_VM_FB_OFFSET, 0, + FB_OFFSET, pa_config->system_aperture.fb_offset >> 24); + REG_SET(DCN_VM_AGP_BOT, 0, + AGP_BOT, pa_config->system_aperture.agp_bot >> 24); + REG_SET(DCN_VM_AGP_TOP, 0, + AGP_TOP, pa_config->system_aperture.agp_top >> 24); + REG_SET(DCN_VM_AGP_BASE, 0, + AGP_BASE, pa_config->system_aperture.agp_base >> 24); + + if (pa_config->gart_config.page_table_start_addr != pa_config->gart_config.page_table_end_addr) { + phys_config.page_table_start_addr = pa_config->gart_config.page_table_start_addr >> 12; + phys_config.page_table_end_addr = pa_config->gart_config.page_table_end_addr >> 12; + phys_config.page_table_base_addr = pa_config->gart_config.page_table_base_addr; + phys_config.depth = 0; + phys_config.block_size = 0; + // Init VMID 0 based on PA config + dcn20_vmid_setup(&hubbub2->vmid[0], &phys_config); + + dcn20_vmid_setup(&hubbub2->vmid[15], &phys_config); + } + + dcn21_dchvm_init(hubbub); + + return NUM_VMID; +} + +static void hubbub31_get_dchub_ref_freq(struct hubbub *hubbub, + unsigned int dccg_ref_freq_inKhz, + unsigned int *dchub_ref_freq_inKhz) +{ + struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub); + uint32_t ref_div = 0; + uint32_t ref_en = 0; + unsigned int dc_refclk_khz = 24000; + + REG_GET_2(DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_REFDIV, &ref_div, + DCHUBBUB_GLOBAL_TIMER_ENABLE, &ref_en); + + if (ref_en) { + if (ref_div == 2) + *dchub_ref_freq_inKhz = dc_refclk_khz / 2; + else + *dchub_ref_freq_inKhz = dc_refclk_khz; + + /* + * The external Reference Clock may change based on the board or + * platform requirements and the programmable integer divide must + * be programmed to provide a suitable DLG RefClk frequency between + * a minimum of 20MHz and maximum of 50MHz + */ + if (*dchub_ref_freq_inKhz < 20000 || *dchub_ref_freq_inKhz > 50000) + ASSERT_CRITICAL(false); + + return; + } else { + *dchub_ref_freq_inKhz = dc_refclk_khz; + + // HUBBUB global timer must be enabled. + ASSERT_CRITICAL(false); + return; + } +} + +static bool hubbub31_verify_allow_pstate_change_high(struct hubbub *hubbub) +{ + struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub); + + /* + * Pstate latency is ~20us so if we wait over 40us and pstate allow + * still not asserted, we are probably stuck and going to hang + */ + const unsigned int pstate_wait_timeout_us = 100; + const unsigned int pstate_wait_expected_timeout_us = 40; + + static unsigned int max_sampled_pstate_wait_us; /* data collection */ + static bool forced_pstate_allow; /* help with revert wa */ + + unsigned int debug_data = 0; + unsigned int i; + + if (forced_pstate_allow) { + /* we hacked to force pstate allow to prevent hang last time + * we verify_allow_pstate_change_high. so disable force + * here so we can check status + */ + REG_UPDATE_2(DCHUBBUB_ARB_DRAM_STATE_CNTL, + DCHUBBUB_ARB_ALLOW_PSTATE_CHANGE_FORCE_VALUE, 0, + DCHUBBUB_ARB_ALLOW_PSTATE_CHANGE_FORCE_ENABLE, 0); + forced_pstate_allow = false; + } + + REG_WRITE(DCHUBBUB_TEST_DEBUG_INDEX, hubbub2->debug_test_index_pstate); + + for (i = 0; i < pstate_wait_timeout_us; i++) { + debug_data = REG_READ(DCHUBBUB_TEST_DEBUG_DATA); + + /* Debug bit is specific to ASIC. 
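For DCN3.1 the pstate allow status is sampled from bit 26 of DCHUBBUB_TEST_DEBUG_DATA, with the debug bus selected by writing debug_test_index_pstate (0x6, set in hubbub31_construct()) to DCHUBBUB_TEST_DEBUG_INDEX.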
*/ + if (debug_data & (1 << 26)) { + if (i > pstate_wait_expected_timeout_us) + DC_LOG_WARNING("pstate took longer than expected ~%dus\n", i); + return true; + } + if (max_sampled_pstate_wait_us < i) + max_sampled_pstate_wait_us = i; + + udelay(1); + } + + /* force pstate allow to prevent system hang + * and break to debugger to investigate + */ + REG_UPDATE_2(DCHUBBUB_ARB_DRAM_STATE_CNTL, + DCHUBBUB_ARB_ALLOW_PSTATE_CHANGE_FORCE_VALUE, 1, + DCHUBBUB_ARB_ALLOW_PSTATE_CHANGE_FORCE_ENABLE, 1); + forced_pstate_allow = true; + + DC_LOG_WARNING("pstate TEST_DEBUG_DATA: 0x%X\n", + debug_data); + + return false; +} + +void hubbub31_init(struct hubbub *hubbub) +{ + struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub); + + /*Enable clock gate*/ + if (hubbub->ctx->dc->debug.disable_clock_gate) { + /*done in hwseq*/ + /*REG_UPDATE(DCFCLK_CNTL, DCFCLK_GATE_DIS, 0);*/ + REG_UPDATE_2(DCHUBBUB_CLOCK_CNTL, + DISPCLK_R_DCHUBBUB_GATE_DIS, 1, + DCFCLK_R_DCHUBBUB_GATE_DIS, 1); + } + + /* + only the DCN will determine when to connect the SDP port + */ + REG_UPDATE(DCHUBBUB_SDPIF_CFG0, SDPIF_PORT_CONTROL, 1); +} +static const struct hubbub_funcs hubbub31_funcs = { + .update_dchub = hubbub2_update_dchub, + .init_dchub_sys_ctx = hubbub31_init_dchub_sys_ctx, + .init_vm_ctx = hubbub2_init_vm_ctx, + .dcc_support_swizzle = hubbub3_dcc_support_swizzle, + .dcc_support_pixel_format = hubbub2_dcc_support_pixel_format, + .get_dcc_compression_cap = hubbub31_get_dcc_compression_cap, + .wm_read_state = hubbub21_wm_read_state, + .get_dchub_ref_freq = hubbub31_get_dchub_ref_freq, + .program_watermarks = hubbub31_program_watermarks, + .allow_self_refresh_control = hubbub1_allow_self_refresh_control, + .is_allow_self_refresh_enabled = hubbub1_is_allow_self_refresh_enabled, + .verify_allow_pstate_change_high = hubbub31_verify_allow_pstate_change_high, + .program_det_size = dcn31_program_det_size, + .wait_for_det_apply = dcn31_wait_for_det_apply, + .program_compbuf_size = dcn31_program_compbuf_size, + .init_crb = dcn31_init_crb, + .hubbub_read_state = hubbub2_read_state, +}; + +void hubbub31_construct(struct dcn20_hubbub *hubbub31, + struct dc_context *ctx, + const struct dcn_hubbub_registers *hubbub_regs, + const struct dcn_hubbub_shift *hubbub_shift, + const struct dcn_hubbub_mask *hubbub_mask, + int det_size_kb, + int pixel_chunk_size_kb, + int config_return_buffer_size_kb) +{ + + hubbub3_construct(hubbub31, ctx, hubbub_regs, hubbub_shift, hubbub_mask); + hubbub31->base.funcs = &hubbub31_funcs; + hubbub31->detile_buf_size = det_size_kb * 1024; + hubbub31->pixel_chunk_size = pixel_chunk_size_kb * 1024; + hubbub31->crb_size_segs = config_return_buffer_size_kb / DCN31_CRB_SEGMENT_SIZE_KB; + + hubbub31->debug_test_index_pstate = 0x6; +} + diff --git a/drivers/gpu/drm/amd/display/dc/hubbub/dcn31/dcn31_hubbub.h b/drivers/gpu/drm/amd/display/dc/hubbub/dcn31/dcn31_hubbub.h new file mode 100644 index 000000000000..89d6208287b5 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/hubbub/dcn31/dcn31_hubbub.h @@ -0,0 +1,147 @@ +/* + * Copyright 2016 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#ifndef __DC_HUBBUB_DCN31_H__ +#define __DC_HUBBUB_DCN31_H__ + +#include "dcn21/dcn21_hubbub.h" + +#define HUBBUB_REG_LIST_DCN31(id)\ + HUBBUB_REG_LIST_DCN30(id),\ + SR(DCHVM_CTRL0),\ + SR(DCHVM_MEM_CTRL),\ + SR(DCHVM_CLK_CTRL),\ + SR(DCHVM_RIOMMU_CTRL0),\ + SR(DCHVM_RIOMMU_STAT0),\ + SR(DCHUBBUB_DET0_CTRL),\ + SR(DCHUBBUB_DET1_CTRL),\ + SR(DCHUBBUB_DET2_CTRL),\ + SR(DCHUBBUB_DET3_CTRL),\ + SR(DCHUBBUB_COMPBUF_CTRL),\ + SR(COMPBUF_RESERVED_SPACE),\ + SR(DCHUBBUB_DEBUG_CTRL_0),\ + SR(DCHUBBUB_CLOCK_CNTL),\ + SR(DCHUBBUB_SDPIF_CFG0),\ + SR(DCHUBBUB_SDPIF_CFG1),\ + SR(DCHUBBUB_MEM_PWR_MODE_CTRL),\ + SR(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_A),\ + SR(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_A),\ + SR(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_B),\ + SR(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_B),\ + SR(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_C),\ + SR(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_C),\ + SR(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_D),\ + SR(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_D) + +#define HUBBUB_MASK_SH_LIST_DCN31(mask_sh)\ + HUBBUB_MASK_SH_LIST_DCN_COMMON(mask_sh), \ + HUBBUB_MASK_SH_LIST_STUTTER(mask_sh), \ + HUBBUB_SF(DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_REFDIV, mask_sh), \ + HUBBUB_SF(DCN_VM_FB_LOCATION_BASE, FB_BASE, mask_sh), \ + HUBBUB_SF(DCN_VM_FB_LOCATION_TOP, FB_TOP, mask_sh), \ + HUBBUB_SF(DCN_VM_FB_OFFSET, FB_OFFSET, mask_sh), \ + HUBBUB_SF(DCN_VM_AGP_BOT, AGP_BOT, mask_sh), \ + HUBBUB_SF(DCN_VM_AGP_TOP, AGP_TOP, mask_sh), \ + HUBBUB_SF(DCN_VM_AGP_BASE, AGP_BASE, mask_sh), \ + HUBBUB_SF(DCHVM_CTRL0, HOSTVM_INIT_REQ, mask_sh),\ + HUBBUB_SF(DCHVM_MEM_CTRL, HVM_GPUVMRET_PWR_REQ_DIS, mask_sh),\ + HUBBUB_SF(DCHVM_MEM_CTRL, HVM_GPUVMRET_FORCE_REQ, mask_sh),\ + HUBBUB_SF(DCHVM_MEM_CTRL, HVM_GPUVMRET_POWER_STATUS, mask_sh),\ + HUBBUB_SF(DCHVM_CLK_CTRL, HVM_DISPCLK_R_GATE_DIS, mask_sh),\ + HUBBUB_SF(DCHVM_CLK_CTRL, HVM_DISPCLK_G_GATE_DIS, mask_sh),\ + HUBBUB_SF(DCHVM_CLK_CTRL, HVM_DCFCLK_R_GATE_DIS, mask_sh),\ + HUBBUB_SF(DCHVM_CLK_CTRL, HVM_DCFCLK_G_GATE_DIS, mask_sh),\ + HUBBUB_SF(DCHVM_CLK_CTRL, TR_REQ_REQCLKREQ_MODE, mask_sh),\ + HUBBUB_SF(DCHVM_CLK_CTRL, TW_RSP_COMPCLKREQ_MODE, mask_sh),\ + HUBBUB_SF(DCHVM_RIOMMU_CTRL0, HOSTVM_PREFETCH_REQ, mask_sh),\ + HUBBUB_SF(DCHVM_RIOMMU_CTRL0, HOSTVM_POWERSTATUS, mask_sh),\ + HUBBUB_SF(DCHVM_RIOMMU_STAT0, RIOMMU_ACTIVE, mask_sh),\ + HUBBUB_SF(DCHVM_RIOMMU_STAT0, HOSTVM_PREFETCH_DONE, mask_sh),\ + 
HUBBUB_SF(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_A, DCHUBBUB_ARB_FRAC_URG_BW_FLIP_A, mask_sh), \ + HUBBUB_SF(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_B, DCHUBBUB_ARB_FRAC_URG_BW_FLIP_B, mask_sh), \ + HUBBUB_SF(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_C, DCHUBBUB_ARB_FRAC_URG_BW_FLIP_C, mask_sh), \ + HUBBUB_SF(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_D, DCHUBBUB_ARB_FRAC_URG_BW_FLIP_D, mask_sh), \ + HUBBUB_SF(DCHUBBUB_ARB_FRAC_URG_BW_NOM_A, DCHUBBUB_ARB_FRAC_URG_BW_NOM_A, mask_sh), \ + HUBBUB_SF(DCHUBBUB_ARB_FRAC_URG_BW_NOM_B, DCHUBBUB_ARB_FRAC_URG_BW_NOM_B, mask_sh), \ + HUBBUB_SF(DCHUBBUB_ARB_FRAC_URG_BW_NOM_C, DCHUBBUB_ARB_FRAC_URG_BW_NOM_C, mask_sh), \ + HUBBUB_SF(DCHUBBUB_ARB_FRAC_URG_BW_NOM_D, DCHUBBUB_ARB_FRAC_URG_BW_NOM_D, mask_sh), \ + HUBBUB_SF(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_A, DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_A, mask_sh), \ + HUBBUB_SF(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_B, DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_B, mask_sh), \ + HUBBUB_SF(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_C, DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_C, mask_sh), \ + HUBBUB_SF(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_D, DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_D, mask_sh),\ + HUBBUB_SF(DCHUBBUB_DEBUG_CTRL_0, DET_DEPTH, mask_sh),\ + HUBBUB_SF(DCHUBBUB_DET0_CTRL, DET0_SIZE, mask_sh),\ + HUBBUB_SF(DCHUBBUB_DET0_CTRL, DET0_SIZE_CURRENT, mask_sh),\ + HUBBUB_SF(DCHUBBUB_DET1_CTRL, DET1_SIZE, mask_sh),\ + HUBBUB_SF(DCHUBBUB_DET1_CTRL, DET1_SIZE_CURRENT, mask_sh),\ + HUBBUB_SF(DCHUBBUB_DET2_CTRL, DET2_SIZE, mask_sh),\ + HUBBUB_SF(DCHUBBUB_DET2_CTRL, DET2_SIZE_CURRENT, mask_sh),\ + HUBBUB_SF(DCHUBBUB_DET3_CTRL, DET3_SIZE, mask_sh),\ + HUBBUB_SF(DCHUBBUB_DET3_CTRL, DET3_SIZE_CURRENT, mask_sh),\ + HUBBUB_SF(DCHUBBUB_COMPBUF_CTRL, COMPBUF_SIZE, mask_sh),\ + HUBBUB_SF(DCHUBBUB_COMPBUF_CTRL, COMPBUF_SIZE_CURRENT, mask_sh),\ + HUBBUB_SF(DCHUBBUB_COMPBUF_CTRL, CONFIG_ERROR, mask_sh),\ + HUBBUB_SF(COMPBUF_RESERVED_SPACE, COMPBUF_RESERVED_SPACE_64B, mask_sh),\ + HUBBUB_SF(COMPBUF_RESERVED_SPACE, COMPBUF_RESERVED_SPACE_ZS, mask_sh),\ + HUBBUB_SF(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_A, DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_A, mask_sh), \ + HUBBUB_SF(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_B, DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_B, mask_sh), \ + HUBBUB_SF(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_C, DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_C, mask_sh), \ + HUBBUB_SF(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_D, DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_D, mask_sh), \ + HUBBUB_SF(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_A, DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_A, mask_sh), \ + HUBBUB_SF(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_B, DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_B, mask_sh), \ + HUBBUB_SF(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_C, DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_C, mask_sh), \ + HUBBUB_SF(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_D, DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_D, mask_sh), \ + HUBBUB_SF(DCN_VM_FAULT_ADDR_MSB, DCN_VM_FAULT_ADDR_MSB, mask_sh), \ + HUBBUB_SF(DCN_VM_FAULT_ADDR_LSB, DCN_VM_FAULT_ADDR_LSB, mask_sh), \ + HUBBUB_SF(DCN_VM_FAULT_CNTL, DCN_VM_ERROR_STATUS_CLEAR, mask_sh), \ + HUBBUB_SF(DCN_VM_FAULT_CNTL, DCN_VM_ERROR_STATUS_MODE, mask_sh), \ + HUBBUB_SF(DCN_VM_FAULT_CNTL, DCN_VM_ERROR_INTERRUPT_ENABLE, mask_sh), \ + HUBBUB_SF(DCN_VM_FAULT_CNTL, DCN_VM_RANGE_FAULT_DISABLE, mask_sh), \ + HUBBUB_SF(DCN_VM_FAULT_CNTL, DCN_VM_PRQ_FAULT_DISABLE, mask_sh), \ + HUBBUB_SF(DCN_VM_FAULT_STATUS, DCN_VM_ERROR_STATUS, mask_sh), \ + HUBBUB_SF(DCN_VM_FAULT_STATUS, DCN_VM_ERROR_VMID, mask_sh), \ + HUBBUB_SF(DCN_VM_FAULT_STATUS, 
DCN_VM_ERROR_TABLE_LEVEL, mask_sh), \ + HUBBUB_SF(DCN_VM_FAULT_STATUS, DCN_VM_ERROR_PIPE, mask_sh), \ + HUBBUB_SF(DCN_VM_FAULT_STATUS, DCN_VM_ERROR_INTERRUPT_STATUS, mask_sh),\ + HUBBUB_SF(DCHUBBUB_CLOCK_CNTL, DISPCLK_R_DCHUBBUB_GATE_DIS, mask_sh),\ + HUBBUB_SF(DCHUBBUB_CLOCK_CNTL, DCFCLK_R_DCHUBBUB_GATE_DIS, mask_sh),\ + HUBBUB_SF(DCHUBBUB_SDPIF_CFG0, SDPIF_PORT_CONTROL, mask_sh),\ + HUBBUB_SF(DCHUBBUB_MEM_PWR_MODE_CTRL, DET_MEM_PWR_LS_MODE, mask_sh) + +int hubbub31_init_dchub_sys_ctx(struct hubbub *hubbub, + struct dcn_hubbub_phys_addr_config *pa_config); + +void hubbub31_init(struct hubbub *hubbub); + +void hubbub31_construct(struct dcn20_hubbub *hubbub3, + struct dc_context *ctx, + const struct dcn_hubbub_registers *hubbub_regs, + const struct dcn_hubbub_shift *hubbub_shift, + const struct dcn_hubbub_mask *hubbub_mask, + int det_size_kb, + int pixel_chunk_size_kb, + int config_return_buffer_size_kb); + +#endif diff --git a/drivers/gpu/drm/amd/display/dc/hubbub/dcn32/dcn32_hubbub.c b/drivers/gpu/drm/amd/display/dc/hubbub/dcn32/dcn32_hubbub.c new file mode 100644 index 000000000000..5264dc26cce1 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/hubbub/dcn32/dcn32_hubbub.c @@ -0,0 +1,1032 @@ +/* + * Copyright 2021 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: AMD + * + */ + + +#include "dcn30/dcn30_hubbub.h" +#include "dcn32_hubbub.h" +#include "dm_services.h" +#include "reg_helper.h" + + +#define CTX \ + hubbub2->base.ctx +#define DC_LOGGER \ + hubbub2->base.ctx->logger +#define REG(reg)\ + hubbub2->regs->reg + +#undef FN +#define FN(reg_name, field_name) \ + hubbub2->shifts->field_name, hubbub2->masks->field_name + +/** + * DCN32_CRB_SEGMENT_SIZE_KB: Maximum Configurable Return Buffer size for + * DCN32 + */ +#define DCN32_CRB_SEGMENT_SIZE_KB 64 + +static void dcn32_init_crb(struct hubbub *hubbub) +{ + struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub); + + REG_GET(DCHUBBUB_DET0_CTRL, DET0_SIZE_CURRENT, + &hubbub2->det0_size); + + REG_GET(DCHUBBUB_DET1_CTRL, DET1_SIZE_CURRENT, + &hubbub2->det1_size); + + REG_GET(DCHUBBUB_DET2_CTRL, DET2_SIZE_CURRENT, + &hubbub2->det2_size); + + REG_GET(DCHUBBUB_DET3_CTRL, DET3_SIZE_CURRENT, + &hubbub2->det3_size); + + REG_GET(DCHUBBUB_COMPBUF_CTRL, COMPBUF_SIZE_CURRENT, + &hubbub2->compbuf_size_segments); + + REG_SET_2(COMPBUF_RESERVED_SPACE, 0, + COMPBUF_RESERVED_SPACE_64B, hubbub2->pixel_chunk_size / 32, + COMPBUF_RESERVED_SPACE_ZS, hubbub2->pixel_chunk_size / 128); + REG_UPDATE(DCHUBBUB_DEBUG_CTRL_0, DET_DEPTH, 0x47F); +} + +void hubbub32_set_request_limit(struct hubbub *hubbub, int memory_channel_count, int words_per_channel) +{ + struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub); + + uint32_t request_limit = 3 * memory_channel_count * words_per_channel / 4; + + ASSERT((request_limit & (~0xFFF)) == 0); //field is only 24 bits long + ASSERT(request_limit > 0); //field is only 24 bits long + + if (request_limit > 0xFFF) + request_limit = 0xFFF; + + if (request_limit > 0) + REG_UPDATE(SDPIF_REQUEST_RATE_LIMIT, SDPIF_REQUEST_RATE_LIMIT, request_limit); +} + + +void dcn32_program_det_size(struct hubbub *hubbub, int hubp_inst, unsigned int det_buffer_size_in_kbyte) +{ + struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub); + + unsigned int det_size_segments = (det_buffer_size_in_kbyte + DCN32_CRB_SEGMENT_SIZE_KB - 1) / DCN32_CRB_SEGMENT_SIZE_KB; + + switch (hubp_inst) { + case 0: + REG_UPDATE(DCHUBBUB_DET0_CTRL, + DET0_SIZE, det_size_segments); + hubbub2->det0_size = det_size_segments; + break; + case 1: + REG_UPDATE(DCHUBBUB_DET1_CTRL, + DET1_SIZE, det_size_segments); + hubbub2->det1_size = det_size_segments; + break; + case 2: + REG_UPDATE(DCHUBBUB_DET2_CTRL, + DET2_SIZE, det_size_segments); + hubbub2->det2_size = det_size_segments; + break; + case 3: + REG_UPDATE(DCHUBBUB_DET3_CTRL, + DET3_SIZE, det_size_segments); + hubbub2->det3_size = det_size_segments; + break; + default: + break; + } + if (hubbub2->det0_size + hubbub2->det1_size + hubbub2->det2_size + + hubbub2->det3_size + hubbub2->compbuf_size_segments > hubbub2->crb_size_segs) { + /* This may happen during seamless transition from ODM 2:1 to ODM4:1 */ + DC_LOG_WARNING("CRB Config Warning: DET size (%d,%d,%d,%d) + Compbuf size (%d) > CRB segments (%d)\n", + hubbub2->det0_size, hubbub2->det1_size, hubbub2->det2_size, hubbub2->det3_size, + hubbub2->compbuf_size_segments, hubbub2->crb_size_segs); + } +} + +void dcn32_program_compbuf_size(struct hubbub *hubbub, unsigned int compbuf_size_kb, bool safe_to_increase) +{ + struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub); + unsigned int compbuf_size_segments = (compbuf_size_kb + DCN32_CRB_SEGMENT_SIZE_KB - 1) / DCN32_CRB_SEGMENT_SIZE_KB; + + if (safe_to_increase || compbuf_size_segments <= hubbub2->compbuf_size_segments) { + if (compbuf_size_segments > 
hubbub2->compbuf_size_segments) { + REG_WAIT(DCHUBBUB_DET0_CTRL, DET0_SIZE_CURRENT, hubbub2->det0_size, 1, 100); + REG_WAIT(DCHUBBUB_DET1_CTRL, DET1_SIZE_CURRENT, hubbub2->det1_size, 1, 100); + REG_WAIT(DCHUBBUB_DET2_CTRL, DET2_SIZE_CURRENT, hubbub2->det2_size, 1, 100); + REG_WAIT(DCHUBBUB_DET3_CTRL, DET3_SIZE_CURRENT, hubbub2->det3_size, 1, 100); + } + /* Should never be hit, if it is we have an erroneous hw config*/ + ASSERT(hubbub2->det0_size + hubbub2->det1_size + hubbub2->det2_size + + hubbub2->det3_size + compbuf_size_segments <= hubbub2->crb_size_segs); + REG_UPDATE(DCHUBBUB_COMPBUF_CTRL, COMPBUF_SIZE, compbuf_size_segments); + hubbub2->compbuf_size_segments = compbuf_size_segments; + ASSERT(REG_GET(DCHUBBUB_COMPBUF_CTRL, CONFIG_ERROR, &compbuf_size_segments) && !compbuf_size_segments); + } +} + +static uint32_t convert_and_clamp( + uint32_t wm_ns, + uint32_t refclk_mhz, + uint32_t clamp_value) +{ + uint32_t ret_val = 0; + ret_val = wm_ns * refclk_mhz; + + ret_val /= 1000; + + if (ret_val > clamp_value) + ret_val = clamp_value; + + return ret_val; +} + +bool hubbub32_program_urgent_watermarks( + struct hubbub *hubbub, + union dcn_watermark_set *watermarks, + unsigned int refclk_mhz, + bool safe_to_lower) +{ + struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub); + uint32_t prog_wm_value; + bool wm_pending = false; + + /* Repeat for water mark set A, B, C and D. */ + /* clock state A */ + if (safe_to_lower || watermarks->a.urgent_ns > hubbub2->watermarks.a.urgent_ns) { + hubbub2->watermarks.a.urgent_ns = watermarks->a.urgent_ns; + prog_wm_value = convert_and_clamp(watermarks->a.urgent_ns, + refclk_mhz, 0x3fff); + REG_SET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A, 0, + DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A, prog_wm_value); + + DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_A calculated =%d\n" + "HW register value = 0x%x\n", + watermarks->a.urgent_ns, prog_wm_value); + } else if (watermarks->a.urgent_ns < hubbub2->watermarks.a.urgent_ns) + wm_pending = true; + + /* determine the transfer time for a quantity of data for a particular requestor.*/ + if (safe_to_lower || watermarks->a.frac_urg_bw_flip + > hubbub2->watermarks.a.frac_urg_bw_flip) { + hubbub2->watermarks.a.frac_urg_bw_flip = watermarks->a.frac_urg_bw_flip; + + REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_A, 0, + DCHUBBUB_ARB_FRAC_URG_BW_FLIP_A, watermarks->a.frac_urg_bw_flip); + } else if (watermarks->a.frac_urg_bw_flip + < hubbub2->watermarks.a.frac_urg_bw_flip) + wm_pending = true; + + if (safe_to_lower || watermarks->a.frac_urg_bw_nom + > hubbub2->watermarks.a.frac_urg_bw_nom) { + hubbub2->watermarks.a.frac_urg_bw_nom = watermarks->a.frac_urg_bw_nom; + + REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_NOM_A, 0, + DCHUBBUB_ARB_FRAC_URG_BW_NOM_A, watermarks->a.frac_urg_bw_nom); + } else if (watermarks->a.frac_urg_bw_nom + < hubbub2->watermarks.a.frac_urg_bw_nom) + wm_pending = true; + + if (safe_to_lower || watermarks->a.urgent_latency_ns > hubbub2->watermarks.a.urgent_latency_ns) { + hubbub2->watermarks.a.urgent_latency_ns = watermarks->a.urgent_latency_ns; + prog_wm_value = convert_and_clamp(watermarks->a.urgent_latency_ns, + refclk_mhz, 0x3fff); + REG_SET(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_A, 0, + DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_A, prog_wm_value); + } else if (watermarks->a.urgent_latency_ns < hubbub2->watermarks.a.urgent_latency_ns) + wm_pending = true; + + /* clock state B */ + if (safe_to_lower || watermarks->b.urgent_ns > hubbub2->watermarks.b.urgent_ns) { + hubbub2->watermarks.b.urgent_ns = watermarks->b.urgent_ns; + prog_wm_value = 
convert_and_clamp(watermarks->b.urgent_ns, + refclk_mhz, 0x3fff); + REG_SET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B, 0, + DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B, prog_wm_value); + + DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_B calculated =%d\n" + "HW register value = 0x%x\n", + watermarks->b.urgent_ns, prog_wm_value); + } else if (watermarks->b.urgent_ns < hubbub2->watermarks.b.urgent_ns) + wm_pending = true; + + /* determine the transfer time for a quantity of data for a particular requestor.*/ + if (safe_to_lower || watermarks->b.frac_urg_bw_flip + > hubbub2->watermarks.b.frac_urg_bw_flip) { + hubbub2->watermarks.b.frac_urg_bw_flip = watermarks->b.frac_urg_bw_flip; + + REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_B, 0, + DCHUBBUB_ARB_FRAC_URG_BW_FLIP_B, watermarks->b.frac_urg_bw_flip); + } else if (watermarks->b.frac_urg_bw_flip + < hubbub2->watermarks.b.frac_urg_bw_flip) + wm_pending = true; + + if (safe_to_lower || watermarks->b.frac_urg_bw_nom + > hubbub2->watermarks.b.frac_urg_bw_nom) { + hubbub2->watermarks.b.frac_urg_bw_nom = watermarks->b.frac_urg_bw_nom; + + REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_NOM_B, 0, + DCHUBBUB_ARB_FRAC_URG_BW_NOM_B, watermarks->b.frac_urg_bw_nom); + } else if (watermarks->b.frac_urg_bw_nom + < hubbub2->watermarks.b.frac_urg_bw_nom) + wm_pending = true; + + if (safe_to_lower || watermarks->b.urgent_latency_ns > hubbub2->watermarks.b.urgent_latency_ns) { + hubbub2->watermarks.b.urgent_latency_ns = watermarks->b.urgent_latency_ns; + prog_wm_value = convert_and_clamp(watermarks->b.urgent_latency_ns, + refclk_mhz, 0x3fff); + REG_SET(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_B, 0, + DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_B, prog_wm_value); + } else if (watermarks->b.urgent_latency_ns < hubbub2->watermarks.b.urgent_latency_ns) + wm_pending = true; + + /* clock state C */ + if (safe_to_lower || watermarks->c.urgent_ns > hubbub2->watermarks.c.urgent_ns) { + hubbub2->watermarks.c.urgent_ns = watermarks->c.urgent_ns; + prog_wm_value = convert_and_clamp(watermarks->c.urgent_ns, + refclk_mhz, 0x3fff); + REG_SET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C, 0, + DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C, prog_wm_value); + + DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_C calculated =%d\n" + "HW register value = 0x%x\n", + watermarks->c.urgent_ns, prog_wm_value); + } else if (watermarks->c.urgent_ns < hubbub2->watermarks.c.urgent_ns) + wm_pending = true; + + /* determine the transfer time for a quantity of data for a particular requestor.*/ + if (safe_to_lower || watermarks->c.frac_urg_bw_flip + > hubbub2->watermarks.c.frac_urg_bw_flip) { + hubbub2->watermarks.c.frac_urg_bw_flip = watermarks->c.frac_urg_bw_flip; + + REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_C, 0, + DCHUBBUB_ARB_FRAC_URG_BW_FLIP_C, watermarks->c.frac_urg_bw_flip); + } else if (watermarks->c.frac_urg_bw_flip + < hubbub2->watermarks.c.frac_urg_bw_flip) + wm_pending = true; + + if (safe_to_lower || watermarks->c.frac_urg_bw_nom + > hubbub2->watermarks.c.frac_urg_bw_nom) { + hubbub2->watermarks.c.frac_urg_bw_nom = watermarks->c.frac_urg_bw_nom; + + REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_NOM_C, 0, + DCHUBBUB_ARB_FRAC_URG_BW_NOM_C, watermarks->c.frac_urg_bw_nom); + } else if (watermarks->c.frac_urg_bw_nom + < hubbub2->watermarks.c.frac_urg_bw_nom) + wm_pending = true; + + if (safe_to_lower || watermarks->c.urgent_latency_ns > hubbub2->watermarks.c.urgent_latency_ns) { + hubbub2->watermarks.c.urgent_latency_ns = watermarks->c.urgent_latency_ns; + prog_wm_value = convert_and_clamp(watermarks->c.urgent_latency_ns, + refclk_mhz, 0x3fff); + 
REG_SET(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_C, 0, + DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_C, prog_wm_value); + } else if (watermarks->c.urgent_latency_ns < hubbub2->watermarks.c.urgent_latency_ns) + wm_pending = true; + + /* clock state D */ + if (safe_to_lower || watermarks->d.urgent_ns > hubbub2->watermarks.d.urgent_ns) { + hubbub2->watermarks.d.urgent_ns = watermarks->d.urgent_ns; + prog_wm_value = convert_and_clamp(watermarks->d.urgent_ns, + refclk_mhz, 0x3fff); + REG_SET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D, 0, + DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D, prog_wm_value); + + DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_D calculated =%d\n" + "HW register value = 0x%x\n", + watermarks->d.urgent_ns, prog_wm_value); + } else if (watermarks->d.urgent_ns < hubbub2->watermarks.d.urgent_ns) + wm_pending = true; + + /* determine the transfer time for a quantity of data for a particular requestor.*/ + if (safe_to_lower || watermarks->d.frac_urg_bw_flip + > hubbub2->watermarks.d.frac_urg_bw_flip) { + hubbub2->watermarks.d.frac_urg_bw_flip = watermarks->d.frac_urg_bw_flip; + + REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_D, 0, + DCHUBBUB_ARB_FRAC_URG_BW_FLIP_D, watermarks->d.frac_urg_bw_flip); + } else if (watermarks->d.frac_urg_bw_flip + < hubbub2->watermarks.d.frac_urg_bw_flip) + wm_pending = true; + + if (safe_to_lower || watermarks->d.frac_urg_bw_nom + > hubbub2->watermarks.d.frac_urg_bw_nom) { + hubbub2->watermarks.d.frac_urg_bw_nom = watermarks->d.frac_urg_bw_nom; + + REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_NOM_D, 0, + DCHUBBUB_ARB_FRAC_URG_BW_NOM_D, watermarks->d.frac_urg_bw_nom); + } else if (watermarks->d.frac_urg_bw_nom + < hubbub2->watermarks.d.frac_urg_bw_nom) + wm_pending = true; + + if (safe_to_lower || watermarks->d.urgent_latency_ns > hubbub2->watermarks.d.urgent_latency_ns) { + hubbub2->watermarks.d.urgent_latency_ns = watermarks->d.urgent_latency_ns; + prog_wm_value = convert_and_clamp(watermarks->d.urgent_latency_ns, + refclk_mhz, 0x3fff); + REG_SET(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_D, 0, + DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_D, prog_wm_value); + } else if (watermarks->d.urgent_latency_ns < hubbub2->watermarks.d.urgent_latency_ns) + wm_pending = true; + + return wm_pending; +} + +bool hubbub32_program_stutter_watermarks( + struct hubbub *hubbub, + union dcn_watermark_set *watermarks, + unsigned int refclk_mhz, + bool safe_to_lower) +{ + struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub); + uint32_t prog_wm_value; + bool wm_pending = false; + + /* clock state A */ + if (safe_to_lower || watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns + > hubbub2->watermarks.a.cstate_pstate.cstate_enter_plus_exit_ns) { + hubbub2->watermarks.a.cstate_pstate.cstate_enter_plus_exit_ns = + watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns; + prog_wm_value = convert_and_clamp( + watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns, + refclk_mhz, 0xffff); + REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A, 0, + DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A, prog_wm_value); + DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_A calculated =%d\n" + "HW register value = 0x%x\n", + watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value); + } else if (watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns + < hubbub2->watermarks.a.cstate_pstate.cstate_enter_plus_exit_ns) + wm_pending = true; + + if (safe_to_lower || watermarks->a.cstate_pstate.cstate_exit_ns + > hubbub2->watermarks.a.cstate_pstate.cstate_exit_ns) { + hubbub2->watermarks.a.cstate_pstate.cstate_exit_ns = + 
watermarks->a.cstate_pstate.cstate_exit_ns; + prog_wm_value = convert_and_clamp( + watermarks->a.cstate_pstate.cstate_exit_ns, + refclk_mhz, 0xffff); + REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A, 0, + DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A, prog_wm_value); + DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_A calculated =%d\n" + "HW register value = 0x%x\n", + watermarks->a.cstate_pstate.cstate_exit_ns, prog_wm_value); + } else if (watermarks->a.cstate_pstate.cstate_exit_ns + < hubbub2->watermarks.a.cstate_pstate.cstate_exit_ns) + wm_pending = true; + + /* clock state B */ + if (safe_to_lower || watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns + > hubbub2->watermarks.b.cstate_pstate.cstate_enter_plus_exit_ns) { + hubbub2->watermarks.b.cstate_pstate.cstate_enter_plus_exit_ns = + watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns; + prog_wm_value = convert_and_clamp( + watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns, + refclk_mhz, 0xffff); + REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B, 0, + DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B, prog_wm_value); + DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_B calculated =%d\n" + "HW register value = 0x%x\n", + watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value); + } else if (watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns + < hubbub2->watermarks.b.cstate_pstate.cstate_enter_plus_exit_ns) + wm_pending = true; + + if (safe_to_lower || watermarks->b.cstate_pstate.cstate_exit_ns + > hubbub2->watermarks.b.cstate_pstate.cstate_exit_ns) { + hubbub2->watermarks.b.cstate_pstate.cstate_exit_ns = + watermarks->b.cstate_pstate.cstate_exit_ns; + prog_wm_value = convert_and_clamp( + watermarks->b.cstate_pstate.cstate_exit_ns, + refclk_mhz, 0xffff); + REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B, 0, + DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B, prog_wm_value); + DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_B calculated =%d\n" + "HW register value = 0x%x\n", + watermarks->b.cstate_pstate.cstate_exit_ns, prog_wm_value); + } else if (watermarks->b.cstate_pstate.cstate_exit_ns + < hubbub2->watermarks.b.cstate_pstate.cstate_exit_ns) + wm_pending = true; + + /* clock state C */ + if (safe_to_lower || watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns + > hubbub2->watermarks.c.cstate_pstate.cstate_enter_plus_exit_ns) { + hubbub2->watermarks.c.cstate_pstate.cstate_enter_plus_exit_ns = + watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns; + prog_wm_value = convert_and_clamp( + watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns, + refclk_mhz, 0xffff); + REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C, 0, + DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C, prog_wm_value); + DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_C calculated =%d\n" + "HW register value = 0x%x\n", + watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value); + } else if (watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns + < hubbub2->watermarks.c.cstate_pstate.cstate_enter_plus_exit_ns) + wm_pending = true; + + if (safe_to_lower || watermarks->c.cstate_pstate.cstate_exit_ns + > hubbub2->watermarks.c.cstate_pstate.cstate_exit_ns) { + hubbub2->watermarks.c.cstate_pstate.cstate_exit_ns = + watermarks->c.cstate_pstate.cstate_exit_ns; + prog_wm_value = convert_and_clamp( + watermarks->c.cstate_pstate.cstate_exit_ns, + refclk_mhz, 0xffff); + REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C, 0, + DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C, prog_wm_value); + DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_C calculated =%d\n" + "HW register value = 0x%x\n", + 
watermarks->c.cstate_pstate.cstate_exit_ns, prog_wm_value); + } else if (watermarks->c.cstate_pstate.cstate_exit_ns + < hubbub2->watermarks.c.cstate_pstate.cstate_exit_ns) + wm_pending = true; + + /* clock state D */ + if (safe_to_lower || watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns + > hubbub2->watermarks.d.cstate_pstate.cstate_enter_plus_exit_ns) { + hubbub2->watermarks.d.cstate_pstate.cstate_enter_plus_exit_ns = + watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns; + prog_wm_value = convert_and_clamp( + watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns, + refclk_mhz, 0xffff); + REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D, 0, + DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D, prog_wm_value); + DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_D calculated =%d\n" + "HW register value = 0x%x\n", + watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value); + } else if (watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns + < hubbub2->watermarks.d.cstate_pstate.cstate_enter_plus_exit_ns) + wm_pending = true; + + if (safe_to_lower || watermarks->d.cstate_pstate.cstate_exit_ns + > hubbub2->watermarks.d.cstate_pstate.cstate_exit_ns) { + hubbub2->watermarks.d.cstate_pstate.cstate_exit_ns = + watermarks->d.cstate_pstate.cstate_exit_ns; + prog_wm_value = convert_and_clamp( + watermarks->d.cstate_pstate.cstate_exit_ns, + refclk_mhz, 0xffff); + REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D, 0, + DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D, prog_wm_value); + DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_D calculated =%d\n" + "HW register value = 0x%x\n", + watermarks->d.cstate_pstate.cstate_exit_ns, prog_wm_value); + } else if (watermarks->d.cstate_pstate.cstate_exit_ns + < hubbub2->watermarks.d.cstate_pstate.cstate_exit_ns) + wm_pending = true; + + return wm_pending; +} + + +bool hubbub32_program_pstate_watermarks( + struct hubbub *hubbub, + union dcn_watermark_set *watermarks, + unsigned int refclk_mhz, + bool safe_to_lower) +{ + struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub); + uint32_t prog_wm_value; + + bool wm_pending = false; + + /* Section for UCLK_PSTATE_CHANGE_WATERMARKS */ + /* clock state A */ + if (safe_to_lower || watermarks->a.cstate_pstate.pstate_change_ns + > hubbub2->watermarks.a.cstate_pstate.pstate_change_ns) { + hubbub2->watermarks.a.cstate_pstate.pstate_change_ns = + watermarks->a.cstate_pstate.pstate_change_ns; + prog_wm_value = convert_and_clamp( + watermarks->a.cstate_pstate.pstate_change_ns, + refclk_mhz, 0xffff); + REG_SET(DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_A, 0, + DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_A, prog_wm_value); + DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_A calculated =%d\n" + "HW register value = 0x%x\n\n", + watermarks->a.cstate_pstate.pstate_change_ns, prog_wm_value); + } else if (watermarks->a.cstate_pstate.pstate_change_ns + < hubbub2->watermarks.a.cstate_pstate.pstate_change_ns) + wm_pending = true; + + /* clock state B */ + if (safe_to_lower || watermarks->b.cstate_pstate.pstate_change_ns + > hubbub2->watermarks.b.cstate_pstate.pstate_change_ns) { + hubbub2->watermarks.b.cstate_pstate.pstate_change_ns = + watermarks->b.cstate_pstate.pstate_change_ns; + prog_wm_value = convert_and_clamp( + watermarks->b.cstate_pstate.pstate_change_ns, + refclk_mhz, 0xffff); + REG_SET(DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_B, 0, + DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_B, prog_wm_value); + DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_B calculated =%d\n" + "HW register value = 0x%x\n\n", + 
watermarks->b.cstate_pstate.pstate_change_ns, prog_wm_value); + } else if (watermarks->b.cstate_pstate.pstate_change_ns + < hubbub2->watermarks.b.cstate_pstate.pstate_change_ns) + wm_pending = true; + + /* clock state C */ + if (safe_to_lower || watermarks->c.cstate_pstate.pstate_change_ns + > hubbub2->watermarks.c.cstate_pstate.pstate_change_ns) { + hubbub2->watermarks.c.cstate_pstate.pstate_change_ns = + watermarks->c.cstate_pstate.pstate_change_ns; + prog_wm_value = convert_and_clamp( + watermarks->c.cstate_pstate.pstate_change_ns, + refclk_mhz, 0xffff); + REG_SET(DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_C, 0, + DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_C, prog_wm_value); + DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_C calculated =%d\n" + "HW register value = 0x%x\n\n", + watermarks->c.cstate_pstate.pstate_change_ns, prog_wm_value); + } else if (watermarks->c.cstate_pstate.pstate_change_ns + < hubbub2->watermarks.c.cstate_pstate.pstate_change_ns) + wm_pending = true; + + /* clock state D */ + if (safe_to_lower || watermarks->d.cstate_pstate.pstate_change_ns + > hubbub2->watermarks.d.cstate_pstate.pstate_change_ns) { + hubbub2->watermarks.d.cstate_pstate.pstate_change_ns = + watermarks->d.cstate_pstate.pstate_change_ns; + prog_wm_value = convert_and_clamp( + watermarks->d.cstate_pstate.pstate_change_ns, + refclk_mhz, 0xffff); + REG_SET(DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_D, 0, + DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_D, prog_wm_value); + DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_D calculated =%d\n" + "HW register value = 0x%x\n\n", + watermarks->d.cstate_pstate.pstate_change_ns, prog_wm_value); + } else if (watermarks->d.cstate_pstate.pstate_change_ns + < hubbub2->watermarks.d.cstate_pstate.pstate_change_ns) + wm_pending = true; + + /* Section for FCLK_PSTATE_CHANGE_WATERMARKS */ + /* clock state A */ + if (safe_to_lower || watermarks->a.cstate_pstate.fclk_pstate_change_ns + > hubbub2->watermarks.a.cstate_pstate.fclk_pstate_change_ns) { + hubbub2->watermarks.a.cstate_pstate.fclk_pstate_change_ns = + watermarks->a.cstate_pstate.fclk_pstate_change_ns; + prog_wm_value = convert_and_clamp( + watermarks->a.cstate_pstate.fclk_pstate_change_ns, + refclk_mhz, 0xffff); + REG_SET(DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_A, 0, + DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_A, prog_wm_value); + DC_LOG_BANDWIDTH_CALCS("FCLK_CHANGE_WATERMARK_A calculated =%d\n" + "HW register value = 0x%x\n\n", + watermarks->a.cstate_pstate.fclk_pstate_change_ns, prog_wm_value); + } else if (watermarks->a.cstate_pstate.fclk_pstate_change_ns + < hubbub2->watermarks.a.cstate_pstate.fclk_pstate_change_ns) + wm_pending = true; + + /* clock state B */ + if (safe_to_lower || watermarks->b.cstate_pstate.fclk_pstate_change_ns + > hubbub2->watermarks.b.cstate_pstate.fclk_pstate_change_ns) { + hubbub2->watermarks.b.cstate_pstate.fclk_pstate_change_ns = + watermarks->b.cstate_pstate.fclk_pstate_change_ns; + prog_wm_value = convert_and_clamp( + watermarks->b.cstate_pstate.fclk_pstate_change_ns, + refclk_mhz, 0xffff); + REG_SET(DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_B, 0, + DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_B, prog_wm_value); + DC_LOG_BANDWIDTH_CALCS("FCLK_CHANGE_WATERMARK_B calculated =%d\n" + "HW register value = 0x%x\n\n", + watermarks->b.cstate_pstate.fclk_pstate_change_ns, prog_wm_value); + } else if (watermarks->b.cstate_pstate.fclk_pstate_change_ns + < hubbub2->watermarks.b.cstate_pstate.fclk_pstate_change_ns) + wm_pending = true; + + /* clock state C */ + if (safe_to_lower || 
watermarks->c.cstate_pstate.fclk_pstate_change_ns + > hubbub2->watermarks.c.cstate_pstate.fclk_pstate_change_ns) { + hubbub2->watermarks.c.cstate_pstate.fclk_pstate_change_ns = + watermarks->c.cstate_pstate.fclk_pstate_change_ns; + prog_wm_value = convert_and_clamp( + watermarks->c.cstate_pstate.fclk_pstate_change_ns, + refclk_mhz, 0xffff); + REG_SET(DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_C, 0, + DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_C, prog_wm_value); + DC_LOG_BANDWIDTH_CALCS("FCLK_CHANGE_WATERMARK_C calculated =%d\n" + "HW register value = 0x%x\n\n", + watermarks->c.cstate_pstate.fclk_pstate_change_ns, prog_wm_value); + } else if (watermarks->c.cstate_pstate.fclk_pstate_change_ns + < hubbub2->watermarks.c.cstate_pstate.fclk_pstate_change_ns) + wm_pending = true; + + /* clock state D */ + if (safe_to_lower || watermarks->d.cstate_pstate.fclk_pstate_change_ns + > hubbub2->watermarks.d.cstate_pstate.fclk_pstate_change_ns) { + hubbub2->watermarks.d.cstate_pstate.fclk_pstate_change_ns = + watermarks->d.cstate_pstate.fclk_pstate_change_ns; + prog_wm_value = convert_and_clamp( + watermarks->d.cstate_pstate.fclk_pstate_change_ns, + refclk_mhz, 0xffff); + REG_SET(DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_D, 0, + DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_D, prog_wm_value); + DC_LOG_BANDWIDTH_CALCS("FCLK_CHANGE_WATERMARK_D calculated =%d\n" + "HW register value = 0x%x\n\n", + watermarks->d.cstate_pstate.fclk_pstate_change_ns, prog_wm_value); + } else if (watermarks->d.cstate_pstate.fclk_pstate_change_ns + < hubbub2->watermarks.d.cstate_pstate.fclk_pstate_change_ns) + wm_pending = true; + + return wm_pending; +} + + +bool hubbub32_program_usr_watermarks( + struct hubbub *hubbub, + union dcn_watermark_set *watermarks, + unsigned int refclk_mhz, + bool safe_to_lower) +{ + struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub); + uint32_t prog_wm_value; + + bool wm_pending = false; + + /* clock state A */ + if (safe_to_lower || watermarks->a.usr_retraining_ns + > hubbub2->watermarks.a.usr_retraining_ns) { + hubbub2->watermarks.a.usr_retraining_ns = watermarks->a.usr_retraining_ns; + prog_wm_value = convert_and_clamp( + watermarks->a.usr_retraining_ns, + refclk_mhz, 0x3fff); + REG_SET(DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_A, 0, + DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_A, prog_wm_value); + DC_LOG_BANDWIDTH_CALCS("USR_RETRAINING_WATERMARK_A calculated =%d\n" + "HW register value = 0x%x\n\n", + watermarks->a.usr_retraining_ns, prog_wm_value); + } else if (watermarks->a.usr_retraining_ns + < hubbub2->watermarks.a.usr_retraining_ns) + wm_pending = true; + + /* clock state B */ + if (safe_to_lower || watermarks->b.usr_retraining_ns + > hubbub2->watermarks.b.usr_retraining_ns) { + hubbub2->watermarks.b.usr_retraining_ns = watermarks->b.usr_retraining_ns; + prog_wm_value = convert_and_clamp( + watermarks->b.usr_retraining_ns, + refclk_mhz, 0x3fff); + REG_SET(DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_B, 0, + DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_B, prog_wm_value); + DC_LOG_BANDWIDTH_CALCS("USR_RETRAINING_WATERMARK_B calculated =%d\n" + "HW register value = 0x%x\n\n", + watermarks->b.usr_retraining_ns, prog_wm_value); + } else if (watermarks->b.usr_retraining_ns + < hubbub2->watermarks.b.usr_retraining_ns) + wm_pending = true; + + /* clock state C */ + if (safe_to_lower || watermarks->c.usr_retraining_ns + > hubbub2->watermarks.c.usr_retraining_ns) { + hubbub2->watermarks.c.usr_retraining_ns = + watermarks->c.usr_retraining_ns; + prog_wm_value = convert_and_clamp( + watermarks->c.usr_retraining_ns, + 
refclk_mhz, 0x3fff); + REG_SET(DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_C, 0, + DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_C, prog_wm_value); + DC_LOG_BANDWIDTH_CALCS("USR_RETRAINING_WATERMARK_C calculated =%d\n" + "HW register value = 0x%x\n\n", + watermarks->c.usr_retraining_ns, prog_wm_value); + } else if (watermarks->c.usr_retraining_ns + < hubbub2->watermarks.c.usr_retraining_ns) + wm_pending = true; + + /* clock state D */ + if (safe_to_lower || watermarks->d.usr_retraining_ns + > hubbub2->watermarks.d.usr_retraining_ns) { + hubbub2->watermarks.d.usr_retraining_ns = + watermarks->d.usr_retraining_ns; + prog_wm_value = convert_and_clamp( + watermarks->d.usr_retraining_ns, + refclk_mhz, 0x3fff); + REG_SET(DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_D, 0, + DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_D, prog_wm_value); + DC_LOG_BANDWIDTH_CALCS("USR_RETRAINING_WATERMARK_D calculated =%d\n" + "HW register value = 0x%x\n\n", + watermarks->d.usr_retraining_ns, prog_wm_value); + } else if (watermarks->d.usr_retraining_ns + < hubbub2->watermarks.d.usr_retraining_ns) + wm_pending = true; + + return wm_pending; +} + +void hubbub32_force_usr_retraining_allow(struct hubbub *hubbub, bool allow) +{ + struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub); + + /* + * DCHUBBUB_ARB_ALLOW_USR_RETRAINING_FORCE_ENABLE = 1 means enabling forcing value + * DCHUBBUB_ARB_ALLOW_USR_RETRAINING_FORCE_VALUE = 1 or 0, means value to be forced when force enable + */ + + REG_UPDATE_2(DCHUBBUB_ARB_USR_RETRAINING_CNTL, + DCHUBBUB_ARB_ALLOW_USR_RETRAINING_FORCE_VALUE, allow, + DCHUBBUB_ARB_ALLOW_USR_RETRAINING_FORCE_ENABLE, allow); +} + +static bool hubbub32_program_watermarks( + struct hubbub *hubbub, + union dcn_watermark_set *watermarks, + unsigned int refclk_mhz, + bool safe_to_lower) +{ + bool wm_pending = false; + + if (hubbub32_program_urgent_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower)) + wm_pending = true; + + if (hubbub32_program_stutter_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower)) + wm_pending = true; + + if (hubbub32_program_pstate_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower)) + wm_pending = true; + + if (hubbub32_program_usr_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower)) + wm_pending = true; + + /* + * The DCHub arbiter has a mechanism to dynamically rate limit the DCHub request stream to the fabric. + * If the memory controller is fully utilized and the DCHub requestors are + * well ahead of their amortized schedule, then it is safe to prevent the next winner + * from being committed and sent to the fabric. + * The utilization of the memory controller is approximated by ensuring that + * the number of outstanding requests is greater than a threshold specified + * by the ARB_MIN_REQ_OUTSTANDING. To determine that the DCHub requestors are well ahead of the amortized schedule, + * the slack of the next winner is compared with the ARB_SAT_LEVEL in DLG RefClk cycles. + * + * TODO: Revisit request limit after figure out right number. request limit for RM isn't decided yet, set maximum value (0x1FF) + * to turn off it for now. 
+ */ + /*REG_SET(DCHUBBUB_ARB_SAT_LEVEL, 0, + DCHUBBUB_ARB_SAT_LEVEL, 60 * refclk_mhz); + REG_UPDATE(DCHUBBUB_ARB_DF_REQ_OUTSTAND, + DCHUBBUB_ARB_MIN_REQ_OUTSTAND, 0x1FF);*/ + + hubbub1_allow_self_refresh_control(hubbub, !hubbub->ctx->dc->debug.disable_stutter); + + hubbub32_force_usr_retraining_allow(hubbub, hubbub->ctx->dc->debug.force_usr_allow); + + return wm_pending; +} + +/* Copy values from WM set A to all other sets */ +static void hubbub32_init_watermarks(struct hubbub *hubbub) +{ + struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub); + uint32_t reg; + + reg = REG_READ(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A); + REG_WRITE(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B, reg); + REG_WRITE(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C, reg); + REG_WRITE(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D, reg); + + reg = REG_READ(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_A); + REG_WRITE(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_B, reg); + REG_WRITE(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_C, reg); + REG_WRITE(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_D, reg); + + reg = REG_READ(DCHUBBUB_ARB_FRAC_URG_BW_NOM_A); + REG_WRITE(DCHUBBUB_ARB_FRAC_URG_BW_NOM_B, reg); + REG_WRITE(DCHUBBUB_ARB_FRAC_URG_BW_NOM_C, reg); + REG_WRITE(DCHUBBUB_ARB_FRAC_URG_BW_NOM_D, reg); + + reg = REG_READ(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_A); + REG_WRITE(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_B, reg); + REG_WRITE(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_C, reg); + REG_WRITE(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_D, reg); + + reg = REG_READ(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A); + REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B, reg); + REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C, reg); + REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D, reg); + + reg = REG_READ(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A); + REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B, reg); + REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C, reg); + REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D, reg); + + reg = REG_READ(DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_A); + REG_WRITE(DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_B, reg); + REG_WRITE(DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_C, reg); + REG_WRITE(DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_D, reg); + + reg = REG_READ(DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_A); + REG_WRITE(DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_B, reg); + REG_WRITE(DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_C, reg); + REG_WRITE(DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_D, reg); + + reg = REG_READ(DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_A); + REG_WRITE(DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_B, reg); + REG_WRITE(DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_C, reg); + REG_WRITE(DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_D, reg); +} + +static void hubbub32_wm_read_state(struct hubbub *hubbub, + struct dcn_hubbub_wm *wm) +{ + struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub); + struct dcn_hubbub_wm_set *s; + + memset(wm, 0, sizeof(struct dcn_hubbub_wm)); + + s = &wm->sets[0]; + s->wm_set = 0; + REG_GET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A, + DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A, &s->data_urgent); + + REG_GET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A, + DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A, &s->sr_enter); + + REG_GET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A, + DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A, &s->sr_exit); + + REG_GET(DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_A, + DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_A, &s->dram_clk_change); + + REG_GET(DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_A, + DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_A, &s->usr_retrain); + + 
REG_GET(DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_A, + DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_A, &s->fclk_pstate_change); + + s = &wm->sets[1]; + s->wm_set = 1; + REG_GET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B, + DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B, &s->data_urgent); + + REG_GET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B, + DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B, &s->sr_enter); + + REG_GET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B, + DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B, &s->sr_exit); + + REG_GET(DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_B, + DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_B, &s->dram_clk_change); + + REG_GET(DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_B, + DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_B, &s->usr_retrain); + + REG_GET(DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_B, + DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_B, &s->fclk_pstate_change); + + s = &wm->sets[2]; + s->wm_set = 2; + REG_GET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C, + DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C, &s->data_urgent); + + REG_GET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C, + DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C, &s->sr_enter); + + REG_GET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C, + DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C, &s->sr_exit); + + REG_GET(DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_C, + DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_C, &s->dram_clk_change); + + REG_GET(DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_C, + DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_C, &s->usr_retrain); + + REG_GET(DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_C, + DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_C, &s->fclk_pstate_change); + + s = &wm->sets[3]; + s->wm_set = 3; + REG_GET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D, + DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D, &s->data_urgent); + + REG_GET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D, + DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D, &s->sr_enter); + + REG_GET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D, + DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D, &s->sr_exit); + + REG_GET(DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_D, + DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_D, &s->dram_clk_change); + + REG_GET(DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_D, + DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_D, &s->usr_retrain); + + REG_GET(DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_D, + DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_D, &s->fclk_pstate_change); +} + +void hubbub32_force_wm_propagate_to_pipes(struct hubbub *hubbub) +{ + struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub); + uint32_t refclk_mhz = hubbub->ctx->dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000; + uint32_t prog_wm_value = convert_and_clamp(hubbub2->watermarks.a.urgent_ns, + refclk_mhz, 0x3fff); + + REG_SET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A, 0, + DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A, prog_wm_value); +} + +void hubbub32_get_mall_en(struct hubbub *hubbub, unsigned int *mall_in_use) +{ + struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub); + uint32_t prefetch_complete, mall_en; + + REG_GET_2(DCHUBBUB_ARB_MALL_CNTL, MALL_IN_USE, &mall_en, + MALL_PREFETCH_COMPLETE, &prefetch_complete); + + *mall_in_use = prefetch_complete && mall_en; +} + +void hubbub32_init(struct hubbub *hubbub) +{ + struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub); + + /* Enable clock gate*/ + if (hubbub->ctx->dc->debug.disable_clock_gate) { + /*done in hwseq*/ + /*REG_UPDATE(DCFCLK_CNTL, DCFCLK_GATE_DIS, 0);*/ + + REG_UPDATE_2(DCHUBBUB_CLOCK_CNTL, + DISPCLK_R_DCHUBBUB_GATE_DIS, 1, + DCFCLK_R_DCHUBBUB_GATE_DIS, 1); + } + /* + ignore the "df_pre_cstate_req" from the SDP port control. 
+ only the DCN will determine when to connect the SDP port + */ + REG_UPDATE(DCHUBBUB_SDPIF_CFG0, + SDPIF_PORT_CONTROL, 1); + /*Set SDP's max outstanding request to 512 + must set the register back to 0 (max outstanding = 256) in zero frame buffer mode*/ + REG_UPDATE(DCHUBBUB_SDPIF_CFG1, + SDPIF_MAX_NUM_OUTSTANDING, 1); + /*must set the registers back to 256 in zero frame buffer mode*/ + REG_UPDATE_2(DCHUBBUB_ARB_DF_REQ_OUTSTAND, + DCHUBBUB_ARB_MAX_REQ_OUTSTAND, 512, + DCHUBBUB_ARB_MIN_REQ_OUTSTAND, 512); +} + +static const struct hubbub_funcs hubbub32_funcs = { + .update_dchub = hubbub2_update_dchub, + .init_dchub_sys_ctx = hubbub3_init_dchub_sys_ctx, + .init_vm_ctx = hubbub2_init_vm_ctx, + .dcc_support_swizzle = hubbub3_dcc_support_swizzle, + .dcc_support_pixel_format = hubbub2_dcc_support_pixel_format, + .get_dcc_compression_cap = hubbub3_get_dcc_compression_cap, + .wm_read_state = hubbub32_wm_read_state, + .get_dchub_ref_freq = hubbub2_get_dchub_ref_freq, + .program_watermarks = hubbub32_program_watermarks, + .allow_self_refresh_control = hubbub1_allow_self_refresh_control, + .is_allow_self_refresh_enabled = hubbub1_is_allow_self_refresh_enabled, + .verify_allow_pstate_change_high = hubbub1_verify_allow_pstate_change_high, + .force_wm_propagate_to_pipes = hubbub32_force_wm_propagate_to_pipes, + .force_pstate_change_control = hubbub3_force_pstate_change_control, + .init_watermarks = hubbub32_init_watermarks, + .program_det_size = dcn32_program_det_size, + .program_compbuf_size = dcn32_program_compbuf_size, + .init_crb = dcn32_init_crb, + .hubbub_read_state = hubbub2_read_state, + .force_usr_retraining_allow = hubbub32_force_usr_retraining_allow, + .set_request_limit = hubbub32_set_request_limit, + .get_mall_en = hubbub32_get_mall_en, +}; + +void hubbub32_construct(struct dcn20_hubbub *hubbub2, + struct dc_context *ctx, + const struct dcn_hubbub_registers *hubbub_regs, + const struct dcn_hubbub_shift *hubbub_shift, + const struct dcn_hubbub_mask *hubbub_mask, + int det_size_kb, + int pixel_chunk_size_kb, + int config_return_buffer_size_kb) +{ + hubbub2->base.ctx = ctx; + hubbub2->base.funcs = &hubbub32_funcs; + hubbub2->regs = hubbub_regs; + hubbub2->shifts = hubbub_shift; + hubbub2->masks = hubbub_mask; + + hubbub2->debug_test_index_pstate = 0xB; + hubbub2->detile_buf_size = det_size_kb * 1024; + hubbub2->pixel_chunk_size = pixel_chunk_size_kb * 1024; + hubbub2->crb_size_segs = config_return_buffer_size_kb / DCN32_CRB_SEGMENT_SIZE_KB; +} diff --git a/drivers/gpu/drm/amd/display/dc/hubbub/dcn32/dcn32_hubbub.h b/drivers/gpu/drm/amd/display/dc/hubbub/dcn32/dcn32_hubbub.h new file mode 100644 index 000000000000..bfc55dbbad1f --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/hubbub/dcn32/dcn32_hubbub.h @@ -0,0 +1,166 @@ +/* + * Copyright 2016 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#ifndef __DC_HUBBUB_DCN32_H__ +#define __DC_HUBBUB_DCN32_H__ + +#include "dcn21/dcn21_hubbub.h" + +#define HUBBUB_MASK_SH_LIST_DCN32(mask_sh)\ + HUBBUB_SF(DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_ENABLE, mask_sh), \ + HUBBUB_SF(DCHUBBUB_SOFT_RESET, DCHUBBUB_GLOBAL_SOFT_RESET, mask_sh), \ + HUBBUB_SF(DCHUBBUB_ARB_WATERMARK_CHANGE_CNTL, DCHUBBUB_ARB_WATERMARK_CHANGE_REQUEST, mask_sh), \ + HUBBUB_SF(DCHUBBUB_ARB_WATERMARK_CHANGE_CNTL, DCHUBBUB_ARB_WATERMARK_CHANGE_DONE_INTERRUPT_DISABLE, mask_sh), \ + HUBBUB_SF(DCHUBBUB_ARB_DRAM_STATE_CNTL, DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_VALUE, mask_sh), \ + HUBBUB_SF(DCHUBBUB_ARB_DRAM_STATE_CNTL, DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_ENABLE, mask_sh), \ + HUBBUB_SF(DCHUBBUB_ARB_DRAM_STATE_CNTL, DCHUBBUB_ARB_ALLOW_PSTATE_CHANGE_FORCE_VALUE, mask_sh), \ + HUBBUB_SF(DCHUBBUB_ARB_DRAM_STATE_CNTL, DCHUBBUB_ARB_ALLOW_PSTATE_CHANGE_FORCE_ENABLE, mask_sh), \ + HUBBUB_SF(DCHUBBUB_ARB_SAT_LEVEL, DCHUBBUB_ARB_SAT_LEVEL, mask_sh), \ + HUBBUB_SF(DCHUBBUB_ARB_DF_REQ_OUTSTAND, DCHUBBUB_ARB_MIN_REQ_OUTSTAND, mask_sh), \ + HUBBUB_SF(DCHUBBUB_ARB_DF_REQ_OUTSTAND, DCHUBBUB_ARB_MAX_REQ_OUTSTAND, mask_sh), \ + HUBBUB_SF(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A, DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A, mask_sh), \ + HUBBUB_SF(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B, DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B, mask_sh), \ + HUBBUB_SF(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C, DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C, mask_sh), \ + HUBBUB_SF(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D, DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D, mask_sh), \ + HUBBUB_MASK_SH_LIST_STUTTER(mask_sh), \ + HUBBUB_SF(DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_REFDIV, mask_sh), \ + HUBBUB_SF(DCN_VM_FB_LOCATION_BASE, FB_BASE, mask_sh), \ + HUBBUB_SF(DCN_VM_FB_LOCATION_TOP, FB_TOP, mask_sh), \ + HUBBUB_SF(DCN_VM_FB_OFFSET, FB_OFFSET, mask_sh), \ + HUBBUB_SF(DCN_VM_AGP_BOT, AGP_BOT, mask_sh), \ + HUBBUB_SF(DCN_VM_AGP_TOP, AGP_TOP, mask_sh), \ + HUBBUB_SF(DCN_VM_AGP_BASE, AGP_BASE, mask_sh), \ + HUBBUB_SF(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_A, DCHUBBUB_ARB_FRAC_URG_BW_FLIP_A, mask_sh), \ + HUBBUB_SF(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_B, DCHUBBUB_ARB_FRAC_URG_BW_FLIP_B, mask_sh), \ + HUBBUB_SF(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_C, DCHUBBUB_ARB_FRAC_URG_BW_FLIP_C, mask_sh), \ + HUBBUB_SF(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_D, DCHUBBUB_ARB_FRAC_URG_BW_FLIP_D, mask_sh), \ + HUBBUB_SF(DCHUBBUB_ARB_FRAC_URG_BW_NOM_A, DCHUBBUB_ARB_FRAC_URG_BW_NOM_A, mask_sh), \ + HUBBUB_SF(DCHUBBUB_ARB_FRAC_URG_BW_NOM_B, DCHUBBUB_ARB_FRAC_URG_BW_NOM_B, mask_sh), \ + HUBBUB_SF(DCHUBBUB_ARB_FRAC_URG_BW_NOM_C, DCHUBBUB_ARB_FRAC_URG_BW_NOM_C, mask_sh), \ + HUBBUB_SF(DCHUBBUB_ARB_FRAC_URG_BW_NOM_D, DCHUBBUB_ARB_FRAC_URG_BW_NOM_D, mask_sh), \ + HUBBUB_SF(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_A, DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_A, mask_sh), \ + HUBBUB_SF(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_B, DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_B, mask_sh), \ + HUBBUB_SF(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_C, DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_C, 
mask_sh), \ + HUBBUB_SF(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_D, DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_D, mask_sh),\ + HUBBUB_SF(DCHUBBUB_DEBUG_CTRL_0, DET_DEPTH, mask_sh),\ + HUBBUB_SF(DCHUBBUB_DET0_CTRL, DET0_SIZE, mask_sh),\ + HUBBUB_SF(DCHUBBUB_DET0_CTRL, DET0_SIZE_CURRENT, mask_sh),\ + HUBBUB_SF(DCHUBBUB_DET1_CTRL, DET1_SIZE, mask_sh),\ + HUBBUB_SF(DCHUBBUB_DET1_CTRL, DET1_SIZE_CURRENT, mask_sh),\ + HUBBUB_SF(DCHUBBUB_DET2_CTRL, DET2_SIZE, mask_sh),\ + HUBBUB_SF(DCHUBBUB_DET2_CTRL, DET2_SIZE_CURRENT, mask_sh),\ + HUBBUB_SF(DCHUBBUB_DET3_CTRL, DET3_SIZE, mask_sh),\ + HUBBUB_SF(DCHUBBUB_DET3_CTRL, DET3_SIZE_CURRENT, mask_sh),\ + HUBBUB_SF(DCHUBBUB_COMPBUF_CTRL, COMPBUF_SIZE, mask_sh),\ + HUBBUB_SF(DCHUBBUB_COMPBUF_CTRL, COMPBUF_SIZE_CURRENT, mask_sh),\ + HUBBUB_SF(COMPBUF_RESERVED_SPACE, COMPBUF_RESERVED_SPACE_64B, mask_sh),\ + HUBBUB_SF(COMPBUF_RESERVED_SPACE, COMPBUF_RESERVED_SPACE_ZS, mask_sh),\ + HUBBUB_SF(DCHUBBUB_ARB_USR_RETRAINING_CNTL, DCHUBBUB_ARB_ALLOW_USR_RETRAINING_FORCE_VALUE, mask_sh),\ + HUBBUB_SF(DCHUBBUB_ARB_USR_RETRAINING_CNTL, DCHUBBUB_ARB_ALLOW_USR_RETRAINING_FORCE_ENABLE, mask_sh),\ + HUBBUB_SF(DCHUBBUB_ARB_USR_RETRAINING_CNTL, DCHUBBUB_ARB_DO_NOT_FORCE_ALLOW_USR_RETRAINING_DURING_PSTATE_CHANGE_REQUEST, mask_sh),\ + HUBBUB_SF(DCHUBBUB_ARB_USR_RETRAINING_CNTL, DCHUBBUB_ARB_DO_NOT_FORCE_ALLOW_USR_RETRAINING_DURING_PRE_CSTATE, mask_sh),\ + HUBBUB_SF(DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_A, DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_A, mask_sh), \ + HUBBUB_SF(DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_B, DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_B, mask_sh), \ + HUBBUB_SF(DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_C, DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_C, mask_sh), \ + HUBBUB_SF(DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_D, DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_D, mask_sh),\ + HUBBUB_SF(DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_A, DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_A, mask_sh),\ + HUBBUB_SF(DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_B, DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_B, mask_sh),\ + HUBBUB_SF(DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_C, DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_C, mask_sh),\ + HUBBUB_SF(DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_D, DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_D, mask_sh),\ + HUBBUB_SF(DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_A, DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_A, mask_sh),\ + HUBBUB_SF(DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_B, DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_B, mask_sh),\ + HUBBUB_SF(DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_C, DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_C, mask_sh),\ + HUBBUB_SF(DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_D, DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_D, mask_sh), \ + HUBBUB_SF(DCN_VM_FAULT_ADDR_MSB, DCN_VM_FAULT_ADDR_MSB, mask_sh), \ + HUBBUB_SF(DCN_VM_FAULT_ADDR_LSB, DCN_VM_FAULT_ADDR_LSB, mask_sh), \ + HUBBUB_SF(DCN_VM_FAULT_CNTL, DCN_VM_ERROR_STATUS_CLEAR, mask_sh), \ + HUBBUB_SF(DCN_VM_FAULT_CNTL, DCN_VM_ERROR_STATUS_MODE, mask_sh), \ + HUBBUB_SF(DCN_VM_FAULT_CNTL, DCN_VM_ERROR_INTERRUPT_ENABLE, mask_sh), \ + HUBBUB_SF(DCN_VM_FAULT_CNTL, DCN_VM_RANGE_FAULT_DISABLE, mask_sh), \ + HUBBUB_SF(DCN_VM_FAULT_CNTL, DCN_VM_PRQ_FAULT_DISABLE, mask_sh), \ + HUBBUB_SF(DCN_VM_FAULT_STATUS, DCN_VM_ERROR_STATUS, mask_sh), \ + HUBBUB_SF(DCN_VM_FAULT_STATUS, DCN_VM_ERROR_VMID, mask_sh), \ + HUBBUB_SF(DCN_VM_FAULT_STATUS, DCN_VM_ERROR_TABLE_LEVEL, mask_sh), \ + HUBBUB_SF(DCN_VM_FAULT_STATUS, DCN_VM_ERROR_PIPE, mask_sh), \ + HUBBUB_SF(DCN_VM_FAULT_STATUS, DCN_VM_ERROR_INTERRUPT_STATUS, mask_sh),\ + 
HUBBUB_SF(SDPIF_REQUEST_RATE_LIMIT, SDPIF_REQUEST_RATE_LIMIT, mask_sh),\ + HUBBUB_SF(DCHUBBUB_CLOCK_CNTL, DISPCLK_R_DCHUBBUB_GATE_DIS, mask_sh),\ + HUBBUB_SF(DCHUBBUB_CLOCK_CNTL, DCFCLK_R_DCHUBBUB_GATE_DIS, mask_sh),\ + HUBBUB_SF(DCHUBBUB_SDPIF_CFG0, SDPIF_PORT_CONTROL, mask_sh),\ + HUBBUB_SF(DCHUBBUB_SDPIF_CFG1, SDPIF_MAX_NUM_OUTSTANDING, mask_sh),\ + HUBBUB_SF(DCHUBBUB_MEM_PWR_MODE_CTRL, DET_MEM_PWR_LS_MODE, mask_sh),\ + HUBBUB_SF(DCHUBBUB_ARB_MALL_CNTL, MALL_PREFETCH_COMPLETE, mask_sh),\ + HUBBUB_SF(DCHUBBUB_ARB_MALL_CNTL, MALL_IN_USE, mask_sh) + + + +bool hubbub32_program_urgent_watermarks( + struct hubbub *hubbub, + union dcn_watermark_set *watermarks, + unsigned int refclk_mhz, + bool safe_to_lower); + +bool hubbub32_program_stutter_watermarks( + struct hubbub *hubbub, + union dcn_watermark_set *watermarks, + unsigned int refclk_mhz, + bool safe_to_lower); + +bool hubbub32_program_pstate_watermarks( + struct hubbub *hubbub, + union dcn_watermark_set *watermarks, + unsigned int refclk_mhz, + bool safe_to_lower); + +bool hubbub32_program_usr_watermarks( + struct hubbub *hubbub, + union dcn_watermark_set *watermarks, + unsigned int refclk_mhz, + bool safe_to_lower); + +void hubbub32_force_usr_retraining_allow(struct hubbub *hubbub, bool allow); + +void hubbub32_force_wm_propagate_to_pipes(struct hubbub *hubbub); + +void hubbub32_init(struct hubbub *hubbub); + +void dcn32_program_det_size(struct hubbub *hubbub, int hubp_inst, unsigned int det_buffer_size_in_kbyte); + +void hubbub32_construct(struct dcn20_hubbub *hubbub2, + struct dc_context *ctx, + const struct dcn_hubbub_registers *hubbub_regs, + const struct dcn_hubbub_shift *hubbub_shift, + const struct dcn_hubbub_mask *hubbub_mask, + int det_size_kb, + int pixel_chunk_size_kb, + int config_return_buffer_size_kb); + +void hubbub32_set_request_limit(struct hubbub *hubbub, int umc_count, int words_per_umc); + +void hubbub32_get_mall_en(struct hubbub *hubbub, unsigned int *mall_in_use); + +void dcn32_program_compbuf_size(struct hubbub *hubbub, unsigned int compbuf_size_kb, bool safe_to_increase); + +#endif diff --git a/drivers/gpu/drm/amd/display/dc/hubbub/dcn35/dcn35_hubbub.c b/drivers/gpu/drm/amd/display/dc/hubbub/dcn35/dcn35_hubbub.c new file mode 100644 index 000000000000..6293173ba2b9 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/hubbub/dcn35/dcn35_hubbub.c @@ -0,0 +1,611 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright 2023 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + + +#include "dcn30/dcn30_hubbub.h" +#include "dcn31/dcn31_hubbub.h" +#include "dcn32/dcn32_hubbub.h" +#include "dcn35_hubbub.h" +#include "dm_services.h" +#include "reg_helper.h" + + +#define CTX \ + hubbub2->base.ctx +#define DC_LOGGER \ + hubbub2->base.ctx->logger +#define REG(reg)\ + hubbub2->regs->reg + +#undef FN +#define FN(reg_name, field_name) \ + hubbub2->shifts->field_name, hubbub2->masks->field_name + +#define DCN35_CRB_SEGMENT_SIZE_KB 64 + +static void dcn35_init_crb(struct hubbub *hubbub) +{ + struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub); + + REG_GET(DCHUBBUB_DET0_CTRL, DET0_SIZE_CURRENT, + &hubbub2->det0_size); + + REG_GET(DCHUBBUB_DET1_CTRL, DET1_SIZE_CURRENT, + &hubbub2->det1_size); + + REG_GET(DCHUBBUB_DET2_CTRL, DET2_SIZE_CURRENT, + &hubbub2->det2_size); + + REG_GET(DCHUBBUB_DET3_CTRL, DET3_SIZE_CURRENT, + &hubbub2->det3_size); + + REG_GET(DCHUBBUB_COMPBUF_CTRL, COMPBUF_SIZE_CURRENT, + &hubbub2->compbuf_size_segments); + + REG_SET_2(COMPBUF_RESERVED_SPACE, 0, + COMPBUF_RESERVED_SPACE_64B, hubbub2->pixel_chunk_size / 32, + COMPBUF_RESERVED_SPACE_ZS, hubbub2->pixel_chunk_size / 128); + REG_UPDATE(DCHUBBUB_DEBUG_CTRL_0, DET_DEPTH, 0x5FF); +} + +static void dcn35_program_compbuf_size(struct hubbub *hubbub, unsigned int compbuf_size_kb, bool safe_to_increase) +{ + struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub); + unsigned int compbuf_size_segments = (compbuf_size_kb + DCN35_CRB_SEGMENT_SIZE_KB - 1) / DCN35_CRB_SEGMENT_SIZE_KB; + + if (safe_to_increase || compbuf_size_segments <= hubbub2->compbuf_size_segments) { + if (compbuf_size_segments > hubbub2->compbuf_size_segments) { + REG_WAIT(DCHUBBUB_DET0_CTRL, DET0_SIZE_CURRENT, hubbub2->det0_size, 1, 100); + REG_WAIT(DCHUBBUB_DET1_CTRL, DET1_SIZE_CURRENT, hubbub2->det1_size, 1, 100); + REG_WAIT(DCHUBBUB_DET2_CTRL, DET2_SIZE_CURRENT, hubbub2->det2_size, 1, 100); + REG_WAIT(DCHUBBUB_DET3_CTRL, DET3_SIZE_CURRENT, hubbub2->det3_size, 1, 100); + } + /* Should never be hit, if it is we have an erroneous hw config*/ + ASSERT(hubbub2->det0_size + hubbub2->det1_size + hubbub2->det2_size + + hubbub2->det3_size + compbuf_size_segments <= hubbub2->crb_size_segs); + REG_UPDATE(DCHUBBUB_COMPBUF_CTRL, COMPBUF_SIZE, compbuf_size_segments); + hubbub2->compbuf_size_segments = compbuf_size_segments; + ASSERT(REG_GET(DCHUBBUB_COMPBUF_CTRL, CONFIG_ERROR, &compbuf_size_segments) && !compbuf_size_segments); + } +} + +static uint32_t convert_and_clamp( + uint32_t wm_ns, + uint32_t refclk_mhz, + uint32_t clamp_value) +{ + uint32_t ret_val = 0; + + ret_val = wm_ns * refclk_mhz; + + ret_val /= 1000; + + if (ret_val > clamp_value) + ret_val = clamp_value; + + return ret_val; +} + +static bool hubbub35_program_stutter_z8_watermarks( + struct hubbub *hubbub, + union dcn_watermark_set *watermarks, + unsigned int refclk_mhz, + bool safe_to_lower) +{ + struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub); + uint32_t prog_wm_value; + bool wm_pending = false; + + /* clock state A */ + if (watermarks->a.cstate_pstate.cstate_enter_plus_exit_z8_ns + > hubbub2->watermarks.a.cstate_pstate.cstate_enter_plus_exit_z8_ns) { + hubbub2->watermarks.a.cstate_pstate.cstate_enter_plus_exit_z8_ns = + 
watermarks->a.cstate_pstate.cstate_enter_plus_exit_z8_ns; + prog_wm_value = convert_and_clamp( + watermarks->a.cstate_pstate.cstate_enter_plus_exit_z8_ns, + refclk_mhz, 0xfffff); + REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_A, 0, + DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_A, prog_wm_value); + DC_LOG_BANDWIDTH_CALCS("SR_ENTER_WATERMARK_Z8_A calculated =%d\n" + "HW register value = 0x%x\n", + watermarks->a.cstate_pstate.cstate_enter_plus_exit_z8_ns, prog_wm_value); + } else if (watermarks->a.cstate_pstate.cstate_enter_plus_exit_z8_ns + < hubbub2->watermarks.a.cstate_pstate.cstate_enter_plus_exit_z8_ns) + wm_pending = true; + + if (safe_to_lower || watermarks->a.cstate_pstate.cstate_exit_z8_ns + > hubbub2->watermarks.a.cstate_pstate.cstate_exit_z8_ns) { + hubbub2->watermarks.a.cstate_pstate.cstate_exit_z8_ns = + watermarks->a.cstate_pstate.cstate_exit_z8_ns; + prog_wm_value = convert_and_clamp( + watermarks->a.cstate_pstate.cstate_exit_z8_ns, + refclk_mhz, 0xfffff); + REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_A, 0, + DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_A, prog_wm_value); + DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_Z8_A calculated =%d\n" + "HW register value = 0x%x\n", + watermarks->a.cstate_pstate.cstate_exit_z8_ns, prog_wm_value); + } else if (watermarks->a.cstate_pstate.cstate_exit_z8_ns + < hubbub2->watermarks.a.cstate_pstate.cstate_exit_z8_ns) + wm_pending = true; + + /* clock state B */ + + if (safe_to_lower || watermarks->b.cstate_pstate.cstate_enter_plus_exit_z8_ns + > hubbub2->watermarks.b.cstate_pstate.cstate_enter_plus_exit_z8_ns) { + hubbub2->watermarks.b.cstate_pstate.cstate_enter_plus_exit_z8_ns = + watermarks->b.cstate_pstate.cstate_enter_plus_exit_z8_ns; + prog_wm_value = convert_and_clamp( + watermarks->b.cstate_pstate.cstate_enter_plus_exit_z8_ns, + refclk_mhz, 0xfffff); + REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_B, 0, + DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_B, prog_wm_value); + DC_LOG_BANDWIDTH_CALCS("SR_ENTER_WATERMARK_Z8_B calculated =%d\n" + "HW register value = 0x%x\n", + watermarks->b.cstate_pstate.cstate_enter_plus_exit_z8_ns, prog_wm_value); + } else if (watermarks->b.cstate_pstate.cstate_enter_plus_exit_z8_ns + < hubbub2->watermarks.b.cstate_pstate.cstate_enter_plus_exit_z8_ns) + wm_pending = true; + + if (safe_to_lower || watermarks->b.cstate_pstate.cstate_exit_z8_ns + > hubbub2->watermarks.b.cstate_pstate.cstate_exit_z8_ns) { + hubbub2->watermarks.b.cstate_pstate.cstate_exit_z8_ns = + watermarks->b.cstate_pstate.cstate_exit_z8_ns; + prog_wm_value = convert_and_clamp( + watermarks->b.cstate_pstate.cstate_exit_z8_ns, + refclk_mhz, 0xfffff); + REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_B, 0, + DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_B, prog_wm_value); + DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_Z8_B calculated =%d\n" + "HW register value = 0x%x\n", + watermarks->b.cstate_pstate.cstate_exit_z8_ns, prog_wm_value); + } else if (watermarks->b.cstate_pstate.cstate_exit_z8_ns + < hubbub2->watermarks.b.cstate_pstate.cstate_exit_z8_ns) + wm_pending = true; + + /* clock state C */ + if (safe_to_lower || watermarks->c.cstate_pstate.cstate_enter_plus_exit_z8_ns + > hubbub2->watermarks.c.cstate_pstate.cstate_enter_plus_exit_z8_ns) { + hubbub2->watermarks.c.cstate_pstate.cstate_enter_plus_exit_z8_ns = + watermarks->c.cstate_pstate.cstate_enter_plus_exit_z8_ns; + prog_wm_value = convert_and_clamp( + watermarks->c.cstate_pstate.cstate_enter_plus_exit_z8_ns, + refclk_mhz, 0xfffff); + REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_C, 0, + 
DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_C, prog_wm_value); + DC_LOG_BANDWIDTH_CALCS("SR_ENTER_WATERMARK_Z8_C calculated =%d\n" + "HW register value = 0x%x\n", + watermarks->c.cstate_pstate.cstate_enter_plus_exit_z8_ns, prog_wm_value); + } else if (watermarks->c.cstate_pstate.cstate_enter_plus_exit_z8_ns + < hubbub2->watermarks.c.cstate_pstate.cstate_enter_plus_exit_z8_ns) + wm_pending = true; + + if (safe_to_lower || watermarks->c.cstate_pstate.cstate_exit_z8_ns + > hubbub2->watermarks.c.cstate_pstate.cstate_exit_z8_ns) { + hubbub2->watermarks.c.cstate_pstate.cstate_exit_z8_ns = + watermarks->c.cstate_pstate.cstate_exit_z8_ns; + prog_wm_value = convert_and_clamp( + watermarks->c.cstate_pstate.cstate_exit_z8_ns, + refclk_mhz, 0xfffff); + REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_C, 0, + DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_C, prog_wm_value); + DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_Z8_C calculated =%d\n" + "HW register value = 0x%x\n", + watermarks->c.cstate_pstate.cstate_exit_z8_ns, prog_wm_value); + } else if (watermarks->c.cstate_pstate.cstate_exit_z8_ns + < hubbub2->watermarks.c.cstate_pstate.cstate_exit_z8_ns) + wm_pending = true; + + /* clock state D */ + if (safe_to_lower || watermarks->d.cstate_pstate.cstate_enter_plus_exit_z8_ns + > hubbub2->watermarks.d.cstate_pstate.cstate_enter_plus_exit_z8_ns) { + hubbub2->watermarks.d.cstate_pstate.cstate_enter_plus_exit_z8_ns = + watermarks->d.cstate_pstate.cstate_enter_plus_exit_z8_ns; + prog_wm_value = convert_and_clamp( + watermarks->d.cstate_pstate.cstate_enter_plus_exit_z8_ns, + refclk_mhz, 0xfffff); + REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_D, 0, + DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_D, prog_wm_value); + DC_LOG_BANDWIDTH_CALCS("SR_ENTER_WATERMARK_Z8_D calculated =%d\n" + "HW register value = 0x%x\n", + watermarks->d.cstate_pstate.cstate_enter_plus_exit_z8_ns, prog_wm_value); + } else if (watermarks->d.cstate_pstate.cstate_enter_plus_exit_z8_ns + < hubbub2->watermarks.d.cstate_pstate.cstate_enter_plus_exit_z8_ns) + wm_pending = true; + + if (safe_to_lower || watermarks->d.cstate_pstate.cstate_exit_z8_ns + > hubbub2->watermarks.d.cstate_pstate.cstate_exit_z8_ns) { + hubbub2->watermarks.d.cstate_pstate.cstate_exit_z8_ns = + watermarks->d.cstate_pstate.cstate_exit_z8_ns; + prog_wm_value = convert_and_clamp( + watermarks->d.cstate_pstate.cstate_exit_z8_ns, + refclk_mhz, 0xfffff); + REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_D, 0, + DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_D, prog_wm_value); + DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_Z8_D calculated =%d\n" + "HW register value = 0x%x\n", + watermarks->d.cstate_pstate.cstate_exit_z8_ns, prog_wm_value); + } else if (watermarks->d.cstate_pstate.cstate_exit_z8_ns + < hubbub2->watermarks.d.cstate_pstate.cstate_exit_z8_ns) + wm_pending = true; + + return wm_pending; +} + +static void hubbub35_get_dchub_ref_freq(struct hubbub *hubbub, + unsigned int dccg_ref_freq_inKhz, + unsigned int *dchub_ref_freq_inKhz) +{ + struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub); + uint32_t ref_div = 0; + uint32_t ref_en = 0; + unsigned int dc_refclk_khz = 24000; + + REG_GET_2(DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_REFDIV, &ref_div, + DCHUBBUB_GLOBAL_TIMER_ENABLE, &ref_en); + + if (ref_en) { + if (ref_div == 2) + *dchub_ref_freq_inKhz = dc_refclk_khz / 2; + else + *dchub_ref_freq_inKhz = dc_refclk_khz; + + /* + * The external Reference Clock may change based on the board or + * platform requirements and the programmable integer divide must + * be programmed to provide a 
suitable DLG RefClk frequency between + * a minimum of 20MHz and maximum of 50MHz + */ + if (*dchub_ref_freq_inKhz < 20000 || *dchub_ref_freq_inKhz > 50000) + ASSERT_CRITICAL(false); + + return; + } else { + *dchub_ref_freq_inKhz = dc_refclk_khz; + /*init sequence issue on bringup patch*/ + REG_UPDATE_2(DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_REFDIV, 1, + DCHUBBUB_GLOBAL_TIMER_ENABLE, 1); + // HUBBUB global timer must be enabled. + ASSERT_CRITICAL(false); + return; + } +} + + +static bool hubbub35_program_watermarks( + struct hubbub *hubbub, + union dcn_watermark_set *watermarks, + unsigned int refclk_mhz, + bool safe_to_lower) +{ + bool wm_pending = false; + struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub); + + if (hubbub32_program_urgent_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower)) + wm_pending = true; + + if (hubbub32_program_stutter_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower)) + wm_pending = true; + + if (hubbub32_program_pstate_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower)) + wm_pending = true; + + if (hubbub32_program_usr_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower)) + wm_pending = true; + + if (hubbub35_program_stutter_z8_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower)) + wm_pending = true; + + REG_SET(DCHUBBUB_ARB_SAT_LEVEL, 0, + DCHUBBUB_ARB_SAT_LEVEL, 60 * refclk_mhz); + REG_UPDATE_2(DCHUBBUB_ARB_DF_REQ_OUTSTAND, + DCHUBBUB_ARB_MIN_REQ_OUTSTAND, 0xFF, + DCHUBBUB_ARB_MIN_REQ_OUTSTAND_COMMIT_THRESHOLD, 0xA);/*hw delta*/ + REG_UPDATE(DCHUBBUB_ARB_HOSTVM_CNTL, DCHUBBUB_ARB_MAX_QOS_COMMIT_THRESHOLD, 0xF); + + hubbub1_allow_self_refresh_control(hubbub, !hubbub->ctx->dc->debug.disable_stutter); + + hubbub32_force_usr_retraining_allow(hubbub, hubbub->ctx->dc->debug.force_usr_allow); + + return wm_pending; +} + +/* Copy values from WM set A to all other sets */ +static void hubbub35_init_watermarks(struct hubbub *hubbub) +{ + struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub); + uint32_t reg; + + reg = REG_READ(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A); + REG_WRITE(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B, reg); + REG_WRITE(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C, reg); + REG_WRITE(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D, reg); + + reg = REG_READ(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_A); + REG_WRITE(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_B, reg); + REG_WRITE(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_C, reg); + REG_WRITE(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_D, reg); + + reg = REG_READ(DCHUBBUB_ARB_FRAC_URG_BW_NOM_A); + REG_WRITE(DCHUBBUB_ARB_FRAC_URG_BW_NOM_B, reg); + REG_WRITE(DCHUBBUB_ARB_FRAC_URG_BW_NOM_C, reg); + REG_WRITE(DCHUBBUB_ARB_FRAC_URG_BW_NOM_D, reg); + + reg = REG_READ(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_A); + REG_WRITE(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_B, reg); + REG_WRITE(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_C, reg); + REG_WRITE(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_D, reg); + + reg = REG_READ(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A); + REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B, reg); + REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C, reg); + REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D, reg); + + reg = REG_READ(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A); + REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B, reg); + REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C, reg); + REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D, reg); + + reg = REG_READ(DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_A); + REG_WRITE(DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_B, reg); + REG_WRITE(DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_C, reg); + 
REG_WRITE(DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_D, reg); + + reg = REG_READ(DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_A); + REG_WRITE(DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_B, reg); + REG_WRITE(DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_C, reg); + REG_WRITE(DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_D, reg); + + reg = REG_READ(DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_A); + REG_WRITE(DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_B, reg); + REG_WRITE(DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_C, reg); + REG_WRITE(DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_D, reg); + + reg = REG_READ(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_A); + REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_B, reg); + REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_C, reg); + REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_D, reg); + + reg = REG_READ(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_A); + REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_B, reg); + REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_C, reg); + REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_D, reg); + +} + +static void hubbub35_wm_read_state(struct hubbub *hubbub, + struct dcn_hubbub_wm *wm) +{ + struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub); + struct dcn_hubbub_wm_set *s; + + memset(wm, 0, sizeof(struct dcn_hubbub_wm)); + + s = &wm->sets[0]; + s->wm_set = 0; + REG_GET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A, + DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A, &s->data_urgent); + + REG_GET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A, + DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A, &s->sr_enter); + + REG_GET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A, + DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A, &s->sr_exit); + + REG_GET(DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_A, + DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_A, &s->dram_clk_change); + + REG_GET(DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_A, + DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_A, &s->usr_retrain); + + REG_GET(DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_A, + DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_A, &s->fclk_pstate_change); + + REG_GET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_A, + DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_A, &s->sr_enter_exit_Z8); + + REG_GET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_A, + DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_A, &s->sr_enter_Z8); + s = &wm->sets[1]; + s->wm_set = 1; + REG_GET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B, + DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B, &s->data_urgent); + + REG_GET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B, + DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B, &s->sr_enter); + + REG_GET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B, + DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B, &s->sr_exit); + + REG_GET(DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_B, + DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_B, &s->dram_clk_change); + + REG_GET(DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_B, + DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_B, &s->usr_retrain); + + REG_GET(DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_B, + DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_B, &s->fclk_pstate_change); + + REG_GET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_B, + DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_B, &s->sr_enter_exit_Z8); + + REG_GET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_B, + DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_B, &s->sr_enter_Z8); + + s = &wm->sets[2]; + s->wm_set = 2; + REG_GET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C, + DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C, &s->data_urgent); + + REG_GET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C, + DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C, &s->sr_enter); + + 
REG_GET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C, + DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C, &s->sr_exit); + + REG_GET(DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_C, + DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_C, &s->dram_clk_change); + + REG_GET(DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_C, + DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_C, &s->usr_retrain); + + REG_GET(DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_C, + DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_C, &s->fclk_pstate_change); + + REG_GET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_C, + DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_C, &s->sr_enter_exit_Z8); + + REG_GET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_C, + DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_C, &s->sr_enter_Z8); + + s = &wm->sets[3]; + s->wm_set = 3; + REG_GET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D, + DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D, &s->data_urgent); + + REG_GET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D, + DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D, &s->sr_enter); + + REG_GET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D, + DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D, &s->sr_exit); + + REG_GET(DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_D, + DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_D, &s->dram_clk_change); + + REG_GET(DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_D, + DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_D, &s->usr_retrain); + + REG_GET(DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_D, + DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_D, &s->fclk_pstate_change); + + REG_GET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_D, + DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_D, &s->sr_enter_exit_Z8); + + REG_GET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_D, + DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_D, &s->sr_enter_Z8); +} + +static void hubbub35_set_fgcg(struct dcn20_hubbub *hubbub2, bool enable) +{ + REG_UPDATE(DCHUBBUB_CLOCK_CNTL, DCHUBBUB_FGCG_REP_DIS, !enable); +} + +static void hubbub35_init(struct hubbub *hubbub) +{ + struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub); + /*Enable clock gaters*/ + if (hubbub->ctx->dc->debug.disable_clock_gate) { + /*done in hwseq*/ + /*REG_UPDATE(DCFCLK_CNTL, DCFCLK_GATE_DIS, 0);*/ + + REG_UPDATE_2(DCHUBBUB_CLOCK_CNTL, + DISPCLK_R_DCHUBBUB_GATE_DIS, 1, + DCFCLK_R_DCHUBBUB_GATE_DIS, 1); + } + hubbub35_set_fgcg(hubbub2, + hubbub->ctx->dc->debug.enable_fine_grain_clock_gating + .bits.dchubbub); + /* + ignore the "df_pre_cstate_req" from the SDP port control. 
+ only the DCN will determine when to connect the SDP port + */ + REG_UPDATE(DCHUBBUB_SDPIF_CFG0, + SDPIF_PORT_CONTROL, 1); + /*Set SDP's max outstanding request + When set to 1: Max outstanding is 512 + When set to 0: Max outstanding is 256 + must set the register back to 0 (max outstanding = 256) in zero frame buffer mode*/ + REG_UPDATE(DCHUBBUB_SDPIF_CFG1, + SDPIF_MAX_NUM_OUTSTANDING, 0); + + REG_UPDATE_2(DCHUBBUB_ARB_DF_REQ_OUTSTAND, + DCHUBBUB_ARB_MAX_REQ_OUTSTAND, 256, + DCHUBBUB_ARB_MIN_REQ_OUTSTAND, 256); + +} + +/*static void hubbub35_set_request_limit(struct hubbub *hubbub, + int memory_channel_count, + int words_per_channel) +{ + struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub); + + uint32_t request_limit = 3 * memory_channel_count * words_per_channel / 4; + + ASSERT((request_limit & (~0xFFF)) == 0); //field is only 24 bits long + ASSERT(request_limit > 0); //field is only 24 bits long + + if (request_limit > 0xFFF) + request_limit = 0xFFF; + + if (request_limit > 0) + REG_UPDATE(SDPIF_REQUEST_RATE_LIMIT, SDPIF_REQUEST_RATE_LIMIT, request_limit); +}*/ + +static const struct hubbub_funcs hubbub35_funcs = { + .update_dchub = hubbub2_update_dchub, + .init_dchub_sys_ctx = hubbub31_init_dchub_sys_ctx, + .init_vm_ctx = hubbub2_init_vm_ctx, + .dcc_support_swizzle = hubbub3_dcc_support_swizzle, + .dcc_support_pixel_format = hubbub2_dcc_support_pixel_format, + .get_dcc_compression_cap = hubbub3_get_dcc_compression_cap, + .wm_read_state = hubbub35_wm_read_state, + .get_dchub_ref_freq = hubbub35_get_dchub_ref_freq, + .program_watermarks = hubbub35_program_watermarks, + .allow_self_refresh_control = hubbub1_allow_self_refresh_control, + .is_allow_self_refresh_enabled = hubbub1_is_allow_self_refresh_enabled, + .verify_allow_pstate_change_high = hubbub1_verify_allow_pstate_change_high, + .force_wm_propagate_to_pipes = hubbub32_force_wm_propagate_to_pipes, + .force_pstate_change_control = hubbub3_force_pstate_change_control, + .init_watermarks = hubbub35_init_watermarks, + .program_det_size = dcn32_program_det_size, + .program_compbuf_size = dcn35_program_compbuf_size, + .init_crb = dcn35_init_crb, + .hubbub_read_state = hubbub2_read_state, + .force_usr_retraining_allow = hubbub32_force_usr_retraining_allow, + .dchubbub_init = hubbub35_init, +}; + +void hubbub35_construct(struct dcn20_hubbub *hubbub2, + struct dc_context *ctx, + const struct dcn_hubbub_registers *hubbub_regs, + const struct dcn_hubbub_shift *hubbub_shift, + const struct dcn_hubbub_mask *hubbub_mask, + int det_size_kb, + int pixel_chunk_size_kb, + int config_return_buffer_size_kb) +{ + hubbub2->base.ctx = ctx; + hubbub2->base.funcs = &hubbub35_funcs; + hubbub2->regs = hubbub_regs; + hubbub2->shifts = hubbub_shift; + hubbub2->masks = hubbub_mask; + + hubbub2->debug_test_index_pstate = 0xB; + hubbub2->detile_buf_size = det_size_kb * 1024; + hubbub2->pixel_chunk_size = pixel_chunk_size_kb * 1024; + hubbub2->crb_size_segs = config_return_buffer_size_kb / DCN35_CRB_SEGMENT_SIZE_KB; /*todo*/ +} diff --git a/drivers/gpu/drm/amd/display/dc/hubbub/dcn35/dcn35_hubbub.h b/drivers/gpu/drm/amd/display/dc/hubbub/dcn35/dcn35_hubbub.h new file mode 100644 index 000000000000..54cf00ffceb8 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/hubbub/dcn35/dcn35_hubbub.h @@ -0,0 +1,155 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright 2023 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#ifndef __DC_HUBBUB_DCN35_H__ +#define __DC_HUBBUB_DCN35_H__ + +#include "dcn32/dcn32_hubbub.h" + +#define HUBBUB_REG_LIST_DCN35(id)\ + SR(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A),\ + SR(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B),\ + SR(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C),\ + SR(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D),\ + SR(DCHUBBUB_ARB_WATERMARK_CHANGE_CNTL),\ + SR(DCHUBBUB_ARB_DRAM_STATE_CNTL),\ + SR(DCHUBBUB_ARB_SAT_LEVEL),\ + SR(DCHUBBUB_ARB_DF_REQ_OUTSTAND),\ + SR(DCHUBBUB_GLOBAL_TIMER_CNTL), \ + SR(DCHUBBUB_SOFT_RESET),\ + SR(DCHUBBUB_CRC_CTRL), \ + SR(DCN_VM_FB_LOCATION_BASE),\ + SR(DCN_VM_FB_LOCATION_TOP),\ + SR(DCN_VM_FB_OFFSET),\ + SR(DCN_VM_AGP_BOT),\ + SR(DCN_VM_AGP_TOP),\ + SR(DCN_VM_AGP_BASE),\ + HUBBUB_SR_WATERMARK_REG_LIST(), \ + SR(DCHUBBUB_ARB_FRAC_URG_BW_NOM_A),\ + SR(DCHUBBUB_ARB_FRAC_URG_BW_NOM_B),\ + SR(DCHUBBUB_ARB_FRAC_URG_BW_NOM_C),\ + SR(DCHUBBUB_ARB_FRAC_URG_BW_NOM_D),\ + SR(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_A),\ + SR(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_B),\ + SR(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_C),\ + SR(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_D),\ + SR(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_A),\ + SR(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_B),\ + SR(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_C),\ + SR(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_D),\ + SR(DCHUBBUB_DET0_CTRL),\ + SR(DCHUBBUB_DET1_CTRL),\ + SR(DCHUBBUB_DET2_CTRL),\ + SR(DCHUBBUB_DET3_CTRL),\ + SR(DCHUBBUB_COMPBUF_CTRL),\ + SR(COMPBUF_RESERVED_SPACE),\ + SR(DCHUBBUB_DEBUG_CTRL_0),\ + SR(DCHUBBUB_ARB_USR_RETRAINING_CNTL),\ + SR(DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_A),\ + SR(DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_B),\ + SR(DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_C),\ + SR(DCHUBBUB_ARB_USR_RETRAINING_WATERMARK_D),\ + SR(DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_A),\ + SR(DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_B),\ + SR(DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_C),\ + SR(DCHUBBUB_ARB_UCLK_PSTATE_CHANGE_WATERMARK_D),\ + SR(DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_A),\ + SR(DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_B),\ + SR(DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_C),\ + SR(DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_D),\ + SR(DCN_VM_FAULT_ADDR_MSB),\ + SR(DCN_VM_FAULT_ADDR_LSB),\ + SR(DCN_VM_FAULT_CNTL),\ + SR(DCN_VM_FAULT_STATUS),\ + SR(SDPIF_REQUEST_RATE_LIMIT),\ + SR(DCHUBBUB_CLOCK_CNTL),\ + SR(DCHUBBUB_SDPIF_CFG0),\ + SR(DCHUBBUB_SDPIF_CFG1),\ + SR(DCHUBBUB_MEM_PWR_MODE_CTRL),\ + SR(DCHUBBUB_ARB_HOSTVM_CNTL),\ + 
SR(DCHVM_CTRL0),\ + SR(DCHVM_MEM_CTRL),\ + SR(DCHVM_CLK_CTRL),\ + SR(DCHVM_RIOMMU_CTRL0),\ + SR(DCHVM_RIOMMU_STAT0),\ + SR(DCHUBBUB_COMPBUF_CTRL),\ + SR(COMPBUF_RESERVED_SPACE),\ + SR(DCHUBBUB_DEBUG_CTRL_0),\ + SR(DCHUBBUB_CLOCK_CNTL),\ + SR(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_A),\ + SR(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_A),\ + SR(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_B),\ + SR(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_B),\ + SR(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_C),\ + SR(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_C),\ + SR(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_D),\ + SR(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_D),\ + SR(DCHUBBUB_ARB_QOS_FORCE) + + +#define HUBBUB_MASK_SH_LIST_DCN35(mask_sh)\ + HUBBUB_MASK_SH_LIST_DCN32(mask_sh), \ + HUBBUB_SF(DCHVM_CTRL0, HOSTVM_INIT_REQ, mask_sh),\ + HUBBUB_SF(DCHVM_MEM_CTRL, HVM_GPUVMRET_PWR_REQ_DIS, mask_sh),\ + HUBBUB_SF(DCHVM_MEM_CTRL, HVM_GPUVMRET_FORCE_REQ, mask_sh),\ + HUBBUB_SF(DCHVM_MEM_CTRL, HVM_GPUVMRET_POWER_STATUS, mask_sh),\ + HUBBUB_SF(DCHVM_CLK_CTRL, HVM_DISPCLK_R_GATE_DIS, mask_sh),\ + HUBBUB_SF(DCHVM_CLK_CTRL, HVM_DISPCLK_G_GATE_DIS, mask_sh),\ + HUBBUB_SF(DCHVM_CLK_CTRL, HVM_DCFCLK_R_GATE_DIS, mask_sh),\ + HUBBUB_SF(DCHVM_CLK_CTRL, HVM_DCFCLK_G_GATE_DIS, mask_sh),\ + HUBBUB_SF(DCHVM_CLK_CTRL, TR_REQ_REQCLKREQ_MODE, mask_sh),\ + HUBBUB_SF(DCHVM_CLK_CTRL, TW_RSP_COMPCLKREQ_MODE, mask_sh),\ + HUBBUB_SF(DCHVM_RIOMMU_CTRL0, HOSTVM_PREFETCH_REQ, mask_sh),\ + HUBBUB_SF(DCHVM_RIOMMU_CTRL0, HOSTVM_POWERSTATUS, mask_sh),\ + HUBBUB_SF(DCHVM_RIOMMU_STAT0, RIOMMU_ACTIVE, mask_sh),\ + HUBBUB_SF(DCHVM_RIOMMU_STAT0, HOSTVM_PREFETCH_DONE, mask_sh),\ + HUBBUB_SF(DCHUBBUB_COMPBUF_CTRL, COMPBUF_SIZE, mask_sh),\ + HUBBUB_SF(DCHUBBUB_COMPBUF_CTRL, COMPBUF_SIZE_CURRENT, mask_sh),\ + HUBBUB_SF(DCHUBBUB_COMPBUF_CTRL, CONFIG_ERROR, mask_sh),\ + HUBBUB_SF(COMPBUF_RESERVED_SPACE, COMPBUF_RESERVED_SPACE_64B, mask_sh),\ + HUBBUB_SF(COMPBUF_RESERVED_SPACE, COMPBUF_RESERVED_SPACE_ZS, mask_sh),\ + HUBBUB_SF(DCHUBBUB_CLOCK_CNTL, DCHUBBUB_FGCG_REP_DIS, mask_sh),\ + HUBBUB_SF(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_A, DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_A, mask_sh), \ + HUBBUB_SF(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_B, DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_B, mask_sh), \ + HUBBUB_SF(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_C, DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_C, mask_sh), \ + HUBBUB_SF(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_D, DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_D, mask_sh), \ + HUBBUB_SF(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_A, DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_A, mask_sh), \ + HUBBUB_SF(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_B, DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_B, mask_sh), \ + HUBBUB_SF(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_C, DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_C, mask_sh), \ + HUBBUB_SF(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_D, DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_D, mask_sh), \ + HUBBUB_SF(DCHUBBUB_ARB_DRAM_STATE_CNTL, DCHUBBUB_ARB_ALLOW_CSTATE_DEEPSLEEP_LEGACY_MODE, mask_sh), \ + HUBBUB_SF(DCHUBBUB_ARB_HOSTVM_CNTL, DCHUBBUB_ARB_MAX_QOS_COMMIT_THRESHOLD, mask_sh),\ + HUBBUB_SF(DCHUBBUB_ARB_DF_REQ_OUTSTAND, DCHUBBUB_ARB_MIN_REQ_OUTSTAND_COMMIT_THRESHOLD, mask_sh) + +void hubbub35_construct(struct dcn20_hubbub *hubbub2, + struct dc_context *ctx, + const struct dcn_hubbub_registers *hubbub_regs, + const struct dcn_hubbub_shift *hubbub_shift, + const struct dcn_hubbub_mask *hubbub_mask, + int det_size_kb, + int pixel_chunk_size_kb, + int config_return_buffer_size_kb); +#endif
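
A note on the stutter/Z8 watermark hunks above: every per-set block follows the same safe_to_lower convention — a higher (more conservative) watermark is programmed immediately, while a lower one is only written out when the caller passes safe_to_lower, and is otherwise reported back as pending. The standalone sketch below restates that convention outside the driver; the wm_sketch_* names are local to the sketch, and the 0xfffff clamp is the field width used in the hunks above. It is an illustration of the pattern, not driver code.

#include <stdbool.h>
#include <stdint.h>

/*
 * Convert a watermark given in nanoseconds into DCHUB refclk cycles and
 * clamp it to the register field width (the role convert_and_clamp() plays
 * in the hunks above).
 */
static uint32_t wm_sketch_convert_and_clamp(uint32_t wm_ns,
                                            uint32_t refclk_mhz,
                                            uint32_t clamp_value)
{
        uint64_t cycles = (uint64_t)wm_ns * refclk_mhz / 1000;

        return cycles > clamp_value ? clamp_value : (uint32_t)cycles;
}

/*
 * Program one watermark value. Returns true when a requested decrease had
 * to be deferred, i.e. the caller should treat the watermark as pending.
 */
static bool wm_sketch_program_one(uint32_t *cached_ns, uint32_t new_ns,
                                  uint32_t refclk_mhz, bool safe_to_lower,
                                  uint32_t *prog_wm_value)
{
        if (safe_to_lower || new_ns > *cached_ns) {
                /* Raising is always allowed; lowering only when safe. */
                *cached_ns = new_ns;
                *prog_wm_value = wm_sketch_convert_and_clamp(new_ns,
                                                             refclk_mhz,
                                                             0xfffff);
                return false;
        }

        /*
         * The request is lower than the cached value but it is not yet safe
         * to lower: keep the old programming and report the change pending.
         */
        return new_ns < *cached_ns;
}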
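
Likewise, hubbub35_get_dchub_ref_freq() above derives the DLG reference clock from a fixed 24 MHz DC refclk and the programmable DCHUBBUB_GLOBAL_TIMER_REFDIV, then checks that the result falls inside the 20-50 MHz window described in its comment. A minimal sketch of that derivation, assuming the same 24000 kHz constant and window (the function name is local to the sketch):

#include <assert.h>
#include <stdint.h>

/*
 * Divide the fixed DC refclk by the global timer REFDIV and validate the
 * resulting DLG reference clock against the 20-50 MHz window.
 */
static uint32_t refclk_sketch_dchub_khz(uint32_t ref_div)
{
        const uint32_t dc_refclk_khz = 24000;
        uint32_t dchub_ref_khz = (ref_div == 2) ? dc_refclk_khz / 2 : dc_refclk_khz;

        assert(dchub_ref_khz >= 20000 && dchub_ref_khz <= 50000);
        return dchub_ref_khz;
}

With the 24 MHz source hard-coded above, only REFDIV = 1 keeps the result inside that window (24 MHz / 2 = 12 MHz), which matches the bring-up path in the hunk that programs DCHUBBUB_GLOBAL_TIMER_REFDIV to 1 when the global timer is found disabled.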