#define FIRMWARE_DCN_351_DMUB "amdgpu/dcn_3_5_1_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_DCN_351_DMUB);
+#define FIRMWARE_DCN_401_DMUB "amdgpu/dcn_4_0_1_dmcub.bin"
+MODULE_FIRMWARE(FIRMWARE_DCN_401_DMUB);
+
/* Number of bytes in PSP header for firmware. */
#define PSP_HEADER_BYTES 0x100
case IP_VERSION(3, 1, 4):
case IP_VERSION(3, 5, 0):
case IP_VERSION(3, 5, 1):
+ case IP_VERSION(4, 0, 1):
hw_params.dpia_supported = true;
hw_params.disable_dpia = adev->dm.dc->debug.dpia_debug.bits.disable_dpia;
break;
if (amdgpu_dc_debug_mask & DC_FORCE_SUBVP_MCLK_SWITCH)
adev->dm.dc->debug.force_subvp_mclk_switch = true;
- if (amdgpu_dc_debug_mask & DC_ENABLE_DML2)
+ if (amdgpu_dc_debug_mask & DC_ENABLE_DML2) {
adev->dm.dc->debug.using_dml2 = true;
+ adev->dm.dc->debug.using_dml21 = true;
+ }
adev->dm.dc->debug.visual_confirm = amdgpu_dc_visual_confirm;
case IP_VERSION(3, 2, 1):
case IP_VERSION(3, 5, 0):
case IP_VERSION(3, 5, 1):
+ case IP_VERSION(4, 0, 1):
return 0;
default:
break;
case IP_VERSION(3, 5, 1):
dmub_asic = DMUB_ASIC_DCN35;
break;
+ case IP_VERSION(4, 0, 1):
+ dmub_asic = DMUB_ASIC_DCN401;
+ break;
+
default:
/* ASIC doesn't support DMUB. */
return 0;
case IP_VERSION(2, 1, 0):
case IP_VERSION(3, 5, 0):
case IP_VERSION(3, 5, 1):
+ case IP_VERSION(4, 0, 1):
if (register_outbox_irq_handlers(dm->adev)) {
DRM_ERROR("DM: Failed to initialize IRQ\n");
goto fail;
case IP_VERSION(3, 2, 1):
case IP_VERSION(3, 5, 0):
case IP_VERSION(3, 5, 1):
+ case IP_VERSION(4, 0, 1):
psr_feature_enabled = true;
break;
default:
case IP_VERSION(3, 2, 1):
case IP_VERSION(3, 5, 0):
case IP_VERSION(3, 5, 1):
+ case IP_VERSION(4, 0, 1):
if (dcn10_register_irq_handlers(dm->adev)) {
DRM_ERROR("DM: Failed to initialize IRQ\n");
goto fail;
case IP_VERSION(3, 5, 1):
fw_name_dmub = FIRMWARE_DCN_351_DMUB;
break;
+ case IP_VERSION(4, 0, 1):
+ fw_name_dmub = FIRMWARE_DCN_401_DMUB;
+ break;
default:
/* ASIC doesn't support DMUB. */
return 0;
case IP_VERSION(3, 2, 1):
case IP_VERSION(3, 5, 0):
case IP_VERSION(3, 5, 1):
+ case IP_VERSION(4, 0, 1):
adev->mode_info.num_crtc = 4;
adev->mode_info.num_hpd = 4;
adev->mode_info.num_dig = 4;
return true;
}
+#if defined(CONFIG_DRM_AMD_DC_FP)
static void update_dsc_caps(struct amdgpu_dm_connector *aconnector,
struct dc_sink *sink, struct dc_stream_state *stream,
struct dsc_dec_dpcd_caps *dsc_caps)
}
}
-
static void apply_dsc_policy_for_edp(struct amdgpu_dm_connector *aconnector,
struct dc_sink *sink, struct dc_stream_state *stream,
struct dsc_dec_dpcd_caps *dsc_caps,
}
}
-
static void apply_dsc_policy_for_stream(struct amdgpu_dm_connector *aconnector,
struct dc_sink *sink, struct dc_stream_state *stream,
struct dsc_dec_dpcd_caps *dsc_caps)
if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_bits_per_pixel)
stream->timing.dsc_cfg.bits_per_pixel = aconnector->dsc_settings.dsc_bits_per_pixel;
}
+#endif
static struct dc_stream_state *
create_stream_for_sink(struct drm_connector *connector,
int mode_refresh;
int preferred_refresh = 0;
enum color_transfer_func tf = TRANSFER_FUNC_UNKNOWN;
+#if defined(CONFIG_DRM_AMD_DC_FP)
struct dsc_dec_dpcd_caps dsc_caps;
-
+#endif
struct dc_link *link = NULL;
struct dc_sink *sink = NULL;
stream->timing = *aconnector->timing_requested;
}
+#if defined(CONFIG_DRM_AMD_DC_FP)
/* SST DSC determination policy */
update_dsc_caps(aconnector, sink, stream, &dsc_caps);
if (aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE && dsc_caps.is_dsc_supported)
apply_dsc_policy_for_stream(aconnector, sink, stream, &dsc_caps);
+#endif
update_stream_scaling_settings(&mode, dm_state, stream);
}
}
+#if defined(CONFIG_DRM_AMD_DC_FP)
if (dc_resource_is_dsc_encoding_supported(dc)) {
ret = pre_validate_dsc(state, &dm_state, vars);
if (ret != 0)
goto fail;
}
+#endif
/* Run this here since we want to validate the streams we created */
ret = drm_atomic_helper_check_planes(dev, state);
goto fail;
}
+#if defined(CONFIG_DRM_AMD_DC_FP)
if (dc_resource_is_dsc_encoding_supported(dc)) {
ret = compute_mst_dsc_configs_for_state(state, dm_state->context, vars);
if (ret) {
goto fail;
}
}
+#endif
ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context, vars);
if (ret) {
return false;
}
+#if defined(CONFIG_DRM_AMD_DC_FP)
static bool is_synaptics_cascaded_panamera(struct dc_link *link, struct drm_dp_mst_port *port)
{
u8 branch_vendor_data[4] = { 0 }; // Vendor data 0x50C ~ 0x50F
return true;
}
+#endif
static bool retrieve_downstream_port_device(struct amdgpu_dm_connector *aconnector)
{
amdgpu_dm_update_freesync_caps(
connector, aconnector->edid);
+#if defined(CONFIG_DRM_AMD_DC_FP)
if (!validate_dsc_caps_on_connector(aconnector))
memset(&aconnector->dc_sink->dsc_caps,
0, sizeof(aconnector->dc_sink->dsc_caps));
+#endif
if (!retrieve_downstream_port_device(aconnector))
memset(&aconnector->mst_downstream_port_present,
struct amdgpu_dm_connector *aconnector;
};
+#if defined(CONFIG_DRM_AMD_DC_FP)
static int kbps_to_peak_pbn(int kbps)
{
u64 peak_kbps = kbps;
return bw_range->max_target_bpp_x16 && bw_range->min_target_bpp_x16;
}
+#endif
enum dc_status dm_dp_mst_is_port_support_mode(
struct amdgpu_dm_connector *aconnector,
struct dc_stream_state *stream)
{
- int pbn, branch_max_throughput_mps = 0;
+ int branch_max_throughput_mps = 0;
+#if defined(CONFIG_DRM_AMD_DC_FP)
struct dc_link_settings cur_link_settings;
+ int pbn;
unsigned int end_to_end_bw_in_kbps = 0;
unsigned int upper_link_bw_in_kbps = 0, down_link_bw_in_kbps = 0;
struct dc_dsc_bw_range bw_range = {0};
return DC_FAIL_BANDWIDTH_VALIDATE;
}
}
-
/* check MST DSC output bandwidth against branch_overall_throughput_0_mps */
switch (stream->timing.pixel_encoding) {
case PIXEL_ENCODING_RGB:
default:
break;
}
+#endif
if (branch_max_throughput_mps != 0 &&
((stream->timing.pix_clk_100hz / 10) > branch_max_throughput_mps * 1000))
DC_LIBS += dcn32
DC_LIBS += dcn321
DC_LIBS += dcn35
+DC_LIBS += dcn401
DC_LIBS += dml
DC_LIBS += dml2
endif
DC_LIBS += hdcp
+ifdef CONFIG_DRM_AMD_DC_FP
+DC_LIBS += spl
+DC_SPL_TRANS += dc_spl_translate.o
+endif
+
AMD_DC = $(addsuffix /Makefile, $(addprefix $(FULL_AMD_DISPLAY_PATH)/dc/,$(DC_LIBS)))
include $(AMD_DC)
AMD_DM_REG_UPDATE = $(addprefix $(AMDDALPATH)/dc/,dc_helper.o)
+AMD_DC_SPL_TRANS = $(addprefix $(AMDDALPATH)/dc/,$(DC_SPL_TRANS))
+
AMD_DISPLAY_FILES += $(AMD_DISPLAY_CORE)
AMD_DISPLAY_FILES += $(AMD_DM_REG_UPDATE)
AMD_DISPLAY_EDID = $(addprefix $(AMDDALPATH)/dc/,$(DC_EDID))
AMD_DISPLAY_FILES += $(AMD_DISPLAY_DMUB) $(AMD_DISPLAY_EDID)
+AMD_DISPLAY_FILES += $(AMD_DC_SPL_TRANS)
struct bios_parser *bp,
struct dc_firmware_info *info);
+static enum bp_result get_firmware_info_v3_5(
+ struct bios_parser *bp,
+ struct dc_firmware_info *info);
+
static struct atom_hpd_int_record *get_hpd_record(struct bios_parser *bp,
struct atom_display_object_path_v2 *object);
case 4:
result = get_firmware_info_v3_4(bp, info);
break;
+ case 5:
+ result = get_firmware_info_v3_5(bp, info);
+ break;
default:
break;
}
return BP_RESULT_OK;
}
+static enum bp_result get_firmware_info_v3_5(
+ struct bios_parser *bp,
+ struct dc_firmware_info *info)
+{
+ struct atom_firmware_info_v3_5 *firmware_info;
+ struct atom_common_table_header *header;
+ struct atom_data_revision revision;
+ struct atom_display_controller_info_v4_5 *dce_info_v4_5 = NULL;
+
+ if (!info)
+ return BP_RESULT_BADINPUT;
+
+ firmware_info = GET_IMAGE(struct atom_firmware_info_v3_5,
+ DATA_TABLES(firmwareinfo));
+
+ if (!firmware_info)
+ return BP_RESULT_BADBIOSTABLE;
+
+ memset(info, 0, sizeof(*info));
+
+ if (firmware_info->board_i2c_feature_id == 0x2) {
+ info->oem_i2c_present = true;
+ info->oem_i2c_obj_id = firmware_info->board_i2c_feature_gpio_id;
+ } else {
+ info->oem_i2c_present = false;
+ }
+
+ header = GET_IMAGE(struct atom_common_table_header,
+ DATA_TABLES(dce_info));
+
+ get_atom_data_table_revision(header, &revision);
+
+ switch (revision.major) {
+ case 4:
+ switch (revision.minor) {
+ case 5:
+ dce_info_v4_5 = GET_IMAGE(struct atom_display_controller_info_v4_5,
+ DATA_TABLES(dce_info));
+
+ if (!dce_info_v4_5)
+ return BP_RESULT_BADBIOSTABLE;
+
+ /* 100MHz expected */
+ info->pll_info.crystal_frequency = dce_info_v4_5->dce_refclk_10khz * 10;
+ break;
+ default:
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+
+ return BP_RESULT_OK;
+}
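+/* Unit note: dce_refclk_10khz is in 10 kHz units, so the expected 100 MHz
+ * reference reads back as 10000 and crystal_frequency is stored as
+ * 10000 * 10 = 100000 kHz.
+ */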
+
static enum bp_result bios_parser_get_encoder_cap_info(
struct dc_bios *dcb,
struct graphics_object_id object_id,
return result;
}
+static enum bp_result get_vram_info_from_umc_info_v40(
+ struct bios_parser *bp,
+ struct dc_vram_info *info)
+{
+ struct atom_umc_info_v4_0 *info_v40;
+ enum bp_result result = BP_RESULT_OK;
+
+ info_v40 = GET_IMAGE(struct atom_umc_info_v4_0,
+ DATA_TABLES(umc_info));
+
+ if (info_v40 == NULL)
+ return BP_RESULT_BADBIOSTABLE;
+
+ info->num_chans = info_v40->channel_num;
+ info->dram_channel_width_bytes = (1 << info_v40->channel_width) / 8;
+
+ return result;
+}
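+/* For example, channel_width is the log2 of the channel width in bits, so
+ * channel_width = 6 describes a 64-bit channel and
+ * dram_channel_width_bytes = (1 << 6) / 8 = 8.
+ */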
+
/*
* get_integrated_info_v11
*
struct atom_common_table_header *header;
struct atom_data_revision revision;
- if (info && DATA_TABLES(vram_info)) {
+ // VRAM info moved to umc_info for DCN4x
+ if (info && DATA_TABLES(umc_info)) {
+ header = GET_IMAGE(struct atom_common_table_header,
+ DATA_TABLES(umc_info));
+
+ get_atom_data_table_revision(header, &revision);
+
+ switch (revision.major) {
+ case 4:
+ switch (revision.minor) {
+ case 0:
+ result = get_vram_info_from_umc_info_v40(bp, info);
+ break;
+ default:
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+ }
+
+ if (result != BP_RESULT_OK && info && DATA_TABLES(vram_info)) {
header = GET_IMAGE(struct atom_common_table_header,
DATA_TABLES(vram_info));
bp->base.integrated_info = bios_parser_create_integrated_info(&bp->base);
bp->base.fw_info_valid = bios_parser_get_firmware_info(&bp->base, &bp->base.fw_info) == BP_RESULT_OK;
bios_parser_get_vram_info(&bp->base, &bp->base.vram_info);
-
+ bios_parser_get_soc_bb_info(&bp->base, &bp->base.bb_info);
return true;
}
case DCN_VERSION_3_21:
case DCN_VERSION_3_5:
case DCN_VERSION_3_51:
+ case DCN_VERSION_4_01:
*h = dal_cmd_tbl_helper_dce112_get_table2();
return true;
AMD_DAL_CLK_MGR_DCN35 = $(addprefix $(AMDDALPATH)/dc/clk_mgr/dcn35/,$(CLK_MGR_DCN35))
AMD_DISPLAY_FILES += $(AMD_DAL_CLK_MGR_DCN35)
+
+###############################################################################
+# DCN401
+###############################################################################
+CLK_MGR_DCN401 = dcn401_clk_mgr.o dcn401_clk_mgr_smu_msg.o
+
+AMD_DAL_CLK_MGR_DCN401 = $(addprefix $(AMDDALPATH)/dc/clk_mgr/dcn401/,$(CLK_MGR_DCN401))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_CLK_MGR_DCN401)
endif
#include "dcn316/dcn316_clk_mgr.h"
#include "dcn32/dcn32_clk_mgr.h"
#include "dcn35/dcn35_clk_mgr.h"
+#include "dcn401/dcn401_clk_mgr.h"
int clk_mgr_helper_get_active_display_cnt(
struct dc *dc,
}
break;
+ case AMDGPU_FAMILY_GC_12_0_0: {
+ struct clk_mgr_internal *clk_mgr = kzalloc(sizeof(*clk_mgr), GFP_KERNEL);
+
+ if (clk_mgr == NULL) {
+ BREAK_TO_DEBUGGER();
+ return NULL;
+ }
+
+ dcn401_clk_mgr_construct(ctx, clk_mgr, pp_smu, dccg);
+ return &clk_mgr->base;
+ }
+ break;
#endif /* CONFIG_DRM_AMD_DC_FP */
default:
ASSERT(0); /* Unknown Asic */
case AMDGPU_FAMILY_GC_11_5_0:
dcn35_clk_mgr_destroy(clk_mgr);
break;
+ case AMDGPU_FAMILY_GC_12_0_0:
+ dcn401_clk_mgr_destroy(clk_mgr);
+ break;
default:
break;
#include "hw_sequencer_private.h"
+#if defined(CONFIG_DRM_AMD_DC_FP)
#include "dml2/dml2_internal_types.h"
+#endif
#include "dce/dmub_outbox.h"
get_subvp_visual_confirm_color(pipe_ctx, &(pipe_ctx->visual_confirm_color));
else if (dc->debug.visual_confirm == VISUAL_CONFIRM_MCLK_SWITCH)
get_mclk_switch_visual_confirm_color(pipe_ctx, &(pipe_ctx->visual_confirm_color));
+ else if (dc->debug.visual_confirm == VISUAL_CONFIRM_FAMS2)
+ get_fams2_visual_confirm_color(dc, context, pipe_ctx, &(pipe_ctx->visual_confirm_color));
}
}
}
DC_LOG_DC("Display Core initialized\n");
-
-
return dc;
destruct_dc:
*/
if (dc->hwss.subvp_pipe_control_lock)
dc->hwss.subvp_pipe_control_lock(dc, context, true, true, NULL, subvp_prev_use);
+ if (dc->hwss.fams2_global_control_lock)
+ dc->hwss.fams2_global_control_lock(dc, context, true);
if (dc->hwss.update_dsc_pg)
dc->hwss.update_dsc_pg(dc, context, false);
dc->hwss.commit_subvp_config(dc, context);
if (dc->hwss.subvp_pipe_control_lock)
dc->hwss.subvp_pipe_control_lock(dc, context, false, true, NULL, subvp_prev_use);
+ if (dc->hwss.fams2_global_control_lock)
+ dc->hwss.fams2_global_control_lock(dc, context, false);
for (i = 0; i < context->stream_count; i++) {
const struct dc_link *link = context->streams[i]->link;
elevate_update_type(&overall_type, UPDATE_TYPE_MED);
}
+ if (u->cm2_params) {
+ if ((u->cm2_params->component_settings.shaper_3dlut_setting
+ != u->surface->mcm_shaper_3dlut_setting)
+ || (u->cm2_params->component_settings.lut1d_enable
+ != u->surface->mcm_lut1d_enable))
+ update_flags->bits.mcm_transfer_function_enable_change = 1;
+ if (u->cm2_params->cm2_luts.lut3d_data.lut3d_src
+ != u->surface->mcm_luts.lut3d_data.lut3d_src)
+ update_flags->bits.mcm_transfer_function_enable_change = 1;
+ }
if (update_flags->bits.in_transfer_func_change) {
type = UPDATE_TYPE_MED;
elevate_update_type(&overall_type, type);
type = UPDATE_TYPE_FULL;
elevate_update_type(&overall_type, type);
}
+ if (update_flags->bits.mcm_transfer_function_enable_change) {
+ type = UPDATE_TYPE_FULL;
+ elevate_update_type(&overall_type, type);
+ }
if (dc->debug.enable_legacy_fast_update &&
(update_flags->bits.gamma_change ||
if (srf_update->gamut_remap_matrix)
surface->gamut_remap_matrix =
*srf_update->gamut_remap_matrix;
+ if (srf_update->cm2_params) {
+ surface->mcm_shaper_3dlut_setting = srf_update->cm2_params->component_settings.shaper_3dlut_setting;
+ surface->mcm_lut1d_enable = srf_update->cm2_params->component_settings.lut1d_enable;
+ surface->mcm_luts = srf_update->cm2_params->cm2_luts;
+ }
+ if (srf_update->cursor_csc_color_matrix)
+ surface->cursor_csc_color_matrix =
+ *srf_update->cursor_csc_color_matrix;
}
static void copy_stream_update_to_stream(struct dc *dc,
}
}
+static bool check_address_only_update(union surface_update_flags update_flags)
+{
+ union surface_update_flags addr_only_update_flags;
+ addr_only_update_flags.raw = 0;
+ addr_only_update_flags.bits.addr_update = 1;
+
+ return update_flags.bits.addr_update &&
+ !(update_flags.raw & ~addr_only_update_flags.raw);
+}
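+/* Example: with only bits.addr_update set, update_flags.raw equals
+ * addr_only_update_flags.raw, so the masked term is zero and this returns
+ * true; if any other bit (e.g. bits.scaling_change) is also set, the mask
+ * leaves a nonzero remainder and this returns false.
+ */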
/**
* build_dmub_cmd_list() - Build an array of DMCUB commands to be sent to DMCUB
build_dmub_update_dirty_rect(dc, surface_count, stream, srf_updates, context, dc_dmub_cmd, dmub_cmd_count);
}
+static void commit_plane_for_stream_offload_fams2_flip(struct dc *dc,
+ struct dc_surface_update *srf_updates,
+ int surface_count,
+ struct dc_stream_state *stream,
+ struct dc_state *context)
+{
+ int i, j;
+
+ /* update dirty rect for PSR */
+ dc_dmub_update_dirty_rect(dc, surface_count, stream,
+ srf_updates, context);
+
+ /* Perform requested Updates */
+ for (i = 0; i < surface_count; i++) {
+ struct dc_plane_state *plane_state = srf_updates[i].surface;
+
+ /* set offload flag so driver does not program address */
+ plane_state->address.offload_flip = true;
+
+ for (j = 0; j < dc->res_pool->pipe_count; j++) {
+ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
+
+ if (!should_update_pipe_for_stream(context, pipe_ctx, stream))
+ continue;
+
+ if (!should_update_pipe_for_plane(context, pipe_ctx, plane_state))
+ continue;
+
+ /* update pipe context for plane */
+ if (pipe_ctx->plane_state->update_flags.bits.addr_update)
+ dc->hwss.update_plane_addr(dc, pipe_ctx);
+ }
+ }
+
+ /* Send commands to DMCUB */
+ dc_dmub_srv_fams2_passthrough_flip(dc,
+ context,
+ stream,
+ srf_updates,
+ surface_count);
+
+ /* reset offload flip flag */
+ for (i = 0; i < surface_count; i++) {
+ struct dc_plane_state *plane_state = srf_updates[i].surface;
+ plane_state->address.offload_flip = false;
+ }
+}
+
static void commit_planes_for_stream_fast(struct dc *dc,
struct dc_surface_update *srf_updates,
int surface_count,
int i, j;
struct pipe_ctx *top_pipe_to_program = NULL;
struct dc_stream_status *stream_status = NULL;
+ bool should_offload_fams2_flip = false;
+
+ if (dc->debug.fams2_config.bits.enable &&
+ dc->debug.fams2_config.bits.enable_offload_flip &&
+ dc_state_is_fams2_in_use(dc, context)) {
+ /* if not offloading to HWFQ, offload to FAMS2 if needed */
+ should_offload_fams2_flip = true;
+ for (i = 0; i < surface_count; i++) {
+ if (srf_updates[i].surface &&
+ srf_updates[i].surface->update_flags.raw &&
+ !check_address_only_update(srf_updates[i].surface->update_flags)) {
+ /* more than address update, need to acquire FAMS2 lock */
+ should_offload_fams2_flip = false;
+ break;
+ }
+ }
+ }
dc_exit_ips_for_hw_access(dc);
continue;
pipe_ctx->plane_state->triplebuffer_flips = false;
if (update_type == UPDATE_TYPE_FAST &&
- dc->hwss.program_triplebuffer &&
+ dc->hwss.program_triplebuffer != NULL &&
!pipe_ctx->plane_state->flip_immediate && dc->debug.enable_tri_buf) {
/*triple buffer for VUpdate only*/
pipe_ctx->plane_state->triplebuffer_flips = true;
stream_status = dc_state_get_stream_status(context, stream);
+ if (should_offload_fams2_flip) {
+ commit_plane_for_stream_offload_fams2_flip(dc,
+ srf_updates,
+ surface_count,
+ stream,
+ context);
+ } else {
+ build_dmub_cmd_list(dc,
+ srf_updates,
+ surface_count,
+ stream,
+ context,
+ context->dc_dmub_cmd,
+ &(context->dmub_cmd_count));
+ hwss_build_fast_sequence(dc,
+ context->dc_dmub_cmd,
+ context->dmub_cmd_count,
+ context->block_sequence,
+ &(context->block_sequence_steps),
+ top_pipe_to_program,
+ stream_status,
+ context);
+ hwss_execute_sequence(dc,
+ context->block_sequence,
+ context->block_sequence_steps);
+ }
+
- build_dmub_cmd_list(dc,
- srf_updates,
- surface_count,
if (should_lock_all_pipes && dc->hwss.interdependent_update_lock) {
if (dc->hwss.subvp_pipe_control_lock)
- dc->hwss.subvp_pipe_control_lock(dc, context, true, should_lock_all_pipes, NULL, subvp_prev_use);
- dc->hwss.interdependent_update_lock(dc, context, true);
+ dc->hwss.subvp_pipe_control_lock(dc, context, true, should_lock_all_pipes, NULL, subvp_prev_use);
+
+ if (dc->hwss.fams2_global_control_lock)
+ dc->hwss.fams2_global_control_lock(dc, context, true);
+ dc->hwss.interdependent_update_lock(dc, context, true);
} else {
if (dc->hwss.subvp_pipe_control_lock)
dc->hwss.subvp_pipe_control_lock(dc, context, true, should_lock_all_pipes, top_pipe_to_program, subvp_prev_use);
+
+ if (dc->hwss.fams2_global_control_lock)
+ dc->hwss.fams2_global_control_lock(dc, context, true);
+
/* Lock the top pipe while updating plane addrs, since freesync requires
* plane addr update event triggers to be synchronized.
* top_pipe_to_program is expected to never be NULL
if (dc->hwss.subvp_pipe_control_lock)
dc->hwss.subvp_pipe_control_lock(dc, context, false, should_lock_all_pipes,
NULL, subvp_prev_use);
+
+ if (dc->hwss.fams2_global_control_lock)
+ dc->hwss.fams2_global_control_lock(dc, context, false);
+
return;
}
if (should_lock_all_pipes && dc->hwss.interdependent_update_lock) {
if (dc->hwss.subvp_pipe_control_lock)
dc->hwss.subvp_pipe_control_lock(dc, context, false, should_lock_all_pipes, NULL, subvp_prev_use);
+ if (dc->hwss.fams2_global_control_lock)
+ dc->hwss.fams2_global_control_lock(dc, context, false);
} else {
if (dc->hwss.subvp_pipe_control_lock)
dc->hwss.subvp_pipe_control_lock(dc, context, false, should_lock_all_pipes, top_pipe_to_program, subvp_prev_use);
+ if (dc->hwss.fams2_global_control_lock)
+ dc->hwss.fams2_global_control_lock(dc, context, false);
}
// Fire manual trigger only when bottom plane is flipped
fast_update[i].gamut_remap_matrix = srf_updates[i].gamut_remap_matrix;
fast_update[i].input_csc_color_matrix = srf_updates[i].input_csc_color_matrix;
fast_update[i].coeff_reduction_factor = srf_updates[i].coeff_reduction_factor;
+ fast_update[i].cursor_csc_color_matrix = srf_updates[i].cursor_csc_color_matrix;
}
}
fast_update[i].gamma ||
fast_update[i].gamut_remap_matrix ||
fast_update[i].input_csc_color_matrix ||
+ fast_update[i].cursor_csc_color_matrix ||
fast_update[i].coeff_reduction_factor)
return true;
}
srf_updates[i].surface->force_full_update ||
(srf_updates[i].flip_addr &&
srf_updates[i].flip_addr->address.tmz_surface != srf_updates[i].surface->address.tmz_surface) ||
+ (srf_updates[i].cm2_params &&
+ (srf_updates[i].cm2_params->component_settings.shaper_3dlut_setting != srf_updates[i].surface->mcm_shaper_3dlut_setting ||
+ srf_updates[i].cm2_params->component_settings.lut1d_enable != srf_updates[i].surface->mcm_lut1d_enable)) ||
!is_surface_in_context(context, srf_updates[i].surface)))
return true;
}
* specially handle compatibility problems with transitions among those
* features as they are now transparent to the new sequence.
*/
- if (dc->ctx->dce_version > DCN_VERSION_3_51)
+ if (dc->ctx->dce_version > DCN_VERSION_4_01)
return update_planes_and_stream_v3(dc, srf_updates,
surface_count, stream, stream_update);
return update_planes_and_stream_v2(dc, srf_updates,
* we get more confident about this change we'll need to enable
* the new sequence for all ASICs.
*/
- if (dc->ctx->dce_version > DCN_VERSION_3_51) {
+ if (dc->ctx->dce_version > DCN_VERSION_4_01) {
update_planes_and_stream_v3(dc, srf_updates, surface_count,
stream, stream_update);
return;
return profile;
}
+/* Need to account for padding due to pixel-to-symbol packing
+ * for uncompressed 128b/132b streams.
+ */
+static uint32_t apply_128b_132b_stream_overhead(
+ const struct dc_crtc_timing *timing, const uint32_t kbps)
+{
+ uint32_t total_kbps = kbps;
+#if defined(CONFIG_DRM_AMD_DC_FP)
+ if (dc_get_disable_128b_132b_stream_overhead())
+ return kbps;
+#endif
+
+ if (!timing->flags.DSC) {
+ struct fixed31_32 bpp;
+ struct fixed31_32 overhead_factor;
+
+ bpp = dc_fixpt_from_int(kbps);
+ bpp = dc_fixpt_div_int(bpp, timing->pix_clk_100hz / 10);
+
+ /* Symbols_per_HActive = HActive * bpp / (4 lanes * 32-bit symbol size)
+ * Overhead_factor = ceil(Symbols_per_HActive) / Symbols_per_HActive
+ */
+ overhead_factor = dc_fixpt_from_int(timing->h_addressable);
+ overhead_factor = dc_fixpt_mul(overhead_factor, bpp);
+ overhead_factor = dc_fixpt_div_int(overhead_factor, 128);
+ overhead_factor = dc_fixpt_div(
+ dc_fixpt_from_int(dc_fixpt_ceil(overhead_factor)),
+ overhead_factor);
+
+ total_kbps = dc_fixpt_ceil(
+ dc_fixpt_mul_int(overhead_factor, total_kbps));
+ }
+
+ return total_kbps;
+}
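+/* Worked example (hypothetical timing): a 1366-pixel-wide 8 bpc RGB stream
+ * carries bpp = 24, so Symbols_per_HActive = 1366 * 24 / 128 = 256.125 and
+ * the overhead factor is ceil(256.125) / 256.125 = 257 / 256.125 ~= 1.0034,
+ * i.e. about 0.34% extra bandwidth to cover end-of-line symbol padding.
+ */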
+
+uint32_t dc_bandwidth_in_kbps_from_timing(
+ const struct dc_crtc_timing *timing,
+ const enum dc_link_encoding_format link_encoding)
+{
+ uint32_t bits_per_channel = 0;
+ uint32_t kbps;
+
+#if defined(CONFIG_DRM_AMD_DC_FP)
+ if (timing->flags.DSC)
+ return dc_dsc_stream_bandwidth_in_kbps(timing,
+ timing->dsc_cfg.bits_per_pixel,
+ timing->dsc_cfg.num_slices_h,
+ timing->dsc_cfg.is_dp);
+#endif
+
+ switch (timing->display_color_depth) {
+ case COLOR_DEPTH_666:
+ bits_per_channel = 6;
+ break;
+ case COLOR_DEPTH_888:
+ bits_per_channel = 8;
+ break;
+ case COLOR_DEPTH_101010:
+ bits_per_channel = 10;
+ break;
+ case COLOR_DEPTH_121212:
+ bits_per_channel = 12;
+ break;
+ case COLOR_DEPTH_141414:
+ bits_per_channel = 14;
+ break;
+ case COLOR_DEPTH_161616:
+ bits_per_channel = 16;
+ break;
+ default:
+ ASSERT(bits_per_channel != 0);
+ bits_per_channel = 8;
+ break;
+ }
+
+ kbps = timing->pix_clk_100hz / 10;
+ kbps *= bits_per_channel;
+
+ if (timing->flags.Y_ONLY != 1) {
+ /* Y-only needs only 1/3 the bandwidth of RGB, so scale up all other encodings by 3 */
+ kbps *= 3;
+ if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR420)
+ kbps /= 2;
+ else if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR422)
+ kbps = kbps * 2 / 3;
+ }
+
+ if (link_encoding == DC_LINK_ENCODING_DP_128b_132b)
+ kbps = apply_128b_132b_stream_overhead(timing, kbps);
+
+ if (link_encoding == DC_LINK_ENCODING_HDMI_FRL &&
+ timing->vic == 0 && timing->hdmi_vic == 0 &&
+ timing->frl_uncompressed_video_bandwidth_in_kbps != 0)
+ kbps = timing->frl_uncompressed_video_bandwidth_in_kbps;
+
+ return kbps;
+}
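+/* Worked example: 3840x2160@60 RGB 8 bpc with a 594 MHz pixel clock has
+ * pix_clk_100hz = 5940000, so kbps = 594000 * 8 * 3 = 14256000,
+ * i.e. ~14.26 Gbps of uncompressed stream bandwidth.
+ */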
}
}
+void get_fams2_visual_confirm_color(
+ struct dc *dc,
+ struct dc_state *context,
+ struct pipe_ctx *pipe_ctx,
+ struct tg_color *color)
+{
+ uint32_t color_value = MAX_TG_COLOR_VALUE;
+
+ if (!dc->ctx || !dc->ctx->dmub_srv || !pipe_ctx || !context || !dc->debug.fams2_config.bits.enable)
+ return;
+
+ /* driver only handles visual confirm when FAMS2 is disabled */
+ if (!dc_state_is_fams2_in_use(dc, context)) {
+ /* when FAMS2 is disabled, all pipes are grey */
+ color->color_g_y = color_value / 2;
+ color->color_b_cb = color_value / 2;
+ color->color_r_cr = color_value / 2;
+ }
+}
+
void hwss_build_fast_sequence(struct dc *dc,
struct dc_dmub_cmd *dc_dmub_cmd,
unsigned int dmub_cmd_count,
block_sequence[*num_steps].func = DMUB_SUBVP_PIPE_CONTROL_LOCK_FAST;
(*num_steps)++;
}
+ if (dc->hwss.fams2_global_control_lock_fast) {
+ block_sequence[*num_steps].params.fams2_global_control_lock_fast_params.dc = dc;
+ block_sequence[*num_steps].params.fams2_global_control_lock_fast_params.lock = true;
+ block_sequence[*num_steps].params.fams2_global_control_lock_fast_params.is_required = dc_state_is_fams2_in_use(dc, context);
+ block_sequence[*num_steps].func = DMUB_FAMS2_GLOBAL_CONTROL_LOCK_FAST;
+ (*num_steps)++;
+ }
if (dc->hwss.pipe_control_lock) {
block_sequence[*num_steps].params.pipe_control_lock_params.dc = dc;
block_sequence[*num_steps].params.pipe_control_lock_params.lock = true;
block_sequence[*num_steps].func = DMUB_SUBVP_PIPE_CONTROL_LOCK_FAST;
(*num_steps)++;
}
+ if (dc->hwss.fams2_global_control_lock_fast) {
+ block_sequence[*num_steps].params.fams2_global_control_lock_fast_params.dc = dc;
+ block_sequence[*num_steps].params.fams2_global_control_lock_fast_params.lock = false;
+ block_sequence[*num_steps].params.fams2_global_control_lock_fast_params.is_required = dc_state_is_fams2_in_use(dc, context);
+ block_sequence[*num_steps].func = DMUB_FAMS2_GLOBAL_CONTROL_LOCK_FAST;
+ (*num_steps)++;
+ }
current_pipe = pipe_ctx;
while (current_pipe) {
case DMUB_SUBVP_SAVE_SURF_ADDR:
hwss_subvp_save_surf_addr(params);
break;
+ case DMUB_FAMS2_GLOBAL_CONTROL_LOCK_FAST:
+ dc->hwss.fams2_global_control_lock_fast(params);
+ break;
default:
ASSERT(false);
break;
plane_state->color_space,
NULL);
}
+
+ if (dpp && dpp->funcs->set_cursor_matrix) {
+ dpp->funcs->set_cursor_matrix(dpp,
+ plane_state->color_space,
+ plane_state->cursor_csc_color_matrix);
+ }
}
void hwss_program_bias_and_scale(union block_sequence_params *params)
#include "dcn321/dcn321_resource.h"
#include "dcn35/dcn35_resource.h"
#include "dcn351/dcn351_resource.h"
+#include "dcn401/dcn401_resource.h"
+#if defined(CONFIG_DRM_AMD_DC_FP)
+#include "dc_spl_translate.h"
+#endif
#define VISUAL_CONFIRM_BASE_DEFAULT 3
#define VISUAL_CONFIRM_BASE_MIN 1
if (ASICREV_IS_GC_11_0_4(asic_id.hw_internal_rev))
dc_version = DCN_VERSION_3_51;
break;
+ case AMDGPU_FAMILY_GC_12_0_0:
+ if (ASICREV_IS_DCN401(asic_id.hw_internal_rev))
+ dc_version = DCN_VERSION_4_01;
+ break;
default:
dc_version = DCE_VERSION_UNKNOWN;
break;
case DCN_VERSION_3_51:
res_pool = dcn351_create_resource_pool(init_data, dc);
break;
+ case DCN_VERSION_4_01:
+ res_pool = dcn401_create_resource_pool(init_data, dc);
+ break;
#endif /* CONFIG_DRM_AMD_DC_FP */
default:
break;
pipe_ctx->plane_res.scl_data.format = convert_pixel_format_to_dalsurface(
pipe_ctx->plane_state->format);
+ if (pipe_ctx->stream->ctx->dc->config.use_spl) {
+#if defined(CONFIG_DRM_AMD_DC_FP)
+ struct spl_in *spl_in = &pipe_ctx->plane_res.spl_in;
+ struct spl_out *spl_out = &pipe_ctx->plane_res.spl_out;
+
+ if (plane_state->ctx->dce_version > DCE_VERSION_MAX)
+ pipe_ctx->plane_res.scl_data.lb_params.depth = LB_PIXEL_DEPTH_36BPP;
+ else
+ pipe_ctx->plane_res.scl_data.lb_params.depth = LB_PIXEL_DEPTH_30BPP;
+
+ pipe_ctx->plane_res.scl_data.lb_params.alpha_en = plane_state->per_pixel_alpha;
+ spl_out->scl_data.h_active = pipe_ctx->plane_res.scl_data.h_active;
+ spl_out->scl_data.v_active = pipe_ctx->plane_res.scl_data.v_active;
+
+ // Convert pipe_ctx to respective input params for SPL
+ translate_SPL_in_params_from_pipe_ctx(pipe_ctx, spl_in);
+ // Set SPL output parameters to dscl_prog_data to be used for hw registers
+ spl_out->dscl_prog_data = resource_get_dscl_prog_data(pipe_ctx);
+ // Calculate scaler parameters from SPL
+ res = spl_calculate_scaler_params(spl_in, spl_out);
+ // Convert respective out params from SPL to scaler data
+ translate_SPL_out_params_to_pipe_ctx(pipe_ctx, spl_out);
+#endif
+ } else {
+
/* depends on h_active */
calculate_recout(pipe_ctx);
/* depends on pixel format */
pipe_ctx->plane_res.scl_data.viewport.height = MIN_VIEWPORT_SIZE;
if (pipe_ctx->plane_res.scl_data.viewport.width < MIN_VIEWPORT_SIZE)
pipe_ctx->plane_res.scl_data.viewport.width = MIN_VIEWPORT_SIZE;
-
-
+ }
DC_LOG_SCALER("%s pipe %d:\nViewport: height:%d width:%d x:%d y:%d Recout: height:%d width:%d x:%d y:%d HACTIVE:%d VACTIVE:%d\n"
"src_rect: height:%d width:%d x:%d y:%d dst_rect: height:%d width:%d x:%d y:%d clip_rect: height:%d width:%d x:%d y:%d\n",
__func__,
return false;
}
+struct dscl_prog_data *resource_get_dscl_prog_data(struct pipe_ctx *pipe_ctx)
+{
+ return &pipe_ctx->plane_res.scl_data.dscl_prog_data;
+}
+
void resource_init_common_dml2_callbacks(struct dc *dc, struct dml2_configuration_options *dml2_options)
{
dml2_options->callbacks.dc = dc;
#include "resource.h"
#include "link_enc_cfg.h"
+#if defined(CONFIG_DRM_AMD_DC_FP)
#include "dml2/dml2_wrapper.h"
#include "dml2/dml2_internal_types.h"
+#endif
#define DC_LOGGER \
dc->ctx->logger
return stream;
}
+bool dc_state_is_fams2_in_use(
+ const struct dc *dc,
+ const struct dc_state *state)
+{
+ bool is_fams2_in_use = false;
+
+ if (state)
+ is_fams2_in_use |= state->bw_ctx.bw.dcn.fams2_stream_count > 0;
+
+ if (dc->current_state)
+ is_fams2_in_use |= dc->current_state->bw_ctx.bw.dcn.fams2_stream_count > 0;
+
+ return is_fams2_in_use;
+}
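+/* Both the incoming state and dc->current_state are checked so FAMS2 is
+ * treated as in use across a transition, e.g. while an update takes
+ * fams2_stream_count from 2 down to 0.
+ */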
bool zstate_support;
bool ips_support;
uint32_t num_of_internal_disp;
+ uint32_t max_dwb_htap;
+ uint32_t max_dwb_vtap;
enum dp_protocol_version max_dp_protocol_version;
+ bool spdif_aud;
unsigned int mall_size_per_mem_channel;
unsigned int mall_size_total;
unsigned int cursor_cache_size;
uint32_t max_v_total;
uint32_t max_disp_clock_khz_at_vmin;
uint8_t subvp_drr_vblank_start_margin_us;
+ bool cursor_not_scaled;
};
struct dc_bug_wa {
uint8_t dcfclk : 1;
uint8_t dcfclk_ds: 1;
} clock_update_disable_mask;
+ // Customer-specific WAs
+ uint32_t force_backlight_start_level;
};
struct dc_dcc_surface_param {
struct dc_size surface_size;
enum surface_pixel_format format;
- enum swizzle_mode_values swizzle_mode;
+ unsigned int plane0_pitch;
+ struct dc_size plane1_size;
+ unsigned int plane1_pitch;
+ union {
+ enum swizzle_mode_values swizzle_mode;
+ enum swizzle_mode_addr3_values swizzle_mode_addr3;
+ };
enum dc_scan_direction scan;
};
struct dc_plane_state;
struct dc_state;
-
struct dc_cap_funcs {
bool (*get_dcc_compression_cap)(const struct dc *dc,
const struct dc_dcc_surface_param *input,
bool is_asymmetric_memory;
bool is_single_rank_dimm;
bool is_vmin_only_asic;
+ bool use_spl;
+ bool prefer_easf;
bool use_pipe_ctx_sync_logic;
bool ignore_dpref_ss;
bool enable_mipi_converter_optimization;
VISUAL_CONFIRM_REPLAY = 12,
VISUAL_CONFIRM_SUBVP = 14,
VISUAL_CONFIRM_MCLK_SWITCH = 16,
+ VISUAL_CONFIRM_FAMS2 = 19,
};
enum dc_psr_power_opts {
bool enable_single_display_2to1_odm_policy;
bool enable_double_buffered_dsc_pg_support;
bool enable_dp_dig_pixel_rate_div_policy;
+ bool using_dml21;
enum lttpr_mode lttpr_mode_override;
unsigned int dsc_delay_factor_wa_x1000;
unsigned int min_prefetch_in_strobe_ns;
unsigned int static_screen_wait_frames;
bool force_chroma_subsampling_1tap;
bool disable_422_left_edge_pixel;
+ bool dml21_force_pstate_method;
+ uint32_t dml21_force_pstate_method_value;
+ uint32_t dml21_disable_pstate_method_mask;
+ union dmub_fams2_global_feature_config fams2_config;
unsigned int force_cositing;
};
uint32_t stereo_format_change:1;
uint32_t lut_3d:1;
uint32_t tmz_changed:1;
+ uint32_t mcm_transfer_function_enable_change:1; /* disable or enable MCM transfer func */
uint32_t full_update:1;
} bits;
bool is_statically_allocated;
enum chroma_cositing cositing;
+ enum dc_cm2_shaper_3dlut_setting mcm_shaper_3dlut_setting;
+ bool mcm_lut1d_enable;
+ struct dc_cm2_func_luts mcm_luts;
+ bool lut_bank_a;
+ enum mpcc_movable_cm_location mcm_location;
+ struct dc_csc_transform cursor_csc_color_matrix;
+ bool adaptive_sharpness_en;
+ unsigned int sharpnessX1000;
+ enum linear_light_scaling linear_light_scaling;
};
struct dc_plane_info {
int global_alpha_value;
bool input_csc_enabled;
int layer_index;
+ bool front_buffer_rendering_active;
enum chroma_cositing cositing;
};
const struct fixed31_32 *coeff_reduction_factor;
struct dc_transfer_func *out_transfer_func;
struct dc_csc_transform *output_csc_transform;
+ const struct dc_csc_transform *cursor_csc_color_matrix;
};
struct dc_surface_update {
const struct dc_3dlut *lut3d_func;
const struct dc_transfer_func *blend_tf;
const struct colorspace_transform *gamut_remap_matrix;
+ /*
+ * Color Transformations for pre-blend MCM (Shaper, 3DLUT, 1DLUT)
+ *
+ * change cm2_params.component_settings: Full update
+ * change cm2_params.cm2_luts: Fast update
+ */
+ struct dc_cm2_parameters *cm2_params;
+ const struct dc_csc_transform *cursor_csc_color_matrix;
};
/*
uint32_t dc_get_opp_for_plane(struct dc *dc, struct dc_plane_state *plane);
void dc_set_disable_128b_132b_stream_overhead(bool disable);
+bool dc_get_disable_128b_132b_stream_overhead(void);
/* The function returns minimum bandwidth required to drive a given timing
* return - minimum required timing bandwidth in kbps.
union dpcd_sink_ext_caps dpcd_sink_ext_caps;
struct psr_settings psr_settings;
-
struct replay_settings replay_settings;
/* Drive settings read from integrated info table */
struct dc_firmware_info fw_info;
bool fw_info_valid;
struct dc_vram_info vram_info;
+ struct bp_soc_bb_info bb_info;
struct dc_golden_table golden_table;
};
return result;
}
+void dc_dmub_srv_fams2_update_config(struct dc *dc,
+ struct dc_state *context,
+ bool enable)
+{
+ uint8_t num_cmds = 1;
+ uint32_t i;
+ union dmub_rb_cmd cmd[MAX_STREAMS + 1];
+ struct dmub_rb_cmd_fams2 *global_cmd = &cmd[0].fams2_config;
+
+ memset(cmd, 0, sizeof(union dmub_rb_cmd) * (MAX_STREAMS + 1));
+ /* fill in generic command header */
+ global_cmd->header.type = DMUB_CMD__FW_ASSISTED_MCLK_SWITCH;
+ global_cmd->header.sub_type = DMUB_CMD__FAMS2_CONFIG;
+ global_cmd->header.payload_bytes = sizeof(struct dmub_rb_cmd_fams2) - sizeof(struct dmub_cmd_header);
+
+ /* send global configuration parameters */
+ global_cmd->config.global.max_allow_delay_us = 100 * 1000; //100ms
+ global_cmd->config.global.lock_wait_time_us = 5000; //5ms
+
+ /* copy static feature configuration */
+ global_cmd->config.global.features.all = dc->debug.fams2_config.all;
+
+ /* apply feature configuration based on current driver state */
+ global_cmd->config.global.features.bits.enable_visual_confirm = dc->debug.visual_confirm == VISUAL_CONFIRM_FAMS2;
+ global_cmd->config.global.features.bits.enable = enable;
+
+ /* construct per-stream configs */
+ if (enable) {
+ for (i = 0; i < context->bw_ctx.bw.dcn.fams2_stream_count; i++) {
+ struct dmub_rb_cmd_fams2 *stream_cmd = &cmd[i+1].fams2_config;
+
+ /* configure command header */
+ stream_cmd->header.type = DMUB_CMD__FW_ASSISTED_MCLK_SWITCH;
+ stream_cmd->header.sub_type = DMUB_CMD__FAMS2_CONFIG;
+ stream_cmd->header.payload_bytes = sizeof(struct dmub_rb_cmd_fams2) - sizeof(struct dmub_cmd_header);
+ stream_cmd->header.multi_cmd_pending = 1;
+ /* copy stream static state */
+ memcpy(&stream_cmd->config.stream,
+ &context->bw_ctx.bw.dcn.fams2_stream_params[i],
+ sizeof(struct dmub_fams2_stream_static_state));
+ }
+ }
+
+ if (enable && context->bw_ctx.bw.dcn.fams2_stream_count) {
+ /* set multi pending for global, and unset for last stream cmd */
+ global_cmd->config.global.num_streams = context->bw_ctx.bw.dcn.fams2_stream_count;
+ global_cmd->header.multi_cmd_pending = 1;
+ cmd[context->bw_ctx.bw.dcn.fams2_stream_count].fams2_config.header.multi_cmd_pending = 0;
+ num_cmds += context->bw_ctx.bw.dcn.fams2_stream_count;
+ }
+
+ dm_execute_dmub_cmd_list(dc->ctx, num_cmds, cmd, DM_DMUB_WAIT_TYPE_WAIT);
+}
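+/* Chaining example: with two FAMS2 streams enabled, the list is cmd[0]
+ * (global config, multi_cmd_pending = 1), cmd[1] (stream 0,
+ * multi_cmd_pending = 1) and cmd[2] (stream 1, multi_cmd_pending = 0), so
+ * DMCUB consumes all three atomically and num_cmds = 3.
+ */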
+
+void dc_dmub_srv_fams2_drr_update(struct dc *dc,
+ uint32_t tg_inst,
+ uint32_t vtotal_min,
+ uint32_t vtotal_max,
+ uint32_t vtotal_mid,
+ uint32_t vtotal_mid_frame_num,
+ bool program_manual_trigger)
+{
+ union dmub_rb_cmd cmd = { 0 };
+
+ cmd.fams2_drr_update.header.type = DMUB_CMD__FW_ASSISTED_MCLK_SWITCH;
+ cmd.fams2_drr_update.header.sub_type = DMUB_CMD__FAMS2_DRR_UPDATE;
+ cmd.fams2_drr_update.dmub_optc_state_req.tg_inst = tg_inst;
+ cmd.fams2_drr_update.dmub_optc_state_req.v_total_max = vtotal_max;
+ cmd.fams2_drr_update.dmub_optc_state_req.v_total_min = vtotal_min;
+ cmd.fams2_drr_update.dmub_optc_state_req.v_total_mid = vtotal_mid;
+ cmd.fams2_drr_update.dmub_optc_state_req.v_total_mid_frame_num = vtotal_mid_frame_num;
+ cmd.fams2_drr_update.dmub_optc_state_req.program_manual_trigger = program_manual_trigger;
+
+ cmd.fams2_drr_update.header.payload_bytes = sizeof(cmd.fams2_drr_update) - sizeof(cmd.fams2_drr_update.header);
+
+ dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
+}
+
+void dc_dmub_srv_fams2_passthrough_flip(
+ struct dc *dc,
+ struct dc_state *state,
+ struct dc_stream_state *stream,
+ struct dc_surface_update *srf_updates,
+ int surface_count)
+{
+ int plane_index;
+ union dmub_rb_cmd cmds[MAX_PLANES];
+ struct dc_plane_address *address;
+ struct dc_plane_state *plane_state;
+ int num_cmds = 0;
+ struct dc_stream_status *stream_status = dc_stream_get_status(stream);
+
+ if (surface_count <= 0 || stream_status == NULL)
+ return;
+
+ memset(cmds, 0, sizeof(union dmub_rb_cmd) * MAX_PLANES);
+
+ /* build command for each surface update */
+ for (plane_index = 0; plane_index < surface_count; plane_index++) {
+ plane_state = srf_updates[plane_index].surface;
+ address = &plane_state->address;
+
+ /* skip if there is no address update for plane */
+ if (!srf_updates[plane_index].flip_addr)
+ continue;
+
+ /* build command header */
+ cmds[num_cmds].fams2_flip.header.type = DMUB_CMD__FW_ASSISTED_MCLK_SWITCH;
+ cmds[num_cmds].fams2_flip.header.sub_type = DMUB_CMD__FAMS2_FLIP;
+ cmds[num_cmds].fams2_flip.header.payload_bytes = sizeof(struct dmub_rb_cmd_fams2_flip);
+
+ /* for chaining multiple commands, all but the last command set this to 1 */
+ cmds[num_cmds].fams2_flip.header.multi_cmd_pending = 1;
+
+ /* set topology info */
+ cmds[num_cmds].fams2_flip.flip_info.pipe_mask = dc_plane_get_pipe_mask(state, plane_state);
+ if (stream_status)
+ cmds[num_cmds].fams2_flip.flip_info.otg_inst = stream_status->primary_otg_inst;
+
+ cmds[num_cmds].fams2_flip.flip_info.config.bits.is_immediate = plane_state->flip_immediate;
+
+ /* build address info for command */
+ switch (address->type) {
+ case PLN_ADDR_TYPE_GRAPHICS:
+ if (address->grph.addr.quad_part == 0) {
+ BREAK_TO_DEBUGGER();
+ break;
+ }
+
+ cmds[num_cmds].fams2_flip.flip_info.addr_info.meta_addr_lo =
+ address->grph.meta_addr.low_part;
+ cmds[num_cmds].fams2_flip.flip_info.addr_info.meta_addr_hi =
+ (uint16_t)address->grph.meta_addr.high_part;
+ cmds[num_cmds].fams2_flip.flip_info.addr_info.surf_addr_lo =
+ address->grph.addr.low_part;
+ cmds[num_cmds].fams2_flip.flip_info.addr_info.surf_addr_hi =
+ (uint16_t)address->grph.addr.high_part;
+ break;
+ case PLN_ADDR_TYPE_VIDEO_PROGRESSIVE:
+ if (address->video_progressive.luma_addr.quad_part == 0 ||
+ address->video_progressive.chroma_addr.quad_part == 0) {
+ BREAK_TO_DEBUGGER();
+ break;
+ }
+
+ cmds[num_cmds].fams2_flip.flip_info.addr_info.meta_addr_lo =
+ address->video_progressive.luma_meta_addr.low_part;
+ cmds[num_cmds].fams2_flip.flip_info.addr_info.meta_addr_hi =
+ (uint16_t)address->video_progressive.luma_meta_addr.high_part;
+ cmds[num_cmds].fams2_flip.flip_info.addr_info.meta_addr_c_lo =
+ address->video_progressive.chroma_meta_addr.low_part;
+ cmds[num_cmds].fams2_flip.flip_info.addr_info.meta_addr_c_hi =
+ (uint16_t)address->video_progressive.chroma_meta_addr.high_part;
+ cmds[num_cmds].fams2_flip.flip_info.addr_info.surf_addr_lo =
+ address->video_progressive.luma_addr.low_part;
+ cmds[num_cmds].fams2_flip.flip_info.addr_info.surf_addr_hi =
+ (uint16_t)address->video_progressive.luma_addr.high_part;
+ cmds[num_cmds].fams2_flip.flip_info.addr_info.surf_addr_c_lo =
+ address->video_progressive.chroma_addr.low_part;
+ cmds[num_cmds].fams2_flip.flip_info.addr_info.surf_addr_c_hi =
+ (uint16_t)address->video_progressive.chroma_addr.high_part;
+ break;
+ default:
+ // Should never be hit
+ BREAK_TO_DEBUGGER();
+ break;
+ }
+
+ num_cmds++;
+ }
+
+ if (num_cmds > 0) {
+ cmds[num_cmds - 1].fams2_flip.header.multi_cmd_pending = 0;
+ dm_execute_dmub_cmd_list(dc->ctx, num_cmds, cmds, DM_DMUB_WAIT_TYPE_WAIT);
+ }
+}
bool dc_dmub_srv_cmd_run_list(struct dc_dmub_srv *dc_dmub_srv, unsigned int count, union dmub_rb_cmd *cmd_list, enum dm_dmub_wait_type wait_type);
bool dc_dmub_srv_notify_stream_mask(struct dc_dmub_srv *dc_dmub_srv,
- unsigned int stream_mask);
+ unsigned int stream_mask);
bool dc_dmub_srv_is_restore_required(struct dc_dmub_srv *dc_dmub_srv);
bool dc_wake_and_execute_gpint(const struct dc_context *ctx, enum dmub_gpint_command command_code,
uint16_t param, uint32_t *response, enum dm_dmub_wait_type wait_type);
+void dc_dmub_srv_fams2_update_config(struct dc *dc,
+ struct dc_state *context,
+ bool enable);
+void dc_dmub_srv_fams2_drr_update(struct dc *dc,
+ uint32_t tg_inst,
+ uint32_t vtotal_min,
+ uint32_t vtotal_max,
+ uint32_t vtotal_mid,
+ uint32_t vtotal_mid_frame_num,
+ bool program_manual_trigger);
+void dc_dmub_srv_fams2_passthrough_flip(
+ struct dc *dc,
+ struct dc_state *state,
+ struct dc_stream_state *stream,
+ struct dc_surface_update *srf_updates,
+ int surface_count);
#endif /* _DMUB_DC_SRV_H_ */
va_end(ap);
-
/* mmio write directly */
reg_val = (reg_val & ~field_value_mask.mask) | field_value_mask.value;
return "DCN 3.5";
case DCN_VERSION_3_51:
return "DCN 3.5.1";
+ case DCN_VERSION_4_01:
+ return "DCN 4.0.1";
default:
return "Unknown";
}
enum dc_plane_addr_type {
PLN_ADDR_TYPE_GRAPHICS = 0,
+ PLN_ADDR_TYPE_3DLUT,
PLN_ADDR_TYPE_GRPH_STEREO,
PLN_ADDR_TYPE_VIDEO_PROGRESSIVE,
PLN_ADDR_TYPE_RGBEA
union large_integer dcc_const_color;
} grph;
+ struct {
+ PHYSICAL_ADDRESS_LOC addr;
+ } lut3d;
+
/*stereo*/
struct {
PHYSICAL_ADDRESS_LOC left_addr;
PHYSICAL_ADDRESS_LOC right_alpha_addr;
PHYSICAL_ADDRESS_LOC right_alpha_meta_addr;
union large_integer right_alpha_dcc_const_color;
-
} grph_stereo;
/*video progressive*/
union large_integer page_table_base;
uint8_t vmid;
+ /* DC should use the HW flip queue rather than programming the surface
+ * address directly. The value is determined on each flip.
+ */
+ bool offload_flip;
};
struct dc_size {
DC_TRIPLEBUFFER_DISABLE = 0x0,
DC_TRIPLEBUFFER_ENABLE = 0x1,
};
+enum tile_split_values_new {
+ DC_SURF_TILE_SPLIT_1KB = 0x4,
+};
/* TODO: These values come from hardware spec. We need to readdress this
* if they ever change.
DC_SW_UNKNOWN = DC_SW_MAX
};
+// Swizzle mode definitions for ASICs with addr3 addressing
+enum swizzle_mode_addr3_values {
+ DC_ADDR3_SW_LINEAR = 0,
+ DC_ADDR3_SW_256B_2D = 1,
+ DC_ADDR3_SW_4KB_2D = 2,
+ DC_ADDR3_SW_64KB_2D = 3,
+ DC_ADDR3_SW_256KB_2D = 4,
+ DC_ADDR3_SW_4KB_3D = 5,
+ DC_ADDR3_SW_64KB_3D = 6,
+ DC_ADDR3_SW_256KB_3D = 7,
+ DC_ADDR3_SW_MAX = 8,
+ DC_ADDR3_SW_UNKNOWN = DC_ADDR3_SW_MAX
+};
+
union dc_tiling_info {
struct {
bool rb_aligned;
bool pipe_aligned;
unsigned int num_pkrs;
- } gfx9;
+ } gfx9; /* gfx9, gfx10 and above */
+ struct {
+ enum swizzle_mode_addr3_values swizzle;
+ } gfx_addr3; /* gfx with addr3 addressing and above */
};
/* Rotation angle */
unsigned int pixel_clk_khz;
unsigned int ref_clk_khz;
struct rect viewport;
+ struct rect recout;
struct fixed31_32 h_scale_ratio;
struct fixed31_32 v_scale_ratio;
enum dc_rotation_angle rotation;
CM_GAMUT_REMAP_COEF_FORMAT_S3_12 = 1
};
+enum mpcc_gamut_remap_mode_select {
+ MPCC_GAMUT_REMAP_MODE_SELECT_0 = 0,
+ MPCC_GAMUT_REMAP_MODE_SELECT_1,
+ MPCC_GAMUT_REMAP_MODE_SELECT_2
+};
+
+enum mpcc_gamut_remap_id {
+ MPCC_OGAM_GAMUT_REMAP,
+ MPCC_MCM_FIRST_GAMUT_REMAP,
+ MPCC_MCM_SECOND_GAMUT_REMAP
+};
+
+enum cursor_matrix_mode {
+ CUR_MATRIX_BYPASS = 0,
+ CUR_MATRIX_SET_A,
+ CUR_MATRIX_SET_B
+};
+
struct mcif_warmup_params {
union large_integer start_address;
unsigned int address_increment;
const struct dc *dc,
struct dc_state *state);
+bool dc_state_is_fams2_in_use(
+ const struct dc *dc,
+ const struct dc_state *state);
+
#endif /* _DC_STATE_PRIV_H_ */
HPD_EN_FOR_SECONDARY_EDP_ONLY,
};
+enum dc_cm2_shaper_3dlut_setting {
+ DC_CM2_SHAPER_3DLUT_SETTING_BYPASS_ALL,
+ DC_CM2_SHAPER_3DLUT_SETTING_ENABLE_SHAPER,
+ /* Bypassing Shaper will always bypass 3DLUT */
+ DC_CM2_SHAPER_3DLUT_SETTING_ENABLE_SHAPER_3DLUT
+};
+
+enum dc_cm2_gpu_mem_layout {
+ DC_CM2_GPU_MEM_LAYOUT_3D_SWIZZLE_LINEAR_RGB,
+ DC_CM2_GPU_MEM_LAYOUT_3D_SWIZZLE_LINEAR_BGR,
+ DC_CM2_GPU_MEM_LAYOUT_1D_PACKED_LINEAR
+};
+
+enum dc_cm2_gpu_mem_pixel_component_order {
+ DC_CM2_GPU_MEM_PIXEL_COMPONENT_ORDER_RGBA,
+};
+
+enum dc_cm2_gpu_mem_format {
+ DC_CM2_GPU_MEM_FORMAT_16161616_UNORM_12MSB,
+ DC_CM2_GPU_MEM_FORMAT_16161616_UNORM_12LSB,
+ DC_CM2_GPU_MEM_FORMAT_16161616_FLOAT_FP1_5_10
+};
+
+struct dc_cm2_gpu_mem_format_parameters {
+ enum dc_cm2_gpu_mem_format format;
+ union {
+ struct {
+ /* bias & scale for float only */
+ uint16_t bias;
+ uint16_t scale;
+ } float_params;
+ };
+};
+
+enum dc_cm2_gpu_mem_size {
+ DC_CM2_GPU_MEM_SIZE_171717,
+ DC_CM2_GPU_MEM_SIZE_TRANSFORMED
+};
+
+struct dc_cm2_gpu_mem_parameters {
+ struct dc_plane_address addr;
+ enum dc_cm2_gpu_mem_layout layout;
+ struct dc_cm2_gpu_mem_format_parameters format_params;
+ enum dc_cm2_gpu_mem_pixel_component_order component_order;
+ enum dc_cm2_gpu_mem_size size;
+};
+
+enum dc_cm2_transfer_func_source {
+ DC_CM2_TRANSFER_FUNC_SOURCE_SYSMEM,
+ DC_CM2_TRANSFER_FUNC_SOURCE_VIDMEM
+};
+
+struct dc_cm2_component_settings {
+ enum dc_cm2_shaper_3dlut_setting shaper_3dlut_setting;
+ bool lut1d_enable;
+};
+
+/*
+ * All pointers in this struct must remain valid for as long as the 3DLUTs are used
+ */
+struct dc_cm2_func_luts {
+ const struct dc_transfer_func *shaper;
+ struct {
+ enum dc_cm2_transfer_func_source lut3d_src;
+ union {
+ const struct dc_3dlut *lut3d_func;
+ struct dc_cm2_gpu_mem_parameters gpu_mem_params;
+ };
+ } lut3d_data;
+ const struct dc_transfer_func *lut1d_func;
+};
+
+struct dc_cm2_parameters {
+ struct dc_cm2_component_settings component_settings;
+ struct dc_cm2_func_luts cm2_luts;
+};
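+/* Usage sketch (shaper_tf, lut3d and blend_tf are hypothetical and assumed
+ * to be set up elsewhere and to outlive the update):
+ *
+ *	struct dc_cm2_parameters cm2 = {
+ *		.component_settings = {
+ *			.shaper_3dlut_setting = DC_CM2_SHAPER_3DLUT_SETTING_ENABLE_SHAPER_3DLUT,
+ *			.lut1d_enable = true,
+ *		},
+ *		.cm2_luts = {
+ *			.shaper = shaper_tf,
+ *			.lut3d_data = {
+ *				.lut3d_src = DC_CM2_TRANSFER_FUNC_SOURCE_SYSMEM,
+ *				.lut3d_func = lut3d,
+ *			},
+ *			.lut1d_func = blend_tf,
+ *		},
+ *	};
+ *	srf_update->cm2_params = &cm2;
+ *
+ * Changing component_settings forces a full update; changing only cm2_luts
+ * is a fast update (see dc_surface_update above).
+ */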
+
enum mall_stream_type {
SUBVP_NONE, // subvp not in use
SUBVP_MAIN, // subvp in use, this stream is main stream
return true;
}
+static bool dcn401_program_pix_clk(
+ struct clock_source *clock_source,
+ struct pixel_clk_params *pix_clk_params,
+ enum dp_link_encoding encoding,
+ struct pll_settings *pll_settings)
+{
+ struct dce110_clk_src *clk_src = TO_DCE110_CLK_SRC(clock_source);
+ unsigned int inst = pix_clk_params->controller_id - CONTROLLER_ID_D0;
+ const struct pixel_rate_range_table_entry *e =
+ look_up_in_video_optimized_rate_tlb(pix_clk_params->requested_pix_clk_100hz / 10);
+ struct bp_pixel_clock_parameters bp_pc_params = {0};
+ enum transmitter_color_depth bp_pc_colour_depth = TRANSMITTER_COLOR_DEPTH_24;
+ struct dp_dto_params dto_params = { 0 };
+
+ dto_params.otg_inst = inst;
+ dto_params.signal = pix_clk_params->signal_type;
+
+ /* For all signals except TMDS, the driver programs the DP DTO directly without calling the VBIOS command table */
+ if (!dc_is_hdmi_tmds_signal(pix_clk_params->signal_type)) {
+ long long ref_dtbclk_khz = clock_source->ctx->dc->clk_mgr->funcs->get_dtb_ref_clk_frequency(clock_source->ctx->dc->clk_mgr);
+ long long dprefclk_khz = clock_source->ctx->dc->clk_mgr->dprefclk_khz;
+ long long dtbclk_p_src_clk_khz;
+ /* For DP 128b/132b signals dtbclk_p_src is DTBCLK; otherwise it is DPREFCLK */
+ dtbclk_p_src_clk_khz = encoding == DP_128b_132b_ENCODING ? ref_dtbclk_khz : dprefclk_khz;
+ if (e) {
+ dto_params.pixclk_hz = e->target_pixel_rate_khz * e->mult_factor;
+ dto_params.refclk_hz = dtbclk_p_src_clk_khz * e->div_factor;
+ } else {
+ dto_params.pixclk_hz = pix_clk_params->requested_pix_clk_100hz * 100;
+ dto_params.refclk_hz = dtbclk_p_src_clk_khz * 1000;
+ }
+
+ /* enable DP DTO */
+ clock_source->ctx->dc->res_pool->dccg->funcs->set_dp_dto(
+ clock_source->ctx->dc->res_pool->dccg,
+ &dto_params);
+
+ } else {
+ /* Disable the DP DTO when driving a TMDS signal */
+ clock_source->ctx->dc->res_pool->dccg->funcs->set_dp_dto(
+ clock_source->ctx->dc->res_pool->dccg,
+ &dto_params);
+
+ /* ATOMBIOS expects the pixel rate adjusted by the deep color ratio */
+ bp_pc_params.controller_id = pix_clk_params->controller_id;
+ bp_pc_params.pll_id = clock_source->id;
+ bp_pc_params.target_pixel_clock_100hz = pll_settings->actual_pix_clk_100hz;
+ bp_pc_params.encoder_object_id = pix_clk_params->encoder_object_id;
+ bp_pc_params.signal_type = pix_clk_params->signal_type;
+
+ // Make sure we send the correct color depth to DMUB for HDMI
+ if (pix_clk_params->signal_type == SIGNAL_TYPE_HDMI_TYPE_A) {
+ switch (pix_clk_params->color_depth) {
+ case COLOR_DEPTH_888:
+ bp_pc_colour_depth = TRANSMITTER_COLOR_DEPTH_24;
+ break;
+ case COLOR_DEPTH_101010:
+ bp_pc_colour_depth = TRANSMITTER_COLOR_DEPTH_30;
+ break;
+ case COLOR_DEPTH_121212:
+ bp_pc_colour_depth = TRANSMITTER_COLOR_DEPTH_36;
+ break;
+ case COLOR_DEPTH_161616:
+ bp_pc_colour_depth = TRANSMITTER_COLOR_DEPTH_48;
+ break;
+ default:
+ bp_pc_colour_depth = TRANSMITTER_COLOR_DEPTH_24;
+ break;
+ }
+ bp_pc_params.color_depth = bp_pc_colour_depth;
+ }
+
+ if (clock_source->id != CLOCK_SOURCE_ID_DP_DTO) {
+ bp_pc_params.flags.SET_GENLOCK_REF_DIV_SRC =
+ pll_settings->use_external_clk;
+ bp_pc_params.flags.SET_XTALIN_REF_SRC =
+ !pll_settings->use_external_clk;
+ if (pix_clk_params->flags.SUPPORT_YCBCR420) {
+ bp_pc_params.flags.SUPPORT_YUV_420 = 1;
+ }
+ }
+ if (clk_src->bios->funcs->set_pixel_clock(
+ clk_src->bios, &bp_pc_params) != BP_RESULT_OK)
+ return false;
+ /* Resync deep color DTO */
+ if (clock_source->id != CLOCK_SOURCE_ID_DP_DTO)
+ dce112_program_pixel_clk_resync(clk_src,
+ pix_clk_params->signal_type,
+ pix_clk_params->color_depth,
+ pix_clk_params->flags.SUPPORT_YCBCR420);
+ }
+
+ return true;
+}
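+/* Note: on a video-optimized table hit, pixclk_hz = target_pixel_rate_khz *
+ * mult_factor and refclk_hz = dtbclk_p_src_clk_khz * div_factor, so a
+ * *1.001 or /1.001 adjustment is carried exactly in the DTO ratio instead
+ * of being rounded to a kHz value.
+ */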
+
static bool dce110_clock_source_power_down(
struct clock_source *clk_src)
{
.get_pixel_clk_frequency_100hz = get_pixel_clk_frequency_100hz
};
+static const struct clock_source_funcs dcn401_clk_src_funcs = {
+ .cs_power_down = dce110_clock_source_power_down,
+ .program_pix_clk = dcn401_program_pix_clk,
+ .get_pix_clk_dividers = dcn3_get_pix_clk_dividers,
+ .get_pixel_clk_frequency_100hz = get_pixel_clk_frequency_100hz
+};
+
/*****************************************/
/* Constructor */
/*****************************************/
return ret;
}
+bool dcn401_clk_src_construct(
+ struct dce110_clk_src *clk_src,
+ struct dc_context *ctx,
+ struct dc_bios *bios,
+ enum clock_source_id id,
+ const struct dce110_clk_src_regs *regs,
+ const struct dce110_clk_src_shift *cs_shift,
+ const struct dce110_clk_src_mask *cs_mask)
+{
+ bool ret = dce112_clk_src_construct(clk_src, ctx, bios, id, regs, cs_shift, cs_mask);
+
+ clk_src->base.funcs = &dcn401_clk_src_funcs;
+
+ return ret;
+}
bool dcn301_clk_src_construct(
struct dce110_clk_src *clk_src,
struct dc_context *ctx,
const struct dce110_clk_src_shift *cs_shift,
const struct dce110_clk_src_mask *cs_mask);
+bool dcn401_clk_src_construct(
+ struct dce110_clk_src *clk_src,
+ struct dc_context *ctx,
+ struct dc_bios *bios,
+ enum clock_source_id id,
+ const struct dce110_clk_src_regs *regs,
+ const struct dce110_clk_src_shift *cs_shift,
+ const struct dce110_clk_src_mask *cs_mask);
/* this table is used to find *1.001 and /1.001 pixel rates from a non-precise pixel rate */
struct pixel_rate_range_table_entry {
unsigned int range_min_khz;
I2C_COMMON_MASK_SH_LIST_DCN30(mask_sh),\
I2C_SF(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_CLK_EN, mask_sh)
+#define I2C_COMMON_MASK_SH_LIST_DCN401(mask_sh)\
+ I2C_COMMON_MASK_SH_LIST_DCN30(mask_sh),\
+ I2C_SF(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_CLK_EN, mask_sh)
+
struct dce_i2c_registers {
uint32_t SETUP;
uint32_t SPEED;
#define SE_REG_FIELD_LIST_DCN4_01_COMMON(type) \
type COMPRESSED_PIXEL_FORMAT;\
type DP_VID_N_INTERVAL;\
- type DIG_FIFO_OUTPUT_PIXEL_PER_CYCLE
-
+ type DIG_FIFO_OUTPUT_PIXEL_PER_CYCLE;\
+ type DP_STEER_FIFO_ENABLE
struct dcn10_stream_encoder_shift {
SE_REG_FIELD_LIST_DCN1_0(uint8_t);
uint8_t HDMI_ACP_SEND;
MPC_RMU_3DLUT_SIZE, &s->lut3d_size);
}
- REG_GET_2(MPCC_OGAM_CONTROL[mpcc_inst],
+ REG_GET_2(MPCC_OGAM_CONTROL[mpcc_inst],
MPCC_OGAM_MODE_CURRENT, &s->rgam_mode,
MPCC_OGAM_SELECT_CURRENT, &s->rgam_lut);
}
void mpc3_init_mpcc(struct mpcc *mpcc, int mpcc_inst);
+void mpc3_mpc_init_single_inst(
+ struct mpc *mpc,
+ unsigned int mpcc_id);
+
enum dc_lut_mode mpc3_get_ogam_current(
struct mpc *mpc,
int mpcc_id);
}
}
-static void dcn32_program_compbuf_size(struct hubbub *hubbub, unsigned int compbuf_size_kb, bool safe_to_increase)
+void dcn32_program_compbuf_size(struct hubbub *hubbub, unsigned int compbuf_size_kb, bool safe_to_increase)
{
struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub);
unsigned int compbuf_size_segments = (compbuf_size_kb + DCN32_CRB_SEGMENT_SIZE_KB - 1) / DCN32_CRB_SEGMENT_SIZE_KB;
void hubbub32_get_mall_en(struct hubbub *hubbub, unsigned int *mall_in_use);
+void dcn32_program_compbuf_size(struct hubbub *hubbub, unsigned int compbuf_size_kb, bool safe_to_increase);
+
#endif
#include "basics/conversion.h"
#include "dcn10/dcn10_cm_common.h"
#include "dc.h"
+#include "dcn401/dcn401_mpc.h"
#define REG(reg)\
mpc30->mpc_regs->reg
.power_on_mpc_mem_pwr = mpc3_power_on_ogam_lut,
.get_mpc_out_mux = mpc1_get_mpc_out_mux,
.set_bg_color = mpc1_set_bg_color,
+ .set_movable_cm_location = mpc401_set_movable_cm_location,
+ .populate_lut = mpc401_populate_lut,
};
CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml/calcs/dcn_calc_auto.o := $(dml_rcflags)
CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml/calcs/dcn_calc_math.o := $(dml_rcflags)
+CFLAGS_$(AMDDALPATH)/dc/dml/dcn401/dcn401_fpu.o := $(dml_ccflags)
+CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml/dcn401/dcn401_fpu.o := $(dml_rcflags)
+
ifdef CONFIG_DRM_AMD_DC_FP
DML += display_mode_lib.o display_rq_dlg_helpers.o dml1_display_rq_dlg_calc.o
DML += dcn10/dcn10_fpu.o
DML += dcn314/dcn314_fpu.o
DML += dcn35/dcn35_fpu.o
DML += dcn351/dcn351_fpu.o
+DML += dcn401/dcn401_fpu.o
DML += dsc/rc_calc_fpu.o
DML += calcs/dcn_calcs.o calcs/dcn_calc_math.o calcs/dcn_calc_auto.o
endif
*
*/
-#define UNIT_TEST 0
-#if !UNIT_TEST
#include "dc.h"
-#endif
#include "../display_mode_lib.h"
#include "display_mode_vba_314.h"
#include "../dml_inline_defs.h"
unsigned int ref_freq_to_pix_freq;
unsigned int vratio_prefetch;
unsigned int vratio_prefetch_c;
+ unsigned int refcyc_per_tdlut_group;
unsigned int refcyc_per_pte_group_vblank_l;
unsigned int refcyc_per_pte_group_vblank_c;
unsigned int refcyc_per_meta_chunk_vblank_l;
endif
endif
+# DRIVER_BUILD is mostly used in DML2.1 source
+subdir-ccflags-y += -DDRIVER_BUILD=1
subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/dml2
+subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/dml2/dml21/src/dml2_core
+subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/dml2/dml21/src/dml2_mcg/
+subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/dml2/dml21/src/dml2_dpmm/
+subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/dml2/dml21/src/dml2_pmo/
+subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/dml2/dml21/src/dml2_standalone_libraries/
+subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/dml2/dml21/src/inc
+subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/dml2/dml21/inc
+subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/dml2/dml21/
+
CFLAGS_$(AMDDALPATH)/dc/dml2/display_mode_core.o := $(dml2_ccflags) $(frame_warn_flag)
CFLAGS_$(AMDDALPATH)/dc/dml2/display_mode_util.o := $(dml2_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml2/dml2_wrapper.o := $(dml2_ccflags)
AMD_DISPLAY_FILES += $(AMD_DAL_DML2)
+CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_top/dml_top.o := $(dml2_ccflags)
+CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_top/dml_top_mcache.o := $(dml2_ccflags)
+CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_top/dml2_top_optimization.o := $(dml2_ccflags)
+CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4.o := $(dml2_ccflags)
+CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.o := $(dml2_ccflags) -Wframe-larger-than=2048
+CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_core/dml2_core_factory.o := $(dml2_ccflags)
+CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_core/dml2_core_shared.o := $(dml2_ccflags)
+CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.o := $(dml2_ccflags)
+CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_factory.o := $(dml2_ccflags)
+CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_mcg/dml2_mcg_dcn4.o := $(dml2_ccflags)
+CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_mcg/dml2_mcg_factory.o := $(dml2_ccflags)
+CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn3.o := $(dml2_ccflags)
+CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn4.o := $(dml2_ccflags)
+CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.o := $(dml2_ccflags)
+CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_factory.o := $(dml2_ccflags)
+CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_standalone_libraries/lib_float_math.o := $(dml2_ccflags)
+
+CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/dml21_wrapper.o := $(dml2_ccflags)
+CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/dml21_translation_helper.o := $(dml2_ccflags)
+CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/dml21_utils.o := $(dml2_ccflags)
+CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/inc/dml2_debug.o := $(dml2_ccflags)
+
+CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_top/dml_top.o := $(dml2_rcflags)
+CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_top/dml_top_mcache.o := $(dml2_rcflags)
+CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_top/dml2_top_optimization.o := $(dml2_rcflags)
+CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4.o := $(dml2_rcflags)
+CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.o := $(dml2_rcflags)
+CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_core/dml2_core_factory.o := $(dml2_rcflags)
+CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_core/dml2_core_shared.o := $(dml2_rcflags)
+CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.o := $(dml2_rcflags)
+CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_factory.o := $(dml2_rcflags)
+CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_mcg/dml2_mcg_dcn4.o := $(dml2_rcflags)
+CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_mcg/dml2_mcg_factory.o := $(dml2_rcflags)
+CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn3.o := $(dml2_rcflags)
+CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn4.o := $(dml2_rcflags)
+CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.o := $(dml2_rcflags)
+CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_factory.o := $(dml2_rcflags)
+CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_standalone_libraries/lib_float_math.o := $(dml2_rcflags)
+CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/dml21_wrapper.o := $(dml2_rcflags)
+CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/dml21_translation_helper.o := $(dml2_rcflags)
+CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/dml21_utils.o := $(dml2_rcflags)
+CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/inc/dml2_debug.o := $(dml2_rcflags)
+
+DML21 := src/dml2_top/dml_top.o
+DML21 += src/dml2_top/dml_top_mcache.o
+DML21 += src/dml2_top/dml2_top_optimization.o
+DML21 += src/inc/dml2_debug.o
+DML21 += src/dml2_core/dml2_core_dcn4.o
+DML21 += src/dml2_core/dml2_core_factory.o
+DML21 += src/dml2_core/dml2_core_dcn4_calcs.o
+DML21 += src/dml2_core/dml2_core_shared.o
+DML21 += src/dml2_dpmm/dml2_dpmm_dcn4.o
+DML21 += src/dml2_dpmm/dml2_dpmm_factory.o
+DML21 += src/dml2_mcg/dml2_mcg_dcn4.o
+DML21 += src/dml2_mcg/dml2_mcg_factory.o
+DML21 += src/dml2_pmo/dml2_pmo_dcn3.o
+DML21 += src/dml2_pmo/dml2_pmo_dcn4.o
+DML21 += src/dml2_pmo/dml2_pmo_factory.o
+DML21 += src/dml2_pmo/dml2_pmo_dcn4_fams2.o
+DML21 += src/dml2_standalone_libraries/lib_float_math.o
+DML21 += dml21_translation_helper.o
+DML21 += dml21_wrapper.o
+DML21 += dml21_utils.o
+
+AMD_DAL_DML21 = $(addprefix $(AMDDALPATH)/dc/dml2/dml21/,$(DML21))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_DML21)
+
dml_project_dcn321 = 2,
dml_project_dcn35 = 3,
dml_project_dcn351 = 4,
+ dml_project_dcn401 = 5,
};
enum dml_prefetch_modes {
dml_prefetch_support_uclk_fclk_and_stutter_if_possible = 0,
stream->stream_id, plane_idx, &plane_id);
cfg_idx = find_disp_cfg_idx_by_plane_id(mapping, plane_id);
mpc_factor = (unsigned int)disp_cfg->hw.DPPPerSurface[cfg_idx];
+ } else if (ctx->architecture == dml2_architecture_21) {
+ if (ctx->config.svp_pstate.callbacks.get_stream_subvp_type(state, stream) == SUBVP_PHANTOM) {
+ struct dc_stream_state *main_stream;
+ struct dc_stream_status *main_stream_status;
+
+ /* get stream id of main stream */
+ main_stream = ctx->config.svp_pstate.callbacks.get_paired_subvp_stream(state, stream);
+ main_stream_status = ctx->config.callbacks.get_stream_status(state, main_stream);
+
+ /* get plane id for associated main plane */
+ get_plane_id(ctx, state, main_stream_status->plane_states[plane_idx],
+ main_stream->stream_id, plane_idx, &plane_id);
+ } else {
+ get_plane_id(ctx, state, status->plane_states[plane_idx],
+ stream->stream_id, plane_idx, &plane_id);
+ }
+
+ cfg_idx = find_disp_cfg_idx_by_plane_id(mapping, plane_id);
+ mpc_factor = ctx->v21.mode_programming.programming->plane_programming[cfg_idx].num_dpps_required;
} else {
mpc_factor = 1;
ASSERT(false);
break;
}
}
+ else if (ctx->architecture == dml2_architecture_21) {
+ if (ctx->config.svp_pstate.callbacks.get_stream_subvp_type(state, stream) == SUBVP_PHANTOM) {
+ struct dc_stream_state *main_stream;
+
+ /* get stream id of main stream */
+ main_stream = ctx->config.svp_pstate.callbacks.get_paired_subvp_stream(state, stream);
+
+ /* get cfg idx for associated main stream */
+ cfg_idx = find_disp_cfg_idx_by_stream_id(
+ mapping, main_stream->stream_id);
+ } else {
+ cfg_idx = find_disp_cfg_idx_by_stream_id(
+ mapping, stream->stream_id);
+ }
+
+ return ctx->v21.mode_programming.programming->stream_programming[cfg_idx].num_odms_required;
+ }
+
ASSERT(false);
return 1;
}
unsigned int stream_id;
const unsigned int *ODMMode, *DPPPerSurface;
+ unsigned int odm_mode_array[__DML2_WRAPPER_MAX_STREAMS_PLANES__] = {0}, dpp_per_surface_array[__DML2_WRAPPER_MAX_STREAMS_PLANES__] = {0};
struct dc_pipe_mapping_scratch scratch;
if (ctx->config.map_dc_pipes_with_callbacks)
return map_dc_pipes_with_callbacks(
ctx, state, disp_cfg, mapping, existing_state);
- ODMMode = (unsigned int *)disp_cfg->hw.ODMMode;
- DPPPerSurface = disp_cfg->hw.DPPPerSurface;
+ if (ctx->architecture == dml2_architecture_21) {
+ /*
+ * Extract the ODM and DPP outputs from DML2.1 and map them into local
+ * arrays, as required for pipe mapping in dml2_map_dc_pipes. The data
+ * cannot be read out through const pointers directly, so copy it into
+ * these arrays and then assign them to the const pointers used by the
+ * existing code, which expects DML2.0's const dml_display_cfg_st.
+ */
+ for (i = 0; i < __DML2_WRAPPER_MAX_STREAMS_PLANES__; i++) {
+ odm_mode_array[i] = ctx->v21.mode_programming.programming->stream_programming[i].num_odms_required;
+ dpp_per_surface_array[i] = ctx->v21.mode_programming.programming->plane_programming[i].num_dpps_required;
+ }
+
+ ODMMode = (const unsigned int *)odm_mode_array;
+ DPPPerSurface = (const unsigned int *)dpp_per_surface_array;
+ } else {
+ ODMMode = (unsigned int *)disp_cfg->hw.ODMMode;
+ DPPPerSurface = disp_cfg->hw.DPPPerSurface;
+ }
for (stream_index = 0; stream_index < state->stream_count; stream_index++) {
memset(&scratch, 0, sizeof(struct dc_pipe_mapping_scratch));
scratch.odm_info.odm_factor = 1;
}
+ /* After the DML2.1 update the ODM output is interpreted differently: it is
+ * no longer the DML2.0 encoding but the literal ODM slice count (1, 2 or 4).
+ * This is not an issue with the new resource management logic; this block
+ * keeps legacy pipe management working with the updated DML.
+ */
+ if (ctx->architecture == dml2_architecture_21) {
+ if (ODMMode[stream_disp_cfg_index] == 1) {
+ scratch.odm_info.odm_factor = 1;
+ } else if (ODMMode[stream_disp_cfg_index] == 2) {
+ scratch.odm_info.odm_factor = 2;
+ } else if (ODMMode[stream_disp_cfg_index] == 4) {
+ scratch.odm_info.odm_factor = 4;
+ } else {
+ ASSERT(false);
+ scratch.odm_info.odm_factor = 1;
+ }
+ }
calculate_odm_slices(state->streams[stream_index], scratch.odm_info.odm_factor, scratch.odm_info.odm_slice_end_x);
// If there are no planes, you still want to setup ODM...
#include "dml2_wrapper.h"
#include "dml2_policy.h"
+#include "dml_top.h"
+#include "dml21_wrapper.h"
struct dml2_wrapper_optimize_configuration_params {
struct display_mode_lib_st *dml_core_ctx;
enum dml2_architecture {
dml2_architecture_20,
+ dml2_architecture_21
+};
+
+struct prepare_mcache_programming_locals {
+ struct dml2_build_mcache_programming_in_out build_mcache_programming_params;
+};
+
+struct dml21_wrapper_scratch {
+ struct prepare_mcache_programming_locals prepare_mcache_locals;
+ struct pipe_ctx temp_pipe;
};
struct dml2_pipe_combine_factor {
struct dml2_wrapper_scratch scratch;
struct dcn_watermarks g6_temp_read_watermark_set;
} v20;
+ struct {
+ struct dml21_wrapper_scratch scratch;
+ struct dml2_initialize_instance_in_out dml_init;
+ struct dml2_display_cfg display_config;
+ struct dml2_check_mode_supported_in_out mode_support;
+ struct dml2_build_mode_programming_in_out mode_programming;
+ struct dml2_dml_to_dc_pipe_mapping dml_to_dc_pipe_mapping;
+ } v21;
};
};
void dml2_init_ip_params(struct dml2_context *dml2, const struct dc *in_dc, struct ip_params_st *out)
{
switch (dml2->v20.dml_core_ctx.project) {
-
case dml_project_dcn32:
case dml_project_dcn321:
default:
out->config_return_buffer_segment_size_in_kbytes = 64; /*required, but not exist,, hard coded in dml2_translate_ip_params*/
break;
+ case dml_project_dcn401:
+ // Hardcoded values for DCN4m
+ out->vblank_nom_default_us = 668; //600;
+ out->rob_buffer_size_kbytes = 192; //128;
+ out->config_return_buffer_size_in_kbytes = 1344; //1280;
+ out->config_return_buffer_segment_size_in_kbytes = 64;
+ out->compressed_buffer_segment_size_in_kbytes = 64;
+ out->meta_fifo_size_in_kentries = 22;
+ out->dpte_buffer_size_in_pte_reqs_luma = 68;
+ out->dpte_buffer_size_in_pte_reqs_chroma = 36;
+ out->gpuvm_max_page_table_levels = 4;
+ out->pixel_chunk_size_kbytes = 8;
+ out->alpha_pixel_chunk_size_kbytes = 4;
+ out->min_pixel_chunk_size_bytes = 1024;
+ out->writeback_chunk_size_kbytes = 8;
+ out->line_buffer_size_bits = 1171920;
+ out->max_line_buffer_lines = 32;
+ out->writeback_interface_buffer_size_kbytes = 90;
+ //Number of pipes after DCN Pipe harvesting
+ out->max_num_dpp = dml2->config.dcn_pipe_count;
+ out->max_num_otg = dml2->config.dcn_pipe_count;
+ out->max_num_wb = 1;
+ out->max_dchub_pscl_bw_pix_per_clk = 4;
+ out->max_pscl_lb_bw_pix_per_clk = 2;
+ out->max_lb_vscl_bw_pix_per_clk = 4;
+ out->max_vscl_hscl_bw_pix_per_clk = 4;
+ out->max_hscl_ratio = 6;
+ out->max_vscl_ratio = 6;
+ out->max_hscl_taps = 8;
+ out->max_vscl_taps = 8;
+ out->dispclk_ramp_margin_percent = 1;
+ out->dppclk_delay_subtotal = 47;
+ out->dppclk_delay_scl = 50;
+ out->dppclk_delay_scl_lb_only = 16;
+ out->dppclk_delay_cnvc_formatter = 28;
+ out->dppclk_delay_cnvc_cursor = 6;
+ out->dispclk_delay_subtotal = 125;
+ out->cursor_buffer_size = 24; //16
+ out->cursor_chunk_size = 2;
+ out->max_inter_dcn_tile_repeaters = 8;
+ out->writeback_max_hscl_ratio = 1;
+ out->writeback_max_vscl_ratio = 1;
+ out->writeback_min_hscl_ratio = 1;
+ out->writeback_min_vscl_ratio = 1;
+ out->writeback_max_hscl_taps = 1;
+ out->writeback_max_vscl_taps = 1;
+ out->writeback_line_buffer_buffer_size = 0;
+ out->num_dsc = 4;
+ out->maximum_dsc_bits_per_component = 12;
+ out->maximum_pixels_per_line_per_dsc_unit = 5760;
+ out->dsc422_native_support = true;
+ out->dcc_supported = true;
+ out->ptoi_supported = false;
+
+ out->gpuvm_enable = false;
+ out->hostvm_enable = false;
+ out->cursor_64bpp_support = true; //false;
+ out->dynamic_metadata_vm_enabled = false;
+
+ out->max_num_hdmi_frl_outputs = 1;
+ out->max_num_dp2p0_outputs = 4; //2;
+ out->max_num_dp2p0_streams = 4;
+ break;
}
}
out->dispclk_dppclk_vco_speed_mhz = 3600;
break;
+ case dml_project_dcn401:
+ out->pct_ideal_fabric_bw_after_urgent = 76; //67;
+ out->max_avg_sdp_bw_use_normal_percent = 75; //80;
+ out->max_avg_fabric_bw_use_normal_percent = 57; //60;
+
+ out->urgent_out_of_order_return_per_channel_pixel_only_bytes = 0; //4096;
+ out->urgent_out_of_order_return_per_channel_pixel_and_vm_bytes = 0; //4096;
+ out->urgent_out_of_order_return_per_channel_vm_only_bytes = 0; //4096;
+
+ out->num_chans = 16;
+ out->round_trip_ping_latency_dcfclk_cycles = 1000; //263;
+ out->smn_latency_us = 0; //2 us
+ out->mall_allocated_for_dcn_mbytes = dml2->config.mall_cfg.max_cab_allocation_bytes / 1048576; // 64;
+ break;
}
/* ---Overrides if available--- */
if (dml2->config.bbox_overrides.dram_num_chan)
p->in_states->state_array[1].dcfclk_mhz = 1434.0;
p->in_states->state_array[1].dram_speed_mts = 1000 * transactions_per_mem_clock;
break;
+ case dml_project_dcn401:
+ p->in_states->num_states = 2;
+ transactions_per_mem_clock = 16;
+ p->in_states->state_array[0].socclk_mhz = 300; //620.0;
+ p->in_states->state_array[0].dscclk_mhz = 666.667; //716.667;
+ p->in_states->state_array[0].phyclk_mhz = 810;
+ p->in_states->state_array[0].phyclk_d18_mhz = 667;
+ p->in_states->state_array[0].phyclk_d32_mhz = 625;
+ p->in_states->state_array[0].dtbclk_mhz = 2000; //1564.0;
+ p->in_states->state_array[0].fabricclk_mhz = 300; //450.0;
+ p->in_states->state_array[0].dcfclk_mhz = 200; //300.0;
+ p->in_states->state_array[0].dispclk_mhz = 2000; //2150.0;
+ p->in_states->state_array[0].dppclk_mhz = 2000; //2150.0;
+ p->in_states->state_array[0].dram_speed_mts = 97 * transactions_per_mem_clock; //100 *
+
+ p->in_states->state_array[0].urgent_latency_pixel_data_only_us = 4;
+ p->in_states->state_array[0].urgent_latency_pixel_mixed_with_vm_data_us = 0;
+ p->in_states->state_array[0].urgent_latency_vm_data_only_us = 0;
+ p->in_states->state_array[0].writeback_latency_us = 12;
+ p->in_states->state_array[0].urgent_latency_adjustment_fabric_clock_component_us = 1;
+ p->in_states->state_array[0].urgent_latency_adjustment_fabric_clock_reference_mhz = 1000; //3000;
+ p->in_states->state_array[0].sr_exit_z8_time_us = 0;
+ p->in_states->state_array[0].sr_enter_plus_exit_z8_time_us = 0;
+ p->in_states->state_array[0].dram_clock_change_latency_us = 400;
+ p->in_states->state_array[0].use_ideal_dram_bw_strobe = true;
+ p->in_states->state_array[0].sr_exit_time_us = 15.70; //42.97;
+ p->in_states->state_array[0].sr_enter_plus_exit_time_us = 20.20; //49.94;
+ p->in_states->state_array[0].fclk_change_latency_us = 0; //20;
+ p->in_states->state_array[0].usr_retraining_latency_us = 0; //2;
+
+ p->in_states->state_array[1].socclk_mhz = 1600; //1200.0;
+ p->in_states->state_array[1].fabricclk_mhz = 2500; //2500.0;
+ p->in_states->state_array[1].dcfclk_mhz = 1800; //1564.0;
+ p->in_states->state_array[1].dram_speed_mts = 1125 * transactions_per_mem_clock;
+ break;
}
/* Override from passed values, if available */
default:
out->SurfaceTiling[location] = (enum dml_swizzle_mode)in->tiling_info.gfx9.swizzle;
break;
+ case dml_project_dcn401:
+ // Temporarily use the gfx11 swizzle modes in dml until proper dml support for DCN4x is integrated/implemented
+ switch (in->tiling_info.gfx_addr3.swizzle) {
+ case DC_ADDR3_SW_4KB_2D:
+ case DC_ADDR3_SW_64KB_2D:
+ case DC_ADDR3_SW_256KB_2D:
+ default:
+ out->SurfaceTiling[location] = dml_sw_64kb_r_x;
+ break;
+ case DC_ADDR3_SW_LINEAR:
+ out->SurfaceTiling[location] = dml_sw_linear;
+ break;
+ }
}
switch (in->format) {
#include "dml2_translation_helper.h"
#include "dml2_mall_phantom.h"
#include "dml2_dc_resource_mgmt.h"
+#include "dml21_wrapper.h"
static void initialize_dml2_ip_params(struct dml2_context *dml2, const struct dc *in_dc, struct ip_params_st *out)
return false;
dml2_apply_debug_options(in_dc, dml2);
+ /* DML2.1 validation path */
+ if (dml2->architecture == dml2_architecture_21) {
+ out = dml21_validate(in_dc, context, dml2, fast_validate);
+ return out;
+ }
/* Use dml_validate_only for fast_validate path */
if (fast_validate)
static void dml2_init(const struct dc *in_dc, const struct dml2_configuration_options *config, struct dml2_context **dml2)
{
+ // TODO : Temporarily add DCN_VERSION_3_2 for N-1 validation. Remove DCN_VERSION_3_2 after N-1 validation phase is complete.
+ if ((in_dc->debug.using_dml21) && (in_dc->ctx->dce_version == DCN_VERSION_4_01 || in_dc->ctx->dce_version == DCN_VERSION_3_2)) {
+ dml21_reinit(in_dc, dml2, config);
+ return;
+ }
// Store config options
(*dml2)->config = *config;
case DCN_VERSION_3_21:
(*dml2)->v20.dml_core_ctx.project = dml_project_dcn321;
break;
+ case DCN_VERSION_4_01:
+ (*dml2)->v20.dml_core_ctx.project = dml_project_dcn401;
+ break;
default:
(*dml2)->v20.dml_core_ctx.project = dml_project_default;
break;
bool dml2_create(const struct dc *in_dc, const struct dml2_configuration_options *config, struct dml2_context **dml2)
{
+ // TODO : Temporarily add DCN_VERSION_3_2 for N-1 validation. Remove DCN_VERSION_3_2 after N-1 validation phase is complete.
+ if ((in_dc->debug.using_dml21) && (in_dc->ctx->dce_version == DCN_VERSION_4_01 || in_dc->ctx->dce_version == DCN_VERSION_3_2))
+ return dml21_create(in_dc, dml2, config);
+
+ /* Enter the FP section only on the legacy path, so the early DML2.1
+ * return above cannot leave it unbalanced; it is closed by DC_FP_END()
+ * after dml2_init() below.
+ */
+ DC_FP_START();
+
// Allocate Mode Lib Ctx
*dml2 = dml2_allocate_memory();
dml2_init(in_dc, config, dml2);
+ DC_FP_END();
return true;
}
void dml2_copy(struct dml2_context *dst_dml2,
struct dml2_context *src_dml2)
{
+ if (src_dml2->architecture == dml2_architecture_21) {
+ dml21_copy(dst_dml2, src_dml2);
+ return;
+ }
/* copy Mode Lib Ctx */
memcpy(dst_dml2, src_dml2, sizeof(struct dml2_context));
}
bool dml2_create_copy(struct dml2_context **dst_dml2,
struct dml2_context *src_dml2)
{
+ if (src_dml2->architecture == dml2_architecture_21)
+ return dml21_create_copy(dst_dml2, src_dml2);
/* Allocate Mode Lib Ctx */
*dst_dml2 = dml2_allocate_memory();
const struct dml2_configuration_options *config,
struct dml2_context **dml2)
{
+ // TODO : Temporarily add DCN_VERSION_3_2 for N-1 validation. Remove DCN_VERSION_3_2 after N-1 validation phase is complete.
+ if ((in_dc->debug.using_dml21) && (in_dc->ctx->dce_version == DCN_VERSION_4_01 || in_dc->ctx->dce_version == DCN_VERSION_3_2)) {
+ dml21_reinit(in_dc, dml2, config);
+ return;
+ }
dml2_init(in_dc, config, dml2);
}
struct dml2_clks_limit_table clks_table;
};
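+/* Debug override for the p-state method chosen by the DML2.1 PMO
+ * (dml2_pmo_*); applied through the pmo block of
+ * struct dml2_configuration_options below.
+ */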
+enum dml2_force_pstate_methods {
+ dml2_force_pstate_method_auto = 0,
+ dml2_force_pstate_method_vactive,
+ dml2_force_pstate_method_vblank,
+ dml2_force_pstate_method_drr,
+ dml2_force_pstate_method_subvp,
+};
+
struct dml2_configuration_options {
int dcn_pipe_count;
bool use_native_pstate_optimization;
struct dml2_soc_bbox_overrides bbox_overrides;
unsigned int max_segments_per_hubp;
unsigned int det_segment_size;
+ /* Only for debugging purposes when initializing SOCBB params via tool for DML21. */
+ struct socbb_ip_params_external *external_socbb_ip_params;
+ struct {
+ bool force_pstate_method_enable;
+ enum dml2_force_pstate_methods force_pstate_method_value;
+ } pmo;
bool map_dc_pipes_with_callbacks;
bool use_clock_dc_limits;
###############################################################################
-endif
\ No newline at end of file
+DPP_DCN401 = dcn401_dpp.o dcn401_dpp_cm.o dcn401_dpp_dscl.o
+
+AMD_DAL_DPP_DCN401 = $(addprefix $(AMDDALPATH)/dc/dpp/dcn401/,$(DPP_DCN401))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_DPP_DCN401)
+
+endif
return true;
}
+/* Compute the maximum number of lines that we can fit in the line buffer */
+void dscl2_spl_calc_lb_num_partitions(
+ bool alpha_en,
+ const struct spl_scaler_data *scl_data,
+ enum lb_memory_config lb_config,
+ int *num_part_y,
+ int *num_part_c)
+{
+ int memory_line_size_y, memory_line_size_c, memory_line_size_a,
+ lb_memory_size, lb_memory_size_c, lb_memory_size_a, num_partitions_a;
+
+ int line_size = scl_data->viewport.width < scl_data->recout.width ?
+ scl_data->viewport.width : scl_data->recout.width;
+ int line_size_c = scl_data->viewport_c.width < scl_data->recout.width ?
+ scl_data->viewport_c.width : scl_data->recout.width;
+
+ if (line_size == 0)
+ line_size = 1;
+
+ if (line_size_c == 0)
+ line_size_c = 1;
+
+ memory_line_size_y = (line_size + 5) / 6; /* +5 to ceil */
+ memory_line_size_c = (line_size_c + 5) / 6; /* +5 to ceil */
+ memory_line_size_a = (line_size + 5) / 6; /* +5 to ceil */
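+ /* The three divisions above are integer ceilings, i.e.
+ * DIV_ROUND_UP(line_size, 6) and DIV_ROUND_UP(line_size_c, 6).
+ */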
+
+ if (lb_config == LB_MEMORY_CONFIG_1) {
+ lb_memory_size = 970;
+ lb_memory_size_c = 970;
+ lb_memory_size_a = 970;
+ } else if (lb_config == LB_MEMORY_CONFIG_2) {
+ lb_memory_size = 1290;
+ lb_memory_size_c = 1290;
+ lb_memory_size_a = 1290;
+ } else if (lb_config == LB_MEMORY_CONFIG_3) {
+ /* 420 mode: using 3rd mem from Y, Cr and Cb */
+ lb_memory_size = 970 + 1290 + 484 + 484 + 484;
+ lb_memory_size_c = 970 + 1290;
+ lb_memory_size_a = 970 + 1290 + 484;
+ } else {
+ lb_memory_size = 970 + 1290 + 484;
+ lb_memory_size_c = 970 + 1290 + 484;
+ lb_memory_size_a = 970 + 1290 + 484;
+ }
+ *num_part_y = lb_memory_size / memory_line_size_y;
+ *num_part_c = lb_memory_size_c / memory_line_size_c;
+ num_partitions_a = lb_memory_size_a / memory_line_size_a;
+ if (alpha_en
+ && (num_partitions_a < *num_part_y))
+ *num_part_y = num_partitions_a;
+
+ if (*num_part_y > 64)
+ *num_part_y = 64;
+ if (*num_part_c > 64)
+ *num_part_c = 64;
+}
#define __DCN20_DPP_H__
#include "dcn10/dcn10_dpp.h"
-
+#include "spl/dc_spl_types.h"
#define TO_DCN20_DPP(dpp)\
container_of(dpp, struct dcn20_dpp, base)
int *num_part_y,
int *num_part_c);
+void dscl2_spl_calc_lb_num_partitions(
+ bool alpha_en,
+ const struct spl_scaler_data *scl_data,
+ enum lb_memory_config lb_config,
+ int *num_part_y,
+ int *num_part_c);
+
void dpp2_set_cursor_attributes(
struct dpp *dpp_base,
struct dc_cursor_attributes *cursor_attributes);
SRI(CM_BLNDGAM_RAMB_START_SLOPE_CNTL_R, CM, id),\
SRI(CM_BLNDGAM_LUT_CONTROL, CM, id)
-
-
#define DPP_REG_LIST_SH_MASK_DCN30_COMMON(mask_sh)\
TF_SF(CM0_CM_MEM_PWR_STATUS, GAMCOR_MEM_PWR_STATE, mask_sh),\
TF_SF(CM0_CM_DEALPHA, CM_DEALPHA_EN, mask_sh),\
struct scaler_data *scl_data,
const struct scaling_taps *in_taps);
-void dpp3_cnv_setup (
+void dpp3_cnv_setup(
struct dpp *dpp_base,
enum surface_pixel_format format,
enum expansion_mode mode,
return true;
}
+void dscl32_spl_calc_lb_num_partitions(
+ bool alpha_en,
+ const struct spl_scaler_data *scl_data,
+ enum lb_memory_config lb_config,
+ int *num_part_y,
+ int *num_part_c)
+{
+ int memory_line_size_y, memory_line_size_c, memory_line_size_a,
+ lb_memory_size, lb_memory_size_c, lb_memory_size_a, num_partitions_a;
+
+ int line_size = scl_data->viewport.width < scl_data->recout.width ?
+ scl_data->viewport.width : scl_data->recout.width;
+ int line_size_c = scl_data->viewport_c.width < scl_data->recout.width ?
+ scl_data->viewport_c.width : scl_data->recout.width;
+
+ if (line_size == 0)
+ line_size = 1;
+
+ if (line_size_c == 0)
+ line_size_c = 1;
+
+ memory_line_size_y = (line_size + 5) / 6; /* +5 to ceil */
+ memory_line_size_c = (line_size_c + 5) / 6; /* +5 to ceil */
+ memory_line_size_a = (line_size + 5) / 6; /* +5 to ceil */
+
+ if (lb_config == LB_MEMORY_CONFIG_1) {
+ lb_memory_size = 970;
+ lb_memory_size_c = 970;
+ lb_memory_size_a = 970;
+ } else if (lb_config == LB_MEMORY_CONFIG_2) {
+ lb_memory_size = 1290;
+ lb_memory_size_c = 1290;
+ lb_memory_size_a = 1290;
+ } else if (lb_config == LB_MEMORY_CONFIG_3) {
+ if (scl_data->viewport.width == scl_data->h_active &&
+ scl_data->viewport.height == scl_data->v_active) {
+ /* 420 mode: luma using all 3 mem from Y, plus 3rd mem from Cr and Cb */
+ /* use increased LB size for calculation only if Scaler not enabled */
+ lb_memory_size = 970 + 1290 + 1170 + 1170 + 1170;
+ lb_memory_size_c = 970 + 1290;
+ lb_memory_size_a = 970 + 1290 + 1170;
+ } else {
+ /* 420 mode: luma using all 3 mem from Y, plus 3rd mem from Cr and Cb */
+ lb_memory_size = 970 + 1290 + 484 + 484 + 484;
+ lb_memory_size_c = 970 + 1290;
+ lb_memory_size_a = 970 + 1290 + 484;
+ }
+ } else {
+ if (scl_data->viewport.width == scl_data->h_active &&
+ scl_data->viewport.height == scl_data->v_active) {
+ /* use increased LB size for calculation only if Scaler not enabled */
+ lb_memory_size = 970 + 1290 + 1170;
+ lb_memory_size_c = 970 + 1290 + 1170;
+ lb_memory_size_a = 970 + 1290 + 1170;
+ } else {
+ lb_memory_size = 970 + 1290 + 484;
+ lb_memory_size_c = 970 + 1290 + 484;
+ lb_memory_size_a = 970 + 1290 + 484;
+ }
+ }
+ *num_part_y = lb_memory_size / memory_line_size_y;
+ *num_part_c = lb_memory_size_c / memory_line_size_c;
+ num_partitions_a = lb_memory_size_a / memory_line_size_a;
+
+ if (alpha_en
+ && (num_partitions_a < *num_part_y))
+ *num_part_y = num_partitions_a;
+
+ if (*num_part_y > 32)
+ *num_part_y = 32;
+ if (*num_part_c > 32)
+ *num_part_c = 32;
+}
#include "dcn20/dcn20_dpp.h"
#include "dcn30/dcn30_dpp.h"
+#include "spl/dc_spl_types.h"
bool dpp32_construct(struct dcn3_dpp *dpp3,
struct dc_context *ctx,
const struct dcn3_dpp_shift *tf_shift,
const struct dcn3_dpp_mask *tf_mask);
+void dscl32_spl_calc_lb_num_partitions(
+ bool alpha_en,
+ const struct spl_scaler_data *scl_data,
+ enum lb_memory_config lb_config,
+ int *num_part_y,
+ int *num_part_c);
+
#endif /* __DCN32_DPP_H__ */
AMD_DISPLAY_FILES += $(addprefix $(AMDDALPATH)/dc/dsc/dcn35/,$(DSC_DCN35))
+###############################################################################
+# DCN401
+###############################################################################
+DSC_DCN401 += dcn401_dsc.o
+
+AMD_DISPLAY_FILES += $(addprefix $(AMDDALPATH)/dc/dsc/dcn401/,$(DSC_DCN401))
-endif
DSC = dc_dsc.o rc_calc.o rc_calc_dpi.o
AMD_DISPLAY_FILES += $(AMD_DAL_DSC)
+endif
#define MIN(X, Y) ((X) < (Y) ? (X) : (Y))
#endif
-/* Need to account for padding due to pixel-to-symbol packing
- * for uncompressed 128b/132b streams.
- */
-static uint32_t apply_128b_132b_stream_overhead(
- const struct dc_crtc_timing *timing, const uint32_t kbps)
-{
- uint32_t total_kbps = kbps;
-
- if (disable_128b_132b_stream_overhead)
- return kbps;
-
- if (!timing->flags.DSC) {
- struct fixed31_32 bpp;
- struct fixed31_32 overhead_factor;
-
- bpp = dc_fixpt_from_int(kbps);
- bpp = dc_fixpt_div_int(bpp, timing->pix_clk_100hz / 10);
-
- /* Symbols_per_HActive = HActive * bpp / (4 lanes * 32-bit symbol size)
- * Overhead_factor = ceil(Symbols_per_HActive) / Symbols_per_HActive
- */
- overhead_factor = dc_fixpt_from_int(timing->h_addressable);
- overhead_factor = dc_fixpt_mul(overhead_factor, bpp);
- overhead_factor = dc_fixpt_div_int(overhead_factor, 128);
- overhead_factor = dc_fixpt_div(
- dc_fixpt_from_int(dc_fixpt_ceil(overhead_factor)),
- overhead_factor);
-
- total_kbps = dc_fixpt_ceil(
- dc_fixpt_mul_int(overhead_factor, total_kbps));
- }
-
- return total_kbps;
-}
-
-uint32_t dc_bandwidth_in_kbps_from_timing(
- const struct dc_crtc_timing *timing,
- const enum dc_link_encoding_format link_encoding)
-{
- uint32_t bits_per_channel = 0;
- uint32_t kbps;
-
- if (timing->flags.DSC)
- return dc_dsc_stream_bandwidth_in_kbps(timing,
- timing->dsc_cfg.bits_per_pixel,
- timing->dsc_cfg.num_slices_h,
- timing->dsc_cfg.is_dp);
-
- switch (timing->display_color_depth) {
- case COLOR_DEPTH_666:
- bits_per_channel = 6;
- break;
- case COLOR_DEPTH_888:
- bits_per_channel = 8;
- break;
- case COLOR_DEPTH_101010:
- bits_per_channel = 10;
- break;
- case COLOR_DEPTH_121212:
- bits_per_channel = 12;
- break;
- case COLOR_DEPTH_141414:
- bits_per_channel = 14;
- break;
- case COLOR_DEPTH_161616:
- bits_per_channel = 16;
- break;
- default:
- ASSERT(bits_per_channel != 0);
- bits_per_channel = 8;
- break;
- }
-
- kbps = timing->pix_clk_100hz / 10;
- kbps *= bits_per_channel;
-
- if (timing->flags.Y_ONLY != 1) {
- /*Only YOnly make reduce bandwidth by 1/3 compares to RGB*/
- kbps *= 3;
- if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR420)
- kbps /= 2;
- else if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR422)
- kbps = kbps * 2 / 3;
- }
-
- if (link_encoding == DC_LINK_ENCODING_DP_128b_132b)
- kbps = apply_128b_132b_stream_overhead(timing, kbps);
-
- if (link_encoding == DC_LINK_ENCODING_HDMI_FRL &&
- timing->vic == 0 && timing->hdmi_vic == 0 &&
- timing->frl_uncompressed_video_bandwidth_in_kbps != 0)
- kbps = timing->frl_uncompressed_video_bandwidth_in_kbps;
-
- return kbps;
-}
-
-
/* Forward Declerations */
static bool decide_dsc_bandwidth_range(
const uint32_t min_bpp_x16,
disable_128b_132b_stream_overhead = disable;
}
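+/* Read back the disable_128b_132b_stream_overhead override set above. */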
+bool dc_get_disable_128b_132b_stream_overhead(void)
+{
+ return disable_128b_132b_stream_overhead;
+}
+
void dc_dsc_get_default_config_option(const struct dc *dc, struct dc_dsc_config_options *options)
{
options->dsc_min_slice_height_override = dc->debug.dsc_min_slice_height_override;
AMD_DAL_GPIO_DCN32 = $(addprefix $(AMDDALPATH)/dc/gpio/dcn32/,$(GPIO_DCN32))
AMD_DISPLAY_FILES += $(AMD_DAL_GPIO_DCN32)
+
+###############################################################################
+# DCN 4.01
+###############################################################################
+GPIO_DCN401 = hw_translate_dcn401.o hw_factory_dcn401.o
+
+AMD_DAL_GPIO_DCN401 = $(addprefix $(AMDDALPATH)/dc/gpio/dcn401/,$(GPIO_DCN401))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_GPIO_DCN401)
+
#include "dcn30/hw_factory_dcn30.h"
#include "dcn315/hw_factory_dcn315.h"
#include "dcn32/hw_factory_dcn32.h"
+#include "dcn401/hw_factory_dcn401.h"
bool dal_hw_factory_init(
struct hw_factory *factory,
case DCN_VERSION_3_51:
dal_hw_factory_dcn32_init(factory);
return true;
+ case DCN_VERSION_4_01:
+ dal_hw_factory_dcn401_init(factory);
+ return true;
default:
ASSERT_CRITICAL(false);
return false;
#include "dcn30/hw_translate_dcn30.h"
#include "dcn315/hw_translate_dcn315.h"
#include "dcn32/hw_translate_dcn32.h"
+#include "dcn401/hw_translate_dcn401.h"
/*
* This unit
case DCN_VERSION_3_51:
dal_hw_translate_dcn32_init(translate);
return true;
+ case DCN_VERSION_4_01:
+ dal_hw_translate_dcn401_init(translate);
+ return true;
default:
BREAK_TO_DEBUGGER();
return false;
###############################################################################
+HWSS_DCN401 = dcn401_hwseq.o dcn401_init.o
+
+AMD_DAL_HWSS_DCN401 = $(addprefix $(AMDDALPATH)/dc/hwss/dcn401/,$(HWSS_DCN401))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_HWSS_DCN401)
endif
" rc_pg_flc rc_mc_fll rc_mc_flc pr_nom_l pr_nom_c rc_pg_nl rc_pg_nc "
" mr_nom_l mr_nom_c rc_mc_nl rc_mc_nc rc_ld_pl rc_ld_pc rc_ld_l "
" rc_ld_c cha_cur0 ofst_cur1 cha_cur1 vr_af_vc0 ddrq_limt x_rt_dlay"
- " x_rp_dlay x_rr_sfl\n");
+ " x_rp_dlay x_rr_sfl rc_td_grp\n");
+
for (i = 0; i < pool->pipe_count; i++) {
struct dcn_hubp_state *s = &(TO_DCN10_HUBP(pool->hubps[i])->state);
struct _vcs_dpi_display_dlg_regs_st *dlg_regs = &s->dlg_attr;
if (!s->blank_en)
DTN_INFO("[%2d]: %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh"
" %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh"
- " %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh\n",
+ " %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %xh\n",
pool->hubps[i]->inst, dlg_regs->refcyc_h_blank_end, dlg_regs->dlg_vblank_end, dlg_regs->min_dst_y_next_start,
dlg_regs->refcyc_per_htotal, dlg_regs->refcyc_x_after_scaler, dlg_regs->dst_y_after_scaler,
dlg_regs->dst_y_prefetch, dlg_regs->dst_y_per_vm_vblank, dlg_regs->dst_y_per_row_vblank,
dlg_regs->refcyc_per_line_delivery_c, dlg_regs->chunk_hdl_adjust_cur0, dlg_regs->dst_y_offset_cur1,
dlg_regs->chunk_hdl_adjust_cur1, dlg_regs->vready_after_vcount0, dlg_regs->dst_y_delta_drq_limit,
dlg_regs->xfc_reg_transfer_delay, dlg_regs->xfc_reg_precharge_delay,
- dlg_regs->xfc_reg_remote_surface_flip_latency);
+ dlg_regs->xfc_reg_remote_surface_flip_latency, dlg_regs->refcyc_per_tdlut_group);
}
DTN_INFO("========TTU========\n");
* gsl_0 <=> pipe_ctx->stream_res.gsl_group == 1
* Using a magic value like -1 would require tracking all inits/resets
*/
- void dcn20_setup_gsl_group_as_lock(
+void dcn20_setup_gsl_group_as_lock(
const struct dc *dc,
struct pipe_ctx *pipe_ctx,
bool enable)
plane_state->color_space,
NULL);
+ if (dpp->funcs->set_cursor_matrix) {
+ dpp->funcs->set_cursor_matrix(dpp,
+ plane_state->color_space,
+ plane_state->cursor_csc_color_matrix);
+ }
if (dpp->funcs->dpp_program_bias_and_scale) {
//TODO :for CNVC set scale and bias registers if necessary
build_prescale_params(&bns_params, plane_state);
if (dc->res_pool->hubbub->funcs->program_det_size)
dc->res_pool->hubbub->funcs->program_det_size(
dc->res_pool->hubbub, pipe_ctx->plane_res.hubp->inst, pipe_ctx->det_buffer_size_kb);
+
+ if (dc->res_pool->hubbub->funcs->program_det_segments)
+ dc->res_pool->hubbub->funcs->program_det_segments(
+ dc->res_pool->hubbub, pipe_ctx->plane_res.hubp->inst, pipe_ctx->hubp_regs.det_size);
}
if (pipe_ctx->update_flags.raw || pipe_ctx->plane_state->update_flags.raw || pipe_ctx->stream->update_flags.raw)
|| pipe_ctx->plane_state->update_flags.bits.hdr_mult)
hws->funcs.set_hdr_multiplier(pipe_ctx);
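+ /* The MCM LUTs are double banked: program the bank selected by
+ * lut_bank_a, then flip the flag so the next update targets the
+ * other bank.
+ */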
+ if (hws->funcs.populate_mcm_luts) {
+ hws->funcs.populate_mcm_luts(dc, pipe_ctx, pipe_ctx->plane_state->mcm_luts,
+ pipe_ctx->plane_state->lut_bank_a);
+ pipe_ctx->plane_state->lut_bank_a = !pipe_ctx->plane_state->lut_bank_a;
+ }
if (pipe_ctx->update_flags.bits.enable ||
pipe_ctx->plane_state->update_flags.bits.in_transfer_func_change ||
pipe_ctx->plane_state->update_flags.bits.gamma_change ||
(context->res_ctx.pipe_ctx[i].plane_state && dc_state_get_pipe_subvp_type(context, &context->res_ctx.pipe_ctx[i]) == SUBVP_PHANTOM))) {
if (hubbub->funcs->program_det_size)
hubbub->funcs->program_det_size(hubbub, dc->current_state->res_ctx.pipe_ctx[i].plane_res.hubp->inst, 0);
+ if (dc->res_pool->hubbub->funcs->program_det_segments)
+ dc->res_pool->hubbub->funcs->program_det_segments(hubbub, dc->current_state->res_ctx.pipe_ctx[i].plane_res.hubp->inst, 0);
}
hws->funcs.plane_atomic_disconnect(dc, dc->current_state, &dc->current_state->res_ctx.pipe_ctx[i]);
DC_LOG_DC("Reset mpcc for pipe %d\n", dc->current_state->res_ctx.pipe_ctx[i].pipe_idx);
#include "dcn30/dcn30_hwseq.h"
#include "dcn31/dcn31_hwseq.h"
#include "dcn32/dcn32_hwseq.h"
+#include "dcn401/dcn401_hwseq.h"
#include "dcn32_init.h"
static const struct hw_sequencer_funcs dcn32_funcs = {
.is_dp_dig_pixel_rate_div_policy = dcn32_is_dp_dig_pixel_rate_div_policy,
.apply_single_controller_ctx_to_hw = dce110_apply_single_controller_ctx_to_hw,
.reset_back_end_for_pipe = dcn20_reset_back_end_for_pipe,
+ .populate_mcm_luts = dcn401_populate_mcm_luts,
};
void dcn32_hw_sequencer_init_functions(struct dc *dc)
uint8_t subvp_index;
};
+struct fams2_global_control_lock_fast_params {
+ struct dc *dc;
+ bool is_required;
+ bool lock;
+};
+
union block_sequence_params {
struct update_plane_addr_params update_plane_addr_params;
struct subvp_pipe_control_lock_fast_params subvp_pipe_control_lock_fast_params;
struct set_output_csc_params set_output_csc_params;
struct set_ocsc_default_params set_ocsc_default_params;
struct subvp_save_surf_addr subvp_save_surf_addr;
+ struct fams2_global_control_lock_fast_params fams2_global_control_lock_fast_params;
};
enum block_sequence_func {
MPC_SET_OUTPUT_CSC,
MPC_SET_OCSC_DEFAULT,
DMUB_SUBVP_SAVE_SURF_ADDR,
+ DMUB_FAMS2_GLOBAL_CONTROL_LOCK_FAST,
};
struct block_sequence {
bool (*is_pipe_topology_transition_seamless)(struct dc *dc,
const struct dc_state *cur_ctx,
const struct dc_state *new_ctx);
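+ /* FAMS2 hooks; the _fast variant takes its arguments through
+ * union block_sequence_params so it can run as a block-sequence step
+ * (DMUB_FAMS2_GLOBAL_CONTROL_LOCK_FAST).
+ */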
+ void (*fams2_global_control_lock)(struct dc *dc,
+ struct dc_state *context,
+ bool lock);
+ void (*fams2_update_config)(struct dc *dc,
+ struct dc_state *context,
+ bool enable);
+ void (*fams2_global_control_lock_fast)(union block_sequence_params *params);
void (*set_long_vtotal)(struct pipe_ctx **pipe_ctx, int num_pipes, uint32_t v_total_min, uint32_t v_total_max);
};
struct pipe_ctx *pipe_ctx,
struct tg_color *color);
+void get_fams2_visual_confirm_color(
+ struct dc *dc,
+ struct dc_state *context,
+ struct pipe_ctx *pipe_ctx,
+ struct tg_color *color);
+
void get_mclk_switch_visual_confirm_color(
struct pipe_ctx *pipe_ctx,
struct tg_color *color);
void (*reset_back_end_for_pipe)(struct dc *dc,
struct pipe_ctx *pipe_ctx,
struct dc_state *context);
+ void (*populate_mcm_luts)(struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ struct dc_cm2_func_luts mcm_luts,
+ bool lut_bank_a);
};
struct dce_hwseq {
#include "panel_cntl.h"
#include "dmub/inc/dmub_cmd.h"
#include "pg_cntl.h"
+#include "spl/dc_spl.h"
#define MAX_CLOCK_SOURCES 7
#define MAX_SVP_PHANTOM_STREAMS 2
#include "transform.h"
#include "dpp.h"
+#include "dml2/dml21/inc/dml_top_dchub_registers.h"
+#include "dml2/dml21/inc/dml_top_types.h"
+
struct resource_pool;
struct dc_state;
struct resource_context;
struct dc *dc,
struct dc_state *new_ctx,
struct dc_stream_state *stream);
+
enum dc_status (*patch_unknown_plane_state)(
struct dc_plane_state *plane_state);
struct resource_context *res_ctx,
const struct resource_pool *pool,
struct dc_stream_state *stream);
+
void (*populate_dml_writeback_from_context)(
struct dc *dc,
struct resource_context *res_ctx,
struct dc_state *context,
display_e2e_pipe_params_st *pipes,
int pipe_cnt);
+
void (*update_bw_bounding_box)(
struct dc *dc,
struct clk_bw_params *bw_params);
struct abm *abm;
struct dmcu *dmcu;
struct dmub_psr *psr;
-
struct dmub_replay *replay;
struct abm *multiple_abms[MAX_PIPES];
/* scl_data is scratch space required to program a plane */
struct scaler_data scl_data;
/* Below pointers to hw objects are required to enable the plane */
+ /* spl_in and spl_out are the input and output structures for SPL
+ * which are required when using Scaler Programming Library
+ * these are scratch spaces needed when programming a plane
+ */
+ struct spl_in spl_in;
+ struct spl_out spl_out;
+ /* Below pointers to hw objects are required to enable the plane */
struct hubp *hubp;
struct mem_input *mi;
struct input_pixel_processor *ipp;
int det_buffer_size_kb;
bool unbounded_req;
unsigned int surface_size_in_mall_bytes;
+ struct dml2_dchub_per_pipe_register_set hubp_regs;
+ struct dml2_hubp_pipe_mcache_regs mcache_regs;
struct dwbc *dwbc;
struct mcif_wb *mcif_wb;
unsigned int mall_subvp_size_bytes;
unsigned int legacy_svp_drr_stream_index;
bool legacy_svp_drr_stream_index_valid;
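+ /* DML2.1 outputs consumed by DCN4x programming: per-plane mcache
+ * allocations, per-stream FAMS2 static state for DMUB, and display
+ * arbiter registers.
+ */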
+ struct dml2_mcache_surface_allocation mcache_allocations[DML2_MAX_PLANES];
+ struct dmub_fams2_stream_static_state fams2_stream_params[DML2_MAX_PLANES];
+ unsigned fams2_stream_count;
+ struct dml2_display_arb_regs arb_regs;
};
union bw_output {
#define WM_C 2
#define WM_D 3
#define WM_SET_COUNT 4
+#define WM_1A 2
+#define WM_1B 3
#define DCN_MINIMUM_DISPCLK_Khz 100000
#define DCN_MINIMUM_DPPCLK_Khz 100000
struct dummy_pstate_entry {
unsigned int dram_speed_mts;
- double dummy_pstate_latency_us;
+ unsigned int dummy_pstate_latency_us;
};
struct clk_bw_params {
unsigned int vram_type;
unsigned int num_channels;
unsigned int dram_channel_width_bytes;
- unsigned int dispclk_vco_khz;
+ unsigned int dispclk_vco_khz;
unsigned int dc_mode_softmax_memclk;
unsigned int max_memclk_mhz;
struct clk_limit_table clk_table;
#define CLK_COMMON_REG_LIST_DCN_BASE() \
SR(DENTIST_DISPCLK_CNTL)
-#define VBIOS_SMU_MSG_BOX_REG_LIST_RV() \
- .MP1_SMN_C2PMSG_91 = mmMP1_SMN_C2PMSG_91, \
- .MP1_SMN_C2PMSG_83 = mmMP1_SMN_C2PMSG_83, \
- .MP1_SMN_C2PMSG_67 = mmMP1_SMN_C2PMSG_67
-
#define CLK_COMMON_REG_LIST_DCN_201() \
SR(DENTIST_DISPCLK_CNTL), \
CLK_SRI(CLK4_CLK_PLL_REQ, CLK4, 0), \
CLK_SRI(CLK3_CLK2_DFS_CNTL, CLK3, 0)
#define CLK_REG_LIST_DCN3() \
- CLK_COMMON_REG_LIST_DCN_BASE(), \
+ SR(DENTIST_DISPCLK_CNTL), \
CLK_SRI(CLK0_CLK_PLL_REQ, CLK02, 0), \
CLK_SRI(CLK0_CLK2_DFS_CNTL, CLK02, 0)
type DENTIST_DISPCLK_WDIVIDER; \
type DENTIST_DISPCLK_CHG_DONE;
-/*
- ***************************************************************************************
- ****************** Clock Manager Private Structures ***********************************
- ***************************************************************************************
- */
#define CLK20_REG_FIELD_LIST(type) \
type DENTIST_DPPCLK_WDIVIDER; \
type DENTIST_DPPCLK_CHG_DONE; \
type FbMult_int; \
type FbMult_frac;
-#define VBIOS_SMU_REG_FIELD_LIST(type) \
- type CONTENT;
-
-struct clk_mgr_shift {
- CLK_REG_FIELD_LIST(uint8_t)
- CLK20_REG_FIELD_LIST(uint8_t)
- VBIOS_SMU_REG_FIELD_LIST(uint32_t)
-};
-
-struct clk_mgr_mask {
- CLK_REG_FIELD_LIST(uint32_t)
- CLK20_REG_FIELD_LIST(uint32_t)
- VBIOS_SMU_REG_FIELD_LIST(uint32_t)
-};
+/*
+ ***************************************************************************************
+ ****************** Clock Manager Private Structures ***********************************
+ ***************************************************************************************
+ */
struct clk_mgr_registers {
uint32_t DPREFCLK_CNTL;
uint32_t DENTIST_DISPCLK_CNTL;
+
uint32_t CLK4_CLK2_CURRENT_CNT;
uint32_t CLK4_CLK_PLL_REQ;
uint32_t CLK0_CLK1_DFS_CNTL;
uint32_t CLK0_CLK3_DFS_CNTL;
uint32_t CLK0_CLK4_DFS_CNTL;
+};
- uint32_t MP1_SMN_C2PMSG_67;
- uint32_t MP1_SMN_C2PMSG_83;
- uint32_t MP1_SMN_C2PMSG_91;
+struct clk_mgr_shift {
+ CLK_REG_FIELD_LIST(uint8_t)
+ CLK20_REG_FIELD_LIST(uint8_t)
+};
+
+struct clk_mgr_mask {
+ CLK_REG_FIELD_LIST(uint32_t)
+ CLK20_REG_FIELD_LIST(uint32_t)
};
enum clock_type {
* blocks for the Data Fabric Interface that are not clock/power gated.
*/
+#include "dc/dc_hw_types.h"
+
enum dcc_control {
dcc_control__256_256_xxx,
dcc_control__128_128_xxx,
enum segment_order *segment_order_horz,
enum segment_order *segment_order_vert);
+ bool (*dcc_support_swizzle_addr3)(
+ enum swizzle_mode_addr3_values swizzle,
+ unsigned int plane_pitch,
+ unsigned int bytes_per_element,
+ enum segment_order *segment_order_horz,
+ enum segment_order *segment_order_vert);
+
+ bool (*dcc_support_pixel_format_plane0_plane1)(
+ enum surface_pixel_format format,
+ unsigned int *plane0_bpe,
+ unsigned int *plane1_bpe);
bool (*dcc_support_pixel_format)(
enum surface_pixel_format format,
unsigned int *bytes_per_element);
void (*set_request_limit)(struct hubbub *hubbub, int memory_channel_count, int words_per_channel);
void (*dchubbub_init)(struct hubbub *hubbub);
void (*get_mall_en)(struct hubbub *hubbub, unsigned int *mall_in_use);
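+ /* DCN4x sizes the DET and compressed buffer in segment units rather
+ * than kilobytes; cf. program_det_size() above.
+ */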
+ void (*program_det_segments)(struct hubbub *hubbub, int hubp_inst, unsigned det_buffer_size_seg);
+ void (*program_compbuf_segments)(struct hubbub *hubbub, unsigned compbuf_size_seg, bool safe_to_increase);
};
struct hubbub {
void (*dpp_get_gamut_remap)(struct dpp *dpp_base,
struct dpp_grph_csc_adjustment *adjust);
+ void (*set_cursor_matrix)(
+ struct dpp *dpp_base,
+ enum dc_color_space color_space,
+ struct dc_csc_transform cursor_csc_color_matrix);
};
#include "mem_input.h"
#include "cursor_reg_cache.h"
+#include "dml2/dml21/inc/dml_top_dchub_registers.h"
+
#define OPP_ID_INVALID 0xf
#define MAX_TTU 0xffffff
hubp_ind_block_64b_no_128bcl,
};
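+/* HUBP 3D LUT fast-load (FL) programming state; the matching MPC hooks are
+ * update_3dlut_fast_load_select() and get_3dlut_fast_load_status().
+ */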
+enum hubp_3dlut_fl_mode {
+ hubp_3dlut_fl_mode_disable = 0,
+ hubp_3dlut_fl_mode_native_1 = 1,
+ hubp_3dlut_fl_mode_native_2 = 2,
+ hubp_3dlut_fl_mode_transform = 3
+};
+
+enum hubp_3dlut_fl_format {
+ hubp_3dlut_fl_format_unorm_12msb_bitslice = 0,
+ hubp_3dlut_fl_format_unorm_12lsb_bitslice = 1,
+ hubp_3dlut_fl_format_float_fp1_5_10 = 2
+};
+
+enum hubp_3dlut_fl_addressing_mode {
+ hubp_3dlut_fl_addressing_mode_sw_linear = 0,
+ hubp_3dlut_fl_addressing_mode_simple_linear = 1
+};
+
+enum hubp_3dlut_fl_width {
+ hubp_3dlut_fl_width_17 = 17,
+ hubp_3dlut_fl_width_33 = 33,
+ hubp_3dlut_fl_width_transformed = 4916
+};
+
+enum hubp_3dlut_fl_crossbar_bit_slice {
+ hubp_3dlut_fl_crossbar_bit_slice_0_15 = 0,
+ hubp_3dlut_fl_crossbar_bit_slice_16_31 = 1,
+ hubp_3dlut_fl_crossbar_bit_slice_32_47 = 2,
+ hubp_3dlut_fl_crossbar_bit_slice_48_63 = 3
+};
+
struct hubp {
const struct hubp_funcs *funcs;
struct dc_context *ctx;
bool (*hubp_in_blank)(struct hubp *hubp);
void (*hubp_soft_reset)(struct hubp *hubp, bool reset);
+ void (*hubp_set_flip_int)(struct hubp *hubp);
+
void (*hubp_update_force_pstate_disallow)(struct hubp *hubp, bool allow);
void (*hubp_update_force_cursor_pstate_disallow)(struct hubp *hubp, bool allow);
void (*hubp_update_mall_sel)(struct hubp *hubp, uint32_t mall_sel, bool c_cursor);
void (*hubp_prepare_subvp_buffering)(struct hubp *hubp, bool enable);
-
- void (*hubp_set_flip_int)(struct hubp *hubp);
+ void (*hubp_surface_update_lock)(struct hubp *hubp,
+ bool lock);
void (*program_extended_blank)(struct hubp *hubp,
unsigned int min_dst_y_next_start_optimized);
void (*hubp_wait_pipe_read_start)(struct hubp *hubp);
+ void (*hubp_update_3dlut_fl_bias_scale)(struct hubp *hubp, uint16_t bias, uint16_t scale);
+ void (*hubp_program_3dlut_fl_mode)(struct hubp *hubp,
+ enum hubp_3dlut_fl_mode mode);
+ void (*hubp_program_3dlut_fl_format)(struct hubp *hubp,
+ enum hubp_3dlut_fl_format format);
+ void (*hubp_program_3dlut_fl_addr)(struct hubp *hubp,
+ const struct dc_plane_address address);
+ void (*hubp_program_3dlut_fl_dlg_param)(struct hubp *hubp, int refcyc_per_3dlut_group);
+ void (*hubp_enable_3dlut_fl)(struct hubp *hubp, bool enable);
+ void (*hubp_program_3dlut_fl_addressing_mode)(struct hubp *hubp, enum hubp_3dlut_fl_addressing_mode addr_mode);
+ void (*hubp_program_3dlut_fl_width)(struct hubp *hubp, enum hubp_3dlut_fl_width width);
+ void (*hubp_program_3dlut_fl_tmz_protected)(struct hubp *hubp, bool protection_enabled);
+ void (*hubp_program_3dlut_fl_crossbar)(struct hubp *hubp,
+ enum hubp_3dlut_fl_crossbar_bit_slice bit_slice_y_g,
+ enum hubp_3dlut_fl_crossbar_bit_slice bit_slice_cb_b,
+ enum hubp_3dlut_fl_crossbar_bit_slice bit_slice_cr_r);
+ int (*hubp_get_3dlut_fl_done)(struct hubp *hubp);
};
#endif
#include "include/grph_object_id.h"
#include "dml/display_mode_structs.h"
+#include "dml2/dml21/inc/dml_top_dchub_registers.h"
struct dchub_init_data;
struct cstate_pstate_watermarks_st {
uint32_t urgent_ns;
uint32_t frac_urg_bw_nom;
uint32_t frac_urg_bw_flip;
- int32_t urgent_latency_ns;
+ uint32_t urgent_latency_ns;
struct cstate_pstate_watermarks_st cstate_pstate;
uint32_t usr_retraining_ns;
};
struct dcn_watermarks c;
struct dcn_watermarks d;
}; // legacy
+ struct {
+ struct dml2_dchub_watermark_regs a;
+ struct dml2_dchub_watermark_regs b;
+ struct dml2_dchub_watermark_regs c;
+ struct dml2_dchub_watermark_regs d;
+ } dcn4; //dcn4+
};
struct dce_watermarks {
MPCC_ALPHA_BLEND_MODE_GLOBAL_ALPHA
};
+enum mpcc_movable_cm_location {
+ MPCC_MOVABLE_CM_LOCATION_BEFORE,
+ MPCC_MOVABLE_CM_LOCATION_AFTER,
+};
+
+enum MCM_LUT_XABLE {
+ MCM_LUT_DISABLE,
+ MCM_LUT_DISABLED = MCM_LUT_DISABLE,
+ MCM_LUT_ENABLE,
+ MCM_LUT_ENABLED = MCM_LUT_ENABLE,
+};
+
+enum MCM_LUT_ID {
+ MCM_LUT_3DLUT,
+ MCM_LUT_1DLUT,
+ MCM_LUT_SHAPER
+};
+
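+/* Parameter payload for populate_lut(); pwl carries the 1D LUT and shaper
+ * curves, lut3d the 3D LUT, matching the MCM_LUT_ID passed alongside.
+ */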
+union mcm_lut_params {
+ const struct pwl_params *pwl;
+ const struct tetrahedral_params *lut3d;
+};
+
/**
* struct mpcc_blnd_cfg - MPCC blending configuration
*/
struct mpc_grph_gamut_adjustment {
struct fixed31_32 temperature_matrix[CSC_TEMPERATURE_MATRIX_SIZE];
enum graphics_gamut_adjust_type gamut_adjust_type;
+ enum mpcc_gamut_remap_id mpcc_gamut_remap_block_id;
};
struct mpcc_sm_cfg {
int (*release_rmu)(struct mpc *mpc, int mpcc_id);
unsigned int (*get_mpc_out_mux)(
- struct mpc *mpc,
- int opp_id);
+ struct mpc *mpc,
+ int opp_id);
void (*set_bg_color)(struct mpc *mpc,
struct tg_color *bg_color,
int mpcc_id);
void (*set_mpc_mem_lp_mode)(struct mpc *mpc);
+ void (*set_movable_cm_location)(struct mpc *mpc, enum mpcc_movable_cm_location location, int mpcc_id);
+ void (*update_3dlut_fast_load_select)(struct mpc *mpc, int mpcc_id, int hubp_idx);
+ void (*get_3dlut_fast_load_status)(struct mpc *mpc, int mpcc_id, uint32_t *done, uint32_t *soft_underflow, uint32_t *hard_underflow);
+ void (*populate_lut)(struct mpc *mpc, const enum MCM_LUT_ID id, const union mcm_lut_params params,
+ bool lut_bank_a, int mpcc_id);
+ void (*program_lut_read_write_control)(struct mpc *mpc, const enum MCM_LUT_ID id, bool lut_bank_a, int mpcc_id);
+ void (*program_lut_mode)(struct mpc *mpc, const enum MCM_LUT_ID id, const enum MCM_LUT_XABLE xable,
+ bool lut_bank_a, int mpcc_id);
};
#endif
#include "hw_shared.h"
#include "dc_hw_types.h"
#include "fixed31_32.h"
+#include "spl/dc_spl_types.h"
#include "spl/dc_spl_types.h"
struct sharpness_adj sharpness;
enum pixel_format format;
struct line_buffer_params lb_params;
+ // Below struct holds the scaler values to program hw registers
+ struct dscl_prog_data dscl_prog_data;
};
struct transform_funcs {
struct transform *xfm_base,
const struct dc_cursor_attributes *attr);
+ bool (*transform_program_blnd_lut)(
+ struct transform *xfm,
+ const struct pwl_params *params);
+ bool (*transform_program_shaper_lut)(
+ struct transform *xfm,
+ const struct pwl_params *params);
+ bool (*transform_program_3dlut)(
+ struct transform *xfm,
+ struct tetrahedral_params *params);
};
const uint16_t *get_filter_2tap_16p(void);
#include "core_status.h"
#include "dal_asic_id.h"
#include "dm_pp_smu.h"
+#include "spl/dc_spl.h"
#define MEMORY_TYPE_MULTIPLIER_CZ 4
#define MEMORY_TYPE_HBM 2
struct hpo_dp_stream_encoder *(*create_hpo_dp_stream_encoder)(
enum engine_id eng_id, struct dc_context *ctx);
-
struct hpo_dp_link_encoder *(*create_hpo_dp_link_encoder)(
uint8_t inst,
struct dc_context *ctx);
-
struct dce_hwseq *(*create_hwseq)(
struct dc_context *ctx);
};
bool check_subvp_sw_cursor_fallback_req(const struct dc *dc, struct dc_stream_state *stream);
+/* Get hw programming parameters container from pipe context
+ * @pipe_ctx: pipe context
+ * @dscl_prog_data: struct to hold programmable hw reg values
+ */
+struct dscl_prog_data *resource_get_dscl_prog_data(struct pipe_ctx *pipe_ctx);
/* Setup dc callbacks for dml2
* @dc: the display core structure
* @dml2_options: struct to hold callbacks
AMD_DAL_IRQ_DCN351= $(addprefix $(AMDDALPATH)/dc/irq/dcn351/,$(IRQ_DCN351))
AMD_DISPLAY_FILES += $(AMD_DAL_IRQ_DCN351)
+
+###############################################################################
+# DCN 401
+###############################################################################
+IRQ_DCN401 = irq_service_dcn401.o
+
+AMD_DAL_IRQ_DCN401= $(addprefix $(AMDDALPATH)/dc/irq/dcn401/,$(IRQ_DCN401))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_IRQ_DCN401)
###############################################################################
###############################################################################
+OPTC_DCN401 = dcn401_optc.o
+
+AMD_DAL_OPTC_DCN401 = $(addprefix $(AMDDALPATH)/dc/optc/dcn401/,$(OPTC_DCN401))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_OPTC_DCN401)
endif
+
SF(GSL_SOURCE_SELECT, GSL2_READY_SOURCE_SEL, mask_sh),\
SF(OTG0_OTG_GLOBAL_CONTROL2, MANUAL_FLOW_CONTROL_SEL, mask_sh)
-
-
#define TG_COMMON_MASK_SH_LIST_DCN1_0(mask_sh)\
TG_COMMON_MASK_SH_LIST_DCN(mask_sh),\
SF(OTG0_OTG_TEST_PATTERN_PARAMETERS, OTG_TEST_PATTERN_INC0, mask_sh),\
#define TG_REG_FIELD_LIST_DCN3_2(type) \
type OTG_H_TIMING_DIV_MODE_MANUAL;
-
#define TG_REG_FIELD_LIST_DCN3_5(type) \
type OTG_CRC0_WINDOWA_X_START_READBACK;\
type OTG_CRC0_WINDOWA_X_END_READBACK;\
###############################################################################
+# DCN401
+###############################################################################
+
+RESOURCE_DCN401 = dcn401_resource.o
+
+AMD_DAL_RESOURCE_DCN401 = $(addprefix $(AMDDALPATH)/dc/resource/dcn401/,$(RESOURCE_DCN401))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_RESOURCE_DCN401)
+
endif
.force_disable_subvp = false,
.exit_idle_opt_for_cursor_updates = true,
.using_dml2 = false,
+ .using_dml21 = false, // TODO : Temporary for N-1 validation. Remove after N-1 is done.
.enable_single_display_2to1_odm_policy = true,
/* Must match enable_single_display_2to1_odm_policy to support dynamic ODM transitions*/
#define ASICREV_IS_GC_11_0_3(eChipRev) (eChipRev >= GC_11_0_3_A0 && eChipRev < GC_11_UNKNOWN)
#define ASICREV_IS_GC_11_0_4(eChipRev) (eChipRev >= GC_11_0_4_A0 && eChipRev < GC_11_UNKNOWN)
+#define AMDGPU_FAMILY_GC_12_0_0 152 /* GC 12.0.0 */
+
+enum {
+ GC_12_0_0_A0 = 0x50,
+ GC_12_0_1_A0 = 0x40,
+ GC_12_UNKNOWN = 0xFF,
+};
+
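+/* GC_12_0_1_A0 (0x40) precedes GC_12_0_0_A0 (0x50), so DCN4 covers
+ * revisions 0x40..0x4F and DCN4.01 covers 0x50..0xFE.
+ */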
+#define ASICREV_IS_DCN4(eChipRev) (eChipRev >= GC_12_0_1_A0 && eChipRev < GC_12_0_0_A0)
+#define ASICREV_IS_DCN401(eChipRev) (eChipRev >= GC_12_0_0_A0 && eChipRev < GC_12_UNKNOWN)
+
/*
* ASIC chip ID
*/
DCN_VERSION_3_21,
DCN_VERSION_3_5,
DCN_VERSION_3_51,
+ DCN_VERSION_4_01,
DCN_VERSION_MAX
};