2 * Copyright 2016 Advanced Micro Devices, Inc.
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
26 #include "dm_services.h"
30 #include "include/irq_service_interface.h"
31 #include "dcn10/dcn10_resource.h"
33 #include "dcn10/dcn10_ipp.h"
34 #include "dcn10/dcn10_mpc.h"
35 #include "irq/dcn10/irq_service_dcn10.h"
36 #include "dcn10/dcn10_dpp.h"
37 #include "dcn10/dcn10_timing_generator.h"
38 #include "dcn10/dcn10_hw_sequencer.h"
39 #include "dce110/dce110_hw_sequencer.h"
40 #include "dcn10/dcn10_opp.h"
41 #include "dce/dce_link_encoder.h"
42 #include "dce/dce_stream_encoder.h"
43 #include "dce/dce_clocks.h"
44 #include "dce/dce_clock_source.h"
45 #include "dcn10/dcn10_mem_input.h"
46 #include "dce/dce_audio.h"
47 #include "dce/dce_hwseq.h"
48 #include "../virtual/virtual_stream_encoder.h"
49 #include "dce110/dce110_resource.h"
51 #include "vega10/soc15ip.h"
53 #include "raven1/DCN/dcn_1_0_offset.h"
54 #include "raven1/DCN/dcn_1_0_sh_mask.h"
56 #include "raven1/NBIO/nbio_7_0_offset.h"
58 #include "raven1/MMHUB/mmhub_9_1_offset.h"
59 #include "raven1/MMHUB/mmhub_9_1_sh_mask.h"
61 #include "reg_helper.h"
62 #include "dce/dce_abm.h"
63 #include "dce/dce_dmcu.h"
/* Fallback register offsets for the DP PHY internal-control register of link
 * encoders DP0..DP6. Only defined when the included raven1 register headers
 * do not already provide them; used below by the link_regs() macro via
 * SRI(DP_DPHY_INTERNAL_CTRL, DP, id).
 */
65 #ifndef mmDP0_DP_DPHY_INTERNAL_CTRL
66 #define mmDP0_DP_DPHY_INTERNAL_CTRL 0x210f
67 #define mmDP0_DP_DPHY_INTERNAL_CTRL_BASE_IDX 2
68 #define mmDP1_DP_DPHY_INTERNAL_CTRL 0x220f
69 #define mmDP1_DP_DPHY_INTERNAL_CTRL_BASE_IDX 2
70 #define mmDP2_DP_DPHY_INTERNAL_CTRL 0x230f
71 #define mmDP2_DP_DPHY_INTERNAL_CTRL_BASE_IDX 2
72 #define mmDP3_DP_DPHY_INTERNAL_CTRL 0x240f
73 #define mmDP3_DP_DPHY_INTERNAL_CTRL_BASE_IDX 2
74 #define mmDP4_DP_DPHY_INTERNAL_CTRL 0x250f
75 #define mmDP4_DP_DPHY_INTERNAL_CTRL_BASE_IDX 2
76 #define mmDP5_DP_DPHY_INTERNAL_CTRL 0x260f
77 #define mmDP5_DP_DPHY_INTERNAL_CTRL_BASE_IDX 2
78 #define mmDP6_DP_DPHY_INTERNAL_CTRL 0x270f
79 #define mmDP6_DP_DPHY_INTERNAL_CTRL_BASE_IDX 2
/* Clock-source indices for the PLL array built in construct(); enumerator
 * list elided in this view.
 */
83 enum dcn10_clk_src_array_id {
91 /* begin *********************
92 * macros to expend register list macro defined in HW object header file */
/* BASE_INNER/BASE resolve a segment index to the DCE aperture base address;
 * SR = single register, SRI = per-block-instance register, SRII = same but
 * filling one slot of a register array. All compute base + mm-offset.
 */
95 #define BASE_INNER(seg) \
96 DCE_BASE__INST0_SEG ## seg
101 #define SR(reg_name)\
102 .reg_name = BASE(mm ## reg_name ## _BASE_IDX) + \
105 #define SRI(reg_name, block, id)\
106 .reg_name = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
107 mm ## block ## id ## _ ## reg_name
110 #define SRII(reg_name, block, id)\
111 .reg_name[id] = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
112 mm ## block ## id ## _ ## reg_name
/* Same address arithmetic, but against the NBIO (NBIF) and MMHUB aperture
 * bases instead of the DCE one.
 */
115 #define NBIO_BASE_INNER(seg) \
116 NBIF_BASE__INST0_SEG ## seg
118 #define NBIO_BASE(seg) \
121 #define NBIO_SR(reg_name)\
122 .reg_name = NBIO_BASE(mm ## reg_name ## _BASE_IDX) + \
126 #define MMHUB_BASE_INNER(seg) \
127 MMHUB_BASE__INST0_SEG ## seg
129 #define MMHUB_BASE(seg) \
130 MMHUB_BASE_INNER(seg)
132 #define MMHUB_SR(reg_name)\
133 .reg_name = MMHUB_BASE(mm ## reg_name ## _BASE_IDX) + \
136 /* macros to expend register list macro defined in HW object header file
137 * end *********************/
/* DMCU (display microcontroller) register map plus its shift/mask tables,
 * consumed by dcn10_dmcu_create() in construct().
 */
140 static const struct dce_dmcu_registers dmcu_regs = {
141 DMCU_DCN10_REG_LIST()
144 static const struct dce_dmcu_shift dmcu_shift = {
145 DMCU_MASK_SH_LIST_DCN10(__SHIFT)
148 static const struct dce_dmcu_mask dmcu_mask = {
149 DMCU_MASK_SH_LIST_DCN10(_MASK)
/* ABM (adaptive backlight management) tables for instance 0. */
152 static const struct dce_abm_registers abm_regs = {
153 ABM_DCN10_REG_LIST(0)
156 static const struct dce_abm_shift abm_shift = {
157 ABM_MASK_SH_LIST_DCN10(__SHIFT)
160 static const struct dce_abm_mask abm_mask = {
161 ABM_MASK_SH_LIST_DCN10(_MASK)
/* Stream-encoder register set per engine. The AFMT_AVI_INFO* offsets are
 * forced to 0 — presumably those registers do not exist on DCN10 and the
 * shared dce110 encoder code must not touch them; confirm against
 * dce_stream_encoder.h.
 */
164 #define stream_enc_regs(id)\
166 SE_DCN_REG_LIST(id),\
168 .AFMT_AVI_INFO0 = 0,\
169 .AFMT_AVI_INFO1 = 0,\
170 .AFMT_AVI_INFO2 = 0,\
171 .AFMT_AVI_INFO3 = 0,\
174 static const struct dce110_stream_enc_registers stream_enc_regs[] = {
/* Shift/mask tables; the matching AVI/GENERIC fields are zeroed for the
 * same reason as the register offsets above.
 */
181 static const struct dce_stream_encoder_shift se_shift = {
182 SE_COMMON_MASK_SH_LIST_DCN10(__SHIFT)
185 static const struct dce_stream_encoder_mask se_mask = {
186 SE_COMMON_MASK_SH_LIST_DCN10(_MASK),
187 .AFMT_GENERIC0_UPDATE = 0,
188 .AFMT_GENERIC2_UPDATE = 0,
191 .HDMI_AVI_INFO_SEND = 0,
192 .HDMI_AVI_INFO_CONT = 0,
193 .HDMI_AVI_INFO_LINE = 0,
194 .DP_SEC_AVI_ENABLE = 0,
195 .AFMT_AVI_INFO_VERSION = 0
/* Azalia audio endpoint register tables, one entry per audio instance. */
198 #define audio_regs(id)\
200 AUD_COMMON_REG_LIST(id)\
203 static const struct dce_audio_registers audio_regs[] = {
/* DCE12-style audio shift/mask list: endpoint index/data registers plus the
 * common base list.
 */
210 #define DCE120_AUD_COMMON_MASK_SH_LIST(mask_sh)\
211 SF(AZF0ENDPOINT0_AZALIA_F0_CODEC_ENDPOINT_INDEX, AZALIA_ENDPOINT_REG_INDEX, mask_sh),\
212 SF(AZF0ENDPOINT0_AZALIA_F0_CODEC_ENDPOINT_DATA, AZALIA_ENDPOINT_REG_DATA, mask_sh),\
213 AUD_COMMON_MASK_SH_LIST_BASE(mask_sh)
215 static const struct dce_audio_shift audio_shift = {
216 DCE120_AUD_COMMON_MASK_SH_LIST(__SHIFT)
/* NOTE(review): the struct tag "dce_aduio_mask" is misspelled, but it must
 * match the declaration in dce_audio.h — fix it there first before renaming
 * here.
 */
219 static const struct dce_aduio_mask audio_mask = {
220 DCE120_AUD_COMMON_MASK_SH_LIST(_MASK)
/* Per-link-encoder AUX and HPD register sets, indexed by channel/hpd source
 * in dcn10_link_encoder_create().
 */
223 #define aux_regs(id)\
228 static const struct dce110_link_enc_aux_registers link_enc_aux_regs[] = {
237 #define hpd_regs(id)\
242 static const struct dce110_link_enc_hpd_registers link_enc_hpd_regs[] = {
/* Link-encoder registers: the common DCN10 list plus the per-encoder DPHY
 * internal-control register (fallback offsets defined near the top of the
 * file when the headers lack them).
 */
251 #define link_regs(id)\
253 LE_DCN10_REG_LIST(id), \
254 SRI(DP_DPHY_INTERNAL_CTRL, DP, id) \
257 static const struct dce110_link_enc_registers link_enc_regs[] = {
/* Input pixel processor (IPP) register/shift/mask tables, one per pipe. */
267 #define ipp_regs(id)\
269 IPP_REG_LIST_DCN10(id),\
272 static const struct dcn10_ipp_registers ipp_regs[] = {
279 static const struct dcn10_ipp_shift ipp_shift = {
280 IPP_MASK_SH_LIST_DCN10(__SHIFT)
283 static const struct dcn10_ipp_mask ipp_mask = {
284 IPP_MASK_SH_LIST_DCN10(_MASK),
/* Output pixel processor (OPP) tables, one per pipe. */
287 #define opp_regs(id)\
289 OPP_REG_LIST_DCN10(id),\
292 static const struct dcn10_opp_registers opp_regs[] = {
299 static const struct dcn10_opp_shift opp_shift = {
300 OPP_MASK_SH_LIST_DCN10(__SHIFT)
303 static const struct dcn10_opp_mask opp_mask = {
304 OPP_MASK_SH_LIST_DCN10(_MASK),
309 TF_REG_LIST_DCN10(id),\
/* DPP/transform ("tf") tables, one per pipe. */
312 static const struct dcn_dpp_registers tf_regs[] = {
319 static const struct dcn_dpp_shift tf_shift = {
320 TF_REG_LIST_SH_MASK_DCN10(__SHIFT)
323 static const struct dcn_dpp_mask tf_mask = {
324 TF_REG_LIST_SH_MASK_DCN10(_MASK),
/* MPC: a single register struct covering all four MPCC instances, so one
 * MPC object programs every blender.
 */
327 static const struct dcn_mpc_registers mpc_regs = {
328 MPC_COMMON_REG_LIST_DCN1_0(0),
329 MPC_COMMON_REG_LIST_DCN1_0(1),
330 MPC_COMMON_REG_LIST_DCN1_0(2),
331 MPC_COMMON_REG_LIST_DCN1_0(3)
334 static const struct dcn_mpc_shift mpc_shift = {
335 MPC_COMMON_MASK_SH_LIST_DCN1_0(__SHIFT)
338 static const struct dcn_mpc_mask mpc_mask = {
339 MPC_COMMON_MASK_SH_LIST_DCN1_0(_MASK),
343 [id] = {TG_COMMON_REG_LIST_DCN1_0(id)}
/* Timing-generator (OTG) tables, one entry per TG instance. */
345 static const struct dcn_tg_registers tg_regs[] = {
352 static const struct dcn_tg_shift tg_shift = {
353 TG_COMMON_MASK_SH_LIST_DCN1_0(__SHIFT)
356 static const struct dcn_tg_mask tg_mask = {
357 TG_COMMON_MASK_SH_LIST_DCN1_0(_MASK)
/* VBIOS scratch register, addressed through the NBIO aperture. */
361 static const struct bios_registers bios_regs = {
362 NBIO_SR(BIOS_SCRATCH_6)
367 MI_REG_LIST_DCN10(id)\
/* Memory-input (HUBP front end) tables, one per pipe. */
371 static const struct dcn_mi_registers mi_regs[] = {
378 static const struct dcn_mi_shift mi_shift = {
379 MI_MASK_SH_LIST_DCN10(__SHIFT)
382 static const struct dcn_mi_mask mi_mask = {
383 MI_MASK_SH_LIST_DCN10(_MASK)
/* Clock-source (PHY PLL) register sets; index selects the combo-PHY PLL. */
386 #define clk_src_regs(index, pllid)\
388 CS_COMMON_REG_LIST_DCN1_0(index, pllid),\
391 static const struct dce110_clk_src_regs clk_src_regs[] = {
398 static const struct dce110_clk_src_shift cs_shift = {
399 CS_COMMON_MASK_SH_LIST_DCN1_0(__SHIFT)
402 static const struct dce110_clk_src_mask cs_mask = {
403 CS_COMMON_MASK_SH_LIST_DCN1_0(_MASK)
/* Hardware resource counts for DCN 1.0: four of each front/back-end block. */
407 static const struct resource_caps res_cap = {
408 .num_timing_generator = 4,
409 .num_video_plane = 4,
411 .num_stream_encoder = 4,
/* Debug defaults for the production driver path (selected in construct()
 * when dce_environment == DCE_ENV_PRODUCTION_DRV).
 */
415 static const struct dc_debug debug_defaults_drv = {
416 .disable_dcc = false,
417 .sanity_checks = true,
418 .disable_dmcu = true,
419 .force_abm_enable = false,
420 .timing_trace = false,
422 .disable_pplib_clock_request = true,
423 .disable_pplib_wm_range = false,
424 #if defined(CONFIG_DRM_AMD_DC_DCN1_0)
426 .disable_pipe_split = true
/* Debug defaults for diagnostics/emulation environments: timing trace on,
 * pplib interactions disabled.
 */
430 static const struct dc_debug debug_defaults_diags = {
431 .disable_dmcu = true,
432 .force_abm_enable = false,
433 .timing_trace = true,
435 #if defined(CONFIG_DRM_AMD_DC_DCN1_0)
436 .disable_pplib_clock_request = true,
437 .disable_pplib_wm_range = true,
439 .disable_pipe_split = false
/* Free the DCN10 DPP wrapper behind a transform pointer (allocated with
 * dm_alloc in dcn10_dpp_create).
 */
443 static void dcn10_dpp_destroy(struct transform **xfm)
445 dm_free(TO_DCN10_DPP(*xfm));
/* Allocate and construct the DPP (transform) for pipe instance `inst`,
 * wiring in the per-pipe register table and shared shift/mask tables.
 * Returns the base transform pointer on success.
 */
449 static struct transform *dcn10_dpp_create(
450 struct dc_context *ctx,
453 struct dcn10_dpp *dpp =
454 dm_alloc(sizeof(struct dcn10_dpp));
459 if (dcn10_dpp_construct(dpp, ctx, inst,
460 &tf_regs[inst], &tf_shift, &tf_mask))
/* Allocate and construct the input pixel processor for pipe `inst`. */
468 static struct input_pixel_processor *dcn10_ipp_create(
469 struct dc_context *ctx, uint32_t inst)
471 struct dcn10_ipp *ipp =
472 dm_alloc(sizeof(struct dcn10_ipp));
479 dcn10_ipp_construct(ipp, ctx, inst,
480 &ipp_regs[inst], &ipp_shift, &ipp_mask);
/* Allocate and construct the output pixel processor for pipe `inst`. */
485 static struct output_pixel_processor *dcn10_opp_create(
486 struct dc_context *ctx, uint32_t inst)
488 struct dcn10_opp *opp =
489 dm_alloc(sizeof(struct dcn10_opp));
496 dcn10_opp_construct(opp, ctx, inst,
497 &opp_regs[inst], &opp_shift, &opp_mask);
/* Allocate and construct the single MPC object; its register struct spans
 * all four MPCC instances (see mpc_regs above).
 */
501 static struct mpc *dcn10_mpc_create(struct dc_context *ctx)
503 struct dcn10_mpc *mpc10 = dm_alloc(sizeof(struct dcn10_mpc));
508 dcn10_mpc_construct(mpc10, ctx,
/* Allocate a timing generator for `instance`, point it at its per-instance
 * register table and the shared shift/mask tables, then run the DCN10 init.
 */
517 static struct timing_generator *dcn10_timing_generator_create(
518 struct dc_context *ctx,
521 struct dcn10_timing_generator *tgn10 =
522 dm_alloc(sizeof(struct dcn10_timing_generator));
527 tgn10->base.inst = instance;
528 tgn10->base.ctx = ctx;
530 tgn10->tg_regs = &tg_regs[instance];
531 tgn10->tg_shift = &tg_shift;
532 tgn10->tg_mask = &tg_mask;
534 dcn10_timing_generator_init(tgn10);
/* Capabilities advertised for every DCN10 link encoder: 12bpc deep-color
 * HDMI up to 600 MHz, YCbCr 4:2:0, and DP HBR2/HBR3 with TPS3/TPS4.
 */
539 static const struct encoder_feature_support link_enc_feature = {
540 .max_hdmi_deep_color = COLOR_DEPTH_121212,
541 .max_hdmi_pixel_clock = 600000,
542 .ycbcr420_supported = true,
543 .flags.bits.IS_HBR2_CAPABLE = true,
544 .flags.bits.IS_HBR3_CAPABLE = true,
545 .flags.bits.IS_TPS3_CAPABLE = true,
546 .flags.bits.IS_TPS4_CAPABLE = true,
547 .flags.bits.IS_YCBCR_CAPABLE = true
/* Allocate a link encoder and construct it with the register sets selected
 * by the init data's transmitter, AUX channel and HPD source.
 */
550 struct link_encoder *dcn10_link_encoder_create(
551 const struct encoder_init_data *enc_init_data)
553 struct dce110_link_encoder *enc110 =
554 dm_alloc(sizeof(struct dce110_link_encoder));
559 if (dce110_link_encoder_construct(
563 &link_enc_regs[enc_init_data->transmitter],
/* channel appears to be 1-based, hence the -1 when indexing — confirm
 * against the enc_init_data producer. */
564 &link_enc_aux_regs[enc_init_data->channel - 1],
565 &link_enc_hpd_regs[enc_init_data->hpd_source])) {
567 return &enc110->base;
/* Allocate and construct a PHY-PLL clock source. dp_clk_src marks the
 * instance used as the DP DTO source.
 */
575 struct clock_source *dcn10_clock_source_create(
576 struct dc_context *ctx,
577 struct dc_bios *bios,
578 enum clock_source_id id,
579 const struct dce110_clk_src_regs *regs,
582 struct dce110_clk_src *clk_src =
583 dm_alloc(sizeof(struct dce110_clk_src));
588 if (dce110_clk_src_construct(clk_src, ctx, bios, id,
589 regs, &cs_shift, &cs_mask)) {
590 clk_src->base.dp_clk_src = dp_clk_src;
591 return &clk_src->base;
/* Read the audio pin-strap field from the DC_PINSTRAPS register into the
 * straps struct (consulted during resource_construct for audio support).
 */
598 static void read_dce_straps(
599 struct dc_context *ctx,
600 struct resource_straps *straps)
602 generic_reg_get(ctx, mmDC_PINSTRAPS + BASE(mmDC_PINSTRAPS_BASE_IDX),
603 FN(DC_PINSTRAPS, DC_PINSTRAPS_AUDIO), &straps->dc_pinstraps_audio);
/* Create audio endpoint `inst`, passing its register table and the shared
 * shift/mask tables.
 */
606 static struct audio *create_audio(
607 struct dc_context *ctx, unsigned int inst)
609 return dce_audio_create(ctx, inst,
610 &audio_regs[inst], &audio_shift, &audio_mask);
/* Allocate and construct a stream encoder for the given engine id; reuses
 * the dce110 encoder implementation with DCN10 register tables.
 */
613 static struct stream_encoder *dcn10_stream_encoder_create(
614 enum engine_id eng_id,
615 struct dc_context *ctx)
617 struct dce110_stream_encoder *enc110 =
618 dm_alloc(sizeof(struct dce110_stream_encoder));
623 if (dce110_stream_encoder_construct(
624 enc110, ctx, ctx->dc_bios, eng_id,
625 &stream_enc_regs[eng_id], &se_shift, &se_mask))
626 return &enc110->base;
/* Hardware-sequencer register/shift/mask tables shared by every dce_hwseq
 * instance created below.
 */
633 static const struct dce_hwseq_registers hwseq_reg = {
634 HWSEQ_DCN1_REG_LIST()
637 static const struct dce_hwseq_shift hwseq_shift = {
638 HWSEQ_DCN1_MASK_SH_LIST(__SHIFT)
641 static const struct dce_hwseq_mask hwseq_mask = {
642 HWSEQ_DCN1_MASK_SH_LIST(_MASK)
/* Allocate a hardware-sequencer context and point it at the static DCN1
 * register tables above.
 */
645 static struct dce_hwseq *dcn10_hwseq_create(
646 struct dc_context *ctx)
648 struct dce_hwseq *hws = dm_alloc(sizeof(struct dce_hwseq));
652 hws->regs = &hwseq_reg;
653 hws->shifts = &hwseq_shift;
654 hws->masks = &hwseq_mask;
/* Factory callbacks passed to resource_construct() on real hardware. */
659 static const struct resource_create_funcs res_create_funcs = {
660 .read_dce_straps = read_dce_straps,
661 .create_audio = create_audio,
662 .create_stream_encoder = dcn10_stream_encoder_create,
663 .create_hwseq = dcn10_hwseq_create,
/* Reduced set for the FPGA/Maximus emulation environment (selected via
 * IS_FPGA_MAXIMUS_DC in construct()): no straps, audio or real stream
 * encoders there.
 */
666 static const struct resource_create_funcs res_create_maximus_funcs = {
667 .read_dce_straps = NULL,
668 .create_audio = NULL,
669 .create_stream_encoder = NULL,
670 .create_hwseq = dcn10_hwseq_create,
/* Free the dce110_clk_src wrapper behind a clock-source pointer. */
673 void dcn10_clock_source_destroy(struct clock_source **clk_src)
675 dm_free(TO_DCE110_CLK_SRC(*clk_src));
/* Tear down everything the pool owns. Every pointer is NULL-checked first,
 * so this is safe to call on a partially constructed pool from the error
 * paths in construct().
 */
679 static void destruct(struct dcn10_resource_pool *pool)
683 for (i = 0; i < pool->base.stream_enc_count; i++) {
684 if (pool->base.stream_enc[i] != NULL) {
685 /* TODO: free dcn version of stream encoder once implemented
686 * rather than using virtual stream encoder
688 dm_free(pool->base.stream_enc[i]);
689 pool->base.stream_enc[i] = NULL;
693 if (pool->base.mpc != NULL) {
694 dm_free(TO_DCN10_MPC(pool->base.mpc));
695 pool->base.mpc = NULL;
/* Per-pipe blocks: OPP/IPP free themselves via their funcs vtables, the
 * rest are freed through their container types.
 */
697 for (i = 0; i < pool->base.pipe_count; i++) {
698 if (pool->base.opps[i] != NULL)
699 pool->base.opps[i]->funcs->opp_destroy(&pool->base.opps[i]);
701 if (pool->base.transforms[i] != NULL)
702 dcn10_dpp_destroy(&pool->base.transforms[i]);
704 if (pool->base.ipps[i] != NULL)
705 pool->base.ipps[i]->funcs->ipp_destroy(&pool->base.ipps[i]);
707 if (pool->base.mis[i] != NULL) {
708 dm_free(TO_DCN10_MEM_INPUT(pool->base.mis[i]));
709 pool->base.mis[i] = NULL;
712 if (pool->base.irqs != NULL) {
713 dal_irq_service_destroy(&pool->base.irqs);
716 if (pool->base.timing_generators[i] != NULL) {
717 dm_free(DCN10TG_FROM_TG(pool->base.timing_generators[i]));
718 pool->base.timing_generators[i] = NULL;
722 for (i = 0; i < pool->base.stream_enc_count; i++) {
723 if (pool->base.stream_enc[i] != NULL)
724 dm_free(DCE110STRENC_FROM_STRENC(pool->base.stream_enc[i]));
727 for (i = 0; i < pool->base.audio_count; i++) {
728 if (pool->base.audios[i])
729 dce_aud_destroy(&pool->base.audios[i]);
/* Clock sources, then the dedicated DP DTO source. */
732 for (i = 0; i < pool->base.clk_src_count; i++) {
733 if (pool->base.clock_sources[i] != NULL) {
734 dcn10_clock_source_destroy(&pool->base.clock_sources[i]);
735 pool->base.clock_sources[i] = NULL;
739 if (pool->base.dp_clock_source != NULL) {
740 dcn10_clock_source_destroy(&pool->base.dp_clock_source);
741 pool->base.dp_clock_source = NULL;
744 if (pool->base.abm != NULL)
745 dce_abm_destroy(&pool->base.abm);
747 if (pool->base.dmcu != NULL)
748 dce_dmcu_destroy(&pool->base.dmcu);
750 if (pool->base.display_clock != NULL)
751 dce_disp_clk_destroy(&pool->base.display_clock);
/* Allocate and construct the memory-input block for pipe `inst`; frees the
 * allocation if construction fails.
 */
754 static struct mem_input *dcn10_mem_input_create(
755 struct dc_context *ctx,
758 struct dcn10_mem_input *mem_inputn10 =
759 dm_alloc(sizeof(struct dcn10_mem_input));
764 if (dcn10_mem_input_construct(mem_inputn10, ctx, inst,
765 &mi_regs[inst], &mi_shift, &mi_mask))
766 return &mem_inputn10->base;
769 dm_free(mem_inputn10);
/* Fill pixel_clk_params from the pipe's stream timing: requested pixel
 * clock, owning encoder, signal type and 1-based controller id, plus
 * color-depth/encoding adjustments.
 */
773 static void get_pixel_clock_parameters(
774 const struct pipe_ctx *pipe_ctx,
775 struct pixel_clk_params *pixel_clk_params)
777 const struct dc_stream_state *stream = pipe_ctx->stream;
778 pixel_clk_params->requested_pix_clk = stream->timing.pix_clk_khz;
779 pixel_clk_params->encoder_object_id = stream->sink->link->link_enc->id;
780 pixel_clk_params->signal_type = pipe_ctx->stream->signal;
781 pixel_clk_params->controller_id = pipe_ctx->pipe_idx + 1;
782 /* TODO: un-hardcode*/
/* Symbol clock hardcoded to RBR (LINK_RATE_LOW) for now, per the TODO. */
783 pixel_clk_params->requested_sym_clk = LINK_RATE_LOW *
784 LINK_RATE_REF_FREQ_IN_KHZ;
785 pixel_clk_params->flags.ENABLE_SS = 0;
786 pixel_clk_params->color_depth =
787 stream->timing.display_color_depth;
788 pixel_clk_params->flags.DISPLAY_BLANKED = 1;
789 pixel_clk_params->pixel_encoding = stream->timing.pixel_encoding;
/* YCbCr 4:2:2 is driven at 8bpc; 4:2:0 needs only half the pixel clock. */
791 if (stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR422)
792 pixel_clk_params->color_depth = COLOR_DEPTH_888;
794 if (stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR420)
795 pixel_clk_params->requested_pix_clk /= 2;
/* Default clamping: full range, with depth/encoding copied from the
 * stream's timing.
 */
799 static void build_clamping_params(struct dc_stream_state *stream)
801 stream->clamping.clamping_level = CLAMPING_FULL_RANGE;
802 stream->clamping.c_depth = stream->timing.display_color_depth;
803 stream->clamping.pixel_encoding = stream->timing.pixel_encoding;
/* Derive the per-pipe hardware parameters: pixel-clock params, PLL divider
 * settings from the pipe's clock source, then bit-depth-reduction and
 * clamping params on the stream.
 */
806 static enum dc_status build_pipe_hw_param(struct pipe_ctx *pipe_ctx)
809 get_pixel_clock_parameters(pipe_ctx, &pipe_ctx->pix_clk_params);
811 pipe_ctx->clock_source->funcs->get_pix_clk_dividers(
812 pipe_ctx->clock_source,
813 &pipe_ctx->pix_clk_params,
814 &pipe_ctx->pll_settings);
816 pipe_ctx->stream->clamping.pixel_encoding = pipe_ctx->stream->timing.pixel_encoding;
818 resource_build_bit_depth_reduction_params(pipe_ctx->stream,
819 &pipe_ctx->stream->bit_depth_params);
820 build_clamping_params(pipe_ctx->stream);
/* For every stream in the context, rebuild the stream-level depth/clamping
 * parameters and then the hw params of each pipe mapped to that stream.
 * Streams unchanged from old_context still get their bit-depth/clamping
 * parameters refreshed (see the in-line todo).
 */
825 static enum dc_status build_mapped_resource(
826 const struct core_dc *dc,
827 struct validate_context *context,
828 struct validate_context *old_context)
830 enum dc_status status = DC_OK;
833 for (i = 0; i < context->stream_count; i++) {
834 struct dc_stream_state *stream = context->streams[i];
836 if (old_context && resource_is_stream_unchanged(old_context, stream)) {
837 if (stream != NULL && old_context->streams[i] != NULL) {
838 /* todo: shouldn't have to copy missing parameter here */
839 resource_build_bit_depth_reduction_params(stream,
840 &stream->bit_depth_params);
841 stream->clamping.pixel_encoding =
842 stream->timing.pixel_encoding;
844 resource_build_bit_depth_reduction_params(stream,
845 &stream->bit_depth_params);
846 build_clamping_params(stream);
/* Walk all pipes and program hw params for those carrying this stream. */
852 for (j = 0; j < dc->res_pool->pipe_count ; j++) {
853 struct pipe_ctx *pipe_ctx =
854 &context->res_ctx.pipe_ctx[j];
856 if (context->res_ctx.pipe_ctx[j].stream != stream)
859 status = build_pipe_hw_param(pipe_ctx);
864 /* do not need to validate non root pipes */
/* Full validation of a proposed stream set: retain the streams into the new
 * context, map pool and phy-clock resources, build the mapped hw params,
 * attach surfaces, build scaling params, and finally run DCN bandwidth
 * validation. Each stage's failure status is surfaced to the caller.
 */
872 enum dc_status dcn10_validate_with_context(
873 const struct core_dc *dc,
874 const struct dc_validation_set set[],
876 struct validate_context *context,
877 struct validate_context *old_context)
879 enum dc_status result = DC_OK;
885 for (i = 0; i < set_count; i++) {
886 context->streams[i] = set[i].stream;
887 dc_stream_retain(context->streams[i]);
888 context->stream_count++;
891 result = resource_map_pool_resources(dc, context, old_context);
895 result = resource_map_phy_clock_resources(dc, context, old_context);
899 result = build_mapped_resource(dc, context, old_context);
903 if (!resource_validate_attach_surfaces(set, set_count,
904 old_context, context, dc->res_pool))
905 return DC_FAIL_ATTACH_SURFACES;
907 result = resource_build_scaling_params_for_context(dc, context);
911 if (!dcn_validate_bandwidth(dc, context))
912 return DC_FAIL_BANDWIDTH_VALIDATE;
/* Validate that a single stream is guaranteed to be supportable: same
 * pipeline as dcn10_validate_with_context but with no previous context,
 * plus replication of the stream up to max_streams before the bandwidth
 * check.
 */
917 enum dc_status dcn10_validate_guaranteed(
918 const struct core_dc *dc,
919 struct dc_stream_state *dc_stream,
920 struct validate_context *context)
922 enum dc_status result = DC_ERROR_UNEXPECTED;
924 context->streams[0] = dc_stream;
925 dc_stream_retain(context->streams[0]);
926 context->stream_count++;
928 result = resource_map_pool_resources(dc, context, NULL);
931 result = resource_map_phy_clock_resources(dc, context, NULL);
934 result = build_mapped_resource(dc, context, NULL);
936 if (result == DC_OK) {
937 validate_guaranteed_copy_streams(
938 context, dc->public.caps.max_streams);
939 result = resource_build_scaling_params_for_context(dc, context);
941 if (result == DC_OK && !dcn_validate_bandwidth(dc, context))
942 return DC_FAIL_BANDWIDTH_VALIDATE;
/* Claim an idle secondary pipe for an extra plane on `stream`: it shares
 * the head pipe's stream/TG/OPP (back end) but gets its own front-end
 * blocks (MI/IPP/XFM) by pipe index.
 */
947 static struct pipe_ctx *dcn10_acquire_idle_pipe_for_layer(
948 struct validate_context *context,
949 const struct resource_pool *pool,
950 struct dc_stream_state *stream)
952 struct resource_context *res_ctx = &context->res_ctx;
953 struct pipe_ctx *head_pipe = resource_get_head_pipe_for_stream(res_ctx, stream);
954 struct pipe_ctx *idle_pipe = find_idle_secondary_pipe(res_ctx, pool);
962 idle_pipe->stream = head_pipe->stream;
963 idle_pipe->tg = head_pipe->tg;
964 idle_pipe->opp = head_pipe->opp;
966 idle_pipe->mi = pool->mis[idle_pipe->pipe_idx];
967 idle_pipe->ipp = pool->ipps[idle_pipe->pipe_idx];
968 idle_pipe->xfm = pool->transforms[idle_pipe->pipe_idx];
/* Enumerators (enum headers elided in this view): dcc_control names encode
 * max-uncompressed/max-compressed block sizes used by the DCC capability
 * logic below; segment_order describes whether detile segments are laid out
 * contiguously per access direction.
 */
974 dcc_control__256_256_xxx,
975 dcc_control__128_128_xxx,
976 dcc_control__256_64_64,
981 segment_order__contiguous,
982 segment_order__non_contiguous,
/* Report whether DCC supports `format` and return its bytes-per-element
 * (mirrors DML's get_bytes_per_element): 2 for 16bpp, 4 for 32bpp, 8 for
 * 64bpp formats.
 */
985 static bool dcc_support_pixel_format(
986 enum surface_pixel_format format,
987 unsigned int *bytes_per_element)
989 /* DML: get_bytes_per_element */
991 case SURFACE_PIXEL_FORMAT_GRPH_ARGB1555:
992 case SURFACE_PIXEL_FORMAT_GRPH_RGB565:
993 *bytes_per_element = 2;
995 case SURFACE_PIXEL_FORMAT_GRPH_ARGB8888:
996 case SURFACE_PIXEL_FORMAT_GRPH_ABGR8888:
997 case SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010:
998 case SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010:
999 *bytes_per_element = 4;
1001 case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616:
1002 case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F:
1003 case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F:
1004 *bytes_per_element = 8;
/* Determine whether DCC supports the given swizzle mode and, for supported
 * (swizzle, bytes_per_element) combinations, report the detile segment
 * order in the horizontal and vertical access directions.
 */
1011 static bool dcc_support_swizzle(
1012 enum swizzle_mode_values swizzle,
1013 unsigned int bytes_per_element,
1014 enum segment_order *segment_order_horz,
1015 enum segment_order *segment_order_vert)
1017 bool standard_swizzle = false;
1018 bool display_swizzle = false;
/* Classify the swizzle mode as standard (_S_) or display (_D_). */
1025 case DC_SW_64KB_S_X:
1027 standard_swizzle = true;
1033 case DC_SW_64KB_D_X:
1035 display_swizzle = true;
/* Segment order per (bpe, swizzle-class) combination; unsupported combos
 * fall through to the elided failure return.
 */
1041 if (bytes_per_element == 1 && standard_swizzle) {
1042 *segment_order_horz = segment_order__contiguous;
1043 *segment_order_vert = segment_order__na;
1046 if (bytes_per_element == 2 && standard_swizzle) {
1047 *segment_order_horz = segment_order__non_contiguous;
1048 *segment_order_vert = segment_order__contiguous;
1051 if (bytes_per_element == 4 && standard_swizzle) {
1052 *segment_order_horz = segment_order__non_contiguous;
1053 *segment_order_vert = segment_order__contiguous;
1056 if (bytes_per_element == 8 && standard_swizzle) {
1057 *segment_order_horz = segment_order__na;
1058 *segment_order_vert = segment_order__contiguous;
1061 if (bytes_per_element == 8 && display_swizzle) {
1062 *segment_order_horz = segment_order__contiguous;
1063 *segment_order_vert = segment_order__non_contiguous;
/* Return the width/height in elements of a 256-byte block for the given
 * bytes-per-element (mirrors DML's get_blk256_size).
 */
1070 static void get_blk256_size(unsigned int *blk256_width, unsigned int *blk256_height,
1071 unsigned int bytes_per_element)
1073 /* copied from DML. might want to refactor DML to leverage from DML */
1074 /* DML : get_blk256_size */
1075 if (bytes_per_element == 1) {
1077 *blk256_height = 16;
1078 } else if (bytes_per_element == 2) {
1081 } else if (bytes_per_element == 4) {
1084 } else if (bytes_per_element == 8) {
/* Decide the DCC request size per access direction: if two swaths of the
 * surface fit in the detile buffer, full 256-byte requests are used,
 * otherwise half (128-byte) requests.
 */
1090 static void det_request_size(
1091 unsigned int height,
1094 bool *req128_horz_wc,
1095 bool *req128_vert_wc)
1097 unsigned int detile_buf_size = 164 * 1024; /* 164KB for DCN1.0 */
1099 unsigned int blk256_height = 0;
1100 unsigned int blk256_width = 0;
1101 unsigned int swath_bytes_horz_wc, swath_bytes_vert_wc;
1103 get_blk256_size(&blk256_width, &blk256_height, bpe);
/* Swath sizes for horizontal and vertical scan directions. */
1105 swath_bytes_horz_wc = height * blk256_height * bpe;
1106 swath_bytes_vert_wc = width * blk256_width * bpe;
1108 *req128_horz_wc = (2 * swath_bytes_horz_wc <= detile_buf_size) ?
1109 false : /* full 256B request */
1110 true; /* half 128b request */
1112 *req128_vert_wc = (2 * swath_bytes_vert_wc <= detile_buf_size) ?
1113 false : /* full 256B request */
1114 true; /* half 128b request */
/* Compute the DCC capability of a surface: bails out if DCC is disabled in
 * debug options or the pixel format/swizzle is unsupported, otherwise
 * selects a dcc_control level from the request sizes and segment orders and
 * translates it into block-size limits in `output`.
 */
1117 static bool get_dcc_compression_cap(const struct dc *dc,
1118 const struct dc_dcc_surface_param *input,
1119 struct dc_surface_dcc_cap *output)
1121 /* implement section 1.6.2.1 of DCN1_Programming_Guide.docx */
1122 enum dcc_control dcc_control;
1124 enum segment_order segment_order_horz, segment_order_vert;
1125 bool req128_horz_wc, req128_vert_wc;
1127 memset(output, 0, sizeof(*output));
1129 if (dc->debug.disable_dcc)
1132 if (!dcc_support_pixel_format(input->format,
1136 if (!dcc_support_swizzle(input->swizzle_mode, bpe,
1137 &segment_order_horz, &segment_order_vert))
1140 det_request_size(input->surface_size.height, input->surface_size.width,
1141 bpe, &req128_horz_wc, &req128_vert_wc);
/* Full 256B requests in both directions: best case. Otherwise pick per
 * the known scan direction, falling back to segment order.
 */
1143 if (!req128_horz_wc && !req128_vert_wc) {
1144 dcc_control = dcc_control__256_256_xxx;
1145 } else if (input->scan == SCAN_DIRECTION_HORIZONTAL) {
1146 if (!req128_horz_wc)
1147 dcc_control = dcc_control__256_256_xxx;
1148 else if (segment_order_horz == segment_order__contiguous)
1149 dcc_control = dcc_control__128_128_xxx;
1151 dcc_control = dcc_control__256_64_64;
1152 } else if (input->scan == SCAN_DIRECTION_VERTICAL) {
1153 if (!req128_vert_wc)
1154 dcc_control = dcc_control__256_256_xxx;
1155 else if (segment_order_vert == segment_order__contiguous)
1156 dcc_control = dcc_control__128_128_xxx;
1158 dcc_control = dcc_control__256_64_64;
1160 if ((req128_horz_wc &&
1161 segment_order_horz == segment_order__non_contiguous) ||
1163 segment_order_vert == segment_order__non_contiguous))
1164 /* access_dir not known, must use most constraining */
1165 dcc_control = dcc_control__256_64_64;
1167 /* reg128 is true for either horz and vert
1168 * but segment_order is contiguous
1170 dcc_control = dcc_control__128_128_xxx;
/* Translate the chosen control level into reported block-size limits. */
1173 switch (dcc_control) {
1174 case dcc_control__256_256_xxx:
1175 output->grph.rgb.max_uncompressed_blk_size = 256;
1176 output->grph.rgb.max_compressed_blk_size = 256;
1177 output->grph.rgb.independent_64b_blks = false;
1179 case dcc_control__128_128_xxx:
1180 output->grph.rgb.max_uncompressed_blk_size = 128;
1181 output->grph.rgb.max_compressed_blk_size = 128;
1182 output->grph.rgb.independent_64b_blks = false;
1184 case dcc_control__256_64_64:
1185 output->grph.rgb.max_uncompressed_blk_size = 256;
1186 output->grph.rgb.max_compressed_blk_size = 64;
1187 output->grph.rgb.independent_64b_blks = true;
1190 output->capable = true;
1191 output->const_color_support = false;
/* resource_funcs.destroy: tear down the pool's contents, then free the
 * container itself.
 */
1197 static void dcn10_destroy_resource_pool(struct resource_pool **pool)
1199 struct dcn10_resource_pool *dcn10_pool = TO_DCN10_RES_POOL(*pool);
1201 destruct(dcn10_pool);
1202 dm_free(dcn10_pool);
/* Capability queries exported to DC core. */
1207 static struct dc_cap_funcs cap_funcs = {
1208 .get_dcc_compression_cap = get_dcc_compression_cap
/* The DCN10 resource-pool vtable: creation/validation entry points wired to
 * the functions defined above.
 */
1211 static struct resource_funcs dcn10_res_pool_funcs = {
1212 .destroy = dcn10_destroy_resource_pool,
1213 .link_enc_create = dcn10_link_encoder_create,
1214 .validate_with_context = dcn10_validate_with_context,
1215 .validate_guaranteed = dcn10_validate_guaranteed,
1216 .validate_bandwidth = dcn_validate_bandwidth,
1217 .acquire_idle_pipe_for_layer = dcn10_acquire_idle_pipe_for_layer,
/* Build the complete DCN10 resource pool: hardcoded caps and debug
 * defaults, clock sources, display clock, DMCU/ABM, bandwidth-calc setup,
 * IRQ service, then the per-pipe MI/IPP/DPP/OPP/TG chain and the MPC.
 * Error paths unwind through the labels at the end (destruct() tolerates a
 * partially built pool).
 */
1220 static bool construct(
1221 uint8_t num_virtual_links,
1223 struct dcn10_resource_pool *pool)
1226 struct dc_context *ctx = dc->ctx;
1228 ctx->dc_bios->regs = &bios_regs;
1230 pool->base.res_cap = &res_cap;
1231 pool->base.funcs = &dcn10_res_pool_funcs;
1234 * TODO fill in from actual raven resource when we create
1235 * more than virtual encoder
1238 /*************************************************
1239 * Resource + asic cap harcoding *
1240 *************************************************/
1241 pool->base.underlay_pipe_index = NO_UNDERLAY_PIPE;
1243 /* TODO: Hardcode to correct number of functional controllers */
1244 pool->base.pipe_count = 4;
1245 dc->public.caps.max_downscale_ratio = 200;
1246 dc->public.caps.i2c_speed_in_khz = 100;
1247 dc->public.caps.max_cursor_size = 256;
1249 dc->public.caps.max_slave_planes = 1;
1251 if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV)
1252 dc->public.debug = debug_defaults_drv;
1254 dc->public.debug = debug_defaults_diags;
1256 /*************************************************
1257 * Create resources *
1258 *************************************************/
/* Four combo-PHY PLL clock sources plus a dedicated DP DTO source. */
1260 pool->base.clock_sources[DCN10_CLK_SRC_PLL0] =
1261 dcn10_clock_source_create(ctx, ctx->dc_bios,
1262 CLOCK_SOURCE_COMBO_PHY_PLL0,
1263 &clk_src_regs[0], false);
1264 pool->base.clock_sources[DCN10_CLK_SRC_PLL1] =
1265 dcn10_clock_source_create(ctx, ctx->dc_bios,
1266 CLOCK_SOURCE_COMBO_PHY_PLL1,
1267 &clk_src_regs[1], false);
1268 pool->base.clock_sources[DCN10_CLK_SRC_PLL2] =
1269 dcn10_clock_source_create(ctx, ctx->dc_bios,
1270 CLOCK_SOURCE_COMBO_PHY_PLL2,
1271 &clk_src_regs[2], false);
1272 pool->base.clock_sources[DCN10_CLK_SRC_PLL3] =
1273 dcn10_clock_source_create(ctx, ctx->dc_bios,
1274 CLOCK_SOURCE_COMBO_PHY_PLL3,
1275 &clk_src_regs[3], false);
1277 pool->base.clk_src_count = DCN10_CLK_SRC_TOTAL;
1279 pool->base.dp_clock_source =
1280 dcn10_clock_source_create(ctx, ctx->dc_bios,
1281 CLOCK_SOURCE_ID_DP_DTO,
1282 /* todo: not reuse phy_pll registers */
1283 &clk_src_regs[0], true);
1285 for (i = 0; i < pool->base.clk_src_count; i++) {
1286 if (pool->base.clock_sources[i] == NULL) {
1287 dm_error("DC: failed to create clock sources!\n");
1288 BREAK_TO_DEBUGGER();
1289 goto clock_source_create_fail;
/* Display clock and DMCU/ABM are skipped on FPGA emulation. */
1293 if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
1294 pool->base.display_clock = dce120_disp_clk_create(ctx);
1295 if (pool->base.display_clock == NULL) {
1296 dm_error("DC: failed to create display clock!\n");
1297 BREAK_TO_DEBUGGER();
1298 goto disp_clk_create_fail;
1302 pool->base.dmcu = dcn10_dmcu_create(ctx,
1306 if (pool->base.dmcu == NULL) {
1307 dm_error("DC: failed to create dmcu!\n");
1308 BREAK_TO_DEBUGGER();
1309 goto res_create_fail;
1312 pool->base.abm = dce_abm_create(ctx,
1316 if (pool->base.abm == NULL) {
1317 dm_error("DC: failed to create abm!\n");
1318 BREAK_TO_DEBUGGER();
1319 goto res_create_fail;
/* Bandwidth-calculation setup: DML instance, IP/SoC parameter defaults,
 * then a DRAM-channel count derived from the VBIOS-reported vram width.
 */
1322 dml_init_instance(&dc->dml, DML_PROJECT_RAVEN1);
1323 dc->dcn_ip = dcn10_ip_defaults;
1324 dc->dcn_soc = dcn10_soc_defaults;
1326 dc->dcn_soc.number_of_channels = dc->ctx->asic_id.vram_width / ddr4_dram_width;
1327 ASSERT(dc->dcn_soc.number_of_channels < 3);
1328 if (dc->dcn_soc.number_of_channels == 0)/*old sbios bug*/
1329 dc->dcn_soc.number_of_channels = 2;
/* Single-channel parts get halved fabric/DRAM bandwidth numbers. */
1331 if (dc->dcn_soc.number_of_channels == 1) {
1332 dc->dcn_soc.fabric_and_dram_bandwidth_vmax0p9 = 19.2f;
1333 dc->dcn_soc.fabric_and_dram_bandwidth_vnom0p8 = 17.066f;
1334 dc->dcn_soc.fabric_and_dram_bandwidth_vmid0p72 = 14.933f;
1335 dc->dcn_soc.fabric_and_dram_bandwidth_vmin0p65 = 12.8f;
1338 if (!dc->public.debug.disable_pplib_clock_request)
1339 dcn_bw_update_from_pplib(dc);
1340 dcn_bw_sync_calcs_and_dml(dc);
1341 if (!dc->public.debug.disable_pplib_wm_range)
1342 dcn_bw_notify_pplib_of_wm_ranges(dc);
1345 #if defined(CONFIG_DRM_AMD_DC_DCN1_0)
1346 struct irq_service_init_data init_data;
1347 init_data.ctx = dc->ctx;
1348 pool->base.irqs = dal_irq_service_dcn10_create(&init_data);
1349 if (!pool->base.irqs)
1350 goto irqs_create_fail;
1354 /* mem input -> ipp -> dpp -> opp -> TG */
1355 for (i = 0; i < pool->base.pipe_count; i++) {
1356 pool->base.mis[i] = dcn10_mem_input_create(ctx, i);
1357 if (pool->base.mis[i] == NULL) {
1358 BREAK_TO_DEBUGGER();
1360 "DC: failed to create memory input!\n");
1361 goto mi_create_fail;
1364 pool->base.ipps[i] = dcn10_ipp_create(ctx, i);
1365 if (pool->base.ipps[i] == NULL) {
1366 BREAK_TO_DEBUGGER();
1368 "DC: failed to create input pixel processor!\n");
1369 goto ipp_create_fail;
1372 pool->base.transforms[i] = dcn10_dpp_create(ctx, i);
1373 if (pool->base.transforms[i] == NULL) {
1374 BREAK_TO_DEBUGGER();
1376 "DC: failed to create dpp!\n");
1377 goto dpp_create_fail;
1380 pool->base.opps[i] = dcn10_opp_create(ctx, i);
1381 if (pool->base.opps[i] == NULL) {
1382 BREAK_TO_DEBUGGER();
1384 "DC: failed to create output pixel processor!\n");
1385 goto opp_create_fail;
1388 pool->base.timing_generators[i] = dcn10_timing_generator_create(
1390 if (pool->base.timing_generators[i] == NULL) {
1391 BREAK_TO_DEBUGGER();
1392 dm_error("DC: failed to create tg!\n");
1393 goto otg_create_fail;
1396 pool->base.mpc = dcn10_mpc_create(ctx);
1397 if (pool->base.mpc == NULL) {
1398 BREAK_TO_DEBUGGER();
1399 dm_error("DC: failed to create mpc!\n");
1400 goto mpc_create_fail;
/* Audio, stream encoders etc. are created by the shared helper using the
 * environment-appropriate factory table.
 */
1403 if (!resource_construct(num_virtual_links, dc, &pool->base,
1404 (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment) ?
1405 &res_create_funcs : &res_create_maximus_funcs)))
1406 goto res_create_fail;
1408 dcn10_hw_sequencer_construct(dc);
1409 dc->public.caps.max_surfaces = pool->base.pipe_count;
1411 dc->public.cap_funcs = cap_funcs;
/* Error unwind labels (intermediate labels elided in this view). */
1415 disp_clk_create_fail:
1424 clock_source_create_fail:
1431 struct resource_pool *dcn10_create_resource_pool(
1432 uint8_t num_virtual_links,
1435 struct dcn10_resource_pool *pool =
1436 dm_alloc(sizeof(struct dcn10_resource_pool));
1441 if (construct(num_virtual_links, dc, pool))
1444 BREAK_TO_DEBUGGER();