/*
 * Copyright 2012-15 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

#include <linux/version.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_dp_mst_helper.h>
#include "dm_services.h"
#include "amdgpu.h"
#include "amdgpu_dm.h"
#include "amdgpu_dm_mst_types.h"

#include "dc.h"
#include "dm_helpers.h"

#include "dc_link_ddc.h"

#include "i2caux_interface.h"
#if defined(CONFIG_DEBUG_FS)
#include "amdgpu_dm_debugfs.h"
#endif

#if defined(CONFIG_DRM_AMD_DC_DCN)
#include "dc/dcn20/dcn20_resource.h"
#endif

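/*
 * Bridge between the DRM AUX channel and DC: each drm_dp_aux_msg is
 * repackaged as a DC aux_payload and submitted through the link's DDC
 * service, with DC channel results mapped back to errno values.
 */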
static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux,
				  struct drm_dp_aux_msg *msg)
{
	ssize_t result = 0;
	struct aux_payload payload;
	enum aux_channel_operation_result operation_result;

	if (WARN_ON(msg->size > 16))
		return -E2BIG;

	payload.address = msg->address;
	payload.data = msg->buffer;
	payload.length = msg->size;
	payload.reply = &msg->reply;
	payload.i2c_over_aux = (msg->request & DP_AUX_NATIVE_WRITE) == 0;
	payload.write = (msg->request & DP_AUX_I2C_READ) == 0;
	payload.mot = (msg->request & DP_AUX_I2C_MOT) != 0;
	payload.defer_delay = 0;

	result = dc_link_aux_transfer_raw(TO_DM_AUX(aux)->ddc_service, &payload,
					  &operation_result);

	if (payload.write)
		result = msg->size;

	if (result < 0)
		switch (operation_result) {
		case AUX_CHANNEL_OPERATION_SUCCEEDED:
			break;
		case AUX_CHANNEL_OPERATION_FAILED_HPD_DISCON:
		case AUX_CHANNEL_OPERATION_FAILED_REASON_UNKNOWN:
			result = -EIO;
			break;
		case AUX_CHANNEL_OPERATION_FAILED_INVALID_REPLY:
		case AUX_CHANNEL_OPERATION_FAILED_ENGINE_ACQUIRE:
			result = -EBUSY;
			break;
		case AUX_CHANNEL_OPERATION_FAILED_TIMEOUT:
			result = -ETIMEDOUT;
			break;
		}

	return result;
}

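/*
 * Undo everything dm_dp_add_mst_connector set up: drop the remote DC sink,
 * free the cached EDID, tear down the per-connector fake encoder, and
 * release the malloc reference held on the MST port.
 */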
static void
dm_dp_mst_connector_destroy(struct drm_connector *connector)
{
	struct amdgpu_dm_connector *aconnector =
		to_amdgpu_dm_connector(connector);
	struct amdgpu_encoder *amdgpu_encoder = aconnector->mst_encoder;

	if (aconnector->dc_sink) {
		dc_link_remove_remote_sink(aconnector->dc_link,
					   aconnector->dc_sink);
		dc_sink_release(aconnector->dc_sink);
	}

	kfree(aconnector->edid);

	drm_encoder_cleanup(&amdgpu_encoder->base);
	kfree(amdgpu_encoder);
	drm_connector_cleanup(connector);
	drm_dp_mst_put_port_malloc(aconnector->port);
	kfree(aconnector);
}

static int
amdgpu_dm_mst_connector_late_register(struct drm_connector *connector)
{
	struct amdgpu_dm_connector *amdgpu_dm_connector =
		to_amdgpu_dm_connector(connector);
	int r;

	r = drm_dp_mst_connector_late_register(connector,
					       amdgpu_dm_connector->port);
	if (r < 0)
		return r;

#if defined(CONFIG_DEBUG_FS)
	connector_debugfs_init(amdgpu_dm_connector);
#endif

	return 0;
}

static void
amdgpu_dm_mst_connector_early_unregister(struct drm_connector *connector)
{
	struct amdgpu_dm_connector *amdgpu_dm_connector =
		to_amdgpu_dm_connector(connector);
	struct drm_dp_mst_port *port = amdgpu_dm_connector->port;

	drm_dp_mst_connector_early_unregister(connector, port);
}

static const struct drm_connector_funcs dm_dp_mst_connector_funcs = {
	.fill_modes = drm_helper_probe_single_connector_modes,
	.destroy = dm_dp_mst_connector_destroy,
	.reset = amdgpu_dm_connector_funcs_reset,
	.atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
	.atomic_set_property = amdgpu_dm_connector_atomic_set_property,
	.atomic_get_property = amdgpu_dm_connector_atomic_get_property,
	.late_register = amdgpu_dm_mst_connector_late_register,
	.early_unregister = amdgpu_dm_mst_connector_early_unregister,
};

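/*
 * Discover DSC decoder capability for an MST endpoint: pick the AUX
 * channel that terminates DSC for this port (the endpoint itself or a
 * parent branch device), read the DSC capability registers starting at
 * DP_DSC_SUPPORT, and let DC parse them into dc_sink->dsc_caps.
 */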
#if defined(CONFIG_DRM_AMD_DC_DCN)
static bool validate_dsc_caps_on_connector(struct amdgpu_dm_connector *aconnector)
{
	struct dc_sink *dc_sink = aconnector->dc_sink;
	struct drm_dp_mst_port *port = aconnector->port;
	u8 dsc_caps[16] = { 0 };

	aconnector->dsc_aux = drm_dp_mst_dsc_aux_for_port(port);

	if (!aconnector->dsc_aux)
		return false;

	if (drm_dp_dpcd_read(aconnector->dsc_aux, DP_DSC_SUPPORT, dsc_caps, 16) < 0)
		return false;

	if (!dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
				   dsc_caps, NULL,
				   &dc_sink->dsc_caps.dsc_dec_caps))
		return false;

	return true;
}
#endif

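/*
 * .get_modes for MST connectors: fetch the EDID through the topology
 * manager on first use, create a remote DC sink for the endpoint if one
 * does not exist yet, then populate the mode list from the EDID.
 */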
static int dm_dp_mst_get_modes(struct drm_connector *connector)
{
	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
	int ret = 0;

	if (!aconnector)
		return drm_add_edid_modes(connector, NULL);

	if (!aconnector->edid) {
		struct edid *edid;

		edid = drm_dp_mst_get_edid(connector, &aconnector->mst_port->mst_mgr, aconnector->port);

		if (!edid) {
			drm_connector_update_edid_property(
				&aconnector->base,
				NULL);
			return ret;
		}

		aconnector->edid = edid;
	}

	if (aconnector->dc_sink && aconnector->dc_sink->sink_signal == SIGNAL_TYPE_VIRTUAL) {
		dc_sink_release(aconnector->dc_sink);
		aconnector->dc_sink = NULL;
	}

	if (!aconnector->dc_sink) {
		struct dc_sink *dc_sink;
		struct dc_sink_init_data init_params = {
				.link = aconnector->dc_link,
				.sink_signal = SIGNAL_TYPE_DISPLAY_PORT_MST };

		dc_sink = dc_link_add_remote_sink(
			aconnector->dc_link,
			(uint8_t *)aconnector->edid,
			(aconnector->edid->extensions + 1) * EDID_LENGTH,
			&init_params);

		dc_sink->priv = aconnector;
		/* dc_link_add_remote_sink returns a new reference */
		aconnector->dc_sink = dc_sink;

		if (aconnector->dc_sink) {
			amdgpu_dm_update_freesync_caps(
					connector, aconnector->edid);

#if defined(CONFIG_DRM_AMD_DC_DCN)
			if (!validate_dsc_caps_on_connector(aconnector))
				memset(&aconnector->dc_sink->dsc_caps,
				       0, sizeof(aconnector->dc_sink->dsc_caps));
#endif
		}
	}

	drm_connector_update_edid_property(
					&aconnector->base, aconnector->edid);

	ret = drm_add_edid_modes(connector, aconnector->edid);

	return ret;
}

static struct drm_encoder *
dm_mst_atomic_best_encoder(struct drm_connector *connector,
			   struct drm_connector_state *connector_state)
{
	return &to_amdgpu_dm_connector(connector)->mst_encoder->base;
}

static int
dm_dp_mst_detect(struct drm_connector *connector,
		 struct drm_modeset_acquire_ctx *ctx, bool force)
{
	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
	struct amdgpu_dm_connector *master = aconnector->mst_port;

	return drm_dp_mst_detect_port(connector, ctx, &master->mst_mgr,
				      aconnector->port);
}

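/*
 * .atomic_check for MST connectors: when the connector is being disabled
 * (it had a CRTC and is losing it, or the CRTC is shutting down through a
 * modeset), release the VCPI time slots it held so the MST atomic state
 * stays consistent.
 */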
static int dm_dp_mst_atomic_check(struct drm_connector *connector,
				  struct drm_atomic_state *state)
{
	struct drm_connector_state *new_conn_state =
			drm_atomic_get_new_connector_state(state, connector);
	struct drm_connector_state *old_conn_state =
			drm_atomic_get_old_connector_state(state, connector);
	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
	struct drm_crtc_state *new_crtc_state;
	struct drm_dp_mst_topology_mgr *mst_mgr;
	struct drm_dp_mst_port *mst_port;

	mst_port = aconnector->port;
	mst_mgr = &aconnector->mst_port->mst_mgr;

	if (!old_conn_state->crtc)
		return 0;

	if (new_conn_state->crtc) {
		new_crtc_state = drm_atomic_get_new_crtc_state(state, new_conn_state->crtc);
		if (!new_crtc_state ||
		    !drm_atomic_crtc_needs_modeset(new_crtc_state) ||
		    new_crtc_state->enable)
			return 0;
	}

	return drm_dp_atomic_release_vcpi_slots(state,
						mst_mgr,
						mst_port);
}

static const struct drm_connector_helper_funcs dm_dp_mst_connector_helper_funcs = {
	.get_modes = dm_dp_mst_get_modes,
	.mode_valid = amdgpu_dm_connector_mode_valid,
	.atomic_best_encoder = dm_mst_atomic_best_encoder,
	.detect_ctx = dm_dp_mst_detect,
	.atomic_check = dm_dp_mst_atomic_check,
};

static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
{
	drm_encoder_cleanup(encoder);
	kfree(encoder);
}

static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
	.destroy = amdgpu_dm_encoder_destroy,
};

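/*
 * MST connectors have no physical encoder of their own; each one gets a
 * "fake" DPMST encoder so the DRM core has something to route the
 * connector through. It may be placed on any CRTC the device exposes.
 */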
static struct amdgpu_encoder *
dm_dp_create_fake_mst_encoder(struct amdgpu_dm_connector *connector)
{
	struct drm_device *dev = connector->base.dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_encoder *amdgpu_encoder;
	struct drm_encoder *encoder;

	amdgpu_encoder = kzalloc(sizeof(*amdgpu_encoder), GFP_KERNEL);
	if (!amdgpu_encoder)
		return NULL;

	encoder = &amdgpu_encoder->base;
	encoder->possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);

	drm_encoder_init(
		dev,
		&amdgpu_encoder->base,
		&amdgpu_dm_encoder_funcs,
		DRM_MODE_ENCODER_DPMST,
		NULL);

	drm_encoder_helper_add(encoder, &amdgpu_dm_encoder_helper_funcs);

	return amdgpu_encoder;
}

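/*
 * Topology manager .add_connector callback, invoked whenever a new port
 * is discovered in the MST topology. Creates an amdgpu_dm connector for
 * the port, wires up its fake encoder, and mirrors the SST connector's
 * max_bpc and vrr_capable properties onto it.
 */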
static struct drm_connector *
dm_dp_add_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
			struct drm_dp_mst_port *port,
			const char *pathprop)
{
	struct amdgpu_dm_connector *master = container_of(mgr, struct amdgpu_dm_connector, mst_mgr);
	struct drm_device *dev = master->base.dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_dm_connector *aconnector;
	struct drm_connector *connector;

	aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
	if (!aconnector)
		return NULL;

	connector = &aconnector->base;
	aconnector->port = port;
	aconnector->mst_port = master;

	if (drm_connector_init(
		dev,
		connector,
		&dm_dp_mst_connector_funcs,
		DRM_MODE_CONNECTOR_DisplayPort)) {
		kfree(aconnector);
		return NULL;
	}
	drm_connector_helper_add(connector, &dm_dp_mst_connector_helper_funcs);

	amdgpu_dm_connector_init_helper(
		&adev->dm,
		aconnector,
		DRM_MODE_CONNECTOR_DisplayPort,
		master->dc_link,
		master->connector_id);

	aconnector->mst_encoder = dm_dp_create_fake_mst_encoder(master);
	drm_connector_attach_encoder(&aconnector->base,
				     &aconnector->mst_encoder->base);

	connector->max_bpc_property = master->base.max_bpc_property;
	if (connector->max_bpc_property)
		drm_connector_attach_max_bpc_property(connector, 8, 16);

	connector->vrr_capable_property = master->base.vrr_capable_property;
	if (connector->vrr_capable_property)
		drm_connector_attach_vrr_capable_property(connector);

	drm_object_attach_property(
		&connector->base,
		dev->mode_config.path_property,
		0);
	drm_object_attach_property(
		&connector->base,
		dev->mode_config.tile_property,
		0);

	drm_connector_set_path_property(connector, pathprop);

	/*
	 * Initialize connector state before adding the connector to drm and
	 * framebuffer lists
	 */
	amdgpu_dm_connector_funcs_reset(connector);

	drm_dp_mst_get_port_malloc(port);

	return connector;
}

static const struct drm_dp_mst_topology_cbs dm_mst_cbs = {
	.add_connector = dm_dp_add_mst_connector,
};

void amdgpu_dm_initialize_dp_connector(struct amdgpu_display_manager *dm,
				       struct amdgpu_dm_connector *aconnector,
				       int link_index)
{
	aconnector->dm_dp_aux.aux.name =
		kasprintf(GFP_KERNEL, "AMDGPU DM aux hw bus %d",
			  link_index);
	aconnector->dm_dp_aux.aux.transfer = dm_dp_aux_transfer;
	aconnector->dm_dp_aux.ddc_service = aconnector->dc_link->ddc;

	drm_dp_aux_init(&aconnector->dm_dp_aux.aux);
	drm_dp_cec_register_connector(&aconnector->dm_dp_aux.aux,
				      &aconnector->base);

	if (aconnector->base.connector_type == DRM_MODE_CONNECTOR_eDP)
		return;

	aconnector->mst_mgr.cbs = &dm_mst_cbs;
	drm_dp_mst_topology_mgr_init(
		&aconnector->mst_mgr,
		dm->adev->ddev,
		&aconnector->dm_dp_aux.aux,
		16,
		4,
		aconnector->connector_id);
}

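/*
 * PBN expresses bandwidth in units of 54/64 MBps. The link's total PBN
 * spread across the 64 time slots of one MTP works out to (bandwidth in
 * MBps) / 54 per slot, i.e. kbps / (8 * 1000 * 54).
 */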
int dm_mst_get_pbn_divider(struct dc_link *link)
{
	if (!link)
		return 0;

	return dc_link_bandwidth_kbps(link,
			dc_link_get_link_cap(link)) / (8 * 1000 * 54);
}

#if defined(CONFIG_DRM_AMD_DC_DCN)

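/*
 * MST DSC is negotiated for all streams on a link at once. The structs
 * below carry the per-stream inputs (timing, sink caps, achievable
 * bandwidth range) and the solver's outputs (PBN, DSC on/off, target bpp).
 */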
struct dsc_mst_fairness_params {
	struct dc_crtc_timing *timing;
	struct dc_sink *sink;
	struct dc_dsc_bw_range bw_range;
	bool compression_possible;
	struct drm_dp_mst_port *port;
};

struct dsc_mst_fairness_vars {
	int pbn;
	bool dsc_enabled;
	int bpp_x16;
};

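/*
 * Convert a stream's bandwidth in kbps to its peak PBN demand: pad by the
 * 1.006 margin the DP spec applies to peak bandwidth (the 1006/1000 step),
 * then rescale into 54/64-MBps PBN units via * 64 / (54 * 8 * 1000).
 */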
static int kbps_to_peak_pbn(int kbps)
{
	u64 peak_kbps = kbps;

	peak_kbps *= 1006;
	peak_kbps = div_u64(peak_kbps, 1000);
	return (int) DIV64_U64_ROUND_UP(peak_kbps * 64, (54 * 8 * 1000));
}

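/*
 * Apply the solver's decisions to the DC timings: recompute a full DSC
 * config for every stream that ends up compressed and stamp in the chosen
 * target bpp, clearing the DSC flag everywhere else.
 */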
static void set_dsc_configs_from_fairness_vars(struct dsc_mst_fairness_params *params,
					       struct dsc_mst_fairness_vars *vars,
					       int count)
{
	int i;

	for (i = 0; i < count; i++) {
		memset(&params[i].timing->dsc_cfg, 0, sizeof(params[i].timing->dsc_cfg));
		if (vars[i].dsc_enabled && dc_dsc_compute_config(
					params[i].sink->ctx->dc->res_pool->dscs[0],
					&params[i].sink->dsc_caps.dsc_dec_caps,
					params[i].sink->ctx->dc->debug.dsc_min_slice_height_override,
					0,
					params[i].timing,
					&params[i].timing->dsc_cfg)) {
			params[i].timing->flags.DSC = 1;
			params[i].timing->dsc_cfg.bits_per_pixel = vars[i].bpp_x16;
		} else {
			params[i].timing->flags.DSC = 0;
		}
	}
}

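/*
 * Invert the PBN relationship: turn an allocated PBN back into the target
 * bpp (in 1/16th units) DSC must hit to fit in it. The 994/1000 factor
 * backs the 1.006 peak-bandwidth margin out of the conversion.
 */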
static int bpp_x16_from_pbn(struct dsc_mst_fairness_params param, int pbn)
{
	struct dc_dsc_config dsc_config;
	u64 kbps;

	kbps = div_u64((u64)pbn * 994 * 8 * 54, 64);
	dc_dsc_compute_config(
			param.sink->ctx->dc->res_pool->dscs[0],
			&param.sink->dsc_caps.dsc_dec_caps,
			param.sink->ctx->dc->debug.dsc_min_slice_height_override,
			(int) kbps, param.timing, &dsc_config);

	return dsc_config.bits_per_pixel;
}

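/*
 * Fairness pass: starting from minimum-bpp DSC, repeatedly give the stream
 * with the least remaining headroom an equal share of the link's unused
 * time slots, validating each step against the MST atomic state and
 * rolling the PBN back whenever a step fails to check out.
 */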
static void increase_dsc_bpp(struct drm_atomic_state *state,
			     struct dc_link *dc_link,
			     struct dsc_mst_fairness_params *params,
			     struct dsc_mst_fairness_vars *vars,
			     int count)
{
	int i;
	bool bpp_increased[MAX_PIPES];
	int initial_slack[MAX_PIPES];
	int min_initial_slack;
	int next_index;
	int remaining_to_increase = 0;
	int pbn_per_timeslot;
	int link_timeslots_used;
	int fair_pbn_alloc;

	for (i = 0; i < count; i++) {
		if (vars[i].dsc_enabled) {
			initial_slack[i] = kbps_to_peak_pbn(params[i].bw_range.max_kbps) - vars[i].pbn;
			bpp_increased[i] = false;
			remaining_to_increase += 1;
		} else {
			initial_slack[i] = 0;
			bpp_increased[i] = true;
		}
	}

	pbn_per_timeslot = dc_link_bandwidth_kbps(dc_link,
			dc_link_get_link_cap(dc_link)) / (8 * 1000 * 54);

	while (remaining_to_increase) {
		next_index = -1;
		min_initial_slack = -1;
		for (i = 0; i < count; i++) {
			if (!bpp_increased[i]) {
				if (min_initial_slack == -1 || min_initial_slack > initial_slack[i]) {
					min_initial_slack = initial_slack[i];
					next_index = i;
				}
			}
		}

		if (next_index == -1)
			break;

		link_timeslots_used = 0;

		for (i = 0; i < count; i++)
			link_timeslots_used += DIV_ROUND_UP(vars[i].pbn, pbn_per_timeslot);

		fair_pbn_alloc = (63 - link_timeslots_used) / remaining_to_increase * pbn_per_timeslot;

		if (initial_slack[next_index] > fair_pbn_alloc) {
			vars[next_index].pbn += fair_pbn_alloc;
			if (drm_dp_atomic_find_vcpi_slots(state,
							  params[next_index].port->mgr,
							  params[next_index].port,
							  vars[next_index].pbn,
							  dm_mst_get_pbn_divider(dc_link)) < 0)
				return;
			if (!drm_dp_mst_atomic_check(state)) {
				vars[next_index].bpp_x16 = bpp_x16_from_pbn(params[next_index], vars[next_index].pbn);
			} else {
				vars[next_index].pbn -= fair_pbn_alloc;
				if (drm_dp_atomic_find_vcpi_slots(state,
								  params[next_index].port->mgr,
								  params[next_index].port,
								  vars[next_index].pbn,
								  dm_mst_get_pbn_divider(dc_link)) < 0)
					return;
			}
		} else {
			vars[next_index].pbn += initial_slack[next_index];
			if (drm_dp_atomic_find_vcpi_slots(state,
							  params[next_index].port->mgr,
							  params[next_index].port,
							  vars[next_index].pbn,
							  dm_mst_get_pbn_divider(dc_link)) < 0)
				return;
			if (!drm_dp_mst_atomic_check(state)) {
				vars[next_index].bpp_x16 = params[next_index].bw_range.max_target_bpp_x16;
			} else {
				vars[next_index].pbn -= initial_slack[next_index];
				if (drm_dp_atomic_find_vcpi_slots(state,
								  params[next_index].port->mgr,
								  params[next_index].port,
								  vars[next_index].pbn,
								  dm_mst_get_pbn_divider(dc_link)) < 0)
					return;
			}
		}

		bpp_increased[next_index] = true;
		remaining_to_increase--;
	}
}

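/*
 * Second pass: for streams that reached their maximum DSC bpp anyway, try
 * switching compression off entirely, starting with the stream that gains
 * the most bandwidth from doing so, as long as the topology still checks
 * out; otherwise fall back to the max-bpp compressed allocation.
 */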
static void try_disable_dsc(struct drm_atomic_state *state,
			    struct dc_link *dc_link,
			    struct dsc_mst_fairness_params *params,
			    struct dsc_mst_fairness_vars *vars,
			    int count)
{
	int i;
	bool tried[MAX_PIPES];
	int kbps_increase[MAX_PIPES];
	int max_kbps_increase;
	int next_index;
	int remaining_to_try = 0;

	for (i = 0; i < count; i++) {
		if (vars[i].dsc_enabled && vars[i].bpp_x16 == params[i].bw_range.max_target_bpp_x16) {
			kbps_increase[i] = params[i].bw_range.stream_kbps - params[i].bw_range.max_kbps;
			tried[i] = false;
			remaining_to_try += 1;
		} else {
			kbps_increase[i] = 0;
			tried[i] = true;
		}
	}

	while (remaining_to_try) {
		next_index = -1;
		max_kbps_increase = -1;
		for (i = 0; i < count; i++) {
			if (!tried[i]) {
				if (max_kbps_increase == -1 || max_kbps_increase < kbps_increase[i]) {
					max_kbps_increase = kbps_increase[i];
					next_index = i;
				}
			}
		}

		if (next_index == -1)
			break;

		vars[next_index].pbn = kbps_to_peak_pbn(params[next_index].bw_range.stream_kbps);
		if (drm_dp_atomic_find_vcpi_slots(state,
						  params[next_index].port->mgr,
						  params[next_index].port,
						  vars[next_index].pbn,
						  0) < 0)
			return;

		if (!drm_dp_mst_atomic_check(state)) {
			vars[next_index].dsc_enabled = false;
			vars[next_index].bpp_x16 = 0;
		} else {
			vars[next_index].pbn = kbps_to_peak_pbn(params[next_index].bw_range.max_kbps);
			if (drm_dp_atomic_find_vcpi_slots(state,
							  params[next_index].port->mgr,
							  params[next_index].port,
							  vars[next_index].pbn,
							  dm_mst_get_pbn_divider(dc_link)) < 0)
				return;
		}

		tried[next_index] = true;
		remaining_to_try--;
	}
}

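/*
 * Per-link solver: gather params for every stream on the link, then try
 * (1) no compression, (2) minimum-bpp compression for every DSC-capable
 * sink, and finally (3) fair redistribution of leftover bandwidth via
 * increase_dsc_bpp() and try_disable_dsc().
 */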
static bool compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
					     struct dc_state *dc_state,
					     struct dc_link *dc_link)
{
	int i;
	struct dc_stream_state *stream;
	struct dsc_mst_fairness_params params[MAX_PIPES];
	struct dsc_mst_fairness_vars vars[MAX_PIPES];
	struct amdgpu_dm_connector *aconnector;
	int count = 0;

	memset(params, 0, sizeof(params));

	/* Set up params */
	for (i = 0; i < dc_state->stream_count; i++) {
		struct dc_dsc_policy dsc_policy = {0};

		stream = dc_state->streams[i];

		if (stream->link != dc_link)
			continue;

		stream->timing.flags.DSC = 0;

		params[count].timing = &stream->timing;
		params[count].sink = stream->sink;
		aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context;
		params[count].port = aconnector->port;
		params[count].compression_possible = stream->sink->dsc_caps.dsc_dec_caps.is_dsc_supported;
		dc_dsc_get_policy_for_timing(params[count].timing, &dsc_policy);
		if (!dc_dsc_compute_bandwidth_range(
				stream->sink->ctx->dc->res_pool->dscs[0],
				stream->sink->ctx->dc->debug.dsc_min_slice_height_override,
				dsc_policy.min_target_bpp,
				dsc_policy.max_target_bpp,
				&stream->sink->dsc_caps.dsc_dec_caps,
				&stream->timing, &params[count].bw_range))
			params[count].bw_range.stream_kbps = dc_bandwidth_in_kbps_from_timing(&stream->timing);

		count++;
	}

	/* Try no compression */
	for (i = 0; i < count; i++) {
		vars[i].pbn = kbps_to_peak_pbn(params[i].bw_range.stream_kbps);
		vars[i].dsc_enabled = false;
		vars[i].bpp_x16 = 0;
		if (drm_dp_atomic_find_vcpi_slots(state,
						  params[i].port->mgr,
						  params[i].port,
						  vars[i].pbn,
						  0) < 0)
			return false;
	}
	if (!drm_dp_mst_atomic_check(state)) {
		set_dsc_configs_from_fairness_vars(params, vars, count);
		return true;
	}

	/* Try max compression */
	for (i = 0; i < count; i++) {
		if (params[i].compression_possible) {
			vars[i].pbn = kbps_to_peak_pbn(params[i].bw_range.min_kbps);
			vars[i].dsc_enabled = true;
			vars[i].bpp_x16 = params[i].bw_range.min_target_bpp_x16;
			if (drm_dp_atomic_find_vcpi_slots(state,
							  params[i].port->mgr,
							  params[i].port,
							  vars[i].pbn,
							  dm_mst_get_pbn_divider(dc_link)) < 0)
				return false;
		} else {
			vars[i].pbn = kbps_to_peak_pbn(params[i].bw_range.stream_kbps);
			vars[i].dsc_enabled = false;
			vars[i].bpp_x16 = 0;
			if (drm_dp_atomic_find_vcpi_slots(state,
							  params[i].port->mgr,
							  params[i].port,
							  vars[i].pbn,
							  0) < 0)
				return false;
		}
	}
	if (drm_dp_mst_atomic_check(state))
		return false;

	/* Optimize degree of compression */
	increase_dsc_bpp(state, dc_link, params, vars, count);

	try_disable_dsc(state, dc_link, params, vars, count);

	set_dsc_configs_from_fairness_vars(params, vars, count);

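	return true;
}

/*
 * Entry point from atomic check: walk every MST stream in the DC state,
 * solve DSC once per link (marking all streams that share the link as
 * done), then attach a DSC resource to each stream that ended up
 * compressed.
 */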
bool compute_mst_dsc_configs_for_state(struct drm_atomic_state *state,
				       struct dc_state *dc_state)
{
	int i, j;
	struct dc_stream_state *stream;
	bool computed_streams[MAX_PIPES];
	struct amdgpu_dm_connector *aconnector;

	for (i = 0; i < dc_state->stream_count; i++)
		computed_streams[i] = false;

	for (i = 0; i < dc_state->stream_count; i++) {
		stream = dc_state->streams[i];

		if (stream->signal != SIGNAL_TYPE_DISPLAY_PORT_MST)
			continue;

		aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context;

		if (!aconnector || !aconnector->dc_sink)
			continue;

		if (!aconnector->dc_sink->dsc_caps.dsc_dec_caps.is_dsc_supported)
			continue;

		if (computed_streams[i])
			continue;

		mutex_lock(&aconnector->mst_mgr.lock);
		if (!compute_mst_dsc_configs_for_link(state, dc_state, stream->link)) {
			mutex_unlock(&aconnector->mst_mgr.lock);
			return false;
		}
		mutex_unlock(&aconnector->mst_mgr.lock);

		for (j = 0; j < dc_state->stream_count; j++) {
			if (dc_state->streams[j]->link == stream->link)
				computed_streams[j] = true;
		}
	}

	for (i = 0; i < dc_state->stream_count; i++) {
		stream = dc_state->streams[i];

		if (stream->timing.flags.DSC == 1)
			dcn20_add_dsc_to_stream_resource(stream->ctx->dc, dc_state, stream);
	}

	return true;
}
#endif