2 * Copyright 2015 Advanced Micro Devices, Inc.
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
26 /* The caprices of the preprocessor require that this be declared right here */
27 #define CREATE_TRACE_POINTS
29 #include "dm_services_types.h"
31 #include "dc/inc/core_types.h"
32 #include "dal_asic_id.h"
33 #include "dmub/dmub_srv.h"
34 #include "dc/inc/hw/dmcu.h"
35 #include "dc/inc/hw/abm.h"
36 #include "dc/dc_dmub_srv.h"
37 #include "amdgpu_dm_trace.h"
41 #include "amdgpu_display.h"
42 #include "amdgpu_ucode.h"
44 #include "amdgpu_dm.h"
45 #ifdef CONFIG_DRM_AMD_DC_HDCP
46 #include "amdgpu_dm_hdcp.h"
47 #include <drm/drm_hdcp.h>
48 #endif
49 #include "amdgpu_pm.h"
51 #include "amd_shared.h"
52 #include "amdgpu_dm_irq.h"
53 #include "dm_helpers.h"
54 #include "amdgpu_dm_mst_types.h"
55 #if defined(CONFIG_DEBUG_FS)
56 #include "amdgpu_dm_debugfs.h"
59 #include "ivsrcid/ivsrcid_vislands30.h"
61 #include <linux/module.h>
62 #include <linux/moduleparam.h>
63 #include <linux/types.h>
64 #include <linux/pm_runtime.h>
65 #include <linux/pci.h>
66 #include <linux/firmware.h>
67 #include <linux/component.h>
69 #include <drm/drm_atomic.h>
70 #include <drm/drm_atomic_uapi.h>
71 #include <drm/drm_atomic_helper.h>
72 #include <drm/drm_dp_mst_helper.h>
73 #include <drm/drm_fb_helper.h>
74 #include <drm/drm_fourcc.h>
75 #include <drm/drm_edid.h>
76 #include <drm/drm_vblank.h>
77 #include <drm/drm_audio_component.h>
78 #include <drm/drm_hdcp.h>
80 #if defined(CONFIG_DRM_AMD_DC_DCN)
81 #include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"
83 #include "dcn/dcn_1_0_offset.h"
84 #include "dcn/dcn_1_0_sh_mask.h"
85 #include "soc15_hw_ip.h"
86 #include "vega10_ip_offset.h"
88 #include "soc15_common.h"
91 #include "modules/inc/mod_freesync.h"
92 #include "modules/power/power_helpers.h"
93 #include "modules/inc/mod_info_packet.h"
95 #define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
96 MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);
97 #define FIRMWARE_SIENNA_CICHLID_DMUB "amdgpu/sienna_cichlid_dmcub.bin"
98 MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID_DMUB);
99 #define FIRMWARE_NAVY_FLOUNDER_DMUB "amdgpu/navy_flounder_dmcub.bin"
100 MODULE_FIRMWARE(FIRMWARE_NAVY_FLOUNDER_DMUB);
101 #define FIRMWARE_GREEN_SARDINE_DMUB "amdgpu/green_sardine_dmcub.bin"
102 MODULE_FIRMWARE(FIRMWARE_GREEN_SARDINE_DMUB);
103 #define FIRMWARE_VANGOGH_DMUB "amdgpu/vangogh_dmcub.bin"
104 MODULE_FIRMWARE(FIRMWARE_VANGOGH_DMUB);
105 #define FIRMWARE_DIMGREY_CAVEFISH_DMUB "amdgpu/dimgrey_cavefish_dmcub.bin"
106 MODULE_FIRMWARE(FIRMWARE_DIMGREY_CAVEFISH_DMUB);
108 #define FIRMWARE_RAVEN_DMCU "amdgpu/raven_dmcu.bin"
109 MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);
111 #define FIRMWARE_NAVI12_DMCU "amdgpu/navi12_dmcu.bin"
112 MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);
114 /* Number of bytes in PSP header for firmware. */
115 #define PSP_HEADER_BYTES 0x100
117 /* Number of bytes in PSP footer for firmware. */
118 #define PSP_FOOTER_BYTES 0x100
120 /**
121 * DOC: overview
122 *
123 * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
124 * **dm**) sits between DRM and DC. It acts as a liaison, converting DRM
125 * requests into DC requests, and DC responses into DRM responses.
126 *
127 * The root control structure is &struct amdgpu_display_manager.
128 */
130 /* basic init/fini API */
131 static int amdgpu_dm_init(struct amdgpu_device *adev);
132 static void amdgpu_dm_fini(struct amdgpu_device *adev);
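/*
 * Map a DC dongle type reported in the link's DPCD caps to the
 * corresponding DRM subconnector type, so the dp_subconnector
 * property can reflect what is physically attached.
 */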
134 static enum drm_mode_subconnector get_subconnector_type(struct dc_link *link)
136 switch (link->dpcd_caps.dongle_type) {
137 case DISPLAY_DONGLE_NONE:
138 return DRM_MODE_SUBCONNECTOR_Native;
139 case DISPLAY_DONGLE_DP_VGA_CONVERTER:
140 return DRM_MODE_SUBCONNECTOR_VGA;
141 case DISPLAY_DONGLE_DP_DVI_CONVERTER:
142 case DISPLAY_DONGLE_DP_DVI_DONGLE:
143 return DRM_MODE_SUBCONNECTOR_DVID;
144 case DISPLAY_DONGLE_DP_HDMI_CONVERTER:
145 case DISPLAY_DONGLE_DP_HDMI_DONGLE:
146 return DRM_MODE_SUBCONNECTOR_HDMIA;
147 case DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE:
149 return DRM_MODE_SUBCONNECTOR_Unknown;
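/*
 * Refresh the DP subconnector property on the DRM connector. Only
 * DisplayPort connectors are updated, and the dongle type is read
 * from the link only while a sink is currently attached.
 */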
153 static void update_subconnector_property(struct amdgpu_dm_connector *aconnector)
155 struct dc_link *link = aconnector->dc_link;
156 struct drm_connector *connector = &aconnector->base;
157 enum drm_mode_subconnector subconnector = DRM_MODE_SUBCONNECTOR_Unknown;
159 if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
162 if (aconnector->dc_sink)
163 subconnector = get_subconnector_type(link);
165 drm_object_property_set_value(&connector->base,
166 connector->dev->mode_config.dp_subconnector_property,
171 * initializes drm_device display related structures, based on the information
172 * provided by DAL. The DRM structures are: drm_crtc, drm_connector,
173 * drm_encoder, drm_mode_config
175 * Returns 0 on success
177 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
178 /* removes and deallocates the drm structures, created by the above function */
179 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);
181 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
182 struct drm_plane *plane,
183 unsigned long possible_crtcs,
184 const struct dc_plane_cap *plane_cap);
185 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
186 struct drm_plane *plane,
187 uint32_t link_index);
188 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
189 struct amdgpu_dm_connector *amdgpu_dm_connector,
191 struct amdgpu_encoder *amdgpu_encoder);
192 static int amdgpu_dm_encoder_init(struct drm_device *dev,
193 struct amdgpu_encoder *aencoder,
194 uint32_t link_index);
196 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);
198 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);
200 static int amdgpu_dm_atomic_check(struct drm_device *dev,
201 struct drm_atomic_state *state);
203 static void handle_cursor_update(struct drm_plane *plane,
204 struct drm_plane_state *old_plane_state);
206 static void amdgpu_dm_set_psr_caps(struct dc_link *link);
207 static bool amdgpu_dm_psr_enable(struct dc_stream_state *stream);
208 static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream);
209 static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream);
210 static bool amdgpu_dm_psr_disable_all(struct amdgpu_display_manager *dm);
212 static const struct drm_format_info *
213 amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd);
216 * dm_vblank_get_counter
219 * Get counter for number of vertical blanks
222 * struct amdgpu_device *adev - [in] desired amdgpu device
223 * int disp_idx - [in] which CRTC to get the counter from
226 * Counter for vertical blanks
228 static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
230 if (crtc >= adev->mode_info.num_crtc)
233 struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
235 if (acrtc->dm_irq_params.stream == NULL) {
236 DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
241 return dc_stream_get_vblank_counter(acrtc->dm_irq_params.stream);
245 static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
246 u32 *vbl, u32 *position)
248 uint32_t v_blank_start, v_blank_end, h_position, v_position;
250 if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
253 struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
255 if (acrtc->dm_irq_params.stream == NULL) {
256 DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
262 * TODO rework base driver to use values directly.
263 * for now parse it back into reg-format
265 dc_stream_get_scanoutpos(acrtc->dm_irq_params.stream,
271 *position = v_position | (h_position << 16);
272 *vbl = v_blank_start | (v_blank_end << 16);
278 static bool dm_is_idle(void *handle)
284 static int dm_wait_for_idle(void *handle)
290 static bool dm_check_soft_reset(void *handle)
295 static int dm_soft_reset(void *handle)
301 static struct amdgpu_crtc *
302 get_crtc_by_otg_inst(struct amdgpu_device *adev,
305 struct drm_device *dev = adev_to_drm(adev);
306 struct drm_crtc *crtc;
307 struct amdgpu_crtc *amdgpu_crtc;
309 if (otg_inst == -1) {
311 return adev->mode_info.crtcs[0];
314 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
315 amdgpu_crtc = to_amdgpu_crtc(crtc);
317 if (amdgpu_crtc->otg_inst == otg_inst)
324 static inline bool amdgpu_dm_vrr_active_irq(struct amdgpu_crtc *acrtc)
326 return acrtc->dm_irq_params.freesync_config.state ==
327 VRR_STATE_ACTIVE_VARIABLE ||
328 acrtc->dm_irq_params.freesync_config.state ==
329 VRR_STATE_ACTIVE_FIXED;
332 static inline bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state)
334 return dm_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE ||
335 dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
338 /**
339 * dm_pflip_high_irq() - Handle pageflip interrupt
340 * @interrupt_params: ignored
341 *
342 * Handles the pageflip interrupt by notifying all interested parties
343 * that the pageflip has been completed.
344 */
345 static void dm_pflip_high_irq(void *interrupt_params)
347 struct amdgpu_crtc *amdgpu_crtc;
348 struct common_irq_params *irq_params = interrupt_params;
349 struct amdgpu_device *adev = irq_params->adev;
351 struct drm_pending_vblank_event *e;
352 uint32_t vpos, hpos, v_blank_start, v_blank_end;
355 amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);
357 /* IRQ could occur when in initial stage */
358 /* TODO work and BO cleanup */
359 if (amdgpu_crtc == NULL) {
360 DRM_DEBUG_DRIVER("CRTC is null, returning.\n");
364 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
366 if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
367 DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d != AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p]\n",
368 amdgpu_crtc->pflip_status,
369 AMDGPU_FLIP_SUBMITTED,
370 amdgpu_crtc->crtc_id,
372 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
376 /* page flip completed. */
377 e = amdgpu_crtc->event;
378 amdgpu_crtc->event = NULL;
383 vrr_active = amdgpu_dm_vrr_active_irq(amdgpu_crtc);
385 /* Fixed refresh rate, or VRR scanout position outside front-porch? */
387 !dc_stream_get_scanoutpos(amdgpu_crtc->dm_irq_params.stream, &v_blank_start,
388 &v_blank_end, &hpos, &vpos) ||
389 (vpos < v_blank_start)) {
390 /* Update to correct count and vblank timestamp if racing with
391 * vblank irq. This also updates to the correct vblank timestamp
392 * even in VRR mode, as scanout is past the front-porch atm.
394 drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);
396 /* Wake up userspace by sending the pageflip event with proper
397 * count and timestamp of vblank of flip completion.
400 drm_crtc_send_vblank_event(&amdgpu_crtc->base, e);
402 /* Event sent, so done with vblank for this flip */
403 drm_crtc_vblank_put(&amdgpu_crtc->base);
406 /* VRR active and inside front-porch: vblank count and
407 * timestamp for pageflip event will only be up to date after
408 * drm_crtc_handle_vblank() has been executed from late vblank
409 * irq handler after start of back-porch (vline 0). We queue the
410 * pageflip event for send-out by drm_crtc_handle_vblank() with
411 * updated timestamp and count, once it runs after us.
413 * We need to open-code this instead of using the helper
414 * drm_crtc_arm_vblank_event(), as that helper would
415 * call drm_crtc_accurate_vblank_count(), which we must
416 * not call in VRR mode while we are in front-porch!
419 /* sequence will be replaced by real count during send-out. */
420 e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
421 e->pipe = amdgpu_crtc->crtc_id;
423 list_add_tail(&e->base.link, &adev_to_drm(adev)->vblank_event_list);
427 /* Keep track of vblank of this flip for flip throttling. We use the
428 * cooked hw counter, as that one incremented at start of this vblank
429 * of pageflip completion, so last_flip_vblank is the forbidden count
430 * for queueing new pageflips if vsync + VRR is enabled.
432 amdgpu_crtc->dm_irq_params.last_flip_vblank =
433 amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base);
435 amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
436 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
438 DRM_DEBUG_DRIVER("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
439 amdgpu_crtc->crtc_id, amdgpu_crtc,
440 vrr_active, (int) !e);
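/*
 * VUPDATE interrupt handler. In VRR mode core vblank handling is
 * deferred to this point (after the end of front-porch) so that
 * vblank timestamps stay valid; it also drives the FreeSync BTR
 * handling used on pre-DCE12 (pre-AI family) ASICs.
 */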
443 static void dm_vupdate_high_irq(void *interrupt_params)
445 struct common_irq_params *irq_params = interrupt_params;
446 struct amdgpu_device *adev = irq_params->adev;
447 struct amdgpu_crtc *acrtc;
451 acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);
454 vrr_active = amdgpu_dm_vrr_active_irq(acrtc);
456 DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d\n",
460 /* Core vblank handling is done here after end of front-porch in
461 * vrr mode, as vblank timestamping will give valid results
462 * while now done after front-porch. This will also deliver
463 * page-flip completion events that have been queued to us
464 * if a pageflip happened inside front-porch.
467 drm_crtc_handle_vblank(&acrtc->base);
469 /* BTR processing for pre-DCE12 ASICs */
470 if (acrtc->dm_irq_params.stream &&
471 adev->family < AMDGPU_FAMILY_AI) {
472 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
473 mod_freesync_handle_v_update(
474 adev->dm.freesync_module,
475 acrtc->dm_irq_params.stream,
476 &acrtc->dm_irq_params.vrr_params);
478 dc_stream_adjust_vmin_vmax(
480 acrtc->dm_irq_params.stream,
481 &acrtc->dm_irq_params.vrr_params.adjust);
482 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
488 /**
489 * dm_crtc_high_irq() - Handles CRTC interrupt
490 * @interrupt_params: used for determining the CRTC instance
491 *
492 * Handles the CRTC/VSYNC interrupt by notifying DRM's VBLANK
493 * event handler.
494 */
495 static void dm_crtc_high_irq(void *interrupt_params)
497 struct common_irq_params *irq_params = interrupt_params;
498 struct amdgpu_device *adev = irq_params->adev;
499 struct amdgpu_crtc *acrtc;
503 acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
507 vrr_active = amdgpu_dm_vrr_active_irq(acrtc);
509 DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
510 vrr_active, acrtc->dm_irq_params.active_planes);
513 * Core vblank handling at start of front-porch is only possible
514 * in non-vrr mode, as only there vblank timestamping will give
515 * valid results while done in front-porch. Otherwise defer it
516 * to dm_vupdate_high_irq after end of front-porch.
519 drm_crtc_handle_vblank(&acrtc->base);
522 * Following stuff must happen at start of vblank, for crc
523 * computation and below-the-range btr support in vrr mode.
525 amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);
527 /* BTR updates need to happen before VUPDATE on Vega and above. */
528 if (adev->family < AMDGPU_FAMILY_AI)
531 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
533 if (acrtc->dm_irq_params.stream &&
534 acrtc->dm_irq_params.vrr_params.supported &&
535 acrtc->dm_irq_params.freesync_config.state ==
536 VRR_STATE_ACTIVE_VARIABLE) {
537 mod_freesync_handle_v_update(adev->dm.freesync_module,
538 acrtc->dm_irq_params.stream,
539 &acrtc->dm_irq_params.vrr_params);
541 dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc->dm_irq_params.stream,
542 &acrtc->dm_irq_params.vrr_params.adjust);
546 * If there aren't any active_planes then DCH HUBP may be clock-gated.
547 * In that case, pageflip completion interrupts won't fire and pageflip
548 * completion events won't get delivered. Prevent this by sending
549 * pending pageflip events from here if a flip is still pending.
551 * If any planes are enabled, use dm_pflip_high_irq() instead, to
552 * avoid race conditions between flip programming and completion,
553 * which could cause too early flip completion events.
555 if (adev->family >= AMDGPU_FAMILY_RV &&
556 acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
557 acrtc->dm_irq_params.active_planes == 0) {
559 drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
561 drm_crtc_vblank_put(&acrtc->base);
563 acrtc->pflip_status = AMDGPU_FLIP_NONE;
566 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
569 static int dm_set_clockgating_state(void *handle,
570 enum amd_clockgating_state state)
575 static int dm_set_powergating_state(void *handle,
576 enum amd_powergating_state state)
581 /* Prototypes of private functions */
582 static int dm_early_init(void* handle);
584 /* Allocate memory for FBC compressed data */
585 static void amdgpu_dm_fbc_init(struct drm_connector *connector)
587 struct drm_device *dev = connector->dev;
588 struct amdgpu_device *adev = drm_to_adev(dev);
589 struct dm_compressor_info *compressor = &adev->dm.compressor;
590 struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
591 struct drm_display_mode *mode;
592 unsigned long max_size = 0;
594 if (adev->dm.dc->fbc_compressor == NULL)
597 if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)
600 if (compressor->bo_ptr)
604 list_for_each_entry(mode, &connector->modes, head) {
605 if (max_size < mode->htotal * mode->vtotal)
606 max_size = mode->htotal * mode->vtotal;
610 int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
611 AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
612 &compressor->gpu_addr, &compressor->cpu_addr);
615 DRM_ERROR("DM: Failed to initialize FBC\n");
617 adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
618 DRM_INFO("DM: FBC alloc %lu\n", max_size*4);
625 static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
626 int pipe, bool *enabled,
627 unsigned char *buf, int max_bytes)
629 struct drm_device *dev = dev_get_drvdata(kdev);
630 struct amdgpu_device *adev = drm_to_adev(dev);
631 struct drm_connector *connector;
632 struct drm_connector_list_iter conn_iter;
633 struct amdgpu_dm_connector *aconnector;
638 mutex_lock(&adev->dm.audio_lock);
640 drm_connector_list_iter_begin(dev, &conn_iter);
641 drm_for_each_connector_iter(connector, &conn_iter) {
642 aconnector = to_amdgpu_dm_connector(connector);
643 if (aconnector->audio_inst != port)
647 ret = drm_eld_size(connector->eld);
648 memcpy(buf, connector->eld, min(max_bytes, ret));
652 drm_connector_list_iter_end(&conn_iter);
654 mutex_unlock(&adev->dm.audio_lock);
656 DRM_DEBUG_KMS("Get ELD : idx=%d ret=%d en=%d\n", port, ret, *enabled);
661 static const struct drm_audio_component_ops amdgpu_dm_audio_component_ops = {
662 .get_eld = amdgpu_dm_audio_component_get_eld,
665 static int amdgpu_dm_audio_component_bind(struct device *kdev,
666 struct device *hda_kdev, void *data)
668 struct drm_device *dev = dev_get_drvdata(kdev);
669 struct amdgpu_device *adev = drm_to_adev(dev);
670 struct drm_audio_component *acomp = data;
672 acomp->ops = &amdgpu_dm_audio_component_ops;
674 adev->dm.audio_component = acomp;
679 static void amdgpu_dm_audio_component_unbind(struct device *kdev,
680 struct device *hda_kdev, void *data)
682 struct drm_device *dev = dev_get_drvdata(kdev);
683 struct amdgpu_device *adev = drm_to_adev(dev);
684 struct drm_audio_component *acomp = data;
688 adev->dm.audio_component = NULL;
691 static const struct component_ops amdgpu_dm_audio_component_bind_ops = {
692 .bind = amdgpu_dm_audio_component_bind,
693 .unbind = amdgpu_dm_audio_component_unbind,
696 static int amdgpu_dm_audio_init(struct amdgpu_device *adev)
703 adev->mode_info.audio.enabled = true;
705 adev->mode_info.audio.num_pins = adev->dm.dc->res_pool->audio_count;
707 for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
708 adev->mode_info.audio.pin[i].channels = -1;
709 adev->mode_info.audio.pin[i].rate = -1;
710 adev->mode_info.audio.pin[i].bits_per_sample = -1;
711 adev->mode_info.audio.pin[i].status_bits = 0;
712 adev->mode_info.audio.pin[i].category_code = 0;
713 adev->mode_info.audio.pin[i].connected = false;
714 adev->mode_info.audio.pin[i].id =
715 adev->dm.dc->res_pool->audios[i]->inst;
716 adev->mode_info.audio.pin[i].offset = 0;
719 ret = component_add(adev->dev, &amdgpu_dm_audio_component_bind_ops);
723 adev->dm.audio_registered = true;
728 static void amdgpu_dm_audio_fini(struct amdgpu_device *adev)
733 if (!adev->mode_info.audio.enabled)
736 if (adev->dm.audio_registered) {
737 component_del(adev->dev, &amdgpu_dm_audio_component_bind_ops);
738 adev->dm.audio_registered = false;
741 /* TODO: Disable audio? */
743 adev->mode_info.audio.enabled = false;
746 static void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
748 struct drm_audio_component *acomp = adev->dm.audio_component;
750 if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
751 DRM_DEBUG_KMS("Notify ELD: %d\n", pin);
753 acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
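/*
 * Bring up the DMUB microcontroller: verify hardware support, copy the
 * firmware instruction/data sections and the VBIOS image into the
 * reserved framebuffer windows, clear the mailbox/tracebuffer/fw-state
 * regions, program the hardware parameters, wait for auto-load to
 * complete and finally register the DC-side DMUB server.
 */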
758 static int dm_dmub_hw_init(struct amdgpu_device *adev)
760 const struct dmcub_firmware_header_v1_0 *hdr;
761 struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
762 struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
763 const struct firmware *dmub_fw = adev->dm.dmub_fw;
764 struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
765 struct abm *abm = adev->dm.dc->res_pool->abm;
766 struct dmub_srv_hw_params hw_params;
767 enum dmub_status status;
768 const unsigned char *fw_inst_const, *fw_bss_data;
769 uint32_t i, fw_inst_const_size, fw_bss_data_size;
773 /* DMUB isn't supported on the ASIC. */
777 DRM_ERROR("No framebuffer info for DMUB service.\n");
782 /* Firmware required for DMUB support. */
783 DRM_ERROR("No firmware provided for DMUB.\n");
787 status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support);
788 if (status != DMUB_STATUS_OK) {
789 DRM_ERROR("Error checking HW support for DMUB: %d\n", status);
793 if (!has_hw_support) {
794 DRM_INFO("DMUB unsupported on ASIC\n");
798 hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;
800 fw_inst_const = dmub_fw->data +
801 le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
804 fw_bss_data = dmub_fw->data +
805 le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
806 le32_to_cpu(hdr->inst_const_bytes);
808 /* Copy firmware and bios info into FB memory. */
809 fw_inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
810 PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
812 fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
814 /* if adev->firmware.load_type == AMDGPU_FW_LOAD_PSP,
815 * amdgpu_ucode_init_single_fw will load dmub firmware
816 * fw_inst_const part to cw0; otherwise, the firmware back door load
817 * will be done by dm_dmub_hw_init
819 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
820 memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
824 if (fw_bss_data_size)
825 memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr,
826 fw_bss_data, fw_bss_data_size);
828 /* Copy firmware bios info into FB memory. */
829 memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,
832 /* Reset regions that need to be reset. */
833 memset(fb_info->fb[DMUB_WINDOW_4_MAILBOX].cpu_addr, 0,
834 fb_info->fb[DMUB_WINDOW_4_MAILBOX].size);
836 memset(fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr, 0,
837 fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size);
839 memset(fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr, 0,
840 fb_info->fb[DMUB_WINDOW_6_FW_STATE].size);
842 /* Initialize hardware. */
843 memset(&hw_params, 0, sizeof(hw_params));
844 hw_params.fb_base = adev->gmc.fb_start;
845 hw_params.fb_offset = adev->gmc.aper_base;
847 /* backdoor load firmware and trigger dmub running */
848 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
849 hw_params.load_inst_const = true;
852 hw_params.psp_version = dmcu->psp_version;
854 for (i = 0; i < fb_info->num_fb; ++i)
855 hw_params.fb[i] = &fb_info->fb[i];
857 status = dmub_srv_hw_init(dmub_srv, &hw_params);
858 if (status != DMUB_STATUS_OK) {
859 DRM_ERROR("Error initializing DMUB HW: %d\n", status);
863 /* Wait for firmware load to finish. */
864 status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
865 if (status != DMUB_STATUS_OK)
866 DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);
868 /* Init DMCU and ABM if available. */
870 dmcu->funcs->dmcu_init(dmcu);
871 abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
874 adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
875 if (!adev->dm.dc->ctx->dmub_srv) {
876 DRM_ERROR("Couldn't allocate DC DMUB server!\n");
880 DRM_INFO("DMUB hardware initialized: version=0x%08X\n",
881 adev->dm.dmcub_fw_version);
886 #if defined(CONFIG_DRM_AMD_DC_DCN)
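/*
 * Translate the GMC view of the system aperture, AGP window and GART
 * page table into the dc_phy_addr_space_config layout that DC programs
 * into DCN hardware (consumed by dc_setup_system_context() on APUs).
 */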
887 static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_addr_space_config *pa_config)
890 uint32_t logical_addr_low;
891 uint32_t logical_addr_high;
892 uint32_t agp_base, agp_bot, agp_top;
893 PHYSICAL_ADDRESS_LOC page_table_start, page_table_end, page_table_base;
895 logical_addr_low = min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18;
896 pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);
898 if (adev->apu_flags & AMD_APU_IS_RAVEN2)
899 /*
900 * Raven2 has a HW issue that it is unable to use the vram which
901 * is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR. So here is the
902 * workaround that increases the system aperture high address (adds 1)
903 * to get rid of the VM fault and hardware hang.
904 */
905 logical_addr_high = max((adev->gmc.fb_end >> 18) + 0x1, adev->gmc.agp_end >> 18);
906 else
907 logical_addr_high = max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18;
910 agp_bot = adev->gmc.agp_start >> 24;
911 agp_top = adev->gmc.agp_end >> 24;
914 page_table_start.high_part = (u32)(adev->gmc.gart_start >> 44) & 0xF;
915 page_table_start.low_part = (u32)(adev->gmc.gart_start >> 12);
916 page_table_end.high_part = (u32)(adev->gmc.gart_end >> 44) & 0xF;
917 page_table_end.low_part = (u32)(adev->gmc.gart_end >> 12);
918 page_table_base.high_part = upper_32_bits(pt_base) & 0xF;
919 page_table_base.low_part = lower_32_bits(pt_base);
921 pa_config->system_aperture.start_addr = (uint64_t)logical_addr_low << 18;
922 pa_config->system_aperture.end_addr = (uint64_t)logical_addr_high << 18;
924 pa_config->system_aperture.agp_base = (uint64_t)agp_base << 24;
925 pa_config->system_aperture.agp_bot = (uint64_t)agp_bot << 24;
926 pa_config->system_aperture.agp_top = (uint64_t)agp_top << 24;
928 pa_config->system_aperture.fb_base = adev->gmc.fb_start;
929 pa_config->system_aperture.fb_offset = adev->gmc.aper_base;
930 pa_config->system_aperture.fb_top = adev->gmc.fb_end;
932 pa_config->gart_config.page_table_start_addr = page_table_start.quad_part << 12;
933 pa_config->gart_config.page_table_end_addr = page_table_end.quad_part << 12;
934 pa_config->gart_config.page_table_base_addr = page_table_base.quad_part;
936 pa_config->is_hvm_enabled = 0;
941 #ifdef CONFIG_DEBUG_FS
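/*
 * Create the AMD_CRC_WIN_* atomic range properties that describe a CRC
 * capture window on a CRTC; only compiled in when CONFIG_DEBUG_FS is
 * enabled.
 */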
942 static int create_crtc_crc_properties(struct amdgpu_display_manager *dm)
944 dm->crc_win_x_start_property =
945 drm_property_create_range(adev_to_drm(dm->adev),
946 DRM_MODE_PROP_ATOMIC,
947 "AMD_CRC_WIN_X_START", 0, U16_MAX);
948 if (!dm->crc_win_x_start_property)
951 dm->crc_win_y_start_property =
952 drm_property_create_range(adev_to_drm(dm->adev),
953 DRM_MODE_PROP_ATOMIC,
954 "AMD_CRC_WIN_Y_START", 0, U16_MAX);
955 if (!dm->crc_win_y_start_property)
958 dm->crc_win_x_end_property =
959 drm_property_create_range(adev_to_drm(dm->adev),
960 DRM_MODE_PROP_ATOMIC,
961 "AMD_CRC_WIN_X_END", 0, U16_MAX);
962 if (!dm->crc_win_x_end_property)
965 dm->crc_win_y_end_property =
966 drm_property_create_range(adev_to_drm(dm->adev),
967 DRM_MODE_PROP_ATOMIC,
968 "AMD_CRC_WIN_Y_END", 0, U16_MAX);
969 if (!dm->crc_win_y_end_property)
976 static int amdgpu_dm_init(struct amdgpu_device *adev)
978 struct dc_init_data init_data;
979 #ifdef CONFIG_DRM_AMD_DC_HDCP
980 struct dc_callback_init init_params;
984 adev->dm.ddev = adev_to_drm(adev);
985 adev->dm.adev = adev;
987 /* Zero all the fields */
988 memset(&init_data, 0, sizeof(init_data));
989 #ifdef CONFIG_DRM_AMD_DC_HDCP
990 memset(&init_params, 0, sizeof(init_params));
993 mutex_init(&adev->dm.dc_lock);
994 mutex_init(&adev->dm.audio_lock);
996 if (amdgpu_dm_irq_init(adev)) {
997 DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
1001 init_data.asic_id.chip_family = adev->family;
1003 init_data.asic_id.pci_revision_id = adev->pdev->revision;
1004 init_data.asic_id.hw_internal_rev = adev->external_rev_id;
1006 init_data.asic_id.vram_width = adev->gmc.vram_width;
1007 /* TODO: initialize init_data.asic_id.vram_type here!!!! */
1008 init_data.asic_id.atombios_base_address =
1009 adev->mode_info.atom_context->bios;
1011 init_data.driver = adev;
1013 adev->dm.cgs_device = amdgpu_cgs_create_device(adev);
1015 if (!adev->dm.cgs_device) {
1016 DRM_ERROR("amdgpu: failed to create cgs device.\n");
1020 init_data.cgs_device = adev->dm.cgs_device;
1022 init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;
1024 switch (adev->asic_type) {
1029 init_data.flags.gpu_vm_support = true;
1030 if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
1031 init_data.flags.disable_dmcu = true;
1033 #if defined(CONFIG_DRM_AMD_DC_DCN)
1035 init_data.flags.gpu_vm_support = true;
1042 if (amdgpu_dc_feature_mask & DC_FBC_MASK)
1043 init_data.flags.fbc_support = true;
1045 if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK)
1046 init_data.flags.multi_mon_pp_mclk_switch = true;
1048 if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK)
1049 init_data.flags.disable_fractional_pwm = true;
1051 init_data.flags.power_down_display_on_boot = true;
1053 init_data.soc_bounding_box = adev->dm.soc_bounding_box;
1055 /* Display Core create. */
1056 adev->dm.dc = dc_create(&init_data);
1059 DRM_INFO("Display Core initialized with v%s!\n", DC_VER);
1061 DRM_INFO("Display Core failed to initialize with v%s!\n", DC_VER);
1065 if (amdgpu_dc_debug_mask & DC_DISABLE_PIPE_SPLIT) {
1066 adev->dm.dc->debug.force_single_disp_pipe_split = false;
1067 adev->dm.dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
1070 if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
1071 adev->dm.dc->debug.disable_stutter = amdgpu_pp_feature_mask & PP_STUTTER_MODE ? false : true;
1073 if (amdgpu_dc_debug_mask & DC_DISABLE_STUTTER)
1074 adev->dm.dc->debug.disable_stutter = true;
1076 if (amdgpu_dc_debug_mask & DC_DISABLE_DSC)
1077 adev->dm.dc->debug.disable_dsc = true;
1079 if (amdgpu_dc_debug_mask & DC_DISABLE_CLOCK_GATING)
1080 adev->dm.dc->debug.disable_clock_gate = true;
1082 r = dm_dmub_hw_init(adev);
1084 DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
1088 dc_hardware_init(adev->dm.dc);
1090 #if defined(CONFIG_DRM_AMD_DC_DCN)
1091 if (adev->apu_flags) {
1092 struct dc_phy_addr_space_config pa_config;
1094 mmhub_read_system_context(adev, &pa_config);
1096 // Call the DC init_memory func
1097 dc_setup_system_context(adev->dm.dc, &pa_config);
1101 adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
1102 if (!adev->dm.freesync_module) {
1104 "amdgpu: failed to initialize freesync_module.\n");
1106 DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
1107 adev->dm.freesync_module);
1109 amdgpu_dm_init_color_mod();
1111 #ifdef CONFIG_DRM_AMD_DC_HDCP
1112 if (adev->dm.dc->caps.max_links > 0 && adev->asic_type >= CHIP_RAVEN) {
1113 adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);
1115 if (!adev->dm.hdcp_workqueue)
1116 DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n");
1118 DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);
1120 dc_init_callbacks(adev->dm.dc, &init_params);
1123 #ifdef CONFIG_DEBUG_FS
1124 if (create_crtc_crc_properties(&adev->dm))
1125 DRM_ERROR("amdgpu: failed to create crc property.\n");
1127 if (amdgpu_dm_initialize_drm_device(adev)) {
1129 "amdgpu: failed to initialize sw for display support.\n");
1133 /* create fake encoders for MST */
1134 dm_dp_create_fake_mst_encoders(adev);
1136 /* TODO: Add_display_info? */
1138 /* TODO use dynamic cursor width */
1139 adev_to_drm(adev)->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
1140 adev_to_drm(adev)->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;
1142 if (drm_vblank_init(adev_to_drm(adev), adev->dm.display_indexes_num)) {
1144 "amdgpu: failed to initialize sw for display support.\n");
1149 DRM_DEBUG_DRIVER("KMS initialized.\n");
1153 amdgpu_dm_fini(adev);
1158 static void amdgpu_dm_fini(struct amdgpu_device *adev)
1162 for (i = 0; i < adev->dm.display_indexes_num; i++) {
1163 drm_encoder_cleanup(&adev->dm.mst_encoders[i].base);
1166 amdgpu_dm_audio_fini(adev);
1168 amdgpu_dm_destroy_drm_device(&adev->dm);
1170 #ifdef CONFIG_DRM_AMD_DC_HDCP
1171 if (adev->dm.hdcp_workqueue) {
1172 hdcp_destroy(adev->dm.hdcp_workqueue);
1173 adev->dm.hdcp_workqueue = NULL;
1177 dc_deinit_callbacks(adev->dm.dc);
1179 if (adev->dm.dc->ctx->dmub_srv) {
1180 dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);
1181 adev->dm.dc->ctx->dmub_srv = NULL;
1184 if (adev->dm.dmub_bo)
1185 amdgpu_bo_free_kernel(&adev->dm.dmub_bo,
1186 &adev->dm.dmub_bo_gpu_addr,
1187 &adev->dm.dmub_bo_cpu_addr);
1189 /* DC Destroy TODO: Replace destroy DAL */
1191 dc_destroy(&adev->dm.dc);
1193 * TODO: pageflip, vlank interrupt
1195 * amdgpu_dm_irq_fini(adev);
1198 if (adev->dm.cgs_device) {
1199 amdgpu_cgs_destroy_device(adev->dm.cgs_device);
1200 adev->dm.cgs_device = NULL;
1202 if (adev->dm.freesync_module) {
1203 mod_freesync_destroy(adev->dm.freesync_module);
1204 adev->dm.freesync_module = NULL;
1207 mutex_destroy(&adev->dm.audio_lock);
1208 mutex_destroy(&adev->dm.dc_lock);
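/*
 * Request and validate the DMCU firmware appropriate for the ASIC, and
 * register its ERAM and interrupt-vector sections with the PSP firmware
 * list. DMCU firmware is optional; its absence is not treated as an
 * error.
 */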
1213 static int load_dmcu_fw(struct amdgpu_device *adev)
1215 const char *fw_name_dmcu = NULL;
1217 const struct dmcu_firmware_header_v1_0 *hdr;
1219 switch (adev->asic_type) {
1220 #if defined(CONFIG_DRM_AMD_DC_SI)
1235 case CHIP_POLARIS11:
1236 case CHIP_POLARIS10:
1237 case CHIP_POLARIS12:
1245 case CHIP_SIENNA_CICHLID:
1246 case CHIP_NAVY_FLOUNDER:
1247 case CHIP_DIMGREY_CAVEFISH:
1251 fw_name_dmcu = FIRMWARE_NAVI12_DMCU;
1254 if (ASICREV_IS_PICASSO(adev->external_rev_id))
1255 fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
1256 else if (ASICREV_IS_RAVEN2(adev->external_rev_id))
1257 fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
1262 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
1266 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
1267 DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n");
1271 r = request_firmware_direct(&adev->dm.fw_dmcu, fw_name_dmcu, adev->dev);
1273 /* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
1274 DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
1275 adev->dm.fw_dmcu = NULL;
1279 dev_err(adev->dev, "amdgpu_dm: Can't load firmware \"%s\"\n",
1284 r = amdgpu_ucode_validate(adev->dm.fw_dmcu);
1286 dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
1288 release_firmware(adev->dm.fw_dmcu);
1289 adev->dm.fw_dmcu = NULL;
1293 hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data;
1294 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM;
1295 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu;
1296 adev->firmware.fw_size +=
1297 ALIGN(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1299 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV;
1300 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu;
1301 adev->firmware.fw_size +=
1302 ALIGN(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1304 adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version);
1306 DRM_DEBUG_KMS("PSP loading DMCU firmware\n");
1311 static uint32_t amdgpu_dm_dmub_reg_read(void *ctx, uint32_t address)
1313 struct amdgpu_device *adev = ctx;
1315 return dm_read_reg(adev->dm.dc->ctx, address);
1318 static void amdgpu_dm_dmub_reg_write(void *ctx, uint32_t address,
1321 struct amdgpu_device *adev = ctx;
1323 return dm_write_reg(adev->dm.dc->ctx, address, value);
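/*
 * Software-side DMUB setup: select the DMUB variant and firmware file
 * for the ASIC, request and validate the firmware, create the DMUB
 * service with register read/write callbacks, size its memory regions,
 * back them with a VRAM allocation and compute the per-window
 * framebuffer info consumed later by dm_dmub_hw_init().
 */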
1326 static int dm_dmub_sw_init(struct amdgpu_device *adev)
1328 struct dmub_srv_create_params create_params;
1329 struct dmub_srv_region_params region_params;
1330 struct dmub_srv_region_info region_info;
1331 struct dmub_srv_fb_params fb_params;
1332 struct dmub_srv_fb_info *fb_info;
1333 struct dmub_srv *dmub_srv;
1334 const struct dmcub_firmware_header_v1_0 *hdr;
1335 const char *fw_name_dmub;
1336 enum dmub_asic dmub_asic;
1337 enum dmub_status status;
1340 switch (adev->asic_type) {
1342 dmub_asic = DMUB_ASIC_DCN21;
1343 fw_name_dmub = FIRMWARE_RENOIR_DMUB;
1344 if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
1345 fw_name_dmub = FIRMWARE_GREEN_SARDINE_DMUB;
1347 case CHIP_SIENNA_CICHLID:
1348 dmub_asic = DMUB_ASIC_DCN30;
1349 fw_name_dmub = FIRMWARE_SIENNA_CICHLID_DMUB;
1351 case CHIP_NAVY_FLOUNDER:
1352 dmub_asic = DMUB_ASIC_DCN30;
1353 fw_name_dmub = FIRMWARE_NAVY_FLOUNDER_DMUB;
1356 dmub_asic = DMUB_ASIC_DCN301;
1357 fw_name_dmub = FIRMWARE_VANGOGH_DMUB;
1359 case CHIP_DIMGREY_CAVEFISH:
1360 dmub_asic = DMUB_ASIC_DCN302;
1361 fw_name_dmub = FIRMWARE_DIMGREY_CAVEFISH_DMUB;
1365 /* ASIC doesn't support DMUB. */
1369 r = request_firmware_direct(&adev->dm.dmub_fw, fw_name_dmub, adev->dev);
1371 DRM_ERROR("DMUB firmware loading failed: %d\n", r);
1375 r = amdgpu_ucode_validate(adev->dm.dmub_fw);
1377 DRM_ERROR("Couldn't validate DMUB firmware: %d\n", r);
1381 hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;
1383 if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
1384 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
1385 AMDGPU_UCODE_ID_DMCUB;
1386 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw =
1388 adev->firmware.fw_size +=
1389 ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);
1391 DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
1392 adev->dm.dmcub_fw_version);
1395 adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);
1397 adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL);
1398 dmub_srv = adev->dm.dmub_srv;
1401 DRM_ERROR("Failed to allocate DMUB service!\n");
1405 memset(&create_params, 0, sizeof(create_params));
1406 create_params.user_ctx = adev;
1407 create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read;
1408 create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write;
1409 create_params.asic = dmub_asic;
1411 /* Create the DMUB service. */
1412 status = dmub_srv_create(dmub_srv, &create_params);
1413 if (status != DMUB_STATUS_OK) {
1414 DRM_ERROR("Error creating DMUB service: %d\n", status);
1418 /* Calculate the size of all the regions for the DMUB service. */
1419 memset(®ion_params, 0, sizeof(region_params));
1421 region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
1422 PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
1423 region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
1424 region_params.vbios_size = adev->bios_size;
1425 region_params.fw_bss_data = region_params.bss_data_size ?
1426 adev->dm.dmub_fw->data +
1427 le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1428 le32_to_cpu(hdr->inst_const_bytes) : NULL;
1429 region_params.fw_inst_const =
1430 adev->dm.dmub_fw->data +
1431 le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1434 status = dmub_srv_calc_region_info(dmub_srv, ®ion_params,
1437 if (status != DMUB_STATUS_OK) {
1438 DRM_ERROR("Error calculating DMUB region info: %d\n", status);
1443 * Allocate a framebuffer based on the total size of all the regions.
1444 * TODO: Move this into GART.
1446 r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE,
1447 AMDGPU_GEM_DOMAIN_VRAM, &adev->dm.dmub_bo,
1448 &adev->dm.dmub_bo_gpu_addr,
1449 &adev->dm.dmub_bo_cpu_addr);
1453 /* Rebase the regions on the framebuffer address. */
1454 memset(&fb_params, 0, sizeof(fb_params));
1455 fb_params.cpu_addr = adev->dm.dmub_bo_cpu_addr;
1456 fb_params.gpu_addr = adev->dm.dmub_bo_gpu_addr;
1457 fb_params.region_info = ®ion_info;
1459 adev->dm.dmub_fb_info =
1460 kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL);
1461 fb_info = adev->dm.dmub_fb_info;
1465 "Failed to allocate framebuffer info for DMUB service!\n");
1469 status = dmub_srv_calc_fb_info(dmub_srv, &fb_params, fb_info);
1470 if (status != DMUB_STATUS_OK) {
1471 DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
1478 static int dm_sw_init(void *handle)
1480 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1483 r = dm_dmub_sw_init(adev);
1487 return load_dmcu_fw(adev);
1490 static int dm_sw_fini(void *handle)
1492 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1494 kfree(adev->dm.dmub_fb_info);
1495 adev->dm.dmub_fb_info = NULL;
1497 if (adev->dm.dmub_srv) {
1498 dmub_srv_destroy(adev->dm.dmub_srv);
1499 adev->dm.dmub_srv = NULL;
1502 release_firmware(adev->dm.dmub_fw);
1503 adev->dm.dmub_fw = NULL;
1505 release_firmware(adev->dm.fw_dmcu);
1506 adev->dm.fw_dmcu = NULL;
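/*
 * Walk all connectors and start topology management for every link
 * detected as an MST branch; on failure the link is downgraded to a
 * single (SST) connection.
 */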
1511 static int detect_mst_link_for_all_connectors(struct drm_device *dev)
1513 struct amdgpu_dm_connector *aconnector;
1514 struct drm_connector *connector;
1515 struct drm_connector_list_iter iter;
1518 drm_connector_list_iter_begin(dev, &iter);
1519 drm_for_each_connector_iter(connector, &iter) {
1520 aconnector = to_amdgpu_dm_connector(connector);
1521 if (aconnector->dc_link->type == dc_connection_mst_branch &&
1522 aconnector->mst_mgr.aux) {
1523 DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
1525 aconnector->base.base.id);
1527 ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
1529 DRM_ERROR("DM_MST: Failed to start MST\n");
1530 aconnector->dc_link->type =
1531 dc_connection_single;
1536 drm_connector_list_iter_end(&iter);
1541 static int dm_late_init(void *handle)
1543 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1545 struct dmcu_iram_parameters params;
1546 unsigned int linear_lut[16];
1548 struct dmcu *dmcu = NULL;
1551 dmcu = adev->dm.dc->res_pool->dmcu;
1553 for (i = 0; i < 16; i++)
1554 linear_lut[i] = 0xFFFF * i / 15;
1557 params.backlight_ramping_start = 0xCCCC;
1558 params.backlight_ramping_reduction = 0xCCCCCCCC;
1559 params.backlight_lut_array_size = 16;
1560 params.backlight_lut_array = linear_lut;
1562 /* Min backlight level after ABM reduction; don't allow below 1%:
1563 * 0xFFFF x 0.01 = 0x28F
1564 */
1565 params.min_abm_backlight = 0x28F;
1567 /* In the case where abm is implemented on dmcub,
1568 * dmcu object will be null.
1569 * ABM 2.4 and up are implemented on dmcub.
1572 ret = dmcu_load_iram(dmcu, params);
1573 else if (adev->dm.dc->ctx->dmub_srv)
1574 ret = dmub_init_abm_config(adev->dm.dc->res_pool, params);
1579 return detect_mst_link_for_all_connectors(adev_to_drm(adev));
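/*
 * Suspend or resume the MST topology managers around S3. If a manager
 * fails to resume, MST is turned off on that connector and a hotplug
 * event is sent so userspace can re-probe.
 */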
1582 static void s3_handle_mst(struct drm_device *dev, bool suspend)
1584 struct amdgpu_dm_connector *aconnector;
1585 struct drm_connector *connector;
1586 struct drm_connector_list_iter iter;
1587 struct drm_dp_mst_topology_mgr *mgr;
1589 bool need_hotplug = false;
1591 drm_connector_list_iter_begin(dev, &iter);
1592 drm_for_each_connector_iter(connector, &iter) {
1593 aconnector = to_amdgpu_dm_connector(connector);
1594 if (aconnector->dc_link->type != dc_connection_mst_branch ||
1595 aconnector->mst_port)
1598 mgr = &aconnector->mst_mgr;
1601 drm_dp_mst_topology_mgr_suspend(mgr);
1603 ret = drm_dp_mst_topology_mgr_resume(mgr, true);
1605 drm_dp_mst_topology_mgr_set_mst(mgr, false);
1606 need_hotplug = true;
1610 drm_connector_list_iter_end(&iter);
1613 drm_kms_helper_hotplug_event(dev);
1616 static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
1618 struct smu_context *smu = &adev->smu;
1621 if (!is_support_sw_smu(adev))
1624 * This interface is for dGPU Navi1x. The Linux dc-pplib interface depends
1625 * on the Windows driver dc implementation.
1626 * For Navi1x, the clock settings of the dcn watermarks are fixed. The settings
1627 * should be passed to smu during boot up and on resume from s3.
1628 * boot up: dc calculate dcn watermark clock settings within dc_create,
1629 * dcn20_resource_construct
1630 * then call pplib functions below to pass the settings to smu:
1631 * smu_set_watermarks_for_clock_ranges
1632 * smu_set_watermarks_table
1633 * navi10_set_watermarks_table
1634 * smu_write_watermarks_table
1636 * For Renoir, clock settings of dcn watermark are also fixed values.
1637 * dc has implemented a different flow for the Windows driver:
1638 * dc_hardware_init / dc_set_power_state
1643 * smu_set_watermarks_for_clock_ranges
1644 * renoir_set_watermarks_table
1645 * smu_write_watermarks_table
1648 * dc_hardware_init -> amdgpu_dm_init
1649 * dc_set_power_state --> dm_resume
1651 * Therefore, this function applies to navi10/12/14 but not to Renoir.
1654 switch (adev->asic_type) {
1663 ret = smu_write_watermarks_table(smu);
1665 DRM_ERROR("Failed to update WMTABLE!\n");
1673 * dm_hw_init() - Initialize DC device
1674 * @handle: The base driver device containing the amdgpu_dm device.
1676 * Initialize the &struct amdgpu_display_manager device. This involves calling
1677 * the initializers of each DM component, then populating the struct with them.
1679 * Although the function implies hardware initialization, both hardware and
1680 * software are initialized here. Splitting them out to their relevant init
1681 * hooks is a future TODO item.
1683 * Some notable things that are initialized here:
1685 * - Display Core, both software and hardware
1686 * - DC modules that we need (freesync and color management)
1687 * - DRM software states
1688 * - Interrupt sources and handlers
1690 * - Debug FS entries, if enabled
1692 static int dm_hw_init(void *handle)
1694 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1695 /* Create DAL display manager */
1696 amdgpu_dm_init(adev);
1697 amdgpu_dm_hpd_init(adev);
1703 * dm_hw_fini() - Teardown DC device
1704 * @handle: The base driver device containing the amdgpu_dm device.
1706 * Teardown components within &struct amdgpu_display_manager that require
1707 * cleanup. This involves cleaning up the DRM device, DC, and any modules that
1708 * were loaded. Also flush IRQ workqueues and disable them.
1710 static int dm_hw_fini(void *handle)
1712 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1714 amdgpu_dm_hpd_fini(adev);
1716 amdgpu_dm_irq_fini(adev);
1717 amdgpu_dm_fini(adev);
1722 static int dm_enable_vblank(struct drm_crtc *crtc);
1723 static void dm_disable_vblank(struct drm_crtc *crtc);
1725 static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev,
1726 struct dc_state *state, bool enable)
1728 enum dc_irq_source irq_source;
1729 struct amdgpu_crtc *acrtc;
1733 for (i = 0; i < state->stream_count; i++) {
1734 acrtc = get_crtc_by_otg_inst(
1735 adev, state->stream_status[i].primary_otg_inst);
1737 if (acrtc && state->stream_status[i].plane_count != 0) {
1738 irq_source = IRQ_TYPE_PFLIP + acrtc->otg_inst;
1739 rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
1740 DRM_DEBUG("crtc %d - vupdate irq %sabling: r=%d\n",
1741 acrtc->crtc_id, enable ? "en" : "dis", rc);
1743 DRM_WARN("Failed to %s pflip interrupts\n",
1744 enable ? "enable" : "disable");
1747 rc = dm_enable_vblank(&acrtc->base);
1749 DRM_WARN("Failed to enable vblank interrupts\n");
1751 dm_disable_vblank(&acrtc->base);
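/*
 * Commit an empty configuration: copy the current DC state, strip all
 * planes and streams from it, validate and commit the result. Used by
 * dm_suspend() while a GPU reset is in progress to quiesce the display
 * hardware.
 */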
1759 static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc)
1761 struct dc_state *context = NULL;
1762 enum dc_status res = DC_ERROR_UNEXPECTED;
1764 struct dc_stream_state *del_streams[MAX_PIPES];
1765 int del_streams_count = 0;
1767 memset(del_streams, 0, sizeof(del_streams));
1769 context = dc_create_state(dc);
1770 if (context == NULL)
1771 goto context_alloc_fail;
1773 dc_resource_state_copy_construct_current(dc, context);
1775 /* First remove from context all streams */
1776 for (i = 0; i < context->stream_count; i++) {
1777 struct dc_stream_state *stream = context->streams[i];
1779 del_streams[del_streams_count++] = stream;
1782 /* Remove all planes for removed streams and then remove the streams */
1783 for (i = 0; i < del_streams_count; i++) {
1784 if (!dc_rem_all_planes_for_stream(dc, del_streams[i], context)) {
1785 res = DC_FAIL_DETACH_SURFACES;
1789 res = dc_remove_stream_from_ctx(dc, context, del_streams[i]);
1795 res = dc_validate_global_state(dc, context, false);
1798 DRM_ERROR("%s:resource validation failed, dc_status:%d\n", __func__, res);
1802 res = dc_commit_state(dc, context);
1805 dc_release_state(context);
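/*
 * Suspend entry point. During GPU reset the current DC state is cached,
 * interrupts are disabled and zero streams are committed; on a regular
 * suspend the atomic state is saved, MST and DM IRQs are suspended and
 * DC is put into the D3 power state.
 */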
1811 static int dm_suspend(void *handle)
1813 struct amdgpu_device *adev = handle;
1814 struct amdgpu_display_manager *dm = &adev->dm;
1817 if (amdgpu_in_reset(adev)) {
1818 mutex_lock(&dm->dc_lock);
1819 dm->cached_dc_state = dc_copy_state(dm->dc->current_state);
1821 dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false);
1823 amdgpu_dm_commit_zero_streams(dm->dc);
1825 amdgpu_dm_irq_suspend(adev);
1830 WARN_ON(adev->dm.cached_state);
1831 adev->dm.cached_state = drm_atomic_helper_suspend(adev_to_drm(adev));
1833 s3_handle_mst(adev_to_drm(adev), true);
1835 amdgpu_dm_irq_suspend(adev);
1838 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
1843 static struct amdgpu_dm_connector *
1844 amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
1845 struct drm_crtc *crtc)
1848 struct drm_connector_state *new_con_state;
1849 struct drm_connector *connector;
1850 struct drm_crtc *crtc_from_state;
1852 for_each_new_connector_in_state(state, connector, new_con_state, i) {
1853 crtc_from_state = new_con_state->crtc;
1855 if (crtc_from_state == crtc)
1856 return to_amdgpu_dm_connector(connector);
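/*
 * Used when a connector is forced (aconnector->base.force) but no
 * physical sink was detected: synthesize a sink matching the connector
 * signal type and try to read a locally stored EDID for it.
 */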
1862 static void emulated_link_detect(struct dc_link *link)
1864 struct dc_sink_init_data sink_init_data = { 0 };
1865 struct display_sink_capability sink_caps = { 0 };
1866 enum dc_edid_status edid_status;
1867 struct dc_context *dc_ctx = link->ctx;
1868 struct dc_sink *sink = NULL;
1869 struct dc_sink *prev_sink = NULL;
1871 link->type = dc_connection_none;
1872 prev_sink = link->local_sink;
1874 if (prev_sink != NULL)
1875 dc_sink_retain(prev_sink);
1877 switch (link->connector_signal) {
1878 case SIGNAL_TYPE_HDMI_TYPE_A: {
1879 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1880 sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
1884 case SIGNAL_TYPE_DVI_SINGLE_LINK: {
1885 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1886 sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
1890 case SIGNAL_TYPE_DVI_DUAL_LINK: {
1891 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1892 sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
1896 case SIGNAL_TYPE_LVDS: {
1897 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1898 sink_caps.signal = SIGNAL_TYPE_LVDS;
1902 case SIGNAL_TYPE_EDP: {
1903 sink_caps.transaction_type =
1904 DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
1905 sink_caps.signal = SIGNAL_TYPE_EDP;
1909 case SIGNAL_TYPE_DISPLAY_PORT: {
1910 sink_caps.transaction_type =
1911 DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
1912 sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
1917 DC_ERROR("Invalid connector type! signal:%d\n",
1918 link->connector_signal);
1922 sink_init_data.link = link;
1923 sink_init_data.sink_signal = sink_caps.signal;
1925 sink = dc_sink_create(&sink_init_data);
1927 DC_ERROR("Failed to create sink!\n");
1931 /* dc_sink_create returns a new reference */
1932 link->local_sink = sink;
1934 edid_status = dm_helpers_read_local_edid(
1939 if (edid_status != EDID_OK)
1940 DC_ERROR("Failed to read EDID");
1944 static void dm_gpureset_commit_state(struct dc_state *dc_state,
1945 struct amdgpu_display_manager *dm)
1948 struct dc_surface_update surface_updates[MAX_SURFACES];
1949 struct dc_plane_info plane_infos[MAX_SURFACES];
1950 struct dc_scaling_info scaling_infos[MAX_SURFACES];
1951 struct dc_flip_addrs flip_addrs[MAX_SURFACES];
1952 struct dc_stream_update stream_update;
1956 bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
1959 dm_error("Failed to allocate update bundle\n");
1963 for (k = 0; k < dc_state->stream_count; k++) {
1964 bundle->stream_update.stream = dc_state->streams[k];
1966 for (m = 0; m < dc_state->stream_status->plane_count; m++) {
1967 bundle->surface_updates[m].surface =
1968 dc_state->stream_status->plane_states[m];
1969 bundle->surface_updates[m].surface->force_full_update =
1972 dc_commit_updates_for_stream(
1973 dm->dc, bundle->surface_updates,
1974 dc_state->stream_status->plane_count,
1975 dc_state->streams[k], &bundle->stream_update, dc_state);
1984 static void dm_set_dpms_off(struct dc_link *link)
1986 struct dc_stream_state *stream_state;
1987 struct amdgpu_dm_connector *aconnector = link->priv;
1988 struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
1989 struct dc_stream_update stream_update;
1990 bool dpms_off = true;
1992 memset(&stream_update, 0, sizeof(stream_update));
1993 stream_update.dpms_off = &dpms_off;
1995 mutex_lock(&adev->dm.dc_lock);
1996 stream_state = dc_stream_find_from_link(link);
1998 if (stream_state == NULL) {
1999 DRM_DEBUG_DRIVER("Error finding stream state associated with link!\n");
2000 mutex_unlock(&adev->dm.dc_lock);
2004 stream_update.stream = stream_state;
2005 dc_commit_updates_for_stream(stream_state->ctx->dc, NULL, 0,
2006 stream_state, &stream_update,
2007 stream_state->ctx->dc->current_state);
2008 mutex_unlock(&adev->dm.dc_lock);
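/*
 * Resume entry point. When called as part of GPU reset the cached DC
 * state is re-committed directly; on a normal S3 resume the DC state is
 * rebuilt, DMUB and DC are re-initialized, links are re-detected and
 * the cached atomic state is restored with a forced modeset.
 */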
2011 static int dm_resume(void *handle)
2013 struct amdgpu_device *adev = handle;
2014 struct drm_device *ddev = adev_to_drm(adev);
2015 struct amdgpu_display_manager *dm = &adev->dm;
2016 struct amdgpu_dm_connector *aconnector;
2017 struct drm_connector *connector;
2018 struct drm_connector_list_iter iter;
2019 struct drm_crtc *crtc;
2020 struct drm_crtc_state *new_crtc_state;
2021 struct dm_crtc_state *dm_new_crtc_state;
2022 struct drm_plane *plane;
2023 struct drm_plane_state *new_plane_state;
2024 struct dm_plane_state *dm_new_plane_state;
2025 struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
2026 enum dc_connection_type new_connection_type = dc_connection_none;
2027 struct dc_state *dc_state;
2030 if (amdgpu_in_reset(adev)) {
2031 dc_state = dm->cached_dc_state;
2033 r = dm_dmub_hw_init(adev);
2035 DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
2037 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2040 amdgpu_dm_irq_resume_early(adev);
2042 for (i = 0; i < dc_state->stream_count; i++) {
2043 dc_state->streams[i]->mode_changed = true;
2044 for (j = 0; j < dc_state->stream_status->plane_count; j++) {
2045 dc_state->stream_status->plane_states[j]->update_flags.raw
2050 WARN_ON(!dc_commit_state(dm->dc, dc_state));
2052 dm_gpureset_commit_state(dm->cached_dc_state, dm);
2054 dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, true);
2056 dc_release_state(dm->cached_dc_state);
2057 dm->cached_dc_state = NULL;
2059 amdgpu_dm_irq_resume_late(adev);
2061 mutex_unlock(&dm->dc_lock);
2065 /* Recreate dc_state - DC invalidates it when setting power state to S3. */
2066 dc_release_state(dm_state->context);
2067 dm_state->context = dc_create_state(dm->dc);
2068 /* TODO: Remove dc_state->dccg, use dc->dccg directly. */
2069 dc_resource_state_construct(dm->dc, dm_state->context);
2071 /* Before powering on DC we need to re-initialize DMUB. */
2072 r = dm_dmub_hw_init(adev);
2074 DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
2076 /* power on hardware */
2077 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2079 /* program HPD filter */
2083 * early enable HPD Rx IRQ, should be done before set mode as short
2084 * pulse interrupts are used for MST
2086 amdgpu_dm_irq_resume_early(adev);
2088 /* On resume we need to rewrite the MSTM control bits to enable MST */
2089 s3_handle_mst(ddev, false);
2092 drm_connector_list_iter_begin(ddev, &iter);
2093 drm_for_each_connector_iter(connector, &iter) {
2094 aconnector = to_amdgpu_dm_connector(connector);
2096 /*
2097 * This is the case when traversing through already created
2098 * MST connectors; those should be skipped here.
2099 */
2100 if (aconnector->mst_port)
2103 mutex_lock(&aconnector->hpd_lock);
2104 if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2105 DRM_ERROR("KMS: Failed to detect connector\n");
2107 if (aconnector->base.force && new_connection_type == dc_connection_none)
2108 emulated_link_detect(aconnector->dc_link);
2110 dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
2112 if (aconnector->fake_enable && aconnector->dc_link->local_sink)
2113 aconnector->fake_enable = false;
2115 if (aconnector->dc_sink)
2116 dc_sink_release(aconnector->dc_sink);
2117 aconnector->dc_sink = NULL;
2118 amdgpu_dm_update_connector_after_detect(aconnector);
2119 mutex_unlock(&aconnector->hpd_lock);
2121 drm_connector_list_iter_end(&iter);
2123 /* Force mode set in atomic commit */
2124 for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
2125 new_crtc_state->active_changed = true;
2128	 * atomic_check is expected to create the dc states. We need to release
2129	 * them here, since they were duplicated as part of the suspend procedure.
2132 for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
2133 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
2134 if (dm_new_crtc_state->stream) {
2135 WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
2136 dc_stream_release(dm_new_crtc_state->stream);
2137 dm_new_crtc_state->stream = NULL;
2141 for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
2142 dm_new_plane_state = to_dm_plane_state(new_plane_state);
2143 if (dm_new_plane_state->dc_state) {
2144 WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
2145 dc_plane_state_release(dm_new_plane_state->dc_state);
2146 dm_new_plane_state->dc_state = NULL;
2150 drm_atomic_helper_resume(ddev, dm->cached_state);
2152 dm->cached_state = NULL;
2154 amdgpu_dm_irq_resume_late(adev);
2156 amdgpu_dm_smu_write_watermarks_table(adev);
2164	 * DM (and consequently DC) is registered in the amdgpu base driver as an IP
2165 * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to
2166 * the base driver's device list to be initialized and torn down accordingly.
2168 * The functions to do so are provided as hooks in &struct amd_ip_funcs.
2171 static const struct amd_ip_funcs amdgpu_dm_funcs = {
2173 .early_init = dm_early_init,
2174 .late_init = dm_late_init,
2175 .sw_init = dm_sw_init,
2176 .sw_fini = dm_sw_fini,
2177 .hw_init = dm_hw_init,
2178 .hw_fini = dm_hw_fini,
2179 .suspend = dm_suspend,
2180 .resume = dm_resume,
2181 .is_idle = dm_is_idle,
2182 .wait_for_idle = dm_wait_for_idle,
2183 .check_soft_reset = dm_check_soft_reset,
2184 .soft_reset = dm_soft_reset,
2185 .set_clockgating_state = dm_set_clockgating_state,
2186 .set_powergating_state = dm_set_powergating_state,
2189 const struct amdgpu_ip_block_version dm_ip_block =
2191 .type = AMD_IP_BLOCK_TYPE_DCE,
2195 .funcs = &amdgpu_dm_funcs,
2205 static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
2206 .fb_create = amdgpu_display_user_framebuffer_create,
2207 .get_format_info = amd_get_format_info,
2208 .output_poll_changed = drm_fb_helper_output_poll_changed,
2209 .atomic_check = amdgpu_dm_atomic_check,
2210 .atomic_commit = drm_atomic_helper_commit,
2213 static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
2214 .atomic_commit_tail = amdgpu_dm_atomic_commit_tail
2217 static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
2219 u32 max_cll, min_cll, max, min, q, r;
2220 struct amdgpu_dm_backlight_caps *caps;
2221 struct amdgpu_display_manager *dm;
2222 struct drm_connector *conn_base;
2223 struct amdgpu_device *adev;
2224 struct dc_link *link = NULL;
2225 static const u8 pre_computed_values[] = {
2226 50, 51, 52, 53, 55, 56, 57, 58, 59, 61, 62, 63, 65, 66, 68, 69,
2227 71, 72, 74, 75, 77, 79, 81, 82, 84, 86, 88, 90, 92, 94, 96, 98};
2229 if (!aconnector || !aconnector->dc_link)
2232 link = aconnector->dc_link;
2233 if (link->connector_signal != SIGNAL_TYPE_EDP)
2236 conn_base = &aconnector->base;
2237 adev = drm_to_adev(conn_base->dev);
2239 caps = &dm->backlight_caps;
2240 caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
2241 caps->aux_support = false;
2242 max_cll = conn_base->hdr_sink_metadata.hdmi_type1.max_cll;
2243 min_cll = conn_base->hdr_sink_metadata.hdmi_type1.min_cll;
2245 if (caps->ext_caps->bits.oled == 1 ||
2246 caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
2247 caps->ext_caps->bits.hdr_aux_backlight_control == 1)
2248 caps->aux_support = true;
2250 /* From the specification (CTA-861-G), for calculating the maximum
2251 * luminance we need to use:
2252 * Luminance = 50*2**(CV/32)
2253 * Where CV is a one-byte value.
2254	 * Calculating this expression would require floating-point precision;
2255	 * to avoid that complexity, we take advantage of the fact that CV is
2256	 * divided by a constant. From Euclid's division algorithm, we know that
2257	 * CV can be written as: CV = 32*q + r. Next, we replace CV in the
2258	 * Luminance expression and get 50*(2**q)*(2**(r/32)), hence we just
2259	 * need to pre-compute the values of 50*(2**(r/32)). For that
2260	 * pre-computation we used the following Ruby line:
2261 * (0...32).each {|cv| puts (50*2**(cv/32.0)).round}
2262 * The results of the above expressions can be verified at
2263 * pre_computed_values.
2267 max = (1 << q) * pre_computed_values[r];
2269 // min luminance: maxLum * (CV/255)^2 / 100
2270 q = DIV_ROUND_CLOSEST(min_cll, 255);
2271 min = max * DIV_ROUND_CLOSEST((q * q), 100);
2273 caps->aux_max_input_signal = max;
2274 caps->aux_min_input_signal = min;
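/*
 * A hedged worked example of the computation above (the max_cll value is
 * made up for illustration): for max_cll = 70 the CTA-861-G formula gives
 * 50*2**(70/32) ~= 228 nits. With the table approach, q = 70 / 32 = 2 and
 * r = 70 % 32 = 6, so max = (1 << 2) * pre_computed_values[6] = 4 * 57 = 228,
 * matching the floating-point result without needing floats.
 */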
2277 void amdgpu_dm_update_connector_after_detect(
2278 struct amdgpu_dm_connector *aconnector)
2280 struct drm_connector *connector = &aconnector->base;
2281 struct drm_device *dev = connector->dev;
2282 struct dc_sink *sink;
2284 /* MST handled by drm_mst framework */
2285 if (aconnector->mst_mgr.mst_state == true)
2288 sink = aconnector->dc_link->local_sink;
2290 dc_sink_retain(sink);
2293	 * EDID mgmt connector gets its first update only in the mode_valid hook, and then
2294	 * the connector sink is set to either a fake or a physical sink depending on link status.
2295 * Skip if already done during boot.
2297 if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
2298 && aconnector->dc_em_sink) {
2301 * For S3 resume with headless use eml_sink to fake stream
2302 * because on resume connector->sink is set to NULL
2304 mutex_lock(&dev->mode_config.mutex);
2307 if (aconnector->dc_sink) {
2308 amdgpu_dm_update_freesync_caps(connector, NULL);
2310	 * The retain and release below are used to
2311	 * bump up the refcount for the sink because the link doesn't point
2312	 * to it anymore after disconnect, so on the next crtc-to-connector
2313	 * reshuffle by UMD we would otherwise get an unwanted dc_sink release.
2315 dc_sink_release(aconnector->dc_sink);
2317 aconnector->dc_sink = sink;
2318 dc_sink_retain(aconnector->dc_sink);
2319 amdgpu_dm_update_freesync_caps(connector,
2322 amdgpu_dm_update_freesync_caps(connector, NULL);
2323 if (!aconnector->dc_sink) {
2324 aconnector->dc_sink = aconnector->dc_em_sink;
2325 dc_sink_retain(aconnector->dc_sink);
2329 mutex_unlock(&dev->mode_config.mutex);
2332 dc_sink_release(sink);
2337	 * TODO: temporary guard until a proper fix is found.
2338	 * If this sink is an MST sink, we should not do anything.
2340 if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
2341 dc_sink_release(sink);
2345 if (aconnector->dc_sink == sink) {
2347 * We got a DP short pulse (Link Loss, DP CTS, etc...).
2350 DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
2351 aconnector->connector_id);
2353 dc_sink_release(sink);
2357 DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
2358 aconnector->connector_id, aconnector->dc_sink, sink);
2360 mutex_lock(&dev->mode_config.mutex);
2363 * 1. Update status of the drm connector
2364 * 2. Send an event and let userspace tell us what to do
2368 * TODO: check if we still need the S3 mode update workaround.
2369 * If yes, put it here.
2371 if (aconnector->dc_sink)
2372 amdgpu_dm_update_freesync_caps(connector, NULL);
2374 aconnector->dc_sink = sink;
2375 dc_sink_retain(aconnector->dc_sink);
2376 if (sink->dc_edid.length == 0) {
2377 aconnector->edid = NULL;
2378 if (aconnector->dc_link->aux_mode) {
2379 drm_dp_cec_unset_edid(
2380 &aconnector->dm_dp_aux.aux);
2384 (struct edid *)sink->dc_edid.raw_edid;
2386 drm_connector_update_edid_property(connector,
2388 drm_add_edid_modes(connector, aconnector->edid);
2390 if (aconnector->dc_link->aux_mode)
2391 drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
2395 amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
2396 update_connector_ext_caps(aconnector);
2398 drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
2399 amdgpu_dm_update_freesync_caps(connector, NULL);
2400 drm_connector_update_edid_property(connector, NULL);
2401 aconnector->num_modes = 0;
2402 dc_sink_release(aconnector->dc_sink);
2403 aconnector->dc_sink = NULL;
2404 aconnector->edid = NULL;
2405 #ifdef CONFIG_DRM_AMD_DC_HDCP
2406	/* Set CP to DESIRED if it was ENABLED, so we can re-enable it on hotplug */
2407 if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
2408 connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
2412 mutex_unlock(&dev->mode_config.mutex);
2414 update_subconnector_property(aconnector);
2417 dc_sink_release(sink);
2420 static void handle_hpd_irq(void *param)
2422 struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
2423 struct drm_connector *connector = &aconnector->base;
2424 struct drm_device *dev = connector->dev;
2425 enum dc_connection_type new_connection_type = dc_connection_none;
2426 #ifdef CONFIG_DRM_AMD_DC_HDCP
2427 struct amdgpu_device *adev = drm_to_adev(dev);
2428 struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
2432	 * In case of failure or MST there is no need to update the connector status or notify the OS,
2433	 * since (in the MST case) MST does this in its own context.
2435 mutex_lock(&aconnector->hpd_lock);
2437 #ifdef CONFIG_DRM_AMD_DC_HDCP
2438 if (adev->dm.hdcp_workqueue) {
2439 hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
2440 dm_con_state->update_hdcp = true;
2443 if (aconnector->fake_enable)
2444 aconnector->fake_enable = false;
2446 if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2447 DRM_ERROR("KMS: Failed to detect connector\n");
2449 if (aconnector->base.force && new_connection_type == dc_connection_none) {
2450 emulated_link_detect(aconnector->dc_link);
2453 drm_modeset_lock_all(dev);
2454 dm_restore_drm_connector_state(dev, connector);
2455 drm_modeset_unlock_all(dev);
2457 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2458 drm_kms_helper_hotplug_event(dev);
2460 } else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
2461 if (new_connection_type == dc_connection_none &&
2462 aconnector->dc_link->type == dc_connection_none)
2463 dm_set_dpms_off(aconnector->dc_link);
2465 amdgpu_dm_update_connector_after_detect(aconnector);
2467 drm_modeset_lock_all(dev);
2468 dm_restore_drm_connector_state(dev, connector);
2469 drm_modeset_unlock_all(dev);
2471 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2472 drm_kms_helper_hotplug_event(dev);
2474 mutex_unlock(&aconnector->hpd_lock);
2478 static void dm_handle_hpd_rx_irq(struct amdgpu_dm_connector *aconnector)
2480 uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
2482 bool new_irq_handled = false;
2484 int dpcd_bytes_to_read;
2486 const int max_process_count = 30;
2487 int process_count = 0;
2489 const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);
2491 if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
2492 dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
2493 /* DPCD 0x200 - 0x201 for downstream IRQ */
2494 dpcd_addr = DP_SINK_COUNT;
2496 dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
2497 /* DPCD 0x2002 - 0x2005 for downstream IRQ */
2498 dpcd_addr = DP_SINK_COUNT_ESI;
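/*
 * Sizing note (a sketch assuming the standard DPCD register layout from
 * drm_dp_helper.h): for pre-1.2 receivers the read covers DPCD 0x200 - 0x201,
 * i.e. 2 bytes, while for 1.2+ receivers it covers the ESI range
 * 0x2002 - 0x2005, i.e. 4 bytes - the size of the esi[] buffer above.
 */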
2501 dret = drm_dp_dpcd_read(
2502 &aconnector->dm_dp_aux.aux,
2505 dpcd_bytes_to_read);
2507 while (dret == dpcd_bytes_to_read &&
2508 process_count < max_process_count) {
2514 DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
2515 /* handle HPD short pulse irq */
2516 if (aconnector->mst_mgr.mst_state)
2518 &aconnector->mst_mgr,
2522 if (new_irq_handled) {
2523	/* ACK at DPCD to notify downstream */
2524 const int ack_dpcd_bytes_to_write =
2525 dpcd_bytes_to_read - 1;
2527 for (retry = 0; retry < 3; retry++) {
2530 wret = drm_dp_dpcd_write(
2531 &aconnector->dm_dp_aux.aux,
2534 ack_dpcd_bytes_to_write);
2535 if (wret == ack_dpcd_bytes_to_write)
2539 /* check if there is new irq to be handled */
2540 dret = drm_dp_dpcd_read(
2541 &aconnector->dm_dp_aux.aux,
2544 dpcd_bytes_to_read);
2546 new_irq_handled = false;
2552 if (process_count == max_process_count)
2553 DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
2556 static void handle_hpd_rx_irq(void *param)
2558 struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
2559 struct drm_connector *connector = &aconnector->base;
2560 struct drm_device *dev = connector->dev;
2561 struct dc_link *dc_link = aconnector->dc_link;
2562 bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
2563 bool result = false;
2564 enum dc_connection_type new_connection_type = dc_connection_none;
2565 struct amdgpu_device *adev = drm_to_adev(dev);
2566 union hpd_irq_data hpd_irq_data;
2568 memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));
2571	 * TODO: Temporarily add a mutex to protect the hpd interrupt from gpio
2572	 * conflicts; after the i2c helper is implemented, this mutex should be retired.
2575 if (dc_link->type != dc_connection_mst_branch)
2576 mutex_lock(&aconnector->hpd_lock);
2578 read_hpd_rx_irq_data(dc_link, &hpd_irq_data);
2580 if ((dc_link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN) ||
2581 (dc_link->type == dc_connection_mst_branch)) {
2582 if (hpd_irq_data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY) {
2584 dm_handle_hpd_rx_irq(aconnector);
2586 } else if (hpd_irq_data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) {
2588 dm_handle_hpd_rx_irq(aconnector);
2593 mutex_lock(&adev->dm.dc_lock);
2594 #ifdef CONFIG_DRM_AMD_DC_HDCP
2595 result = dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data, NULL);
2597 result = dc_link_handle_hpd_rx_irq(dc_link, NULL, NULL);
2599 mutex_unlock(&adev->dm.dc_lock);
2602 if (result && !is_mst_root_connector) {
2603 /* Downstream Port status changed. */
2604 if (!dc_link_detect_sink(dc_link, &new_connection_type))
2605 DRM_ERROR("KMS: Failed to detect connector\n");
2607 if (aconnector->base.force && new_connection_type == dc_connection_none) {
2608 emulated_link_detect(dc_link);
2610 if (aconnector->fake_enable)
2611 aconnector->fake_enable = false;
2613 amdgpu_dm_update_connector_after_detect(aconnector);
2616 drm_modeset_lock_all(dev);
2617 dm_restore_drm_connector_state(dev, connector);
2618 drm_modeset_unlock_all(dev);
2620 drm_kms_helper_hotplug_event(dev);
2621 } else if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {
2623 if (aconnector->fake_enable)
2624 aconnector->fake_enable = false;
2626 amdgpu_dm_update_connector_after_detect(aconnector);
2629 drm_modeset_lock_all(dev);
2630 dm_restore_drm_connector_state(dev, connector);
2631 drm_modeset_unlock_all(dev);
2633 drm_kms_helper_hotplug_event(dev);
2636 #ifdef CONFIG_DRM_AMD_DC_HDCP
2637 if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) {
2638 if (adev->dm.hdcp_workqueue)
2639 hdcp_handle_cpirq(adev->dm.hdcp_workqueue, aconnector->base.index);
2643 if (dc_link->type != dc_connection_mst_branch) {
2644 drm_dp_cec_irq(&aconnector->dm_dp_aux.aux);
2645 mutex_unlock(&aconnector->hpd_lock);
2649 static void register_hpd_handlers(struct amdgpu_device *adev)
2651 struct drm_device *dev = adev_to_drm(adev);
2652 struct drm_connector *connector;
2653 struct amdgpu_dm_connector *aconnector;
2654 const struct dc_link *dc_link;
2655 struct dc_interrupt_params int_params = {0};
2657 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2658 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2660 list_for_each_entry(connector,
2661 &dev->mode_config.connector_list, head) {
2663 aconnector = to_amdgpu_dm_connector(connector);
2664 dc_link = aconnector->dc_link;
2666 if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
2667 int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2668 int_params.irq_source = dc_link->irq_source_hpd;
2670 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2672 (void *) aconnector);
2675 if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {
2677 /* Also register for DP short pulse (hpd_rx). */
2678 int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2679 int_params.irq_source = dc_link->irq_source_hpd_rx;
2681 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2683 (void *) aconnector);
2688 #if defined(CONFIG_DRM_AMD_DC_SI)
2689 /* Register IRQ sources and initialize IRQ callbacks */
2690 static int dce60_register_irq_handlers(struct amdgpu_device *adev)
2692 struct dc *dc = adev->dm.dc;
2693 struct common_irq_params *c_irq_params;
2694 struct dc_interrupt_params int_params = {0};
2697 unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
2699 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2700 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2703 * Actions of amdgpu_irq_add_id():
2704 * 1. Register a set() function with base driver.
2705 * Base driver will call set() function to enable/disable an
2706 * interrupt in DC hardware.
2707 * 2. Register amdgpu_dm_irq_handler().
2708 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2709 * coming from DC hardware.
2710 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2711 * for acknowledging and handling. */
2713 /* Use VBLANK interrupt */
2714 for (i = 0; i < adev->mode_info.num_crtc; i++) {
2715 r = amdgpu_irq_add_id(adev, client_id, i+1 , &adev->crtc_irq);
2717 DRM_ERROR("Failed to add crtc irq id!\n");
2721 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2722 int_params.irq_source =
2723 dc_interrupt_to_irq_source(dc, i+1 , 0);
2725 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2727 c_irq_params->adev = adev;
2728 c_irq_params->irq_src = int_params.irq_source;
2730 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2731 dm_crtc_high_irq, c_irq_params);
2734 /* Use GRPH_PFLIP interrupt */
2735 for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
2736 i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
2737 r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
2739 DRM_ERROR("Failed to add page flip irq id!\n");
2743 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2744 int_params.irq_source =
2745 dc_interrupt_to_irq_source(dc, i, 0);
2747 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2749 c_irq_params->adev = adev;
2750 c_irq_params->irq_src = int_params.irq_source;
2752 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2753 dm_pflip_high_irq, c_irq_params);
2758 r = amdgpu_irq_add_id(adev, client_id,
2759 VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
2761 DRM_ERROR("Failed to add hpd irq id!\n");
2765 register_hpd_handlers(adev);
2771 /* Register IRQ sources and initialize IRQ callbacks */
2772 static int dce110_register_irq_handlers(struct amdgpu_device *adev)
2774 struct dc *dc = adev->dm.dc;
2775 struct common_irq_params *c_irq_params;
2776 struct dc_interrupt_params int_params = {0};
2779 unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
2781 if (adev->asic_type >= CHIP_VEGA10)
2782 client_id = SOC15_IH_CLIENTID_DCE;
2784 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2785 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2788 * Actions of amdgpu_irq_add_id():
2789 * 1. Register a set() function with base driver.
2790 * Base driver will call set() function to enable/disable an
2791 * interrupt in DC hardware.
2792 * 2. Register amdgpu_dm_irq_handler().
2793 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2794 * coming from DC hardware.
2795 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2796 * for acknowledging and handling. */
2798 /* Use VBLANK interrupt */
2799 for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
2800 r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
2802 DRM_ERROR("Failed to add crtc irq id!\n");
2806 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2807 int_params.irq_source =
2808 dc_interrupt_to_irq_source(dc, i, 0);
2810 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2812 c_irq_params->adev = adev;
2813 c_irq_params->irq_src = int_params.irq_source;
2815 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2816 dm_crtc_high_irq, c_irq_params);
2819 /* Use VUPDATE interrupt */
2820 for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) {
2821 r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq);
2823 DRM_ERROR("Failed to add vupdate irq id!\n");
2827 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2828 int_params.irq_source =
2829 dc_interrupt_to_irq_source(dc, i, 0);
2831 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
2833 c_irq_params->adev = adev;
2834 c_irq_params->irq_src = int_params.irq_source;
2836 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2837 dm_vupdate_high_irq, c_irq_params);
2840 /* Use GRPH_PFLIP interrupt */
2841 for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
2842 i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
2843 r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
2845 DRM_ERROR("Failed to add page flip irq id!\n");
2849 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2850 int_params.irq_source =
2851 dc_interrupt_to_irq_source(dc, i, 0);
2853 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2855 c_irq_params->adev = adev;
2856 c_irq_params->irq_src = int_params.irq_source;
2858 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2859 dm_pflip_high_irq, c_irq_params);
2864 r = amdgpu_irq_add_id(adev, client_id,
2865 VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
2867 DRM_ERROR("Failed to add hpd irq id!\n");
2871 register_hpd_handlers(adev);
2876 #if defined(CONFIG_DRM_AMD_DC_DCN)
2877 /* Register IRQ sources and initialize IRQ callbacks */
2878 static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
2880 struct dc *dc = adev->dm.dc;
2881 struct common_irq_params *c_irq_params;
2882 struct dc_interrupt_params int_params = {0};
2886 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2887 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2890 * Actions of amdgpu_irq_add_id():
2891 * 1. Register a set() function with base driver.
2892 * Base driver will call set() function to enable/disable an
2893 * interrupt in DC hardware.
2894 * 2. Register amdgpu_dm_irq_handler().
2895 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2896 * coming from DC hardware.
2897 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2898 * for acknowledging and handling.
2901 /* Use VSTARTUP interrupt */
2902 for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
2903 i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
2905 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);
2908 DRM_ERROR("Failed to add crtc irq id!\n");
2912 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2913 int_params.irq_source =
2914 dc_interrupt_to_irq_source(dc, i, 0);
2916 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2918 c_irq_params->adev = adev;
2919 c_irq_params->irq_src = int_params.irq_source;
2921 amdgpu_dm_irq_register_interrupt(
2922 adev, &int_params, dm_crtc_high_irq, c_irq_params);
2925 /* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to
2926 * the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx
2927 * to trigger at end of each vblank, regardless of state of the lock,
2928 * matching DCE behaviour.
2930 for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT;
2931 i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT + adev->mode_info.num_crtc - 1;
2933 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq);
2936 DRM_ERROR("Failed to add vupdate irq id!\n");
2940 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2941 int_params.irq_source =
2942 dc_interrupt_to_irq_source(dc, i, 0);
2944 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
2946 c_irq_params->adev = adev;
2947 c_irq_params->irq_src = int_params.irq_source;
2949 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2950 dm_vupdate_high_irq, c_irq_params);
2953 /* Use GRPH_PFLIP interrupt */
2954 for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
2955 i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + adev->mode_info.num_crtc - 1;
2957 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
2959 DRM_ERROR("Failed to add page flip irq id!\n");
2963 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2964 int_params.irq_source =
2965 dc_interrupt_to_irq_source(dc, i, 0);
2967 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2969 c_irq_params->adev = adev;
2970 c_irq_params->irq_src = int_params.irq_source;
2972 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2973 dm_pflip_high_irq, c_irq_params);
2978 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
2981 DRM_ERROR("Failed to add hpd irq id!\n");
2985 register_hpd_handlers(adev);
2992 * Acquires the lock for the atomic state object and returns
2993 * the new atomic state.
2995 * This should only be called during atomic check.
2997 static int dm_atomic_get_state(struct drm_atomic_state *state,
2998 struct dm_atomic_state **dm_state)
3000 struct drm_device *dev = state->dev;
3001 struct amdgpu_device *adev = drm_to_adev(dev);
3002 struct amdgpu_display_manager *dm = &adev->dm;
3003 struct drm_private_state *priv_state;
3008 priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj);
3009 if (IS_ERR(priv_state))
3010 return PTR_ERR(priv_state);
3012 *dm_state = to_dm_atomic_state(priv_state);
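/*
 * Minimal usage sketch (hypothetical caller, shown only for illustration):
 * from atomic check one would typically do
 *
 *	struct dm_atomic_state *dm_state;
 *	int ret = dm_atomic_get_state(state, &dm_state);
 *	if (ret)
 *		return ret;
 *	// dm_state->context can now be inspected/modified for this commit
 *
 * since drm_atomic_get_private_obj_state() has already locked the private
 * object and duplicated its state into the atomic state.
 */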
3017 static struct dm_atomic_state *
3018 dm_atomic_get_new_state(struct drm_atomic_state *state)
3020 struct drm_device *dev = state->dev;
3021 struct amdgpu_device *adev = drm_to_adev(dev);
3022 struct amdgpu_display_manager *dm = &adev->dm;
3023 struct drm_private_obj *obj;
3024 struct drm_private_state *new_obj_state;
3027 for_each_new_private_obj_in_state(state, obj, new_obj_state, i) {
3028 if (obj->funcs == dm->atomic_obj.funcs)
3029 return to_dm_atomic_state(new_obj_state);
3035 static struct drm_private_state *
3036 dm_atomic_duplicate_state(struct drm_private_obj *obj)
3038 struct dm_atomic_state *old_state, *new_state;
3040 new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
3044 __drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base);
3046 old_state = to_dm_atomic_state(obj->state);
3048 if (old_state && old_state->context)
3049 new_state->context = dc_copy_state(old_state->context);
3051 if (!new_state->context) {
3056 return &new_state->base;
3059 static void dm_atomic_destroy_state(struct drm_private_obj *obj,
3060 struct drm_private_state *state)
3062 struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
3064 if (dm_state && dm_state->context)
3065 dc_release_state(dm_state->context);
3070 static struct drm_private_state_funcs dm_atomic_state_funcs = {
3071 .atomic_duplicate_state = dm_atomic_duplicate_state,
3072 .atomic_destroy_state = dm_atomic_destroy_state,
3075 static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
3077 struct dm_atomic_state *state;
3080 adev->mode_info.mode_config_initialized = true;
3082 adev_to_drm(adev)->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
3083 adev_to_drm(adev)->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;
3085 adev_to_drm(adev)->mode_config.max_width = 16384;
3086 adev_to_drm(adev)->mode_config.max_height = 16384;
3088 adev_to_drm(adev)->mode_config.preferred_depth = 24;
3089 adev_to_drm(adev)->mode_config.prefer_shadow = 1;
3090 /* indicates support for immediate flip */
3091 adev_to_drm(adev)->mode_config.async_page_flip = true;
3093 adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base;
3095 state = kzalloc(sizeof(*state), GFP_KERNEL);
3099 state->context = dc_create_state(adev->dm.dc);
3100 if (!state->context) {
3105 dc_resource_state_copy_construct_current(adev->dm.dc, state->context);
3107 drm_atomic_private_obj_init(adev_to_drm(adev),
3108 &adev->dm.atomic_obj,
3110 &dm_atomic_state_funcs);
3112 r = amdgpu_display_modeset_create_props(adev);
3114 dc_release_state(state->context);
3119 r = amdgpu_dm_audio_init(adev);
3121 dc_release_state(state->context);
3129 #define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
3130 #define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
3131 #define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50
3133 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3134 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3136 static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm)
3138 #if defined(CONFIG_ACPI)
3139 struct amdgpu_dm_backlight_caps caps;
3141 memset(&caps, 0, sizeof(caps));
3143 if (dm->backlight_caps.caps_valid)
3146 amdgpu_acpi_get_backlight_caps(dm->adev, &caps);
3147 if (caps.caps_valid) {
3148 dm->backlight_caps.caps_valid = true;
3149 if (caps.aux_support)
3151 dm->backlight_caps.min_input_signal = caps.min_input_signal;
3152 dm->backlight_caps.max_input_signal = caps.max_input_signal;
3154 dm->backlight_caps.min_input_signal =
3155 AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3156 dm->backlight_caps.max_input_signal =
3157 AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3160 if (dm->backlight_caps.aux_support)
3163 dm->backlight_caps.min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3164 dm->backlight_caps.max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3168 static int set_backlight_via_aux(struct dc_link *link, uint32_t brightness)
3175 rc = dc_link_set_backlight_level_nits(link, true, brightness,
3176 AUX_BL_DEFAULT_TRANSITION_TIME_MS);
3181 static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps,
3182 unsigned *min, unsigned *max)
3187 if (caps->aux_support) {
3188 // Firmware limits are in nits, DC API wants millinits.
3189 *max = 1000 * caps->aux_max_input_signal;
3190 *min = 1000 * caps->aux_min_input_signal;
3192 // Firmware limits are 8-bit, PWM control is 16-bit.
3193 *max = 0x101 * caps->max_input_signal;
3194 *min = 0x101 * caps->min_input_signal;
3199 static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *caps,
3200 uint32_t brightness)
3204 if (!get_brightness_range(caps, &min, &max))
3207 // Rescale 0..255 to min..max
3208 return min + DIV_ROUND_CLOSEST((max - min) * brightness,
3209 AMDGPU_MAX_BL_LEVEL);
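/*
 * Hedged worked example for the rescale above (driver defaults, purely
 * illustrative): with min_input_signal = 12 and max_input_signal = 255 on the
 * PWM path, min = 0x101 * 12 = 3084 and max = 0x101 * 255 = 65535 (the 8-bit
 * firmware limits widened to the 16-bit PWM range). A user brightness of 128
 * then maps to 3084 + (65535 - 3084) * 128 / 255 ~= 34432.
 */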
3212 static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps *caps,
3213 uint32_t brightness)
3217 if (!get_brightness_range(caps, &min, &max))
3220 if (brightness < min)
3222 // Rescale min..max to 0..255
3223 return DIV_ROUND_CLOSEST(AMDGPU_MAX_BL_LEVEL * (brightness - min),
3227 static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
3229 struct amdgpu_display_manager *dm = bl_get_data(bd);
3230 struct amdgpu_dm_backlight_caps caps;
3231 struct dc_link *link = NULL;
3235 amdgpu_dm_update_backlight_caps(dm);
3236 caps = dm->backlight_caps;
3238 link = (struct dc_link *)dm->backlight_link;
3240 brightness = convert_brightness_from_user(&caps, bd->props.brightness);
3241 // Change brightness based on AUX property
3242 if (caps.aux_support)
3243 return set_backlight_via_aux(link, brightness);
3245 rc = dc_link_set_backlight_level(dm->backlight_link, brightness, 0);
3250 static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
3252 struct amdgpu_display_manager *dm = bl_get_data(bd);
3253 int ret = dc_link_get_backlight_level(dm->backlight_link);
3255 if (ret == DC_ERROR_UNEXPECTED)
3256 return bd->props.brightness;
3257 return convert_brightness_to_user(&dm->backlight_caps, ret);
3260 static const struct backlight_ops amdgpu_dm_backlight_ops = {
3261 .options = BL_CORE_SUSPENDRESUME,
3262 .get_brightness = amdgpu_dm_backlight_get_brightness,
3263 .update_status = amdgpu_dm_backlight_update_status,
3267 amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
3270 struct backlight_properties props = { 0 };
3272 amdgpu_dm_update_backlight_caps(dm);
3274 props.max_brightness = AMDGPU_MAX_BL_LEVEL;
3275 props.brightness = AMDGPU_MAX_BL_LEVEL;
3276 props.type = BACKLIGHT_RAW;
3278 snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
3279 adev_to_drm(dm->adev)->primary->index);
3281 dm->backlight_dev = backlight_device_register(bl_name,
3282 adev_to_drm(dm->adev)->dev,
3284 &amdgpu_dm_backlight_ops,
3287 if (IS_ERR(dm->backlight_dev))
3288 DRM_ERROR("DM: Backlight registration failed!\n");
3290 DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
3295 static int initialize_plane(struct amdgpu_display_manager *dm,
3296 struct amdgpu_mode_info *mode_info, int plane_id,
3297 enum drm_plane_type plane_type,
3298 const struct dc_plane_cap *plane_cap)
3300 struct drm_plane *plane;
3301 unsigned long possible_crtcs;
3304 plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL);
3306 DRM_ERROR("KMS: Failed to allocate plane\n");
3309 plane->type = plane_type;
3312	 * HACK: IGT tests expect that the primary plane for a CRTC
3313	 * can only have one possible CRTC. Only expose support for
3314	 * any CRTC for planes that are not going to be used as a primary
3315	 * plane for a CRTC - like overlay or underlay planes.
3317 possible_crtcs = 1 << plane_id;
3318 if (plane_id >= dm->dc->caps.max_streams)
3319 possible_crtcs = 0xff;
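/*
 * Illustrative note (the stream count is assumed, not read from a real ASIC):
 * with dc->caps.max_streams == 4, primary planes 0..3 get the masks 0x1, 0x2,
 * 0x4 and 0x8 (bound to exactly one CRTC each), while an overlay plane with
 * plane_id >= 4 advertises 0xff and may be placed on any CRTC.
 */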
3321 ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap);
3324 DRM_ERROR("KMS: Failed to initialize plane\n");
3330 mode_info->planes[plane_id] = plane;
3336 static void register_backlight_device(struct amdgpu_display_manager *dm,
3337 struct dc_link *link)
3339 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3340 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3342 if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
3343 link->type != dc_connection_none) {
3345	 * Even if registration fails, we should continue with
3346	 * DM initialization because not having backlight control
3347	 * is better than a black screen.
3349 amdgpu_dm_register_backlight_device(dm);
3351 if (dm->backlight_dev)
3352 dm->backlight_link = link;
3359 * In this architecture, the association
3360 * connector -> encoder -> crtc
3361	 * is not really required. The crtc and connector will hold the
3362	 * display_index as an abstraction to use with the DAL component
3364 * Returns 0 on success
3366 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
3368 struct amdgpu_display_manager *dm = &adev->dm;
3370 struct amdgpu_dm_connector *aconnector = NULL;
3371 struct amdgpu_encoder *aencoder = NULL;
3372 struct amdgpu_mode_info *mode_info = &adev->mode_info;
3374 int32_t primary_planes;
3375 enum dc_connection_type new_connection_type = dc_connection_none;
3376 const struct dc_plane_cap *plane;
3378 dm->display_indexes_num = dm->dc->caps.max_streams;
3379	/* Update the actual number of CRTCs used */
3380 adev->mode_info.num_crtc = adev->dm.display_indexes_num;
3382 link_cnt = dm->dc->caps.max_links;
3383 if (amdgpu_dm_mode_config_init(dm->adev)) {
3384 DRM_ERROR("DM: Failed to initialize mode config\n");
3388 /* There is one primary plane per CRTC */
3389 primary_planes = dm->dc->caps.max_streams;
3390 ASSERT(primary_planes <= AMDGPU_MAX_PLANES);
3393	 * Initialize primary planes, implicit planes for legacy IOCTLs.
3394 * Order is reversed to match iteration order in atomic check.
3396 for (i = (primary_planes - 1); i >= 0; i--) {
3397 plane = &dm->dc->caps.planes[i];
3399 if (initialize_plane(dm, mode_info, i,
3400 DRM_PLANE_TYPE_PRIMARY, plane)) {
3401 DRM_ERROR("KMS: Failed to initialize primary plane\n");
3407 * Initialize overlay planes, index starting after primary planes.
3408 * These planes have a higher DRM index than the primary planes since
3409 * they should be considered as having a higher z-order.
3410 * Order is reversed to match iteration order in atomic check.
3412 * Only support DCN for now, and only expose one so we don't encourage
3413 * userspace to use up all the pipes.
3415 for (i = 0; i < dm->dc->caps.max_planes; ++i) {
3416 struct dc_plane_cap *plane = &dm->dc->caps.planes[i];
3418 if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL)
3421 if (!plane->blends_with_above || !plane->blends_with_below)
3424 if (!plane->pixel_format_support.argb8888)
3427 if (initialize_plane(dm, NULL, primary_planes + i,
3428 DRM_PLANE_TYPE_OVERLAY, plane)) {
3429 DRM_ERROR("KMS: Failed to initialize overlay plane\n");
3433 /* Only create one overlay plane. */
3437 for (i = 0; i < dm->dc->caps.max_streams; i++)
3438 if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) {
3439 DRM_ERROR("KMS: Failed to initialize crtc\n");
3443	/* Loop over all connectors on the board */
3444 for (i = 0; i < link_cnt; i++) {
3445 struct dc_link *link = NULL;
3447 if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
3449 "KMS: Cannot support more than %d display indexes\n",
3450 AMDGPU_DM_MAX_DISPLAY_INDEX);
3454 aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
3458 aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
3462 if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
3463 DRM_ERROR("KMS: Failed to initialize encoder\n");
3467 if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
3468 DRM_ERROR("KMS: Failed to initialize connector\n");
3472 link = dc_get_link_at_index(dm->dc, i);
3474 if (!dc_link_detect_sink(link, &new_connection_type))
3475 DRM_ERROR("KMS: Failed to detect connector\n");
3477 if (aconnector->base.force && new_connection_type == dc_connection_none) {
3478 emulated_link_detect(link);
3479 amdgpu_dm_update_connector_after_detect(aconnector);
3481 } else if (dc_link_detect(link, DETECT_REASON_BOOT)) {
3482 amdgpu_dm_update_connector_after_detect(aconnector);
3483 register_backlight_device(dm, link);
3484 if (amdgpu_dc_feature_mask & DC_PSR_MASK)
3485 amdgpu_dm_set_psr_caps(link);
3491 /* Software is initialized. Now we can register interrupt handlers. */
3492 switch (adev->asic_type) {
3493 #if defined(CONFIG_DRM_AMD_DC_SI)
3498 if (dce60_register_irq_handlers(dm->adev)) {
3499 DRM_ERROR("DM: Failed to initialize IRQ\n");
3513 case CHIP_POLARIS11:
3514 case CHIP_POLARIS10:
3515 case CHIP_POLARIS12:
3520 if (dce110_register_irq_handlers(dm->adev)) {
3521 DRM_ERROR("DM: Failed to initialize IRQ\n");
3525 #if defined(CONFIG_DRM_AMD_DC_DCN)
3531 case CHIP_SIENNA_CICHLID:
3532 case CHIP_NAVY_FLOUNDER:
3533 case CHIP_DIMGREY_CAVEFISH:
3535 if (dcn10_register_irq_handlers(dm->adev)) {
3536 DRM_ERROR("DM: Failed to initialize IRQ\n");
3542 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
3554 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
3556 drm_mode_config_cleanup(dm->ddev);
3557 drm_atomic_private_obj_fini(&dm->atomic_obj);
3561 /******************************************************************************
3562 * amdgpu_display_funcs functions
3563 *****************************************************************************/
3566 * dm_bandwidth_update - program display watermarks
3568 * @adev: amdgpu_device pointer
3570 * Calculate and program the display watermarks and line buffer allocation.
3572 static void dm_bandwidth_update(struct amdgpu_device *adev)
3574 /* TODO: implement later */
3577 static const struct amdgpu_display_funcs dm_display_funcs = {
3578 .bandwidth_update = dm_bandwidth_update, /* called unconditionally */
3579 .vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */
3580 .backlight_set_level = NULL, /* never called for DC */
3581 .backlight_get_level = NULL, /* never called for DC */
3582 .hpd_sense = NULL,/* called unconditionally */
3583 .hpd_set_polarity = NULL, /* called unconditionally */
3584 .hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
3585 .page_flip_get_scanoutpos =
3586 dm_crtc_get_scanoutpos,/* called unconditionally */
3587 .add_encoder = NULL, /* VBIOS parsing. DAL does it. */
3588 .add_connector = NULL, /* VBIOS parsing. DAL does it. */
3591 #if defined(CONFIG_DEBUG_KERNEL_DC)
3593 static ssize_t s3_debug_store(struct device *device,
3594 struct device_attribute *attr,
3600 struct drm_device *drm_dev = dev_get_drvdata(device);
3601 struct amdgpu_device *adev = drm_to_adev(drm_dev);
3603 ret = kstrtoint(buf, 0, &s3_state);
3608 drm_kms_helper_hotplug_event(adev_to_drm(adev));
3613 return ret == 0 ? count : 0;
3616 DEVICE_ATTR_WO(s3_debug);
3620 static int dm_early_init(void *handle)
3622 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3624 switch (adev->asic_type) {
3625 #if defined(CONFIG_DRM_AMD_DC_SI)
3629 adev->mode_info.num_crtc = 6;
3630 adev->mode_info.num_hpd = 6;
3631 adev->mode_info.num_dig = 6;
3634 adev->mode_info.num_crtc = 2;
3635 adev->mode_info.num_hpd = 2;
3636 adev->mode_info.num_dig = 2;
3641 adev->mode_info.num_crtc = 6;
3642 adev->mode_info.num_hpd = 6;
3643 adev->mode_info.num_dig = 6;
3646 adev->mode_info.num_crtc = 4;
3647 adev->mode_info.num_hpd = 6;
3648 adev->mode_info.num_dig = 7;
3652 adev->mode_info.num_crtc = 2;
3653 adev->mode_info.num_hpd = 6;
3654 adev->mode_info.num_dig = 6;
3658 adev->mode_info.num_crtc = 6;
3659 adev->mode_info.num_hpd = 6;
3660 adev->mode_info.num_dig = 7;
3663 adev->mode_info.num_crtc = 3;
3664 adev->mode_info.num_hpd = 6;
3665 adev->mode_info.num_dig = 9;
3668 adev->mode_info.num_crtc = 2;
3669 adev->mode_info.num_hpd = 6;
3670 adev->mode_info.num_dig = 9;
3672 case CHIP_POLARIS11:
3673 case CHIP_POLARIS12:
3674 adev->mode_info.num_crtc = 5;
3675 adev->mode_info.num_hpd = 5;
3676 adev->mode_info.num_dig = 5;
3678 case CHIP_POLARIS10:
3680 adev->mode_info.num_crtc = 6;
3681 adev->mode_info.num_hpd = 6;
3682 adev->mode_info.num_dig = 6;
3687 adev->mode_info.num_crtc = 6;
3688 adev->mode_info.num_hpd = 6;
3689 adev->mode_info.num_dig = 6;
3691 #if defined(CONFIG_DRM_AMD_DC_DCN)
3695 adev->mode_info.num_crtc = 4;
3696 adev->mode_info.num_hpd = 4;
3697 adev->mode_info.num_dig = 4;
3701 case CHIP_SIENNA_CICHLID:
3702 case CHIP_NAVY_FLOUNDER:
3703 adev->mode_info.num_crtc = 6;
3704 adev->mode_info.num_hpd = 6;
3705 adev->mode_info.num_dig = 6;
3708 case CHIP_DIMGREY_CAVEFISH:
3709 adev->mode_info.num_crtc = 5;
3710 adev->mode_info.num_hpd = 5;
3711 adev->mode_info.num_dig = 5;
3715 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
3719 amdgpu_dm_set_irq_funcs(adev);
3721 if (adev->mode_info.funcs == NULL)
3722 adev->mode_info.funcs = &dm_display_funcs;
3725 * Note: Do NOT change adev->audio_endpt_rreg and
3726 * adev->audio_endpt_wreg because they are initialised in
3727 * amdgpu_device_init()
3729 #if defined(CONFIG_DEBUG_KERNEL_DC)
3731 adev_to_drm(adev)->dev,
3732 &dev_attr_s3_debug);
3738 static bool modeset_required(struct drm_crtc_state *crtc_state,
3739 struct dc_stream_state *new_stream,
3740 struct dc_stream_state *old_stream)
3742 return crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
3745 static bool modereset_required(struct drm_crtc_state *crtc_state)
3747 return !crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
3750 static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
3752 drm_encoder_cleanup(encoder);
3756 static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
3757 .destroy = amdgpu_dm_encoder_destroy,
3761 static void get_min_max_dc_plane_scaling(struct drm_device *dev,
3762 struct drm_framebuffer *fb,
3763 int *min_downscale, int *max_upscale)
3765 struct amdgpu_device *adev = drm_to_adev(dev);
3766 struct dc *dc = adev->dm.dc;
3767 /* Caps for all supported planes are the same on DCE and DCN 1 - 3 */
3768 struct dc_plane_cap *plane_cap = &dc->caps.planes[0];
3770 switch (fb->format->format) {
3771 case DRM_FORMAT_P010:
3772 case DRM_FORMAT_NV12:
3773 case DRM_FORMAT_NV21:
3774 *max_upscale = plane_cap->max_upscale_factor.nv12;
3775 *min_downscale = plane_cap->max_downscale_factor.nv12;
3778 case DRM_FORMAT_XRGB16161616F:
3779 case DRM_FORMAT_ARGB16161616F:
3780 case DRM_FORMAT_XBGR16161616F:
3781 case DRM_FORMAT_ABGR16161616F:
3782 *max_upscale = plane_cap->max_upscale_factor.fp16;
3783 *min_downscale = plane_cap->max_downscale_factor.fp16;
3787 *max_upscale = plane_cap->max_upscale_factor.argb8888;
3788 *min_downscale = plane_cap->max_downscale_factor.argb8888;
3793	 * A factor of 1 in the plane_cap means scaling is not allowed, i.e. use a
3794	 * scaling factor of 1.0 == 1000 units.
3796 if (*max_upscale == 1)
3797 *max_upscale = 1000;
3799 if (*min_downscale == 1)
3800 *min_downscale = 1000;
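/*
 * Illustrative example of the 1/1000 scaling units (the cap values are the
 * fallbacks used below; real ones come from dc->caps.planes[]): a
 * max_upscale of 16000 allows up to 16x upscaling and a min_downscale of 250
 * allows shrinking to 1/4 of the source size; a plane scaled from 1920 to 960
 * pixels wide has scale_w = 960 * 1000 / 1920 = 500, well within [250, 16000].
 */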
3804 static int fill_dc_scaling_info(const struct drm_plane_state *state,
3805 struct dc_scaling_info *scaling_info)
3807 int scale_w, scale_h, min_downscale, max_upscale;
3809 memset(scaling_info, 0, sizeof(*scaling_info));
3811 /* Source is fixed 16.16 but we ignore mantissa for now... */
3812 scaling_info->src_rect.x = state->src_x >> 16;
3813 scaling_info->src_rect.y = state->src_y >> 16;
3815 scaling_info->src_rect.width = state->src_w >> 16;
3816 if (scaling_info->src_rect.width == 0)
3819 scaling_info->src_rect.height = state->src_h >> 16;
3820 if (scaling_info->src_rect.height == 0)
3823 scaling_info->dst_rect.x = state->crtc_x;
3824 scaling_info->dst_rect.y = state->crtc_y;
3826 if (state->crtc_w == 0)
3829 scaling_info->dst_rect.width = state->crtc_w;
3831 if (state->crtc_h == 0)
3834 scaling_info->dst_rect.height = state->crtc_h;
3836 /* DRM doesn't specify clipping on destination output. */
3837 scaling_info->clip_rect = scaling_info->dst_rect;
3839 /* Validate scaling per-format with DC plane caps */
3840 if (state->plane && state->plane->dev && state->fb) {
3841 get_min_max_dc_plane_scaling(state->plane->dev, state->fb,
3842 &min_downscale, &max_upscale);
3844 min_downscale = 250;
3845 max_upscale = 16000;
3848 scale_w = scaling_info->dst_rect.width * 1000 /
3849 scaling_info->src_rect.width;
3851 if (scale_w < min_downscale || scale_w > max_upscale)
3854 scale_h = scaling_info->dst_rect.height * 1000 /
3855 scaling_info->src_rect.height;
3857 if (scale_h < min_downscale || scale_h > max_upscale)
3861 * The "scaling_quality" can be ignored for now, quality = 0 has DC
3862 * assume reasonable defaults based on the format.
3869 fill_gfx8_tiling_info_from_flags(union dc_tiling_info *tiling_info,
3870 uint64_t tiling_flags)
3872 /* Fill GFX8 params */
3873 if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) {
3874 unsigned int bankw, bankh, mtaspect, tile_split, num_banks;
3876 bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
3877 bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
3878 mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
3879 tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
3880 num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
3882 /* XXX fix me for VI */
3883 tiling_info->gfx8.num_banks = num_banks;
3884 tiling_info->gfx8.array_mode =
3885 DC_ARRAY_2D_TILED_THIN1;
3886 tiling_info->gfx8.tile_split = tile_split;
3887 tiling_info->gfx8.bank_width = bankw;
3888 tiling_info->gfx8.bank_height = bankh;
3889 tiling_info->gfx8.tile_aspect = mtaspect;
3890 tiling_info->gfx8.tile_mode =
3891 DC_ADDR_SURF_MICRO_TILING_DISPLAY;
3892 } else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)
3893 == DC_ARRAY_1D_TILED_THIN1) {
3894 tiling_info->gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
3897 tiling_info->gfx8.pipe_config =
3898 AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
3902 fill_gfx9_tiling_info_from_device(const struct amdgpu_device *adev,
3903 union dc_tiling_info *tiling_info)
3905 tiling_info->gfx9.num_pipes =
3906 adev->gfx.config.gb_addr_config_fields.num_pipes;
3907 tiling_info->gfx9.num_banks =
3908 adev->gfx.config.gb_addr_config_fields.num_banks;
3909 tiling_info->gfx9.pipe_interleave =
3910 adev->gfx.config.gb_addr_config_fields.pipe_interleave_size;
3911 tiling_info->gfx9.num_shader_engines =
3912 adev->gfx.config.gb_addr_config_fields.num_se;
3913 tiling_info->gfx9.max_compressed_frags =
3914 adev->gfx.config.gb_addr_config_fields.max_compress_frags;
3915 tiling_info->gfx9.num_rb_per_se =
3916 adev->gfx.config.gb_addr_config_fields.num_rb_per_se;
3917 tiling_info->gfx9.shaderEnable = 1;
3918 if (adev->asic_type == CHIP_SIENNA_CICHLID ||
3919 adev->asic_type == CHIP_NAVY_FLOUNDER ||
3920 adev->asic_type == CHIP_DIMGREY_CAVEFISH ||
3921 adev->asic_type == CHIP_VANGOGH)
3922 tiling_info->gfx9.num_pkrs = adev->gfx.config.gb_addr_config_fields.num_pkrs;
3926 validate_dcc(struct amdgpu_device *adev,
3927 const enum surface_pixel_format format,
3928 const enum dc_rotation_angle rotation,
3929 const union dc_tiling_info *tiling_info,
3930 const struct dc_plane_dcc_param *dcc,
3931 const struct dc_plane_address *address,
3932 const struct plane_size *plane_size)
3934 struct dc *dc = adev->dm.dc;
3935 struct dc_dcc_surface_param input;
3936 struct dc_surface_dcc_cap output;
3938 memset(&input, 0, sizeof(input));
3939 memset(&output, 0, sizeof(output));
3944 if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN ||
3945 !dc->cap_funcs.get_dcc_compression_cap)
3948 input.format = format;
3949 input.surface_size.width = plane_size->surface_size.width;
3950 input.surface_size.height = plane_size->surface_size.height;
3951 input.swizzle_mode = tiling_info->gfx9.swizzle;
3953 if (rotation == ROTATION_ANGLE_0 || rotation == ROTATION_ANGLE_180)
3954 input.scan = SCAN_DIRECTION_HORIZONTAL;
3955 else if (rotation == ROTATION_ANGLE_90 || rotation == ROTATION_ANGLE_270)
3956 input.scan = SCAN_DIRECTION_VERTICAL;
3958 if (!dc->cap_funcs.get_dcc_compression_cap(dc, &input, &output))
3961 if (!output.capable)
3964 if (dcc->independent_64b_blks == 0 &&
3965 output.grph.rgb.independent_64b_blks != 0)
3972 modifier_has_dcc(uint64_t modifier)
3974 return IS_AMD_FMT_MOD(modifier) && AMD_FMT_MOD_GET(DCC, modifier);
3978 modifier_gfx9_swizzle_mode(uint64_t modifier)
3980 if (modifier == DRM_FORMAT_MOD_LINEAR)
3983 return AMD_FMT_MOD_GET(TILE, modifier);
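/*
 * Small sketch of how these helpers interact (the modifier value is
 * hypothetical): a modifier built as
 *
 *	AMD_FMT_MOD |
 *	AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
 *	AMD_FMT_MOD_SET(DCC, 1)
 *
 * makes modifier_has_dcc() return true and modifier_gfx9_swizzle_mode()
 * return AMD_FMT_MOD_TILE_GFX9_64K_S_X, while DRM_FORMAT_MOD_LINEAR is
 * handled as a special case above.
 */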
3986 static const struct drm_format_info *
3987 amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
3989 return amdgpu_lookup_format_info(cmd->pixel_format, cmd->modifier[0]);
3993 fill_gfx9_tiling_info_from_modifier(const struct amdgpu_device *adev,
3994 union dc_tiling_info *tiling_info,
3997 unsigned int mod_bank_xor_bits = AMD_FMT_MOD_GET(BANK_XOR_BITS, modifier);
3998 unsigned int mod_pipe_xor_bits = AMD_FMT_MOD_GET(PIPE_XOR_BITS, modifier);
3999 unsigned int pkrs_log2 = AMD_FMT_MOD_GET(PACKERS, modifier);
4000 unsigned int pipes_log2 = min(4u, mod_pipe_xor_bits);
4002 fill_gfx9_tiling_info_from_device(adev, tiling_info);
4004 if (!IS_AMD_FMT_MOD(modifier))
4007 tiling_info->gfx9.num_pipes = 1u << pipes_log2;
4008 tiling_info->gfx9.num_shader_engines = 1u << (mod_pipe_xor_bits - pipes_log2);
4010 if (adev->family >= AMDGPU_FAMILY_NV) {
4011 tiling_info->gfx9.num_pkrs = 1u << pkrs_log2;
4013 tiling_info->gfx9.num_banks = 1u << mod_bank_xor_bits;
4015 /* for DCC we know it isn't rb aligned, so rb_per_se doesn't matter. */
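/*
 * Worked example of the fields above (the modifier bit values are assumed
 * for illustration): with PIPE_XOR_BITS = 5 and PACKERS = 1 encoded in the
 * modifier, pipes_log2 = min(4, 5) = 4, so gfx9.num_pipes = 1 << 4 = 16 and
 * gfx9.num_shader_engines = 1 << (5 - 4) = 2; on AMDGPU_FAMILY_NV and newer
 * parts gfx9.num_pkrs = 1 << 1 = 2.
 */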
4019 enum dm_micro_swizzle {
4020 MICRO_SWIZZLE_Z = 0,
4021 MICRO_SWIZZLE_S = 1,
4022 MICRO_SWIZZLE_D = 2,
4026 static bool dm_plane_format_mod_supported(struct drm_plane *plane,
4030 struct amdgpu_device *adev = drm_to_adev(plane->dev);
4031 const struct drm_format_info *info = drm_format_info(format);
4033 enum dm_micro_swizzle microtile = modifier_gfx9_swizzle_mode(modifier) & 3;
4039 * We always have to allow this modifier, because core DRM still
4040	 * checks LINEAR support if userspace does not provide modifiers.
4042 if (modifier == DRM_FORMAT_MOD_LINEAR)
4046	 * The arbitrary tiling support for multiplane formats has not been hooked up.
4049 if (info->num_planes > 1)
4053 * For D swizzle the canonical modifier depends on the bpp, so check
4056 if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) == AMD_FMT_MOD_TILE_VER_GFX9 &&
4057 adev->family >= AMDGPU_FAMILY_NV) {
4058 if (microtile == MICRO_SWIZZLE_D && info->cpp[0] == 4)
4062 if (adev->family >= AMDGPU_FAMILY_RV && microtile == MICRO_SWIZZLE_D &&
4066 if (modifier_has_dcc(modifier)) {
4067 /* Per radeonsi comments 16/64 bpp are more complicated. */
4068 if (info->cpp[0] != 4)
4076 add_modifier(uint64_t **mods, uint64_t *size, uint64_t *cap, uint64_t mod)
4081 if (*cap - *size < 1) {
4082 uint64_t new_cap = *cap * 2;
4083 uint64_t *new_mods = kmalloc(new_cap * sizeof(uint64_t), GFP_KERNEL);
4091 memcpy(new_mods, *mods, sizeof(uint64_t) * *size);
4097 (*mods)[*size] = mod;
4102 add_gfx9_modifiers(const struct amdgpu_device *adev,
4103 uint64_t **mods, uint64_t *size, uint64_t *capacity)
4105 int pipes = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
4106 int pipe_xor_bits = min(8, pipes +
4107 ilog2(adev->gfx.config.gb_addr_config_fields.num_se));
4108 int bank_xor_bits = min(8 - pipe_xor_bits,
4109 ilog2(adev->gfx.config.gb_addr_config_fields.num_banks));
4110 int rb = ilog2(adev->gfx.config.gb_addr_config_fields.num_se) +
4111 ilog2(adev->gfx.config.gb_addr_config_fields.num_rb_per_se);
4114 if (adev->family == AMDGPU_FAMILY_RV) {
4115 /* Raven2 and later */
4116 bool has_constant_encode = adev->asic_type > CHIP_RAVEN || adev->external_rev_id >= 0x81;
4119 * No _D DCC swizzles yet because we only allow 32bpp, which
4120 * doesn't support _D on DCN
4123 if (has_constant_encode) {
4124 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4125 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4126 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4127 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4128 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4129 AMD_FMT_MOD_SET(DCC, 1) |
4130 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4131 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4132 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1));
4135 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4136 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4137 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4138 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4139 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4140 AMD_FMT_MOD_SET(DCC, 1) |
4141 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4142 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4143 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0));
4145 if (has_constant_encode) {
4146 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4147 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4148 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4149 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4150 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4151 AMD_FMT_MOD_SET(DCC, 1) |
4152 AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4153 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4154 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4156 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4157 AMD_FMT_MOD_SET(RB, rb) |
4158 AMD_FMT_MOD_SET(PIPE, pipes));
4161 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4162 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4163 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4164 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4165 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4166 AMD_FMT_MOD_SET(DCC, 1) |
4167 AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4168 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4169 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4170 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0) |
4171 AMD_FMT_MOD_SET(RB, rb) |
4172 AMD_FMT_MOD_SET(PIPE, pipes));
4176 * Only supported for 64bpp on Raven, will be filtered on format in
4177 * dm_plane_format_mod_supported.
4179 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4180 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D_X) |
4181 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4182 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4183 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
4185 if (adev->family == AMDGPU_FAMILY_RV) {
4186 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4187 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4188 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4189 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4190 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
4194 * Only supported for 64bpp on Raven, will be filtered on format in
4195 * dm_plane_format_mod_supported.
4197 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4198 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
4199 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4201 if (adev->family == AMDGPU_FAMILY_RV) {
4202 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4203 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
4204 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4209 add_gfx10_1_modifiers(const struct amdgpu_device *adev,
4210 uint64_t **mods, uint64_t *size, uint64_t *capacity)
4212 int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
4214 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4215 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4216 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4217 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4218 AMD_FMT_MOD_SET(DCC, 1) |
4219 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4220 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4221 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
4223 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4224 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4225 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4226 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4227 AMD_FMT_MOD_SET(DCC, 1) |
4228 AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4229 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4230 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4231 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
4233 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4234 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4235 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4236 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
4238 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4239 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4240 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4241 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
4244 /* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
4245 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4246 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
4247 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4249 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4250 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
4251 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
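/*
 * GFX10.3 (RB+) display modifiers: like GFX10 but with the
 * GFX10_RBPLUS tile version and a PACKERS field, and DCC configured as
 * independent 64B and 128B blocks with a 128B max compressed block.
 */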
4255 add_gfx10_3_modifiers(const struct amdgpu_device *adev,
4256 uint64_t **mods, uint64_t *size, uint64_t *capacity)
4258 int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
4259 int pkrs = ilog2(adev->gfx.config.gb_addr_config_fields.num_pkrs);
4261 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4262 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4263 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4264 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4265 AMD_FMT_MOD_SET(PACKERS, pkrs) |
4266 AMD_FMT_MOD_SET(DCC, 1) |
4267 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4268 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4269 AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
4270 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B));
4272 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4273 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4274 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4275 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4276 AMD_FMT_MOD_SET(PACKERS, pkrs) |
4277 AMD_FMT_MOD_SET(DCC, 1) |
4278 AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4279 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4280 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4281 AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
4282 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B));
4284 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4285 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4286 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4287 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4288 AMD_FMT_MOD_SET(PACKERS, pkrs));
4290 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4291 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4292 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4293 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4294 AMD_FMT_MOD_SET(PACKERS, pkrs));
4296 /* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
4297 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4298 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
4299 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4301 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4302 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
4303 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
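/*
 * Build the modifier list advertised for a plane. Cursor planes only get
 * LINEAR; other planes get the per-family list above, and every list is
 * terminated with LINEAR followed by DRM_FORMAT_MOD_INVALID.
 */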
4307 get_plane_modifiers(const struct amdgpu_device *adev, unsigned int plane_type, uint64_t **mods)
4309 uint64_t size = 0, capacity = 128;
4312 /* We have not hooked up any pre-GFX9 modifiers. */
4313 if (adev->family < AMDGPU_FAMILY_AI)
4316 *mods = kmalloc(capacity * sizeof(uint64_t), GFP_KERNEL);
4318 if (plane_type == DRM_PLANE_TYPE_CURSOR) {
4319 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
4320 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
4321 return *mods ? 0 : -ENOMEM;
4324 switch (adev->family) {
4325 case AMDGPU_FAMILY_AI:
4326 case AMDGPU_FAMILY_RV:
4327 add_gfx9_modifiers(adev, mods, &size, &capacity);
4329 case AMDGPU_FAMILY_NV:
4330 case AMDGPU_FAMILY_VGH:
4331 if (adev->asic_type >= CHIP_SIENNA_CICHLID)
4332 add_gfx10_3_modifiers(adev, mods, &size, &capacity);
4334 add_gfx10_1_modifiers(adev, mods, &size, &capacity);
4338 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
4340 /* INVALID marks the end of the list. */
4341 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
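/*
 * Derive GFX9+ tiling info, swizzle mode and DCC parameters (metadata
 * pitch and address) from the framebuffer's modifier, then validate the
 * DCC configuration with DC.
 */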
4350 fill_gfx9_plane_attributes_from_modifiers(struct amdgpu_device *adev,
4351 const struct amdgpu_framebuffer *afb,
4352 const enum surface_pixel_format format,
4353 const enum dc_rotation_angle rotation,
4354 const struct plane_size *plane_size,
4355 union dc_tiling_info *tiling_info,
4356 struct dc_plane_dcc_param *dcc,
4357 struct dc_plane_address *address,
4358 const bool force_disable_dcc)
4360 const uint64_t modifier = afb->base.modifier;
4363 fill_gfx9_tiling_info_from_modifier(adev, tiling_info, modifier);
4364 tiling_info->gfx9.swizzle = modifier_gfx9_swizzle_mode(modifier);
4366 if (modifier_has_dcc(modifier) && !force_disable_dcc) {
4367 uint64_t dcc_address = afb->address + afb->base.offsets[1];
4370 dcc->meta_pitch = afb->base.pitches[1];
4371 dcc->independent_64b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_64B, modifier);
4373 address->grph.meta_addr.low_part = lower_32_bits(dcc_address);
4374 address->grph.meta_addr.high_part = upper_32_bits(dcc_address);
4377 ret = validate_dcc(adev, format, rotation, tiling_info, dcc, address, plane_size);
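/*
 * Translate a drm_framebuffer into DC buffer attributes: surface and
 * chroma sizes, pitches and addresses, plus tiling/DCC info taken from
 * modifiers on GFX9+ or from the legacy tiling flags on older ASICs.
 */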
4385 fill_plane_buffer_attributes(struct amdgpu_device *adev,
4386 const struct amdgpu_framebuffer *afb,
4387 const enum surface_pixel_format format,
4388 const enum dc_rotation_angle rotation,
4389 const uint64_t tiling_flags,
4390 union dc_tiling_info *tiling_info,
4391 struct plane_size *plane_size,
4392 struct dc_plane_dcc_param *dcc,
4393 struct dc_plane_address *address,
4395 bool force_disable_dcc)
4397 const struct drm_framebuffer *fb = &afb->base;
4400 memset(tiling_info, 0, sizeof(*tiling_info));
4401 memset(plane_size, 0, sizeof(*plane_size));
4402 memset(dcc, 0, sizeof(*dcc));
4403 memset(address, 0, sizeof(*address));
4405 address->tmz_surface = tmz_surface;
4407 if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
4408 uint64_t addr = afb->address + fb->offsets[0];
4410 plane_size->surface_size.x = 0;
4411 plane_size->surface_size.y = 0;
4412 plane_size->surface_size.width = fb->width;
4413 plane_size->surface_size.height = fb->height;
4414 plane_size->surface_pitch =
4415 fb->pitches[0] / fb->format->cpp[0];
4417 address->type = PLN_ADDR_TYPE_GRAPHICS;
4418 address->grph.addr.low_part = lower_32_bits(addr);
4419 address->grph.addr.high_part = upper_32_bits(addr);
4420 } else if (format < SURFACE_PIXEL_FORMAT_INVALID) {
4421 uint64_t luma_addr = afb->address + fb->offsets[0];
4422 uint64_t chroma_addr = afb->address + fb->offsets[1];
4424 plane_size->surface_size.x = 0;
4425 plane_size->surface_size.y = 0;
4426 plane_size->surface_size.width = fb->width;
4427 plane_size->surface_size.height = fb->height;
4428 plane_size->surface_pitch =
4429 fb->pitches[0] / fb->format->cpp[0];
4431 plane_size->chroma_size.x = 0;
4432 plane_size->chroma_size.y = 0;
4433 /* TODO: set these based on surface format */
4434 plane_size->chroma_size.width = fb->width / 2;
4435 plane_size->chroma_size.height = fb->height / 2;
4437 plane_size->chroma_pitch =
4438 fb->pitches[1] / fb->format->cpp[1];
4440 address->type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
4441 address->video_progressive.luma_addr.low_part =
4442 lower_32_bits(luma_addr);
4443 address->video_progressive.luma_addr.high_part =
4444 upper_32_bits(luma_addr);
4445 address->video_progressive.chroma_addr.low_part =
4446 lower_32_bits(chroma_addr);
4447 address->video_progressive.chroma_addr.high_part =
4448 upper_32_bits(chroma_addr);
4451 if (adev->family >= AMDGPU_FAMILY_AI) {
4452 ret = fill_gfx9_plane_attributes_from_modifiers(adev, afb, format,
4453 rotation, plane_size,
4460 fill_gfx8_tiling_info_from_flags(tiling_info, tiling_flags);
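/*
 * Derive blending for overlay planes: per-pixel alpha for premultiplied
 * ARGB/RGBA/ABGR framebuffers, and a global alpha value taken from the
 * 16-bit plane alpha property scaled down to 8 bits.
 */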
4467 fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
4468 bool *per_pixel_alpha, bool *global_alpha,
4469 int *global_alpha_value)
4471 *per_pixel_alpha = false;
4472 *global_alpha = false;
4473 *global_alpha_value = 0xff;
4475 if (plane_state->plane->type != DRM_PLANE_TYPE_OVERLAY)
4478 if (plane_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI) {
4479 static const uint32_t alpha_formats[] = {
4480 DRM_FORMAT_ARGB8888,
4481 DRM_FORMAT_RGBA8888,
4482 DRM_FORMAT_ABGR8888,
4484 uint32_t format = plane_state->fb->format->format;
4487 for (i = 0; i < ARRAY_SIZE(alpha_formats); ++i) {
4488 if (format == alpha_formats[i]) {
4489 *per_pixel_alpha = true;
4495 if (plane_state->alpha < 0xffff) {
4496 *global_alpha = true;
4497 *global_alpha_value = plane_state->alpha >> 8;
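/*
 * Map the DRM color encoding/range properties to a DC color space for
 * YCbCr surfaces; RGB surfaces always use sRGB.
 */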
4502 fill_plane_color_attributes(const struct drm_plane_state *plane_state,
4503 const enum surface_pixel_format format,
4504 enum dc_color_space *color_space)
4508 *color_space = COLOR_SPACE_SRGB;
4510 /* DRM color properties only affect non-RGB formats. */
4511 if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
4514 full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE);
4516 switch (plane_state->color_encoding) {
4517 case DRM_COLOR_YCBCR_BT601:
4519 *color_space = COLOR_SPACE_YCBCR601;
4521 *color_space = COLOR_SPACE_YCBCR601_LIMITED;
4524 case DRM_COLOR_YCBCR_BT709:
4526 *color_space = COLOR_SPACE_YCBCR709;
4528 *color_space = COLOR_SPACE_YCBCR709_LIMITED;
4531 case DRM_COLOR_YCBCR_BT2020:
4533 *color_space = COLOR_SPACE_2020_YCBCR;
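/*
 * Fill dc_plane_info from the DRM plane state: pixel format, rotation,
 * buffer/tiling/DCC attributes, color space and blending, and compute
 * the surface address.
 */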
4546 fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
4547 const struct drm_plane_state *plane_state,
4548 const uint64_t tiling_flags,
4549 struct dc_plane_info *plane_info,
4550 struct dc_plane_address *address,
4552 bool force_disable_dcc)
4554 const struct drm_framebuffer *fb = plane_state->fb;
4555 const struct amdgpu_framebuffer *afb =
4556 to_amdgpu_framebuffer(plane_state->fb);
4557 struct drm_format_name_buf format_name;
4560 memset(plane_info, 0, sizeof(*plane_info));
4562 switch (fb->format->format) {
4564 plane_info->format =
4565 SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
4567 case DRM_FORMAT_RGB565:
4568 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
4570 case DRM_FORMAT_XRGB8888:
4571 case DRM_FORMAT_ARGB8888:
4572 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
4574 case DRM_FORMAT_XRGB2101010:
4575 case DRM_FORMAT_ARGB2101010:
4576 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
4578 case DRM_FORMAT_XBGR2101010:
4579 case DRM_FORMAT_ABGR2101010:
4580 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
4582 case DRM_FORMAT_XBGR8888:
4583 case DRM_FORMAT_ABGR8888:
4584 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888;
4586 case DRM_FORMAT_NV21:
4587 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
4589 case DRM_FORMAT_NV12:
4590 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
4592 case DRM_FORMAT_P010:
4593 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb;
4595 case DRM_FORMAT_XRGB16161616F:
4596 case DRM_FORMAT_ARGB16161616F:
4597 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F;
4599 case DRM_FORMAT_XBGR16161616F:
4600 case DRM_FORMAT_ABGR16161616F:
4601 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F;
4605 "Unsupported screen format %s\n",
4606 drm_get_format_name(fb->format->format, &format_name));
4610 switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
4611 case DRM_MODE_ROTATE_0:
4612 plane_info->rotation = ROTATION_ANGLE_0;
4614 case DRM_MODE_ROTATE_90:
4615 plane_info->rotation = ROTATION_ANGLE_90;
4617 case DRM_MODE_ROTATE_180:
4618 plane_info->rotation = ROTATION_ANGLE_180;
4620 case DRM_MODE_ROTATE_270:
4621 plane_info->rotation = ROTATION_ANGLE_270;
4624 plane_info->rotation = ROTATION_ANGLE_0;
4628 plane_info->visible = true;
4629 plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE;
4631 plane_info->layer_index = 0;
4633 ret = fill_plane_color_attributes(plane_state, plane_info->format,
4634 &plane_info->color_space);
4638 ret = fill_plane_buffer_attributes(adev, afb, plane_info->format,
4639 plane_info->rotation, tiling_flags,
4640 &plane_info->tiling_info,
4641 &plane_info->plane_size,
4642 &plane_info->dcc, address, tmz_surface,
4647 fill_blending_from_plane_state(
4648 plane_state, &plane_info->per_pixel_alpha,
4649 &plane_info->global_alpha, &plane_info->global_alpha_value);
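/*
 * Populate a dc_plane_state from the DRM plane and CRTC state: scaling
 * rects, plane info and address (DCC is force-disabled on Raven during
 * suspend), and the input transfer function for color management.
 */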
4654 static int fill_dc_plane_attributes(struct amdgpu_device *adev,
4655 struct dc_plane_state *dc_plane_state,
4656 struct drm_plane_state *plane_state,
4657 struct drm_crtc_state *crtc_state)
4659 struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
4660 struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)plane_state->fb;
4661 struct dc_scaling_info scaling_info;
4662 struct dc_plane_info plane_info;
4664 bool force_disable_dcc = false;
4666 ret = fill_dc_scaling_info(plane_state, &scaling_info);
4670 dc_plane_state->src_rect = scaling_info.src_rect;
4671 dc_plane_state->dst_rect = scaling_info.dst_rect;
4672 dc_plane_state->clip_rect = scaling_info.clip_rect;
4673 dc_plane_state->scaling_quality = scaling_info.scaling_quality;
4675 force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
4676 ret = fill_dc_plane_info_and_addr(adev, plane_state,
4679 &dc_plane_state->address,
4685 dc_plane_state->format = plane_info.format;
4686 dc_plane_state->color_space = plane_info.color_space;
4688 dc_plane_state->plane_size = plane_info.plane_size;
4689 dc_plane_state->rotation = plane_info.rotation;
4690 dc_plane_state->horizontal_mirror = plane_info.horizontal_mirror;
4691 dc_plane_state->stereo_format = plane_info.stereo_format;
4692 dc_plane_state->tiling_info = plane_info.tiling_info;
4693 dc_plane_state->visible = plane_info.visible;
4694 dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha;
4695 dc_plane_state->global_alpha = plane_info.global_alpha;
4696 dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
4697 dc_plane_state->dcc = plane_info.dcc;
4698 dc_plane_state->layer_index = plane_info.layer_index; // Always returns 0
4701 * Always set input transfer function, since plane state is refreshed
4704 ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state, dc_plane_state);
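/*
 * Compute the stream source/destination rectangles from the mode and the
 * connector scaling property (full, aspect or center), then apply any
 * underscan borders.
 */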
4711 static void update_stream_scaling_settings(const struct drm_display_mode *mode,
4712 const struct dm_connector_state *dm_state,
4713 struct dc_stream_state *stream)
4715 enum amdgpu_rmx_type rmx_type;
4717 struct rect src = { 0 }; /* viewport in composition space */
4718 struct rect dst = { 0 }; /* stream addressable area */
4720 /* no mode. nothing to be done */
4724 /* Full screen scaling by default */
4725 src.width = mode->hdisplay;
4726 src.height = mode->vdisplay;
4727 dst.width = stream->timing.h_addressable;
4728 dst.height = stream->timing.v_addressable;
4731 rmx_type = dm_state->scaling;
4732 if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
4733 if (src.width * dst.height <
4734 src.height * dst.width) {
4735 /* height needs less upscaling/more downscaling */
4736 dst.width = src.width *
4737 dst.height / src.height;
4739 /* width needs less upscaling/more downscaling */
4740 dst.height = src.height *
4741 dst.width / src.width;
4743 } else if (rmx_type == RMX_CENTER) {
4747 dst.x = (stream->timing.h_addressable - dst.width) / 2;
4748 dst.y = (stream->timing.v_addressable - dst.height) / 2;
4750 if (dm_state->underscan_enable) {
4751 dst.x += dm_state->underscan_hborder / 2;
4752 dst.y += dm_state->underscan_vborder / 2;
4753 dst.width -= dm_state->underscan_hborder;
4754 dst.height -= dm_state->underscan_vborder;
4761 DRM_DEBUG_DRIVER("Destination Rectangle x:%d y:%d width:%d height:%d\n",
4762 dst.x, dst.y, dst.width, dst.height);
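/*
 * Pick a DC color depth from the sink's reported bpc, capped by the HDMI
 * 4:2:0 deep-color capabilities for YCbCr 4:2:0 modes and by the
 * requested max bpc (rounded down to an even value).
 */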
4766 static enum dc_color_depth
4767 convert_color_depth_from_display_info(const struct drm_connector *connector,
4768 bool is_y420, int requested_bpc)
4775 /* Cap display bpc based on HDMI 2.0 HF-VSDB */
4776 if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48)
4778 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36)
4780 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30)
4783 bpc = (uint8_t)connector->display_info.bpc;
4784 /* Assume 8 bpc by default if no bpc is specified. */
4785 bpc = bpc ? bpc : 8;
4788 if (requested_bpc > 0) {
4790 * Cap display bpc based on the user requested value.
4792 * The value for state->max_bpc may not be correctly updated
4793 * depending on when the connector gets added to the state
4794 * or if this was called outside of atomic check, so it
4795 * can't be used directly.
4797 bpc = min_t(u8, bpc, requested_bpc);
4799 /* Round down to the nearest even number. */
4800 bpc = bpc - (bpc & 1);
4806 * Temporary workaround: DRM doesn't parse color depth for
4807 * EDID revisions before 1.4.
4808 * TODO: Fix EDID parsing
4810 return COLOR_DEPTH_888;
4812 return COLOR_DEPTH_666;
4814 return COLOR_DEPTH_888;
4816 return COLOR_DEPTH_101010;
4818 return COLOR_DEPTH_121212;
4820 return COLOR_DEPTH_141414;
4822 return COLOR_DEPTH_161616;
4824 return COLOR_DEPTH_UNDEFINED;
4828 static enum dc_aspect_ratio
4829 get_aspect_ratio(const struct drm_display_mode *mode_in)
4831 /* 1-1 mapping, since both enums follow the HDMI spec. */
4832 return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio;
4835 static enum dc_color_space
4836 get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
4838 enum dc_color_space color_space = COLOR_SPACE_SRGB;
4840 switch (dc_crtc_timing->pixel_encoding) {
4841 case PIXEL_ENCODING_YCBCR422:
4842 case PIXEL_ENCODING_YCBCR444:
4843 case PIXEL_ENCODING_YCBCR420:
4846 * 27030 kHz is the separation point between HDTV and SDTV
4847 * per the HDMI spec; use YCbCr709 above it and YCbCr601 below it.
4850 if (dc_crtc_timing->pix_clk_100hz > 270300) {
4851 if (dc_crtc_timing->flags.Y_ONLY)
4853 COLOR_SPACE_YCBCR709_LIMITED;
4855 color_space = COLOR_SPACE_YCBCR709;
4857 if (dc_crtc_timing->flags.Y_ONLY)
4859 COLOR_SPACE_YCBCR601_LIMITED;
4861 color_space = COLOR_SPACE_YCBCR601;
4866 case PIXEL_ENCODING_RGB:
4867 color_space = COLOR_SPACE_SRGB;
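/*
 * Step the color depth down until the normalized pixel clock fits within
 * max_tmds_clock: the clock is converted to kHz, halved for YCbCr 4:2:0
 * and scaled by bpp/24 for deep color. For example, a 594000 kHz 4:2:0
 * mode at 12 bpc normalizes to 594000 / 2 * 36 / 24 = 445500 kHz.
 */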
4878 static bool adjust_colour_depth_from_display_info(
4879 struct dc_crtc_timing *timing_out,
4880 const struct drm_display_info *info)
4882 enum dc_color_depth depth = timing_out->display_color_depth;
4885 normalized_clk = timing_out->pix_clk_100hz / 10;
4886 /* YCbCr 4:2:0 requires additional adjustment of 1/2 */
4887 if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
4888 normalized_clk /= 2;
4889 /* Adjust the pixel clock per the HDMI spec based on colour depth */
4891 case COLOR_DEPTH_888:
4893 case COLOR_DEPTH_101010:
4894 normalized_clk = (normalized_clk * 30) / 24;
4896 case COLOR_DEPTH_121212:
4897 normalized_clk = (normalized_clk * 36) / 24;
4899 case COLOR_DEPTH_161616:
4900 normalized_clk = (normalized_clk * 48) / 24;
4903 /* The above depths are the only ones valid for HDMI. */
4906 if (normalized_clk <= info->max_tmds_clock) {
4907 timing_out->display_color_depth = depth;
4910 } while (--depth > COLOR_DEPTH_666);
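/*
 * Fill the stream's dc_crtc_timing from a DRM display mode: pixel
 * encoding, color depth, VIC/HDMI VIC, sync polarities, porches and sync
 * widths, plus the output color space and transfer function. Falls back
 * to YCbCr 4:2:0 when the chosen depth does not fit the TMDS clock.
 */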
4914 static void fill_stream_properties_from_drm_display_mode(
4915 struct dc_stream_state *stream,
4916 const struct drm_display_mode *mode_in,
4917 const struct drm_connector *connector,
4918 const struct drm_connector_state *connector_state,
4919 const struct dc_stream_state *old_stream,
4922 struct dc_crtc_timing *timing_out = &stream->timing;
4923 const struct drm_display_info *info = &connector->display_info;
4924 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
4925 struct hdmi_vendor_infoframe hv_frame;
4926 struct hdmi_avi_infoframe avi_frame;
4928 memset(&hv_frame, 0, sizeof(hv_frame));
4929 memset(&avi_frame, 0, sizeof(avi_frame));
4931 timing_out->h_border_left = 0;
4932 timing_out->h_border_right = 0;
4933 timing_out->v_border_top = 0;
4934 timing_out->v_border_bottom = 0;
4935 /* TODO: un-hardcode */
4936 if (drm_mode_is_420_only(info, mode_in)
4937 && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
4938 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
4939 else if (drm_mode_is_420_also(info, mode_in)
4940 && aconnector->force_yuv420_output)
4941 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
4942 else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444)
4943 && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
4944 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
4946 timing_out->pixel_encoding = PIXEL_ENCODING_RGB;
4948 timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
4949 timing_out->display_color_depth = convert_color_depth_from_display_info(
4951 (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420),
4953 timing_out->scan_type = SCANNING_TYPE_NODATA;
4954 timing_out->hdmi_vic = 0;
4957 timing_out->vic = old_stream->timing.vic;
4958 timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY;
4959 timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY;
4961 timing_out->vic = drm_match_cea_mode(mode_in);
4962 if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
4963 timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
4964 if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
4965 timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
4968 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
4969 drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in);
4970 timing_out->vic = avi_frame.video_code;
4971 drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in);
4972 timing_out->hdmi_vic = hv_frame.vic;
4975 timing_out->h_addressable = mode_in->crtc_hdisplay;
4976 timing_out->h_total = mode_in->crtc_htotal;
4977 timing_out->h_sync_width =
4978 mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
4979 timing_out->h_front_porch =
4980 mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
4981 timing_out->v_total = mode_in->crtc_vtotal;
4982 timing_out->v_addressable = mode_in->crtc_vdisplay;
4983 timing_out->v_front_porch =
4984 mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
4985 timing_out->v_sync_width =
4986 mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
4987 timing_out->pix_clk_100hz = mode_in->crtc_clock * 10;
4988 timing_out->aspect_ratio = get_aspect_ratio(mode_in);
4990 stream->output_color_space = get_output_color_space(timing_out);
4992 stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
4993 stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
4994 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
4995 if (!adjust_colour_depth_from_display_info(timing_out, info) &&
4996 drm_mode_is_420_also(info, mode_in) &&
4997 timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) {
4998 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
4999 adjust_colour_depth_from_display_info(timing_out, info);
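/*
 * Copy the audio capabilities parsed from the EDID (audio modes for CEA
 * revision 3+, speaker allocation, latencies) into the stream's
 * audio_info.
 */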
5004 static void fill_audio_info(struct audio_info *audio_info,
5005 const struct drm_connector *drm_connector,
5006 const struct dc_sink *dc_sink)
5009 int cea_revision = 0;
5010 const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;
5012 audio_info->manufacture_id = edid_caps->manufacturer_id;
5013 audio_info->product_id = edid_caps->product_id;
5015 cea_revision = drm_connector->display_info.cea_rev;
5017 strscpy(audio_info->display_name,
5018 edid_caps->display_name,
5019 AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);
5021 if (cea_revision >= 3) {
5022 audio_info->mode_count = edid_caps->audio_mode_count;
5024 for (i = 0; i < audio_info->mode_count; ++i) {
5025 audio_info->modes[i].format_code =
5026 (enum audio_format_code)
5027 (edid_caps->audio_modes[i].format_code);
5028 audio_info->modes[i].channel_count =
5029 edid_caps->audio_modes[i].channel_count;
5030 audio_info->modes[i].sample_rates.all =
5031 edid_caps->audio_modes[i].sample_rate;
5032 audio_info->modes[i].sample_size =
5033 edid_caps->audio_modes[i].sample_size;
5037 audio_info->flags.all = edid_caps->speaker_flags;
5039 /* TODO: We only check for the progressive mode, check for interlace mode too */
5040 if (drm_connector->latency_present[0]) {
5041 audio_info->video_latency = drm_connector->video_latency[0];
5042 audio_info->audio_latency = drm_connector->audio_latency[0];
5045 /* TODO: For DP, video and audio latency should be calculated from DPCD caps */
5050 copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
5051 struct drm_display_mode *dst_mode)
5053 dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
5054 dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
5055 dst_mode->crtc_clock = src_mode->crtc_clock;
5056 dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
5057 dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
5058 dst_mode->crtc_hsync_start = src_mode->crtc_hsync_start;
5059 dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
5060 dst_mode->crtc_htotal = src_mode->crtc_htotal;
5061 dst_mode->crtc_hskew = src_mode->crtc_hskew;
5062 dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
5063 dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
5064 dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
5065 dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
5066 dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
5070 decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
5071 const struct drm_display_mode *native_mode,
5074 if (scale_enabled) {
5075 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
5076 } else if (native_mode->clock == drm_mode->clock &&
5077 native_mode->htotal == drm_mode->htotal &&
5078 native_mode->vtotal == drm_mode->vtotal) {
5079 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
5081 /* no scaling and no amdgpu-inserted mode, no need to patch */
5085 static struct dc_sink *
5086 create_fake_sink(struct amdgpu_dm_connector *aconnector)
5088 struct dc_sink_init_data sink_init_data = { 0 };
5089 struct dc_sink *sink = NULL;
5090 sink_init_data.link = aconnector->dc_link;
5091 sink_init_data.sink_signal = aconnector->dc_link->connector_signal;
5093 sink = dc_sink_create(&sink_init_data);
5095 DRM_ERROR("Failed to create sink!\n");
5098 sink->sink_signal = SIGNAL_TYPE_VIRTUAL;
5103 static void set_multisync_trigger_params(
5104 struct dc_stream_state *stream)
5106 if (stream->triggered_crtc_reset.enabled) {
5107 stream->triggered_crtc_reset.event = CRTC_EVENT_VSYNC_RISING;
5108 stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_LINE;
5112 static void set_master_stream(struct dc_stream_state *stream_set[],
5115 int j, highest_rfr = 0, master_stream = 0;
5117 for (j = 0; j < stream_count; j++) {
5118 if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
5119 int refresh_rate = 0;
5121 refresh_rate = (stream_set[j]->timing.pix_clk_100hz*100)/
5122 (stream_set[j]->timing.h_total*stream_set[j]->timing.v_total);
5123 if (refresh_rate > highest_rfr) {
5124 highest_rfr = refresh_rate;
5129 for (j = 0; j < stream_count; j++) {
5131 stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
5135 static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
5139 if (context->stream_count < 2)
5141 for (i = 0; i < context->stream_count ; i++) {
5142 if (!context->streams[i])
5145 * TODO: add a function to read AMD VSDB bits and set
5146 * crtc_sync_master.multi_sync_enabled flag
5147 * For now it's set to false
5149 set_multisync_trigger_params(context->streams[i]);
5151 set_master_stream(context->streams, context->stream_count);
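/*
 * Create and fill a dc_stream_state for a connector: pick the preferred
 * or requested mode, fill timing, scaling and audio info, enable DSC on
 * DP links when supported (or forced via debugfs), and build the HDMI
 * VSIF / DP VSC info packets where applicable.
 */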
5154 static struct dc_stream_state *
5155 create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
5156 const struct drm_display_mode *drm_mode,
5157 const struct dm_connector_state *dm_state,
5158 const struct dc_stream_state *old_stream,
5161 struct drm_display_mode *preferred_mode = NULL;
5162 struct drm_connector *drm_connector;
5163 const struct drm_connector_state *con_state =
5164 dm_state ? &dm_state->base : NULL;
5165 struct dc_stream_state *stream = NULL;
5166 struct drm_display_mode mode = *drm_mode;
5167 bool native_mode_found = false;
5168 bool scale = dm_state ? (dm_state->scaling != RMX_OFF) : false;
5170 int preferred_refresh = 0;
5171 #if defined(CONFIG_DRM_AMD_DC_DCN)
5172 struct dsc_dec_dpcd_caps dsc_caps;
5173 uint32_t link_bandwidth_kbps;
5175 struct dc_sink *sink = NULL;
5176 if (aconnector == NULL) {
5177 DRM_ERROR("aconnector is NULL!\n");
5181 drm_connector = &aconnector->base;
5183 if (!aconnector->dc_sink) {
5184 sink = create_fake_sink(aconnector);
5188 sink = aconnector->dc_sink;
5189 dc_sink_retain(sink);
5192 stream = dc_create_stream_for_sink(sink);
5194 if (stream == NULL) {
5195 DRM_ERROR("Failed to create stream for sink!\n");
5199 stream->dm_stream_context = aconnector;
5201 stream->timing.flags.LTE_340MCSC_SCRAMBLE =
5202 drm_connector->display_info.hdmi.scdc.scrambling.low_rates;
5204 list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
5205 /* Search for preferred mode */
5206 if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
5207 native_mode_found = true;
5211 if (!native_mode_found)
5212 preferred_mode = list_first_entry_or_null(
5213 &aconnector->base.modes,
5214 struct drm_display_mode,
5217 mode_refresh = drm_mode_vrefresh(&mode);
5219 if (preferred_mode == NULL) {
5221 * This may not be an error: the use case is when there are no
5222 * usermode calls to reset and set the mode upon hotplug. In that
5223 * case, we call set mode ourselves to restore the previous mode,
5224 * and the mode list may not be filled in yet.
5226 DRM_DEBUG_DRIVER("No preferred mode found\n");
5228 decide_crtc_timing_for_drm_display_mode(
5229 &mode, preferred_mode,
5230 dm_state ? (dm_state->scaling != RMX_OFF) : false);
5231 preferred_refresh = drm_mode_vrefresh(preferred_mode);
5235 drm_mode_set_crtcinfo(&mode, 0);
5238 * If scaling is enabled and refresh rate didn't change
5239 * we copy the vic and polarities of the old timings
5241 if (!scale || mode_refresh != preferred_refresh)
5242 fill_stream_properties_from_drm_display_mode(stream,
5243 &mode, &aconnector->base, con_state, NULL, requested_bpc);
5245 fill_stream_properties_from_drm_display_mode(stream,
5246 &mode, &aconnector->base, con_state, old_stream, requested_bpc);
5248 stream->timing.flags.DSC = 0;
5250 if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
5251 #if defined(CONFIG_DRM_AMD_DC_DCN)
5252 dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
5253 aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
5254 aconnector->dc_link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw,
5256 link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
5257 dc_link_get_link_cap(aconnector->dc_link));
5259 if (aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE && dsc_caps.is_dsc_supported) {
5260 /* Set DSC policy according to dsc_clock_en */
5261 dc_dsc_policy_set_enable_dsc_when_not_needed(
5262 aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE);
5264 if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
5266 aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
5268 link_bandwidth_kbps,
5270 &stream->timing.dsc_cfg))
5271 stream->timing.flags.DSC = 1;
5272 /* Overwrite the stream flag if DSC is enabled through debugfs */
5273 if (aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE)
5274 stream->timing.flags.DSC = 1;
5276 if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_h)
5277 stream->timing.dsc_cfg.num_slices_h = aconnector->dsc_settings.dsc_num_slices_h;
5279 if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_v)
5280 stream->timing.dsc_cfg.num_slices_v = aconnector->dsc_settings.dsc_num_slices_v;
5282 if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_bits_per_pixel)
5283 stream->timing.dsc_cfg.bits_per_pixel = aconnector->dsc_settings.dsc_bits_per_pixel;
5288 update_stream_scaling_settings(&mode, dm_state, stream);
5291 &stream->audio_info,
5295 update_stream_signal(stream, sink);
5297 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
5298 mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket);
5300 if (stream->link->psr_settings.psr_feature_enabled) {
5302 // Decide whether the stream supports VSC SDP colorimetry
5303 // before building the VSC info packet.
5305 stream->use_vsc_sdp_for_colorimetry = false;
5306 if (aconnector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
5307 stream->use_vsc_sdp_for_colorimetry =
5308 aconnector->dc_sink->is_vsc_sdp_colorimetry_supported;
5310 if (stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED)
5311 stream->use_vsc_sdp_for_colorimetry = true;
5313 mod_build_vsc_infopacket(stream, &stream->vsc_infopacket);
5316 dc_sink_release(sink);
5321 static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
5323 drm_crtc_cleanup(crtc);
5327 static void dm_crtc_destroy_state(struct drm_crtc *crtc,
5328 struct drm_crtc_state *state)
5330 struct dm_crtc_state *cur = to_dm_crtc_state(state);
5332 /* TODO: Destroy dc_stream objects once the stream object is flattened */
5334 dc_stream_release(cur->stream);
5337 __drm_atomic_helper_crtc_destroy_state(state);
5343 static void dm_crtc_reset_state(struct drm_crtc *crtc)
5345 struct dm_crtc_state *state;
5348 dm_crtc_destroy_state(crtc, crtc->state);
5350 state = kzalloc(sizeof(*state), GFP_KERNEL);
5351 if (WARN_ON(!state))
5354 __drm_atomic_helper_crtc_reset(crtc, &state->base);
5357 static struct drm_crtc_state *
5358 dm_crtc_duplicate_state(struct drm_crtc *crtc)
5360 struct dm_crtc_state *state, *cur;
5362 cur = to_dm_crtc_state(crtc->state);
5364 if (WARN_ON(!crtc->state))
5367 state = kzalloc(sizeof(*state), GFP_KERNEL);
5371 __drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
5374 state->stream = cur->stream;
5375 dc_stream_retain(state->stream);
5378 state->active_planes = cur->active_planes;
5379 state->vrr_infopacket = cur->vrr_infopacket;
5380 state->abm_level = cur->abm_level;
5381 state->vrr_supported = cur->vrr_supported;
5382 state->freesync_config = cur->freesync_config;
5383 state->crc_src = cur->crc_src;
5384 state->cm_has_degamma = cur->cm_has_degamma;
5385 state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb;
5386 #ifdef CONFIG_DEBUG_FS
5387 state->crc_window = cur->crc_window;
5389 /* TODO: Duplicate dc_stream once the stream object is flattened */
5391 return &state->base;
5394 #ifdef CONFIG_DEBUG_FS
5395 static int amdgpu_dm_crtc_atomic_set_property(struct drm_crtc *crtc,
5396 struct drm_crtc_state *crtc_state,
5397 struct drm_property *property,
5400 struct drm_device *dev = crtc->dev;
5401 struct amdgpu_device *adev = drm_to_adev(dev);
5402 struct dm_crtc_state *dm_new_state =
5403 to_dm_crtc_state(crtc_state);
5405 if (property == adev->dm.crc_win_x_start_property)
5406 dm_new_state->crc_window.x_start = val;
5407 else if (property == adev->dm.crc_win_y_start_property)
5408 dm_new_state->crc_window.y_start = val;
5409 else if (property == adev->dm.crc_win_x_end_property)
5410 dm_new_state->crc_window.x_end = val;
5411 else if (property == adev->dm.crc_win_y_end_property)
5412 dm_new_state->crc_window.y_end = val;
5419 static int amdgpu_dm_crtc_atomic_get_property(struct drm_crtc *crtc,
5420 const struct drm_crtc_state *state,
5421 struct drm_property *property,
5424 struct drm_device *dev = crtc->dev;
5425 struct amdgpu_device *adev = drm_to_adev(dev);
5426 struct dm_crtc_state *dm_state =
5427 to_dm_crtc_state(state);
5429 if (property == adev->dm.crc_win_x_start_property)
5430 *val = dm_state->crc_window.x_start;
5431 else if (property == adev->dm.crc_win_y_start_property)
5432 *val = dm_state->crc_window.y_start;
5433 else if (property == adev->dm.crc_win_x_end_property)
5434 *val = dm_state->crc_window.x_end;
5435 else if (property == adev->dm.crc_win_y_end_property)
5436 *val = dm_state->crc_window.y_end;
5444 static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
5446 enum dc_irq_source irq_source;
5447 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
5448 struct amdgpu_device *adev = drm_to_adev(crtc->dev);
5451 irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst;
5453 rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
5455 DRM_DEBUG_DRIVER("crtc %d - vupdate irq %sabling: r=%d\n",
5456 acrtc->crtc_id, enable ? "en" : "dis", rc);
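/*
 * Enable or disable the CRTC's vblank interrupt. In VRR mode the VUPDATE
 * interrupt is toggled along with it, and on DCN the count of active
 * vblank interrupts gates DC idle (MALL) optimizations.
 */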
5460 static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
5462 enum dc_irq_source irq_source;
5463 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
5464 struct amdgpu_device *adev = drm_to_adev(crtc->dev);
5465 struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
5466 struct amdgpu_display_manager *dm = &adev->dm;
5470 /* vblank irq on -> Only need vupdate irq in vrr mode */
5471 if (amdgpu_dm_vrr_active(acrtc_state))
5472 rc = dm_set_vupdate_irq(crtc, true);
5474 /* vblank irq off -> vupdate irq off */
5475 rc = dm_set_vupdate_irq(crtc, false);
5481 irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
5483 if (!dc_interrupt_set(adev->dm.dc, irq_source, enable))
5486 mutex_lock(&dm->dc_lock);
5489 dm->active_vblank_irq_count++;
5491 dm->active_vblank_irq_count--;
5493 #if defined(CONFIG_DRM_AMD_DC_DCN)
5494 dc_allow_idle_optimizations(
5495 adev->dm.dc, dm->active_vblank_irq_count == 0);
5497 DRM_DEBUG_DRIVER("Allow idle optimizations (MALL): %d\n", dm->active_vblank_irq_count == 0);
5500 mutex_unlock(&dm->dc_lock);
5505 static int dm_enable_vblank(struct drm_crtc *crtc)
5507 return dm_set_vblank(crtc, true);
5510 static void dm_disable_vblank(struct drm_crtc *crtc)
5512 dm_set_vblank(crtc, false);
5515 /* Implemented only the options currently available for the driver */
5516 static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
5517 .reset = dm_crtc_reset_state,
5518 .destroy = amdgpu_dm_crtc_destroy,
5519 .set_config = drm_atomic_helper_set_config,
5520 .page_flip = drm_atomic_helper_page_flip,
5521 .atomic_duplicate_state = dm_crtc_duplicate_state,
5522 .atomic_destroy_state = dm_crtc_destroy_state,
5523 .set_crc_source = amdgpu_dm_crtc_set_crc_source,
5524 .verify_crc_source = amdgpu_dm_crtc_verify_crc_source,
5525 .get_crc_sources = amdgpu_dm_crtc_get_crc_sources,
5526 .get_vblank_counter = amdgpu_get_vblank_counter_kms,
5527 .enable_vblank = dm_enable_vblank,
5528 .disable_vblank = dm_disable_vblank,
5529 .get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
5530 #ifdef CONFIG_DEBUG_FS
5531 .atomic_set_property = amdgpu_dm_crtc_atomic_set_property,
5532 .atomic_get_property = amdgpu_dm_crtc_atomic_get_property,
5536 static enum drm_connector_status
5537 amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
5540 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5544 * 1. This interface is NOT called in context of HPD irq.
5545 * 2. This interface *is called* in context of user-mode ioctl, which
5546 * makes it a bad place for *any* MST-related activity.
5549 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
5550 !aconnector->fake_enable)
5551 connected = (aconnector->dc_sink != NULL);
5553 connected = (aconnector->base.force == DRM_FORCE_ON);
5555 update_subconnector_property(aconnector);
5557 return (connected ? connector_status_connected :
5558 connector_status_disconnected);
5561 int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
5562 struct drm_connector_state *connector_state,
5563 struct drm_property *property,
5566 struct drm_device *dev = connector->dev;
5567 struct amdgpu_device *adev = drm_to_adev(dev);
5568 struct dm_connector_state *dm_old_state =
5569 to_dm_connector_state(connector->state);
5570 struct dm_connector_state *dm_new_state =
5571 to_dm_connector_state(connector_state);
5575 if (property == dev->mode_config.scaling_mode_property) {
5576 enum amdgpu_rmx_type rmx_type;
5579 case DRM_MODE_SCALE_CENTER:
5580 rmx_type = RMX_CENTER;
5582 case DRM_MODE_SCALE_ASPECT:
5583 rmx_type = RMX_ASPECT;
5585 case DRM_MODE_SCALE_FULLSCREEN:
5586 rmx_type = RMX_FULL;
5588 case DRM_MODE_SCALE_NONE:
5594 if (dm_old_state->scaling == rmx_type)
5597 dm_new_state->scaling = rmx_type;
5599 } else if (property == adev->mode_info.underscan_hborder_property) {
5600 dm_new_state->underscan_hborder = val;
5602 } else if (property == adev->mode_info.underscan_vborder_property) {
5603 dm_new_state->underscan_vborder = val;
5605 } else if (property == adev->mode_info.underscan_property) {
5606 dm_new_state->underscan_enable = val;
5608 } else if (property == adev->mode_info.abm_level_property) {
5609 dm_new_state->abm_level = val;
5616 int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
5617 const struct drm_connector_state *state,
5618 struct drm_property *property,
5621 struct drm_device *dev = connector->dev;
5622 struct amdgpu_device *adev = drm_to_adev(dev);
5623 struct dm_connector_state *dm_state =
5624 to_dm_connector_state(state);
5627 if (property == dev->mode_config.scaling_mode_property) {
5628 switch (dm_state->scaling) {
5630 *val = DRM_MODE_SCALE_CENTER;
5633 *val = DRM_MODE_SCALE_ASPECT;
5636 *val = DRM_MODE_SCALE_FULLSCREEN;
5640 *val = DRM_MODE_SCALE_NONE;
5644 } else if (property == adev->mode_info.underscan_hborder_property) {
5645 *val = dm_state->underscan_hborder;
5647 } else if (property == adev->mode_info.underscan_vborder_property) {
5648 *val = dm_state->underscan_vborder;
5650 } else if (property == adev->mode_info.underscan_property) {
5651 *val = dm_state->underscan_enable;
5653 } else if (property == adev->mode_info.abm_level_property) {
5654 *val = dm_state->abm_level;
5661 static void amdgpu_dm_connector_unregister(struct drm_connector *connector)
5663 struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
5665 drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux);
5668 static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
5670 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5671 const struct dc_link *link = aconnector->dc_link;
5672 struct amdgpu_device *adev = drm_to_adev(connector->dev);
5673 struct amdgpu_display_manager *dm = &adev->dm;
5676 * Call only if mst_mgr was initialized before, since it's not done
5677 * for all connector types.
5679 if (aconnector->mst_mgr.dev)
5680 drm_dp_mst_topology_mgr_destroy(&aconnector->mst_mgr);
5682 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
5683 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
5685 if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
5686 link->type != dc_connection_none &&
5687 dm->backlight_dev) {
5688 backlight_device_unregister(dm->backlight_dev);
5689 dm->backlight_dev = NULL;
5693 if (aconnector->dc_em_sink)
5694 dc_sink_release(aconnector->dc_em_sink);
5695 aconnector->dc_em_sink = NULL;
5696 if (aconnector->dc_sink)
5697 dc_sink_release(aconnector->dc_sink);
5698 aconnector->dc_sink = NULL;
5700 drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
5701 drm_connector_unregister(connector);
5702 drm_connector_cleanup(connector);
5703 if (aconnector->i2c) {
5704 i2c_del_adapter(&aconnector->i2c->base);
5705 kfree(aconnector->i2c);
5707 kfree(aconnector->dm_dp_aux.aux.name);
5712 void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
5714 struct dm_connector_state *state =
5715 to_dm_connector_state(connector->state);
5717 if (connector->state)
5718 __drm_atomic_helper_connector_destroy_state(connector->state);
5722 state = kzalloc(sizeof(*state), GFP_KERNEL);
5725 state->scaling = RMX_OFF;
5726 state->underscan_enable = false;
5727 state->underscan_hborder = 0;
5728 state->underscan_vborder = 0;
5729 state->base.max_requested_bpc = 8;
5730 state->vcpi_slots = 0;
5732 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
5733 state->abm_level = amdgpu_dm_abm_level;
5735 __drm_atomic_helper_connector_reset(connector, &state->base);
5739 struct drm_connector_state *
5740 amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
5742 struct dm_connector_state *state =
5743 to_dm_connector_state(connector->state);
5745 struct dm_connector_state *new_state =
5746 kmemdup(state, sizeof(*state), GFP_KERNEL);
5751 __drm_atomic_helper_connector_duplicate_state(connector, &new_state->base);
5753 new_state->freesync_capable = state->freesync_capable;
5754 new_state->abm_level = state->abm_level;
5755 new_state->scaling = state->scaling;
5756 new_state->underscan_enable = state->underscan_enable;
5757 new_state->underscan_hborder = state->underscan_hborder;
5758 new_state->underscan_vborder = state->underscan_vborder;
5759 new_state->vcpi_slots = state->vcpi_slots;
5760 new_state->pbn = state->pbn;
5761 return &new_state->base;
5765 amdgpu_dm_connector_late_register(struct drm_connector *connector)
5767 struct amdgpu_dm_connector *amdgpu_dm_connector =
5768 to_amdgpu_dm_connector(connector);
5771 if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
5772 (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
5773 amdgpu_dm_connector->dm_dp_aux.aux.dev = connector->kdev;
5774 r = drm_dp_aux_register(&amdgpu_dm_connector->dm_dp_aux.aux);
5779 #if defined(CONFIG_DEBUG_FS)
5780 connector_debugfs_init(amdgpu_dm_connector);
5786 static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
5787 .reset = amdgpu_dm_connector_funcs_reset,
5788 .detect = amdgpu_dm_connector_detect,
5789 .fill_modes = drm_helper_probe_single_connector_modes,
5790 .destroy = amdgpu_dm_connector_destroy,
5791 .atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
5792 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
5793 .atomic_set_property = amdgpu_dm_connector_atomic_set_property,
5794 .atomic_get_property = amdgpu_dm_connector_atomic_get_property,
5795 .late_register = amdgpu_dm_connector_late_register,
5796 .early_unregister = amdgpu_dm_connector_unregister
5799 static int get_modes(struct drm_connector *connector)
5801 return amdgpu_dm_connector_get_modes(connector);
5804 static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
5806 struct dc_sink_init_data init_params = {
5807 .link = aconnector->dc_link,
5808 .sink_signal = SIGNAL_TYPE_VIRTUAL
5812 if (!aconnector->base.edid_blob_ptr) {
5813 DRM_ERROR("No EDID firmware found on connector: %s ,forcing to OFF!\n",
5814 aconnector->base.name);
5816 aconnector->base.force = DRM_FORCE_OFF;
5817 aconnector->base.override_edid = false;
5821 edid = (struct edid *) aconnector->base.edid_blob_ptr->data;
5823 aconnector->edid = edid;
5825 aconnector->dc_em_sink = dc_link_add_remote_sink(
5826 aconnector->dc_link,
5828 (edid->extensions + 1) * EDID_LENGTH,
5831 if (aconnector->base.force == DRM_FORCE_ON) {
5832 aconnector->dc_sink = aconnector->dc_link->local_sink ?
5833 aconnector->dc_link->local_sink :
5834 aconnector->dc_em_sink;
5835 dc_sink_retain(aconnector->dc_sink);
5839 static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
5841 struct dc_link *link = (struct dc_link *)aconnector->dc_link;
5844 * In case of headless boot with force on for DP managed connector
5845 * Those settings have to be != 0 to get initial modeset
5847 if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
5848 link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
5849 link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
5853 aconnector->base.override_edid = true;
5854 create_eml_sink(aconnector);
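/*
 * Create a stream for the sink and validate it with DC, retrying with a
 * lower max bpc (in steps of 2, down to 6) when validation fails.
 */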
5857 static struct dc_stream_state *
5858 create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
5859 const struct drm_display_mode *drm_mode,
5860 const struct dm_connector_state *dm_state,
5861 const struct dc_stream_state *old_stream)
5863 struct drm_connector *connector = &aconnector->base;
5864 struct amdgpu_device *adev = drm_to_adev(connector->dev);
5865 struct dc_stream_state *stream;
5866 const struct drm_connector_state *drm_state = dm_state ? &dm_state->base : NULL;
5867 int requested_bpc = drm_state ? drm_state->max_requested_bpc : 8;
5868 enum dc_status dc_result = DC_OK;
5871 stream = create_stream_for_sink(aconnector, drm_mode,
5872 dm_state, old_stream,
5874 if (stream == NULL) {
5875 DRM_ERROR("Failed to create stream for sink!\n");
5879 dc_result = dc_validate_stream(adev->dm.dc, stream);
5881 if (dc_result != DC_OK) {
5882 DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d (%s)\n",
5887 dc_status_to_str(dc_result));
5889 dc_stream_release(stream);
5891 requested_bpc -= 2; /* lower bpc to retry validation */
5894 } while (stream == NULL && requested_bpc >= 6);
5899 enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
5900 struct drm_display_mode *mode)
5902 int result = MODE_ERROR;
5903 struct dc_sink *dc_sink;
5904 /* TODO: Unhardcode stream count */
5905 struct dc_stream_state *stream;
5906 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5908 if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
5909 (mode->flags & DRM_MODE_FLAG_DBLSCAN))
5913 * Only run this the first time mode_valid is called to initialize
5916 if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
5917 !aconnector->dc_em_sink)
5918 handle_edid_mgmt(aconnector);
5920 dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;
5922 if (dc_sink == NULL && aconnector->base.force != DRM_FORCE_ON_DIGITAL &&
5923 aconnector->base.force != DRM_FORCE_ON) {
5924 DRM_ERROR("dc_sink is NULL!\n");
5928 stream = create_validate_stream_for_sink(aconnector, mode, NULL, NULL);
5930 dc_stream_release(stream);
5935 /* TODO: error handling*/
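/*
 * Pack the connector's HDR output metadata into a DC info packet, using
 * the HDMI DRM infoframe layout for HDMI and an SDP header for DP/eDP.
 */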
5939 static int fill_hdr_info_packet(const struct drm_connector_state *state,
5940 struct dc_info_packet *out)
5942 struct hdmi_drm_infoframe frame;
5943 unsigned char buf[30]; /* 26 + 4 */
5947 memset(out, 0, sizeof(*out));
5949 if (!state->hdr_output_metadata)
5952 ret = drm_hdmi_infoframe_set_hdr_metadata(&frame, state);
5956 len = hdmi_drm_infoframe_pack_only(&frame, buf, sizeof(buf));
5960 /* Static metadata is a fixed 26 bytes + 4 byte header. */
5964 /* Prepare the infopacket for DC. */
5965 switch (state->connector->connector_type) {
5966 case DRM_MODE_CONNECTOR_HDMIA:
5967 out->hb0 = 0x87; /* type */
5968 out->hb1 = 0x01; /* version */
5969 out->hb2 = 0x1A; /* length */
5970 out->sb[0] = buf[3]; /* checksum */
5974 case DRM_MODE_CONNECTOR_DisplayPort:
5975 case DRM_MODE_CONNECTOR_eDP:
5976 out->hb0 = 0x00; /* sdp id, zero */
5977 out->hb1 = 0x87; /* type */
5978 out->hb2 = 0x1D; /* payload len - 1 */
5979 out->hb3 = (0x13 << 2); /* sdp version */
5980 out->sb[0] = 0x01; /* version */
5981 out->sb[1] = 0x1A; /* length */
5989 memcpy(&out->sb[i], &buf[4], 26);
5992 print_hex_dump(KERN_DEBUG, "HDR SB:", DUMP_PREFIX_NONE, 16, 1, out->sb,
5993 sizeof(out->sb), false);
5999 is_hdr_metadata_different(const struct drm_connector_state *old_state,
6000 const struct drm_connector_state *new_state)
6002 struct drm_property_blob *old_blob = old_state->hdr_output_metadata;
6003 struct drm_property_blob *new_blob = new_state->hdr_output_metadata;
6005 if (old_blob != new_blob) {
6006 if (old_blob && new_blob &&
6007 old_blob->length == new_blob->length)
6008 return memcmp(old_blob->data, new_blob->data,
6018 amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
6019 struct drm_atomic_state *state)
6021 struct drm_connector_state *new_con_state =
6022 drm_atomic_get_new_connector_state(state, conn);
6023 struct drm_connector_state *old_con_state =
6024 drm_atomic_get_old_connector_state(state, conn);
6025 struct drm_crtc *crtc = new_con_state->crtc;
6026 struct drm_crtc_state *new_crtc_state;
6029 trace_amdgpu_dm_connector_atomic_check(new_con_state);
6034 if (is_hdr_metadata_different(old_con_state, new_con_state)) {
6035 struct dc_info_packet hdr_infopacket;
6037 ret = fill_hdr_info_packet(new_con_state, &hdr_infopacket);
6041 new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
6042 if (IS_ERR(new_crtc_state))
6043 return PTR_ERR(new_crtc_state);
6046 * DC considers the stream backends changed if the
6047 * static metadata changes. Forcing the modeset also
6048 * gives a simple way for userspace to switch from
6049 * 8bpc to 10bpc when setting the metadata to enter
6052 * Changing the static metadata after it's been
6053 * set is permissible, however. So only force a
6054 * modeset if we're entering or exiting HDR.
6056 new_crtc_state->mode_changed =
6057 !old_con_state->hdr_output_metadata ||
6058 !new_con_state->hdr_output_metadata;
6064 static const struct drm_connector_helper_funcs
6065 amdgpu_dm_connector_helper_funcs = {
6067 * If hotplugging a second, bigger display in FB console mode, the bigger
6068 * resolution modes will be filtered by drm_mode_validate_size(), and those
6069 * modes are missing after the user starts lightdm. So we need to renew the
6070 * mode list in the get_modes callback, not just return the mode count.
6072 .get_modes = get_modes,
6073 .mode_valid = amdgpu_dm_connector_mode_valid,
6074 .atomic_check = amdgpu_dm_connector_atomic_check,
6077 static void dm_crtc_helper_disable(struct drm_crtc *crtc)
6081 static int count_crtc_active_planes(struct drm_crtc_state *new_crtc_state)
6083 struct drm_atomic_state *state = new_crtc_state->state;
6084 struct drm_plane *plane;
6087 drm_for_each_plane_mask(plane, state->dev, new_crtc_state->plane_mask) {
6088 struct drm_plane_state *new_plane_state;
6090 /* Cursor planes are "fake". */
6091 if (plane->type == DRM_PLANE_TYPE_CURSOR)
6094 new_plane_state = drm_atomic_get_new_plane_state(state, plane);
6096 if (!new_plane_state) {
6098 * The plane is enabled on the CRTC and hasn't changed
6099 * state. This means that it previously passed
6100 * validation and is therefore enabled.
6106 /* We need a framebuffer to be considered enabled. */
6107 num_active += (new_plane_state->fb != NULL);
6113 static void dm_update_crtc_active_planes(struct drm_crtc *crtc,
6114 struct drm_crtc_state *new_crtc_state)
6116 struct dm_crtc_state *dm_new_crtc_state =
6117 to_dm_crtc_state(new_crtc_state);
6119 dm_new_crtc_state->active_planes = 0;
6121 if (!dm_new_crtc_state->stream)
6124 dm_new_crtc_state->active_planes =
6125 count_crtc_active_planes(new_crtc_state);
6128 static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
6129 struct drm_atomic_state *state)
6131 struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
6133 struct amdgpu_device *adev = drm_to_adev(crtc->dev);
6134 struct dc *dc = adev->dm.dc;
6135 struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
6138 trace_amdgpu_dm_crtc_atomic_check(crtc_state);
6140 dm_update_crtc_active_planes(crtc, crtc_state);
6142 if (unlikely(!dm_crtc_state->stream &&
6143 modeset_required(crtc_state, NULL, dm_crtc_state->stream))) {
6149 * We require the primary plane to be enabled whenever the CRTC is, otherwise
6150 * drm_mode_cursor_universal may end up trying to enable the cursor plane while all other
6151 * planes are disabled, which is not supported by the hardware. And there is legacy
6152 * userspace which stops using the HW cursor altogether in response to the resulting EINVAL.
6154 if (crtc_state->enable &&
6155 !(crtc_state->plane_mask & drm_plane_mask(crtc->primary))) {
6156 DRM_DEBUG_ATOMIC("Can't enable a CRTC without enabling the primary plane\n");
6160 /* In some use cases, like reset, no stream is attached */
6161 if (!dm_crtc_state->stream)
6164 if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
6167 DRM_DEBUG_ATOMIC("Failed DC stream validation\n");
6171 static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc,
6172 const struct drm_display_mode *mode,
6173 struct drm_display_mode *adjusted_mode)
6178 static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
6179 .disable = dm_crtc_helper_disable,
6180 .atomic_check = dm_crtc_helper_atomic_check,
6181 .mode_fixup = dm_crtc_helper_mode_fixup,
6182 .get_scanout_position = amdgpu_crtc_get_scanout_position,
6185 static void dm_encoder_helper_disable(struct drm_encoder *encoder)
6190 static int convert_dc_color_depth_into_bpc(enum dc_color_depth display_color_depth)
6192 switch (display_color_depth) {
6193 case COLOR_DEPTH_666:
6195 case COLOR_DEPTH_888:
6197 case COLOR_DEPTH_101010:
6199 case COLOR_DEPTH_121212:
6201 case COLOR_DEPTH_141414:
6203 case COLOR_DEPTH_161616:
6211 static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
6212 struct drm_crtc_state *crtc_state,
6213 struct drm_connector_state *conn_state)
6215 struct drm_atomic_state *state = crtc_state->state;
6216 struct drm_connector *connector = conn_state->connector;
6217 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6218 struct dm_connector_state *dm_new_connector_state = to_dm_connector_state(conn_state);
6219 const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
6220 struct drm_dp_mst_topology_mgr *mst_mgr;
6221 struct drm_dp_mst_port *mst_port;
6222 enum dc_color_depth color_depth;
6224 bool is_y420 = false;
6226 if (!aconnector->port || !aconnector->dc_sink)
6229 mst_port = aconnector->port;
6230 mst_mgr = &aconnector->mst_port->mst_mgr;
6232 if (!crtc_state->connectors_changed && !crtc_state->mode_changed)
6235 if (!state->duplicated) {
6236 int max_bpc = conn_state->max_requested_bpc;
6237 is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
6238 aconnector->force_yuv420_output;
6239 color_depth = convert_color_depth_from_display_info(connector,
6242 bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
6243 clock = adjusted_mode->clock;
6244 dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false);
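/*
 * The PBN (payload bandwidth number) derived from the pixel clock and bpp
 * is then converted into the number of MST time slots (VCPI) required on
 * the link, using the link's PBN-per-time-slot divider.
 */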
6246 dm_new_connector_state->vcpi_slots = drm_dp_atomic_find_vcpi_slots(state,
6249 dm_new_connector_state->pbn,
6250 dm_mst_get_pbn_divider(aconnector->dc_link));
6251 if (dm_new_connector_state->vcpi_slots < 0) {
6252 DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots);
6253 return dm_new_connector_state->vcpi_slots;
6258 const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
6259 .disable = dm_encoder_helper_disable,
6260 .atomic_check = dm_encoder_helper_atomic_check
6263 #if defined(CONFIG_DRM_AMD_DC_DCN)
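/*
 * For MST streams whose timing has DSC enabled, re-derive the PBN and VCPI
 * slot count from the DSC target bits_per_pixel, enable DSC in the MST
 * atomic state, and store the result in the connector state.
 */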
6264 static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
6265 struct dc_state *dc_state)
6267 struct dc_stream_state *stream = NULL;
6268 struct drm_connector *connector;
6269 struct drm_connector_state *new_con_state, *old_con_state;
6270 struct amdgpu_dm_connector *aconnector;
6271 struct dm_connector_state *dm_conn_state;
6272 int i, j, clock, bpp;
6273 int vcpi, pbn_div, pbn = 0;
6275 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
6277 aconnector = to_amdgpu_dm_connector(connector);
6279 if (!aconnector->port)
6282 if (!new_con_state || !new_con_state->crtc)
6285 dm_conn_state = to_dm_connector_state(new_con_state);
6287 for (j = 0; j < dc_state->stream_count; j++) {
6288 stream = dc_state->streams[j];
6292 if ((struct amdgpu_dm_connector*)stream->dm_stream_context == aconnector)
6301 if (stream->timing.flags.DSC != 1) {
6302 drm_dp_mst_atomic_enable_dsc(state,
6310 pbn_div = dm_mst_get_pbn_divider(stream->link);
6311 bpp = stream->timing.dsc_cfg.bits_per_pixel;
6312 clock = stream->timing.pix_clk_100hz / 10;
6313 pbn = drm_dp_calc_pbn_mode(clock, bpp, true);
6314 vcpi = drm_dp_mst_atomic_enable_dsc(state,
6321 dm_conn_state->pbn = pbn;
6322 dm_conn_state->vcpi_slots = vcpi;
6328 static void dm_drm_plane_reset(struct drm_plane *plane)
6330 struct dm_plane_state *amdgpu_state = NULL;
6333 plane->funcs->atomic_destroy_state(plane, plane->state);
6335 amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);
6336 WARN_ON(amdgpu_state == NULL);
6339 __drm_atomic_helper_plane_reset(plane, &amdgpu_state->base);
6342 static struct drm_plane_state *
6343 dm_drm_plane_duplicate_state(struct drm_plane *plane)
6345 struct dm_plane_state *dm_plane_state, *old_dm_plane_state;
6347 old_dm_plane_state = to_dm_plane_state(plane->state);
6348 dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL);
6349 if (!dm_plane_state)
6352 __drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base);
6354 if (old_dm_plane_state->dc_state) {
6355 dm_plane_state->dc_state = old_dm_plane_state->dc_state;
6356 dc_plane_state_retain(dm_plane_state->dc_state);
6359 return &dm_plane_state->base;
6362 static void dm_drm_plane_destroy_state(struct drm_plane *plane,
6363 struct drm_plane_state *state)
6365 struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
6367 if (dm_plane_state->dc_state)
6368 dc_plane_state_release(dm_plane_state->dc_state);
6370 drm_atomic_helper_plane_destroy_state(plane, state);
6373 static const struct drm_plane_funcs dm_plane_funcs = {
6374 .update_plane = drm_atomic_helper_update_plane,
6375 .disable_plane = drm_atomic_helper_disable_plane,
6376 .destroy = drm_primary_helper_destroy,
6377 .reset = dm_drm_plane_reset,
6378 .atomic_duplicate_state = dm_drm_plane_duplicate_state,
6379 .atomic_destroy_state = dm_drm_plane_destroy_state,
6380 .format_mod_supported = dm_plane_format_mod_supported,
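/*
 * Pin the buffer object backing the new framebuffer (VRAM for cursor
 * planes, any supported domain otherwise), make sure it is GART mapped,
 * and record its GPU address. For newly created planes the DC buffer
 * attributes (tiling, DCC, address) are filled in here as well.
 */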
6383 static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
6384 struct drm_plane_state *new_state)
6386 struct amdgpu_framebuffer *afb;
6387 struct drm_gem_object *obj;
6388 struct amdgpu_device *adev;
6389 struct amdgpu_bo *rbo;
6390 struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
6391 struct list_head list;
6392 struct ttm_validate_buffer tv;
6393 struct ww_acquire_ctx ticket;
6397 if (!new_state->fb) {
6398 DRM_DEBUG_DRIVER("No FB bound\n");
6402 afb = to_amdgpu_framebuffer(new_state->fb);
6403 obj = new_state->fb->obj[0];
6404 rbo = gem_to_amdgpu_bo(obj);
6405 adev = amdgpu_ttm_adev(rbo->tbo.bdev);
6406 INIT_LIST_HEAD(&list);
6410 list_add(&tv.head, &list);
6412 r = ttm_eu_reserve_buffers(&ticket, &list, false, NULL);
6414 dev_err(adev->dev, "fail to reserve bo (%d)\n", r);
6418 if (plane->type != DRM_PLANE_TYPE_CURSOR)
6419 domain = amdgpu_display_supported_domains(adev, rbo->flags);
6421 domain = AMDGPU_GEM_DOMAIN_VRAM;
6423 r = amdgpu_bo_pin(rbo, domain);
6424 if (unlikely(r != 0)) {
6425 if (r != -ERESTARTSYS)
6426 DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
6427 ttm_eu_backoff_reservation(&ticket, &list);
6431 r = amdgpu_ttm_alloc_gart(&rbo->tbo);
6432 if (unlikely(r != 0)) {
6433 amdgpu_bo_unpin(rbo);
6434 ttm_eu_backoff_reservation(&ticket, &list);
6435 DRM_ERROR("%p bind failed\n", rbo);
6439 ttm_eu_backoff_reservation(&ticket, &list);
6441 afb->address = amdgpu_bo_gpu_offset(rbo);
6446 * We don't do surface updates on planes that have been newly created,
6447 * but we also don't have the afb->address during atomic check.
6449 * Fill in buffer attributes depending on the address here, but only on
6450 * newly created planes since they're not being used by DC yet and this
6451 * won't modify global state.
6453 dm_plane_state_old = to_dm_plane_state(plane->state);
6454 dm_plane_state_new = to_dm_plane_state(new_state);
6456 if (dm_plane_state_new->dc_state &&
6457 dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
6458 struct dc_plane_state *plane_state =
6459 dm_plane_state_new->dc_state;
6460 bool force_disable_dcc = !plane_state->dcc.enable;
6462 fill_plane_buffer_attributes(
6463 adev, afb, plane_state->format, plane_state->rotation,
6465 &plane_state->tiling_info, &plane_state->plane_size,
6466 &plane_state->dcc, &plane_state->address,
6467 afb->tmz_surface, force_disable_dcc);
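/*
 * Reserve the old framebuffer's buffer object, unpin it and drop the
 * reference now that the plane no longer scans out of it.
 */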
6473 static void dm_plane_helper_cleanup_fb(struct drm_plane *plane,
6474 struct drm_plane_state *old_state)
6476 struct amdgpu_bo *rbo;
6482 rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]);
6483 r = amdgpu_bo_reserve(rbo, false);
6485 DRM_ERROR("failed to reserve rbo before unpin\n");
6489 amdgpu_bo_unpin(rbo);
6490 amdgpu_bo_unreserve(rbo);
6491 amdgpu_bo_unref(&rbo);
6494 static int dm_plane_helper_check_state(struct drm_plane_state *state,
6495 struct drm_crtc_state *new_crtc_state)
6497 struct drm_framebuffer *fb = state->fb;
6498 int min_downscale, max_upscale;
6500 int max_scale = INT_MAX;
6502 /* Plane enabled? Get min/max allowed scaling factors from plane caps. */
6503 if (fb && state->crtc) {
6504 get_min_max_dc_plane_scaling(state->crtc->dev, fb,
6505 &min_downscale, &max_upscale);
6507 * Convert to drm convention: 16.16 fixed point, instead of dc's
6508 * 1.0 == 1000. Also drm scaling is src/dst instead of dc's
6509 * dst/src, so min_scale = 1.0 / max_upscale, etc.
6511 min_scale = (1000 << 16) / max_upscale;
6512 max_scale = (1000 << 16) / min_downscale;
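/*
 * Illustrative numbers: if DC reports max_upscale = 16000 (16x) and
 * min_downscale = 250 (0.25x), this yields min_scale = 65536000 / 16000
 * = 0x1000 (1/16 in 16.16) and max_scale = 65536000 / 250 = 0x40000
 * (4.0 in 16.16).
 */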
6515 return drm_atomic_helper_check_plane_state(
6516 state, new_crtc_state, min_scale, max_scale, true, true);
6519 static int dm_plane_atomic_check(struct drm_plane *plane,
6520 struct drm_plane_state *state)
6522 struct amdgpu_device *adev = drm_to_adev(plane->dev);
6523 struct dc *dc = adev->dm.dc;
6524 struct dm_plane_state *dm_plane_state;
6525 struct dc_scaling_info scaling_info;
6526 struct drm_crtc_state *new_crtc_state;
6529 trace_amdgpu_dm_plane_atomic_check(state);
6531 dm_plane_state = to_dm_plane_state(state);
6533 if (!dm_plane_state->dc_state)
6537 drm_atomic_get_new_crtc_state(state->state, state->crtc);
6538 if (!new_crtc_state)
6541 ret = dm_plane_helper_check_state(state, new_crtc_state);
6545 ret = fill_dc_scaling_info(state, &scaling_info);
6549 if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
6555 static int dm_plane_atomic_async_check(struct drm_plane *plane,
6556 struct drm_plane_state *new_plane_state)
6558 /* Only support async updates on cursor planes. */
6559 if (plane->type != DRM_PLANE_TYPE_CURSOR)
6565 static void dm_plane_atomic_async_update(struct drm_plane *plane,
6566 struct drm_plane_state *new_state)
6568 struct drm_plane_state *old_state =
6569 drm_atomic_get_old_plane_state(new_state->state, plane);
6571 trace_amdgpu_dm_atomic_update_cursor(new_state);
6573 swap(plane->state->fb, new_state->fb);
6575 plane->state->src_x = new_state->src_x;
6576 plane->state->src_y = new_state->src_y;
6577 plane->state->src_w = new_state->src_w;
6578 plane->state->src_h = new_state->src_h;
6579 plane->state->crtc_x = new_state->crtc_x;
6580 plane->state->crtc_y = new_state->crtc_y;
6581 plane->state->crtc_w = new_state->crtc_w;
6582 plane->state->crtc_h = new_state->crtc_h;
6584 handle_cursor_update(plane, old_state);
6587 static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
6588 .prepare_fb = dm_plane_helper_prepare_fb,
6589 .cleanup_fb = dm_plane_helper_cleanup_fb,
6590 .atomic_check = dm_plane_atomic_check,
6591 .atomic_async_check = dm_plane_atomic_async_check,
6592 .atomic_async_update = dm_plane_atomic_async_update
6596 * TODO: these are currently initialized to RGB formats only.
6597 * For future use cases we should either initialize them dynamically based on
6598 * plane capabilities, or initialize this array to all formats, so the internal
6599 * drm check will succeed, and let DC implement the proper check.
6601 static const uint32_t rgb_formats[] = {
6602 DRM_FORMAT_XRGB8888,
6603 DRM_FORMAT_ARGB8888,
6604 DRM_FORMAT_RGBA8888,
6605 DRM_FORMAT_XRGB2101010,
6606 DRM_FORMAT_XBGR2101010,
6607 DRM_FORMAT_ARGB2101010,
6608 DRM_FORMAT_ABGR2101010,
6609 DRM_FORMAT_XBGR8888,
6610 DRM_FORMAT_ABGR8888,
6614 static const uint32_t overlay_formats[] = {
6615 DRM_FORMAT_XRGB8888,
6616 DRM_FORMAT_ARGB8888,
6617 DRM_FORMAT_RGBA8888,
6618 DRM_FORMAT_XBGR8888,
6619 DRM_FORMAT_ABGR8888,
6623 static const u32 cursor_formats[] = {
6627 static int get_plane_formats(const struct drm_plane *plane,
6628 const struct dc_plane_cap *plane_cap,
6629 uint32_t *formats, int max_formats)
6631 int i, num_formats = 0;
6634 * TODO: Query support for each group of formats directly from
6635 * DC plane caps. This will require adding more formats to the
6639 switch (plane->type) {
6640 case DRM_PLANE_TYPE_PRIMARY:
6641 for (i = 0; i < ARRAY_SIZE(rgb_formats); ++i) {
6642 if (num_formats >= max_formats)
6645 formats[num_formats++] = rgb_formats[i];
6648 if (plane_cap && plane_cap->pixel_format_support.nv12)
6649 formats[num_formats++] = DRM_FORMAT_NV12;
6650 if (plane_cap && plane_cap->pixel_format_support.p010)
6651 formats[num_formats++] = DRM_FORMAT_P010;
6652 if (plane_cap && plane_cap->pixel_format_support.fp16) {
6653 formats[num_formats++] = DRM_FORMAT_XRGB16161616F;
6654 formats[num_formats++] = DRM_FORMAT_ARGB16161616F;
6655 formats[num_formats++] = DRM_FORMAT_XBGR16161616F;
6656 formats[num_formats++] = DRM_FORMAT_ABGR16161616F;
6660 case DRM_PLANE_TYPE_OVERLAY:
6661 for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) {
6662 if (num_formats >= max_formats)
6665 formats[num_formats++] = overlay_formats[i];
6669 case DRM_PLANE_TYPE_CURSOR:
6670 for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) {
6671 if (num_formats >= max_formats)
6674 formats[num_formats++] = cursor_formats[i];
6682 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
6683 struct drm_plane *plane,
6684 unsigned long possible_crtcs,
6685 const struct dc_plane_cap *plane_cap)
6687 uint32_t formats[32];
6690 unsigned int supported_rotations;
6691 uint64_t *modifiers = NULL;
6693 num_formats = get_plane_formats(plane, plane_cap, formats,
6694 ARRAY_SIZE(formats));
6696 res = get_plane_modifiers(dm->adev, plane->type, &modifiers);
6700 res = drm_universal_plane_init(adev_to_drm(dm->adev), plane, possible_crtcs,
6701 &dm_plane_funcs, formats, num_formats,
6702 modifiers, plane->type, NULL);
6707 if (plane->type == DRM_PLANE_TYPE_OVERLAY &&
6708 plane_cap && plane_cap->per_pixel_alpha) {
6709 unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
6710 BIT(DRM_MODE_BLEND_PREMULTI);
6712 drm_plane_create_alpha_property(plane);
6713 drm_plane_create_blend_mode_property(plane, blend_caps);
6716 if (plane->type == DRM_PLANE_TYPE_PRIMARY &&
6718 (plane_cap->pixel_format_support.nv12 ||
6719 plane_cap->pixel_format_support.p010)) {
6720 /* This only affects YUV formats. */
6721 drm_plane_create_color_properties(
6723 BIT(DRM_COLOR_YCBCR_BT601) |
6724 BIT(DRM_COLOR_YCBCR_BT709) |
6725 BIT(DRM_COLOR_YCBCR_BT2020),
6726 BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
6727 BIT(DRM_COLOR_YCBCR_FULL_RANGE),
6728 DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE);
6731 supported_rotations =
6732 DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
6733 DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270;
6735 if (dm->adev->asic_type >= CHIP_BONAIRE &&
6736 plane->type != DRM_PLANE_TYPE_CURSOR)
6737 drm_plane_create_rotation_property(plane, DRM_MODE_ROTATE_0,
6738 supported_rotations);
6740 drm_plane_helper_add(plane, &dm_plane_helper_funcs);
6742 /* Create (reset) the plane state */
6743 if (plane->funcs->reset)
6744 plane->funcs->reset(plane);
6749 #ifdef CONFIG_DEBUG_FS
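/*
 * Attach the debugfs CRC window properties (x/y start and end) to the CRTC.
 */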
6750 static void attach_crtc_crc_properties(struct amdgpu_display_manager *dm,
6751 struct amdgpu_crtc *acrtc)
6753 drm_object_attach_property(&acrtc->base.base,
6754 dm->crc_win_x_start_property,
6756 drm_object_attach_property(&acrtc->base.base,
6757 dm->crc_win_y_start_property,
6759 drm_object_attach_property(&acrtc->base.base,
6760 dm->crc_win_x_end_property,
6762 drm_object_attach_property(&acrtc->base.base,
6763 dm->crc_win_y_end_property,
6768 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
6769 struct drm_plane *plane,
6770 uint32_t crtc_index)
6772 struct amdgpu_crtc *acrtc = NULL;
6773 struct drm_plane *cursor_plane;
6777 cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL);
6781 cursor_plane->type = DRM_PLANE_TYPE_CURSOR;
6782 res = amdgpu_dm_plane_init(dm, cursor_plane, 0, NULL);
6784 acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
6788 res = drm_crtc_init_with_planes(
6793 &amdgpu_dm_crtc_funcs, NULL);
6798 drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs);
6800 /* Create (reset) the plane state */
6801 if (acrtc->base.funcs->reset)
6802 acrtc->base.funcs->reset(&acrtc->base);
6804 acrtc->max_cursor_width = dm->adev->dm.dc->caps.max_cursor_size;
6805 acrtc->max_cursor_height = dm->adev->dm.dc->caps.max_cursor_size;
6807 acrtc->crtc_id = crtc_index;
6808 acrtc->base.enabled = false;
6809 acrtc->otg_inst = -1;
6811 dm->adev->mode_info.crtcs[crtc_index] = acrtc;
6812 drm_crtc_enable_color_mgmt(&acrtc->base, MAX_COLOR_LUT_ENTRIES,
6813 true, MAX_COLOR_LUT_ENTRIES);
6814 drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);
6815 #ifdef CONFIG_DEBUG_FS
6816 attach_crtc_crc_properties(dm, acrtc);
6822 kfree(cursor_plane);
6827 static int to_drm_connector_type(enum signal_type st)
6830 case SIGNAL_TYPE_HDMI_TYPE_A:
6831 return DRM_MODE_CONNECTOR_HDMIA;
6832 case SIGNAL_TYPE_EDP:
6833 return DRM_MODE_CONNECTOR_eDP;
6834 case SIGNAL_TYPE_LVDS:
6835 return DRM_MODE_CONNECTOR_LVDS;
6836 case SIGNAL_TYPE_RGB:
6837 return DRM_MODE_CONNECTOR_VGA;
6838 case SIGNAL_TYPE_DISPLAY_PORT:
6839 case SIGNAL_TYPE_DISPLAY_PORT_MST:
6840 return DRM_MODE_CONNECTOR_DisplayPort;
6841 case SIGNAL_TYPE_DVI_DUAL_LINK:
6842 case SIGNAL_TYPE_DVI_SINGLE_LINK:
6843 return DRM_MODE_CONNECTOR_DVID;
6844 case SIGNAL_TYPE_VIRTUAL:
6845 return DRM_MODE_CONNECTOR_VIRTUAL;
6848 return DRM_MODE_CONNECTOR_Unknown;
6852 static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector)
6854 struct drm_encoder *encoder;
6856 /* There is only one encoder per connector */
6857 drm_connector_for_each_possible_encoder(connector, encoder)
6863 static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
6865 struct drm_encoder *encoder;
6866 struct amdgpu_encoder *amdgpu_encoder;
6868 encoder = amdgpu_dm_connector_to_encoder(connector);
6870 if (encoder == NULL)
6873 amdgpu_encoder = to_amdgpu_encoder(encoder);
6875 amdgpu_encoder->native_mode.clock = 0;
6877 if (!list_empty(&connector->probed_modes)) {
6878 struct drm_display_mode *preferred_mode = NULL;
6880 list_for_each_entry(preferred_mode,
6881 &connector->probed_modes,
6883 if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
6884 amdgpu_encoder->native_mode = *preferred_mode;
6892 static struct drm_display_mode *
6893 amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
6895 int hdisplay, int vdisplay)
6897 struct drm_device *dev = encoder->dev;
6898 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
6899 struct drm_display_mode *mode = NULL;
6900 struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
6902 mode = drm_mode_duplicate(dev, native_mode);
6907 mode->hdisplay = hdisplay;
6908 mode->vdisplay = vdisplay;
6909 mode->type &= ~DRM_MODE_TYPE_PREFERRED;
6910 strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
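/*
 * Add a set of common modes (640x480 up to 1920x1200) that fit within the
 * encoder's native mode and are not already in the probed list; each one is
 * derived from the native mode with adjusted hdisplay/vdisplay.
 */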
6916 static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
6917 struct drm_connector *connector)
6919 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
6920 struct drm_display_mode *mode = NULL;
6921 struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
6922 struct amdgpu_dm_connector *amdgpu_dm_connector =
6923 to_amdgpu_dm_connector(connector);
6927 char name[DRM_DISPLAY_MODE_LEN];
6930 } common_modes[] = {
6931 { "640x480", 640, 480},
6932 { "800x600", 800, 600},
6933 { "1024x768", 1024, 768},
6934 { "1280x720", 1280, 720},
6935 { "1280x800", 1280, 800},
6936 {"1280x1024", 1280, 1024},
6937 { "1440x900", 1440, 900},
6938 {"1680x1050", 1680, 1050},
6939 {"1600x1200", 1600, 1200},
6940 {"1920x1080", 1920, 1080},
6941 {"1920x1200", 1920, 1200}
6944 n = ARRAY_SIZE(common_modes);
6946 for (i = 0; i < n; i++) {
6947 struct drm_display_mode *curmode = NULL;
6948 bool mode_existed = false;
6950 if (common_modes[i].w > native_mode->hdisplay ||
6951 common_modes[i].h > native_mode->vdisplay ||
6952 (common_modes[i].w == native_mode->hdisplay &&
6953 common_modes[i].h == native_mode->vdisplay))
6956 list_for_each_entry(curmode, &connector->probed_modes, head) {
6957 if (common_modes[i].w == curmode->hdisplay &&
6958 common_modes[i].h == curmode->vdisplay) {
6959 mode_existed = true;
6967 mode = amdgpu_dm_create_common_mode(encoder,
6968 common_modes[i].name, common_modes[i].w,
6970 drm_mode_probed_add(connector, mode);
6971 amdgpu_dm_connector->num_modes++;
6975 static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
6978 struct amdgpu_dm_connector *amdgpu_dm_connector =
6979 to_amdgpu_dm_connector(connector);
6982 /* empty probed_modes */
6983 INIT_LIST_HEAD(&connector->probed_modes);
6984 amdgpu_dm_connector->num_modes =
6985 drm_add_edid_modes(connector, edid);
6987 /* Sort the probed modes before calling
6988 * amdgpu_dm_get_native_mode(), since the EDID can have
6989 * more than one preferred mode. Modes that appear
6990 * later in the probed mode list could be of a higher,
6991 * preferred resolution: for example, a 3840x2160
6992 * preferred timing in the base EDID and a 4096x2160
6993 * preferred resolution in a DID extension block later.
6995 drm_mode_sort(&connector->probed_modes);
6996 amdgpu_dm_get_native_mode(connector);
6998 amdgpu_dm_connector->num_modes = 0;
7002 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
7004 struct amdgpu_dm_connector *amdgpu_dm_connector =
7005 to_amdgpu_dm_connector(connector);
7006 struct drm_encoder *encoder;
7007 struct edid *edid = amdgpu_dm_connector->edid;
7009 encoder = amdgpu_dm_connector_to_encoder(connector);
7011 if (!drm_edid_is_valid(edid)) {
7012 amdgpu_dm_connector->num_modes =
7013 drm_add_modes_noedid(connector, 640, 480);
7015 amdgpu_dm_connector_ddc_get_modes(connector, edid);
7016 amdgpu_dm_connector_add_common_modes(encoder, connector);
7018 amdgpu_dm_fbc_init(connector);
7020 return amdgpu_dm_connector->num_modes;
7023 void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
7024 struct amdgpu_dm_connector *aconnector,
7026 struct dc_link *link,
7029 struct amdgpu_device *adev = drm_to_adev(dm->ddev);
7032 * Some of the properties below require access to state, like bpc.
7033 * Allocate some default initial connector state with our reset helper.
7035 if (aconnector->base.funcs->reset)
7036 aconnector->base.funcs->reset(&aconnector->base);
7038 aconnector->connector_id = link_index;
7039 aconnector->dc_link = link;
7040 aconnector->base.interlace_allowed = false;
7041 aconnector->base.doublescan_allowed = false;
7042 aconnector->base.stereo_allowed = false;
7043 aconnector->base.dpms = DRM_MODE_DPMS_OFF;
7044 aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
7045 aconnector->audio_inst = -1;
7046 mutex_init(&aconnector->hpd_lock);
7049 * Configure HPD hot plug support: connector->polled defaults to 0,
7050 * which means HPD hot plug is not supported.
7052 switch (connector_type) {
7053 case DRM_MODE_CONNECTOR_HDMIA:
7054 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
7055 aconnector->base.ycbcr_420_allowed =
7056 link->link_enc->features.hdmi_ycbcr420_supported ? true : false;
7058 case DRM_MODE_CONNECTOR_DisplayPort:
7059 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
7060 aconnector->base.ycbcr_420_allowed =
7061 link->link_enc->features.dp_ycbcr420_supported ? true : false;
7063 case DRM_MODE_CONNECTOR_DVID:
7064 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
7070 drm_object_attach_property(&aconnector->base.base,
7071 dm->ddev->mode_config.scaling_mode_property,
7072 DRM_MODE_SCALE_NONE);
7074 drm_object_attach_property(&aconnector->base.base,
7075 adev->mode_info.underscan_property,
7077 drm_object_attach_property(&aconnector->base.base,
7078 adev->mode_info.underscan_hborder_property,
7080 drm_object_attach_property(&aconnector->base.base,
7081 adev->mode_info.underscan_vborder_property,
7084 if (!aconnector->mst_port)
7085 drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);
7087 /* This defaults to the max in the range, but we want 8bpc for non-edp. */
7088 aconnector->base.state->max_bpc = (connector_type == DRM_MODE_CONNECTOR_eDP) ? 16 : 8;
7089 aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;
7091 if (connector_type == DRM_MODE_CONNECTOR_eDP &&
7092 (dc_is_dmcu_initialized(adev->dm.dc) || adev->dm.dc->ctx->dmub_srv)) {
7093 drm_object_attach_property(&aconnector->base.base,
7094 adev->mode_info.abm_level_property, 0);
7097 if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
7098 connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
7099 connector_type == DRM_MODE_CONNECTOR_eDP) {
7100 drm_object_attach_property(
7101 &aconnector->base.base,
7102 dm->ddev->mode_config.hdr_output_metadata_property, 0);
7104 if (!aconnector->mst_port)
7105 drm_connector_attach_vrr_capable_property(&aconnector->base);
7107 #ifdef CONFIG_DRM_AMD_DC_HDCP
7108 if (adev->dm.hdcp_workqueue)
7109 drm_connector_attach_content_protection_property(&aconnector->base, true);
7114 static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
7115 struct i2c_msg *msgs, int num)
7117 struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
7118 struct ddc_service *ddc_service = i2c->ddc_service;
7119 struct i2c_command cmd;
7123 cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);
7128 cmd.number_of_payloads = num;
7129 cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
7132 for (i = 0; i < num; i++) {
7133 cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
7134 cmd.payloads[i].address = msgs[i].addr;
7135 cmd.payloads[i].length = msgs[i].len;
7136 cmd.payloads[i].data = msgs[i].buf;
7140 ddc_service->ctx->dc,
7141 ddc_service->ddc_pin->hw_info.ddc_channel,
7145 kfree(cmd.payloads);
7149 static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
7151 return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
7154 static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
7155 .master_xfer = amdgpu_dm_i2c_xfer,
7156 .functionality = amdgpu_dm_i2c_func,
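/*
 * Create an i2c adapter whose transfers are serviced by DC: the i2c_msg
 * array is repacked into a DC i2c_command and submitted on the link's DDC
 * channel.
 */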
7159 static struct amdgpu_i2c_adapter *
7160 create_i2c(struct ddc_service *ddc_service,
7164 struct amdgpu_device *adev = ddc_service->ctx->driver_context;
7165 struct amdgpu_i2c_adapter *i2c;
7167 i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
7170 i2c->base.owner = THIS_MODULE;
7171 i2c->base.class = I2C_CLASS_DDC;
7172 i2c->base.dev.parent = &adev->pdev->dev;
7173 i2c->base.algo = &amdgpu_dm_i2c_algo;
7174 snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
7175 i2c_set_adapdata(&i2c->base, i2c);
7176 i2c->ddc_service = ddc_service;
7177 i2c->ddc_service->ddc_pin->hw_info.ddc_channel = link_index;
7184 * Note: this function assumes that dc_link_detect() was called for the
7185 * dc_link which will be represented by this aconnector.
7187 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
7188 struct amdgpu_dm_connector *aconnector,
7189 uint32_t link_index,
7190 struct amdgpu_encoder *aencoder)
7194 struct dc *dc = dm->dc;
7195 struct dc_link *link = dc_get_link_at_index(dc, link_index);
7196 struct amdgpu_i2c_adapter *i2c;
7198 link->priv = aconnector;
7200 DRM_DEBUG_DRIVER("%s()\n", __func__);
7202 i2c = create_i2c(link->ddc, link->link_index, &res);
7204 DRM_ERROR("Failed to create i2c adapter data\n");
7208 aconnector->i2c = i2c;
7209 res = i2c_add_adapter(&i2c->base);
7212 DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
7216 connector_type = to_drm_connector_type(link->connector_signal);
7218 res = drm_connector_init_with_ddc(
7221 &amdgpu_dm_connector_funcs,
7226 DRM_ERROR("connector_init failed\n");
7227 aconnector->connector_id = -1;
7231 drm_connector_helper_add(
7233 &amdgpu_dm_connector_helper_funcs);
7235 amdgpu_dm_connector_init_helper(
7242 drm_connector_attach_encoder(
7243 &aconnector->base, &aencoder->base);
7245 if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
7246 || connector_type == DRM_MODE_CONNECTOR_eDP)
7247 amdgpu_dm_initialize_dp_connector(dm, aconnector, link->link_index);
7252 aconnector->i2c = NULL;
7257 int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
7259 switch (adev->mode_info.num_crtc) {
7276 static int amdgpu_dm_encoder_init(struct drm_device *dev,
7277 struct amdgpu_encoder *aencoder,
7278 uint32_t link_index)
7280 struct amdgpu_device *adev = drm_to_adev(dev);
7282 int res = drm_encoder_init(dev,
7284 &amdgpu_dm_encoder_funcs,
7285 DRM_MODE_ENCODER_TMDS,
7288 aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);
7291 aencoder->encoder_id = link_index;
7293 aencoder->encoder_id = -1;
7295 drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);
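/*
 * Enable or disable pageflip interrupt delivery and DRM vblank handling
 * for this CRTC as it is switched on or off.
 */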
7300 static void manage_dm_interrupts(struct amdgpu_device *adev,
7301 struct amdgpu_crtc *acrtc,
7305 * We have no guarantee that the frontend index maps to the same
7306 * backend index - some even map to more than one.
7308 * TODO: Use a different interrupt or check DC itself for the mapping.
7311 amdgpu_display_crtc_idx_to_irq_type(
7316 drm_crtc_vblank_on(&acrtc->base);
7319 &adev->pageflip_irq,
7325 &adev->pageflip_irq,
7327 drm_crtc_vblank_off(&acrtc->base);
7331 static void dm_update_pflip_irq_state(struct amdgpu_device *adev,
7332 struct amdgpu_crtc *acrtc)
7335 amdgpu_display_crtc_idx_to_irq_type(adev, acrtc->crtc_id);
7338 * This reads the current state for the IRQ and forcibly reapplies
7339 * the setting to hardware.
7341 amdgpu_irq_update(adev, &adev->pageflip_irq, irq_type);
7345 is_scaling_state_different(const struct dm_connector_state *dm_state,
7346 const struct dm_connector_state *old_dm_state)
7348 if (dm_state->scaling != old_dm_state->scaling)
7350 if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
7351 if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
7353 } else if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
7354 if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
7356 } else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
7357 dm_state->underscan_vborder != old_dm_state->underscan_vborder)
7362 #ifdef CONFIG_DRM_AMD_DC_HDCP
7363 static bool is_content_protection_different(struct drm_connector_state *state,
7364 const struct drm_connector_state *old_state,
7365 const struct drm_connector *connector, struct hdcp_workqueue *hdcp_w)
7367 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7368 struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
7370 /* Handle: Type0/1 change */
7371 if (old_state->hdcp_content_type != state->hdcp_content_type &&
7372 state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
7373 state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
7377 /* CP is being re-enabled, ignore this.
7379 * Handles: ENABLED -> DESIRED
7381 if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
7382 state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
7383 state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
7387 /* S3 resume case, since old state will always be 0 (UNDESIRED) and the restored state will be ENABLED
7389 * Handles: UNDESIRED -> ENABLED
7391 if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
7392 state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
7393 state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
7395 /* Check if something is connected/enabled; otherwise we would start HDCP with nothing connected/enabled
7396 * (hot-plug, headless S3, DPMS).
7398 * Handles: DESIRED -> DESIRED (Special case)
7400 if (dm_con_state->update_hdcp && state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED &&
7401 connector->dpms == DRM_MODE_DPMS_ON && aconnector->dc_sink != NULL) {
7402 dm_con_state->update_hdcp = false;
7407 * Handles: UNDESIRED -> UNDESIRED
7408 * DESIRED -> DESIRED
7409 * ENABLED -> ENABLED
7411 if (old_state->content_protection == state->content_protection)
7415 * Handles: UNDESIRED -> DESIRED
7416 * DESIRED -> UNDESIRED
7417 * ENABLED -> UNDESIRED
7419 if (state->content_protection != DRM_MODE_CONTENT_PROTECTION_ENABLED)
7423 * Handles: DESIRED -> ENABLED
7429 static void remove_stream(struct amdgpu_device *adev,
7430 struct amdgpu_crtc *acrtc,
7431 struct dc_stream_state *stream)
7433 /* this is the update mode case */
7435 acrtc->otg_inst = -1;
7436 acrtc->enabled = false;
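/*
 * Translate the cursor plane state into a DC cursor position. Cursors that
 * hang off the top/left edge are handled by clamping the position to zero
 * and moving the hotspot (xorigin/yorigin) into the cursor image instead.
 */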
7439 static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
7440 struct dc_cursor_position *position)
7442 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
7444 int xorigin = 0, yorigin = 0;
7446 position->enable = false;
7450 if (!crtc || !plane->state->fb)
7453 if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) ||
7454 (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) {
7455 DRM_ERROR("%s: bad cursor width or height %d x %d\n",
7457 plane->state->crtc_w,
7458 plane->state->crtc_h);
7462 x = plane->state->crtc_x;
7463 y = plane->state->crtc_y;
7465 if (x <= -amdgpu_crtc->max_cursor_width ||
7466 y <= -amdgpu_crtc->max_cursor_height)
7470 xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
7474 yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
7477 position->enable = true;
7478 position->translate_by_source = true;
7481 position->x_hotspot = xorigin;
7482 position->y_hotspot = yorigin;
7487 static void handle_cursor_update(struct drm_plane *plane,
7488 struct drm_plane_state *old_plane_state)
7490 struct amdgpu_device *adev = drm_to_adev(plane->dev);
7491 struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb);
7492 struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
7493 struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
7494 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
7495 uint64_t address = afb ? afb->address : 0;
7496 struct dc_cursor_position position;
7497 struct dc_cursor_attributes attributes;
7500 if (!plane->state->fb && !old_plane_state->fb)
7503 DRM_DEBUG_DRIVER("%s: crtc_id=%d with size %d to %d\n",
7505 amdgpu_crtc->crtc_id,
7506 plane->state->crtc_w,
7507 plane->state->crtc_h);
7509 ret = get_cursor_position(plane, crtc, &position);
7513 if (!position.enable) {
7514 /* turn off cursor */
7515 if (crtc_state && crtc_state->stream) {
7516 mutex_lock(&adev->dm.dc_lock);
7517 dc_stream_set_cursor_position(crtc_state->stream,
7519 mutex_unlock(&adev->dm.dc_lock);
7524 amdgpu_crtc->cursor_width = plane->state->crtc_w;
7525 amdgpu_crtc->cursor_height = plane->state->crtc_h;
7527 memset(&attributes, 0, sizeof(attributes));
7528 attributes.address.high_part = upper_32_bits(address);
7529 attributes.address.low_part = lower_32_bits(address);
7530 attributes.width = plane->state->crtc_w;
7531 attributes.height = plane->state->crtc_h;
7532 attributes.color_format = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
7533 attributes.rotation_angle = 0;
7534 attributes.attribute_flags.value = 0;
7536 attributes.pitch = afb->base.pitches[0] / afb->base.format->cpp[0];
7538 if (crtc_state->stream) {
7539 mutex_lock(&adev->dm.dc_lock);
7540 if (!dc_stream_set_cursor_attributes(crtc_state->stream,
7542 DRM_ERROR("DC failed to set cursor attributes\n");
7544 if (!dc_stream_set_cursor_position(crtc_state->stream,
7546 DRM_ERROR("DC failed to set cursor position\n");
7547 mutex_unlock(&adev->dm.dc_lock);
7551 static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
7554 assert_spin_locked(&acrtc->base.dev->event_lock);
7555 WARN_ON(acrtc->event);
7557 acrtc->event = acrtc->base.state->event;
7559 /* Set the flip status */
7560 acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
7562 /* Mark this event as consumed */
7563 acrtc->base.state->event = NULL;
7565 DRM_DEBUG_DRIVER("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
7569 static void update_freesync_state_on_stream(
7570 struct amdgpu_display_manager *dm,
7571 struct dm_crtc_state *new_crtc_state,
7572 struct dc_stream_state *new_stream,
7573 struct dc_plane_state *surface,
7574 u32 flip_timestamp_in_us)
7576 struct mod_vrr_params vrr_params;
7577 struct dc_info_packet vrr_infopacket = {0};
7578 struct amdgpu_device *adev = dm->adev;
7579 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
7580 unsigned long flags;
7586 * TODO: Determine why min/max totals and vrefresh can be 0 here.
7587 * For now it's sufficient to just guard against these conditions.
7590 if (!new_stream->timing.h_total || !new_stream->timing.v_total)
7593 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
7594 vrr_params = acrtc->dm_irq_params.vrr_params;
7597 mod_freesync_handle_preflip(
7598 dm->freesync_module,
7601 flip_timestamp_in_us,
7604 if (adev->family < AMDGPU_FAMILY_AI &&
7605 amdgpu_dm_vrr_active(new_crtc_state)) {
7606 mod_freesync_handle_v_update(dm->freesync_module,
7607 new_stream, &vrr_params);
7609 /* Need to call this before the frame ends. */
7610 dc_stream_adjust_vmin_vmax(dm->dc,
7611 new_crtc_state->stream,
7612 &vrr_params.adjust);
7616 mod_freesync_build_vrr_infopacket(
7617 dm->freesync_module,
7621 TRANSFER_FUNC_UNKNOWN,
7624 new_crtc_state->freesync_timing_changed |=
7625 (memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
7627 sizeof(vrr_params.adjust)) != 0);
7629 new_crtc_state->freesync_vrr_info_changed |=
7630 (memcmp(&new_crtc_state->vrr_infopacket,
7632 sizeof(vrr_infopacket)) != 0);
7634 acrtc->dm_irq_params.vrr_params = vrr_params;
7635 new_crtc_state->vrr_infopacket = vrr_infopacket;
7637 new_stream->adjust = acrtc->dm_irq_params.vrr_params.adjust;
7638 new_stream->vrr_infopacket = vrr_infopacket;
7640 if (new_crtc_state->freesync_vrr_info_changed)
7641 DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d",
7642 new_crtc_state->base.crtc->base.id,
7643 (int)new_crtc_state->base.vrr_enabled,
7644 (int)vrr_params.state);
7646 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
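/*
 * Recompute the VRR/freesync parameters for the stream and copy them,
 * together with the freesync config and active plane count, into
 * dm_irq_params under the event lock so the interrupt handlers see a
 * consistent snapshot.
 */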
7649 static void update_stream_irq_parameters(
7650 struct amdgpu_display_manager *dm,
7651 struct dm_crtc_state *new_crtc_state)
7653 struct dc_stream_state *new_stream = new_crtc_state->stream;
7654 struct mod_vrr_params vrr_params;
7655 struct mod_freesync_config config = new_crtc_state->freesync_config;
7656 struct amdgpu_device *adev = dm->adev;
7657 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
7658 unsigned long flags;
7664 * TODO: Determine why min/max totals and vrefresh can be 0 here.
7665 * For now it's sufficient to just guard against these conditions.
7667 if (!new_stream->timing.h_total || !new_stream->timing.v_total)
7670 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
7671 vrr_params = acrtc->dm_irq_params.vrr_params;
7673 if (new_crtc_state->vrr_supported &&
7674 config.min_refresh_in_uhz &&
7675 config.max_refresh_in_uhz) {
7676 config.state = new_crtc_state->base.vrr_enabled ?
7677 VRR_STATE_ACTIVE_VARIABLE :
7680 config.state = VRR_STATE_UNSUPPORTED;
7683 mod_freesync_build_vrr_params(dm->freesync_module,
7685 &config, &vrr_params);
7687 new_crtc_state->freesync_timing_changed |=
7688 (memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
7689 &vrr_params.adjust, sizeof(vrr_params.adjust)) != 0);
7691 new_crtc_state->freesync_config = config;
7692 /* Copy state for access from DM IRQ handler */
7693 acrtc->dm_irq_params.freesync_config = config;
7694 acrtc->dm_irq_params.active_planes = new_crtc_state->active_planes;
7695 acrtc->dm_irq_params.vrr_params = vrr_params;
7696 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
7699 static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
7700 struct dm_crtc_state *new_state)
7702 bool old_vrr_active = amdgpu_dm_vrr_active(old_state);
7703 bool new_vrr_active = amdgpu_dm_vrr_active(new_state);
7705 if (!old_vrr_active && new_vrr_active) {
7706 /* Transition VRR inactive -> active:
7707 * While VRR is active, we must not disable vblank irq, as a
7708 * reenable after disable would compute bogus vblank/pflip
7709 * timestamps if it likely happened inside display front-porch.
7711 * We also need vupdate irq for the actual core vblank handling
7714 dm_set_vupdate_irq(new_state->base.crtc, true);
7715 drm_crtc_vblank_get(new_state->base.crtc);
7716 DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
7717 __func__, new_state->base.crtc->base.id);
7718 } else if (old_vrr_active && !new_vrr_active) {
7719 /* Transition VRR active -> inactive:
7720 * Allow vblank irq disable again for fixed refresh rate.
7722 dm_set_vupdate_irq(new_state->base.crtc, false);
7723 drm_crtc_vblank_put(new_state->base.crtc);
7724 DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
7725 __func__, new_state->base.crtc->base.id);
7729 static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state)
7731 struct drm_plane *plane;
7732 struct drm_plane_state *old_plane_state, *new_plane_state;
7736 * TODO: Make this per-stream so we don't issue redundant updates for
7737 * commits with multiple streams.
7739 for_each_oldnew_plane_in_state(state, plane, old_plane_state,
7741 if (plane->type == DRM_PLANE_TYPE_CURSOR)
7742 handle_cursor_update(plane, old_plane_state);
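/*
 * Build a dc_surface_update bundle for every non-cursor plane on the CRTC
 * (scaling info, plane info, flip address), wait for fences on flipped
 * framebuffers, throttle against the target vblank, and then commit the
 * bundle together with any stream updates to DC under dc_lock.
 */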
7745 static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
7746 struct dc_state *dc_state,
7747 struct drm_device *dev,
7748 struct amdgpu_display_manager *dm,
7749 struct drm_crtc *pcrtc,
7750 bool wait_for_vblank)
7753 uint64_t timestamp_ns;
7754 struct drm_plane *plane;
7755 struct drm_plane_state *old_plane_state, *new_plane_state;
7756 struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
7757 struct drm_crtc_state *new_pcrtc_state =
7758 drm_atomic_get_new_crtc_state(state, pcrtc);
7759 struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
7760 struct dm_crtc_state *dm_old_crtc_state =
7761 to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
7762 int planes_count = 0, vpos, hpos;
7764 unsigned long flags;
7765 struct amdgpu_bo *abo;
7766 uint32_t target_vblank, last_flip_vblank;
7767 bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
7768 bool pflip_present = false;
7770 struct dc_surface_update surface_updates[MAX_SURFACES];
7771 struct dc_plane_info plane_infos[MAX_SURFACES];
7772 struct dc_scaling_info scaling_infos[MAX_SURFACES];
7773 struct dc_flip_addrs flip_addrs[MAX_SURFACES];
7774 struct dc_stream_update stream_update;
7777 bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
7780 dm_error("Failed to allocate update bundle\n");
7785 * Disable the cursor first if we're disabling all the planes.
7786 * It'll remain on the screen after the planes are re-enabled
7789 if (acrtc_state->active_planes == 0)
7790 amdgpu_dm_commit_cursors(state);
7792 /* update planes when needed */
7793 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
7794 struct drm_crtc *crtc = new_plane_state->crtc;
7795 struct drm_crtc_state *new_crtc_state;
7796 struct drm_framebuffer *fb = new_plane_state->fb;
7797 struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)fb;
7798 bool plane_needs_flip;
7799 struct dc_plane_state *dc_plane;
7800 struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);
7802 /* Cursor plane is handled after stream updates */
7803 if (plane->type == DRM_PLANE_TYPE_CURSOR)
7806 if (!fb || !crtc || pcrtc != crtc)
7809 new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
7810 if (!new_crtc_state->active)
7813 dc_plane = dm_new_plane_state->dc_state;
7815 bundle->surface_updates[planes_count].surface = dc_plane;
7816 if (new_pcrtc_state->color_mgmt_changed) {
7817 bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
7818 bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func;
7819 bundle->surface_updates[planes_count].gamut_remap_matrix = &dc_plane->gamut_remap_matrix;
7822 fill_dc_scaling_info(new_plane_state,
7823 &bundle->scaling_infos[planes_count]);
7825 bundle->surface_updates[planes_count].scaling_info =
7826 &bundle->scaling_infos[planes_count];
7828 plane_needs_flip = old_plane_state->fb && new_plane_state->fb;
7830 pflip_present = pflip_present || plane_needs_flip;
7832 if (!plane_needs_flip) {
7837 abo = gem_to_amdgpu_bo(fb->obj[0]);
7840 * Wait for all fences on this FB. Do limited wait to avoid
7841 * deadlock during GPU reset when this fence will not signal
7842 * but we hold reservation lock for the BO.
7844 r = dma_resv_wait_timeout_rcu(abo->tbo.base.resv, true,
7846 msecs_to_jiffies(5000));
7847 if (unlikely(r <= 0))
7848 DRM_ERROR("Waiting for fences timed out!");
7850 fill_dc_plane_info_and_addr(
7851 dm->adev, new_plane_state,
7853 &bundle->plane_infos[planes_count],
7854 &bundle->flip_addrs[planes_count].address,
7855 afb->tmz_surface, false);
7857 DRM_DEBUG_DRIVER("plane: id=%d dcc_en=%d\n",
7858 new_plane_state->plane->index,
7859 bundle->plane_infos[planes_count].dcc.enable);
7861 bundle->surface_updates[planes_count].plane_info =
7862 &bundle->plane_infos[planes_count];
7865 * Only allow immediate flips for fast updates that don't
7866 * change FB pitch, DCC state, rotation or mirroring.
7868 bundle->flip_addrs[planes_count].flip_immediate =
7869 crtc->state->async_flip &&
7870 acrtc_state->update_type == UPDATE_TYPE_FAST;
7872 timestamp_ns = ktime_get_ns();
7873 bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
7874 bundle->surface_updates[planes_count].flip_addr = &bundle->flip_addrs[planes_count];
7875 bundle->surface_updates[planes_count].surface = dc_plane;
7877 if (!bundle->surface_updates[planes_count].surface) {
7878 DRM_ERROR("No surface for CRTC: id=%d\n",
7879 acrtc_attach->crtc_id);
7883 if (plane == pcrtc->primary)
7884 update_freesync_state_on_stream(
7887 acrtc_state->stream,
7889 bundle->flip_addrs[planes_count].flip_timestamp_in_us);
7891 DRM_DEBUG_DRIVER("%s Flipping to hi: 0x%x, low: 0x%x\n",
7893 bundle->flip_addrs[planes_count].address.grph.addr.high_part,
7894 bundle->flip_addrs[planes_count].address.grph.addr.low_part);
7900 if (pflip_present) {
7902 /* Use old throttling in non-vrr fixed refresh rate mode
7903 * to keep flip scheduling based on target vblank counts
7904 * working in a backwards compatible way, e.g., for
7905 * clients using the GLX_OML_sync_control extension or
7906 * DRI3/Present extension with defined target_msc.
7908 last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc);
7911 /* For variable refresh rate mode only:
7912 * Get vblank of last completed flip to avoid > 1 vrr
7913 * flips per video frame by use of throttling, but allow
7914 * flip programming anywhere in the possibly large
7915 * variable vrr vblank interval for fine-grained flip
7916 * timing control and more opportunity to avoid stutter
7917 * on late submission of flips.
7919 spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
7920 last_flip_vblank = acrtc_attach->dm_irq_params.last_flip_vblank;
7921 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
7924 target_vblank = last_flip_vblank + wait_for_vblank;
7927 * Wait until we're out of the vertical blank period before the one
7928 * targeted by the flip
7930 while ((acrtc_attach->enabled &&
7931 (amdgpu_display_get_crtc_scanoutpos(dm->ddev, acrtc_attach->crtc_id,
7932 0, &vpos, &hpos, NULL,
7933 NULL, &pcrtc->hwmode)
7934 & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
7935 (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
7936 (int)(target_vblank -
7937 amdgpu_get_vblank_counter_kms(pcrtc)) > 0)) {
7938 usleep_range(1000, 1100);
7942 * Prepare the flip event for the pageflip interrupt to handle.
7944 * This only works in the case where we've already turned on the
7945 * appropriate hardware blocks (e.g. HUBP), so in the transition case
7946 * from 0 -> n planes we have to skip a hardware generated event
7947 * and rely on sending it from software.
7949 if (acrtc_attach->base.state->event &&
7950 acrtc_state->active_planes > 0) {
7951 drm_crtc_vblank_get(pcrtc);
7953 spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
7955 WARN_ON(acrtc_attach->pflip_status != AMDGPU_FLIP_NONE);
7956 prepare_flip_isr(acrtc_attach);
7958 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
7961 if (acrtc_state->stream) {
7962 if (acrtc_state->freesync_vrr_info_changed)
7963 bundle->stream_update.vrr_infopacket =
7964 &acrtc_state->stream->vrr_infopacket;
7968 /* Update the planes if changed or disable if we don't have any. */
7969 if ((planes_count || acrtc_state->active_planes == 0) &&
7970 acrtc_state->stream) {
7971 bundle->stream_update.stream = acrtc_state->stream;
7972 if (new_pcrtc_state->mode_changed) {
7973 bundle->stream_update.src = acrtc_state->stream->src;
7974 bundle->stream_update.dst = acrtc_state->stream->dst;
7977 if (new_pcrtc_state->color_mgmt_changed) {
7979 * TODO: This isn't fully correct since we've actually
7980 * already modified the stream in place.
7982 bundle->stream_update.gamut_remap =
7983 &acrtc_state->stream->gamut_remap_matrix;
7984 bundle->stream_update.output_csc_transform =
7985 &acrtc_state->stream->csc_color_matrix;
7986 bundle->stream_update.out_transfer_func =
7987 acrtc_state->stream->out_transfer_func;
7990 acrtc_state->stream->abm_level = acrtc_state->abm_level;
7991 if (acrtc_state->abm_level != dm_old_crtc_state->abm_level)
7992 bundle->stream_update.abm_level = &acrtc_state->abm_level;
7995 * If FreeSync state on the stream has changed then we need to
7996 * re-adjust the min/max bounds now that DC doesn't handle this
7997 * as part of commit.
7999 if (amdgpu_dm_vrr_active(dm_old_crtc_state) !=
8000 amdgpu_dm_vrr_active(acrtc_state)) {
8001 spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
8002 dc_stream_adjust_vmin_vmax(
8003 dm->dc, acrtc_state->stream,
8004 &acrtc_attach->dm_irq_params.vrr_params.adjust);
8005 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
8007 mutex_lock(&dm->dc_lock);
8008 if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
8009 acrtc_state->stream->link->psr_settings.psr_allow_active)
8010 amdgpu_dm_psr_disable(acrtc_state->stream);
8012 dc_commit_updates_for_stream(dm->dc,
8013 bundle->surface_updates,
8015 acrtc_state->stream,
8016 &bundle->stream_update,
8020 * Enable or disable the interrupts on the backend.
8022 * Most pipes are put into power gating when unused.
8024 * When power gating is enabled on a pipe we lose the
8025 * interrupt enablement state when power gating is disabled.
8027 * So we need to update the IRQ control state in hardware
8028 * whenever the pipe turns on (since it could be previously
8029 * power gated) or off (since some pipes can't be power gated
8032 if (dm_old_crtc_state->active_planes != acrtc_state->active_planes)
8033 dm_update_pflip_irq_state(drm_to_adev(dev),
8036 if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
8037 acrtc_state->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED &&
8038 !acrtc_state->stream->link->psr_settings.psr_feature_enabled)
8039 amdgpu_dm_link_setup_psr(acrtc_state->stream);
8040 else if ((acrtc_state->update_type == UPDATE_TYPE_FAST) &&
8041 acrtc_state->stream->link->psr_settings.psr_feature_enabled &&
8042 !acrtc_state->stream->link->psr_settings.psr_allow_active) {
8043 amdgpu_dm_psr_enable(acrtc_state->stream);
8046 mutex_unlock(&dm->dc_lock);
8050 * Update cursor state *after* programming all the planes.
8051 * This avoids redundant programming in the case where we're going
8052 * to be disabling a single plane - those pipes are being disabled.
8054 if (acrtc_state->active_planes)
8055 amdgpu_dm_commit_cursors(state);
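/*
 * Notify the audio component about ELD changes: connectors whose CRTC
 * changed or is undergoing a modeset are first reported as removed
 * (audio_inst reset to -1), then connectors with an active stream are
 * reported with the audio instance from their stream status.
 */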
8061 static void amdgpu_dm_commit_audio(struct drm_device *dev,
8062 struct drm_atomic_state *state)
8064 struct amdgpu_device *adev = drm_to_adev(dev);
8065 struct amdgpu_dm_connector *aconnector;
8066 struct drm_connector *connector;
8067 struct drm_connector_state *old_con_state, *new_con_state;
8068 struct drm_crtc_state *new_crtc_state;
8069 struct dm_crtc_state *new_dm_crtc_state;
8070 const struct dc_stream_status *status;
8073 /* Notify device removals. */
8074 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8075 if (old_con_state->crtc != new_con_state->crtc) {
8076 /* CRTC changes require notification. */
8080 if (!new_con_state->crtc)
8083 new_crtc_state = drm_atomic_get_new_crtc_state(
8084 state, new_con_state->crtc);
8086 if (!new_crtc_state)
8089 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
8093 aconnector = to_amdgpu_dm_connector(connector);
8095 mutex_lock(&adev->dm.audio_lock);
8096 inst = aconnector->audio_inst;
8097 aconnector->audio_inst = -1;
8098 mutex_unlock(&adev->dm.audio_lock);
8100 amdgpu_dm_audio_eld_notify(adev, inst);
8103 /* Notify audio device additions. */
8104 for_each_new_connector_in_state(state, connector, new_con_state, i) {
8105 if (!new_con_state->crtc)
8108 new_crtc_state = drm_atomic_get_new_crtc_state(
8109 state, new_con_state->crtc);
8111 if (!new_crtc_state)
8114 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
8117 new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
8118 if (!new_dm_crtc_state->stream)
8121 status = dc_stream_get_status(new_dm_crtc_state->stream);
8125 aconnector = to_amdgpu_dm_connector(connector);
8127 mutex_lock(&adev->dm.audio_lock);
8128 inst = status->audio_inst;
8129 aconnector->audio_inst = inst;
8130 mutex_unlock(&adev->dm.audio_lock);
8132 amdgpu_dm_audio_eld_notify(adev, inst);
8137 * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
8138 * @crtc_state: the DRM CRTC state
8139 * @stream_state: the DC stream state.
8141 * Copy the mirrored transient state flags from DRM, to DC. It is used to bring
8142 * a dc_stream_state's flags in sync with a drm_crtc_state's flags.
8144 static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
8145 struct dc_stream_state *stream_state)
8147 stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
8151 * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
8152 * @state: The atomic state to commit
8154 * This will tell DC to commit the constructed DC state from atomic_check,
8155 * programming the hardware. Any failures here implies a hardware failure, since
8156 * atomic check should have filtered anything non-kosher.
8158 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
8160 struct drm_device *dev = state->dev;
8161 struct amdgpu_device *adev = drm_to_adev(dev);
8162 struct amdgpu_display_manager *dm = &adev->dm;
8163 struct dm_atomic_state *dm_state;
8164 struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
8166 struct drm_crtc *crtc;
8167 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
8168 unsigned long flags;
8169 bool wait_for_vblank = true;
8170 struct drm_connector *connector;
8171 struct drm_connector_state *old_con_state, *new_con_state;
8172 struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
8173 int crtc_disable_count = 0;
8174 bool mode_set_reset_required = false;
8176 trace_amdgpu_dm_atomic_commit_tail_begin(state);
8178 drm_atomic_helper_update_legacy_modeset_state(dev, state);
8180 dm_state = dm_atomic_get_new_state(state);
8181 if (dm_state && dm_state->context) {
8182 dc_state = dm_state->context;
8184 /* No state changes, retain current state. */
8185 dc_state_temp = dc_create_state(dm->dc);
8186 ASSERT(dc_state_temp);
8187 dc_state = dc_state_temp;
8188 dc_resource_state_copy_construct_current(dm->dc, dc_state);
8191 for_each_oldnew_crtc_in_state (state, crtc, old_crtc_state,
8192 new_crtc_state, i) {
8193 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8195 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8197 if (old_crtc_state->active &&
8198 (!new_crtc_state->active ||
8199 drm_atomic_crtc_needs_modeset(new_crtc_state))) {
8200 manage_dm_interrupts(adev, acrtc, false);
8201 dc_stream_release(dm_old_crtc_state->stream);
8205 drm_atomic_helper_calc_timestamping_constants(state);
8207 /* update changed items */
8208 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8209 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8211 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8212 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8215 "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
8216 "planes_changed:%d, mode_changed:%d,active_changed:%d,"
8217 "connectors_changed:%d\n",
8219 new_crtc_state->enable,
8220 new_crtc_state->active,
8221 new_crtc_state->planes_changed,
8222 new_crtc_state->mode_changed,
8223 new_crtc_state->active_changed,
8224 new_crtc_state->connectors_changed);
8226 /* Disable cursor if disabling crtc */
8227 if (old_crtc_state->active && !new_crtc_state->active) {
8228 struct dc_cursor_position position;
8230 memset(&position, 0, sizeof(position));
8231 mutex_lock(&dm->dc_lock);
8232 dc_stream_set_cursor_position(dm_old_crtc_state->stream, &position);
8233 mutex_unlock(&dm->dc_lock);
8236 /* Copy all transient state flags into dc state */
8237 if (dm_new_crtc_state->stream) {
8238 amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base,
8239 dm_new_crtc_state->stream);
8242 /* Handles the headless hotplug case, updating new_state and
8243 * aconnector as needed
8246 if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {
8248 DRM_DEBUG_DRIVER("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);
8250 if (!dm_new_crtc_state->stream) {
8252 * This can happen because of issues with
8253 * userspace notification delivery.
8254 * In this case userspace tries to set a mode on
8255 * a display which is in fact disconnected.
8256 * dc_sink is NULL on the aconnector in this case.
8257 * We expect a mode reset to come soon.
8259 * This can also happen when an unplug is done
8260 * during the resume sequence.
8262 * In either case, we want to pretend we still
8263 * have a sink to keep the pipe running so that
8264 * HW state stays consistent with the SW state
8266 DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
8267 __func__, acrtc->base.base.id);
8271 if (dm_old_crtc_state->stream)
8272 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
8274 pm_runtime_get_noresume(dev->dev);
8276 acrtc->enabled = true;
8277 acrtc->hw_mode = new_crtc_state->mode;
8278 crtc->hwmode = new_crtc_state->mode;
8279 mode_set_reset_required = true;
8280 } else if (modereset_required(new_crtc_state)) {
8281 DRM_DEBUG_DRIVER("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
8282 /* i.e. reset mode */
8283 if (dm_old_crtc_state->stream)
8284 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
8285 mode_set_reset_required = true;
8287 } /* for_each_crtc_in_state() */
8290 /* If there was a mode set or reset, disable eDP PSR */
8291 if (mode_set_reset_required)
8292 amdgpu_dm_psr_disable_all(dm);
8294 dm_enable_per_frame_crtc_master_sync(dc_state);
8295 mutex_lock(&dm->dc_lock);
8296 WARN_ON(!dc_commit_state(dm->dc, dc_state));
8297 mutex_unlock(&dm->dc_lock);
8300 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
8301 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8303 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8305 if (dm_new_crtc_state->stream != NULL) {
8306 const struct dc_stream_status *status =
8307 dc_stream_get_status(dm_new_crtc_state->stream);
8310 status = dc_stream_get_status_from_state(dc_state,
8311 dm_new_crtc_state->stream);
8313 DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc);
8315 acrtc->otg_inst = status->primary_otg_inst;
8318 #ifdef CONFIG_DRM_AMD_DC_HDCP
8319 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8320 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
8321 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
8322 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
8324 new_crtc_state = NULL;
8327 new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
8329 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8331 if (dm_new_crtc_state && dm_new_crtc_state->stream == NULL &&
8332 connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
8333 hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
8334 new_con_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
8335 dm_new_con_state->update_hdcp = true;
8339 if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue))
8340 hdcp_update_display(
8341 adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector,
8342 new_con_state->hdcp_content_type,
8343 new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED ? true
8348 /* Handle connector state changes */
8349 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8350 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
8351 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
8352 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
8353 struct dc_surface_update dummy_updates[MAX_SURFACES];
8354 struct dc_stream_update stream_update;
8355 struct dc_info_packet hdr_packet;
8356 struct dc_stream_status *status = NULL;
8357 bool abm_changed, hdr_changed, scaling_changed;
8359 memset(&dummy_updates, 0, sizeof(dummy_updates));
8360 memset(&stream_update, 0, sizeof(stream_update));
8363 new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
8364 old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
8367 /* Skip any modesets/resets */
8368 if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
8371 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8372 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8374 scaling_changed = is_scaling_state_different(dm_new_con_state,
8377 abm_changed = dm_new_crtc_state->abm_level !=
8378 dm_old_crtc_state->abm_level;
8381 is_hdr_metadata_different(old_con_state, new_con_state);
8383 if (!scaling_changed && !abm_changed && !hdr_changed)
8386 stream_update.stream = dm_new_crtc_state->stream;
8387 if (scaling_changed) {
8388 update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
8389 dm_new_con_state, dm_new_crtc_state->stream);
8391 stream_update.src = dm_new_crtc_state->stream->src;
8392 stream_update.dst = dm_new_crtc_state->stream->dst;
8396 dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level;
8398 stream_update.abm_level = &dm_new_crtc_state->abm_level;
8402 fill_hdr_info_packet(new_con_state, &hdr_packet);
8403 stream_update.hdr_static_metadata = &hdr_packet;
8406 status = dc_stream_get_status(dm_new_crtc_state->stream);
8408 WARN_ON(!status->plane_count);
8411 * TODO: DC refuses to perform stream updates without a dc_surface_update.
8412 * Here we create an empty update on each plane.
8413 * To fix this, DC should permit updating only stream properties.
8415 for (j = 0; j < status->plane_count; j++)
8416 dummy_updates[j].surface = status->plane_states[0];
8419 mutex_lock(&dm->dc_lock);
8420 dc_commit_updates_for_stream(dm->dc,
8422 status->plane_count,
8423 dm_new_crtc_state->stream,
8426 mutex_unlock(&dm->dc_lock);
8429 /* Count number of newly disabled CRTCs for dropping PM refs later. */
8430 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
8431 new_crtc_state, i) {
8432 if (old_crtc_state->active && !new_crtc_state->active)
8433 crtc_disable_count++;
8435 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8436 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8438 /* Update the freesync config on the crtc state and the params used by the irq handlers */
8439 update_stream_irq_parameters(dm, dm_new_crtc_state);
8441 /* Handle vrr on->off / off->on transitions */
8442 amdgpu_dm_handle_vrr_transition(dm_old_crtc_state,
8447 * Enable interrupts for CRTCs that are newly enabled or went through
8448 * a modeset. It was intentionally deferred until after the front end
8449 * state was modified to wait until the OTG was on and so the IRQ
8450 * handlers didn't access stale or invalid state.
8452 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8453 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8454 bool configure_crc = false;
8456 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8458 if (new_crtc_state->active &&
8459 (!old_crtc_state->active ||
8460 drm_atomic_crtc_needs_modeset(new_crtc_state))) {
8461 dc_stream_retain(dm_new_crtc_state->stream);
8462 acrtc->dm_irq_params.stream = dm_new_crtc_state->stream;
8463 manage_dm_interrupts(adev, acrtc, true);
8465 if (IS_ENABLED(CONFIG_DEBUG_FS) && new_crtc_state->active &&
8466 amdgpu_dm_is_valid_crc_source(dm_new_crtc_state->crc_src)) {
8468 * Frontend may have changed so reapply the CRC capture
8469 * settings for the stream.
8471 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8472 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8474 if (amdgpu_dm_crc_window_is_default(dm_new_crtc_state)) {
8475 if (!old_crtc_state->active || drm_atomic_crtc_needs_modeset(new_crtc_state))
8476 configure_crc = true;
8478 if (amdgpu_dm_crc_window_changed(dm_new_crtc_state, dm_old_crtc_state))
8479 configure_crc = true;
8483 amdgpu_dm_crtc_configure_crc_source(
8484 crtc, dm_new_crtc_state, dm_new_crtc_state->crc_src);
8488 for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
8489 if (new_crtc_state->async_flip)
8490 wait_for_vblank = false;
8492 /* update planes when needed per crtc */
8493 for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
8494 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8496 if (dm_new_crtc_state->stream)
8497 amdgpu_dm_commit_planes(state, dc_state, dev,
8498 dm, crtc, wait_for_vblank);
8501 /* Update audio instances for each connector. */
8502 amdgpu_dm_commit_audio(dev, state);
8505 * Send a vblank event for all events not handled in the flip path, and
8506 * mark the consumed event for drm_atomic_helper_commit_hw_done
8508 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
8509 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
8511 if (new_crtc_state->event)
8512 drm_send_event_locked(dev, &new_crtc_state->event->base);
8514 new_crtc_state->event = NULL;
8516 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
8518 /* Signal HW programming completion */
8519 drm_atomic_helper_commit_hw_done(state);
8521 if (wait_for_vblank)
8522 drm_atomic_helper_wait_for_flip_done(dev, state);
8524 drm_atomic_helper_cleanup_planes(dev, state);
8526 /* Return the stolen VGA memory to VRAM */
8527 if (!adev->mman.keep_stolen_vga_memory)
8528 amdgpu_bo_free_kernel(&adev->mman.stolen_vga_memory, NULL, NULL);
8529 amdgpu_bo_free_kernel(&adev->mman.stolen_extended_memory, NULL, NULL);
8532 * Finally, drop a runtime PM reference for each newly disabled CRTC,
8533 * so we can put the GPU into runtime suspend if we're not driving any
8536 for (i = 0; i < crtc_disable_count; i++)
8537 pm_runtime_put_autosuspend(dev->dev);
8538 pm_runtime_mark_last_busy(dev->dev);
8541 dc_release_state(dc_state_temp);
8545 static int dm_force_atomic_commit(struct drm_connector *connector)
8548 struct drm_device *ddev = connector->dev;
8549 struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
8550 struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
8551 struct drm_plane *plane = disconnected_acrtc->base.primary;
8552 struct drm_connector_state *conn_state;
8553 struct drm_crtc_state *crtc_state;
8554 struct drm_plane_state *plane_state;
8559 state->acquire_ctx = ddev->mode_config.acquire_ctx;
8561 /* Construct an atomic state to restore previous display setting */
8564 * Attach connectors to drm_atomic_state
8566 conn_state = drm_atomic_get_connector_state(state, connector);
8568 ret = PTR_ERR_OR_ZERO(conn_state);
8572 /* Attach crtc to drm_atomic_state*/
8573 crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
8575 ret = PTR_ERR_OR_ZERO(crtc_state);
8579 /* force a restore */
8580 crtc_state->mode_changed = true;
8582 /* Attach plane to drm_atomic_state */
8583 plane_state = drm_atomic_get_plane_state(state, plane);
8585 ret = PTR_ERR_OR_ZERO(plane_state);
8590 /* Call commit internally with the state we just constructed */
8591 ret = drm_atomic_commit(state);
8596 DRM_ERROR("Restoring old state failed with %i\n", ret);
8597 drm_atomic_state_put(state);
8603 * This function handles all cases where a set mode does not come upon hotplug.
8604 * This includes when a display is unplugged and then plugged back into the
8605 * same port, and when running without usermode desktop manager support
8607 void dm_restore_drm_connector_state(struct drm_device *dev,
8608 struct drm_connector *connector)
8610 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
8611 struct amdgpu_crtc *disconnected_acrtc;
8612 struct dm_crtc_state *acrtc_state;
8614 if (!aconnector->dc_sink || !connector->state || !connector->encoder)
8617 disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
8618 if (!disconnected_acrtc)
8621 acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
8622 if (!acrtc_state->stream)
8626 * If the previous sink is not released and is different from the current one,
8627 * we deduce that we are in a state where we cannot rely on a usermode call
8628 * to turn on the display, so we do it here
8630 if (acrtc_state->stream->sink != aconnector->dc_sink)
8631 dm_force_atomic_commit(&aconnector->base);
8635 * Grabs all modesetting locks to serialize against any blocking commits, and
8636 * waits for completion of all non-blocking commits.
8638 static int do_aquire_global_lock(struct drm_device *dev,
8639 struct drm_atomic_state *state)
8641 struct drm_crtc *crtc;
8642 struct drm_crtc_commit *commit;
8646 * Adding all modeset locks to acquire_ctx will
8647 * ensure that when the framework releases it, the
8648 * extra locks we are locking here will get released too
8650 ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
8654 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
8655 spin_lock(&crtc->commit_lock);
8656 commit = list_first_entry_or_null(&crtc->commit_list,
8657 struct drm_crtc_commit, commit_entry);
8659 drm_crtc_commit_get(commit);
8660 spin_unlock(&crtc->commit_lock);
8666 * Make sure all pending HW programming completed and
8669 ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);
8672 ret = wait_for_completion_interruptible_timeout(
8673 &commit->flip_done, 10*HZ);
8676 DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done "
8677 "timed out\n", crtc->base.id, crtc->name);
8679 drm_crtc_commit_put(commit);
8682 return ret < 0 ? ret : 0;
8685 static void get_freesync_config_for_crtc(
8686 struct dm_crtc_state *new_crtc_state,
8687 struct dm_connector_state *new_con_state)
8689 struct mod_freesync_config config = {0};
8690 struct amdgpu_dm_connector *aconnector =
8691 to_amdgpu_dm_connector(new_con_state->base.connector);
8692 struct drm_display_mode *mode = &new_crtc_state->base.mode;
8693 int vrefresh = drm_mode_vrefresh(mode);
8695 new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
8696 vrefresh >= aconnector->min_vfreq &&
8697 vrefresh <= aconnector->max_vfreq;
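/*
* Worked example with assumed panel numbers: a FreeSync panel reporting a
* 48-144 Hz range gives aconnector->min_vfreq = 48 and max_vfreq = 144, so a
* 60 Hz mode on it is vrr_supported. The conversion below then yields
* config.min_refresh_in_uhz = 48000000 and config.max_refresh_in_uhz =
* 144000000 (micro-Hz).
*/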
8699 if (new_crtc_state->vrr_supported) {
8700 new_crtc_state->stream->ignore_msa_timing_param = true;
8701 config.state = new_crtc_state->base.vrr_enabled ?
8702 VRR_STATE_ACTIVE_VARIABLE :
8704 config.min_refresh_in_uhz =
8705 aconnector->min_vfreq * 1000000;
8706 config.max_refresh_in_uhz =
8707 aconnector->max_vfreq * 1000000;
8708 config.vsif_supported = true;
8712 new_crtc_state->freesync_config = config;
8715 static void reset_freesync_config_for_crtc(
8716 struct dm_crtc_state *new_crtc_state)
8718 new_crtc_state->vrr_supported = false;
8720 memset(&new_crtc_state->vrr_infopacket, 0,
8721 sizeof(new_crtc_state->vrr_infopacket));
8724 static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
8725 struct drm_atomic_state *state,
8726 struct drm_crtc *crtc,
8727 struct drm_crtc_state *old_crtc_state,
8728 struct drm_crtc_state *new_crtc_state,
8730 bool *lock_and_validation_needed)
8732 struct dm_atomic_state *dm_state = NULL;
8733 struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
8734 struct dc_stream_state *new_stream;
8738 * TODO Move this code into dm_crtc_atomic_check once we get rid of dc_validation_set
8739 * update changed items
8741 struct amdgpu_crtc *acrtc = NULL;
8742 struct amdgpu_dm_connector *aconnector = NULL;
8743 struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
8744 struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;
8748 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8749 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8750 acrtc = to_amdgpu_crtc(crtc);
8751 aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);
8753 /* TODO This hack should go away */
8754 if (aconnector && enable) {
8755 /* Make sure fake sink is created in plug-in scenario */
8756 drm_new_conn_state = drm_atomic_get_new_connector_state(state,
8758 drm_old_conn_state = drm_atomic_get_old_connector_state(state,
8761 if (IS_ERR(drm_new_conn_state)) {
8762 ret = PTR_ERR_OR_ZERO(drm_new_conn_state);
8766 dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
8767 dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);
8769 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
8772 new_stream = create_validate_stream_for_sink(aconnector,
8773 &new_crtc_state->mode,
8775 dm_old_crtc_state->stream);
8778 * We can have no stream on ACTION_SET if a display
8779 * was disconnected during S3. In this case it is not an
8780 * error; the OS will be updated after detection and
8781 * will do the right thing on the next atomic commit
8785 DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
8786 __func__, acrtc->base.base.id);
8792 * TODO: Check VSDB bits to decide whether this should
8793 * be enabled or not.
8795 new_stream->triggered_crtc_reset.enabled =
8796 dm->force_timing_sync;
8798 dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
8800 ret = fill_hdr_info_packet(drm_new_conn_state,
8801 &new_stream->hdr_static_metadata);
8806 * If we already removed the old stream from the context
8807 * (and set the new stream to NULL) then we can't reuse
8808 * the old stream even if the stream and scaling are unchanged.
8809 * We'll hit the BUG_ON and black screen.
8811 * TODO: Refactor this function to allow this check to work
8812 * in all conditions.
8814 if (dm_new_crtc_state->stream &&
8815 dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
8816 dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
8817 new_crtc_state->mode_changed = false;
8818 DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d",
8819 new_crtc_state->mode_changed);
8823 /* mode_changed flag may get updated above, need to check again */
8824 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
8828 "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
8829 "planes_changed:%d, mode_changed:%d,active_changed:%d,"
8830 "connectors_changed:%d\n",
8832 new_crtc_state->enable,
8833 new_crtc_state->active,
8834 new_crtc_state->planes_changed,
8835 new_crtc_state->mode_changed,
8836 new_crtc_state->active_changed,
8837 new_crtc_state->connectors_changed);
8839 /* Remove stream for any changed/disabled CRTC */
8842 if (!dm_old_crtc_state->stream)
8845 ret = dm_atomic_get_state(state, &dm_state);
8849 DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
8852 /* i.e. reset mode */
8853 if (dc_remove_stream_from_ctx(
8856 dm_old_crtc_state->stream) != DC_OK) {
8861 dc_stream_release(dm_old_crtc_state->stream);
8862 dm_new_crtc_state->stream = NULL;
8864 reset_freesync_config_for_crtc(dm_new_crtc_state);
8866 *lock_and_validation_needed = true;
8868 } else {/* Add stream for any updated/enabled CRTC */
8870 * Quick fix to prevent a NULL pointer dereference on new_stream when
8871 * added MST connectors are not found in the existing crtc_state in chained mode.
8872 * TODO: need to dig out the root cause of this
8874 if (!aconnector || (!aconnector->dc_sink && aconnector->mst_port))
8877 if (modereset_required(new_crtc_state))
8880 if (modeset_required(new_crtc_state, new_stream,
8881 dm_old_crtc_state->stream)) {
8883 WARN_ON(dm_new_crtc_state->stream);
8885 ret = dm_atomic_get_state(state, &dm_state);
8889 dm_new_crtc_state->stream = new_stream;
8891 dc_stream_retain(new_stream);
8893 DRM_DEBUG_DRIVER("Enabling DRM crtc: %d\n",
8896 if (dc_add_stream_to_ctx(
8899 dm_new_crtc_state->stream) != DC_OK) {
8904 *lock_and_validation_needed = true;
8909 /* Release extra reference */
8911 dc_stream_release(new_stream);
8914 * We want to do dc stream updates that do not require a
8915 * full modeset below.
8917 if (!(enable && aconnector && new_crtc_state->active))
8920 * Given above conditions, the dc state cannot be NULL because:
8921 * 1. We're in the process of enabling CRTCs (just been added
8922 * to the dc context, or is already on the context)
8923 * 2. Has a valid connector attached, and
8924 * 3. Is currently active and enabled.
8925 * => The dc stream state currently exists.
8927 BUG_ON(dm_new_crtc_state->stream == NULL);
8929 /* Scaling or underscan settings */
8930 if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state))
8931 update_stream_scaling_settings(
8932 &new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);
8935 dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
8938 * Color management settings. We also update color properties
8939 * when a modeset is needed, to ensure it gets reprogrammed.
8941 if (dm_new_crtc_state->base.color_mgmt_changed ||
8942 drm_atomic_crtc_needs_modeset(new_crtc_state)) {
8943 ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state);
8948 /* Update Freesync settings. */
8949 get_freesync_config_for_crtc(dm_new_crtc_state,
8956 dc_stream_release(new_stream);
8960 static bool should_reset_plane(struct drm_atomic_state *state,
8961 struct drm_plane *plane,
8962 struct drm_plane_state *old_plane_state,
8963 struct drm_plane_state *new_plane_state)
8965 struct drm_plane *other;
8966 struct drm_plane_state *old_other_state, *new_other_state;
8967 struct drm_crtc_state *new_crtc_state;
8971 * TODO: Remove this hack once the checks below are sufficient
8972 * to determine when we need to reset all the planes on
8975 if (state->allow_modeset)
8978 /* Exit early if we know that we're adding or removing the plane. */
8979 if (old_plane_state->crtc != new_plane_state->crtc)
8982 /* old crtc == new_crtc == NULL, plane not in context. */
8983 if (!new_plane_state->crtc)
8987 drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);
8989 if (!new_crtc_state)
8992 /* CRTC Degamma changes currently require us to recreate planes. */
8993 if (new_crtc_state->color_mgmt_changed)
8996 if (drm_atomic_crtc_needs_modeset(new_crtc_state))
9000 * If there are any new primary or overlay planes being added or
9001 * removed then the z-order can potentially change. To ensure
9002 * correct z-order and pipe acquisition the current DC architecture
9003 * requires us to remove and recreate all existing planes.
9005 * TODO: Come up with a more elegant solution for this.
9007 for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
9008 struct amdgpu_framebuffer *old_afb, *new_afb;
9009 if (other->type == DRM_PLANE_TYPE_CURSOR)
9012 if (old_other_state->crtc != new_plane_state->crtc &&
9013 new_other_state->crtc != new_plane_state->crtc)
9016 if (old_other_state->crtc != new_other_state->crtc)
9019 /* Src/dst size and scaling updates. */
9020 if (old_other_state->src_w != new_other_state->src_w ||
9021 old_other_state->src_h != new_other_state->src_h ||
9022 old_other_state->crtc_w != new_other_state->crtc_w ||
9023 old_other_state->crtc_h != new_other_state->crtc_h)
9026 /* Rotation / mirroring updates. */
9027 if (old_other_state->rotation != new_other_state->rotation)
9030 /* Blending updates. */
9031 if (old_other_state->pixel_blend_mode !=
9032 new_other_state->pixel_blend_mode)
9035 /* Alpha updates. */
9036 if (old_other_state->alpha != new_other_state->alpha)
9039 /* Colorspace changes. */
9040 if (old_other_state->color_range != new_other_state->color_range ||
9041 old_other_state->color_encoding != new_other_state->color_encoding)
9044 /* Framebuffer checks fall at the end. */
9045 if (!old_other_state->fb || !new_other_state->fb)
9048 /* Pixel format changes can require bandwidth updates. */
9049 if (old_other_state->fb->format != new_other_state->fb->format)
9052 old_afb = (struct amdgpu_framebuffer *)old_other_state->fb;
9053 new_afb = (struct amdgpu_framebuffer *)new_other_state->fb;
9055 /* Tiling and DCC changes also require bandwidth updates. */
9056 if (old_afb->tiling_flags != new_afb->tiling_flags ||
9057 old_afb->base.modifier != new_afb->base.modifier)
9064 static int dm_check_cursor_fb(struct amdgpu_crtc *new_acrtc,
9065 struct drm_plane_state *new_plane_state,
9066 struct drm_framebuffer *fb)
9068 struct amdgpu_device *adev = drm_to_adev(new_acrtc->base.dev);
9069 struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(fb);
9073 if (fb->width > new_acrtc->max_cursor_width ||
9074 fb->height > new_acrtc->max_cursor_height) {
9075 DRM_DEBUG_ATOMIC("Bad cursor FB size %dx%d\n",
9076 new_plane_state->fb->width,
9077 new_plane_state->fb->height);
9080 if (new_plane_state->src_w != fb->width << 16 ||
9081 new_plane_state->src_h != fb->height << 16) {
9082 DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
9086 /* Pitch in pixels */
9087 pitch = fb->pitches[0] / fb->format->cpp[0];
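/*
* Example with an assumed framebuffer: a 64x64 ARGB8888 cursor FB has
* pitches[0] = 256 bytes and cpp[0] = 4, so the pitch works out to 64
* pixels, which matches fb->width and passes the check below.
*/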
9089 if (fb->width != pitch) {
9090 DRM_DEBUG_ATOMIC("Cursor FB width %d doesn't match pitch %d",
9099 /* FB pitch is supported by cursor plane */
9102 DRM_DEBUG_ATOMIC("Bad cursor FB pitch %d px\n", pitch);
9106 /* Core DRM takes care of checking FB modifiers, so we only need to
9107 * check tiling flags when the FB doesn't have a modifier. */
9108 if (!(fb->flags & DRM_MODE_FB_MODIFIERS)) {
9109 if (adev->family < AMDGPU_FAMILY_AI) {
9110 linear = AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_2D_TILED_THIN1 &&
9111 AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_1D_TILED_THIN1 &&
9112 AMDGPU_TILING_GET(afb->tiling_flags, MICRO_TILE_MODE) == 0;
9114 linear = AMDGPU_TILING_GET(afb->tiling_flags, SWIZZLE_MODE) == 0;
9117 DRM_DEBUG_ATOMIC("Cursor FB not linear");
9125 static int dm_update_plane_state(struct dc *dc,
9126 struct drm_atomic_state *state,
9127 struct drm_plane *plane,
9128 struct drm_plane_state *old_plane_state,
9129 struct drm_plane_state *new_plane_state,
9131 bool *lock_and_validation_needed)
9134 struct dm_atomic_state *dm_state = NULL;
9135 struct drm_crtc *new_plane_crtc, *old_plane_crtc;
9136 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
9137 struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
9138 struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
9139 struct amdgpu_crtc *new_acrtc;
9144 new_plane_crtc = new_plane_state->crtc;
9145 old_plane_crtc = old_plane_state->crtc;
9146 dm_new_plane_state = to_dm_plane_state(new_plane_state);
9147 dm_old_plane_state = to_dm_plane_state(old_plane_state);
9149 if (plane->type == DRM_PLANE_TYPE_CURSOR) {
9150 if (!enable || !new_plane_crtc ||
9151 drm_atomic_plane_disabling(plane->state, new_plane_state))
9154 new_acrtc = to_amdgpu_crtc(new_plane_crtc);
9156 if (new_plane_state->src_x != 0 || new_plane_state->src_y != 0) {
9157 DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
9161 if (new_plane_state->fb) {
9162 ret = dm_check_cursor_fb(new_acrtc, new_plane_state,
9163 new_plane_state->fb);
9171 needs_reset = should_reset_plane(state, plane, old_plane_state,
9174 /* Remove any changed/removed planes */
9179 if (!old_plane_crtc)
9182 old_crtc_state = drm_atomic_get_old_crtc_state(
9183 state, old_plane_crtc);
9184 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9186 if (!dm_old_crtc_state->stream)
9189 DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
9190 plane->base.id, old_plane_crtc->base.id);
9192 ret = dm_atomic_get_state(state, &dm_state);
9196 if (!dc_remove_plane_from_context(
9198 dm_old_crtc_state->stream,
9199 dm_old_plane_state->dc_state,
9200 dm_state->context)) {
9206 dc_plane_state_release(dm_old_plane_state->dc_state);
9207 dm_new_plane_state->dc_state = NULL;
9209 *lock_and_validation_needed = true;
9211 } else { /* Add new planes */
9212 struct dc_plane_state *dc_new_plane_state;
9214 if (drm_atomic_plane_disabling(plane->state, new_plane_state))
9217 if (!new_plane_crtc)
9220 new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
9221 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9223 if (!dm_new_crtc_state->stream)
9229 ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
9233 WARN_ON(dm_new_plane_state->dc_state);
9235 dc_new_plane_state = dc_create_plane_state(dc);
9236 if (!dc_new_plane_state)
9239 DRM_DEBUG_DRIVER("Enabling DRM plane: %d on DRM crtc %d\n",
9240 plane->base.id, new_plane_crtc->base.id);
9242 ret = fill_dc_plane_attributes(
9243 drm_to_adev(new_plane_crtc->dev),
9248 dc_plane_state_release(dc_new_plane_state);
9252 ret = dm_atomic_get_state(state, &dm_state);
9254 dc_plane_state_release(dc_new_plane_state);
9259 * Any atomic check errors that occur after this will
9260 * not need a release. The plane state will be attached
9261 * to the stream, and therefore part of the atomic
9262 * state. It'll be released when the atomic state is
9265 if (!dc_add_plane_to_context(
9267 dm_new_crtc_state->stream,
9269 dm_state->context)) {
9271 dc_plane_state_release(dc_new_plane_state);
9275 dm_new_plane_state->dc_state = dc_new_plane_state;
9277 /* Tell DC to do a full surface update every time there
9278 * is a plane change. Inefficient, but works for now.
9280 dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;
9282 *lock_and_validation_needed = true;
9289 static int dm_check_crtc_cursor(struct drm_atomic_state *state,
9290 struct drm_crtc *crtc,
9291 struct drm_crtc_state *new_crtc_state)
9293 struct drm_plane_state *new_cursor_state, *new_primary_state;
9294 int cursor_scale_w, cursor_scale_h, primary_scale_w, primary_scale_h;
9296 /* On DCE and DCN there is no dedicated hardware cursor plane. We get a
9297 * cursor per pipe but it's going to inherit the scaling and
9298 * positioning from the underlying pipe. Check that the cursor plane's
9299 * scaling matches the primary plane's. */
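/*
* Worked example with assumed numbers: a 64x64 cursor FB shown at 64x64 has
* src_w = 64 << 16 and crtc_w = 64, so cursor_scale_w below is
* 64 * 1000 / 64 = 1000 (1.0x). If the primary plane upscales a 1920 pixel
* wide source to a 3840 pixel wide CRTC, primary_scale_w is 2000 and the
* configuration is rejected.
*/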
9301 new_cursor_state = drm_atomic_get_new_plane_state(state, crtc->cursor);
9302 new_primary_state = drm_atomic_get_new_plane_state(state, crtc->primary);
9303 if (!new_cursor_state || !new_primary_state || !new_cursor_state->fb) {
9307 cursor_scale_w = new_cursor_state->crtc_w * 1000 /
9308 (new_cursor_state->src_w >> 16);
9309 cursor_scale_h = new_cursor_state->crtc_h * 1000 /
9310 (new_cursor_state->src_h >> 16);
9312 primary_scale_w = new_primary_state->crtc_w * 1000 /
9313 (new_primary_state->src_w >> 16);
9314 primary_scale_h = new_primary_state->crtc_h * 1000 /
9315 (new_primary_state->src_h >> 16);
9317 if (cursor_scale_w != primary_scale_w ||
9318 cursor_scale_h != primary_scale_h) {
9319 DRM_DEBUG_ATOMIC("Cursor plane scaling doesn't match primary plane\n");
9326 #if defined(CONFIG_DRM_AMD_DC_DCN)
9327 static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc)
9329 struct drm_connector *connector;
9330 struct drm_connector_state *conn_state;
9331 struct amdgpu_dm_connector *aconnector = NULL;
9333 for_each_new_connector_in_state(state, connector, conn_state, i) {
9334 if (conn_state->crtc != crtc)
9337 aconnector = to_amdgpu_dm_connector(connector);
9338 if (!aconnector->port || !aconnector->mst_port)
9347 return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_port->mst_mgr);
9352 * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
9353 * @dev: The DRM device
9354 * @state: The atomic state to commit
9356 * Validate that the given atomic state is programmable by DC into hardware.
9357 * This involves constructing a &struct dc_state reflecting the new hardware
9358 * state we wish to commit, then querying DC to see if it is programmable. It's
9359 * important not to modify the existing DC state. Otherwise, atomic_check
9360 * may unexpectedly commit hardware changes.
9362 * When validating the DC state, it's important that the right locks are
9363 * acquired. For full updates case which removes/adds/updates streams on one
9364 * CRTC while flipping on another CRTC, acquiring global lock will guarantee
9365 * that any such full update commit will wait for completion of any outstanding
9366 * flip using DRMs synchronization events.
9368 * Note that DM adds the affected connectors for all CRTCs in state, when that
9369 * might not seem necessary. This is because DC stream creation requires the
9370 * DC sink, which is tied to the DRM connector state. Cleaning this up should
9371 * be possible but non-trivial - a possible TODO item.
9373 * Return: 0 on success, or a negative error code if validation failed.
9375 static int amdgpu_dm_atomic_check(struct drm_device *dev,
9376 struct drm_atomic_state *state)
9378 struct amdgpu_device *adev = drm_to_adev(dev);
9379 struct dm_atomic_state *dm_state = NULL;
9380 struct dc *dc = adev->dm.dc;
9381 struct drm_connector *connector;
9382 struct drm_connector_state *old_con_state, *new_con_state;
9383 struct drm_crtc *crtc;
9384 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
9385 struct drm_plane *plane;
9386 struct drm_plane_state *old_plane_state, *new_plane_state;
9387 enum dc_status status;
9389 bool lock_and_validation_needed = false;
9390 struct dm_crtc_state *dm_old_crtc_state;
9392 trace_amdgpu_dm_atomic_check_begin(state);
9394 ret = drm_atomic_helper_check_modeset(dev, state);
9398 /* Check connector changes */
9399 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9400 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
9401 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
9403 /* Skip connectors that are disabled or part of modeset already. */
9404 if (!old_con_state->crtc && !new_con_state->crtc)
9407 if (!new_con_state->crtc)
9410 new_crtc_state = drm_atomic_get_crtc_state(state, new_con_state->crtc);
9411 if (IS_ERR(new_crtc_state)) {
9412 ret = PTR_ERR(new_crtc_state);
9416 if (dm_old_con_state->abm_level !=
9417 dm_new_con_state->abm_level)
9418 new_crtc_state->connectors_changed = true;
9421 #if defined(CONFIG_DRM_AMD_DC_DCN)
9422 if (adev->asic_type >= CHIP_NAVI10) {
9423 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9424 if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
9425 ret = add_affected_mst_dsc_crtcs(state, crtc);
9432 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9433 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9435 if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
9436 !new_crtc_state->color_mgmt_changed &&
9437 old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled &&
9438 dm_old_crtc_state->dsc_force_changed == false)
9441 if (!new_crtc_state->enable)
9444 ret = drm_atomic_add_affected_connectors(state, crtc);
9448 ret = drm_atomic_add_affected_planes(state, crtc);
9452 if (dm_old_crtc_state->dsc_force_changed)
9453 new_crtc_state->mode_changed = true;
9457 * Add all primary and overlay planes on the CRTC to the state
9458 * whenever a plane is enabled to maintain correct z-ordering
9459 * and to enable fast surface updates.
9461 drm_for_each_crtc(crtc, dev) {
9462 bool modified = false;
9464 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
9465 if (plane->type == DRM_PLANE_TYPE_CURSOR)
9468 if (new_plane_state->crtc == crtc ||
9469 old_plane_state->crtc == crtc) {
9478 drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
9479 if (plane->type == DRM_PLANE_TYPE_CURSOR)
9483 drm_atomic_get_plane_state(state, plane);
9485 if (IS_ERR(new_plane_state)) {
9486 ret = PTR_ERR(new_plane_state);
9492 /* Remove existing planes if they are modified */
9493 for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
9494 ret = dm_update_plane_state(dc, state, plane,
9498 &lock_and_validation_needed);
9503 /* Disable all crtcs which require disable */
9504 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9505 ret = dm_update_crtc_state(&adev->dm, state, crtc,
9509 &lock_and_validation_needed);
9514 /* Enable all crtcs which require enable */
9515 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9516 ret = dm_update_crtc_state(&adev->dm, state, crtc,
9520 &lock_and_validation_needed);
9525 /* Add new/modified planes */
9526 for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
9527 ret = dm_update_plane_state(dc, state, plane,
9531 &lock_and_validation_needed);
9536 /* Run this here since we want to validate the streams we created */
9537 ret = drm_atomic_helper_check_planes(dev, state);
9541 /* Check cursor planes scaling */
9542 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
9543 ret = dm_check_crtc_cursor(state, crtc, new_crtc_state);
9548 if (state->legacy_cursor_update) {
9550 * This is a fast cursor update coming from the plane update
9551 * helper, check if it can be done asynchronously for better
9554 state->async_update =
9555 !drm_atomic_helper_async_check(dev, state);
9558 * Skip the remaining global validation if this is an async
9559 * update. Cursor updates can be done without affecting
9560 * state or bandwidth calcs and this avoids the performance
9561 * penalty of locking the private state object and
9562 * allocating a new dc_state.
9564 if (state->async_update)
9568 /* Check scaling and underscan changes*/
9569 /* TODO Removed scaling changes validation due to inability to commit
9570 * new stream into context w/o causing full reset. Need to
9571 * decide how to handle.
9573 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9574 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
9575 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
9576 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
9578 /* Skip any modesets/resets */
9579 if (!acrtc || drm_atomic_crtc_needs_modeset(
9580 drm_atomic_get_new_crtc_state(state, &acrtc->base)))
9583 /* Skip anything that is not a scaling or underscan change */
9584 if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
9587 lock_and_validation_needed = true;
9591 * Streams and planes are reset when there are changes that affect
9592 * bandwidth. Anything that affects bandwidth needs to go through
9593 * DC global validation to ensure that the configuration can be applied
9596 * We have to currently stall out here in atomic_check for outstanding
9597 * commits to finish in this case because our IRQ handlers reference
9598 * DRM state directly - we can end up disabling interrupts too early
9601 * TODO: Remove this stall and drop DM state private objects.
9603 if (lock_and_validation_needed) {
9604 ret = dm_atomic_get_state(state, &dm_state);
9608 ret = do_aquire_global_lock(dev, state);
9612 #if defined(CONFIG_DRM_AMD_DC_DCN)
9613 if (!compute_mst_dsc_configs_for_state(state, dm_state->context))
9616 ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context);
9622 * Perform validation of MST topology in the state:
9623 * We need to perform MST atomic check before calling
9624 * dc_validate_global_state(), or there is a chance
9625 * of getting stuck in an infinite loop and hanging eventually.
9627 ret = drm_dp_mst_atomic_check(state);
9630 status = dc_validate_global_state(dc, dm_state->context, false);
9631 if (status != DC_OK) {
9632 DC_LOG_WARNING("DC global validation failure: %s (%d)",
9633 dc_status_to_str(status), status);
9639 * The commit is a fast update. Fast updates shouldn't change
9640 * the DC context, affect global validation, and can have their
9641 * commit work done in parallel with other commits not touching
9642 * the same resource. If we have a new DC context as part of
9643 * the DM atomic state from validation we need to free it and
9644 * retain the existing one instead.
9646 * Furthermore, since the DM atomic state only contains the DC
9647 * context and can safely be annulled, we can free the state
9648 * and clear the associated private object now to free
9649 * some memory and avoid a possible use-after-free later.
9652 for (i = 0; i < state->num_private_objs; i++) {
9653 struct drm_private_obj *obj = state->private_objs[i].ptr;
9655 if (obj->funcs == adev->dm.atomic_obj.funcs) {
9656 int j = state->num_private_objs-1;
9658 dm_atomic_destroy_state(obj,
9659 state->private_objs[i].state);
9661 /* If i is not at the end of the array then the
9662 * last element needs to be moved to where i was
9663 * before the array can safely be truncated.
9666 state->private_objs[i] =
9667 state->private_objs[j];
9669 state->private_objs[j].ptr = NULL;
9670 state->private_objs[j].state = NULL;
9671 state->private_objs[j].old_state = NULL;
9672 state->private_objs[j].new_state = NULL;
9674 state->num_private_objs = j;
9680 /* Store the overall update type for use later in atomic check. */
9681 for_each_new_crtc_in_state (state, crtc, new_crtc_state, i) {
9682 struct dm_crtc_state *dm_new_crtc_state =
9683 to_dm_crtc_state(new_crtc_state);
9685 dm_new_crtc_state->update_type = lock_and_validation_needed ?
9690 /* Must be success */
9693 trace_amdgpu_dm_atomic_check_finish(state, ret);
9698 if (ret == -EDEADLK)
9699 DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
9700 else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
9701 DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
9703 DRM_DEBUG_DRIVER("Atomic check failed with err: %d\n", ret);
9705 trace_amdgpu_dm_atomic_check_finish(state, ret);
9710 static bool is_dp_capable_without_timing_msa(struct dc *dc,
9711 struct amdgpu_dm_connector *amdgpu_dm_connector)
9714 bool capable = false;
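/*
* DP_DOWN_STREAM_PORT_COUNT (DPCD 0x0007) carries the MSA_TIMING_PAR_IGNORED
* bit; a sink that sets it can render the stream while ignoring the MSA
* video timing parameters, which is what allows the timing to vary for
* FreeSync over DP.
*/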
9716 if (amdgpu_dm_connector->dc_link &&
9717 dm_helpers_dp_read_dpcd(
9719 amdgpu_dm_connector->dc_link,
9720 DP_DOWN_STREAM_PORT_COUNT,
9722 sizeof(dpcd_data))) {
9723 capable = (dpcd_data & DP_MSA_TIMING_PAR_IGNORED) ? true:false;
9728 void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
9732 bool edid_check_required;
9733 struct detailed_timing *timing;
9734 struct detailed_non_pixel *data;
9735 struct detailed_data_monitor_range *range;
9736 struct amdgpu_dm_connector *amdgpu_dm_connector =
9737 to_amdgpu_dm_connector(connector);
9738 struct dm_connector_state *dm_con_state = NULL;
9740 struct drm_device *dev = connector->dev;
9741 struct amdgpu_device *adev = drm_to_adev(dev);
9742 bool freesync_capable = false;
9744 if (!connector->state) {
9745 DRM_ERROR("%s - Connector has no state", __func__);
9750 dm_con_state = to_dm_connector_state(connector->state);
9752 amdgpu_dm_connector->min_vfreq = 0;
9753 amdgpu_dm_connector->max_vfreq = 0;
9754 amdgpu_dm_connector->pixel_clock_mhz = 0;
9759 dm_con_state = to_dm_connector_state(connector->state);
9761 edid_check_required = false;
9762 if (!amdgpu_dm_connector->dc_sink) {
9763 DRM_ERROR("dc_sink NULL, could not add free_sync module.\n");
9766 if (!adev->dm.freesync_module)
9769 * If the EDID is non-zero, restrict FreeSync only to DP and eDP
9772 if (amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
9773 || amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_EDP) {
9774 edid_check_required = is_dp_capable_without_timing_msa(
9776 amdgpu_dm_connector);
9779 if (edid_check_required == true && (edid->version > 1 ||
9780 (edid->version == 1 && edid->revision > 1))) {
9781 for (i = 0; i < 4; i++) {
9783 timing = &edid->detailed_timings[i];
9784 data = &timing->data.other_data;
9785 range = &data->data.range;
9787 * Check if monitor has continuous frequency mode
9789 if (data->type != EDID_DETAIL_MONITOR_RANGE)
9792 * Check for flag range limits only. If flag == 1 then
9793 * no additional timing information provided.
9794 * Default GTF, GTF Secondary curve and CVT are not
9797 if (range->flags != 1)
9800 amdgpu_dm_connector->min_vfreq = range->min_vfreq;
9801 amdgpu_dm_connector->max_vfreq = range->max_vfreq;
9802 amdgpu_dm_connector->pixel_clock_mhz =
9803 range->pixel_clock_mhz * 10;
9807 if (amdgpu_dm_connector->max_vfreq -
9808 amdgpu_dm_connector->min_vfreq > 10) {
9810 freesync_capable = true;
9816 dm_con_state->freesync_capable = freesync_capable;
9818 if (connector->vrr_capable_property)
9819 drm_connector_set_vrr_capable_property(connector,
9823 static void amdgpu_dm_set_psr_caps(struct dc_link *link)
9825 uint8_t dpcd_data[EDP_PSR_RECEIVER_CAP_SIZE];
9827 if (!(link->connector_signal & SIGNAL_TYPE_EDP))
9829 if (link->type == dc_connection_none)
9831 if (dm_helpers_dp_read_dpcd(NULL, link, DP_PSR_SUPPORT,
9832 dpcd_data, sizeof(dpcd_data))) {
9833 link->dpcd_caps.psr_caps.psr_version = dpcd_data[0];
9835 if (dpcd_data[0] == 0) {
9836 link->psr_settings.psr_version = DC_PSR_VERSION_UNSUPPORTED;
9837 link->psr_settings.psr_feature_enabled = false;
9839 link->psr_settings.psr_version = DC_PSR_VERSION_1;
9840 link->psr_settings.psr_feature_enabled = true;
9843 DRM_INFO("PSR support:%d\n", link->psr_settings.psr_feature_enabled);
9848 * amdgpu_dm_link_setup_psr() - configure psr link
9849 * @stream: stream state
9851 * Return: true if success
9853 static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream)
9855 struct dc_link *link = NULL;
9856 struct psr_config psr_config = {0};
9857 struct psr_context psr_context = {0};
9863 link = stream->link;
9865 psr_config.psr_version = link->dpcd_caps.psr_caps.psr_version;
9867 if (psr_config.psr_version > 0) {
9868 psr_config.psr_exit_link_training_required = 0x1;
9869 psr_config.psr_frame_capture_indication_req = 0;
9870 psr_config.psr_rfb_setup_time = 0x37;
9871 psr_config.psr_sdp_transmit_line_num_deadline = 0x20;
9872 psr_config.allow_smu_optimizations = 0x0;
9874 ret = dc_link_setup_psr(link, stream, &psr_config, &psr_context);
9877 DRM_DEBUG_DRIVER("PSR link: %d\n", link->psr_settings.psr_feature_enabled);
9883 * amdgpu_dm_psr_enable() - enable psr f/w
9884 * @stream: stream state
9886 * Return: true if success
9888 bool amdgpu_dm_psr_enable(struct dc_stream_state *stream)
9890 struct dc_link *link = stream->link;
9891 unsigned int vsync_rate_hz = 0;
9892 struct dc_static_screen_params params = {0};
9893 /* Calculate number of static frames before generating interrupt to
9896 // Initialize fail-safe default of 2 static frames
9897 unsigned int num_frames_static = 2;
9899 DRM_DEBUG_DRIVER("Enabling psr...\n");
9901 vsync_rate_hz = div64_u64(div64_u64((
9902 stream->timing.pix_clk_100hz * 100),
9903 stream->timing.v_total),
9904 stream->timing.h_total);
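/*
* Worked example with assumed timing values: a 1920x1080@60 stream with
* pix_clk_100hz = 1485000, v_total = 1125 and h_total = 2200 gives
* vsync_rate_hz = 148500000 / 1125 / 2200 = 60, a frame time of ~16666 us,
* and therefore (30000 / 16666) + 1 = 2 static frames below.
*/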
9907 * Calculate number of frames such that at least 30 ms of time has
9910 if (vsync_rate_hz != 0) {
9911 unsigned int frame_time_microsec = 1000000 / vsync_rate_hz;
9912 num_frames_static = (30000 / frame_time_microsec) + 1;
9915 params.triggers.cursor_update = true;
9916 params.triggers.overlay_update = true;
9917 params.triggers.surface_update = true;
9918 params.num_frames = num_frames_static;
9920 dc_stream_set_static_screen_params(link->ctx->dc,
9924 return dc_link_set_psr_allow_active(link, true, false, false);
9928 * amdgpu_dm_psr_disable() - disable psr f/w
9929 * @stream: stream state
9931 * Return: true if success
9933 static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream)
9936 DRM_DEBUG_DRIVER("Disabling psr...\n");
9938 return dc_link_set_psr_allow_active(stream->link, false, true, false);
9942 * amdgpu_dm_psr_disable_all() - disable psr f/w
9943 * if psr is enabled on any stream
9945 * Return: true if success
9947 static bool amdgpu_dm_psr_disable_all(struct amdgpu_display_manager *dm)
9949 DRM_DEBUG_DRIVER("Disabling psr if psr is enabled on any stream\n");
9950 return dc_set_psr_allow_active(dm->dc, false);
9953 void amdgpu_dm_trigger_timing_sync(struct drm_device *dev)
9955 struct amdgpu_device *adev = drm_to_adev(dev);
9956 struct dc *dc = adev->dm.dc;
9959 mutex_lock(&adev->dm.dc_lock);
9960 if (dc->current_state) {
9961 for (i = 0; i < dc->current_state->stream_count; ++i)
9962 dc->current_state->streams[i]
9963 ->triggered_crtc_reset.enabled =
9964 adev->dm.force_timing_sync;
9966 dm_enable_per_frame_crtc_master_sync(dc->current_state);
9967 dc_trigger_sync(dc, dc->current_state);
9969 mutex_unlock(&adev->dm.dc_lock);
9972 void dm_write_reg_func(const struct dc_context *ctx, uint32_t address,
9973 uint32_t value, const char *func_name)
9975 #ifdef DM_CHECK_ADDR_0
9977 DC_ERR("invalid register write. address = 0");
9981 cgs_write_register(ctx->cgs_device, address, value);
9982 trace_amdgpu_dc_wreg(&ctx->perf_trace->write_count, address, value);
9985 uint32_t dm_read_reg_func(const struct dc_context *ctx, uint32_t address,
9986 const char *func_name)
9989 #ifdef DM_CHECK_ADDR_0
9991 DC_ERR("invalid register read; address = 0\n");
9996 if (ctx->dmub_srv &&
9997 ctx->dmub_srv->reg_helper_offload.gather_in_progress &&
9998 !ctx->dmub_srv->reg_helper_offload.should_burst_write) {
10003 value = cgs_read_register(ctx->cgs_device, address);
10005 trace_amdgpu_dc_rreg(&ctx->perf_trace->read_count, address, value);