2 * Copyright 2015 Advanced Micro Devices, Inc.
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
26 /* The caprices of the preprocessor require that this be declared right here */
27 #define CREATE_TRACE_POINTS
29 #include "dm_services_types.h"
31 #include "dc/inc/core_types.h"
32 #include "dal_asic_id.h"
33 #include "dmub/dmub_srv.h"
34 #include "dc/inc/hw/dmcu.h"
35 #include "dc/inc/hw/abm.h"
36 #include "dc/dc_dmub_srv.h"
37 #include "amdgpu_dm_trace.h"
41 #include "amdgpu_display.h"
42 #include "amdgpu_ucode.h"
44 #include "amdgpu_dm.h"
45 #ifdef CONFIG_DRM_AMD_DC_HDCP
46 #include "amdgpu_dm_hdcp.h"
47 #include <drm/drm_hdcp.h>
49 #include "amdgpu_pm.h"
51 #include "amd_shared.h"
52 #include "amdgpu_dm_irq.h"
53 #include "dm_helpers.h"
54 #include "amdgpu_dm_mst_types.h"
55 #if defined(CONFIG_DEBUG_FS)
56 #include "amdgpu_dm_debugfs.h"
59 #include "ivsrcid/ivsrcid_vislands30.h"
61 #include <linux/module.h>
62 #include <linux/moduleparam.h>
63 #include <linux/types.h>
64 #include <linux/pm_runtime.h>
65 #include <linux/pci.h>
66 #include <linux/firmware.h>
67 #include <linux/component.h>
69 #include <drm/drm_atomic.h>
70 #include <drm/drm_atomic_uapi.h>
71 #include <drm/drm_atomic_helper.h>
72 #include <drm/drm_dp_mst_helper.h>
73 #include <drm/drm_fb_helper.h>
74 #include <drm/drm_fourcc.h>
75 #include <drm/drm_edid.h>
76 #include <drm/drm_vblank.h>
77 #include <drm/drm_audio_component.h>
78 #include <drm/drm_hdcp.h>
80 #if defined(CONFIG_DRM_AMD_DC_DCN)
81 #include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"
83 #include "dcn/dcn_1_0_offset.h"
84 #include "dcn/dcn_1_0_sh_mask.h"
85 #include "soc15_hw_ip.h"
86 #include "vega10_ip_offset.h"
88 #include "soc15_common.h"
91 #include "modules/inc/mod_freesync.h"
92 #include "modules/power/power_helpers.h"
93 #include "modules/inc/mod_info_packet.h"
95 #define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
96 MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);
97 #define FIRMWARE_SIENNA_CICHLID_DMUB "amdgpu/sienna_cichlid_dmcub.bin"
98 MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID_DMUB);
99 #define FIRMWARE_NAVY_FLOUNDER_DMUB "amdgpu/navy_flounder_dmcub.bin"
100 MODULE_FIRMWARE(FIRMWARE_NAVY_FLOUNDER_DMUB);
101 #define FIRMWARE_GREEN_SARDINE_DMUB "amdgpu/green_sardine_dmcub.bin"
102 MODULE_FIRMWARE(FIRMWARE_GREEN_SARDINE_DMUB);
103 #define FIRMWARE_VANGOGH_DMUB "amdgpu/vangogh_dmcub.bin"
104 MODULE_FIRMWARE(FIRMWARE_VANGOGH_DMUB);
105 #define FIRMWARE_DIMGREY_CAVEFISH_DMUB "amdgpu/dimgrey_cavefish_dmcub.bin"
106 MODULE_FIRMWARE(FIRMWARE_DIMGREY_CAVEFISH_DMUB);
108 #define FIRMWARE_RAVEN_DMCU "amdgpu/raven_dmcu.bin"
109 MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);
111 #define FIRMWARE_NAVI12_DMCU "amdgpu/navi12_dmcu.bin"
112 MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);
114 /* Number of bytes in PSP header for firmware. */
115 #define PSP_HEADER_BYTES 0x100
117 /* Number of bytes in PSP footer for firmware. */
118 #define PSP_FOOTER_BYTES 0x100
123 * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
124 * **dm**) sits between DRM and DC. It acts as a liaison, converting DRM
125 * requests into DC requests, and DC responses into DRM responses.
127 * The root control structure is &struct amdgpu_display_manager.
130 /* basic init/fini API */
131 static int amdgpu_dm_init(struct amdgpu_device *adev);
132 static void amdgpu_dm_fini(struct amdgpu_device *adev);
134 static enum drm_mode_subconnector get_subconnector_type(struct dc_link *link)
136 switch (link->dpcd_caps.dongle_type) {
137 case DISPLAY_DONGLE_NONE:
138 return DRM_MODE_SUBCONNECTOR_Native;
139 case DISPLAY_DONGLE_DP_VGA_CONVERTER:
140 return DRM_MODE_SUBCONNECTOR_VGA;
141 case DISPLAY_DONGLE_DP_DVI_CONVERTER:
142 case DISPLAY_DONGLE_DP_DVI_DONGLE:
143 return DRM_MODE_SUBCONNECTOR_DVID;
144 case DISPLAY_DONGLE_DP_HDMI_CONVERTER:
145 case DISPLAY_DONGLE_DP_HDMI_DONGLE:
146 return DRM_MODE_SUBCONNECTOR_HDMIA;
147 case DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE:
149 return DRM_MODE_SUBCONNECTOR_Unknown;
153 static void update_subconnector_property(struct amdgpu_dm_connector *aconnector)
155 struct dc_link *link = aconnector->dc_link;
156 struct drm_connector *connector = &aconnector->base;
157 enum drm_mode_subconnector subconnector = DRM_MODE_SUBCONNECTOR_Unknown;
159 if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
162 if (aconnector->dc_sink)
163 subconnector = get_subconnector_type(link);
165 drm_object_property_set_value(&connector->base,
166 connector->dev->mode_config.dp_subconnector_property,
171 * initializes drm_device display related structures, based on the information
172 * provided by DAL. The drm structures are: drm_crtc, drm_connector,
173 * drm_encoder, drm_mode_config
175 * Returns 0 on success
177 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
178 /* removes and deallocates the drm structures, created by the above function */
179 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);
181 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
182 struct drm_plane *plane,
183 unsigned long possible_crtcs,
184 const struct dc_plane_cap *plane_cap);
185 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
186 struct drm_plane *plane,
187 uint32_t link_index);
188 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
189 struct amdgpu_dm_connector *amdgpu_dm_connector,
191 struct amdgpu_encoder *amdgpu_encoder);
192 static int amdgpu_dm_encoder_init(struct drm_device *dev,
193 struct amdgpu_encoder *aencoder,
194 uint32_t link_index);
196 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);
198 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);
200 static int amdgpu_dm_atomic_check(struct drm_device *dev,
201 struct drm_atomic_state *state);
203 static void handle_cursor_update(struct drm_plane *plane,
204 struct drm_plane_state *old_plane_state);
206 static void amdgpu_dm_set_psr_caps(struct dc_link *link);
207 static bool amdgpu_dm_psr_enable(struct dc_stream_state *stream);
208 static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream);
209 static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream);
210 static bool amdgpu_dm_psr_disable_all(struct amdgpu_display_manager *dm);
212 static const struct drm_format_info *
213 amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd);
216 * dm_vblank_get_counter
219 * Get counter for number of vertical blanks
222 * struct amdgpu_device *adev - [in] desired amdgpu device
223 * int disp_idx - [in] which CRTC to get the counter from
226 * Counter for vertical blanks
228 static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
230 if (crtc >= adev->mode_info.num_crtc)
233 struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
235 if (acrtc->dm_irq_params.stream == NULL) {
236 DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
241 return dc_stream_get_vblank_counter(acrtc->dm_irq_params.stream);
245 static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
246 u32 *vbl, u32 *position)
248 uint32_t v_blank_start, v_blank_end, h_position, v_position;
250 if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
253 struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
255 if (acrtc->dm_irq_params.stream == NULL) {
256 DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
262 * TODO rework base driver to use values directly.
263 * for now parse it back into reg-format
265 dc_stream_get_scanoutpos(acrtc->dm_irq_params.stream,
271 *position = v_position | (h_position << 16);
272 *vbl = v_blank_start | (v_blank_end << 16);
278 static bool dm_is_idle(void *handle)
284 static int dm_wait_for_idle(void *handle)
290 static bool dm_check_soft_reset(void *handle)
295 static int dm_soft_reset(void *handle)
301 static struct amdgpu_crtc *
302 get_crtc_by_otg_inst(struct amdgpu_device *adev,
305 struct drm_device *dev = adev_to_drm(adev);
306 struct drm_crtc *crtc;
307 struct amdgpu_crtc *amdgpu_crtc;
309 if (otg_inst == -1) {
311 return adev->mode_info.crtcs[0];
314 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
315 amdgpu_crtc = to_amdgpu_crtc(crtc);
317 if (amdgpu_crtc->otg_inst == otg_inst)
324 static inline bool amdgpu_dm_vrr_active_irq(struct amdgpu_crtc *acrtc)
326 return acrtc->dm_irq_params.freesync_config.state ==
327 VRR_STATE_ACTIVE_VARIABLE ||
328 acrtc->dm_irq_params.freesync_config.state ==
329 VRR_STATE_ACTIVE_FIXED;
332 static inline bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state)
334 return dm_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE ||
335 dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
339 * dm_pflip_high_irq() - Handle pageflip interrupt
340 * @interrupt_params: ignored
342 * Handles the pageflip interrupt by notifying all interested parties
343 * that the pageflip has been completed.
345 static void dm_pflip_high_irq(void *interrupt_params)
347 struct amdgpu_crtc *amdgpu_crtc;
348 struct common_irq_params *irq_params = interrupt_params;
349 struct amdgpu_device *adev = irq_params->adev;
351 struct drm_pending_vblank_event *e;
352 uint32_t vpos, hpos, v_blank_start, v_blank_end;
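/* The pageflip IRQ sources are laid out per OTG, so the offset from
 * IRQ_TYPE_PFLIP identifies which OTG instance raised this interrupt.
 */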
355 amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);
357 /* IRQ could occur when in initial stage */
358 /* TODO work and BO cleanup */
359 if (amdgpu_crtc == NULL) {
360 DRM_DEBUG_DRIVER("CRTC is null, returning.\n");
364 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
366 if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
367 DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d != AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p]\n",
368 amdgpu_crtc->pflip_status,
369 AMDGPU_FLIP_SUBMITTED,
370 amdgpu_crtc->crtc_id,
372 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
376 /* page flip completed. */
377 e = amdgpu_crtc->event;
378 amdgpu_crtc->event = NULL;
383 vrr_active = amdgpu_dm_vrr_active_irq(amdgpu_crtc);
385 /* Fixed refresh rate, or VRR scanout position outside front-porch? */
387 !dc_stream_get_scanoutpos(amdgpu_crtc->dm_irq_params.stream, &v_blank_start,
388 &v_blank_end, &hpos, &vpos) ||
389 (vpos < v_blank_start)) {
390 /* Update to correct count and vblank timestamp if racing with
391 * vblank irq. This also updates to the correct vblank timestamp
392 * even in VRR mode, as scanout is past the front-porch atm.
394 drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);
396 /* Wake up userspace by sending the pageflip event with proper
397 * count and timestamp of vblank of flip completion.
400 drm_crtc_send_vblank_event(&amdgpu_crtc->base, e);
402 /* Event sent, so done with vblank for this flip */
403 drm_crtc_vblank_put(&amdgpu_crtc->base);
406 /* VRR active and inside front-porch: vblank count and
407 * timestamp for pageflip event will only be up to date after
408 * drm_crtc_handle_vblank() has been executed from late vblank
409 * irq handler after start of back-porch (vline 0). We queue the
410 * pageflip event for send-out by drm_crtc_handle_vblank() with
411 * updated timestamp and count, once it runs after us.
413 * We need to open-code this instead of using the helper
414 * drm_crtc_arm_vblank_event(), as that helper would
415 * call drm_crtc_accurate_vblank_count(), which we must
416 * not call in VRR mode while we are in front-porch!
419 /* sequence will be replaced by real count during send-out. */
420 e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
421 e->pipe = amdgpu_crtc->crtc_id;
423 list_add_tail(&e->base.link, &adev_to_drm(adev)->vblank_event_list);
427 /* Keep track of vblank of this flip for flip throttling. We use the
428 * cooked hw counter, as that one is incremented at the start of the
429 * vblank in which the pageflip completed, so last_flip_vblank is the
430 * forbidden count for queueing new pageflips while vsync + VRR is enabled.
432 amdgpu_crtc->dm_irq_params.last_flip_vblank =
433 amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base);
435 amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
436 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
438 DRM_DEBUG_DRIVER("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
439 amdgpu_crtc->crtc_id, amdgpu_crtc,
440 vrr_active, (int) !e);
443 static void dm_vupdate_high_irq(void *interrupt_params)
445 struct common_irq_params *irq_params = interrupt_params;
446 struct amdgpu_device *adev = irq_params->adev;
447 struct amdgpu_crtc *acrtc;
451 acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);
454 vrr_active = amdgpu_dm_vrr_active_irq(acrtc);
456 DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d\n",
460 /* Core vblank handling is done here after end of front-porch in
461 * vrr mode, as vblank timestamping only gives valid results now
462 * that we are past the front-porch. This will also deliver
463 * page-flip completion events that have been queued to us
464 * if a pageflip happened inside front-porch.
467 drm_crtc_handle_vblank(&acrtc->base);
469 /* BTR processing for pre-DCE12 ASICs */
470 if (acrtc->dm_irq_params.stream &&
471 adev->family < AMDGPU_FAMILY_AI) {
472 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
473 mod_freesync_handle_v_update(
474 adev->dm.freesync_module,
475 acrtc->dm_irq_params.stream,
476 &acrtc->dm_irq_params.vrr_params);
478 dc_stream_adjust_vmin_vmax(
480 acrtc->dm_irq_params.stream,
481 &acrtc->dm_irq_params.vrr_params.adjust);
482 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
489 * dm_crtc_high_irq() - Handles CRTC interrupt
490 * @interrupt_params: used for determining the CRTC instance
492 * Handles the CRTC/VSYNC interrupt by notifying DRM's VBLANK
495 static void dm_crtc_high_irq(void *interrupt_params)
497 struct common_irq_params *irq_params = interrupt_params;
498 struct amdgpu_device *adev = irq_params->adev;
499 struct amdgpu_crtc *acrtc;
503 acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
507 vrr_active = amdgpu_dm_vrr_active_irq(acrtc);
509 DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
510 vrr_active, acrtc->dm_irq_params.active_planes);
513 * Core vblank handling at start of front-porch is only possible
514 * in non-vrr mode, as that is the only case where vblank timestamping
515 * gives valid results while still inside the front-porch. Otherwise defer it
516 * to dm_vupdate_high_irq after end of front-porch.
519 drm_crtc_handle_vblank(&acrtc->base);
522 * The following must happen at the start of vblank, for CRC
523 * computation and below-the-range (BTR) support in vrr mode.
525 amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);
527 /* BTR updates need to happen before VUPDATE on Vega and above. */
528 if (adev->family < AMDGPU_FAMILY_AI)
531 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
533 if (acrtc->dm_irq_params.stream &&
534 acrtc->dm_irq_params.vrr_params.supported &&
535 acrtc->dm_irq_params.freesync_config.state ==
536 VRR_STATE_ACTIVE_VARIABLE) {
537 mod_freesync_handle_v_update(adev->dm.freesync_module,
538 acrtc->dm_irq_params.stream,
539 &acrtc->dm_irq_params.vrr_params);
541 dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc->dm_irq_params.stream,
542 &acrtc->dm_irq_params.vrr_params.adjust);
546 * If there aren't any active_planes then DCH HUBP may be clock-gated.
547 * In that case, pageflip completion interrupts won't fire and pageflip
548 * completion events won't get delivered. Prevent this by sending
549 * pending pageflip events from here if a flip is still pending.
551 * If any planes are enabled, use dm_pflip_high_irq() instead, to
552 * avoid race conditions between flip programming and completion,
553 * which could cause too early flip completion events.
555 if (adev->family >= AMDGPU_FAMILY_RV &&
556 acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
557 acrtc->dm_irq_params.active_planes == 0) {
559 drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
561 drm_crtc_vblank_put(&acrtc->base);
563 acrtc->pflip_status = AMDGPU_FLIP_NONE;
566 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
569 static int dm_set_clockgating_state(void *handle,
570 enum amd_clockgating_state state)
575 static int dm_set_powergating_state(void *handle,
576 enum amd_powergating_state state)
581 /* Prototypes of private functions */
582 static int dm_early_init(void *handle);
584 /* Allocate memory for FBC compressed data */
585 static void amdgpu_dm_fbc_init(struct drm_connector *connector)
587 struct drm_device *dev = connector->dev;
588 struct amdgpu_device *adev = drm_to_adev(dev);
589 struct dm_compressor_info *compressor = &adev->dm.compressor;
590 struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
591 struct drm_display_mode *mode;
592 unsigned long max_size = 0;
594 if (adev->dm.dc->fbc_compressor == NULL)
597 if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)
600 if (compressor->bo_ptr)
604 list_for_each_entry(mode, &connector->modes, head) {
605 if (max_size < mode->htotal * mode->vtotal)
606 max_size = mode->htotal * mode->vtotal;
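/* Size the FBC buffer for the largest advertised mode, assuming 4 bytes per pixel. */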
610 int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
611 AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
612 &compressor->gpu_addr, &compressor->cpu_addr);
615 DRM_ERROR("DM: Failed to initialize FBC\n");
617 adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
618 DRM_INFO("DM: FBC alloc %lu\n", max_size*4);
625 static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
626 int pipe, bool *enabled,
627 unsigned char *buf, int max_bytes)
629 struct drm_device *dev = dev_get_drvdata(kdev);
630 struct amdgpu_device *adev = drm_to_adev(dev);
631 struct drm_connector *connector;
632 struct drm_connector_list_iter conn_iter;
633 struct amdgpu_dm_connector *aconnector;
638 mutex_lock(&adev->dm.audio_lock);
640 drm_connector_list_iter_begin(dev, &conn_iter);
641 drm_for_each_connector_iter(connector, &conn_iter) {
642 aconnector = to_amdgpu_dm_connector(connector);
643 if (aconnector->audio_inst != port)
647 ret = drm_eld_size(connector->eld);
648 memcpy(buf, connector->eld, min(max_bytes, ret));
652 drm_connector_list_iter_end(&conn_iter);
654 mutex_unlock(&adev->dm.audio_lock);
656 DRM_DEBUG_KMS("Get ELD : idx=%d ret=%d en=%d\n", port, ret, *enabled);
661 static const struct drm_audio_component_ops amdgpu_dm_audio_component_ops = {
662 .get_eld = amdgpu_dm_audio_component_get_eld,
665 static int amdgpu_dm_audio_component_bind(struct device *kdev,
666 struct device *hda_kdev, void *data)
668 struct drm_device *dev = dev_get_drvdata(kdev);
669 struct amdgpu_device *adev = drm_to_adev(dev);
670 struct drm_audio_component *acomp = data;
672 acomp->ops = &amdgpu_dm_audio_component_ops;
674 adev->dm.audio_component = acomp;
679 static void amdgpu_dm_audio_component_unbind(struct device *kdev,
680 struct device *hda_kdev, void *data)
682 struct drm_device *dev = dev_get_drvdata(kdev);
683 struct amdgpu_device *adev = drm_to_adev(dev);
684 struct drm_audio_component *acomp = data;
688 adev->dm.audio_component = NULL;
691 static const struct component_ops amdgpu_dm_audio_component_bind_ops = {
692 .bind = amdgpu_dm_audio_component_bind,
693 .unbind = amdgpu_dm_audio_component_unbind,
696 static int amdgpu_dm_audio_init(struct amdgpu_device *adev)
703 adev->mode_info.audio.enabled = true;
705 adev->mode_info.audio.num_pins = adev->dm.dc->res_pool->audio_count;
707 for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
708 adev->mode_info.audio.pin[i].channels = -1;
709 adev->mode_info.audio.pin[i].rate = -1;
710 adev->mode_info.audio.pin[i].bits_per_sample = -1;
711 adev->mode_info.audio.pin[i].status_bits = 0;
712 adev->mode_info.audio.pin[i].category_code = 0;
713 adev->mode_info.audio.pin[i].connected = false;
714 adev->mode_info.audio.pin[i].id =
715 adev->dm.dc->res_pool->audios[i]->inst;
716 adev->mode_info.audio.pin[i].offset = 0;
719 ret = component_add(adev->dev, &amdgpu_dm_audio_component_bind_ops);
723 adev->dm.audio_registered = true;
728 static void amdgpu_dm_audio_fini(struct amdgpu_device *adev)
733 if (!adev->mode_info.audio.enabled)
736 if (adev->dm.audio_registered) {
737 component_del(adev->dev, &amdgpu_dm_audio_component_bind_ops);
738 adev->dm.audio_registered = false;
741 /* TODO: Disable audio? */
743 adev->mode_info.audio.enabled = false;
746 static void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
748 struct drm_audio_component *acomp = adev->dm.audio_component;
750 if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
751 DRM_DEBUG_KMS("Notify ELD: %d\n", pin);
753 acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
758 static int dm_dmub_hw_init(struct amdgpu_device *adev)
760 const struct dmcub_firmware_header_v1_0 *hdr;
761 struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
762 struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
763 const struct firmware *dmub_fw = adev->dm.dmub_fw;
764 struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
765 struct abm *abm = adev->dm.dc->res_pool->abm;
766 struct dmub_srv_hw_params hw_params;
767 enum dmub_status status;
768 const unsigned char *fw_inst_const, *fw_bss_data;
769 uint32_t i, fw_inst_const_size, fw_bss_data_size;
773 /* DMUB isn't supported on the ASIC. */
777 DRM_ERROR("No framebuffer info for DMUB service.\n");
782 /* Firmware required for DMUB support. */
783 DRM_ERROR("No firmware provided for DMUB.\n");
787 status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support);
788 if (status != DMUB_STATUS_OK) {
789 DRM_ERROR("Error checking HW support for DMUB: %d\n", status);
793 if (!has_hw_support) {
794 DRM_INFO("DMUB unsupported on ASIC\n");
798 hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;
800 fw_inst_const = dmub_fw->data +
801 le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
804 fw_bss_data = dmub_fw->data +
805 le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
806 le32_to_cpu(hdr->inst_const_bytes);
808 /* Copy firmware and bios info into FB memory. */
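/* inst_const_bytes also covers the PSP signature header and footer, so
 * subtract them to get the size of the raw instruction constants that
 * are actually copied for the backdoor load.
 */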
809 fw_inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
810 PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
812 fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
814 /* if adev->firmware.load_type == AMDGPU_FW_LOAD_PSP,
815 * amdgpu_ucode_init_single_fw will load dmub firmware
816 * fw_inst_const part to cw0; otherwise, the firmware back door load
817 * will be done by dm_dmub_hw_init
819 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
820 memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
824 if (fw_bss_data_size)
825 memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr,
826 fw_bss_data, fw_bss_data_size);
828 /* Copy firmware bios info into FB memory. */
829 memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,
832 /* Reset regions that need to be reset. */
833 memset(fb_info->fb[DMUB_WINDOW_4_MAILBOX].cpu_addr, 0,
834 fb_info->fb[DMUB_WINDOW_4_MAILBOX].size);
836 memset(fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr, 0,
837 fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size);
839 memset(fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr, 0,
840 fb_info->fb[DMUB_WINDOW_6_FW_STATE].size);
842 /* Initialize hardware. */
843 memset(&hw_params, 0, sizeof(hw_params));
844 hw_params.fb_base = adev->gmc.fb_start;
845 hw_params.fb_offset = adev->gmc.aper_base;
847 /* backdoor load firmware and trigger dmub running */
848 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
849 hw_params.load_inst_const = true;
852 hw_params.psp_version = dmcu->psp_version;
854 for (i = 0; i < fb_info->num_fb; ++i)
855 hw_params.fb[i] = &fb_info->fb[i];
857 status = dmub_srv_hw_init(dmub_srv, &hw_params);
858 if (status != DMUB_STATUS_OK) {
859 DRM_ERROR("Error initializing DMUB HW: %d\n", status);
863 /* Wait for firmware load to finish. */
864 status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
865 if (status != DMUB_STATUS_OK)
866 DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);
868 /* Init DMCU and ABM if available. */
870 dmcu->funcs->dmcu_init(dmcu);
871 abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
874 adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
875 if (!adev->dm.dc->ctx->dmub_srv) {
876 DRM_ERROR("Couldn't allocate DC DMUB server!\n");
880 DRM_INFO("DMUB hardware initialized: version=0x%08X\n",
881 adev->dm.dmcub_fw_version);
886 #if defined(CONFIG_DRM_AMD_DC_DCN)
887 static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_addr_space_config *pa_config)
890 uint32_t logical_addr_low;
891 uint32_t logical_addr_high;
892 uint32_t agp_base, agp_bot, agp_top;
893 PHYSICAL_ADDRESS_LOC page_table_start, page_table_end, page_table_base;
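/* System aperture registers are programmed in 256KB units (the >> 18 /
 * << 18 below), the AGP aperture in 16MB units (>> 24), and GART page
 * table addresses in 4KB pages (>> 12).
 */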
895 logical_addr_low = min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18;
896 pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);
898 if (adev->apu_flags & AMD_APU_IS_RAVEN2)
900 * Raven2 has a HW issue preventing it from using vram that lies
901 * above MC_VM_SYSTEM_APERTURE_HIGH_ADDR. As a workaround, increase
902 * the system aperture high address (add 1) to get rid of the VM
903 * fault and hardware hang.
905 logical_addr_high = max((adev->gmc.fb_end >> 18) + 0x1, adev->gmc.agp_end >> 18);
907 logical_addr_high = max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18;
910 agp_bot = adev->gmc.agp_start >> 24;
911 agp_top = adev->gmc.agp_end >> 24;
914 page_table_start.high_part = (u32)(adev->gmc.gart_start >> 44) & 0xF;
915 page_table_start.low_part = (u32)(adev->gmc.gart_start >> 12);
916 page_table_end.high_part = (u32)(adev->gmc.gart_end >> 44) & 0xF;
917 page_table_end.low_part = (u32)(adev->gmc.gart_end >> 12);
918 page_table_base.high_part = upper_32_bits(pt_base) & 0xF;
919 page_table_base.low_part = lower_32_bits(pt_base);
921 pa_config->system_aperture.start_addr = (uint64_t)logical_addr_low << 18;
922 pa_config->system_aperture.end_addr = (uint64_t)logical_addr_high << 18;
924 pa_config->system_aperture.agp_base = (uint64_t)agp_base << 24;
925 pa_config->system_aperture.agp_bot = (uint64_t)agp_bot << 24;
926 pa_config->system_aperture.agp_top = (uint64_t)agp_top << 24;
928 pa_config->system_aperture.fb_base = adev->gmc.fb_start;
929 pa_config->system_aperture.fb_offset = adev->gmc.aper_base;
930 pa_config->system_aperture.fb_top = adev->gmc.fb_end;
932 pa_config->gart_config.page_table_start_addr = page_table_start.quad_part << 12;
933 pa_config->gart_config.page_table_end_addr = page_table_end.quad_part << 12;
934 pa_config->gart_config.page_table_base_addr = page_table_base.quad_part;
936 pa_config->is_hvm_enabled = 0;
941 static int amdgpu_dm_init(struct amdgpu_device *adev)
943 struct dc_init_data init_data;
944 #ifdef CONFIG_DRM_AMD_DC_HDCP
945 struct dc_callback_init init_params;
949 adev->dm.ddev = adev_to_drm(adev);
950 adev->dm.adev = adev;
952 /* Zero all the fields */
953 memset(&init_data, 0, sizeof(init_data));
954 #ifdef CONFIG_DRM_AMD_DC_HDCP
955 memset(&init_params, 0, sizeof(init_params));
958 mutex_init(&adev->dm.dc_lock);
959 mutex_init(&adev->dm.audio_lock);
961 if (amdgpu_dm_irq_init(adev)) {
962 DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
966 init_data.asic_id.chip_family = adev->family;
968 init_data.asic_id.pci_revision_id = adev->pdev->revision;
969 init_data.asic_id.hw_internal_rev = adev->external_rev_id;
971 init_data.asic_id.vram_width = adev->gmc.vram_width;
972 /* TODO: initialize init_data.asic_id.vram_type here!!!! */
973 init_data.asic_id.atombios_base_address =
974 adev->mode_info.atom_context->bios;
976 init_data.driver = adev;
978 adev->dm.cgs_device = amdgpu_cgs_create_device(adev);
980 if (!adev->dm.cgs_device) {
981 DRM_ERROR("amdgpu: failed to create cgs device.\n");
985 init_data.cgs_device = adev->dm.cgs_device;
987 init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;
989 switch (adev->asic_type) {
994 init_data.flags.gpu_vm_support = true;
995 if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
996 init_data.flags.disable_dmcu = true;
998 #if defined(CONFIG_DRM_AMD_DC_DCN)
1000 init_data.flags.gpu_vm_support = true;
1007 if (amdgpu_dc_feature_mask & DC_FBC_MASK)
1008 init_data.flags.fbc_support = true;
1010 if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK)
1011 init_data.flags.multi_mon_pp_mclk_switch = true;
1013 if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK)
1014 init_data.flags.disable_fractional_pwm = true;
1016 init_data.flags.power_down_display_on_boot = true;
1018 init_data.soc_bounding_box = adev->dm.soc_bounding_box;
1020 /* Display Core create. */
1021 adev->dm.dc = dc_create(&init_data);
1024 DRM_INFO("Display Core initialized with v%s!\n", DC_VER);
1026 DRM_INFO("Display Core failed to initialize with v%s!\n", DC_VER);
1030 if (amdgpu_dc_debug_mask & DC_DISABLE_PIPE_SPLIT) {
1031 adev->dm.dc->debug.force_single_disp_pipe_split = false;
1032 adev->dm.dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
1035 if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
1036 adev->dm.dc->debug.disable_stutter = amdgpu_pp_feature_mask & PP_STUTTER_MODE ? false : true;
1038 if (amdgpu_dc_debug_mask & DC_DISABLE_STUTTER)
1039 adev->dm.dc->debug.disable_stutter = true;
1041 if (amdgpu_dc_debug_mask & DC_DISABLE_DSC)
1042 adev->dm.dc->debug.disable_dsc = true;
1044 if (amdgpu_dc_debug_mask & DC_DISABLE_CLOCK_GATING)
1045 adev->dm.dc->debug.disable_clock_gate = true;
1047 r = dm_dmub_hw_init(adev);
1049 DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
1053 dc_hardware_init(adev->dm.dc);
1055 #if defined(CONFIG_DRM_AMD_DC_DCN)
1056 if (adev->apu_flags) {
1057 struct dc_phy_addr_space_config pa_config;
1059 mmhub_read_system_context(adev, &pa_config);
1061 // Call the DC init_memory func
1062 dc_setup_system_context(adev->dm.dc, &pa_config);
1066 adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
1067 if (!adev->dm.freesync_module) {
1069 "amdgpu: failed to initialize freesync_module.\n");
1071 DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
1072 adev->dm.freesync_module);
1074 amdgpu_dm_init_color_mod();
1076 #ifdef CONFIG_DRM_AMD_DC_HDCP
1077 if (adev->dm.dc->caps.max_links > 0 && adev->asic_type >= CHIP_RAVEN) {
1078 adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);
1080 if (!adev->dm.hdcp_workqueue)
1081 DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n");
1083 DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);
1085 dc_init_callbacks(adev->dm.dc, &init_params);
1088 if (amdgpu_dm_initialize_drm_device(adev)) {
1090 "amdgpu: failed to initialize sw for display support.\n");
1094 /* create fake encoders for MST */
1095 dm_dp_create_fake_mst_encoders(adev);
1097 /* TODO: Add_display_info? */
1099 /* TODO use dynamic cursor width */
1100 adev_to_drm(adev)->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
1101 adev_to_drm(adev)->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;
1103 if (drm_vblank_init(adev_to_drm(adev), adev->dm.display_indexes_num)) {
1105 "amdgpu: failed to initialize sw for display support.\n");
1110 DRM_DEBUG_DRIVER("KMS initialized.\n");
1114 amdgpu_dm_fini(adev);
1119 static void amdgpu_dm_fini(struct amdgpu_device *adev)
1123 for (i = 0; i < adev->dm.display_indexes_num; i++) {
1124 drm_encoder_cleanup(&adev->dm.mst_encoders[i].base);
1127 amdgpu_dm_audio_fini(adev);
1129 amdgpu_dm_destroy_drm_device(&adev->dm);
1131 #ifdef CONFIG_DRM_AMD_DC_HDCP
1132 if (adev->dm.hdcp_workqueue) {
1133 hdcp_destroy(adev->dm.hdcp_workqueue);
1134 adev->dm.hdcp_workqueue = NULL;
1138 dc_deinit_callbacks(adev->dm.dc);
1140 if (adev->dm.dc->ctx->dmub_srv) {
1141 dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);
1142 adev->dm.dc->ctx->dmub_srv = NULL;
1145 if (adev->dm.dmub_bo)
1146 amdgpu_bo_free_kernel(&adev->dm.dmub_bo,
1147 &adev->dm.dmub_bo_gpu_addr,
1148 &adev->dm.dmub_bo_cpu_addr);
1150 /* DC Destroy TODO: Replace destroy DAL */
1152 dc_destroy(&adev->dm.dc);
1154 * TODO: pageflip, vblank interrupt
1156 * amdgpu_dm_irq_fini(adev);
1159 if (adev->dm.cgs_device) {
1160 amdgpu_cgs_destroy_device(adev->dm.cgs_device);
1161 adev->dm.cgs_device = NULL;
1163 if (adev->dm.freesync_module) {
1164 mod_freesync_destroy(adev->dm.freesync_module);
1165 adev->dm.freesync_module = NULL;
1168 mutex_destroy(&adev->dm.audio_lock);
1169 mutex_destroy(&adev->dm.dc_lock);
1174 static int load_dmcu_fw(struct amdgpu_device *adev)
1176 const char *fw_name_dmcu = NULL;
1178 const struct dmcu_firmware_header_v1_0 *hdr;
1180 switch (adev->asic_type) {
1181 #if defined(CONFIG_DRM_AMD_DC_SI)
1196 case CHIP_POLARIS11:
1197 case CHIP_POLARIS10:
1198 case CHIP_POLARIS12:
1206 case CHIP_SIENNA_CICHLID:
1207 case CHIP_NAVY_FLOUNDER:
1208 case CHIP_DIMGREY_CAVEFISH:
1212 fw_name_dmcu = FIRMWARE_NAVI12_DMCU;
1215 if (ASICREV_IS_PICASSO(adev->external_rev_id))
1216 fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
1217 else if (ASICREV_IS_RAVEN2(adev->external_rev_id))
1218 fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
1223 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
1227 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
1228 DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n");
1232 r = request_firmware_direct(&adev->dm.fw_dmcu, fw_name_dmcu, adev->dev);
1234 /* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
1235 DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
1236 adev->dm.fw_dmcu = NULL;
1240 dev_err(adev->dev, "amdgpu_dm: Can't load firmware \"%s\"\n",
1245 r = amdgpu_ucode_validate(adev->dm.fw_dmcu);
1247 dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
1249 release_firmware(adev->dm.fw_dmcu);
1250 adev->dm.fw_dmcu = NULL;
1254 hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data;
1255 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM;
1256 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu;
1257 adev->firmware.fw_size +=
1258 ALIGN(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1260 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV;
1261 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu;
1262 adev->firmware.fw_size +=
1263 ALIGN(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1265 adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version);
1267 DRM_DEBUG_KMS("PSP loading DMCU firmware\n");
1272 static uint32_t amdgpu_dm_dmub_reg_read(void *ctx, uint32_t address)
1274 struct amdgpu_device *adev = ctx;
1276 return dm_read_reg(adev->dm.dc->ctx, address);
1279 static void amdgpu_dm_dmub_reg_write(void *ctx, uint32_t address,
1282 struct amdgpu_device *adev = ctx;
1284 return dm_write_reg(adev->dm.dc->ctx, address, value);
1287 static int dm_dmub_sw_init(struct amdgpu_device *adev)
1289 struct dmub_srv_create_params create_params;
1290 struct dmub_srv_region_params region_params;
1291 struct dmub_srv_region_info region_info;
1292 struct dmub_srv_fb_params fb_params;
1293 struct dmub_srv_fb_info *fb_info;
1294 struct dmub_srv *dmub_srv;
1295 const struct dmcub_firmware_header_v1_0 *hdr;
1296 const char *fw_name_dmub;
1297 enum dmub_asic dmub_asic;
1298 enum dmub_status status;
1301 switch (adev->asic_type) {
1303 dmub_asic = DMUB_ASIC_DCN21;
1304 fw_name_dmub = FIRMWARE_RENOIR_DMUB;
1305 if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
1306 fw_name_dmub = FIRMWARE_GREEN_SARDINE_DMUB;
1308 case CHIP_SIENNA_CICHLID:
1309 dmub_asic = DMUB_ASIC_DCN30;
1310 fw_name_dmub = FIRMWARE_SIENNA_CICHLID_DMUB;
1312 case CHIP_NAVY_FLOUNDER:
1313 dmub_asic = DMUB_ASIC_DCN30;
1314 fw_name_dmub = FIRMWARE_NAVY_FLOUNDER_DMUB;
1317 dmub_asic = DMUB_ASIC_DCN301;
1318 fw_name_dmub = FIRMWARE_VANGOGH_DMUB;
1320 case CHIP_DIMGREY_CAVEFISH:
1321 dmub_asic = DMUB_ASIC_DCN302;
1322 fw_name_dmub = FIRMWARE_DIMGREY_CAVEFISH_DMUB;
1326 /* ASIC doesn't support DMUB. */
1330 r = request_firmware_direct(&adev->dm.dmub_fw, fw_name_dmub, adev->dev);
1332 DRM_ERROR("DMUB firmware loading failed: %d\n", r);
1336 r = amdgpu_ucode_validate(adev->dm.dmub_fw);
1338 DRM_ERROR("Couldn't validate DMUB firmware: %d\n", r);
1342 hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;
1344 if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
1345 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
1346 AMDGPU_UCODE_ID_DMCUB;
1347 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw =
1349 adev->firmware.fw_size +=
1350 ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);
1352 DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
1353 adev->dm.dmcub_fw_version);
1356 adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);
1358 adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL);
1359 dmub_srv = adev->dm.dmub_srv;
1362 DRM_ERROR("Failed to allocate DMUB service!\n");
1366 memset(&create_params, 0, sizeof(create_params));
1367 create_params.user_ctx = adev;
1368 create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read;
1369 create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write;
1370 create_params.asic = dmub_asic;
1372 /* Create the DMUB service. */
1373 status = dmub_srv_create(dmub_srv, &create_params);
1374 if (status != DMUB_STATUS_OK) {
1375 DRM_ERROR("Error creating DMUB service: %d\n", status);
1379 /* Calculate the size of all the regions for the DMUB service. */
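/* DMUB memory is laid out in two passes: dmub_srv_calc_region_info() sizes
 * each window from the firmware headers, and once a single buffer of
 * region_info.fb_size has been allocated, dmub_srv_calc_fb_info() rebases
 * every window onto that buffer.
 */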
1380 memset(®ion_params, 0, sizeof(region_params));
1382 region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
1383 PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
1384 region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
1385 region_params.vbios_size = adev->bios_size;
1386 region_params.fw_bss_data = region_params.bss_data_size ?
1387 adev->dm.dmub_fw->data +
1388 le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1389 le32_to_cpu(hdr->inst_const_bytes) : NULL;
1390 region_params.fw_inst_const =
1391 adev->dm.dmub_fw->data +
1392 le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1395 status = dmub_srv_calc_region_info(dmub_srv, &region_params,
1398 if (status != DMUB_STATUS_OK) {
1399 DRM_ERROR("Error calculating DMUB region info: %d\n", status);
1404 * Allocate a framebuffer based on the total size of all the regions.
1405 * TODO: Move this into GART.
1407 r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE,
1408 AMDGPU_GEM_DOMAIN_VRAM, &adev->dm.dmub_bo,
1409 &adev->dm.dmub_bo_gpu_addr,
1410 &adev->dm.dmub_bo_cpu_addr);
1414 /* Rebase the regions on the framebuffer address. */
1415 memset(&fb_params, 0, sizeof(fb_params));
1416 fb_params.cpu_addr = adev->dm.dmub_bo_cpu_addr;
1417 fb_params.gpu_addr = adev->dm.dmub_bo_gpu_addr;
1418 fb_params.region_info = &region_info;
1420 adev->dm.dmub_fb_info =
1421 kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL);
1422 fb_info = adev->dm.dmub_fb_info;
1426 "Failed to allocate framebuffer info for DMUB service!\n");
1430 status = dmub_srv_calc_fb_info(dmub_srv, &fb_params, fb_info);
1431 if (status != DMUB_STATUS_OK) {
1432 DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
1439 static int dm_sw_init(void *handle)
1441 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1444 r = dm_dmub_sw_init(adev);
1448 return load_dmcu_fw(adev);
1451 static int dm_sw_fini(void *handle)
1453 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1455 kfree(adev->dm.dmub_fb_info);
1456 adev->dm.dmub_fb_info = NULL;
1458 if (adev->dm.dmub_srv) {
1459 dmub_srv_destroy(adev->dm.dmub_srv);
1460 adev->dm.dmub_srv = NULL;
1463 release_firmware(adev->dm.dmub_fw);
1464 adev->dm.dmub_fw = NULL;
1466 release_firmware(adev->dm.fw_dmcu);
1467 adev->dm.fw_dmcu = NULL;
1472 static int detect_mst_link_for_all_connectors(struct drm_device *dev)
1474 struct amdgpu_dm_connector *aconnector;
1475 struct drm_connector *connector;
1476 struct drm_connector_list_iter iter;
1479 drm_connector_list_iter_begin(dev, &iter);
1480 drm_for_each_connector_iter(connector, &iter) {
1481 aconnector = to_amdgpu_dm_connector(connector);
1482 if (aconnector->dc_link->type == dc_connection_mst_branch &&
1483 aconnector->mst_mgr.aux) {
1484 DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
1486 aconnector->base.base.id);
1488 ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
1490 DRM_ERROR("DM_MST: Failed to start MST\n");
1491 aconnector->dc_link->type =
1492 dc_connection_single;
1497 drm_connector_list_iter_end(&iter);
1502 static int dm_late_init(void *handle)
1504 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1506 struct dmcu_iram_parameters params;
1507 unsigned int linear_lut[16];
1509 struct dmcu *dmcu = NULL;
1512 dmcu = adev->dm.dc->res_pool->dmcu;
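/* Build a 16-entry linear (identity) backlight transfer LUT spanning
 * 0..0xFFFF for the ABM/IRAM parameters below.
 */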
1514 for (i = 0; i < 16; i++)
1515 linear_lut[i] = 0xFFFF * i / 15;
1518 params.backlight_ramping_start = 0xCCCC;
1519 params.backlight_ramping_reduction = 0xCCCCCCCC;
1520 params.backlight_lut_array_size = 16;
1521 params.backlight_lut_array = linear_lut;
1523 /* Min backlight level after ABM reduction; don't allow below 1%:
1524 * 0xFFFF x 0.01 = 0x28F
1526 params.min_abm_backlight = 0x28F;
1528 /* In the case where ABM is implemented on dmcub,
1529 * the dmcu object will be NULL.
1530 * ABM 2.4 and up are implemented on dmcub.
1533 ret = dmcu_load_iram(dmcu, params);
1534 else if (adev->dm.dc->ctx->dmub_srv)
1535 ret = dmub_init_abm_config(adev->dm.dc->res_pool, params);
1540 return detect_mst_link_for_all_connectors(adev_to_drm(adev));
1543 static void s3_handle_mst(struct drm_device *dev, bool suspend)
1545 struct amdgpu_dm_connector *aconnector;
1546 struct drm_connector *connector;
1547 struct drm_connector_list_iter iter;
1548 struct drm_dp_mst_topology_mgr *mgr;
1550 bool need_hotplug = false;
1552 drm_connector_list_iter_begin(dev, &iter);
1553 drm_for_each_connector_iter(connector, &iter) {
1554 aconnector = to_amdgpu_dm_connector(connector);
1555 if (aconnector->dc_link->type != dc_connection_mst_branch ||
1556 aconnector->mst_port)
1559 mgr = &aconnector->mst_mgr;
1562 drm_dp_mst_topology_mgr_suspend(mgr);
1564 ret = drm_dp_mst_topology_mgr_resume(mgr, true);
1566 drm_dp_mst_topology_mgr_set_mst(mgr, false);
1567 need_hotplug = true;
1571 drm_connector_list_iter_end(&iter);
1574 drm_kms_helper_hotplug_event(dev);
1577 static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
1579 struct smu_context *smu = &adev->smu;
1582 if (!is_support_sw_smu(adev))
1585 /* This interface is for dGPU Navi1x. The Linux dc-pplib interface depends
1586 * on the Windows driver dc implementation.
1587 * For Navi1x, clock settings of dcn watermarks are fixed. The settings
1588 * should be passed to smu during boot up and resume from s3.
1589 * boot up: dc calculates dcn watermark clock settings within dc_create,
1590 * dcn20_resource_construct
1591 * then call pplib functions below to pass the settings to smu:
1592 * smu_set_watermarks_for_clock_ranges
1593 * smu_set_watermarks_table
1594 * navi10_set_watermarks_table
1595 * smu_write_watermarks_table
1597 * For Renoir, clock settings of dcn watermarks are also fixed values.
1598 * dc has implemented a different flow for the Windows driver:
1599 * dc_hardware_init / dc_set_power_state
1604 * smu_set_watermarks_for_clock_ranges
1605 * renoir_set_watermarks_table
1606 * smu_write_watermarks_table
1609 * dc_hardware_init -> amdgpu_dm_init
1610 * dc_set_power_state --> dm_resume
1612 * therefore, this function applies to navi10/12/14 but not Renoir
1615 switch (adev->asic_type) {
1624 ret = smu_write_watermarks_table(smu);
1626 DRM_ERROR("Failed to update WMTABLE!\n");
1634 * dm_hw_init() - Initialize DC device
1635 * @handle: The base driver device containing the amdgpu_dm device.
1637 * Initialize the &struct amdgpu_display_manager device. This involves calling
1638 * the initializers of each DM component, then populating the struct with them.
1640 * Although the function implies hardware initialization, both hardware and
1641 * software are initialized here. Splitting them out to their relevant init
1642 * hooks is a future TODO item.
1644 * Some notable things that are initialized here:
1646 * - Display Core, both software and hardware
1647 * - DC modules that we need (freesync and color management)
1648 * - DRM software states
1649 * - Interrupt sources and handlers
1651 * - Debug FS entries, if enabled
1653 static int dm_hw_init(void *handle)
1655 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1656 /* Create DAL display manager */
1657 amdgpu_dm_init(adev);
1658 amdgpu_dm_hpd_init(adev);
1664 * dm_hw_fini() - Teardown DC device
1665 * @handle: The base driver device containing the amdgpu_dm device.
1667 * Teardown components within &struct amdgpu_display_manager that require
1668 * cleanup. This involves cleaning up the DRM device, DC, and any modules that
1669 * were loaded. Also flush IRQ workqueues and disable them.
1671 static int dm_hw_fini(void *handle)
1673 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1675 amdgpu_dm_hpd_fini(adev);
1677 amdgpu_dm_irq_fini(adev);
1678 amdgpu_dm_fini(adev);
1683 static int dm_enable_vblank(struct drm_crtc *crtc);
1684 static void dm_disable_vblank(struct drm_crtc *crtc);
1686 static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev,
1687 struct dc_state *state, bool enable)
1689 enum dc_irq_source irq_source;
1690 struct amdgpu_crtc *acrtc;
1694 for (i = 0; i < state->stream_count; i++) {
1695 acrtc = get_crtc_by_otg_inst(
1696 adev, state->stream_status[i].primary_otg_inst);
1698 if (acrtc && state->stream_status[i].plane_count != 0) {
1699 irq_source = IRQ_TYPE_PFLIP + acrtc->otg_inst;
1700 rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
1701 DRM_DEBUG("crtc %d - vupdate irq %sabling: r=%d\n",
1702 acrtc->crtc_id, enable ? "en" : "dis", rc);
1704 DRM_WARN("Failed to %s pflip interrupts\n",
1705 enable ? "enable" : "disable");
1708 rc = dm_enable_vblank(&acrtc->base);
1710 DRM_WARN("Failed to enable vblank interrupts\n");
1712 dm_disable_vblank(&acrtc->base);
1720 static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc)
1722 struct dc_state *context = NULL;
1723 enum dc_status res = DC_ERROR_UNEXPECTED;
1725 struct dc_stream_state *del_streams[MAX_PIPES];
1726 int del_streams_count = 0;
1728 memset(del_streams, 0, sizeof(del_streams));
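/* Build a copy of the current context with every stream (and its planes)
 * removed, then commit it so the hardware is left driving no streams.
 */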
1730 context = dc_create_state(dc);
1731 if (context == NULL)
1732 goto context_alloc_fail;
1734 dc_resource_state_copy_construct_current(dc, context);
1736 /* First remove from context all streams */
1737 for (i = 0; i < context->stream_count; i++) {
1738 struct dc_stream_state *stream = context->streams[i];
1740 del_streams[del_streams_count++] = stream;
1743 /* Remove all planes for removed streams and then remove the streams */
1744 for (i = 0; i < del_streams_count; i++) {
1745 if (!dc_rem_all_planes_for_stream(dc, del_streams[i], context)) {
1746 res = DC_FAIL_DETACH_SURFACES;
1750 res = dc_remove_stream_from_ctx(dc, context, del_streams[i]);
1756 res = dc_validate_global_state(dc, context, false);
1759 DRM_ERROR("%s:resource validation failed, dc_status:%d\n", __func__, res);
1763 res = dc_commit_state(dc, context);
1766 dc_release_state(context);
1772 static int dm_suspend(void *handle)
1774 struct amdgpu_device *adev = handle;
1775 struct amdgpu_display_manager *dm = &adev->dm;
1778 if (amdgpu_in_reset(adev)) {
1779 mutex_lock(&dm->dc_lock);
1781 #if defined(CONFIG_DRM_AMD_DC_DCN)
1782 dc_allow_idle_optimizations(adev->dm.dc, false);
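/* Cache the current DC state so dm_resume() can replay it once the GPU
 * reset completes.
 */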
1785 dm->cached_dc_state = dc_copy_state(dm->dc->current_state);
1787 dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false);
1789 amdgpu_dm_commit_zero_streams(dm->dc);
1791 amdgpu_dm_irq_suspend(adev);
1796 WARN_ON(adev->dm.cached_state);
1797 adev->dm.cached_state = drm_atomic_helper_suspend(adev_to_drm(adev));
1799 s3_handle_mst(adev_to_drm(adev), true);
1801 amdgpu_dm_irq_suspend(adev);
1804 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
1809 static struct amdgpu_dm_connector *
1810 amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
1811 struct drm_crtc *crtc)
1814 struct drm_connector_state *new_con_state;
1815 struct drm_connector *connector;
1816 struct drm_crtc *crtc_from_state;
1818 for_each_new_connector_in_state(state, connector, new_con_state, i) {
1819 crtc_from_state = new_con_state->crtc;
1821 if (crtc_from_state == crtc)
1822 return to_amdgpu_dm_connector(connector);
1828 static void emulated_link_detect(struct dc_link *link)
1830 struct dc_sink_init_data sink_init_data = { 0 };
1831 struct display_sink_capability sink_caps = { 0 };
1832 enum dc_edid_status edid_status;
1833 struct dc_context *dc_ctx = link->ctx;
1834 struct dc_sink *sink = NULL;
1835 struct dc_sink *prev_sink = NULL;
1837 link->type = dc_connection_none;
1838 prev_sink = link->local_sink;
1840 if (prev_sink != NULL)
1841 dc_sink_retain(prev_sink);
1843 switch (link->connector_signal) {
1844 case SIGNAL_TYPE_HDMI_TYPE_A: {
1845 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1846 sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
1850 case SIGNAL_TYPE_DVI_SINGLE_LINK: {
1851 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1852 sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
1856 case SIGNAL_TYPE_DVI_DUAL_LINK: {
1857 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1858 sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
1862 case SIGNAL_TYPE_LVDS: {
1863 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1864 sink_caps.signal = SIGNAL_TYPE_LVDS;
1868 case SIGNAL_TYPE_EDP: {
1869 sink_caps.transaction_type =
1870 DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
1871 sink_caps.signal = SIGNAL_TYPE_EDP;
1875 case SIGNAL_TYPE_DISPLAY_PORT: {
1876 sink_caps.transaction_type =
1877 DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
1878 sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
1883 DC_ERROR("Invalid connector type! signal:%d\n",
1884 link->connector_signal);
1888 sink_init_data.link = link;
1889 sink_init_data.sink_signal = sink_caps.signal;
1891 sink = dc_sink_create(&sink_init_data);
1893 DC_ERROR("Failed to create sink!\n");
1897 /* dc_sink_create returns a new reference */
1898 link->local_sink = sink;
1900 edid_status = dm_helpers_read_local_edid(
1905 if (edid_status != EDID_OK)
1906 DC_ERROR("Failed to read EDID");
1910 static void dm_gpureset_commit_state(struct dc_state *dc_state,
1911 struct amdgpu_display_manager *dm)
1914 struct dc_surface_update surface_updates[MAX_SURFACES];
1915 struct dc_plane_info plane_infos[MAX_SURFACES];
1916 struct dc_scaling_info scaling_infos[MAX_SURFACES];
1917 struct dc_flip_addrs flip_addrs[MAX_SURFACES];
1918 struct dc_stream_update stream_update;
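/* After a GPU reset, force a full update on every cached surface so the
 * whole state is reprogrammed when each stream's updates are committed
 * back to DC below.
 */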
1922 bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
1925 dm_error("Failed to allocate update bundle\n");
1929 for (k = 0; k < dc_state->stream_count; k++) {
1930 bundle->stream_update.stream = dc_state->streams[k];
1932 for (m = 0; m < dc_state->stream_status->plane_count; m++) {
1933 bundle->surface_updates[m].surface =
1934 dc_state->stream_status->plane_states[m];
1935 bundle->surface_updates[m].surface->force_full_update =
1938 dc_commit_updates_for_stream(
1939 dm->dc, bundle->surface_updates,
1940 dc_state->stream_status->plane_count,
1941 dc_state->streams[k], &bundle->stream_update, dc_state);
1950 static void dm_set_dpms_off(struct dc_link *link)
1952 struct dc_stream_state *stream_state;
1953 struct amdgpu_dm_connector *aconnector = link->priv;
1954 struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
1955 struct dc_stream_update stream_update;
1956 bool dpms_off = true;
1958 memset(&stream_update, 0, sizeof(stream_update));
1959 stream_update.dpms_off = &dpms_off;
1961 mutex_lock(&adev->dm.dc_lock);
1962 stream_state = dc_stream_find_from_link(link);
1964 if (stream_state == NULL) {
1965 DRM_DEBUG_DRIVER("Error finding stream state associated with link!\n");
1966 mutex_unlock(&adev->dm.dc_lock);
1970 stream_update.stream = stream_state;
1971 dc_commit_updates_for_stream(stream_state->ctx->dc, NULL, 0,
1972 stream_state, &stream_update,
1973 stream_state->ctx->dc->current_state);
1974 mutex_unlock(&adev->dm.dc_lock);
1977 static int dm_resume(void *handle)
1979 struct amdgpu_device *adev = handle;
1980 struct drm_device *ddev = adev_to_drm(adev);
1981 struct amdgpu_display_manager *dm = &adev->dm;
1982 struct amdgpu_dm_connector *aconnector;
1983 struct drm_connector *connector;
1984 struct drm_connector_list_iter iter;
1985 struct drm_crtc *crtc;
1986 struct drm_crtc_state *new_crtc_state;
1987 struct dm_crtc_state *dm_new_crtc_state;
1988 struct drm_plane *plane;
1989 struct drm_plane_state *new_plane_state;
1990 struct dm_plane_state *dm_new_plane_state;
1991 struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
1992 enum dc_connection_type new_connection_type = dc_connection_none;
1993 struct dc_state *dc_state;
1996 if (amdgpu_in_reset(adev)) {
1997 dc_state = dm->cached_dc_state;
1999 r = dm_dmub_hw_init(adev);
2001 DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
2003 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2006 amdgpu_dm_irq_resume_early(adev);
2008 for (i = 0; i < dc_state->stream_count; i++) {
2009 dc_state->streams[i]->mode_changed = true;
2010 for (j = 0; j < dc_state->stream_status->plane_count; j++) {
2011 dc_state->stream_status->plane_states[j]->update_flags.raw
2016 WARN_ON(!dc_commit_state(dm->dc, dc_state));
2018 dm_gpureset_commit_state(dm->cached_dc_state, dm);
2020 dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, true);
2022 dc_release_state(dm->cached_dc_state);
2023 dm->cached_dc_state = NULL;
2025 amdgpu_dm_irq_resume_late(adev);
2027 mutex_unlock(&dm->dc_lock);
2031 /* Recreate dc_state - DC invalidates it when setting power state to S3. */
2032 dc_release_state(dm_state->context);
2033 dm_state->context = dc_create_state(dm->dc);
2034 /* TODO: Remove dc_state->dccg, use dc->dccg directly. */
2035 dc_resource_state_construct(dm->dc, dm_state->context);
2037 /* Before powering on DC we need to re-initialize DMUB. */
2038 r = dm_dmub_hw_init(adev);
2040 DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
2042 /* power on hardware */
2043 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2045 /* program HPD filter */
2049 * early enable HPD Rx IRQ, should be done before setting the mode as short
2050 * pulse interrupts are used for MST
2052 amdgpu_dm_irq_resume_early(adev);
2054 /* On resume we need to rewrite the MSTM control bits to enable MST */
2055 s3_handle_mst(ddev, false);
2058 drm_connector_list_iter_begin(ddev, &iter);
2059 drm_for_each_connector_iter(connector, &iter) {
2060 aconnector = to_amdgpu_dm_connector(connector);
2063 * this is the case when traversing through already created
2064 * MST connectors, should be skipped
2066 if (aconnector->mst_port)
2069 mutex_lock(&aconnector->hpd_lock);
2070 if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2071 DRM_ERROR("KMS: Failed to detect connector\n");
2073 if (aconnector->base.force && new_connection_type == dc_connection_none)
2074 emulated_link_detect(aconnector->dc_link);
2076 dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
2078 if (aconnector->fake_enable && aconnector->dc_link->local_sink)
2079 aconnector->fake_enable = false;
2081 if (aconnector->dc_sink)
2082 dc_sink_release(aconnector->dc_sink);
2083 aconnector->dc_sink = NULL;
2084 amdgpu_dm_update_connector_after_detect(aconnector);
2085 mutex_unlock(&aconnector->hpd_lock);
2087 drm_connector_list_iter_end(&iter);
2089 /* Force mode set in atomic commit */
2090 for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
2091 new_crtc_state->active_changed = true;
2094 * atomic_check is expected to create the dc states. We need to release
2095 * them here, since they were duplicated as part of the suspend
2098 for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
2099 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
2100 if (dm_new_crtc_state->stream) {
2101 WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
2102 dc_stream_release(dm_new_crtc_state->stream);
2103 dm_new_crtc_state->stream = NULL;
2107 for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
2108 dm_new_plane_state = to_dm_plane_state(new_plane_state);
2109 if (dm_new_plane_state->dc_state) {
2110 WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
2111 dc_plane_state_release(dm_new_plane_state->dc_state);
2112 dm_new_plane_state->dc_state = NULL;
2116 drm_atomic_helper_resume(ddev, dm->cached_state);
2118 dm->cached_state = NULL;
2120 amdgpu_dm_irq_resume_late(adev);
2122 amdgpu_dm_smu_write_watermarks_table(adev);
2130 * DM (and consequently DC) is registered in the amdgpu base driver as an IP
2131 * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to
2132 * the base driver's device list to be initialized and torn down accordingly.
2134 * The functions to do so are provided as hooks in &struct amd_ip_funcs.
2137 static const struct amd_ip_funcs amdgpu_dm_funcs = {
2139 .early_init = dm_early_init,
2140 .late_init = dm_late_init,
2141 .sw_init = dm_sw_init,
2142 .sw_fini = dm_sw_fini,
2143 .hw_init = dm_hw_init,
2144 .hw_fini = dm_hw_fini,
2145 .suspend = dm_suspend,
2146 .resume = dm_resume,
2147 .is_idle = dm_is_idle,
2148 .wait_for_idle = dm_wait_for_idle,
2149 .check_soft_reset = dm_check_soft_reset,
2150 .soft_reset = dm_soft_reset,
2151 .set_clockgating_state = dm_set_clockgating_state,
2152 .set_powergating_state = dm_set_powergating_state,
2155 const struct amdgpu_ip_block_version dm_ip_block =
2157 .type = AMD_IP_BLOCK_TYPE_DCE,
2161 .funcs = &amdgpu_dm_funcs,
2171 static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
2172 .fb_create = amdgpu_display_user_framebuffer_create,
2173 .get_format_info = amd_get_format_info,
2174 .output_poll_changed = drm_fb_helper_output_poll_changed,
2175 .atomic_check = amdgpu_dm_atomic_check,
2176 .atomic_commit = drm_atomic_helper_commit,
2179 static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
2180 .atomic_commit_tail = amdgpu_dm_atomic_commit_tail
2183 static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
2185 u32 max_cll, min_cll, max, min, q, r;
2186 struct amdgpu_dm_backlight_caps *caps;
2187 struct amdgpu_display_manager *dm;
2188 struct drm_connector *conn_base;
2189 struct amdgpu_device *adev;
2190 struct dc_link *link = NULL;
2191 static const u8 pre_computed_values[] = {
2192 50, 51, 52, 53, 55, 56, 57, 58, 59, 61, 62, 63, 65, 66, 68, 69,
2193 71, 72, 74, 75, 77, 79, 81, 82, 84, 86, 88, 90, 92, 94, 96, 98};
2195 if (!aconnector || !aconnector->dc_link)
2198 link = aconnector->dc_link;
2199 if (link->connector_signal != SIGNAL_TYPE_EDP)
2202 conn_base = &aconnector->base;
2203 adev = drm_to_adev(conn_base->dev);
2205 caps = &dm->backlight_caps;
2206 caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
2207 caps->aux_support = false;
2208 max_cll = conn_base->hdr_sink_metadata.hdmi_type1.max_cll;
2209 min_cll = conn_base->hdr_sink_metadata.hdmi_type1.min_cll;
2211 if (caps->ext_caps->bits.oled == 1 ||
2212 caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
2213 caps->ext_caps->bits.hdr_aux_backlight_control == 1)
2214 caps->aux_support = true;
2216 /* From the specification (CTA-861-G), for calculating the maximum
2217 * luminance we need to use:
2218 * Luminance = 50*2**(CV/32)
2219 * Where CV is a one-byte value.
2220 * Evaluating this expression directly would need floating point precision;
2221 * to avoid that complexity we take advantage of the fact that CV is divided
2222 * by a constant. From Euclid's division algorithm, we know that CV can be
2223 * written as: CV = 32*q + r. Substituting CV in the Luminance expression
2224 * gives 50*(2**q)*(2**(r/32)), so we only need to pre-compute the values
2225 * of 2**(r/32). For pre-computing those values we used the following
2226 * Ruby line:
2227 * (0...32).each {|cv| puts (50*2**(cv/32.0)).round}
2228 * The results of the above expression can be checked against
2229 * pre_computed_values.
2231 q = max_cll >> 5;
2232 r = max_cll % 32;
2233 max = (1 << q) * pre_computed_values[r];
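/* Illustrative example: for max_cll = 70 we get q = 2 and r = 6, so
 * max = (1 << 2) * pre_computed_values[6] = 4 * 57 = 228, which matches
 * round(50 * 2**(70/32)) = 228. */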
2235 // min luminance: maxLum * (CV/255)^2 / 100
2236 q = DIV_ROUND_CLOSEST(min_cll, 255);
2237 min = max * DIV_ROUND_CLOSEST((q * q), 100);
2239 caps->aux_max_input_signal = max;
2240 caps->aux_min_input_signal = min;
2243 void amdgpu_dm_update_connector_after_detect(
2244 struct amdgpu_dm_connector *aconnector)
2246 struct drm_connector *connector = &aconnector->base;
2247 struct drm_device *dev = connector->dev;
2248 struct dc_sink *sink;
2250 /* MST handled by drm_mst framework */
2251 if (aconnector->mst_mgr.mst_state == true)
2254 sink = aconnector->dc_link->local_sink;
2256 dc_sink_retain(sink);
2259 * An EDID-managed connector gets its first update only in the mode_valid hook,
2260 * and the connector sink is then set to either a fake or a physical sink depending on link status.
2261 * Skip if this was already done during boot.
2263 if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
2264 && aconnector->dc_em_sink) {
2267 * For headless S3 resume, use the emulated sink (dc_em_sink) to fake a stream,
2268 * because connector->sink is set to NULL on resume.
2270 mutex_lock(&dev->mode_config.mutex);
2273 if (aconnector->dc_sink) {
2274 amdgpu_dm_update_freesync_caps(connector, NULL);
2276 * The retain and release below are used to bump up the refcount of the
2277 * sink, because the link doesn't point to it anymore after disconnect;
2278 * otherwise, on the next crtc-to-connector reshuffle by the UMD we would
2279 * run into an unwanted dc_sink release.
2281 dc_sink_release(aconnector->dc_sink);
2283 aconnector->dc_sink = sink;
2284 dc_sink_retain(aconnector->dc_sink);
2285 amdgpu_dm_update_freesync_caps(connector,
2288 amdgpu_dm_update_freesync_caps(connector, NULL);
2289 if (!aconnector->dc_sink) {
2290 aconnector->dc_sink = aconnector->dc_em_sink;
2291 dc_sink_retain(aconnector->dc_sink);
2295 mutex_unlock(&dev->mode_config.mutex);
2298 dc_sink_release(sink);
2303 * TODO: temporary guard while a proper fix is found.
2304 * If this sink is an MST sink, we should not do anything.
2306 if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
2307 dc_sink_release(sink);
2311 if (aconnector->dc_sink == sink) {
2313 * We got a DP short pulse (Link Loss, DP CTS, etc...).
2316 DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
2317 aconnector->connector_id);
2319 dc_sink_release(sink);
2323 DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
2324 aconnector->connector_id, aconnector->dc_sink, sink);
2326 mutex_lock(&dev->mode_config.mutex);
2329 * 1. Update status of the drm connector
2330 * 2. Send an event and let userspace tell us what to do
2334 * TODO: check if we still need the S3 mode update workaround.
2335 * If yes, put it here.
2337 if (aconnector->dc_sink)
2338 amdgpu_dm_update_freesync_caps(connector, NULL);
2340 aconnector->dc_sink = sink;
2341 dc_sink_retain(aconnector->dc_sink);
2342 if (sink->dc_edid.length == 0) {
2343 aconnector->edid = NULL;
2344 if (aconnector->dc_link->aux_mode) {
2345 drm_dp_cec_unset_edid(
2346 &aconnector->dm_dp_aux.aux);
2350 aconnector->edid = (struct edid *)sink->dc_edid.raw_edid;
2352 drm_connector_update_edid_property(connector, aconnector->edid);
2354 drm_add_edid_modes(connector, aconnector->edid);
2356 if (aconnector->dc_link->aux_mode)
2357 drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux, aconnector->edid);
2361 amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
2362 update_connector_ext_caps(aconnector);
2364 drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
2365 amdgpu_dm_update_freesync_caps(connector, NULL);
2366 drm_connector_update_edid_property(connector, NULL);
2367 aconnector->num_modes = 0;
2368 dc_sink_release(aconnector->dc_sink);
2369 aconnector->dc_sink = NULL;
2370 aconnector->edid = NULL;
2371 #ifdef CONFIG_DRM_AMD_DC_HDCP
2372 /* Set CP to DESIRED if it was ENABLED, so we can re-enable it again on hotplug */
2373 if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
2374 connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
2378 mutex_unlock(&dev->mode_config.mutex);
2380 update_subconnector_property(aconnector);
2383 dc_sink_release(sink);
2386 static void handle_hpd_irq(void *param)
2388 struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
2389 struct drm_connector *connector = &aconnector->base;
2390 struct drm_device *dev = connector->dev;
2391 enum dc_connection_type new_connection_type = dc_connection_none;
2392 #ifdef CONFIG_DRM_AMD_DC_HDCP
2393 struct amdgpu_device *adev = drm_to_adev(dev);
2394 struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
2398 * In case of failure or MST, there is no need to update the connector status
2399 * or notify the OS, since (in the MST case) MST does this in its own context.
2401 mutex_lock(&aconnector->hpd_lock);
2403 #ifdef CONFIG_DRM_AMD_DC_HDCP
2404 if (adev->dm.hdcp_workqueue) {
2405 hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
2406 dm_con_state->update_hdcp = true;
2409 if (aconnector->fake_enable)
2410 aconnector->fake_enable = false;
2412 if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2413 DRM_ERROR("KMS: Failed to detect connector\n");
2415 if (aconnector->base.force && new_connection_type == dc_connection_none) {
2416 emulated_link_detect(aconnector->dc_link);
2419 drm_modeset_lock_all(dev);
2420 dm_restore_drm_connector_state(dev, connector);
2421 drm_modeset_unlock_all(dev);
2423 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2424 drm_kms_helper_hotplug_event(dev);
2426 } else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
2427 if (new_connection_type == dc_connection_none &&
2428 aconnector->dc_link->type == dc_connection_none)
2429 dm_set_dpms_off(aconnector->dc_link);
2431 amdgpu_dm_update_connector_after_detect(aconnector);
2433 drm_modeset_lock_all(dev);
2434 dm_restore_drm_connector_state(dev, connector);
2435 drm_modeset_unlock_all(dev);
2437 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2438 drm_kms_helper_hotplug_event(dev);
2440 mutex_unlock(&aconnector->hpd_lock);
2444 static void dm_handle_hpd_rx_irq(struct amdgpu_dm_connector *aconnector)
2446 uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
2448 bool new_irq_handled = false;
2450 int dpcd_bytes_to_read;
2452 const int max_process_count = 30;
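/* max_process_count bounds the ESI servicing loop below, so a misbehaving
 * sink cannot keep this handler spinning indefinitely. */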
2453 int process_count = 0;
2455 const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);
2457 if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
2458 dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
2459 /* DPCD 0x200 - 0x201 for downstream IRQ */
2460 dpcd_addr = DP_SINK_COUNT;
2462 dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
2463 /* DPCD 0x2002 - 0x2005 for downstream IRQ */
2464 dpcd_addr = DP_SINK_COUNT_ESI;
2467 dret = drm_dp_dpcd_read(
2468 &aconnector->dm_dp_aux.aux,
2471 dpcd_bytes_to_read);
2473 while (dret == dpcd_bytes_to_read &&
2474 process_count < max_process_count) {
2480 DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
2481 /* handle HPD short pulse irq */
2482 if (aconnector->mst_mgr.mst_state)
2484 &aconnector->mst_mgr,
2488 if (new_irq_handled) {
2489 /* ACK at DPCD to notify downstream */
2490 const int ack_dpcd_bytes_to_write =
2491 dpcd_bytes_to_read - 1;
2493 for (retry = 0; retry < 3; retry++) {
2496 wret = drm_dp_dpcd_write(
2497 &aconnector->dm_dp_aux.aux,
2500 ack_dpcd_bytes_to_write);
2501 if (wret == ack_dpcd_bytes_to_write)
2505 /* check if there is new irq to be handled */
2506 dret = drm_dp_dpcd_read(
2507 &aconnector->dm_dp_aux.aux,
2510 dpcd_bytes_to_read);
2512 new_irq_handled = false;
2518 if (process_count == max_process_count)
2519 DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
2522 static void handle_hpd_rx_irq(void *param)
2524 struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
2525 struct drm_connector *connector = &aconnector->base;
2526 struct drm_device *dev = connector->dev;
2527 struct dc_link *dc_link = aconnector->dc_link;
2528 bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
2529 bool result = false;
2530 enum dc_connection_type new_connection_type = dc_connection_none;
2531 struct amdgpu_device *adev = drm_to_adev(dev);
2532 union hpd_irq_data hpd_irq_data;
2534 memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));
2537 * TODO: Temporarily add a mutex so the HPD interrupt cannot run into a GPIO
2538 * conflict; after the i2c helper is implemented, this mutex should be retired.
2541 if (dc_link->type != dc_connection_mst_branch)
2542 mutex_lock(&aconnector->hpd_lock);
2544 read_hpd_rx_irq_data(dc_link, &hpd_irq_data);
2546 if ((dc_link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN) ||
2547 (dc_link->type == dc_connection_mst_branch)) {
2548 if (hpd_irq_data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY) {
2550 dm_handle_hpd_rx_irq(aconnector);
2552 } else if (hpd_irq_data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) {
2554 dm_handle_hpd_rx_irq(aconnector);
2559 mutex_lock(&adev->dm.dc_lock);
2560 #ifdef CONFIG_DRM_AMD_DC_HDCP
2561 result = dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data, NULL);
2563 result = dc_link_handle_hpd_rx_irq(dc_link, NULL, NULL);
2565 mutex_unlock(&adev->dm.dc_lock);
2568 if (result && !is_mst_root_connector) {
2569 /* Downstream Port status changed. */
2570 if (!dc_link_detect_sink(dc_link, &new_connection_type))
2571 DRM_ERROR("KMS: Failed to detect connector\n");
2573 if (aconnector->base.force && new_connection_type == dc_connection_none) {
2574 emulated_link_detect(dc_link);
2576 if (aconnector->fake_enable)
2577 aconnector->fake_enable = false;
2579 amdgpu_dm_update_connector_after_detect(aconnector);
2582 drm_modeset_lock_all(dev);
2583 dm_restore_drm_connector_state(dev, connector);
2584 drm_modeset_unlock_all(dev);
2586 drm_kms_helper_hotplug_event(dev);
2587 } else if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {
2589 if (aconnector->fake_enable)
2590 aconnector->fake_enable = false;
2592 amdgpu_dm_update_connector_after_detect(aconnector);
2595 drm_modeset_lock_all(dev);
2596 dm_restore_drm_connector_state(dev, connector);
2597 drm_modeset_unlock_all(dev);
2599 drm_kms_helper_hotplug_event(dev);
2602 #ifdef CONFIG_DRM_AMD_DC_HDCP
2603 if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) {
2604 if (adev->dm.hdcp_workqueue)
2605 hdcp_handle_cpirq(adev->dm.hdcp_workqueue, aconnector->base.index);
2609 if (dc_link->type != dc_connection_mst_branch) {
2610 drm_dp_cec_irq(&aconnector->dm_dp_aux.aux);
2611 mutex_unlock(&aconnector->hpd_lock);
2615 static void register_hpd_handlers(struct amdgpu_device *adev)
2617 struct drm_device *dev = adev_to_drm(adev);
2618 struct drm_connector *connector;
2619 struct amdgpu_dm_connector *aconnector;
2620 const struct dc_link *dc_link;
2621 struct dc_interrupt_params int_params = {0};
2623 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2624 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2626 list_for_each_entry(connector,
2627 &dev->mode_config.connector_list, head) {
2629 aconnector = to_amdgpu_dm_connector(connector);
2630 dc_link = aconnector->dc_link;
2632 if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
2633 int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2634 int_params.irq_source = dc_link->irq_source_hpd;
2636 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2638 (void *) aconnector);
2641 if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {
2643 /* Also register for DP short pulse (hpd_rx). */
2644 int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2645 int_params.irq_source = dc_link->irq_source_hpd_rx;
2647 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2649 (void *) aconnector);
2654 #if defined(CONFIG_DRM_AMD_DC_SI)
2655 /* Register IRQ sources and initialize IRQ callbacks */
2656 static int dce60_register_irq_handlers(struct amdgpu_device *adev)
2658 struct dc *dc = adev->dm.dc;
2659 struct common_irq_params *c_irq_params;
2660 struct dc_interrupt_params int_params = {0};
2663 unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
2665 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2666 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2669 * Actions of amdgpu_irq_add_id():
2670 * 1. Register a set() function with base driver.
2671 * Base driver will call set() function to enable/disable an
2672 * interrupt in DC hardware.
2673 * 2. Register amdgpu_dm_irq_handler().
2674 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2675 * coming from DC hardware.
2676 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2677 * for acknowledging and handling. */
2679 /* Use VBLANK interrupt */
2680 for (i = 0; i < adev->mode_info.num_crtc; i++) {
2681 r = amdgpu_irq_add_id(adev, client_id, i+1 , &adev->crtc_irq);
2683 DRM_ERROR("Failed to add crtc irq id!\n");
2687 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2688 int_params.irq_source =
2689 dc_interrupt_to_irq_source(dc, i+1 , 0);
2691 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2693 c_irq_params->adev = adev;
2694 c_irq_params->irq_src = int_params.irq_source;
2696 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2697 dm_crtc_high_irq, c_irq_params);
2700 /* Use GRPH_PFLIP interrupt */
2701 for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
2702 i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
2703 r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
2705 DRM_ERROR("Failed to add page flip irq id!\n");
2709 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2710 int_params.irq_source =
2711 dc_interrupt_to_irq_source(dc, i, 0);
2713 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2715 c_irq_params->adev = adev;
2716 c_irq_params->irq_src = int_params.irq_source;
2718 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2719 dm_pflip_high_irq, c_irq_params);
2724 r = amdgpu_irq_add_id(adev, client_id,
2725 VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
2727 DRM_ERROR("Failed to add hpd irq id!\n");
2731 register_hpd_handlers(adev);
2737 /* Register IRQ sources and initialize IRQ callbacks */
2738 static int dce110_register_irq_handlers(struct amdgpu_device *adev)
2740 struct dc *dc = adev->dm.dc;
2741 struct common_irq_params *c_irq_params;
2742 struct dc_interrupt_params int_params = {0};
2745 unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
2747 if (adev->asic_type >= CHIP_VEGA10)
2748 client_id = SOC15_IH_CLIENTID_DCE;
2750 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2751 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2754 * Actions of amdgpu_irq_add_id():
2755 * 1. Register a set() function with base driver.
2756 * Base driver will call set() function to enable/disable an
2757 * interrupt in DC hardware.
2758 * 2. Register amdgpu_dm_irq_handler().
2759 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2760 * coming from DC hardware.
2761 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2762 * for acknowledging and handling. */
2764 /* Use VBLANK interrupt */
2765 for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
2766 r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
2768 DRM_ERROR("Failed to add crtc irq id!\n");
2772 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2773 int_params.irq_source =
2774 dc_interrupt_to_irq_source(dc, i, 0);
2776 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2778 c_irq_params->adev = adev;
2779 c_irq_params->irq_src = int_params.irq_source;
2781 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2782 dm_crtc_high_irq, c_irq_params);
2785 /* Use VUPDATE interrupt */
2786 for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) {
2787 r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq);
2789 DRM_ERROR("Failed to add vupdate irq id!\n");
2793 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2794 int_params.irq_source =
2795 dc_interrupt_to_irq_source(dc, i, 0);
2797 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
2799 c_irq_params->adev = adev;
2800 c_irq_params->irq_src = int_params.irq_source;
2802 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2803 dm_vupdate_high_irq, c_irq_params);
2806 /* Use GRPH_PFLIP interrupt */
2807 for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
2808 i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
2809 r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
2811 DRM_ERROR("Failed to add page flip irq id!\n");
2815 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2816 int_params.irq_source =
2817 dc_interrupt_to_irq_source(dc, i, 0);
2819 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2821 c_irq_params->adev = adev;
2822 c_irq_params->irq_src = int_params.irq_source;
2824 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2825 dm_pflip_high_irq, c_irq_params);
2830 r = amdgpu_irq_add_id(adev, client_id,
2831 VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
2833 DRM_ERROR("Failed to add hpd irq id!\n");
2837 register_hpd_handlers(adev);
2842 #if defined(CONFIG_DRM_AMD_DC_DCN)
2843 /* Register IRQ sources and initialize IRQ callbacks */
2844 static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
2846 struct dc *dc = adev->dm.dc;
2847 struct common_irq_params *c_irq_params;
2848 struct dc_interrupt_params int_params = {0};
2852 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2853 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2856 * Actions of amdgpu_irq_add_id():
2857 * 1. Register a set() function with base driver.
2858 * Base driver will call set() function to enable/disable an
2859 * interrupt in DC hardware.
2860 * 2. Register amdgpu_dm_irq_handler().
2861 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2862 * coming from DC hardware.
2863 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2864 * for acknowledging and handling.
2867 /* Use VSTARTUP interrupt */
2868 for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
2869 i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
2871 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);
2874 DRM_ERROR("Failed to add crtc irq id!\n");
2878 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2879 int_params.irq_source =
2880 dc_interrupt_to_irq_source(dc, i, 0);
2882 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2884 c_irq_params->adev = adev;
2885 c_irq_params->irq_src = int_params.irq_source;
2887 amdgpu_dm_irq_register_interrupt(
2888 adev, &int_params, dm_crtc_high_irq, c_irq_params);
2891 /* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to
2892 * the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx
2893 * to trigger at end of each vblank, regardless of state of the lock,
2894 * matching DCE behaviour.
2896 for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT;
2897 i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT + adev->mode_info.num_crtc - 1;
2899 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq);
2902 DRM_ERROR("Failed to add vupdate irq id!\n");
2906 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2907 int_params.irq_source =
2908 dc_interrupt_to_irq_source(dc, i, 0);
2910 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
2912 c_irq_params->adev = adev;
2913 c_irq_params->irq_src = int_params.irq_source;
2915 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2916 dm_vupdate_high_irq, c_irq_params);
2919 /* Use GRPH_PFLIP interrupt */
2920 for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
2921 i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + adev->mode_info.num_crtc - 1;
2923 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
2925 DRM_ERROR("Failed to add page flip irq id!\n");
2929 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2930 int_params.irq_source =
2931 dc_interrupt_to_irq_source(dc, i, 0);
2933 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2935 c_irq_params->adev = adev;
2936 c_irq_params->irq_src = int_params.irq_source;
2938 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2939 dm_pflip_high_irq, c_irq_params);
2944 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
2947 DRM_ERROR("Failed to add hpd irq id!\n");
2951 register_hpd_handlers(adev);
2958 * Acquires the lock for the atomic state object and returns
2959 * the new atomic state.
2961 * This should only be called during atomic check.
2963 static int dm_atomic_get_state(struct drm_atomic_state *state,
2964 struct dm_atomic_state **dm_state)
2966 struct drm_device *dev = state->dev;
2967 struct amdgpu_device *adev = drm_to_adev(dev);
2968 struct amdgpu_display_manager *dm = &adev->dm;
2969 struct drm_private_state *priv_state;
2974 priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj);
2975 if (IS_ERR(priv_state))
2976 return PTR_ERR(priv_state);
2978 *dm_state = to_dm_atomic_state(priv_state);
2983 static struct dm_atomic_state *
2984 dm_atomic_get_new_state(struct drm_atomic_state *state)
2986 struct drm_device *dev = state->dev;
2987 struct amdgpu_device *adev = drm_to_adev(dev);
2988 struct amdgpu_display_manager *dm = &adev->dm;
2989 struct drm_private_obj *obj;
2990 struct drm_private_state *new_obj_state;
2993 for_each_new_private_obj_in_state(state, obj, new_obj_state, i) {
2994 if (obj->funcs == dm->atomic_obj.funcs)
2995 return to_dm_atomic_state(new_obj_state);
3001 static struct drm_private_state *
3002 dm_atomic_duplicate_state(struct drm_private_obj *obj)
3004 struct dm_atomic_state *old_state, *new_state;
3006 new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
3010 __drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base);
3012 old_state = to_dm_atomic_state(obj->state);
3014 if (old_state && old_state->context)
3015 new_state->context = dc_copy_state(old_state->context);
3017 if (!new_state->context) {
3022 return &new_state->base;
3025 static void dm_atomic_destroy_state(struct drm_private_obj *obj,
3026 struct drm_private_state *state)
3028 struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
3030 if (dm_state && dm_state->context)
3031 dc_release_state(dm_state->context);
3036 static struct drm_private_state_funcs dm_atomic_state_funcs = {
3037 .atomic_duplicate_state = dm_atomic_duplicate_state,
3038 .atomic_destroy_state = dm_atomic_destroy_state,
3041 static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
3043 struct dm_atomic_state *state;
3046 adev->mode_info.mode_config_initialized = true;
3048 adev_to_drm(adev)->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
3049 adev_to_drm(adev)->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;
3051 adev_to_drm(adev)->mode_config.max_width = 16384;
3052 adev_to_drm(adev)->mode_config.max_height = 16384;
3054 adev_to_drm(adev)->mode_config.preferred_depth = 24;
3055 adev_to_drm(adev)->mode_config.prefer_shadow = 1;
3056 /* indicates support for immediate flip */
3057 adev_to_drm(adev)->mode_config.async_page_flip = true;
3059 adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base;
3061 state = kzalloc(sizeof(*state), GFP_KERNEL);
3065 state->context = dc_create_state(adev->dm.dc);
3066 if (!state->context) {
3071 dc_resource_state_copy_construct_current(adev->dm.dc, state->context);
3073 drm_atomic_private_obj_init(adev_to_drm(adev),
3074 &adev->dm.atomic_obj,
3076 &dm_atomic_state_funcs);
3078 r = amdgpu_display_modeset_create_props(adev);
3080 dc_release_state(state->context);
3085 r = amdgpu_dm_audio_init(adev);
3087 dc_release_state(state->context);
3095 #define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
3096 #define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
3097 #define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50
3099 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3100 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3102 static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm)
3104 #if defined(CONFIG_ACPI)
3105 struct amdgpu_dm_backlight_caps caps;
3107 memset(&caps, 0, sizeof(caps));
3109 if (dm->backlight_caps.caps_valid)
3112 amdgpu_acpi_get_backlight_caps(dm->adev, &caps);
3113 if (caps.caps_valid) {
3114 dm->backlight_caps.caps_valid = true;
3115 if (caps.aux_support)
3117 dm->backlight_caps.min_input_signal = caps.min_input_signal;
3118 dm->backlight_caps.max_input_signal = caps.max_input_signal;
3120 dm->backlight_caps.min_input_signal =
3121 AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3122 dm->backlight_caps.max_input_signal =
3123 AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3126 if (dm->backlight_caps.aux_support)
3129 dm->backlight_caps.min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3130 dm->backlight_caps.max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3134 static int set_backlight_via_aux(struct dc_link *link, uint32_t brightness)
3141 rc = dc_link_set_backlight_level_nits(link, true, brightness,
3142 AUX_BL_DEFAULT_TRANSITION_TIME_MS);
3147 static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps,
3148 unsigned *min, unsigned *max)
3153 if (caps->aux_support) {
3154 // Firmware limits are in nits, DC API wants millinits.
3155 *max = 1000 * caps->aux_max_input_signal;
3156 *min = 1000 * caps->aux_min_input_signal;
3158 // Firmware limits are 8-bit, PWM control is 16-bit.
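// 0x101 * 0xFF = 0xFFFF, so the 8-bit firmware maximum maps exactly onto the full 16-bit PWM range.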
3159 *max = 0x101 * caps->max_input_signal;
3160 *min = 0x101 * caps->min_input_signal;
3165 static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *caps,
3166 uint32_t brightness)
3170 if (!get_brightness_range(caps, &min, &max))
3173 // Rescale 0..255 to min..max
3174 return min + DIV_ROUND_CLOSEST((max - min) * brightness,
3175 AMDGPU_MAX_BL_LEVEL);
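/* Example with the default PWM caps (min_input_signal = 12,
 * max_input_signal = 255): min = 12 * 0x101 = 3084 and max = 255 * 0x101 =
 * 65535, so user level 0 maps to 3084 and user level 255 maps to 65535. */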
3178 static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps *caps,
3179 uint32_t brightness)
3183 if (!get_brightness_range(caps, &min, &max))
3186 if (brightness < min)
3188 // Rescale min..max to 0..255
3189 return DIV_ROUND_CLOSEST(AMDGPU_MAX_BL_LEVEL * (brightness - min),
3193 static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
3195 struct amdgpu_display_manager *dm = bl_get_data(bd);
3196 struct amdgpu_dm_backlight_caps caps;
3197 struct dc_link *link = NULL;
3201 amdgpu_dm_update_backlight_caps(dm);
3202 caps = dm->backlight_caps;
3204 link = (struct dc_link *)dm->backlight_link;
3206 brightness = convert_brightness_from_user(&caps, bd->props.brightness);
3207 // Change brightness based on AUX property
3208 if (caps.aux_support)
3209 return set_backlight_via_aux(link, brightness);
3211 rc = dc_link_set_backlight_level(dm->backlight_link, brightness, 0);
3216 static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
3218 struct amdgpu_display_manager *dm = bl_get_data(bd);
3219 int ret = dc_link_get_backlight_level(dm->backlight_link);
3221 if (ret == DC_ERROR_UNEXPECTED)
3222 return bd->props.brightness;
3223 return convert_brightness_to_user(&dm->backlight_caps, ret);
3226 static const struct backlight_ops amdgpu_dm_backlight_ops = {
3227 .options = BL_CORE_SUSPENDRESUME,
3228 .get_brightness = amdgpu_dm_backlight_get_brightness,
3229 .update_status = amdgpu_dm_backlight_update_status,
3233 amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
3236 struct backlight_properties props = { 0 };
3238 amdgpu_dm_update_backlight_caps(dm);
3240 props.max_brightness = AMDGPU_MAX_BL_LEVEL;
3241 props.brightness = AMDGPU_MAX_BL_LEVEL;
3242 props.type = BACKLIGHT_RAW;
3244 snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
3245 adev_to_drm(dm->adev)->primary->index);
3247 dm->backlight_dev = backlight_device_register(bl_name,
3248 adev_to_drm(dm->adev)->dev,
3250 &amdgpu_dm_backlight_ops,
3253 if (IS_ERR(dm->backlight_dev))
3254 DRM_ERROR("DM: Backlight registration failed!\n");
3256 DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
3261 static int initialize_plane(struct amdgpu_display_manager *dm,
3262 struct amdgpu_mode_info *mode_info, int plane_id,
3263 enum drm_plane_type plane_type,
3264 const struct dc_plane_cap *plane_cap)
3266 struct drm_plane *plane;
3267 unsigned long possible_crtcs;
3270 plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL);
3272 DRM_ERROR("KMS: Failed to allocate plane\n");
3275 plane->type = plane_type;
3278 * HACK: IGT tests expect that the primary plane for a CRTC
3279 * can only have one possible CRTC. Only expose support for
3280 * any CRTC if the plane is not going to be used as a primary plane
3281 * for a CRTC - i.e. for overlay or underlay planes.
3283 possible_crtcs = 1 << plane_id;
3284 if (plane_id >= dm->dc->caps.max_streams)
3285 possible_crtcs = 0xff;
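/* possible_crtcs is a bitmask: 1 << plane_id ties primary plane N to CRTC N
 * only, while 0xff exposes an overlay/underlay plane to every CRTC. */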
3287 ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap);
3290 DRM_ERROR("KMS: Failed to initialize plane\n");
3296 mode_info->planes[plane_id] = plane;
3302 static void register_backlight_device(struct amdgpu_display_manager *dm,
3303 struct dc_link *link)
3305 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3306 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3308 if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
3309 link->type != dc_connection_none) {
3311 * Even if registration fails, we should continue with
3312 * DM initialization, because not having backlight control
3313 * is better than a black screen.
3315 amdgpu_dm_register_backlight_device(dm);
3317 if (dm->backlight_dev)
3318 dm->backlight_link = link;
3325 * In this architecture, the association
3326 * connector -> encoder -> crtc
3327 * is not really required. The crtc and connector will hold the
3328 * display_index as an abstraction to use with the DAL component.
3330 * Returns 0 on success.
3332 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
3334 struct amdgpu_display_manager *dm = &adev->dm;
3336 struct amdgpu_dm_connector *aconnector = NULL;
3337 struct amdgpu_encoder *aencoder = NULL;
3338 struct amdgpu_mode_info *mode_info = &adev->mode_info;
3340 int32_t primary_planes;
3341 enum dc_connection_type new_connection_type = dc_connection_none;
3342 const struct dc_plane_cap *plane;
3344 dm->display_indexes_num = dm->dc->caps.max_streams;
3345 /* Update the actual number of CRTCs in use */
3346 adev->mode_info.num_crtc = adev->dm.display_indexes_num;
3348 link_cnt = dm->dc->caps.max_links;
3349 if (amdgpu_dm_mode_config_init(dm->adev)) {
3350 DRM_ERROR("DM: Failed to initialize mode config\n");
3354 /* There is one primary plane per CRTC */
3355 primary_planes = dm->dc->caps.max_streams;
3356 ASSERT(primary_planes <= AMDGPU_MAX_PLANES);
3359 * Initialize primary planes, implicit planes for legacy IOCTLs.
3360 * Order is reversed to match iteration order in atomic check.
3362 for (i = (primary_planes - 1); i >= 0; i--) {
3363 plane = &dm->dc->caps.planes[i];
3365 if (initialize_plane(dm, mode_info, i,
3366 DRM_PLANE_TYPE_PRIMARY, plane)) {
3367 DRM_ERROR("KMS: Failed to initialize primary plane\n");
3373 * Initialize overlay planes, index starting after primary planes.
3374 * These planes have a higher DRM index than the primary planes since
3375 * they should be considered as having a higher z-order.
3376 * Order is reversed to match iteration order in atomic check.
3378 * Only support DCN for now, and only expose one so we don't encourage
3379 * userspace to use up all the pipes.
3381 for (i = 0; i < dm->dc->caps.max_planes; ++i) {
3382 struct dc_plane_cap *plane = &dm->dc->caps.planes[i];
3384 if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL)
3387 if (!plane->blends_with_above || !plane->blends_with_below)
3390 if (!plane->pixel_format_support.argb8888)
3393 if (initialize_plane(dm, NULL, primary_planes + i,
3394 DRM_PLANE_TYPE_OVERLAY, plane)) {
3395 DRM_ERROR("KMS: Failed to initialize overlay plane\n");
3399 /* Only create one overlay plane. */
3403 for (i = 0; i < dm->dc->caps.max_streams; i++)
3404 if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) {
3405 DRM_ERROR("KMS: Failed to initialize crtc\n");
3409 /* loops over all connectors on the board */
3410 for (i = 0; i < link_cnt; i++) {
3411 struct dc_link *link = NULL;
3413 if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
3415 "KMS: Cannot support more than %d display indexes\n",
3416 AMDGPU_DM_MAX_DISPLAY_INDEX);
3420 aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
3424 aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
3428 if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
3429 DRM_ERROR("KMS: Failed to initialize encoder\n");
3433 if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
3434 DRM_ERROR("KMS: Failed to initialize connector\n");
3438 link = dc_get_link_at_index(dm->dc, i);
3440 if (!dc_link_detect_sink(link, &new_connection_type))
3441 DRM_ERROR("KMS: Failed to detect connector\n");
3443 if (aconnector->base.force && new_connection_type == dc_connection_none) {
3444 emulated_link_detect(link);
3445 amdgpu_dm_update_connector_after_detect(aconnector);
3447 } else if (dc_link_detect(link, DETECT_REASON_BOOT)) {
3448 amdgpu_dm_update_connector_after_detect(aconnector);
3449 register_backlight_device(dm, link);
3450 if (amdgpu_dc_feature_mask & DC_PSR_MASK)
3451 amdgpu_dm_set_psr_caps(link);
3457 /* Software is initialized. Now we can register interrupt handlers. */
3458 switch (adev->asic_type) {
3459 #if defined(CONFIG_DRM_AMD_DC_SI)
3464 if (dce60_register_irq_handlers(dm->adev)) {
3465 DRM_ERROR("DM: Failed to initialize IRQ\n");
3479 case CHIP_POLARIS11:
3480 case CHIP_POLARIS10:
3481 case CHIP_POLARIS12:
3486 if (dce110_register_irq_handlers(dm->adev)) {
3487 DRM_ERROR("DM: Failed to initialize IRQ\n");
3491 #if defined(CONFIG_DRM_AMD_DC_DCN)
3497 case CHIP_SIENNA_CICHLID:
3498 case CHIP_NAVY_FLOUNDER:
3499 case CHIP_DIMGREY_CAVEFISH:
3501 if (dcn10_register_irq_handlers(dm->adev)) {
3502 DRM_ERROR("DM: Failed to initialize IRQ\n");
3508 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
3520 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
3522 drm_mode_config_cleanup(dm->ddev);
3523 drm_atomic_private_obj_fini(&dm->atomic_obj);
3527 /******************************************************************************
3528 * amdgpu_display_funcs functions
3529 *****************************************************************************/
3532 * dm_bandwidth_update - program display watermarks
3534 * @adev: amdgpu_device pointer
3536 * Calculate and program the display watermarks and line buffer allocation.
3538 static void dm_bandwidth_update(struct amdgpu_device *adev)
3540 /* TODO: implement later */
3543 static const struct amdgpu_display_funcs dm_display_funcs = {
3544 .bandwidth_update = dm_bandwidth_update, /* called unconditionally */
3545 .vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */
3546 .backlight_set_level = NULL, /* never called for DC */
3547 .backlight_get_level = NULL, /* never called for DC */
3548 .hpd_sense = NULL,/* called unconditionally */
3549 .hpd_set_polarity = NULL, /* called unconditionally */
3550 .hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
3551 .page_flip_get_scanoutpos =
3552 dm_crtc_get_scanoutpos,/* called unconditionally */
3553 .add_encoder = NULL, /* VBIOS parsing. DAL does it. */
3554 .add_connector = NULL, /* VBIOS parsing. DAL does it. */
3557 #if defined(CONFIG_DEBUG_KERNEL_DC)
3559 static ssize_t s3_debug_store(struct device *device,
3560 struct device_attribute *attr,
3566 struct drm_device *drm_dev = dev_get_drvdata(device);
3567 struct amdgpu_device *adev = drm_to_adev(drm_dev);
3569 ret = kstrtoint(buf, 0, &s3_state);
3574 drm_kms_helper_hotplug_event(adev_to_drm(adev));
3579 return ret == 0 ? count : 0;
3582 DEVICE_ATTR_WO(s3_debug);
3586 static int dm_early_init(void *handle)
3588 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3590 switch (adev->asic_type) {
3591 #if defined(CONFIG_DRM_AMD_DC_SI)
3595 adev->mode_info.num_crtc = 6;
3596 adev->mode_info.num_hpd = 6;
3597 adev->mode_info.num_dig = 6;
3600 adev->mode_info.num_crtc = 2;
3601 adev->mode_info.num_hpd = 2;
3602 adev->mode_info.num_dig = 2;
3607 adev->mode_info.num_crtc = 6;
3608 adev->mode_info.num_hpd = 6;
3609 adev->mode_info.num_dig = 6;
3612 adev->mode_info.num_crtc = 4;
3613 adev->mode_info.num_hpd = 6;
3614 adev->mode_info.num_dig = 7;
3618 adev->mode_info.num_crtc = 2;
3619 adev->mode_info.num_hpd = 6;
3620 adev->mode_info.num_dig = 6;
3624 adev->mode_info.num_crtc = 6;
3625 adev->mode_info.num_hpd = 6;
3626 adev->mode_info.num_dig = 7;
3629 adev->mode_info.num_crtc = 3;
3630 adev->mode_info.num_hpd = 6;
3631 adev->mode_info.num_dig = 9;
3634 adev->mode_info.num_crtc = 2;
3635 adev->mode_info.num_hpd = 6;
3636 adev->mode_info.num_dig = 9;
3638 case CHIP_POLARIS11:
3639 case CHIP_POLARIS12:
3640 adev->mode_info.num_crtc = 5;
3641 adev->mode_info.num_hpd = 5;
3642 adev->mode_info.num_dig = 5;
3644 case CHIP_POLARIS10:
3646 adev->mode_info.num_crtc = 6;
3647 adev->mode_info.num_hpd = 6;
3648 adev->mode_info.num_dig = 6;
3653 adev->mode_info.num_crtc = 6;
3654 adev->mode_info.num_hpd = 6;
3655 adev->mode_info.num_dig = 6;
3657 #if defined(CONFIG_DRM_AMD_DC_DCN)
3661 adev->mode_info.num_crtc = 4;
3662 adev->mode_info.num_hpd = 4;
3663 adev->mode_info.num_dig = 4;
3667 case CHIP_SIENNA_CICHLID:
3668 case CHIP_NAVY_FLOUNDER:
3669 adev->mode_info.num_crtc = 6;
3670 adev->mode_info.num_hpd = 6;
3671 adev->mode_info.num_dig = 6;
3674 case CHIP_DIMGREY_CAVEFISH:
3675 adev->mode_info.num_crtc = 5;
3676 adev->mode_info.num_hpd = 5;
3677 adev->mode_info.num_dig = 5;
3681 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
3685 amdgpu_dm_set_irq_funcs(adev);
3687 if (adev->mode_info.funcs == NULL)
3688 adev->mode_info.funcs = &dm_display_funcs;
3691 * Note: Do NOT change adev->audio_endpt_rreg and
3692 * adev->audio_endpt_wreg because they are initialised in
3693 * amdgpu_device_init()
3695 #if defined(CONFIG_DEBUG_KERNEL_DC)
3697 adev_to_drm(adev)->dev,
3698 &dev_attr_s3_debug);
3704 static bool modeset_required(struct drm_crtc_state *crtc_state,
3705 struct dc_stream_state *new_stream,
3706 struct dc_stream_state *old_stream)
3708 return crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
3711 static bool modereset_required(struct drm_crtc_state *crtc_state)
3713 return !crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
3716 static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
3718 drm_encoder_cleanup(encoder);
3722 static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
3723 .destroy = amdgpu_dm_encoder_destroy,
3727 static void get_min_max_dc_plane_scaling(struct drm_device *dev,
3728 struct drm_framebuffer *fb,
3729 int *min_downscale, int *max_upscale)
3731 struct amdgpu_device *adev = drm_to_adev(dev);
3732 struct dc *dc = adev->dm.dc;
3733 /* Caps for all supported planes are the same on DCE and DCN 1 - 3 */
3734 struct dc_plane_cap *plane_cap = &dc->caps.planes[0];
3736 switch (fb->format->format) {
3737 case DRM_FORMAT_P010:
3738 case DRM_FORMAT_NV12:
3739 case DRM_FORMAT_NV21:
3740 *max_upscale = plane_cap->max_upscale_factor.nv12;
3741 *min_downscale = plane_cap->max_downscale_factor.nv12;
3744 case DRM_FORMAT_XRGB16161616F:
3745 case DRM_FORMAT_ARGB16161616F:
3746 case DRM_FORMAT_XBGR16161616F:
3747 case DRM_FORMAT_ABGR16161616F:
3748 *max_upscale = plane_cap->max_upscale_factor.fp16;
3749 *min_downscale = plane_cap->max_downscale_factor.fp16;
3753 *max_upscale = plane_cap->max_upscale_factor.argb8888;
3754 *min_downscale = plane_cap->max_downscale_factor.argb8888;
3759 * A factor of 1 in the plane_cap means scaling is not allowed, i.e. use a
3760 * scaling factor of 1.0 == 1000 units.
3762 if (*max_upscale == 1)
3763 *max_upscale = 1000;
3765 if (*min_downscale == 1)
3766 *min_downscale = 1000;
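/* Example: max_upscale == 16000 allows up to 16x upscaling and
 * min_downscale == 250 allows down to 0.25x, matching the fallback values
 * used by fill_dc_scaling_info() when no plane caps are available. */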
3770 static int fill_dc_scaling_info(const struct drm_plane_state *state,
3771 struct dc_scaling_info *scaling_info)
3773 int scale_w, scale_h, min_downscale, max_upscale;
3775 memset(scaling_info, 0, sizeof(*scaling_info));
3777 /* Source is fixed 16.16 but we ignore mantissa for now... */
3778 scaling_info->src_rect.x = state->src_x >> 16;
3779 scaling_info->src_rect.y = state->src_y >> 16;
3781 scaling_info->src_rect.width = state->src_w >> 16;
3782 if (scaling_info->src_rect.width == 0)
3785 scaling_info->src_rect.height = state->src_h >> 16;
3786 if (scaling_info->src_rect.height == 0)
3789 scaling_info->dst_rect.x = state->crtc_x;
3790 scaling_info->dst_rect.y = state->crtc_y;
3792 if (state->crtc_w == 0)
3795 scaling_info->dst_rect.width = state->crtc_w;
3797 if (state->crtc_h == 0)
3800 scaling_info->dst_rect.height = state->crtc_h;
3802 /* DRM doesn't specify clipping on destination output. */
3803 scaling_info->clip_rect = scaling_info->dst_rect;
3805 /* Validate scaling per-format with DC plane caps */
3806 if (state->plane && state->plane->dev && state->fb) {
3807 get_min_max_dc_plane_scaling(state->plane->dev, state->fb,
3808 &min_downscale, &max_upscale);
3810 min_downscale = 250;
3811 max_upscale = 16000;
3814 scale_w = scaling_info->dst_rect.width * 1000 /
3815 scaling_info->src_rect.width;
3817 if (scale_w < min_downscale || scale_w > max_upscale)
3820 scale_h = scaling_info->dst_rect.height * 1000 /
3821 scaling_info->src_rect.height;
3823 if (scale_h < min_downscale || scale_h > max_upscale)
3827 * The "scaling_quality" can be ignored for now, quality = 0 has DC
3828 * assume reasonable defaults based on the format.
3835 fill_gfx8_tiling_info_from_flags(union dc_tiling_info *tiling_info,
3836 uint64_t tiling_flags)
3838 /* Fill GFX8 params */
3839 if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) {
3840 unsigned int bankw, bankh, mtaspect, tile_split, num_banks;
3842 bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
3843 bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
3844 mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
3845 tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
3846 num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
3848 /* XXX fix me for VI */
3849 tiling_info->gfx8.num_banks = num_banks;
3850 tiling_info->gfx8.array_mode =
3851 DC_ARRAY_2D_TILED_THIN1;
3852 tiling_info->gfx8.tile_split = tile_split;
3853 tiling_info->gfx8.bank_width = bankw;
3854 tiling_info->gfx8.bank_height = bankh;
3855 tiling_info->gfx8.tile_aspect = mtaspect;
3856 tiling_info->gfx8.tile_mode =
3857 DC_ADDR_SURF_MICRO_TILING_DISPLAY;
3858 } else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)
3859 == DC_ARRAY_1D_TILED_THIN1) {
3860 tiling_info->gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
3863 tiling_info->gfx8.pipe_config =
3864 AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
3868 fill_gfx9_tiling_info_from_device(const struct amdgpu_device *adev,
3869 union dc_tiling_info *tiling_info)
3871 tiling_info->gfx9.num_pipes =
3872 adev->gfx.config.gb_addr_config_fields.num_pipes;
3873 tiling_info->gfx9.num_banks =
3874 adev->gfx.config.gb_addr_config_fields.num_banks;
3875 tiling_info->gfx9.pipe_interleave =
3876 adev->gfx.config.gb_addr_config_fields.pipe_interleave_size;
3877 tiling_info->gfx9.num_shader_engines =
3878 adev->gfx.config.gb_addr_config_fields.num_se;
3879 tiling_info->gfx9.max_compressed_frags =
3880 adev->gfx.config.gb_addr_config_fields.max_compress_frags;
3881 tiling_info->gfx9.num_rb_per_se =
3882 adev->gfx.config.gb_addr_config_fields.num_rb_per_se;
3883 tiling_info->gfx9.shaderEnable = 1;
3884 if (adev->asic_type == CHIP_SIENNA_CICHLID ||
3885 adev->asic_type == CHIP_NAVY_FLOUNDER ||
3886 adev->asic_type == CHIP_DIMGREY_CAVEFISH ||
3887 adev->asic_type == CHIP_VANGOGH)
3888 tiling_info->gfx9.num_pkrs = adev->gfx.config.gb_addr_config_fields.num_pkrs;
3892 validate_dcc(struct amdgpu_device *adev,
3893 const enum surface_pixel_format format,
3894 const enum dc_rotation_angle rotation,
3895 const union dc_tiling_info *tiling_info,
3896 const struct dc_plane_dcc_param *dcc,
3897 const struct dc_plane_address *address,
3898 const struct plane_size *plane_size)
3900 struct dc *dc = adev->dm.dc;
3901 struct dc_dcc_surface_param input;
3902 struct dc_surface_dcc_cap output;
3904 memset(&input, 0, sizeof(input));
3905 memset(&output, 0, sizeof(output));
3910 if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN ||
3911 !dc->cap_funcs.get_dcc_compression_cap)
3914 input.format = format;
3915 input.surface_size.width = plane_size->surface_size.width;
3916 input.surface_size.height = plane_size->surface_size.height;
3917 input.swizzle_mode = tiling_info->gfx9.swizzle;
3919 if (rotation == ROTATION_ANGLE_0 || rotation == ROTATION_ANGLE_180)
3920 input.scan = SCAN_DIRECTION_HORIZONTAL;
3921 else if (rotation == ROTATION_ANGLE_90 || rotation == ROTATION_ANGLE_270)
3922 input.scan = SCAN_DIRECTION_VERTICAL;
3924 if (!dc->cap_funcs.get_dcc_compression_cap(dc, &input, &output))
3927 if (!output.capable)
3930 if (dcc->independent_64b_blks == 0 &&
3931 output.grph.rgb.independent_64b_blks != 0)
3938 modifier_has_dcc(uint64_t modifier)
3940 return IS_AMD_FMT_MOD(modifier) && AMD_FMT_MOD_GET(DCC, modifier);
3944 modifier_gfx9_swizzle_mode(uint64_t modifier)
3946 if (modifier == DRM_FORMAT_MOD_LINEAR)
3949 return AMD_FMT_MOD_GET(TILE, modifier);
3952 static const struct drm_format_info *
3953 amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
3955 return amdgpu_lookup_format_info(cmd->pixel_format, cmd->modifier[0]);
3959 fill_gfx9_tiling_info_from_modifier(const struct amdgpu_device *adev,
3960 union dc_tiling_info *tiling_info,
3963 unsigned int mod_bank_xor_bits = AMD_FMT_MOD_GET(BANK_XOR_BITS, modifier);
3964 unsigned int mod_pipe_xor_bits = AMD_FMT_MOD_GET(PIPE_XOR_BITS, modifier);
3965 unsigned int pkrs_log2 = AMD_FMT_MOD_GET(PACKERS, modifier);
3966 unsigned int pipes_log2 = min(4u, mod_pipe_xor_bits);
3968 fill_gfx9_tiling_info_from_device(adev, tiling_info);
3970 if (!IS_AMD_FMT_MOD(modifier))
3973 tiling_info->gfx9.num_pipes = 1u << pipes_log2;
3974 tiling_info->gfx9.num_shader_engines = 1u << (mod_pipe_xor_bits - pipes_log2);
3976 if (adev->family >= AMDGPU_FAMILY_NV) {
3977 tiling_info->gfx9.num_pkrs = 1u << pkrs_log2;
3979 tiling_info->gfx9.num_banks = 1u << mod_bank_xor_bits;
3981 /* for DCC we know it isn't rb aligned, so rb_per_se doesn't matter. */
3985 enum dm_micro_swizzle {
3986 MICRO_SWIZZLE_Z = 0,
3987 MICRO_SWIZZLE_S = 1,
3988 MICRO_SWIZZLE_D = 2,
3992 static bool dm_plane_format_mod_supported(struct drm_plane *plane,
3996 struct amdgpu_device *adev = drm_to_adev(plane->dev);
3997 const struct drm_format_info *info = drm_format_info(format);
3999 enum dm_micro_swizzle microtile = modifier_gfx9_swizzle_mode(modifier) & 3;
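/* The low two bits of a GFX9+ swizzle mode select the micro-tile class
 * (Z, S, D or R), which is what enum dm_micro_swizzle mirrors. */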
4005 * We always have to allow this modifier, because core DRM still
4006 * checks LINEAR support if userspace does not provide modifiers.
4008 if (modifier == DRM_FORMAT_MOD_LINEAR)
4012 * The arbitrary tiling support for multiplane formats has not been hooked up.
4015 if (info->num_planes > 1)
4019 * For D swizzle the canonical modifier depends on the bpp, so check it here.
4022 if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) == AMD_FMT_MOD_TILE_VER_GFX9 &&
4023 adev->family >= AMDGPU_FAMILY_NV) {
4024 if (microtile == MICRO_SWIZZLE_D && info->cpp[0] == 4)
4028 if (adev->family >= AMDGPU_FAMILY_RV && microtile == MICRO_SWIZZLE_D &&
4032 if (modifier_has_dcc(modifier)) {
4033 /* Per radeonsi comments 16/64 bpp are more complicated. */
4034 if (info->cpp[0] != 4)
4042 add_modifier(uint64_t **mods, uint64_t *size, uint64_t *cap, uint64_t mod)
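/* Appends 'mod' to a dynamically sized modifier array, doubling the
 * capacity whenever the array runs out of space. */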
4047 if (*cap - *size < 1) {
4048 uint64_t new_cap = *cap * 2;
4049 uint64_t *new_mods = kmalloc(new_cap * sizeof(uint64_t), GFP_KERNEL);
4057 memcpy(new_mods, *mods, sizeof(uint64_t) * *size);
4063 (*mods)[*size] = mod;
4068 add_gfx9_modifiers(const struct amdgpu_device *adev,
4069 uint64_t **mods, uint64_t *size, uint64_t *capacity)
4071 int pipes = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
4072 int pipe_xor_bits = min(8, pipes +
4073 ilog2(adev->gfx.config.gb_addr_config_fields.num_se));
4074 int bank_xor_bits = min(8 - pipe_xor_bits,
4075 ilog2(adev->gfx.config.gb_addr_config_fields.num_banks));
4076 int rb = ilog2(adev->gfx.config.gb_addr_config_fields.num_se) +
4077 ilog2(adev->gfx.config.gb_addr_config_fields.num_rb_per_se);
4080 if (adev->family == AMDGPU_FAMILY_RV) {
4081 /* Raven2 and later */
4082 bool has_constant_encode = adev->asic_type > CHIP_RAVEN || adev->external_rev_id >= 0x81;
4085 * No _D DCC swizzles yet because we only allow 32bpp, which
4086 * doesn't support _D on DCN
4089 if (has_constant_encode) {
4090 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4091 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4092 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4093 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4094 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4095 AMD_FMT_MOD_SET(DCC, 1) |
4096 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4097 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4098 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1));
4101 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4102 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4103 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4104 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4105 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4106 AMD_FMT_MOD_SET(DCC, 1) |
4107 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4108 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4109 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0));
4111 if (has_constant_encode) {
4112 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4113 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4114 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4115 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4116 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4117 AMD_FMT_MOD_SET(DCC, 1) |
4118 AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4119 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4120 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4122 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4123 AMD_FMT_MOD_SET(RB, rb) |
4124 AMD_FMT_MOD_SET(PIPE, pipes));
4127 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4128 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4129 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4130 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4131 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4132 AMD_FMT_MOD_SET(DCC, 1) |
4133 AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4134 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4135 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4136 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0) |
4137 AMD_FMT_MOD_SET(RB, rb) |
4138 AMD_FMT_MOD_SET(PIPE, pipes));
4142 * Only supported for 64bpp on Raven, will be filtered on format in
4143 * dm_plane_format_mod_supported.
4145 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4146 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D_X) |
4147 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4148 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4149 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
4151 if (adev->family == AMDGPU_FAMILY_RV) {
4152 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4153 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4154 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4155 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4156 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
4160 * Only supported for 64bpp on Raven, will be filtered on format in
4161 * dm_plane_format_mod_supported.
4163 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4164 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
4165 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4167 if (adev->family == AMDGPU_FAMILY_RV) {
4168 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4169 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
4170 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4175 add_gfx10_1_modifiers(const struct amdgpu_device *adev,
4176 uint64_t **mods, uint64_t *size, uint64_t *capacity)
4178 int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
4180 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4181 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4182 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4183 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4184 AMD_FMT_MOD_SET(DCC, 1) |
4185 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4186 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4187 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
4189 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4190 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4191 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4192 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4193 AMD_FMT_MOD_SET(DCC, 1) |
4194 AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4195 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4196 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4197 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
4199 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4200 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4201 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4202 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
4204 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4205 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4206 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4207 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
4210 /* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
4211 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4212 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
4213 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4215 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4216 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
4217 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4221 add_gfx10_3_modifiers(const struct amdgpu_device *adev,
4222 uint64_t **mods, uint64_t *size, uint64_t *capacity)
4224 int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
4225 int pkrs = ilog2(adev->gfx.config.gb_addr_config_fields.num_pkrs);
4227 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4228 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4229 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4230 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4231 AMD_FMT_MOD_SET(PACKERS, pkrs) |
4232 AMD_FMT_MOD_SET(DCC, 1) |
4233 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4234 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4235 AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
4236 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B));
4238 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4239 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4240 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4241 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4242 AMD_FMT_MOD_SET(PACKERS, pkrs) |
4243 AMD_FMT_MOD_SET(DCC, 1) |
4244 AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4245 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4246 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4247 AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
4248 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B));
4250 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4251 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4252 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4253 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4254 AMD_FMT_MOD_SET(PACKERS, pkrs));
4256 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4257 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4258 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4259 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4260 AMD_FMT_MOD_SET(PACKERS, pkrs));
4262 /* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
4263 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4264 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
4265 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4267 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4268 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
4269 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
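/*
 * Build the format-modifier list advertised for a plane. Cursor planes only
 * get LINEAR; other planes get the per-family GFX9/GFX10 modifiers above,
 * then LINEAR, with DRM_FORMAT_MOD_INVALID terminating the list.
 */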
4273 get_plane_modifiers(const struct amdgpu_device *adev, unsigned int plane_type, uint64_t **mods)
4275 uint64_t size = 0, capacity = 128;
4278 /* We have not hooked up any pre-GFX9 modifiers. */
4279 if (adev->family < AMDGPU_FAMILY_AI)
4282 *mods = kmalloc(capacity * sizeof(uint64_t), GFP_KERNEL);
4284 if (plane_type == DRM_PLANE_TYPE_CURSOR) {
4285 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
4286 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
4287 return *mods ? 0 : -ENOMEM;
4290 switch (adev->family) {
4291 case AMDGPU_FAMILY_AI:
4292 case AMDGPU_FAMILY_RV:
4293 add_gfx9_modifiers(adev, mods, &size, &capacity);
4295 case AMDGPU_FAMILY_NV:
4296 case AMDGPU_FAMILY_VGH:
4297 if (adev->asic_type >= CHIP_SIENNA_CICHLID)
4298 add_gfx10_3_modifiers(adev, mods, &size, &capacity);
4300 add_gfx10_1_modifiers(adev, mods, &size, &capacity);
4304 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
4306 /* INVALID marks the end of the list. */
4307 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
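/*
 * Derive GFX9+ tiling and DCC parameters for a plane from its framebuffer
 * modifier: the swizzle mode comes from the modifier and, when the modifier
 * carries DCC, the metadata address/pitch come from plane 1 of the fb.
 */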
4316 fill_gfx9_plane_attributes_from_modifiers(struct amdgpu_device *adev,
4317 const struct amdgpu_framebuffer *afb,
4318 const enum surface_pixel_format format,
4319 const enum dc_rotation_angle rotation,
4320 const struct plane_size *plane_size,
4321 union dc_tiling_info *tiling_info,
4322 struct dc_plane_dcc_param *dcc,
4323 struct dc_plane_address *address,
4324 const bool force_disable_dcc)
4326 const uint64_t modifier = afb->base.modifier;
4329 fill_gfx9_tiling_info_from_modifier(adev, tiling_info, modifier);
4330 tiling_info->gfx9.swizzle = modifier_gfx9_swizzle_mode(modifier);
4332 if (modifier_has_dcc(modifier) && !force_disable_dcc) {
4333 uint64_t dcc_address = afb->address + afb->base.offsets[1];
4336 dcc->meta_pitch = afb->base.pitches[1];
4337 dcc->independent_64b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_64B, modifier);
4339 address->grph.meta_addr.low_part = lower_32_bits(dcc_address);
4340 address->grph.meta_addr.high_part = upper_32_bits(dcc_address);
4343 ret = validate_dcc(adev, format, rotation, tiling_info, dcc, address, plane_size);
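/*
 * Fill DC plane size, tiling, DCC and address info for a framebuffer.
 * RGB surfaces use a single GRAPHICS address; video (YUV) surfaces get
 * separate luma/chroma addresses. GFX9+ derives tiling from the modifier,
 * older ASICs from the legacy tiling flags.
 */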
4351 fill_plane_buffer_attributes(struct amdgpu_device *adev,
4352 const struct amdgpu_framebuffer *afb,
4353 const enum surface_pixel_format format,
4354 const enum dc_rotation_angle rotation,
4355 const uint64_t tiling_flags,
4356 union dc_tiling_info *tiling_info,
4357 struct plane_size *plane_size,
4358 struct dc_plane_dcc_param *dcc,
4359 struct dc_plane_address *address,
4361 bool force_disable_dcc)
4363 const struct drm_framebuffer *fb = &afb->base;
4366 memset(tiling_info, 0, sizeof(*tiling_info));
4367 memset(plane_size, 0, sizeof(*plane_size));
4368 memset(dcc, 0, sizeof(*dcc));
4369 memset(address, 0, sizeof(*address));
4371 address->tmz_surface = tmz_surface;
4373 if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
4374 uint64_t addr = afb->address + fb->offsets[0];
4376 plane_size->surface_size.x = 0;
4377 plane_size->surface_size.y = 0;
4378 plane_size->surface_size.width = fb->width;
4379 plane_size->surface_size.height = fb->height;
4380 plane_size->surface_pitch =
4381 fb->pitches[0] / fb->format->cpp[0];
4383 address->type = PLN_ADDR_TYPE_GRAPHICS;
4384 address->grph.addr.low_part = lower_32_bits(addr);
4385 address->grph.addr.high_part = upper_32_bits(addr);
4386 } else if (format < SURFACE_PIXEL_FORMAT_INVALID) {
4387 uint64_t luma_addr = afb->address + fb->offsets[0];
4388 uint64_t chroma_addr = afb->address + fb->offsets[1];
4390 plane_size->surface_size.x = 0;
4391 plane_size->surface_size.y = 0;
4392 plane_size->surface_size.width = fb->width;
4393 plane_size->surface_size.height = fb->height;
4394 plane_size->surface_pitch =
4395 fb->pitches[0] / fb->format->cpp[0];
4397 plane_size->chroma_size.x = 0;
4398 plane_size->chroma_size.y = 0;
4399 /* TODO: set these based on surface format */
4400 plane_size->chroma_size.width = fb->width / 2;
4401 plane_size->chroma_size.height = fb->height / 2;
4403 plane_size->chroma_pitch =
4404 fb->pitches[1] / fb->format->cpp[1];
4406 address->type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
4407 address->video_progressive.luma_addr.low_part =
4408 lower_32_bits(luma_addr);
4409 address->video_progressive.luma_addr.high_part =
4410 upper_32_bits(luma_addr);
4411 address->video_progressive.chroma_addr.low_part =
4412 lower_32_bits(chroma_addr);
4413 address->video_progressive.chroma_addr.high_part =
4414 upper_32_bits(chroma_addr);
4417 if (adev->family >= AMDGPU_FAMILY_AI) {
4418 ret = fill_gfx9_plane_attributes_from_modifiers(adev, afb, format,
4419 rotation, plane_size,
4426 fill_gfx8_tiling_info_from_flags(tiling_info, tiling_flags);
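/*
 * Work out alpha blending for an overlay plane: per-pixel alpha when the
 * blend mode is pre-multiplied and the format has an alpha channel, and a
 * global alpha value when the plane alpha property is below 0xffff.
 */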
4433 fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
4434 bool *per_pixel_alpha, bool *global_alpha,
4435 int *global_alpha_value)
4437 *per_pixel_alpha = false;
4438 *global_alpha = false;
4439 *global_alpha_value = 0xff;
4441 if (plane_state->plane->type != DRM_PLANE_TYPE_OVERLAY)
4444 if (plane_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI) {
4445 static const uint32_t alpha_formats[] = {
4446 DRM_FORMAT_ARGB8888,
4447 DRM_FORMAT_RGBA8888,
4448 DRM_FORMAT_ABGR8888,
4450 uint32_t format = plane_state->fb->format->format;
4453 for (i = 0; i < ARRAY_SIZE(alpha_formats); ++i) {
4454 if (format == alpha_formats[i]) {
4455 *per_pixel_alpha = true;
4461 if (plane_state->alpha < 0xffff) {
4462 *global_alpha = true;
4463 *global_alpha_value = plane_state->alpha >> 8;
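/*
 * Map the DRM color encoding/range properties of a YUV plane to a DC color
 * space; RGB formats always use sRGB.
 */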
4468 fill_plane_color_attributes(const struct drm_plane_state *plane_state,
4469 const enum surface_pixel_format format,
4470 enum dc_color_space *color_space)
4474 *color_space = COLOR_SPACE_SRGB;
4476 /* DRM color properties only affect non-RGB formats. */
4477 if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
4480 full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE);
4482 switch (plane_state->color_encoding) {
4483 case DRM_COLOR_YCBCR_BT601:
4485 *color_space = COLOR_SPACE_YCBCR601;
4487 *color_space = COLOR_SPACE_YCBCR601_LIMITED;
4490 case DRM_COLOR_YCBCR_BT709:
4492 *color_space = COLOR_SPACE_YCBCR709;
4494 *color_space = COLOR_SPACE_YCBCR709_LIMITED;
4497 case DRM_COLOR_YCBCR_BT2020:
4499 *color_space = COLOR_SPACE_2020_YCBCR;
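/*
 * Translate a DRM plane state into DC plane info: pixel format, rotation,
 * color space, tiling/DCC/address and blending attributes.
 */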
4512 fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
4513 const struct drm_plane_state *plane_state,
4514 const uint64_t tiling_flags,
4515 struct dc_plane_info *plane_info,
4516 struct dc_plane_address *address,
4518 bool force_disable_dcc)
4520 const struct drm_framebuffer *fb = plane_state->fb;
4521 const struct amdgpu_framebuffer *afb =
4522 to_amdgpu_framebuffer(plane_state->fb);
4523 struct drm_format_name_buf format_name;
4526 memset(plane_info, 0, sizeof(*plane_info));
4528 switch (fb->format->format) {
4530 plane_info->format =
4531 SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
4533 case DRM_FORMAT_RGB565:
4534 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
4536 case DRM_FORMAT_XRGB8888:
4537 case DRM_FORMAT_ARGB8888:
4538 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
4540 case DRM_FORMAT_XRGB2101010:
4541 case DRM_FORMAT_ARGB2101010:
4542 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
4544 case DRM_FORMAT_XBGR2101010:
4545 case DRM_FORMAT_ABGR2101010:
4546 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
4548 case DRM_FORMAT_XBGR8888:
4549 case DRM_FORMAT_ABGR8888:
4550 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888;
4552 case DRM_FORMAT_NV21:
4553 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
4555 case DRM_FORMAT_NV12:
4556 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
4558 case DRM_FORMAT_P010:
4559 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb;
4561 case DRM_FORMAT_XRGB16161616F:
4562 case DRM_FORMAT_ARGB16161616F:
4563 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F;
4565 case DRM_FORMAT_XBGR16161616F:
4566 case DRM_FORMAT_ABGR16161616F:
4567 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F;
4571 "Unsupported screen format %s\n",
4572 drm_get_format_name(fb->format->format, &format_name));
4576 switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
4577 case DRM_MODE_ROTATE_0:
4578 plane_info->rotation = ROTATION_ANGLE_0;
4580 case DRM_MODE_ROTATE_90:
4581 plane_info->rotation = ROTATION_ANGLE_90;
4583 case DRM_MODE_ROTATE_180:
4584 plane_info->rotation = ROTATION_ANGLE_180;
4586 case DRM_MODE_ROTATE_270:
4587 plane_info->rotation = ROTATION_ANGLE_270;
4590 plane_info->rotation = ROTATION_ANGLE_0;
4594 plane_info->visible = true;
4595 plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE;
4597 plane_info->layer_index = 0;
4599 ret = fill_plane_color_attributes(plane_state, plane_info->format,
4600 &plane_info->color_space);
4604 ret = fill_plane_buffer_attributes(adev, afb, plane_info->format,
4605 plane_info->rotation, tiling_flags,
4606 &plane_info->tiling_info,
4607 &plane_info->plane_size,
4608 &plane_info->dcc, address, tmz_surface,
4613 fill_blending_from_plane_state(
4614 plane_state, &plane_info->per_pixel_alpha,
4615 &plane_info->global_alpha, &plane_info->global_alpha_value);
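/*
 * Populate a dc_plane_state from the DRM plane/CRTC state: scaling rects,
 * buffer and blending attributes, and the input transfer function for
 * color management. DCC is force-disabled on Raven while suspending.
 */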
4620 static int fill_dc_plane_attributes(struct amdgpu_device *adev,
4621 struct dc_plane_state *dc_plane_state,
4622 struct drm_plane_state *plane_state,
4623 struct drm_crtc_state *crtc_state)
4625 struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
4626 struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)plane_state->fb;
4627 struct dc_scaling_info scaling_info;
4628 struct dc_plane_info plane_info;
4630 bool force_disable_dcc = false;
4632 ret = fill_dc_scaling_info(plane_state, &scaling_info);
4636 dc_plane_state->src_rect = scaling_info.src_rect;
4637 dc_plane_state->dst_rect = scaling_info.dst_rect;
4638 dc_plane_state->clip_rect = scaling_info.clip_rect;
4639 dc_plane_state->scaling_quality = scaling_info.scaling_quality;
4641 force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
4642 ret = fill_dc_plane_info_and_addr(adev, plane_state,
4645 &dc_plane_state->address,
4651 dc_plane_state->format = plane_info.format;
4652 dc_plane_state->color_space = plane_info.color_space;
4654 dc_plane_state->plane_size = plane_info.plane_size;
4655 dc_plane_state->rotation = plane_info.rotation;
4656 dc_plane_state->horizontal_mirror = plane_info.horizontal_mirror;
4657 dc_plane_state->stereo_format = plane_info.stereo_format;
4658 dc_plane_state->tiling_info = plane_info.tiling_info;
4659 dc_plane_state->visible = plane_info.visible;
4660 dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha;
4661 dc_plane_state->global_alpha = plane_info.global_alpha;
4662 dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
4663 dc_plane_state->dcc = plane_info.dcc;
4664 dc_plane_state->layer_index = plane_info.layer_index; // Always returns 0
4667 * Always set input transfer function, since plane state is refreshed
4670 ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state, dc_plane_state);
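/*
 * Compute the source viewport and destination rectangle for a stream based
 * on the connector scaling mode (full, aspect, center) and any underscan
 * borders requested by the connector state.
 */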
4677 static void update_stream_scaling_settings(const struct drm_display_mode *mode,
4678 const struct dm_connector_state *dm_state,
4679 struct dc_stream_state *stream)
4681 enum amdgpu_rmx_type rmx_type;
4683 struct rect src = { 0 }; /* viewport in composition space */
4684 struct rect dst = { 0 }; /* stream addressable area */
4686 /* No mode, nothing to be done */
4690 /* Full screen scaling by default */
4691 src.width = mode->hdisplay;
4692 src.height = mode->vdisplay;
4693 dst.width = stream->timing.h_addressable;
4694 dst.height = stream->timing.v_addressable;
4697 rmx_type = dm_state->scaling;
4698 if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
4699 if (src.width * dst.height <
4700 src.height * dst.width) {
4701 /* height needs less upscaling/more downscaling */
4702 dst.width = src.width *
4703 dst.height / src.height;
4705 /* width needs less upscaling/more downscaling */
4706 dst.height = src.height *
4707 dst.width / src.width;
4709 } else if (rmx_type == RMX_CENTER) {
4713 dst.x = (stream->timing.h_addressable - dst.width) / 2;
4714 dst.y = (stream->timing.v_addressable - dst.height) / 2;
4716 if (dm_state->underscan_enable) {
4717 dst.x += dm_state->underscan_hborder / 2;
4718 dst.y += dm_state->underscan_vborder / 2;
4719 dst.width -= dm_state->underscan_hborder;
4720 dst.height -= dm_state->underscan_vborder;
4727 DRM_DEBUG_DRIVER("Destination Rectangle x:%d y:%d width:%d height:%d\n",
4728 dst.x, dst.y, dst.width, dst.height);
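/*
 * Pick a DC color depth from the connector's display info, capped by the
 * HDMI YCbCr 4:2:0 deep-color caps when applicable and by the bpc the user
 * requested, then rounded down to an even number of bits per component.
 */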
4732 static enum dc_color_depth
4733 convert_color_depth_from_display_info(const struct drm_connector *connector,
4734 bool is_y420, int requested_bpc)
4741 /* Cap display bpc based on HDMI 2.0 HF-VSDB */
4742 if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48)
4744 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36)
4746 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30)
4749 bpc = (uint8_t)connector->display_info.bpc;
4750 /* Assume 8 bpc by default if no bpc is specified. */
4751 bpc = bpc ? bpc : 8;
4754 if (requested_bpc > 0) {
4756 * Cap display bpc based on the user requested value.
4758 * The value for state->max_bpc may not be correctly updated
4759 * depending on when the connector gets added to the state
4760 * or if this was called outside of atomic check, so it
4761 * can't be used directly.
4763 bpc = min_t(u8, bpc, requested_bpc);
4765 /* Round down to the nearest even number. */
4766 bpc = bpc - (bpc & 1);
4772 * Temporary workaround: DRM doesn't parse color depth for
4773 * EDID revisions before 1.4.
4774 * TODO: Fix EDID parsing.
4776 return COLOR_DEPTH_888;
4778 return COLOR_DEPTH_666;
4780 return COLOR_DEPTH_888;
4782 return COLOR_DEPTH_101010;
4784 return COLOR_DEPTH_121212;
4786 return COLOR_DEPTH_141414;
4788 return COLOR_DEPTH_161616;
4790 return COLOR_DEPTH_UNDEFINED;
4794 static enum dc_aspect_ratio
4795 get_aspect_ratio(const struct drm_display_mode *mode_in)
4797 /* 1-1 mapping, since both enums follow the HDMI spec. */
4798 return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio;
4801 static enum dc_color_space
4802 get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
4804 enum dc_color_space color_space = COLOR_SPACE_SRGB;
4806 switch (dc_crtc_timing->pixel_encoding) {
4807 case PIXEL_ENCODING_YCBCR422:
4808 case PIXEL_ENCODING_YCBCR444:
4809 case PIXEL_ENCODING_YCBCR420:
4812 * 27030 kHz is the separation point between HDTV and SDTV.
4813 * Per the HDMI spec, we use YCbCr709 above it and YCbCr601 below.
4816 if (dc_crtc_timing->pix_clk_100hz > 270300) {
4817 if (dc_crtc_timing->flags.Y_ONLY)
4819 COLOR_SPACE_YCBCR709_LIMITED;
4821 color_space = COLOR_SPACE_YCBCR709;
4823 if (dc_crtc_timing->flags.Y_ONLY)
4825 COLOR_SPACE_YCBCR601_LIMITED;
4827 color_space = COLOR_SPACE_YCBCR601;
4832 case PIXEL_ENCODING_RGB:
4833 color_space = COLOR_SPACE_SRGB;
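/*
 * Reduce the stream color depth until the resulting TMDS clock fits within
 * the display's max_tmds_clock; returns whether a usable depth was found.
 */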
4844 static bool adjust_colour_depth_from_display_info(
4845 struct dc_crtc_timing *timing_out,
4846 const struct drm_display_info *info)
4848 enum dc_color_depth depth = timing_out->display_color_depth;
4851 normalized_clk = timing_out->pix_clk_100hz / 10;
4852 /* YCbCr 4:2:0 requires additional adjustment of 1/2 */
4853 if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
4854 normalized_clk /= 2;
4855 /* Adjust the pixel clock per the HDMI spec, based on the colour depth */
4857 case COLOR_DEPTH_888:
4859 case COLOR_DEPTH_101010:
4860 normalized_clk = (normalized_clk * 30) / 24;
4862 case COLOR_DEPTH_121212:
4863 normalized_clk = (normalized_clk * 36) / 24;
4865 case COLOR_DEPTH_161616:
4866 normalized_clk = (normalized_clk * 48) / 24;
4869 /* The above depths are the only ones valid for HDMI. */
4872 if (normalized_clk <= info->max_tmds_clock) {
4873 timing_out->display_color_depth = depth;
4876 } while (--depth > COLOR_DEPTH_666);
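/*
 * Fill the DC stream timing from a DRM display mode: pixel encoding
 * (RGB/YCbCr), color depth, VIC, sync polarities, active/total timings and
 * output color space, re-validating the depth against HDMI TMDS limits.
 */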
4880 static void fill_stream_properties_from_drm_display_mode(
4881 struct dc_stream_state *stream,
4882 const struct drm_display_mode *mode_in,
4883 const struct drm_connector *connector,
4884 const struct drm_connector_state *connector_state,
4885 const struct dc_stream_state *old_stream,
4888 struct dc_crtc_timing *timing_out = &stream->timing;
4889 const struct drm_display_info *info = &connector->display_info;
4890 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
4891 struct hdmi_vendor_infoframe hv_frame;
4892 struct hdmi_avi_infoframe avi_frame;
4894 memset(&hv_frame, 0, sizeof(hv_frame));
4895 memset(&avi_frame, 0, sizeof(avi_frame));
4897 timing_out->h_border_left = 0;
4898 timing_out->h_border_right = 0;
4899 timing_out->v_border_top = 0;
4900 timing_out->v_border_bottom = 0;
4901 /* TODO: un-hardcode */
4902 if (drm_mode_is_420_only(info, mode_in)
4903 && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
4904 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
4905 else if (drm_mode_is_420_also(info, mode_in)
4906 && aconnector->force_yuv420_output)
4907 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
4908 else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444)
4909 && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
4910 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
4912 timing_out->pixel_encoding = PIXEL_ENCODING_RGB;
4914 timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
4915 timing_out->display_color_depth = convert_color_depth_from_display_info(
4917 (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420),
4919 timing_out->scan_type = SCANNING_TYPE_NODATA;
4920 timing_out->hdmi_vic = 0;
4923 timing_out->vic = old_stream->timing.vic;
4924 timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY;
4925 timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY;
4927 timing_out->vic = drm_match_cea_mode(mode_in);
4928 if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
4929 timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
4930 if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
4931 timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
4934 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
4935 drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in);
4936 timing_out->vic = avi_frame.video_code;
4937 drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in);
4938 timing_out->hdmi_vic = hv_frame.vic;
4941 timing_out->h_addressable = mode_in->crtc_hdisplay;
4942 timing_out->h_total = mode_in->crtc_htotal;
4943 timing_out->h_sync_width =
4944 mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
4945 timing_out->h_front_porch =
4946 mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
4947 timing_out->v_total = mode_in->crtc_vtotal;
4948 timing_out->v_addressable = mode_in->crtc_vdisplay;
4949 timing_out->v_front_porch =
4950 mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
4951 timing_out->v_sync_width =
4952 mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
4953 timing_out->pix_clk_100hz = mode_in->crtc_clock * 10;
4954 timing_out->aspect_ratio = get_aspect_ratio(mode_in);
4956 stream->output_color_space = get_output_color_space(timing_out);
4958 stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
4959 stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
4960 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
4961 if (!adjust_colour_depth_from_display_info(timing_out, info) &&
4962 drm_mode_is_420_also(info, mode_in) &&
4963 timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) {
4964 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
4965 adjust_colour_depth_from_display_info(timing_out, info);
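/*
 * Copy the sink's EDID audio capabilities (display name, audio modes,
 * speaker allocation and latencies) into the DC audio_info structure.
 */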
4970 static void fill_audio_info(struct audio_info *audio_info,
4971 const struct drm_connector *drm_connector,
4972 const struct dc_sink *dc_sink)
4975 int cea_revision = 0;
4976 const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;
4978 audio_info->manufacture_id = edid_caps->manufacturer_id;
4979 audio_info->product_id = edid_caps->product_id;
4981 cea_revision = drm_connector->display_info.cea_rev;
4983 strscpy(audio_info->display_name,
4984 edid_caps->display_name,
4985 AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);
4987 if (cea_revision >= 3) {
4988 audio_info->mode_count = edid_caps->audio_mode_count;
4990 for (i = 0; i < audio_info->mode_count; ++i) {
4991 audio_info->modes[i].format_code =
4992 (enum audio_format_code)
4993 (edid_caps->audio_modes[i].format_code);
4994 audio_info->modes[i].channel_count =
4995 edid_caps->audio_modes[i].channel_count;
4996 audio_info->modes[i].sample_rates.all =
4997 edid_caps->audio_modes[i].sample_rate;
4998 audio_info->modes[i].sample_size =
4999 edid_caps->audio_modes[i].sample_size;
5003 audio_info->flags.all = edid_caps->speaker_flags;
5005 /* TODO: We only check for progressive mode; check for interlaced mode too */
5006 if (drm_connector->latency_present[0]) {
5007 audio_info->video_latency = drm_connector->video_latency[0];
5008 audio_info->audio_latency = drm_connector->audio_latency[0];
5011 /* TODO: For DP, video and audio latency should be calculated from DPCD caps */
5016 copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
5017 struct drm_display_mode *dst_mode)
5019 dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
5020 dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
5021 dst_mode->crtc_clock = src_mode->crtc_clock;
5022 dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
5023 dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
5024 dst_mode->crtc_hsync_start = src_mode->crtc_hsync_start;
5025 dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
5026 dst_mode->crtc_htotal = src_mode->crtc_htotal;
5027 dst_mode->crtc_hskew = src_mode->crtc_hskew;
5028 dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
5029 dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
5030 dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
5031 dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
5032 dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
5036 decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
5037 const struct drm_display_mode *native_mode,
5040 if (scale_enabled) {
5041 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
5042 } else if (native_mode->clock == drm_mode->clock &&
5043 native_mode->htotal == drm_mode->htotal &&
5044 native_mode->vtotal == drm_mode->vtotal) {
5045 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
5047 /* no scaling nor amdgpu-inserted mode, no need to patch */
5051 static struct dc_sink *
5052 create_fake_sink(struct amdgpu_dm_connector *aconnector)
5054 struct dc_sink_init_data sink_init_data = { 0 };
5055 struct dc_sink *sink = NULL;
5056 sink_init_data.link = aconnector->dc_link;
5057 sink_init_data.sink_signal = aconnector->dc_link->connector_signal;
5059 sink = dc_sink_create(&sink_init_data);
5061 DRM_ERROR("Failed to create sink!\n");
5064 sink->sink_signal = SIGNAL_TYPE_VIRTUAL;
5069 static void set_multisync_trigger_params(
5070 struct dc_stream_state *stream)
5072 if (stream->triggered_crtc_reset.enabled) {
5073 stream->triggered_crtc_reset.event = CRTC_EVENT_VSYNC_RISING;
5074 stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_LINE;
5078 static void set_master_stream(struct dc_stream_state *stream_set[],
5081 int j, highest_rfr = 0, master_stream = 0;
5083 for (j = 0; j < stream_count; j++) {
5084 if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
5085 int refresh_rate = 0;
5087 refresh_rate = (stream_set[j]->timing.pix_clk_100hz*100)/
5088 (stream_set[j]->timing.h_total*stream_set[j]->timing.v_total);
5089 if (refresh_rate > highest_rfr) {
5090 highest_rfr = refresh_rate;
5095 for (j = 0; j < stream_count; j++) {
5097 stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
5101 static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
5105 if (context->stream_count < 2)
5107 for (i = 0; i < context->stream_count ; i++) {
5108 if (!context->streams[i])
5111 * TODO: add a function to read AMD VSDB bits and set
5112 * crtc_sync_master.multi_sync_enabled flag
5113 * For now it's set to false
5115 set_multisync_trigger_params(context->streams[i]);
5117 set_master_stream(context->streams, context->stream_count);
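/*
 * Create and fill a DC stream for the given connector and mode: pick the
 * sink (or a virtual one when none is attached), fill stream timing and
 * scaling, configure DSC for DP sinks when supported, and build the HDMI
 * VSIF and, for PSR-capable links, VSC info packets.
 */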
5120 static struct dc_stream_state *
5121 create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
5122 const struct drm_display_mode *drm_mode,
5123 const struct dm_connector_state *dm_state,
5124 const struct dc_stream_state *old_stream,
5127 struct drm_display_mode *preferred_mode = NULL;
5128 struct drm_connector *drm_connector;
5129 const struct drm_connector_state *con_state =
5130 dm_state ? &dm_state->base : NULL;
5131 struct dc_stream_state *stream = NULL;
5132 struct drm_display_mode mode = *drm_mode;
5133 bool native_mode_found = false;
5134 bool scale = dm_state ? (dm_state->scaling != RMX_OFF) : false;
5136 int preferred_refresh = 0;
5137 #if defined(CONFIG_DRM_AMD_DC_DCN)
5138 struct dsc_dec_dpcd_caps dsc_caps;
5139 uint32_t link_bandwidth_kbps;
5141 struct dc_sink *sink = NULL;
5142 if (aconnector == NULL) {
5143 DRM_ERROR("aconnector is NULL!\n");
5147 drm_connector = &aconnector->base;
5149 if (!aconnector->dc_sink) {
5150 sink = create_fake_sink(aconnector);
5154 sink = aconnector->dc_sink;
5155 dc_sink_retain(sink);
5158 stream = dc_create_stream_for_sink(sink);
5160 if (stream == NULL) {
5161 DRM_ERROR("Failed to create stream for sink!\n");
5165 stream->dm_stream_context = aconnector;
5167 stream->timing.flags.LTE_340MCSC_SCRAMBLE =
5168 drm_connector->display_info.hdmi.scdc.scrambling.low_rates;
5170 list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
5171 /* Search for preferred mode */
5172 if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
5173 native_mode_found = true;
5177 if (!native_mode_found)
5178 preferred_mode = list_first_entry_or_null(
5179 &aconnector->base.modes,
5180 struct drm_display_mode,
5183 mode_refresh = drm_mode_vrefresh(&mode);
5185 if (preferred_mode == NULL) {
5187 * This may not be an error; the use case is when we have no
5188 * usermode calls to reset and set mode upon hotplug. In this
5189 * case, we call set mode ourselves to restore the previous mode
5190 * and the mode list may not yet be filled in.
5192 DRM_DEBUG_DRIVER("No preferred mode found\n");
5194 decide_crtc_timing_for_drm_display_mode(
5195 &mode, preferred_mode,
5196 dm_state ? (dm_state->scaling != RMX_OFF) : false);
5197 preferred_refresh = drm_mode_vrefresh(preferred_mode);
5201 drm_mode_set_crtcinfo(&mode, 0);
5204 * If scaling is enabled and the refresh rate didn't change,
5205 * copy the VIC and polarities from the old timings.
5207 if (!scale || mode_refresh != preferred_refresh)
5208 fill_stream_properties_from_drm_display_mode(stream,
5209 &mode, &aconnector->base, con_state, NULL, requested_bpc);
5211 fill_stream_properties_from_drm_display_mode(stream,
5212 &mode, &aconnector->base, con_state, old_stream, requested_bpc);
5214 stream->timing.flags.DSC = 0;
5216 if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
5217 #if defined(CONFIG_DRM_AMD_DC_DCN)
5218 dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
5219 aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
5220 aconnector->dc_link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw,
5222 link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
5223 dc_link_get_link_cap(aconnector->dc_link));
5225 if (aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE && dsc_caps.is_dsc_supported) {
5226 /* Set DSC policy according to dsc_clock_en */
5227 dc_dsc_policy_set_enable_dsc_when_not_needed(
5228 aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE);
5230 if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
5232 aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
5234 link_bandwidth_kbps,
5236 &stream->timing.dsc_cfg))
5237 stream->timing.flags.DSC = 1;
5238 /* Overwrite the stream flag if DSC is enabled through debugfs */
5239 if (aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE)
5240 stream->timing.flags.DSC = 1;
5242 if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_h)
5243 stream->timing.dsc_cfg.num_slices_h = aconnector->dsc_settings.dsc_num_slices_h;
5245 if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_v)
5246 stream->timing.dsc_cfg.num_slices_v = aconnector->dsc_settings.dsc_num_slices_v;
5248 if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_bits_per_pixel)
5249 stream->timing.dsc_cfg.bits_per_pixel = aconnector->dsc_settings.dsc_bits_per_pixel;
5254 update_stream_scaling_settings(&mode, dm_state, stream);
5257 &stream->audio_info,
5261 update_stream_signal(stream, sink);
5263 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
5264 mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket);
5266 if (stream->link->psr_settings.psr_feature_enabled) {
5268 // Decide whether the stream supports VSC SDP colorimetry
5269 // before building the VSC info packet.
5271 stream->use_vsc_sdp_for_colorimetry = false;
5272 if (aconnector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
5273 stream->use_vsc_sdp_for_colorimetry =
5274 aconnector->dc_sink->is_vsc_sdp_colorimetry_supported;
5276 if (stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED)
5277 stream->use_vsc_sdp_for_colorimetry = true;
5279 mod_build_vsc_infopacket(stream, &stream->vsc_infopacket);
5282 dc_sink_release(sink);
5287 static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
5289 drm_crtc_cleanup(crtc);
5293 static void dm_crtc_destroy_state(struct drm_crtc *crtc,
5294 struct drm_crtc_state *state)
5296 struct dm_crtc_state *cur = to_dm_crtc_state(state);
5298 /* TODO: Destroy dc_stream objects once the stream object is flattened */
5300 dc_stream_release(cur->stream);
5303 __drm_atomic_helper_crtc_destroy_state(state);
5309 static void dm_crtc_reset_state(struct drm_crtc *crtc)
5311 struct dm_crtc_state *state;
5314 dm_crtc_destroy_state(crtc, crtc->state);
5316 state = kzalloc(sizeof(*state), GFP_KERNEL);
5317 if (WARN_ON(!state))
5320 __drm_atomic_helper_crtc_reset(crtc, &state->base);
5323 static struct drm_crtc_state *
5324 dm_crtc_duplicate_state(struct drm_crtc *crtc)
5326 struct dm_crtc_state *state, *cur;
5328 cur = to_dm_crtc_state(crtc->state);
5330 if (WARN_ON(!crtc->state))
5333 state = kzalloc(sizeof(*state), GFP_KERNEL);
5337 __drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
5340 state->stream = cur->stream;
5341 dc_stream_retain(state->stream);
5344 state->active_planes = cur->active_planes;
5345 state->vrr_infopacket = cur->vrr_infopacket;
5346 state->abm_level = cur->abm_level;
5347 state->vrr_supported = cur->vrr_supported;
5348 state->freesync_config = cur->freesync_config;
5349 state->crc_src = cur->crc_src;
5350 state->cm_has_degamma = cur->cm_has_degamma;
5351 state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb;
5353 /* TODO: Duplicate dc_stream once the stream object is flattened */
5355 return &state->base;
5358 static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
5360 enum dc_irq_source irq_source;
5361 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
5362 struct amdgpu_device *adev = drm_to_adev(crtc->dev);
5365 irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst;
5367 rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
5369 DRM_DEBUG_DRIVER("crtc %d - vupdate irq %sabling: r=%d\n",
5370 acrtc->crtc_id, enable ? "en" : "dis", rc);
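/*
 * Enable or disable the CRTC's vblank interrupt. In VRR mode the vupdate
 * interrupt is toggled alongside it, and on DCN the active-vblank count is
 * used to gate idle optimizations (MALL).
 */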
5374 static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
5376 enum dc_irq_source irq_source;
5377 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
5378 struct amdgpu_device *adev = drm_to_adev(crtc->dev);
5379 struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
5380 struct amdgpu_display_manager *dm = &adev->dm;
5384 /* vblank irq on -> Only need vupdate irq in vrr mode */
5385 if (amdgpu_dm_vrr_active(acrtc_state))
5386 rc = dm_set_vupdate_irq(crtc, true);
5388 /* vblank irq off -> vupdate irq off */
5389 rc = dm_set_vupdate_irq(crtc, false);
5395 irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
5397 if (!dc_interrupt_set(adev->dm.dc, irq_source, enable))
5400 #if defined(CONFIG_DRM_AMD_DC_DCN)
5401 if (amdgpu_in_reset(adev))
5404 mutex_lock(&dm->dc_lock);
5407 dm->active_vblank_irq_count++;
5409 dm->active_vblank_irq_count--;
5411 #if defined(CONFIG_DRM_AMD_DC_DCN)
5412 dc_allow_idle_optimizations(
5413 adev->dm.dc, dm->active_vblank_irq_count == 0 ? true : false);
5415 DRM_DEBUG_DRIVER("Allow idle optimizations (MALL): %d\n", dm->active_vblank_irq_count == 0);
5418 mutex_unlock(&dm->dc_lock);
5424 static int dm_enable_vblank(struct drm_crtc *crtc)
5426 return dm_set_vblank(crtc, true);
5429 static void dm_disable_vblank(struct drm_crtc *crtc)
5431 dm_set_vblank(crtc, false);
5434 /* Implement only the options currently available for the driver */
5435 static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
5436 .reset = dm_crtc_reset_state,
5437 .destroy = amdgpu_dm_crtc_destroy,
5438 .set_config = drm_atomic_helper_set_config,
5439 .page_flip = drm_atomic_helper_page_flip,
5440 .atomic_duplicate_state = dm_crtc_duplicate_state,
5441 .atomic_destroy_state = dm_crtc_destroy_state,
5442 .set_crc_source = amdgpu_dm_crtc_set_crc_source,
5443 .verify_crc_source = amdgpu_dm_crtc_verify_crc_source,
5444 .get_crc_sources = amdgpu_dm_crtc_get_crc_sources,
5445 .get_vblank_counter = amdgpu_get_vblank_counter_kms,
5446 .enable_vblank = dm_enable_vblank,
5447 .disable_vblank = dm_disable_vblank,
5448 .get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
5451 static enum drm_connector_status
5452 amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
5455 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5459 * 1. This interface is NOT called in context of HPD irq.
5460 * 2. This interface *is called* in context of user-mode ioctl, which
5461 * makes it a bad place for *any* MST-related activity.
5464 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
5465 !aconnector->fake_enable)
5466 connected = (aconnector->dc_sink != NULL);
5468 connected = (aconnector->base.force == DRM_FORCE_ON);
5470 update_subconnector_property(aconnector);
5472 return (connected ? connector_status_connected :
5473 connector_status_disconnected);
5476 int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
5477 struct drm_connector_state *connector_state,
5478 struct drm_property *property,
5481 struct drm_device *dev = connector->dev;
5482 struct amdgpu_device *adev = drm_to_adev(dev);
5483 struct dm_connector_state *dm_old_state =
5484 to_dm_connector_state(connector->state);
5485 struct dm_connector_state *dm_new_state =
5486 to_dm_connector_state(connector_state);
5490 if (property == dev->mode_config.scaling_mode_property) {
5491 enum amdgpu_rmx_type rmx_type;
5494 case DRM_MODE_SCALE_CENTER:
5495 rmx_type = RMX_CENTER;
5497 case DRM_MODE_SCALE_ASPECT:
5498 rmx_type = RMX_ASPECT;
5500 case DRM_MODE_SCALE_FULLSCREEN:
5501 rmx_type = RMX_FULL;
5503 case DRM_MODE_SCALE_NONE:
5509 if (dm_old_state->scaling == rmx_type)
5512 dm_new_state->scaling = rmx_type;
5514 } else if (property == adev->mode_info.underscan_hborder_property) {
5515 dm_new_state->underscan_hborder = val;
5517 } else if (property == adev->mode_info.underscan_vborder_property) {
5518 dm_new_state->underscan_vborder = val;
5520 } else if (property == adev->mode_info.underscan_property) {
5521 dm_new_state->underscan_enable = val;
5523 } else if (property == adev->mode_info.abm_level_property) {
5524 dm_new_state->abm_level = val;
5531 int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
5532 const struct drm_connector_state *state,
5533 struct drm_property *property,
5536 struct drm_device *dev = connector->dev;
5537 struct amdgpu_device *adev = drm_to_adev(dev);
5538 struct dm_connector_state *dm_state =
5539 to_dm_connector_state(state);
5542 if (property == dev->mode_config.scaling_mode_property) {
5543 switch (dm_state->scaling) {
5545 *val = DRM_MODE_SCALE_CENTER;
5548 *val = DRM_MODE_SCALE_ASPECT;
5551 *val = DRM_MODE_SCALE_FULLSCREEN;
5555 *val = DRM_MODE_SCALE_NONE;
5559 } else if (property == adev->mode_info.underscan_hborder_property) {
5560 *val = dm_state->underscan_hborder;
5562 } else if (property == adev->mode_info.underscan_vborder_property) {
5563 *val = dm_state->underscan_vborder;
5565 } else if (property == adev->mode_info.underscan_property) {
5566 *val = dm_state->underscan_enable;
5568 } else if (property == adev->mode_info.abm_level_property) {
5569 *val = dm_state->abm_level;
5576 static void amdgpu_dm_connector_unregister(struct drm_connector *connector)
5578 struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
5580 drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux);
5583 static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
5585 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5586 const struct dc_link *link = aconnector->dc_link;
5587 struct amdgpu_device *adev = drm_to_adev(connector->dev);
5588 struct amdgpu_display_manager *dm = &adev->dm;
5591 * Call only if mst_mgr was initialized before, since it's not done
5592 * for all connector types.
5594 if (aconnector->mst_mgr.dev)
5595 drm_dp_mst_topology_mgr_destroy(&aconnector->mst_mgr);
5597 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
5598 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
5600 if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
5601 link->type != dc_connection_none &&
5602 dm->backlight_dev) {
5603 backlight_device_unregister(dm->backlight_dev);
5604 dm->backlight_dev = NULL;
5608 if (aconnector->dc_em_sink)
5609 dc_sink_release(aconnector->dc_em_sink);
5610 aconnector->dc_em_sink = NULL;
5611 if (aconnector->dc_sink)
5612 dc_sink_release(aconnector->dc_sink);
5613 aconnector->dc_sink = NULL;
5615 drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
5616 drm_connector_unregister(connector);
5617 drm_connector_cleanup(connector);
5618 if (aconnector->i2c) {
5619 i2c_del_adapter(&aconnector->i2c->base);
5620 kfree(aconnector->i2c);
5622 kfree(aconnector->dm_dp_aux.aux.name);
5627 void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
5629 struct dm_connector_state *state =
5630 to_dm_connector_state(connector->state);
5632 if (connector->state)
5633 __drm_atomic_helper_connector_destroy_state(connector->state);
5637 state = kzalloc(sizeof(*state), GFP_KERNEL);
5640 state->scaling = RMX_OFF;
5641 state->underscan_enable = false;
5642 state->underscan_hborder = 0;
5643 state->underscan_vborder = 0;
5644 state->base.max_requested_bpc = 8;
5645 state->vcpi_slots = 0;
5647 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
5648 state->abm_level = amdgpu_dm_abm_level;
5650 __drm_atomic_helper_connector_reset(connector, &state->base);
5654 struct drm_connector_state *
5655 amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
5657 struct dm_connector_state *state =
5658 to_dm_connector_state(connector->state);
5660 struct dm_connector_state *new_state =
5661 kmemdup(state, sizeof(*state), GFP_KERNEL);
5666 __drm_atomic_helper_connector_duplicate_state(connector, &new_state->base);
5668 new_state->freesync_capable = state->freesync_capable;
5669 new_state->abm_level = state->abm_level;
5670 new_state->scaling = state->scaling;
5671 new_state->underscan_enable = state->underscan_enable;
5672 new_state->underscan_hborder = state->underscan_hborder;
5673 new_state->underscan_vborder = state->underscan_vborder;
5674 new_state->vcpi_slots = state->vcpi_slots;
5675 new_state->pbn = state->pbn;
5676 return &new_state->base;
5680 amdgpu_dm_connector_late_register(struct drm_connector *connector)
5682 struct amdgpu_dm_connector *amdgpu_dm_connector =
5683 to_amdgpu_dm_connector(connector);
5686 if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
5687 (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
5688 amdgpu_dm_connector->dm_dp_aux.aux.dev = connector->kdev;
5689 r = drm_dp_aux_register(&amdgpu_dm_connector->dm_dp_aux.aux);
5694 #if defined(CONFIG_DEBUG_FS)
5695 connector_debugfs_init(amdgpu_dm_connector);
5701 static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
5702 .reset = amdgpu_dm_connector_funcs_reset,
5703 .detect = amdgpu_dm_connector_detect,
5704 .fill_modes = drm_helper_probe_single_connector_modes,
5705 .destroy = amdgpu_dm_connector_destroy,
5706 .atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
5707 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
5708 .atomic_set_property = amdgpu_dm_connector_atomic_set_property,
5709 .atomic_get_property = amdgpu_dm_connector_atomic_get_property,
5710 .late_register = amdgpu_dm_connector_late_register,
5711 .early_unregister = amdgpu_dm_connector_unregister
5714 static int get_modes(struct drm_connector *connector)
5716 return amdgpu_dm_connector_get_modes(connector);
5719 static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
5721 struct dc_sink_init_data init_params = {
5722 .link = aconnector->dc_link,
5723 .sink_signal = SIGNAL_TYPE_VIRTUAL
5727 if (!aconnector->base.edid_blob_ptr) {
5728 DRM_ERROR("No EDID firmware found on connector: %s ,forcing to OFF!\n",
5729 aconnector->base.name);
5731 aconnector->base.force = DRM_FORCE_OFF;
5732 aconnector->base.override_edid = false;
5736 edid = (struct edid *) aconnector->base.edid_blob_ptr->data;
5738 aconnector->edid = edid;
5740 aconnector->dc_em_sink = dc_link_add_remote_sink(
5741 aconnector->dc_link,
5743 (edid->extensions + 1) * EDID_LENGTH,
5746 if (aconnector->base.force == DRM_FORCE_ON) {
5747 aconnector->dc_sink = aconnector->dc_link->local_sink ?
5748 aconnector->dc_link->local_sink :
5749 aconnector->dc_em_sink;
5750 dc_sink_retain(aconnector->dc_sink);
5754 static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
5756 struct dc_link *link = (struct dc_link *)aconnector->dc_link;
5759 * In case of a headless boot with force-on for a DP managed connector,
5760 * these settings have to be != 0 to get an initial modeset.
5762 if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
5763 link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
5764 link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
5768 aconnector->base.override_edid = true;
5769 create_eml_sink(aconnector);
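/*
 * Create a stream for the sink and validate it with DC, retrying with a
 * lower max bpc (down to 6) if validation fails.
 */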
5772 static struct dc_stream_state *
5773 create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
5774 const struct drm_display_mode *drm_mode,
5775 const struct dm_connector_state *dm_state,
5776 const struct dc_stream_state *old_stream)
5778 struct drm_connector *connector = &aconnector->base;
5779 struct amdgpu_device *adev = drm_to_adev(connector->dev);
5780 struct dc_stream_state *stream;
5781 const struct drm_connector_state *drm_state = dm_state ? &dm_state->base : NULL;
5782 int requested_bpc = drm_state ? drm_state->max_requested_bpc : 8;
5783 enum dc_status dc_result = DC_OK;
5786 stream = create_stream_for_sink(aconnector, drm_mode,
5787 dm_state, old_stream,
5789 if (stream == NULL) {
5790 DRM_ERROR("Failed to create stream for sink!\n");
5794 dc_result = dc_validate_stream(adev->dm.dc, stream);
5796 if (dc_result != DC_OK) {
5797 DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d (%s)\n",
5802 dc_status_to_str(dc_result));
5804 dc_stream_release(stream);
5806 requested_bpc -= 2; /* lower bpc to retry validation */
5809 } while (stream == NULL && requested_bpc >= 6);
5814 enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
5815 struct drm_display_mode *mode)
5817 int result = MODE_ERROR;
5818 struct dc_sink *dc_sink;
5819 /* TODO: Unhardcode stream count */
5820 struct dc_stream_state *stream;
5821 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5823 if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
5824 (mode->flags & DRM_MODE_FLAG_DBLSCAN))
5828 * Only run this the first time mode_valid is called to initialize
5831 if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
5832 !aconnector->dc_em_sink)
5833 handle_edid_mgmt(aconnector);
5835 dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;
5837 if (dc_sink == NULL && aconnector->base.force != DRM_FORCE_ON_DIGITAL &&
5838 aconnector->base.force != DRM_FORCE_ON) {
5839 DRM_ERROR("dc_sink is NULL!\n");
5843 stream = create_validate_stream_for_sink(aconnector, mode, NULL, NULL);
5845 dc_stream_release(stream);
5850 /* TODO: error handling*/
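/*
 * Pack the connector's HDR output metadata into the DRM infoframe format
 * and wrap it in the connector-specific header (HDMI infoframe or DP SDP)
 * expected by DC.
 */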
5854 static int fill_hdr_info_packet(const struct drm_connector_state *state,
5855 struct dc_info_packet *out)
5857 struct hdmi_drm_infoframe frame;
5858 unsigned char buf[30]; /* 26 + 4 */
5862 memset(out, 0, sizeof(*out));
5864 if (!state->hdr_output_metadata)
5867 ret = drm_hdmi_infoframe_set_hdr_metadata(&frame, state);
5871 len = hdmi_drm_infoframe_pack_only(&frame, buf, sizeof(buf));
5875 /* Static metadata is a fixed 26 bytes + 4 byte header. */
5879 /* Prepare the infopacket for DC. */
5880 switch (state->connector->connector_type) {
5881 case DRM_MODE_CONNECTOR_HDMIA:
5882 out->hb0 = 0x87; /* type */
5883 out->hb1 = 0x01; /* version */
5884 out->hb2 = 0x1A; /* length */
5885 out->sb[0] = buf[3]; /* checksum */
5889 case DRM_MODE_CONNECTOR_DisplayPort:
5890 case DRM_MODE_CONNECTOR_eDP:
5891 out->hb0 = 0x00; /* sdp id, zero */
5892 out->hb1 = 0x87; /* type */
5893 out->hb2 = 0x1D; /* payload len - 1 */
5894 out->hb3 = (0x13 << 2); /* sdp version */
5895 out->sb[0] = 0x01; /* version */
5896 out->sb[1] = 0x1A; /* length */
5904 memcpy(&out->sb[i], &buf[4], 26);
5907 print_hex_dump(KERN_DEBUG, "HDR SB:", DUMP_PREFIX_NONE, 16, 1, out->sb,
5908 sizeof(out->sb), false);
5914 is_hdr_metadata_different(const struct drm_connector_state *old_state,
5915 const struct drm_connector_state *new_state)
5917 struct drm_property_blob *old_blob = old_state->hdr_output_metadata;
5918 struct drm_property_blob *new_blob = new_state->hdr_output_metadata;
5920 if (old_blob != new_blob) {
5921 if (old_blob && new_blob &&
5922 old_blob->length == new_blob->length)
5923 return memcmp(old_blob->data, new_blob->data,
5933 amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
5934 struct drm_atomic_state *state)
5936 struct drm_connector_state *new_con_state =
5937 drm_atomic_get_new_connector_state(state, conn);
5938 struct drm_connector_state *old_con_state =
5939 drm_atomic_get_old_connector_state(state, conn);
5940 struct drm_crtc *crtc = new_con_state->crtc;
5941 struct drm_crtc_state *new_crtc_state;
5944 trace_amdgpu_dm_connector_atomic_check(new_con_state);
5949 if (is_hdr_metadata_different(old_con_state, new_con_state)) {
5950 struct dc_info_packet hdr_infopacket;
5952 ret = fill_hdr_info_packet(new_con_state, &hdr_infopacket);
5956 new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
5957 if (IS_ERR(new_crtc_state))
5958 return PTR_ERR(new_crtc_state);
5961 * DC considers the stream backends changed if the
5962 * static metadata changes. Forcing the modeset also
5963 * gives a simple way for userspace to switch from
5964 * 8bpc to 10bpc when setting the metadata to enter
5967 * Changing the static metadata after it's been
5968 * set is permissible, however. So only force a
5969 * modeset if we're entering or exiting HDR.
5971 new_crtc_state->mode_changed =
5972 !old_con_state->hdr_output_metadata ||
5973 !new_con_state->hdr_output_metadata;
5979 static const struct drm_connector_helper_funcs
5980 amdgpu_dm_connector_helper_funcs = {
5982 * If hotplugging a second, bigger display in FB console mode, higher-resolution
5983 * modes will be filtered out by drm_mode_validate_size(), and those modes
5984 * are missing after the user starts lightdm. So we need to renew the mode
5985 * list in the get_modes callback, not just return the mode count.
5987 .get_modes = get_modes,
5988 .mode_valid = amdgpu_dm_connector_mode_valid,
5989 .atomic_check = amdgpu_dm_connector_atomic_check,
5992 static void dm_crtc_helper_disable(struct drm_crtc *crtc)
5996 static int count_crtc_active_planes(struct drm_crtc_state *new_crtc_state)
5998 struct drm_atomic_state *state = new_crtc_state->state;
5999 struct drm_plane *plane;
6002 drm_for_each_plane_mask(plane, state->dev, new_crtc_state->plane_mask) {
6003 struct drm_plane_state *new_plane_state;
6005 /* Cursor planes are "fake". */
6006 if (plane->type == DRM_PLANE_TYPE_CURSOR)
6009 new_plane_state = drm_atomic_get_new_plane_state(state, plane);
6011 if (!new_plane_state) {
6013 * The plane is enabled on the CRTC and hasn't changed
6014 * state. This means that it previously passed
6015 * validation and is therefore enabled.
6021 /* We need a framebuffer to be considered enabled. */
6022 num_active += (new_plane_state->fb != NULL);
6028 static void dm_update_crtc_active_planes(struct drm_crtc *crtc,
6029 struct drm_crtc_state *new_crtc_state)
6031 struct dm_crtc_state *dm_new_crtc_state =
6032 to_dm_crtc_state(new_crtc_state);
6034 dm_new_crtc_state->active_planes = 0;
6036 if (!dm_new_crtc_state->stream)
6039 dm_new_crtc_state->active_planes =
6040 count_crtc_active_planes(new_crtc_state);
6043 static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
6044 struct drm_atomic_state *state)
6046 struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
6048 struct amdgpu_device *adev = drm_to_adev(crtc->dev);
6049 struct dc *dc = adev->dm.dc;
6050 struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
6053 trace_amdgpu_dm_crtc_atomic_check(crtc_state);
6055 dm_update_crtc_active_planes(crtc, crtc_state);
6057 if (unlikely(!dm_crtc_state->stream &&
6058 modeset_required(crtc_state, NULL, dm_crtc_state->stream))) {
6064 * We require the primary plane to be enabled whenever the CRTC is, otherwise
6065 * drm_mode_cursor_universal may end up trying to enable the cursor plane while all other
6066 * planes are disabled, which is not supported by the hardware. And there is legacy
6067 * userspace which stops using the HW cursor altogether in response to the resulting EINVAL.
6069 if (crtc_state->enable &&
6070 !(crtc_state->plane_mask & drm_plane_mask(crtc->primary))) {
6071 DRM_DEBUG_ATOMIC("Can't enable a CRTC without enabling the primary plane\n");
6075 /* In some use cases, like reset, no stream is attached */
6076 if (!dm_crtc_state->stream)
6079 if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
6082 DRM_DEBUG_ATOMIC("Failed DC stream validation\n");
6086 static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc,
6087 const struct drm_display_mode *mode,
6088 struct drm_display_mode *adjusted_mode)
6093 static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
6094 .disable = dm_crtc_helper_disable,
6095 .atomic_check = dm_crtc_helper_atomic_check,
6096 .mode_fixup = dm_crtc_helper_mode_fixup,
6097 .get_scanout_position = amdgpu_crtc_get_scanout_position,
6100 static void dm_encoder_helper_disable(struct drm_encoder *encoder)
6105 static int convert_dc_color_depth_into_bpc(enum dc_color_depth display_color_depth)
6107 switch (display_color_depth) {
6108 case COLOR_DEPTH_666:
6110 case COLOR_DEPTH_888:
6112 case COLOR_DEPTH_101010:
6114 case COLOR_DEPTH_121212:
6116 case COLOR_DEPTH_141414:
6118 case COLOR_DEPTH_161616:
6126 static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
6127 struct drm_crtc_state *crtc_state,
6128 struct drm_connector_state *conn_state)
6130 struct drm_atomic_state *state = crtc_state->state;
6131 struct drm_connector *connector = conn_state->connector;
6132 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6133 struct dm_connector_state *dm_new_connector_state = to_dm_connector_state(conn_state);
6134 const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
6135 struct drm_dp_mst_topology_mgr *mst_mgr;
6136 struct drm_dp_mst_port *mst_port;
6137 enum dc_color_depth color_depth;
6139 bool is_y420 = false;
6141 if (!aconnector->port || !aconnector->dc_sink)
6144 mst_port = aconnector->port;
6145 mst_mgr = &aconnector->mst_port->mst_mgr;
6147 if (!crtc_state->connectors_changed && !crtc_state->mode_changed)
6150 if (!state->duplicated) {
6151 int max_bpc = conn_state->max_requested_bpc;
6152 is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
6153 aconnector->force_yuv420_output;
6154 color_depth = convert_color_depth_from_display_info(connector,
6157 bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
6158 clock = adjusted_mode->clock;
6159 dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false);
6161 dm_new_connector_state->vcpi_slots = drm_dp_atomic_find_vcpi_slots(state,
6164 dm_new_connector_state->pbn,
6165 dm_mst_get_pbn_divider(aconnector->dc_link));
6166 if (dm_new_connector_state->vcpi_slots < 0) {
6167 DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots);
6168 return dm_new_connector_state->vcpi_slots;
6173 const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
6174 .disable = dm_encoder_helper_disable,
6175 .atomic_check = dm_encoder_helper_atomic_check
6178 #if defined(CONFIG_DRM_AMD_DC_DCN)
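/*
 * For MST connectors whose stream uses DSC, recompute the PBN from the DSC
 * target bpp and pixel clock and enable DSC on the MST port, updating the
 * connector's PBN and VCPI slot count accordingly.
 */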
6179 static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
6180 struct dc_state *dc_state)
6182 struct dc_stream_state *stream = NULL;
6183 struct drm_connector *connector;
6184 struct drm_connector_state *new_con_state, *old_con_state;
6185 struct amdgpu_dm_connector *aconnector;
6186 struct dm_connector_state *dm_conn_state;
6187 int i, j, clock, bpp;
6188 int vcpi, pbn_div, pbn = 0;
6190 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
6192 aconnector = to_amdgpu_dm_connector(connector);
6194 if (!aconnector->port)
6197 if (!new_con_state || !new_con_state->crtc)
6200 dm_conn_state = to_dm_connector_state(new_con_state);
6202 for (j = 0; j < dc_state->stream_count; j++) {
6203 stream = dc_state->streams[j];
6207 if ((struct amdgpu_dm_connector*)stream->dm_stream_context == aconnector)
6216 if (stream->timing.flags.DSC != 1) {
6217 drm_dp_mst_atomic_enable_dsc(state,
6225 pbn_div = dm_mst_get_pbn_divider(stream->link);
6226 bpp = stream->timing.dsc_cfg.bits_per_pixel;
6227 clock = stream->timing.pix_clk_100hz / 10;
6228 pbn = drm_dp_calc_pbn_mode(clock, bpp, true);
6229 vcpi = drm_dp_mst_atomic_enable_dsc(state,
6236 dm_conn_state->pbn = pbn;
6237 dm_conn_state->vcpi_slots = vcpi;
6243 static void dm_drm_plane_reset(struct drm_plane *plane)
6245 struct dm_plane_state *amdgpu_state = NULL;
6248 plane->funcs->atomic_destroy_state(plane, plane->state);
6250 amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);
6251 WARN_ON(amdgpu_state == NULL);
6254 __drm_atomic_helper_plane_reset(plane, &amdgpu_state->base);
6257 static struct drm_plane_state *
6258 dm_drm_plane_duplicate_state(struct drm_plane *plane)
6260 struct dm_plane_state *dm_plane_state, *old_dm_plane_state;
6262 old_dm_plane_state = to_dm_plane_state(plane->state);
6263 dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL);
6264 if (!dm_plane_state)
6267 __drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base);
6269 if (old_dm_plane_state->dc_state) {
6270 dm_plane_state->dc_state = old_dm_plane_state->dc_state;
6271 dc_plane_state_retain(dm_plane_state->dc_state);
6274 return &dm_plane_state->base;
6277 static void dm_drm_plane_destroy_state(struct drm_plane *plane,
6278 struct drm_plane_state *state)
6280 struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
6282 if (dm_plane_state->dc_state)
6283 dc_plane_state_release(dm_plane_state->dc_state);
6285 drm_atomic_helper_plane_destroy_state(plane, state);
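/*
 * Note on the plane state helpers above: dm_plane_state wraps a refcounted
 * dc_plane_state. dm_drm_plane_duplicate_state() retains the dc_state it
 * inherits and dm_drm_plane_destroy_state() releases it, so the DC object
 * lives exactly as long as some DRM plane state still references it.
 */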
6288 static const struct drm_plane_funcs dm_plane_funcs = {
6289 .update_plane = drm_atomic_helper_update_plane,
6290 .disable_plane = drm_atomic_helper_disable_plane,
6291 .destroy = drm_primary_helper_destroy,
6292 .reset = dm_drm_plane_reset,
6293 .atomic_duplicate_state = dm_drm_plane_duplicate_state,
6294 .atomic_destroy_state = dm_drm_plane_destroy_state,
6295 .format_mod_supported = dm_plane_format_mod_supported,
6298 static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
6299 struct drm_plane_state *new_state)
6301 struct amdgpu_framebuffer *afb;
6302 struct drm_gem_object *obj;
6303 struct amdgpu_device *adev;
6304 struct amdgpu_bo *rbo;
6305 struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
6306 struct list_head list;
6307 struct ttm_validate_buffer tv;
6308 struct ww_acquire_ctx ticket;
6312 if (!new_state->fb) {
6313 DRM_DEBUG_DRIVER("No FB bound\n");
6317 afb = to_amdgpu_framebuffer(new_state->fb);
6318 obj = new_state->fb->obj[0];
6319 rbo = gem_to_amdgpu_bo(obj);
6320 adev = amdgpu_ttm_adev(rbo->tbo.bdev);
6321 INIT_LIST_HEAD(&list);
6325 list_add(&tv.head, &list);
6327 r = ttm_eu_reserve_buffers(&ticket, &list, false, NULL);
6329 dev_err(adev->dev, "fail to reserve bo (%d)\n", r);
6333 if (plane->type != DRM_PLANE_TYPE_CURSOR)
6334 domain = amdgpu_display_supported_domains(adev, rbo->flags);
6336 domain = AMDGPU_GEM_DOMAIN_VRAM;
6338 r = amdgpu_bo_pin(rbo, domain);
6339 if (unlikely(r != 0)) {
6340 if (r != -ERESTARTSYS)
6341 DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
6342 ttm_eu_backoff_reservation(&ticket, &list);
6346 r = amdgpu_ttm_alloc_gart(&rbo->tbo);
6347 if (unlikely(r != 0)) {
6348 amdgpu_bo_unpin(rbo);
6349 ttm_eu_backoff_reservation(&ticket, &list);
6350 DRM_ERROR("%p bind failed\n", rbo);
6354 ttm_eu_backoff_reservation(&ticket, &list);
6356 afb->address = amdgpu_bo_gpu_offset(rbo);
6361 * We don't do surface updates on planes that have been newly created,
6362 * but we also don't have the afb->address during atomic check.
6364 * Fill in buffer attributes depending on the address here, but only on
6365 * newly created planes since they're not being used by DC yet and this
6366 * won't modify global state.
6368 dm_plane_state_old = to_dm_plane_state(plane->state);
6369 dm_plane_state_new = to_dm_plane_state(new_state);
6371 if (dm_plane_state_new->dc_state &&
6372 dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
6373 struct dc_plane_state *plane_state =
6374 dm_plane_state_new->dc_state;
6375 bool force_disable_dcc = !plane_state->dcc.enable;
6377 fill_plane_buffer_attributes(
6378 adev, afb, plane_state->format, plane_state->rotation,
6380 &plane_state->tiling_info, &plane_state->plane_size,
6381 &plane_state->dcc, &plane_state->address,
6382 afb->tmz_surface, force_disable_dcc);
6388 static void dm_plane_helper_cleanup_fb(struct drm_plane *plane,
6389 struct drm_plane_state *old_state)
6391 struct amdgpu_bo *rbo;
6397 rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]);
6398 r = amdgpu_bo_reserve(rbo, false);
6400 DRM_ERROR("failed to reserve rbo before unpin\n");
6404 amdgpu_bo_unpin(rbo);
6405 amdgpu_bo_unreserve(rbo);
6406 amdgpu_bo_unref(&rbo);
6409 static int dm_plane_helper_check_state(struct drm_plane_state *state,
6410 struct drm_crtc_state *new_crtc_state)
6412 struct drm_framebuffer *fb = state->fb;
6413 int min_downscale, max_upscale;
6415 int max_scale = INT_MAX;
6417 /* Plane enabled? Get min/max allowed scaling factors from plane caps. */
6418 if (fb && state->crtc) {
6419 get_min_max_dc_plane_scaling(state->crtc->dev, fb,
6420 &min_downscale, &max_upscale);
6422 * Convert to drm convention: 16.16 fixed point, instead of dc's
6423 * 1.0 == 1000. Also drm scaling is src/dst instead of dc's
6424 * dst/src, so min_scale = 1.0 / max_upscale, etc.
6426 min_scale = (1000 << 16) / max_upscale;
6427 max_scale = (1000 << 16) / min_downscale;
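		/*
		 * Worked example with illustrative caps (not read from DC
		 * here): max_upscale = 16000 (16x) and min_downscale = 250
		 * (1/4x) would give min_scale = (1000 << 16) / 16000 = 4096
		 * (0.0625 in 16.16) and max_scale = (1000 << 16) / 250 =
		 * 262144 (4.0 in 16.16).
		 */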
6430 return drm_atomic_helper_check_plane_state(
6431 state, new_crtc_state, min_scale, max_scale, true, true);
6434 static int dm_plane_atomic_check(struct drm_plane *plane,
6435 struct drm_plane_state *state)
6437 struct amdgpu_device *adev = drm_to_adev(plane->dev);
6438 struct dc *dc = adev->dm.dc;
6439 struct dm_plane_state *dm_plane_state;
6440 struct dc_scaling_info scaling_info;
6441 struct drm_crtc_state *new_crtc_state;
6444 trace_amdgpu_dm_plane_atomic_check(state);
6446 dm_plane_state = to_dm_plane_state(state);
6448 if (!dm_plane_state->dc_state)
6452 drm_atomic_get_new_crtc_state(state->state, state->crtc);
6453 if (!new_crtc_state)
6456 ret = dm_plane_helper_check_state(state, new_crtc_state);
6460 ret = fill_dc_scaling_info(state, &scaling_info);
6464 if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
6470 static int dm_plane_atomic_async_check(struct drm_plane *plane,
6471 struct drm_plane_state *new_plane_state)
6473 /* Only support async updates on cursor planes. */
6474 if (plane->type != DRM_PLANE_TYPE_CURSOR)
6480 static void dm_plane_atomic_async_update(struct drm_plane *plane,
6481 struct drm_plane_state *new_state)
6483 struct drm_plane_state *old_state =
6484 drm_atomic_get_old_plane_state(new_state->state, plane);
6486 trace_amdgpu_dm_atomic_update_cursor(new_state);
6488 swap(plane->state->fb, new_state->fb);
6490 plane->state->src_x = new_state->src_x;
6491 plane->state->src_y = new_state->src_y;
6492 plane->state->src_w = new_state->src_w;
6493 plane->state->src_h = new_state->src_h;
6494 plane->state->crtc_x = new_state->crtc_x;
6495 plane->state->crtc_y = new_state->crtc_y;
6496 plane->state->crtc_w = new_state->crtc_w;
6497 plane->state->crtc_h = new_state->crtc_h;
6499 handle_cursor_update(plane, old_state);
6502 static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
6503 .prepare_fb = dm_plane_helper_prepare_fb,
6504 .cleanup_fb = dm_plane_helper_cleanup_fb,
6505 .atomic_check = dm_plane_atomic_check,
6506 .atomic_async_check = dm_plane_atomic_async_check,
6507 .atomic_async_update = dm_plane_atomic_async_update
/*
 * TODO: These are currently initialized to RGB formats only.
 * For future use cases we should either initialize them dynamically based on
 * plane capabilities, or initialize this array to all formats, so the internal
 * DRM check will succeed, and let DC implement the proper checks.
 */
6516 static const uint32_t rgb_formats[] = {
6517 DRM_FORMAT_XRGB8888,
6518 DRM_FORMAT_ARGB8888,
6519 DRM_FORMAT_RGBA8888,
6520 DRM_FORMAT_XRGB2101010,
6521 DRM_FORMAT_XBGR2101010,
6522 DRM_FORMAT_ARGB2101010,
6523 DRM_FORMAT_ABGR2101010,
6524 DRM_FORMAT_XBGR8888,
6525 DRM_FORMAT_ABGR8888,
6529 static const uint32_t overlay_formats[] = {
6530 DRM_FORMAT_XRGB8888,
6531 DRM_FORMAT_ARGB8888,
6532 DRM_FORMAT_RGBA8888,
6533 DRM_FORMAT_XBGR8888,
6534 DRM_FORMAT_ABGR8888,
6538 static const u32 cursor_formats[] = {
6542 static int get_plane_formats(const struct drm_plane *plane,
6543 const struct dc_plane_cap *plane_cap,
6544 uint32_t *formats, int max_formats)
6546 int i, num_formats = 0;
	/*
	 * TODO: Query support for each group of formats directly from
	 * DC plane caps. This will require adding more formats to the
	 * caps list.
	 */
6554 switch (plane->type) {
6555 case DRM_PLANE_TYPE_PRIMARY:
6556 for (i = 0; i < ARRAY_SIZE(rgb_formats); ++i) {
6557 if (num_formats >= max_formats)
6560 formats[num_formats++] = rgb_formats[i];
6563 if (plane_cap && plane_cap->pixel_format_support.nv12)
6564 formats[num_formats++] = DRM_FORMAT_NV12;
6565 if (plane_cap && plane_cap->pixel_format_support.p010)
6566 formats[num_formats++] = DRM_FORMAT_P010;
6567 if (plane_cap && plane_cap->pixel_format_support.fp16) {
6568 formats[num_formats++] = DRM_FORMAT_XRGB16161616F;
6569 formats[num_formats++] = DRM_FORMAT_ARGB16161616F;
6570 formats[num_formats++] = DRM_FORMAT_XBGR16161616F;
6571 formats[num_formats++] = DRM_FORMAT_ABGR16161616F;
6575 case DRM_PLANE_TYPE_OVERLAY:
6576 for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) {
6577 if (num_formats >= max_formats)
6580 formats[num_formats++] = overlay_formats[i];
6584 case DRM_PLANE_TYPE_CURSOR:
6585 for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) {
6586 if (num_formats >= max_formats)
6589 formats[num_formats++] = cursor_formats[i];
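/*
 * amdgpu_dm_plane_init() below registers the plane with the formats and
 * modifiers gathered above, then attaches the optional properties that the DC
 * plane caps allow: per-pixel alpha/blend modes on overlays, YCbCr color
 * encoding/range on NV12/P010-capable primaries, and rotation on non-cursor
 * planes.
 */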
6597 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
6598 struct drm_plane *plane,
6599 unsigned long possible_crtcs,
6600 const struct dc_plane_cap *plane_cap)
6602 uint32_t formats[32];
6605 unsigned int supported_rotations;
6606 uint64_t *modifiers = NULL;
6608 num_formats = get_plane_formats(plane, plane_cap, formats,
6609 ARRAY_SIZE(formats));
6611 res = get_plane_modifiers(dm->adev, plane->type, &modifiers);
6615 res = drm_universal_plane_init(adev_to_drm(dm->adev), plane, possible_crtcs,
6616 &dm_plane_funcs, formats, num_formats,
6617 modifiers, plane->type, NULL);
6622 if (plane->type == DRM_PLANE_TYPE_OVERLAY &&
6623 plane_cap && plane_cap->per_pixel_alpha) {
6624 unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
6625 BIT(DRM_MODE_BLEND_PREMULTI);
6627 drm_plane_create_alpha_property(plane);
6628 drm_plane_create_blend_mode_property(plane, blend_caps);
6631 if (plane->type == DRM_PLANE_TYPE_PRIMARY &&
6633 (plane_cap->pixel_format_support.nv12 ||
6634 plane_cap->pixel_format_support.p010)) {
6635 /* This only affects YUV formats. */
6636 drm_plane_create_color_properties(
6638 BIT(DRM_COLOR_YCBCR_BT601) |
6639 BIT(DRM_COLOR_YCBCR_BT709) |
6640 BIT(DRM_COLOR_YCBCR_BT2020),
6641 BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
6642 BIT(DRM_COLOR_YCBCR_FULL_RANGE),
6643 DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE);
6646 supported_rotations =
6647 DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
6648 DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270;
6650 if (dm->adev->asic_type >= CHIP_BONAIRE &&
6651 plane->type != DRM_PLANE_TYPE_CURSOR)
6652 drm_plane_create_rotation_property(plane, DRM_MODE_ROTATE_0,
6653 supported_rotations);
6655 drm_plane_helper_add(plane, &dm_plane_helper_funcs);
6657 /* Create (reset) the plane state */
6658 if (plane->funcs->reset)
6659 plane->funcs->reset(plane);
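/*
 * amdgpu_dm_crtc_init() below allocates a dedicated cursor plane, ties it and
 * the supplied primary plane to the new CRTC, and sizes the cursor and color
 * management (gamma/LUT) limits from the DC caps.
 */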
6664 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
6665 struct drm_plane *plane,
6666 uint32_t crtc_index)
6668 struct amdgpu_crtc *acrtc = NULL;
6669 struct drm_plane *cursor_plane;
6673 cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL);
6677 cursor_plane->type = DRM_PLANE_TYPE_CURSOR;
6678 res = amdgpu_dm_plane_init(dm, cursor_plane, 0, NULL);
6680 acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
6684 res = drm_crtc_init_with_planes(
6689 &amdgpu_dm_crtc_funcs, NULL);
6694 drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs);
6696 /* Create (reset) the plane state */
6697 if (acrtc->base.funcs->reset)
6698 acrtc->base.funcs->reset(&acrtc->base);
6700 acrtc->max_cursor_width = dm->adev->dm.dc->caps.max_cursor_size;
6701 acrtc->max_cursor_height = dm->adev->dm.dc->caps.max_cursor_size;
6703 acrtc->crtc_id = crtc_index;
6704 acrtc->base.enabled = false;
6705 acrtc->otg_inst = -1;
6707 dm->adev->mode_info.crtcs[crtc_index] = acrtc;
6708 drm_crtc_enable_color_mgmt(&acrtc->base, MAX_COLOR_LUT_ENTRIES,
6709 true, MAX_COLOR_LUT_ENTRIES);
6710 drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);
6716 kfree(cursor_plane);
6721 static int to_drm_connector_type(enum signal_type st)
6724 case SIGNAL_TYPE_HDMI_TYPE_A:
6725 return DRM_MODE_CONNECTOR_HDMIA;
6726 case SIGNAL_TYPE_EDP:
6727 return DRM_MODE_CONNECTOR_eDP;
6728 case SIGNAL_TYPE_LVDS:
6729 return DRM_MODE_CONNECTOR_LVDS;
6730 case SIGNAL_TYPE_RGB:
6731 return DRM_MODE_CONNECTOR_VGA;
6732 case SIGNAL_TYPE_DISPLAY_PORT:
6733 case SIGNAL_TYPE_DISPLAY_PORT_MST:
6734 return DRM_MODE_CONNECTOR_DisplayPort;
6735 case SIGNAL_TYPE_DVI_DUAL_LINK:
6736 case SIGNAL_TYPE_DVI_SINGLE_LINK:
6737 return DRM_MODE_CONNECTOR_DVID;
6738 case SIGNAL_TYPE_VIRTUAL:
6739 return DRM_MODE_CONNECTOR_VIRTUAL;
6742 return DRM_MODE_CONNECTOR_Unknown;
6746 static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector)
6748 struct drm_encoder *encoder;
6750 /* There is only one encoder per connector */
6751 drm_connector_for_each_possible_encoder(connector, encoder)
6757 static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
6759 struct drm_encoder *encoder;
6760 struct amdgpu_encoder *amdgpu_encoder;
6762 encoder = amdgpu_dm_connector_to_encoder(connector);
6764 if (encoder == NULL)
6767 amdgpu_encoder = to_amdgpu_encoder(encoder);
6769 amdgpu_encoder->native_mode.clock = 0;
6771 if (!list_empty(&connector->probed_modes)) {
6772 struct drm_display_mode *preferred_mode = NULL;
6774 list_for_each_entry(preferred_mode,
6775 &connector->probed_modes,
6777 if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
6778 amdgpu_encoder->native_mode = *preferred_mode;
6786 static struct drm_display_mode *
6787 amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
6789 int hdisplay, int vdisplay)
6791 struct drm_device *dev = encoder->dev;
6792 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
6793 struct drm_display_mode *mode = NULL;
6794 struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
6796 mode = drm_mode_duplicate(dev, native_mode);
6801 mode->hdisplay = hdisplay;
6802 mode->vdisplay = vdisplay;
6803 mode->type &= ~DRM_MODE_TYPE_PREFERRED;
6804 strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
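/*
 * amdgpu_dm_connector_add_common_modes() below adds a fixed table of common
 * resolutions as extra modes derived from the encoder's native mode; entries
 * larger than the native mode, equal to it, or already present in the probed
 * mode list are skipped.
 */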
6810 static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
6811 struct drm_connector *connector)
6813 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
6814 struct drm_display_mode *mode = NULL;
6815 struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
6816 struct amdgpu_dm_connector *amdgpu_dm_connector =
6817 to_amdgpu_dm_connector(connector);
6821 char name[DRM_DISPLAY_MODE_LEN];
6824 } common_modes[] = {
6825 { "640x480", 640, 480},
6826 { "800x600", 800, 600},
6827 { "1024x768", 1024, 768},
6828 { "1280x720", 1280, 720},
6829 { "1280x800", 1280, 800},
6830 {"1280x1024", 1280, 1024},
6831 { "1440x900", 1440, 900},
6832 {"1680x1050", 1680, 1050},
6833 {"1600x1200", 1600, 1200},
6834 {"1920x1080", 1920, 1080},
6835 {"1920x1200", 1920, 1200}
6838 n = ARRAY_SIZE(common_modes);
6840 for (i = 0; i < n; i++) {
6841 struct drm_display_mode *curmode = NULL;
6842 bool mode_existed = false;
6844 if (common_modes[i].w > native_mode->hdisplay ||
6845 common_modes[i].h > native_mode->vdisplay ||
6846 (common_modes[i].w == native_mode->hdisplay &&
6847 common_modes[i].h == native_mode->vdisplay))
6850 list_for_each_entry(curmode, &connector->probed_modes, head) {
6851 if (common_modes[i].w == curmode->hdisplay &&
6852 common_modes[i].h == curmode->vdisplay) {
6853 mode_existed = true;
6861 mode = amdgpu_dm_create_common_mode(encoder,
6862 common_modes[i].name, common_modes[i].w,
6864 drm_mode_probed_add(connector, mode);
6865 amdgpu_dm_connector->num_modes++;
6869 static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
6872 struct amdgpu_dm_connector *amdgpu_dm_connector =
6873 to_amdgpu_dm_connector(connector);
6876 /* empty probed_modes */
6877 INIT_LIST_HEAD(&connector->probed_modes);
6878 amdgpu_dm_connector->num_modes =
6879 drm_add_edid_modes(connector, edid);
	/* Sort the probed modes before calling amdgpu_dm_get_native_mode(),
	 * since the EDID can have more than one preferred mode. Modes that
	 * appear later in the probed mode list could be of higher, preferred
	 * resolution. For example, a 3840x2160 preferred timing in the base
	 * EDID block and a 4096x2160 preferred resolution in a later DID
	 * extension block.
	 */
6889 drm_mode_sort(&connector->probed_modes);
6890 amdgpu_dm_get_native_mode(connector);
6892 amdgpu_dm_connector->num_modes = 0;
6896 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
6898 struct amdgpu_dm_connector *amdgpu_dm_connector =
6899 to_amdgpu_dm_connector(connector);
6900 struct drm_encoder *encoder;
6901 struct edid *edid = amdgpu_dm_connector->edid;
6903 encoder = amdgpu_dm_connector_to_encoder(connector);
6905 if (!drm_edid_is_valid(edid)) {
6906 amdgpu_dm_connector->num_modes =
6907 drm_add_modes_noedid(connector, 640, 480);
6909 amdgpu_dm_connector_ddc_get_modes(connector, edid);
6910 amdgpu_dm_connector_add_common_modes(encoder, connector);
6912 amdgpu_dm_fbc_init(connector);
6914 return amdgpu_dm_connector->num_modes;
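/*
 * amdgpu_dm_connector_init_helper() below fills in the DM defaults for a new
 * connector: HPD polling and YCbCr 4:2:0 support per connector type, plus the
 * scaling, underscan, max bpc, ABM (eDP), HDR metadata, VRR and, when HDCP is
 * configured, content protection properties.
 */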
6917 void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
6918 struct amdgpu_dm_connector *aconnector,
6920 struct dc_link *link,
6923 struct amdgpu_device *adev = drm_to_adev(dm->ddev);
6926 * Some of the properties below require access to state, like bpc.
6927 * Allocate some default initial connector state with our reset helper.
6929 if (aconnector->base.funcs->reset)
6930 aconnector->base.funcs->reset(&aconnector->base);
6932 aconnector->connector_id = link_index;
6933 aconnector->dc_link = link;
6934 aconnector->base.interlace_allowed = false;
6935 aconnector->base.doublescan_allowed = false;
6936 aconnector->base.stereo_allowed = false;
6937 aconnector->base.dpms = DRM_MODE_DPMS_OFF;
6938 aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
6939 aconnector->audio_inst = -1;
6940 mutex_init(&aconnector->hpd_lock);
	/*
	 * Configure HPD hot-plug support: connector->polled defaults to 0,
	 * which means HPD hot plug is not supported.
	 */
6946 switch (connector_type) {
6947 case DRM_MODE_CONNECTOR_HDMIA:
6948 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
6949 aconnector->base.ycbcr_420_allowed =
6950 link->link_enc->features.hdmi_ycbcr420_supported ? true : false;
6952 case DRM_MODE_CONNECTOR_DisplayPort:
6953 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
6954 aconnector->base.ycbcr_420_allowed =
6955 link->link_enc->features.dp_ycbcr420_supported ? true : false;
6957 case DRM_MODE_CONNECTOR_DVID:
6958 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
6964 drm_object_attach_property(&aconnector->base.base,
6965 dm->ddev->mode_config.scaling_mode_property,
6966 DRM_MODE_SCALE_NONE);
6968 drm_object_attach_property(&aconnector->base.base,
6969 adev->mode_info.underscan_property,
6971 drm_object_attach_property(&aconnector->base.base,
6972 adev->mode_info.underscan_hborder_property,
6974 drm_object_attach_property(&aconnector->base.base,
6975 adev->mode_info.underscan_vborder_property,
6978 if (!aconnector->mst_port)
6979 drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);
6981 /* This defaults to the max in the range, but we want 8bpc for non-edp. */
6982 aconnector->base.state->max_bpc = (connector_type == DRM_MODE_CONNECTOR_eDP) ? 16 : 8;
6983 aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;
6985 if (connector_type == DRM_MODE_CONNECTOR_eDP &&
6986 (dc_is_dmcu_initialized(adev->dm.dc) || adev->dm.dc->ctx->dmub_srv)) {
6987 drm_object_attach_property(&aconnector->base.base,
6988 adev->mode_info.abm_level_property, 0);
6991 if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
6992 connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
6993 connector_type == DRM_MODE_CONNECTOR_eDP) {
6994 drm_object_attach_property(
6995 &aconnector->base.base,
6996 dm->ddev->mode_config.hdr_output_metadata_property, 0);
6998 if (!aconnector->mst_port)
6999 drm_connector_attach_vrr_capable_property(&aconnector->base);
7001 #ifdef CONFIG_DRM_AMD_DC_HDCP
7002 if (adev->dm.hdcp_workqueue)
7003 drm_connector_attach_content_protection_property(&aconnector->base, true);
7008 static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
7009 struct i2c_msg *msgs, int num)
7011 struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
7012 struct ddc_service *ddc_service = i2c->ddc_service;
7013 struct i2c_command cmd;
7017 cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);
7022 cmd.number_of_payloads = num;
7023 cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
7026 for (i = 0; i < num; i++) {
7027 cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
7028 cmd.payloads[i].address = msgs[i].addr;
7029 cmd.payloads[i].length = msgs[i].len;
7030 cmd.payloads[i].data = msgs[i].buf;
7034 ddc_service->ctx->dc,
7035 ddc_service->ddc_pin->hw_info.ddc_channel,
7039 kfree(cmd.payloads);
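/*
 * The transfer hook above repackages the i2c_msg array as a DC i2c_command
 * (one payload per message, read/write taken from I2C_M_RD) and hands it to
 * DC for execution on the link's DDC channel; the payload array is freed once
 * DC has consumed it.
 */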
7043 static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
7045 return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
7048 static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
7049 .master_xfer = amdgpu_dm_i2c_xfer,
7050 .functionality = amdgpu_dm_i2c_func,
7053 static struct amdgpu_i2c_adapter *
7054 create_i2c(struct ddc_service *ddc_service,
7058 struct amdgpu_device *adev = ddc_service->ctx->driver_context;
7059 struct amdgpu_i2c_adapter *i2c;
7061 i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
7064 i2c->base.owner = THIS_MODULE;
7065 i2c->base.class = I2C_CLASS_DDC;
7066 i2c->base.dev.parent = &adev->pdev->dev;
7067 i2c->base.algo = &amdgpu_dm_i2c_algo;
7068 snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
7069 i2c_set_adapdata(&i2c->base, i2c);
7070 i2c->ddc_service = ddc_service;
7071 i2c->ddc_service->ddc_pin->hw_info.ddc_channel = link_index;
7078 * Note: this function assumes that dc_link_detect() was called for the
7079 * dc_link which will be represented by this aconnector.
7081 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
7082 struct amdgpu_dm_connector *aconnector,
7083 uint32_t link_index,
7084 struct amdgpu_encoder *aencoder)
7088 struct dc *dc = dm->dc;
7089 struct dc_link *link = dc_get_link_at_index(dc, link_index);
7090 struct amdgpu_i2c_adapter *i2c;
7092 link->priv = aconnector;
7094 DRM_DEBUG_DRIVER("%s()\n", __func__);
7096 i2c = create_i2c(link->ddc, link->link_index, &res);
7098 DRM_ERROR("Failed to create i2c adapter data\n");
7102 aconnector->i2c = i2c;
7103 res = i2c_add_adapter(&i2c->base);
7106 DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
7110 connector_type = to_drm_connector_type(link->connector_signal);
7112 res = drm_connector_init_with_ddc(
7115 &amdgpu_dm_connector_funcs,
7120 DRM_ERROR("connector_init failed\n");
7121 aconnector->connector_id = -1;
7125 drm_connector_helper_add(
7127 &amdgpu_dm_connector_helper_funcs);
7129 amdgpu_dm_connector_init_helper(
7136 drm_connector_attach_encoder(
7137 &aconnector->base, &aencoder->base);
7139 if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
7140 || connector_type == DRM_MODE_CONNECTOR_eDP)
7141 amdgpu_dm_initialize_dp_connector(dm, aconnector, link->link_index);
7146 aconnector->i2c = NULL;
7151 int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
7153 switch (adev->mode_info.num_crtc) {
7170 static int amdgpu_dm_encoder_init(struct drm_device *dev,
7171 struct amdgpu_encoder *aencoder,
7172 uint32_t link_index)
7174 struct amdgpu_device *adev = drm_to_adev(dev);
7176 int res = drm_encoder_init(dev,
7178 &amdgpu_dm_encoder_funcs,
7179 DRM_MODE_ENCODER_TMDS,
7182 aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);
7185 aencoder->encoder_id = link_index;
7187 aencoder->encoder_id = -1;
7189 drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);
7194 static void manage_dm_interrupts(struct amdgpu_device *adev,
7195 struct amdgpu_crtc *acrtc,
7199 * We have no guarantee that the frontend index maps to the same
7200 * backend index - some even map to more than one.
7202 * TODO: Use a different interrupt or check DC itself for the mapping.
7205 amdgpu_display_crtc_idx_to_irq_type(
7210 drm_crtc_vblank_on(&acrtc->base);
7213 &adev->pageflip_irq,
7219 &adev->pageflip_irq,
7221 drm_crtc_vblank_off(&acrtc->base);
7225 static void dm_update_pflip_irq_state(struct amdgpu_device *adev,
7226 struct amdgpu_crtc *acrtc)
7229 amdgpu_display_crtc_idx_to_irq_type(adev, acrtc->crtc_id);
	/*
	 * This reads the current state for the IRQ and forcibly reapplies
	 * that setting to the hardware.
	 */
7235 amdgpu_irq_update(adev, &adev->pageflip_irq, irq_type);
7239 is_scaling_state_different(const struct dm_connector_state *dm_state,
7240 const struct dm_connector_state *old_dm_state)
7242 if (dm_state->scaling != old_dm_state->scaling)
7244 if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
7245 if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
7247 } else if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
7248 if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
7250 } else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
7251 dm_state->underscan_vborder != old_dm_state->underscan_vborder)
7256 #ifdef CONFIG_DRM_AMD_DC_HDCP
7257 static bool is_content_protection_different(struct drm_connector_state *state,
7258 const struct drm_connector_state *old_state,
7259 const struct drm_connector *connector, struct hdcp_workqueue *hdcp_w)
7261 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7262 struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
7264 /* Handle: Type0/1 change */
7265 if (old_state->hdcp_content_type != state->hdcp_content_type &&
7266 state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
7267 state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
	/* CP is being re-enabled; ignore this transition.
	 *
	 * Handles: ENABLED -> DESIRED
	 */
7275 if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
7276 state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
7277 state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
7281 /* S3 resume case, since old state will always be 0 (UNDESIRED) and the restored state will be ENABLED
7283 * Handles: UNDESIRED -> ENABLED
7285 if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
7286 state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
7287 state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
	/* Check if something is connected or enabled; otherwise we would start
	 * HDCP with nothing connected/enabled (hot-plug, headless S3, DPMS).
	 *
	 * Handles: DESIRED -> DESIRED (Special case)
	 */
7294 if (dm_con_state->update_hdcp && state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED &&
7295 connector->dpms == DRM_MODE_DPMS_ON && aconnector->dc_sink != NULL) {
7296 dm_con_state->update_hdcp = false;
7301 * Handles: UNDESIRED -> UNDESIRED
7302 * DESIRED -> DESIRED
7303 * ENABLED -> ENABLED
7305 if (old_state->content_protection == state->content_protection)
7309 * Handles: UNDESIRED -> DESIRED
7310 * DESIRED -> UNDESIRED
7311 * ENABLED -> UNDESIRED
7313 if (state->content_protection != DRM_MODE_CONTENT_PROTECTION_ENABLED)
7317 * Handles: DESIRED -> ENABLED
7323 static void remove_stream(struct amdgpu_device *adev,
7324 struct amdgpu_crtc *acrtc,
7325 struct dc_stream_state *stream)
7327 /* this is the update mode case */
7329 acrtc->otg_inst = -1;
7330 acrtc->enabled = false;
7333 static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
7334 struct dc_cursor_position *position)
7336 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
7338 int xorigin = 0, yorigin = 0;
7340 position->enable = false;
7344 if (!crtc || !plane->state->fb)
7347 if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) ||
7348 (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) {
7349 DRM_ERROR("%s: bad cursor width or height %d x %d\n",
7351 plane->state->crtc_w,
7352 plane->state->crtc_h);
7356 x = plane->state->crtc_x;
7357 y = plane->state->crtc_y;
7359 if (x <= -amdgpu_crtc->max_cursor_width ||
7360 y <= -amdgpu_crtc->max_cursor_height)
7364 xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
7368 yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
7371 position->enable = true;
7372 position->translate_by_source = true;
7375 position->x_hotspot = xorigin;
7376 position->y_hotspot = yorigin;
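/*
 * handle_cursor_update() below programs the DC cursor for a plane update:
 * attributes (framebuffer address, width/height, pre-multiplied-alpha color
 * format) plus the position computed above. When the cursor hangs off the
 * top/left edge, the negative offset is reported as the hotspot together with
 * translate_by_source so DC can offset into the cursor surface.
 */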
7381 static void handle_cursor_update(struct drm_plane *plane,
7382 struct drm_plane_state *old_plane_state)
7384 struct amdgpu_device *adev = drm_to_adev(plane->dev);
7385 struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb);
7386 struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
7387 struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
7388 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
7389 uint64_t address = afb ? afb->address : 0;
7390 struct dc_cursor_position position;
7391 struct dc_cursor_attributes attributes;
7394 if (!plane->state->fb && !old_plane_state->fb)
7397 DRM_DEBUG_DRIVER("%s: crtc_id=%d with size %d to %d\n",
7399 amdgpu_crtc->crtc_id,
7400 plane->state->crtc_w,
7401 plane->state->crtc_h);
7403 ret = get_cursor_position(plane, crtc, &position);
7407 if (!position.enable) {
7408 /* turn off cursor */
7409 if (crtc_state && crtc_state->stream) {
7410 mutex_lock(&adev->dm.dc_lock);
7411 dc_stream_set_cursor_position(crtc_state->stream,
7413 mutex_unlock(&adev->dm.dc_lock);
7418 amdgpu_crtc->cursor_width = plane->state->crtc_w;
7419 amdgpu_crtc->cursor_height = plane->state->crtc_h;
7421 memset(&attributes, 0, sizeof(attributes));
7422 attributes.address.high_part = upper_32_bits(address);
7423 attributes.address.low_part = lower_32_bits(address);
7424 attributes.width = plane->state->crtc_w;
7425 attributes.height = plane->state->crtc_h;
7426 attributes.color_format = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
7427 attributes.rotation_angle = 0;
7428 attributes.attribute_flags.value = 0;
7430 attributes.pitch = afb->base.pitches[0] / afb->base.format->cpp[0];
7432 if (crtc_state->stream) {
7433 mutex_lock(&adev->dm.dc_lock);
7434 if (!dc_stream_set_cursor_attributes(crtc_state->stream,
7436 DRM_ERROR("DC failed to set cursor attributes\n");
7438 if (!dc_stream_set_cursor_position(crtc_state->stream,
7440 DRM_ERROR("DC failed to set cursor position\n");
7441 mutex_unlock(&adev->dm.dc_lock);
7445 static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
7448 assert_spin_locked(&acrtc->base.dev->event_lock);
7449 WARN_ON(acrtc->event);
7451 acrtc->event = acrtc->base.state->event;
7453 /* Set the flip status */
7454 acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
7456 /* Mark this event as consumed */
7457 acrtc->base.state->event = NULL;
7459 DRM_DEBUG_DRIVER("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
7463 static void update_freesync_state_on_stream(
7464 struct amdgpu_display_manager *dm,
7465 struct dm_crtc_state *new_crtc_state,
7466 struct dc_stream_state *new_stream,
7467 struct dc_plane_state *surface,
7468 u32 flip_timestamp_in_us)
7470 struct mod_vrr_params vrr_params;
7471 struct dc_info_packet vrr_infopacket = {0};
7472 struct amdgpu_device *adev = dm->adev;
7473 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
7474 unsigned long flags;
7480 * TODO: Determine why min/max totals and vrefresh can be 0 here.
7481 * For now it's sufficient to just guard against these conditions.
7484 if (!new_stream->timing.h_total || !new_stream->timing.v_total)
7487 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
7488 vrr_params = acrtc->dm_irq_params.vrr_params;
7491 mod_freesync_handle_preflip(
7492 dm->freesync_module,
7495 flip_timestamp_in_us,
7498 if (adev->family < AMDGPU_FAMILY_AI &&
7499 amdgpu_dm_vrr_active(new_crtc_state)) {
7500 mod_freesync_handle_v_update(dm->freesync_module,
7501 new_stream, &vrr_params);
7503 /* Need to call this before the frame ends. */
7504 dc_stream_adjust_vmin_vmax(dm->dc,
7505 new_crtc_state->stream,
7506 &vrr_params.adjust);
7510 mod_freesync_build_vrr_infopacket(
7511 dm->freesync_module,
7515 TRANSFER_FUNC_UNKNOWN,
7518 new_crtc_state->freesync_timing_changed |=
7519 (memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
7521 sizeof(vrr_params.adjust)) != 0);
7523 new_crtc_state->freesync_vrr_info_changed |=
7524 (memcmp(&new_crtc_state->vrr_infopacket,
7526 sizeof(vrr_infopacket)) != 0);
7528 acrtc->dm_irq_params.vrr_params = vrr_params;
7529 new_crtc_state->vrr_infopacket = vrr_infopacket;
7531 new_stream->adjust = acrtc->dm_irq_params.vrr_params.adjust;
7532 new_stream->vrr_infopacket = vrr_infopacket;
7534 if (new_crtc_state->freesync_vrr_info_changed)
7535 DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d",
7536 new_crtc_state->base.crtc->base.id,
7537 (int)new_crtc_state->base.vrr_enabled,
7538 (int)vrr_params.state);
7540 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
7543 static void update_stream_irq_parameters(
7544 struct amdgpu_display_manager *dm,
7545 struct dm_crtc_state *new_crtc_state)
7547 struct dc_stream_state *new_stream = new_crtc_state->stream;
7548 struct mod_vrr_params vrr_params;
7549 struct mod_freesync_config config = new_crtc_state->freesync_config;
7550 struct amdgpu_device *adev = dm->adev;
7551 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
7552 unsigned long flags;
7558 * TODO: Determine why min/max totals and vrefresh can be 0 here.
7559 * For now it's sufficient to just guard against these conditions.
7561 if (!new_stream->timing.h_total || !new_stream->timing.v_total)
7564 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
7565 vrr_params = acrtc->dm_irq_params.vrr_params;
7567 if (new_crtc_state->vrr_supported &&
7568 config.min_refresh_in_uhz &&
7569 config.max_refresh_in_uhz) {
7570 config.state = new_crtc_state->base.vrr_enabled ?
7571 VRR_STATE_ACTIVE_VARIABLE :
7574 config.state = VRR_STATE_UNSUPPORTED;
7577 mod_freesync_build_vrr_params(dm->freesync_module,
7579 &config, &vrr_params);
7581 new_crtc_state->freesync_timing_changed |=
7582 (memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
7583 &vrr_params.adjust, sizeof(vrr_params.adjust)) != 0);
7585 new_crtc_state->freesync_config = config;
7586 /* Copy state for access from DM IRQ handler */
7587 acrtc->dm_irq_params.freesync_config = config;
7588 acrtc->dm_irq_params.active_planes = new_crtc_state->active_planes;
7589 acrtc->dm_irq_params.vrr_params = vrr_params;
7590 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
7593 static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
7594 struct dm_crtc_state *new_state)
7596 bool old_vrr_active = amdgpu_dm_vrr_active(old_state);
7597 bool new_vrr_active = amdgpu_dm_vrr_active(new_state);
7599 if (!old_vrr_active && new_vrr_active) {
7600 /* Transition VRR inactive -> active:
		 * While VRR is active we must not disable the vblank irq, as a
		 * re-enable after a disable can compute bogus vblank/pflip
		 * timestamps if it happens inside the display front porch.
7605 * We also need vupdate irq for the actual core vblank handling
7608 dm_set_vupdate_irq(new_state->base.crtc, true);
7609 drm_crtc_vblank_get(new_state->base.crtc);
7610 DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
7611 __func__, new_state->base.crtc->base.id);
7612 } else if (old_vrr_active && !new_vrr_active) {
7613 /* Transition VRR active -> inactive:
7614 * Allow vblank irq disable again for fixed refresh rate.
7616 dm_set_vupdate_irq(new_state->base.crtc, false);
7617 drm_crtc_vblank_put(new_state->base.crtc);
7618 DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
7619 __func__, new_state->base.crtc->base.id);
7623 static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state)
7625 struct drm_plane *plane;
7626 struct drm_plane_state *old_plane_state, *new_plane_state;
7630 * TODO: Make this per-stream so we don't issue redundant updates for
7631 * commits with multiple streams.
7633 for_each_oldnew_plane_in_state(state, plane, old_plane_state,
7635 if (plane->type == DRM_PLANE_TYPE_CURSOR)
7636 handle_cursor_update(plane, old_plane_state);
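/*
 * amdgpu_dm_commit_planes() below is the per-CRTC plane programming path of
 * the commit tail: it builds one dc_surface_update bundle for every plane on
 * the CRTC (scaling, plane info, flip address), waits for the framebuffer
 * fences and the target vblank, then submits the bundle together with any
 * stream updates through a single dc_commit_updates_for_stream() call while
 * holding dm->dc_lock. PSR and the pageflip IRQ state are updated in the same
 * critical section.
 */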
7639 static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
7640 struct dc_state *dc_state,
7641 struct drm_device *dev,
7642 struct amdgpu_display_manager *dm,
7643 struct drm_crtc *pcrtc,
7644 bool wait_for_vblank)
7647 uint64_t timestamp_ns;
7648 struct drm_plane *plane;
7649 struct drm_plane_state *old_plane_state, *new_plane_state;
7650 struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
7651 struct drm_crtc_state *new_pcrtc_state =
7652 drm_atomic_get_new_crtc_state(state, pcrtc);
7653 struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
7654 struct dm_crtc_state *dm_old_crtc_state =
7655 to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
7656 int planes_count = 0, vpos, hpos;
7658 unsigned long flags;
7659 struct amdgpu_bo *abo;
7660 uint32_t target_vblank, last_flip_vblank;
7661 bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
7662 bool pflip_present = false;
7664 struct dc_surface_update surface_updates[MAX_SURFACES];
7665 struct dc_plane_info plane_infos[MAX_SURFACES];
7666 struct dc_scaling_info scaling_infos[MAX_SURFACES];
7667 struct dc_flip_addrs flip_addrs[MAX_SURFACES];
7668 struct dc_stream_update stream_update;
7671 bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
7674 dm_error("Failed to allocate update bundle\n");
7679 * Disable the cursor first if we're disabling all the planes.
	 * It'll remain on the screen after the planes are re-enabled
	 * if we don't.
7683 if (acrtc_state->active_planes == 0)
7684 amdgpu_dm_commit_cursors(state);
7686 /* update planes when needed */
7687 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
7688 struct drm_crtc *crtc = new_plane_state->crtc;
7689 struct drm_crtc_state *new_crtc_state;
7690 struct drm_framebuffer *fb = new_plane_state->fb;
7691 struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)fb;
7692 bool plane_needs_flip;
7693 struct dc_plane_state *dc_plane;
7694 struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);
7696 /* Cursor plane is handled after stream updates */
7697 if (plane->type == DRM_PLANE_TYPE_CURSOR)
7700 if (!fb || !crtc || pcrtc != crtc)
7703 new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
7704 if (!new_crtc_state->active)
7707 dc_plane = dm_new_plane_state->dc_state;
7709 bundle->surface_updates[planes_count].surface = dc_plane;
7710 if (new_pcrtc_state->color_mgmt_changed) {
7711 bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
7712 bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func;
7713 bundle->surface_updates[planes_count].gamut_remap_matrix = &dc_plane->gamut_remap_matrix;
7716 fill_dc_scaling_info(new_plane_state,
7717 &bundle->scaling_infos[planes_count]);
7719 bundle->surface_updates[planes_count].scaling_info =
7720 &bundle->scaling_infos[planes_count];
7722 plane_needs_flip = old_plane_state->fb && new_plane_state->fb;
7724 pflip_present = pflip_present || plane_needs_flip;
7726 if (!plane_needs_flip) {
7731 abo = gem_to_amdgpu_bo(fb->obj[0]);
		/*
		 * Wait for all fences on this FB. Do a limited wait to avoid
		 * deadlock during GPU reset, when this fence will not signal
		 * but we hold the reservation lock for the BO.
		 */
7738 r = dma_resv_wait_timeout_rcu(abo->tbo.base.resv, true,
7740 msecs_to_jiffies(5000));
7741 if (unlikely(r <= 0))
7742 DRM_ERROR("Waiting for fences timed out!");
7744 fill_dc_plane_info_and_addr(
7745 dm->adev, new_plane_state,
7747 &bundle->plane_infos[planes_count],
7748 &bundle->flip_addrs[planes_count].address,
7749 afb->tmz_surface, false);
7751 DRM_DEBUG_DRIVER("plane: id=%d dcc_en=%d\n",
7752 new_plane_state->plane->index,
7753 bundle->plane_infos[planes_count].dcc.enable);
7755 bundle->surface_updates[planes_count].plane_info =
7756 &bundle->plane_infos[planes_count];
7759 * Only allow immediate flips for fast updates that don't
		 * change FB pitch, DCC state, rotation or mirroring.
7762 bundle->flip_addrs[planes_count].flip_immediate =
7763 crtc->state->async_flip &&
7764 acrtc_state->update_type == UPDATE_TYPE_FAST;
7766 timestamp_ns = ktime_get_ns();
7767 bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
7768 bundle->surface_updates[planes_count].flip_addr = &bundle->flip_addrs[planes_count];
7769 bundle->surface_updates[planes_count].surface = dc_plane;
7771 if (!bundle->surface_updates[planes_count].surface) {
7772 DRM_ERROR("No surface for CRTC: id=%d\n",
7773 acrtc_attach->crtc_id);
7777 if (plane == pcrtc->primary)
7778 update_freesync_state_on_stream(
7781 acrtc_state->stream,
7783 bundle->flip_addrs[planes_count].flip_timestamp_in_us);
7785 DRM_DEBUG_DRIVER("%s Flipping to hi: 0x%x, low: 0x%x\n",
7787 bundle->flip_addrs[planes_count].address.grph.addr.high_part,
7788 bundle->flip_addrs[planes_count].address.grph.addr.low_part);
7794 if (pflip_present) {
7796 /* Use old throttling in non-vrr fixed refresh rate mode
7797 * to keep flip scheduling based on target vblank counts
7798 * working in a backwards compatible way, e.g., for
7799 * clients using the GLX_OML_sync_control extension or
7800 * DRI3/Present extension with defined target_msc.
7802 last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc);
7805 /* For variable refresh rate mode only:
7806 * Get vblank of last completed flip to avoid > 1 vrr
7807 * flips per video frame by use of throttling, but allow
7808 * flip programming anywhere in the possibly large
7809 * variable vrr vblank interval for fine-grained flip
7810 * timing control and more opportunity to avoid stutter
7811 * on late submission of flips.
7813 spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
7814 last_flip_vblank = acrtc_attach->dm_irq_params.last_flip_vblank;
7815 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
7818 target_vblank = last_flip_vblank + wait_for_vblank;
7821 * Wait until we're out of the vertical blank period before the one
7822 * targeted by the flip
7824 while ((acrtc_attach->enabled &&
7825 (amdgpu_display_get_crtc_scanoutpos(dm->ddev, acrtc_attach->crtc_id,
7826 0, &vpos, &hpos, NULL,
7827 NULL, &pcrtc->hwmode)
7828 & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
7829 (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
7830 (int)(target_vblank -
7831 amdgpu_get_vblank_counter_kms(pcrtc)) > 0)) {
7832 usleep_range(1000, 1100);
7836 * Prepare the flip event for the pageflip interrupt to handle.
7838 * This only works in the case where we've already turned on the
	 * appropriate hardware blocks (e.g. HUBP), so in the transition case
7840 * from 0 -> n planes we have to skip a hardware generated event
7841 * and rely on sending it from software.
7843 if (acrtc_attach->base.state->event &&
7844 acrtc_state->active_planes > 0) {
7845 drm_crtc_vblank_get(pcrtc);
7847 spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
7849 WARN_ON(acrtc_attach->pflip_status != AMDGPU_FLIP_NONE);
7850 prepare_flip_isr(acrtc_attach);
7852 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
7855 if (acrtc_state->stream) {
7856 if (acrtc_state->freesync_vrr_info_changed)
7857 bundle->stream_update.vrr_infopacket =
7858 &acrtc_state->stream->vrr_infopacket;
7862 /* Update the planes if changed or disable if we don't have any. */
7863 if ((planes_count || acrtc_state->active_planes == 0) &&
7864 acrtc_state->stream) {
7865 bundle->stream_update.stream = acrtc_state->stream;
7866 if (new_pcrtc_state->mode_changed) {
7867 bundle->stream_update.src = acrtc_state->stream->src;
7868 bundle->stream_update.dst = acrtc_state->stream->dst;
7871 if (new_pcrtc_state->color_mgmt_changed) {
7873 * TODO: This isn't fully correct since we've actually
7874 * already modified the stream in place.
7876 bundle->stream_update.gamut_remap =
7877 &acrtc_state->stream->gamut_remap_matrix;
7878 bundle->stream_update.output_csc_transform =
7879 &acrtc_state->stream->csc_color_matrix;
7880 bundle->stream_update.out_transfer_func =
7881 acrtc_state->stream->out_transfer_func;
7884 acrtc_state->stream->abm_level = acrtc_state->abm_level;
7885 if (acrtc_state->abm_level != dm_old_crtc_state->abm_level)
7886 bundle->stream_update.abm_level = &acrtc_state->abm_level;
7889 * If FreeSync state on the stream has changed then we need to
7890 * re-adjust the min/max bounds now that DC doesn't handle this
7891 * as part of commit.
7893 if (amdgpu_dm_vrr_active(dm_old_crtc_state) !=
7894 amdgpu_dm_vrr_active(acrtc_state)) {
7895 spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
7896 dc_stream_adjust_vmin_vmax(
7897 dm->dc, acrtc_state->stream,
7898 &acrtc_attach->dm_irq_params.vrr_params.adjust);
7899 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
7901 mutex_lock(&dm->dc_lock);
7902 if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
7903 acrtc_state->stream->link->psr_settings.psr_allow_active)
7904 amdgpu_dm_psr_disable(acrtc_state->stream);
7906 dc_commit_updates_for_stream(dm->dc,
7907 bundle->surface_updates,
7909 acrtc_state->stream,
7910 &bundle->stream_update,
7914 * Enable or disable the interrupts on the backend.
7916 * Most pipes are put into power gating when unused.
		 * When a pipe is power gated we lose its interrupt enablement
		 * state, so we need to update the IRQ control state in hardware
		 * whenever the pipe turns on (since it could have been previously
		 * power gated) or off (since some pipes can't be power gated on
		 * some ASICs).
		 */
7926 if (dm_old_crtc_state->active_planes != acrtc_state->active_planes)
7927 dm_update_pflip_irq_state(drm_to_adev(dev),
7930 if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
7931 acrtc_state->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED &&
7932 !acrtc_state->stream->link->psr_settings.psr_feature_enabled)
7933 amdgpu_dm_link_setup_psr(acrtc_state->stream);
7934 else if ((acrtc_state->update_type == UPDATE_TYPE_FAST) &&
7935 acrtc_state->stream->link->psr_settings.psr_feature_enabled &&
7936 !acrtc_state->stream->link->psr_settings.psr_allow_active) {
7937 amdgpu_dm_psr_enable(acrtc_state->stream);
7940 mutex_unlock(&dm->dc_lock);
7944 * Update cursor state *after* programming all the planes.
7945 * This avoids redundant programming in the case where we're going
7946 * to be disabling a single plane - those pipes are being disabled.
7948 if (acrtc_state->active_planes)
7949 amdgpu_dm_commit_cursors(state);
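/*
 * amdgpu_dm_commit_audio() below keeps the audio component in sync with the
 * commit: connectors whose CRTC changed or was disabled first get a removal
 * notification (audio_inst reset to -1), then connectors on newly modeset,
 * active streams are re-registered with the audio instance reported by
 * dc_stream_get_status().
 */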
7955 static void amdgpu_dm_commit_audio(struct drm_device *dev,
7956 struct drm_atomic_state *state)
7958 struct amdgpu_device *adev = drm_to_adev(dev);
7959 struct amdgpu_dm_connector *aconnector;
7960 struct drm_connector *connector;
7961 struct drm_connector_state *old_con_state, *new_con_state;
7962 struct drm_crtc_state *new_crtc_state;
7963 struct dm_crtc_state *new_dm_crtc_state;
7964 const struct dc_stream_status *status;
7967 /* Notify device removals. */
7968 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
7969 if (old_con_state->crtc != new_con_state->crtc) {
7970 /* CRTC changes require notification. */
7974 if (!new_con_state->crtc)
7977 new_crtc_state = drm_atomic_get_new_crtc_state(
7978 state, new_con_state->crtc);
7980 if (!new_crtc_state)
7983 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
7987 aconnector = to_amdgpu_dm_connector(connector);
7989 mutex_lock(&adev->dm.audio_lock);
7990 inst = aconnector->audio_inst;
7991 aconnector->audio_inst = -1;
7992 mutex_unlock(&adev->dm.audio_lock);
7994 amdgpu_dm_audio_eld_notify(adev, inst);
7997 /* Notify audio device additions. */
7998 for_each_new_connector_in_state(state, connector, new_con_state, i) {
7999 if (!new_con_state->crtc)
8002 new_crtc_state = drm_atomic_get_new_crtc_state(
8003 state, new_con_state->crtc);
8005 if (!new_crtc_state)
8008 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
8011 new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
8012 if (!new_dm_crtc_state->stream)
8015 status = dc_stream_get_status(new_dm_crtc_state->stream);
8019 aconnector = to_amdgpu_dm_connector(connector);
8021 mutex_lock(&adev->dm.audio_lock);
8022 inst = status->audio_inst;
8023 aconnector->audio_inst = inst;
8024 mutex_unlock(&adev->dm.audio_lock);
8026 amdgpu_dm_audio_eld_notify(adev, inst);
8031 * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
8032 * @crtc_state: the DRM CRTC state
8033 * @stream_state: the DC stream state.
8035 * Copy the mirrored transient state flags from DRM, to DC. It is used to bring
8036 * a dc_stream_state's flags in sync with a drm_crtc_state's flags.
8038 static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
8039 struct dc_stream_state *stream_state)
8041 stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
8045 * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
8046 * @state: The atomic state to commit
8048 * This will tell DC to commit the constructed DC state from atomic_check,
 * programming the hardware. Any failure here implies a hardware failure, since
8050 * atomic check should have filtered anything non-kosher.
8052 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
8054 struct drm_device *dev = state->dev;
8055 struct amdgpu_device *adev = drm_to_adev(dev);
8056 struct amdgpu_display_manager *dm = &adev->dm;
8057 struct dm_atomic_state *dm_state;
8058 struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
8060 struct drm_crtc *crtc;
8061 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
8062 unsigned long flags;
8063 bool wait_for_vblank = true;
8064 struct drm_connector *connector;
8065 struct drm_connector_state *old_con_state, *new_con_state;
8066 struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
8067 int crtc_disable_count = 0;
8068 bool mode_set_reset_required = false;
8070 trace_amdgpu_dm_atomic_commit_tail_begin(state);
8072 drm_atomic_helper_update_legacy_modeset_state(dev, state);
8074 dm_state = dm_atomic_get_new_state(state);
8075 if (dm_state && dm_state->context) {
8076 dc_state = dm_state->context;
8078 /* No state changes, retain current state. */
8079 dc_state_temp = dc_create_state(dm->dc);
8080 ASSERT(dc_state_temp);
8081 dc_state = dc_state_temp;
8082 dc_resource_state_copy_construct_current(dm->dc, dc_state);
8085 for_each_oldnew_crtc_in_state (state, crtc, old_crtc_state,
8086 new_crtc_state, i) {
8087 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8089 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8091 if (old_crtc_state->active &&
8092 (!new_crtc_state->active ||
8093 drm_atomic_crtc_needs_modeset(new_crtc_state))) {
8094 manage_dm_interrupts(adev, acrtc, false);
8095 dc_stream_release(dm_old_crtc_state->stream);
8099 drm_atomic_helper_calc_timestamping_constants(state);
8101 /* update changed items */
8102 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8103 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8105 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8106 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8109 "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
8110 "planes_changed:%d, mode_changed:%d,active_changed:%d,"
8111 "connectors_changed:%d\n",
8113 new_crtc_state->enable,
8114 new_crtc_state->active,
8115 new_crtc_state->planes_changed,
8116 new_crtc_state->mode_changed,
8117 new_crtc_state->active_changed,
8118 new_crtc_state->connectors_changed);
8120 /* Disable cursor if disabling crtc */
8121 if (old_crtc_state->active && !new_crtc_state->active) {
8122 struct dc_cursor_position position;
8124 memset(&position, 0, sizeof(position));
8125 mutex_lock(&dm->dc_lock);
8126 dc_stream_set_cursor_position(dm_old_crtc_state->stream, &position);
8127 mutex_unlock(&dm->dc_lock);
8130 /* Copy all transient state flags into dc state */
8131 if (dm_new_crtc_state->stream) {
8132 amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base,
8133 dm_new_crtc_state->stream);
		/* Handles the headless hotplug case, updating new_state and
		 * aconnector as needed.
		 */
8140 if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {
8142 DRM_DEBUG_DRIVER("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);
8144 if (!dm_new_crtc_state->stream) {
				/*
				 * This could happen because of issues with the
				 * delivery of userspace notifications: userspace
				 * tries to set a mode on a display that is in
				 * fact disconnected, so dc_sink is NULL on the
				 * aconnector and we expect a mode reset to follow
				 * shortly.
				 *
				 * It can also happen when an unplug occurs while
				 * the resume sequence is still completing. In
				 * that case we want to pretend we still have a
				 * sink, to keep the pipe running so that the hw
				 * state stays consistent with the sw state.
				 */
8160 DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
8161 __func__, acrtc->base.base.id);
8165 if (dm_old_crtc_state->stream)
8166 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
8168 pm_runtime_get_noresume(dev->dev);
8170 acrtc->enabled = true;
8171 acrtc->hw_mode = new_crtc_state->mode;
8172 crtc->hwmode = new_crtc_state->mode;
8173 mode_set_reset_required = true;
8174 } else if (modereset_required(new_crtc_state)) {
8175 DRM_DEBUG_DRIVER("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
8176 /* i.e. reset mode */
8177 if (dm_old_crtc_state->stream)
8178 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
8179 mode_set_reset_required = true;
8181 } /* for_each_crtc_in_state() */
	/* If there was a mode set or reset, disable eDP PSR. */
8185 if (mode_set_reset_required)
8186 amdgpu_dm_psr_disable_all(dm);
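	/*
	 * Program the new global DC state (streams, timings) built during
	 * atomic check. Per-plane surface updates are applied afterwards,
	 * CRTC by CRTC, in amdgpu_dm_commit_planes().
	 */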
8188 dm_enable_per_frame_crtc_master_sync(dc_state);
8189 mutex_lock(&dm->dc_lock);
8190 WARN_ON(!dc_commit_state(dm->dc, dc_state));
8191 mutex_unlock(&dm->dc_lock);
8194 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
8195 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8197 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8199 if (dm_new_crtc_state->stream != NULL) {
8200 const struct dc_stream_status *status =
8201 dc_stream_get_status(dm_new_crtc_state->stream);
8204 status = dc_stream_get_status_from_state(dc_state,
8205 dm_new_crtc_state->stream);
8207 DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc);
8209 acrtc->otg_inst = status->primary_otg_inst;
8212 #ifdef CONFIG_DRM_AMD_DC_HDCP
8213 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8214 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
8215 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
8216 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
8218 new_crtc_state = NULL;
8221 new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
8223 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8225 if (dm_new_crtc_state && dm_new_crtc_state->stream == NULL &&
8226 connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
8227 hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
8228 new_con_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
8229 dm_new_con_state->update_hdcp = true;
8233 if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue))
8234 hdcp_update_display(
8235 adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector,
8236 new_con_state->hdcp_content_type,
8237 new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED ? true
8242 /* Handle connector state changes */
8243 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8244 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
8245 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
8246 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
8247 struct dc_surface_update dummy_updates[MAX_SURFACES];
8248 struct dc_stream_update stream_update;
8249 struct dc_info_packet hdr_packet;
8250 struct dc_stream_status *status = NULL;
8251 bool abm_changed, hdr_changed, scaling_changed;
8253 memset(&dummy_updates, 0, sizeof(dummy_updates));
8254 memset(&stream_update, 0, sizeof(stream_update));
8257 new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
8258 old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
8261 /* Skip any modesets/resets */
8262 if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
8265 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8266 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8268 scaling_changed = is_scaling_state_different(dm_new_con_state,
8271 abm_changed = dm_new_crtc_state->abm_level !=
8272 dm_old_crtc_state->abm_level;
8275 is_hdr_metadata_different(old_con_state, new_con_state);
8277 if (!scaling_changed && !abm_changed && !hdr_changed)
8280 stream_update.stream = dm_new_crtc_state->stream;
8281 if (scaling_changed) {
8282 update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
8283 dm_new_con_state, dm_new_crtc_state->stream);
8285 stream_update.src = dm_new_crtc_state->stream->src;
8286 stream_update.dst = dm_new_crtc_state->stream->dst;
8290 dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level;
8292 stream_update.abm_level = &dm_new_crtc_state->abm_level;
8296 fill_hdr_info_packet(new_con_state, &hdr_packet);
8297 stream_update.hdr_static_metadata = &hdr_packet;
8300 status = dc_stream_get_status(dm_new_crtc_state->stream);
8302 WARN_ON(!status->plane_count);
8305 * TODO: DC refuses to perform stream updates without a dc_surface_update.
8306 * Here we create an empty update on each plane.
8307 * To fix this, DC should permit updating only stream properties.
8309 for (j = 0; j < status->plane_count; j++)
8310 dummy_updates[j].surface = status->plane_states[0];
8313 mutex_lock(&dm->dc_lock);
8314 dc_commit_updates_for_stream(dm->dc,
8316 status->plane_count,
8317 dm_new_crtc_state->stream,
8320 mutex_unlock(&dm->dc_lock);
8323 /* Count number of newly disabled CRTCs for dropping PM refs later. */
8324 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
8325 new_crtc_state, i) {
8326 if (old_crtc_state->active && !new_crtc_state->active)
8327 crtc_disable_count++;
8329 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8330 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8332 /* For freesync config update on crtc state and params for irq */
8333 update_stream_irq_parameters(dm, dm_new_crtc_state);
8335 /* Handle vrr on->off / off->on transitions */
8336 amdgpu_dm_handle_vrr_transition(dm_old_crtc_state,
8341 * Enable interrupts for CRTCs that are newly enabled or went through
8342 * a modeset. This is intentionally deferred until after the front end
8343 * state has been modified, so that the OTG is already on and the IRQ
8344 * handlers do not access stale or invalid state.
8346 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8347 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8349 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8351 if (new_crtc_state->active &&
8352 (!old_crtc_state->active ||
8353 drm_atomic_crtc_needs_modeset(new_crtc_state))) {
8354 dc_stream_retain(dm_new_crtc_state->stream);
8355 acrtc->dm_irq_params.stream = dm_new_crtc_state->stream;
8356 manage_dm_interrupts(adev, acrtc, true);
8358 #ifdef CONFIG_DEBUG_FS
8360 * Frontend may have changed so reapply the CRC capture
8361 * settings for the stream.
8363 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8365 if (amdgpu_dm_is_valid_crc_source(dm_new_crtc_state->crc_src)) {
8366 amdgpu_dm_crtc_configure_crc_source(
8367 crtc, dm_new_crtc_state,
8368 dm_new_crtc_state->crc_src);
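/* If any CRTC requested an async flip, don't stall for vblank when committing planes. */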
8374 for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
8375 if (new_crtc_state->async_flip)
8376 wait_for_vblank = false;
8378 /* update planes when needed per crtc*/
8379 for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
8380 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8382 if (dm_new_crtc_state->stream)
8383 amdgpu_dm_commit_planes(state, dc_state, dev,
8384 dm, crtc, wait_for_vblank);
8387 /* Update audio instances for each connector. */
8388 amdgpu_dm_commit_audio(dev, state);
8391 * Send a vblank event for all events not handled in the flip path and
8392 * mark the event as consumed for drm_atomic_helper_commit_hw_done().
8394 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
8395 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
8397 if (new_crtc_state->event)
8398 drm_send_event_locked(dev, &new_crtc_state->event->base);
8400 new_crtc_state->event = NULL;
8402 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
8404 /* Signal HW programming completion */
8405 drm_atomic_helper_commit_hw_done(state);
8407 if (wait_for_vblank)
8408 drm_atomic_helper_wait_for_flip_done(dev, state);
8410 drm_atomic_helper_cleanup_planes(dev, state);
8412 /* return the stolen vga memory back to VRAM */
8413 if (!adev->mman.keep_stolen_vga_memory)
8414 amdgpu_bo_free_kernel(&adev->mman.stolen_vga_memory, NULL, NULL);
8415 amdgpu_bo_free_kernel(&adev->mman.stolen_extended_memory, NULL, NULL);
8418 * Finally, drop a runtime PM reference for each newly disabled CRTC,
8419 * so we can put the GPU into runtime suspend if we're not driving any
8422 for (i = 0; i < crtc_disable_count; i++)
8423 pm_runtime_put_autosuspend(dev->dev);
8424 pm_runtime_mark_last_busy(dev->dev);
8427 dc_release_state(dc_state_temp);
8431 static int dm_force_atomic_commit(struct drm_connector *connector)
8434 struct drm_device *ddev = connector->dev;
8435 struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
8436 struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
8437 struct drm_plane *plane = disconnected_acrtc->base.primary;
8438 struct drm_connector_state *conn_state;
8439 struct drm_crtc_state *crtc_state;
8440 struct drm_plane_state *plane_state;
8445 state->acquire_ctx = ddev->mode_config.acquire_ctx;
8447 /* Construct an atomic state to restore the previous display settings */
8450 * Attach connectors to drm_atomic_state
8452 conn_state = drm_atomic_get_connector_state(state, connector);
8454 ret = PTR_ERR_OR_ZERO(conn_state);
8458 /* Attach crtc to drm_atomic_state*/
8459 crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
8461 ret = PTR_ERR_OR_ZERO(crtc_state);
8465 /* force a restore */
8466 crtc_state->mode_changed = true;
8468 /* Attach plane to drm_atomic_state */
8469 plane_state = drm_atomic_get_plane_state(state, plane);
8471 ret = PTR_ERR_OR_ZERO(plane_state);
8476 /* Call commit internally with the state we just constructed */
8477 ret = drm_atomic_commit(state);
8482 DRM_ERROR("Restoring old state failed with %i\n", ret);
8483 drm_atomic_state_put(state);
8489 * This function handles all cases when a mode set does not come upon hotplug.
8490 * This includes when a display is unplugged and then plugged back into the
8491 * same port, and when running without usermode desktop manager support.
8493 void dm_restore_drm_connector_state(struct drm_device *dev,
8494 struct drm_connector *connector)
8496 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
8497 struct amdgpu_crtc *disconnected_acrtc;
8498 struct dm_crtc_state *acrtc_state;
8500 if (!aconnector->dc_sink || !connector->state || !connector->encoder)
8503 disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
8504 if (!disconnected_acrtc)
8507 acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
8508 if (!acrtc_state->stream)
8512 * If the previous sink is not released and is different from the current
8513 * one, we deduce that we are in a state where we cannot rely on a usermode
8514 * call to turn on the display, so we do it here.
8516 if (acrtc_state->stream->sink != aconnector->dc_sink)
8517 dm_force_atomic_commit(&aconnector->base);
8521 * Grabs all modesetting locks to serialize against any blocking commits,
8522 * and waits for completion of all non-blocking commits.
8524 static int do_aquire_global_lock(struct drm_device *dev,
8525 struct drm_atomic_state *state)
8527 struct drm_crtc *crtc;
8528 struct drm_crtc_commit *commit;
8532 * Adding all modeset locks to acquire_ctx will
8533 * ensure that when the framework releases it, the
8534 * extra locks we are taking here will also get released.
8536 ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
8540 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
8541 spin_lock(&crtc->commit_lock);
8542 commit = list_first_entry_or_null(&crtc->commit_list,
8543 struct drm_crtc_commit, commit_entry);
8545 drm_crtc_commit_get(commit);
8546 spin_unlock(&crtc->commit_lock);
8552 * Make sure all pending HW programming completed and
8555 ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);
8558 ret = wait_for_completion_interruptible_timeout(
8559 &commit->flip_done, 10*HZ);
8562 DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done "
8563 "timed out\n", crtc->base.id, crtc->name);
8565 drm_crtc_commit_put(commit);
8568 return ret < 0 ? ret : 0;
8571 static void get_freesync_config_for_crtc(
8572 struct dm_crtc_state *new_crtc_state,
8573 struct dm_connector_state *new_con_state)
8575 struct mod_freesync_config config = {0};
8576 struct amdgpu_dm_connector *aconnector =
8577 to_amdgpu_dm_connector(new_con_state->base.connector);
8578 struct drm_display_mode *mode = &new_crtc_state->base.mode;
8579 int vrefresh = drm_mode_vrefresh(mode);
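/*
 * VRR is only usable when the sink reports FreeSync support and the mode's
 * nominal refresh rate lies within the sink's supported min/max range.
 */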
8581 new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
8582 vrefresh >= aconnector->min_vfreq &&
8583 vrefresh <= aconnector->max_vfreq;
8585 if (new_crtc_state->vrr_supported) {
8586 new_crtc_state->stream->ignore_msa_timing_param = true;
8587 config.state = new_crtc_state->base.vrr_enabled ?
8588 VRR_STATE_ACTIVE_VARIABLE :
8590 config.min_refresh_in_uhz =
8591 aconnector->min_vfreq * 1000000;
8592 config.max_refresh_in_uhz =
8593 aconnector->max_vfreq * 1000000;
8594 config.vsif_supported = true;
8598 new_crtc_state->freesync_config = config;
8601 static void reset_freesync_config_for_crtc(
8602 struct dm_crtc_state *new_crtc_state)
8604 new_crtc_state->vrr_supported = false;
8606 memset(&new_crtc_state->vrr_infopacket, 0,
8607 sizeof(new_crtc_state->vrr_infopacket));
8610 static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
8611 struct drm_atomic_state *state,
8612 struct drm_crtc *crtc,
8613 struct drm_crtc_state *old_crtc_state,
8614 struct drm_crtc_state *new_crtc_state,
8616 bool *lock_and_validation_needed)
8618 struct dm_atomic_state *dm_state = NULL;
8619 struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
8620 struct dc_stream_state *new_stream;
8624 * TODO Move this code into dm_crtc_atomic_check once we get rid of dc_validation_set
8625 * update changed items
8627 struct amdgpu_crtc *acrtc = NULL;
8628 struct amdgpu_dm_connector *aconnector = NULL;
8629 struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
8630 struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;
8634 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8635 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8636 acrtc = to_amdgpu_crtc(crtc);
8637 aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);
8639 /* TODO This hack should go away */
8640 if (aconnector && enable) {
8641 /* Make sure fake sink is created in plug-in scenario */
8642 drm_new_conn_state = drm_atomic_get_new_connector_state(state,
8644 drm_old_conn_state = drm_atomic_get_old_connector_state(state,
8647 if (IS_ERR(drm_new_conn_state)) {
8648 ret = PTR_ERR_OR_ZERO(drm_new_conn_state);
8652 dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
8653 dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);
8655 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
8658 new_stream = create_validate_stream_for_sink(aconnector,
8659 &new_crtc_state->mode,
8661 dm_old_crtc_state->stream);
8664 * We can have no stream on ACTION_SET if a display
8665 * was disconnected during S3. In this case it is not an
8666 * error; the OS will be updated after detection and
8667 * will do the right thing on the next atomic commit.
8671 DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
8672 __func__, acrtc->base.base.id);
8678 * TODO: Check VSDB bits to decide whether this should
8679 * be enabled or not.
8681 new_stream->triggered_crtc_reset.enabled =
8682 dm->force_timing_sync;
8684 dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
8686 ret = fill_hdr_info_packet(drm_new_conn_state,
8687 &new_stream->hdr_static_metadata);
8692 * If we already removed the old stream from the context
8693 * (and set the new stream to NULL) then we can't reuse
8694 * the old stream even if the stream and scaling are unchanged.
8695 * We'll hit the BUG_ON and get a black screen.
8697 * TODO: Refactor this function to allow this check to work
8698 * in all conditions.
8700 if (dm_new_crtc_state->stream &&
8701 dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
8702 dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
8703 new_crtc_state->mode_changed = false;
8704 DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d",
8705 new_crtc_state->mode_changed);
8709 /* mode_changed flag may get updated above, need to check again */
8710 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
8714 "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
8715 "planes_changed:%d, mode_changed:%d,active_changed:%d,"
8716 "connectors_changed:%d\n",
8718 new_crtc_state->enable,
8719 new_crtc_state->active,
8720 new_crtc_state->planes_changed,
8721 new_crtc_state->mode_changed,
8722 new_crtc_state->active_changed,
8723 new_crtc_state->connectors_changed);
8725 /* Remove stream for any changed/disabled CRTC */
8728 if (!dm_old_crtc_state->stream)
8731 ret = dm_atomic_get_state(state, &dm_state);
8735 DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
8738 /* i.e. reset mode */
8739 if (dc_remove_stream_from_ctx(
8742 dm_old_crtc_state->stream) != DC_OK) {
8747 dc_stream_release(dm_old_crtc_state->stream);
8748 dm_new_crtc_state->stream = NULL;
8750 reset_freesync_config_for_crtc(dm_new_crtc_state);
8752 *lock_and_validation_needed = true;
8754 } else {/* Add stream for any updated/enabled CRTC */
8756 * Quick fix to prevent a NULL pointer dereference on new_stream when
8757 * added MST connectors are not found in the existing crtc_state in chained mode.
8758 * TODO: need to dig out the root cause of this.
8760 if (!aconnector || (!aconnector->dc_sink && aconnector->mst_port))
8763 if (modereset_required(new_crtc_state))
8766 if (modeset_required(new_crtc_state, new_stream,
8767 dm_old_crtc_state->stream)) {
8769 WARN_ON(dm_new_crtc_state->stream);
8771 ret = dm_atomic_get_state(state, &dm_state);
8775 dm_new_crtc_state->stream = new_stream;
8777 dc_stream_retain(new_stream);
8779 DRM_DEBUG_DRIVER("Enabling DRM crtc: %d\n",
8782 if (dc_add_stream_to_ctx(
8785 dm_new_crtc_state->stream) != DC_OK) {
8790 *lock_and_validation_needed = true;
8795 /* Release extra reference */
8797 dc_stream_release(new_stream);
8800 * We want to do dc stream updates that do not require a
8801 * full modeset below.
8803 if (!(enable && aconnector && new_crtc_state->active))
8806 * Given the above conditions, the dc state cannot be NULL because:
8807 * 1. We're in the process of enabling the CRTC (its stream has just
8808 * been added to the dc context, or is already in the context),
8809 * 2. the CRTC has a valid connector attached, and
8810 * 3. it is currently active and enabled.
8811 * => The dc stream state currently exists.
8813 BUG_ON(dm_new_crtc_state->stream == NULL);
8815 /* Scaling or underscan settings */
8816 if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state))
8817 update_stream_scaling_settings(
8818 &new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);
8821 dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
8824 * Color management settings. We also update color properties
8825 * when a modeset is needed, to ensure it gets reprogrammed.
8827 if (dm_new_crtc_state->base.color_mgmt_changed ||
8828 drm_atomic_crtc_needs_modeset(new_crtc_state)) {
8829 ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state);
8834 /* Update Freesync settings. */
8835 get_freesync_config_for_crtc(dm_new_crtc_state,
8842 dc_stream_release(new_stream);
8846 static bool should_reset_plane(struct drm_atomic_state *state,
8847 struct drm_plane *plane,
8848 struct drm_plane_state *old_plane_state,
8849 struct drm_plane_state *new_plane_state)
8851 struct drm_plane *other;
8852 struct drm_plane_state *old_other_state, *new_other_state;
8853 struct drm_crtc_state *new_crtc_state;
8857 * TODO: Remove this hack once the checks below are sufficient
8858 * to determine when we need to reset all the planes on
8861 if (state->allow_modeset)
8864 /* Exit early if we know that we're adding or removing the plane. */
8865 if (old_plane_state->crtc != new_plane_state->crtc)
8868 /* old crtc == new_crtc == NULL, plane not in context. */
8869 if (!new_plane_state->crtc)
8873 drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);
8875 if (!new_crtc_state)
8878 /* CRTC Degamma changes currently require us to recreate planes. */
8879 if (new_crtc_state->color_mgmt_changed)
8882 if (drm_atomic_crtc_needs_modeset(new_crtc_state))
8886 * If there are any new primary or overlay planes being added or
8887 * removed then the z-order can potentially change. To ensure
8888 * correct z-order and pipe acquisition the current DC architecture
8889 * requires us to remove and recreate all existing planes.
8891 * TODO: Come up with a more elegant solution for this.
8893 for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
8894 struct amdgpu_framebuffer *old_afb, *new_afb;
8895 if (other->type == DRM_PLANE_TYPE_CURSOR)
8898 if (old_other_state->crtc != new_plane_state->crtc &&
8899 new_other_state->crtc != new_plane_state->crtc)
8902 if (old_other_state->crtc != new_other_state->crtc)
8905 /* Src/dst size and scaling updates. */
8906 if (old_other_state->src_w != new_other_state->src_w ||
8907 old_other_state->src_h != new_other_state->src_h ||
8908 old_other_state->crtc_w != new_other_state->crtc_w ||
8909 old_other_state->crtc_h != new_other_state->crtc_h)
8912 /* Rotation / mirroring updates. */
8913 if (old_other_state->rotation != new_other_state->rotation)
8916 /* Blending updates. */
8917 if (old_other_state->pixel_blend_mode !=
8918 new_other_state->pixel_blend_mode)
8921 /* Alpha updates. */
8922 if (old_other_state->alpha != new_other_state->alpha)
8925 /* Colorspace changes. */
8926 if (old_other_state->color_range != new_other_state->color_range ||
8927 old_other_state->color_encoding != new_other_state->color_encoding)
8930 /* Framebuffer checks fall at the end. */
8931 if (!old_other_state->fb || !new_other_state->fb)
8934 /* Pixel format changes can require bandwidth updates. */
8935 if (old_other_state->fb->format != new_other_state->fb->format)
8938 old_afb = (struct amdgpu_framebuffer *)old_other_state->fb;
8939 new_afb = (struct amdgpu_framebuffer *)new_other_state->fb;
8941 /* Tiling and DCC changes also require bandwidth updates. */
8942 if (old_afb->tiling_flags != new_afb->tiling_flags ||
8943 old_afb->base.modifier != new_afb->base.modifier)
8950 static int dm_check_cursor_fb(struct amdgpu_crtc *new_acrtc,
8951 struct drm_plane_state *new_plane_state,
8952 struct drm_framebuffer *fb)
8954 struct amdgpu_device *adev = drm_to_adev(new_acrtc->base.dev);
8955 struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(fb);
8959 if (fb->width > new_acrtc->max_cursor_width ||
8960 fb->height > new_acrtc->max_cursor_height) {
8961 DRM_DEBUG_ATOMIC("Bad cursor FB size %dx%d\n",
8962 new_plane_state->fb->width,
8963 new_plane_state->fb->height);
8966 if (new_plane_state->src_w != fb->width << 16 ||
8967 new_plane_state->src_h != fb->height << 16) {
8968 DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
8972 /* Pitch in pixels */
8973 pitch = fb->pitches[0] / fb->format->cpp[0];
8975 if (fb->width != pitch) {
8976 DRM_DEBUG_ATOMIC("Cursor FB width %d doesn't match pitch %d",
8985 /* FB pitch is supported by cursor plane */
8988 DRM_DEBUG_ATOMIC("Bad cursor FB pitch %d px\n", pitch);
8992 /* Core DRM takes care of checking FB modifiers, so we only need to
8993 * check tiling flags when the FB doesn't have a modifier. */
8994 if (!(fb->flags & DRM_MODE_FB_MODIFIERS)) {
8995 if (adev->family < AMDGPU_FAMILY_AI) {
8996 linear = AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_2D_TILED_THIN1 &&
8997 AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_1D_TILED_THIN1 &&
8998 AMDGPU_TILING_GET(afb->tiling_flags, MICRO_TILE_MODE) == 0;
9000 linear = AMDGPU_TILING_GET(afb->tiling_flags, SWIZZLE_MODE) == 0;
9003 DRM_DEBUG_ATOMIC("Cursor FB not linear");
9011 static int dm_update_plane_state(struct dc *dc,
9012 struct drm_atomic_state *state,
9013 struct drm_plane *plane,
9014 struct drm_plane_state *old_plane_state,
9015 struct drm_plane_state *new_plane_state,
9017 bool *lock_and_validation_needed)
9020 struct dm_atomic_state *dm_state = NULL;
9021 struct drm_crtc *new_plane_crtc, *old_plane_crtc;
9022 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
9023 struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
9024 struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
9025 struct amdgpu_crtc *new_acrtc;
9030 new_plane_crtc = new_plane_state->crtc;
9031 old_plane_crtc = old_plane_state->crtc;
9032 dm_new_plane_state = to_dm_plane_state(new_plane_state);
9033 dm_old_plane_state = to_dm_plane_state(old_plane_state);
9035 if (plane->type == DRM_PLANE_TYPE_CURSOR) {
9036 if (!enable || !new_plane_crtc ||
9037 drm_atomic_plane_disabling(plane->state, new_plane_state))
9040 new_acrtc = to_amdgpu_crtc(new_plane_crtc);
9042 if (new_plane_state->src_x != 0 || new_plane_state->src_y != 0) {
9043 DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
9047 if (new_plane_state->fb) {
9048 ret = dm_check_cursor_fb(new_acrtc, new_plane_state,
9049 new_plane_state->fb);
9057 needs_reset = should_reset_plane(state, plane, old_plane_state,
9060 /* Remove any changed/removed planes */
9065 if (!old_plane_crtc)
9068 old_crtc_state = drm_atomic_get_old_crtc_state(
9069 state, old_plane_crtc);
9070 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9072 if (!dm_old_crtc_state->stream)
9075 DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
9076 plane->base.id, old_plane_crtc->base.id);
9078 ret = dm_atomic_get_state(state, &dm_state);
9082 if (!dc_remove_plane_from_context(
9084 dm_old_crtc_state->stream,
9085 dm_old_plane_state->dc_state,
9086 dm_state->context)) {
9092 dc_plane_state_release(dm_old_plane_state->dc_state);
9093 dm_new_plane_state->dc_state = NULL;
9095 *lock_and_validation_needed = true;
9097 } else { /* Add new planes */
9098 struct dc_plane_state *dc_new_plane_state;
9100 if (drm_atomic_plane_disabling(plane->state, new_plane_state))
9103 if (!new_plane_crtc)
9106 new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
9107 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9109 if (!dm_new_crtc_state->stream)
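/*
 * Validate the plane's position and scaling against the CRTC before
 * building a DC plane state for it.
 */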
9115 ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
9119 WARN_ON(dm_new_plane_state->dc_state);
9121 dc_new_plane_state = dc_create_plane_state(dc);
9122 if (!dc_new_plane_state)
9125 DRM_DEBUG_DRIVER("Enabling DRM plane: %d on DRM crtc %d\n",
9126 plane->base.id, new_plane_crtc->base.id);
9128 ret = fill_dc_plane_attributes(
9129 drm_to_adev(new_plane_crtc->dev),
9134 dc_plane_state_release(dc_new_plane_state);
9138 ret = dm_atomic_get_state(state, &dm_state);
9140 dc_plane_state_release(dc_new_plane_state);
9145 * Any atomic check errors that occur after this will
9146 * not need a release. The plane state will be attached
9147 * to the stream, and therefore part of the atomic
9148 * state. It'll be released when the atomic state is
9151 if (!dc_add_plane_to_context(
9153 dm_new_crtc_state->stream,
9155 dm_state->context)) {
9157 dc_plane_state_release(dc_new_plane_state);
9161 dm_new_plane_state->dc_state = dc_new_plane_state;
9163 /* Tell DC to do a full surface update every time there
9164 * is a plane change. Inefficient, but works for now.
9166 dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;
9168 *lock_and_validation_needed = true;
9175 static int dm_check_crtc_cursor(struct drm_atomic_state *state,
9176 struct drm_crtc *crtc,
9177 struct drm_crtc_state *new_crtc_state)
9179 struct drm_plane_state *new_cursor_state, *new_primary_state;
9180 int cursor_scale_w, cursor_scale_h, primary_scale_w, primary_scale_h;
9182 /* On DCE and DCN there is no dedicated hardware cursor plane. We get a
9183 * cursor per pipe but it's going to inherit the scaling and
9184 * positioning from the underlying pipe. Check the cursor plane's
9185 * blending properties match the primary plane's. */
9187 new_cursor_state = drm_atomic_get_new_plane_state(state, crtc->cursor);
9188 new_primary_state = drm_atomic_get_new_plane_state(state, crtc->primary);
9189 if (!new_cursor_state || !new_primary_state || !new_cursor_state->fb) {
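/*
 * src_w/src_h are 16.16 fixed point; convert them to whole pixels and
 * express each scaling factor in 0.1% units so the cursor and primary
 * plane scaling can be compared exactly.
 */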
9193 cursor_scale_w = new_cursor_state->crtc_w * 1000 /
9194 (new_cursor_state->src_w >> 16);
9195 cursor_scale_h = new_cursor_state->crtc_h * 1000 /
9196 (new_cursor_state->src_h >> 16);
9198 primary_scale_w = new_primary_state->crtc_w * 1000 /
9199 (new_primary_state->src_w >> 16);
9200 primary_scale_h = new_primary_state->crtc_h * 1000 /
9201 (new_primary_state->src_h >> 16);
9203 if (cursor_scale_w != primary_scale_w ||
9204 cursor_scale_h != primary_scale_h) {
9205 DRM_DEBUG_ATOMIC("Cursor plane scaling doesn't match primary plane\n");
9212 #if defined(CONFIG_DRM_AMD_DC_DCN)
9213 static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc)
9215 struct drm_connector *connector;
9216 struct drm_connector_state *conn_state;
9217 struct amdgpu_dm_connector *aconnector = NULL;
9219 for_each_new_connector_in_state(state, connector, conn_state, i) {
9220 if (conn_state->crtc != crtc)
9223 aconnector = to_amdgpu_dm_connector(connector);
9224 if (!aconnector->port || !aconnector->mst_port)
9233 return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_port->mst_mgr);
9238 * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
9239 * @dev: The DRM device
9240 * @state: The atomic state to validate
9242 * Validate that the given atomic state is programmable by DC into hardware.
9243 * This involves constructing a &struct dc_state reflecting the new hardware
9244 * state we wish to commit, then querying DC to see if it is programmable. It's
9245 * important not to modify the existing DC state. Otherwise, atomic_check
9246 * may unexpectedly commit hardware changes.
9248 * When validating the DC state, it's important that the right locks are
9249 * acquired. For the full update case, which removes/adds/updates streams on
9250 * one CRTC while flipping on another CRTC, acquiring the global lock guarantees
9251 * that any such full update commit will wait for completion of any outstanding
9252 * flip using DRM's synchronization events.
9254 * Note that DM adds the affected connectors for all CRTCs in state, even when
9255 * that might not seem necessary. This is because DC stream creation requires
9256 * the DC sink, which is tied to the DRM connector state. Cleaning this up
9257 * should be possible but non-trivial - a possible TODO item.
9259 * Return: 0 on success, or a negative error code if validation failed.
9261 static int amdgpu_dm_atomic_check(struct drm_device *dev,
9262 struct drm_atomic_state *state)
9264 struct amdgpu_device *adev = drm_to_adev(dev);
9265 struct dm_atomic_state *dm_state = NULL;
9266 struct dc *dc = adev->dm.dc;
9267 struct drm_connector *connector;
9268 struct drm_connector_state *old_con_state, *new_con_state;
9269 struct drm_crtc *crtc;
9270 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
9271 struct drm_plane *plane;
9272 struct drm_plane_state *old_plane_state, *new_plane_state;
9273 enum dc_status status;
9275 bool lock_and_validation_needed = false;
9276 struct dm_crtc_state *dm_old_crtc_state;
9278 trace_amdgpu_dm_atomic_check_begin(state);
9280 ret = drm_atomic_helper_check_modeset(dev, state);
9284 /* Check connector changes */
9285 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9286 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
9287 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
9289 /* Skip connectors that are disabled or part of modeset already. */
9290 if (!old_con_state->crtc && !new_con_state->crtc)
9293 if (!new_con_state->crtc)
9296 new_crtc_state = drm_atomic_get_crtc_state(state, new_con_state->crtc);
9297 if (IS_ERR(new_crtc_state)) {
9298 ret = PTR_ERR(new_crtc_state);
9302 if (dm_old_con_state->abm_level !=
9303 dm_new_con_state->abm_level)
9304 new_crtc_state->connectors_changed = true;
9307 #if defined(CONFIG_DRM_AMD_DC_DCN)
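/*
 * On ASICs that can run DSC over MST, a modeset on one CRTC may change the
 * DSC configuration of other CRTCs sharing the same MST topology, so pull
 * those CRTCs into the atomic state as well.
 */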
9308 if (adev->asic_type >= CHIP_NAVI10) {
9309 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9310 if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
9311 ret = add_affected_mst_dsc_crtcs(state, crtc);
9318 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9319 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9321 if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
9322 !new_crtc_state->color_mgmt_changed &&
9323 old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled &&
9324 dm_old_crtc_state->dsc_force_changed == false)
9327 if (!new_crtc_state->enable)
9330 ret = drm_atomic_add_affected_connectors(state, crtc);
9334 ret = drm_atomic_add_affected_planes(state, crtc);
9338 if (dm_old_crtc_state->dsc_force_changed)
9339 new_crtc_state->mode_changed = true;
9343 * Add all primary and overlay planes on the CRTC to the state
9344 * whenever a plane is enabled to maintain correct z-ordering
9345 * and to enable fast surface updates.
9347 drm_for_each_crtc(crtc, dev) {
9348 bool modified = false;
9350 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
9351 if (plane->type == DRM_PLANE_TYPE_CURSOR)
9354 if (new_plane_state->crtc == crtc ||
9355 old_plane_state->crtc == crtc) {
9364 drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
9365 if (plane->type == DRM_PLANE_TYPE_CURSOR)
9369 drm_atomic_get_plane_state(state, plane);
9371 if (IS_ERR(new_plane_state)) {
9372 ret = PTR_ERR(new_plane_state);
9378 /* Remove existing planes if they are modified */
9379 for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
9380 ret = dm_update_plane_state(dc, state, plane,
9384 &lock_and_validation_needed);
9389 /* Disable all crtcs which require disable */
9390 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9391 ret = dm_update_crtc_state(&adev->dm, state, crtc,
9395 &lock_and_validation_needed);
9400 /* Enable all crtcs which require enable */
9401 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9402 ret = dm_update_crtc_state(&adev->dm, state, crtc,
9406 &lock_and_validation_needed);
9411 /* Add new/modified planes */
9412 for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
9413 ret = dm_update_plane_state(dc, state, plane,
9417 &lock_and_validation_needed);
9422 /* Run this here since we want to validate the streams we created */
9423 ret = drm_atomic_helper_check_planes(dev, state);
9427 /* Check cursor planes scaling */
9428 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
9429 ret = dm_check_crtc_cursor(state, crtc, new_crtc_state);
9434 if (state->legacy_cursor_update) {
9436 * This is a fast cursor update coming from the plane update
9437 * helper; check if it can be done asynchronously for better
9440 state->async_update =
9441 !drm_atomic_helper_async_check(dev, state);
9444 * Skip the remaining global validation if this is an async
9445 * update. Cursor updates can be done without affecting
9446 * state or bandwidth calcs and this avoids the performance
9447 * penalty of locking the private state object and
9448 * allocating a new dc_state.
9450 if (state->async_update)
9454 /* Check scaling and underscan changes */
9455 /* TODO Removed scaling changes validation due to inability to commit
9456 * a new stream into the context w/o causing a full reset. Need to
9457 * decide how to handle this.
9459 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9460 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
9461 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
9462 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
9464 /* Skip any modesets/resets */
9465 if (!acrtc || drm_atomic_crtc_needs_modeset(
9466 drm_atomic_get_new_crtc_state(state, &acrtc->base)))
9469 /* Skip anything that is not a scaling or underscan change */
9470 if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
9473 lock_and_validation_needed = true;
9477 * Streams and planes are reset when there are changes that affect
9478 * bandwidth. Anything that affects bandwidth needs to go through
9479 * DC global validation to ensure that the configuration can be applied
9482 * We have to currently stall out here in atomic_check for outstanding
9483 * commits to finish in this case because our IRQ handlers reference
9484 * DRM state directly - we can end up disabling interrupts too early
9487 * TODO: Remove this stall and drop DM state private objects.
9489 if (lock_and_validation_needed) {
9490 ret = dm_atomic_get_state(state, &dm_state);
9494 ret = do_aquire_global_lock(dev, state);
9498 #if defined(CONFIG_DRM_AMD_DC_DCN)
9499 if (!compute_mst_dsc_configs_for_state(state, dm_state->context))
9502 ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context);
9508 * Perform validation of MST topology in the state:
9509 * We need to perform MST atomic check before calling
9510 * dc_validate_global_state(), or we risk getting stuck in
9511 * an infinite loop and hanging eventually.
9513 ret = drm_dp_mst_atomic_check(state);
9516 status = dc_validate_global_state(dc, dm_state->context, false);
9517 if (status != DC_OK) {
9518 DC_LOG_WARNING("DC global validation failure: %s (%d)",
9519 dc_status_to_str(status), status);
9525 * The commit is a fast update. Fast updates shouldn't change
9526 * the DC context or affect global validation, and they can have their
9527 * commit work done in parallel with other commits not touching
9528 * the same resource. If we have a new DC context as part of
9529 * the DM atomic state from validation we need to free it and
9530 * retain the existing one instead.
9532 * Furthermore, since the DM atomic state only contains the DC
9533 * context and can safely be annulled, we can free the state
9534 * and clear the associated private object now to free
9535 * some memory and avoid a possible use-after-free later.
9538 for (i = 0; i < state->num_private_objs; i++) {
9539 struct drm_private_obj *obj = state->private_objs[i].ptr;
9541 if (obj->funcs == adev->dm.atomic_obj.funcs) {
9542 int j = state->num_private_objs-1;
9544 dm_atomic_destroy_state(obj,
9545 state->private_objs[i].state);
9547 /* If i is not at the end of the array then the
9548 * last element needs to be moved to where i was
9549 * before the array can safely be truncated.
9552 state->private_objs[i] =
9553 state->private_objs[j];
9555 state->private_objs[j].ptr = NULL;
9556 state->private_objs[j].state = NULL;
9557 state->private_objs[j].old_state = NULL;
9558 state->private_objs[j].new_state = NULL;
9560 state->num_private_objs = j;
9566 /* Store the overall update type for use later in atomic check. */
9567 for_each_new_crtc_in_state (state, crtc, new_crtc_state, i) {
9568 struct dm_crtc_state *dm_new_crtc_state =
9569 to_dm_crtc_state(new_crtc_state);
9571 dm_new_crtc_state->update_type = lock_and_validation_needed ?
9576 /* Must be success */
9579 trace_amdgpu_dm_atomic_check_finish(state, ret);
9584 if (ret == -EDEADLK)
9585 DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
9586 else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
9587 DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
9589 DRM_DEBUG_DRIVER("Atomic check failed with err: %d\n", ret);
9591 trace_amdgpu_dm_atomic_check_finish(state, ret);
9596 static bool is_dp_capable_without_timing_msa(struct dc *dc,
9597 struct amdgpu_dm_connector *amdgpu_dm_connector)
9600 bool capable = false;
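/*
 * The DP_MSA_TIMING_PAR_IGNORED bit of DP_DOWN_STREAM_PORT_COUNT indicates
 * that the sink can ignore the MSA timing parameters, i.e. it can follow a
 * varying timing.
 */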
9602 if (amdgpu_dm_connector->dc_link &&
9603 dm_helpers_dp_read_dpcd(
9605 amdgpu_dm_connector->dc_link,
9606 DP_DOWN_STREAM_PORT_COUNT,
9608 sizeof(dpcd_data))) {
9609 capable = (dpcd_data & DP_MSA_TIMING_PAR_IGNORED) ? true : false;
9614 void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
9618 bool edid_check_required;
9619 struct detailed_timing *timing;
9620 struct detailed_non_pixel *data;
9621 struct detailed_data_monitor_range *range;
9622 struct amdgpu_dm_connector *amdgpu_dm_connector =
9623 to_amdgpu_dm_connector(connector);
9624 struct dm_connector_state *dm_con_state = NULL;
9626 struct drm_device *dev = connector->dev;
9627 struct amdgpu_device *adev = drm_to_adev(dev);
9628 bool freesync_capable = false;
9630 if (!connector->state) {
9631 DRM_ERROR("%s - Connector has no state", __func__);
9636 dm_con_state = to_dm_connector_state(connector->state);
9638 amdgpu_dm_connector->min_vfreq = 0;
9639 amdgpu_dm_connector->max_vfreq = 0;
9640 amdgpu_dm_connector->pixel_clock_mhz = 0;
9645 dm_con_state = to_dm_connector_state(connector->state);
9647 edid_check_required = false;
9648 if (!amdgpu_dm_connector->dc_sink) {
9649 DRM_ERROR("dc_sink NULL, could not add free_sync module.\n");
9652 if (!adev->dm.freesync_module)
9655 * If the EDID is non-zero, restrict freesync only to DP and eDP.
9658 if (amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
9659 || amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_EDP) {
9660 edid_check_required = is_dp_capable_without_timing_msa(
9662 amdgpu_dm_connector);
9665 if (edid_check_required == true && (edid->version > 1 ||
9666 (edid->version == 1 && edid->revision > 1))) {
9667 for (i = 0; i < 4; i++) {
9669 timing = &edid->detailed_timings[i];
9670 data = &timing->data.other_data;
9671 range = &data->data.range;
9673 * Check if monitor has continuous frequency mode
9675 if (data->type != EDID_DETAIL_MONITOR_RANGE)
9678 * Check for the range-limits-only flag. If flags == 1 then
9679 * no additional timing information is provided.
9680 * Default GTF, GTF secondary curve and CVT are not
9683 if (range->flags != 1)
9686 amdgpu_dm_connector->min_vfreq = range->min_vfreq;
9687 amdgpu_dm_connector->max_vfreq = range->max_vfreq;
9688 amdgpu_dm_connector->pixel_clock_mhz =
9689 range->pixel_clock_mhz * 10;
9693 if (amdgpu_dm_connector->max_vfreq -
9694 amdgpu_dm_connector->min_vfreq > 10) {
9696 freesync_capable = true;
9702 dm_con_state->freesync_capable = freesync_capable;
9704 if (connector->vrr_capable_property)
9705 drm_connector_set_vrr_capable_property(connector,
9709 static void amdgpu_dm_set_psr_caps(struct dc_link *link)
9711 uint8_t dpcd_data[EDP_PSR_RECEIVER_CAP_SIZE];
9713 if (!(link->connector_signal & SIGNAL_TYPE_EDP))
9715 if (link->type == dc_connection_none)
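/*
 * Query the sink's PSR capability from DPCD: byte 0 of DP_PSR_SUPPORT holds
 * the supported PSR version, with 0 meaning PSR is not supported.
 */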
9717 if (dm_helpers_dp_read_dpcd(NULL, link, DP_PSR_SUPPORT,
9718 dpcd_data, sizeof(dpcd_data))) {
9719 link->dpcd_caps.psr_caps.psr_version = dpcd_data[0];
9721 if (dpcd_data[0] == 0) {
9722 link->psr_settings.psr_version = DC_PSR_VERSION_UNSUPPORTED;
9723 link->psr_settings.psr_feature_enabled = false;
9725 link->psr_settings.psr_version = DC_PSR_VERSION_1;
9726 link->psr_settings.psr_feature_enabled = true;
9729 DRM_INFO("PSR support:%d\n", link->psr_settings.psr_feature_enabled);
9734 * amdgpu_dm_link_setup_psr() - configure the PSR link
9735 * @stream: stream state
9737 * Return: true on success
9739 static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream)
9741 struct dc_link *link = NULL;
9742 struct psr_config psr_config = {0};
9743 struct psr_context psr_context = {0};
9749 link = stream->link;
9751 psr_config.psr_version = link->dpcd_caps.psr_caps.psr_version;
9753 if (psr_config.psr_version > 0) {
9754 psr_config.psr_exit_link_training_required = 0x1;
9755 psr_config.psr_frame_capture_indication_req = 0;
9756 psr_config.psr_rfb_setup_time = 0x37;
9757 psr_config.psr_sdp_transmit_line_num_deadline = 0x20;
9758 psr_config.allow_smu_optimizations = 0x0;
9760 ret = dc_link_setup_psr(link, stream, &psr_config, &psr_context);
9763 DRM_DEBUG_DRIVER("PSR link: %d\n", link->psr_settings.psr_feature_enabled);
9769 * amdgpu_dm_psr_enable() - enable psr f/w
9770 * @stream: stream state
9772 * Return: true on success
9774 bool amdgpu_dm_psr_enable(struct dc_stream_state *stream)
9776 struct dc_link *link = stream->link;
9777 unsigned int vsync_rate_hz = 0;
9778 struct dc_static_screen_params params = {0};
9779 /* Calculate number of static frames before generating interrupt to
9782 // Init a fail-safe of 2 static frames
9783 unsigned int num_frames_static = 2;
9785 DRM_DEBUG_DRIVER("Enabling psr...\n");
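/*
 * Nominal refresh rate in Hz: pixel clock divided by the total frame size
 * (h_total * v_total). pix_clk_100hz is in units of 100 Hz.
 */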
9787 vsync_rate_hz = div64_u64(div64_u64((
9788 stream->timing.pix_clk_100hz * 100),
9789 stream->timing.v_total),
9790 stream->timing.h_total);
9793 * Calculate number of frames such that at least 30 ms of time has
9796 if (vsync_rate_hz != 0) {
9797 unsigned int frame_time_microsec = 1000000 / vsync_rate_hz;
9798 num_frames_static = (30000 / frame_time_microsec) + 1;
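/* Treat cursor, overlay and surface updates as screen activity for static-screen detection. */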
9801 params.triggers.cursor_update = true;
9802 params.triggers.overlay_update = true;
9803 params.triggers.surface_update = true;
9804 params.num_frames = num_frames_static;
9806 dc_stream_set_static_screen_params(link->ctx->dc,
9810 return dc_link_set_psr_allow_active(link, true, false, false);
9814 * amdgpu_dm_psr_disable() - disable psr f/w
9815 * @stream: stream state
9817 * Return: true on success
9819 static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream)
9822 DRM_DEBUG_DRIVER("Disabling psr...\n");
9824 return dc_link_set_psr_allow_active(stream->link, false, true, false);
9828 * amdgpu_dm_psr_disable_all() - disable psr f/w
9829 * if psr is enabled on any stream
9831 * Return: true on success
9833 static bool amdgpu_dm_psr_disable_all(struct amdgpu_display_manager *dm)
9835 DRM_DEBUG_DRIVER("Disabling psr if psr is enabled on any stream\n");
9836 return dc_set_psr_allow_active(dm->dc, false);
9839 void amdgpu_dm_trigger_timing_sync(struct drm_device *dev)
9841 struct amdgpu_device *adev = drm_to_adev(dev);
9842 struct dc *dc = adev->dm.dc;
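/*
 * Propagate the current force_timing_sync setting to every active stream
 * and retrigger OTG synchronization across them.
 */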
9845 mutex_lock(&adev->dm.dc_lock);
9846 if (dc->current_state) {
9847 for (i = 0; i < dc->current_state->stream_count; ++i)
9848 dc->current_state->streams[i]
9849 ->triggered_crtc_reset.enabled =
9850 adev->dm.force_timing_sync;
9852 dm_enable_per_frame_crtc_master_sync(dc->current_state);
9853 dc_trigger_sync(dc, dc->current_state);
9855 mutex_unlock(&adev->dm.dc_lock);
9858 void dm_write_reg_func(const struct dc_context *ctx, uint32_t address,
9859 uint32_t value, const char *func_name)
9861 #ifdef DM_CHECK_ADDR_0
9863 DC_ERR("invalid register write. address = 0\n");
9867 cgs_write_register(ctx->cgs_device, address, value);
9868 trace_amdgpu_dc_wreg(&ctx->perf_trace->write_count, address, value);
9871 uint32_t dm_read_reg_func(const struct dc_context *ctx, uint32_t address,
9872 const char *func_name)
9875 #ifdef DM_CHECK_ADDR_0
9877 DC_ERR("invalid register read; address = 0\n");
9882 if (ctx->dmub_srv &&
9883 ctx->dmub_srv->reg_helper_offload.gather_in_progress &&
9884 !ctx->dmub_srv->reg_helper_offload.should_burst_write) {
9889 value = cgs_read_register(ctx->cgs_device, address);
9891 trace_amdgpu_dc_rreg(&ctx->perf_trace->read_count, address, value);