2 * Copyright 2015 Advanced Micro Devices, Inc.
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
26 /* The caprices of the preprocessor require that this be declared right here */
27 #define CREATE_TRACE_POINTS
29 #include "dm_services_types.h"
31 #include "dc/inc/core_types.h"
32 #include "dal_asic_id.h"
33 #include "dmub/dmub_srv.h"
34 #include "dc/inc/hw/dmcu.h"
35 #include "dc/inc/hw/abm.h"
36 #include "dc/dc_dmub_srv.h"
37 #include "amdgpu_dm_trace.h"
41 #include "amdgpu_display.h"
42 #include "amdgpu_ucode.h"
44 #include "amdgpu_dm.h"
45 #ifdef CONFIG_DRM_AMD_DC_HDCP
46 #include "amdgpu_dm_hdcp.h"
47 #include <drm/drm_hdcp.h>
49 #include "amdgpu_pm.h"
51 #include "amd_shared.h"
52 #include "amdgpu_dm_irq.h"
53 #include "dm_helpers.h"
54 #include "amdgpu_dm_mst_types.h"
55 #if defined(CONFIG_DEBUG_FS)
56 #include "amdgpu_dm_debugfs.h"
59 #include "ivsrcid/ivsrcid_vislands30.h"
61 #include <linux/module.h>
62 #include <linux/moduleparam.h>
63 #include <linux/version.h>
64 #include <linux/types.h>
65 #include <linux/pm_runtime.h>
66 #include <linux/pci.h>
67 #include <linux/firmware.h>
68 #include <linux/component.h>
70 #include <drm/drm_atomic.h>
71 #include <drm/drm_atomic_uapi.h>
72 #include <drm/drm_atomic_helper.h>
73 #include <drm/drm_dp_mst_helper.h>
74 #include <drm/drm_fb_helper.h>
75 #include <drm/drm_fourcc.h>
76 #include <drm/drm_edid.h>
77 #include <drm/drm_vblank.h>
78 #include <drm/drm_audio_component.h>
79 #include <drm/drm_hdcp.h>
81 #if defined(CONFIG_DRM_AMD_DC_DCN)
82 #include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"
84 #include "dcn/dcn_1_0_offset.h"
85 #include "dcn/dcn_1_0_sh_mask.h"
86 #include "soc15_hw_ip.h"
87 #include "vega10_ip_offset.h"
89 #include "soc15_common.h"
92 #include "modules/inc/mod_freesync.h"
93 #include "modules/power/power_helpers.h"
94 #include "modules/inc/mod_info_packet.h"
96 #define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
97 MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);
98 #define FIRMWARE_SIENNA_CICHLID_DMUB "amdgpu/sienna_cichlid_dmcub.bin"
99 MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID_DMUB);
100 #define FIRMWARE_NAVY_FLOUNDER_DMUB "amdgpu/navy_flounder_dmcub.bin"
101 MODULE_FIRMWARE(FIRMWARE_NAVY_FLOUNDER_DMUB);
102 #define FIRMWARE_GREEN_SARDINE_DMUB "amdgpu/green_sardine_dmcub.bin"
103 MODULE_FIRMWARE(FIRMWARE_GREEN_SARDINE_DMUB);
104 #define FIRMWARE_VANGOGH_DMUB "amdgpu/vangogh_dmcub.bin"
105 MODULE_FIRMWARE(FIRMWARE_VANGOGH_DMUB);
106 #define FIRMWARE_DIMGREY_CAVEFISH_DMUB "amdgpu/dimgrey_cavefish_dmcub.bin"
107 MODULE_FIRMWARE(FIRMWARE_DIMGREY_CAVEFISH_DMUB);
109 #define FIRMWARE_RAVEN_DMCU "amdgpu/raven_dmcu.bin"
110 MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);
112 #define FIRMWARE_NAVI12_DMCU "amdgpu/navi12_dmcu.bin"
113 MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);
115 /* Number of bytes in PSP header for firmware. */
116 #define PSP_HEADER_BYTES 0x100
118 /* Number of bytes in PSP footer for firmware. */
119 #define PSP_FOOTER_BYTES 0x100
124 * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
125 * **dm**) sits between DRM and DC. It acts as a liaison, converting DRM
126 * requests into DC requests, and DC responses into DRM responses.
128 * The root control structure is &struct amdgpu_display_manager.
131 /* basic init/fini API */
132 static int amdgpu_dm_init(struct amdgpu_device *adev);
133 static void amdgpu_dm_fini(struct amdgpu_device *adev);
/* Translate the DC-reported DP dongle type into the DRM subconnector enum
 * that is exposed to userspace through the dp_subconnector property.
 * NOTE(review): extraction looks truncated here — the default case /
 * closing braces are not visible in this view. */
135 static enum drm_mode_subconnector get_subconnector_type(struct dc_link *link)
137 switch (link->dpcd_caps.dongle_type) {
138 case DISPLAY_DONGLE_NONE:
139 return DRM_MODE_SUBCONNECTOR_Native;
140 case DISPLAY_DONGLE_DP_VGA_CONVERTER:
141 return DRM_MODE_SUBCONNECTOR_VGA;
142 case DISPLAY_DONGLE_DP_DVI_CONVERTER:
143 case DISPLAY_DONGLE_DP_DVI_DONGLE:
144 return DRM_MODE_SUBCONNECTOR_DVID;
145 case DISPLAY_DONGLE_DP_HDMI_CONVERTER:
146 case DISPLAY_DONGLE_DP_HDMI_DONGLE:
147 return DRM_MODE_SUBCONNECTOR_HDMIA;
148 case DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE:
/* Unrecognized/mismatched dongle types fall through to Unknown. */
150 return DRM_MODE_SUBCONNECTOR_Unknown;
/* Refresh the connector's dp_subconnector DRM property from the currently
 * attached DC sink. Only meaningful for DisplayPort connectors; with no
 * sink attached the property is set to Unknown. */
154 static void update_subconnector_property(struct amdgpu_dm_connector *aconnector)
156 struct dc_link *link = aconnector->dc_link;
157 struct drm_connector *connector = &aconnector->base;
158 enum drm_mode_subconnector subconnector = DRM_MODE_SUBCONNECTOR_Unknown;
/* Property only exists for DP connectors — bail out for everything else. */
160 if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
/* Query the dongle type only when a sink is actually present. */
163 if (aconnector->dc_sink)
164 subconnector = get_subconnector_type(link);
166 drm_object_property_set_value(&connector->base,
167 connector->dev->mode_config.dp_subconnector_property,
172 * initializes drm_device display related structures, based on the information
173 * provided by DAL. The drm structures are: drm_crtc, drm_connector,
174 * drm_encoder, drm_mode_config
176 * Returns 0 on success
178 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
179 /* removes and deallocates the drm structures, created by the above function */
180 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);
182 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
183 struct drm_plane *plane,
184 unsigned long possible_crtcs,
185 const struct dc_plane_cap *plane_cap);
186 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
187 struct drm_plane *plane,
188 uint32_t link_index);
189 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
190 struct amdgpu_dm_connector *amdgpu_dm_connector,
192 struct amdgpu_encoder *amdgpu_encoder);
193 static int amdgpu_dm_encoder_init(struct drm_device *dev,
194 struct amdgpu_encoder *aencoder,
195 uint32_t link_index);
197 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);
199 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);
201 static int amdgpu_dm_atomic_check(struct drm_device *dev,
202 struct drm_atomic_state *state);
204 static void handle_cursor_update(struct drm_plane *plane,
205 struct drm_plane_state *old_plane_state);
207 static void amdgpu_dm_set_psr_caps(struct dc_link *link);
208 static bool amdgpu_dm_psr_enable(struct dc_stream_state *stream);
209 static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream);
210 static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream);
211 static bool amdgpu_dm_psr_disable_all(struct amdgpu_display_manager *dm);
213 static const struct drm_format_info *
214 amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd);
217 * dm_vblank_get_counter
220 * Get counter for number of vertical blanks
223 * struct amdgpu_device *adev - [in] desired amdgpu device
224 * int disp_idx - [in] which CRTC to get the counter from
227 * Counter for vertical blanks
/* Return the hardware vblank counter for the given CRTC index via DC.
 * Guards against an out-of-range index and a CRTC with no active stream
 * (error paths elided in this view — presumably return 0; confirm). */
229 static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
231 if (crtc >= adev->mode_info.num_crtc)
234 struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
236 if (acrtc->dm_irq_params.stream == NULL) {
237 DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
242 return dc_stream_get_vblank_counter(acrtc->dm_irq_params.stream);
/* Query the current scanout position for a CRTC. Packs h/v position into
 * *position and the vblank start/end lines into *vbl, each as two 16-bit
 * fields (low = vertical, high<<16 = horizontal / vblank end). */
246 static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
247 u32 *vbl, u32 *position)
249 uint32_t v_blank_start, v_blank_end, h_position, v_position;
/* Reject invalid CRTC indices. */
251 if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
254 struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
256 if (acrtc->dm_irq_params.stream == NULL) {
257 DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
263 * TODO rework base driver to use values directly.
264 * for now parse it back into reg-format
266 dc_stream_get_scanoutpos(acrtc->dm_irq_params.stream,
272 *position = v_position | (h_position << 16);
273 *vbl = v_blank_start | (v_blank_end << 16);
279 static bool dm_is_idle(void *handle)
285 static int dm_wait_for_idle(void *handle)
291 static bool dm_check_soft_reset(void *handle)
296 static int dm_soft_reset(void *handle)
/* Find the amdgpu_crtc whose OTG (output timing generator) instance
 * matches otg_inst. An instance of -1 falls back to the first CRTC
 * (used when the IRQ source cannot identify a specific OTG). */
302 static struct amdgpu_crtc *
303 get_crtc_by_otg_inst(struct amdgpu_device *adev,
306 struct drm_device *dev = adev_to_drm(adev);
307 struct drm_crtc *crtc;
308 struct amdgpu_crtc *amdgpu_crtc;
310 if (otg_inst == -1) {
312 return adev->mode_info.crtcs[0];
/* Linear scan of the mode_config CRTC list for a matching OTG instance. */
315 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
316 amdgpu_crtc = to_amdgpu_crtc(crtc);
318 if (amdgpu_crtc->otg_inst == otg_inst)
/* IRQ-context check: true when the CRTC's freesync state indicates VRR is
 * active (either variable or fixed refresh). Reads dm_irq_params, which is
 * the IRQ-safe copy of the freesync configuration. */
325 static inline bool amdgpu_dm_vrr_active_irq(struct amdgpu_crtc *acrtc)
327 return acrtc->dm_irq_params.freesync_config.state ==
328 VRR_STATE_ACTIVE_VARIABLE ||
329 acrtc->dm_irq_params.freesync_config.state ==
330 VRR_STATE_ACTIVE_FIXED;
/* Atomic-state variant of the VRR-active check: true when the dm_crtc_state's
 * freesync config is in an active (variable or fixed) VRR state. */
333 static inline bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state)
335 return dm_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE ||
336 dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
340 * dm_pflip_high_irq() - Handle pageflip interrupt
341 * @interrupt_params: ignored
343 * Handles the pageflip interrupt by notifying all interested parties
344 * that the pageflip has been completed.
/* Pageflip-completion interrupt handler. Sends the pending DRM vblank event
 * for the completed flip. In VRR mode inside the front-porch the event is
 * queued instead, so drm_crtc_handle_vblank() can stamp it with an accurate
 * count later. All event bookkeeping runs under the DRM event_lock. */
346 static void dm_pflip_high_irq(void *interrupt_params)
348 struct amdgpu_crtc *amdgpu_crtc;
349 struct common_irq_params *irq_params = interrupt_params;
350 struct amdgpu_device *adev = irq_params->adev;
352 struct drm_pending_vblank_event *e;
353 uint32_t vpos, hpos, v_blank_start, v_blank_end;
/* irq_src encodes the pflip pipe; offset by IRQ_TYPE_PFLIP to get the OTG. */
356 amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);
358 /* IRQ could occur when in initial stage */
359 /* TODO work and BO cleanup */
360 if (amdgpu_crtc == NULL) {
361 DRM_DEBUG_DRIVER("CRTC is null, returning.\n");
365 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
/* Spurious IRQ (no flip in flight) — drop the lock and bail. */
367 if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED){
368 DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d !=AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p] \n",
369 amdgpu_crtc->pflip_status,
370 AMDGPU_FLIP_SUBMITTED,
371 amdgpu_crtc->crtc_id,
373 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
377 /* page flip completed. */
378 e = amdgpu_crtc->event;
379 amdgpu_crtc->event = NULL;
384 vrr_active = amdgpu_dm_vrr_active_irq(amdgpu_crtc);
386 /* Fixed refresh rate, or VRR scanout position outside front-porch? */
388 !dc_stream_get_scanoutpos(amdgpu_crtc->dm_irq_params.stream, &v_blank_start,
389 &v_blank_end, &hpos, &vpos) ||
390 (vpos < v_blank_start)) {
391 /* Update to correct count and vblank timestamp if racing with
392 * vblank irq. This also updates to the correct vblank timestamp
393 * even in VRR mode, as scanout is past the front-porch atm.
395 drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);
397 /* Wake up userspace by sending the pageflip event with proper
398 * count and timestamp of vblank of flip completion.
401 drm_crtc_send_vblank_event(&amdgpu_crtc->base, e);
403 /* Event sent, so done with vblank for this flip */
404 drm_crtc_vblank_put(&amdgpu_crtc->base);
407 /* VRR active and inside front-porch: vblank count and
408 * timestamp for pageflip event will only be up to date after
409 * drm_crtc_handle_vblank() has been executed from late vblank
410 * irq handler after start of back-porch (vline 0). We queue the
411 * pageflip event for send-out by drm_crtc_handle_vblank() with
412 * updated timestamp and count, once it runs after us.
414 * We need to open-code this instead of using the helper
415 * drm_crtc_arm_vblank_event(), as that helper would
416 * call drm_crtc_accurate_vblank_count(), which we must
417 * not call in VRR mode while we are in front-porch!
420 /* sequence will be replaced by real count during send-out. */
421 e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
422 e->pipe = amdgpu_crtc->crtc_id;
424 list_add_tail(&e->base.link, &adev_to_drm(adev)->vblank_event_list);
428 /* Keep track of vblank of this flip for flip throttling. We use the
429 * cooked hw counter, as that one incremented at start of this vblank
430 * of pageflip completion, so last_flip_vblank is the forbidden count
431 * for queueing new pageflips if vsync + VRR is enabled.
433 amdgpu_crtc->dm_irq_params.last_flip_vblank =
434 amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base);
436 amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
437 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
439 DRM_DEBUG_DRIVER("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
440 amdgpu_crtc->crtc_id, amdgpu_crtc,
441 vrr_active, (int) !e);
/* VUPDATE interrupt handler (fires after end of front-porch). In VRR mode
 * core vblank handling is deferred to here so timestamps are valid; it also
 * drives freesync BTR (below-the-range) processing on pre-Vega ASICs. */
444 static void dm_vupdate_high_irq(void *interrupt_params)
446 struct common_irq_params *irq_params = interrupt_params;
447 struct amdgpu_device *adev = irq_params->adev;
448 struct amdgpu_crtc *acrtc;
/* irq_src encodes the VUPDATE pipe; offset to recover the OTG instance. */
452 acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);
455 vrr_active = amdgpu_dm_vrr_active_irq(acrtc);
457 DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d\n",
461 /* Core vblank handling is done here after end of front-porch in
462 * vrr mode, as vblank timestamping will give valid results
463 * while now done after front-porch. This will also deliver
464 * page-flip completion events that have been queued to us
465 * if a pageflip happened inside front-porch.
468 drm_crtc_handle_vblank(&acrtc->base);
470 /* BTR processing for pre-DCE12 ASICs */
471 if (acrtc->dm_irq_params.stream &&
472 adev->family < AMDGPU_FAMILY_AI) {
/* vrr_params is shared with other IRQ handlers — protect with event_lock. */
473 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
474 mod_freesync_handle_v_update(
475 adev->dm.freesync_module,
476 acrtc->dm_irq_params.stream,
477 &acrtc->dm_irq_params.vrr_params);
479 dc_stream_adjust_vmin_vmax(
481 acrtc->dm_irq_params.stream,
482 &acrtc->dm_irq_params.vrr_params.adjust);
483 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
490 * dm_crtc_high_irq() - Handles CRTC interrupt
491 * @interrupt_params: used for determining the CRTC instance
493 * Handles the CRTC/VSYNC interrupt by notifying DRM's VBLANK
/* CRTC/VSYNC interrupt handler (fires at start of vblank). Handles core
 * vblank in non-VRR mode, CRC capture, freesync v_update for Vega+ ASICs,
 * and flushes a pending pageflip event when all planes are disabled (HUBP
 * may be clock-gated then, so the pflip IRQ would never fire). */
496 static void dm_crtc_high_irq(void *interrupt_params)
498 struct common_irq_params *irq_params = interrupt_params;
499 struct amdgpu_device *adev = irq_params->adev;
500 struct amdgpu_crtc *acrtc;
504 acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
508 vrr_active = amdgpu_dm_vrr_active_irq(acrtc);
510 DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
511 vrr_active, acrtc->dm_irq_params.active_planes);
514 * Core vblank handling at start of front-porch is only possible
515 * in non-vrr mode, as only there vblank timestamping will give
516 * valid results while done in front-porch. Otherwise defer it
517 * to dm_vupdate_high_irq after end of front-porch.
520 drm_crtc_handle_vblank(&acrtc->base);
523 * Following stuff must happen at start of vblank, for crc
524 * computation and below-the-range btr support in vrr mode.
526 amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);
528 /* BTR updates need to happen before VUPDATE on Vega and above. */
529 if (adev->family < AMDGPU_FAMILY_AI)
532 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
/* Freesync v_update / vmin-vmax adjust only when VRR variable is active. */
534 if (acrtc->dm_irq_params.stream &&
535 acrtc->dm_irq_params.vrr_params.supported &&
536 acrtc->dm_irq_params.freesync_config.state ==
537 VRR_STATE_ACTIVE_VARIABLE) {
538 mod_freesync_handle_v_update(adev->dm.freesync_module,
539 acrtc->dm_irq_params.stream,
540 &acrtc->dm_irq_params.vrr_params);
542 dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc->dm_irq_params.stream,
543 &acrtc->dm_irq_params.vrr_params.adjust);
547 * If there aren't any active_planes then DCH HUBP may be clock-gated.
548 * In that case, pageflip completion interrupts won't fire and pageflip
549 * completion events won't get delivered. Prevent this by sending
550 * pending pageflip events from here if a flip is still pending.
552 * If any planes are enabled, use dm_pflip_high_irq() instead, to
553 * avoid race conditions between flip programming and completion,
554 * which could cause too early flip completion events.
556 if (adev->family >= AMDGPU_FAMILY_RV &&
557 acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
558 acrtc->dm_irq_params.active_planes == 0) {
560 drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
562 drm_crtc_vblank_put(&acrtc->base);
564 acrtc->pflip_status = AMDGPU_FLIP_NONE;
567 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
570 static int dm_set_clockgating_state(void *handle,
571 enum amd_clockgating_state state)
576 static int dm_set_powergating_state(void *handle,
577 enum amd_powergating_state state)
582 /* Prototypes of private functions */
583 static int dm_early_init(void* handle);
585 /* Allocate memory for FBC compressed data */
/* Allocate the GTT buffer used for FBC (frame buffer compression) data.
 * No-op unless the DC has an FBC compressor, the connector is eDP, and no
 * buffer has been allocated yet. Sized for the largest mode on the
 * connector (4 bytes per pixel). */
586 static void amdgpu_dm_fbc_init(struct drm_connector *connector)
588 struct drm_device *dev = connector->dev;
589 struct amdgpu_device *adev = drm_to_adev(dev);
590 struct dm_compressor_info *compressor = &adev->dm.compressor;
591 struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
592 struct drm_display_mode *mode;
593 unsigned long max_size = 0;
595 if (adev->dm.dc->fbc_compressor == NULL)
598 if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)
/* Already allocated — nothing to do. */
601 if (compressor->bo_ptr)
/* Find the largest mode (in pixels) to size the compressor buffer. */
605 list_for_each_entry(mode, &connector->modes, head) {
606 if (max_size < mode->htotal * mode->vtotal)
607 max_size = mode->htotal * mode->vtotal;
611 int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
612 AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
613 &compressor->gpu_addr, &compressor->cpu_addr);
616 DRM_ERROR("DM: Failed to initialize FBC\n");
618 adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
619 DRM_INFO("DM: FBC alloc %lu\n", max_size*4);
/* Audio-component callback: copy the ELD (EDID-Like Data) for the connector
 * bound to the given audio port/pin into buf (up to max_bytes). Returns the
 * ELD size via ret; holds audio_lock while iterating connectors. */
626 static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
627 int pipe, bool *enabled,
628 unsigned char *buf, int max_bytes)
630 struct drm_device *dev = dev_get_drvdata(kdev);
631 struct amdgpu_device *adev = drm_to_adev(dev);
632 struct drm_connector *connector;
633 struct drm_connector_list_iter conn_iter;
634 struct amdgpu_dm_connector *aconnector;
639 mutex_lock(&adev->dm.audio_lock);
641 drm_connector_list_iter_begin(dev, &conn_iter);
642 drm_for_each_connector_iter(connector, &conn_iter) {
643 aconnector = to_amdgpu_dm_connector(connector);
/* Skip connectors not bound to the requested audio instance. */
644 if (aconnector->audio_inst != port)
648 ret = drm_eld_size(connector->eld);
649 memcpy(buf, connector->eld, min(max_bytes, ret));
653 drm_connector_list_iter_end(&conn_iter);
655 mutex_unlock(&adev->dm.audio_lock);
657 DRM_DEBUG_KMS("Get ELD : idx=%d ret=%d en=%d\n", port, ret, *enabled);
/* Ops exposed to the HDA audio driver through the audio component framework. */
662 static const struct drm_audio_component_ops amdgpu_dm_audio_component_ops = {
663 .get_eld = amdgpu_dm_audio_component_get_eld,
/* Component-framework bind callback: wire our ops into the shared
 * drm_audio_component and remember it for ELD notifications. */
666 static int amdgpu_dm_audio_component_bind(struct device *kdev,
667 struct device *hda_kdev, void *data)
669 struct drm_device *dev = dev_get_drvdata(kdev);
670 struct amdgpu_device *adev = drm_to_adev(dev);
671 struct drm_audio_component *acomp = data;
673 acomp->ops = &amdgpu_dm_audio_component_ops;
675 adev->dm.audio_component = acomp;
/* Component-framework unbind callback: drop our reference to the shared
 * audio component (the ops pointer is presumably cleared on elided lines). */
680 static void amdgpu_dm_audio_component_unbind(struct device *kdev,
681 struct device *hda_kdev, void *data)
683 struct drm_device *dev = dev_get_drvdata(kdev);
684 struct amdgpu_device *adev = drm_to_adev(dev);
685 struct drm_audio_component *acomp = data;
689 adev->dm.audio_component = NULL;
/* Bind/unbind pair registered with the kernel component framework. */
692 static const struct component_ops amdgpu_dm_audio_component_bind_ops = {
693 .bind = amdgpu_dm_audio_component_bind,
694 .unbind = amdgpu_dm_audio_component_unbind,
/* Initialize display audio: populate one mode_info audio pin per DC audio
 * endpoint with default (unconnected) values, then register with the
 * component framework so the HDA driver can bind to us. */
697 static int amdgpu_dm_audio_init(struct amdgpu_device *adev)
704 adev->mode_info.audio.enabled = true;
706 adev->mode_info.audio.num_pins = adev->dm.dc->res_pool->audio_count;
708 for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
/* -1 / false defaults mean "no stream on this pin yet". */
709 adev->mode_info.audio.pin[i].channels = -1;
710 adev->mode_info.audio.pin[i].rate = -1;
711 adev->mode_info.audio.pin[i].bits_per_sample = -1;
712 adev->mode_info.audio.pin[i].status_bits = 0;
713 adev->mode_info.audio.pin[i].category_code = 0;
714 adev->mode_info.audio.pin[i].connected = false;
715 adev->mode_info.audio.pin[i].id =
716 adev->dm.dc->res_pool->audios[i]->inst;
717 adev->mode_info.audio.pin[i].offset = 0;
720 ret = component_add(adev->dev, &amdgpu_dm_audio_component_bind_ops);
724 adev->dm.audio_registered = true;
/* Tear down display audio: deregister from the component framework (if
 * registered) and mark audio disabled. Safe to call when never enabled. */
729 static void amdgpu_dm_audio_fini(struct amdgpu_device *adev)
734 if (!adev->mode_info.audio.enabled)
737 if (adev->dm.audio_registered) {
738 component_del(adev->dev, &amdgpu_dm_audio_component_bind_ops);
739 adev->dm.audio_registered = false;
742 /* TODO: Disable audio? */
744 adev->mode_info.audio.enabled = false;
/* Notify the bound HDA audio driver that the ELD for a pin changed
 * (hotplug / stream change). No-op if no component is bound or the audio
 * driver did not provide a pin_eld_notify hook. */
747 static void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
749 struct drm_audio_component *acomp = adev->dm.audio_component;
751 if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
752 DRM_DEBUG_KMS("Notify ELD: %d\n", pin);
754 acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
/* Bring up the DMUB (Display Microcontroller Unit B) firmware service:
 * verify HW support, copy firmware inst/bss sections and VBIOS into the
 * framebuffer windows, reset mailbox/trace/state windows, program HW
 * params, start the DMUB, wait for auto-load, then init DMCU/ABM and
 * create the DC-side DMUB server. Returns 0 when DMUB is absent/unsupported
 * (not an error) — error paths are partly elided in this view. */
759 static int dm_dmub_hw_init(struct amdgpu_device *adev)
761 const struct dmcub_firmware_header_v1_0 *hdr;
762 struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
763 struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
764 const struct firmware *dmub_fw = adev->dm.dmub_fw;
765 struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
766 struct abm *abm = adev->dm.dc->res_pool->abm;
767 struct dmub_srv_hw_params hw_params;
768 enum dmub_status status;
769 const unsigned char *fw_inst_const, *fw_bss_data;
770 uint32_t i, fw_inst_const_size, fw_bss_data_size;
774 /* DMUB isn't supported on the ASIC. */
778 DRM_ERROR("No framebuffer info for DMUB service.\n");
783 /* Firmware required for DMUB support. */
784 DRM_ERROR("No firmware provided for DMUB.\n");
788 status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support);
789 if (status != DMUB_STATUS_OK) {
790 DRM_ERROR("Error checking HW support for DMUB: %d\n", status);
794 if (!has_hw_support) {
795 DRM_INFO("DMUB unsupported on ASIC\n");
799 hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;
/* Locate the inst-const and bss-data sections inside the firmware blob. */
801 fw_inst_const = dmub_fw->data +
802 le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
805 fw_bss_data = dmub_fw->data +
806 le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
807 le32_to_cpu(hdr->inst_const_bytes);
809 /* Copy firmware and bios info into FB memory. */
/* Strip the PSP signing header/footer from the inst-const size. */
810 fw_inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
811 PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
813 fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
815 /* if adev->firmware.load_type == AMDGPU_FW_LOAD_PSP,
816 * amdgpu_ucode_init_single_fw will load dmub firmware
817 * fw_inst_const part to cw0; otherwise, the firmware back door load
818 * will be done by dm_dmub_hw_init
820 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
821 memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
825 if (fw_bss_data_size)
826 memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr,
827 fw_bss_data, fw_bss_data_size);
829 /* Copy firmware bios info into FB memory. */
830 memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,
833 /* Reset regions that need to be reset. */
834 memset(fb_info->fb[DMUB_WINDOW_4_MAILBOX].cpu_addr, 0,
835 fb_info->fb[DMUB_WINDOW_4_MAILBOX].size);
837 memset(fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr, 0,
838 fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size);
840 memset(fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr, 0,
841 fb_info->fb[DMUB_WINDOW_6_FW_STATE].size);
843 /* Initialize hardware. */
844 memset(&hw_params, 0, sizeof(hw_params));
845 hw_params.fb_base = adev->gmc.fb_start;
846 hw_params.fb_offset = adev->gmc.aper_base;
848 /* backdoor load firmware and trigger dmub running */
849 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
850 hw_params.load_inst_const = true;
853 hw_params.psp_version = dmcu->psp_version;
855 for (i = 0; i < fb_info->num_fb; ++i)
856 hw_params.fb[i] = &fb_info->fb[i];
858 status = dmub_srv_hw_init(dmub_srv, &hw_params);
859 if (status != DMUB_STATUS_OK) {
860 DRM_ERROR("Error initializing DMUB HW: %d\n", status);
864 /* Wait for firmware load to finish. */
865 status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
866 if (status != DMUB_STATUS_OK)
867 DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);
869 /* Init DMCU and ABM if available. */
871 dmcu->funcs->dmcu_init(dmcu);
872 abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
875 adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
876 if (!adev->dm.dc->ctx->dmub_srv) {
877 DRM_ERROR("Couldn't allocate DC DMUB server!\n");
881 DRM_INFO("DMUB hardware initialized: version=0x%08X\n",
882 adev->dm.dmcub_fw_version);
887 #if defined(CONFIG_DRM_AMD_DC_DCN)
/* Build the DC physical address-space config from GMC state: system
 * aperture (framebuffer + AGP window), framebuffer base/offset/top, and
 * GART page-table start/end/base, so DC can program display MMHUB clients.
 * Address shifts (>>18, >>24, >>12) match the register field granularities. */
888 static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_addr_space_config *pa_config)
891 uint32_t logical_addr_low;
892 uint32_t logical_addr_high;
893 uint32_t agp_base, agp_bot, agp_top;
894 PHYSICAL_ADDRESS_LOC page_table_start, page_table_end, page_table_base;
896 logical_addr_low = min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18;
897 pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);
899 if (adev->apu_flags & AMD_APU_IS_RAVEN2)
901 * Raven2 has a HW issue that it is unable to use the vram which
902 * is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR. So here is the
903 * workaround that increase system aperture high address (add 1)
904 * to get rid of the VM fault and hardware hang.
906 logical_addr_high = max((adev->gmc.fb_end >> 18) + 0x1, adev->gmc.agp_end >> 18);
908 logical_addr_high = max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18;
911 agp_bot = adev->gmc.agp_start >> 24;
912 agp_top = adev->gmc.agp_end >> 24;
/* 48-bit GART addresses split into a 4-bit high part and 32-bit low part. */
915 page_table_start.high_part = (u32)(adev->gmc.gart_start >> 44) & 0xF;
916 page_table_start.low_part = (u32)(adev->gmc.gart_start >> 12);
917 page_table_end.high_part = (u32)(adev->gmc.gart_end >> 44) & 0xF;
918 page_table_end.low_part = (u32)(adev->gmc.gart_end >> 12);
919 page_table_base.high_part = upper_32_bits(pt_base) & 0xF;
920 page_table_base.low_part = lower_32_bits(pt_base);
922 pa_config->system_aperture.start_addr = (uint64_t)logical_addr_low << 18;
923 pa_config->system_aperture.end_addr = (uint64_t)logical_addr_high << 18;
925 pa_config->system_aperture.agp_base = (uint64_t)agp_base << 24 ;
926 pa_config->system_aperture.agp_bot = (uint64_t)agp_bot << 24;
927 pa_config->system_aperture.agp_top = (uint64_t)agp_top << 24;
929 pa_config->system_aperture.fb_base = adev->gmc.fb_start;
930 pa_config->system_aperture.fb_offset = adev->gmc.aper_base;
931 pa_config->system_aperture.fb_top = adev->gmc.fb_end;
933 pa_config->gart_config.page_table_start_addr = page_table_start.quad_part << 12;
934 pa_config->gart_config.page_table_end_addr = page_table_end.quad_part << 12;
935 pa_config->gart_config.page_table_base_addr = page_table_base.quad_part;
937 pa_config->is_hvm_enabled = 0;
/* Main DM init: set up locks and IRQ support, build dc_init_data from the
 * ASIC/GMC/BIOS state, create the DC instance, apply debug-mask overrides,
 * bring up DMUB and DC hardware, create freesync/color/HDCP modules, then
 * build the DRM-side display structures and vblank support. On any failure
 * the (elided) error path calls amdgpu_dm_fini() for cleanup. */
942 static int amdgpu_dm_init(struct amdgpu_device *adev)
944 struct dc_init_data init_data;
945 #ifdef CONFIG_DRM_AMD_DC_HDCP
946 struct dc_callback_init init_params;
950 adev->dm.ddev = adev_to_drm(adev);
951 adev->dm.adev = adev;
953 /* Zero all the fields */
954 memset(&init_data, 0, sizeof(init_data));
955 #ifdef CONFIG_DRM_AMD_DC_HDCP
956 memset(&init_params, 0, sizeof(init_params));
959 mutex_init(&adev->dm.dc_lock);
960 mutex_init(&adev->dm.audio_lock);
962 if(amdgpu_dm_irq_init(adev)) {
963 DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
967 init_data.asic_id.chip_family = adev->family;
969 init_data.asic_id.pci_revision_id = adev->pdev->revision;
970 init_data.asic_id.hw_internal_rev = adev->external_rev_id;
972 init_data.asic_id.vram_width = adev->gmc.vram_width;
973 /* TODO: initialize init_data.asic_id.vram_type here!!!! */
974 init_data.asic_id.atombios_base_address =
975 adev->mode_info.atom_context->bios;
977 init_data.driver = adev;
979 adev->dm.cgs_device = amdgpu_cgs_create_device(adev);
981 if (!adev->dm.cgs_device) {
982 DRM_ERROR("amdgpu: failed to create cgs device.\n");
986 init_data.cgs_device = adev->dm.cgs_device;
988 init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;
/* Per-ASIC feature flags (switch cases partly elided in this view). */
990 switch (adev->asic_type) {
995 init_data.flags.gpu_vm_support = true;
996 if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
997 init_data.flags.disable_dmcu = true;
999 #if defined(CONFIG_DRM_AMD_DC_DCN)
1001 init_data.flags.gpu_vm_support = true;
/* Module-parameter feature mask overrides. */
1008 if (amdgpu_dc_feature_mask & DC_FBC_MASK)
1009 init_data.flags.fbc_support = true;
1011 if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK)
1012 init_data.flags.multi_mon_pp_mclk_switch = true;
1014 if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK)
1015 init_data.flags.disable_fractional_pwm = true;
1017 init_data.flags.power_down_display_on_boot = true;
1019 init_data.soc_bounding_box = adev->dm.soc_bounding_box;
1021 /* Display Core create. */
1022 adev->dm.dc = dc_create(&init_data);
1025 DRM_INFO("Display Core initialized with v%s!\n", DC_VER);
1027 DRM_INFO("Display Core failed to initialize with v%s!\n", DC_VER);
/* Debug-mask overrides applied after DC creation. */
1031 if (amdgpu_dc_debug_mask & DC_DISABLE_PIPE_SPLIT) {
1032 adev->dm.dc->debug.force_single_disp_pipe_split = false;
1033 adev->dm.dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
1036 if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
1037 adev->dm.dc->debug.disable_stutter = amdgpu_pp_feature_mask & PP_STUTTER_MODE ? false : true;
1039 if (amdgpu_dc_debug_mask & DC_DISABLE_STUTTER)
1040 adev->dm.dc->debug.disable_stutter = true;
1042 if (amdgpu_dc_debug_mask & DC_DISABLE_DSC)
1043 adev->dm.dc->debug.disable_dsc = true;
1045 if (amdgpu_dc_debug_mask & DC_DISABLE_CLOCK_GATING)
1046 adev->dm.dc->debug.disable_clock_gate = true;
1048 r = dm_dmub_hw_init(adev);
1050 DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
1054 dc_hardware_init(adev->dm.dc);
1056 #if defined(CONFIG_DRM_AMD_DC_DCN)
/* APUs need the MMHUB address-space config passed down to DC. */
1057 if (adev->apu_flags) {
1058 struct dc_phy_addr_space_config pa_config;
1060 mmhub_read_system_context(adev, &pa_config);
1062 // Call the DC init_memory func
1063 dc_setup_system_context(adev->dm.dc, &pa_config);
1067 adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
1068 if (!adev->dm.freesync_module) {
1070 "amdgpu: failed to initialize freesync_module.\n");
1072 DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
1073 adev->dm.freesync_module);
1075 amdgpu_dm_init_color_mod();
1077 #ifdef CONFIG_DRM_AMD_DC_HDCP
/* HDCP workqueue only on ASICs with links and Raven or newer. */
1078 if (adev->dm.dc->caps.max_links > 0 && adev->asic_type >= CHIP_RAVEN) {
1079 adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);
1081 if (!adev->dm.hdcp_workqueue)
1082 DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n");
1084 DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);
1086 dc_init_callbacks(adev->dm.dc, &init_params);
1089 if (amdgpu_dm_initialize_drm_device(adev)) {
1091 "amdgpu: failed to initialize sw for display support.\n");
1095 /* create fake encoders for MST */
1096 dm_dp_create_fake_mst_encoders(adev);
1098 /* TODO: Add_display_info? */
1100 /* TODO use dynamic cursor width */
1101 adev_to_drm(adev)->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
1102 adev_to_drm(adev)->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;
1104 if (drm_vblank_init(adev_to_drm(adev), adev->dm.display_indexes_num)) {
1106 "amdgpu: failed to initialize sw for display support.\n");
1111 DRM_DEBUG_DRIVER("KMS initialized.\n");
/* Shared error path (label elided): unwinds partial init. */
1115 amdgpu_dm_fini(adev);
/* Tear down everything amdgpu_dm_init() created, in reverse order: MST
 * fake encoders, audio, DRM display structures, HDCP workqueue, DC DMUB
 * server and DMUB BO, the DC instance itself, the CGS device, the freesync
 * module, and finally the DM mutexes. Also used as the init error path,
 * so every step is guarded against a NULL/uninitialized resource. */
1120 static void amdgpu_dm_fini(struct amdgpu_device *adev)
1124 for (i = 0; i < adev->dm.display_indexes_num; i++) {
1125 drm_encoder_cleanup(&adev->dm.mst_encoders[i].base);
1128 amdgpu_dm_audio_fini(adev);
1130 amdgpu_dm_destroy_drm_device(&adev->dm);
1132 #ifdef CONFIG_DRM_AMD_DC_HDCP
1133 if (adev->dm.hdcp_workqueue) {
1134 hdcp_destroy(adev->dm.hdcp_workqueue);
1135 adev->dm.hdcp_workqueue = NULL;
1139 dc_deinit_callbacks(adev->dm.dc);
1141 if (adev->dm.dc->ctx->dmub_srv) {
1142 dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);
1143 adev->dm.dc->ctx->dmub_srv = NULL;
1146 if (adev->dm.dmub_bo)
1147 amdgpu_bo_free_kernel(&adev->dm.dmub_bo,
1148 &adev->dm.dmub_bo_gpu_addr,
1149 &adev->dm.dmub_bo_cpu_addr);
1151 /* DC Destroy TODO: Replace destroy DAL */
1153 dc_destroy(&adev->dm.dc);
1155 * TODO: pageflip, vlank interrupt
1157 * amdgpu_dm_irq_fini(adev);
1160 if (adev->dm.cgs_device) {
1161 amdgpu_cgs_destroy_device(adev->dm.cgs_device);
1162 adev->dm.cgs_device = NULL;
1164 if (adev->dm.freesync_module) {
1165 mod_freesync_destroy(adev->dm.freesync_module);
1166 adev->dm.freesync_module = NULL;
1169 mutex_destroy(&adev->dm.audio_lock);
1170 mutex_destroy(&adev->dm.dc_lock);
/*
 * load_dmcu_fw() - Request and register DMCU microcode for the current ASIC.
 * @adev: amdgpu device to load firmware for.
 *
 * Picks a DMCU firmware image based on asic_type (only Raven/Picasso and
 * Navi12 select one here; most ASICs fall through with none required),
 * fetches and validates it, then registers the ERAM and INTV sections
 * with the PSP firmware loader. DMCU firmware is optional: a missing
 * image is not an error.
 */
1175 static int load_dmcu_fw(struct amdgpu_device *adev)
1177 const char *fw_name_dmcu = NULL;
1179 const struct dmcu_firmware_header_v1_0 *hdr;
1181 switch(adev->asic_type) {
1182 #if defined(CONFIG_DRM_AMD_DC_SI)
1197 case CHIP_POLARIS11:
1198 case CHIP_POLARIS10:
1199 case CHIP_POLARIS12:
1207 case CHIP_SIENNA_CICHLID:
1208 case CHIP_NAVY_FLOUNDER:
1209 case CHIP_DIMGREY_CAVEFISH:
1213 fw_name_dmcu = FIRMWARE_NAVI12_DMCU;
/* Raven family: Picasso and Raven2 revisions share the Raven DMCU image. */
1216 if (ASICREV_IS_PICASSO(adev->external_rev_id))
1217 fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
1218 else if (ASICREV_IS_RAVEN2(adev->external_rev_id))
1219 fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
1224 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
/* DMCU can only be loaded through PSP; bail quietly for other loaders. */
1228 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
1229 DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n");
1233 r = request_firmware_direct(&adev->dm.fw_dmcu, fw_name_dmcu, adev->dev);
1235 /* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
1236 DRM_DEBUG_KMS("dm: DMCU firmware not found\n")
1237 adev->dm.fw_dmcu = NULL;
1241 dev_err(adev->dev, "amdgpu_dm: Can't load firmware \"%s\"\n",
1246 r = amdgpu_ucode_validate(adev->dm.fw_dmcu);
1248 dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
/* Validation failed: drop the reference so fini paths see a clean state. */
1250 release_firmware(adev->dm.fw_dmcu);
1251 adev->dm.fw_dmcu = NULL;
1255 hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data;
/* Register the ERAM section (image minus the interrupt-vector bytes). */
1256 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM;
1257 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu;
1258 adev->firmware.fw_size +=
1259 ALIGN(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
/* Register the interrupt-vector section separately. */
1261 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV;
1262 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu;
1263 adev->firmware.fw_size +=
1264 ALIGN(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1266 adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version);
1268 DRM_DEBUG_KMS("PSP loading DMCU firmware\n");
1273 static uint32_t amdgpu_dm_dmub_reg_read(void *ctx, uint32_t address)
1275 struct amdgpu_device *adev = ctx;
1277 return dm_read_reg(adev->dm.dc->ctx, address);
1280 static void amdgpu_dm_dmub_reg_write(void *ctx, uint32_t address,
1283 struct amdgpu_device *adev = ctx;
1285 return dm_write_reg(adev->dm.dc->ctx, address, value);
/*
 * dm_dmub_sw_init() - Software-side setup of the DMUB microcontroller service.
 * @adev: amdgpu device to initialize DMUB for.
 *
 * Selects the DMUB firmware image and ASIC enum by asic_type, requests
 * and validates the firmware, optionally registers it with the PSP
 * loader, creates the dmub_srv object with register-I/O callbacks,
 * computes the memory regions the firmware needs, allocates a VRAM BO
 * to back them, and records the per-region framebuffer info.
 */
1288 static int dm_dmub_sw_init(struct amdgpu_device *adev)
1290 struct dmub_srv_create_params create_params;
1291 struct dmub_srv_region_params region_params;
1292 struct dmub_srv_region_info region_info;
1293 struct dmub_srv_fb_params fb_params;
1294 struct dmub_srv_fb_info *fb_info;
1295 struct dmub_srv *dmub_srv;
1296 const struct dmcub_firmware_header_v1_0 *hdr;
1297 const char *fw_name_dmub;
1298 enum dmub_asic dmub_asic;
1299 enum dmub_status status;
1302 switch (adev->asic_type) {
1304 dmub_asic = DMUB_ASIC_DCN21;
1305 fw_name_dmub = FIRMWARE_RENOIR_DMUB;
/* Green Sardine is a Renoir derivative with its own firmware image. */
1306 if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
1307 fw_name_dmub = FIRMWARE_GREEN_SARDINE_DMUB;
1309 case CHIP_SIENNA_CICHLID:
1310 dmub_asic = DMUB_ASIC_DCN30;
1311 fw_name_dmub = FIRMWARE_SIENNA_CICHLID_DMUB;
1313 case CHIP_NAVY_FLOUNDER:
1314 dmub_asic = DMUB_ASIC_DCN30;
1315 fw_name_dmub = FIRMWARE_NAVY_FLOUNDER_DMUB;
1318 dmub_asic = DMUB_ASIC_DCN301;
1319 fw_name_dmub = FIRMWARE_VANGOGH_DMUB;
1321 case CHIP_DIMGREY_CAVEFISH:
1322 dmub_asic = DMUB_ASIC_DCN302;
1323 fw_name_dmub = FIRMWARE_DIMGREY_CAVEFISH_DMUB;
1327 /* ASIC doesn't support DMUB. */
1331 r = request_firmware_direct(&adev->dm.dmub_fw, fw_name_dmub, adev->dev);
1333 DRM_ERROR("DMUB firmware loading failed: %d\n", r);
1337 r = amdgpu_ucode_validate(adev->dm.dmub_fw);
1339 DRM_ERROR("Couldn't validate DMUB firmware: %d\n", r);
1343 hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;
/* With PSP loading, register the instruction section as a PSP ucode. */
1345 if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
1346 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
1347 AMDGPU_UCODE_ID_DMCUB;
1348 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw =
1350 adev->firmware.fw_size +=
1351 ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);
1353 DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
1354 adev->dm.dmcub_fw_version);
1357 adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);
1359 adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL);
1360 dmub_srv = adev->dm.dmub_srv;
1363 DRM_ERROR("Failed to allocate DMUB service!\n");
/* Hook the DMUB service up to our register I/O helpers. */
1367 memset(&create_params, 0, sizeof(create_params));
1368 create_params.user_ctx = adev;
1369 create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read;
1370 create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write;
1371 create_params.asic = dmub_asic;
1373 /* Create the DMUB service. */
1374 status = dmub_srv_create(dmub_srv, &create_params);
1375 if (status != DMUB_STATUS_OK) {
1376 DRM_ERROR("Error creating DMUB service: %d\n", status);
1380 /* Calculate the size of all the regions for the DMUB service. */
/* NOTE(review): "®ion_params" below is mojibake for "&region_params"
 * ("&reg" was mis-decoded as the '®' sign) — restore the original '&'. */
1381 memset(&region_params, 0, sizeof(region_params));
/* inst_const excludes the PSP header/footer wrapped around the image. */
1383 region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
1384 PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
1385 region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
1386 region_params.vbios_size = adev->bios_size;
1387 region_params.fw_bss_data = region_params.bss_data_size ?
1388 adev->dm.dmub_fw->data +
1389 le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1390 le32_to_cpu(hdr->inst_const_bytes) : NULL;
1391 region_params.fw_inst_const =
1392 adev->dm.dmub_fw->data +
1393 le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
/* NOTE(review): "®ion_params"/"®ion_info" mojibake again — should be
 * "&region_params" and "&region_info". */
1396 status = dmub_srv_calc_region_info(dmub_srv, &region_params,
1399 if (status != DMUB_STATUS_OK) {
1400 DRM_ERROR("Error calculating DMUB region info: %d\n", status);
1405 * Allocate a framebuffer based on the total size of all the regions.
1406 * TODO: Move this into GART.
1408 r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE,
1409 AMDGPU_GEM_DOMAIN_VRAM, &adev->dm.dmub_bo,
1410 &adev->dm.dmub_bo_gpu_addr,
1411 &adev->dm.dmub_bo_cpu_addr);
1415 /* Rebase the regions on the framebuffer address. */
1416 memset(&fb_params, 0, sizeof(fb_params));
1417 fb_params.cpu_addr = adev->dm.dmub_bo_cpu_addr;
1418 fb_params.gpu_addr = adev->dm.dmub_bo_gpu_addr;
/* NOTE(review): "®ion_info" is mojibake for "&region_info". */
1419 fb_params.region_info = &region_info;
1421 adev->dm.dmub_fb_info =
1422 kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL);
1423 fb_info = adev->dm.dmub_fb_info;
1427 "Failed to allocate framebuffer info for DMUB service!\n");
1431 status = dmub_srv_calc_fb_info(dmub_srv, &fb_params, fb_info);
1432 if (status != DMUB_STATUS_OK) {
1433 DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
/*
 * dm_sw_init() - amd_ip_funcs .sw_init hook for the DM IP block.
 * @handle: opaque pointer to the &struct amdgpu_device.
 *
 * Performs DMUB software setup, then loads DMCU firmware.
 * Return: 0 on success or a negative errno from the sub-steps.
 */
1440 static int dm_sw_init(void *handle)
1442 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1445 r = dm_dmub_sw_init(adev);
1449 return load_dmcu_fw(adev);
/*
 * dm_sw_fini() - amd_ip_funcs .sw_fini hook; undoes dm_sw_init().
 * @handle: opaque pointer to the &struct amdgpu_device.
 *
 * Frees the DMUB framebuffer info and service object and drops the DMUB
 * and DMCU firmware references. Pointers are NULLed so repeated teardown
 * paths stay safe.
 */
1452 static int dm_sw_fini(void *handle)
1454 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1456 kfree(adev->dm.dmub_fb_info);
1457 adev->dm.dmub_fb_info = NULL;
1459 if (adev->dm.dmub_srv) {
1460 dmub_srv_destroy(adev->dm.dmub_srv);
1461 adev->dm.dmub_srv = NULL;
/* release_firmware() tolerates NULL, so no guard is needed here. */
1464 release_firmware(adev->dm.dmub_fw);
1465 adev->dm.dmub_fw = NULL;
1467 release_firmware(adev->dm.fw_dmcu);
1468 adev->dm.fw_dmcu = NULL;
/*
 * detect_mst_link_for_all_connectors() - Start MST topology management on
 * every connector that is an MST branch.
 * @dev: DRM device whose connector list is walked.
 *
 * For each connector whose DC link reports an MST branch (and that has a
 * usable AUX channel), enables MST on its topology manager. On failure
 * the link is downgraded to a single (SST) connection.
 */
1473 static int detect_mst_link_for_all_connectors(struct drm_device *dev)
1475 struct amdgpu_dm_connector *aconnector;
1476 struct drm_connector *connector;
1477 struct drm_connector_list_iter iter;
1480 drm_connector_list_iter_begin(dev, &iter);
1481 drm_for_each_connector_iter(connector, &iter) {
1482 aconnector = to_amdgpu_dm_connector(connector);
1483 if (aconnector->dc_link->type == dc_connection_mst_branch &&
1484 aconnector->mst_mgr.aux) {
1485 DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
1487 aconnector->base.base.id);
1489 ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
/* Could not start MST: fall back to treating the link as SST. */
1491 DRM_ERROR("DM_MST: Failed to start MST\n");
1492 aconnector->dc_link->type =
1493 dc_connection_single;
1498 drm_connector_list_iter_end(&iter);
/*
 * dm_late_init() - amd_ip_funcs .late_init hook for the DM IP block.
 * @handle: opaque pointer to the &struct amdgpu_device.
 *
 * Builds the ABM (adaptive backlight) IRAM parameters — a linear 16-entry
 * backlight LUT plus ramping constants — and programs them into either
 * the DMCU (legacy) or the DMCUB (ABM 2.4+) depending on which exists.
 * Finally kicks off MST link detection for all connectors.
 */
1503 static int dm_late_init(void *handle)
1505 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1507 struct dmcu_iram_parameters params;
1508 unsigned int linear_lut[16];
1510 struct dmcu *dmcu = NULL;
1513 dmcu = adev->dm.dc->res_pool->dmcu;
/* Identity (linear) backlight transfer curve: 16 evenly spaced points. */
1515 for (i = 0; i < 16; i++)
1516 linear_lut[i] = 0xFFFF * i / 15;
1519 params.backlight_ramping_start = 0xCCCC;
1520 params.backlight_ramping_reduction = 0xCCCCCCCC;
1521 params.backlight_lut_array_size = 16;
1522 params.backlight_lut_array = linear_lut;
1524 /* Min backlight level after ABM reduction, Don't allow below 1%
1525 * 0xFFFF x 0.01 = 0x28F
1527 params.min_abm_backlight = 0x28F;
1529 /* In the case where abm is implemented on dmcub,
1530 * dmcu object will be null.
1531 * ABM 2.4 and up are implemented on dmcub.
1534 ret = dmcu_load_iram(dmcu, params);
1535 else if (adev->dm.dc->ctx->dmub_srv)
1536 ret = dmub_init_abm_config(adev->dm.dc->res_pool, params);
1541 return detect_mst_link_for_all_connectors(adev_to_drm(adev));
/*
 * s3_handle_mst() - Suspend or resume all MST topology managers for S3.
 * @dev:     DRM device whose connectors are walked.
 * @suspend: true to suspend topologies, false to resume them.
 *
 * Only root MST branch connectors are handled (connectors with an
 * mst_port are downstream MST devices managed by the DRM MST framework).
 * If a topology fails to resume, MST is turned off on it and a hotplug
 * event is raised so userspace re-probes.
 */
1544 static void s3_handle_mst(struct drm_device *dev, bool suspend)
1546 struct amdgpu_dm_connector *aconnector;
1547 struct drm_connector *connector;
1548 struct drm_connector_list_iter iter;
1549 struct drm_dp_mst_topology_mgr *mgr;
1551 bool need_hotplug = false;
1553 drm_connector_list_iter_begin(dev, &iter);
1554 drm_for_each_connector_iter(connector, &iter) {
1555 aconnector = to_amdgpu_dm_connector(connector);
/* Skip non-MST links and downstream MST connectors. */
1556 if (aconnector->dc_link->type != dc_connection_mst_branch ||
1557 aconnector->mst_port)
1560 mgr = &aconnector->mst_mgr;
1563 drm_dp_mst_topology_mgr_suspend(mgr);
1565 ret = drm_dp_mst_topology_mgr_resume(mgr, true);
/* Resume failed: disable MST and remember to notify userspace. */
1567 drm_dp_mst_topology_mgr_set_mst(mgr, false);
1568 need_hotplug = true;
1572 drm_connector_list_iter_end(&iter);
1575 drm_kms_helper_hotplug_event(dev);
/*
 * amdgpu_dm_smu_write_watermarks_table() - Push DCN watermark settings to SMU.
 * @adev: amdgpu device.
 *
 * Applies only to Navi10/12/14 (software SMU); other ASICs return early
 * via the switch below. See the long comment in the body for why Renoir
 * is excluded on Linux.
 */
1578 static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
1580 struct smu_context *smu = &adev->smu;
1583 if (!is_support_sw_smu(adev))
1586 /* This interface is for dGPU Navi1x.Linux dc-pplib interface depends
1587 * on window driver dc implementation.
1588 * For Navi1x, clock settings of dcn watermarks are fixed. the settings
1589 * should be passed to smu during boot up and resume from s3.
1590 * boot up: dc calculate dcn watermark clock settings within dc_create,
1591 * dcn20_resource_construct
1592 * then call pplib functions below to pass the settings to smu:
1593 * smu_set_watermarks_for_clock_ranges
1594 * smu_set_watermarks_table
1595 * navi10_set_watermarks_table
1596 * smu_write_watermarks_table
1598 * For Renoir, clock settings of dcn watermark are also fixed values.
1599 * dc has implemented different flow for window driver:
1600 * dc_hardware_init / dc_set_power_state
1605 * smu_set_watermarks_for_clock_ranges
1606 * renoir_set_watermarks_table
1607 * smu_write_watermarks_table
1610 * dc_hardware_init -> amdgpu_dm_init
1611 * dc_set_power_state --> dm_resume
1613 * therefore, this function apply to navi10/12/14 but not Renoir
1616 switch(adev->asic_type) {
1625 ret = smu_write_watermarks_table(smu);
1627 DRM_ERROR("Failed to update WMTABLE!\n");
1635 * dm_hw_init() - Initialize DC device
1636 * @handle: The base driver device containing the amdgpu_dm device.
1638 * Initialize the &struct amdgpu_display_manager device. This involves calling
1639 * the initializers of each DM component, then populating the struct with them.
1641 * Although the function implies hardware initialization, both hardware and
1642 * software are initialized here. Splitting them out to their relevant init
1643 * hooks is a future TODO item.
1645 * Some notable things that are initialized here:
1647 * - Display Core, both software and hardware
1648 * - DC modules that we need (freesync and color management)
1649 * - DRM software states
1650 * - Interrupt sources and handlers
1652 * - Debug FS entries, if enabled
1654 static int dm_hw_init(void *handle)
1656 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1657 /* Create DAL display manager */
1658 amdgpu_dm_init(adev);
/* Enable hotplug-detect interrupt sources once the DM exists. */
1659 amdgpu_dm_hpd_init(adev);
1665 * dm_hw_fini() - Teardown DC device
1666 * @handle: The base driver device containing the amdgpu_dm device.
1668 * Teardown components within &struct amdgpu_display_manager that require
1669 * cleanup. This involves cleaning up the DRM device, DC, and any modules that
1670 * were loaded. Also flush IRQ workqueues and disable them.
1672 static int dm_hw_fini(void *handle)
1674 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
/* Disable HPD first so no new hotplug work is queued during teardown. */
1676 amdgpu_dm_hpd_fini(adev);
1678 amdgpu_dm_irq_fini(adev);
1679 amdgpu_dm_fini(adev);
1684 static int dm_enable_vblank(struct drm_crtc *crtc);
1685 static void dm_disable_vblank(struct drm_crtc *crtc);
/*
 * dm_gpureset_toggle_interrupts() - Enable/disable per-CRTC interrupts
 * around a GPU reset.
 * @adev:   amdgpu device.
 * @state:  DC state whose streams identify the active CRTCs.
 * @enable: true to (re-)enable pflip + vblank IRQs, false to disable.
 *
 * Only CRTCs whose stream has at least one plane are touched.
 */
1687 static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev,
1688 struct dc_state *state, bool enable)
1690 enum dc_irq_source irq_source;
1691 struct amdgpu_crtc *acrtc;
1695 for (i = 0; i < state->stream_count; i++) {
1696 acrtc = get_crtc_by_otg_inst(
1697 adev, state->stream_status[i].primary_otg_inst);
1699 if (acrtc && state->stream_status[i].plane_count != 0) {
1700 irq_source = IRQ_TYPE_PFLIP + acrtc->otg_inst;
1701 rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
/* NOTE(review): the IRQ toggled above is pflip, but this debug text says
 * "vupdate" — the message looks stale; confirm against upstream. */
1702 DRM_DEBUG("crtc %d - vupdate irq %sabling: r=%d\n",
1703 acrtc->crtc_id, enable ? "en" : "dis", rc);
1705 DRM_WARN("Failed to %s pflip interrupts\n",
1706 enable ? "enable" : "disable");
1709 rc = dm_enable_vblank(&acrtc->base);
1711 DRM_WARN("Failed to enable vblank interrupts\n");
1713 dm_disable_vblank(&acrtc->base);
/*
 * amdgpu_dm_commit_zero_streams() - Commit a DC state with no streams.
 * @dc: display core instance.
 *
 * Copies the current DC state, strips every stream (removing each
 * stream's planes first), validates the resulting empty state and
 * commits it. Used during suspend/GPU reset to quiesce the display.
 *
 * Return: DC_OK on success, or the failing dc_status.
 */
1721 static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc)
1723 struct dc_state *context = NULL;
1724 enum dc_status res = DC_ERROR_UNEXPECTED;
1726 struct dc_stream_state *del_streams[MAX_PIPES];
1727 int del_streams_count = 0;
1729 memset(del_streams, 0, sizeof(del_streams));
1731 context = dc_create_state(dc);
1732 if (context == NULL)
1733 goto context_alloc_fail;
1735 dc_resource_state_copy_construct_current(dc, context);
1737 /* First remove from context all streams */
1738 for (i = 0; i < context->stream_count; i++) {
1739 struct dc_stream_state *stream = context->streams[i];
1741 del_streams[del_streams_count++] = stream;
1744 /* Remove all planes for removed streams and then remove the streams */
1745 for (i = 0; i < del_streams_count; i++) {
1746 if (!dc_rem_all_planes_for_stream(dc, del_streams[i], context)) {
1747 res = DC_FAIL_DETACH_SURFACES;
1751 res = dc_remove_stream_from_ctx(dc, context, del_streams[i]);
1757 res = dc_validate_global_state(dc, context, false);
1760 DRM_ERROR("%s:resource validation failed, dc_status:%d\n", __func__, res);
1764 res = dc_commit_state(dc, context);
/* Drop our reference to the temporary context on all paths. */
1767 dc_release_state(context);
/*
 * dm_suspend() - amd_ip_funcs .suspend hook for the DM IP block.
 * @handle: the &struct amdgpu_device.
 *
 * Two paths: during a GPU reset the current DC state is cached, CRTC
 * interrupts are disabled and all streams are committed away; on normal
 * S3 suspend the atomic state is saved, MST topologies suspended, IRQs
 * quiesced and DC dropped to the D3 power state.
 */
1773 static int dm_suspend(void *handle)
1775 struct amdgpu_device *adev = handle;
1776 struct amdgpu_display_manager *dm = &adev->dm;
1779 if (amdgpu_in_reset(adev)) {
1780 mutex_lock(&dm->dc_lock);
/* cached_dc_state is restored by dm_resume() after the reset. */
1781 dm->cached_dc_state = dc_copy_state(dm->dc->current_state);
1783 dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false);
1785 amdgpu_dm_commit_zero_streams(dm->dc);
1787 amdgpu_dm_irq_suspend(adev);
1792 WARN_ON(adev->dm.cached_state);
1793 adev->dm.cached_state = drm_atomic_helper_suspend(adev_to_drm(adev));
1795 s3_handle_mst(adev_to_drm(adev), true);
1797 amdgpu_dm_irq_suspend(adev);
1800 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
/*
 * amdgpu_dm_find_first_crtc_matching_connector() - Find the first connector
 * in an atomic state whose new state targets @crtc.
 * @state: atomic state to search.
 * @crtc:  CRTC to match against each new connector state.
 *
 * Return: the matching amdgpu_dm_connector, or (per the loop structure)
 * presumably NULL when none matches — the fall-through return is not
 * visible in this excerpt.
 */
1805 static struct amdgpu_dm_connector *
1806 amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
1807 struct drm_crtc *crtc)
1810 struct drm_connector_state *new_con_state;
1811 struct drm_connector *connector;
1812 struct drm_crtc *crtc_from_state;
1814 for_each_new_connector_in_state(state, connector, new_con_state, i) {
1815 crtc_from_state = new_con_state->crtc;
1817 if (crtc_from_state == crtc)
1818 return to_amdgpu_dm_connector(connector);
/*
 * emulated_link_detect() - Fake a link detection for a forced connector.
 * @link: DC link to populate with an emulated sink.
 *
 * Used when userspace forces a connector on but no physical sink was
 * detected. Marks the link as dc_connection_none, creates a sink whose
 * signal type matches the connector, and attempts to read a (forced)
 * EDID for it.
 */
1824 static void emulated_link_detect(struct dc_link *link)
1826 struct dc_sink_init_data sink_init_data = { 0 };
1827 struct display_sink_capability sink_caps = { 0 };
1828 enum dc_edid_status edid_status;
1829 struct dc_context *dc_ctx = link->ctx;
1830 struct dc_sink *sink = NULL;
1831 struct dc_sink *prev_sink = NULL;
1833 link->type = dc_connection_none;
1834 prev_sink = link->local_sink;
/* Keep the previous sink alive while we swap in the emulated one. */
1836 if (prev_sink != NULL)
1837 dc_sink_retain(prev_sink);
1839 switch (link->connector_signal) {
1840 case SIGNAL_TYPE_HDMI_TYPE_A: {
1841 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1842 sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
1846 case SIGNAL_TYPE_DVI_SINGLE_LINK: {
1847 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1848 sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
1852 case SIGNAL_TYPE_DVI_DUAL_LINK: {
1853 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1854 sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
1858 case SIGNAL_TYPE_LVDS: {
1859 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1860 sink_caps.signal = SIGNAL_TYPE_LVDS;
1864 case SIGNAL_TYPE_EDP: {
1865 sink_caps.transaction_type =
1866 DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
1867 sink_caps.signal = SIGNAL_TYPE_EDP;
1871 case SIGNAL_TYPE_DISPLAY_PORT: {
1872 sink_caps.transaction_type =
1873 DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
/* DP intentionally emulates a VIRTUAL sink rather than a real DP sink. */
1874 sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
1879 DC_ERROR("Invalid connector type! signal:%d\n",
1880 link->connector_signal);
1884 sink_init_data.link = link;
1885 sink_init_data.sink_signal = sink_caps.signal;
1887 sink = dc_sink_create(&sink_init_data);
1889 DC_ERROR("Failed to create sink!\n");
1893 /* dc_sink_create returns a new reference */
1894 link->local_sink = sink;
1896 edid_status = dm_helpers_read_local_edid(
1901 if (edid_status != EDID_OK)
1902 DC_ERROR("Failed to read EDID");
/*
 * dm_gpureset_commit_state() - Re-commit every stream/plane of a cached DC
 * state after a GPU reset.
 * @dc_state: the state cached before the reset.
 * @dm:       display manager to commit through.
 *
 * Builds one heap-allocated update bundle (too large for the stack) and,
 * per stream, flags all its surfaces for a full update before committing.
 */
1906 static void dm_gpureset_commit_state(struct dc_state *dc_state,
1907 struct amdgpu_display_manager *dm)
1910 struct dc_surface_update surface_updates[MAX_SURFACES];
1911 struct dc_plane_info plane_infos[MAX_SURFACES];
1912 struct dc_scaling_info scaling_infos[MAX_SURFACES];
1913 struct dc_flip_addrs flip_addrs[MAX_SURFACES];
1914 struct dc_stream_update stream_update;
1918 bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
1921 dm_error("Failed to allocate update bundle\n");
1925 for (k = 0; k < dc_state->stream_count; k++) {
1926 bundle->stream_update.stream = dc_state->streams[k];
/* NOTE(review): indexes stream_status[0] for every k — confirm whether
 * stream_status[k] was intended; matches upstream as written. */
1928 for (m = 0; m < dc_state->stream_status->plane_count; m++) {
1929 bundle->surface_updates[m].surface =
1930 dc_state->stream_status->plane_states[m];
1931 bundle->surface_updates[m].surface->force_full_update =
1934 dc_commit_updates_for_stream(
1935 dm->dc, bundle->surface_updates,
1936 dc_state->stream_status->plane_count,
1937 dc_state->streams[k], &bundle->stream_update, dc_state);
/*
 * dm_set_dpms_off() - Turn off DPMS for the stream driving @link.
 * @link: DC link whose active stream should be powered down.
 *
 * Finds the stream currently attached to the link and commits a single
 * stream update with dpms_off = true, under the DC lock.
 */
1946 static void dm_set_dpms_off(struct dc_link *link)
1948 struct dc_stream_state *stream_state;
1949 struct amdgpu_dm_connector *aconnector = link->priv;
1950 struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
1951 struct dc_stream_update stream_update;
1952 bool dpms_off = true;
1954 memset(&stream_update, 0, sizeof(stream_update));
1955 stream_update.dpms_off = &dpms_off;
1957 mutex_lock(&adev->dm.dc_lock);
1958 stream_state = dc_stream_find_from_link(link);
/* No active stream on this link: nothing to power off. */
1960 if (stream_state == NULL) {
1961 DRM_DEBUG_DRIVER("Error finding stream state associated with link!\n");
1962 mutex_unlock(&adev->dm.dc_lock);
1966 stream_update.stream = stream_state;
1967 dc_commit_updates_for_stream(stream_state->ctx->dc, NULL, 0,
1968 stream_state, &stream_update,
1969 stream_state->ctx->dc->current_state);
1970 mutex_unlock(&adev->dm.dc_lock);
/*
 * dm_resume() - amd_ip_funcs .resume hook; inverse of dm_suspend().
 * @handle: the &struct amdgpu_device.
 *
 * GPU-reset path: re-init DMUB, power DC up, re-commit the cached DC
 * state and re-enable interrupts. Normal S3 path: rebuild the private
 * DC state, re-init DMUB, power up, resume MST, re-detect every
 * connector, then restore the saved atomic state with a forced modeset.
 */
1973 static int dm_resume(void *handle)
1975 struct amdgpu_device *adev = handle;
1976 struct drm_device *ddev = adev_to_drm(adev);
1977 struct amdgpu_display_manager *dm = &adev->dm;
1978 struct amdgpu_dm_connector *aconnector;
1979 struct drm_connector *connector;
1980 struct drm_connector_list_iter iter;
1981 struct drm_crtc *crtc;
1982 struct drm_crtc_state *new_crtc_state;
1983 struct dm_crtc_state *dm_new_crtc_state;
1984 struct drm_plane *plane;
1985 struct drm_plane_state *new_plane_state;
1986 struct dm_plane_state *dm_new_plane_state;
1987 struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
1988 enum dc_connection_type new_connection_type = dc_connection_none;
1989 struct dc_state *dc_state;
1992 if (amdgpu_in_reset(adev)) {
/* Restore the exact state cached by dm_suspend() during the reset. */
1993 dc_state = dm->cached_dc_state;
1995 r = dm_dmub_hw_init(adev);
1997 DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
1999 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2002 amdgpu_dm_irq_resume_early(adev);
/* Force full updates on every cached stream/plane before re-commit. */
2004 for (i = 0; i < dc_state->stream_count; i++) {
2005 dc_state->streams[i]->mode_changed = true;
2006 for (j = 0; j < dc_state->stream_status->plane_count; j++) {
2007 dc_state->stream_status->plane_states[j]->update_flags.raw
2012 WARN_ON(!dc_commit_state(dm->dc, dc_state));
2014 dm_gpureset_commit_state(dm->cached_dc_state, dm);
2016 dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, true);
2018 dc_release_state(dm->cached_dc_state);
2019 dm->cached_dc_state = NULL;
2021 amdgpu_dm_irq_resume_late(adev);
2023 mutex_unlock(&dm->dc_lock);
2027 /* Recreate dc_state - DC invalidates it when setting power state to S3. */
2028 dc_release_state(dm_state->context);
2029 dm_state->context = dc_create_state(dm->dc);
2030 /* TODO: Remove dc_state->dccg, use dc->dccg directly. */
2031 dc_resource_state_construct(dm->dc, dm_state->context);
2033 /* Before powering on DC we need to re-initialize DMUB. */
2034 r = dm_dmub_hw_init(adev);
2036 DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
2038 /* power on hardware */
2039 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2041 /* program HPD filter */
2045 * early enable HPD Rx IRQ, should be done before set mode as short
2046 * pulse interrupts are used for MST
2048 amdgpu_dm_irq_resume_early(adev);
2050 /* On resume we need to rewrite the MSTM control bits to enable MST*/
2051 s3_handle_mst(ddev, false);
/* Re-run sink detection on every non-downstream-MST connector. */
2054 drm_connector_list_iter_begin(ddev, &iter);
2055 drm_for_each_connector_iter(connector, &iter) {
2056 aconnector = to_amdgpu_dm_connector(connector);
2059 * this is the case when traversing through already created
2060 * MST connectors, should be skipped
2062 if (aconnector->mst_port)
2065 mutex_lock(&aconnector->hpd_lock);
2066 if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2067 DRM_ERROR("KMS: Failed to detect connector\n");
2069 if (aconnector->base.force && new_connection_type == dc_connection_none)
2070 emulated_link_detect(aconnector->dc_link);
2072 dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
2074 if (aconnector->fake_enable && aconnector->dc_link->local_sink)
2075 aconnector->fake_enable = false;
2077 if (aconnector->dc_sink)
2078 dc_sink_release(aconnector->dc_sink);
2079 aconnector->dc_sink = NULL;
2080 amdgpu_dm_update_connector_after_detect(aconnector);
2081 mutex_unlock(&aconnector->hpd_lock);
2083 drm_connector_list_iter_end(&iter);
2085 /* Force mode set in atomic commit */
2086 for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
2087 new_crtc_state->active_changed = true;
2090 * atomic_check is expected to create the dc states. We need to release
2091 * them here, since they were duplicated as part of the suspend
2094 for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
2095 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
2096 if (dm_new_crtc_state->stream) {
2097 WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
2098 dc_stream_release(dm_new_crtc_state->stream);
2099 dm_new_crtc_state->stream = NULL;
2103 for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
2104 dm_new_plane_state = to_dm_plane_state(new_plane_state);
2105 if (dm_new_plane_state->dc_state) {
2106 WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
2107 dc_plane_state_release(dm_new_plane_state->dc_state);
2108 dm_new_plane_state->dc_state = NULL;
2112 drm_atomic_helper_resume(ddev, dm->cached_state);
2114 dm->cached_state = NULL;
2116 amdgpu_dm_irq_resume_late(adev);
2118 amdgpu_dm_smu_write_watermarks_table(adev);
2126 * DM (and consequently DC) is registered in the amdgpu base driver as a IP
2127 * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to
2128 * the base driver's device list to be initialized and torn down accordingly.
2130 * The functions to do so are provided as hooks in &struct amd_ip_funcs.
/* IP-block hook table: connects the DM lifecycle functions above to the
 * amdgpu base driver's init/suspend/reset machinery. */
2133 static const struct amd_ip_funcs amdgpu_dm_funcs = {
2135 .early_init = dm_early_init,
2136 .late_init = dm_late_init,
2137 .sw_init = dm_sw_init,
2138 .sw_fini = dm_sw_fini,
2139 .hw_init = dm_hw_init,
2140 .hw_fini = dm_hw_fini,
2141 .suspend = dm_suspend,
2142 .resume = dm_resume,
2143 .is_idle = dm_is_idle,
2144 .wait_for_idle = dm_wait_for_idle,
2145 .check_soft_reset = dm_check_soft_reset,
2146 .soft_reset = dm_soft_reset,
2147 .set_clockgating_state = dm_set_clockgating_state,
2148 .set_powergating_state = dm_set_powergating_state,
/* Exported IP-block descriptor registering DM as the DCE block. */
2151 const struct amdgpu_ip_block_version dm_ip_block =
2153 .type = AMD_IP_BLOCK_TYPE_DCE,
2157 .funcs = &amdgpu_dm_funcs,
/* DRM mode-config callbacks: framebuffer creation and the atomic
 * check/commit entry points for the whole device. */
2167 static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
2168 .fb_create = amdgpu_display_user_framebuffer_create,
2169 .get_format_info = amd_get_format_info,
2170 .output_poll_changed = drm_fb_helper_output_poll_changed,
2171 .atomic_check = amdgpu_dm_atomic_check,
2172 .atomic_commit = drm_atomic_helper_commit,
/* Helper hooks: route the atomic commit tail through DM's implementation. */
2175 static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
2176 .atomic_commit_tail = amdgpu_dm_atomic_commit_tail
/*
 * update_connector_ext_caps() - Refresh eDP backlight capabilities from the
 * sink's DPCD extended caps and HDR metadata.
 * @aconnector: connector to update; only SIGNAL_TYPE_EDP links are handled.
 *
 * Decides whether AUX backlight control is supported and converts the
 * sink's max/min content light levels (CTA-861-G code values) into the
 * min/max input-signal range used for backlight scaling.
 */
2179 static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
2181 u32 max_cll, min_cll, max, min, q, r;
2182 struct amdgpu_dm_backlight_caps *caps;
2183 struct amdgpu_display_manager *dm;
2184 struct drm_connector *conn_base;
2185 struct amdgpu_device *adev;
2186 struct dc_link *link = NULL;
/* Pre-computed round(50 * 2^(r/32)) for r = 0..31; see comment below. */
2187 static const u8 pre_computed_values[] = {
2188 50, 51, 52, 53, 55, 56, 57, 58, 59, 61, 62, 63, 65, 66, 68, 69,
2189 71, 72, 74, 75, 77, 79, 81, 82, 84, 86, 88, 90, 92, 94, 96, 98};
2191 if (!aconnector || !aconnector->dc_link)
2194 link = aconnector->dc_link;
2195 if (link->connector_signal != SIGNAL_TYPE_EDP)
2198 conn_base = &aconnector->base;
2199 adev = drm_to_adev(conn_base->dev);
2201 caps = &dm->backlight_caps;
2202 caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
2203 caps->aux_support = false;
2204 max_cll = conn_base->hdr_sink_metadata.hdmi_type1.max_cll;
2205 min_cll = conn_base->hdr_sink_metadata.hdmi_type1.min_cll;
/* Any of these DPCD capability bits implies AUX backlight control. */
2207 if (caps->ext_caps->bits.oled == 1 ||
2208 caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
2209 caps->ext_caps->bits.hdr_aux_backlight_control == 1)
2210 caps->aux_support = true;
2212 /* From the specification (CTA-861-G), for calculating the maximum
2213 * luminance we need to use:
2214 * Luminance = 50*2**(CV/32)
2215 * Where CV is a one-byte value.
2216 * For calculating this expression we may need float point precision;
2217 * to avoid this complexity level, we take advantage that CV is divided
2218 * by a constant. From the Euclids division algorithm, we know that CV
2219 * can be written as: CV = 32*q + r. Next, we replace CV in the
2220 * Luminance expression and get 50*(2**q)*(2**(r/32)), hence we just
2221 * need to pre-compute the value of r/32. For pre-computing the values
2222 * We just used the following Ruby line:
2223 * (0...32).each {|cv| puts (50*2**(cv/32.0)).round}
2224 * The results of the above expressions can be verified at
2225 * pre_computed_values.
2229 max = (1 << q) * pre_computed_values[r];
2231 // min luminance: maxLum * (CV/255)^2 / 100
/* NOTE(review): both DIV_ROUND_CLOSEST steps here lose precision for
 * small min_cll (q rounds to 0 or 1) — verify against the CTA-861-G
 * min-luminance formula. */
2232 q = DIV_ROUND_CLOSEST(min_cll, 255);
2233 min = max * DIV_ROUND_CLOSEST((q * q), 100);
2235 caps->aux_max_input_signal = max;
2236 caps->aux_min_input_signal = min;
/*
 * amdgpu_dm_update_connector_after_detect() - Sync DRM connector state with
 * the sink found (or lost) by DC link detection.
 * @aconnector: connector whose dc_link was just (re-)detected.
 *
 * Handles four cases: MST (ignored — the MST framework owns it), forced
 * EDID-managed connectors, a newly attached sink (update EDID/modes/
 * freesync/CEC), and sink removal (clear everything and reset HDCP
 * desire). Sink reference counts are carefully balanced throughout.
 */
2239 void amdgpu_dm_update_connector_after_detect(
2240 struct amdgpu_dm_connector *aconnector)
2242 struct drm_connector *connector = &aconnector->base;
2243 struct drm_device *dev = connector->dev;
2244 struct dc_sink *sink;
2246 /* MST handled by drm_mst framework */
2247 if (aconnector->mst_mgr.mst_state == true)
2250 sink = aconnector->dc_link->local_sink;
/* Hold a reference for the duration of this function. */
2252 dc_sink_retain(sink);
2255 * Edid mgmt connector gets first update only in mode_valid hook and then
2256 * the connector sink is set to either fake or physical sink depends on link status.
2257 * Skip if already done during boot.
2259 if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
2260 && aconnector->dc_em_sink) {
2263 * For S3 resume with headless use eml_sink to fake stream
2264 * because on resume connector->sink is set to NULL
2266 mutex_lock(&dev->mode_config.mutex);
2269 if (aconnector->dc_sink) {
2270 amdgpu_dm_update_freesync_caps(connector, NULL);
2272 * retain and release below are used to
2273 * bump up refcount for sink because the link doesn't point
2274 * to it anymore after disconnect, so on next crtc to connector
2275 * reshuffle by UMD we will get into unwanted dc_sink release
2277 dc_sink_release(aconnector->dc_sink);
2279 aconnector->dc_sink = sink;
2280 dc_sink_retain(aconnector->dc_sink);
2281 amdgpu_dm_update_freesync_caps(connector,
2284 amdgpu_dm_update_freesync_caps(connector, NULL);
/* Headless forced connector: fall back to the emulated EDID sink. */
2285 if (!aconnector->dc_sink) {
2286 aconnector->dc_sink = aconnector->dc_em_sink;
2287 dc_sink_retain(aconnector->dc_sink);
2291 mutex_unlock(&dev->mode_config.mutex);
2294 dc_sink_release(sink);
2299 * TODO: temporary guard to look for proper fix
2300 * if this sink is MST sink, we should not do anything
2302 if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
2303 dc_sink_release(sink);
/* Same sink as before (e.g. DP short pulse): nothing to update. */
2307 if (aconnector->dc_sink == sink) {
2309 * We got a DP short pulse (Link Loss, DP CTS, etc...).
2312 DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
2313 aconnector->connector_id);
2315 dc_sink_release(sink);
2319 DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
2320 aconnector->connector_id, aconnector->dc_sink, sink);
2322 mutex_lock(&dev->mode_config.mutex);
2325 * 1. Update status of the drm connector
2326 * 2. Send an event and let userspace tell us what to do
2330 * TODO: check if we still need the S3 mode update workaround.
2331 * If yes, put it here.
2333 if (aconnector->dc_sink)
2334 amdgpu_dm_update_freesync_caps(connector, NULL);
2336 aconnector->dc_sink = sink;
2337 dc_sink_retain(aconnector->dc_sink);
/* A zero-length EDID means the sink provided none: clear EDID state. */
2338 if (sink->dc_edid.length == 0) {
2339 aconnector->edid = NULL;
2340 if (aconnector->dc_link->aux_mode) {
2341 drm_dp_cec_unset_edid(
2342 &aconnector->dm_dp_aux.aux);
2346 (struct edid *)sink->dc_edid.raw_edid;
2348 drm_connector_update_edid_property(connector,
2350 drm_add_edid_modes(connector, aconnector->edid);
2352 if (aconnector->dc_link->aux_mode)
2353 drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
2357 amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
2358 update_connector_ext_caps(aconnector);
/* Sink removed: tear down EDID, modes, freesync and CEC state. */
2360 drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
2361 amdgpu_dm_update_freesync_caps(connector, NULL);
2362 drm_connector_update_edid_property(connector, NULL);
2363 aconnector->num_modes = 0;
2364 dc_sink_release(aconnector->dc_sink);
2365 aconnector->dc_sink = NULL;
2366 aconnector->edid = NULL;
2367 #ifdef CONFIG_DRM_AMD_DC_HDCP
2368 /* Set CP to DESIRED if it was ENABLED, so we can re-enable it again on hotplug */
2369 if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
2370 connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
2374 mutex_unlock(&dev->mode_config.mutex);
2376 update_subconnector_property(aconnector);
2379 dc_sink_release(sink);
2382 static void handle_hpd_irq(void *param)
2384 struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
2385 struct drm_connector *connector = &aconnector->base;
2386 struct drm_device *dev = connector->dev;
2387 enum dc_connection_type new_connection_type = dc_connection_none;
2388 #ifdef CONFIG_DRM_AMD_DC_HDCP
2389 struct amdgpu_device *adev = drm_to_adev(dev);
2390 struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
2394 * In case of failure or MST no need to update connector status or notify the OS
2395 * since (for MST case) MST does this in its own context.
2397 mutex_lock(&aconnector->hpd_lock);
2399 #ifdef CONFIG_DRM_AMD_DC_HDCP
2400 if (adev->dm.hdcp_workqueue) {
2401 hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
2402 dm_con_state->update_hdcp = true;
2405 if (aconnector->fake_enable)
2406 aconnector->fake_enable = false;
2408 if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2409 DRM_ERROR("KMS: Failed to detect connector\n");
2411 if (aconnector->base.force && new_connection_type == dc_connection_none) {
2412 emulated_link_detect(aconnector->dc_link);
2415 drm_modeset_lock_all(dev);
2416 dm_restore_drm_connector_state(dev, connector);
2417 drm_modeset_unlock_all(dev);
2419 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2420 drm_kms_helper_hotplug_event(dev);
2422 } else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
2423 if (new_connection_type == dc_connection_none &&
2424 aconnector->dc_link->type == dc_connection_none)
2425 dm_set_dpms_off(aconnector->dc_link);
2427 amdgpu_dm_update_connector_after_detect(aconnector);
2429 drm_modeset_lock_all(dev);
2430 dm_restore_drm_connector_state(dev, connector);
2431 drm_modeset_unlock_all(dev);
2433 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
2434 drm_kms_helper_hotplug_event(dev);
2436 mutex_unlock(&aconnector->hpd_lock);
2440 static void dm_handle_hpd_rx_irq(struct amdgpu_dm_connector *aconnector)
2442 uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
2444 bool new_irq_handled = false;
2446 int dpcd_bytes_to_read;
2448 const int max_process_count = 30;
2449 int process_count = 0;
2451 const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);
2453 if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
2454 dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
2455 /* DPCD 0x200 - 0x201 for downstream IRQ */
2456 dpcd_addr = DP_SINK_COUNT;
2458 dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
2459 /* DPCD 0x2002 - 0x2005 for downstream IRQ */
2460 dpcd_addr = DP_SINK_COUNT_ESI;
2463 dret = drm_dp_dpcd_read(
2464 &aconnector->dm_dp_aux.aux,
2467 dpcd_bytes_to_read);
2469 while (dret == dpcd_bytes_to_read &&
2470 process_count < max_process_count) {
2476 DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
2477 /* handle HPD short pulse irq */
2478 if (aconnector->mst_mgr.mst_state)
2480 &aconnector->mst_mgr,
2484 if (new_irq_handled) {
2485 /* ACK at DPCD to notify down stream */
2486 const int ack_dpcd_bytes_to_write =
2487 dpcd_bytes_to_read - 1;
2489 for (retry = 0; retry < 3; retry++) {
2492 wret = drm_dp_dpcd_write(
2493 &aconnector->dm_dp_aux.aux,
2496 ack_dpcd_bytes_to_write);
2497 if (wret == ack_dpcd_bytes_to_write)
2501 /* check if there is new irq to be handled */
2502 dret = drm_dp_dpcd_read(
2503 &aconnector->dm_dp_aux.aux,
2506 dpcd_bytes_to_read);
2508 new_irq_handled = false;
2514 if (process_count == max_process_count)
2515 DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
2518 static void handle_hpd_rx_irq(void *param)
2520 struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
2521 struct drm_connector *connector = &aconnector->base;
2522 struct drm_device *dev = connector->dev;
2523 struct dc_link *dc_link = aconnector->dc_link;
2524 bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
2525 bool result = false;
2526 enum dc_connection_type new_connection_type = dc_connection_none;
2527 struct amdgpu_device *adev = drm_to_adev(dev);
2528 union hpd_irq_data hpd_irq_data;
2530 memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));
2533 * TODO:Temporary add mutex to protect hpd interrupt not have a gpio
2534 * conflict, after implement i2c helper, this mutex should be
2537 if (dc_link->type != dc_connection_mst_branch)
2538 mutex_lock(&aconnector->hpd_lock);
2540 read_hpd_rx_irq_data(dc_link, &hpd_irq_data);
2542 if ((dc_link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN) ||
2543 (dc_link->type == dc_connection_mst_branch)) {
2544 if (hpd_irq_data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY) {
2546 dm_handle_hpd_rx_irq(aconnector);
2548 } else if (hpd_irq_data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) {
2550 dm_handle_hpd_rx_irq(aconnector);
2555 mutex_lock(&adev->dm.dc_lock);
2556 #ifdef CONFIG_DRM_AMD_DC_HDCP
2557 result = dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data, NULL);
2559 result = dc_link_handle_hpd_rx_irq(dc_link, NULL, NULL);
2561 mutex_unlock(&adev->dm.dc_lock);
2564 if (result && !is_mst_root_connector) {
2565 /* Downstream Port status changed. */
2566 if (!dc_link_detect_sink(dc_link, &new_connection_type))
2567 DRM_ERROR("KMS: Failed to detect connector\n");
2569 if (aconnector->base.force && new_connection_type == dc_connection_none) {
2570 emulated_link_detect(dc_link);
2572 if (aconnector->fake_enable)
2573 aconnector->fake_enable = false;
2575 amdgpu_dm_update_connector_after_detect(aconnector);
2578 drm_modeset_lock_all(dev);
2579 dm_restore_drm_connector_state(dev, connector);
2580 drm_modeset_unlock_all(dev);
2582 drm_kms_helper_hotplug_event(dev);
2583 } else if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {
2585 if (aconnector->fake_enable)
2586 aconnector->fake_enable = false;
2588 amdgpu_dm_update_connector_after_detect(aconnector);
2591 drm_modeset_lock_all(dev);
2592 dm_restore_drm_connector_state(dev, connector);
2593 drm_modeset_unlock_all(dev);
2595 drm_kms_helper_hotplug_event(dev);
2598 #ifdef CONFIG_DRM_AMD_DC_HDCP
2599 if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) {
2600 if (adev->dm.hdcp_workqueue)
2601 hdcp_handle_cpirq(adev->dm.hdcp_workqueue, aconnector->base.index);
2605 if (dc_link->type != dc_connection_mst_branch) {
2606 drm_dp_cec_irq(&aconnector->dm_dp_aux.aux);
2607 mutex_unlock(&aconnector->hpd_lock);
2611 static void register_hpd_handlers(struct amdgpu_device *adev)
2613 struct drm_device *dev = adev_to_drm(adev);
2614 struct drm_connector *connector;
2615 struct amdgpu_dm_connector *aconnector;
2616 const struct dc_link *dc_link;
2617 struct dc_interrupt_params int_params = {0};
2619 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2620 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2622 list_for_each_entry(connector,
2623 &dev->mode_config.connector_list, head) {
2625 aconnector = to_amdgpu_dm_connector(connector);
2626 dc_link = aconnector->dc_link;
2628 if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
2629 int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2630 int_params.irq_source = dc_link->irq_source_hpd;
2632 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2634 (void *) aconnector);
2637 if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {
2639 /* Also register for DP short pulse (hpd_rx). */
2640 int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2641 int_params.irq_source = dc_link->irq_source_hpd_rx;
2643 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2645 (void *) aconnector);
#if defined(CONFIG_DRM_AMD_DC_SI)
/* Register IRQ sources and initialize IRQ callbacks */
static int dce60_register_irq_handlers(struct amdgpu_device *adev)
{
	struct dc *dc = adev->dm.dc;
	struct common_irq_params *c_irq_params;
	struct dc_interrupt_params int_params = {0};
	int r;
	int i;
	unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;

	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;

	/*
	 * Actions of amdgpu_irq_add_id():
	 * 1. Register a set() function with base driver.
	 *    Base driver will call set() function to enable/disable an
	 *    interrupt in DC hardware.
	 * 2. Register amdgpu_dm_irq_handler().
	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
	 *    coming from DC hardware.
	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
	 *    for acknowledging and handling. */

	/* Use VBLANK interrupt */
	for (i = 0; i < adev->mode_info.num_crtc; i++) {
		/* DCE6 vblank source IDs are 1-based. */
		r = amdgpu_irq_add_id(adev, client_id, i+1 , &adev->crtc_irq);
		if (r) {
			DRM_ERROR("Failed to add crtc irq id!\n");
			return r;
		}

		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i+1 , 0);

		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		amdgpu_dm_irq_register_interrupt(adev, &int_params,
				dm_crtc_high_irq, c_irq_params);
	}

	/* Use GRPH_PFLIP interrupt */
	for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
			i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
		r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
		if (r) {
			DRM_ERROR("Failed to add page flip irq id!\n");
			return r;
		}

		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i, 0);

		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		amdgpu_dm_irq_register_interrupt(adev, &int_params,
				dm_pflip_high_irq, c_irq_params);
	}

	/* HPD */
	r = amdgpu_irq_add_id(adev, client_id,
			VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
	if (r) {
		DRM_ERROR("Failed to add hpd irq id!\n");
		return r;
	}

	register_hpd_handlers(adev);

	return 0;
}
#endif
2733 /* Register IRQ sources and initialize IRQ callbacks */
2734 static int dce110_register_irq_handlers(struct amdgpu_device *adev)
2736 struct dc *dc = adev->dm.dc;
2737 struct common_irq_params *c_irq_params;
2738 struct dc_interrupt_params int_params = {0};
2741 unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
2743 if (adev->asic_type >= CHIP_VEGA10)
2744 client_id = SOC15_IH_CLIENTID_DCE;
2746 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2747 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2750 * Actions of amdgpu_irq_add_id():
2751 * 1. Register a set() function with base driver.
2752 * Base driver will call set() function to enable/disable an
2753 * interrupt in DC hardware.
2754 * 2. Register amdgpu_dm_irq_handler().
2755 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2756 * coming from DC hardware.
2757 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2758 * for acknowledging and handling. */
2760 /* Use VBLANK interrupt */
2761 for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
2762 r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
2764 DRM_ERROR("Failed to add crtc irq id!\n");
2768 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2769 int_params.irq_source =
2770 dc_interrupt_to_irq_source(dc, i, 0);
2772 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2774 c_irq_params->adev = adev;
2775 c_irq_params->irq_src = int_params.irq_source;
2777 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2778 dm_crtc_high_irq, c_irq_params);
2781 /* Use VUPDATE interrupt */
2782 for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) {
2783 r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq);
2785 DRM_ERROR("Failed to add vupdate irq id!\n");
2789 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2790 int_params.irq_source =
2791 dc_interrupt_to_irq_source(dc, i, 0);
2793 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
2795 c_irq_params->adev = adev;
2796 c_irq_params->irq_src = int_params.irq_source;
2798 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2799 dm_vupdate_high_irq, c_irq_params);
2802 /* Use GRPH_PFLIP interrupt */
2803 for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
2804 i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
2805 r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
2807 DRM_ERROR("Failed to add page flip irq id!\n");
2811 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2812 int_params.irq_source =
2813 dc_interrupt_to_irq_source(dc, i, 0);
2815 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2817 c_irq_params->adev = adev;
2818 c_irq_params->irq_src = int_params.irq_source;
2820 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2821 dm_pflip_high_irq, c_irq_params);
2826 r = amdgpu_irq_add_id(adev, client_id,
2827 VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
2829 DRM_ERROR("Failed to add hpd irq id!\n");
2833 register_hpd_handlers(adev);
#if defined(CONFIG_DRM_AMD_DC_DCN)
/* Register IRQ sources and initialize IRQ callbacks */
static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
{
	struct dc *dc = adev->dm.dc;
	struct common_irq_params *c_irq_params;
	struct dc_interrupt_params int_params = {0};
	int r;
	int i;

	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;

	/*
	 * Actions of amdgpu_irq_add_id():
	 * 1. Register a set() function with base driver.
	 *    Base driver will call set() function to enable/disable an
	 *    interrupt in DC hardware.
	 * 2. Register amdgpu_dm_irq_handler().
	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
	 *    coming from DC hardware.
	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
	 *    for acknowledging and handling.
	 */

	/* Use VSTARTUP interrupt */
	for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
			i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
			i++) {
		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);

		if (r) {
			DRM_ERROR("Failed to add crtc irq id!\n");
			return r;
		}

		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i, 0);

		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		amdgpu_dm_irq_register_interrupt(
			adev, &int_params, dm_crtc_high_irq, c_irq_params);
	}

	/* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to
	 * the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx
	 * to trigger at end of each vblank, regardless of state of the lock,
	 * matching DCE behaviour.
	 */
	for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT;
			i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT + adev->mode_info.num_crtc - 1;
			i++) {
		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq);

		if (r) {
			DRM_ERROR("Failed to add vupdate irq id!\n");
			return r;
		}

		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i, 0);

		c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		amdgpu_dm_irq_register_interrupt(adev, &int_params,
				dm_vupdate_high_irq, c_irq_params);
	}

	/* Use GRPH_PFLIP interrupt */
	for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
			i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + adev->mode_info.num_crtc - 1;
			i++) {
		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
		if (r) {
			DRM_ERROR("Failed to add page flip irq id!\n");
			return r;
		}

		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
		int_params.irq_source =
			dc_interrupt_to_irq_source(dc, i, 0);

		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];

		c_irq_params->adev = adev;
		c_irq_params->irq_src = int_params.irq_source;

		amdgpu_dm_irq_register_interrupt(adev, &int_params,
				dm_pflip_high_irq, c_irq_params);
	}

	/* HPD */
	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
			&adev->hpd_irq);
	if (r) {
		DRM_ERROR("Failed to add hpd irq id!\n");
		return r;
	}

	register_hpd_handlers(adev);

	return 0;
}
#endif
2954 * Acquires the lock for the atomic state object and returns
2955 * the new atomic state.
2957 * This should only be called during atomic check.
2959 static int dm_atomic_get_state(struct drm_atomic_state *state,
2960 struct dm_atomic_state **dm_state)
2962 struct drm_device *dev = state->dev;
2963 struct amdgpu_device *adev = drm_to_adev(dev);
2964 struct amdgpu_display_manager *dm = &adev->dm;
2965 struct drm_private_state *priv_state;
2970 priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj);
2971 if (IS_ERR(priv_state))
2972 return PTR_ERR(priv_state);
2974 *dm_state = to_dm_atomic_state(priv_state);
2979 static struct dm_atomic_state *
2980 dm_atomic_get_new_state(struct drm_atomic_state *state)
2982 struct drm_device *dev = state->dev;
2983 struct amdgpu_device *adev = drm_to_adev(dev);
2984 struct amdgpu_display_manager *dm = &adev->dm;
2985 struct drm_private_obj *obj;
2986 struct drm_private_state *new_obj_state;
2989 for_each_new_private_obj_in_state(state, obj, new_obj_state, i) {
2990 if (obj->funcs == dm->atomic_obj.funcs)
2991 return to_dm_atomic_state(new_obj_state);
2997 static struct drm_private_state *
2998 dm_atomic_duplicate_state(struct drm_private_obj *obj)
3000 struct dm_atomic_state *old_state, *new_state;
3002 new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
3006 __drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base);
3008 old_state = to_dm_atomic_state(obj->state);
3010 if (old_state && old_state->context)
3011 new_state->context = dc_copy_state(old_state->context);
3013 if (!new_state->context) {
3018 return &new_state->base;
3021 static void dm_atomic_destroy_state(struct drm_private_obj *obj,
3022 struct drm_private_state *state)
3024 struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
3026 if (dm_state && dm_state->context)
3027 dc_release_state(dm_state->context);
3032 static struct drm_private_state_funcs dm_atomic_state_funcs = {
3033 .atomic_duplicate_state = dm_atomic_duplicate_state,
3034 .atomic_destroy_state = dm_atomic_destroy_state,
3037 static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
3039 struct dm_atomic_state *state;
3042 adev->mode_info.mode_config_initialized = true;
3044 adev_to_drm(adev)->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
3045 adev_to_drm(adev)->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;
3047 adev_to_drm(adev)->mode_config.max_width = 16384;
3048 adev_to_drm(adev)->mode_config.max_height = 16384;
3050 adev_to_drm(adev)->mode_config.preferred_depth = 24;
3051 adev_to_drm(adev)->mode_config.prefer_shadow = 1;
3052 /* indicates support for immediate flip */
3053 adev_to_drm(adev)->mode_config.async_page_flip = true;
3055 adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base;
3057 state = kzalloc(sizeof(*state), GFP_KERNEL);
3061 state->context = dc_create_state(adev->dm.dc);
3062 if (!state->context) {
3067 dc_resource_state_copy_construct_current(adev->dm.dc, state->context);
3069 drm_atomic_private_obj_init(adev_to_drm(adev),
3070 &adev->dm.atomic_obj,
3072 &dm_atomic_state_funcs);
3074 r = amdgpu_display_modeset_create_props(adev);
3076 dc_release_state(state->context);
3081 r = amdgpu_dm_audio_init(adev);
3083 dc_release_state(state->context);
3091 #define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
3092 #define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
3093 #define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50
3095 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3096 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3098 static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm)
3100 #if defined(CONFIG_ACPI)
3101 struct amdgpu_dm_backlight_caps caps;
3103 memset(&caps, 0, sizeof(caps));
3105 if (dm->backlight_caps.caps_valid)
3108 amdgpu_acpi_get_backlight_caps(dm->adev, &caps);
3109 if (caps.caps_valid) {
3110 dm->backlight_caps.caps_valid = true;
3111 if (caps.aux_support)
3113 dm->backlight_caps.min_input_signal = caps.min_input_signal;
3114 dm->backlight_caps.max_input_signal = caps.max_input_signal;
3116 dm->backlight_caps.min_input_signal =
3117 AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3118 dm->backlight_caps.max_input_signal =
3119 AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3122 if (dm->backlight_caps.aux_support)
3125 dm->backlight_caps.min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3126 dm->backlight_caps.max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3130 static int set_backlight_via_aux(struct dc_link *link, uint32_t brightness)
3137 rc = dc_link_set_backlight_level_nits(link, true, brightness,
3138 AUX_BL_DEFAULT_TRANSITION_TIME_MS);
3143 static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps,
3144 unsigned *min, unsigned *max)
3149 if (caps->aux_support) {
3150 // Firmware limits are in nits, DC API wants millinits.
3151 *max = 1000 * caps->aux_max_input_signal;
3152 *min = 1000 * caps->aux_min_input_signal;
3154 // Firmware limits are 8-bit, PWM control is 16-bit.
3155 *max = 0x101 * caps->max_input_signal;
3156 *min = 0x101 * caps->min_input_signal;
3161 static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *caps,
3162 uint32_t brightness)
3166 if (!get_brightness_range(caps, &min, &max))
3169 // Rescale 0..255 to min..max
3170 return min + DIV_ROUND_CLOSEST((max - min) * brightness,
3171 AMDGPU_MAX_BL_LEVEL);
3174 static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps *caps,
3175 uint32_t brightness)
3179 if (!get_brightness_range(caps, &min, &max))
3182 if (brightness < min)
3184 // Rescale min..max to 0..255
3185 return DIV_ROUND_CLOSEST(AMDGPU_MAX_BL_LEVEL * (brightness - min),
3189 static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
3191 struct amdgpu_display_manager *dm = bl_get_data(bd);
3192 struct amdgpu_dm_backlight_caps caps;
3193 struct dc_link *link = NULL;
3197 amdgpu_dm_update_backlight_caps(dm);
3198 caps = dm->backlight_caps;
3200 link = (struct dc_link *)dm->backlight_link;
3202 brightness = convert_brightness_from_user(&caps, bd->props.brightness);
3203 // Change brightness based on AUX property
3204 if (caps.aux_support)
3205 return set_backlight_via_aux(link, brightness);
3207 rc = dc_link_set_backlight_level(dm->backlight_link, brightness, 0);
3212 static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
3214 struct amdgpu_display_manager *dm = bl_get_data(bd);
3215 int ret = dc_link_get_backlight_level(dm->backlight_link);
3217 if (ret == DC_ERROR_UNEXPECTED)
3218 return bd->props.brightness;
3219 return convert_brightness_to_user(&dm->backlight_caps, ret);
3222 static const struct backlight_ops amdgpu_dm_backlight_ops = {
3223 .options = BL_CORE_SUSPENDRESUME,
3224 .get_brightness = amdgpu_dm_backlight_get_brightness,
3225 .update_status = amdgpu_dm_backlight_update_status,
3229 amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
3232 struct backlight_properties props = { 0 };
3234 amdgpu_dm_update_backlight_caps(dm);
3236 props.max_brightness = AMDGPU_MAX_BL_LEVEL;
3237 props.brightness = AMDGPU_MAX_BL_LEVEL;
3238 props.type = BACKLIGHT_RAW;
3240 snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
3241 adev_to_drm(dm->adev)->primary->index);
3243 dm->backlight_dev = backlight_device_register(bl_name,
3244 adev_to_drm(dm->adev)->dev,
3246 &amdgpu_dm_backlight_ops,
3249 if (IS_ERR(dm->backlight_dev))
3250 DRM_ERROR("DM: Backlight registration failed!\n");
3252 DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
3257 static int initialize_plane(struct amdgpu_display_manager *dm,
3258 struct amdgpu_mode_info *mode_info, int plane_id,
3259 enum drm_plane_type plane_type,
3260 const struct dc_plane_cap *plane_cap)
3262 struct drm_plane *plane;
3263 unsigned long possible_crtcs;
3266 plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL);
3268 DRM_ERROR("KMS: Failed to allocate plane\n");
3271 plane->type = plane_type;
3274 * HACK: IGT tests expect that the primary plane for a CRTC
3275 * can only have one possible CRTC. Only expose support for
3276 * any CRTC if they're not going to be used as a primary plane
3277 * for a CRTC - like overlay or underlay planes.
3279 possible_crtcs = 1 << plane_id;
3280 if (plane_id >= dm->dc->caps.max_streams)
3281 possible_crtcs = 0xff;
3283 ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap);
3286 DRM_ERROR("KMS: Failed to initialize plane\n");
3292 mode_info->planes[plane_id] = plane;
/*
 * Register a backlight device for @link if it drives an eDP/LVDS panel
 * that is actually connected; records the link so brightness requests
 * can be routed to it.
 */
static void register_backlight_device(struct amdgpu_display_manager *dm,
				      struct dc_link *link)
{
#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)

	if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
	    link->type != dc_connection_none) {
		/*
		 * Event if registration failed, we should continue with
		 * DM initialization because not having a backlight control
		 * is better then a black screen.
		 */
		amdgpu_dm_register_backlight_device(dm);

		if (dm->backlight_dev)
			dm->backlight_link = link;
	}
#endif
}
3321 * In this architecture, the association
3322 * connector -> encoder -> crtc
3323 * id not really requried. The crtc and connector will hold the
3324 * display_index as an abstraction to use with DAL component
3326 * Returns 0 on success
3328 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
3330 struct amdgpu_display_manager *dm = &adev->dm;
3332 struct amdgpu_dm_connector *aconnector = NULL;
3333 struct amdgpu_encoder *aencoder = NULL;
3334 struct amdgpu_mode_info *mode_info = &adev->mode_info;
3336 int32_t primary_planes;
3337 enum dc_connection_type new_connection_type = dc_connection_none;
3338 const struct dc_plane_cap *plane;
3340 dm->display_indexes_num = dm->dc->caps.max_streams;
3341 /* Update the actual used number of crtc */
3342 adev->mode_info.num_crtc = adev->dm.display_indexes_num;
3344 link_cnt = dm->dc->caps.max_links;
3345 if (amdgpu_dm_mode_config_init(dm->adev)) {
3346 DRM_ERROR("DM: Failed to initialize mode config\n");
3350 /* There is one primary plane per CRTC */
3351 primary_planes = dm->dc->caps.max_streams;
3352 ASSERT(primary_planes <= AMDGPU_MAX_PLANES);
3355 * Initialize primary planes, implicit planes for legacy IOCTLS.
3356 * Order is reversed to match iteration order in atomic check.
3358 for (i = (primary_planes - 1); i >= 0; i--) {
3359 plane = &dm->dc->caps.planes[i];
3361 if (initialize_plane(dm, mode_info, i,
3362 DRM_PLANE_TYPE_PRIMARY, plane)) {
3363 DRM_ERROR("KMS: Failed to initialize primary plane\n");
3369 * Initialize overlay planes, index starting after primary planes.
3370 * These planes have a higher DRM index than the primary planes since
3371 * they should be considered as having a higher z-order.
3372 * Order is reversed to match iteration order in atomic check.
3374 * Only support DCN for now, and only expose one so we don't encourage
3375 * userspace to use up all the pipes.
3377 for (i = 0; i < dm->dc->caps.max_planes; ++i) {
3378 struct dc_plane_cap *plane = &dm->dc->caps.planes[i];
3380 if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL)
3383 if (!plane->blends_with_above || !plane->blends_with_below)
3386 if (!plane->pixel_format_support.argb8888)
3389 if (initialize_plane(dm, NULL, primary_planes + i,
3390 DRM_PLANE_TYPE_OVERLAY, plane)) {
3391 DRM_ERROR("KMS: Failed to initialize overlay plane\n");
3395 /* Only create one overlay plane. */
3399 for (i = 0; i < dm->dc->caps.max_streams; i++)
3400 if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) {
3401 DRM_ERROR("KMS: Failed to initialize crtc\n");
3405 /* loops over all connectors on the board */
3406 for (i = 0; i < link_cnt; i++) {
3407 struct dc_link *link = NULL;
3409 if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
3411 "KMS: Cannot support more than %d display indexes\n",
3412 AMDGPU_DM_MAX_DISPLAY_INDEX);
3416 aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
3420 aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
3424 if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
3425 DRM_ERROR("KMS: Failed to initialize encoder\n");
3429 if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
3430 DRM_ERROR("KMS: Failed to initialize connector\n");
3434 link = dc_get_link_at_index(dm->dc, i);
3436 if (!dc_link_detect_sink(link, &new_connection_type))
3437 DRM_ERROR("KMS: Failed to detect connector\n");
3439 if (aconnector->base.force && new_connection_type == dc_connection_none) {
3440 emulated_link_detect(link);
3441 amdgpu_dm_update_connector_after_detect(aconnector);
3443 } else if (dc_link_detect(link, DETECT_REASON_BOOT)) {
3444 amdgpu_dm_update_connector_after_detect(aconnector);
3445 register_backlight_device(dm, link);
3446 if (amdgpu_dc_feature_mask & DC_PSR_MASK)
3447 amdgpu_dm_set_psr_caps(link);
3453 /* Software is initialized. Now we can register interrupt handlers. */
3454 switch (adev->asic_type) {
3455 #if defined(CONFIG_DRM_AMD_DC_SI)
3460 if (dce60_register_irq_handlers(dm->adev)) {
3461 DRM_ERROR("DM: Failed to initialize IRQ\n");
3475 case CHIP_POLARIS11:
3476 case CHIP_POLARIS10:
3477 case CHIP_POLARIS12:
3482 if (dce110_register_irq_handlers(dm->adev)) {
3483 DRM_ERROR("DM: Failed to initialize IRQ\n");
3487 #if defined(CONFIG_DRM_AMD_DC_DCN)
3493 case CHIP_SIENNA_CICHLID:
3494 case CHIP_NAVY_FLOUNDER:
3495 case CHIP_DIMGREY_CAVEFISH:
3497 if (dcn10_register_irq_handlers(dm->adev)) {
3498 DRM_ERROR("DM: Failed to initialize IRQ\n");
3504 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
3516 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
3518 drm_mode_config_cleanup(dm->ddev);
3519 drm_atomic_private_obj_fini(&dm->atomic_obj);
/******************************************************************************
 * amdgpu_display_funcs functions
 *****************************************************************************/

/*
 * dm_bandwidth_update - program display watermarks
 *
 * @adev: amdgpu_device pointer
 *
 * Calculate and program the display watermarks and line buffer allocation.
 */
static void dm_bandwidth_update(struct amdgpu_device *adev)
{
	/* TODO: implement later */
}
3539 static const struct amdgpu_display_funcs dm_display_funcs = {
3540 .bandwidth_update = dm_bandwidth_update, /* called unconditionally */
3541 .vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */
3542 .backlight_set_level = NULL, /* never called for DC */
3543 .backlight_get_level = NULL, /* never called for DC */
3544 .hpd_sense = NULL,/* called unconditionally */
3545 .hpd_set_polarity = NULL, /* called unconditionally */
3546 .hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
3547 .page_flip_get_scanoutpos =
3548 dm_crtc_get_scanoutpos,/* called unconditionally */
3549 .add_encoder = NULL, /* VBIOS parsing. DAL does it. */
3550 .add_connector = NULL, /* VBIOS parsing. DAL does it. */
3553 #if defined(CONFIG_DEBUG_KERNEL_DC)
3555 static ssize_t s3_debug_store(struct device *device,
3556 struct device_attribute *attr,
3562 struct drm_device *drm_dev = dev_get_drvdata(device);
3563 struct amdgpu_device *adev = drm_to_adev(drm_dev);
3565 ret = kstrtoint(buf, 0, &s3_state);
3570 drm_kms_helper_hotplug_event(adev_to_drm(adev));
3575 return ret == 0 ? count : 0;
3578 DEVICE_ATTR_WO(s3_debug);
3582 static int dm_early_init(void *handle)
3584 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3586 switch (adev->asic_type) {
3587 #if defined(CONFIG_DRM_AMD_DC_SI)
3591 adev->mode_info.num_crtc = 6;
3592 adev->mode_info.num_hpd = 6;
3593 adev->mode_info.num_dig = 6;
3596 adev->mode_info.num_crtc = 2;
3597 adev->mode_info.num_hpd = 2;
3598 adev->mode_info.num_dig = 2;
3603 adev->mode_info.num_crtc = 6;
3604 adev->mode_info.num_hpd = 6;
3605 adev->mode_info.num_dig = 6;
3608 adev->mode_info.num_crtc = 4;
3609 adev->mode_info.num_hpd = 6;
3610 adev->mode_info.num_dig = 7;
3614 adev->mode_info.num_crtc = 2;
3615 adev->mode_info.num_hpd = 6;
3616 adev->mode_info.num_dig = 6;
3620 adev->mode_info.num_crtc = 6;
3621 adev->mode_info.num_hpd = 6;
3622 adev->mode_info.num_dig = 7;
3625 adev->mode_info.num_crtc = 3;
3626 adev->mode_info.num_hpd = 6;
3627 adev->mode_info.num_dig = 9;
3630 adev->mode_info.num_crtc = 2;
3631 adev->mode_info.num_hpd = 6;
3632 adev->mode_info.num_dig = 9;
3634 case CHIP_POLARIS11:
3635 case CHIP_POLARIS12:
3636 adev->mode_info.num_crtc = 5;
3637 adev->mode_info.num_hpd = 5;
3638 adev->mode_info.num_dig = 5;
3640 case CHIP_POLARIS10:
3642 adev->mode_info.num_crtc = 6;
3643 adev->mode_info.num_hpd = 6;
3644 adev->mode_info.num_dig = 6;
3649 adev->mode_info.num_crtc = 6;
3650 adev->mode_info.num_hpd = 6;
3651 adev->mode_info.num_dig = 6;
3653 #if defined(CONFIG_DRM_AMD_DC_DCN)
3657 adev->mode_info.num_crtc = 4;
3658 adev->mode_info.num_hpd = 4;
3659 adev->mode_info.num_dig = 4;
3663 case CHIP_SIENNA_CICHLID:
3664 case CHIP_NAVY_FLOUNDER:
3665 adev->mode_info.num_crtc = 6;
3666 adev->mode_info.num_hpd = 6;
3667 adev->mode_info.num_dig = 6;
3670 case CHIP_DIMGREY_CAVEFISH:
3671 adev->mode_info.num_crtc = 5;
3672 adev->mode_info.num_hpd = 5;
3673 adev->mode_info.num_dig = 5;
3677 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
3681 amdgpu_dm_set_irq_funcs(adev);
3683 if (adev->mode_info.funcs == NULL)
3684 adev->mode_info.funcs = &dm_display_funcs;
3687 * Note: Do NOT change adev->audio_endpt_rreg and
3688 * adev->audio_endpt_wreg because they are initialised in
3689 * amdgpu_device_init()
3691 #if defined(CONFIG_DEBUG_KERNEL_DC)
3693 adev_to_drm(adev)->dev,
3694 &dev_attr_s3_debug);
3700 static bool modeset_required(struct drm_crtc_state *crtc_state,
3701 struct dc_stream_state *new_stream,
3702 struct dc_stream_state *old_stream)
3704 return crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
3707 static bool modereset_required(struct drm_crtc_state *crtc_state)
3709 return !crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
3712 static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
3714 drm_encoder_cleanup(encoder);
3718 static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
3719 .destroy = amdgpu_dm_encoder_destroy,
3723 static int fill_dc_scaling_info(const struct drm_plane_state *state,
3724 struct dc_scaling_info *scaling_info)
3726 int scale_w, scale_h;
3728 memset(scaling_info, 0, sizeof(*scaling_info));
3730 /* Source is fixed 16.16 but we ignore mantissa for now... */
3731 scaling_info->src_rect.x = state->src_x >> 16;
3732 scaling_info->src_rect.y = state->src_y >> 16;
3734 scaling_info->src_rect.width = state->src_w >> 16;
3735 if (scaling_info->src_rect.width == 0)
3738 scaling_info->src_rect.height = state->src_h >> 16;
3739 if (scaling_info->src_rect.height == 0)
3742 scaling_info->dst_rect.x = state->crtc_x;
3743 scaling_info->dst_rect.y = state->crtc_y;
3745 if (state->crtc_w == 0)
3748 scaling_info->dst_rect.width = state->crtc_w;
3750 if (state->crtc_h == 0)
3753 scaling_info->dst_rect.height = state->crtc_h;
3755 /* DRM doesn't specify clipping on destination output. */
3756 scaling_info->clip_rect = scaling_info->dst_rect;
3758 /* TODO: Validate scaling per-format with DC plane caps */
3759 scale_w = scaling_info->dst_rect.width * 1000 /
3760 scaling_info->src_rect.width;
3762 if (scale_w < 250 || scale_w > 16000)
3765 scale_h = scaling_info->dst_rect.height * 1000 /
3766 scaling_info->src_rect.height;
3768 if (scale_h < 250 || scale_h > 16000)
3772 * The "scaling_quality" can be ignored for now, quality = 0 has DC
3773 * assume reasonable defaults based on the format.
3780 fill_gfx8_tiling_info_from_flags(union dc_tiling_info *tiling_info,
3781 uint64_t tiling_flags)
3783 /* Fill GFX8 params */
3784 if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) {
3785 unsigned int bankw, bankh, mtaspect, tile_split, num_banks;
3787 bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
3788 bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
3789 mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
3790 tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
3791 num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
3793 /* XXX fix me for VI */
3794 tiling_info->gfx8.num_banks = num_banks;
3795 tiling_info->gfx8.array_mode =
3796 DC_ARRAY_2D_TILED_THIN1;
3797 tiling_info->gfx8.tile_split = tile_split;
3798 tiling_info->gfx8.bank_width = bankw;
3799 tiling_info->gfx8.bank_height = bankh;
3800 tiling_info->gfx8.tile_aspect = mtaspect;
3801 tiling_info->gfx8.tile_mode =
3802 DC_ADDR_SURF_MICRO_TILING_DISPLAY;
3803 } else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)
3804 == DC_ARRAY_1D_TILED_THIN1) {
3805 tiling_info->gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
3808 tiling_info->gfx8.pipe_config =
3809 AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
3813 fill_gfx9_tiling_info_from_device(const struct amdgpu_device *adev,
3814 union dc_tiling_info *tiling_info)
3816 tiling_info->gfx9.num_pipes =
3817 adev->gfx.config.gb_addr_config_fields.num_pipes;
3818 tiling_info->gfx9.num_banks =
3819 adev->gfx.config.gb_addr_config_fields.num_banks;
3820 tiling_info->gfx9.pipe_interleave =
3821 adev->gfx.config.gb_addr_config_fields.pipe_interleave_size;
3822 tiling_info->gfx9.num_shader_engines =
3823 adev->gfx.config.gb_addr_config_fields.num_se;
3824 tiling_info->gfx9.max_compressed_frags =
3825 adev->gfx.config.gb_addr_config_fields.max_compress_frags;
3826 tiling_info->gfx9.num_rb_per_se =
3827 adev->gfx.config.gb_addr_config_fields.num_rb_per_se;
3828 tiling_info->gfx9.shaderEnable = 1;
3829 if (adev->asic_type == CHIP_SIENNA_CICHLID ||
3830 adev->asic_type == CHIP_NAVY_FLOUNDER ||
3831 adev->asic_type == CHIP_DIMGREY_CAVEFISH ||
3832 adev->asic_type == CHIP_VANGOGH)
3833 tiling_info->gfx9.num_pkrs = adev->gfx.config.gb_addr_config_fields.num_pkrs;
3837 validate_dcc(struct amdgpu_device *adev,
3838 const enum surface_pixel_format format,
3839 const enum dc_rotation_angle rotation,
3840 const union dc_tiling_info *tiling_info,
3841 const struct dc_plane_dcc_param *dcc,
3842 const struct dc_plane_address *address,
3843 const struct plane_size *plane_size)
3845 struct dc *dc = adev->dm.dc;
3846 struct dc_dcc_surface_param input;
3847 struct dc_surface_dcc_cap output;
3849 memset(&input, 0, sizeof(input));
3850 memset(&output, 0, sizeof(output));
3855 if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN ||
3856 !dc->cap_funcs.get_dcc_compression_cap)
3859 input.format = format;
3860 input.surface_size.width = plane_size->surface_size.width;
3861 input.surface_size.height = plane_size->surface_size.height;
3862 input.swizzle_mode = tiling_info->gfx9.swizzle;
3864 if (rotation == ROTATION_ANGLE_0 || rotation == ROTATION_ANGLE_180)
3865 input.scan = SCAN_DIRECTION_HORIZONTAL;
3866 else if (rotation == ROTATION_ANGLE_90 || rotation == ROTATION_ANGLE_270)
3867 input.scan = SCAN_DIRECTION_VERTICAL;
3869 if (!dc->cap_funcs.get_dcc_compression_cap(dc, &input, &output))
3872 if (!output.capable)
3875 if (dcc->independent_64b_blks == 0 &&
3876 output.grph.rgb.independent_64b_blks != 0)
3883 modifier_has_dcc(uint64_t modifier)
3885 return IS_AMD_FMT_MOD(modifier) && AMD_FMT_MOD_GET(DCC, modifier);
3889 modifier_gfx9_swizzle_mode(uint64_t modifier)
3891 if (modifier == DRM_FORMAT_MOD_LINEAR)
3894 return AMD_FMT_MOD_GET(TILE, modifier);
3897 static const struct drm_format_info *
3898 amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
3900 return amdgpu_lookup_format_info(cmd->pixel_format, cmd->modifier[0]);
3904 fill_gfx9_tiling_info_from_modifier(const struct amdgpu_device *adev,
3905 union dc_tiling_info *tiling_info,
3908 unsigned int mod_bank_xor_bits = AMD_FMT_MOD_GET(BANK_XOR_BITS, modifier);
3909 unsigned int mod_pipe_xor_bits = AMD_FMT_MOD_GET(PIPE_XOR_BITS, modifier);
3910 unsigned int pkrs_log2 = AMD_FMT_MOD_GET(PACKERS, modifier);
3911 unsigned int pipes_log2 = min(4u, mod_pipe_xor_bits);
3913 fill_gfx9_tiling_info_from_device(adev, tiling_info);
3915 if (!IS_AMD_FMT_MOD(modifier))
3918 tiling_info->gfx9.num_pipes = 1u << pipes_log2;
3919 tiling_info->gfx9.num_shader_engines = 1u << (mod_pipe_xor_bits - pipes_log2);
3921 if (adev->family >= AMDGPU_FAMILY_NV) {
3922 tiling_info->gfx9.num_pkrs = 1u << pkrs_log2;
3924 tiling_info->gfx9.num_banks = 1u << mod_bank_xor_bits;
3926 /* for DCC we know it isn't rb aligned, so rb_per_se doesn't matter. */
3930 enum dm_micro_swizzle {
3931 MICRO_SWIZZLE_Z = 0,
3932 MICRO_SWIZZLE_S = 1,
3933 MICRO_SWIZZLE_D = 2,
3937 static bool dm_plane_format_mod_supported(struct drm_plane *plane,
3941 struct amdgpu_device *adev = drm_to_adev(plane->dev);
3942 const struct drm_format_info *info = drm_format_info(format);
3944 enum dm_micro_swizzle microtile = modifier_gfx9_swizzle_mode(modifier) & 3;
3950 * We always have to allow this modifier, because core DRM still
3951 * checks LINEAR support if userspace does not provide modifers.
3953 if (modifier == DRM_FORMAT_MOD_LINEAR)
3957 * The arbitrary tiling support for multiplane formats has not been hooked
3960 if (info->num_planes > 1)
3964 * For D swizzle the canonical modifier depends on the bpp, so check
3967 if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) == AMD_FMT_MOD_TILE_VER_GFX9 &&
3968 adev->family >= AMDGPU_FAMILY_NV) {
3969 if (microtile == MICRO_SWIZZLE_D && info->cpp[0] == 4)
3973 if (adev->family >= AMDGPU_FAMILY_RV && microtile == MICRO_SWIZZLE_D &&
3977 if (modifier_has_dcc(modifier)) {
3978 /* Per radeonsi comments 16/64 bpp are more complicated. */
3979 if (info->cpp[0] != 4)
3987 add_modifier(uint64_t **mods, uint64_t *size, uint64_t *cap, uint64_t mod)
3992 if (*cap - *size < 1) {
3993 uint64_t new_cap = *cap * 2;
3994 uint64_t *new_mods = kmalloc(new_cap * sizeof(uint64_t), GFP_KERNEL);
4002 memcpy(new_mods, *mods, sizeof(uint64_t) * *size);
4008 (*mods)[*size] = mod;
4013 add_gfx9_modifiers(const struct amdgpu_device *adev,
4014 uint64_t **mods, uint64_t *size, uint64_t *capacity)
4016 int pipes = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
4017 int pipe_xor_bits = min(8, pipes +
4018 ilog2(adev->gfx.config.gb_addr_config_fields.num_se));
4019 int bank_xor_bits = min(8 - pipe_xor_bits,
4020 ilog2(adev->gfx.config.gb_addr_config_fields.num_banks));
4021 int rb = ilog2(adev->gfx.config.gb_addr_config_fields.num_se) +
4022 ilog2(adev->gfx.config.gb_addr_config_fields.num_rb_per_se);
4025 if (adev->family == AMDGPU_FAMILY_RV) {
4026 /* Raven2 and later */
4027 bool has_constant_encode = adev->asic_type > CHIP_RAVEN || adev->external_rev_id >= 0x81;
4030 * No _D DCC swizzles yet because we only allow 32bpp, which
4031 * doesn't support _D on DCN
4034 if (has_constant_encode) {
4035 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4036 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4037 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4038 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4039 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4040 AMD_FMT_MOD_SET(DCC, 1) |
4041 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4042 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4043 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1));
4046 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4047 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4048 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4049 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4050 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4051 AMD_FMT_MOD_SET(DCC, 1) |
4052 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4053 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4054 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0));
4056 if (has_constant_encode) {
4057 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4058 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4059 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4060 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4061 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4062 AMD_FMT_MOD_SET(DCC, 1) |
4063 AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4064 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4065 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4067 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4068 AMD_FMT_MOD_SET(RB, rb) |
4069 AMD_FMT_MOD_SET(PIPE, pipes));
4072 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4073 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4074 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4075 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4076 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4077 AMD_FMT_MOD_SET(DCC, 1) |
4078 AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4079 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4080 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4081 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0) |
4082 AMD_FMT_MOD_SET(RB, rb) |
4083 AMD_FMT_MOD_SET(PIPE, pipes));
4087 * Only supported for 64bpp on Raven, will be filtered on format in
4088 * dm_plane_format_mod_supported.
4090 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4091 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D_X) |
4092 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4093 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4094 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
4096 if (adev->family == AMDGPU_FAMILY_RV) {
4097 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4098 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4099 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4100 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4101 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
4105 * Only supported for 64bpp on Raven, will be filtered on format in
4106 * dm_plane_format_mod_supported.
4108 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4109 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
4110 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4112 if (adev->family == AMDGPU_FAMILY_RV) {
4113 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4114 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
4115 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4120 add_gfx10_1_modifiers(const struct amdgpu_device *adev,
4121 uint64_t **mods, uint64_t *size, uint64_t *capacity)
4123 int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
4125 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4126 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4127 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4128 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4129 AMD_FMT_MOD_SET(DCC, 1) |
4130 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4131 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4132 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
4134 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4135 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4136 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4137 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4138 AMD_FMT_MOD_SET(DCC, 1) |
4139 AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4140 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4141 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4142 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
4144 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4145 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4146 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4147 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
4149 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4150 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4151 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
4152 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
4155 /* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
4156 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4157 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
4158 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4160 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4161 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
4162 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4166 add_gfx10_3_modifiers(const struct amdgpu_device *adev,
4167 uint64_t **mods, uint64_t *size, uint64_t *capacity)
4169 int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
4170 int pkrs = ilog2(adev->gfx.config.gb_addr_config_fields.num_pkrs);
4172 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4173 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4174 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4175 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4176 AMD_FMT_MOD_SET(PACKERS, pkrs) |
4177 AMD_FMT_MOD_SET(DCC, 1) |
4178 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4179 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4180 AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
4181 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B));
4183 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4184 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4185 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4186 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4187 AMD_FMT_MOD_SET(PACKERS, pkrs) |
4188 AMD_FMT_MOD_SET(DCC, 1) |
4189 AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4190 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4191 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4192 AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
4193 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B));
4195 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4196 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
4197 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4198 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4199 AMD_FMT_MOD_SET(PACKERS, pkrs));
4201 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4202 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4203 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
4204 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4205 AMD_FMT_MOD_SET(PACKERS, pkrs));
4207 /* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
4208 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4209 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
4210 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4212 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4213 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
4214 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
4218 get_plane_modifiers(const struct amdgpu_device *adev, unsigned int plane_type, uint64_t **mods)
4220 uint64_t size = 0, capacity = 128;
4223 /* We have not hooked up any pre-GFX9 modifiers. */
4224 if (adev->family < AMDGPU_FAMILY_AI)
4227 *mods = kmalloc(capacity * sizeof(uint64_t), GFP_KERNEL);
4229 if (plane_type == DRM_PLANE_TYPE_CURSOR) {
4230 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
4231 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
4232 return *mods ? 0 : -ENOMEM;
4235 switch (adev->family) {
4236 case AMDGPU_FAMILY_AI:
4237 case AMDGPU_FAMILY_RV:
4238 add_gfx9_modifiers(adev, mods, &size, &capacity);
4240 case AMDGPU_FAMILY_NV:
4241 case AMDGPU_FAMILY_VGH:
4242 if (adev->asic_type >= CHIP_SIENNA_CICHLID)
4243 add_gfx10_3_modifiers(adev, mods, &size, &capacity);
4245 add_gfx10_1_modifiers(adev, mods, &size, &capacity);
4249 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
4251 /* INVALID marks the end of the list. */
4252 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
4261 fill_gfx9_plane_attributes_from_modifiers(struct amdgpu_device *adev,
4262 const struct amdgpu_framebuffer *afb,
4263 const enum surface_pixel_format format,
4264 const enum dc_rotation_angle rotation,
4265 const struct plane_size *plane_size,
4266 union dc_tiling_info *tiling_info,
4267 struct dc_plane_dcc_param *dcc,
4268 struct dc_plane_address *address,
4269 const bool force_disable_dcc)
4271 const uint64_t modifier = afb->base.modifier;
4274 fill_gfx9_tiling_info_from_modifier(adev, tiling_info, modifier);
4275 tiling_info->gfx9.swizzle = modifier_gfx9_swizzle_mode(modifier);
4277 if (modifier_has_dcc(modifier) && !force_disable_dcc) {
4278 uint64_t dcc_address = afb->address + afb->base.offsets[1];
4281 dcc->meta_pitch = afb->base.pitches[1];
4282 dcc->independent_64b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_64B, modifier);
4284 address->grph.meta_addr.low_part = lower_32_bits(dcc_address);
4285 address->grph.meta_addr.high_part = upper_32_bits(dcc_address);
4288 ret = validate_dcc(adev, format, rotation, tiling_info, dcc, address, plane_size);
4296 fill_plane_buffer_attributes(struct amdgpu_device *adev,
4297 const struct amdgpu_framebuffer *afb,
4298 const enum surface_pixel_format format,
4299 const enum dc_rotation_angle rotation,
4300 const uint64_t tiling_flags,
4301 union dc_tiling_info *tiling_info,
4302 struct plane_size *plane_size,
4303 struct dc_plane_dcc_param *dcc,
4304 struct dc_plane_address *address,
4306 bool force_disable_dcc)
4308 const struct drm_framebuffer *fb = &afb->base;
4311 memset(tiling_info, 0, sizeof(*tiling_info));
4312 memset(plane_size, 0, sizeof(*plane_size));
4313 memset(dcc, 0, sizeof(*dcc));
4314 memset(address, 0, sizeof(*address));
4316 address->tmz_surface = tmz_surface;
4318 if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
4319 uint64_t addr = afb->address + fb->offsets[0];
4321 plane_size->surface_size.x = 0;
4322 plane_size->surface_size.y = 0;
4323 plane_size->surface_size.width = fb->width;
4324 plane_size->surface_size.height = fb->height;
4325 plane_size->surface_pitch =
4326 fb->pitches[0] / fb->format->cpp[0];
4328 address->type = PLN_ADDR_TYPE_GRAPHICS;
4329 address->grph.addr.low_part = lower_32_bits(addr);
4330 address->grph.addr.high_part = upper_32_bits(addr);
4331 } else if (format < SURFACE_PIXEL_FORMAT_INVALID) {
4332 uint64_t luma_addr = afb->address + fb->offsets[0];
4333 uint64_t chroma_addr = afb->address + fb->offsets[1];
4335 plane_size->surface_size.x = 0;
4336 plane_size->surface_size.y = 0;
4337 plane_size->surface_size.width = fb->width;
4338 plane_size->surface_size.height = fb->height;
4339 plane_size->surface_pitch =
4340 fb->pitches[0] / fb->format->cpp[0];
4342 plane_size->chroma_size.x = 0;
4343 plane_size->chroma_size.y = 0;
4344 /* TODO: set these based on surface format */
4345 plane_size->chroma_size.width = fb->width / 2;
4346 plane_size->chroma_size.height = fb->height / 2;
4348 plane_size->chroma_pitch =
4349 fb->pitches[1] / fb->format->cpp[1];
4351 address->type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
4352 address->video_progressive.luma_addr.low_part =
4353 lower_32_bits(luma_addr);
4354 address->video_progressive.luma_addr.high_part =
4355 upper_32_bits(luma_addr);
4356 address->video_progressive.chroma_addr.low_part =
4357 lower_32_bits(chroma_addr);
4358 address->video_progressive.chroma_addr.high_part =
4359 upper_32_bits(chroma_addr);
4362 if (adev->family >= AMDGPU_FAMILY_AI) {
4363 ret = fill_gfx9_plane_attributes_from_modifiers(adev, afb, format,
4364 rotation, plane_size,
4371 fill_gfx8_tiling_info_from_flags(tiling_info, tiling_flags);
4378 fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
4379 bool *per_pixel_alpha, bool *global_alpha,
4380 int *global_alpha_value)
4382 *per_pixel_alpha = false;
4383 *global_alpha = false;
4384 *global_alpha_value = 0xff;
4386 if (plane_state->plane->type != DRM_PLANE_TYPE_OVERLAY)
4389 if (plane_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI) {
4390 static const uint32_t alpha_formats[] = {
4391 DRM_FORMAT_ARGB8888,
4392 DRM_FORMAT_RGBA8888,
4393 DRM_FORMAT_ABGR8888,
4395 uint32_t format = plane_state->fb->format->format;
4398 for (i = 0; i < ARRAY_SIZE(alpha_formats); ++i) {
4399 if (format == alpha_formats[i]) {
4400 *per_pixel_alpha = true;
4406 if (plane_state->alpha < 0xffff) {
4407 *global_alpha = true;
4408 *global_alpha_value = plane_state->alpha >> 8;
4413 fill_plane_color_attributes(const struct drm_plane_state *plane_state,
4414 const enum surface_pixel_format format,
4415 enum dc_color_space *color_space)
4419 *color_space = COLOR_SPACE_SRGB;
4421 /* DRM color properties only affect non-RGB formats. */
4422 if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
4425 full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE);
4427 switch (plane_state->color_encoding) {
4428 case DRM_COLOR_YCBCR_BT601:
4430 *color_space = COLOR_SPACE_YCBCR601;
4432 *color_space = COLOR_SPACE_YCBCR601_LIMITED;
4435 case DRM_COLOR_YCBCR_BT709:
4437 *color_space = COLOR_SPACE_YCBCR709;
4439 *color_space = COLOR_SPACE_YCBCR709_LIMITED;
4442 case DRM_COLOR_YCBCR_BT2020:
4444 *color_space = COLOR_SPACE_2020_YCBCR;
4457 fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
4458 const struct drm_plane_state *plane_state,
4459 const uint64_t tiling_flags,
4460 struct dc_plane_info *plane_info,
4461 struct dc_plane_address *address,
4463 bool force_disable_dcc)
4465 const struct drm_framebuffer *fb = plane_state->fb;
4466 const struct amdgpu_framebuffer *afb =
4467 to_amdgpu_framebuffer(plane_state->fb);
4468 struct drm_format_name_buf format_name;
4471 memset(plane_info, 0, sizeof(*plane_info));
4473 switch (fb->format->format) {
4475 plane_info->format =
4476 SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
4478 case DRM_FORMAT_RGB565:
4479 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
4481 case DRM_FORMAT_XRGB8888:
4482 case DRM_FORMAT_ARGB8888:
4483 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
4485 case DRM_FORMAT_XRGB2101010:
4486 case DRM_FORMAT_ARGB2101010:
4487 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
4489 case DRM_FORMAT_XBGR2101010:
4490 case DRM_FORMAT_ABGR2101010:
4491 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
4493 case DRM_FORMAT_XBGR8888:
4494 case DRM_FORMAT_ABGR8888:
4495 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888;
4497 case DRM_FORMAT_NV21:
4498 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
4500 case DRM_FORMAT_NV12:
4501 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
4503 case DRM_FORMAT_P010:
4504 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb;
4506 case DRM_FORMAT_XRGB16161616F:
4507 case DRM_FORMAT_ARGB16161616F:
4508 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F;
4510 case DRM_FORMAT_XBGR16161616F:
4511 case DRM_FORMAT_ABGR16161616F:
4512 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F;
4516 "Unsupported screen format %s\n",
4517 drm_get_format_name(fb->format->format, &format_name));
4521 switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
4522 case DRM_MODE_ROTATE_0:
4523 plane_info->rotation = ROTATION_ANGLE_0;
4525 case DRM_MODE_ROTATE_90:
4526 plane_info->rotation = ROTATION_ANGLE_90;
4528 case DRM_MODE_ROTATE_180:
4529 plane_info->rotation = ROTATION_ANGLE_180;
4531 case DRM_MODE_ROTATE_270:
4532 plane_info->rotation = ROTATION_ANGLE_270;
4535 plane_info->rotation = ROTATION_ANGLE_0;
4539 plane_info->visible = true;
4540 plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE;
4542 plane_info->layer_index = 0;
4544 ret = fill_plane_color_attributes(plane_state, plane_info->format,
4545 &plane_info->color_space);
4549 ret = fill_plane_buffer_attributes(adev, afb, plane_info->format,
4550 plane_info->rotation, tiling_flags,
4551 &plane_info->tiling_info,
4552 &plane_info->plane_size,
4553 &plane_info->dcc, address, tmz_surface,
4558 fill_blending_from_plane_state(
4559 plane_state, &plane_info->per_pixel_alpha,
4560 &plane_info->global_alpha, &plane_info->global_alpha_value);
4565 static int fill_dc_plane_attributes(struct amdgpu_device *adev,
4566 struct dc_plane_state *dc_plane_state,
4567 struct drm_plane_state *plane_state,
4568 struct drm_crtc_state *crtc_state)
4570 struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
4571 struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)plane_state->fb;
4572 struct dc_scaling_info scaling_info;
4573 struct dc_plane_info plane_info;
4575 bool force_disable_dcc = false;
4577 ret = fill_dc_scaling_info(plane_state, &scaling_info);
4581 dc_plane_state->src_rect = scaling_info.src_rect;
4582 dc_plane_state->dst_rect = scaling_info.dst_rect;
4583 dc_plane_state->clip_rect = scaling_info.clip_rect;
4584 dc_plane_state->scaling_quality = scaling_info.scaling_quality;
4586 force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
4587 ret = fill_dc_plane_info_and_addr(adev, plane_state,
4590 &dc_plane_state->address,
4596 dc_plane_state->format = plane_info.format;
4597 dc_plane_state->color_space = plane_info.color_space;
4598 dc_plane_state->format = plane_info.format;
4599 dc_plane_state->plane_size = plane_info.plane_size;
4600 dc_plane_state->rotation = plane_info.rotation;
4601 dc_plane_state->horizontal_mirror = plane_info.horizontal_mirror;
4602 dc_plane_state->stereo_format = plane_info.stereo_format;
4603 dc_plane_state->tiling_info = plane_info.tiling_info;
4604 dc_plane_state->visible = plane_info.visible;
4605 dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha;
4606 dc_plane_state->global_alpha = plane_info.global_alpha;
4607 dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
4608 dc_plane_state->dcc = plane_info.dcc;
4609 dc_plane_state->layer_index = plane_info.layer_index; // Always returns 0
4612 * Always set input transfer function, since plane state is refreshed
4615 ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state, dc_plane_state);
4622 static void update_stream_scaling_settings(const struct drm_display_mode *mode,
4623 const struct dm_connector_state *dm_state,
4624 struct dc_stream_state *stream)
4626 enum amdgpu_rmx_type rmx_type;
4628 struct rect src = { 0 }; /* viewport in composition space*/
4629 struct rect dst = { 0 }; /* stream addressable area */
4631 /* no mode. nothing to be done */
4635 /* Full screen scaling by default */
4636 src.width = mode->hdisplay;
4637 src.height = mode->vdisplay;
4638 dst.width = stream->timing.h_addressable;
4639 dst.height = stream->timing.v_addressable;
4642 rmx_type = dm_state->scaling;
4643 if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
4644 if (src.width * dst.height <
4645 src.height * dst.width) {
4646 /* height needs less upscaling/more downscaling */
4647 dst.width = src.width *
4648 dst.height / src.height;
4650 /* width needs less upscaling/more downscaling */
4651 dst.height = src.height *
4652 dst.width / src.width;
4654 } else if (rmx_type == RMX_CENTER) {
4658 dst.x = (stream->timing.h_addressable - dst.width) / 2;
4659 dst.y = (stream->timing.v_addressable - dst.height) / 2;
4661 if (dm_state->underscan_enable) {
4662 dst.x += dm_state->underscan_hborder / 2;
4663 dst.y += dm_state->underscan_vborder / 2;
4664 dst.width -= dm_state->underscan_hborder;
4665 dst.height -= dm_state->underscan_vborder;
4672 DRM_DEBUG_DRIVER("Destination Rectangle x:%d y:%d width:%d height:%d\n",
4673 dst.x, dst.y, dst.width, dst.height);
/*
 * Map the connector's EDID-reported bpc to a dc_color_depth value.
 *
 * For YCbCr 4:2:0 sinks the depth is first capped according to the
 * HDMI 2.0 HF-VSDB deep-color bits, then optionally capped by the
 * user-requested max bpc, and finally rounded down to an even value
 * before being translated to the COLOR_DEPTH_* enum.
 */
4677 static enum dc_color_depth
4678 convert_color_depth_from_display_info(const struct drm_connector *connector,
4679 bool is_y420, int requested_bpc)
4686 /* Cap display bpc based on HDMI 2.0 HF-VSDB */
4687 if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48)
4689 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36)
4691 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30)
4694 bpc = (uint8_t)connector->display_info.bpc;
4695 /* Assume 8 bpc by default if no bpc is specified. */
4696 bpc = bpc ? bpc : 8;
4699 if (requested_bpc > 0) {
4701 * Cap display bpc based on the user requested value.
4703 * The value for state->max_bpc may not correctly updated
4704 * depending on when the connector gets added to the state
4705 * or if this was called outside of atomic check, so it
4706 * can't be used directly.
4708 bpc = min_t(u8, bpc, requested_bpc);
4710 /* Round down to the nearest even number. */
4711 bpc = bpc - (bpc & 1);
/* Translate the final bpc into the dc_color_depth enum. */
4717 * Temporary Work around, DRM doesn't parse color depth for
4718 * EDID revision before 1.4
4719 * TODO: Fix edid parsing
4721 return COLOR_DEPTH_888;
4723 return COLOR_DEPTH_666;
4725 return COLOR_DEPTH_888;
4727 return COLOR_DEPTH_101010;
4729 return COLOR_DEPTH_121212;
4731 return COLOR_DEPTH_141414;
4733 return COLOR_DEPTH_161616;
4735 return COLOR_DEPTH_UNDEFINED;
/*
 * Convert DRM's picture_aspect_ratio to DC's dc_aspect_ratio.
 * Safe as a direct cast because both enums follow the HDMI spec ordering.
 */
4739 static enum dc_aspect_ratio
4740 get_aspect_ratio(const struct drm_display_mode *mode_in)
4742 /* 1-1 mapping, since both enums follow the HDMI spec. */
4743 return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio;
/*
 * Pick the output color space from the CRTC timing: RGB modes use sRGB,
 * YCbCr modes use BT.709 above the HDTV/SDTV pixel-clock split and
 * BT.601 below it, with the limited-range variants for Y-only signals.
 */
4746 static enum dc_color_space
4747 get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
4749 enum dc_color_space color_space = COLOR_SPACE_SRGB;
4751 switch (dc_crtc_timing->pixel_encoding) {
4752 case PIXEL_ENCODING_YCBCR422:
4753 case PIXEL_ENCODING_YCBCR444:
4754 case PIXEL_ENCODING_YCBCR420:
4757 * 27030khz is the separation point between HDTV and SDTV
4758 * according to HDMI spec, we use YCbCr709 and YCbCr601
/* pix_clk_100hz is in units of 100 Hz, so 270300 == 27.03 MHz. */
4761 if (dc_crtc_timing->pix_clk_100hz > 270300) {
4762 if (dc_crtc_timing->flags.Y_ONLY)
4764 COLOR_SPACE_YCBCR709_LIMITED;
4766 color_space = COLOR_SPACE_YCBCR709;
4768 if (dc_crtc_timing->flags.Y_ONLY)
4770 COLOR_SPACE_YCBCR601_LIMITED;
4772 color_space = COLOR_SPACE_YCBCR601;
4777 case PIXEL_ENCODING_RGB:
4778 color_space = COLOR_SPACE_SRGB;
/*
 * Reduce the timing's display color depth, one step at a time, until the
 * HDMI-normalized pixel clock fits within the sink's max TMDS clock.
 * Returns true (via the in-loop success path) once a depth fits; the
 * loop gives up when depth would fall to COLOR_DEPTH_666.
 */
4789 static bool adjust_colour_depth_from_display_info(
4790 struct dc_crtc_timing *timing_out,
4791 const struct drm_display_info *info)
4793 enum dc_color_depth depth = timing_out->display_color_depth;
/* Convert pix_clk_100hz (units of 100 Hz) to kHz for the TMDS comparison. */
4796 normalized_clk = timing_out->pix_clk_100hz / 10;
4797 /* YCbCr 4:2:0 requires additional adjustment of 1/2 */
4798 if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
4799 normalized_clk /= 2;
4800 /* Adjusting pix clock following on HDMI spec based on colour depth */
4802 case COLOR_DEPTH_888:
4804 case COLOR_DEPTH_101010:
4805 normalized_clk = (normalized_clk * 30) / 24;
4807 case COLOR_DEPTH_121212:
4808 normalized_clk = (normalized_clk * 36) / 24;
4810 case COLOR_DEPTH_161616:
4811 normalized_clk = (normalized_clk * 48) / 24;
4814 /* The above depths are the only ones valid for HDMI. */
4817 if (normalized_clk <= info->max_tmds_clock) {
4818 timing_out->display_color_depth = depth;
4821 } while (--depth > COLOR_DEPTH_666);
/*
 * Populate stream->timing (and output color space / transfer function)
 * from a DRM display mode and connector info:
 *  - choose the pixel encoding (forced or EDID-driven YCbCr420/444, else RGB),
 *  - derive color depth, VIC/HDMI-VIC and sync polarities (copied from
 *    old_stream when one is supplied, to keep refresh-compatible timings),
 *  - copy all crtc_* timing fields and the pixel clock,
 *  - for HDMI, retry with YCbCr 4:2:0 if the chosen depth exceeds the
 *    sink's TMDS limit.
 */
4825 static void fill_stream_properties_from_drm_display_mode(
4826 struct dc_stream_state *stream,
4827 const struct drm_display_mode *mode_in,
4828 const struct drm_connector *connector,
4829 const struct drm_connector_state *connector_state,
4830 const struct dc_stream_state *old_stream,
4833 struct dc_crtc_timing *timing_out = &stream->timing;
4834 const struct drm_display_info *info = &connector->display_info;
4835 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
4836 struct hdmi_vendor_infoframe hv_frame;
4837 struct hdmi_avi_infoframe avi_frame;
4839 memset(&hv_frame, 0, sizeof(hv_frame));
4840 memset(&avi_frame, 0, sizeof(avi_frame));
4842 timing_out->h_border_left = 0;
4843 timing_out->h_border_right = 0;
4844 timing_out->v_border_top = 0;
4845 timing_out->v_border_bottom = 0;
4846 /* TODO: un-hardcode */
4847 if (drm_mode_is_420_only(info, mode_in)
4848 && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
4849 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
4850 else if (drm_mode_is_420_also(info, mode_in)
4851 && aconnector->force_yuv420_output)
4852 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
4853 else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444)
4854 && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
4855 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
4857 timing_out->pixel_encoding = PIXEL_ENCODING_RGB;
4859 timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
4860 timing_out->display_color_depth = convert_color_depth_from_display_info(
4862 (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420),
4864 timing_out->scan_type = SCANNING_TYPE_NODATA;
4865 timing_out->hdmi_vic = 0;
/* With an old_stream, reuse its VIC and sync polarities so the new
 * timings stay compatible; otherwise derive them from the DRM mode. */
4868 timing_out->vic = old_stream->timing.vic;
4869 timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY;
4870 timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY;
4872 timing_out->vic = drm_match_cea_mode(mode_in);
4873 if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
4874 timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
4875 if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
4876 timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
/* For HDMI, let the AVI/vendor infoframes supply the (HDMI-)VIC codes. */
4879 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
4880 drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in);
4881 timing_out->vic = avi_frame.video_code;
4882 drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in);
4883 timing_out->hdmi_vic = hv_frame.vic;
4886 timing_out->h_addressable = mode_in->crtc_hdisplay;
4887 timing_out->h_total = mode_in->crtc_htotal;
4888 timing_out->h_sync_width =
4889 mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
4890 timing_out->h_front_porch =
4891 mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
4892 timing_out->v_total = mode_in->crtc_vtotal;
4893 timing_out->v_addressable = mode_in->crtc_vdisplay;
4894 timing_out->v_front_porch =
4895 mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
4896 timing_out->v_sync_width =
4897 mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
/* crtc_clock is kHz; pix_clk_100hz is in units of 100 Hz. */
4898 timing_out->pix_clk_100hz = mode_in->crtc_clock * 10;
4899 timing_out->aspect_ratio = get_aspect_ratio(mode_in);
4901 stream->output_color_space = get_output_color_space(timing_out);
4903 stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
4904 stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
4905 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
/* If the depth doesn't fit the TMDS limit and 4:2:0 is also supported,
 * fall back to 4:2:0 (halving the normalized clock) and retry. */
4906 if (!adjust_colour_depth_from_display_info(timing_out, info) &&
4907 drm_mode_is_420_also(info, mode_in) &&
4908 timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) {
4909 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
4910 adjust_colour_depth_from_display_info(timing_out, info);
/*
 * Copy audio capabilities (manufacturer/product IDs, display name, audio
 * modes, speaker allocation) from the sink's parsed EDID caps into the
 * stream's audio_info. Audio mode details are only valid for CEA rev >= 3.
 */
4915 static void fill_audio_info(struct audio_info *audio_info,
4916 const struct drm_connector *drm_connector,
4917 const struct dc_sink *dc_sink)
4920 int cea_revision = 0;
4921 const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;
4923 audio_info->manufacture_id = edid_caps->manufacturer_id;
4924 audio_info->product_id = edid_caps->product_id;
4926 cea_revision = drm_connector->display_info.cea_rev;
4928 strscpy(audio_info->display_name,
4929 edid_caps->display_name,
4930 AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);
4932 if (cea_revision >= 3) {
4933 audio_info->mode_count = edid_caps->audio_mode_count;
/* Translate each EDID short audio descriptor into DC's audio mode. */
4935 for (i = 0; i < audio_info->mode_count; ++i) {
4936 audio_info->modes[i].format_code =
4937 (enum audio_format_code)
4938 (edid_caps->audio_modes[i].format_code);
4939 audio_info->modes[i].channel_count =
4940 edid_caps->audio_modes[i].channel_count;
4941 audio_info->modes[i].sample_rates.all =
4942 edid_caps->audio_modes[i].sample_rate;
4943 audio_info->modes[i].sample_size =
4944 edid_caps->audio_modes[i].sample_size;
4948 audio_info->flags.all = edid_caps->speaker_flags;
4950 /* TODO: We only check for the progressive mode, check for interlace mode too */
4951 if (drm_connector->latency_present[0]) {
4952 audio_info->video_latency = drm_connector->video_latency[0];
4953 audio_info->audio_latency = drm_connector->audio_latency[0];
4956 /* TODO: For DP, video and audio latency should be calculated from DPCD caps */
/*
 * Copy every crtc_* (hardware) timing field from src_mode to dst_mode,
 * leaving the user-visible mode fields of dst_mode untouched.
 */
4961 copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
4962 struct drm_display_mode *dst_mode)
4964 dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
4965 dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
4966 dst_mode->crtc_clock = src_mode->crtc_clock;
4967 dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
4968 dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
4969 dst_mode->crtc_hsync_start = src_mode->crtc_hsync_start;
4970 dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
4971 dst_mode->crtc_htotal = src_mode->crtc_htotal;
4972 dst_mode->crtc_hskew = src_mode->crtc_hskew;
4973 dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
4974 dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
4975 dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
4976 dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
4977 dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
/*
 * When scaling is enabled, or when the requested mode matches the native
 * mode's clock and totals, overwrite drm_mode's hardware (crtc_*) timing
 * with the native mode's so the panel keeps its native timings.
 * Otherwise the mode is left untouched.
 */
4981 decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
4982 const struct drm_display_mode *native_mode,
4985 if (scale_enabled) {
4986 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
4987 } else if (native_mode->clock == drm_mode->clock &&
4988 native_mode->htotal == drm_mode->htotal &&
4989 native_mode->vtotal == drm_mode->vtotal) {
4990 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
4992 /* no scaling nor amdgpu inserted, no need to patch */
/*
 * Create a virtual (fake) dc_sink on the connector's link, used when no
 * real sink is attached. Returns NULL (via the error path) when
 * dc_sink_create() fails.
 */
4996 static struct dc_sink *
4997 create_fake_sink(struct amdgpu_dm_connector *aconnector)
4999 struct dc_sink_init_data sink_init_data = { 0 };
5000 struct dc_sink *sink = NULL;
5001 sink_init_data.link = aconnector->dc_link;
5002 sink_init_data.sink_signal = aconnector->dc_link->connector_signal;
5004 sink = dc_sink_create(&sink_init_data);
5006 DRM_ERROR("Failed to create sink!\n");
/* Mark the sink as virtual so DC treats it as a headless/fake target. */
5009 sink->sink_signal = SIGNAL_TYPE_VIRTUAL;
/*
 * For streams that take part in multi-display sync, program the CRTC
 * reset trigger to fire on the VSYNC rising edge, delayed to next line.
 */
5014 static void set_multisync_trigger_params(
5015 struct dc_stream_state *stream)
5017 if (stream->triggered_crtc_reset.enabled) {
5018 stream->triggered_crtc_reset.event = CRTC_EVENT_VSYNC_RISING;
5019 stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_LINE;
/*
 * Pick the multisync master: the enabled stream with the highest refresh
 * rate (pixel clock / (h_total * v_total)). Every other synced stream's
 * CRTC-reset event source is then pointed at that master stream.
 */
5023 static void set_master_stream(struct dc_stream_state *stream_set[],
5026 int j, highest_rfr = 0, master_stream = 0;
5028 for (j = 0; j < stream_count; j++) {
5029 if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
5030 int refresh_rate = 0;
/* pix_clk_100hz * 100 => Hz; divided by total pixels => frames/sec. */
5032 refresh_rate = (stream_set[j]->timing.pix_clk_100hz*100)/
5033 (stream_set[j]->timing.h_total*stream_set[j]->timing.v_total);
5034 if (refresh_rate > highest_rfr) {
5035 highest_rfr = refresh_rate;
5040 for (j = 0; j < stream_count; j++) {
5042 stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
/*
 * Enable per-frame CRTC synchronization across all streams in the DC
 * state: configure each stream's trigger parameters, then elect the
 * master stream. No-op with fewer than two streams.
 */
5046 static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
5050 if (context->stream_count < 2)
5052 for (i = 0; i < context->stream_count ; i++) {
5053 if (!context->streams[i])
5056 * TODO: add a function to read AMD VSDB bits and set
5057 * crtc_sync_master.multi_sync_enabled flag
5058 * For now it's set to false
5060 set_multisync_trigger_params(context->streams[i]);
5062 set_master_stream(context->streams, context->stream_count);
/*
 * Build a dc_stream_state for the connector's sink from a DRM mode.
 *
 * Uses a fake sink when none is attached, fills the stream timing
 * (reusing old_stream's VIC/polarities when scaling kept the refresh
 * rate), configures DSC from DPCD caps and debugfs overrides (DCN only),
 * applies scaling/audio settings, and builds HDMI/PSR info packets.
 * The local sink reference is released before returning; the returned
 * stream holds its own reference. Returns NULL on failure.
 */
5065 static struct dc_stream_state *
5066 create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
5067 const struct drm_display_mode *drm_mode,
5068 const struct dm_connector_state *dm_state,
5069 const struct dc_stream_state *old_stream,
5072 struct drm_display_mode *preferred_mode = NULL;
5073 struct drm_connector *drm_connector;
5074 const struct drm_connector_state *con_state =
5075 dm_state ? &dm_state->base : NULL;
5076 struct dc_stream_state *stream = NULL;
5077 struct drm_display_mode mode = *drm_mode;
5078 bool native_mode_found = false;
5079 bool scale = dm_state ? (dm_state->scaling != RMX_OFF) : false;
5081 int preferred_refresh = 0;
5082 #if defined(CONFIG_DRM_AMD_DC_DCN)
5083 struct dsc_dec_dpcd_caps dsc_caps;
5084 uint32_t link_bandwidth_kbps;
5086 struct dc_sink *sink = NULL;
5087 if (aconnector == NULL) {
5088 DRM_ERROR("aconnector is NULL!\n");
5092 drm_connector = &aconnector->base;
5094 if (!aconnector->dc_sink) {
/* Headless/forced-on case: synthesize a virtual sink. */
5095 sink = create_fake_sink(aconnector);
5099 sink = aconnector->dc_sink;
5100 dc_sink_retain(sink);
5103 stream = dc_create_stream_for_sink(sink);
5105 if (stream == NULL) {
5106 DRM_ERROR("Failed to create stream for sink!\n");
5110 stream->dm_stream_context = aconnector;
5112 stream->timing.flags.LTE_340MCSC_SCRAMBLE =
5113 drm_connector->display_info.hdmi.scdc.scrambling.low_rates;
/* Find the native (preferred) mode; fall back to the first mode. */
5115 list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
5116 /* Search for preferred mode */
5117 if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
5118 native_mode_found = true;
5122 if (!native_mode_found)
5123 preferred_mode = list_first_entry_or_null(
5124 &aconnector->base.modes,
5125 struct drm_display_mode,
5128 mode_refresh = drm_mode_vrefresh(&mode);
5130 if (preferred_mode == NULL) {
5132 * This may not be an error, the use case is when we have no
5133 * usermode calls to reset and set mode upon hotplug. In this
5134 * case, we call set mode ourselves to restore the previous mode
5135 * and the modelist may not be filled in in time.
5137 DRM_DEBUG_DRIVER("No preferred mode found\n");
5139 decide_crtc_timing_for_drm_display_mode(
5140 &mode, preferred_mode,
5141 dm_state ? (dm_state->scaling != RMX_OFF) : false);
5142 preferred_refresh = drm_mode_vrefresh(preferred_mode);
5146 drm_mode_set_crtcinfo(&mode, 0);
5149 * If scaling is enabled and refresh rate didn't change
5150 * we copy the vic and polarities of the old timings
5152 if (!scale || mode_refresh != preferred_refresh)
5153 fill_stream_properties_from_drm_display_mode(stream,
5154 &mode, &aconnector->base, con_state, NULL, requested_bpc)
5156 fill_stream_properties_from_drm_display_mode(stream,
5157 &mode, &aconnector->base, con_state, old_stream, requested_bpc);
5159 stream->timing.flags.DSC = 0;
5161 if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
5162 #if defined(CONFIG_DRM_AMD_DC_DCN)
/* Read the sink's DSC decoder caps from DPCD and size the link. */
5163 dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
5164 aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
5165 aconnector->dc_link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw,
5167 link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
5168 dc_link_get_link_cap(aconnector->dc_link));
5170 if (aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE && dsc_caps.is_dsc_supported) {
5171 /* Set DSC policy according to dsc_clock_en */
5172 dc_dsc_policy_set_enable_dsc_when_not_needed(
5173 aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE);
5175 if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
5177 aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
5179 link_bandwidth_kbps,
5181 &stream->timing.dsc_cfg))
5182 stream->timing.flags.DSC = 1;
5183 /* Overwrite the stream flag if DSC is enabled through debugfs */
5184 if (aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE)
5185 stream->timing.flags.DSC = 1;
/* debugfs overrides for slice counts and bpp, only when DSC is on. */
5187 if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_h)
5188 stream->timing.dsc_cfg.num_slices_h = aconnector->dsc_settings.dsc_num_slices_h;
5190 if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_v)
5191 stream->timing.dsc_cfg.num_slices_v = aconnector->dsc_settings.dsc_num_slices_v;
5193 if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_bits_per_pixel)
5194 stream->timing.dsc_cfg.bits_per_pixel = aconnector->dsc_settings.dsc_bits_per_pixel;
5199 update_stream_scaling_settings(&mode, dm_state, stream);
5202 &stream->audio_info,
5206 update_stream_signal(stream, sink);
5208 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
5209 mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket);
5211 if (stream->link->psr_settings.psr_feature_enabled) {
5213 // should decide stream support vsc sdp colorimetry capability
5214 // before building vsc info packet
5216 stream->use_vsc_sdp_for_colorimetry = false;
5217 if (aconnector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
5218 stream->use_vsc_sdp_for_colorimetry =
5219 aconnector->dc_sink->is_vsc_sdp_colorimetry_supported;
5221 if (stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED)
5222 stream->use_vsc_sdp_for_colorimetry = true;
5224 mod_build_vsc_infopacket(stream, &stream->vsc_infopacket);
/* Drop the local reference; the stream keeps its own sink reference. */
5227 dc_sink_release(sink);
/* drm_crtc_funcs.destroy: release DRM core resources for this CRTC. */
5232 static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
5234 drm_crtc_cleanup(crtc);
/*
 * drm_crtc_funcs.atomic_destroy_state: drop the dc_stream reference held
 * by the DM CRTC state, then free the base atomic state.
 */
5238 static void dm_crtc_destroy_state(struct drm_crtc *crtc,
5239 struct drm_crtc_state *state)
5241 struct dm_crtc_state *cur = to_dm_crtc_state(state);
5243 /* TODO Destroy dc_stream objects are stream object is flattened */
5245 dc_stream_release(cur->stream);
5248 __drm_atomic_helper_crtc_destroy_state(state);
/*
 * drm_crtc_funcs.reset: destroy any existing state and install a fresh,
 * zeroed dm_crtc_state. WARNs (and bails) if the allocation fails.
 */
5254 static void dm_crtc_reset_state(struct drm_crtc *crtc)
5256 struct dm_crtc_state *state;
5259 dm_crtc_destroy_state(crtc, crtc->state);
5261 state = kzalloc(sizeof(*state), GFP_KERNEL);
5262 if (WARN_ON(!state))
5265 __drm_atomic_helper_crtc_reset(crtc, &state->base);
/*
 * drm_crtc_funcs.atomic_duplicate_state: allocate a new dm_crtc_state,
 * duplicate the base DRM state, take a reference on the current
 * dc_stream, and copy the DM-private fields (VRR, ABM, freesync, CRC,
 * color-management flags). Returns NULL on allocation failure.
 */
5268 static struct drm_crtc_state *
5269 dm_crtc_duplicate_state(struct drm_crtc *crtc)
5271 struct dm_crtc_state *state, *cur;
5273 cur = to_dm_crtc_state(crtc->state);
5275 if (WARN_ON(!crtc->state))
5278 state = kzalloc(sizeof(*state), GFP_KERNEL);
5282 __drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
/* The duplicated state shares the stream; take an extra reference. */
5285 state->stream = cur->stream;
5286 dc_stream_retain(state->stream);
5289 state->active_planes = cur->active_planes;
5290 state->vrr_infopacket = cur->vrr_infopacket;
5291 state->abm_level = cur->abm_level;
5292 state->vrr_supported = cur->vrr_supported;
5293 state->freesync_config = cur->freesync_config;
5294 state->crc_src = cur->crc_src;
5295 state->cm_has_degamma = cur->cm_has_degamma;
5296 state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb;
5298 /* TODO Duplicate dc_stream after objects are stream object is flattened */
5300 return &state->base;
/*
 * Enable/disable the VUPDATE interrupt for this CRTC's OTG instance.
 * Returns 0 on success, -EBUSY if DC rejects the interrupt change.
 */
5303 static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
5305 enum dc_irq_source irq_source;
5306 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
5307 struct amdgpu_device *adev = drm_to_adev(crtc->dev);
/* IRQ sources are laid out per-OTG; offset by the CRTC's OTG instance. */
5310 irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst;
5312 rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
5314 DRM_DEBUG_DRIVER("crtc %d - vupdate irq %sabling: r=%d\n",
5315 acrtc->crtc_id, enable ? "en" : "dis", rc);
/*
 * Enable/disable the VBLANK interrupt for this CRTC. When enabling in
 * VRR mode the VUPDATE interrupt is enabled alongside; when disabling,
 * VUPDATE is always turned off. Returns 0 or -EBUSY.
 */
5319 static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
5321 enum dc_irq_source irq_source;
5322 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
5323 struct amdgpu_device *adev = drm_to_adev(crtc->dev);
5324 struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
5328 /* vblank irq on -> Only need vupdate irq in vrr mode */
5329 if (amdgpu_dm_vrr_active(acrtc_state))
5330 rc = dm_set_vupdate_irq(crtc, true);
5332 /* vblank irq off -> vupdate irq off */
5333 rc = dm_set_vupdate_irq(crtc, false);
5339 irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
5340 return dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
/* drm_crtc_funcs.enable_vblank: thin wrapper enabling the vblank IRQ. */
5343 static int dm_enable_vblank(struct drm_crtc *crtc)
5345 return dm_set_vblank(crtc, true);
/* drm_crtc_funcs.disable_vblank: thin wrapper disabling the vblank IRQ. */
5348 static void dm_disable_vblank(struct drm_crtc *crtc)
5350 dm_set_vblank(crtc, false);
5353 /* Implemented only the options currently available for the driver */
5354 static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
5355 .reset = dm_crtc_reset_state,
5356 .destroy = amdgpu_dm_crtc_destroy,
5357 .gamma_set = drm_atomic_helper_legacy_gamma_set,
5358 .set_config = drm_atomic_helper_set_config,
5359 .page_flip = drm_atomic_helper_page_flip,
5360 .atomic_duplicate_state = dm_crtc_duplicate_state,
5361 .atomic_destroy_state = dm_crtc_destroy_state,
5362 .set_crc_source = amdgpu_dm_crtc_set_crc_source,
5363 .verify_crc_source = amdgpu_dm_crtc_verify_crc_source,
5364 .get_crc_sources = amdgpu_dm_crtc_get_crc_sources,
5365 .get_vblank_counter = amdgpu_get_vblank_counter_kms,
5366 .enable_vblank = dm_enable_vblank,
5367 .disable_vblank = dm_disable_vblank,
5368 .get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
/*
 * drm_connector_funcs.detect: report connection status based on whether
 * a dc_sink exists (normal case) or on the user's force setting when the
 * connector state is forced or fake-enabled. Called from user-mode ioctl
 * context, never from the HPD IRQ handler.
 */
5371 static enum drm_connector_status
5372 amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
5375 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5379 * 1. This interface is NOT called in context of HPD irq.
5380 * 2. This interface *is called* in context of user-mode ioctl. Which
5381 * makes it a bad place for *any* MST-related activity.
5384 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
5385 !aconnector->fake_enable)
5386 connected = (aconnector->dc_sink != NULL);
5388 connected = (aconnector->base.force == DRM_FORCE_ON);
5390 update_subconnector_property(aconnector);
5392 return (connected ? connector_status_connected :
5393 connector_status_disconnected);
/*
 * drm_connector_funcs.atomic_set_property: store a driver-private
 * connector property (scaling mode, underscan enable/borders, ABM level)
 * into the DM connector state for the pending commit.
 */
5396 int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
5397 struct drm_connector_state *connector_state,
5398 struct drm_property *property,
5401 struct drm_device *dev = connector->dev;
5402 struct amdgpu_device *adev = drm_to_adev(dev);
5403 struct dm_connector_state *dm_old_state =
5404 to_dm_connector_state(connector->state);
5405 struct dm_connector_state *dm_new_state =
5406 to_dm_connector_state(connector_state);
5410 if (property == dev->mode_config.scaling_mode_property) {
5411 enum amdgpu_rmx_type rmx_type;
/* Translate the DRM scaling-mode value into the driver's RMX enum. */
5414 case DRM_MODE_SCALE_CENTER:
5415 rmx_type = RMX_CENTER;
5417 case DRM_MODE_SCALE_ASPECT:
5418 rmx_type = RMX_ASPECT;
5420 case DRM_MODE_SCALE_FULLSCREEN:
5421 rmx_type = RMX_FULL;
5423 case DRM_MODE_SCALE_NONE:
/* No change requested: nothing to write back. */
5429 if (dm_old_state->scaling == rmx_type)
5432 dm_new_state->scaling = rmx_type;
5434 } else if (property == adev->mode_info.underscan_hborder_property) {
5435 dm_new_state->underscan_hborder = val;
5437 } else if (property == adev->mode_info.underscan_vborder_property) {
5438 dm_new_state->underscan_vborder = val;
5440 } else if (property == adev->mode_info.underscan_property) {
5441 dm_new_state->underscan_enable = val;
5443 } else if (property == adev->mode_info.abm_level_property) {
5444 dm_new_state->abm_level = val;
/*
 * drm_connector_funcs.atomic_get_property: read back a driver-private
 * connector property (scaling mode, underscan settings, ABM level) from
 * the DM connector state. Mirror of atomic_set_property above.
 */
5451 int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
5452 const struct drm_connector_state *state,
5453 struct drm_property *property,
5456 struct drm_device *dev = connector->dev;
5457 struct amdgpu_device *adev = drm_to_adev(dev);
5458 struct dm_connector_state *dm_state =
5459 to_dm_connector_state(state);
5462 if (property == dev->mode_config.scaling_mode_property) {
5463 switch (dm_state->scaling) {
5465 *val = DRM_MODE_SCALE_CENTER;
5468 *val = DRM_MODE_SCALE_ASPECT;
5471 *val = DRM_MODE_SCALE_FULLSCREEN;
5475 *val = DRM_MODE_SCALE_NONE;
5479 } else if (property == adev->mode_info.underscan_hborder_property) {
5480 *val = dm_state->underscan_hborder;
5482 } else if (property == adev->mode_info.underscan_vborder_property) {
5483 *val = dm_state->underscan_vborder;
5485 } else if (property == adev->mode_info.underscan_property) {
5486 *val = dm_state->underscan_enable;
5488 } else if (property == adev->mode_info.abm_level_property) {
5489 *val = dm_state->abm_level;
/*
 * drm_connector_funcs.early_unregister: tear down the DP AUX channel
 * registered in late_register before the connector goes away.
 */
5496 static void amdgpu_dm_connector_unregister(struct drm_connector *connector)
5498 struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
5500 drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux);
/*
 * drm_connector_funcs.destroy: release everything owned by the DM
 * connector — MST topology manager, eDP/LVDS backlight device, emulated
 * and real dc_sink references, CEC registration, the i2c adapter, and
 * the DP AUX name — then clean up the base connector.
 */
5503 static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
5505 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5506 const struct dc_link *link = aconnector->dc_link;
5507 struct amdgpu_device *adev = drm_to_adev(connector->dev);
5508 struct amdgpu_display_manager *dm = &adev->dm;
5511 * Call only if mst_mgr was initialized before since it's not done
5512 * for all connector types.
5514 if (aconnector->mst_mgr.dev)
5515 drm_dp_mst_topology_mgr_destroy(&aconnector->mst_mgr);
5517 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
5518 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
/* Backlight is only registered for connected eDP/LVDS panels. */
5520 if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
5521 link->type != dc_connection_none &&
5522 dm->backlight_dev) {
5523 backlight_device_unregister(dm->backlight_dev);
5524 dm->backlight_dev = NULL;
5528 if (aconnector->dc_em_sink)
5529 dc_sink_release(aconnector->dc_em_sink);
5530 aconnector->dc_em_sink = NULL;
5531 if (aconnector->dc_sink)
5532 dc_sink_release(aconnector->dc_sink);
5533 aconnector->dc_sink = NULL;
5535 drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
5536 drm_connector_unregister(connector);
5537 drm_connector_cleanup(connector);
5538 if (aconnector->i2c) {
5539 i2c_del_adapter(&aconnector->i2c->base);
5540 kfree(aconnector->i2c);
5542 kfree(aconnector->dm_dp_aux.aux.name);
/*
 * drm_connector_funcs.reset: destroy the current connector state and
 * install fresh defaults (no scaling, no underscan, 8 bpc max, zero VCPI
 * slots; eDP panels additionally get the module's default ABM level).
 */
5547 void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
5549 struct dm_connector_state *state =
5550 to_dm_connector_state(connector->state);
5552 if (connector->state)
5553 __drm_atomic_helper_connector_destroy_state(connector->state);
5557 state = kzalloc(sizeof(*state), GFP_KERNEL);
5560 state->scaling = RMX_OFF;
5561 state->underscan_enable = false;
5562 state->underscan_hborder = 0;
5563 state->underscan_vborder = 0;
5564 state->base.max_requested_bpc = 8;
5565 state->vcpi_slots = 0;
5567 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
5568 state->abm_level = amdgpu_dm_abm_level;
5570 __drm_atomic_helper_connector_reset(connector, &state->base);
/*
 * drm_connector_funcs.atomic_duplicate_state: kmemdup the current DM
 * connector state, duplicate the base DRM state over it, and re-copy the
 * DM-private fields explicitly. Returns NULL on allocation failure.
 */
5574 struct drm_connector_state *
5575 amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
5577 struct dm_connector_state *state =
5578 to_dm_connector_state(connector->state);
5580 struct dm_connector_state *new_state =
5581 kmemdup(state, sizeof(*state), GFP_KERNEL);
5586 __drm_atomic_helper_connector_duplicate_state(connector, &new_state->base);
5588 new_state->freesync_capable = state->freesync_capable;
5589 new_state->abm_level = state->abm_level;
5590 new_state->scaling = state->scaling;
5591 new_state->underscan_enable = state->underscan_enable;
5592 new_state->underscan_hborder = state->underscan_hborder;
5593 new_state->underscan_vborder = state->underscan_vborder;
5594 new_state->vcpi_slots = state->vcpi_slots;
5595 new_state->pbn = state->pbn;
5596 return &new_state->base;
/*
 * drm_connector_funcs.late_register: register the DP AUX channel for
 * DP/eDP connectors and create the connector's debugfs entries (when
 * CONFIG_DEBUG_FS is enabled).
 */
5600 amdgpu_dm_connector_late_register(struct drm_connector *connector)
5602 struct amdgpu_dm_connector *amdgpu_dm_connector =
5603 to_amdgpu_dm_connector(connector);
5606 if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
5607 (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
5608 amdgpu_dm_connector->dm_dp_aux.aux.dev = connector->kdev;
5609 r = drm_dp_aux_register(&amdgpu_dm_connector->dm_dp_aux.aux);
5614 #if defined(CONFIG_DEBUG_FS)
5615 connector_debugfs_init(amdgpu_dm_connector);
/* Connector vtable wiring the DM implementations into the DRM core. */
5621 static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
5622 .reset = amdgpu_dm_connector_funcs_reset,
5623 .detect = amdgpu_dm_connector_detect,
5624 .fill_modes = drm_helper_probe_single_connector_modes,
5625 .destroy = amdgpu_dm_connector_destroy,
5626 .atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
5627 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
5628 .atomic_set_property = amdgpu_dm_connector_atomic_set_property,
5629 .atomic_get_property = amdgpu_dm_connector_atomic_get_property,
5630 .late_register = amdgpu_dm_connector_late_register,
5631 .early_unregister = amdgpu_dm_connector_unregister
/* drm_connector_helper get_modes: forward to the DM implementation. */
5634 static int get_modes(struct drm_connector *connector)
5636 return amdgpu_dm_connector_get_modes(connector);
/*
 * Create an emulated (remote) sink from the connector's firmware EDID
 * blob for forced-on connectors. Without an EDID blob the connector is
 * forced OFF instead. When the connector is forced ON, dc_sink points at
 * the real local sink if present, otherwise at the emulated sink.
 */
5639 static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
5641 struct dc_sink_init_data init_params = {
5642 .link = aconnector->dc_link,
5643 .sink_signal = SIGNAL_TYPE_VIRTUAL
5647 if (!aconnector->base.edid_blob_ptr) {
5648 DRM_ERROR("No EDID firmware found on connector: %s ,forcing to OFF!\n",
5649 aconnector->base.name);
5651 aconnector->base.force = DRM_FORCE_OFF;
5652 aconnector->base.override_edid = false;
5656 edid = (struct edid *) aconnector->base.edid_blob_ptr->data;
5658 aconnector->edid = edid;
5660 aconnector->dc_em_sink = dc_link_add_remote_sink(
5661 aconnector->dc_link,
/* EDID length: base block plus each extension block (128 bytes each). */
5663 (edid->extensions + 1) * EDID_LENGTH,
5666 if (aconnector->base.force == DRM_FORCE_ON) {
5667 aconnector->dc_sink = aconnector->dc_link->local_sink ?
5668 aconnector->dc_link->local_sink :
5669 aconnector->dc_em_sink;
5670 dc_sink_retain(aconnector->dc_sink);
/*
 * Handle headless boot with a force-enabled connector: seed DP links
 * with non-zero verified link caps (4 lanes @ HBR2) so the initial
 * modeset can proceed, then build the emulated EDID sink.
 */
5674 static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
5676 struct dc_link *link = (struct dc_link *)aconnector->dc_link;
5679 * In case of headless boot with force on for DP managed connector
5680 * Those settings have to be != 0 to get initial modeset
5682 if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
5683 link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
5684 link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
5688 aconnector->base.override_edid = true;
5689 create_eml_sink(aconnector);
/*
 * Create a stream for the sink and validate it with DC. If validation
 * fails, the stream is released and creation is retried with the bpc
 * lowered by 2 each time, down to a floor of 6 bpc. Returns the first
 * stream that validates, or NULL if none does.
 */
5692 static struct dc_stream_state *
5693 create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
5694 const struct drm_display_mode *drm_mode,
5695 const struct dm_connector_state *dm_state,
5696 const struct dc_stream_state *old_stream)
5698 struct drm_connector *connector = &aconnector->base;
5699 struct amdgpu_device *adev = drm_to_adev(connector->dev);
5700 struct dc_stream_state *stream;
5701 const struct drm_connector_state *drm_state = dm_state ? &dm_state->base : NULL;
5702 int requested_bpc = drm_state ? drm_state->max_requested_bpc : 8;
5703 enum dc_status dc_result = DC_OK;
5706 stream = create_stream_for_sink(aconnector, drm_mode,
5707 dm_state, old_stream,
5709 if (stream == NULL) {
5710 DRM_ERROR("Failed to create stream for sink!\n");
5714 dc_result = dc_validate_stream(adev->dm.dc, stream);
5716 if (dc_result != DC_OK) {
5717 DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d (%s)\n",
5722 dc_status_to_str(dc_result));
5724 dc_stream_release(stream);
5726 requested_bpc -= 2; /* lower bpc to retry validation */
5729 } while (stream == NULL && requested_bpc >= 6);
/*
 * drm_connector_helper mode_valid: reject interlaced/doublescan modes,
 * lazily build the emulated EDID sink for forced connectors, then accept
 * the mode only if a validated stream can be created for it.
 */
5734 enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
5735 struct drm_display_mode *mode)
5737 int result = MODE_ERROR;
5738 struct dc_sink *dc_sink;
5739 /* TODO: Unhardcode stream count */
5740 struct dc_stream_state *stream;
5741 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5743 if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
5744 (mode->flags & DRM_MODE_FLAG_DBLSCAN))
5748 * Only run this the first time mode_valid is called to initialize
5751 if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
5752 !aconnector->dc_em_sink)
5753 handle_edid_mgmt(aconnector);
5755 dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;
/* A sink is mandatory unless the user forced the connector on. */
5757 if (dc_sink == NULL && aconnector->base.force != DRM_FORCE_ON_DIGITAL &&
5758 aconnector->base.force != DRM_FORCE_ON) {
5759 DRM_ERROR("dc_sink is NULL!\n");
5763 stream = create_validate_stream_for_sink(aconnector, mode, NULL, NULL);
5765 dc_stream_release(stream);
5770 /* TODO: error handling*/
/*
 * Pack the connector state's HDR output metadata into a DC info packet:
 * build and serialize an HDMI DRM (Dynamic Range & Mastering) infoframe,
 * then wrap its 26-byte static-metadata payload with either an HDMI
 * infoframe header or a DP/eDP SDP header depending on connector type.
 * Returns 0 on success (packet zeroed when no metadata is attached).
 */
5774 static int fill_hdr_info_packet(const struct drm_connector_state *state,
5775 struct dc_info_packet *out)
5777 struct hdmi_drm_infoframe frame;
5778 unsigned char buf[30]; /* 26 + 4 */
5782 memset(out, 0, sizeof(*out));
5784 if (!state->hdr_output_metadata)
5787 ret = drm_hdmi_infoframe_set_hdr_metadata(&frame, state);
5791 len = hdmi_drm_infoframe_pack_only(&frame, buf, sizeof(buf));
5795 /* Static metadata is a fixed 26 bytes + 4 byte header. */
5799 /* Prepare the infopacket for DC. */
5800 switch (state->connector->connector_type) {
5801 case DRM_MODE_CONNECTOR_HDMIA:
5802 out->hb0 = 0x87; /* type */
5803 out->hb1 = 0x01; /* version */
5804 out->hb2 = 0x1A; /* length */
5805 out->sb[0] = buf[3]; /* checksum */
5809 case DRM_MODE_CONNECTOR_DisplayPort:
5810 case DRM_MODE_CONNECTOR_eDP:
5811 out->hb0 = 0x00; /* sdp id, zero */
5812 out->hb1 = 0x87; /* type */
5813 out->hb2 = 0x1D; /* payload len - 1 */
5814 out->hb3 = (0x13 << 2); /* sdp version */
5815 out->sb[0] = 0x01; /* version */
5816 out->sb[1] = 0x1A; /* length */
/* Copy the 26-byte metadata payload (skipping the 4-byte header). */
5824 memcpy(&out->sb[i], &buf[4], 26);
5827 print_hex_dump(KERN_DEBUG, "HDR SB:", DUMP_PREFIX_NONE, 16, 1, out->sb,
5828 sizeof(out->sb), false);
/*
 * Return whether the HDR output metadata blob changed between the old and
 * new connector state. Blobs of equal length are compared bytewise.
 * NOTE(review): the tail of this function (remaining return paths) is
 * missing from this excerpt.
 */
5834 is_hdr_metadata_different(const struct drm_connector_state *old_state,
5835 const struct drm_connector_state *new_state)
5837 struct drm_property_blob *old_blob = old_state->hdr_output_metadata;
5838 struct drm_property_blob *new_blob = new_state->hdr_output_metadata;
/* Different blob pointers may still hold identical data: memcmp them. */
5840 if (old_blob != new_blob) {
5841 if (old_blob && new_blob &&
5842 old_blob->length == new_blob->length)
5843 return memcmp(old_blob->data, new_blob->data,
/*
 * Connector .atomic_check hook: if the HDR static metadata changed,
 * validate that it packs into an info packet and force a modeset when
 * entering or exiting HDR (see the in-body comment for why).
 */
5853 amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
5854 struct drm_atomic_state *state)
5856 struct drm_connector_state *new_con_state =
5857 drm_atomic_get_new_connector_state(state, conn);
5858 struct drm_connector_state *old_con_state =
5859 drm_atomic_get_old_connector_state(state, conn);
5860 struct drm_crtc *crtc = new_con_state->crtc;
5861 struct drm_crtc_state *new_crtc_state;
5864 trace_amdgpu_dm_connector_atomic_check(new_con_state);
5869 if (is_hdr_metadata_different(old_con_state, new_con_state)) {
5870 struct dc_info_packet hdr_infopacket;
/* Validate the metadata packs correctly; result is discarded here. */
5872 ret = fill_hdr_info_packet(new_con_state, &hdr_infopacket);
/* Pull the CRTC state into this commit so mode_changed can be set. */
5876 new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
5877 if (IS_ERR(new_crtc_state))
5878 return PTR_ERR(new_crtc_state);
5881 * DC considers the stream backends changed if the
5882 * static metadata changes. Forcing the modeset also
5883 * gives a simple way for userspace to switch from
5884 * 8bpc to 10bpc when setting the metadata to enter
5887 * Changing the static metadata after it's been
5888 * set is permissible, however. So only force a
5889 * modeset if we're entering or exiting HDR.
5891 new_crtc_state->mode_changed =
5892 !old_con_state->hdr_output_metadata ||
5893 !new_con_state->hdr_output_metadata;
/* Connector helper vtable: mode enumeration/validation and atomic check. */
5899 static const struct drm_connector_helper_funcs
5900 amdgpu_dm_connector_helper_funcs = {
5902 * If hotplugging a second bigger display in FB Con mode, bigger resolution
5903 * modes will be filtered by drm_mode_validate_size(), and those modes
5904 * are missing after user start lightdm. So we need to renew modes list.
5905 * in get_modes call back, not just return the modes count
5907 .get_modes = get_modes,
5908 .mode_valid = amdgpu_dm_connector_mode_valid,
5909 .atomic_check = amdgpu_dm_connector_atomic_check,
5912 static void dm_crtc_helper_disable(struct drm_crtc *crtc)
/*
 * Count the non-cursor planes on this CRTC that will be enabled (i.e. have
 * a framebuffer) after the atomic commit described by new_crtc_state.
 */
5916 static int count_crtc_active_planes(struct drm_crtc_state *new_crtc_state)
5918 struct drm_atomic_state *state = new_crtc_state->state;
5919 struct drm_plane *plane;
5922 drm_for_each_plane_mask(plane, state->dev, new_crtc_state->plane_mask) {
5923 struct drm_plane_state *new_plane_state;
5925 /* Cursor planes are "fake". */
5926 if (plane->type == DRM_PLANE_TYPE_CURSOR)
5929 new_plane_state = drm_atomic_get_new_plane_state(state, plane);
/* No new state in this commit: the plane keeps its current state. */
5931 if (!new_plane_state) {
5933 * The plane is enable on the CRTC and hasn't changed
5934 * state. This means that it previously passed
5935 * validation and is therefore enabled.
5941 /* We need a framebuffer to be considered enabled. */
5942 num_active += (new_plane_state->fb != NULL);
/*
 * Refresh the cached active-plane count on the DM CRTC state. A CRTC
 * without a DC stream is considered to have zero active planes.
 */
5948 static void dm_update_crtc_active_planes(struct drm_crtc *crtc,
5949 struct drm_crtc_state *new_crtc_state)
5951 struct dm_crtc_state *dm_new_crtc_state =
5952 to_dm_crtc_state(new_crtc_state);
5954 dm_new_crtc_state->active_planes = 0;
5956 if (!dm_new_crtc_state->stream)
5959 dm_new_crtc_state->active_planes =
5960 count_crtc_active_planes(new_crtc_state);
/*
 * CRTC .atomic_check hook: refresh the active-plane count, enforce that an
 * enabled CRTC has its primary plane enabled, and run DC stream validation.
 * NOTE(review): several original lines (return statements, one error path)
 * are missing from this excerpt.
 */
5963 static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
5964 struct drm_atomic_state *state)
5966 struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
5968 struct amdgpu_device *adev = drm_to_adev(crtc->dev);
5969 struct dc *dc = adev->dm.dc;
5970 struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
5973 trace_amdgpu_dm_crtc_atomic_check(crtc_state);
5975 dm_update_crtc_active_planes(crtc, crtc_state);
/* A modeset without a stream attached is unexpected. */
5977 if (unlikely(!dm_crtc_state->stream &&
5978 modeset_required(crtc_state, NULL, dm_crtc_state->stream))) {
5984 * We require the primary plane to be enabled whenever the CRTC is, otherwise
5985 * drm_mode_cursor_universal may end up trying to enable the cursor plane while all other
5986 * planes are disabled, which is not supported by the hardware. And there is legacy
5987 * userspace which stops using the HW cursor altogether in response to the resulting EINVAL.
5989 if (crtc_state->enable &&
5990 !(crtc_state->plane_mask & drm_plane_mask(crtc->primary))) {
5991 DRM_DEBUG_ATOMIC("Can't enable a CRTC without enabling the primary plane\n");
5995 /* In some use cases, like reset, no stream is attached */
5996 if (!dm_crtc_state->stream)
5999 if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
6002 DRM_DEBUG_ATOMIC("Failed DC stream validation\n");
/*
 * CRTC .mode_fixup hook; no adjustment is performed here.
 * NOTE(review): the body (return statement) is missing from this excerpt.
 */
6006 static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc,
6007 const struct drm_display_mode *mode,
6008 struct drm_display_mode *adjusted_mode)
/* CRTC helper vtable wiring the hooks defined above. */
6013 static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
6014 .disable = dm_crtc_helper_disable,
6015 .atomic_check = dm_crtc_helper_atomic_check,
6016 .mode_fixup = dm_crtc_helper_mode_fixup,
6017 .get_scanout_position = amdgpu_crtc_get_scanout_position,
6020 static void dm_encoder_helper_disable(struct drm_encoder *encoder)
/*
 * Map a DC color depth enum to bits per color component (e.g. 888 -> 8).
 * NOTE(review): the return statements for each case are missing from this
 * excerpt; the mapping is implied by the case labels only.
 */
6025 static int convert_dc_color_depth_into_bpc (enum dc_color_depth display_color_depth)
6027 switch (display_color_depth) {
6028 case COLOR_DEPTH_666:
6030 case COLOR_DEPTH_888:
6032 case COLOR_DEPTH_101010:
6034 case COLOR_DEPTH_121212:
6036 case COLOR_DEPTH_141414:
6038 case COLOR_DEPTH_161616:
/*
 * Encoder .atomic_check for DP MST connectors: compute the required PBN
 * (payload bandwidth number) from the adjusted mode's clock and color
 * depth, then atomically reserve VCPI time slots on the MST topology.
 * Non-MST connectors (no aconnector->port) are skipped.
 * NOTE(review): some original lines (returns, a few arguments) are missing
 * from this excerpt.
 */
6046 static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
6047 struct drm_crtc_state *crtc_state,
6048 struct drm_connector_state *conn_state)
6050 struct drm_atomic_state *state = crtc_state->state;
6051 struct drm_connector *connector = conn_state->connector;
6052 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6053 struct dm_connector_state *dm_new_connector_state = to_dm_connector_state(conn_state);
6054 const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
6055 struct drm_dp_mst_topology_mgr *mst_mgr;
6056 struct drm_dp_mst_port *mst_port;
6057 enum dc_color_depth color_depth;
6059 bool is_y420 = false;
/* Only MST connectors with a live sink need VCPI accounting. */
6061 if (!aconnector->port || !aconnector->dc_sink)
6064 mst_port = aconnector->port;
6065 mst_mgr = &aconnector->mst_port->mst_mgr;
6067 if (!crtc_state->connectors_changed && !crtc_state->mode_changed)
/* Duplicated states (suspend/resume) keep their previous pbn/vcpi. */
6070 if (!state->duplicated) {
6071 int max_bpc = conn_state->max_requested_bpc;
6072 is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
6073 aconnector->force_yuv420_output;
6074 color_depth = convert_color_depth_from_display_info(connector,
/* bpp = bits per component * 3 components (RGB/YCbCr). */
6077 bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
6078 clock = adjusted_mode->clock;
6079 dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false);
6081 dm_new_connector_state->vcpi_slots = drm_dp_atomic_find_vcpi_slots(state,
6084 dm_new_connector_state->pbn,
6085 dm_mst_get_pbn_divider(aconnector->dc_link));
6086 if (dm_new_connector_state->vcpi_slots < 0) {
6087 DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots);
6088 return dm_new_connector_state->vcpi_slots;
/* Encoder helper vtable; exported (non-static) for use by MST code. */
6093 const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
6094 .disable = dm_encoder_helper_disable,
6095 .atomic_check = dm_encoder_helper_atomic_check
6098 #if defined(CONFIG_DRM_AMD_DC_DCN)
/*
 * For each MST connector in the atomic state, find its matching DC stream
 * and update pbn/vcpi to account for DSC (Display Stream Compression):
 * with DSC enabled the PBN is recomputed from the compressed bits-per-pixel
 * and the slots are re-reserved; with DSC off it is disabled in the MST
 * atomic state.
 * NOTE(review): several original lines (loop bodies, some call arguments,
 * error checks) are missing from this excerpt.
 */
6099 static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
6100 struct dc_state *dc_state)
6102 struct dc_stream_state *stream = NULL;
6103 struct drm_connector *connector;
6104 struct drm_connector_state *new_con_state, *old_con_state;
6105 struct amdgpu_dm_connector *aconnector;
6106 struct dm_connector_state *dm_conn_state;
6107 int i, j, clock, bpp;
6108 int vcpi, pbn_div, pbn = 0;
6110 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
6112 aconnector = to_amdgpu_dm_connector(connector);
/* Only MST connectors (with a port) participate. */
6114 if (!aconnector->port)
6117 if (!new_con_state || !new_con_state->crtc)
6120 dm_conn_state = to_dm_connector_state(new_con_state);
/* Locate the DC stream whose dm context is this connector. */
6122 for (j = 0; j < dc_state->stream_count; j++) {
6123 stream = dc_state->streams[j];
6127 if ((struct amdgpu_dm_connector*)stream->dm_stream_context == aconnector)
6136 if (stream->timing.flags.DSC != 1) {
6137 drm_dp_mst_atomic_enable_dsc(state,
/* DSC path: recompute PBN from the compressed bpp. */
6145 pbn_div = dm_mst_get_pbn_divider(stream->link);
6146 bpp = stream->timing.dsc_cfg.bits_per_pixel;
6147 clock = stream->timing.pix_clk_100hz / 10;
6148 pbn = drm_dp_calc_pbn_mode(clock, bpp, true);
6149 vcpi = drm_dp_mst_atomic_enable_dsc(state,
6156 dm_conn_state->pbn = pbn;
6157 dm_conn_state->vcpi_slots = vcpi;
/*
 * Plane .reset hook: free any existing plane state and install a fresh,
 * zeroed DM plane state.
 */
6163 static void dm_drm_plane_reset(struct drm_plane *plane)
6165 struct dm_plane_state *amdgpu_state = NULL;
6168 plane->funcs->atomic_destroy_state(plane, plane->state);
6170 amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);
/* Allocation failure here is unexpected at init time; warn loudly. */
6171 WARN_ON(amdgpu_state == NULL);
6174 __drm_atomic_helper_plane_reset(plane, &amdgpu_state->base);
/*
 * Plane .atomic_duplicate_state hook: copy the base state and take an
 * extra reference on the shared DC plane state, which is refcounted.
 */
6177 static struct drm_plane_state *
6178 dm_drm_plane_duplicate_state(struct drm_plane *plane)
6180 struct dm_plane_state *dm_plane_state, *old_dm_plane_state;
6182 old_dm_plane_state = to_dm_plane_state(plane->state);
6183 dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL);
6184 if (!dm_plane_state)
6187 __drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base);
/* Both states now point at the same dc_state; balance with a retain. */
6189 if (old_dm_plane_state->dc_state) {
6190 dm_plane_state->dc_state = old_dm_plane_state->dc_state;
6191 dc_plane_state_retain(dm_plane_state->dc_state);
6194 return &dm_plane_state->base;
/*
 * Plane .atomic_destroy_state hook: drop the DC plane state reference
 * taken in duplicate_state, then free the DRM state.
 */
6197 static void dm_drm_plane_destroy_state(struct drm_plane *plane,
6198 struct drm_plane_state *state)
6200 struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
6202 if (dm_plane_state->dc_state)
6203 dc_plane_state_release(dm_plane_state->dc_state);
6205 drm_atomic_helper_plane_destroy_state(plane, state);
/* Plane vtable: generic atomic helpers plus the DM state management above. */
6208 static const struct drm_plane_funcs dm_plane_funcs = {
6209 .update_plane = drm_atomic_helper_update_plane,
6210 .disable_plane = drm_atomic_helper_disable_plane,
6211 .destroy = drm_primary_helper_destroy,
6212 .reset = dm_drm_plane_reset,
6213 .atomic_duplicate_state = dm_drm_plane_duplicate_state,
6214 .atomic_destroy_state = dm_drm_plane_destroy_state,
6215 .format_mod_supported = dm_plane_format_mod_supported,
/*
 * Plane .prepare_fb hook: reserve and pin the framebuffer BO (VRAM for
 * cursors, any display-supported domain otherwise), bind it into GART, and
 * record its GPU address. For newly created planes, also fill the DC
 * buffer attributes that atomic check could not compute without an address.
 * NOTE(review): a few original lines (early return, tv setup, refcount
 * grab, final return) are missing from this excerpt.
 */
6218 static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
6219 struct drm_plane_state *new_state)
6221 struct amdgpu_framebuffer *afb;
6222 struct drm_gem_object *obj;
6223 struct amdgpu_device *adev;
6224 struct amdgpu_bo *rbo;
6225 struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
6226 struct list_head list;
6227 struct ttm_validate_buffer tv;
6228 struct ww_acquire_ctx ticket;
/* Disabling the plane (no fb) needs no preparation. */
6232 if (!new_state->fb) {
6233 DRM_DEBUG_DRIVER("No FB bound\n");
6237 afb = to_amdgpu_framebuffer(new_state->fb);
6238 obj = new_state->fb->obj[0];
6239 rbo = gem_to_amdgpu_bo(obj);
6240 adev = amdgpu_ttm_adev(rbo->tbo.bdev);
6241 INIT_LIST_HEAD(&list);
6245 list_add(&tv.head, &list);
/* Reserve the BO via TTM eviction-safe locking before pinning. */
6247 r = ttm_eu_reserve_buffers(&ticket, &list, false, NULL);
6249 dev_err(adev->dev, "fail to reserve bo (%d)\n", r);
/* Cursor BOs must live in VRAM; others may use any supported domain. */
6253 if (plane->type != DRM_PLANE_TYPE_CURSOR)
6254 domain = amdgpu_display_supported_domains(adev, rbo->flags);
6256 domain = AMDGPU_GEM_DOMAIN_VRAM;
6258 r = amdgpu_bo_pin(rbo, domain);
6259 if (unlikely(r != 0)) {
6260 if (r != -ERESTARTSYS)
6261 DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
6262 ttm_eu_backoff_reservation(&ticket, &list);
6266 r = amdgpu_ttm_alloc_gart(&rbo->tbo);
/* GART bind failed: unwind the pin before backing off the reservation. */
6267 if (unlikely(r != 0)) {
6268 amdgpu_bo_unpin(rbo);
6269 ttm_eu_backoff_reservation(&ticket, &list);
6270 DRM_ERROR("%p bind failed\n", rbo);
6274 ttm_eu_backoff_reservation(&ticket, &list);
6276 afb->address = amdgpu_bo_gpu_offset(rbo);
6281 * We don't do surface updates on planes that have been newly created,
6282 * but we also don't have the afb->address during atomic check.
6284 * Fill in buffer attributes depending on the address here, but only on
6285 * newly created planes since they're not being used by DC yet and this
6286 * won't modify global state.
6288 dm_plane_state_old = to_dm_plane_state(plane->state);
6289 dm_plane_state_new = to_dm_plane_state(new_state);
6291 if (dm_plane_state_new->dc_state &&
6292 dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
6293 struct dc_plane_state *plane_state =
6294 dm_plane_state_new->dc_state;
6295 bool force_disable_dcc = !plane_state->dcc.enable;
6297 fill_plane_buffer_attributes(
6298 adev, afb, plane_state->format, plane_state->rotation,
6300 &plane_state->tiling_info, &plane_state->plane_size,
6301 &plane_state->dcc, &plane_state->address,
6302 afb->tmz_surface, force_disable_dcc);
/*
 * Plane .cleanup_fb hook: unpin the old framebuffer BO and drop the
 * reference taken in prepare_fb.
 * NOTE(review): the early return for a NULL old fb is missing from this
 * excerpt.
 */
6308 static void dm_plane_helper_cleanup_fb(struct drm_plane *plane,
6309 struct drm_plane_state *old_state)
6311 struct amdgpu_bo *rbo;
6317 rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]);
6318 r = amdgpu_bo_reserve(rbo, false);
6320 DRM_ERROR("failed to reserve rbo before unpin\n");
6324 amdgpu_bo_unpin(rbo);
6325 amdgpu_bo_unreserve(rbo);
6326 amdgpu_bo_unref(&rbo);
/*
 * Validate plane position/scaling against the CRTC using the generic DRM
 * helper. Scaling limits are currently unbounded (0..INT_MAX) pending a
 * check against real DC plane caps (see TODO).
 */
6329 static int dm_plane_helper_check_state(struct drm_plane_state *state,
6330 struct drm_crtc_state *new_crtc_state)
6332 int max_downscale = 0;
6333 int max_upscale = INT_MAX;
6335 /* TODO: These should be checked against DC plane caps */
6336 return drm_atomic_helper_check_plane_state(
6337 state, new_crtc_state, max_downscale, max_upscale, true, true);
/*
 * Plane .atomic_check hook: verify position/scaling, build the DC scaling
 * info, and run DC plane validation.
 * NOTE(review): return statements and the error paths after each check are
 * missing from this excerpt.
 */
6340 static int dm_plane_atomic_check(struct drm_plane *plane,
6341 struct drm_plane_state *state)
6343 struct amdgpu_device *adev = drm_to_adev(plane->dev);
6344 struct dc *dc = adev->dm.dc;
6345 struct dm_plane_state *dm_plane_state;
6346 struct dc_scaling_info scaling_info;
6347 struct drm_crtc_state *new_crtc_state;
6350 trace_amdgpu_dm_plane_atomic_check(state);
6352 dm_plane_state = to_dm_plane_state(state);
/* Nothing to validate if no DC state is attached (plane disabled). */
6354 if (!dm_plane_state->dc_state)
6358 drm_atomic_get_new_crtc_state(state->state, state->crtc);
6359 if (!new_crtc_state)
6362 ret = dm_plane_helper_check_state(state, new_crtc_state);
6366 ret = fill_dc_scaling_info(state, &scaling_info);
6370 if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
/*
 * Plane .atomic_async_check hook: async (fast, non-vblank-synchronized)
 * updates are restricted to the cursor plane.
 * NOTE(review): the return statements are missing from this excerpt.
 */
6376 static int dm_plane_atomic_async_check(struct drm_plane *plane,
6377 struct drm_plane_state *new_plane_state)
6379 /* Only support async updates on cursor planes. */
6380 if (plane->type != DRM_PLANE_TYPE_CURSOR)
/*
 * Plane .atomic_async_update hook (cursor only): copy the new geometry into
 * the committed plane state in place, swap in the new fb, and program the
 * cursor immediately without waiting for a full atomic commit.
 */
6386 static void dm_plane_atomic_async_update(struct drm_plane *plane,
6387 struct drm_plane_state *new_state)
6389 struct drm_plane_state *old_state =
6390 drm_atomic_get_old_plane_state(new_state->state, plane);
6392 trace_amdgpu_dm_atomic_update_cursor(new_state);
/* swap() keeps fb refcounting balanced between the two states. */
6394 swap(plane->state->fb, new_state->fb);
6396 plane->state->src_x = new_state->src_x;
6397 plane->state->src_y = new_state->src_y;
6398 plane->state->src_w = new_state->src_w;
6399 plane->state->src_h = new_state->src_h;
6400 plane->state->crtc_x = new_state->crtc_x;
6401 plane->state->crtc_y = new_state->crtc_y;
6402 plane->state->crtc_w = new_state->crtc_w;
6403 plane->state->crtc_h = new_state->crtc_h;
6405 handle_cursor_update(plane, old_state);
/* Plane helper vtable: fb pin/unpin, validation, and async cursor path. */
6408 static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
6409 .prepare_fb = dm_plane_helper_prepare_fb,
6410 .cleanup_fb = dm_plane_helper_cleanup_fb,
6411 .atomic_check = dm_plane_atomic_check,
6412 .atomic_async_check = dm_plane_atomic_async_check,
6413 .atomic_async_update = dm_plane_atomic_async_update
6417 * TODO: these are currently initialized to rgb formats only.
6418 * For future use cases we should either initialize them dynamically based on
6419 * plane capabilities, or initialize this array to all formats, so internal drm
6420 * check will succeed, and let DC implement proper check
/* Pixel formats advertised on primary planes (YUV/FP16 added per caps). */
6422 static const uint32_t rgb_formats[] = {
6423 DRM_FORMAT_XRGB8888,
6424 DRM_FORMAT_ARGB8888,
6425 DRM_FORMAT_RGBA8888,
6426 DRM_FORMAT_XRGB2101010,
6427 DRM_FORMAT_XBGR2101010,
6428 DRM_FORMAT_ARGB2101010,
6429 DRM_FORMAT_ABGR2101010,
6430 DRM_FORMAT_XBGR8888,
6431 DRM_FORMAT_ABGR8888,
/* Pixel formats advertised on overlay planes. */
6435 static const uint32_t overlay_formats[] = {
6436 DRM_FORMAT_XRGB8888,
6437 DRM_FORMAT_ARGB8888,
6438 DRM_FORMAT_RGBA8888,
6439 DRM_FORMAT_XBGR8888,
6440 DRM_FORMAT_ABGR8888,
6444 static const u32 cursor_formats[] = {
/*
 * Fill @formats with the pixel formats supported by @plane, bounded by
 * @max_formats, and return the count written. Primary planes start from
 * rgb_formats and append NV12/P010/FP16 entries when the DC plane caps
 * report support for them.
 */
6448 static int get_plane_formats(const struct drm_plane *plane,
6449 const struct dc_plane_cap *plane_cap,
6450 uint32_t *formats, int max_formats)
6452 int i, num_formats = 0;
6455 * TODO: Query support for each group of formats directly from
6456 * DC plane caps. This will require adding more formats to the
6460 switch (plane->type) {
6461 case DRM_PLANE_TYPE_PRIMARY:
6462 for (i = 0; i < ARRAY_SIZE(rgb_formats); ++i) {
6463 if (num_formats >= max_formats)
6466 formats[num_formats++] = rgb_formats[i];
/* Cap-gated extras beyond the static RGB table. */
6469 if (plane_cap && plane_cap->pixel_format_support.nv12)
6470 formats[num_formats++] = DRM_FORMAT_NV12;
6471 if (plane_cap && plane_cap->pixel_format_support.p010)
6472 formats[num_formats++] = DRM_FORMAT_P010;
6473 if (plane_cap && plane_cap->pixel_format_support.fp16) {
6474 formats[num_formats++] = DRM_FORMAT_XRGB16161616F;
6475 formats[num_formats++] = DRM_FORMAT_ARGB16161616F;
6476 formats[num_formats++] = DRM_FORMAT_XBGR16161616F;
6477 formats[num_formats++] = DRM_FORMAT_ABGR16161616F;
6481 case DRM_PLANE_TYPE_OVERLAY:
6482 for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) {
6483 if (num_formats >= max_formats)
6486 formats[num_formats++] = overlay_formats[i];
6490 case DRM_PLANE_TYPE_CURSOR:
6491 for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) {
6492 if (num_formats >= max_formats)
6495 formats[num_formats++] = cursor_formats[i];
/*
 * Register a DRM plane with the core: gather supported formats and
 * modifiers, initialize the universal plane, and attach alpha/blend,
 * color-encoding, and rotation properties according to the DC plane caps.
 * NOTE(review): some original lines (kfree of modifiers, error returns)
 * are missing from this excerpt.
 */
6503 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
6504 struct drm_plane *plane,
6505 unsigned long possible_crtcs,
6506 const struct dc_plane_cap *plane_cap)
6508 uint32_t formats[32];
6511 unsigned int supported_rotations;
6512 uint64_t *modifiers = NULL;
6514 num_formats = get_plane_formats(plane, plane_cap, formats,
6515 ARRAY_SIZE(formats));
6517 res = get_plane_modifiers(dm->adev, plane->type, &modifiers);
6521 res = drm_universal_plane_init(adev_to_drm(dm->adev), plane, possible_crtcs,
6522 &dm_plane_funcs, formats, num_formats,
6523 modifiers, plane->type, NULL);
/* Per-pixel alpha blending only where the caps allow it (overlays). */
6528 if (plane->type == DRM_PLANE_TYPE_OVERLAY &&
6529 plane_cap && plane_cap->per_pixel_alpha) {
6530 unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
6531 BIT(DRM_MODE_BLEND_PREMULTI);
6533 drm_plane_create_alpha_property(plane);
6534 drm_plane_create_blend_mode_property(plane, blend_caps);
6537 if (plane->type == DRM_PLANE_TYPE_PRIMARY &&
6539 (plane_cap->pixel_format_support.nv12 ||
6540 plane_cap->pixel_format_support.p010)) {
6541 /* This only affects YUV formats. */
6542 drm_plane_create_color_properties(
6544 BIT(DRM_COLOR_YCBCR_BT601) |
6545 BIT(DRM_COLOR_YCBCR_BT709) |
6546 BIT(DRM_COLOR_YCBCR_BT2020),
6547 BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
6548 BIT(DRM_COLOR_YCBCR_FULL_RANGE),
6549 DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE);
6552 supported_rotations =
6553 DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
6554 DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270;
/* Rotation is supported on CIK+ ASICs, but not on the cursor plane. */
6556 if (dm->adev->asic_type >= CHIP_BONAIRE &&
6557 plane->type != DRM_PLANE_TYPE_CURSOR)
6558 drm_plane_create_rotation_property(plane, DRM_MODE_ROTATE_0,
6559 supported_rotations);
6561 drm_plane_helper_add(plane, &dm_plane_helper_funcs);
6563 /* Create (reset) the plane state */
6564 if (plane->funcs->reset)
6565 plane->funcs->reset(plane);
/*
 * Allocate and register one CRTC together with its dedicated cursor plane,
 * then wire up helpers, color management, and cursor size limits from the
 * DC caps.
 * NOTE(review): several original lines (error checks/labels, some
 * drm_crtc_init_with_planes() arguments) are missing from this excerpt;
 * the "kfree(cursor_plane)" near the end is an error-path cleanup.
 */
6570 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
6571 struct drm_plane *plane,
6572 uint32_t crtc_index)
6574 struct amdgpu_crtc *acrtc = NULL;
6575 struct drm_plane *cursor_plane;
6579 cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL);
6583 cursor_plane->type = DRM_PLANE_TYPE_CURSOR;
6584 res = amdgpu_dm_plane_init(dm, cursor_plane, 0, NULL);
6586 acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
6590 res = drm_crtc_init_with_planes(
6595 &amdgpu_dm_crtc_funcs, NULL);
6600 drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs);
6602 /* Create (reset) the plane state */
6603 if (acrtc->base.funcs->reset)
6604 acrtc->base.funcs->reset(&acrtc->base);
/* Cursor limits come straight from the DC hardware caps. */
6606 acrtc->max_cursor_width = dm->adev->dm.dc->caps.max_cursor_size;
6607 acrtc->max_cursor_height = dm->adev->dm.dc->caps.max_cursor_size;
6609 acrtc->crtc_id = crtc_index;
6610 acrtc->base.enabled = false;
6611 acrtc->otg_inst = -1;
6613 dm->adev->mode_info.crtcs[crtc_index] = acrtc;
6614 drm_crtc_enable_color_mgmt(&acrtc->base, MAX_COLOR_LUT_ENTRIES,
6615 true, MAX_COLOR_LUT_ENTRIES);
6616 drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);
6622 kfree(cursor_plane);
/* Map a DC signal type to the corresponding DRM connector type constant. */
6627 static int to_drm_connector_type(enum signal_type st)
6630 case SIGNAL_TYPE_HDMI_TYPE_A:
6631 return DRM_MODE_CONNECTOR_HDMIA;
6632 case SIGNAL_TYPE_EDP:
6633 return DRM_MODE_CONNECTOR_eDP;
6634 case SIGNAL_TYPE_LVDS:
6635 return DRM_MODE_CONNECTOR_LVDS;
6636 case SIGNAL_TYPE_RGB:
6637 return DRM_MODE_CONNECTOR_VGA;
6638 case SIGNAL_TYPE_DISPLAY_PORT:
6639 case SIGNAL_TYPE_DISPLAY_PORT_MST:
6640 return DRM_MODE_CONNECTOR_DisplayPort;
6641 case SIGNAL_TYPE_DVI_DUAL_LINK:
6642 case SIGNAL_TYPE_DVI_SINGLE_LINK:
6643 return DRM_MODE_CONNECTOR_DVID;
6644 case SIGNAL_TYPE_VIRTUAL:
6645 return DRM_MODE_CONNECTOR_VIRTUAL;
6648 return DRM_MODE_CONNECTOR_Unknown;
/*
 * Return the single encoder attached to @connector (each DM connector has
 * exactly one possible encoder, so the first iteration wins).
 */
6652 static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector)
6654 struct drm_encoder *encoder;
6656 /* There is only one encoder per connector */
6657 drm_connector_for_each_possible_encoder(connector, encoder)
/*
 * Cache the connector's preferred probed mode as the encoder's native
 * mode (used later for generating scaled common modes). Clears the cached
 * clock first so a stale native mode is never kept.
 */
6663 static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
6665 struct drm_encoder *encoder;
6666 struct amdgpu_encoder *amdgpu_encoder;
6668 encoder = amdgpu_dm_connector_to_encoder(connector);
6670 if (encoder == NULL)
6673 amdgpu_encoder = to_amdgpu_encoder(encoder);
6675 amdgpu_encoder->native_mode.clock = 0;
6677 if (!list_empty(&connector->probed_modes)) {
6678 struct drm_display_mode *preferred_mode = NULL;
/* Probed modes are pre-sorted; take the first PREFERRED entry. */
6680 list_for_each_entry(preferred_mode,
6681 &connector->probed_modes,
6683 if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
6684 amdgpu_encoder->native_mode = *preferred_mode;
/*
 * Duplicate the encoder's native mode and relabel it with a new name and
 * hdisplay/vdisplay, producing a scaled "common" mode that keeps the
 * native timings. The PREFERRED flag is stripped since only the true
 * native mode should carry it.
 */
6692 static struct drm_display_mode *
6693 amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
6695 int hdisplay, int vdisplay)
6697 struct drm_device *dev = encoder->dev;
6698 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
6699 struct drm_display_mode *mode = NULL;
6700 struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
6702 mode = drm_mode_duplicate(dev, native_mode);
6707 mode->hdisplay = hdisplay;
6708 mode->vdisplay = vdisplay;
6709 mode->type &= ~DRM_MODE_TYPE_PREFERRED;
6710 strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
/*
 * Add a standard set of common resolutions (640x480 .. 1920x1200) to the
 * connector's probed-mode list, derived from the encoder's native mode.
 * Modes larger than the native mode, equal to it, or already present are
 * skipped.
 */
6716 static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
6717 struct drm_connector *connector)
6719 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
6720 struct drm_display_mode *mode = NULL;
6721 struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
6722 struct amdgpu_dm_connector *amdgpu_dm_connector =
6723 to_amdgpu_dm_connector(connector);
6727 char name[DRM_DISPLAY_MODE_LEN];
6730 } common_modes[] = {
6731 { "640x480", 640, 480},
6732 { "800x600", 800, 600},
6733 { "1024x768", 1024, 768},
6734 { "1280x720", 1280, 720},
6735 { "1280x800", 1280, 800},
6736 {"1280x1024", 1280, 1024},
6737 { "1440x900", 1440, 900},
6738 {"1680x1050", 1680, 1050},
6739 {"1600x1200", 1600, 1200},
6740 {"1920x1080", 1920, 1080},
6741 {"1920x1200", 1920, 1200}
6744 n = ARRAY_SIZE(common_modes);
6746 for (i = 0; i < n; i++) {
6747 struct drm_display_mode *curmode = NULL;
6748 bool mode_existed = false;
/* Skip sizes that exceed or exactly match the native resolution. */
6750 if (common_modes[i].w > native_mode->hdisplay ||
6751 common_modes[i].h > native_mode->vdisplay ||
6752 (common_modes[i].w == native_mode->hdisplay &&
6753 common_modes[i].h == native_mode->vdisplay))
/* Skip resolutions the sink already reported via EDID. */
6756 list_for_each_entry(curmode, &connector->probed_modes, head) {
6757 if (common_modes[i].w == curmode->hdisplay &&
6758 common_modes[i].h == curmode->vdisplay) {
6759 mode_existed = true;
6767 mode = amdgpu_dm_create_common_mode(encoder,
6768 common_modes[i].name, common_modes[i].w,
6770 drm_mode_probed_add(connector, mode);
6771 amdgpu_dm_connector->num_modes++;
/*
 * Populate the connector's probed-mode list from an EDID (when present),
 * then sort the modes and capture the native mode. With no EDID the mode
 * count is reset to zero.
 * NOTE(review): the edid parameter line and the if/else structure lines
 * are partially missing from this excerpt.
 */
6775 static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
6778 struct amdgpu_dm_connector *amdgpu_dm_connector =
6779 to_amdgpu_dm_connector(connector);
6782 /* empty probed_modes */
6783 INIT_LIST_HEAD(&connector->probed_modes);
6784 amdgpu_dm_connector->num_modes =
6785 drm_add_edid_modes(connector, edid);
6787 /* sorting the probed modes before calling function
6788 * amdgpu_dm_get_native_mode() since EDID can have
6789 * more than one preferred mode. The modes that are
6790 * later in the probed mode list could be of higher
6791 * and preferred resolution. For example, 3840x2160
6792 * resolution in base EDID preferred timing and 4096x2160
6793 * preferred resolution in DID extension block later.
6795 drm_mode_sort(&connector->probed_modes);
6796 amdgpu_dm_get_native_mode(connector);
6798 amdgpu_dm_connector->num_modes = 0;
/*
 * Connector .get_modes hook: with a valid EDID, add EDID modes plus the
 * scaled common modes; otherwise fall back to the standard no-EDID modes
 * (max 640x480). Returns the resulting mode count.
 */
6802 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
6804 struct amdgpu_dm_connector *amdgpu_dm_connector =
6805 to_amdgpu_dm_connector(connector);
6806 struct drm_encoder *encoder;
6807 struct edid *edid = amdgpu_dm_connector->edid;
6809 encoder = amdgpu_dm_connector_to_encoder(connector);
6811 if (!drm_edid_is_valid(edid)) {
6812 amdgpu_dm_connector->num_modes =
6813 drm_add_modes_noedid(connector, 640, 480);
6815 amdgpu_dm_connector_ddc_get_modes(connector, edid);
6816 amdgpu_dm_connector_add_common_modes(encoder, connector);
6818 amdgpu_dm_fbc_init(connector);
6820 return amdgpu_dm_connector->num_modes;
/*
 * Initialize the DM-specific fields of an amdgpu connector and attach all
 * relevant DRM properties: scaling, underscan, max bpc, ABM (eDP with
 * DMCU/DMUB), HDR metadata, VRR, and HDCP content protection. Polling and
 * YCbCr 4:2:0 support are configured per connector type.
 * NOTE(review): a few original lines (link_index parameter, break
 * statements, property default values) are missing from this excerpt.
 */
6823 void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
6824 struct amdgpu_dm_connector *aconnector,
6826 struct dc_link *link,
6829 struct amdgpu_device *adev = drm_to_adev(dm->ddev);
6832 * Some of the properties below require access to state, like bpc.
6833 * Allocate some default initial connector state with our reset helper.
6835 if (aconnector->base.funcs->reset)
6836 aconnector->base.funcs->reset(&aconnector->base);
6838 aconnector->connector_id = link_index;
6839 aconnector->dc_link = link;
6840 aconnector->base.interlace_allowed = false;
6841 aconnector->base.doublescan_allowed = false;
6842 aconnector->base.stereo_allowed = false;
6843 aconnector->base.dpms = DRM_MODE_DPMS_OFF;
6844 aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
6845 aconnector->audio_inst = -1;
6846 mutex_init(&aconnector->hpd_lock);
6849 * configure support HPD hot plug connector_>polled default value is 0
6850 * which means HPD hot plug not supported
6852 switch (connector_type) {
6853 case DRM_MODE_CONNECTOR_HDMIA:
6854 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
6855 aconnector->base.ycbcr_420_allowed =
6856 link->link_enc->features.hdmi_ycbcr420_supported ? true : false;
6858 case DRM_MODE_CONNECTOR_DisplayPort:
6859 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
6860 aconnector->base.ycbcr_420_allowed =
6861 link->link_enc->features.dp_ycbcr420_supported ? true : false;
6863 case DRM_MODE_CONNECTOR_DVID:
6864 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
6870 drm_object_attach_property(&aconnector->base.base,
6871 dm->ddev->mode_config.scaling_mode_property,
6872 DRM_MODE_SCALE_NONE);
6874 drm_object_attach_property(&aconnector->base.base,
6875 adev->mode_info.underscan_property,
6877 drm_object_attach_property(&aconnector->base.base,
6878 adev->mode_info.underscan_hborder_property,
6880 drm_object_attach_property(&aconnector->base.base,
6881 adev->mode_info.underscan_vborder_property,
/* MST ports get max_bpc from their root connector, not here. */
6884 if (!aconnector->mst_port)
6885 drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);
6887 /* This defaults to the max in the range, but we want 8bpc for non-edp. */
6888 aconnector->base.state->max_bpc = (connector_type == DRM_MODE_CONNECTOR_eDP) ? 16 : 8;
6889 aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;
/* ABM backlight control needs either DMCU firmware or a DMUB service. */
6891 if (connector_type == DRM_MODE_CONNECTOR_eDP &&
6892 (dc_is_dmcu_initialized(adev->dm.dc) || adev->dm.dc->ctx->dmub_srv)) {
6893 drm_object_attach_property(&aconnector->base.base,
6894 adev->mode_info.abm_level_property, 0);
6897 if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
6898 connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
6899 connector_type == DRM_MODE_CONNECTOR_eDP) {
6900 drm_object_attach_property(
6901 &aconnector->base.base,
6902 dm->ddev->mode_config.hdr_output_metadata_property, 0);
6904 if (!aconnector->mst_port)
6905 drm_connector_attach_vrr_capable_property(&aconnector->base);
6907 #ifdef CONFIG_DRM_AMD_DC_HDCP
6908 if (adev->dm.hdcp_workqueue)
6909 drm_connector_attach_content_protection_property(&aconnector->base, true);
/*
 * i2c_algorithm .master_xfer implementation: translate an array of Linux
 * i2c_msg into a DC i2c_command and submit it over the link's DDC channel.
 * NOTE(review): the submit call's function name, the result handling, and
 * the speed field setup are missing from this excerpt.
 */
6914 static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
6915 struct i2c_msg *msgs, int num)
6917 struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
6918 struct ddc_service *ddc_service = i2c->ddc_service;
6919 struct i2c_command cmd;
6923 cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);
6928 cmd.number_of_payloads = num;
6929 cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
/* Map each i2c_msg 1:1 onto a DC payload; I2C_M_RD means a read. */
6932 for (i = 0; i < num; i++) {
6933 cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
6934 cmd.payloads[i].address = msgs[i].addr;
6935 cmd.payloads[i].length = msgs[i].len;
6936 cmd.payloads[i].data = msgs[i].buf;
6940 ddc_service->ctx->dc,
6941 ddc_service->ddc_pin->hw_info.ddc_channel,
6945 kfree(cmd.payloads);
/* Advertise plain I2C plus emulated SMBus capability for this adapter. */
6949 static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
6951 return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
/* I2C algorithm vtable backed by the DC DDC service. */
6954 static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
6955 .master_xfer = amdgpu_dm_i2c_xfer,
6956 .functionality = amdgpu_dm_i2c_func,
/*
 * Allocate and populate an amdgpu I2C adapter wrapping @ddc_service, named
 * after @link_index; the adapter is registered by the caller.
 * NOTE(review): the remaining parameters, NULL check, and return are
 * missing from this excerpt.
 */
6959 static struct amdgpu_i2c_adapter *
6960 create_i2c(struct ddc_service *ddc_service,
6964 struct amdgpu_device *adev = ddc_service->ctx->driver_context;
6965 struct amdgpu_i2c_adapter *i2c;
6967 i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
6970 i2c->base.owner = THIS_MODULE;
6971 i2c->base.class = I2C_CLASS_DDC;
6972 i2c->base.dev.parent = &adev->pdev->dev;
6973 i2c->base.algo = &amdgpu_dm_i2c_algo;
6974 snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
6975 i2c_set_adapdata(&i2c->base, i2c);
6976 i2c->ddc_service = ddc_service;
/* Record which hardware DDC channel this adapter drives. */
6977 i2c->ddc_service->ddc_pin->hw_info.ddc_channel = link_index;
6984 * Note: this function assumes that dc_link_detect() was called for the
6985 * dc_link which will be represented by this aconnector.
/*
 * Create and register one DRM connector for a DC link: set up its I2C
 * adapter, initialize the connector with DDC, attach helpers and the
 * encoder, and for DP/eDP also initialize MST support.
 * NOTE(review): some original lines (error checks/gotos, a few call
 * arguments) are missing from this excerpt; the trailing
 * "aconnector->i2c = NULL" is error-path cleanup.
 */
6987 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
6988 struct amdgpu_dm_connector *aconnector,
6989 uint32_t link_index,
6990 struct amdgpu_encoder *aencoder)
6994 struct dc *dc = dm->dc;
6995 struct dc_link *link = dc_get_link_at_index(dc, link_index);
6996 struct amdgpu_i2c_adapter *i2c;
/* Back-pointer so link-level code can find its connector. */
6998 link->priv = aconnector;
7000 DRM_DEBUG_DRIVER("%s()\n", __func__);
7002 i2c = create_i2c(link->ddc, link->link_index, &res);
7004 DRM_ERROR("Failed to create i2c adapter data\n");
7008 aconnector->i2c = i2c;
7009 res = i2c_add_adapter(&i2c->base);
7012 DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
7016 connector_type = to_drm_connector_type(link->connector_signal);
7018 res = drm_connector_init_with_ddc(
7021 &amdgpu_dm_connector_funcs,
7026 DRM_ERROR("connector_init failed\n");
7027 aconnector->connector_id = -1;
7031 drm_connector_helper_add(
7033 &amdgpu_dm_connector_helper_funcs);
7035 amdgpu_dm_connector_init_helper(
7042 drm_connector_attach_encoder(
7043 &aconnector->base, &aencoder->base);
7045 if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
7046 || connector_type == DRM_MODE_CONNECTOR_eDP)
7047 amdgpu_dm_initialize_dp_connector(dm, aconnector, link->link_index);
7052 aconnector->i2c = NULL;
/*
 * Build the encoder possible_crtcs bitmask from the number of CRTCs the
 * ASIC exposes (adev->mode_info.num_crtc) — every encoder can reach every
 * CRTC on this hardware.
 */
7057 int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
7059 switch (adev->mode_info.num_crtc) {
/*
 * Register the DRM encoder for @link_index (registered as TMDS) and hook
 * up the DM encoder helper funcs.  encoder_id mirrors the link index on
 * success and is set to -1 on failure.
 */
7076 static int amdgpu_dm_encoder_init(struct drm_device *dev,
7077 struct amdgpu_encoder *aencoder,
7078 uint32_t link_index)
7080 struct amdgpu_device *adev = drm_to_adev(dev);
7082 int res = drm_encoder_init(dev,
7084 &amdgpu_dm_encoder_funcs,
7085 DRM_MODE_ENCODER_TMDS,
7088 aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);
7091 aencoder->encoder_id = link_index;
7093 aencoder->encoder_id = -1;
7095 drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);
/*
 * Enable or disable the per-CRTC display interrupts (vblank and
 * pageflip) for @acrtc, turning the DRM vblank machinery on/off to
 * match.
 */
7100 static void manage_dm_interrupts(struct amdgpu_device *adev,
7101 struct amdgpu_crtc *acrtc,
7105 * We have no guarantee that the frontend index maps to the same
7106 * backend index - some even map to more than one.
7108 * TODO: Use a different interrupt or check DC itself for the mapping.
7111 amdgpu_display_crtc_idx_to_irq_type(
7116 drm_crtc_vblank_on(&acrtc->base);
7119 &adev->pageflip_irq,
7125 &adev->pageflip_irq,
7127 drm_crtc_vblank_off(&acrtc->base);
/*
 * Re-apply the currently cached pageflip IRQ enable state for @acrtc to
 * the hardware.  Used when the pipe's power state may have changed and
 * clobbered the HW-side enablement.
 */
7131 static void dm_update_pflip_irq_state(struct amdgpu_device *adev,
7132 struct amdgpu_crtc *acrtc)
7135 amdgpu_display_crtc_idx_to_irq_type(adev, acrtc->crtc_id);
7138 * This reads the current state for the IRQ and force reapplies
7139 * the setting to hardware.
7141 amdgpu_irq_update(adev, &adev->pageflip_irq, irq_type);
/*
 * Return whether the scaling configuration differs between the new and
 * old connector states: scaling mode, underscan enable transitions
 * (only significant when a non-zero border was/is set), or changed
 * underscan border sizes.
 */
7145 is_scaling_state_different(const struct dm_connector_state *dm_state,
7146 const struct dm_connector_state *old_dm_state)
7148 if (dm_state->scaling != old_dm_state->scaling)
7150 if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
7151 if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
7153 } else if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
7154 if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
7156 } else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
7157 dm_state->underscan_vborder != old_dm_state->underscan_vborder)
7162 #ifdef CONFIG_DRM_AMD_DC_HDCP
/*
 * Decide whether the HDCP workqueue needs to act on this connector,
 * based on the old -> new content-protection state transition.  As a
 * side effect, normalizes transient state values (e.g. rewrites
 * ENABLED back to DESIRED so the HDCP engine re-authenticates).
 */
7163 static bool is_content_protection_different(struct drm_connector_state *state,
7164 const struct drm_connector_state *old_state,
7165 const struct drm_connector *connector, struct hdcp_workqueue *hdcp_w)
7167 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7168 struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
7170 /* Handle: Type0/1 change */
7171 if (old_state->hdcp_content_type != state->hdcp_content_type &&
7172 state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
7173 state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
7177 /* CP is being re enabled, ignore this
7179 * Handles: ENABLED -> DESIRED
7181 if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
7182 state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
7183 state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
7187 /* S3 resume case, since old state will always be 0 (UNDESIRED) and the restored state will be ENABLED
7189 * Handles: UNDESIRED -> ENABLED
7191 if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
7192 state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
7193 state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
7195 /* Check if something is connected/enabled, otherwise we start hdcp but nothing is connected/enabled
7196 * hot-plug, headless s3, dpms
7198 * Handles: DESIRED -> DESIRED (Special case)
7200 if (dm_con_state->update_hdcp && state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED &&
7201 connector->dpms == DRM_MODE_DPMS_ON && aconnector->dc_sink != NULL) {
7202 dm_con_state->update_hdcp = false;
7207 * Handles: UNDESIRED -> UNDESIRED
7208 * DESIRED -> DESIRED
7209 * ENABLED -> ENABLED
7211 if (old_state->content_protection == state->content_protection)
7215 * Handles: UNDESIRED -> DESIRED
7216 * DESIRED -> UNDESIRED
7217 * ENABLED -> UNDESIRED
7219 if (state->content_protection != DRM_MODE_CONTENT_PROTECTION_ENABLED)
7223 * Handles: DESIRED -> ENABLED
/*
 * Detach @stream from @acrtc during a mode set/reset: the CRTC no longer
 * has a valid OTG instance and is marked disabled.
 */
7229 static void remove_stream(struct amdgpu_device *adev,
7230 struct amdgpu_crtc *acrtc,
7231 struct dc_stream_state *stream)
7233 /* this is the update mode case */
7235 acrtc->otg_inst = -1;
7236 acrtc->enabled = false;
/*
 * Translate the DRM cursor plane state into a DC cursor position.
 * Negative crtc_x/crtc_y (cursor partially off the top/left edge) are
 * expressed via x/y hotspot offsets, clamped to the HW cursor size.
 * position->enable stays false when there is no CRTC/FB, or when the
 * cursor exceeds the HW maximum or lies fully off-screen.
 */
7239 static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
7240 struct dc_cursor_position *position)
7242 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
7244 int xorigin = 0, yorigin = 0;
7246 position->enable = false;
7250 if (!crtc || !plane->state->fb)
7253 if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) ||
7254 (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) {
7255 DRM_ERROR("%s: bad cursor width or height %d x %d\n",
7257 plane->state->crtc_w,
7258 plane->state->crtc_h);
7262 x = plane->state->crtc_x;
7263 y = plane->state->crtc_y;
/* Cursor entirely off the top/left of the screen: leave disabled. */
7265 if (x <= -amdgpu_crtc->max_cursor_width ||
7266 y <= -amdgpu_crtc->max_cursor_height)
7270 xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
7274 yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
7277 position->enable = true;
7278 position->translate_by_source = true;
7281 position->x_hotspot = xorigin;
7282 position->y_hotspot = yorigin;
/*
 * Program the DC cursor for a cursor-plane update: either disable the
 * cursor (no valid position) or push new attributes (address, size,
 * premultiplied-ARGB format, pitch) and position to the stream.  All DC
 * calls are serialized under adev->dm.dc_lock.
 */
7287 static void handle_cursor_update(struct drm_plane *plane,
7288 struct drm_plane_state *old_plane_state)
7290 struct amdgpu_device *adev = drm_to_adev(plane->dev);
7291 struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb);
7292 struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
7293 struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
7294 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
7295 uint64_t address = afb ? afb->address : 0;
7296 struct dc_cursor_position position;
7297 struct dc_cursor_attributes attributes;
/* Nothing to do if neither old nor new state has a framebuffer. */
7300 if (!plane->state->fb && !old_plane_state->fb)
7303 DRM_DEBUG_DRIVER("%s: crtc_id=%d with size %d to %d\n",
7305 amdgpu_crtc->crtc_id,
7306 plane->state->crtc_w,
7307 plane->state->crtc_h);
7309 ret = get_cursor_position(plane, crtc, &position);
7313 if (!position.enable) {
7314 /* turn off cursor */
7315 if (crtc_state && crtc_state->stream) {
7316 mutex_lock(&adev->dm.dc_lock);
7317 dc_stream_set_cursor_position(crtc_state->stream,
7319 mutex_unlock(&adev->dm.dc_lock);
7324 amdgpu_crtc->cursor_width = plane->state->crtc_w;
7325 amdgpu_crtc->cursor_height = plane->state->crtc_h;
7327 memset(&attributes, 0, sizeof(attributes));
7328 attributes.address.high_part = upper_32_bits(address);
7329 attributes.address.low_part = lower_32_bits(address);
7330 attributes.width = plane->state->crtc_w;
7331 attributes.height = plane->state->crtc_h;
7332 attributes.color_format = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
7333 attributes.rotation_angle = 0;
7334 attributes.attribute_flags.value = 0;
/* Pitch in pixels: byte pitch divided by bytes-per-pixel. */
7336 attributes.pitch = afb->base.pitches[0] / afb->base.format->cpp[0];
7338 if (crtc_state->stream) {
7339 mutex_lock(&adev->dm.dc_lock);
7340 if (!dc_stream_set_cursor_attributes(crtc_state->stream,
7342 DRM_ERROR("DC failed to set cursor attributes\n");
7344 if (!dc_stream_set_cursor_position(crtc_state->stream,
7346 DRM_ERROR("DC failed to set cursor position\n");
7347 mutex_unlock(&adev->dm.dc_lock);
/*
 * Hand the pending pageflip event over to the pageflip ISR: stash the
 * event on the CRTC, mark the flip SUBMITTED, and consume the event from
 * the DRM state.  Caller must hold dev->event_lock (asserted below).
 */
7351 static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
7354 assert_spin_locked(&acrtc->base.dev->event_lock);
7355 WARN_ON(acrtc->event);
7357 acrtc->event = acrtc->base.state->event;
7359 /* Set the flip status */
7360 acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
7362 /* Mark this event as consumed */
7363 acrtc->base.state->event = NULL;
7365 DRM_DEBUG_DRIVER("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
/*
 * Per-flip FreeSync/VRR bookkeeping: run the freesync module's pre-flip
 * handling, adjust vmin/vmax for pre-AI (pre-Vega) ASICs while VRR is
 * active, rebuild the VRR infopacket, and record whether the timing or
 * infopacket changed so the commit path knows to resend them.  VRR state
 * shared with the IRQ handler is accessed under the DRM event_lock.
 */
7369 static void update_freesync_state_on_stream(
7370 struct amdgpu_display_manager *dm,
7371 struct dm_crtc_state *new_crtc_state,
7372 struct dc_stream_state *new_stream,
7373 struct dc_plane_state *surface,
7374 u32 flip_timestamp_in_us)
7376 struct mod_vrr_params vrr_params;
7377 struct dc_info_packet vrr_infopacket = {0};
7378 struct amdgpu_device *adev = dm->adev;
7379 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
7380 unsigned long flags;
7386 * TODO: Determine why min/max totals and vrefresh can be 0 here.
7387 * For now it's sufficient to just guard against these conditions.
7390 if (!new_stream->timing.h_total || !new_stream->timing.v_total)
7393 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
7394 vrr_params = acrtc->dm_irq_params.vrr_params;
7397 mod_freesync_handle_preflip(
7398 dm->freesync_module,
7401 flip_timestamp_in_us,
7404 if (adev->family < AMDGPU_FAMILY_AI &&
7405 amdgpu_dm_vrr_active(new_crtc_state)) {
7406 mod_freesync_handle_v_update(dm->freesync_module,
7407 new_stream, &vrr_params);
7409 /* Need to call this before the frame ends. */
7410 dc_stream_adjust_vmin_vmax(dm->dc,
7411 new_crtc_state->stream,
7412 &vrr_params.adjust);
7416 mod_freesync_build_vrr_infopacket(
7417 dm->freesync_module,
7421 TRANSFER_FUNC_UNKNOWN,
/* Track deltas so the commit path only resends what changed. */
7424 new_crtc_state->freesync_timing_changed |=
7425 (memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
7427 sizeof(vrr_params.adjust)) != 0);
7429 new_crtc_state->freesync_vrr_info_changed |=
7430 (memcmp(&new_crtc_state->vrr_infopacket,
7432 sizeof(vrr_infopacket)) != 0);
7434 acrtc->dm_irq_params.vrr_params = vrr_params;
7435 new_crtc_state->vrr_infopacket = vrr_infopacket;
7437 new_stream->adjust = acrtc->dm_irq_params.vrr_params.adjust;
7438 new_stream->vrr_infopacket = vrr_infopacket;
7440 if (new_crtc_state->freesync_vrr_info_changed)
7441 DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d",
7442 new_crtc_state->base.crtc->base.id,
7443 (int)new_crtc_state->base.vrr_enabled,
7444 (int)vrr_params.state);
7446 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
/*
 * Recompute the FreeSync/VRR parameters for a stream and copy the bits
 * the DM IRQ handler needs (config, active plane count, vrr params) into
 * acrtc->dm_irq_params, under the DRM event_lock.
 */
7449 static void update_stream_irq_parameters(
7450 struct amdgpu_display_manager *dm,
7451 struct dm_crtc_state *new_crtc_state)
7453 struct dc_stream_state *new_stream = new_crtc_state->stream;
7454 struct mod_vrr_params vrr_params;
7455 struct mod_freesync_config config = new_crtc_state->freesync_config;
7456 struct amdgpu_device *adev = dm->adev;
7457 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
7458 unsigned long flags;
7464 * TODO: Determine why min/max totals and vrefresh can be 0 here.
7465 * For now it's sufficient to just guard against these conditions.
7467 if (!new_stream->timing.h_total || !new_stream->timing.v_total)
7470 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
7471 vrr_params = acrtc->dm_irq_params.vrr_params;
/* VRR state follows the connector's capability and DRM vrr_enabled. */
7473 if (new_crtc_state->vrr_supported &&
7474 config.min_refresh_in_uhz &&
7475 config.max_refresh_in_uhz) {
7476 config.state = new_crtc_state->base.vrr_enabled ?
7477 VRR_STATE_ACTIVE_VARIABLE :
7480 config.state = VRR_STATE_UNSUPPORTED;
7483 mod_freesync_build_vrr_params(dm->freesync_module,
7485 &config, &vrr_params);
7487 new_crtc_state->freesync_timing_changed |=
7488 (memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
7489 &vrr_params.adjust, sizeof(vrr_params.adjust)) != 0);
7491 new_crtc_state->freesync_config = config;
7492 /* Copy state for access from DM IRQ handler */
7493 acrtc->dm_irq_params.freesync_config = config;
7494 acrtc->dm_irq_params.active_planes = new_crtc_state->active_planes;
7495 acrtc->dm_irq_params.vrr_params = vrr_params;
7496 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
/*
 * Handle VRR on/off transitions for a CRTC: while VRR is active we hold
 * a vblank reference (and enable the vupdate IRQ) so vblank interrupts
 * are never disabled mid-VRR; the reference is dropped when VRR turns
 * off again.
 */
7499 static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
7500 struct dm_crtc_state *new_state)
7502 bool old_vrr_active = amdgpu_dm_vrr_active(old_state);
7503 bool new_vrr_active = amdgpu_dm_vrr_active(new_state);
7505 if (!old_vrr_active && new_vrr_active) {
7506 /* Transition VRR inactive -> active:
7507 * While VRR is active, we must not disable vblank irq, as a
7508 * reenable after disable would compute bogus vblank/pflip
7509 * timestamps if it likely happened inside display front-porch.
7511 * We also need vupdate irq for the actual core vblank handling
7514 dm_set_vupdate_irq(new_state->base.crtc, true);
7515 drm_crtc_vblank_get(new_state->base.crtc);
7516 DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
7517 __func__, new_state->base.crtc->base.id);
7518 } else if (old_vrr_active && !new_vrr_active) {
7519 /* Transition VRR active -> inactive:
7520 * Allow vblank irq disable again for fixed refresh rate.
7522 dm_set_vupdate_irq(new_state->base.crtc, false);
7523 drm_crtc_vblank_put(new_state->base.crtc);
7524 DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
7525 __func__, new_state->base.crtc->base.id);
/*
 * Push cursor updates for every cursor plane in the atomic state by
 * delegating to handle_cursor_update().
 */
7529 static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state)
7531 struct drm_plane *plane;
7532 struct drm_plane_state *old_plane_state, *new_plane_state;
7536 * TODO: Make this per-stream so we don't issue redundant updates for
7537 * commits with multiple streams.
7539 for_each_oldnew_plane_in_state(state, plane, old_plane_state,
7541 if (plane->type == DRM_PLANE_TYPE_CURSOR)
7542 handle_cursor_update(plane, old_plane_state);
/*
 * Program all plane updates for one CRTC as part of the atomic commit
 * tail.  Builds a dc_surface_update bundle for every non-cursor plane on
 * @pcrtc (scaling, plane info, flip address, color management), waits on
 * the framebuffers' fences, throttles the flip against the target
 * vblank, arms the pageflip event, then commits the whole bundle to DC
 * under dm->dc_lock.  PSR is set up/enabled/disabled around the commit
 * depending on the update type.
 */
7545 static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
7546 struct dc_state *dc_state,
7547 struct drm_device *dev,
7548 struct amdgpu_display_manager *dm,
7549 struct drm_crtc *pcrtc,
7550 bool wait_for_vblank)
7553 uint64_t timestamp_ns;
7554 struct drm_plane *plane;
7555 struct drm_plane_state *old_plane_state, *new_plane_state;
7556 struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
7557 struct drm_crtc_state *new_pcrtc_state =
7558 drm_atomic_get_new_crtc_state(state, pcrtc);
7559 struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
7560 struct dm_crtc_state *dm_old_crtc_state =
7561 to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
7562 int planes_count = 0, vpos, hpos;
7564 unsigned long flags;
7565 struct amdgpu_bo *abo;
7566 uint32_t target_vblank, last_flip_vblank;
7567 bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
7568 bool pflip_present = false;
/* Per-commit scratch bundle; heap-allocated because it is large. */
7570 struct dc_surface_update surface_updates[MAX_SURFACES];
7571 struct dc_plane_info plane_infos[MAX_SURFACES];
7572 struct dc_scaling_info scaling_infos[MAX_SURFACES];
7573 struct dc_flip_addrs flip_addrs[MAX_SURFACES];
7574 struct dc_stream_update stream_update;
7577 bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
7580 dm_error("Failed to allocate update bundle\n");
7585 * Disable the cursor first if we're disabling all the planes.
7586 * It'll remain on the screen after the planes are re-enabled
7589 if (acrtc_state->active_planes == 0)
7590 amdgpu_dm_commit_cursors(state);
7592 /* update planes when needed */
7593 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
7594 struct drm_crtc *crtc = new_plane_state->crtc;
7595 struct drm_crtc_state *new_crtc_state;
7596 struct drm_framebuffer *fb = new_plane_state->fb;
7597 struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)fb;
7598 bool plane_needs_flip;
7599 struct dc_plane_state *dc_plane;
7600 struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);
7602 /* Cursor plane is handled after stream updates */
7603 if (plane->type == DRM_PLANE_TYPE_CURSOR)
7606 if (!fb || !crtc || pcrtc != crtc)
7609 new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
7610 if (!new_crtc_state->active)
7613 dc_plane = dm_new_plane_state->dc_state;
7615 bundle->surface_updates[planes_count].surface = dc_plane;
7616 if (new_pcrtc_state->color_mgmt_changed) {
7617 bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
7618 bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func;
7619 bundle->surface_updates[planes_count].gamut_remap_matrix = &dc_plane->gamut_remap_matrix;
7622 fill_dc_scaling_info(new_plane_state,
7623 &bundle->scaling_infos[planes_count]);
7625 bundle->surface_updates[planes_count].scaling_info =
7626 &bundle->scaling_infos[planes_count];
/* A pageflip only happens when both old and new state have an FB. */
7628 plane_needs_flip = old_plane_state->fb && new_plane_state->fb;
7630 pflip_present = pflip_present || plane_needs_flip;
7632 if (!plane_needs_flip) {
7637 abo = gem_to_amdgpu_bo(fb->obj[0]);
7640 * Wait for all fences on this FB. Do limited wait to avoid
7641 * deadlock during GPU reset when this fence will not signal
7642 * but we hold reservation lock for the BO.
7644 r = dma_resv_wait_timeout_rcu(abo->tbo.base.resv, true,
7646 msecs_to_jiffies(5000));
7647 if (unlikely(r <= 0))
7648 DRM_ERROR("Waiting for fences timed out!");
7650 fill_dc_plane_info_and_addr(
7651 dm->adev, new_plane_state,
7653 &bundle->plane_infos[planes_count],
7654 &bundle->flip_addrs[planes_count].address,
7655 afb->tmz_surface, false);
7657 DRM_DEBUG_DRIVER("plane: id=%d dcc_en=%d\n",
7658 new_plane_state->plane->index,
7659 bundle->plane_infos[planes_count].dcc.enable);
7661 bundle->surface_updates[planes_count].plane_info =
7662 &bundle->plane_infos[planes_count];
7665 * Only allow immediate flips for fast updates that don't
7666 * change FB pitch, DCC state, rotation or mirroing.
7668 bundle->flip_addrs[planes_count].flip_immediate =
7669 crtc->state->async_flip &&
7670 acrtc_state->update_type == UPDATE_TYPE_FAST;
7672 timestamp_ns = ktime_get_ns();
7673 bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
7674 bundle->surface_updates[planes_count].flip_addr = &bundle->flip_addrs[planes_count];
7675 bundle->surface_updates[planes_count].surface = dc_plane;
7677 if (!bundle->surface_updates[planes_count].surface) {
7678 DRM_ERROR("No surface for CRTC: id=%d\n",
7679 acrtc_attach->crtc_id);
/* FreeSync bookkeeping is driven off the primary plane's flip. */
7683 if (plane == pcrtc->primary)
7684 update_freesync_state_on_stream(
7687 acrtc_state->stream,
7689 bundle->flip_addrs[planes_count].flip_timestamp_in_us);
7691 DRM_DEBUG_DRIVER("%s Flipping to hi: 0x%x, low: 0x%x\n",
7693 bundle->flip_addrs[planes_count].address.grph.addr.high_part,
7694 bundle->flip_addrs[planes_count].address.grph.addr.low_part);
7700 if (pflip_present) {
7702 /* Use old throttling in non-vrr fixed refresh rate mode
7703 * to keep flip scheduling based on target vblank counts
7704 * working in a backwards compatible way, e.g., for
7705 * clients using the GLX_OML_sync_control extension or
7706 * DRI3/Present extension with defined target_msc.
7708 last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc);
7711 /* For variable refresh rate mode only:
7712 * Get vblank of last completed flip to avoid > 1 vrr
7713 * flips per video frame by use of throttling, but allow
7714 * flip programming anywhere in the possibly large
7715 * variable vrr vblank interval for fine-grained flip
7716 * timing control and more opportunity to avoid stutter
7717 * on late submission of flips.
7719 spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
7720 last_flip_vblank = acrtc_attach->dm_irq_params.last_flip_vblank;
7721 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
7724 target_vblank = last_flip_vblank + wait_for_vblank;
7727 * Wait until we're out of the vertical blank period before the one
7728 * targeted by the flip
7730 while ((acrtc_attach->enabled &&
7731 (amdgpu_display_get_crtc_scanoutpos(dm->ddev, acrtc_attach->crtc_id,
7732 0, &vpos, &hpos, NULL,
7733 NULL, &pcrtc->hwmode)
7734 & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
7735 (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
7736 (int)(target_vblank -
7737 amdgpu_get_vblank_counter_kms(pcrtc)) > 0)) {
7738 usleep_range(1000, 1100);
7742 * Prepare the flip event for the pageflip interrupt to handle.
7744 * This only works in the case where we've already turned on the
7745 * appropriate hardware blocks (eg. HUBP) so in the transition case
7746 * from 0 -> n planes we have to skip a hardware generated event
7747 * and rely on sending it from software.
7749 if (acrtc_attach->base.state->event &&
7750 acrtc_state->active_planes > 0) {
7751 drm_crtc_vblank_get(pcrtc);
7753 spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
7755 WARN_ON(acrtc_attach->pflip_status != AMDGPU_FLIP_NONE);
7756 prepare_flip_isr(acrtc_attach);
7758 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
7761 if (acrtc_state->stream) {
7762 if (acrtc_state->freesync_vrr_info_changed)
7763 bundle->stream_update.vrr_infopacket =
7764 &acrtc_state->stream->vrr_infopacket;
7768 /* Update the planes if changed or disable if we don't have any. */
7769 if ((planes_count || acrtc_state->active_planes == 0) &&
7770 acrtc_state->stream) {
7771 bundle->stream_update.stream = acrtc_state->stream;
7772 if (new_pcrtc_state->mode_changed) {
7773 bundle->stream_update.src = acrtc_state->stream->src;
7774 bundle->stream_update.dst = acrtc_state->stream->dst;
7777 if (new_pcrtc_state->color_mgmt_changed) {
7779 * TODO: This isn't fully correct since we've actually
7780 * already modified the stream in place.
7782 bundle->stream_update.gamut_remap =
7783 &acrtc_state->stream->gamut_remap_matrix;
7784 bundle->stream_update.output_csc_transform =
7785 &acrtc_state->stream->csc_color_matrix;
7786 bundle->stream_update.out_transfer_func =
7787 acrtc_state->stream->out_transfer_func;
7790 acrtc_state->stream->abm_level = acrtc_state->abm_level;
7791 if (acrtc_state->abm_level != dm_old_crtc_state->abm_level)
7792 bundle->stream_update.abm_level = &acrtc_state->abm_level;
7795 * If FreeSync state on the stream has changed then we need to
7796 * re-adjust the min/max bounds now that DC doesn't handle this
7797 * as part of commit.
7799 if (amdgpu_dm_vrr_active(dm_old_crtc_state) !=
7800 amdgpu_dm_vrr_active(acrtc_state)) {
7801 spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
7802 dc_stream_adjust_vmin_vmax(
7803 dm->dc, acrtc_state->stream,
7804 &acrtc_attach->dm_irq_params.vrr_params.adjust);
7805 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
/* PSR must be inactive while a non-fast update is programmed. */
7807 mutex_lock(&dm->dc_lock);
7808 if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
7809 acrtc_state->stream->link->psr_settings.psr_allow_active)
7810 amdgpu_dm_psr_disable(acrtc_state->stream);
7812 dc_commit_updates_for_stream(dm->dc,
7813 bundle->surface_updates,
7815 acrtc_state->stream,
7816 &bundle->stream_update,
7820 * Enable or disable the interrupts on the backend.
7822 * Most pipes are put into power gating when unused.
7824 * When power gating is enabled on a pipe we lose the
7825 * interrupt enablement state when power gating is disabled.
7827 * So we need to update the IRQ control state in hardware
7828 * whenever the pipe turns on (since it could be previously
7829 * power gated) or off (since some pipes can't be power gated
7832 if (dm_old_crtc_state->active_planes != acrtc_state->active_planes)
7833 dm_update_pflip_irq_state(drm_to_adev(dev),
7836 if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
7837 acrtc_state->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED &&
7838 !acrtc_state->stream->link->psr_settings.psr_feature_enabled)
7839 amdgpu_dm_link_setup_psr(acrtc_state->stream);
7840 else if ((acrtc_state->update_type == UPDATE_TYPE_FAST) &&
7841 acrtc_state->stream->link->psr_settings.psr_feature_enabled &&
7842 !acrtc_state->stream->link->psr_settings.psr_allow_active) {
7843 amdgpu_dm_psr_enable(acrtc_state->stream);
7846 mutex_unlock(&dm->dc_lock);
7850 * Update cursor state *after* programming all the planes.
7851 * This avoids redundant programming in the case where we're going
7852 * to be disabling a single plane - those pipes are being disabled.
7854 if (acrtc_state->active_planes)
7855 amdgpu_dm_commit_cursors(state);
/*
 * Notify the audio side (ELD consumers) about connector changes in this
 * commit: first signal removals for connectors whose CRTC changed or
 * that need a modeset (audio_inst reset to -1), then signal additions
 * for connectors whose new stream reports a valid audio instance.
 * audio_inst updates are serialized under adev->dm.audio_lock.
 */
7861 static void amdgpu_dm_commit_audio(struct drm_device *dev,
7862 struct drm_atomic_state *state)
7864 struct amdgpu_device *adev = drm_to_adev(dev);
7865 struct amdgpu_dm_connector *aconnector;
7866 struct drm_connector *connector;
7867 struct drm_connector_state *old_con_state, *new_con_state;
7868 struct drm_crtc_state *new_crtc_state;
7869 struct dm_crtc_state *new_dm_crtc_state;
7870 const struct dc_stream_status *status;
7873 /* Notify device removals. */
7874 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
7875 if (old_con_state->crtc != new_con_state->crtc) {
7876 /* CRTC changes require notification. */
7880 if (!new_con_state->crtc)
7883 new_crtc_state = drm_atomic_get_new_crtc_state(
7884 state, new_con_state->crtc);
7886 if (!new_crtc_state)
7889 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
7893 aconnector = to_amdgpu_dm_connector(connector);
7895 mutex_lock(&adev->dm.audio_lock);
7896 inst = aconnector->audio_inst;
7897 aconnector->audio_inst = -1;
7898 mutex_unlock(&adev->dm.audio_lock);
7900 amdgpu_dm_audio_eld_notify(adev, inst);
7903 /* Notify audio device additions. */
7904 for_each_new_connector_in_state(state, connector, new_con_state, i) {
7905 if (!new_con_state->crtc)
7908 new_crtc_state = drm_atomic_get_new_crtc_state(
7909 state, new_con_state->crtc);
7911 if (!new_crtc_state)
7914 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
7917 new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
7918 if (!new_dm_crtc_state->stream)
7921 status = dc_stream_get_status(new_dm_crtc_state->stream);
7925 aconnector = to_amdgpu_dm_connector(connector);
7927 mutex_lock(&adev->dm.audio_lock);
7928 inst = status->audio_inst;
7929 aconnector->audio_inst = inst;
7930 mutex_unlock(&adev->dm.audio_lock);
7932 amdgpu_dm_audio_eld_notify(adev, inst);
7937 * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
7938 * @crtc_state: the DRM CRTC state
7939 * @stream_state: the DC stream state.
7941 * Copy the mirrored transient state flags from DRM, to DC. It is used to bring
7942 * a dc_stream_state's flags in sync with a drm_crtc_state's flags.
7944 static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
7945 struct dc_stream_state *stream_state)
/* A DRM-side full modeset implies the DC stream's mode changed too. */
7947 stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
7951 * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
7952 * @state: The atomic state to commit
7954 * This will tell DC to commit the constructed DC state from atomic_check,
7955 * programming the hardware. Any failures here implies a hardware failure, since
7956 * atomic check should have filtered anything non-kosher.
7958 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
7960 struct drm_device *dev = state->dev;
7961 struct amdgpu_device *adev = drm_to_adev(dev);
7962 struct amdgpu_display_manager *dm = &adev->dm;
7963 struct dm_atomic_state *dm_state;
7964 struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
7966 struct drm_crtc *crtc;
7967 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
7968 unsigned long flags;
7969 bool wait_for_vblank = true;
7970 struct drm_connector *connector;
7971 struct drm_connector_state *old_con_state, *new_con_state;
7972 struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
7973 int crtc_disable_count = 0;
7974 bool mode_set_reset_required = false;
7976 trace_amdgpu_dm_atomic_commit_tail_begin(state);
7978 drm_atomic_helper_update_legacy_modeset_state(dev, state);
7980 dm_state = dm_atomic_get_new_state(state);
7981 if (dm_state && dm_state->context) {
7982 dc_state = dm_state->context;
7984 /* No state changes, retain current state. */
7985 dc_state_temp = dc_create_state(dm->dc);
7986 ASSERT(dc_state_temp);
7987 dc_state = dc_state_temp;
7988 dc_resource_state_copy_construct_current(dm->dc, dc_state);
7991 for_each_oldnew_crtc_in_state (state, crtc, old_crtc_state,
7992 new_crtc_state, i) {
7993 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
7995 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
7997 if (old_crtc_state->active &&
7998 (!new_crtc_state->active ||
7999 drm_atomic_crtc_needs_modeset(new_crtc_state))) {
8000 manage_dm_interrupts(adev, acrtc, false);
8001 dc_stream_release(dm_old_crtc_state->stream);
8005 drm_atomic_helper_calc_timestamping_constants(state);
8007 /* update changed items */
8008 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8009 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8011 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8012 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8015 "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
8016 "planes_changed:%d, mode_changed:%d,active_changed:%d,"
8017 "connectors_changed:%d\n",
8019 new_crtc_state->enable,
8020 new_crtc_state->active,
8021 new_crtc_state->planes_changed,
8022 new_crtc_state->mode_changed,
8023 new_crtc_state->active_changed,
8024 new_crtc_state->connectors_changed);
8026 /* Disable cursor if disabling crtc */
8027 if (old_crtc_state->active && !new_crtc_state->active) {
8028 struct dc_cursor_position position;
8030 memset(&position, 0, sizeof(position));
8031 mutex_lock(&dm->dc_lock);
8032 dc_stream_set_cursor_position(dm_old_crtc_state->stream, &position);
8033 mutex_unlock(&dm->dc_lock);
8036 /* Copy all transient state flags into dc state */
8037 if (dm_new_crtc_state->stream) {
8038 amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base,
8039 dm_new_crtc_state->stream);
8042 /* handles headless hotplug case, updating new_state and
8043 * aconnector as needed
8046 if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {
8048 DRM_DEBUG_DRIVER("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);
8050 if (!dm_new_crtc_state->stream) {
8052 * this could happen because of issues with
8053 * userspace notifications delivery.
8054 * In this case userspace tries to set mode on
8055 * display which is disconnected in fact.
8056 * dc_sink is NULL in this case on aconnector.
8057 * We expect reset mode will come soon.
8059 * This can also happen when unplug is done
8060 * during resume sequence ended
8062 * In this case, we want to pretend we still
8063 * have a sink to keep the pipe running so that
8064 * hw state is consistent with the sw state
8066 DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
8067 __func__, acrtc->base.base.id);
8071 if (dm_old_crtc_state->stream)
8072 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
8074 pm_runtime_get_noresume(dev->dev);
8076 acrtc->enabled = true;
8077 acrtc->hw_mode = new_crtc_state->mode;
8078 crtc->hwmode = new_crtc_state->mode;
8079 mode_set_reset_required = true;
8080 } else if (modereset_required(new_crtc_state)) {
8081 DRM_DEBUG_DRIVER("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
8082 /* i.e. reset mode */
8083 if (dm_old_crtc_state->stream)
8084 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
8085 mode_set_reset_required = true;
8087 } /* for_each_crtc_in_state() */
8090 /* if there mode set or reset, disable eDP PSR */
8091 if (mode_set_reset_required)
8092 amdgpu_dm_psr_disable_all(dm);
8094 dm_enable_per_frame_crtc_master_sync(dc_state);
8095 mutex_lock(&dm->dc_lock);
8096 WARN_ON(!dc_commit_state(dm->dc, dc_state));
8097 mutex_unlock(&dm->dc_lock);
8100 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
8101 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8103 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8105 if (dm_new_crtc_state->stream != NULL) {
8106 const struct dc_stream_status *status =
8107 dc_stream_get_status(dm_new_crtc_state->stream);
8110 status = dc_stream_get_status_from_state(dc_state,
8111 dm_new_crtc_state->stream);
8113 DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc);
8115 acrtc->otg_inst = status->primary_otg_inst;
8118 #ifdef CONFIG_DRM_AMD_DC_HDCP
8119 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8120 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
8121 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
8122 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
8124 new_crtc_state = NULL;
8127 new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
8129 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8131 if (dm_new_crtc_state && dm_new_crtc_state->stream == NULL &&
8132 connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
8133 hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
8134 new_con_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
8135 dm_new_con_state->update_hdcp = true;
8139 if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue))
8140 hdcp_update_display(
8141 adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector,
8142 new_con_state->hdcp_content_type,
8143 new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED ? true
8148 /* Handle connector state changes */
8149 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8150 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
8151 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
8152 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
8153 struct dc_surface_update dummy_updates[MAX_SURFACES];
8154 struct dc_stream_update stream_update;
8155 struct dc_info_packet hdr_packet;
8156 struct dc_stream_status *status = NULL;
8157 bool abm_changed, hdr_changed, scaling_changed;
8159 memset(&dummy_updates, 0, sizeof(dummy_updates));
8160 memset(&stream_update, 0, sizeof(stream_update));
8163 new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
8164 old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
8167 /* Skip any modesets/resets */
8168 if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
8171 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8172 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8174 scaling_changed = is_scaling_state_different(dm_new_con_state,
8177 abm_changed = dm_new_crtc_state->abm_level !=
8178 dm_old_crtc_state->abm_level;
8181 is_hdr_metadata_different(old_con_state, new_con_state);
8183 if (!scaling_changed && !abm_changed && !hdr_changed)
8186 stream_update.stream = dm_new_crtc_state->stream;
8187 if (scaling_changed) {
8188 update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
8189 dm_new_con_state, dm_new_crtc_state->stream);
8191 stream_update.src = dm_new_crtc_state->stream->src;
8192 stream_update.dst = dm_new_crtc_state->stream->dst;
8196 dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level;
8198 stream_update.abm_level = &dm_new_crtc_state->abm_level;
8202 fill_hdr_info_packet(new_con_state, &hdr_packet);
8203 stream_update.hdr_static_metadata = &hdr_packet;
8206 status = dc_stream_get_status(dm_new_crtc_state->stream);
8208 WARN_ON(!status->plane_count);
8211 * TODO: DC refuses to perform stream updates without a dc_surface_update.
8212 * Here we create an empty update on each plane.
8213 * To fix this, DC should permit updating only stream properties.
8215 for (j = 0; j < status->plane_count; j++)
8216 dummy_updates[j].surface = status->plane_states[0];
8219 mutex_lock(&dm->dc_lock);
8220 dc_commit_updates_for_stream(dm->dc,
8222 status->plane_count,
8223 dm_new_crtc_state->stream,
8226 mutex_unlock(&dm->dc_lock);
8229 /* Count number of newly disabled CRTCs for dropping PM refs later. */
8230 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
8231 new_crtc_state, i) {
8232 if (old_crtc_state->active && !new_crtc_state->active)
8233 crtc_disable_count++;
8235 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8236 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8238 /* For freesync config update on crtc state and params for irq */
8239 update_stream_irq_parameters(dm, dm_new_crtc_state);
8241 /* Handle vrr on->off / off->on transitions */
8242 amdgpu_dm_handle_vrr_transition(dm_old_crtc_state,
8247 * Enable interrupts for CRTCs that are newly enabled or went through
8248 * a modeset. It was intentionally deferred until after the front end
8249 * state was modified to wait until the OTG was on and so the IRQ
8250 * handlers didn't access stale or invalid state.
8252 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8253 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8255 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8257 if (new_crtc_state->active &&
8258 (!old_crtc_state->active ||
8259 drm_atomic_crtc_needs_modeset(new_crtc_state))) {
8260 dc_stream_retain(dm_new_crtc_state->stream);
8261 acrtc->dm_irq_params.stream = dm_new_crtc_state->stream;
8262 manage_dm_interrupts(adev, acrtc, true);
8264 #ifdef CONFIG_DEBUG_FS
8266 * Frontend may have changed so reapply the CRC capture
8267 * settings for the stream.
8269 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8271 if (amdgpu_dm_is_valid_crc_source(dm_new_crtc_state->crc_src)) {
8272 amdgpu_dm_crtc_configure_crc_source(
8273 crtc, dm_new_crtc_state,
8274 dm_new_crtc_state->crc_src);
8280 for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
8281 if (new_crtc_state->async_flip)
8282 wait_for_vblank = false;
8284 /* update planes when needed per crtc*/
8285 for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
8286 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8288 if (dm_new_crtc_state->stream)
8289 amdgpu_dm_commit_planes(state, dc_state, dev,
8290 dm, crtc, wait_for_vblank);
8293 /* Update audio instances for each connector. */
8294 amdgpu_dm_commit_audio(dev, state);
8297 * send vblank event on all events not handled in flip and
8298 * mark consumed event for drm_atomic_helper_commit_hw_done
8300 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
8301 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
8303 if (new_crtc_state->event)
8304 drm_send_event_locked(dev, &new_crtc_state->event->base);
8306 new_crtc_state->event = NULL;
8308 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
8310 /* Signal HW programming completion */
8311 drm_atomic_helper_commit_hw_done(state);
8313 if (wait_for_vblank)
8314 drm_atomic_helper_wait_for_flip_done(dev, state);
8316 drm_atomic_helper_cleanup_planes(dev, state);
8318 /* return the stolen vga memory back to VRAM */
8319 if (!adev->mman.keep_stolen_vga_memory)
8320 amdgpu_bo_free_kernel(&adev->mman.stolen_vga_memory, NULL, NULL);
8321 amdgpu_bo_free_kernel(&adev->mman.stolen_extended_memory, NULL, NULL);
8324 * Finally, drop a runtime PM reference for each newly disabled CRTC,
8325 * so we can put the GPU into runtime suspend if we're not driving any
8328 for (i = 0; i < crtc_disable_count; i++)
8329 pm_runtime_put_autosuspend(dev->dev);
8330 pm_runtime_mark_last_busy(dev->dev);
8333 dc_release_state(dc_state_temp);
8337 static int dm_force_atomic_commit(struct drm_connector *connector)
8340 struct drm_device *ddev = connector->dev;
8341 struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
8342 struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
8343 struct drm_plane *plane = disconnected_acrtc->base.primary;
8344 struct drm_connector_state *conn_state;
8345 struct drm_crtc_state *crtc_state;
8346 struct drm_plane_state *plane_state;
8351 state->acquire_ctx = ddev->mode_config.acquire_ctx;
8353 /* Construct an atomic state to restore previous display setting */
8356 * Attach connectors to drm_atomic_state
8358 conn_state = drm_atomic_get_connector_state(state, connector);
8360 ret = PTR_ERR_OR_ZERO(conn_state);
8364 /* Attach crtc to drm_atomic_state*/
8365 crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
8367 ret = PTR_ERR_OR_ZERO(crtc_state);
8371 /* force a restore */
8372 crtc_state->mode_changed = true;
8374 /* Attach plane to drm_atomic_state */
8375 plane_state = drm_atomic_get_plane_state(state, plane);
8377 ret = PTR_ERR_OR_ZERO(plane_state);
8382 /* Call commit internally with the state we just constructed */
8383 ret = drm_atomic_commit(state);
8388 DRM_ERROR("Restoring old state failed with %i\n", ret);
8389 drm_atomic_state_put(state);
8395 * This function handles all cases when set mode does not come upon hotplug.
8396 * This includes when a display is unplugged then plugged back into the
 * same port and when running without usermode desktop manager support
8399 void dm_restore_drm_connector_state(struct drm_device *dev,
8400 struct drm_connector *connector)
8402 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
8403 struct amdgpu_crtc *disconnected_acrtc;
8404 struct dm_crtc_state *acrtc_state;
8406 if (!aconnector->dc_sink || !connector->state || !connector->encoder)
8409 disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
8410 if (!disconnected_acrtc)
8413 acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
8414 if (!acrtc_state->stream)
8418 * If the previous sink is not released and different from the current,
8419 * we deduce we are in a state where we can not rely on usermode call
8420 * to turn on the display, so we do it here
8422 if (acrtc_state->stream->sink != aconnector->dc_sink)
8423 dm_force_atomic_commit(&aconnector->base);
8427 * Grabs all modesetting locks to serialize against any blocking commits,
8428 * Waits for completion of all non blocking commits.
8430 static int do_aquire_global_lock(struct drm_device *dev,
8431 struct drm_atomic_state *state)
8433 struct drm_crtc *crtc;
8434 struct drm_crtc_commit *commit;
8438 * Adding all modeset locks to aquire_ctx will
8439 * ensure that when the framework release it the
8440 * extra locks we are locking here will get released to
8442 ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
8446 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
8447 spin_lock(&crtc->commit_lock);
8448 commit = list_first_entry_or_null(&crtc->commit_list,
8449 struct drm_crtc_commit, commit_entry);
8451 drm_crtc_commit_get(commit);
8452 spin_unlock(&crtc->commit_lock);
8458 * Make sure all pending HW programming completed and
8461 ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);
8464 ret = wait_for_completion_interruptible_timeout(
8465 &commit->flip_done, 10*HZ);
8468 DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done "
8469 "timed out\n", crtc->base.id, crtc->name);
8471 drm_crtc_commit_put(commit);
8474 return ret < 0 ? ret : 0;
8477 static void get_freesync_config_for_crtc(
8478 struct dm_crtc_state *new_crtc_state,
8479 struct dm_connector_state *new_con_state)
8481 struct mod_freesync_config config = {0};
8482 struct amdgpu_dm_connector *aconnector =
8483 to_amdgpu_dm_connector(new_con_state->base.connector);
8484 struct drm_display_mode *mode = &new_crtc_state->base.mode;
8485 int vrefresh = drm_mode_vrefresh(mode);
8487 new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
8488 vrefresh >= aconnector->min_vfreq &&
8489 vrefresh <= aconnector->max_vfreq;
8491 if (new_crtc_state->vrr_supported) {
8492 new_crtc_state->stream->ignore_msa_timing_param = true;
8493 config.state = new_crtc_state->base.vrr_enabled ?
8494 VRR_STATE_ACTIVE_VARIABLE :
8496 config.min_refresh_in_uhz =
8497 aconnector->min_vfreq * 1000000;
8498 config.max_refresh_in_uhz =
8499 aconnector->max_vfreq * 1000000;
8500 config.vsif_supported = true;
8504 new_crtc_state->freesync_config = config;
8507 static void reset_freesync_config_for_crtc(
8508 struct dm_crtc_state *new_crtc_state)
8510 new_crtc_state->vrr_supported = false;
8512 memset(&new_crtc_state->vrr_infopacket, 0,
8513 sizeof(new_crtc_state->vrr_infopacket));
8516 static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
8517 struct drm_atomic_state *state,
8518 struct drm_crtc *crtc,
8519 struct drm_crtc_state *old_crtc_state,
8520 struct drm_crtc_state *new_crtc_state,
8522 bool *lock_and_validation_needed)
8524 struct dm_atomic_state *dm_state = NULL;
8525 struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
8526 struct dc_stream_state *new_stream;
8530 * TODO Move this code into dm_crtc_atomic_check once we get rid of dc_validation_set
8531 * update changed items
8533 struct amdgpu_crtc *acrtc = NULL;
8534 struct amdgpu_dm_connector *aconnector = NULL;
8535 struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
8536 struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;
8540 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8541 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8542 acrtc = to_amdgpu_crtc(crtc);
8543 aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);
8545 /* TODO This hack should go away */
8546 if (aconnector && enable) {
8547 /* Make sure fake sink is created in plug-in scenario */
8548 drm_new_conn_state = drm_atomic_get_new_connector_state(state,
8550 drm_old_conn_state = drm_atomic_get_old_connector_state(state,
8553 if (IS_ERR(drm_new_conn_state)) {
8554 ret = PTR_ERR_OR_ZERO(drm_new_conn_state);
8558 dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
8559 dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);
8561 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
8564 new_stream = create_validate_stream_for_sink(aconnector,
8565 &new_crtc_state->mode,
8567 dm_old_crtc_state->stream);
8570 * we can have no stream on ACTION_SET if a display
8571 * was disconnected during S3, in this case it is not an
8572 * error, the OS will be updated after detection, and
8573 * will do the right thing on next atomic commit
8577 DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
8578 __func__, acrtc->base.base.id);
8584 * TODO: Check VSDB bits to decide whether this should
8585 * be enabled or not.
8587 new_stream->triggered_crtc_reset.enabled =
8588 dm->force_timing_sync;
8590 dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
8592 ret = fill_hdr_info_packet(drm_new_conn_state,
8593 &new_stream->hdr_static_metadata);
8598 * If we already removed the old stream from the context
8599 * (and set the new stream to NULL) then we can't reuse
8600 * the old stream even if the stream and scaling are unchanged.
8601 * We'll hit the BUG_ON and black screen.
8603 * TODO: Refactor this function to allow this check to work
8604 * in all conditions.
8606 if (dm_new_crtc_state->stream &&
8607 dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
8608 dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
8609 new_crtc_state->mode_changed = false;
8610 DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d",
8611 new_crtc_state->mode_changed);
8615 /* mode_changed flag may get updated above, need to check again */
8616 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
8620 "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
8621 "planes_changed:%d, mode_changed:%d,active_changed:%d,"
8622 "connectors_changed:%d\n",
8624 new_crtc_state->enable,
8625 new_crtc_state->active,
8626 new_crtc_state->planes_changed,
8627 new_crtc_state->mode_changed,
8628 new_crtc_state->active_changed,
8629 new_crtc_state->connectors_changed);
8631 /* Remove stream for any changed/disabled CRTC */
8634 if (!dm_old_crtc_state->stream)
8637 ret = dm_atomic_get_state(state, &dm_state);
8641 DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
8644 /* i.e. reset mode */
8645 if (dc_remove_stream_from_ctx(
8648 dm_old_crtc_state->stream) != DC_OK) {
8653 dc_stream_release(dm_old_crtc_state->stream);
8654 dm_new_crtc_state->stream = NULL;
8656 reset_freesync_config_for_crtc(dm_new_crtc_state);
8658 *lock_and_validation_needed = true;
8660 } else {/* Add stream for any updated/enabled CRTC */
8662 * Quick fix to prevent NULL pointer on new_stream when
8663 * added MST connectors not found in existing crtc_state in the chained mode
8664 * TODO: need to dig out the root cause of that
8666 if (!aconnector || (!aconnector->dc_sink && aconnector->mst_port))
8669 if (modereset_required(new_crtc_state))
8672 if (modeset_required(new_crtc_state, new_stream,
8673 dm_old_crtc_state->stream)) {
8675 WARN_ON(dm_new_crtc_state->stream);
8677 ret = dm_atomic_get_state(state, &dm_state);
8681 dm_new_crtc_state->stream = new_stream;
8683 dc_stream_retain(new_stream);
8685 DRM_DEBUG_DRIVER("Enabling DRM crtc: %d\n",
8688 if (dc_add_stream_to_ctx(
8691 dm_new_crtc_state->stream) != DC_OK) {
8696 *lock_and_validation_needed = true;
8701 /* Release extra reference */
8703 dc_stream_release(new_stream);
8706 * We want to do dc stream updates that do not require a
8707 * full modeset below.
8709 if (!(enable && aconnector && new_crtc_state->active))
8712 * Given above conditions, the dc state cannot be NULL because:
8713 * 1. We're in the process of enabling CRTCs (just been added
8714 * to the dc context, or already is on the context)
8715 * 2. Has a valid connector attached, and
8716 * 3. Is currently active and enabled.
8717 * => The dc stream state currently exists.
8719 BUG_ON(dm_new_crtc_state->stream == NULL);
8721 /* Scaling or underscan settings */
8722 if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state))
8723 update_stream_scaling_settings(
8724 &new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);
8727 dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
8730 * Color management settings. We also update color properties
8731 * when a modeset is needed, to ensure it gets reprogrammed.
8733 if (dm_new_crtc_state->base.color_mgmt_changed ||
8734 drm_atomic_crtc_needs_modeset(new_crtc_state)) {
8735 ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state);
8740 /* Update Freesync settings. */
8741 get_freesync_config_for_crtc(dm_new_crtc_state,
8748 dc_stream_release(new_stream);
8752 static bool should_reset_plane(struct drm_atomic_state *state,
8753 struct drm_plane *plane,
8754 struct drm_plane_state *old_plane_state,
8755 struct drm_plane_state *new_plane_state)
8757 struct drm_plane *other;
8758 struct drm_plane_state *old_other_state, *new_other_state;
8759 struct drm_crtc_state *new_crtc_state;
8763 * TODO: Remove this hack once the checks below are sufficient
8764 * enough to determine when we need to reset all the planes on
8767 if (state->allow_modeset)
8770 /* Exit early if we know that we're adding or removing the plane. */
8771 if (old_plane_state->crtc != new_plane_state->crtc)
8774 /* old crtc == new_crtc == NULL, plane not in context. */
8775 if (!new_plane_state->crtc)
8779 drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);
8781 if (!new_crtc_state)
8784 /* CRTC Degamma changes currently require us to recreate planes. */
8785 if (new_crtc_state->color_mgmt_changed)
8788 if (drm_atomic_crtc_needs_modeset(new_crtc_state))
8792 * If there are any new primary or overlay planes being added or
8793 * removed then the z-order can potentially change. To ensure
8794 * correct z-order and pipe acquisition the current DC architecture
8795 * requires us to remove and recreate all existing planes.
8797 * TODO: Come up with a more elegant solution for this.
8799 for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
8800 struct amdgpu_framebuffer *old_afb, *new_afb;
8801 if (other->type == DRM_PLANE_TYPE_CURSOR)
8804 if (old_other_state->crtc != new_plane_state->crtc &&
8805 new_other_state->crtc != new_plane_state->crtc)
8808 if (old_other_state->crtc != new_other_state->crtc)
8811 /* Src/dst size and scaling updates. */
8812 if (old_other_state->src_w != new_other_state->src_w ||
8813 old_other_state->src_h != new_other_state->src_h ||
8814 old_other_state->crtc_w != new_other_state->crtc_w ||
8815 old_other_state->crtc_h != new_other_state->crtc_h)
8818 /* Rotation / mirroring updates. */
8819 if (old_other_state->rotation != new_other_state->rotation)
8822 /* Blending updates. */
8823 if (old_other_state->pixel_blend_mode !=
8824 new_other_state->pixel_blend_mode)
8827 /* Alpha updates. */
8828 if (old_other_state->alpha != new_other_state->alpha)
8831 /* Colorspace changes. */
8832 if (old_other_state->color_range != new_other_state->color_range ||
8833 old_other_state->color_encoding != new_other_state->color_encoding)
8836 /* Framebuffer checks fall at the end. */
8837 if (!old_other_state->fb || !new_other_state->fb)
8840 /* Pixel format changes can require bandwidth updates. */
8841 if (old_other_state->fb->format != new_other_state->fb->format)
8844 old_afb = (struct amdgpu_framebuffer *)old_other_state->fb;
8845 new_afb = (struct amdgpu_framebuffer *)new_other_state->fb;
8847 /* Tiling and DCC changes also require bandwidth updates. */
8848 if (old_afb->tiling_flags != new_afb->tiling_flags ||
8849 old_afb->base.modifier != new_afb->base.modifier)
8856 static int dm_check_cursor_fb(struct amdgpu_crtc *new_acrtc,
8857 struct drm_plane_state *new_plane_state,
8858 struct drm_framebuffer *fb)
8860 struct amdgpu_device *adev = drm_to_adev(new_acrtc->base.dev);
8861 struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(fb);
8865 if (fb->width > new_acrtc->max_cursor_width ||
8866 fb->height > new_acrtc->max_cursor_height) {
8867 DRM_DEBUG_ATOMIC("Bad cursor FB size %dx%d\n",
8868 new_plane_state->fb->width,
8869 new_plane_state->fb->height);
8872 if (new_plane_state->src_w != fb->width << 16 ||
8873 new_plane_state->src_h != fb->height << 16) {
8874 DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
8878 /* Pitch in pixels */
8879 pitch = fb->pitches[0] / fb->format->cpp[0];
8881 if (fb->width != pitch) {
8882 DRM_DEBUG_ATOMIC("Cursor FB width %d doesn't match pitch %d",
8891 /* FB pitch is supported by cursor plane */
8894 DRM_DEBUG_ATOMIC("Bad cursor FB pitch %d px\n", pitch);
8898 /* Core DRM takes care of checking FB modifiers, so we only need to
8899 * check tiling flags when the FB doesn't have a modifier. */
8900 if (!(fb->flags & DRM_MODE_FB_MODIFIERS)) {
8901 if (adev->family < AMDGPU_FAMILY_AI) {
8902 linear = AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_2D_TILED_THIN1 &&
8903 AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_1D_TILED_THIN1 &&
8904 AMDGPU_TILING_GET(afb->tiling_flags, MICRO_TILE_MODE) == 0;
8906 linear = AMDGPU_TILING_GET(afb->tiling_flags, SWIZZLE_MODE) == 0;
8909 DRM_DEBUG_ATOMIC("Cursor FB not linear");
8917 static int dm_update_plane_state(struct dc *dc,
8918 struct drm_atomic_state *state,
8919 struct drm_plane *plane,
8920 struct drm_plane_state *old_plane_state,
8921 struct drm_plane_state *new_plane_state,
8923 bool *lock_and_validation_needed)
8926 struct dm_atomic_state *dm_state = NULL;
8927 struct drm_crtc *new_plane_crtc, *old_plane_crtc;
8928 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
8929 struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
8930 struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
8931 struct amdgpu_crtc *new_acrtc;
8936 new_plane_crtc = new_plane_state->crtc;
8937 old_plane_crtc = old_plane_state->crtc;
8938 dm_new_plane_state = to_dm_plane_state(new_plane_state);
8939 dm_old_plane_state = to_dm_plane_state(old_plane_state);
8941 if (plane->type == DRM_PLANE_TYPE_CURSOR) {
8942 if (!enable || !new_plane_crtc ||
8943 drm_atomic_plane_disabling(plane->state, new_plane_state))
8946 new_acrtc = to_amdgpu_crtc(new_plane_crtc);
8948 if (new_plane_state->src_x != 0 || new_plane_state->src_y != 0) {
8949 DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
8953 if (new_plane_state->fb) {
8954 ret = dm_check_cursor_fb(new_acrtc, new_plane_state,
8955 new_plane_state->fb);
8963 needs_reset = should_reset_plane(state, plane, old_plane_state,
8966 /* Remove any changed/removed planes */
8971 if (!old_plane_crtc)
8974 old_crtc_state = drm_atomic_get_old_crtc_state(
8975 state, old_plane_crtc);
8976 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8978 if (!dm_old_crtc_state->stream)
8981 DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
8982 plane->base.id, old_plane_crtc->base.id);
8984 ret = dm_atomic_get_state(state, &dm_state);
8988 if (!dc_remove_plane_from_context(
8990 dm_old_crtc_state->stream,
8991 dm_old_plane_state->dc_state,
8992 dm_state->context)) {
8998 dc_plane_state_release(dm_old_plane_state->dc_state);
8999 dm_new_plane_state->dc_state = NULL;
9001 *lock_and_validation_needed = true;
9003 } else { /* Add new planes */
9004 struct dc_plane_state *dc_new_plane_state;
9006 if (drm_atomic_plane_disabling(plane->state, new_plane_state))
9009 if (!new_plane_crtc)
9012 new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
9013 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9015 if (!dm_new_crtc_state->stream)
9021 ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
9025 WARN_ON(dm_new_plane_state->dc_state);
9027 dc_new_plane_state = dc_create_plane_state(dc);
9028 if (!dc_new_plane_state)
9031 DRM_DEBUG_DRIVER("Enabling DRM plane: %d on DRM crtc %d\n",
9032 plane->base.id, new_plane_crtc->base.id);
9034 ret = fill_dc_plane_attributes(
9035 drm_to_adev(new_plane_crtc->dev),
9040 dc_plane_state_release(dc_new_plane_state);
9044 ret = dm_atomic_get_state(state, &dm_state);
9046 dc_plane_state_release(dc_new_plane_state);
9051 * Any atomic check errors that occur after this will
9052 * not need a release. The plane state will be attached
9053 * to the stream, and therefore part of the atomic
9054 * state. It'll be released when the atomic state is
9057 if (!dc_add_plane_to_context(
9059 dm_new_crtc_state->stream,
9061 dm_state->context)) {
9063 dc_plane_state_release(dc_new_plane_state);
9067 dm_new_plane_state->dc_state = dc_new_plane_state;
9069 /* Tell DC to do a full surface update every time there
9070 * is a plane change. Inefficient, but works for now.
9072 dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;
9074 *lock_and_validation_needed = true;
9081 static int dm_check_crtc_cursor(struct drm_atomic_state *state,
9082 struct drm_crtc *crtc,
9083 struct drm_crtc_state *new_crtc_state)
9085 struct drm_plane_state *new_cursor_state, *new_primary_state;
9086 int cursor_scale_w, cursor_scale_h, primary_scale_w, primary_scale_h;
9088 /* On DCE and DCN there is no dedicated hardware cursor plane. We get a
9089 * cursor per pipe but it's going to inherit the scaling and
9090 * positioning from the underlying pipe. Check the cursor plane's
9091 * blending properties match the primary plane's. */
9093 new_cursor_state = drm_atomic_get_new_plane_state(state, crtc->cursor);
9094 new_primary_state = drm_atomic_get_new_plane_state(state, crtc->primary);
9095 if (!new_cursor_state || !new_primary_state || !new_cursor_state->fb) {
9099 cursor_scale_w = new_cursor_state->crtc_w * 1000 /
9100 (new_cursor_state->src_w >> 16);
9101 cursor_scale_h = new_cursor_state->crtc_h * 1000 /
9102 (new_cursor_state->src_h >> 16);
9104 primary_scale_w = new_primary_state->crtc_w * 1000 /
9105 (new_primary_state->src_w >> 16);
9106 primary_scale_h = new_primary_state->crtc_h * 1000 /
9107 (new_primary_state->src_h >> 16);
9109 if (cursor_scale_w != primary_scale_w ||
9110 cursor_scale_h != primary_scale_h) {
9111 DRM_DEBUG_ATOMIC("Cursor plane scaling doesn't match primary plane\n");
9118 #if defined(CONFIG_DRM_AMD_DC_DCN)
9119 static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc)
9121 struct drm_connector *connector;
9122 struct drm_connector_state *conn_state;
9123 struct amdgpu_dm_connector *aconnector = NULL;
9125 for_each_new_connector_in_state(state, connector, conn_state, i) {
9126 if (conn_state->crtc != crtc)
9129 aconnector = to_amdgpu_dm_connector(connector);
9130 if (!aconnector->port || !aconnector->mst_port)
9139 return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_port->mst_mgr);
9144 * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
9145 * @dev: The DRM device
9146 * @state: The atomic state to commit
9148 * Validate that the given atomic state is programmable by DC into hardware.
9149 * This involves constructing a &struct dc_state reflecting the new hardware
9150 * state we wish to commit, then querying DC to see if it is programmable. It's
9151 * important not to modify the existing DC state. Otherwise, atomic_check
9152 * may unexpectedly commit hardware changes.
9154 * When validating the DC state, it's important that the right locks are
9155 * acquired. For full updates case which removes/adds/updates streams on one
9156 * CRTC while flipping on another CRTC, acquiring global lock will guarantee
9157 * that any such full update commit will wait for completion of any outstanding
9158 * flip using DRMs synchronization events.
9160 * Note that DM adds the affected connectors for all CRTCs in state, when that
9161 * might not seem necessary. This is because DC stream creation requires the
9162 * DC sink, which is tied to the DRM connector state. Cleaning this up should
9163 * be possible but non-trivial - a possible TODO item.
 * Return: 0 on success, or a negative error code if validation failed.
9167 static int amdgpu_dm_atomic_check(struct drm_device *dev,
9168 struct drm_atomic_state *state)
9170 struct amdgpu_device *adev = drm_to_adev(dev);
9171 struct dm_atomic_state *dm_state = NULL;
9172 struct dc *dc = adev->dm.dc;
9173 struct drm_connector *connector;
9174 struct drm_connector_state *old_con_state, *new_con_state;
9175 struct drm_crtc *crtc;
9176 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
9177 struct drm_plane *plane;
9178 struct drm_plane_state *old_plane_state, *new_plane_state;
9179 enum dc_status status;
9181 bool lock_and_validation_needed = false;
9182 struct dm_crtc_state *dm_old_crtc_state;
9184 trace_amdgpu_dm_atomic_check_begin(state);
9186 ret = drm_atomic_helper_check_modeset(dev, state);
9190 /* Check connector changes */
9191 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9192 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
9193 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
9195 /* Skip connectors that are disabled or part of modeset already. */
9196 if (!old_con_state->crtc && !new_con_state->crtc)
9199 if (!new_con_state->crtc)
9202 new_crtc_state = drm_atomic_get_crtc_state(state, new_con_state->crtc);
9203 if (IS_ERR(new_crtc_state)) {
9204 ret = PTR_ERR(new_crtc_state);
9208 if (dm_old_con_state->abm_level !=
9209 dm_new_con_state->abm_level)
9210 new_crtc_state->connectors_changed = true;
9213 #if defined(CONFIG_DRM_AMD_DC_DCN)
9214 if (adev->asic_type >= CHIP_NAVI10) {
9215 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9216 if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
9217 ret = add_affected_mst_dsc_crtcs(state, crtc);
9224 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9225 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9227 if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
9228 !new_crtc_state->color_mgmt_changed &&
9229 old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled &&
9230 dm_old_crtc_state->dsc_force_changed == false)
9233 if (!new_crtc_state->enable)
9236 ret = drm_atomic_add_affected_connectors(state, crtc);
9240 ret = drm_atomic_add_affected_planes(state, crtc);
9244 if (dm_old_crtc_state->dsc_force_changed)
9245 new_crtc_state->mode_changed = true;
9249 * Add all primary and overlay planes on the CRTC to the state
9250 * whenever a plane is enabled to maintain correct z-ordering
9251 * and to enable fast surface updates.
9253 drm_for_each_crtc(crtc, dev) {
9254 bool modified = false;
9256 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
9257 if (plane->type == DRM_PLANE_TYPE_CURSOR)
9260 if (new_plane_state->crtc == crtc ||
9261 old_plane_state->crtc == crtc) {
9270 drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
9271 if (plane->type == DRM_PLANE_TYPE_CURSOR)
9275 drm_atomic_get_plane_state(state, plane);
9277 if (IS_ERR(new_plane_state)) {
9278 ret = PTR_ERR(new_plane_state);
	/* Remove existing planes if they are modified */
9285 for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
9286 ret = dm_update_plane_state(dc, state, plane,
9290 &lock_and_validation_needed);
9295 /* Disable all crtcs which require disable */
9296 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9297 ret = dm_update_crtc_state(&adev->dm, state, crtc,
9301 &lock_and_validation_needed);
9306 /* Enable all crtcs which require enable */
9307 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9308 ret = dm_update_crtc_state(&adev->dm, state, crtc,
9312 &lock_and_validation_needed);
9317 /* Add new/modified planes */
9318 for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
9319 ret = dm_update_plane_state(dc, state, plane,
9323 &lock_and_validation_needed);
9328 /* Run this here since we want to validate the streams we created */
9329 ret = drm_atomic_helper_check_planes(dev, state);
9333 /* Check cursor planes scaling */
9334 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
9335 ret = dm_check_crtc_cursor(state, crtc, new_crtc_state);
9340 if (state->legacy_cursor_update) {
9342 * This is a fast cursor update coming from the plane update
9343 * helper, check if it can be done asynchronously for better
9346 state->async_update =
9347 !drm_atomic_helper_async_check(dev, state);
9350 * Skip the remaining global validation if this is an async
9351 * update. Cursor updates can be done without affecting
9352 * state or bandwidth calcs and this avoids the performance
9353 * penalty of locking the private state object and
9354 * allocating a new dc_state.
9356 if (state->async_update)
9360 /* Check scaling and underscan changes*/
9361 /* TODO Removed scaling changes validation due to inability to commit
9362 * new stream into context w\o causing full reset. Need to
9363 * decide how to handle.
9365 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9366 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
9367 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
9368 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
9370 /* Skip any modesets/resets */
9371 if (!acrtc || drm_atomic_crtc_needs_modeset(
9372 drm_atomic_get_new_crtc_state(state, &acrtc->base)))
9375 /* Skip any thing not scale or underscan changes */
9376 if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
9379 lock_and_validation_needed = true;
9383 * Streams and planes are reset when there are changes that affect
9384 * bandwidth. Anything that affects bandwidth needs to go through
9385 * DC global validation to ensure that the configuration can be applied
9388 * We have to currently stall out here in atomic_check for outstanding
9389 * commits to finish in this case because our IRQ handlers reference
9390 * DRM state directly - we can end up disabling interrupts too early
9393 * TODO: Remove this stall and drop DM state private objects.
9395 if (lock_and_validation_needed) {
9396 ret = dm_atomic_get_state(state, &dm_state);
9400 ret = do_aquire_global_lock(dev, state);
9404 #if defined(CONFIG_DRM_AMD_DC_DCN)
9405 if (!compute_mst_dsc_configs_for_state(state, dm_state->context))
9408 ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context);
9414 * Perform validation of MST topology in the state:
9415 * We need to perform MST atomic check before calling
9416 * dc_validate_global_state(), or there is a chance
9417 * to get stuck in an infinite loop and hang eventually.
9419 ret = drm_dp_mst_atomic_check(state);
9422 status = dc_validate_global_state(dc, dm_state->context, false);
9423 if (status != DC_OK) {
9424 DC_LOG_WARNING("DC global validation failure: %s (%d)",
9425 dc_status_to_str(status), status);
9431 * The commit is a fast update. Fast updates shouldn't change
9432 * the DC context, affect global validation, and can have their
9433 * commit work done in parallel with other commits not touching
9434 * the same resource. If we have a new DC context as part of
9435 * the DM atomic state from validation we need to free it and
9436 * retain the existing one instead.
9438 * Furthermore, since the DM atomic state only contains the DC
9439 * context and can safely be annulled, we can free the state
9440 * and clear the associated private object now to free
9441 * some memory and avoid a possible use-after-free later.
9444 for (i = 0; i < state->num_private_objs; i++) {
9445 struct drm_private_obj *obj = state->private_objs[i].ptr;
9447 if (obj->funcs == adev->dm.atomic_obj.funcs) {
9448 int j = state->num_private_objs-1;
9450 dm_atomic_destroy_state(obj,
9451 state->private_objs[i].state);
9453 /* If i is not at the end of the array then the
9454 * last element needs to be moved to where i was
9455 * before the array can safely be truncated.
9458 state->private_objs[i] =
9459 state->private_objs[j];
9461 state->private_objs[j].ptr = NULL;
9462 state->private_objs[j].state = NULL;
9463 state->private_objs[j].old_state = NULL;
9464 state->private_objs[j].new_state = NULL;
9466 state->num_private_objs = j;
9472 /* Store the overall update type for use later in atomic check. */
9473 for_each_new_crtc_in_state (state, crtc, new_crtc_state, i) {
9474 struct dm_crtc_state *dm_new_crtc_state =
9475 to_dm_crtc_state(new_crtc_state);
9477 dm_new_crtc_state->update_type = lock_and_validation_needed ?
9482 /* Must be success */
9485 trace_amdgpu_dm_atomic_check_finish(state, ret);
9490 if (ret == -EDEADLK)
9491 DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
9492 else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
9493 DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
9495 DRM_DEBUG_DRIVER("Atomic check failed with err: %d \n", ret);
9497 trace_amdgpu_dm_atomic_check_finish(state, ret);
9502 static bool is_dp_capable_without_timing_msa(struct dc *dc,
9503 struct amdgpu_dm_connector *amdgpu_dm_connector)
9506 bool capable = false;
9508 if (amdgpu_dm_connector->dc_link &&
9509 dm_helpers_dp_read_dpcd(
9511 amdgpu_dm_connector->dc_link,
9512 DP_DOWN_STREAM_PORT_COUNT,
9514 sizeof(dpcd_data))) {
9515 capable = (dpcd_data & DP_MSA_TIMING_PAR_IGNORED) ? true:false;
9520 void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
9524 bool edid_check_required;
9525 struct detailed_timing *timing;
9526 struct detailed_non_pixel *data;
9527 struct detailed_data_monitor_range *range;
9528 struct amdgpu_dm_connector *amdgpu_dm_connector =
9529 to_amdgpu_dm_connector(connector);
9530 struct dm_connector_state *dm_con_state = NULL;
9532 struct drm_device *dev = connector->dev;
9533 struct amdgpu_device *adev = drm_to_adev(dev);
9534 bool freesync_capable = false;
9536 if (!connector->state) {
9537 DRM_ERROR("%s - Connector has no state", __func__);
9542 dm_con_state = to_dm_connector_state(connector->state);
9544 amdgpu_dm_connector->min_vfreq = 0;
9545 amdgpu_dm_connector->max_vfreq = 0;
9546 amdgpu_dm_connector->pixel_clock_mhz = 0;
9551 dm_con_state = to_dm_connector_state(connector->state);
9553 edid_check_required = false;
9554 if (!amdgpu_dm_connector->dc_sink) {
9555 DRM_ERROR("dc_sink NULL, could not add free_sync module.\n");
9558 if (!adev->dm.freesync_module)
9561 * if edid non zero restrict freesync only for dp and edp
9564 if (amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
9565 || amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_EDP) {
9566 edid_check_required = is_dp_capable_without_timing_msa(
9568 amdgpu_dm_connector);
9571 if (edid_check_required == true && (edid->version > 1 ||
9572 (edid->version == 1 && edid->revision > 1))) {
9573 for (i = 0; i < 4; i++) {
9575 timing = &edid->detailed_timings[i];
9576 data = &timing->data.other_data;
9577 range = &data->data.range;
9579 * Check if monitor has continuous frequency mode
9581 if (data->type != EDID_DETAIL_MONITOR_RANGE)
9584 * Check for flag range limits only. If flag == 1 then
9585 * no additional timing information provided.
9586 * Default GTF, GTF Secondary curve and CVT are not
9589 if (range->flags != 1)
9592 amdgpu_dm_connector->min_vfreq = range->min_vfreq;
9593 amdgpu_dm_connector->max_vfreq = range->max_vfreq;
9594 amdgpu_dm_connector->pixel_clock_mhz =
9595 range->pixel_clock_mhz * 10;
9599 if (amdgpu_dm_connector->max_vfreq -
9600 amdgpu_dm_connector->min_vfreq > 10) {
9602 freesync_capable = true;
9608 dm_con_state->freesync_capable = freesync_capable;
9610 if (connector->vrr_capable_property)
9611 drm_connector_set_vrr_capable_property(connector,
9615 static void amdgpu_dm_set_psr_caps(struct dc_link *link)
9617 uint8_t dpcd_data[EDP_PSR_RECEIVER_CAP_SIZE];
9619 if (!(link->connector_signal & SIGNAL_TYPE_EDP))
9621 if (link->type == dc_connection_none)
9623 if (dm_helpers_dp_read_dpcd(NULL, link, DP_PSR_SUPPORT,
9624 dpcd_data, sizeof(dpcd_data))) {
9625 link->dpcd_caps.psr_caps.psr_version = dpcd_data[0];
9627 if (dpcd_data[0] == 0) {
9628 link->psr_settings.psr_version = DC_PSR_VERSION_UNSUPPORTED;
9629 link->psr_settings.psr_feature_enabled = false;
9631 link->psr_settings.psr_version = DC_PSR_VERSION_1;
9632 link->psr_settings.psr_feature_enabled = true;
9635 DRM_INFO("PSR support:%d\n", link->psr_settings.psr_feature_enabled);
9640 * amdgpu_dm_link_setup_psr() - configure psr link
9641 * @stream: stream state
9643 * Return: true if success
9645 static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream)
9647 struct dc_link *link = NULL;
9648 struct psr_config psr_config = {0};
9649 struct psr_context psr_context = {0};
9655 link = stream->link;
9657 psr_config.psr_version = link->dpcd_caps.psr_caps.psr_version;
9659 if (psr_config.psr_version > 0) {
9660 psr_config.psr_exit_link_training_required = 0x1;
9661 psr_config.psr_frame_capture_indication_req = 0;
9662 psr_config.psr_rfb_setup_time = 0x37;
9663 psr_config.psr_sdp_transmit_line_num_deadline = 0x20;
9664 psr_config.allow_smu_optimizations = 0x0;
9666 ret = dc_link_setup_psr(link, stream, &psr_config, &psr_context);
9669 DRM_DEBUG_DRIVER("PSR link: %d\n", link->psr_settings.psr_feature_enabled);
9675 * amdgpu_dm_psr_enable() - enable psr f/w
9676 * @stream: stream state
9678 * Return: true if success
9680 bool amdgpu_dm_psr_enable(struct dc_stream_state *stream)
9682 struct dc_link *link = stream->link;
9683 unsigned int vsync_rate_hz = 0;
9684 struct dc_static_screen_params params = {0};
9685 /* Calculate number of static frames before generating interrupt to
9688 // Init fail safe of 2 frames static
9689 unsigned int num_frames_static = 2;
9691 DRM_DEBUG_DRIVER("Enabling psr...\n");
9693 vsync_rate_hz = div64_u64(div64_u64((
9694 stream->timing.pix_clk_100hz * 100),
9695 stream->timing.v_total),
9696 stream->timing.h_total);
9699 * Calculate number of frames such that at least 30 ms of time has
9702 if (vsync_rate_hz != 0) {
9703 unsigned int frame_time_microsec = 1000000 / vsync_rate_hz;
9704 num_frames_static = (30000 / frame_time_microsec) + 1;
9707 params.triggers.cursor_update = true;
9708 params.triggers.overlay_update = true;
9709 params.triggers.surface_update = true;
9710 params.num_frames = num_frames_static;
9712 dc_stream_set_static_screen_params(link->ctx->dc,
9716 return dc_link_set_psr_allow_active(link, true, false, false);
9720 * amdgpu_dm_psr_disable() - disable psr f/w
9721 * @stream: stream state
9723 * Return: true if success
9725 static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream)
9728 DRM_DEBUG_DRIVER("Disabling psr...\n");
9730 return dc_link_set_psr_allow_active(stream->link, false, true, false);
9734 * amdgpu_dm_psr_disable() - disable psr f/w
9735 * if psr is enabled on any stream
9737 * Return: true if success
9739 static bool amdgpu_dm_psr_disable_all(struct amdgpu_display_manager *dm)
9741 DRM_DEBUG_DRIVER("Disabling psr if psr is enabled on any stream\n");
9742 return dc_set_psr_allow_active(dm->dc, false);
9745 void amdgpu_dm_trigger_timing_sync(struct drm_device *dev)
9747 struct amdgpu_device *adev = drm_to_adev(dev);
9748 struct dc *dc = adev->dm.dc;
9751 mutex_lock(&adev->dm.dc_lock);
9752 if (dc->current_state) {
9753 for (i = 0; i < dc->current_state->stream_count; ++i)
9754 dc->current_state->streams[i]
9755 ->triggered_crtc_reset.enabled =
9756 adev->dm.force_timing_sync;
9758 dm_enable_per_frame_crtc_master_sync(dc->current_state);
9759 dc_trigger_sync(dc, dc->current_state);
9761 mutex_unlock(&adev->dm.dc_lock);
9764 void dm_write_reg_func(const struct dc_context *ctx, uint32_t address,
9765 uint32_t value, const char *func_name)
9767 #ifdef DM_CHECK_ADDR_0
9769 DC_ERR("invalid register write. address = 0");
9773 cgs_write_register(ctx->cgs_device, address, value);
9774 trace_amdgpu_dc_wreg(&ctx->perf_trace->write_count, address, value);
9777 uint32_t dm_read_reg_func(const struct dc_context *ctx, uint32_t address,
9778 const char *func_name)
9781 #ifdef DM_CHECK_ADDR_0
9783 DC_ERR("invalid register read; address = 0\n");
9788 if (ctx->dmub_srv &&
9789 ctx->dmub_srv->reg_helper_offload.gather_in_progress &&
9790 !ctx->dmub_srv->reg_helper_offload.should_burst_write) {
9795 value = cgs_read_register(ctx->cgs_device, address);
9797 trace_amdgpu_dc_rreg(&ctx->perf_trace->read_count, address, value);