2 * Copyright 2015 Advanced Micro Devices, Inc.
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
26 #include "dm_services_types.h"
31 #include "amdgpu_display.h"
33 #include "amdgpu_dm.h"
34 #include "amdgpu_dm_types.h"
36 #include "amd_shared.h"
37 #include "amdgpu_dm_irq.h"
38 #include "dm_helpers.h"
40 #include "ivsrcid/ivsrcid_vislands30.h"
42 #include <linux/module.h>
43 #include <linux/moduleparam.h>
44 #include <linux/version.h>
46 #include <drm/drm_atomic.h>
47 #include <drm/drm_atomic_helper.h>
48 #include <drm/drm_dp_mst_helper.h>
50 #include "modules/inc/mod_freesync.h"
52 static enum drm_plane_type dm_surfaces_type_default[AMDGPU_MAX_PLANES] = {
53 DRM_PLANE_TYPE_PRIMARY,
54 DRM_PLANE_TYPE_PRIMARY,
55 DRM_PLANE_TYPE_PRIMARY,
56 DRM_PLANE_TYPE_PRIMARY,
57 DRM_PLANE_TYPE_PRIMARY,
58 DRM_PLANE_TYPE_PRIMARY,
61 static enum drm_plane_type dm_surfaces_type_carizzo[AMDGPU_MAX_PLANES] = {
62 DRM_PLANE_TYPE_PRIMARY,
63 DRM_PLANE_TYPE_PRIMARY,
64 DRM_PLANE_TYPE_PRIMARY,
65 DRM_PLANE_TYPE_OVERLAY,/* YUV Capable Underlay */
68 static enum drm_plane_type dm_surfaces_type_stoney[AMDGPU_MAX_PLANES] = {
69 DRM_PLANE_TYPE_PRIMARY,
70 DRM_PLANE_TYPE_PRIMARY,
71 DRM_PLANE_TYPE_OVERLAY, /* YUV Capable Underlay */
75 * dm_vblank_get_counter
78 * Get counter for number of vertical blanks
81 * struct amdgpu_device *adev - [in] desired amdgpu device
82 * int disp_idx - [in] which CRTC to get the counter from
85 * Counter for vertical blanks
87 static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
89 if (crtc >= adev->mode_info.num_crtc)
92 struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
94 if (NULL == acrtc->stream) {
95 DRM_ERROR("dc_stream is NULL for crtc '%d'!\n", crtc);
99 return dc_stream_get_vblank_counter(acrtc->stream);
103 static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
104 u32 *vbl, u32 *position)
106 if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
109 struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
111 if (NULL == acrtc->stream) {
112 DRM_ERROR("dc_stream is NULL for crtc '%d'!\n", crtc);
116 return dc_stream_get_scanoutpos(acrtc->stream, vbl, position);
/* amd_ip_funcs.is_idle stub — DM has no idle state to report yet. */
static bool dm_is_idle(void *handle)
{
	/* XXX todo */
	return true;
}
/* amd_ip_funcs.wait_for_idle stub — nothing to wait on yet. */
static int dm_wait_for_idle(void *handle)
{
	/* XXX todo */
	return 0;
}
/* amd_ip_funcs.check_soft_reset stub — DM never requests a soft reset. */
static bool dm_check_soft_reset(void *handle)
{
	return false;
}
/* amd_ip_funcs.soft_reset stub — no reset action implemented. */
static int dm_soft_reset(void *handle)
{
	/* XXX todo */
	return 0;
}
145 static struct amdgpu_crtc *get_crtc_by_otg_inst(
146 struct amdgpu_device *adev,
149 struct drm_device *dev = adev->ddev;
150 struct drm_crtc *crtc;
151 struct amdgpu_crtc *amdgpu_crtc;
154 * following if is check inherited from both functions where this one is
155 * used now. Need to be checked why it could happen.
157 if (otg_inst == -1) {
159 return adev->mode_info.crtcs[0];
162 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
163 amdgpu_crtc = to_amdgpu_crtc(crtc);
165 if (amdgpu_crtc->otg_inst == otg_inst)
172 static void dm_pflip_high_irq(void *interrupt_params)
174 struct amdgpu_crtc *amdgpu_crtc;
175 struct common_irq_params *irq_params = interrupt_params;
176 struct amdgpu_device *adev = irq_params->adev;
179 amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);
181 /* IRQ could occur when in initial stage */
182 /*TODO work and BO cleanup */
183 if (amdgpu_crtc == NULL) {
184 DRM_DEBUG_DRIVER("CRTC is null, returning.\n");
188 spin_lock_irqsave(&adev->ddev->event_lock, flags);
190 if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED){
191 DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d !=AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p] \n",
192 amdgpu_crtc->pflip_status,
193 AMDGPU_FLIP_SUBMITTED,
194 amdgpu_crtc->crtc_id,
196 spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
201 /* wakeup usersapce */
202 if (amdgpu_crtc->event
203 && amdgpu_crtc->event->event.base.type
204 == DRM_EVENT_FLIP_COMPLETE) {
205 /* Update to correct count/ts if racing with vblank irq */
206 drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);
208 drm_crtc_send_vblank_event(&amdgpu_crtc->base, amdgpu_crtc->event);
209 /* page flip completed. clean up */
210 amdgpu_crtc->event = NULL;
214 amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
215 spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
217 DRM_DEBUG_DRIVER("%s - crtc :%d[%p], pflip_stat:AMDGPU_FLIP_NONE\n",
218 __func__, amdgpu_crtc->crtc_id, amdgpu_crtc);
220 drm_crtc_vblank_put(&amdgpu_crtc->base);
223 static void dm_crtc_high_irq(void *interrupt_params)
225 struct common_irq_params *irq_params = interrupt_params;
226 struct amdgpu_device *adev = irq_params->adev;
227 uint8_t crtc_index = 0;
228 struct amdgpu_crtc *acrtc;
230 acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
233 crtc_index = acrtc->crtc_id;
235 drm_handle_vblank(adev->ddev, crtc_index);
238 static int dm_set_clockgating_state(void *handle,
239 enum amd_clockgating_state state)
244 static int dm_set_powergating_state(void *handle,
245 enum amd_powergating_state state)
250 /* Prototypes of private functions */
251 static int dm_early_init(void* handle);
253 static void hotplug_notify_work_func(struct work_struct *work)
255 struct amdgpu_display_manager *dm = container_of(work, struct amdgpu_display_manager, mst_hotplug_work);
256 struct drm_device *dev = dm->ddev;
258 drm_kms_helper_hotplug_event(dev);
263 * Returns 0 on success
265 int amdgpu_dm_init(struct amdgpu_device *adev)
267 struct dc_init_data init_data;
268 adev->dm.ddev = adev->ddev;
269 adev->dm.adev = adev;
271 DRM_INFO("DAL is enabled\n");
272 /* Zero all the fields */
273 memset(&init_data, 0, sizeof(init_data));
275 /* initialize DAL's lock (for SYNC context use) */
276 spin_lock_init(&adev->dm.dal_lock);
278 /* initialize DAL's mutex */
279 mutex_init(&adev->dm.dal_mutex);
281 if(amdgpu_dm_irq_init(adev)) {
282 DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
286 init_data.asic_id.chip_family = adev->family;
288 init_data.asic_id.pci_revision_id = adev->rev_id;
289 init_data.asic_id.hw_internal_rev = adev->external_rev_id;
291 init_data.asic_id.vram_width = adev->mc.vram_width;
292 /* TODO: initialize init_data.asic_id.vram_type here!!!! */
293 init_data.asic_id.atombios_base_address =
294 adev->mode_info.atom_context->bios;
296 init_data.driver = adev;
298 adev->dm.cgs_device = amdgpu_cgs_create_device(adev);
300 if (!adev->dm.cgs_device) {
301 DRM_ERROR("amdgpu: failed to create cgs device.\n");
305 init_data.cgs_device = adev->dm.cgs_device;
309 init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;
311 /* Display Core create. */
312 adev->dm.dc = dc_create(&init_data);
315 DRM_INFO("Display Core failed to initialize!\n");
317 INIT_WORK(&adev->dm.mst_hotplug_work, hotplug_notify_work_func);
319 adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
320 if (!adev->dm.freesync_module) {
322 "amdgpu: failed to initialize freesync_module.\n");
324 DRM_INFO("amdgpu: freesync_module init done %p.\n",
325 adev->dm.freesync_module);
327 if (amdgpu_dm_initialize_drm_device(adev)) {
329 "amdgpu: failed to initialize sw for display support.\n");
333 /* Update the actual used number of crtc */
334 adev->mode_info.num_crtc = adev->dm.display_indexes_num;
336 /* TODO: Add_display_info? */
338 /* TODO use dynamic cursor width */
339 adev->ddev->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
340 adev->ddev->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;
342 if (drm_vblank_init(adev->ddev, adev->dm.display_indexes_num)) {
344 "amdgpu: failed to initialize sw for display support.\n");
348 DRM_INFO("KMS initialized.\n");
352 amdgpu_dm_fini(adev);
357 void amdgpu_dm_fini(struct amdgpu_device *adev)
359 amdgpu_dm_destroy_drm_device(&adev->dm);
361 * TODO: pageflip, vlank interrupt
363 * amdgpu_dm_irq_fini(adev);
366 if (adev->dm.cgs_device) {
367 amdgpu_cgs_destroy_device(adev->dm.cgs_device);
368 adev->dm.cgs_device = NULL;
370 if (adev->dm.freesync_module) {
371 mod_freesync_destroy(adev->dm.freesync_module);
372 adev->dm.freesync_module = NULL;
374 /* DC Destroy TODO: Replace destroy DAL */
376 dc_destroy(&adev->dm.dc);
/* moved from amdgpu_dm_kms.c */
/* Legacy no-op kept for the old amdgpu_dm_kms interface.
 * Note: prototyped with (void) — an empty C parameter list means
 * "unspecified arguments", not "no arguments".
 */
void amdgpu_dm_destroy(void)
{
}
/* amd_ip_funcs.sw_init stub — software init happens in dm_hw_init. */
static int dm_sw_init(void *handle)
{
	return 0;
}
/* amd_ip_funcs.sw_fini stub — teardown happens in dm_hw_fini. */
static int dm_sw_fini(void *handle)
{
	return 0;
}
396 static int detect_mst_link_for_all_connectors(struct drm_device *dev)
398 struct amdgpu_connector *aconnector;
399 struct drm_connector *connector;
402 drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
404 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
405 aconnector = to_amdgpu_connector(connector);
406 if (aconnector->dc_link->type == dc_connection_mst_branch) {
407 DRM_INFO("DM_MST: starting TM on aconnector: %p [id: %d]\n",
408 aconnector, aconnector->base.base.id);
410 ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
412 DRM_ERROR("DM_MST: Failed to start MST\n");
413 ((struct dc_link *)aconnector->dc_link)->type = dc_connection_single;
419 drm_modeset_unlock(&dev->mode_config.connection_mutex);
423 static int dm_late_init(void *handle)
425 struct drm_device *dev = ((struct amdgpu_device *)handle)->ddev;
426 int r = detect_mst_link_for_all_connectors(dev);
431 static void s3_handle_mst(struct drm_device *dev, bool suspend)
433 struct amdgpu_connector *aconnector;
434 struct drm_connector *connector;
436 drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
438 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
439 aconnector = to_amdgpu_connector(connector);
440 if (aconnector->dc_link->type == dc_connection_mst_branch &&
441 !aconnector->mst_port) {
444 drm_dp_mst_topology_mgr_suspend(&aconnector->mst_mgr);
446 drm_dp_mst_topology_mgr_resume(&aconnector->mst_mgr);
450 drm_modeset_unlock(&dev->mode_config.connection_mutex);
/* amd_ip_funcs.hw_init: bring up the display manager and HPD handling. */
static int dm_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	/* Create DAL display manager */
	amdgpu_dm_init(adev);
	amdgpu_dm_hpd_init(adev);
	return 0;
}
/* amd_ip_funcs.hw_fini: tear down HPD, IRQ support and the display manager. */
static int dm_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_dm_hpd_fini(adev);

	amdgpu_dm_irq_fini(adev);
	amdgpu_dm_fini(adev);
	return 0;
}
474 static int dm_suspend(void *handle)
476 struct amdgpu_device *adev = handle;
477 struct amdgpu_display_manager *dm = &adev->dm;
480 s3_handle_mst(adev->ddev, true);
482 amdgpu_dm_irq_suspend(adev);
484 adev->dm.cached_state = drm_atomic_helper_suspend(adev->ddev);
488 DC_ACPI_CM_POWER_STATE_D3
494 struct amdgpu_connector *amdgpu_dm_find_first_crct_matching_connector(
495 struct drm_atomic_state *state,
496 struct drm_crtc *crtc,
500 struct drm_connector_state *conn_state;
501 struct drm_connector *connector;
502 struct drm_crtc *crtc_from_state;
504 for_each_connector_in_state(
512 connector->state->crtc;
514 if (crtc_from_state == crtc)
515 return to_amdgpu_connector(connector);
521 static int dm_resume(void *handle)
523 struct amdgpu_device *adev = handle;
524 struct amdgpu_display_manager *dm = &adev->dm;
526 /* power on hardware */
529 DC_ACPI_CM_POWER_STATE_D0
535 int amdgpu_dm_display_resume(struct amdgpu_device *adev )
537 struct drm_device *ddev = adev->ddev;
538 struct amdgpu_display_manager *dm = &adev->dm;
539 struct amdgpu_connector *aconnector;
540 struct drm_connector *connector;
541 struct drm_crtc *crtc;
542 struct drm_crtc_state *crtc_state;
546 /* program HPD filter */
549 /* On resume we need to rewrite the MSTM control bits to enamble MST*/
550 s3_handle_mst(ddev, false);
553 * early enable HPD Rx IRQ, should be done before set mode as short
554 * pulse interrupts are used for MST
556 amdgpu_dm_irq_resume_early(adev);
559 list_for_each_entry(connector,
560 &ddev->mode_config.connector_list, head) {
561 aconnector = to_amdgpu_connector(connector);
564 * this is the case when traversing through already created
565 * MST connectors, should be skipped
567 if (aconnector->mst_port)
570 dc_link_detect(aconnector->dc_link, false);
571 aconnector->dc_sink = NULL;
572 amdgpu_dm_update_connector_after_detect(aconnector);
575 /* Force mode set in atomic comit */
576 for_each_crtc_in_state(adev->dm.cached_state, crtc, crtc_state, i)
577 crtc_state->active_changed = true;
579 ret = drm_atomic_helper_resume(ddev, adev->dm.cached_state);
581 amdgpu_dm_irq_resume_late(adev);
586 static const struct amd_ip_funcs amdgpu_dm_funcs = {
588 .early_init = dm_early_init,
589 .late_init = dm_late_init,
590 .sw_init = dm_sw_init,
591 .sw_fini = dm_sw_fini,
592 .hw_init = dm_hw_init,
593 .hw_fini = dm_hw_fini,
594 .suspend = dm_suspend,
596 .is_idle = dm_is_idle,
597 .wait_for_idle = dm_wait_for_idle,
598 .check_soft_reset = dm_check_soft_reset,
599 .soft_reset = dm_soft_reset,
600 .set_clockgating_state = dm_set_clockgating_state,
601 .set_powergating_state = dm_set_powergating_state,
604 const struct amdgpu_ip_block_version dm_ip_block =
606 .type = AMD_IP_BLOCK_TYPE_DCE,
610 .funcs = &amdgpu_dm_funcs,
613 /* TODO: it is temporary non-const, should fixed later */
614 static struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
615 .fb_create = amdgpu_user_framebuffer_create,
616 .output_poll_changed = amdgpu_output_poll_changed,
617 .atomic_check = amdgpu_dm_atomic_check,
618 .atomic_commit = drm_atomic_helper_commit
621 static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
622 .atomic_commit_tail = amdgpu_dm_atomic_commit_tail
625 void amdgpu_dm_update_connector_after_detect(
626 struct amdgpu_connector *aconnector)
628 struct drm_connector *connector = &aconnector->base;
629 struct drm_device *dev = connector->dev;
630 const struct dc_sink *sink;
632 /* MST handled by drm_mst framework */
633 if (aconnector->mst_mgr.mst_state == true)
637 sink = aconnector->dc_link->local_sink;
639 /* Edid mgmt connector gets first update only in mode_valid hook and then
640 * the connector sink is set to either fake or physical sink depends on link status.
641 * don't do it here if u are during boot
643 if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
644 && aconnector->dc_em_sink) {
646 /* For S3 resume with headless use eml_sink to fake stream
647 * because on resume connecotr->sink is set ti NULL
649 mutex_lock(&dev->mode_config.mutex);
652 if (aconnector->dc_sink) {
653 amdgpu_dm_remove_sink_from_freesync_module(
655 /* retain and release bellow are used for
656 * bump up refcount for sink because the link don't point
657 * to it anymore after disconnect so on next crtc to connector
658 * reshuffle by UMD we will get into unwanted dc_sink release
660 if (aconnector->dc_sink != aconnector->dc_em_sink)
661 dc_sink_release(aconnector->dc_sink);
663 aconnector->dc_sink = sink;
664 amdgpu_dm_add_sink_to_freesync_module(
665 connector, aconnector->edid);
667 amdgpu_dm_remove_sink_from_freesync_module(connector);
668 if (!aconnector->dc_sink)
669 aconnector->dc_sink = aconnector->dc_em_sink;
670 else if (aconnector->dc_sink != aconnector->dc_em_sink)
671 dc_sink_retain(aconnector->dc_sink);
674 mutex_unlock(&dev->mode_config.mutex);
679 * TODO: temporary guard to look for proper fix
680 * if this sink is MST sink, we should not do anything
682 if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
685 if (aconnector->dc_sink == sink) {
686 /* We got a DP short pulse (Link Loss, DP CTS, etc...).
688 DRM_INFO("DCHPD: connector_id=%d: dc_sink didn't change.\n",
689 aconnector->connector_id);
693 DRM_INFO("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
694 aconnector->connector_id, aconnector->dc_sink, sink);
696 mutex_lock(&dev->mode_config.mutex);
698 /* 1. Update status of the drm connector
699 * 2. Send an event and let userspace tell us what to do */
701 /* TODO: check if we still need the S3 mode update workaround.
702 * If yes, put it here. */
703 if (aconnector->dc_sink)
704 amdgpu_dm_remove_sink_from_freesync_module(
707 aconnector->dc_sink = sink;
708 if (sink->dc_edid.length == 0)
709 aconnector->edid = NULL;
712 (struct edid *) sink->dc_edid.raw_edid;
715 drm_mode_connector_update_edid_property(connector,
718 amdgpu_dm_add_sink_to_freesync_module(connector, aconnector->edid);
721 amdgpu_dm_remove_sink_from_freesync_module(connector);
722 drm_mode_connector_update_edid_property(connector, NULL);
723 aconnector->num_modes = 0;
724 aconnector->dc_sink = NULL;
727 mutex_unlock(&dev->mode_config.mutex);
730 static void handle_hpd_irq(void *param)
732 struct amdgpu_connector *aconnector = (struct amdgpu_connector *)param;
733 struct drm_connector *connector = &aconnector->base;
734 struct drm_device *dev = connector->dev;
736 /* In case of failure or MST no need to update connector status or notify the OS
737 * since (for MST case) MST does this in it's own context.
739 mutex_lock(&aconnector->hpd_lock);
740 if (dc_link_detect(aconnector->dc_link, false)) {
741 amdgpu_dm_update_connector_after_detect(aconnector);
744 drm_modeset_lock_all(dev);
745 dm_restore_drm_connector_state(dev, connector);
746 drm_modeset_unlock_all(dev);
748 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
749 drm_kms_helper_hotplug_event(dev);
751 mutex_unlock(&aconnector->hpd_lock);
755 static void dm_handle_hpd_rx_irq(struct amdgpu_connector *aconnector)
757 uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
759 bool new_irq_handled = false;
761 int dpcd_bytes_to_read;
763 const int max_process_count = 30;
764 int process_count = 0;
766 const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);
768 if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
769 dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
770 /* DPCD 0x200 - 0x201 for downstream IRQ */
771 dpcd_addr = DP_SINK_COUNT;
773 dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
774 /* DPCD 0x2002 - 0x2005 for downstream IRQ */
775 dpcd_addr = DP_SINK_COUNT_ESI;
778 dret = drm_dp_dpcd_read(
779 &aconnector->dm_dp_aux.aux,
784 while (dret == dpcd_bytes_to_read &&
785 process_count < max_process_count) {
791 DRM_DEBUG_KMS("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
792 /* handle HPD short pulse irq */
793 if (aconnector->mst_mgr.mst_state)
795 &aconnector->mst_mgr,
799 if (new_irq_handled) {
800 /* ACK at DPCD to notify down stream */
801 const int ack_dpcd_bytes_to_write =
802 dpcd_bytes_to_read - 1;
804 for (retry = 0; retry < 3; retry++) {
807 wret = drm_dp_dpcd_write(
808 &aconnector->dm_dp_aux.aux,
811 ack_dpcd_bytes_to_write);
812 if (wret == ack_dpcd_bytes_to_write)
816 /* check if there is new irq to be handle */
817 dret = drm_dp_dpcd_read(
818 &aconnector->dm_dp_aux.aux,
823 new_irq_handled = false;
828 if (process_count == max_process_count)
829 DRM_DEBUG_KMS("Loop exceeded max iterations\n");
832 static void handle_hpd_rx_irq(void *param)
834 struct amdgpu_connector *aconnector = (struct amdgpu_connector *)param;
835 struct drm_connector *connector = &aconnector->base;
836 struct drm_device *dev = connector->dev;
837 const struct dc_link *dc_link = aconnector->dc_link;
838 bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
840 /* TODO:Temporary add mutex to protect hpd interrupt not have a gpio
841 * conflict, after implement i2c helper, this mutex should be
844 if (aconnector->dc_link->type != dc_connection_mst_branch)
845 mutex_lock(&aconnector->hpd_lock);
847 if (dc_link_handle_hpd_rx_irq(aconnector->dc_link) &&
848 !is_mst_root_connector) {
849 /* Downstream Port status changed. */
850 if (dc_link_detect(aconnector->dc_link, false)) {
851 amdgpu_dm_update_connector_after_detect(aconnector);
854 drm_modeset_lock_all(dev);
855 dm_restore_drm_connector_state(dev, connector);
856 drm_modeset_unlock_all(dev);
858 drm_kms_helper_hotplug_event(dev);
861 if ((dc_link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN) ||
862 (dc_link->type == dc_connection_mst_branch))
863 dm_handle_hpd_rx_irq(aconnector);
865 if (aconnector->dc_link->type != dc_connection_mst_branch)
866 mutex_unlock(&aconnector->hpd_lock);
869 static void register_hpd_handlers(struct amdgpu_device *adev)
871 struct drm_device *dev = adev->ddev;
872 struct drm_connector *connector;
873 struct amdgpu_connector *aconnector;
874 const struct dc_link *dc_link;
875 struct dc_interrupt_params int_params = {0};
877 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
878 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
880 list_for_each_entry(connector,
881 &dev->mode_config.connector_list, head) {
883 aconnector = to_amdgpu_connector(connector);
884 dc_link = aconnector->dc_link;
886 if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
887 int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
888 int_params.irq_source = dc_link->irq_source_hpd;
890 amdgpu_dm_irq_register_interrupt(adev, &int_params,
892 (void *) aconnector);
895 if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {
897 /* Also register for DP short pulse (hpd_rx). */
898 int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
899 int_params.irq_source = dc_link->irq_source_hpd_rx;
901 amdgpu_dm_irq_register_interrupt(adev, &int_params,
903 (void *) aconnector);
908 /* Register IRQ sources and initialize IRQ callbacks */
909 static int dce110_register_irq_handlers(struct amdgpu_device *adev)
911 struct dc *dc = adev->dm.dc;
912 struct common_irq_params *c_irq_params;
913 struct dc_interrupt_params int_params = {0};
916 unsigned client_id = AMDGPU_IH_CLIENTID_LEGACY;
918 if (adev->asic_type == CHIP_VEGA10)
919 client_id = AMDGPU_IH_CLIENTID_DCE;
921 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
922 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
924 /* Actions of amdgpu_irq_add_id():
925 * 1. Register a set() function with base driver.
926 * Base driver will call set() function to enable/disable an
927 * interrupt in DC hardware.
928 * 2. Register amdgpu_dm_irq_handler().
929 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
930 * coming from DC hardware.
931 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
932 * for acknowledging and handling. */
934 /* Use VBLANK interrupt */
935 for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
936 r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
938 DRM_ERROR("Failed to add crtc irq id!\n");
942 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
943 int_params.irq_source =
944 dc_interrupt_to_irq_source(dc, i, 0);
946 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
948 c_irq_params->adev = adev;
949 c_irq_params->irq_src = int_params.irq_source;
951 amdgpu_dm_irq_register_interrupt(adev, &int_params,
952 dm_crtc_high_irq, c_irq_params);
955 /* Use GRPH_PFLIP interrupt */
956 for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
957 i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
958 r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
960 DRM_ERROR("Failed to add page flip irq id!\n");
964 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
965 int_params.irq_source =
966 dc_interrupt_to_irq_source(dc, i, 0);
968 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
970 c_irq_params->adev = adev;
971 c_irq_params->irq_src = int_params.irq_source;
973 amdgpu_dm_irq_register_interrupt(adev, &int_params,
974 dm_pflip_high_irq, c_irq_params);
979 r = amdgpu_irq_add_id(adev, client_id,
980 VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
982 DRM_ERROR("Failed to add hpd irq id!\n");
986 register_hpd_handlers(adev);
991 static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
995 adev->mode_info.mode_config_initialized = true;
997 adev->ddev->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
998 adev->ddev->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;
1000 adev->ddev->mode_config.max_width = 16384;
1001 adev->ddev->mode_config.max_height = 16384;
1003 adev->ddev->mode_config.preferred_depth = 24;
1004 adev->ddev->mode_config.prefer_shadow = 1;
1005 /* indicate support of immediate flip */
1006 adev->ddev->mode_config.async_page_flip = true;
1008 adev->ddev->mode_config.fb_base = adev->mc.aper_base;
1010 r = amdgpu_modeset_create_props(adev);
1017 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
1018 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
1020 static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
1022 struct amdgpu_display_manager *dm = bl_get_data(bd);
1024 if (dc_link_set_backlight_level(dm->backlight_link,
1025 bd->props.brightness, 0, 0))
1031 static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
1033 return bd->props.brightness;
1036 static const struct backlight_ops amdgpu_dm_backlight_ops = {
1037 .get_brightness = amdgpu_dm_backlight_get_brightness,
1038 .update_status = amdgpu_dm_backlight_update_status,
1041 void amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
1044 struct backlight_properties props = { 0 };
1046 props.max_brightness = AMDGPU_MAX_BL_LEVEL;
1047 props.type = BACKLIGHT_RAW;
1049 snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
1050 dm->adev->ddev->primary->index);
1052 dm->backlight_dev = backlight_device_register(bl_name,
1053 dm->adev->ddev->dev,
1055 &amdgpu_dm_backlight_ops,
1058 if (NULL == dm->backlight_dev)
1059 DRM_ERROR("DM: Backlight registration failed!\n");
1061 DRM_INFO("DM: Registered Backlight device: %s\n", bl_name);
1066 /* In this architecture, the association
1067 * connector -> encoder -> crtc
1068 * id not really requried. The crtc and connector will hold the
1069 * display_index as an abstraction to use with DAL component
1071 * Returns 0 on success
1073 int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
1075 struct amdgpu_display_manager *dm = &adev->dm;
1077 struct amdgpu_connector *aconnector = NULL;
1078 struct amdgpu_encoder *aencoder = NULL;
1079 struct amdgpu_mode_info *mode_info = &adev->mode_info;
1082 link_cnt = dm->dc->caps.max_links;
1083 if (amdgpu_dm_mode_config_init(dm->adev)) {
1084 DRM_ERROR("DM: Failed to initialize mode config\n");
1088 for (i = 0; i < dm->dc->caps.max_surfaces; i++) {
1089 mode_info->planes[i] = kzalloc(sizeof(struct amdgpu_plane),
1091 if (!mode_info->planes[i]) {
1092 DRM_ERROR("KMS: Failed to allocate surface\n");
1093 goto fail_free_planes;
1095 mode_info->planes[i]->plane_type = mode_info->plane_type[i];
1096 if (amdgpu_dm_plane_init(dm, mode_info->planes[i], 0xff)) {
1097 DRM_ERROR("KMS: Failed to initialize plane\n");
1098 goto fail_free_planes;
1102 for (i = 0; i < dm->dc->caps.max_streams; i++)
1103 if (amdgpu_dm_crtc_init(dm, &mode_info->planes[i]->base, i)) {
1104 DRM_ERROR("KMS: Failed to initialize crtc\n");
1105 goto fail_free_planes;
1108 dm->display_indexes_num = dm->dc->caps.max_streams;
1110 /* loops over all connectors on the board */
1111 for (i = 0; i < link_cnt; i++) {
1113 if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
1115 "KMS: Cannot support more than %d display indexes\n",
1116 AMDGPU_DM_MAX_DISPLAY_INDEX);
1120 aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
1122 goto fail_free_planes;
1124 aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
1126 goto fail_free_connector;
1129 if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
1130 DRM_ERROR("KMS: Failed to initialize encoder\n");
1131 goto fail_free_encoder;
1134 if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
1135 DRM_ERROR("KMS: Failed to initialize connector\n");
1136 goto fail_free_encoder;
1139 if (dc_link_detect(dc_get_link_at_index(dm->dc, i), true))
1140 amdgpu_dm_update_connector_after_detect(aconnector);
1143 /* Software is initialized. Now we can register interrupt handlers. */
1144 switch (adev->asic_type) {
1151 case CHIP_POLARIS11:
1152 case CHIP_POLARIS10:
1153 case CHIP_POLARIS12:
1155 if (dce110_register_irq_handlers(dm->adev)) {
1156 DRM_ERROR("DM: Failed to initialize IRQ\n");
1157 goto fail_free_encoder;
1161 DRM_ERROR("Usupported ASIC type: 0x%X\n", adev->asic_type);
1162 goto fail_free_encoder;
1165 drm_mode_config_reset(dm->ddev);
1170 fail_free_connector:
1173 for (i = 0; i < dm->dc->caps.max_surfaces; i++)
1174 kfree(mode_info->planes[i]);
1178 void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
1180 drm_mode_config_cleanup(dm->ddev);
1184 /******************************************************************************
1185 * amdgpu_display_funcs functions
1186 *****************************************************************************/
/**
 * dm_bandwidth_update - program display watermarks
 *
 * @adev: amdgpu_device pointer
 *
 * Calculate and program the display watermarks and line buffer allocation.
 */
static void dm_bandwidth_update(struct amdgpu_device *adev)
{
	/* TODO: implement later */
}
1200 static void dm_set_backlight_level(struct amdgpu_encoder *amdgpu_encoder,
1203 /* TODO: translate amdgpu_encoder to display_index and call DAL */
1206 static u8 dm_get_backlight_level(struct amdgpu_encoder *amdgpu_encoder)
1208 /* TODO: translate amdgpu_encoder to display_index and call DAL */
1212 /******************************************************************************
1213 * Page Flip functions
1214 ******************************************************************************/
1217 * dm_page_flip - called by amdgpu_flip_work_func(), which is triggered
1218 * via DRM IOCTL, by user mode.
1220 * @adev: amdgpu_device pointer
1221 * @crtc_id: crtc to cleanup pageflip on
1222 * @crtc_base: new address of the crtc (GPU MC address)
1224 * Does the actual pageflip (surface address update).
static void dm_page_flip(struct amdgpu_device *adev,
			 int crtc_id, u64 crtc_base, bool async)
	struct amdgpu_crtc *acrtc;
	const struct dc_stream *stream;
	/* Flip-address descriptor handed to DC; zero-initialized. */
	struct dc_flip_addrs addr = { {0} };

	/*
	 * TODO risk of concurrency issues
	 *
	 * This should guarded by the dal_mutex but we can't do this since the
	 * caller uses a spin_lock on event_lock.
	 *
	 * If we wait on the dal_mutex a second page flip interrupt might come,
	 * spin on the event_lock, disabling interrupts while it does so. At
	 * this point the core can no longer be pre-empted and return to the
	 * thread that waited on the dal_mutex and we're deadlocked.
	 *
	 * With multiple cores the same essentially happens but might just take
	 * a little longer to lock up all cores.
	 *
	 * The reason we should lock on dal_mutex is so that we can be sure
	 * nobody messes with acrtc->stream after we read and check its value.
	 *
	 * We might be able to fix our concurrency issues with a work queue
	 * where we schedule all work items (mode_set, page_flip, etc.) and
	 * execute them one by one. Care needs to be taken to still deal with
	 * any potential concurrency issues arising from interrupt calls.
	 */

	acrtc = adev->mode_info.crtcs[crtc_id];
	stream = acrtc->stream;

	/* A flip is still outstanding on this CRTC — should not happen. */
	if (acrtc->pflip_status != AMDGPU_FLIP_NONE) {
		DRM_ERROR("flip queue: acrtc %d, already busy\n", acrtc->crtc_id);
		/* In commit tail framework this cannot happen */

	/*
	 * Received a page flip call after the display has been reset.
	 * Just return in this case. Everything should be clean-up on reset.
	 */

	/* Split the 64-bit GPU MC address into the high/low register halves. */
	addr.address.grph.addr.low_part = lower_32_bits(crtc_base);
	addr.address.grph.addr.high_part = upper_32_bits(crtc_base);
	/* async requests an immediate flip (no wait for vblank). */
	addr.flip_immediate = async;

	/* Latch the pending DRM event only if it is a flip-complete event. */
	if (acrtc->base.state->event &&
	    acrtc->base.state->event->event.base.type ==
	    DRM_EVENT_FLIP_COMPLETE) {
		acrtc->event = acrtc->base.state->event;

		/* Set the flip status */
		acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;

		/* Mark this event as consumed */
		acrtc->base.state->event = NULL;

	/* Program the new surface address into DC for this stream. */
	dc_flip_surface_addrs(adev->dm.dc,
			      dc_stream_get_status(stream)->surfaces,

	DRM_DEBUG_DRIVER("%s Flipping to hi: 0x%x, low: 0x%x \n",
			 addr.address.grph.addr.high_part,
			 addr.address.grph.addr.low_part);
/*
 * amdgpu_notify_freesync - DRM ioctl handler that pushes the requested
 * FreeSync (variable refresh) state to every currently-active DC stream.
 *
 * NOTE(review): freesync_params appears to be populated from the ioctl
 * @data payload in lines not visible in this chunk — confirm before
 * relying on its contents; the loop index 'i' is declared there too.
 */
static int amdgpu_notify_freesync(struct drm_device *dev, void *data,
				  struct drm_file *filp)
	struct mod_freesync_params freesync_params;
	uint8_t num_streams;
	struct amdgpu_device *adev = dev->dev_private;

	/* Get freesync enable flag from DRM */
	num_streams = dc_get_current_stream_count(adev->dm.dc);

	/* Apply the same freesync parameters to each active stream. */
	for (i = 0; i < num_streams; i++) {
		const struct dc_stream *stream;
		stream = dc_get_stream_at_index(adev->dm.dc, i);

		mod_freesync_update_state(adev->dm.freesync_module,
					  &stream, 1, &freesync_params);
/*
 * Display hooks the amdgpu core calls into for this DM. Entries set to
 * NULL are either unused with DC or handled internally by DAL/VBIOS
 * parsing, as noted per field.
 */
static const struct amdgpu_display_funcs dm_display_funcs = {
	.bandwidth_update = dm_bandwidth_update, /* called unconditionally */
	.vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */
	.vblank_wait = NULL,
	.backlight_set_level =
		dm_set_backlight_level,/* called unconditionally */
	.backlight_get_level =
		dm_get_backlight_level,/* called unconditionally */
	.hpd_sense = NULL,/* called unconditionally */
	.hpd_set_polarity = NULL, /* called unconditionally */
	.hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
	.page_flip = dm_page_flip, /* called unconditionally */
	.page_flip_get_scanoutpos =
		dm_crtc_get_scanoutpos,/* called unconditionally */
	.add_encoder = NULL, /* VBIOS parsing. DAL does it. */
	.add_connector = NULL, /* VBIOS parsing. DAL does it. */
	.notify_freesync = amdgpu_notify_freesync,
#if defined(CONFIG_DEBUG_KERNEL_DC)

/*
 * s3_debug_store - sysfs write handler that fakes an S3 suspend/resume
 * cycle for display testing (debug kernels only).
 *
 * NOTE(review): the 'buf'/'count' parameters, the 'ret'/'s3_state' locals
 * and the suspend branch are declared in lines not visible in this chunk.
 */
static ssize_t s3_debug_store(
		struct device *device,
		struct device_attribute *attr,
	/* Walk from the sysfs device back to the owning amdgpu device. */
	struct pci_dev *pdev = to_pci_dev(device);
	struct drm_device *drm_dev = pci_get_drvdata(pdev);
	struct amdgpu_device *adev = drm_dev->dev_private;

	/* Parse the user-written value; ret != 0 means bad input. */
	ret = kstrtoint(buf, 0, &s3_state);

	amdgpu_dm_display_resume(adev);
	/* Re-probe connectors so userspace sees the post-resume state. */
	drm_kms_helper_hotplug_event(adev->ddev);

	/* Consume the whole write on success; report 0 on parse failure. */
	return ret == 0 ? count : 0;

/* Write-only sysfs attribute backing the handler above. */
DEVICE_ATTR_WO(s3_debug);
1383 static int dm_early_init(void *handle)
1385 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1387 amdgpu_dm_set_irq_funcs(adev);
1389 switch (adev->asic_type) {
1392 adev->mode_info.num_crtc = 6;
1393 adev->mode_info.num_hpd = 6;
1394 adev->mode_info.num_dig = 6;
1395 adev->mode_info.plane_type = dm_surfaces_type_default;
1399 adev->mode_info.num_crtc = 6;
1400 adev->mode_info.num_hpd = 6;
1401 adev->mode_info.num_dig = 7;
1402 adev->mode_info.plane_type = dm_surfaces_type_default;
1405 adev->mode_info.num_crtc = 3;
1406 adev->mode_info.num_hpd = 6;
1407 adev->mode_info.num_dig = 9;
1408 adev->mode_info.plane_type = dm_surfaces_type_carizzo;
1411 adev->mode_info.num_crtc = 2;
1412 adev->mode_info.num_hpd = 6;
1413 adev->mode_info.num_dig = 9;
1414 adev->mode_info.plane_type = dm_surfaces_type_stoney;
1416 case CHIP_POLARIS11:
1417 case CHIP_POLARIS12:
1418 adev->mode_info.num_crtc = 5;
1419 adev->mode_info.num_hpd = 5;
1420 adev->mode_info.num_dig = 5;
1421 adev->mode_info.plane_type = dm_surfaces_type_default;
1423 case CHIP_POLARIS10:
1424 adev->mode_info.num_crtc = 6;
1425 adev->mode_info.num_hpd = 6;
1426 adev->mode_info.num_dig = 6;
1427 adev->mode_info.plane_type = dm_surfaces_type_default;
1430 adev->mode_info.num_crtc = 6;
1431 adev->mode_info.num_hpd = 6;
1432 adev->mode_info.num_dig = 6;
1433 adev->mode_info.plane_type = dm_surfaces_type_default;
1436 DRM_ERROR("Usupported ASIC type: 0x%X\n", adev->asic_type);
1440 if (adev->mode_info.funcs == NULL)
1441 adev->mode_info.funcs = &dm_display_funcs;
1443 /* Note: Do NOT change adev->audio_endpt_rreg and
1444 * adev->audio_endpt_wreg because they are initialised in
1445 * amdgpu_device_init() */
1446 #if defined(CONFIG_DEBUG_KERNEL_DC)
1449 &dev_attr_s3_debug);
1455 bool amdgpu_dm_acquire_dal_lock(struct amdgpu_display_manager *dm)
1461 bool amdgpu_dm_release_dal_lock(struct amdgpu_display_manager *dm)