| 1 | /* |
| 2 | * Copyright 2015 Advanced Micro Devices, Inc. |
| 3 | * |
| 4 | * Permission is hereby granted, free of charge, to any person obtaining a |
| 5 | * copy of this software and associated documentation files (the "Software"), |
| 6 | * to deal in the Software without restriction, including without limitation |
| 7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, |
| 8 | * and/or sell copies of the Software, and to permit persons to whom the |
| 9 | * Software is furnished to do so, subject to the following conditions: |
| 10 | * |
| 11 | * The above copyright notice and this permission notice shall be included in |
| 12 | * all copies or substantial portions of the Software. |
| 13 | * |
| 14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
| 15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
| 16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL |
| 17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR |
| 18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, |
| 19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR |
| 20 | * OTHER DEALINGS IN THE SOFTWARE. |
| 21 | * |
| 22 | * Authors: AMD |
| 23 | * |
| 24 | */ |
| 25 | |
| 26 | #include "dm_services_types.h" |
| 27 | #include "dc.h" |
| 28 | #include "dc/inc/core_types.h" |
| 29 | |
| 30 | #include "vid.h" |
| 31 | #include "amdgpu.h" |
| 32 | #include "amdgpu_display.h" |
| 33 | #include "atom.h" |
| 34 | #include "amdgpu_dm.h" |
| 35 | #include "amdgpu_pm.h" |
| 36 | |
| 37 | #include "amd_shared.h" |
| 38 | #include "amdgpu_dm_irq.h" |
| 39 | #include "dm_helpers.h" |
| 41 | #include "amdgpu_dm_mst_types.h" |
| 42 | |
| 43 | #include "ivsrcid/ivsrcid_vislands30.h" |
| 44 | |
| 45 | #include <linux/module.h> |
| 46 | #include <linux/moduleparam.h> |
| 47 | #include <linux/version.h> |
| 48 | #include <linux/types.h> |
| 49 | |
| 50 | #include <drm/drmP.h> |
| 51 | #include <drm/drm_atomic.h> |
| 52 | #include <drm/drm_atomic_helper.h> |
| 53 | #include <drm/drm_dp_mst_helper.h> |
| 54 | #include <drm/drm_fb_helper.h> |
| 55 | #include <drm/drm_edid.h> |
| 56 | |
| 57 | #include "modules/inc/mod_freesync.h" |
| 58 | |
| 59 | #if defined(CONFIG_DRM_AMD_DC_DCN1_0) |
| 60 | #include "ivsrcid/irqsrcs_dcn_1_0.h" |
| 61 | |
| 62 | #include "raven1/DCN/dcn_1_0_offset.h" |
| 63 | #include "raven1/DCN/dcn_1_0_sh_mask.h" |
| 64 | #include "vega10/soc15ip.h" |
| 65 | |
| 66 | #include "soc15_common.h" |
| 67 | #endif |
| 68 | |
| 71 | #include "i2caux_interface.h" |
| 72 | |
| 73 | /* basic init/fini API */ |
| 74 | static int amdgpu_dm_init(struct amdgpu_device *adev); |
| 75 | static void amdgpu_dm_fini(struct amdgpu_device *adev); |
| 76 | |
| 77 | /* initializes drm_device display related structures, based on the information |
 * provided by DAL. The DRM structures are: drm_crtc, drm_connector,
| 79 | * drm_encoder, drm_mode_config |
| 80 | * |
| 81 | * Returns 0 on success |
| 82 | */ |
| 83 | static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev); |
/* Removes and deallocates the drm structures created by the above function */
| 85 | static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm); |
| 86 | |
| 87 | static void |
| 88 | amdgpu_dm_update_connector_after_detect(struct amdgpu_dm_connector *aconnector); |
| 89 | |
| 90 | static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm, |
| 91 | struct amdgpu_plane *aplane, |
| 92 | unsigned long possible_crtcs); |
| 93 | static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm, |
| 94 | struct drm_plane *plane, |
| 95 | uint32_t link_index); |
| 96 | static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm, |
| 97 | struct amdgpu_dm_connector *amdgpu_dm_connector, |
| 98 | uint32_t link_index, |
| 99 | struct amdgpu_encoder *amdgpu_encoder); |
| 100 | static int amdgpu_dm_encoder_init(struct drm_device *dev, |
| 101 | struct amdgpu_encoder *aencoder, |
| 102 | uint32_t link_index); |
| 103 | |
| 104 | static int amdgpu_dm_connector_get_modes(struct drm_connector *connector); |
| 105 | |
| 106 | static int amdgpu_dm_atomic_commit(struct drm_device *dev, |
| 107 | struct drm_atomic_state *state, |
| 108 | bool nonblock); |
| 109 | |
| 110 | static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state); |
| 111 | |
| 112 | static int amdgpu_dm_atomic_check(struct drm_device *dev, |
| 113 | struct drm_atomic_state *state); |
| 114 | |
| 118 | static const enum drm_plane_type dm_plane_type_default[AMDGPU_MAX_PLANES] = { |
| 119 | DRM_PLANE_TYPE_PRIMARY, |
| 120 | DRM_PLANE_TYPE_PRIMARY, |
| 121 | DRM_PLANE_TYPE_PRIMARY, |
| 122 | DRM_PLANE_TYPE_PRIMARY, |
| 123 | DRM_PLANE_TYPE_PRIMARY, |
| 124 | DRM_PLANE_TYPE_PRIMARY, |
| 125 | }; |
| 126 | |
| 127 | static const enum drm_plane_type dm_plane_type_carizzo[AMDGPU_MAX_PLANES] = { |
| 128 | DRM_PLANE_TYPE_PRIMARY, |
| 129 | DRM_PLANE_TYPE_PRIMARY, |
| 130 | DRM_PLANE_TYPE_PRIMARY, |
| 131 | DRM_PLANE_TYPE_OVERLAY,/* YUV Capable Underlay */ |
| 132 | }; |
| 133 | |
| 134 | static const enum drm_plane_type dm_plane_type_stoney[AMDGPU_MAX_PLANES] = { |
| 135 | DRM_PLANE_TYPE_PRIMARY, |
| 136 | DRM_PLANE_TYPE_PRIMARY, |
| 137 | DRM_PLANE_TYPE_OVERLAY, /* YUV Capable Underlay */ |
| 138 | }; |
| 139 | |
| 140 | /* |
| 141 | * dm_vblank_get_counter |
| 142 | * |
| 143 | * @brief |
| 144 | * Get counter for number of vertical blanks |
| 145 | * |
| 146 | * @param |
| 147 | * struct amdgpu_device *adev - [in] desired amdgpu device |
| 148 | * int disp_idx - [in] which CRTC to get the counter from |
| 149 | * |
| 150 | * @return |
| 151 | * Counter for vertical blanks |
| 152 | */ |
| 153 | static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc) |
| 154 | { |
| 155 | if (crtc >= adev->mode_info.num_crtc) |
| 156 | return 0; |
| 157 | else { |
| 158 | struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc]; |
| 159 | struct dm_crtc_state *acrtc_state = to_dm_crtc_state( |
| 160 | acrtc->base.state); |
| 161 | |
| 162 | |
| 163 | if (acrtc_state->stream == NULL) { |
| 164 | DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n", |
| 165 | crtc); |
| 166 | return 0; |
| 167 | } |
| 168 | |
| 169 | return dc_stream_get_vblank_counter(acrtc_state->stream); |
| 170 | } |
| 171 | } |
| 172 | |
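/*
 * Return the current horizontal/vertical scanout position together with the
 * vertical blank start/end for the given CRTC, packed into the register-style
 * 16-bit fields the base driver still expects.
 */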
| 173 | static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc, |
| 174 | u32 *vbl, u32 *position) |
| 175 | { |
| 176 | uint32_t v_blank_start, v_blank_end, h_position, v_position; |
| 177 | |
| 178 | if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc)) |
| 179 | return -EINVAL; |
| 180 | else { |
| 181 | struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc]; |
| 182 | struct dm_crtc_state *acrtc_state = to_dm_crtc_state( |
| 183 | acrtc->base.state); |
| 184 | |
| 185 | if (acrtc_state->stream == NULL) { |
| 186 | DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n", |
| 187 | crtc); |
| 188 | return 0; |
| 189 | } |
| 190 | |
| 191 | /* |
		 * TODO: rework the base driver to use these values directly;
		 * for now pack them back into register format.
| 194 | */ |
| 195 | dc_stream_get_scanoutpos(acrtc_state->stream, |
| 196 | &v_blank_start, |
| 197 | &v_blank_end, |
| 198 | &h_position, |
| 199 | &v_position); |
| 200 | |
| 201 | *position = v_position | (h_position << 16); |
| 202 | *vbl = v_blank_start | (v_blank_end << 16); |
| 203 | } |
| 204 | |
| 205 | return 0; |
| 206 | } |
| 207 | |
| 208 | static bool dm_is_idle(void *handle) |
| 209 | { |
| 210 | /* XXX todo */ |
| 211 | return true; |
| 212 | } |
| 213 | |
| 214 | static int dm_wait_for_idle(void *handle) |
| 215 | { |
| 216 | /* XXX todo */ |
| 217 | return 0; |
| 218 | } |
| 219 | |
| 220 | static bool dm_check_soft_reset(void *handle) |
| 221 | { |
| 222 | return false; |
| 223 | } |
| 224 | |
| 225 | static int dm_soft_reset(void *handle) |
| 226 | { |
| 227 | /* XXX todo */ |
| 228 | return 0; |
| 229 | } |
| 230 | |
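/*
 * Look up the amdgpu_crtc driven by the given OTG (output timing generator)
 * instance by walking the DRM CRTC list. Returns NULL if no CRTC matches.
 */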
| 231 | static struct amdgpu_crtc * |
| 232 | get_crtc_by_otg_inst(struct amdgpu_device *adev, |
| 233 | int otg_inst) |
| 234 | { |
| 235 | struct drm_device *dev = adev->ddev; |
| 236 | struct drm_crtc *crtc; |
| 237 | struct amdgpu_crtc *amdgpu_crtc; |
| 238 | |
| 239 | /* |
	 * The following check was inherited from both functions in which this
	 * one is now used. Why this can happen still needs to be investigated.
| 242 | */ |
| 243 | if (otg_inst == -1) { |
| 244 | WARN_ON(1); |
| 245 | return adev->mode_info.crtcs[0]; |
| 246 | } |
| 247 | |
| 248 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { |
| 249 | amdgpu_crtc = to_amdgpu_crtc(crtc); |
| 250 | |
| 251 | if (amdgpu_crtc->otg_inst == otg_inst) |
| 252 | return amdgpu_crtc; |
| 253 | } |
| 254 | |
| 255 | return NULL; |
| 256 | } |
| 257 | |
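/*
 * Page-flip completion interrupt handler: resolve the CRTC from the pflip
 * interrupt source, send the pending vblank event to userspace and drop the
 * vblank reference taken when the flip was submitted.
 */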
| 258 | static void dm_pflip_high_irq(void *interrupt_params) |
| 259 | { |
| 260 | struct amdgpu_crtc *amdgpu_crtc; |
| 261 | struct common_irq_params *irq_params = interrupt_params; |
| 262 | struct amdgpu_device *adev = irq_params->adev; |
| 263 | unsigned long flags; |
| 264 | |
| 265 | amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP); |
| 266 | |
	/* The IRQ can fire while we are still in the initialization stage. */
	/* TODO: work and BO cleanup */
| 269 | if (amdgpu_crtc == NULL) { |
| 270 | DRM_DEBUG_DRIVER("CRTC is null, returning.\n"); |
| 271 | return; |
| 272 | } |
| 273 | |
| 274 | spin_lock_irqsave(&adev->ddev->event_lock, flags); |
| 275 | |
	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
		DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d != AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p]\n",
| 278 | amdgpu_crtc->pflip_status, |
| 279 | AMDGPU_FLIP_SUBMITTED, |
| 280 | amdgpu_crtc->crtc_id, |
| 281 | amdgpu_crtc); |
| 282 | spin_unlock_irqrestore(&adev->ddev->event_lock, flags); |
| 283 | return; |
| 284 | } |
| 285 | |
| 286 | |
	/* wake up userspace */
| 288 | if (amdgpu_crtc->event) { |
| 289 | /* Update to correct count/ts if racing with vblank irq */ |
| 290 | drm_crtc_accurate_vblank_count(&amdgpu_crtc->base); |
| 291 | |
| 292 | drm_crtc_send_vblank_event(&amdgpu_crtc->base, amdgpu_crtc->event); |
| 293 | |
| 294 | /* page flip completed. clean up */ |
| 295 | amdgpu_crtc->event = NULL; |
| 296 | |
| 297 | } else |
| 298 | WARN_ON(1); |
| 299 | |
| 300 | amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE; |
| 301 | spin_unlock_irqrestore(&adev->ddev->event_lock, flags); |
| 302 | |
| 303 | DRM_DEBUG_DRIVER("%s - crtc :%d[%p], pflip_stat:AMDGPU_FLIP_NONE\n", |
| 304 | __func__, amdgpu_crtc->crtc_id, amdgpu_crtc); |
| 305 | |
| 306 | drm_crtc_vblank_put(&amdgpu_crtc->base); |
| 307 | } |
| 308 | |
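/*
 * Vertical blank interrupt handler: resolve the CRTC from the vblank
 * interrupt source and forward the event to the DRM vblank machinery.
 */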
| 309 | static void dm_crtc_high_irq(void *interrupt_params) |
| 310 | { |
| 311 | struct common_irq_params *irq_params = interrupt_params; |
| 312 | struct amdgpu_device *adev = irq_params->adev; |
| 313 | uint8_t crtc_index = 0; |
| 314 | struct amdgpu_crtc *acrtc; |
| 315 | |
| 316 | acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK); |
| 317 | |
| 318 | if (acrtc) |
| 319 | crtc_index = acrtc->crtc_id; |
| 320 | |
| 321 | drm_handle_vblank(adev->ddev, crtc_index); |
| 322 | } |
| 323 | |
| 324 | static int dm_set_clockgating_state(void *handle, |
| 325 | enum amd_clockgating_state state) |
| 326 | { |
| 327 | return 0; |
| 328 | } |
| 329 | |
| 330 | static int dm_set_powergating_state(void *handle, |
| 331 | enum amd_powergating_state state) |
| 332 | { |
| 333 | return 0; |
| 334 | } |
| 335 | |
| 336 | /* Prototypes of private functions */ |
| 337 | static int dm_early_init(void* handle); |
| 338 | |
| 339 | static void hotplug_notify_work_func(struct work_struct *work) |
| 340 | { |
| 341 | struct amdgpu_display_manager *dm = container_of(work, struct amdgpu_display_manager, mst_hotplug_work); |
| 342 | struct drm_device *dev = dm->ddev; |
| 343 | |
| 344 | drm_kms_helper_hotplug_event(dev); |
| 345 | } |
| 346 | |
| 347 | #ifdef ENABLE_FBC |
| 348 | #include "dal_asic_id.h" |
| 349 | /* Allocate memory for FBC compressed data */ |
| 350 | /* TODO: Dynamic allocation */ |
| 351 | #define AMDGPU_FBC_SIZE (3840 * 2160 * 4) |
| 352 | |
| 353 | static void amdgpu_dm_initialize_fbc(struct amdgpu_device *adev) |
| 354 | { |
| 355 | int r; |
| 356 | struct dm_comressor_info *compressor = &adev->dm.compressor; |
| 357 | |
| 358 | if (!compressor->bo_ptr) { |
| 359 | r = amdgpu_bo_create_kernel(adev, AMDGPU_FBC_SIZE, PAGE_SIZE, |
| 360 | AMDGPU_GEM_DOMAIN_VRAM, &compressor->bo_ptr, |
| 361 | &compressor->gpu_addr, &compressor->cpu_addr); |
| 362 | |
| 363 | if (r) |
| 364 | DRM_ERROR("DM: Failed to initialize fbc\n"); |
| 365 | } |
| 366 | |
| 367 | } |
| 368 | #endif |
| 369 | |
| 370 | |
| 371 | /* Init display KMS |
| 372 | * |
| 373 | * Returns 0 on success |
| 374 | */ |
| 375 | static int amdgpu_dm_init(struct amdgpu_device *adev) |
| 376 | { |
| 377 | struct dc_init_data init_data; |
| 378 | adev->dm.ddev = adev->ddev; |
| 379 | adev->dm.adev = adev; |
| 380 | |
| 381 | /* Zero all the fields */ |
| 382 | memset(&init_data, 0, sizeof(init_data)); |
| 383 | |
| 384 | /* initialize DAL's lock (for SYNC context use) */ |
| 385 | spin_lock_init(&adev->dm.dal_lock); |
| 386 | |
| 387 | /* initialize DAL's mutex */ |
| 388 | mutex_init(&adev->dm.dal_mutex); |
| 389 | |
	if (amdgpu_dm_irq_init(adev)) {
| 391 | DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n"); |
| 392 | goto error; |
| 393 | } |
| 394 | |
| 395 | init_data.asic_id.chip_family = adev->family; |
| 396 | |
| 397 | init_data.asic_id.pci_revision_id = adev->rev_id; |
| 398 | init_data.asic_id.hw_internal_rev = adev->external_rev_id; |
| 399 | |
| 400 | init_data.asic_id.vram_width = adev->mc.vram_width; |
	/* TODO: initialize init_data.asic_id.vram_type here. */
| 402 | init_data.asic_id.atombios_base_address = |
| 403 | adev->mode_info.atom_context->bios; |
| 404 | |
| 405 | init_data.driver = adev; |
| 406 | |
| 407 | adev->dm.cgs_device = amdgpu_cgs_create_device(adev); |
| 408 | |
| 409 | if (!adev->dm.cgs_device) { |
| 410 | DRM_ERROR("amdgpu: failed to create cgs device.\n"); |
| 411 | goto error; |
| 412 | } |
| 413 | |
| 414 | init_data.cgs_device = adev->dm.cgs_device; |
| 415 | |
| 416 | adev->dm.dal = NULL; |
| 417 | |
| 418 | init_data.dce_environment = DCE_ENV_PRODUCTION_DRV; |
| 419 | |
| 420 | if (amdgpu_dc_log) |
| 421 | init_data.log_mask = DC_DEFAULT_LOG_MASK; |
| 422 | else |
| 423 | init_data.log_mask = DC_MIN_LOG_MASK; |
| 424 | |
| 425 | #ifdef ENABLE_FBC |
| 426 | if (adev->family == FAMILY_CZ) |
| 427 | amdgpu_dm_initialize_fbc(adev); |
| 428 | init_data.fbc_gpu_addr = adev->dm.compressor.gpu_addr; |
| 429 | #endif |
| 430 | /* Display Core create. */ |
| 431 | adev->dm.dc = dc_create(&init_data); |
| 432 | |
| 433 | if (adev->dm.dc) |
| 434 | DRM_INFO("Display Core initialized!\n"); |
| 435 | else |
| 436 | DRM_INFO("Display Core failed to initialize!\n"); |
| 437 | |
| 438 | INIT_WORK(&adev->dm.mst_hotplug_work, hotplug_notify_work_func); |
| 439 | |
| 440 | adev->dm.freesync_module = mod_freesync_create(adev->dm.dc); |
| 441 | if (!adev->dm.freesync_module) { |
| 442 | DRM_ERROR( |
| 443 | "amdgpu: failed to initialize freesync_module.\n"); |
| 444 | } else |
| 445 | DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n", |
| 446 | adev->dm.freesync_module); |
| 447 | |
| 448 | if (amdgpu_dm_initialize_drm_device(adev)) { |
| 449 | DRM_ERROR( |
| 450 | "amdgpu: failed to initialize sw for display support.\n"); |
| 451 | goto error; |
| 452 | } |
| 453 | |
	/* Update the actual number of CRTCs in use */
| 455 | adev->mode_info.num_crtc = adev->dm.display_indexes_num; |
| 456 | |
| 457 | /* TODO: Add_display_info? */ |
| 458 | |
| 459 | /* TODO use dynamic cursor width */ |
| 460 | adev->ddev->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size; |
| 461 | adev->ddev->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size; |
| 462 | |
| 463 | if (drm_vblank_init(adev->ddev, adev->dm.display_indexes_num)) { |
| 464 | DRM_ERROR( |
| 465 | "amdgpu: failed to initialize sw for display support.\n"); |
| 466 | goto error; |
| 467 | } |
| 468 | |
| 469 | DRM_DEBUG_DRIVER("KMS initialized.\n"); |
| 470 | |
| 471 | return 0; |
| 472 | error: |
| 473 | amdgpu_dm_fini(adev); |
| 474 | |
| 475 | return -1; |
| 476 | } |
| 477 | |
| 478 | static void amdgpu_dm_fini(struct amdgpu_device *adev) |
| 479 | { |
| 480 | amdgpu_dm_destroy_drm_device(&adev->dm); |
| 481 | /* |
	 * TODO: page flip, vblank interrupt
| 483 | * |
| 484 | * amdgpu_dm_irq_fini(adev); |
| 485 | */ |
| 486 | |
| 487 | if (adev->dm.cgs_device) { |
| 488 | amdgpu_cgs_destroy_device(adev->dm.cgs_device); |
| 489 | adev->dm.cgs_device = NULL; |
| 490 | } |
| 491 | if (adev->dm.freesync_module) { |
| 492 | mod_freesync_destroy(adev->dm.freesync_module); |
| 493 | adev->dm.freesync_module = NULL; |
| 494 | } |
| 495 | /* DC Destroy TODO: Replace destroy DAL */ |
| 496 | if (adev->dm.dc) |
| 497 | dc_destroy(&adev->dm.dc); |
| 499 | } |
| 500 | |
| 501 | static int dm_sw_init(void *handle) |
| 502 | { |
| 503 | return 0; |
| 504 | } |
| 505 | |
| 506 | static int dm_sw_fini(void *handle) |
| 507 | { |
| 508 | return 0; |
| 509 | } |
| 510 | |
| 511 | static int detect_mst_link_for_all_connectors(struct drm_device *dev) |
| 512 | { |
| 513 | struct amdgpu_dm_connector *aconnector; |
| 514 | struct drm_connector *connector; |
| 515 | int ret = 0; |
| 516 | |
| 517 | drm_modeset_lock(&dev->mode_config.connection_mutex, NULL); |
| 518 | |
| 519 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) { |
| 520 | aconnector = to_amdgpu_dm_connector(connector); |
| 521 | if (aconnector->dc_link->type == dc_connection_mst_branch) { |
| 522 | DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n", |
| 523 | aconnector, aconnector->base.base.id); |
| 524 | |
| 525 | ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true); |
| 526 | if (ret < 0) { |
| 527 | DRM_ERROR("DM_MST: Failed to start MST\n"); |
| 528 | ((struct dc_link *)aconnector->dc_link)->type = dc_connection_single; |
| 529 | return ret; |
| 530 | } |
| 531 | } |
| 532 | } |
| 533 | |
| 534 | drm_modeset_unlock(&dev->mode_config.connection_mutex); |
| 535 | return ret; |
| 536 | } |
| 537 | |
| 538 | static int dm_late_init(void *handle) |
| 539 | { |
| 540 | struct drm_device *dev = ((struct amdgpu_device *)handle)->ddev; |
| 541 | |
| 542 | return detect_mst_link_for_all_connectors(dev); |
| 543 | } |
| 544 | |
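/*
 * Suspend (before S3) or resume (after S3) the MST topology manager of every
 * MST root connector; connectors created by MST itself (those with an
 * mst_port) are skipped.
 */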
| 545 | static void s3_handle_mst(struct drm_device *dev, bool suspend) |
| 546 | { |
| 547 | struct amdgpu_dm_connector *aconnector; |
| 548 | struct drm_connector *connector; |
| 549 | |
| 550 | drm_modeset_lock(&dev->mode_config.connection_mutex, NULL); |
| 551 | |
| 552 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) { |
| 553 | aconnector = to_amdgpu_dm_connector(connector); |
| 554 | if (aconnector->dc_link->type == dc_connection_mst_branch && |
| 555 | !aconnector->mst_port) { |
| 556 | |
| 557 | if (suspend) |
| 558 | drm_dp_mst_topology_mgr_suspend(&aconnector->mst_mgr); |
| 559 | else |
| 560 | drm_dp_mst_topology_mgr_resume(&aconnector->mst_mgr); |
| 561 | } |
| 562 | } |
| 563 | |
| 564 | drm_modeset_unlock(&dev->mode_config.connection_mutex); |
| 565 | } |
| 566 | |
| 567 | static int dm_hw_init(void *handle) |
| 568 | { |
| 569 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; |
| 570 | /* Create DAL display manager */ |
| 571 | amdgpu_dm_init(adev); |
| 572 | amdgpu_dm_hpd_init(adev); |
| 573 | |
| 574 | return 0; |
| 575 | } |
| 576 | |
| 577 | static int dm_hw_fini(void *handle) |
| 578 | { |
| 579 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; |
| 580 | |
| 581 | amdgpu_dm_hpd_fini(adev); |
| 582 | |
| 583 | amdgpu_dm_irq_fini(adev); |
| 584 | amdgpu_dm_fini(adev); |
| 585 | return 0; |
| 586 | } |
| 587 | |
| 588 | static int dm_suspend(void *handle) |
| 589 | { |
| 590 | struct amdgpu_device *adev = handle; |
| 591 | struct amdgpu_display_manager *dm = &adev->dm; |
| 592 | int ret = 0; |
| 593 | |
| 594 | s3_handle_mst(adev->ddev, true); |
| 595 | |
| 596 | amdgpu_dm_irq_suspend(adev); |
| 597 | |
| 598 | WARN_ON(adev->dm.cached_state); |
| 599 | adev->dm.cached_state = drm_atomic_helper_suspend(adev->ddev); |
| 600 | |
| 601 | dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3); |
| 602 | |
| 603 | return ret; |
| 604 | } |
| 605 | |
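/*
 * Return the first connector in the atomic state whose new connector state
 * points at the given CRTC, or NULL if there is none.
 */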
| 606 | static struct amdgpu_dm_connector * |
| 607 | amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state, |
| 608 | struct drm_crtc *crtc) |
| 609 | { |
| 610 | uint32_t i; |
| 611 | struct drm_connector_state *new_con_state; |
| 612 | struct drm_connector *connector; |
| 613 | struct drm_crtc *crtc_from_state; |
| 614 | |
| 615 | for_each_new_connector_in_state(state, connector, new_con_state, i) { |
| 616 | crtc_from_state = new_con_state->crtc; |
| 617 | |
| 618 | if (crtc_from_state == crtc) |
| 619 | return to_amdgpu_dm_connector(connector); |
| 620 | } |
| 621 | |
| 622 | return NULL; |
| 623 | } |
| 624 | |
| 625 | static int dm_resume(void *handle) |
| 626 | { |
| 627 | struct amdgpu_device *adev = handle; |
| 628 | struct amdgpu_display_manager *dm = &adev->dm; |
| 629 | |
| 630 | /* power on hardware */ |
| 631 | dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0); |
| 632 | |
| 633 | return 0; |
| 634 | } |
| 635 | |
| 636 | int amdgpu_dm_display_resume(struct amdgpu_device *adev) |
| 637 | { |
| 638 | struct drm_device *ddev = adev->ddev; |
| 639 | struct amdgpu_display_manager *dm = &adev->dm; |
| 640 | struct amdgpu_dm_connector *aconnector; |
| 641 | struct drm_connector *connector; |
| 642 | struct drm_crtc *crtc; |
| 643 | struct drm_crtc_state *new_crtc_state; |
| 644 | int ret = 0; |
| 645 | int i; |
| 646 | |
| 647 | /* program HPD filter */ |
| 648 | dc_resume(dm->dc); |
| 649 | |
	/* On resume we need to rewrite the MSTM control bits to enable MST */
| 651 | s3_handle_mst(ddev, false); |
| 652 | |
| 653 | /* |
	 * Enable the HPD Rx IRQ early; this must be done before the mode set
	 * because short-pulse interrupts are used for MST.
| 656 | */ |
| 657 | amdgpu_dm_irq_resume_early(adev); |
| 658 | |
| 659 | /* Do detection*/ |
| 660 | list_for_each_entry(connector, |
| 661 | &ddev->mode_config.connector_list, head) { |
| 662 | aconnector = to_amdgpu_dm_connector(connector); |
| 663 | |
| 664 | /* |
		 * This is the case when traversing through already created
		 * MST connectors; they should be skipped.
| 667 | */ |
| 668 | if (aconnector->mst_port) |
| 669 | continue; |
| 670 | |
| 671 | mutex_lock(&aconnector->hpd_lock); |
| 672 | dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD); |
| 673 | aconnector->dc_sink = NULL; |
| 674 | amdgpu_dm_update_connector_after_detect(aconnector); |
| 675 | mutex_unlock(&aconnector->hpd_lock); |
| 676 | } |
| 677 | |
	/* Force a mode set in the atomic commit */
| 679 | for_each_new_crtc_in_state(adev->dm.cached_state, crtc, new_crtc_state, i) |
| 680 | new_crtc_state->active_changed = true; |
| 681 | |
| 682 | ret = drm_atomic_helper_resume(ddev, adev->dm.cached_state); |
| 683 | |
| 684 | drm_atomic_state_put(adev->dm.cached_state); |
| 685 | adev->dm.cached_state = NULL; |
| 686 | |
| 687 | amdgpu_dm_irq_resume_late(adev); |
| 688 | |
| 689 | return ret; |
| 690 | } |
| 691 | |
| 692 | static const struct amd_ip_funcs amdgpu_dm_funcs = { |
| 693 | .name = "dm", |
| 694 | .early_init = dm_early_init, |
| 695 | .late_init = dm_late_init, |
| 696 | .sw_init = dm_sw_init, |
| 697 | .sw_fini = dm_sw_fini, |
| 698 | .hw_init = dm_hw_init, |
| 699 | .hw_fini = dm_hw_fini, |
| 700 | .suspend = dm_suspend, |
| 701 | .resume = dm_resume, |
| 702 | .is_idle = dm_is_idle, |
| 703 | .wait_for_idle = dm_wait_for_idle, |
| 704 | .check_soft_reset = dm_check_soft_reset, |
| 705 | .soft_reset = dm_soft_reset, |
| 706 | .set_clockgating_state = dm_set_clockgating_state, |
| 707 | .set_powergating_state = dm_set_powergating_state, |
| 708 | }; |
| 709 | |
| 710 | const struct amdgpu_ip_block_version dm_ip_block = |
| 711 | { |
| 712 | .type = AMD_IP_BLOCK_TYPE_DCE, |
| 713 | .major = 1, |
| 714 | .minor = 0, |
| 715 | .rev = 0, |
| 716 | .funcs = &amdgpu_dm_funcs, |
| 717 | }; |
| 718 | |
| 719 | |
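/*
 * Driver-private atomic state handling: dm_atomic_state wraps drm_atomic_state
 * and additionally carries the DC validation context, which is released when
 * the state is cleared or freed.
 */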
| 720 | static struct drm_atomic_state * |
| 721 | dm_atomic_state_alloc(struct drm_device *dev) |
| 722 | { |
| 723 | struct dm_atomic_state *state = kzalloc(sizeof(*state), GFP_KERNEL); |
| 724 | |
| 725 | if (!state) |
| 726 | return NULL; |
| 727 | |
| 728 | if (drm_atomic_state_init(dev, &state->base) < 0) |
| 729 | goto fail; |
| 730 | |
| 731 | return &state->base; |
| 732 | |
| 733 | fail: |
| 734 | kfree(state); |
| 735 | return NULL; |
| 736 | } |
| 737 | |
| 738 | static void |
| 739 | dm_atomic_state_clear(struct drm_atomic_state *state) |
| 740 | { |
| 741 | struct dm_atomic_state *dm_state = to_dm_atomic_state(state); |
| 742 | |
| 743 | if (dm_state->context) { |
| 744 | dc_release_state(dm_state->context); |
| 745 | dm_state->context = NULL; |
| 746 | } |
| 747 | |
| 748 | drm_atomic_state_default_clear(state); |
| 749 | } |
| 750 | |
| 751 | static void |
| 752 | dm_atomic_state_alloc_free(struct drm_atomic_state *state) |
| 753 | { |
| 754 | struct dm_atomic_state *dm_state = to_dm_atomic_state(state); |
| 755 | drm_atomic_state_default_release(state); |
| 756 | kfree(dm_state); |
| 757 | } |
| 758 | |
| 759 | static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = { |
| 760 | .fb_create = amdgpu_user_framebuffer_create, |
| 761 | .output_poll_changed = amdgpu_output_poll_changed, |
| 762 | .atomic_check = amdgpu_dm_atomic_check, |
| 763 | .atomic_commit = amdgpu_dm_atomic_commit, |
| 764 | .atomic_state_alloc = dm_atomic_state_alloc, |
| 765 | .atomic_state_clear = dm_atomic_state_clear, |
| 766 | .atomic_state_free = dm_atomic_state_alloc_free |
| 767 | }; |
| 768 | |
| 769 | static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = { |
| 770 | .atomic_commit_tail = amdgpu_dm_atomic_commit_tail |
| 771 | }; |
| 772 | |
| 773 | static void |
| 774 | amdgpu_dm_update_connector_after_detect(struct amdgpu_dm_connector *aconnector) |
| 775 | { |
| 776 | struct drm_connector *connector = &aconnector->base; |
| 777 | struct drm_device *dev = connector->dev; |
| 778 | struct dc_sink *sink; |
| 779 | |
| 780 | /* MST handled by drm_mst framework */ |
| 781 | if (aconnector->mst_mgr.mst_state == true) |
| 782 | return; |
| 783 | |
| 784 | |
| 785 | sink = aconnector->dc_link->local_sink; |
| 786 | |
	/* The EDID-managed connector gets its first update only in the
	 * mode_valid hook; the connector sink is then set to either a fake or
	 * a physical sink, depending on the link status.
	 * Don't do it here during boot.
	 */
| 791 | if (aconnector->base.force != DRM_FORCE_UNSPECIFIED |
| 792 | && aconnector->dc_em_sink) { |
| 793 | |
		/* For headless S3 resume, use the emulated sink (dc_em_sink) to
		 * fake a stream, because on resume connector->sink is set to
		 * NULL.
		 */
| 797 | mutex_lock(&dev->mode_config.mutex); |
| 798 | |
| 799 | if (sink) { |
| 800 | if (aconnector->dc_sink) { |
| 801 | amdgpu_dm_remove_sink_from_freesync_module( |
| 802 | connector); |
				/* The retain and release below bump up the sink
				 * refcount, because the link no longer points to
				 * it after disconnect; otherwise the next
				 * crtc-to-connector reshuffle by the UMD would
				 * trigger an unwanted dc_sink release.
				 */
| 808 | if (aconnector->dc_sink != aconnector->dc_em_sink) |
| 809 | dc_sink_release(aconnector->dc_sink); |
| 810 | } |
| 811 | aconnector->dc_sink = sink; |
| 812 | amdgpu_dm_add_sink_to_freesync_module( |
| 813 | connector, aconnector->edid); |
| 814 | } else { |
| 815 | amdgpu_dm_remove_sink_from_freesync_module(connector); |
| 816 | if (!aconnector->dc_sink) |
| 817 | aconnector->dc_sink = aconnector->dc_em_sink; |
| 818 | else if (aconnector->dc_sink != aconnector->dc_em_sink) |
| 819 | dc_sink_retain(aconnector->dc_sink); |
| 820 | } |
| 821 | |
| 822 | mutex_unlock(&dev->mode_config.mutex); |
| 823 | return; |
| 824 | } |
| 825 | |
| 826 | /* |
	 * TODO: temporary guard until a proper fix is found.
	 * If this sink is an MST sink, we should not do anything.
| 829 | */ |
| 830 | if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) |
| 831 | return; |
| 832 | |
| 833 | if (aconnector->dc_sink == sink) { |
| 834 | /* We got a DP short pulse (Link Loss, DP CTS, etc...). |
| 835 | * Do nothing!! */ |
| 836 | DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n", |
| 837 | aconnector->connector_id); |
| 838 | return; |
| 839 | } |
| 840 | |
| 841 | DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n", |
| 842 | aconnector->connector_id, aconnector->dc_sink, sink); |
| 843 | |
| 844 | mutex_lock(&dev->mode_config.mutex); |
| 845 | |
| 846 | /* 1. Update status of the drm connector |
| 847 | * 2. Send an event and let userspace tell us what to do */ |
| 848 | if (sink) { |
| 849 | /* TODO: check if we still need the S3 mode update workaround. |
| 850 | * If yes, put it here. */ |
| 851 | if (aconnector->dc_sink) |
| 852 | amdgpu_dm_remove_sink_from_freesync_module( |
| 853 | connector); |
| 854 | |
| 855 | aconnector->dc_sink = sink; |
| 856 | if (sink->dc_edid.length == 0) { |
| 857 | aconnector->edid = NULL; |
| 858 | } else { |
| 859 | aconnector->edid = |
| 860 | (struct edid *) sink->dc_edid.raw_edid; |
| 861 | |
| 862 | |
| 863 | drm_mode_connector_update_edid_property(connector, |
| 864 | aconnector->edid); |
| 865 | } |
| 866 | amdgpu_dm_add_sink_to_freesync_module(connector, aconnector->edid); |
| 867 | |
| 868 | } else { |
| 869 | amdgpu_dm_remove_sink_from_freesync_module(connector); |
| 870 | drm_mode_connector_update_edid_property(connector, NULL); |
| 871 | aconnector->num_modes = 0; |
| 872 | aconnector->dc_sink = NULL; |
| 873 | } |
| 874 | |
| 875 | mutex_unlock(&dev->mode_config.mutex); |
| 876 | } |
| 877 | |
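/*
 * HPD (long pulse) interrupt handler: re-detect the link, update the
 * connector and sink state, restore the connector's DRM state and notify
 * userspace of the hotplug event.
 */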
| 878 | static void handle_hpd_irq(void *param) |
| 879 | { |
| 880 | struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param; |
| 881 | struct drm_connector *connector = &aconnector->base; |
| 882 | struct drm_device *dev = connector->dev; |
| 883 | |
	/* In case of failure or MST there is no need to update the connector
	 * status or notify the OS, since (in the MST case) MST does this in
	 * its own context.
| 886 | */ |
| 887 | mutex_lock(&aconnector->hpd_lock); |
| 888 | |
| 889 | if (aconnector->fake_enable) |
| 890 | aconnector->fake_enable = false; |
| 891 | |
| 892 | if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) { |
| 893 | amdgpu_dm_update_connector_after_detect(aconnector); |
| 894 | |
| 895 | |
| 896 | drm_modeset_lock_all(dev); |
| 897 | dm_restore_drm_connector_state(dev, connector); |
| 898 | drm_modeset_unlock_all(dev); |
| 899 | |
| 900 | if (aconnector->base.force == DRM_FORCE_UNSPECIFIED) |
| 901 | drm_kms_helper_hotplug_event(dev); |
| 902 | } |
| 903 | mutex_unlock(&aconnector->hpd_lock); |
| 904 | |
| 905 | } |
| 906 | |
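/*
 * Service a DP short-pulse interrupt on an MST link: read the ESI (event
 * status indicator) DPCD bytes, let the MST topology manager handle the IRQ,
 * acknowledge it back to the sink, and repeat until no new IRQ is reported or
 * max_process_count is reached.
 */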
| 907 | static void dm_handle_hpd_rx_irq(struct amdgpu_dm_connector *aconnector) |
| 908 | { |
| 909 | uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 }; |
| 910 | uint8_t dret; |
| 911 | bool new_irq_handled = false; |
| 912 | int dpcd_addr; |
| 913 | int dpcd_bytes_to_read; |
| 914 | |
| 915 | const int max_process_count = 30; |
| 916 | int process_count = 0; |
| 917 | |
| 918 | const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link); |
| 919 | |
| 920 | if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) { |
| 921 | dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT; |
| 922 | /* DPCD 0x200 - 0x201 for downstream IRQ */ |
| 923 | dpcd_addr = DP_SINK_COUNT; |
| 924 | } else { |
| 925 | dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI; |
| 926 | /* DPCD 0x2002 - 0x2005 for downstream IRQ */ |
| 927 | dpcd_addr = DP_SINK_COUNT_ESI; |
| 928 | } |
| 929 | |
| 930 | dret = drm_dp_dpcd_read( |
| 931 | &aconnector->dm_dp_aux.aux, |
| 932 | dpcd_addr, |
| 933 | esi, |
| 934 | dpcd_bytes_to_read); |
| 935 | |
| 936 | while (dret == dpcd_bytes_to_read && |
| 937 | process_count < max_process_count) { |
| 938 | uint8_t retry; |
| 939 | dret = 0; |
| 940 | |
| 941 | process_count++; |
| 942 | |
| 943 | DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]); |
| 944 | /* handle HPD short pulse irq */ |
| 945 | if (aconnector->mst_mgr.mst_state) |
| 946 | drm_dp_mst_hpd_irq( |
| 947 | &aconnector->mst_mgr, |
| 948 | esi, |
| 949 | &new_irq_handled); |
| 950 | |
| 951 | if (new_irq_handled) { |
			/* ACK via DPCD to notify the downstream device */
| 953 | const int ack_dpcd_bytes_to_write = |
| 954 | dpcd_bytes_to_read - 1; |
| 955 | |
| 956 | for (retry = 0; retry < 3; retry++) { |
| 957 | uint8_t wret; |
| 958 | |
| 959 | wret = drm_dp_dpcd_write( |
| 960 | &aconnector->dm_dp_aux.aux, |
| 961 | dpcd_addr + 1, |
| 962 | &esi[1], |
| 963 | ack_dpcd_bytes_to_write); |
| 964 | if (wret == ack_dpcd_bytes_to_write) |
| 965 | break; |
| 966 | } |
| 967 | |
			/* check if there is a new irq to be handled */
| 969 | dret = drm_dp_dpcd_read( |
| 970 | &aconnector->dm_dp_aux.aux, |
| 971 | dpcd_addr, |
| 972 | esi, |
| 973 | dpcd_bytes_to_read); |
| 974 | |
| 975 | new_irq_handled = false; |
| 976 | } else { |
| 977 | break; |
| 978 | } |
| 979 | } |
| 980 | |
| 981 | if (process_count == max_process_count) |
| 982 | DRM_DEBUG_DRIVER("Loop exceeded max iterations\n"); |
| 983 | } |
| 984 | |
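/*
 * HPD_RX (short pulse) interrupt handler: let DC process link-loss and
 * automation requests, re-detect the downstream port for SST links, and hand
 * MST-related interrupts to dm_handle_hpd_rx_irq().
 */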
| 985 | static void handle_hpd_rx_irq(void *param) |
| 986 | { |
| 987 | struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param; |
| 988 | struct drm_connector *connector = &aconnector->base; |
| 989 | struct drm_device *dev = connector->dev; |
| 990 | struct dc_link *dc_link = aconnector->dc_link; |
| 991 | bool is_mst_root_connector = aconnector->mst_mgr.mst_state; |
| 992 | |
	/* TODO: Temporarily add a mutex to protect the HPD interrupt from a
	 * GPIO conflict; once the i2c helper is implemented, this mutex
	 * should be retired.
| 996 | */ |
| 997 | if (dc_link->type != dc_connection_mst_branch) |
| 998 | mutex_lock(&aconnector->hpd_lock); |
| 999 | |
| 1000 | if (dc_link_handle_hpd_rx_irq(dc_link, NULL) && |
| 1001 | !is_mst_root_connector) { |
| 1002 | /* Downstream Port status changed. */ |
| 1003 | if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) { |
| 1004 | amdgpu_dm_update_connector_after_detect(aconnector); |
| 1005 | |
| 1006 | |
| 1007 | drm_modeset_lock_all(dev); |
| 1008 | dm_restore_drm_connector_state(dev, connector); |
| 1009 | drm_modeset_unlock_all(dev); |
| 1010 | |
| 1011 | drm_kms_helper_hotplug_event(dev); |
| 1012 | } |
| 1013 | } |
| 1014 | if ((dc_link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN) || |
| 1015 | (dc_link->type == dc_connection_mst_branch)) |
| 1016 | dm_handle_hpd_rx_irq(aconnector); |
| 1017 | |
| 1018 | if (dc_link->type != dc_connection_mst_branch) |
| 1019 | mutex_unlock(&aconnector->hpd_lock); |
| 1020 | } |
| 1021 | |
| 1022 | static void register_hpd_handlers(struct amdgpu_device *adev) |
| 1023 | { |
| 1024 | struct drm_device *dev = adev->ddev; |
| 1025 | struct drm_connector *connector; |
| 1026 | struct amdgpu_dm_connector *aconnector; |
| 1027 | const struct dc_link *dc_link; |
| 1028 | struct dc_interrupt_params int_params = {0}; |
| 1029 | |
| 1030 | int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT; |
| 1031 | int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT; |
| 1032 | |
| 1033 | list_for_each_entry(connector, |
| 1034 | &dev->mode_config.connector_list, head) { |
| 1035 | |
| 1036 | aconnector = to_amdgpu_dm_connector(connector); |
| 1037 | dc_link = aconnector->dc_link; |
| 1038 | |
| 1039 | if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) { |
| 1040 | int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT; |
| 1041 | int_params.irq_source = dc_link->irq_source_hpd; |
| 1042 | |
| 1043 | amdgpu_dm_irq_register_interrupt(adev, &int_params, |
| 1044 | handle_hpd_irq, |
| 1045 | (void *) aconnector); |
| 1046 | } |
| 1047 | |
| 1048 | if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) { |
| 1049 | |
| 1050 | /* Also register for DP short pulse (hpd_rx). */ |
| 1051 | int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT; |
| 1052 | int_params.irq_source = dc_link->irq_source_hpd_rx; |
| 1053 | |
| 1054 | amdgpu_dm_irq_register_interrupt(adev, &int_params, |
| 1055 | handle_hpd_rx_irq, |
| 1056 | (void *) aconnector); |
| 1057 | } |
| 1058 | } |
| 1059 | } |
| 1060 | |
| 1061 | /* Register IRQ sources and initialize IRQ callbacks */ |
| 1062 | static int dce110_register_irq_handlers(struct amdgpu_device *adev) |
| 1063 | { |
| 1064 | struct dc *dc = adev->dm.dc; |
| 1065 | struct common_irq_params *c_irq_params; |
| 1066 | struct dc_interrupt_params int_params = {0}; |
| 1067 | int r; |
| 1068 | int i; |
| 1069 | unsigned client_id = AMDGPU_IH_CLIENTID_LEGACY; |
| 1070 | |
| 1071 | if (adev->asic_type == CHIP_VEGA10 || |
| 1072 | adev->asic_type == CHIP_RAVEN) |
| 1073 | client_id = AMDGPU_IH_CLIENTID_DCE; |
| 1074 | |
| 1075 | int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT; |
| 1076 | int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT; |
| 1077 | |
| 1078 | /* Actions of amdgpu_irq_add_id(): |
| 1079 | * 1. Register a set() function with base driver. |
| 1080 | * Base driver will call set() function to enable/disable an |
| 1081 | * interrupt in DC hardware. |
| 1082 | * 2. Register amdgpu_dm_irq_handler(). |
| 1083 | * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts |
| 1084 | * coming from DC hardware. |
| 1085 | * amdgpu_dm_irq_handler() will re-direct the interrupt to DC |
| 1086 | * for acknowledging and handling. */ |
| 1087 | |
| 1088 | /* Use VBLANK interrupt */ |
| 1089 | for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) { |
| 1090 | r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq); |
| 1091 | if (r) { |
| 1092 | DRM_ERROR("Failed to add crtc irq id!\n"); |
| 1093 | return r; |
| 1094 | } |
| 1095 | |
| 1096 | int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT; |
| 1097 | int_params.irq_source = |
| 1098 | dc_interrupt_to_irq_source(dc, i, 0); |
| 1099 | |
| 1100 | c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1]; |
| 1101 | |
| 1102 | c_irq_params->adev = adev; |
| 1103 | c_irq_params->irq_src = int_params.irq_source; |
| 1104 | |
| 1105 | amdgpu_dm_irq_register_interrupt(adev, &int_params, |
| 1106 | dm_crtc_high_irq, c_irq_params); |
| 1107 | } |
| 1108 | |
| 1109 | /* Use GRPH_PFLIP interrupt */ |
| 1110 | for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP; |
| 1111 | i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) { |
| 1112 | r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq); |
| 1113 | if (r) { |
| 1114 | DRM_ERROR("Failed to add page flip irq id!\n"); |
| 1115 | return r; |
| 1116 | } |
| 1117 | |
| 1118 | int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT; |
| 1119 | int_params.irq_source = |
| 1120 | dc_interrupt_to_irq_source(dc, i, 0); |
| 1121 | |
| 1122 | c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST]; |
| 1123 | |
| 1124 | c_irq_params->adev = adev; |
| 1125 | c_irq_params->irq_src = int_params.irq_source; |
| 1126 | |
| 1127 | amdgpu_dm_irq_register_interrupt(adev, &int_params, |
| 1128 | dm_pflip_high_irq, c_irq_params); |
| 1129 | |
| 1130 | } |
| 1131 | |
| 1132 | /* HPD */ |
| 1133 | r = amdgpu_irq_add_id(adev, client_id, |
| 1134 | VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq); |
| 1135 | if (r) { |
| 1136 | DRM_ERROR("Failed to add hpd irq id!\n"); |
| 1137 | return r; |
| 1138 | } |
| 1139 | |
| 1140 | register_hpd_handlers(adev); |
| 1141 | |
| 1142 | return 0; |
| 1143 | } |
| 1144 | |
| 1145 | #if defined(CONFIG_DRM_AMD_DC_DCN1_0) |
| 1146 | /* Register IRQ sources and initialize IRQ callbacks */ |
| 1147 | static int dcn10_register_irq_handlers(struct amdgpu_device *adev) |
| 1148 | { |
| 1149 | struct dc *dc = adev->dm.dc; |
| 1150 | struct common_irq_params *c_irq_params; |
| 1151 | struct dc_interrupt_params int_params = {0}; |
| 1152 | int r; |
| 1153 | int i; |
| 1154 | |
| 1155 | int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT; |
| 1156 | int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT; |
| 1157 | |
| 1158 | /* Actions of amdgpu_irq_add_id(): |
| 1159 | * 1. Register a set() function with base driver. |
| 1160 | * Base driver will call set() function to enable/disable an |
| 1161 | * interrupt in DC hardware. |
| 1162 | * 2. Register amdgpu_dm_irq_handler(). |
| 1163 | * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts |
| 1164 | * coming from DC hardware. |
| 1165 | * amdgpu_dm_irq_handler() will re-direct the interrupt to DC |
| 1166 | * for acknowledging and handling. |
| 1167 | * */ |
| 1168 | |
| 1169 | /* Use VSTARTUP interrupt */ |
| 1170 | for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP; |
| 1171 | i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1; |
| 1172 | i++) { |
| 1173 | r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_DCE, i, &adev->crtc_irq); |
| 1174 | |
| 1175 | if (r) { |
| 1176 | DRM_ERROR("Failed to add crtc irq id!\n"); |
| 1177 | return r; |
| 1178 | } |
| 1179 | |
| 1180 | int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT; |
| 1181 | int_params.irq_source = |
| 1182 | dc_interrupt_to_irq_source(dc, i, 0); |
| 1183 | |
| 1184 | c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1]; |
| 1185 | |
| 1186 | c_irq_params->adev = adev; |
| 1187 | c_irq_params->irq_src = int_params.irq_source; |
| 1188 | |
| 1189 | amdgpu_dm_irq_register_interrupt(adev, &int_params, |
| 1190 | dm_crtc_high_irq, c_irq_params); |
| 1191 | } |
| 1192 | |
| 1193 | /* Use GRPH_PFLIP interrupt */ |
| 1194 | for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT; |
| 1195 | i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + adev->mode_info.num_crtc - 1; |
| 1196 | i++) { |
| 1197 | r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_DCE, i, &adev->pageflip_irq); |
| 1198 | if (r) { |
| 1199 | DRM_ERROR("Failed to add page flip irq id!\n"); |
| 1200 | return r; |
| 1201 | } |
| 1202 | |
| 1203 | int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT; |
| 1204 | int_params.irq_source = |
| 1205 | dc_interrupt_to_irq_source(dc, i, 0); |
| 1206 | |
| 1207 | c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST]; |
| 1208 | |
| 1209 | c_irq_params->adev = adev; |
| 1210 | c_irq_params->irq_src = int_params.irq_source; |
| 1211 | |
| 1212 | amdgpu_dm_irq_register_interrupt(adev, &int_params, |
| 1213 | dm_pflip_high_irq, c_irq_params); |
| 1214 | |
| 1215 | } |
| 1216 | |
| 1217 | /* HPD */ |
| 1218 | r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT, |
| 1219 | &adev->hpd_irq); |
| 1220 | if (r) { |
| 1221 | DRM_ERROR("Failed to add hpd irq id!\n"); |
| 1222 | return r; |
| 1223 | } |
| 1224 | |
| 1225 | register_hpd_handlers(adev); |
| 1226 | |
| 1227 | return 0; |
| 1228 | } |
| 1229 | #endif |
| 1230 | |
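/*
 * Initialize the DRM mode_config limits, function tables and driver-specific
 * properties used by the display manager.
 */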
| 1231 | static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev) |
| 1232 | { |
| 1233 | int r; |
| 1234 | |
| 1235 | adev->mode_info.mode_config_initialized = true; |
| 1236 | |
| 1237 | adev->ddev->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs; |
| 1238 | adev->ddev->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs; |
| 1239 | |
| 1240 | adev->ddev->mode_config.max_width = 16384; |
| 1241 | adev->ddev->mode_config.max_height = 16384; |
| 1242 | |
| 1243 | adev->ddev->mode_config.preferred_depth = 24; |
| 1244 | adev->ddev->mode_config.prefer_shadow = 1; |
| 1245 | /* indicate support of immediate flip */ |
| 1246 | adev->ddev->mode_config.async_page_flip = true; |
| 1247 | |
| 1248 | adev->ddev->mode_config.fb_base = adev->mc.aper_base; |
| 1249 | |
| 1250 | r = amdgpu_modeset_create_props(adev); |
| 1251 | if (r) |
| 1252 | return r; |
| 1253 | |
| 1254 | return 0; |
| 1255 | } |
| 1256 | |
| 1257 | #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\ |
| 1258 | defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE) |
| 1259 | |
| 1260 | static int amdgpu_dm_backlight_update_status(struct backlight_device *bd) |
| 1261 | { |
| 1262 | struct amdgpu_display_manager *dm = bl_get_data(bd); |
| 1263 | |
| 1264 | if (dc_link_set_backlight_level(dm->backlight_link, |
| 1265 | bd->props.brightness, 0, 0)) |
| 1266 | return 0; |
| 1267 | else |
| 1268 | return 1; |
| 1269 | } |
| 1270 | |
| 1271 | static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd) |
| 1272 | { |
| 1273 | return bd->props.brightness; |
| 1274 | } |
| 1275 | |
| 1276 | static const struct backlight_ops amdgpu_dm_backlight_ops = { |
| 1277 | .get_brightness = amdgpu_dm_backlight_get_brightness, |
| 1278 | .update_status = amdgpu_dm_backlight_update_status, |
| 1279 | }; |
| 1280 | |
| 1281 | static void |
| 1282 | amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm) |
| 1283 | { |
| 1284 | char bl_name[16]; |
| 1285 | struct backlight_properties props = { 0 }; |
| 1286 | |
| 1287 | props.max_brightness = AMDGPU_MAX_BL_LEVEL; |
| 1288 | props.type = BACKLIGHT_RAW; |
| 1289 | |
| 1290 | snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d", |
| 1291 | dm->adev->ddev->primary->index); |
| 1292 | |
| 1293 | dm->backlight_dev = backlight_device_register(bl_name, |
| 1294 | dm->adev->ddev->dev, |
| 1295 | dm, |
| 1296 | &amdgpu_dm_backlight_ops, |
| 1297 | &props); |
| 1298 | |
| 1299 | if (IS_ERR(dm->backlight_dev)) |
| 1300 | DRM_ERROR("DM: Backlight registration failed!\n"); |
| 1301 | else |
| 1302 | DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name); |
| 1303 | } |
| 1304 | |
| 1305 | #endif |
| 1306 | |
| 1307 | /* In this architecture, the association |
| 1308 | * connector -> encoder -> crtc |
 * is not really required. The crtc and connector will hold the
 * display_index as an abstraction to use with the DAL component.
| 1311 | * |
| 1312 | * Returns 0 on success |
| 1313 | */ |
| 1314 | static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev) |
| 1315 | { |
| 1316 | struct amdgpu_display_manager *dm = &adev->dm; |
| 1317 | uint32_t i; |
| 1318 | struct amdgpu_dm_connector *aconnector = NULL; |
| 1319 | struct amdgpu_encoder *aencoder = NULL; |
| 1320 | struct amdgpu_mode_info *mode_info = &adev->mode_info; |
| 1321 | uint32_t link_cnt; |
| 1322 | unsigned long possible_crtcs; |
| 1323 | |
| 1324 | link_cnt = dm->dc->caps.max_links; |
| 1325 | if (amdgpu_dm_mode_config_init(dm->adev)) { |
| 1326 | DRM_ERROR("DM: Failed to initialize mode config\n"); |
| 1327 | return -1; |
| 1328 | } |
| 1329 | |
| 1330 | for (i = 0; i < dm->dc->caps.max_planes; i++) { |
| 1331 | struct amdgpu_plane *plane; |
| 1332 | |
| 1333 | plane = kzalloc(sizeof(struct amdgpu_plane), GFP_KERNEL); |
| 1334 | mode_info->planes[i] = plane; |
| 1335 | |
| 1336 | if (!plane) { |
| 1337 | DRM_ERROR("KMS: Failed to allocate plane\n"); |
| 1338 | goto fail; |
| 1339 | } |
| 1340 | plane->base.type = mode_info->plane_type[i]; |
| 1341 | |
| 1342 | /* |
		 * HACK: IGT tests expect that each plane can only have
		 * one possible CRTC. For now, set one CRTC for each
| 1345 | * plane that is not an underlay, but still allow multiple |
| 1346 | * CRTCs for underlay planes. |
| 1347 | */ |
| 1348 | possible_crtcs = 1 << i; |
| 1349 | if (i >= dm->dc->caps.max_streams) |
| 1350 | possible_crtcs = 0xff; |
| 1351 | |
| 1352 | if (amdgpu_dm_plane_init(dm, mode_info->planes[i], possible_crtcs)) { |
| 1353 | DRM_ERROR("KMS: Failed to initialize plane\n"); |
| 1354 | goto fail; |
| 1355 | } |
| 1356 | } |
| 1357 | |
| 1358 | for (i = 0; i < dm->dc->caps.max_streams; i++) |
| 1359 | if (amdgpu_dm_crtc_init(dm, &mode_info->planes[i]->base, i)) { |
| 1360 | DRM_ERROR("KMS: Failed to initialize crtc\n"); |
| 1361 | goto fail; |
| 1362 | } |
| 1363 | |
| 1364 | dm->display_indexes_num = dm->dc->caps.max_streams; |
| 1365 | |
	/* Loop over all links (connectors) on the board */
| 1367 | for (i = 0; i < link_cnt; i++) { |
| 1368 | |
| 1369 | if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) { |
| 1370 | DRM_ERROR( |
| 1371 | "KMS: Cannot support more than %d display indexes\n", |
| 1372 | AMDGPU_DM_MAX_DISPLAY_INDEX); |
| 1373 | continue; |
| 1374 | } |
| 1375 | |
| 1376 | aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL); |
| 1377 | if (!aconnector) |
| 1378 | goto fail; |
| 1379 | |
| 1380 | aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL); |
| 1381 | if (!aencoder) |
| 1382 | goto fail; |
| 1383 | |
| 1384 | if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) { |
| 1385 | DRM_ERROR("KMS: Failed to initialize encoder\n"); |
| 1386 | goto fail; |
| 1387 | } |
| 1388 | |
| 1389 | if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) { |
| 1390 | DRM_ERROR("KMS: Failed to initialize connector\n"); |
| 1391 | goto fail; |
| 1392 | } |
| 1393 | |
| 1394 | if (dc_link_detect(dc_get_link_at_index(dm->dc, i), |
| 1395 | DETECT_REASON_BOOT)) |
| 1396 | amdgpu_dm_update_connector_after_detect(aconnector); |
| 1397 | } |
| 1398 | |
| 1399 | /* Software is initialized. Now we can register interrupt handlers. */ |
| 1400 | switch (adev->asic_type) { |
| 1401 | case CHIP_BONAIRE: |
| 1402 | case CHIP_HAWAII: |
| 1403 | case CHIP_KAVERI: |
| 1404 | case CHIP_KABINI: |
| 1405 | case CHIP_MULLINS: |
| 1406 | case CHIP_TONGA: |
| 1407 | case CHIP_FIJI: |
| 1408 | case CHIP_CARRIZO: |
| 1409 | case CHIP_STONEY: |
| 1410 | case CHIP_POLARIS11: |
| 1411 | case CHIP_POLARIS10: |
| 1412 | case CHIP_POLARIS12: |
| 1413 | case CHIP_VEGA10: |
| 1414 | if (dce110_register_irq_handlers(dm->adev)) { |
| 1415 | DRM_ERROR("DM: Failed to initialize IRQ\n"); |
| 1416 | goto fail; |
| 1417 | } |
| 1418 | break; |
| 1419 | #if defined(CONFIG_DRM_AMD_DC_DCN1_0) |
| 1420 | case CHIP_RAVEN: |
| 1421 | if (dcn10_register_irq_handlers(dm->adev)) { |
| 1422 | DRM_ERROR("DM: Failed to initialize IRQ\n"); |
| 1423 | goto fail; |
| 1424 | } |
| 1425 | /* |
		 * Temporarily disable stutter until the pplib/smu interaction is implemented
| 1427 | */ |
| 1428 | dm->dc->debug.disable_stutter = true; |
| 1429 | break; |
| 1430 | #endif |
| 1431 | default: |
		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
| 1433 | goto fail; |
| 1434 | } |
| 1435 | |
| 1436 | return 0; |
| 1437 | fail: |
| 1438 | kfree(aencoder); |
| 1439 | kfree(aconnector); |
| 1440 | for (i = 0; i < dm->dc->caps.max_planes; i++) |
| 1441 | kfree(mode_info->planes[i]); |
| 1442 | return -1; |
| 1443 | } |
| 1444 | |
| 1445 | static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm) |
| 1446 | { |
| 1447 | drm_mode_config_cleanup(dm->ddev); |
| 1449 | } |
| 1450 | |
| 1451 | /****************************************************************************** |
| 1452 | * amdgpu_display_funcs functions |
| 1453 | *****************************************************************************/ |
| 1454 | |
| 1455 | /** |
| 1456 | * dm_bandwidth_update - program display watermarks |
| 1457 | * |
| 1458 | * @adev: amdgpu_device pointer |
| 1459 | * |
| 1460 | * Calculate and program the display watermarks and line buffer allocation. |
| 1461 | */ |
| 1462 | static void dm_bandwidth_update(struct amdgpu_device *adev) |
| 1463 | { |
| 1464 | /* TODO: implement later */ |
| 1465 | } |
| 1466 | |
| 1467 | static void dm_set_backlight_level(struct amdgpu_encoder *amdgpu_encoder, |
| 1468 | u8 level) |
| 1469 | { |
| 1470 | /* TODO: translate amdgpu_encoder to display_index and call DAL */ |
| 1471 | } |
| 1472 | |
| 1473 | static u8 dm_get_backlight_level(struct amdgpu_encoder *amdgpu_encoder) |
| 1474 | { |
| 1475 | /* TODO: translate amdgpu_encoder to display_index and call DAL */ |
| 1476 | return 0; |
| 1477 | } |
| 1478 | |
| 1479 | static int amdgpu_notify_freesync(struct drm_device *dev, void *data, |
| 1480 | struct drm_file *filp) |
| 1481 | { |
| 1482 | struct mod_freesync_params freesync_params; |
| 1483 | uint8_t num_streams; |
| 1484 | uint8_t i; |
| 1485 | |
| 1486 | struct amdgpu_device *adev = dev->dev_private; |
| 1487 | int r = 0; |
| 1488 | |
| 1489 | /* Get freesync enable flag from DRM */ |
| 1490 | |
| 1491 | num_streams = dc_get_current_stream_count(adev->dm.dc); |
| 1492 | |
| 1493 | for (i = 0; i < num_streams; i++) { |
| 1494 | struct dc_stream_state *stream; |
| 1495 | stream = dc_get_stream_at_index(adev->dm.dc, i); |
| 1496 | |
| 1497 | mod_freesync_update_state(adev->dm.freesync_module, |
| 1498 | &stream, 1, &freesync_params); |
| 1499 | } |
| 1500 | |
| 1501 | return r; |
| 1502 | } |
| 1503 | |
| 1504 | static const struct amdgpu_display_funcs dm_display_funcs = { |
| 1505 | .bandwidth_update = dm_bandwidth_update, /* called unconditionally */ |
| 1506 | .vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */ |
| 1507 | .vblank_wait = NULL, |
| 1508 | .backlight_set_level = |
| 1509 | dm_set_backlight_level,/* called unconditionally */ |
| 1510 | .backlight_get_level = |
| 1511 | dm_get_backlight_level,/* called unconditionally */ |
| 1512 | .hpd_sense = NULL,/* called unconditionally */ |
| 1513 | .hpd_set_polarity = NULL, /* called unconditionally */ |
| 1514 | .hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */ |
| 1515 | .page_flip_get_scanoutpos = |
| 1516 | dm_crtc_get_scanoutpos,/* called unconditionally */ |
| 1517 | .add_encoder = NULL, /* VBIOS parsing. DAL does it. */ |
| 1518 | .add_connector = NULL, /* VBIOS parsing. DAL does it. */ |
| 1519 | .notify_freesync = amdgpu_notify_freesync, |
| 1520 | |
| 1521 | }; |
| 1522 | |
| 1523 | #if defined(CONFIG_DEBUG_KERNEL_DC) |
| 1524 | |
| 1525 | static ssize_t s3_debug_store(struct device *device, |
| 1526 | struct device_attribute *attr, |
| 1527 | const char *buf, |
| 1528 | size_t count) |
| 1529 | { |
| 1530 | int ret; |
| 1531 | int s3_state; |
| 1532 | struct pci_dev *pdev = to_pci_dev(device); |
| 1533 | struct drm_device *drm_dev = pci_get_drvdata(pdev); |
| 1534 | struct amdgpu_device *adev = drm_dev->dev_private; |
| 1535 | |
| 1536 | ret = kstrtoint(buf, 0, &s3_state); |
| 1537 | |
| 1538 | if (ret == 0) { |
| 1539 | if (s3_state) { |
| 1540 | dm_resume(adev); |
| 1541 | amdgpu_dm_display_resume(adev); |
| 1542 | drm_kms_helper_hotplug_event(adev->ddev); |
| 1543 | } else |
| 1544 | dm_suspend(adev); |
| 1545 | } |
| 1546 | |
| 1547 | return ret == 0 ? count : 0; |
| 1548 | } |
| 1549 | |
| 1550 | DEVICE_ATTR_WO(s3_debug); |
| 1551 | |
| 1552 | #endif |
| 1553 | |
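/*
 * Per-ASIC early init: select the number of CRTCs, HPD lines and digital
 * encoders, the plane type table and the display/IRQ function pointers.
 */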
| 1554 | static int dm_early_init(void *handle) |
| 1555 | { |
| 1556 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; |
| 1557 | |
| 1558 | adev->ddev->driver->driver_features |= DRIVER_ATOMIC; |
| 1559 | amdgpu_dm_set_irq_funcs(adev); |
| 1560 | |
| 1561 | switch (adev->asic_type) { |
| 1562 | case CHIP_BONAIRE: |
| 1563 | case CHIP_HAWAII: |
| 1564 | adev->mode_info.num_crtc = 6; |
| 1565 | adev->mode_info.num_hpd = 6; |
| 1566 | adev->mode_info.num_dig = 6; |
| 1567 | adev->mode_info.plane_type = dm_plane_type_default; |
| 1568 | break; |
| 1569 | case CHIP_KAVERI: |
| 1570 | adev->mode_info.num_crtc = 4; |
| 1571 | adev->mode_info.num_hpd = 6; |
| 1572 | adev->mode_info.num_dig = 7; |
| 1573 | adev->mode_info.plane_type = dm_plane_type_default; |
| 1574 | break; |
| 1575 | case CHIP_KABINI: |
| 1576 | case CHIP_MULLINS: |
| 1577 | adev->mode_info.num_crtc = 2; |
| 1578 | adev->mode_info.num_hpd = 6; |
| 1579 | adev->mode_info.num_dig = 6; |
| 1580 | adev->mode_info.plane_type = dm_plane_type_default; |
| 1581 | break; |
| 1582 | case CHIP_FIJI: |
| 1583 | case CHIP_TONGA: |
| 1584 | adev->mode_info.num_crtc = 6; |
| 1585 | adev->mode_info.num_hpd = 6; |
| 1586 | adev->mode_info.num_dig = 7; |
| 1587 | adev->mode_info.plane_type = dm_plane_type_default; |
| 1588 | break; |
| 1589 | case CHIP_CARRIZO: |
| 1590 | adev->mode_info.num_crtc = 3; |
| 1591 | adev->mode_info.num_hpd = 6; |
| 1592 | adev->mode_info.num_dig = 9; |
| 1593 | adev->mode_info.plane_type = dm_plane_type_carizzo; |
| 1594 | break; |
| 1595 | case CHIP_STONEY: |
| 1596 | adev->mode_info.num_crtc = 2; |
| 1597 | adev->mode_info.num_hpd = 6; |
| 1598 | adev->mode_info.num_dig = 9; |
| 1599 | adev->mode_info.plane_type = dm_plane_type_stoney; |
| 1600 | break; |
| 1601 | case CHIP_POLARIS11: |
| 1602 | case CHIP_POLARIS12: |
| 1603 | adev->mode_info.num_crtc = 5; |
| 1604 | adev->mode_info.num_hpd = 5; |
| 1605 | adev->mode_info.num_dig = 5; |
| 1606 | adev->mode_info.plane_type = dm_plane_type_default; |
| 1607 | break; |
| 1608 | case CHIP_POLARIS10: |
| 1609 | adev->mode_info.num_crtc = 6; |
| 1610 | adev->mode_info.num_hpd = 6; |
| 1611 | adev->mode_info.num_dig = 6; |
| 1612 | adev->mode_info.plane_type = dm_plane_type_default; |
| 1613 | break; |
| 1614 | case CHIP_VEGA10: |
| 1615 | adev->mode_info.num_crtc = 6; |
| 1616 | adev->mode_info.num_hpd = 6; |
| 1617 | adev->mode_info.num_dig = 6; |
| 1618 | adev->mode_info.plane_type = dm_plane_type_default; |
| 1619 | break; |
| 1620 | #if defined(CONFIG_DRM_AMD_DC_DCN1_0) |
| 1621 | case CHIP_RAVEN: |
| 1622 | adev->mode_info.num_crtc = 4; |
| 1623 | adev->mode_info.num_hpd = 4; |
| 1624 | adev->mode_info.num_dig = 4; |
| 1625 | adev->mode_info.plane_type = dm_plane_type_default; |
| 1626 | break; |
| 1627 | #endif |
| 1628 | default: |
| 1629 | DRM_ERROR("Usupported ASIC type: 0x%X\n", adev->asic_type); |
| 1630 | return -EINVAL; |
| 1631 | } |
| 1632 | |
| 1633 | if (adev->mode_info.funcs == NULL) |
| 1634 | adev->mode_info.funcs = &dm_display_funcs; |
| 1635 | |
| 1636 | /* Note: Do NOT change adev->audio_endpt_rreg and |
| 1637 | * adev->audio_endpt_wreg because they are initialised in |
| 1638 | * amdgpu_device_init() */ |
| 1639 | #if defined(CONFIG_DEBUG_KERNEL_DC) |
| 1640 | device_create_file( |
| 1641 | adev->ddev->dev, |
| 1642 | &dev_attr_s3_debug); |
| 1643 | #endif |
| 1644 | |
| 1645 | return 0; |
| 1646 | } |
| 1647 | |
| 1648 | struct dm_connector_state { |
| 1649 | struct drm_connector_state base; |
| 1650 | |
| 1651 | enum amdgpu_rmx_type scaling; |
| 1652 | uint8_t underscan_vborder; |
| 1653 | uint8_t underscan_hborder; |
| 1654 | bool underscan_enable; |
| 1655 | }; |
| 1656 | |
| 1657 | #define to_dm_connector_state(x)\ |
| 1658 | container_of((x), struct dm_connector_state, base) |
| 1659 | |
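|      | /* A stream (re)build is needed only when the atomic core flags a modeset
|      | * on this CRTC and the new state leaves it enabled and active.
|      | */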
| 1660 | static bool modeset_required(struct drm_crtc_state *crtc_state, |
| 1661 | struct dc_stream_state *new_stream, |
| 1662 | struct dc_stream_state *old_stream) |
| 1663 | { |
| 1664 | if (!drm_atomic_crtc_needs_modeset(crtc_state)) |
| 1665 | return false; |
| 1666 | |
| 1667 | if (!crtc_state->enable) |
| 1668 | return false; |
| 1669 | |
| 1670 | return crtc_state->active; |
| 1671 | } |
| 1672 | |
| 1673 | static bool modereset_required(struct drm_crtc_state *crtc_state) |
| 1674 | { |
| 1675 | if (!drm_atomic_crtc_needs_modeset(crtc_state)) |
| 1676 | return false; |
| 1677 | |
| 1678 | return !crtc_state->enable || !crtc_state->active; |
| 1679 | } |
| 1680 | |
| 1681 | static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder) |
| 1682 | { |
| 1683 | drm_encoder_cleanup(encoder); |
| 1684 | kfree(encoder); |
| 1685 | } |
| 1686 | |
| 1687 | static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = { |
| 1688 | .destroy = amdgpu_dm_encoder_destroy, |
| 1689 | }; |
| 1690 | |
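|      | /* Translate the DRM plane state into DC rectangles: the source rect is
|      | * given in 16.16 fixed point, the destination and clip rects in whole
|      | * pixels. Returns false if any resulting dimension is zero.
|      | */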
| 1691 | static bool fill_rects_from_plane_state(const struct drm_plane_state *state, |
| 1692 | struct dc_plane_state *plane_state) |
| 1693 | { |
| 1694 | plane_state->src_rect.x = state->src_x >> 16; |
| 1695 | plane_state->src_rect.y = state->src_y >> 16; |
| 1696 | /* We ignore the fractional part (16.16 fixed point) for now and do not deal with sub-pixel source coordinates */
| 1697 | plane_state->src_rect.width = state->src_w >> 16; |
| 1698 | |
| 1699 | if (plane_state->src_rect.width == 0) |
| 1700 | return false; |
| 1701 | |
| 1702 | plane_state->src_rect.height = state->src_h >> 16; |
| 1703 | if (plane_state->src_rect.height == 0) |
| 1704 | return false; |
| 1705 | |
| 1706 | plane_state->dst_rect.x = state->crtc_x; |
| 1707 | plane_state->dst_rect.y = state->crtc_y; |
| 1708 | |
| 1709 | if (state->crtc_w == 0) |
| 1710 | return false; |
| 1711 | |
| 1712 | plane_state->dst_rect.width = state->crtc_w; |
| 1713 | |
| 1714 | if (state->crtc_h == 0) |
| 1715 | return false; |
| 1716 | |
| 1717 | plane_state->dst_rect.height = state->crtc_h; |
| 1718 | |
| 1719 | plane_state->clip_rect = plane_state->dst_rect; |
| 1720 | |
| 1721 | switch (state->rotation & DRM_MODE_ROTATE_MASK) { |
| 1722 | case DRM_MODE_ROTATE_0: |
| 1723 | plane_state->rotation = ROTATION_ANGLE_0; |
| 1724 | break; |
| 1725 | case DRM_MODE_ROTATE_90: |
| 1726 | plane_state->rotation = ROTATION_ANGLE_90; |
| 1727 | break; |
| 1728 | case DRM_MODE_ROTATE_180: |
| 1729 | plane_state->rotation = ROTATION_ANGLE_180; |
| 1730 | break; |
| 1731 | case DRM_MODE_ROTATE_270: |
| 1732 | plane_state->rotation = ROTATION_ANGLE_270; |
| 1733 | break; |
| 1734 | default: |
| 1735 | plane_state->rotation = ROTATION_ANGLE_0; |
| 1736 | break; |
| 1737 | } |
| 1738 | |
| 1739 | return true; |
| 1740 | } |
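|      | 
|      | /* Reserve the framebuffer's BO long enough to read back its tiling flags
|      | * and, when requested, its current GPU address.
|      | */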
| 1741 | static int get_fb_info(const struct amdgpu_framebuffer *amdgpu_fb, |
| 1742 | uint64_t *tiling_flags, |
| 1743 | uint64_t *fb_location) |
| 1744 | { |
| 1745 | struct amdgpu_bo *rbo = gem_to_amdgpu_bo(amdgpu_fb->obj); |
| 1746 | int r = amdgpu_bo_reserve(rbo, false); |
| 1747 | |
| 1748 | if (unlikely(r)) { |
| 1749 | /* Don't print an error message when the return value is -ERESTARTSYS */
| 1750 | if (r != -ERESTARTSYS) |
| 1751 | DRM_ERROR("Unable to reserve buffer: %d\n", r); |
| 1752 | return r; |
| 1753 | } |
| 1754 | |
| 1755 | if (fb_location) |
| 1756 | *fb_location = amdgpu_bo_gpu_offset(rbo); |
| 1757 | |
| 1758 | if (tiling_flags) |
| 1759 | amdgpu_bo_get_tiling_flags(rbo, tiling_flags); |
| 1760 | |
| 1761 | amdgpu_bo_unreserve(rbo); |
| 1762 | |
| 1763 | return r; |
| 1764 | } |
| 1765 | |
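|      | /* Fill the DC plane state from the framebuffer: pixel format, surface
|      | * addresses and plane sizes (graphics or NV12/NV21 video layout) and the
|      | * GFX8/GFX9 tiling information.
|      | */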
| 1766 | static int fill_plane_attributes_from_fb(struct amdgpu_device *adev, |
| 1767 | struct dc_plane_state *plane_state, |
| 1768 | const struct amdgpu_framebuffer *amdgpu_fb, |
| 1769 | bool addReq) |
| 1770 | { |
| 1771 | uint64_t tiling_flags; |
| 1772 | uint64_t fb_location = 0; |
| 1773 | uint64_t chroma_addr = 0; |
| 1774 | unsigned int awidth; |
| 1775 | const struct drm_framebuffer *fb = &amdgpu_fb->base; |
| 1776 | int ret = 0; |
| 1777 | struct drm_format_name_buf format_name; |
| 1778 | |
| 1779 | ret = get_fb_info( |
| 1780 | amdgpu_fb, |
| 1781 | &tiling_flags, |
| 1782 | addReq ? &fb_location : NULL);
| 1783 | |
| 1784 | if (ret) |
| 1785 | return ret; |
| 1786 | |
| 1787 | switch (fb->format->format) { |
| 1788 | case DRM_FORMAT_C8: |
| 1789 | plane_state->format = SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS; |
| 1790 | break; |
| 1791 | case DRM_FORMAT_RGB565: |
| 1792 | plane_state->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565; |
| 1793 | break; |
| 1794 | case DRM_FORMAT_XRGB8888: |
| 1795 | case DRM_FORMAT_ARGB8888: |
| 1796 | plane_state->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888; |
| 1797 | break; |
| 1798 | case DRM_FORMAT_XRGB2101010: |
| 1799 | case DRM_FORMAT_ARGB2101010: |
| 1800 | plane_state->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010; |
| 1801 | break; |
| 1802 | case DRM_FORMAT_XBGR2101010: |
| 1803 | case DRM_FORMAT_ABGR2101010: |
| 1804 | plane_state->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010; |
| 1805 | break; |
| 1806 | case DRM_FORMAT_NV21: |
| 1807 | plane_state->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr; |
| 1808 | break; |
| 1809 | case DRM_FORMAT_NV12: |
| 1810 | plane_state->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb; |
| 1811 | break; |
| 1812 | default: |
| 1813 | DRM_ERROR("Unsupported screen format %s\n", |
| 1814 | drm_get_format_name(fb->format->format, &format_name)); |
| 1815 | return -EINVAL; |
| 1816 | } |
| 1817 | |
| 1818 | if (plane_state->format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) { |
| 1819 | plane_state->address.type = PLN_ADDR_TYPE_GRAPHICS; |
| 1820 | plane_state->address.grph.addr.low_part = lower_32_bits(fb_location); |
| 1821 | plane_state->address.grph.addr.high_part = upper_32_bits(fb_location); |
| 1822 | plane_state->plane_size.grph.surface_size.x = 0; |
| 1823 | plane_state->plane_size.grph.surface_size.y = 0; |
| 1824 | plane_state->plane_size.grph.surface_size.width = fb->width; |
| 1825 | plane_state->plane_size.grph.surface_size.height = fb->height; |
| 1826 | plane_state->plane_size.grph.surface_pitch = |
| 1827 | fb->pitches[0] / fb->format->cpp[0]; |
| 1828 | /* TODO: unhardcode */ |
| 1829 | plane_state->color_space = COLOR_SPACE_SRGB; |
| 1830 | |
| 1831 | } else { |
| 1832 | awidth = ALIGN(fb->width, 64); |
| 1833 | plane_state->address.type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE; |
| 1834 | plane_state->address.video_progressive.luma_addr.low_part |
| 1835 | = lower_32_bits(fb_location); |
| 1836 | plane_state->address.video_progressive.luma_addr.high_part |
| 1837 | = upper_32_bits(fb_location); |
| 1838 | chroma_addr = fb_location + (u64)(awidth * fb->height); |
| 1839 | plane_state->address.video_progressive.chroma_addr.low_part |
| 1840 | = lower_32_bits(chroma_addr); |
| 1841 | plane_state->address.video_progressive.chroma_addr.high_part |
| 1842 | = upper_32_bits(chroma_addr); |
| 1843 | plane_state->plane_size.video.luma_size.x = 0; |
| 1844 | plane_state->plane_size.video.luma_size.y = 0; |
| 1845 | plane_state->plane_size.video.luma_size.width = awidth; |
| 1846 | plane_state->plane_size.video.luma_size.height = fb->height; |
| 1847 | /* TODO: unhardcode */ |
| 1848 | plane_state->plane_size.video.luma_pitch = awidth; |
| 1849 | |
| 1850 | plane_state->plane_size.video.chroma_size.x = 0; |
| 1851 | plane_state->plane_size.video.chroma_size.y = 0; |
| 1852 | plane_state->plane_size.video.chroma_size.width = awidth; |
| 1853 | plane_state->plane_size.video.chroma_size.height = fb->height; |
| 1854 | plane_state->plane_size.video.chroma_pitch = awidth / 2; |
| 1855 | |
| 1856 | /* TODO: unhardcode */ |
| 1857 | plane_state->color_space = COLOR_SPACE_YCBCR709; |
| 1858 | } |
| 1859 | |
| 1860 | memset(&plane_state->tiling_info, 0, sizeof(plane_state->tiling_info)); |
| 1861 | |
| 1862 | /* Fill GFX8 params */ |
| 1863 | if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) { |
| 1864 | unsigned int bankw, bankh, mtaspect, tile_split, num_banks; |
| 1865 | |
| 1866 | bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH); |
| 1867 | bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT); |
| 1868 | mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT); |
| 1869 | tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT); |
| 1870 | num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS); |
| 1871 | |
| 1872 | /* XXX fix me for VI */ |
| 1873 | plane_state->tiling_info.gfx8.num_banks = num_banks; |
| 1874 | plane_state->tiling_info.gfx8.array_mode = |
| 1875 | DC_ARRAY_2D_TILED_THIN1; |
| 1876 | plane_state->tiling_info.gfx8.tile_split = tile_split; |
| 1877 | plane_state->tiling_info.gfx8.bank_width = bankw; |
| 1878 | plane_state->tiling_info.gfx8.bank_height = bankh; |
| 1879 | plane_state->tiling_info.gfx8.tile_aspect = mtaspect; |
| 1880 | plane_state->tiling_info.gfx8.tile_mode = |
| 1881 | DC_ADDR_SURF_MICRO_TILING_DISPLAY; |
| 1882 | } else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) |
| 1883 | == DC_ARRAY_1D_TILED_THIN1) { |
| 1884 | plane_state->tiling_info.gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1; |
| 1885 | } |
| 1886 | |
| 1887 | plane_state->tiling_info.gfx8.pipe_config = |
| 1888 | AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG); |
| 1889 | |
| 1890 | if (adev->asic_type == CHIP_VEGA10 || |
| 1891 | adev->asic_type == CHIP_RAVEN) { |
| 1892 | /* Fill GFX9 params */ |
| 1893 | plane_state->tiling_info.gfx9.num_pipes = |
| 1894 | adev->gfx.config.gb_addr_config_fields.num_pipes; |
| 1895 | plane_state->tiling_info.gfx9.num_banks = |
| 1896 | adev->gfx.config.gb_addr_config_fields.num_banks; |
| 1897 | plane_state->tiling_info.gfx9.pipe_interleave = |
| 1898 | adev->gfx.config.gb_addr_config_fields.pipe_interleave_size; |
| 1899 | plane_state->tiling_info.gfx9.num_shader_engines = |
| 1900 | adev->gfx.config.gb_addr_config_fields.num_se; |
| 1901 | plane_state->tiling_info.gfx9.max_compressed_frags = |
| 1902 | adev->gfx.config.gb_addr_config_fields.max_compress_frags; |
| 1903 | plane_state->tiling_info.gfx9.num_rb_per_se = |
| 1904 | adev->gfx.config.gb_addr_config_fields.num_rb_per_se; |
| 1905 | plane_state->tiling_info.gfx9.swizzle = |
| 1906 | AMDGPU_TILING_GET(tiling_flags, SWIZZLE_MODE); |
| 1907 | plane_state->tiling_info.gfx9.shaderEnable = 1; |
| 1908 | } |
| 1909 | |
| 1910 | plane_state->visible = true; |
| 1911 | plane_state->scaling_quality.h_taps_c = 0; |
| 1912 | plane_state->scaling_quality.v_taps_c = 0; |
| 1913 | |
| 1914 | /* is this needed? is plane_state zeroed at allocation? */ |
| 1915 | plane_state->scaling_quality.h_taps = 0; |
| 1916 | plane_state->scaling_quality.v_taps = 0; |
| 1917 | plane_state->stereo_format = PLANE_STEREO_FORMAT_NONE; |
| 1918 | |
| 1919 | return ret; |
| 1920 | |
| 1921 | } |
| 1922 | |
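|      | /* Convert the CRTC's 256-entry gamma LUT into a dc_gamma and attach it
|      | * to the plane state.
|      | */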
| 1923 | static void fill_gamma_from_crtc_state(const struct drm_crtc_state *crtc_state, |
| 1924 | struct dc_plane_state *plane_state) |
| 1925 | { |
| 1926 | int i; |
| 1927 | struct dc_gamma *gamma; |
| 1928 | struct drm_color_lut *lut = |
| 1929 | (struct drm_color_lut *) crtc_state->gamma_lut->data; |
| 1930 | |
| 1931 | gamma = dc_create_gamma(); |
| 1932 | |
| 1933 | if (gamma == NULL) { |
| 1934 | WARN_ON(1); |
| 1935 | return; |
| 1936 | } |
| 1937 | |
| 1938 | gamma->type = GAMMA_RGB_256; |
| 1939 | gamma->num_entries = GAMMA_RGB_256_ENTRIES; |
| 1940 | for (i = 0; i < GAMMA_RGB_256_ENTRIES; i++) { |
| 1941 | gamma->entries.red[i] = dal_fixed31_32_from_int(lut[i].red); |
| 1942 | gamma->entries.green[i] = dal_fixed31_32_from_int(lut[i].green); |
| 1943 | gamma->entries.blue[i] = dal_fixed31_32_from_int(lut[i].blue); |
| 1944 | } |
| 1945 | |
| 1946 | plane_state->gamma_correction = gamma; |
| 1947 | } |
| 1948 | |
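|      | /* Top-level plane translation: fill rectangles, framebuffer-derived
|      | * attributes, a default sRGB input transfer function and, if present,
|      | * the CRTC gamma.
|      | */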
| 1949 | static int fill_plane_attributes(struct amdgpu_device *adev, |
| 1950 | struct dc_plane_state *dc_plane_state, |
| 1951 | struct drm_plane_state *plane_state, |
| 1952 | struct drm_crtc_state *crtc_state, |
| 1953 | bool addrReq) |
| 1954 | { |
| 1955 | const struct amdgpu_framebuffer *amdgpu_fb = |
| 1956 | to_amdgpu_framebuffer(plane_state->fb); |
| 1957 | const struct drm_crtc *crtc = plane_state->crtc; |
| 1958 | struct dc_transfer_func *input_tf; |
| 1959 | int ret = 0; |
| 1960 | |
| 1961 | if (!fill_rects_from_plane_state(plane_state, dc_plane_state)) |
| 1962 | return -EINVAL; |
| 1963 | |
| 1964 | ret = fill_plane_attributes_from_fb( |
| 1965 | crtc->dev->dev_private, |
| 1966 | dc_plane_state, |
| 1967 | amdgpu_fb, |
| 1968 | addrReq); |
| 1969 | |
| 1970 | if (ret) |
| 1971 | return ret; |
| 1972 | |
| 1973 | input_tf = dc_create_transfer_func(); |
| 1974 | |
| 1975 | if (input_tf == NULL) |
| 1976 | return -ENOMEM; |
| 1977 | |
| 1978 | input_tf->type = TF_TYPE_PREDEFINED; |
| 1979 | input_tf->tf = TRANSFER_FUNCTION_SRGB; |
| 1980 | |
| 1981 | dc_plane_state->in_transfer_func = input_tf; |
| 1982 | |
| 1983 | /* In case of gamma set, update gamma value */ |
| 1984 | if (crtc_state->gamma_lut) |
| 1985 | fill_gamma_from_crtc_state(crtc_state, dc_plane_state); |
| 1986 | |
| 1987 | return ret; |
| 1988 | } |
| 1989 | |
| 1990 | /*****************************************************************************/ |
| 1991 | |
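|      | /* Derive the stream source (viewport) and destination rectangles from the
|      | * mode and the connector's scaling (full/aspect/center) and underscan state.
|      | */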
| 1992 | static void update_stream_scaling_settings(const struct drm_display_mode *mode, |
| 1993 | const struct dm_connector_state *dm_state, |
| 1994 | struct dc_stream_state *stream) |
| 1995 | { |
| 1996 | enum amdgpu_rmx_type rmx_type; |
| 1997 | |
| 1998 | struct rect src = { 0 }; /* viewport in composition space*/ |
| 1999 | struct rect dst = { 0 }; /* stream addressable area */ |
| 2000 | |
| 2001 | /* no mode. nothing to be done */ |
| 2002 | if (!mode) |
| 2003 | return; |
| 2004 | |
| 2005 | /* Full screen scaling by default */ |
| 2006 | src.width = mode->hdisplay; |
| 2007 | src.height = mode->vdisplay; |
| 2008 | dst.width = stream->timing.h_addressable; |
| 2009 | dst.height = stream->timing.v_addressable; |
| 2010 | |
| 2011 | rmx_type = dm_state->scaling; |
| 2012 | if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) { |
| 2013 | if (src.width * dst.height < |
| 2014 | src.height * dst.width) { |
| 2015 | /* height needs less upscaling/more downscaling */ |
| 2016 | dst.width = src.width * |
| 2017 | dst.height / src.height; |
| 2018 | } else { |
| 2019 | /* width needs less upscaling/more downscaling */ |
| 2020 | dst.height = src.height * |
| 2021 | dst.width / src.width; |
| 2022 | } |
| 2023 | } else if (rmx_type == RMX_CENTER) { |
| 2024 | dst = src; |
| 2025 | } |
| 2026 | |
| 2027 | dst.x = (stream->timing.h_addressable - dst.width) / 2; |
| 2028 | dst.y = (stream->timing.v_addressable - dst.height) / 2; |
| 2029 | |
| 2030 | if (dm_state->underscan_enable) { |
| 2031 | dst.x += dm_state->underscan_hborder / 2; |
| 2032 | dst.y += dm_state->underscan_vborder / 2; |
| 2033 | dst.width -= dm_state->underscan_hborder; |
| 2034 | dst.height -= dm_state->underscan_vborder; |
| 2035 | } |
| 2036 | |
| 2037 | stream->src = src; |
| 2038 | stream->dst = dst; |
| 2039 | |
| 2040 | DRM_DEBUG_DRIVER("Destination Rectangle x:%d y:%d width:%d height:%d\n", |
| 2041 | dst.x, dst.y, dst.width, dst.height); |
| 2042 | |
| 2043 | } |
| 2044 | |
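|      | /* Map the connector's reported bpc to a DC color depth (currently capped
|      | * at 8 bpc).
|      | */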
| 2045 | static enum dc_color_depth |
| 2046 | convert_color_depth_from_display_info(const struct drm_connector *connector) |
| 2047 | { |
| 2048 | uint32_t bpc = connector->display_info.bpc; |
| 2049 | |
| 2050 | /* Limit color depth to 8 bpc for now.
| 2051 | * TODO: Still need to handle deep color |
| 2052 | */ |
| 2053 | if (bpc > 8) |
| 2054 | bpc = 8; |
| 2055 | |
| 2056 | switch (bpc) { |
| 2057 | case 0: |
| 2058 | /* Temporary workaround: DRM doesn't parse the color depth for
| 2059 | * EDID revisions before 1.4.
| 2060 | * TODO: Fix EDID parsing.
| 2061 | */ |
| 2062 | return COLOR_DEPTH_888; |
| 2063 | case 6: |
| 2064 | return COLOR_DEPTH_666; |
| 2065 | case 8: |
| 2066 | return COLOR_DEPTH_888; |
| 2067 | case 10: |
| 2068 | return COLOR_DEPTH_101010; |
| 2069 | case 12: |
| 2070 | return COLOR_DEPTH_121212; |
| 2071 | case 14: |
| 2072 | return COLOR_DEPTH_141414; |
| 2073 | case 16: |
| 2074 | return COLOR_DEPTH_161616; |
| 2075 | default: |
| 2076 | return COLOR_DEPTH_UNDEFINED; |
| 2077 | } |
| 2078 | } |
| 2079 | |
| 2080 | static enum dc_aspect_ratio |
| 2081 | get_aspect_ratio(const struct drm_display_mode *mode_in) |
| 2082 | { |
| 2083 | int32_t width = mode_in->crtc_hdisplay * 9; |
| 2084 | int32_t height = mode_in->crtc_vdisplay * 16; |
| 2085 | |
| 2086 | if ((width - height) < 10 && (width - height) > -10) |
| 2087 | return ASPECT_RATIO_16_9; |
| 2088 | else |
| 2089 | return ASPECT_RATIO_4_3; |
| 2090 | } |
| 2091 | |
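|      | /* Pick the output color space: sRGB for RGB output, YCbCr601/709 (limited
|      | * range for Y-only) for YCbCr output, depending on the pixel clock.
|      | */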
| 2092 | static enum dc_color_space |
| 2093 | get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing) |
| 2094 | { |
| 2095 | enum dc_color_space color_space = COLOR_SPACE_SRGB; |
| 2096 | |
| 2097 | switch (dc_crtc_timing->pixel_encoding) { |
| 2098 | case PIXEL_ENCODING_YCBCR422: |
| 2099 | case PIXEL_ENCODING_YCBCR444: |
| 2100 | case PIXEL_ENCODING_YCBCR420: |
| 2101 | { |
| 2102 | /* |
| 2103 | * 27030 kHz is the separation point between HDTV and SDTV
| 2104 | * according to the HDMI spec; use YCbCr709 above it and
| 2105 | * YCbCr601 below it.
| 2106 | */ |
| 2107 | if (dc_crtc_timing->pix_clk_khz > 27030) { |
| 2108 | if (dc_crtc_timing->flags.Y_ONLY) |
| 2109 | color_space = |
| 2110 | COLOR_SPACE_YCBCR709_LIMITED; |
| 2111 | else |
| 2112 | color_space = COLOR_SPACE_YCBCR709; |
| 2113 | } else { |
| 2114 | if (dc_crtc_timing->flags.Y_ONLY) |
| 2115 | color_space = |
| 2116 | COLOR_SPACE_YCBCR601_LIMITED; |
| 2117 | else |
| 2118 | color_space = COLOR_SPACE_YCBCR601; |
| 2119 | } |
| 2120 | |
| 2121 | } |
| 2122 | break; |
| 2123 | case PIXEL_ENCODING_RGB: |
| 2124 | color_space = COLOR_SPACE_SRGB; |
| 2125 | break; |
| 2126 | |
| 2127 | default: |
| 2128 | WARN_ON(1); |
| 2129 | break; |
| 2130 | } |
| 2131 | |
| 2132 | return color_space; |
| 2133 | } |
| 2134 | |
| 2135 | /*****************************************************************************/ |
| 2136 | |
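|      | /* Translate a drm_display_mode into the stream's dc_crtc_timing and set
|      | * the output color space and a default sRGB output transfer function.
|      | */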
| 2137 | static void |
| 2138 | fill_stream_properties_from_drm_display_mode(struct dc_stream_state *stream, |
| 2139 | const struct drm_display_mode *mode_in, |
| 2140 | const struct drm_connector *connector) |
| 2141 | { |
| 2142 | struct dc_crtc_timing *timing_out = &stream->timing; |
| 2143 | |
| 2144 | memset(timing_out, 0, sizeof(struct dc_crtc_timing)); |
| 2145 | |
| 2146 | timing_out->h_border_left = 0; |
| 2147 | timing_out->h_border_right = 0; |
| 2148 | timing_out->v_border_top = 0; |
| 2149 | timing_out->v_border_bottom = 0; |
| 2150 | /* TODO: un-hardcode */ |
| 2151 | |
| 2152 | if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444) |
| 2153 | && stream->sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A) |
| 2154 | timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444; |
| 2155 | else |
| 2156 | timing_out->pixel_encoding = PIXEL_ENCODING_RGB; |
| 2157 | |
| 2158 | timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE; |
| 2159 | timing_out->display_color_depth = convert_color_depth_from_display_info( |
| 2160 | connector); |
| 2161 | timing_out->scan_type = SCANNING_TYPE_NODATA; |
| 2162 | timing_out->hdmi_vic = 0; |
| 2163 | timing_out->vic = drm_match_cea_mode(mode_in); |
| 2164 | |
| 2165 | timing_out->h_addressable = mode_in->crtc_hdisplay; |
| 2166 | timing_out->h_total = mode_in->crtc_htotal; |
| 2167 | timing_out->h_sync_width = |
| 2168 | mode_in->crtc_hsync_end - mode_in->crtc_hsync_start; |
| 2169 | timing_out->h_front_porch = |
| 2170 | mode_in->crtc_hsync_start - mode_in->crtc_hdisplay; |
| 2171 | timing_out->v_total = mode_in->crtc_vtotal; |
| 2172 | timing_out->v_addressable = mode_in->crtc_vdisplay; |
| 2173 | timing_out->v_front_porch = |
| 2174 | mode_in->crtc_vsync_start - mode_in->crtc_vdisplay; |
| 2175 | timing_out->v_sync_width = |
| 2176 | mode_in->crtc_vsync_end - mode_in->crtc_vsync_start; |
| 2177 | timing_out->pix_clk_khz = mode_in->crtc_clock; |
| 2178 | timing_out->aspect_ratio = get_aspect_ratio(mode_in); |
| 2179 | if (mode_in->flags & DRM_MODE_FLAG_PHSYNC) |
| 2180 | timing_out->flags.HSYNC_POSITIVE_POLARITY = 1; |
| 2181 | if (mode_in->flags & DRM_MODE_FLAG_PVSYNC) |
| 2182 | timing_out->flags.VSYNC_POSITIVE_POLARITY = 1; |
| 2183 | |
| 2184 | stream->output_color_space = get_output_color_space(timing_out); |
| 2185 | |
| 2186 | { |
| 2187 | struct dc_transfer_func *tf = dc_create_transfer_func(); |
| 2188 | |
| 2189 | tf->type = TF_TYPE_PREDEFINED; |
| 2190 | tf->tf = TRANSFER_FUNCTION_SRGB; |
| 2191 | stream->out_transfer_func = tf; |
| 2192 | } |
| 2193 | } |
| 2194 | |
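|      | /* Copy the sink's EDID audio capabilities (modes, speaker allocation,
|      | * latencies) into the stream's audio_info.
|      | */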
| 2195 | static void fill_audio_info(struct audio_info *audio_info, |
| 2196 | const struct drm_connector *drm_connector, |
| 2197 | const struct dc_sink *dc_sink) |
| 2198 | { |
| 2199 | int i = 0; |
| 2200 | int cea_revision = 0; |
| 2201 | const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps; |
| 2202 | |
| 2203 | audio_info->manufacture_id = edid_caps->manufacturer_id; |
| 2204 | audio_info->product_id = edid_caps->product_id; |
| 2205 | |
| 2206 | cea_revision = drm_connector->display_info.cea_rev; |
| 2207 | |
| 2208 | strncpy(audio_info->display_name, |
| 2209 | edid_caps->display_name, |
| 2210 | AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS - 1); |
| 2211 | |
| 2212 | if (cea_revision >= 3) { |
| 2213 | audio_info->mode_count = edid_caps->audio_mode_count; |
| 2214 | |
| 2215 | for (i = 0; i < audio_info->mode_count; ++i) { |
| 2216 | audio_info->modes[i].format_code = |
| 2217 | (enum audio_format_code) |
| 2218 | (edid_caps->audio_modes[i].format_code); |
| 2219 | audio_info->modes[i].channel_count = |
| 2220 | edid_caps->audio_modes[i].channel_count; |
| 2221 | audio_info->modes[i].sample_rates.all = |
| 2222 | edid_caps->audio_modes[i].sample_rate; |
| 2223 | audio_info->modes[i].sample_size = |
| 2224 | edid_caps->audio_modes[i].sample_size; |
| 2225 | } |
| 2226 | } |
| 2227 | |
| 2228 | audio_info->flags.all = edid_caps->speaker_flags; |
| 2229 | |
| 2230 | /* TODO: We only check for the progressive mode, check for interlace mode too */ |
| 2231 | if (drm_connector->latency_present[0]) { |
| 2232 | audio_info->video_latency = drm_connector->video_latency[0]; |
| 2233 | audio_info->audio_latency = drm_connector->audio_latency[0]; |
| 2234 | } |
| 2235 | |
| 2236 | /* TODO: For DP, video and audio latency should be calculated from DPCD caps */ |
| 2237 | |
| 2238 | } |
| 2239 | |
| 2240 | static void |
| 2241 | copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode, |
| 2242 | struct drm_display_mode *dst_mode) |
| 2243 | { |
| 2244 | dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay; |
| 2245 | dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay; |
| 2246 | dst_mode->crtc_clock = src_mode->crtc_clock; |
| 2247 | dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start; |
| 2248 | dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end; |
| 2249 | dst_mode->crtc_hsync_start = src_mode->crtc_hsync_start; |
| 2250 | dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end; |
| 2251 | dst_mode->crtc_htotal = src_mode->crtc_htotal; |
| 2252 | dst_mode->crtc_hskew = src_mode->crtc_hskew; |
| 2253 | dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start; |
| 2254 | dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end; |
| 2255 | dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start; |
| 2256 | dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end; |
| 2257 | dst_mode->crtc_vtotal = src_mode->crtc_vtotal; |
| 2258 | } |
| 2259 | |
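|      | /* When scaling is enabled, or the requested mode already matches the native
|      | * mode's clock and totals, reuse the native mode's CRTC timing.
|      | */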
| 2260 | static void |
| 2261 | decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode, |
| 2262 | const struct drm_display_mode *native_mode, |
| 2263 | bool scale_enabled) |
| 2264 | { |
| 2265 | if (scale_enabled) { |
| 2266 | copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode); |
| 2267 | } else if (native_mode->clock == drm_mode->clock && |
| 2268 | native_mode->htotal == drm_mode->htotal && |
| 2269 | native_mode->vtotal == drm_mode->vtotal) { |
| 2270 | copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode); |
| 2271 | } else { |
| 2272 | /* no scaling and the mode was not inserted by amdgpu, no need to patch */
| 2273 | } |
| 2274 | } |
| 2275 | |
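|      | /* Create a virtual sink on the link so a stream can still be built for a
|      | * connector that currently has no physical sink attached.
|      | */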
| 2276 | static void create_fake_sink(struct amdgpu_dm_connector *aconnector) |
| 2277 | { |
| 2278 | struct dc_sink *sink = NULL; |
| 2279 | struct dc_sink_init_data sink_init_data = { 0 }; |
| 2280 | |
| 2281 | sink_init_data.link = aconnector->dc_link; |
| 2282 | sink_init_data.sink_signal = aconnector->dc_link->connector_signal; |
| 2283 | |
| 2284 | sink = dc_sink_create(&sink_init_data); |
| 2285 | if (!sink) {
| 2286 | DRM_ERROR("Failed to create sink!\n");
|      | return;
|      | }
| 2287 | |
| 2288 | sink->sink_signal = SIGNAL_TYPE_VIRTUAL; |
| 2289 | aconnector->fake_enable = true; |
| 2290 | |
| 2291 | aconnector->dc_sink = sink; |
| 2292 | aconnector->dc_link->local_sink = sink; |
| 2293 | } |
| 2294 | |
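|      | /* Build a dc_stream for the connector's sink: pick the preferred (native)
|      | * timing when scaling applies, then fill timing, scaling and audio info.
|      | */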
| 2295 | static struct dc_stream_state * |
| 2296 | create_stream_for_sink(struct amdgpu_dm_connector *aconnector, |
| 2297 | const struct drm_display_mode *drm_mode, |
| 2298 | const struct dm_connector_state *dm_state) |
| 2299 | { |
| 2300 | struct drm_display_mode *preferred_mode = NULL; |
| 2301 | const struct drm_connector *drm_connector; |
| 2302 | struct dc_stream_state *stream = NULL; |
| 2303 | struct drm_display_mode mode = *drm_mode; |
| 2304 | bool native_mode_found = false; |
| 2305 | |
| 2306 | if (aconnector == NULL) { |
| 2307 | DRM_ERROR("aconnector is NULL!\n"); |
| 2308 | goto drm_connector_null; |
| 2309 | } |
| 2310 | |
| 2311 | if (dm_state == NULL) { |
| 2312 | DRM_ERROR("dm_state is NULL!\n"); |
| 2313 | goto dm_state_null; |
| 2314 | } |
| 2315 | |
| 2316 | drm_connector = &aconnector->base; |
| 2317 | |
| 2318 | if (!aconnector->dc_sink) { |
| 2319 | /* |
| 2320 | * Exclude MST from creating fake_sink |
| 2321 | * TODO: need to enable MST into fake_sink feature |
| 2322 | */ |
| 2323 | if (aconnector->mst_port) |
| 2324 | goto stream_create_fail; |
| 2325 | |
| 2326 | create_fake_sink(aconnector); |
| 2327 | } |
| 2328 | |
| 2329 | stream = dc_create_stream_for_sink(aconnector->dc_sink); |
| 2330 | |
| 2331 | if (stream == NULL) { |
| 2332 | DRM_ERROR("Failed to create stream for sink!\n"); |
| 2333 | goto stream_create_fail; |
| 2334 | } |
| 2335 | |
| 2336 | list_for_each_entry(preferred_mode, &aconnector->base.modes, head) { |
| 2337 | /* Search for preferred mode */ |
| 2338 | if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) { |
| 2339 | native_mode_found = true; |
| 2340 | break; |
| 2341 | } |
| 2342 | } |
| 2343 | if (!native_mode_found) |
| 2344 | preferred_mode = list_first_entry_or_null( |
| 2345 | &aconnector->base.modes, |
| 2346 | struct drm_display_mode, |
| 2347 | head); |
| 2348 | |
| 2349 | if (preferred_mode == NULL) { |
| 2350 | /* This may not be an error: this use case occurs when there are no
| 2351 | * usermode calls to reset and set the mode upon hotplug. In this
| 2352 | * case, we call set mode ourselves to restore the previous mode,
| 2353 | * and the mode list may not be filled in yet.
| 2354 | */ |
| 2355 | DRM_DEBUG_DRIVER("No preferred mode found\n"); |
| 2356 | } else { |
| 2357 | decide_crtc_timing_for_drm_display_mode( |
| 2358 | &mode, preferred_mode, |
| 2359 | dm_state->scaling != RMX_OFF); |
| 2360 | } |
| 2361 | |
| 2362 | fill_stream_properties_from_drm_display_mode(stream, |
| 2363 | &mode, &aconnector->base); |
| 2364 | update_stream_scaling_settings(&mode, dm_state, stream); |
| 2365 | |
| 2366 | fill_audio_info( |
| 2367 | &stream->audio_info, |
| 2368 | drm_connector, |
| 2369 | aconnector->dc_sink); |
| 2370 | |
| 2371 | stream_create_fail: |
| 2372 | dm_state_null: |
| 2373 | drm_connector_null: |
| 2374 | return stream; |
| 2375 | } |
| 2376 | |
| 2377 | static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc) |
| 2378 | { |
| 2379 | drm_crtc_cleanup(crtc); |
| 2380 | kfree(crtc); |
| 2381 | } |
| 2382 | |
| 2383 | static void dm_crtc_destroy_state(struct drm_crtc *crtc, |
| 2384 | struct drm_crtc_state *state) |
| 2385 | { |
| 2386 | struct dm_crtc_state *cur = to_dm_crtc_state(state); |
| 2387 | |
| 2388 | /* TODO: Destroy dc_stream objects once the stream object is flattened */
| 2389 | if (cur->stream) |
| 2390 | dc_stream_release(cur->stream); |
| 2391 | |
| 2392 | |
| 2393 | __drm_atomic_helper_crtc_destroy_state(state); |
| 2394 | |
| 2395 | |
| 2396 | kfree(state); |
| 2397 | } |
| 2398 | |
| 2399 | static void dm_crtc_reset_state(struct drm_crtc *crtc) |
| 2400 | { |
| 2401 | struct dm_crtc_state *state; |
| 2402 | |
| 2403 | if (crtc->state) |
| 2404 | dm_crtc_destroy_state(crtc, crtc->state); |
| 2405 | |
| 2406 | state = kzalloc(sizeof(*state), GFP_KERNEL); |
| 2407 | if (WARN_ON(!state)) |
| 2408 | return; |
| 2409 | |
| 2410 | crtc->state = &state->base; |
| 2411 | crtc->state->crtc = crtc; |
| 2412 | |
| 2413 | } |
| 2414 | |
| 2415 | static struct drm_crtc_state * |
| 2416 | dm_crtc_duplicate_state(struct drm_crtc *crtc) |
| 2417 | { |
| 2418 | struct dm_crtc_state *state, *cur; |
| 2419 | |
| 2420 | if (WARN_ON(!crtc->state))
| 2421 | return NULL;
| 2422 | 
| 2423 | cur = to_dm_crtc_state(crtc->state);
| 2424 | |
| 2425 | state = kzalloc(sizeof(*state), GFP_KERNEL); |
| 2426 | if (!state) |
| 2427 | return NULL; |
| 2428 | |
| 2429 | __drm_atomic_helper_crtc_duplicate_state(crtc, &state->base); |
| 2430 | |
| 2431 | if (cur->stream) { |
| 2432 | state->stream = cur->stream; |
| 2433 | dc_stream_retain(state->stream); |
| 2434 | } |
| 2435 | |
| 2436 | /* TODO: Duplicate dc_stream objects once the stream object is flattened */
| 2437 | |
| 2438 | return &state->base; |
| 2439 | } |
| 2440 | |
| 2441 | /* Implement only the options currently available for the driver */
| 2442 | static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = { |
| 2443 | .reset = dm_crtc_reset_state, |
| 2444 | .destroy = amdgpu_dm_crtc_destroy, |
| 2445 | .gamma_set = drm_atomic_helper_legacy_gamma_set, |
| 2446 | .set_config = drm_atomic_helper_set_config, |
| 2447 | .page_flip = drm_atomic_helper_page_flip, |
| 2448 | .atomic_duplicate_state = dm_crtc_duplicate_state, |
| 2449 | .atomic_destroy_state = dm_crtc_destroy_state, |
| 2450 | }; |
| 2451 | |
| 2452 | static enum drm_connector_status |
| 2453 | amdgpu_dm_connector_detect(struct drm_connector *connector, bool force) |
| 2454 | { |
| 2455 | bool connected; |
| 2456 | struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); |
| 2457 | |
| 2458 | /* Notes: |
| 2459 | * 1. This interface is NOT called in context of HPD irq. |
| 2460 | * 2. This interface *is* called in the context of a user-mode ioctl,
| 2461 | * which makes it a bad place for *any* MST-related activity. */
| 2462 | |
| 2463 | if (aconnector->base.force == DRM_FORCE_UNSPECIFIED && |
| 2464 | !aconnector->fake_enable) |
| 2465 | connected = (aconnector->dc_sink != NULL); |
| 2466 | else |
| 2467 | connected = (aconnector->base.force == DRM_FORCE_ON); |
| 2468 | |
| 2469 | return (connected ? connector_status_connected : |
| 2470 | connector_status_disconnected); |
| 2471 | } |
| 2472 | |
| 2473 | int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector, |
| 2474 | struct drm_connector_state *connector_state, |
| 2475 | struct drm_property *property, |
| 2476 | uint64_t val) |
| 2477 | { |
| 2478 | struct drm_device *dev = connector->dev; |
| 2479 | struct amdgpu_device *adev = dev->dev_private; |
| 2480 | struct dm_connector_state *dm_old_state = |
| 2481 | to_dm_connector_state(connector->state); |
| 2482 | struct dm_connector_state *dm_new_state = |
| 2483 | to_dm_connector_state(connector_state); |
| 2484 | |
| 2485 | int ret = -EINVAL; |
| 2486 | |
| 2487 | if (property == dev->mode_config.scaling_mode_property) { |
| 2488 | enum amdgpu_rmx_type rmx_type; |
| 2489 | |
| 2490 | switch (val) { |
| 2491 | case DRM_MODE_SCALE_CENTER: |
| 2492 | rmx_type = RMX_CENTER; |
| 2493 | break; |
| 2494 | case DRM_MODE_SCALE_ASPECT: |
| 2495 | rmx_type = RMX_ASPECT; |
| 2496 | break; |
| 2497 | case DRM_MODE_SCALE_FULLSCREEN: |
| 2498 | rmx_type = RMX_FULL; |
| 2499 | break; |
| 2500 | case DRM_MODE_SCALE_NONE: |
| 2501 | default: |
| 2502 | rmx_type = RMX_OFF; |
| 2503 | break; |
| 2504 | } |
| 2505 | |
| 2506 | if (dm_old_state->scaling == rmx_type) |
| 2507 | return 0; |
| 2508 | |
| 2509 | dm_new_state->scaling = rmx_type; |
| 2510 | ret = 0; |
| 2511 | } else if (property == adev->mode_info.underscan_hborder_property) { |
| 2512 | dm_new_state->underscan_hborder = val; |
| 2513 | ret = 0; |
| 2514 | } else if (property == adev->mode_info.underscan_vborder_property) { |
| 2515 | dm_new_state->underscan_vborder = val; |
| 2516 | ret = 0; |
| 2517 | } else if (property == adev->mode_info.underscan_property) { |
| 2518 | dm_new_state->underscan_enable = val; |
| 2519 | ret = 0; |
| 2520 | } |
| 2521 | |
| 2522 | return ret; |
| 2523 | } |
| 2524 | |
| 2525 | int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector, |
| 2526 | const struct drm_connector_state *state, |
| 2527 | struct drm_property *property, |
| 2528 | uint64_t *val) |
| 2529 | { |
| 2530 | struct drm_device *dev = connector->dev; |
| 2531 | struct amdgpu_device *adev = dev->dev_private; |
| 2532 | struct dm_connector_state *dm_state = |
| 2533 | to_dm_connector_state(state); |
| 2534 | int ret = -EINVAL; |
| 2535 | |
| 2536 | if (property == dev->mode_config.scaling_mode_property) { |
| 2537 | switch (dm_state->scaling) { |
| 2538 | case RMX_CENTER: |
| 2539 | *val = DRM_MODE_SCALE_CENTER; |
| 2540 | break; |
| 2541 | case RMX_ASPECT: |
| 2542 | *val = DRM_MODE_SCALE_ASPECT; |
| 2543 | break; |
| 2544 | case RMX_FULL: |
| 2545 | *val = DRM_MODE_SCALE_FULLSCREEN; |
| 2546 | break; |
| 2547 | case RMX_OFF: |
| 2548 | default: |
| 2549 | *val = DRM_MODE_SCALE_NONE; |
| 2550 | break; |
| 2551 | } |
| 2552 | ret = 0; |
| 2553 | } else if (property == adev->mode_info.underscan_hborder_property) { |
| 2554 | *val = dm_state->underscan_hborder; |
| 2555 | ret = 0; |
| 2556 | } else if (property == adev->mode_info.underscan_vborder_property) { |
| 2557 | *val = dm_state->underscan_vborder; |
| 2558 | ret = 0; |
| 2559 | } else if (property == adev->mode_info.underscan_property) { |
| 2560 | *val = dm_state->underscan_enable; |
| 2561 | ret = 0; |
| 2562 | } |
| 2563 | return ret; |
| 2564 | } |
| 2565 | |
| 2566 | static void amdgpu_dm_connector_destroy(struct drm_connector *connector) |
| 2567 | { |
| 2568 | struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); |
| 2569 | const struct dc_link *link = aconnector->dc_link; |
| 2570 | struct amdgpu_device *adev = connector->dev->dev_private; |
| 2571 | struct amdgpu_display_manager *dm = &adev->dm; |
| 2572 | #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\ |
| 2573 | defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE) |
| 2574 | |
| 2575 | if (link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) { |
| 2576 | amdgpu_dm_register_backlight_device(dm); |
| 2577 | |
| 2578 | if (dm->backlight_dev) { |
| 2579 | backlight_device_unregister(dm->backlight_dev); |
| 2580 | dm->backlight_dev = NULL; |
| 2581 | } |
| 2582 | |
| 2583 | } |
| 2584 | #endif |
| 2585 | drm_connector_unregister(connector); |
| 2586 | drm_connector_cleanup(connector); |
| 2587 | kfree(connector); |
| 2588 | } |
| 2589 | |
| 2590 | void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector) |
| 2591 | { |
| 2592 | struct dm_connector_state *state = |
| 2593 | to_dm_connector_state(connector->state); |
| 2594 | |
| 2595 | kfree(state); |
| 2596 | |
| 2597 | state = kzalloc(sizeof(*state), GFP_KERNEL); |
| 2598 | |
| 2599 | if (state) { |
| 2600 | state->scaling = RMX_OFF; |
| 2601 | state->underscan_enable = false; |
| 2602 | state->underscan_hborder = 0; |
| 2603 | state->underscan_vborder = 0; |
| 2604 | |
| 2605 | connector->state = &state->base; |
| 2606 | connector->state->connector = connector; |
| 2607 | } |
| 2608 | } |
| 2609 | |
| 2610 | struct drm_connector_state * |
| 2611 | amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector) |
| 2612 | { |
| 2613 | struct dm_connector_state *state = |
| 2614 | to_dm_connector_state(connector->state); |
| 2615 | |
| 2616 | struct dm_connector_state *new_state = |
| 2617 | kmemdup(state, sizeof(*state), GFP_KERNEL); |
| 2618 | |
| 2619 | if (new_state) { |
| 2620 | __drm_atomic_helper_connector_duplicate_state(connector, |
| 2621 | &new_state->base); |
| 2622 | return &new_state->base; |
| 2623 | } |
| 2624 | |
| 2625 | return NULL; |
| 2626 | } |
| 2627 | |
| 2628 | static const struct drm_connector_funcs amdgpu_dm_connector_funcs = { |
| 2629 | .reset = amdgpu_dm_connector_funcs_reset, |
| 2630 | .detect = amdgpu_dm_connector_detect, |
| 2631 | .fill_modes = drm_helper_probe_single_connector_modes, |
| 2632 | .destroy = amdgpu_dm_connector_destroy, |
| 2633 | .atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state, |
| 2634 | .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, |
| 2635 | .atomic_set_property = amdgpu_dm_connector_atomic_set_property, |
| 2636 | .atomic_get_property = amdgpu_dm_connector_atomic_get_property |
| 2637 | }; |
| 2638 | |
| 2639 | static struct drm_encoder *best_encoder(struct drm_connector *connector) |
| 2640 | { |
| 2641 | int enc_id = connector->encoder_ids[0]; |
| 2642 | struct drm_mode_object *obj; |
| 2643 | struct drm_encoder *encoder; |
| 2644 | |
| 2645 | DRM_DEBUG_DRIVER("Finding the best encoder\n"); |
| 2646 | |
| 2647 | /* pick the encoder ids */ |
| 2648 | if (enc_id) { |
| 2649 | obj = drm_mode_object_find(connector->dev, NULL, enc_id, DRM_MODE_OBJECT_ENCODER); |
| 2650 | if (!obj) { |
| 2651 | DRM_ERROR("Couldn't find a matching encoder for our connector\n"); |
| 2652 | return NULL; |
| 2653 | } |
| 2654 | encoder = obj_to_encoder(obj); |
| 2655 | return encoder; |
| 2656 | } |
| 2657 | DRM_ERROR("No encoder id\n"); |
| 2658 | return NULL; |
| 2659 | } |
| 2660 | |
| 2661 | static int get_modes(struct drm_connector *connector) |
| 2662 | { |
| 2663 | return amdgpu_dm_connector_get_modes(connector); |
| 2664 | } |
| 2665 | |
| 2666 | static void create_eml_sink(struct amdgpu_dm_connector *aconnector) |
| 2667 | { |
| 2668 | struct dc_sink_init_data init_params = { |
| 2669 | .link = aconnector->dc_link, |
| 2670 | .sink_signal = SIGNAL_TYPE_VIRTUAL |
| 2671 | }; |
| 2672 | struct edid *edid;
| 2673 | |
| 2674 | if (!aconnector->base.edid_blob_ptr || |
| 2675 | !aconnector->base.edid_blob_ptr->data) { |
| 2676 | DRM_ERROR("No EDID firmware found on connector: %s ,forcing to OFF!\n", |
| 2677 | aconnector->base.name); |
| 2678 | |
| 2679 | aconnector->base.force = DRM_FORCE_OFF; |
| 2680 | aconnector->base.override_edid = false; |
| 2681 | return; |
| 2682 | } |
| 2683 | |
| 2684 | edid = (struct edid *)aconnector->base.edid_blob_ptr->data;
|      | aconnector->edid = edid;
| 2685 | |
| 2686 | aconnector->dc_em_sink = dc_link_add_remote_sink( |
| 2687 | aconnector->dc_link, |
| 2688 | (uint8_t *)edid, |
| 2689 | (edid->extensions + 1) * EDID_LENGTH, |
| 2690 | &init_params); |
| 2691 | |
| 2692 | if (aconnector->base.force == DRM_FORCE_ON) |
| 2693 | aconnector->dc_sink = aconnector->dc_link->local_sink ? |
| 2694 | aconnector->dc_link->local_sink : |
| 2695 | aconnector->dc_em_sink; |
| 2696 | } |
| 2697 | |
| 2698 | static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector) |
| 2699 | { |
| 2700 | struct dc_link *link = (struct dc_link *)aconnector->dc_link; |
| 2701 | |
| 2702 | /* In case of a headless boot with force-on for a DP managed connector,
| 2703 | * those settings have to be != 0 to get an initial modeset.
| 2704 | */ |
| 2705 | if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) { |
| 2706 | link->verified_link_cap.lane_count = LANE_COUNT_FOUR; |
| 2707 | link->verified_link_cap.link_rate = LINK_RATE_HIGH2; |
| 2708 | } |
| 2709 | |
| 2710 | |
| 2711 | aconnector->base.override_edid = true; |
| 2712 | create_eml_sink(aconnector); |
| 2713 | } |
| 2714 | |
| 2715 | int amdgpu_dm_connector_mode_valid(struct drm_connector *connector, |
| 2716 | struct drm_display_mode *mode) |
| 2717 | { |
| 2718 | int result = MODE_ERROR; |
| 2719 | struct dc_sink *dc_sink; |
| 2720 | struct amdgpu_device *adev = connector->dev->dev_private; |
| 2721 | /* TODO: Unhardcode stream count */ |
| 2722 | struct dc_stream_state *stream; |
| 2723 | struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); |
| 2724 | |
| 2725 | if ((mode->flags & DRM_MODE_FLAG_INTERLACE) || |
| 2726 | (mode->flags & DRM_MODE_FLAG_DBLSCAN)) |
| 2727 | return result; |
| 2728 | |
| 2729 | /* Only run this the first time mode_valid is called to initialize
| 2730 | * EDID mgmt |
| 2731 | */ |
| 2732 | if (aconnector->base.force != DRM_FORCE_UNSPECIFIED && |
| 2733 | !aconnector->dc_em_sink) |
| 2734 | handle_edid_mgmt(aconnector); |
| 2735 | |
| 2736 | dc_sink = to_amdgpu_dm_connector(connector)->dc_sink; |
| 2737 | |
| 2738 | if (dc_sink == NULL) { |
| 2739 | DRM_ERROR("dc_sink is NULL!\n"); |
| 2740 | goto fail; |
| 2741 | } |
| 2742 | |
| 2743 | stream = dc_create_stream_for_sink(dc_sink); |
| 2744 | if (stream == NULL) { |
| 2745 | DRM_ERROR("Failed to create stream for sink!\n"); |
| 2746 | goto fail; |
| 2747 | } |
| 2748 | |
| 2749 | drm_mode_set_crtcinfo(mode, 0); |
| 2750 | fill_stream_properties_from_drm_display_mode(stream, mode, connector); |
| 2751 | |
| 2752 | stream->src.width = mode->hdisplay; |
| 2753 | stream->src.height = mode->vdisplay; |
| 2754 | stream->dst = stream->src; |
| 2755 | |
| 2756 | if (dc_validate_stream(adev->dm.dc, stream) == DC_OK) |
| 2757 | result = MODE_OK; |
| 2758 | |
| 2759 | dc_stream_release(stream); |
| 2760 | |
| 2761 | fail: |
| 2762 | /* TODO: error handling*/ |
| 2763 | return result; |
| 2764 | } |
| 2765 | |
| 2766 | static const struct drm_connector_helper_funcs |
| 2767 | amdgpu_dm_connector_helper_funcs = { |
| 2768 | /* |
| 2769 | * If a second, bigger display is hotplugged in fbcon mode, the bigger
| 2770 | * resolution modes will be filtered out by drm_mode_validate_size() and
| 2771 | * will be missing after the user starts lightdm. So we need to renew the
| 2772 | * mode list in the get_modes callback, not just return the mode count.
| 2773 | */ |
| 2774 | .get_modes = get_modes, |
| 2775 | .mode_valid = amdgpu_dm_connector_mode_valid, |
| 2776 | .best_encoder = best_encoder |
| 2777 | }; |
| 2778 | |
| 2779 | static void dm_crtc_helper_disable(struct drm_crtc *crtc) |
| 2780 | { |
| 2781 | } |
| 2782 | |
| 2783 | static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc, |
| 2784 | struct drm_crtc_state *state) |
| 2785 | { |
| 2786 | struct amdgpu_device *adev = crtc->dev->dev_private; |
| 2787 | struct dc *dc = adev->dm.dc; |
| 2788 | struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(state); |
| 2789 | int ret = -EINVAL; |
| 2790 | |
| 2791 | if (unlikely(!dm_crtc_state->stream && |
| 2792 | modeset_required(state, NULL, dm_crtc_state->stream))) { |
| 2793 | WARN_ON(1); |
| 2794 | return ret; |
| 2795 | } |
| 2796 | |
| 2797 | /* In some use cases, like reset, no stream is attached */ |
| 2798 | if (!dm_crtc_state->stream) |
| 2799 | return 0; |
| 2800 | |
| 2801 | if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK) |
| 2802 | return 0; |
| 2803 | |
| 2804 | return ret; |
| 2805 | } |
| 2806 | |
| 2807 | static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc, |
| 2808 | const struct drm_display_mode *mode, |
| 2809 | struct drm_display_mode *adjusted_mode) |
| 2810 | { |
| 2811 | return true; |
| 2812 | } |
| 2813 | |
| 2814 | static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = { |
| 2815 | .disable = dm_crtc_helper_disable, |
| 2816 | .atomic_check = dm_crtc_helper_atomic_check, |
| 2817 | .mode_fixup = dm_crtc_helper_mode_fixup |
| 2818 | }; |
| 2819 | |
| 2820 | static void dm_encoder_helper_disable(struct drm_encoder *encoder) |
| 2821 | { |
| 2822 | |
| 2823 | } |
| 2824 | |
| 2825 | static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder, |
| 2826 | struct drm_crtc_state *crtc_state, |
| 2827 | struct drm_connector_state *conn_state) |
| 2828 | { |
| 2829 | return 0; |
| 2830 | } |
| 2831 | |
| 2832 | const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = { |
| 2833 | .disable = dm_encoder_helper_disable, |
| 2834 | .atomic_check = dm_encoder_helper_atomic_check |
| 2835 | }; |
| 2836 | |
| 2837 | static void dm_drm_plane_reset(struct drm_plane *plane) |
| 2838 | { |
| 2839 | struct dm_plane_state *amdgpu_state = NULL; |
| 2840 | |
| 2841 | if (plane->state) |
| 2842 | plane->funcs->atomic_destroy_state(plane, plane->state); |
| 2843 | |
| 2844 | amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL); |
| 2845 | WARN_ON(amdgpu_state == NULL); |
| 2846 | |
| 2847 | if (amdgpu_state) { |
| 2848 | plane->state = &amdgpu_state->base; |
| 2849 | plane->state->plane = plane; |
| 2850 | plane->state->rotation = DRM_MODE_ROTATE_0; |
| 2851 | } |
| 2852 | } |
| 2853 | |
| 2854 | static struct drm_plane_state * |
| 2855 | dm_drm_plane_duplicate_state(struct drm_plane *plane) |
| 2856 | { |
| 2857 | struct dm_plane_state *dm_plane_state, *old_dm_plane_state; |
| 2858 | |
| 2859 | old_dm_plane_state = to_dm_plane_state(plane->state); |
| 2860 | dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL); |
| 2861 | if (!dm_plane_state) |
| 2862 | return NULL; |
| 2863 | |
| 2864 | __drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base); |
| 2865 | |
| 2866 | if (old_dm_plane_state->dc_state) { |
| 2867 | dm_plane_state->dc_state = old_dm_plane_state->dc_state; |
| 2868 | dc_plane_state_retain(dm_plane_state->dc_state); |
| 2869 | } |
| 2870 | |
| 2871 | return &dm_plane_state->base; |
| 2872 | } |
| 2873 | |
| 2874 | void dm_drm_plane_destroy_state(struct drm_plane *plane, |
| 2875 | struct drm_plane_state *state) |
| 2876 | { |
| 2877 | struct dm_plane_state *dm_plane_state = to_dm_plane_state(state); |
| 2878 | |
| 2879 | if (dm_plane_state->dc_state) |
| 2880 | dc_plane_state_release(dm_plane_state->dc_state); |
| 2881 | |
| 2882 | drm_atomic_helper_plane_destroy_state(plane, state); |
| 2883 | } |
| 2884 | |
| 2885 | static const struct drm_plane_funcs dm_plane_funcs = { |
| 2886 | .update_plane = drm_atomic_helper_update_plane, |
| 2887 | .disable_plane = drm_atomic_helper_disable_plane, |
| 2888 | .destroy = drm_plane_cleanup, |
| 2889 | .reset = dm_drm_plane_reset, |
| 2890 | .atomic_duplicate_state = dm_drm_plane_duplicate_state, |
| 2891 | .atomic_destroy_state = dm_drm_plane_destroy_state, |
| 2892 | }; |
| 2893 | |
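|      | /* Pin the new framebuffer's BO in VRAM and write the resulting GPU address
|      | * into the DC plane state (graphics address or video luma/chroma addresses).
|      | */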
| 2894 | static int dm_plane_helper_prepare_fb(struct drm_plane *plane, |
| 2895 | struct drm_plane_state *new_state) |
| 2896 | { |
| 2897 | struct amdgpu_framebuffer *afb; |
| 2898 | struct drm_gem_object *obj; |
| 2899 | struct amdgpu_bo *rbo; |
| 2900 | uint64_t chroma_addr = 0; |
| 2901 | int r; |
| 2902 | struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old; |
| 2903 | unsigned int awidth; |
| 2904 | |
| 2905 | dm_plane_state_old = to_dm_plane_state(plane->state); |
| 2906 | dm_plane_state_new = to_dm_plane_state(new_state); |
| 2907 | |
| 2908 | if (!new_state->fb) { |
| 2909 | DRM_DEBUG_DRIVER("No FB bound\n"); |
| 2910 | return 0; |
| 2911 | } |
| 2912 | |
| 2913 | afb = to_amdgpu_framebuffer(new_state->fb); |
| 2914 | |
| 2915 | obj = afb->obj; |
| 2916 | rbo = gem_to_amdgpu_bo(obj); |
| 2917 | r = amdgpu_bo_reserve(rbo, false); |
| 2918 | if (unlikely(r != 0)) |
| 2919 | return r; |
| 2920 | |
| 2921 | r = amdgpu_bo_pin(rbo, AMDGPU_GEM_DOMAIN_VRAM, &afb->address); |
| 2922 | |
| 2923 | |
| 2924 | amdgpu_bo_unreserve(rbo); |
| 2925 | |
| 2926 | if (unlikely(r != 0)) { |
| 2927 | if (r != -ERESTARTSYS) |
| 2928 | DRM_ERROR("Failed to pin framebuffer with error %d\n", r); |
| 2929 | return r; |
| 2930 | } |
| 2931 | |
| 2932 | amdgpu_bo_ref(rbo); |
| 2933 | |
| 2934 | if (dm_plane_state_new->dc_state && |
| 2935 | dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) { |
| 2936 | struct dc_plane_state *plane_state = dm_plane_state_new->dc_state; |
| 2937 | |
| 2938 | if (plane_state->format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) { |
| 2939 | plane_state->address.grph.addr.low_part = lower_32_bits(afb->address); |
| 2940 | plane_state->address.grph.addr.high_part = upper_32_bits(afb->address); |
| 2941 | } else { |
| 2942 | awidth = ALIGN(new_state->fb->width, 64); |
| 2943 | plane_state->address.type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE; |
| 2944 | plane_state->address.video_progressive.luma_addr.low_part |
| 2945 | = lower_32_bits(afb->address); |
| 2946 | plane_state->address.video_progressive.luma_addr.high_part |
| 2947 | = upper_32_bits(afb->address); |
| 2948 | chroma_addr = afb->address + (u64)(awidth * new_state->fb->height); |
| 2949 | plane_state->address.video_progressive.chroma_addr.low_part |
| 2950 | = lower_32_bits(chroma_addr); |
| 2951 | plane_state->address.video_progressive.chroma_addr.high_part |
| 2952 | = upper_32_bits(chroma_addr); |
| 2953 | } |
| 2954 | } |
| 2955 | |
| 2956 | /* This is a hack for S3: the 4.9 kernel filters out cursor buffer
| 2957 | * prepare and cleanup in drm_atomic_helper_prepare_planes
| 2958 | * and drm_atomic_helper_cleanup_planes because the fb is not set during S3.
| 2959 | * In the 4.10 kernel this code should be removed, and amdgpu_device_suspend
| 2960 | * code touching frame buffers should be avoided for DC.
| 2961 | */ |
| 2962 | if (plane->type == DRM_PLANE_TYPE_CURSOR) { |
| 2963 | struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_state->crtc); |
| 2964 | |
| 2965 | acrtc->cursor_bo = obj; |
| 2966 | } |
| 2967 | return 0; |
| 2968 | } |
| 2969 | |
| 2970 | static void dm_plane_helper_cleanup_fb(struct drm_plane *plane, |
| 2971 | struct drm_plane_state *old_state) |
| 2972 | { |
| 2973 | struct amdgpu_bo *rbo; |
| 2974 | struct amdgpu_framebuffer *afb; |
| 2975 | int r; |
| 2976 | |
| 2977 | if (!old_state->fb) |
| 2978 | return; |
| 2979 | |
| 2980 | afb = to_amdgpu_framebuffer(old_state->fb); |
| 2981 | rbo = gem_to_amdgpu_bo(afb->obj); |
| 2982 | r = amdgpu_bo_reserve(rbo, false); |
| 2983 | if (unlikely(r)) { |
| 2984 | DRM_ERROR("failed to reserve rbo before unpin\n"); |
| 2985 | return; |
| 2986 | } |
| 2987 | |
| 2988 | amdgpu_bo_unpin(rbo); |
| 2989 | amdgpu_bo_unreserve(rbo); |
| 2990 | amdgpu_bo_unref(&rbo); |
| 2991 | } |
| 2992 | |
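|      | /* Validate the plane's attached dc_plane_state with DC; planes with no
|      | * DC state pass trivially.
|      | */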
| 2993 | static int dm_plane_atomic_check(struct drm_plane *plane, |
| 2994 | struct drm_plane_state *state) |
| 2995 | { |
| 2996 | struct amdgpu_device *adev = plane->dev->dev_private; |
| 2997 | struct dc *dc = adev->dm.dc; |
| 2998 | struct dm_plane_state *dm_plane_state = to_dm_plane_state(state); |
| 2999 | |
| 3000 | if (!dm_plane_state->dc_state) |
| 3001 | return 0; |
| 3002 | |
| 3003 | if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK) |
| 3004 | return 0; |
| 3005 | |
| 3006 | return -EINVAL; |
| 3007 | } |
| 3008 | |
| 3009 | static const struct drm_plane_helper_funcs dm_plane_helper_funcs = { |
| 3010 | .prepare_fb = dm_plane_helper_prepare_fb, |
| 3011 | .cleanup_fb = dm_plane_helper_cleanup_fb, |
| 3012 | .atomic_check = dm_plane_atomic_check, |
| 3013 | }; |
| 3014 | |
| 3015 | /* |
| 3016 | * TODO: these are currently initialized to rgb formats only. |
| 3017 | * For future use cases we should either initialize them dynamically based on |
| 3018 | * plane capabilities, or initialize this array to all formats, so the internal
| 3019 | * drm check will succeed, and let DC implement the proper check.
| 3020 | */ |
| 3021 | static const uint32_t rgb_formats[] = { |
| 3022 | DRM_FORMAT_RGB888, |
| 3023 | DRM_FORMAT_XRGB8888, |
| 3024 | DRM_FORMAT_ARGB8888, |
| 3025 | DRM_FORMAT_RGBA8888, |
| 3026 | DRM_FORMAT_XRGB2101010, |
| 3027 | DRM_FORMAT_XBGR2101010, |
| 3028 | DRM_FORMAT_ARGB2101010, |
| 3029 | DRM_FORMAT_ABGR2101010, |
| 3030 | }; |
| 3031 | |
| 3032 | static const uint32_t yuv_formats[] = { |
| 3033 | DRM_FORMAT_NV12, |
| 3034 | DRM_FORMAT_NV21, |
| 3035 | }; |
| 3036 | |
| 3037 | static const u32 cursor_formats[] = { |
| 3038 | DRM_FORMAT_ARGB8888 |
| 3039 | }; |
| 3040 | |
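|      | /* Register the DRM universal plane with a format list that depends on the
|      | * plane type: RGB for primary, YUV for overlay, ARGB8888 for cursor.
|      | */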
| 3041 | static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm, |
| 3042 | struct amdgpu_plane *aplane, |
| 3043 | unsigned long possible_crtcs) |
| 3044 | { |
| 3045 | int res = -EPERM; |
| 3046 | |
| 3047 | switch (aplane->base.type) { |
| 3048 | case DRM_PLANE_TYPE_PRIMARY: |
| 3049 | aplane->base.format_default = true; |
| 3050 | |
| 3051 | res = drm_universal_plane_init( |
| 3052 | dm->adev->ddev, |
| 3053 | &aplane->base, |
| 3054 | possible_crtcs, |
| 3055 | &dm_plane_funcs, |
| 3056 | rgb_formats, |
| 3057 | ARRAY_SIZE(rgb_formats), |
| 3058 | NULL, aplane->base.type, NULL); |
| 3059 | break; |
| 3060 | case DRM_PLANE_TYPE_OVERLAY: |
| 3061 | res = drm_universal_plane_init( |
| 3062 | dm->adev->ddev, |
| 3063 | &aplane->base, |
| 3064 | possible_crtcs, |
| 3065 | &dm_plane_funcs, |
| 3066 | yuv_formats, |
| 3067 | ARRAY_SIZE(yuv_formats), |
| 3068 | NULL, aplane->base.type, NULL); |
| 3069 | break; |
| 3070 | case DRM_PLANE_TYPE_CURSOR: |
| 3071 | res = drm_universal_plane_init( |
| 3072 | dm->adev->ddev, |
| 3073 | &aplane->base, |
| 3074 | possible_crtcs, |
| 3075 | &dm_plane_funcs, |
| 3076 | cursor_formats, |
| 3077 | ARRAY_SIZE(cursor_formats), |
| 3078 | NULL, aplane->base.type, NULL); |
| 3079 | break; |
| 3080 | } |
| 3081 | |
| 3082 | drm_plane_helper_add(&aplane->base, &dm_plane_helper_funcs); |
| 3083 | |
| 3084 | /* Create (reset) the plane state */ |
| 3085 | if (aplane->base.funcs->reset) |
| 3086 | aplane->base.funcs->reset(&aplane->base); |
| 3087 | |
| 3088 | |
| 3089 | return res; |
| 3090 | } |
| 3091 | |
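|      | /* Allocate a cursor plane and an amdgpu_crtc, initialize the CRTC with the
|      | * given primary plane and the cursor plane, and register it in mode_info.
|      | */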
| 3092 | static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm, |
| 3093 | struct drm_plane *plane, |
| 3094 | uint32_t crtc_index) |
| 3095 | { |
| 3096 | struct amdgpu_crtc *acrtc = NULL; |
| 3097 | struct amdgpu_plane *cursor_plane; |
| 3098 | |
| 3099 | int res = -ENOMEM; |
| 3100 | |
| 3101 | cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL); |
| 3102 | if (!cursor_plane) |
| 3103 | goto fail; |
| 3104 | |
| 3105 | cursor_plane->base.type = DRM_PLANE_TYPE_CURSOR; |
| 3106 | res = amdgpu_dm_plane_init(dm, cursor_plane, 0); |
| 3107 | |
| 3108 | acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL); |
| 3109 | if (!acrtc) |
| 3110 | goto fail; |
| 3111 | |
| 3112 | res = drm_crtc_init_with_planes( |
| 3113 | dm->ddev, |
| 3114 | &acrtc->base, |
| 3115 | plane, |
| 3116 | &cursor_plane->base, |
| 3117 | &amdgpu_dm_crtc_funcs, NULL); |
| 3118 | |
| 3119 | if (res) |
| 3120 | goto fail; |
| 3121 | |
| 3122 | drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs); |
| 3123 | |
| 3124 | /* Create (reset) the plane state */ |
| 3125 | if (acrtc->base.funcs->reset) |
| 3126 | acrtc->base.funcs->reset(&acrtc->base); |
| 3127 | |
| 3128 | acrtc->max_cursor_width = dm->adev->dm.dc->caps.max_cursor_size; |
| 3129 | acrtc->max_cursor_height = dm->adev->dm.dc->caps.max_cursor_size; |
| 3130 | |
| 3131 | acrtc->crtc_id = crtc_index; |
| 3132 | acrtc->base.enabled = false; |
| 3133 | |
| 3134 | dm->adev->mode_info.crtcs[crtc_index] = acrtc; |
| 3135 | drm_mode_crtc_set_gamma_size(&acrtc->base, 256); |
| 3136 | |
| 3137 | return 0; |
| 3138 | |
| 3139 | fail: |
| 3140 | kfree(acrtc); |
| 3141 | kfree(cursor_plane); |
| 3142 | return res; |
| 3143 | } |
| 3144 | |
| 3145 | |
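/* Map a DC signal type to the corresponding DRM connector type. */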
| 3146 | static int to_drm_connector_type(enum signal_type st) |
| 3147 | { |
| 3148 | switch (st) { |
| 3149 | case SIGNAL_TYPE_HDMI_TYPE_A: |
| 3150 | return DRM_MODE_CONNECTOR_HDMIA; |
| 3151 | case SIGNAL_TYPE_EDP: |
| 3152 | return DRM_MODE_CONNECTOR_eDP; |
| 3153 | case SIGNAL_TYPE_RGB: |
| 3154 | return DRM_MODE_CONNECTOR_VGA; |
| 3155 | case SIGNAL_TYPE_DISPLAY_PORT: |
| 3156 | case SIGNAL_TYPE_DISPLAY_PORT_MST: |
| 3157 | return DRM_MODE_CONNECTOR_DisplayPort; |
| 3158 | case SIGNAL_TYPE_DVI_DUAL_LINK: |
| 3159 | case SIGNAL_TYPE_DVI_SINGLE_LINK: |
| 3160 | return DRM_MODE_CONNECTOR_DVID; |
| 3161 | case SIGNAL_TYPE_VIRTUAL: |
| 3162 | return DRM_MODE_CONNECTOR_VIRTUAL; |
| 3163 | |
| 3164 | default: |
| 3165 | return DRM_MODE_CONNECTOR_Unknown; |
| 3166 | } |
| 3167 | } |
| 3168 | |
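/*
 * Cache the connector's preferred probed mode as the encoder's native mode;
 * it is used as the base when adding the common modes below.
 */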
| 3169 | static void amdgpu_dm_get_native_mode(struct drm_connector *connector) |
| 3170 | { |
| 3171 | const struct drm_connector_helper_funcs *helper = |
| 3172 | connector->helper_private; |
| 3173 | struct drm_encoder *encoder; |
| 3174 | struct amdgpu_encoder *amdgpu_encoder; |
| 3175 | |
| 3176 | encoder = helper->best_encoder(connector); |
| 3177 | |
| 3178 | if (encoder == NULL) |
| 3179 | return; |
| 3180 | |
| 3181 | amdgpu_encoder = to_amdgpu_encoder(encoder); |
| 3182 | |
| 3183 | amdgpu_encoder->native_mode.clock = 0; |
| 3184 | |
| 3185 | if (!list_empty(&connector->probed_modes)) { |
| 3186 | struct drm_display_mode *preferred_mode = NULL; |
| 3187 | |
| 3188 | list_for_each_entry(preferred_mode, |
| 3189 | &connector->probed_modes, |
| 3190 | head) { |
| 3191 | if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) |
| 3192 | amdgpu_encoder->native_mode = *preferred_mode; |
| 3193 | |
| 3194 | break; |
| 3195 | } |
| 3196 | |
| 3197 | } |
| 3198 | } |
| 3199 | |
| 3200 | static struct drm_display_mode * |
| 3201 | amdgpu_dm_create_common_mode(struct drm_encoder *encoder, |
| 3202 | char *name, |
| 3203 | int hdisplay, int vdisplay) |
| 3204 | { |
| 3205 | struct drm_device *dev = encoder->dev; |
| 3206 | struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); |
| 3207 | struct drm_display_mode *mode = NULL; |
| 3208 | struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode; |
| 3209 | |
| 3210 | mode = drm_mode_duplicate(dev, native_mode); |
| 3211 | |
| 3212 | if (mode == NULL) |
| 3213 | return NULL; |
| 3214 | |
| 3215 | mode->hdisplay = hdisplay; |
| 3216 | mode->vdisplay = vdisplay; |
| 3217 | mode->type &= ~DRM_MODE_TYPE_PREFERRED; |
| 3218 | strncpy(mode->name, name, DRM_DISPLAY_MODE_LEN); |
| 3219 | |
| 3220 | return mode; |
| 3221 | |
| 3222 | } |
| 3223 | |
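/*
 * Add a set of common display modes derived from the native mode, for every
 * size that fits within the native resolution and is not already present in
 * the probed mode list.
 */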
| 3224 | static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder, |
| 3225 | struct drm_connector *connector) |
| 3226 | { |
| 3227 | struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); |
| 3228 | struct drm_display_mode *mode = NULL; |
| 3229 | struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode; |
| 3230 | struct amdgpu_dm_connector *amdgpu_dm_connector = |
| 3231 | to_amdgpu_dm_connector(connector); |
| 3232 | int i; |
| 3233 | int n; |
| 3234 | struct mode_size { |
| 3235 | char name[DRM_DISPLAY_MODE_LEN]; |
| 3236 | int w; |
| 3237 | int h; |
| 3238 | } common_modes[] = { |
| 3239 | { "640x480", 640, 480}, |
| 3240 | { "800x600", 800, 600}, |
| 3241 | { "1024x768", 1024, 768}, |
| 3242 | { "1280x720", 1280, 720}, |
| 3243 | { "1280x800", 1280, 800}, |
| 3244 | {"1280x1024", 1280, 1024}, |
| 3245 | { "1440x900", 1440, 900}, |
| 3246 | {"1680x1050", 1680, 1050}, |
| 3247 | {"1600x1200", 1600, 1200}, |
| 3248 | {"1920x1080", 1920, 1080}, |
| 3249 | {"1920x1200", 1920, 1200} |
| 3250 | }; |
| 3251 | |
| 3252 | n = ARRAY_SIZE(common_modes); |
| 3253 | |
| 3254 | for (i = 0; i < n; i++) { |
| 3255 | struct drm_display_mode *curmode = NULL; |
| 3256 | bool mode_existed = false; |
| 3257 | |
| 3258 | if (common_modes[i].w > native_mode->hdisplay || |
| 3259 | common_modes[i].h > native_mode->vdisplay || |
| 3260 | (common_modes[i].w == native_mode->hdisplay && |
| 3261 | common_modes[i].h == native_mode->vdisplay)) |
| 3262 | continue; |
| 3263 | |
| 3264 | list_for_each_entry(curmode, &connector->probed_modes, head) { |
| 3265 | if (common_modes[i].w == curmode->hdisplay && |
| 3266 | common_modes[i].h == curmode->vdisplay) { |
| 3267 | mode_existed = true; |
| 3268 | break; |
| 3269 | } |
| 3270 | } |
| 3271 | |
| 3272 | if (mode_existed) |
| 3273 | continue; |
| 3274 | |
| 3275 | mode = amdgpu_dm_create_common_mode(encoder, |
| 3276 | common_modes[i].name, common_modes[i].w, |
| 3277 | common_modes[i].h); |
| 3278 | drm_mode_probed_add(connector, mode); |
| 3279 | amdgpu_dm_connector->num_modes++; |
| 3280 | } |
| 3281 | } |
| 3282 | |
| 3283 | static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector, |
| 3284 | struct edid *edid) |
| 3285 | { |
| 3286 | struct amdgpu_dm_connector *amdgpu_dm_connector = |
| 3287 | to_amdgpu_dm_connector(connector); |
| 3288 | |
| 3289 | if (edid) { |
| 3290 | /* empty probed_modes */ |
| 3291 | INIT_LIST_HEAD(&connector->probed_modes); |
| 3292 | amdgpu_dm_connector->num_modes = |
| 3293 | drm_add_edid_modes(connector, edid); |
| 3294 | |
| 3295 | drm_edid_to_eld(connector, edid); |
| 3296 | |
| 3297 | amdgpu_dm_get_native_mode(connector); |
| 3298 | } else { |
| 3299 | amdgpu_dm_connector->num_modes = 0; |
| 3300 | } |
| 3301 | } |
| 3302 | |
| 3303 | static int amdgpu_dm_connector_get_modes(struct drm_connector *connector) |
| 3304 | { |
| 3305 | const struct drm_connector_helper_funcs *helper = |
| 3306 | connector->helper_private; |
| 3307 | struct amdgpu_dm_connector *amdgpu_dm_connector = |
| 3308 | to_amdgpu_dm_connector(connector); |
| 3309 | struct drm_encoder *encoder; |
| 3310 | struct edid *edid = amdgpu_dm_connector->edid; |
| 3311 | |
| 3312 | encoder = helper->best_encoder(connector); |
| 3313 | |
| 3314 | amdgpu_dm_connector_ddc_get_modes(connector, edid); |
| 3315 | amdgpu_dm_connector_add_common_modes(encoder, connector); |
| 3316 | return amdgpu_dm_connector->num_modes; |
| 3317 | } |
| 3318 | |
| 3319 | void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm, |
| 3320 | struct amdgpu_dm_connector *aconnector, |
| 3321 | int connector_type, |
| 3322 | struct dc_link *link, |
| 3323 | int link_index) |
| 3324 | { |
| 3325 | struct amdgpu_device *adev = dm->ddev->dev_private; |
| 3326 | |
| 3327 | aconnector->connector_id = link_index; |
| 3328 | aconnector->dc_link = link; |
| 3329 | aconnector->base.interlace_allowed = false; |
| 3330 | aconnector->base.doublescan_allowed = false; |
| 3331 | aconnector->base.stereo_allowed = false; |
| 3332 | aconnector->base.dpms = DRM_MODE_DPMS_OFF; |
| 3333 | aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */ |
| 3334 | |
| 3335 | mutex_init(&aconnector->hpd_lock); |
| 3336 | |
	/* Configure HPD (hot plug detect) support. connector->polled defaults
	 * to 0, which means HPD hot plug is not supported.
	 */
| 3340 | switch (connector_type) { |
| 3341 | case DRM_MODE_CONNECTOR_HDMIA: |
| 3342 | aconnector->base.polled = DRM_CONNECTOR_POLL_HPD; |
| 3343 | break; |
| 3344 | case DRM_MODE_CONNECTOR_DisplayPort: |
| 3345 | aconnector->base.polled = DRM_CONNECTOR_POLL_HPD; |
| 3346 | break; |
| 3347 | case DRM_MODE_CONNECTOR_DVID: |
| 3348 | aconnector->base.polled = DRM_CONNECTOR_POLL_HPD; |
| 3349 | break; |
| 3350 | default: |
| 3351 | break; |
| 3352 | } |
| 3353 | |
| 3354 | drm_object_attach_property(&aconnector->base.base, |
| 3355 | dm->ddev->mode_config.scaling_mode_property, |
| 3356 | DRM_MODE_SCALE_NONE); |
| 3357 | |
| 3358 | drm_object_attach_property(&aconnector->base.base, |
| 3359 | adev->mode_info.underscan_property, |
| 3360 | UNDERSCAN_OFF); |
| 3361 | drm_object_attach_property(&aconnector->base.base, |
| 3362 | adev->mode_info.underscan_hborder_property, |
| 3363 | 0); |
| 3364 | drm_object_attach_property(&aconnector->base.base, |
| 3365 | adev->mode_info.underscan_vborder_property, |
| 3366 | 0); |
| 3367 | |
| 3368 | } |
| 3369 | |
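/*
 * Translate the Linux i2c_msg transfers into a DC i2c_command with one
 * payload per message and submit it through the i2caux interface of the
 * DDC service.
 */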
| 3370 | static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap, |
| 3371 | struct i2c_msg *msgs, int num) |
| 3372 | { |
| 3373 | struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap); |
| 3374 | struct ddc_service *ddc_service = i2c->ddc_service; |
| 3375 | struct i2c_command cmd; |
| 3376 | int i; |
| 3377 | int result = -EIO; |
| 3378 | |
| 3379 | cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL); |
| 3380 | |
| 3381 | if (!cmd.payloads) |
| 3382 | return result; |
| 3383 | |
| 3384 | cmd.number_of_payloads = num; |
| 3385 | cmd.engine = I2C_COMMAND_ENGINE_DEFAULT; |
| 3386 | cmd.speed = 100; |
| 3387 | |
| 3388 | for (i = 0; i < num; i++) { |
| 3389 | cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD); |
| 3390 | cmd.payloads[i].address = msgs[i].addr; |
| 3391 | cmd.payloads[i].length = msgs[i].len; |
| 3392 | cmd.payloads[i].data = msgs[i].buf; |
| 3393 | } |
| 3394 | |
| 3395 | if (dal_i2caux_submit_i2c_command( |
| 3396 | ddc_service->ctx->i2caux, |
| 3397 | ddc_service->ddc_pin, |
| 3398 | &cmd)) |
| 3399 | result = num; |
| 3400 | |
| 3401 | kfree(cmd.payloads); |
| 3402 | return result; |
| 3403 | } |
| 3404 | |
| 3405 | static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap) |
| 3406 | { |
| 3407 | return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL; |
| 3408 | } |
| 3409 | |
| 3410 | static const struct i2c_algorithm amdgpu_dm_i2c_algo = { |
| 3411 | .master_xfer = amdgpu_dm_i2c_xfer, |
| 3412 | .functionality = amdgpu_dm_i2c_func, |
| 3413 | }; |
| 3414 | |
| 3415 | static struct amdgpu_i2c_adapter * |
| 3416 | create_i2c(struct ddc_service *ddc_service, |
| 3417 | int link_index, |
| 3418 | int *res) |
| 3419 | { |
| 3420 | struct amdgpu_device *adev = ddc_service->ctx->driver_context; |
| 3421 | struct amdgpu_i2c_adapter *i2c; |
| 3422 | |
| 3423 | i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL); |
| 3424 | if (!i2c) |
| 3425 | return NULL; |
| 3426 | i2c->base.owner = THIS_MODULE; |
| 3427 | i2c->base.class = I2C_CLASS_DDC; |
| 3428 | i2c->base.dev.parent = &adev->pdev->dev; |
| 3429 | i2c->base.algo = &amdgpu_dm_i2c_algo; |
| 3430 | snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index); |
| 3431 | i2c_set_adapdata(&i2c->base, i2c); |
| 3432 | i2c->ddc_service = ddc_service; |
| 3433 | |
| 3434 | return i2c; |
| 3435 | } |
| 3436 | |
| 3437 | /* Note: this function assumes that dc_link_detect() was called for the |
| 3438 | * dc_link which will be represented by this aconnector. |
| 3439 | */ |
| 3440 | static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm, |
| 3441 | struct amdgpu_dm_connector *aconnector, |
| 3442 | uint32_t link_index, |
| 3443 | struct amdgpu_encoder *aencoder) |
| 3444 | { |
| 3445 | int res = 0; |
| 3446 | int connector_type; |
| 3447 | struct dc *dc = dm->dc; |
| 3448 | struct dc_link *link = dc_get_link_at_index(dc, link_index); |
| 3449 | struct amdgpu_i2c_adapter *i2c; |
| 3450 | |
| 3451 | link->priv = aconnector; |
| 3452 | |
| 3453 | DRM_DEBUG_DRIVER("%s()\n", __func__); |
| 3454 | |
| 3455 | i2c = create_i2c(link->ddc, link->link_index, &res); |
| 3456 | if (!i2c) { |
| 3457 | DRM_ERROR("Failed to create i2c adapter data\n"); |
| 3458 | return -ENOMEM; |
| 3459 | } |
| 3460 | |
| 3461 | aconnector->i2c = i2c; |
| 3462 | res = i2c_add_adapter(&i2c->base); |
| 3463 | |
| 3464 | if (res) { |
| 3465 | DRM_ERROR("Failed to register hw i2c %d\n", link->link_index); |
| 3466 | goto out_free; |
| 3467 | } |
| 3468 | |
| 3469 | connector_type = to_drm_connector_type(link->connector_signal); |
| 3470 | |
| 3471 | res = drm_connector_init( |
| 3472 | dm->ddev, |
| 3473 | &aconnector->base, |
| 3474 | &amdgpu_dm_connector_funcs, |
| 3475 | connector_type); |
| 3476 | |
| 3477 | if (res) { |
| 3478 | DRM_ERROR("connector_init failed\n"); |
| 3479 | aconnector->connector_id = -1; |
| 3480 | goto out_free; |
| 3481 | } |
| 3482 | |
| 3483 | drm_connector_helper_add( |
| 3484 | &aconnector->base, |
| 3485 | &amdgpu_dm_connector_helper_funcs); |
| 3486 | |
| 3487 | if (aconnector->base.funcs->reset) |
| 3488 | aconnector->base.funcs->reset(&aconnector->base); |
| 3489 | |
| 3490 | amdgpu_dm_connector_init_helper( |
| 3491 | dm, |
| 3492 | aconnector, |
| 3493 | connector_type, |
| 3494 | link, |
| 3495 | link_index); |
| 3496 | |
| 3497 | drm_mode_connector_attach_encoder( |
| 3498 | &aconnector->base, &aencoder->base); |
| 3499 | |
| 3500 | drm_connector_register(&aconnector->base); |
| 3501 | |
| 3502 | if (connector_type == DRM_MODE_CONNECTOR_DisplayPort |
| 3503 | || connector_type == DRM_MODE_CONNECTOR_eDP) |
| 3504 | amdgpu_dm_initialize_dp_connector(dm, aconnector); |
| 3505 | |
| 3506 | #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\ |
| 3507 | defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE) |
| 3508 | |
	/* NOTE: this currently will create a backlight device even if a panel
	 * is not connected to the eDP/LVDS connector.
	 *
	 * This is less than ideal but we don't have sink information at this
	 * stage since detection happens afterwards. We can't do detection
	 * earlier since MST detection needs connectors to be created first.
	 */
| 3516 | if (link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) { |
		/* Even if registration fails, we should continue with
		 * DM initialization because not having a backlight control
		 * is better than a black screen.
		 */
| 3521 | amdgpu_dm_register_backlight_device(dm); |
| 3522 | |
| 3523 | if (dm->backlight_dev) |
| 3524 | dm->backlight_link = link; |
| 3525 | } |
| 3526 | #endif |
| 3527 | |
| 3528 | out_free: |
| 3529 | if (res) { |
| 3530 | kfree(i2c); |
| 3531 | aconnector->i2c = NULL; |
| 3532 | } |
| 3533 | return res; |
| 3534 | } |
| 3535 | |
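/*
 * Return a possible_crtcs bitmask with one bit set per available CRTC
 * (capped at six CRTCs).
 */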
| 3536 | int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev) |
| 3537 | { |
| 3538 | switch (adev->mode_info.num_crtc) { |
| 3539 | case 1: |
| 3540 | return 0x1; |
| 3541 | case 2: |
| 3542 | return 0x3; |
| 3543 | case 3: |
| 3544 | return 0x7; |
| 3545 | case 4: |
| 3546 | return 0xf; |
| 3547 | case 5: |
| 3548 | return 0x1f; |
| 3549 | case 6: |
| 3550 | default: |
| 3551 | return 0x3f; |
| 3552 | } |
| 3553 | } |
| 3554 | |
| 3555 | static int amdgpu_dm_encoder_init(struct drm_device *dev, |
| 3556 | struct amdgpu_encoder *aencoder, |
| 3557 | uint32_t link_index) |
| 3558 | { |
| 3559 | struct amdgpu_device *adev = dev->dev_private; |
| 3560 | |
| 3561 | int res = drm_encoder_init(dev, |
| 3562 | &aencoder->base, |
| 3563 | &amdgpu_dm_encoder_funcs, |
| 3564 | DRM_MODE_ENCODER_TMDS, |
| 3565 | NULL); |
| 3566 | |
| 3567 | aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev); |
| 3568 | |
| 3569 | if (!res) |
| 3570 | aencoder->encoder_id = link_index; |
| 3571 | else |
| 3572 | aencoder->encoder_id = -1; |
| 3573 | |
| 3574 | drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs); |
| 3575 | |
| 3576 | return res; |
| 3577 | } |
| 3578 | |
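/*
 * Enable or disable vblank and page-flip interrupt handling for the given
 * CRTC.
 */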
| 3579 | static void manage_dm_interrupts(struct amdgpu_device *adev, |
| 3580 | struct amdgpu_crtc *acrtc, |
| 3581 | bool enable) |
| 3582 | { |
	/*
	 * This is not a correct translation, but it works as long as the
	 * VBLANK constant is the same as PFLIP.
	 */
| 3587 | int irq_type = |
| 3588 | amdgpu_crtc_idx_to_irq_type( |
| 3589 | adev, |
| 3590 | acrtc->crtc_id); |
| 3591 | |
| 3592 | if (enable) { |
| 3593 | drm_crtc_vblank_on(&acrtc->base); |
| 3594 | amdgpu_irq_get( |
| 3595 | adev, |
| 3596 | &adev->pageflip_irq, |
| 3597 | irq_type); |
| 3598 | } else { |
| 3599 | |
| 3600 | amdgpu_irq_put( |
| 3601 | adev, |
| 3602 | &adev->pageflip_irq, |
| 3603 | irq_type); |
| 3604 | drm_crtc_vblank_off(&acrtc->base); |
| 3605 | } |
| 3606 | } |
| 3607 | |
| 3608 | static bool |
| 3609 | is_scaling_state_different(const struct dm_connector_state *dm_state, |
| 3610 | const struct dm_connector_state *old_dm_state) |
| 3611 | { |
| 3612 | if (dm_state->scaling != old_dm_state->scaling) |
| 3613 | return true; |
| 3614 | if (!dm_state->underscan_enable && old_dm_state->underscan_enable) { |
| 3615 | if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0) |
| 3616 | return true; |
| 3617 | } else if (dm_state->underscan_enable && !old_dm_state->underscan_enable) { |
| 3618 | if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0) |
| 3619 | return true; |
| 3620 | } else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder || |
| 3621 | dm_state->underscan_vborder != old_dm_state->underscan_vborder) |
| 3622 | return true; |
| 3623 | return false; |
| 3624 | } |
| 3625 | |
| 3626 | static void remove_stream(struct amdgpu_device *adev, |
| 3627 | struct amdgpu_crtc *acrtc, |
| 3628 | struct dc_stream_state *stream) |
| 3629 | { |
| 3630 | /* this is the update mode case */ |
| 3631 | if (adev->dm.freesync_module) |
| 3632 | mod_freesync_remove_stream(adev->dm.freesync_module, stream); |
| 3633 | |
| 3634 | acrtc->otg_inst = -1; |
| 3635 | acrtc->enabled = false; |
| 3636 | } |
| 3637 | |
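/*
 * Compute the DC cursor position from the plane state, folding negative
 * coordinates into the hotspot, and validate the cursor size against the
 * CRTC limits.
 */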
| 3638 | static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc, |
| 3639 | struct dc_cursor_position *position) |
| 3640 | { |
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
| 3642 | int x, y; |
| 3643 | int xorigin = 0, yorigin = 0; |
| 3644 | |
| 3645 | if (!crtc || !plane->state->fb) { |
| 3646 | position->enable = false; |
| 3647 | position->x = 0; |
| 3648 | position->y = 0; |
| 3649 | return 0; |
| 3650 | } |
| 3651 | |
| 3652 | if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) || |
| 3653 | (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) { |
| 3654 | DRM_ERROR("%s: bad cursor width or height %d x %d\n", |
| 3655 | __func__, |
| 3656 | plane->state->crtc_w, |
| 3657 | plane->state->crtc_h); |
| 3658 | return -EINVAL; |
| 3659 | } |
| 3660 | |
| 3661 | x = plane->state->crtc_x; |
| 3662 | y = plane->state->crtc_y; |
	/* avivo cursors are offset into the total surface */
| 3664 | x += crtc->primary->state->src_x >> 16; |
| 3665 | y += crtc->primary->state->src_y >> 16; |
| 3666 | if (x < 0) { |
| 3667 | xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1); |
| 3668 | x = 0; |
| 3669 | } |
| 3670 | if (y < 0) { |
| 3671 | yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1); |
| 3672 | y = 0; |
| 3673 | } |
| 3674 | position->enable = true; |
| 3675 | position->x = x; |
| 3676 | position->y = y; |
| 3677 | position->x_hotspot = xorigin; |
| 3678 | position->y_hotspot = yorigin; |
| 3679 | |
| 3680 | return 0; |
| 3681 | } |
| 3682 | |
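/*
 * Program the DC cursor attributes and position for the plane's current
 * framebuffer, or disable the cursor when no valid position can be derived.
 */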
| 3683 | static void handle_cursor_update(struct drm_plane *plane, |
| 3684 | struct drm_plane_state *old_plane_state) |
| 3685 | { |
| 3686 | struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb); |
| 3687 | struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc; |
| 3688 | struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL; |
| 3689 | struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); |
| 3690 | uint64_t address = afb ? afb->address : 0; |
| 3691 | struct dc_cursor_position position; |
| 3692 | struct dc_cursor_attributes attributes; |
| 3693 | int ret; |
| 3694 | |
| 3695 | if (!plane->state->fb && !old_plane_state->fb) |
| 3696 | return; |
| 3697 | |
| 3698 | DRM_DEBUG_DRIVER("%s: crtc_id=%d with size %d to %d\n", |
| 3699 | __func__, |
| 3700 | amdgpu_crtc->crtc_id, |
| 3701 | plane->state->crtc_w, |
| 3702 | plane->state->crtc_h); |
| 3703 | |
| 3704 | ret = get_cursor_position(plane, crtc, &position); |
| 3705 | if (ret) |
| 3706 | return; |
| 3707 | |
| 3708 | if (!position.enable) { |
| 3709 | /* turn off cursor */ |
| 3710 | if (crtc_state && crtc_state->stream) |
| 3711 | dc_stream_set_cursor_position(crtc_state->stream, |
| 3712 | &position); |
| 3713 | return; |
| 3714 | } |
| 3715 | |
| 3716 | amdgpu_crtc->cursor_width = plane->state->crtc_w; |
| 3717 | amdgpu_crtc->cursor_height = plane->state->crtc_h; |
| 3718 | |
| 3719 | attributes.address.high_part = upper_32_bits(address); |
| 3720 | attributes.address.low_part = lower_32_bits(address); |
| 3721 | attributes.width = plane->state->crtc_w; |
| 3722 | attributes.height = plane->state->crtc_h; |
| 3723 | attributes.color_format = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA; |
| 3724 | attributes.rotation_angle = 0; |
| 3725 | attributes.attribute_flags.value = 0; |
| 3726 | |
| 3727 | attributes.pitch = attributes.width; |
| 3728 | |
| 3729 | if (crtc_state->stream) { |
| 3730 | if (!dc_stream_set_cursor_attributes(crtc_state->stream, |
| 3731 | &attributes)) |
| 3732 | DRM_ERROR("DC failed to set cursor attributes\n"); |
| 3733 | |
| 3734 | if (!dc_stream_set_cursor_position(crtc_state->stream, |
| 3735 | &position)) |
| 3736 | DRM_ERROR("DC failed to set cursor position\n"); |
| 3737 | } |
| 3738 | } |
| 3739 | |
| 3740 | static void prepare_flip_isr(struct amdgpu_crtc *acrtc) |
| 3741 | { |
| 3742 | |
| 3743 | assert_spin_locked(&acrtc->base.dev->event_lock); |
| 3744 | WARN_ON(acrtc->event); |
| 3745 | |
| 3746 | acrtc->event = acrtc->base.state->event; |
| 3747 | |
| 3748 | /* Set the flip status */ |
| 3749 | acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED; |
| 3750 | |
| 3751 | /* Mark this event as consumed */ |
| 3752 | acrtc->base.state->event = NULL; |
| 3753 | |
| 3754 | DRM_DEBUG_DRIVER("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n", |
| 3755 | acrtc->crtc_id); |
| 3756 | } |
| 3757 | |
/*
 * Executes a flip.
 *
 * Waits on all of the BO's fences and for the proper vblank count.
 */
| 3763 | static void amdgpu_dm_do_flip(struct drm_crtc *crtc, |
| 3764 | struct drm_framebuffer *fb, |
| 3765 | uint32_t target, |
| 3766 | struct dc_state *state) |
| 3767 | { |
| 3768 | unsigned long flags; |
| 3769 | uint32_t target_vblank; |
| 3770 | int r, vpos, hpos; |
| 3771 | struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc); |
| 3772 | struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(fb); |
| 3773 | struct amdgpu_bo *abo = gem_to_amdgpu_bo(afb->obj); |
| 3774 | struct amdgpu_device *adev = crtc->dev->dev_private; |
| 3775 | bool async_flip = (crtc->state->pageflip_flags & DRM_MODE_PAGE_FLIP_ASYNC) != 0; |
| 3776 | struct dc_flip_addrs addr = { {0} }; |
| 3777 | /* TODO eliminate or rename surface_update */ |
| 3778 | struct dc_surface_update surface_updates[1] = { {0} }; |
| 3779 | struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state); |
| 3780 | |
| 3781 | |
| 3782 | /* Prepare wait for target vblank early - before the fence-waits */ |
| 3783 | target_vblank = target - drm_crtc_vblank_count(crtc) + |
| 3784 | amdgpu_get_vblank_counter_kms(crtc->dev, acrtc->crtc_id); |
| 3785 | |
	/* TODO: This might fail and hence is better not used; wait
	 * explicitly on fences instead, and in general this should only
	 * be called for blocking commits, as per the framework helpers.
	 */
| 3791 | r = amdgpu_bo_reserve(abo, true); |
| 3792 | if (unlikely(r != 0)) { |
| 3793 | DRM_ERROR("failed to reserve buffer before flip\n"); |
| 3794 | WARN_ON(1); |
| 3795 | } |
| 3796 | |
| 3797 | /* Wait for all fences on this FB */ |
| 3798 | WARN_ON(reservation_object_wait_timeout_rcu(abo->tbo.resv, true, false, |
| 3799 | MAX_SCHEDULE_TIMEOUT) < 0); |
| 3800 | |
| 3801 | amdgpu_bo_unreserve(abo); |
| 3802 | |
| 3803 | /* Wait until we're out of the vertical blank period before the one |
| 3804 | * targeted by the flip |
| 3805 | */ |
| 3806 | while ((acrtc->enabled && |
| 3807 | (amdgpu_get_crtc_scanoutpos(adev->ddev, acrtc->crtc_id, 0, |
| 3808 | &vpos, &hpos, NULL, NULL, |
| 3809 | &crtc->hwmode) |
| 3810 | & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) == |
| 3811 | (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) && |
| 3812 | (int)(target_vblank - |
| 3813 | amdgpu_get_vblank_counter_kms(adev->ddev, acrtc->crtc_id)) > 0)) { |
| 3814 | usleep_range(1000, 1100); |
| 3815 | } |
| 3816 | |
| 3817 | /* Flip */ |
| 3818 | spin_lock_irqsave(&crtc->dev->event_lock, flags); |
| 3819 | /* update crtc fb */ |
| 3820 | crtc->primary->fb = fb; |
| 3821 | |
| 3822 | WARN_ON(acrtc->pflip_status != AMDGPU_FLIP_NONE); |
| 3823 | WARN_ON(!acrtc_state->stream); |
| 3824 | |
| 3825 | addr.address.grph.addr.low_part = lower_32_bits(afb->address); |
| 3826 | addr.address.grph.addr.high_part = upper_32_bits(afb->address); |
| 3827 | addr.flip_immediate = async_flip; |
| 3828 | |
| 3829 | |
| 3830 | if (acrtc->base.state->event) |
| 3831 | prepare_flip_isr(acrtc); |
| 3832 | |
| 3833 | surface_updates->surface = dc_stream_get_status(acrtc_state->stream)->plane_states[0]; |
| 3834 | surface_updates->flip_addr = &addr; |
| 3835 | |
| 3836 | |
| 3837 | dc_commit_updates_for_stream(adev->dm.dc, |
| 3838 | surface_updates, |
| 3839 | 1, |
| 3840 | acrtc_state->stream, |
| 3841 | NULL, |
| 3842 | &surface_updates->surface, |
| 3843 | state); |
| 3844 | |
	DRM_DEBUG_DRIVER("%s Flipping to hi: 0x%x, low: 0x%x\n",
| 3846 | __func__, |
| 3847 | addr.address.grph.addr.high_part, |
| 3848 | addr.address.grph.addr.low_part); |
| 3849 | |
| 3850 | |
| 3851 | spin_unlock_irqrestore(&crtc->dev->event_lock, flags); |
| 3852 | } |
| 3853 | |
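/*
 * Build the list of DC plane states for the given CRTC and commit them to its
 * stream, or perform a page flip per plane when only a flip is required.
 */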
| 3854 | static void amdgpu_dm_commit_planes(struct drm_atomic_state *state, |
| 3855 | struct drm_device *dev, |
| 3856 | struct amdgpu_display_manager *dm, |
| 3857 | struct drm_crtc *pcrtc, |
| 3858 | bool *wait_for_vblank) |
| 3859 | { |
| 3860 | uint32_t i; |
| 3861 | struct drm_plane *plane; |
| 3862 | struct drm_plane_state *old_plane_state, *new_plane_state; |
| 3863 | struct dc_stream_state *dc_stream_attach; |
| 3864 | struct dc_plane_state *plane_states_constructed[MAX_SURFACES]; |
| 3865 | struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc); |
| 3866 | struct drm_crtc_state *new_pcrtc_state = |
| 3867 | drm_atomic_get_new_crtc_state(state, pcrtc); |
| 3868 | struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state); |
| 3869 | struct dm_atomic_state *dm_state = to_dm_atomic_state(state); |
| 3870 | int planes_count = 0; |
| 3871 | unsigned long flags; |
| 3872 | |
| 3873 | /* update planes when needed */ |
| 3874 | for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) { |
| 3875 | struct drm_crtc *crtc = new_plane_state->crtc; |
| 3876 | struct drm_crtc_state *new_crtc_state; |
| 3877 | struct drm_framebuffer *fb = new_plane_state->fb; |
| 3878 | bool pflip_needed; |
| 3879 | struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state); |
| 3880 | |
| 3881 | if (plane->type == DRM_PLANE_TYPE_CURSOR) { |
| 3882 | handle_cursor_update(plane, old_plane_state); |
| 3883 | continue; |
| 3884 | } |
| 3885 | |
| 3886 | if (!fb || !crtc || pcrtc != crtc) |
| 3887 | continue; |
| 3888 | |
| 3889 | new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc); |
| 3890 | if (!new_crtc_state->active) |
| 3891 | continue; |
| 3892 | |
| 3893 | pflip_needed = !state->allow_modeset; |
| 3894 | |
| 3895 | spin_lock_irqsave(&crtc->dev->event_lock, flags); |
| 3896 | if (acrtc_attach->pflip_status != AMDGPU_FLIP_NONE) { |
| 3897 | DRM_ERROR("%s: acrtc %d, already busy\n", |
| 3898 | __func__, |
| 3899 | acrtc_attach->crtc_id); |
| 3900 | spin_unlock_irqrestore(&crtc->dev->event_lock, flags); |
| 3901 | /* In commit tail framework this cannot happen */ |
| 3902 | WARN_ON(1); |
| 3903 | } |
| 3904 | spin_unlock_irqrestore(&crtc->dev->event_lock, flags); |
| 3905 | |
| 3906 | if (!pflip_needed) { |
| 3907 | WARN_ON(!dm_new_plane_state->dc_state); |
| 3908 | |
| 3909 | plane_states_constructed[planes_count] = dm_new_plane_state->dc_state; |
| 3910 | |
| 3911 | dc_stream_attach = acrtc_state->stream; |
| 3912 | planes_count++; |
| 3913 | |
| 3914 | } else if (new_crtc_state->planes_changed) { |
			/* Assume that even ONE crtc with an immediate flip
			 * means the entire commit can't wait for VBLANK.
			 * TODO: Check if this is correct.
			 */
| 3919 | *wait_for_vblank = |
| 3920 | new_pcrtc_state->pageflip_flags & DRM_MODE_PAGE_FLIP_ASYNC ? |
| 3921 | false : true; |
| 3922 | |
| 3923 | /* TODO: Needs rework for multiplane flip */ |
| 3924 | if (plane->type == DRM_PLANE_TYPE_PRIMARY) |
| 3925 | drm_crtc_vblank_get(crtc); |
| 3926 | |
| 3927 | amdgpu_dm_do_flip( |
| 3928 | crtc, |
| 3929 | fb, |
| 3930 | drm_crtc_vblank_count(crtc) + *wait_for_vblank, |
| 3931 | dm_state->context); |
| 3932 | } |
| 3933 | |
| 3934 | } |
| 3935 | |
| 3936 | if (planes_count) { |
| 3937 | unsigned long flags; |
| 3938 | |
| 3939 | if (new_pcrtc_state->event) { |
| 3940 | |
| 3941 | drm_crtc_vblank_get(pcrtc); |
| 3942 | |
| 3943 | spin_lock_irqsave(&pcrtc->dev->event_lock, flags); |
| 3944 | prepare_flip_isr(acrtc_attach); |
| 3945 | spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags); |
| 3946 | } |
| 3947 | |
		if (!dc_commit_planes_to_stream(dm->dc,
						plane_states_constructed,
						planes_count,
						dc_stream_attach,
						dm_state->context))
			dm_error("%s: Failed to attach plane!\n", __func__);
| 3954 | } else { |
		/* TODO BUG: Disabling planes on the CRTC should go here. */
| 3956 | } |
| 3957 | } |
| 3958 | |
| 3959 | |
| 3960 | static int amdgpu_dm_atomic_commit(struct drm_device *dev, |
| 3961 | struct drm_atomic_state *state, |
| 3962 | bool nonblock) |
| 3963 | { |
| 3964 | struct drm_crtc *crtc; |
| 3965 | struct drm_crtc_state *old_crtc_state, *new_crtc_state; |
| 3966 | struct amdgpu_device *adev = dev->dev_private; |
| 3967 | int i; |
| 3968 | |
	/*
	 * We evade vblanks and pflips on crtcs that are about to be changed.
	 * We do it here to flush & disable interrupts before drm_swap_state is
	 * called in drm_atomic_helper_commit, which will update the
	 * crtc->dm_crtc_state->stream pointer that is used in the ISRs.
	 */
| 3976 | for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { |
| 3977 | struct dm_crtc_state *dm_old_crtc_state = to_dm_crtc_state(old_crtc_state); |
| 3978 | struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc); |
| 3979 | |
| 3980 | if (drm_atomic_crtc_needs_modeset(new_crtc_state) && dm_old_crtc_state->stream) |
| 3981 | manage_dm_interrupts(adev, acrtc, false); |
| 3982 | } |
	/* Add a check here for SoCs that support a hardware cursor plane, to
	 * unset legacy_cursor_update
	 */
| 3985 | |
| 3986 | return drm_atomic_helper_commit(dev, state, nonblock); |
| 3987 | |
| 3988 | /*TODO Handle EINTR, reenable IRQ*/ |
| 3989 | } |
| 3990 | |
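/*
 * Commit tail: program the new DC state, update freesync and
 * scaling/underscan settings, commit planes per CRTC, re-enable interrupts
 * and signal completion of the HW programming.
 */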
| 3991 | static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state) |
| 3992 | { |
| 3993 | struct drm_device *dev = state->dev; |
| 3994 | struct amdgpu_device *adev = dev->dev_private; |
| 3995 | struct amdgpu_display_manager *dm = &adev->dm; |
| 3996 | struct dm_atomic_state *dm_state; |
| 3997 | uint32_t i, j; |
| 3998 | uint32_t new_crtcs_count = 0; |
| 3999 | struct drm_crtc *crtc; |
| 4000 | struct drm_crtc_state *old_crtc_state, *new_crtc_state; |
| 4001 | struct amdgpu_crtc *new_crtcs[MAX_STREAMS]; |
| 4002 | struct dc_stream_state *new_stream = NULL; |
| 4003 | unsigned long flags; |
| 4004 | bool wait_for_vblank = true; |
| 4005 | struct drm_connector *connector; |
| 4006 | struct drm_connector_state *old_con_state, *new_con_state; |
| 4007 | struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state; |
| 4008 | |
| 4009 | drm_atomic_helper_update_legacy_modeset_state(dev, state); |
| 4010 | |
| 4011 | dm_state = to_dm_atomic_state(state); |
| 4012 | |
| 4013 | /* update changed items */ |
| 4014 | for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { |
| 4015 | struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc); |
| 4016 | |
| 4017 | dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); |
| 4018 | dm_old_crtc_state = to_dm_crtc_state(old_crtc_state); |
| 4019 | |
| 4020 | DRM_DEBUG_DRIVER( |
| 4021 | "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, " |
| 4022 | "planes_changed:%d, mode_changed:%d,active_changed:%d," |
| 4023 | "connectors_changed:%d\n", |
| 4024 | acrtc->crtc_id, |
| 4025 | new_crtc_state->enable, |
| 4026 | new_crtc_state->active, |
| 4027 | new_crtc_state->planes_changed, |
| 4028 | new_crtc_state->mode_changed, |
| 4029 | new_crtc_state->active_changed, |
| 4030 | new_crtc_state->connectors_changed); |
| 4031 | |
| 4032 | /* handles headless hotplug case, updating new_state and |
| 4033 | * aconnector as needed |
| 4034 | */ |
| 4035 | |
| 4036 | if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) { |
| 4037 | |
| 4038 | DRM_DEBUG_DRIVER("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc); |
| 4039 | |
| 4040 | if (!dm_new_crtc_state->stream) { |
				/*
				 * This could happen because of issues with
				 * userspace notification delivery.
				 * In this case userspace tries to set a mode
				 * on a display which is in fact disconnected.
				 * dc_sink is NULL on the aconnector in this
				 * case. We expect a mode reset to come soon.
				 *
				 * This can also happen when an unplug is done
				 * during the resume sequence.
				 *
				 * In this case, we want to pretend we still
				 * have a sink to keep the pipe running so that
				 * hw state is consistent with the sw state.
				 */
| 4056 | DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n", |
| 4057 | __func__, acrtc->base.base.id); |
| 4058 | continue; |
| 4059 | } |
| 4060 | |
| 4061 | |
| 4062 | if (dm_old_crtc_state->stream) |
| 4063 | remove_stream(adev, acrtc, dm_old_crtc_state->stream); |
| 4064 | |
| 4065 | |
			/*
			 * This loop saves the crtcs that had a mode set;
			 * we need to enable vblanks on them once all
			 * resources are acquired in dc after dc_commit_streams.
			 */
| 4071 | |
| 4072 | /*TODO move all this into dm_crtc_state, get rid of |
| 4073 | * new_crtcs array and use old and new atomic states |
| 4074 | * instead |
| 4075 | */ |
| 4076 | new_crtcs[new_crtcs_count] = acrtc; |
| 4077 | new_crtcs_count++; |
| 4078 | |
| 4079 | new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc); |
| 4080 | acrtc->enabled = true; |
| 4081 | acrtc->hw_mode = new_crtc_state->mode; |
| 4082 | crtc->hwmode = new_crtc_state->mode; |
| 4083 | } else if (modereset_required(new_crtc_state)) { |
| 4084 | DRM_DEBUG_DRIVER("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc); |
| 4085 | |
| 4086 | /* i.e. reset mode */ |
| 4087 | if (dm_old_crtc_state->stream) |
| 4088 | remove_stream(adev, acrtc, dm_old_crtc_state->stream); |
| 4089 | } |
| 4090 | } /* for_each_crtc_in_state() */ |
| 4091 | |
	/*
	 * Add the new streams only after the streams they replace have been
	 * removed from the freesync module.
	 */
| 4096 | if (adev->dm.freesync_module) { |
| 4097 | for (i = 0; i < new_crtcs_count; i++) { |
| 4098 | struct amdgpu_dm_connector *aconnector = NULL; |
| 4099 | |
| 4100 | new_crtc_state = drm_atomic_get_new_crtc_state(state, |
| 4101 | &new_crtcs[i]->base); |
| 4102 | dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); |
| 4103 | |
| 4104 | new_stream = dm_new_crtc_state->stream; |
| 4105 | aconnector = amdgpu_dm_find_first_crtc_matching_connector( |
| 4106 | state, |
| 4107 | &new_crtcs[i]->base); |
| 4108 | if (!aconnector) { |
| 4109 | DRM_DEBUG_DRIVER("Atomic commit: Failed to find connector for acrtc id:%d " |
| 4110 | "skipping freesync init\n", |
| 4111 | new_crtcs[i]->crtc_id); |
| 4112 | continue; |
| 4113 | } |
| 4114 | |
| 4115 | mod_freesync_add_stream(adev->dm.freesync_module, |
| 4116 | new_stream, &aconnector->caps); |
| 4117 | } |
| 4118 | } |
| 4119 | |
| 4120 | if (dm_state->context) |
| 4121 | WARN_ON(!dc_commit_state(dm->dc, dm_state->context)); |
| 4122 | |
| 4123 | for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) { |
| 4124 | struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc); |
| 4125 | |
| 4126 | dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); |
| 4127 | |
| 4128 | if (dm_new_crtc_state->stream != NULL) { |
| 4129 | const struct dc_stream_status *status = |
| 4130 | dc_stream_get_status(dm_new_crtc_state->stream); |
| 4131 | |
| 4132 | if (!status) |
| 4133 | DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc); |
| 4134 | else |
| 4135 | acrtc->otg_inst = status->primary_otg_inst; |
| 4136 | } |
| 4137 | } |
| 4138 | |
	/* Handle scaling and underscan changes */
| 4140 | for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) { |
| 4141 | struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state); |
| 4142 | struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state); |
| 4143 | struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc); |
| 4144 | struct dc_stream_status *status = NULL; |
| 4145 | |
| 4146 | if (acrtc) |
| 4147 | new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base); |
| 4148 | |
| 4149 | /* Skip any modesets/resets */ |
| 4150 | if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state)) |
| 4151 | continue; |
| 4152 | |
		/* Skip anything that is not a scaling or underscan change */
| 4154 | if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state)) |
| 4155 | continue; |
| 4156 | |
		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);

		if (!dm_new_crtc_state->stream)
			continue;

		update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
				dm_new_con_state, (struct dc_stream_state *)dm_new_crtc_state->stream);

		status = dc_stream_get_status(dm_new_crtc_state->stream);
		WARN_ON(!status);
		WARN_ON(!status->plane_count);
| 4168 | |
		/* TODO: How does this work with MPO? */
| 4170 | if (!dc_commit_planes_to_stream( |
| 4171 | dm->dc, |
| 4172 | status->plane_states, |
| 4173 | status->plane_count, |
| 4174 | dm_new_crtc_state->stream, |
| 4175 | dm_state->context)) |
| 4176 | dm_error("%s: Failed to update stream scaling!\n", __func__); |
| 4177 | } |
| 4178 | |
| 4179 | for (i = 0; i < new_crtcs_count; i++) { |
		/*
		 * Loop to enable interrupts on the newly arrived crtcs.
		 */
| 4183 | struct amdgpu_crtc *acrtc = new_crtcs[i]; |
| 4184 | |
| 4185 | new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base); |
| 4186 | dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); |
| 4187 | |
| 4188 | if (adev->dm.freesync_module) |
| 4189 | mod_freesync_notify_mode_change( |
| 4190 | adev->dm.freesync_module, &dm_new_crtc_state->stream, 1); |
| 4191 | |
| 4192 | manage_dm_interrupts(adev, acrtc, true); |
| 4193 | } |
| 4194 | |
	/* update planes when needed per crtc */
| 4196 | for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) { |
| 4197 | dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); |
| 4198 | |
| 4199 | if (dm_new_crtc_state->stream) |
| 4200 | amdgpu_dm_commit_planes(state, dev, dm, crtc, &wait_for_vblank); |
| 4201 | } |
| 4202 | |
| 4203 | |
	/*
	 * Send a vblank event for all events not handled in flip and
	 * mark the events as consumed for drm_atomic_helper_commit_hw_done.
	 */
| 4208 | spin_lock_irqsave(&adev->ddev->event_lock, flags); |
| 4209 | for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) { |
| 4210 | |
| 4211 | if (new_crtc_state->event) |
| 4212 | drm_send_event_locked(dev, &new_crtc_state->event->base); |
| 4213 | |
| 4214 | new_crtc_state->event = NULL; |
| 4215 | } |
| 4216 | spin_unlock_irqrestore(&adev->ddev->event_lock, flags); |
| 4217 | |
| 4218 | /* Signal HW programming completion */ |
| 4219 | drm_atomic_helper_commit_hw_done(state); |
| 4220 | |
| 4221 | if (wait_for_vblank) |
| 4222 | drm_atomic_helper_wait_for_vblanks(dev, state); |
| 4223 | |
| 4224 | drm_atomic_helper_cleanup_planes(dev, state); |
| 4225 | } |
| 4226 | |
| 4227 | |
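/*
 * Construct a minimal atomic state containing the connector, its CRTC and its
 * primary plane, force a modeset on the CRTC and commit it to restore the
 * previous display setting.
 */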
| 4228 | static int dm_force_atomic_commit(struct drm_connector *connector) |
| 4229 | { |
| 4230 | int ret = 0; |
| 4231 | struct drm_device *ddev = connector->dev; |
| 4232 | struct drm_atomic_state *state = drm_atomic_state_alloc(ddev); |
| 4233 | struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc); |
| 4234 | struct drm_plane *plane = disconnected_acrtc->base.primary; |
| 4235 | struct drm_connector_state *conn_state; |
| 4236 | struct drm_crtc_state *crtc_state; |
| 4237 | struct drm_plane_state *plane_state; |
| 4238 | |
| 4239 | if (!state) |
| 4240 | return -ENOMEM; |
| 4241 | |
| 4242 | state->acquire_ctx = ddev->mode_config.acquire_ctx; |
| 4243 | |
| 4244 | /* Construct an atomic state to restore previous display setting */ |
| 4245 | |
| 4246 | /* |
| 4247 | * Attach connectors to drm_atomic_state |
| 4248 | */ |
| 4249 | conn_state = drm_atomic_get_connector_state(state, connector); |
| 4250 | |
| 4251 | ret = PTR_ERR_OR_ZERO(conn_state); |
| 4252 | if (ret) |
| 4253 | goto err; |
| 4254 | |
| 4255 | /* Attach crtc to drm_atomic_state*/ |
| 4256 | crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base); |
| 4257 | |
| 4258 | ret = PTR_ERR_OR_ZERO(crtc_state); |
| 4259 | if (ret) |
| 4260 | goto err; |
| 4261 | |
| 4262 | /* force a restore */ |
| 4263 | crtc_state->mode_changed = true; |
| 4264 | |
| 4265 | /* Attach plane to drm_atomic_state */ |
| 4266 | plane_state = drm_atomic_get_plane_state(state, plane); |
| 4267 | |
| 4268 | ret = PTR_ERR_OR_ZERO(plane_state); |
| 4269 | if (ret) |
| 4270 | goto err; |
| 4271 | |
| 4272 | |
| 4273 | /* Call commit internally with the state we just constructed */ |
| 4274 | ret = drm_atomic_commit(state); |
| 4275 | if (!ret) |
| 4276 | return 0; |
| 4277 | |
| 4278 | err: |
| 4279 | DRM_ERROR("Restoring old state failed with %i\n", ret); |
| 4280 | drm_atomic_state_put(state); |
| 4281 | |
| 4282 | return ret; |
| 4283 | } |
| 4284 | |
/*
 * This function handles all cases when a set mode does not come upon hotplug.
 * This includes when the same display is unplugged and then plugged back into
 * the same port, and when we are running without usermode desktop manager
 * support.
 */
| 4290 | void dm_restore_drm_connector_state(struct drm_device *dev, |
| 4291 | struct drm_connector *connector) |
| 4292 | { |
| 4293 | struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); |
| 4294 | struct amdgpu_crtc *disconnected_acrtc; |
| 4295 | struct dm_crtc_state *acrtc_state; |
| 4296 | |
| 4297 | if (!aconnector->dc_sink || !connector->state || !connector->encoder) |
| 4298 | return; |
| 4299 | |
| 4300 | disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc); |
| 4301 | acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state); |
| 4302 | |
| 4303 | if (!disconnected_acrtc || !acrtc_state->stream) |
| 4304 | return; |
| 4305 | |
	/*
	 * If the previous sink is not released and is different from the
	 * current one, we deduce we are in a state where we cannot rely on a
	 * usermode call to turn on the display, so we do it here.
	 */
| 4311 | if (acrtc_state->stream->sink != aconnector->dc_sink) |
| 4312 | dm_force_atomic_commit(&aconnector->base); |
| 4313 | } |
| 4314 | |
/*
 * Grabs all modesetting locks to serialize against any blocking commits, and
 * waits for completion of all non-blocking commits.
 */
| 4319 | static int do_aquire_global_lock(struct drm_device *dev, |
| 4320 | struct drm_atomic_state *state) |
| 4321 | { |
| 4322 | struct drm_crtc *crtc; |
| 4323 | struct drm_crtc_commit *commit; |
| 4324 | long ret; |
| 4325 | |
	/* Adding all modeset locks to acquire_ctx will
	 * ensure that when the framework releases it, the
	 * extra locks we are taking here will also get released.
	 */
| 4330 | ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx); |
| 4331 | if (ret) |
| 4332 | return ret; |
| 4333 | |
| 4334 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { |
| 4335 | spin_lock(&crtc->commit_lock); |
| 4336 | commit = list_first_entry_or_null(&crtc->commit_list, |
| 4337 | struct drm_crtc_commit, commit_entry); |
| 4338 | if (commit) |
| 4339 | drm_crtc_commit_get(commit); |
| 4340 | spin_unlock(&crtc->commit_lock); |
| 4341 | |
| 4342 | if (!commit) |
| 4343 | continue; |
| 4344 | |
		/* Make sure all pending HW programming has completed and all
		 * page flips are done.
		 */
| 4348 | ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ); |
| 4349 | |
| 4350 | if (ret > 0) |
| 4351 | ret = wait_for_completion_interruptible_timeout( |
| 4352 | &commit->flip_done, 10*HZ); |
| 4353 | |
| 4354 | if (ret == 0) |
| 4355 | DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done " |
| 4356 | "timed out\n", crtc->base.id, crtc->name); |
| 4357 | |
| 4358 | drm_crtc_commit_put(commit); |
| 4359 | } |
| 4360 | |
| 4361 | return ret < 0 ? ret : 0; |
| 4362 | } |
| 4363 | |
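/*
 * Walk the CRTC states and, depending on 'enable', either remove the streams
 * of disabled/changed CRTCs from the DC context or create and add new streams
 * for enabled ones, flagging when global lock and validation are needed.
 */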
| 4364 | static int dm_update_crtcs_state(struct dc *dc, |
| 4365 | struct drm_atomic_state *state, |
| 4366 | bool enable, |
| 4367 | bool *lock_and_validation_needed) |
| 4368 | { |
| 4369 | struct drm_crtc *crtc; |
| 4370 | struct drm_crtc_state *old_crtc_state, *new_crtc_state; |
| 4371 | int i; |
| 4372 | struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state; |
| 4373 | struct dm_atomic_state *dm_state = to_dm_atomic_state(state); |
| 4374 | struct dc_stream_state *new_stream; |
| 4375 | int ret = 0; |
| 4376 | |
| 4377 | /*TODO Move this code into dm_crtc_atomic_check once we get rid of dc_validation_set */ |
| 4378 | /* update changed items */ |
| 4379 | for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { |
| 4380 | struct amdgpu_crtc *acrtc = NULL; |
| 4381 | struct amdgpu_dm_connector *aconnector = NULL; |
| 4382 | struct drm_connector_state *new_con_state = NULL; |
| 4383 | struct dm_connector_state *dm_conn_state = NULL; |
| 4384 | |
| 4385 | new_stream = NULL; |
| 4386 | |
| 4387 | dm_old_crtc_state = to_dm_crtc_state(old_crtc_state); |
| 4388 | dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); |
| 4389 | acrtc = to_amdgpu_crtc(crtc); |
| 4390 | |
| 4391 | aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc); |
| 4392 | |
| 4393 | /* TODO This hack should go away */ |
| 4394 | if (aconnector && enable) { |
			/* Make sure a fake sink is created in the plug-in scenario */
| 4396 | new_con_state = drm_atomic_get_connector_state(state, |
| 4397 | &aconnector->base); |
| 4398 | |
| 4399 | if (IS_ERR(new_con_state)) { |
| 4400 | ret = PTR_ERR_OR_ZERO(new_con_state); |
| 4401 | break; |
| 4402 | } |
| 4403 | |
| 4404 | dm_conn_state = to_dm_connector_state(new_con_state); |
| 4405 | |
| 4406 | new_stream = create_stream_for_sink(aconnector, |
| 4407 | &new_crtc_state->mode, |
| 4408 | dm_conn_state); |
| 4409 | |
			/*
			 * We can have no stream on ACTION_SET if a display
			 * was disconnected during S3; in this case it is not
			 * an error, the OS will be updated after detection
			 * and will do the right thing on the next atomic
			 * commit.
			 */
| 4416 | |
| 4417 | if (!new_stream) { |
| 4418 | DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n", |
| 4419 | __func__, acrtc->base.base.id); |
| 4420 | break; |
| 4421 | } |
| 4422 | } |
| 4423 | |
| 4424 | if (dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) && |
| 4425 | dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) { |
| 4426 | |
| 4427 | new_crtc_state->mode_changed = false; |
| 4428 | |
| 4429 | DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d", |
| 4430 | new_crtc_state->mode_changed); |
| 4431 | } |
| 4432 | |
| 4433 | |
| 4434 | if (!drm_atomic_crtc_needs_modeset(new_crtc_state)) |
| 4435 | goto next_crtc; |
| 4436 | |
| 4437 | DRM_DEBUG_DRIVER( |
| 4438 | "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, " |
| 4439 | "planes_changed:%d, mode_changed:%d,active_changed:%d," |
| 4440 | "connectors_changed:%d\n", |
| 4441 | acrtc->crtc_id, |
| 4442 | new_crtc_state->enable, |
| 4443 | new_crtc_state->active, |
| 4444 | new_crtc_state->planes_changed, |
| 4445 | new_crtc_state->mode_changed, |
| 4446 | new_crtc_state->active_changed, |
| 4447 | new_crtc_state->connectors_changed); |
| 4448 | |
| 4449 | /* Remove stream for any changed/disabled CRTC */ |
| 4450 | if (!enable) { |
| 4451 | |
| 4452 | if (!dm_old_crtc_state->stream) |
| 4453 | goto next_crtc; |
| 4454 | |
| 4455 | DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n", |
| 4456 | crtc->base.id); |
| 4457 | |
| 4458 | /* i.e. reset mode */ |
| 4459 | if (dc_remove_stream_from_ctx( |
| 4460 | dc, |
| 4461 | dm_state->context, |
| 4462 | dm_old_crtc_state->stream) != DC_OK) { |
| 4463 | ret = -EINVAL; |
| 4464 | goto fail; |
| 4465 | } |
| 4466 | |
| 4467 | dc_stream_release(dm_old_crtc_state->stream); |
| 4468 | dm_new_crtc_state->stream = NULL; |
| 4469 | |
| 4470 | *lock_and_validation_needed = true; |
| 4471 | |
| 4472 | } else {/* Add stream for any updated/enabled CRTC */ |
			/*
			 * Quick fix to prevent a NULL pointer on new_stream
			 * when added MST connectors are not found in the
			 * existing crtc_state in chained mode.
			 * TODO: need to dig out the root cause of this.
			 */
| 4478 | if (!aconnector || (!aconnector->dc_sink && aconnector->mst_port)) |
| 4479 | goto next_crtc; |
| 4480 | |
| 4481 | if (modereset_required(new_crtc_state)) |
| 4482 | goto next_crtc; |
| 4483 | |
| 4484 | if (modeset_required(new_crtc_state, new_stream, |
| 4485 | dm_old_crtc_state->stream)) { |
| 4486 | |
| 4487 | WARN_ON(dm_new_crtc_state->stream); |
| 4488 | |
| 4489 | dm_new_crtc_state->stream = new_stream; |
| 4490 | dc_stream_retain(new_stream); |
| 4491 | |
| 4492 | DRM_DEBUG_DRIVER("Enabling DRM crtc: %d\n", |
| 4493 | crtc->base.id); |
| 4494 | |
| 4495 | if (dc_add_stream_to_ctx( |
| 4496 | dc, |
| 4497 | dm_state->context, |
| 4498 | dm_new_crtc_state->stream) != DC_OK) { |
| 4499 | ret = -EINVAL; |
| 4500 | goto fail; |
| 4501 | } |
| 4502 | |
| 4503 | *lock_and_validation_needed = true; |
| 4504 | } |
| 4505 | } |
| 4506 | |
| 4507 | next_crtc: |
| 4508 | /* Release extra reference */ |
| 4509 | if (new_stream) |
| 4510 | dc_stream_release(new_stream); |
| 4511 | } |
| 4512 | |
| 4513 | return ret; |
| 4514 | |
| 4515 | fail: |
| 4516 | if (new_stream) |
| 4517 | dc_stream_release(new_stream); |
| 4518 | return ret; |
| 4519 | } |
| 4520 | |
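/*
 * Walk the plane states and, depending on 'enable', either remove the
 * corresponding DC plane states from the DC context or create, fill and add
 * new ones, flagging when global lock and validation are needed.
 */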
| 4521 | static int dm_update_planes_state(struct dc *dc, |
| 4522 | struct drm_atomic_state *state, |
| 4523 | bool enable, |
| 4524 | bool *lock_and_validation_needed) |
| 4525 | { |
| 4526 | struct drm_crtc *new_plane_crtc, *old_plane_crtc; |
| 4527 | struct drm_crtc_state *old_crtc_state, *new_crtc_state; |
| 4528 | struct drm_plane *plane; |
| 4529 | struct drm_plane_state *old_plane_state, *new_plane_state; |
| 4530 | struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state; |
| 4531 | struct dm_atomic_state *dm_state = to_dm_atomic_state(state); |
| 4532 | struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state; |
	int i;
| 4534 | /* TODO return page_flip_needed() function */ |
| 4535 | bool pflip_needed = !state->allow_modeset; |
| 4536 | int ret = 0; |
| 4537 | |
| 4538 | if (pflip_needed) |
| 4539 | return ret; |
| 4540 | |
| 4541 | /* Add new planes */ |
| 4542 | for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) { |
| 4543 | new_plane_crtc = new_plane_state->crtc; |
| 4544 | old_plane_crtc = old_plane_state->crtc; |
| 4545 | dm_new_plane_state = to_dm_plane_state(new_plane_state); |
| 4546 | dm_old_plane_state = to_dm_plane_state(old_plane_state); |
| 4547 | |
| 4548 | /*TODO Implement atomic check for cursor plane */ |
| 4549 | if (plane->type == DRM_PLANE_TYPE_CURSOR) |
| 4550 | continue; |
| 4551 | |
| 4552 | /* Remove any changed/removed planes */ |
| 4553 | if (!enable) { |
| 4554 | |
| 4555 | if (!old_plane_crtc) |
| 4556 | continue; |
| 4557 | |
| 4558 | old_crtc_state = drm_atomic_get_old_crtc_state( |
| 4559 | state, old_plane_crtc); |
| 4560 | dm_old_crtc_state = to_dm_crtc_state(old_crtc_state); |
| 4561 | |
| 4562 | if (!dm_old_crtc_state->stream) |
| 4563 | continue; |
| 4564 | |
| 4565 | DRM_DEBUG_DRIVER("Disabling DRM plane: %d on DRM crtc %d\n", |
| 4566 | plane->base.id, old_plane_crtc->base.id); |
| 4567 | |
| 4568 | if (!dc_remove_plane_from_context( |
| 4569 | dc, |
| 4570 | dm_old_crtc_state->stream, |
| 4571 | dm_old_plane_state->dc_state, |
| 4572 | dm_state->context)) { |
| 4573 | |
				ret = -EINVAL;
| 4575 | return ret; |
| 4576 | } |
| 4577 | |
| 4578 | |
| 4579 | dc_plane_state_release(dm_old_plane_state->dc_state); |
| 4580 | dm_new_plane_state->dc_state = NULL; |
| 4581 | |
| 4582 | *lock_and_validation_needed = true; |
| 4583 | |
| 4584 | } else { /* Add new planes */ |
| 4585 | |
| 4586 | if (drm_atomic_plane_disabling(plane->state, new_plane_state)) |
| 4587 | continue; |
| 4588 | |
| 4589 | if (!new_plane_crtc) |
| 4590 | continue; |
| 4591 | |
| 4592 | new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc); |
| 4593 | dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); |
| 4594 | |
| 4595 | if (!dm_new_crtc_state->stream) |
| 4596 | continue; |
| 4597 | |
| 4598 | |
| 4599 | WARN_ON(dm_new_plane_state->dc_state); |
| 4600 | |
| 4601 | dm_new_plane_state->dc_state = dc_create_plane_state(dc); |
| 4602 | |
| 4603 | DRM_DEBUG_DRIVER("Enabling DRM plane: %d on DRM crtc %d\n", |
| 4604 | plane->base.id, new_plane_crtc->base.id); |
| 4605 | |
| 4606 | if (!dm_new_plane_state->dc_state) { |
| 4607 | ret = -EINVAL; |
| 4608 | return ret; |
| 4609 | } |
| 4610 | |
| 4611 | ret = fill_plane_attributes( |
| 4612 | new_plane_crtc->dev->dev_private, |
| 4613 | dm_new_plane_state->dc_state, |
| 4614 | new_plane_state, |
| 4615 | new_crtc_state, |
| 4616 | false); |
| 4617 | if (ret) |
| 4618 | return ret; |
| 4619 | |
| 4620 | |
| 4621 | if (!dc_add_plane_to_context( |
| 4622 | dc, |
| 4623 | dm_new_crtc_state->stream, |
| 4624 | dm_new_plane_state->dc_state, |
| 4625 | dm_state->context)) { |
| 4626 | |
| 4627 | ret = -EINVAL; |
| 4628 | return ret; |
| 4629 | } |
| 4630 | |
| 4631 | *lock_and_validation_needed = true; |
| 4632 | } |
| 4633 | } |
| 4634 | |
| 4635 | |
| 4636 | return ret; |
| 4637 | } |
| 4638 | |
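/*
 * Atomic check: validate the modeset with the DRM helpers, build a DC state
 * from the current one, remove/add streams and planes to mirror the new
 * atomic state, and validate the resulting global DC state when a full
 * update is required.
 */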
| 4639 | static int amdgpu_dm_atomic_check(struct drm_device *dev, |
| 4640 | struct drm_atomic_state *state) |
| 4641 | { |
| 4642 | int i; |
| 4643 | int ret; |
| 4644 | struct amdgpu_device *adev = dev->dev_private; |
| 4645 | struct dc *dc = adev->dm.dc; |
| 4646 | struct dm_atomic_state *dm_state = to_dm_atomic_state(state); |
| 4647 | struct drm_connector *connector; |
| 4648 | struct drm_connector_state *old_con_state, *new_con_state; |
| 4649 | struct drm_crtc *crtc; |
| 4650 | struct drm_crtc_state *old_crtc_state, *new_crtc_state; |
| 4651 | |
	/*
	 * This bool will be set to true for any modeset/reset
	 * or plane update that implies a non-fast surface update.
	 */
| 4656 | bool lock_and_validation_needed = false; |
| 4657 | |
| 4658 | ret = drm_atomic_helper_check_modeset(dev, state); |
| 4659 | if (ret) { |
| 4660 | DRM_ERROR("Atomic state validation failed with error :%d !\n", ret); |
| 4661 | return ret; |
| 4662 | } |
| 4663 | |
	/*
	 * legacy_cursor_update should be made false for SoCs that have
	 * a dedicated hardware cursor plane in amdgpu_dm_atomic_commit();
	 * otherwise, for a software cursor plane,
	 * we should not add it to the list of affected planes.
	 */
| 4670 | if (state->legacy_cursor_update) { |
| 4671 | for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) { |
| 4672 | if (new_crtc_state->color_mgmt_changed) { |
| 4673 | ret = drm_atomic_add_affected_planes(state, crtc); |
| 4674 | if (ret) |
| 4675 | goto fail; |
| 4676 | } |
| 4677 | } |
| 4678 | } else { |
| 4679 | for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { |
| 4680 | if (!drm_atomic_crtc_needs_modeset(new_crtc_state)) |
| 4681 | continue; |
| 4682 | |
| 4683 | if (!new_crtc_state->enable) |
| 4684 | continue; |
| 4685 | |
| 4686 | ret = drm_atomic_add_affected_connectors(state, crtc); |
| 4687 | if (ret) |
| 4688 | goto fail;
| 4689 | |
| 4690 | ret = drm_atomic_add_affected_planes(state, crtc); |
| 4691 | if (ret) |
| 4692 | goto fail; |
| 4693 | } |
| 4694 | } |
| 4695 | |
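|      | /*
|      | * Build the DC validation context: start from a copy of the current
|      | * resource state and mutate it below according to this atomic request.
|      | */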
| 4696 | dm_state->context = dc_create_state(); |
| 4697 | ASSERT(dm_state->context); |
| 4698 | dc_resource_state_copy_construct_current(dc, dm_state->context); |
| 4699 | |
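|      | /*
|      | * The ordering of the steps below matters: planes are removed from
|      | * the context before their streams are disabled, and streams are
|      | * enabled before new or modified planes are attached to them.
|      | */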
| 4700 | /* Remove existing planes if they are modified */
| 4701 | ret = dm_update_planes_state(dc, state, false, &lock_and_validation_needed); |
| 4702 | if (ret) { |
| 4703 | goto fail; |
| 4704 | } |
| 4705 | |
| 4706 | /* Disable all crtcs which require disable */ |
| 4707 | ret = dm_update_crtcs_state(dc, state, false, &lock_and_validation_needed); |
| 4708 | if (ret) { |
| 4709 | goto fail; |
| 4710 | } |
| 4711 | |
| 4712 | /* Enable all crtcs which require enable */ |
| 4713 | ret = dm_update_crtcs_state(dc, state, true, &lock_and_validation_needed); |
| 4714 | if (ret) { |
| 4715 | goto fail; |
| 4716 | } |
| 4717 | |
| 4718 | /* Add new/modified planes */ |
| 4719 | ret = dm_update_planes_state(dc, state, true, &lock_and_validation_needed); |
| 4720 | if (ret) { |
| 4721 | goto fail; |
| 4722 | } |
| 4723 | |
| 4724 | /* Run this here since we want to validate the streams we created */ |
| 4725 | ret = drm_atomic_helper_check_planes(dev, state); |
| 4726 | if (ret) |
| 4727 | goto fail; |
| 4728 | |
| 4729 | /* Check scaling and underscan changes */
| 4730 | /* TODO: Scaling changes validation was removed due to the inability to
| 4731 | * commit a new stream into the context without causing a full reset.
| 4732 | * Need to decide how to handle this.
| 4733 | */
| 4734 | for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) { |
| 4735 | struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state); |
| 4736 | struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state); |
| 4737 | struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc); |
| 4738 | |
| 4739 | /* Skip any modesets/resets */ |
| 4740 | if (!acrtc || drm_atomic_crtc_needs_modeset( |
| 4741 | drm_atomic_get_new_crtc_state(state, &acrtc->base))) |
| 4742 | continue; |
| 4743 | |
| 4744 | /* Skip anything that is not a scaling or underscan change */
| 4745 | if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state)) |
| 4746 | continue; |
| 4747 | |
| 4748 | lock_and_validation_needed = true; |
| 4749 | } |
| 4750 | |
| 4751 | /*
| 4752 | * For the full update case, when removing/adding/updating
| 4753 | * streams on one CRTC while flipping on another CRTC,
| 4754 | * acquiring the global lock guarantees that any such full
| 4755 | * update commit will wait for completion of any outstanding
| 4756 | * flip using DRM's synchronization events.
| 4757 | */
| 4760 | |
| 4761 | if (lock_and_validation_needed) { |
| 4762 | |
| 4763 | ret = do_aquire_global_lock(dev, state); |
| 4764 | if (ret) |
| 4765 | goto fail; |
| 4766 | |
| 4767 | if (dc_validate_global_state(dc, dm_state->context) != DC_OK) { |
| 4768 | ret = -EINVAL; |
| 4769 | goto fail; |
| 4770 | } |
| 4771 | } |
| 4772 | |
| 4773 | /* Must be success */ |
| 4774 | WARN_ON(ret); |
| 4775 | return ret; |
| 4776 | |
| 4777 | fail: |
| 4778 | if (ret == -EDEADLK) |
| 4779 | DRM_DEBUG_DRIVER("Atomic check stopped due to deadlock.\n");
| 4780 | else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
| 4781 | DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
| 4782 | else
| 4783 | DRM_ERROR("Atomic check failed with err: %d\n", ret);
| 4784 | |
| 4785 | return ret; |
| 4786 | } |
| 4787 | |
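|      | /*
|      | * Check the sink's DPCD for the DP_MSA_TIMING_PAR_IGNORED bit in the
|      | * DP_DOWN_STREAM_PORT_COUNT register; a sink that can ignore the MSA
|      | * timing parameters is a prerequisite for variable refresh (FreeSync)
|      | * over DP.
|      | */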
| 4788 | static bool is_dp_capable_without_timing_msa(struct dc *dc, |
| 4789 | struct amdgpu_dm_connector *amdgpu_dm_connector) |
| 4790 | { |
| 4791 | uint8_t dpcd_data; |
| 4792 | bool capable = false; |
| 4793 | |
| 4794 | if (amdgpu_dm_connector->dc_link && |
| 4795 | dm_helpers_dp_read_dpcd( |
| 4796 | NULL, |
| 4797 | amdgpu_dm_connector->dc_link, |
| 4798 | DP_DOWN_STREAM_PORT_COUNT, |
| 4799 | &dpcd_data, |
| 4800 | sizeof(dpcd_data))) { |
| 4801 | capable = (dpcd_data & DP_MSA_TIMING_PAR_IGNORED) ? true : false;
| 4802 | } |
| 4803 | |
| 4804 | return capable; |
| 4805 | } |
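|      |
|      | /*
|      | * Scan the EDID detailed timing descriptors for a monitor range
|      | * (continuous frequency) block and, for DP/eDP sinks that can ignore
|      | * MSA timing, record the min/max vertical refresh rates used to
|      | * report FreeSync capability.
|      | */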
| 4806 | void amdgpu_dm_add_sink_to_freesync_module(struct drm_connector *connector, |
| 4807 | struct edid *edid) |
| 4808 | { |
| 4809 | int i; |
| 4810 | uint64_t val_capable; |
| 4811 | bool edid_check_required; |
| 4812 | struct detailed_timing *timing; |
| 4813 | struct detailed_non_pixel *data; |
| 4814 | struct detailed_data_monitor_range *range; |
| 4815 | struct amdgpu_dm_connector *amdgpu_dm_connector = |
| 4816 | to_amdgpu_dm_connector(connector); |
| 4817 | |
| 4818 | struct drm_device *dev = connector->dev; |
| 4819 | struct amdgpu_device *adev = dev->dev_private; |
| 4820 | |
| 4821 | edid_check_required = false; |
| 4822 | if (!amdgpu_dm_connector->dc_sink) { |
| 4823 | DRM_ERROR("dc_sink NULL, could not add free_sync module.\n"); |
| 4824 | return; |
| 4825 | } |
| 4826 | if (!adev->dm.freesync_module) |
| 4827 | return; |
| 4828 | /* |
| 4829 | * If the EDID is non-NULL, restrict FreeSync to DP and eDP sinks only.
| 4830 | */ |
| 4831 | if (edid) { |
| 4832 | if (amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT |
| 4833 | || amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_EDP) { |
| 4834 | edid_check_required = is_dp_capable_without_timing_msa( |
| 4835 | adev->dm.dc, |
| 4836 | amdgpu_dm_connector); |
| 4837 | } |
| 4838 | } |
| 4839 | val_capable = 0; |
| 4840 | if (edid_check_required && (edid->version > 1 ||
| 4841 | (edid->version == 1 && edid->revision > 1))) { |
| 4842 | for (i = 0; i < 4; i++) { |
| 4843 | |
| 4844 | timing = &edid->detailed_timings[i]; |
| 4845 | data = &timing->data.other_data; |
| 4846 | range = &data->data.range; |
| 4847 | /* |
| 4848 | * Check if monitor has continuous frequency mode |
| 4849 | */ |
| 4850 | if (data->type != EDID_DETAIL_MONITOR_RANGE) |
| 4851 | continue; |
| 4852 | /*
| 4853 | * Check for the range-limits-only flag. If flags == 1,
| 4854 | * no additional timing information is provided.
| 4855 | * Default GTF, GTF secondary curve and CVT are not
| 4856 | * supported.
| 4857 | */
| 4858 | if (range->flags != 1) |
| 4859 | continue; |
| 4860 | |
| 4861 | amdgpu_dm_connector->min_vfreq = range->min_vfreq; |
| 4862 | amdgpu_dm_connector->max_vfreq = range->max_vfreq; |
| 4863 | amdgpu_dm_connector->pixel_clock_mhz = |
| 4864 | range->pixel_clock_mhz * 10; |
| 4865 | break; |
| 4866 | } |
| 4867 | |
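|      | /*
|      | * Only report FreeSync support when the refresh range found in the
|      | * EDID spans more than 10 Hz.
|      | */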
| 4868 | if (amdgpu_dm_connector->max_vfreq - |
| 4869 | amdgpu_dm_connector->min_vfreq > 10) { |
| 4870 | amdgpu_dm_connector->caps.supported = true; |
| 4871 | amdgpu_dm_connector->caps.min_refresh_in_micro_hz = |
| 4872 | amdgpu_dm_connector->min_vfreq * 1000000; |
| 4873 | amdgpu_dm_connector->caps.max_refresh_in_micro_hz = |
| 4874 | amdgpu_dm_connector->max_vfreq * 1000000; |
| 4875 | val_capable = 1; |
| 4876 | } |
| 4877 | } |
| 4878 | |
| 4879 | /* |
| 4880 | * TODO figure out how to notify user-mode or DRM of freesync caps |
| 4881 | * once we figure out how to deal with freesync in an upstreamable |
| 4882 | * fashion |
| 4883 | */ |
| 4884 | |
| 4885 | } |
| 4886 | |
| 4887 | void amdgpu_dm_remove_sink_from_freesync_module(struct drm_connector *connector) |
| 4888 | { |
| 4889 | /* |
| 4890 | * TODO fill in once we figure out how to deal with freesync in |
| 4891 | * an upstreamable fashion |
| 4892 | */ |
| 4893 | } |