drm/amd/display: Reduce stack size for dml31 UseMinimumDCFCLK
[linux-2.6-block.git] / drivers / gpu / drm / amd / display / amdgpu_dm / amdgpu_dm.c
1/*
2 * Copyright 2015 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26/* The caprices of the preprocessor require that this be declared right here */
27#define CREATE_TRACE_POINTS
28
29#include "dm_services_types.h"
30#include "dc.h"
31#include "dc_link_dp.h"
32#include "link_enc_cfg.h"
33#include "dc/inc/core_types.h"
34#include "dal_asic_id.h"
35#include "dmub/dmub_srv.h"
36#include "dc/inc/hw/dmcu.h"
37#include "dc/inc/hw/abm.h"
38#include "dc/dc_dmub_srv.h"
39#include "dc/dc_edid_parser.h"
40#include "dc/dc_stat.h"
41#include "amdgpu_dm_trace.h"
42
43#include "vid.h"
44#include "amdgpu.h"
45#include "amdgpu_display.h"
46#include "amdgpu_ucode.h"
47#include "atom.h"
48#include "amdgpu_dm.h"
49#ifdef CONFIG_DRM_AMD_DC_HDCP
50#include "amdgpu_dm_hdcp.h"
51#include <drm/drm_hdcp.h>
52#endif
53#include "amdgpu_pm.h"
54#include "amdgpu_atombios.h"
55
56#include "amd_shared.h"
57#include "amdgpu_dm_irq.h"
58#include "dm_helpers.h"
59#include "amdgpu_dm_mst_types.h"
60#if defined(CONFIG_DEBUG_FS)
61#include "amdgpu_dm_debugfs.h"
62#endif
63#include "amdgpu_dm_psr.h"
64
65#include "ivsrcid/ivsrcid_vislands30.h"
66
67#include "i2caux_interface.h"
68#include <linux/module.h>
69#include <linux/moduleparam.h>
70#include <linux/types.h>
71#include <linux/pm_runtime.h>
72#include <linux/pci.h>
73#include <linux/firmware.h>
74#include <linux/component.h>
75
76#include <drm/drm_atomic.h>
77#include <drm/drm_atomic_uapi.h>
78#include <drm/drm_atomic_helper.h>
79#include <drm/drm_dp_mst_helper.h>
80#include <drm/drm_fb_helper.h>
81#include <drm/drm_fourcc.h>
82#include <drm/drm_edid.h>
83#include <drm/drm_vblank.h>
84#include <drm/drm_audio_component.h>
85
86#if defined(CONFIG_DRM_AMD_DC_DCN)
87#include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"
88
89#include "dcn/dcn_1_0_offset.h"
90#include "dcn/dcn_1_0_sh_mask.h"
91#include "soc15_hw_ip.h"
92#include "vega10_ip_offset.h"
93
94#include "soc15_common.h"
95#endif
96
97#include "modules/inc/mod_freesync.h"
98#include "modules/power/power_helpers.h"
99#include "modules/inc/mod_info_packet.h"
100
101#define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
102MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);
103#define FIRMWARE_SIENNA_CICHLID_DMUB "amdgpu/sienna_cichlid_dmcub.bin"
104MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID_DMUB);
105#define FIRMWARE_NAVY_FLOUNDER_DMUB "amdgpu/navy_flounder_dmcub.bin"
106MODULE_FIRMWARE(FIRMWARE_NAVY_FLOUNDER_DMUB);
107#define FIRMWARE_GREEN_SARDINE_DMUB "amdgpu/green_sardine_dmcub.bin"
108MODULE_FIRMWARE(FIRMWARE_GREEN_SARDINE_DMUB);
109#define FIRMWARE_VANGOGH_DMUB "amdgpu/vangogh_dmcub.bin"
110MODULE_FIRMWARE(FIRMWARE_VANGOGH_DMUB);
111#define FIRMWARE_DIMGREY_CAVEFISH_DMUB "amdgpu/dimgrey_cavefish_dmcub.bin"
112MODULE_FIRMWARE(FIRMWARE_DIMGREY_CAVEFISH_DMUB);
113#define FIRMWARE_BEIGE_GOBY_DMUB "amdgpu/beige_goby_dmcub.bin"
114MODULE_FIRMWARE(FIRMWARE_BEIGE_GOBY_DMUB);
115#define FIRMWARE_YELLOW_CARP_DMUB "amdgpu/yellow_carp_dmcub.bin"
116MODULE_FIRMWARE(FIRMWARE_YELLOW_CARP_DMUB);
117
118#define FIRMWARE_RAVEN_DMCU "amdgpu/raven_dmcu.bin"
119MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);
120
121#define FIRMWARE_NAVI12_DMCU "amdgpu/navi12_dmcu.bin"
122MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);
123
124/* Number of bytes in PSP header for firmware. */
125#define PSP_HEADER_BYTES 0x100
126
127/* Number of bytes in PSP footer for firmware. */
128#define PSP_FOOTER_BYTES 0x100
129
130/**
131 * DOC: overview
132 *
133 * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
134 * **dm**) sits between DRM and DC. It acts as a liaison, converting DRM
135 * requests into DC requests, and DC responses into DRM responses.
136 *
137 * The root control structure is &struct amdgpu_display_manager.
138 */
139
140/* basic init/fini API */
141static int amdgpu_dm_init(struct amdgpu_device *adev);
142static void amdgpu_dm_fini(struct amdgpu_device *adev);
143static bool is_freesync_video_mode(const struct drm_display_mode *mode, struct amdgpu_dm_connector *aconnector);
144
145static enum drm_mode_subconnector get_subconnector_type(struct dc_link *link)
146{
147 switch (link->dpcd_caps.dongle_type) {
148 case DISPLAY_DONGLE_NONE:
149 return DRM_MODE_SUBCONNECTOR_Native;
150 case DISPLAY_DONGLE_DP_VGA_CONVERTER:
151 return DRM_MODE_SUBCONNECTOR_VGA;
152 case DISPLAY_DONGLE_DP_DVI_CONVERTER:
153 case DISPLAY_DONGLE_DP_DVI_DONGLE:
154 return DRM_MODE_SUBCONNECTOR_DVID;
155 case DISPLAY_DONGLE_DP_HDMI_CONVERTER:
156 case DISPLAY_DONGLE_DP_HDMI_DONGLE:
157 return DRM_MODE_SUBCONNECTOR_HDMIA;
158 case DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE:
159 default:
160 return DRM_MODE_SUBCONNECTOR_Unknown;
161 }
162}
163
164static void update_subconnector_property(struct amdgpu_dm_connector *aconnector)
165{
166 struct dc_link *link = aconnector->dc_link;
167 struct drm_connector *connector = &aconnector->base;
168 enum drm_mode_subconnector subconnector = DRM_MODE_SUBCONNECTOR_Unknown;
169
170 if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
171 return;
172
173 if (aconnector->dc_sink)
174 subconnector = get_subconnector_type(link);
175
176 drm_object_property_set_value(&connector->base,
177 connector->dev->mode_config.dp_subconnector_property,
178 subconnector);
179}
180
181/*
182 * initializes drm_device display related structures, based on the information
183 * provided by DAL. The drm strcutures are: drm_crtc, drm_connector,
184 * drm_encoder, drm_mode_config
185 *
186 * Returns 0 on success
187 */
188static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
189/* removes and deallocates the drm structures, created by the above function */
190static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);
191
192static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
193 struct drm_plane *plane,
194 unsigned long possible_crtcs,
195 const struct dc_plane_cap *plane_cap);
196static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
197 struct drm_plane *plane,
198 uint32_t link_index);
199static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
200 struct amdgpu_dm_connector *amdgpu_dm_connector,
201 uint32_t link_index,
202 struct amdgpu_encoder *amdgpu_encoder);
203static int amdgpu_dm_encoder_init(struct drm_device *dev,
204 struct amdgpu_encoder *aencoder,
205 uint32_t link_index);
206
207static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);
208
209static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);
210
211static int amdgpu_dm_atomic_check(struct drm_device *dev,
212 struct drm_atomic_state *state);
213
214static void handle_cursor_update(struct drm_plane *plane,
215 struct drm_plane_state *old_plane_state);
216
217static const struct drm_format_info *
218amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd);
219
220static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector);
221static void handle_hpd_rx_irq(void *param);
222
223static bool
224is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
225 struct drm_crtc_state *new_crtc_state);
226/*
227 * dm_vblank_get_counter
228 *
229 * @brief
230 * Get counter for number of vertical blanks
231 *
232 * @param
233 * struct amdgpu_device *adev - [in] desired amdgpu device
234 * int crtc - [in] which CRTC to get the counter from
235 *
236 * @return
237 * Counter for vertical blanks
238 */
239static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
240{
241 if (crtc >= adev->mode_info.num_crtc)
242 return 0;
243 else {
244 struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
245
246 if (acrtc->dm_irq_params.stream == NULL) {
247 DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
248 crtc);
249 return 0;
250 }
251
252 return dc_stream_get_vblank_counter(acrtc->dm_irq_params.stream);
253 }
254}
255
256static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
257 u32 *vbl, u32 *position)
258{
259 uint32_t v_blank_start, v_blank_end, h_position, v_position;
260
261 if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
262 return -EINVAL;
263 else {
264 struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
265
266 if (acrtc->dm_irq_params.stream == NULL) {
267 DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
268 crtc);
269 return 0;
270 }
271
272 /*
273 * TODO rework base driver to use values directly.
274 * for now parse it back into reg-format
275 */
276 dc_stream_get_scanoutpos(acrtc->dm_irq_params.stream,
277 &v_blank_start,
278 &v_blank_end,
279 &h_position,
280 &v_position);
281
282 *position = v_position | (h_position << 16);
283 *vbl = v_blank_start | (v_blank_end << 16);
284 }
285
286 return 0;
287}
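
/*
 * Illustrative sketch, not used by the driver: how a consumer of the packed
 * words produced by dm_crtc_get_scanoutpos() could unpack them again. The
 * helper name is hypothetical and the 16-bit high/low split simply mirrors
 * the packing done above.
 */
static inline void dm_scanoutpos_unpack_example(u32 position, u32 vbl,
						 u32 *hpos, u32 *vpos,
						 u32 *vbl_start, u32 *vbl_end)
{
	*vpos = position & 0xffff;	/* vertical position (low 16 bits) */
	*hpos = position >> 16;		/* horizontal position (high 16 bits) */
	*vbl_start = vbl & 0xffff;	/* vblank start (low 16 bits) */
	*vbl_end = vbl >> 16;		/* vblank end (high 16 bits) */
}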
288
289static bool dm_is_idle(void *handle)
290{
291 /* XXX todo */
292 return true;
293}
294
295static int dm_wait_for_idle(void *handle)
296{
297 /* XXX todo */
298 return 0;
299}
300
301static bool dm_check_soft_reset(void *handle)
302{
303 return false;
304}
305
306static int dm_soft_reset(void *handle)
307{
308 /* XXX todo */
309 return 0;
310}
311
312static struct amdgpu_crtc *
313get_crtc_by_otg_inst(struct amdgpu_device *adev,
314 int otg_inst)
315{
316 struct drm_device *dev = adev_to_drm(adev);
317 struct drm_crtc *crtc;
318 struct amdgpu_crtc *amdgpu_crtc;
319
320 if (WARN_ON(otg_inst == -1))
321 return adev->mode_info.crtcs[0];
322
323 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
324 amdgpu_crtc = to_amdgpu_crtc(crtc);
325
326 if (amdgpu_crtc->otg_inst == otg_inst)
327 return amdgpu_crtc;
328 }
329
330 return NULL;
331}
332
333static inline bool amdgpu_dm_vrr_active_irq(struct amdgpu_crtc *acrtc)
334{
335 return acrtc->dm_irq_params.freesync_config.state ==
336 VRR_STATE_ACTIVE_VARIABLE ||
337 acrtc->dm_irq_params.freesync_config.state ==
338 VRR_STATE_ACTIVE_FIXED;
339}
340
341static inline bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state)
342{
343 return dm_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE ||
344 dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
345}
346
347static inline bool is_dc_timing_adjust_needed(struct dm_crtc_state *old_state,
348 struct dm_crtc_state *new_state)
349{
350 if (new_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED)
351 return true;
352 else if (amdgpu_dm_vrr_active(old_state) != amdgpu_dm_vrr_active(new_state))
353 return true;
354 else
355 return false;
356}
357
358/**
359 * dm_pflip_high_irq() - Handle pageflip interrupt
360 * @interrupt_params: interrupt parameters, used to look up the CRTC instance
361 *
362 * Handles the pageflip interrupt by notifying all interested parties
363 * that the pageflip has been completed.
364 */
365static void dm_pflip_high_irq(void *interrupt_params)
366{
367 struct amdgpu_crtc *amdgpu_crtc;
368 struct common_irq_params *irq_params = interrupt_params;
369 struct amdgpu_device *adev = irq_params->adev;
370 unsigned long flags;
371 struct drm_pending_vblank_event *e;
372 uint32_t vpos, hpos, v_blank_start, v_blank_end;
373 bool vrr_active;
374
375 amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);
376
377 /* IRQ could occur when in initial stage */
378 /* TODO work and BO cleanup */
379 if (amdgpu_crtc == NULL) {
380 DC_LOG_PFLIP("CRTC is null, returning.\n");
381 return;
382 }
383
384 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
385
386 if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
387 DC_LOG_PFLIP("amdgpu_crtc->pflip_status = %d != AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p]\n",
388 amdgpu_crtc->pflip_status,
389 AMDGPU_FLIP_SUBMITTED,
390 amdgpu_crtc->crtc_id,
391 amdgpu_crtc);
392 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
393 return;
394 }
395
396 /* page flip completed. */
397 e = amdgpu_crtc->event;
398 amdgpu_crtc->event = NULL;
399
400 WARN_ON(!e);
401
402 vrr_active = amdgpu_dm_vrr_active_irq(amdgpu_crtc);
403
404 /* Fixed refresh rate, or VRR scanout position outside front-porch? */
405 if (!vrr_active ||
406 !dc_stream_get_scanoutpos(amdgpu_crtc->dm_irq_params.stream, &v_blank_start,
407 &v_blank_end, &hpos, &vpos) ||
408 (vpos < v_blank_start)) {
409 /* Update to correct count and vblank timestamp if racing with
410 * vblank irq. This also updates to the correct vblank timestamp
411 * even in VRR mode, as scanout is past the front-porch atm.
412 */
413 drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);
414
415 /* Wake up userspace by sending the pageflip event with proper
416 * count and timestamp of vblank of flip completion.
417 */
418 if (e) {
419 drm_crtc_send_vblank_event(&amdgpu_crtc->base, e);
420
421 /* Event sent, so done with vblank for this flip */
422 drm_crtc_vblank_put(&amdgpu_crtc->base);
423 }
424 } else if (e) {
425 /* VRR active and inside front-porch: vblank count and
426 * timestamp for pageflip event will only be up to date after
427 * drm_crtc_handle_vblank() has been executed from late vblank
428 * irq handler after start of back-porch (vline 0). We queue the
429 * pageflip event for send-out by drm_crtc_handle_vblank() with
430 * updated timestamp and count, once it runs after us.
431 *
432 * We need to open-code this instead of using the helper
433 * drm_crtc_arm_vblank_event(), as that helper would
434 * call drm_crtc_accurate_vblank_count(), which we must
435 * not call in VRR mode while we are in front-porch!
436 */
437
438 /* sequence will be replaced by real count during send-out. */
439 e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
440 e->pipe = amdgpu_crtc->crtc_id;
441
442 list_add_tail(&e->base.link, &adev_to_drm(adev)->vblank_event_list);
443 e = NULL;
444 }
445
446 /* Keep track of vblank of this flip for flip throttling. We use the
447 * cooked hw counter, as that one is incremented at the start of this vblank
448 * of pageflip completion, so last_flip_vblank is the forbidden count
449 * for queueing new pageflips if vsync + VRR is enabled.
450 */
451 amdgpu_crtc->dm_irq_params.last_flip_vblank =
452 amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base);
453
454 amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
455 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
456
457 DC_LOG_PFLIP("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
458 amdgpu_crtc->crtc_id, amdgpu_crtc,
459 vrr_active, (int) !e);
460}
461
462static void dm_vupdate_high_irq(void *interrupt_params)
463{
464 struct common_irq_params *irq_params = interrupt_params;
465 struct amdgpu_device *adev = irq_params->adev;
466 struct amdgpu_crtc *acrtc;
467 struct drm_device *drm_dev;
468 struct drm_vblank_crtc *vblank;
469 ktime_t frame_duration_ns, previous_timestamp;
470 unsigned long flags;
471 int vrr_active;
472
473 acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);
474
475 if (acrtc) {
476 vrr_active = amdgpu_dm_vrr_active_irq(acrtc);
477 drm_dev = acrtc->base.dev;
478 vblank = &drm_dev->vblank[acrtc->base.index];
479 previous_timestamp = atomic64_read(&irq_params->previous_timestamp);
480 frame_duration_ns = vblank->time - previous_timestamp;
481
482 if (frame_duration_ns > 0) {
483 trace_amdgpu_refresh_rate_track(acrtc->base.index,
484 frame_duration_ns,
485 ktime_divns(NSEC_PER_SEC, frame_duration_ns));
486 atomic64_set(&irq_params->previous_timestamp, vblank->time);
487 }
488
489 DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d\n",
490 acrtc->crtc_id,
491 vrr_active);
492
493 /* Core vblank handling is done here after the end of front-porch in
494 * VRR mode, as vblank timestamping only gives valid results once
495 * scanout is past the front-porch. This will also deliver
496 * page-flip completion events that have been queued to us
497 * if a pageflip happened inside front-porch.
498 */
499 if (vrr_active) {
500 drm_crtc_handle_vblank(&acrtc->base);
501
502 /* BTR processing for pre-DCE12 ASICs */
503 if (acrtc->dm_irq_params.stream &&
504 adev->family < AMDGPU_FAMILY_AI) {
505 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
506 mod_freesync_handle_v_update(
507 adev->dm.freesync_module,
508 acrtc->dm_irq_params.stream,
509 &acrtc->dm_irq_params.vrr_params);
510
511 dc_stream_adjust_vmin_vmax(
512 adev->dm.dc,
513 acrtc->dm_irq_params.stream,
514 &acrtc->dm_irq_params.vrr_params.adjust);
515 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
516 }
517 }
518 }
519}
520
521/**
522 * dm_crtc_high_irq() - Handles CRTC interrupt
523 * @interrupt_params: used for determining the CRTC instance
524 *
525 * Handles the CRTC/VSYNC interrupt by notifying DRM's VBLANK
526 * event handler.
527 */
528static void dm_crtc_high_irq(void *interrupt_params)
529{
530 struct common_irq_params *irq_params = interrupt_params;
531 struct amdgpu_device *adev = irq_params->adev;
532 struct amdgpu_crtc *acrtc;
533 unsigned long flags;
534 int vrr_active;
535
536 acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
537 if (!acrtc)
538 return;
539
540 vrr_active = amdgpu_dm_vrr_active_irq(acrtc);
541
542 DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
543 vrr_active, acrtc->dm_irq_params.active_planes);
544
545 /*
546 * Core vblank handling at the start of front-porch is only possible
547 * in non-VRR mode, as only then does vblank timestamping give valid
548 * results while still in front-porch. Otherwise defer it to
549 * dm_vupdate_high_irq() after the end of front-porch.
550 */
551 if (!vrr_active)
552 drm_crtc_handle_vblank(&acrtc->base);
553
554 /*
555 * The following must happen at the start of vblank, for CRC
556 * computation and below-the-range (BTR) support in VRR mode.
557 */
558 amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);
559
560 /* BTR updates need to happen before VUPDATE on Vega and above. */
561 if (adev->family < AMDGPU_FAMILY_AI)
562 return;
563
564 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
565
566 if (acrtc->dm_irq_params.stream &&
567 acrtc->dm_irq_params.vrr_params.supported &&
568 acrtc->dm_irq_params.freesync_config.state ==
569 VRR_STATE_ACTIVE_VARIABLE) {
570 mod_freesync_handle_v_update(adev->dm.freesync_module,
571 acrtc->dm_irq_params.stream,
572 &acrtc->dm_irq_params.vrr_params);
573
574 dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc->dm_irq_params.stream,
575 &acrtc->dm_irq_params.vrr_params.adjust);
576 }
577
578 /*
579 * If there aren't any active_planes then the DCN HUBP may be clock-gated.
580 * In that case, pageflip completion interrupts won't fire and pageflip
581 * completion events won't get delivered. Prevent this by sending
582 * pending pageflip events from here if a flip is still pending.
583 *
584 * If any planes are enabled, use dm_pflip_high_irq() instead, to
585 * avoid race conditions between flip programming and completion,
586 * which could cause too early flip completion events.
587 */
588 if (adev->family >= AMDGPU_FAMILY_RV &&
589 acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
590 acrtc->dm_irq_params.active_planes == 0) {
591 if (acrtc->event) {
592 drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
593 acrtc->event = NULL;
594 drm_crtc_vblank_put(&acrtc->base);
595 }
596 acrtc->pflip_status = AMDGPU_FLIP_NONE;
597 }
598
599 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
600}
601
602#if defined(CONFIG_DRM_AMD_DC_DCN)
603#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
604/**
605 * dm_dcn_vertical_interrupt0_high_irq() - Handles OTG Vertical interrupt0 for
606 * DCN generation ASICs
607 * @interrupt_params: interrupt parameters
608 *
609 * Used to set crc window/read out crc value at vertical line 0 position
610 */
611static void dm_dcn_vertical_interrupt0_high_irq(void *interrupt_params)
612{
613 struct common_irq_params *irq_params = interrupt_params;
614 struct amdgpu_device *adev = irq_params->adev;
615 struct amdgpu_crtc *acrtc;
616
617 acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VLINE0);
618
619 if (!acrtc)
620 return;
621
622 amdgpu_dm_crtc_handle_crc_window_irq(&acrtc->base);
623}
624#endif /* CONFIG_DRM_AMD_SECURE_DISPLAY */
625
626/**
627 * dmub_aux_setconfig_callback - Callback for AUX or SET_CONFIG command.
628 * @adev: amdgpu_device pointer
629 * @notify: dmub notification structure
630 *
631 * DMUB AUX or SET_CONFIG command completion processing callback.
632 * Copies the DMUB notification into DM, where it is read by the
633 * AUX-command-issuing thread, and signals the event to wake up that thread.
634 */
635void dmub_aux_setconfig_callback(struct amdgpu_device *adev, struct dmub_notification *notify)
636{
637 if (adev->dm.dmub_notify)
638 memcpy(adev->dm.dmub_notify, notify, sizeof(struct dmub_notification));
639 if (notify->type == DMUB_NOTIFICATION_AUX_REPLY)
640 complete(&adev->dm.dmub_aux_transfer_done);
641}
642
643/**
644 * dmub_hpd_callback - DMUB HPD interrupt processing callback.
645 * @adev: amdgpu_device pointer
646 * @notify: dmub notification structure
647 *
648 * DMUB HPD interrupt processing callback. Gets the display index from the
649 * link index and calls the helper to do the processing.
650 */
651void dmub_hpd_callback(struct amdgpu_device *adev, struct dmub_notification *notify)
652{
653 struct amdgpu_dm_connector *aconnector;
654 struct amdgpu_dm_connector *hpd_aconnector = NULL;
655 struct drm_connector *connector;
656 struct drm_connector_list_iter iter;
657 struct dc_link *link;
658 uint8_t link_index = 0;
659 struct drm_device *dev = adev->dm.ddev;
660
661 if (adev == NULL)
662 return;
663
664 if (notify == NULL) {
665 DRM_ERROR("DMUB HPD callback notification was NULL");
666 return;
667 }
668
669 if (notify->link_index >= adev->dm.dc->link_count) {
670 DRM_ERROR("DMUB HPD index (%u) is abnormal", notify->link_index);
671 return;
672 }
673
674 link_index = notify->link_index;
675 link = adev->dm.dc->links[link_index];
676
677 drm_connector_list_iter_begin(dev, &iter);
678 drm_for_each_connector_iter(connector, &iter) {
679 aconnector = to_amdgpu_dm_connector(connector);
680 if (link && aconnector->dc_link == link) {
681 DRM_INFO("DMUB HPD callback: link_index=%u\n", link_index);
682 hpd_aconnector = aconnector;
683 break;
684 }
685 }
686 drm_connector_list_iter_end(&iter);
687
688 if (hpd_aconnector) {
689 if (notify->type == DMUB_NOTIFICATION_HPD)
690 handle_hpd_irq_helper(hpd_aconnector);
691 else if (notify->type == DMUB_NOTIFICATION_HPD_IRQ)
692 handle_hpd_rx_irq(hpd_aconnector);
693 }
694}
695
696/**
697 * register_dmub_notify_callback - Sets callback for DMUB notify
698 * @adev: amdgpu_device pointer
699 * @type: Type of dmub notification
700 * @callback: Dmub interrupt callback function
701 * @dmub_int_thread_offload: offload indicator
702 *
703 * API to register a DMUB callback handler for a DMUB notification.
704 * Also sets an indicator for whether callback processing is to be
705 * offloaded to the DMUB interrupt handling thread.
706 * Return: true if successfully registered, false if there is existing registration
707 */
708bool register_dmub_notify_callback(struct amdgpu_device *adev, enum dmub_notification_type type,
709dmub_notify_interrupt_callback_t callback, bool dmub_int_thread_offload)
710{
711 if (callback != NULL && type < ARRAY_SIZE(adev->dm.dmub_thread_offload)) {
712 adev->dm.dmub_callback[type] = callback;
713 adev->dm.dmub_thread_offload[type] = dmub_int_thread_offload;
714 } else
715 return false;
716
717 return true;
718}
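
/*
 * Usage sketch (illustrative only, mirroring what amdgpu_dm_init() does
 * further below): register a handler for DMUB AUX replies that is called
 * directly from the outbox interrupt rather than offloaded to a worker.
 *
 *	if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_AUX_REPLY,
 *					   dmub_aux_setconfig_callback, false))
 *		DRM_ERROR("amdgpu: failed to register dmub aux callback");
 */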
719
720static void dm_handle_hpd_work(struct work_struct *work)
721{
722 struct dmub_hpd_work *dmub_hpd_wrk;
723
724 dmub_hpd_wrk = container_of(work, struct dmub_hpd_work, handle_hpd_work);
725
726 if (!dmub_hpd_wrk->dmub_notify) {
727 DRM_ERROR("dmub_hpd_wrk dmub_notify is NULL");
728 return;
729 }
730
731 if (dmub_hpd_wrk->dmub_notify->type < ARRAY_SIZE(dmub_hpd_wrk->adev->dm.dmub_callback)) {
732 dmub_hpd_wrk->adev->dm.dmub_callback[dmub_hpd_wrk->dmub_notify->type](dmub_hpd_wrk->adev,
733 dmub_hpd_wrk->dmub_notify);
734 }
735
736 kfree(dmub_hpd_wrk->dmub_notify);
737 kfree(dmub_hpd_wrk);
738
739}
740
741#define DMUB_TRACE_MAX_READ 64
742/**
743 * dm_dmub_outbox1_low_irq() - Handles Outbox interrupt
744 * @interrupt_params: used for determining the Outbox instance
745 *
746 * Handles the DMUB Outbox interrupt by reading pending notifications and
747 * trace-buffer entries and dispatching them to the registered callbacks.
748 */
749static void dm_dmub_outbox1_low_irq(void *interrupt_params)
750{
751 struct dmub_notification notify;
752 struct common_irq_params *irq_params = interrupt_params;
753 struct amdgpu_device *adev = irq_params->adev;
754 struct amdgpu_display_manager *dm = &adev->dm;
755 struct dmcub_trace_buf_entry entry = { 0 };
756 uint32_t count = 0;
757 struct dmub_hpd_work *dmub_hpd_wrk;
758 struct dc_link *plink = NULL;
759
760 if (dc_enable_dmub_notifications(adev->dm.dc) &&
761 irq_params->irq_src == DC_IRQ_SOURCE_DMCUB_OUTBOX) {
762
763 do {
764 dc_stat_get_dmub_notification(adev->dm.dc, &notify);
765 if (notify.type >= ARRAY_SIZE(dm->dmub_thread_offload)) {
766 DRM_ERROR("DM: notify type %d invalid!", notify.type);
767 continue;
768 }
769 if (!dm->dmub_callback[notify.type]) {
770 DRM_DEBUG_DRIVER("DMUB notification skipped, no handler: type=%d\n", notify.type);
771 continue;
772 }
773 if (dm->dmub_thread_offload[notify.type] == true) {
774 dmub_hpd_wrk = kzalloc(sizeof(*dmub_hpd_wrk), GFP_ATOMIC);
775 if (!dmub_hpd_wrk) {
776 DRM_ERROR("Failed to allocate dmub_hpd_wrk");
777 return;
778 }
779 dmub_hpd_wrk->dmub_notify = kzalloc(sizeof(struct dmub_notification), GFP_ATOMIC);
780 if (!dmub_hpd_wrk->dmub_notify) {
781 kfree(dmub_hpd_wrk);
782 DRM_ERROR("Failed to allocate dmub_hpd_wrk->dmub_notify");
783 return;
784 }
785 INIT_WORK(&dmub_hpd_wrk->handle_hpd_work, dm_handle_hpd_work);
786 if (dmub_hpd_wrk->dmub_notify)
787 memcpy(dmub_hpd_wrk->dmub_notify, &notify, sizeof(struct dmub_notification));
788 dmub_hpd_wrk->adev = adev;
789 if (notify.type == DMUB_NOTIFICATION_HPD) {
790 plink = adev->dm.dc->links[notify.link_index];
791 if (plink) {
792 plink->hpd_status =
793 notify.hpd_status == DP_HPD_PLUG;
794 }
795 }
796 queue_work(adev->dm.delayed_hpd_wq, &dmub_hpd_wrk->handle_hpd_work);
797 } else {
798 dm->dmub_callback[notify.type](adev, &notify);
799 }
800 } while (notify.pending_notification);
801 }
802
803
804 do {
805 if (dc_dmub_srv_get_dmub_outbox0_msg(dm->dc, &entry)) {
806 trace_amdgpu_dmub_trace_high_irq(entry.trace_code, entry.tick_count,
807 entry.param0, entry.param1);
808
809 DRM_DEBUG_DRIVER("trace_code:%u, tick_count:%u, param0:%u, param1:%u\n",
810 entry.trace_code, entry.tick_count, entry.param0, entry.param1);
811 } else
812 break;
813
814 count++;
815
816 } while (count <= DMUB_TRACE_MAX_READ);
817
818 if (count > DMUB_TRACE_MAX_READ)
819 DRM_DEBUG_DRIVER("Warning : count > DMUB_TRACE_MAX_READ");
820}
821#endif /* CONFIG_DRM_AMD_DC_DCN */
822
823static int dm_set_clockgating_state(void *handle,
824 enum amd_clockgating_state state)
825{
826 return 0;
827}
828
829static int dm_set_powergating_state(void *handle,
830 enum amd_powergating_state state)
831{
832 return 0;
833}
834
835/* Prototypes of private functions */
836static int dm_early_init(void *handle);
837
838/* Allocate memory for FBC compressed data */
839static void amdgpu_dm_fbc_init(struct drm_connector *connector)
840{
841 struct drm_device *dev = connector->dev;
842 struct amdgpu_device *adev = drm_to_adev(dev);
843 struct dm_compressor_info *compressor = &adev->dm.compressor;
844 struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
845 struct drm_display_mode *mode;
846 unsigned long max_size = 0;
847
848 if (adev->dm.dc->fbc_compressor == NULL)
849 return;
850
851 if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)
852 return;
853
854 if (compressor->bo_ptr)
855 return;
856
857
858 list_for_each_entry(mode, &connector->modes, head) {
859 if (max_size < mode->htotal * mode->vtotal)
860 max_size = mode->htotal * mode->vtotal;
861 }
862
863 if (max_size) {
864 int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
865 AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
866 &compressor->gpu_addr, &compressor->cpu_addr);
867
868 if (r)
869 DRM_ERROR("DM: Failed to initialize FBC\n");
870 else {
871 adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
872 DRM_INFO("DM: FBC alloc %lu\n", max_size*4);
873 }
874
875 }
876
877}
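
/*
 * Sizing example (illustrative numbers): for a 1920x1080 eDP timing with
 * htotal = 2200 and vtotal = 1125, max_size = 2200 * 1125 = 2,475,000
 * pixels, so the loop above requests a 2,475,000 * 4 byte (~9.4 MiB) FBC
 * buffer in GTT via amdgpu_bo_create_kernel().
 */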
878
879static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
880 int pipe, bool *enabled,
881 unsigned char *buf, int max_bytes)
882{
883 struct drm_device *dev = dev_get_drvdata(kdev);
884 struct amdgpu_device *adev = drm_to_adev(dev);
885 struct drm_connector *connector;
886 struct drm_connector_list_iter conn_iter;
887 struct amdgpu_dm_connector *aconnector;
888 int ret = 0;
889
890 *enabled = false;
891
892 mutex_lock(&adev->dm.audio_lock);
893
894 drm_connector_list_iter_begin(dev, &conn_iter);
895 drm_for_each_connector_iter(connector, &conn_iter) {
896 aconnector = to_amdgpu_dm_connector(connector);
897 if (aconnector->audio_inst != port)
898 continue;
899
900 *enabled = true;
901 ret = drm_eld_size(connector->eld);
902 memcpy(buf, connector->eld, min(max_bytes, ret));
903
904 break;
905 }
906 drm_connector_list_iter_end(&conn_iter);
907
908 mutex_unlock(&adev->dm.audio_lock);
909
910 DRM_DEBUG_KMS("Get ELD : idx=%d ret=%d en=%d\n", port, ret, *enabled);
911
912 return ret;
913}
914
915static const struct drm_audio_component_ops amdgpu_dm_audio_component_ops = {
916 .get_eld = amdgpu_dm_audio_component_get_eld,
917};
918
919static int amdgpu_dm_audio_component_bind(struct device *kdev,
920 struct device *hda_kdev, void *data)
921{
922 struct drm_device *dev = dev_get_drvdata(kdev);
923 struct amdgpu_device *adev = drm_to_adev(dev);
924 struct drm_audio_component *acomp = data;
925
926 acomp->ops = &amdgpu_dm_audio_component_ops;
927 acomp->dev = kdev;
928 adev->dm.audio_component = acomp;
929
930 return 0;
931}
932
933static void amdgpu_dm_audio_component_unbind(struct device *kdev,
934 struct device *hda_kdev, void *data)
935{
936 struct drm_device *dev = dev_get_drvdata(kdev);
937 struct amdgpu_device *adev = drm_to_adev(dev);
938 struct drm_audio_component *acomp = data;
939
940 acomp->ops = NULL;
941 acomp->dev = NULL;
942 adev->dm.audio_component = NULL;
943}
944
945static const struct component_ops amdgpu_dm_audio_component_bind_ops = {
946 .bind = amdgpu_dm_audio_component_bind,
947 .unbind = amdgpu_dm_audio_component_unbind,
948};
949
950static int amdgpu_dm_audio_init(struct amdgpu_device *adev)
951{
952 int i, ret;
953
954 if (!amdgpu_audio)
955 return 0;
956
957 adev->mode_info.audio.enabled = true;
958
959 adev->mode_info.audio.num_pins = adev->dm.dc->res_pool->audio_count;
960
961 for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
962 adev->mode_info.audio.pin[i].channels = -1;
963 adev->mode_info.audio.pin[i].rate = -1;
964 adev->mode_info.audio.pin[i].bits_per_sample = -1;
965 adev->mode_info.audio.pin[i].status_bits = 0;
966 adev->mode_info.audio.pin[i].category_code = 0;
967 adev->mode_info.audio.pin[i].connected = false;
968 adev->mode_info.audio.pin[i].id =
969 adev->dm.dc->res_pool->audios[i]->inst;
970 adev->mode_info.audio.pin[i].offset = 0;
971 }
972
973 ret = component_add(adev->dev, &amdgpu_dm_audio_component_bind_ops);
974 if (ret < 0)
975 return ret;
976
977 adev->dm.audio_registered = true;
978
979 return 0;
980}
981
982static void amdgpu_dm_audio_fini(struct amdgpu_device *adev)
983{
984 if (!amdgpu_audio)
985 return;
986
987 if (!adev->mode_info.audio.enabled)
988 return;
989
990 if (adev->dm.audio_registered) {
991 component_del(adev->dev, &amdgpu_dm_audio_component_bind_ops);
992 adev->dm.audio_registered = false;
993 }
994
995 /* TODO: Disable audio? */
996
997 adev->mode_info.audio.enabled = false;
998}
999
1000static void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
1001{
1002 struct drm_audio_component *acomp = adev->dm.audio_component;
1003
1004 if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
1005 DRM_DEBUG_KMS("Notify ELD: %d\n", pin);
1006
1007 acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
1008 pin, -1);
1009 }
1010}
1011
1012static int dm_dmub_hw_init(struct amdgpu_device *adev)
1013{
1014 const struct dmcub_firmware_header_v1_0 *hdr;
1015 struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
1016 struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
1017 const struct firmware *dmub_fw = adev->dm.dmub_fw;
1018 struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
1019 struct abm *abm = adev->dm.dc->res_pool->abm;
1020 struct dmub_srv_hw_params hw_params;
1021 enum dmub_status status;
1022 const unsigned char *fw_inst_const, *fw_bss_data;
1023 uint32_t i, fw_inst_const_size, fw_bss_data_size;
1024 bool has_hw_support;
1025 struct dc *dc = adev->dm.dc;
1026
1027 if (!dmub_srv)
1028 /* DMUB isn't supported on the ASIC. */
1029 return 0;
1030
1031 if (!fb_info) {
1032 DRM_ERROR("No framebuffer info for DMUB service.\n");
1033 return -EINVAL;
1034 }
1035
1036 if (!dmub_fw) {
1037 /* Firmware required for DMUB support. */
1038 DRM_ERROR("No firmware provided for DMUB.\n");
1039 return -EINVAL;
1040 }
1041
1042 status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support);
1043 if (status != DMUB_STATUS_OK) {
1044 DRM_ERROR("Error checking HW support for DMUB: %d\n", status);
1045 return -EINVAL;
1046 }
1047
1048 if (!has_hw_support) {
1049 DRM_INFO("DMUB unsupported on ASIC\n");
1050 return 0;
1051 }
1052
1053 hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;
1054
1055 fw_inst_const = dmub_fw->data +
1056 le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1057 PSP_HEADER_BYTES;
1058
1059 fw_bss_data = dmub_fw->data +
1060 le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1061 le32_to_cpu(hdr->inst_const_bytes);
1062
1063 /* Copy firmware and bios info into FB memory. */
1064 fw_inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
1065 PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
1066
1067 fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
1068
1069 /* if adev->firmware.load_type == AMDGPU_FW_LOAD_PSP,
1070 * amdgpu_ucode_init_single_fw will load dmub firmware
1071 * fw_inst_const part to cw0; otherwise, the firmware back door load
1072 * will be done by dm_dmub_hw_init
1073 */
1074 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
1075 memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
1076 fw_inst_const_size);
1077 }
1078
1079 if (fw_bss_data_size)
1080 memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr,
1081 fw_bss_data, fw_bss_data_size);
1082
1083 /* Copy firmware bios info into FB memory. */
1084 memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,
1085 adev->bios_size);
1086
1087 /* Reset regions that need to be reset. */
1088 memset(fb_info->fb[DMUB_WINDOW_4_MAILBOX].cpu_addr, 0,
1089 fb_info->fb[DMUB_WINDOW_4_MAILBOX].size);
1090
1091 memset(fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr, 0,
1092 fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size);
1093
1094 memset(fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr, 0,
1095 fb_info->fb[DMUB_WINDOW_6_FW_STATE].size);
1096
1097 /* Initialize hardware. */
1098 memset(&hw_params, 0, sizeof(hw_params));
1099 hw_params.fb_base = adev->gmc.fb_start;
1100 hw_params.fb_offset = adev->gmc.aper_base;
1101
1102 /* backdoor load firmware and trigger dmub running */
1103 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
1104 hw_params.load_inst_const = true;
1105
1106 if (dmcu)
1107 hw_params.psp_version = dmcu->psp_version;
1108
1109 for (i = 0; i < fb_info->num_fb; ++i)
1110 hw_params.fb[i] = &fb_info->fb[i];
1111
1112 switch (adev->asic_type) {
1113 case CHIP_YELLOW_CARP:
1114 if (dc->ctx->asic_id.hw_internal_rev != YELLOW_CARP_A0) {
1115 hw_params.dpia_supported = true;
1116#if defined(CONFIG_DRM_AMD_DC_DCN)
1117 hw_params.disable_dpia = dc->debug.dpia_debug.bits.disable_dpia;
1118#endif
1119 }
1120 break;
1121 default:
1122 break;
1123 }
1124
1125 status = dmub_srv_hw_init(dmub_srv, &hw_params);
1126 if (status != DMUB_STATUS_OK) {
1127 DRM_ERROR("Error initializing DMUB HW: %d\n", status);
1128 return -EINVAL;
1129 }
1130
1131 /* Wait for firmware load to finish. */
1132 status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
1133 if (status != DMUB_STATUS_OK)
1134 DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);
1135
1136 /* Init DMCU and ABM if available. */
1137 if (dmcu && abm) {
1138 dmcu->funcs->dmcu_init(dmcu);
1139 abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
1140 }
1141
1142 if (!adev->dm.dc->ctx->dmub_srv)
1143 adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
1144 if (!adev->dm.dc->ctx->dmub_srv) {
1145 DRM_ERROR("Couldn't allocate DC DMUB server!\n");
1146 return -ENOMEM;
1147 }
1148
1149 DRM_INFO("DMUB hardware initialized: version=0x%08X\n",
1150 adev->dm.dmcub_fw_version);
1151
1152 return 0;
1153}
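
/*
 * Offset example (illustrative value for inst_const_bytes): with
 * hdr->inst_const_bytes = 0x20000, the instruction-constant payload copied
 * into DMUB_WINDOW_0_INST_CONST above is
 * 0x20000 - PSP_HEADER_BYTES - PSP_FOOTER_BYTES = 0x1fe00 bytes, taken from
 * ucode_array_offset_bytes + PSP_HEADER_BYTES in the firmware image, while
 * the BSS/data section starts at ucode_array_offset_bytes + inst_const_bytes.
 */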
1154
1155#if defined(CONFIG_DRM_AMD_DC_DCN)
1156static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_addr_space_config *pa_config)
1157{
1158 uint64_t pt_base;
1159 uint32_t logical_addr_low;
1160 uint32_t logical_addr_high;
1161 uint32_t agp_base, agp_bot, agp_top;
1162 PHYSICAL_ADDRESS_LOC page_table_start, page_table_end, page_table_base;
1163
1164 memset(pa_config, 0, sizeof(*pa_config));
1165
1166 logical_addr_low = min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18;
1167 pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);
1168
1169 if (adev->apu_flags & AMD_APU_IS_RAVEN2)
1170 /*
1171 * Raven2 has a HW issue that prevents it from using VRAM located
1172 * above MC_VM_SYSTEM_APERTURE_HIGH_ADDR. The workaround is to
1173 * increase the system aperture high address (add 1) to get rid of
1174 * the VM fault and hardware hang.
1175 */
1176 logical_addr_high = max((adev->gmc.fb_end >> 18) + 0x1, adev->gmc.agp_end >> 18);
1177 else
1178 logical_addr_high = max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18;
1179
1180 agp_base = 0;
1181 agp_bot = adev->gmc.agp_start >> 24;
1182 agp_top = adev->gmc.agp_end >> 24;
1183
1184
1185 page_table_start.high_part = (u32)(adev->gmc.gart_start >> 44) & 0xF;
1186 page_table_start.low_part = (u32)(adev->gmc.gart_start >> 12);
1187 page_table_end.high_part = (u32)(adev->gmc.gart_end >> 44) & 0xF;
1188 page_table_end.low_part = (u32)(adev->gmc.gart_end >> 12);
1189 page_table_base.high_part = upper_32_bits(pt_base) & 0xF;
1190 page_table_base.low_part = lower_32_bits(pt_base);
1191
1192 pa_config->system_aperture.start_addr = (uint64_t)logical_addr_low << 18;
1193 pa_config->system_aperture.end_addr = (uint64_t)logical_addr_high << 18;
1194
1195 pa_config->system_aperture.agp_base = (uint64_t)agp_base << 24;
1196 pa_config->system_aperture.agp_bot = (uint64_t)agp_bot << 24;
1197 pa_config->system_aperture.agp_top = (uint64_t)agp_top << 24;
1198
1199 pa_config->system_aperture.fb_base = adev->gmc.fb_start;
1200 pa_config->system_aperture.fb_offset = adev->gmc.aper_base;
1201 pa_config->system_aperture.fb_top = adev->gmc.fb_end;
1202
1203 pa_config->gart_config.page_table_start_addr = page_table_start.quad_part << 12;
1204 pa_config->gart_config.page_table_end_addr = page_table_end.quad_part << 12;
1205 pa_config->gart_config.page_table_base_addr = page_table_base.quad_part;
1206
1207 pa_config->is_hvm_enabled = 0;
1208
1209}
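
/*
 * Granularity example (illustrative address): the 18-bit shifts above keep
 * the system aperture in 256 KiB units, so for fb_start = 0x8000000000 the
 * stored value is 0x8000000000 >> 18 = 0x200000 and start_addr becomes
 * 0x200000 << 18 = 0x8000000000 again. The AGP fields use 24-bit shifts
 * (16 MiB units) and the GART page-table addresses 12-bit shifts (4 KiB
 * pages).
 */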
1210#endif
1211#if defined(CONFIG_DRM_AMD_DC_DCN)
1212static void vblank_control_worker(struct work_struct *work)
1213{
1214 struct vblank_control_work *vblank_work =
1215 container_of(work, struct vblank_control_work, work);
1216 struct amdgpu_display_manager *dm = vblank_work->dm;
1217
1218 mutex_lock(&dm->dc_lock);
1219
1220 if (vblank_work->enable)
1221 dm->active_vblank_irq_count++;
1222 else if (dm->active_vblank_irq_count)
1223 dm->active_vblank_irq_count--;
1224
1225 dc_allow_idle_optimizations(dm->dc, dm->active_vblank_irq_count == 0);
1226
1227 DRM_DEBUG_KMS("Allow idle optimizations (MALL): %d\n", dm->active_vblank_irq_count == 0);
1228
1229 /* Control PSR based on vblank requirements from OS */
1230 if (vblank_work->stream && vblank_work->stream->link) {
1231 if (vblank_work->enable) {
1232 if (vblank_work->stream->link->psr_settings.psr_allow_active)
1233 amdgpu_dm_psr_disable(vblank_work->stream);
1234 } else if (vblank_work->stream->link->psr_settings.psr_feature_enabled &&
1235 !vblank_work->stream->link->psr_settings.psr_allow_active &&
1236 vblank_work->acrtc->dm_irq_params.allow_psr_entry) {
1237 amdgpu_dm_psr_enable(vblank_work->stream);
1238 }
1239 }
1240
1241 mutex_unlock(&dm->dc_lock);
1242
1243 dc_stream_release(vblank_work->stream);
1244
1245 kfree(vblank_work);
1246}
1247
1248#endif
1249
1250static void dm_handle_hpd_rx_offload_work(struct work_struct *work)
1251{
1252 struct hpd_rx_irq_offload_work *offload_work;
1253 struct amdgpu_dm_connector *aconnector;
1254 struct dc_link *dc_link;
1255 struct amdgpu_device *adev;
1256 enum dc_connection_type new_connection_type = dc_connection_none;
1257 unsigned long flags;
1258
1259 offload_work = container_of(work, struct hpd_rx_irq_offload_work, work);
1260 aconnector = offload_work->offload_wq->aconnector;
1261
1262 if (!aconnector) {
1263 DRM_ERROR("Can't retrieve aconnector in hpd_rx_irq_offload_work");
1264 goto skip;
1265 }
1266
1267 adev = drm_to_adev(aconnector->base.dev);
1268 dc_link = aconnector->dc_link;
1269
1270 mutex_lock(&aconnector->hpd_lock);
1271 if (!dc_link_detect_sink(dc_link, &new_connection_type))
1272 DRM_ERROR("KMS: Failed to detect connector\n");
1273 mutex_unlock(&aconnector->hpd_lock);
1274
1275 if (new_connection_type == dc_connection_none)
1276 goto skip;
1277
1278 if (amdgpu_in_reset(adev))
1279 goto skip;
1280
1281 mutex_lock(&adev->dm.dc_lock);
1282 if (offload_work->data.bytes.device_service_irq.bits.AUTOMATED_TEST)
1283 dc_link_dp_handle_automated_test(dc_link);
1284 else if ((dc_link->connector_signal != SIGNAL_TYPE_EDP) &&
1285 hpd_rx_irq_check_link_loss_status(dc_link, &offload_work->data) &&
1286 dc_link_dp_allow_hpd_rx_irq(dc_link)) {
1287 dc_link_dp_handle_link_loss(dc_link);
1288 spin_lock_irqsave(&offload_work->offload_wq->offload_lock, flags);
1289 offload_work->offload_wq->is_handling_link_loss = false;
1290 spin_unlock_irqrestore(&offload_work->offload_wq->offload_lock, flags);
1291 }
1292 mutex_unlock(&adev->dm.dc_lock);
1293
1294skip:
1295 kfree(offload_work);
1296
1297}
1298
1299static struct hpd_rx_irq_offload_work_queue *hpd_rx_irq_create_workqueue(struct dc *dc)
1300{
1301 int max_caps = dc->caps.max_links;
1302 int i = 0;
1303 struct hpd_rx_irq_offload_work_queue *hpd_rx_offload_wq = NULL;
1304
1305 hpd_rx_offload_wq = kcalloc(max_caps, sizeof(*hpd_rx_offload_wq), GFP_KERNEL);
1306
1307 if (!hpd_rx_offload_wq)
1308 return NULL;
1309
1310
1311 for (i = 0; i < max_caps; i++) {
1312 hpd_rx_offload_wq[i].wq =
1313 create_singlethread_workqueue("amdgpu_dm_hpd_rx_offload_wq");
1314
1315 if (hpd_rx_offload_wq[i].wq == NULL) {
1316 DRM_ERROR("create amdgpu_dm_hpd_rx_offload_wq fail!");
1317 return NULL;
1318 }
1319
1320 spin_lock_init(&hpd_rx_offload_wq[i].offload_lock);
1321 }
1322
1323 return hpd_rx_offload_wq;
1324}
1325
1326struct amdgpu_stutter_quirk {
1327 u16 chip_vendor;
1328 u16 chip_device;
1329 u16 subsys_vendor;
1330 u16 subsys_device;
1331 u8 revision;
1332};
1333
1334static const struct amdgpu_stutter_quirk amdgpu_stutter_quirk_list[] = {
1335 /* https://bugzilla.kernel.org/show_bug.cgi?id=214417 */
1336 { 0x1002, 0x15dd, 0x1002, 0x15dd, 0xc8 },
1337 { 0, 0, 0, 0, 0 },
1338};
1339
1340static bool dm_should_disable_stutter(struct pci_dev *pdev)
1341{
1342 const struct amdgpu_stutter_quirk *p = amdgpu_stutter_quirk_list;
1343
1344 while (p && p->chip_device != 0) {
1345 if (pdev->vendor == p->chip_vendor &&
1346 pdev->device == p->chip_device &&
1347 pdev->subsystem_vendor == p->subsys_vendor &&
1348 pdev->subsystem_device == p->subsys_device &&
1349 pdev->revision == p->revision) {
1350 return true;
1351 }
1352 ++p;
1353 }
1354 return false;
1355}
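
/*
 * Adding a quirk (illustrative only, the IDs below are hypothetical): a new
 * entry goes into amdgpu_stutter_quirk_list before the all-zero terminator
 * and must match every field compared in dm_should_disable_stutter(), e.g.
 *
 *	{ 0x1002, 0x15d8, 0x103c, 0x8615, 0xc1 },
 */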
1356
1357static int amdgpu_dm_init(struct amdgpu_device *adev)
1358{
1359 struct dc_init_data init_data;
1360#ifdef CONFIG_DRM_AMD_DC_HDCP
1361 struct dc_callback_init init_params;
1362#endif
1363 int r;
1364
1365 adev->dm.ddev = adev_to_drm(adev);
1366 adev->dm.adev = adev;
1367
1368 /* Zero all the fields */
1369 memset(&init_data, 0, sizeof(init_data));
1370#ifdef CONFIG_DRM_AMD_DC_HDCP
1371 memset(&init_params, 0, sizeof(init_params));
1372#endif
1373
1374 mutex_init(&adev->dm.dc_lock);
1375 mutex_init(&adev->dm.audio_lock);
1376#if defined(CONFIG_DRM_AMD_DC_DCN)
1377 spin_lock_init(&adev->dm.vblank_lock);
1378#endif
1379
1380 if (amdgpu_dm_irq_init(adev)) {
1381 DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
1382 goto error;
1383 }
1384
1385 init_data.asic_id.chip_family = adev->family;
1386
1387 init_data.asic_id.pci_revision_id = adev->pdev->revision;
1388 init_data.asic_id.hw_internal_rev = adev->external_rev_id;
1389 init_data.asic_id.chip_id = adev->pdev->device;
1390
1391 init_data.asic_id.vram_width = adev->gmc.vram_width;
1392 /* TODO: initialize init_data.asic_id.vram_type here!!!! */
1393 init_data.asic_id.atombios_base_address =
1394 adev->mode_info.atom_context->bios;
1395
1396 init_data.driver = adev;
1397
1398 adev->dm.cgs_device = amdgpu_cgs_create_device(adev);
1399
1400 if (!adev->dm.cgs_device) {
1401 DRM_ERROR("amdgpu: failed to create cgs device.\n");
1402 goto error;
1403 }
1404
1405 init_data.cgs_device = adev->dm.cgs_device;
1406
1407 init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;
1408
1409 switch (adev->asic_type) {
1410 case CHIP_CARRIZO:
1411 case CHIP_STONEY:
1412 init_data.flags.gpu_vm_support = true;
1413 break;
1414 default:
1415 switch (adev->ip_versions[DCE_HWIP][0]) {
1416 case IP_VERSION(2, 1, 0):
1417 init_data.flags.gpu_vm_support = true;
1418 switch (adev->dm.dmcub_fw_version) {
1419 case 0: /* development */
1420 case 0x1: /* linux-firmware.git hash 6d9f399 */
1421 case 0x01000000: /* linux-firmware.git hash 9a0b0f4 */
1422 init_data.flags.disable_dmcu = false;
1423 break;
1424 default:
1425 init_data.flags.disable_dmcu = true;
1426 }
1427 break;
1428 case IP_VERSION(1, 0, 0):
1429 case IP_VERSION(1, 0, 1):
1430 case IP_VERSION(3, 0, 1):
1431 case IP_VERSION(3, 1, 2):
1432 case IP_VERSION(3, 1, 3):
1433 init_data.flags.gpu_vm_support = true;
1434 break;
1435 case IP_VERSION(2, 0, 3):
1436 init_data.flags.disable_dmcu = true;
1437 break;
1438 default:
1439 break;
1440 }
1441 break;
1442 }
1443
1444 if (amdgpu_dc_feature_mask & DC_FBC_MASK)
1445 init_data.flags.fbc_support = true;
1446
1447 if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK)
1448 init_data.flags.multi_mon_pp_mclk_switch = true;
1449
1450 if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK)
1451 init_data.flags.disable_fractional_pwm = true;
1452
1453 if (amdgpu_dc_feature_mask & DC_EDP_NO_POWER_SEQUENCING)
1454 init_data.flags.edp_no_power_sequencing = true;
1455
1456#ifdef CONFIG_DRM_AMD_DC_DCN
1457 if (amdgpu_dc_feature_mask & DC_DISABLE_LTTPR_DP1_4A)
1458 init_data.flags.allow_lttpr_non_transparent_mode.bits.DP1_4A = true;
1459 if (amdgpu_dc_feature_mask & DC_DISABLE_LTTPR_DP2_0)
1460 init_data.flags.allow_lttpr_non_transparent_mode.bits.DP2_0 = true;
1461#endif
1462
1463 init_data.flags.power_down_display_on_boot = true;
1464
1465 if (check_seamless_boot_capability(adev)) {
1466 init_data.flags.power_down_display_on_boot = false;
1467 init_data.flags.allow_seamless_boot_optimization = true;
1468 DRM_INFO("Seamless boot condition check passed\n");
1469 }
1470
1471 INIT_LIST_HEAD(&adev->dm.da_list);
1472 /* Display Core create. */
1473 adev->dm.dc = dc_create(&init_data);
1474
1475 if (adev->dm.dc) {
1476 DRM_INFO("Display Core initialized with v%s!\n", DC_VER);
1477 } else {
1478 DRM_INFO("Display Core failed to initialize with v%s!\n", DC_VER);
1479 goto error;
1480 }
1481
1482 if (amdgpu_dc_debug_mask & DC_DISABLE_PIPE_SPLIT) {
1483 adev->dm.dc->debug.force_single_disp_pipe_split = false;
1484 adev->dm.dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
1485 }
1486
1487 if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
1488 adev->dm.dc->debug.disable_stutter = amdgpu_pp_feature_mask & PP_STUTTER_MODE ? false : true;
1489 if (dm_should_disable_stutter(adev->pdev))
1490 adev->dm.dc->debug.disable_stutter = true;
1491
1492 if (amdgpu_dc_debug_mask & DC_DISABLE_STUTTER)
1493 adev->dm.dc->debug.disable_stutter = true;
1494
1495 if (amdgpu_dc_debug_mask & DC_DISABLE_DSC) {
1496 adev->dm.dc->debug.disable_dsc = true;
1497 adev->dm.dc->debug.disable_dsc_edp = true;
1498 }
1499
1500 if (amdgpu_dc_debug_mask & DC_DISABLE_CLOCK_GATING)
1501 adev->dm.dc->debug.disable_clock_gate = true;
1502
1503 r = dm_dmub_hw_init(adev);
1504 if (r) {
1505 DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
1506 goto error;
1507 }
1508
1509 dc_hardware_init(adev->dm.dc);
1510
1511 adev->dm.hpd_rx_offload_wq = hpd_rx_irq_create_workqueue(adev->dm.dc);
1512 if (!adev->dm.hpd_rx_offload_wq) {
1513 DRM_ERROR("amdgpu: failed to create hpd rx offload workqueue.\n");
1514 goto error;
1515 }
1516
1517#if defined(CONFIG_DRM_AMD_DC_DCN)
1518 if ((adev->flags & AMD_IS_APU) && (adev->asic_type >= CHIP_CARRIZO)) {
1519 struct dc_phy_addr_space_config pa_config;
1520
1521 mmhub_read_system_context(adev, &pa_config);
1522
1523 // Call the DC init_memory func
1524 dc_setup_system_context(adev->dm.dc, &pa_config);
1525 }
1526#endif
1527
1528 adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
1529 if (!adev->dm.freesync_module) {
1530 DRM_ERROR(
1531 "amdgpu: failed to initialize freesync_module.\n");
1532 } else
1533 DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
1534 adev->dm.freesync_module);
1535
1536 amdgpu_dm_init_color_mod();
1537
1538#if defined(CONFIG_DRM_AMD_DC_DCN)
1539 if (adev->dm.dc->caps.max_links > 0) {
1540 adev->dm.vblank_control_workqueue =
1541 create_singlethread_workqueue("dm_vblank_control_workqueue");
1542 if (!adev->dm.vblank_control_workqueue)
1543 DRM_ERROR("amdgpu: failed to initialize vblank_workqueue.\n");
1544 }
1545#endif
1546
1547#ifdef CONFIG_DRM_AMD_DC_HDCP
1548 if (adev->dm.dc->caps.max_links > 0 && adev->family >= AMDGPU_FAMILY_RV) {
1549 adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);
1550
1551 if (!adev->dm.hdcp_workqueue)
1552 DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n");
1553 else
1554 DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);
1555
1556 dc_init_callbacks(adev->dm.dc, &init_params);
1557 }
1558#endif
1559#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
1560 adev->dm.crc_rd_wrk = amdgpu_dm_crtc_secure_display_create_work();
1561#endif
1562 if (dc_enable_dmub_notifications(adev->dm.dc)) {
1563 init_completion(&adev->dm.dmub_aux_transfer_done);
1564 adev->dm.dmub_notify = kzalloc(sizeof(struct dmub_notification), GFP_KERNEL);
1565 if (!adev->dm.dmub_notify) {
1566 DRM_INFO("amdgpu: fail to allocate adev->dm.dmub_notify");
1567 goto error;
1568 }
1569
1570 adev->dm.delayed_hpd_wq = create_singlethread_workqueue("amdgpu_dm_hpd_wq");
1571 if (!adev->dm.delayed_hpd_wq) {
1572 DRM_ERROR("amdgpu: failed to create hpd offload workqueue.\n");
1573 goto error;
1574 }
1575
1576 amdgpu_dm_outbox_init(adev);
1577#if defined(CONFIG_DRM_AMD_DC_DCN)
1578 if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_AUX_REPLY,
1579 dmub_aux_setconfig_callback, false)) {
1580 DRM_ERROR("amdgpu: fail to register dmub aux callback");
1581 goto error;
1582 }
1583 if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD, dmub_hpd_callback, true)) {
1584 DRM_ERROR("amdgpu: fail to register dmub hpd callback");
1585 goto error;
1586 }
1587 if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD_IRQ, dmub_hpd_callback, true)) {
1588 DRM_ERROR("amdgpu: fail to register dmub hpd callback");
1589 goto error;
1590 }
1591#endif /* CONFIG_DRM_AMD_DC_DCN */
1592 }
1593
1594 if (amdgpu_dm_initialize_drm_device(adev)) {
1595 DRM_ERROR(
1596 "amdgpu: failed to initialize sw for display support.\n");
1597 goto error;
1598 }
1599
1600 /* create fake encoders for MST */
1601 dm_dp_create_fake_mst_encoders(adev);
1602
1603 /* TODO: Add_display_info? */
1604
1605 /* TODO use dynamic cursor width */
1606 adev_to_drm(adev)->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
1607 adev_to_drm(adev)->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;
1608
1609 if (drm_vblank_init(adev_to_drm(adev), adev->dm.display_indexes_num)) {
1610 DRM_ERROR(
1611 "amdgpu: failed to initialize sw for display support.\n");
1612 goto error;
1613 }
1614
1615
1616 DRM_DEBUG_DRIVER("KMS initialized.\n");
1617
1618 return 0;
1619error:
1620 amdgpu_dm_fini(adev);
1621
1622 return -EINVAL;
1623}
1624
1625static int amdgpu_dm_early_fini(void *handle)
1626{
1627 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1628
1629 amdgpu_dm_audio_fini(adev);
1630
1631 return 0;
1632}
1633
1634static void amdgpu_dm_fini(struct amdgpu_device *adev)
1635{
1636 int i;
1637
1638#if defined(CONFIG_DRM_AMD_DC_DCN)
1639 if (adev->dm.vblank_control_workqueue) {
1640 destroy_workqueue(adev->dm.vblank_control_workqueue);
1641 adev->dm.vblank_control_workqueue = NULL;
1642 }
1643#endif
1644
1645 for (i = 0; i < adev->dm.display_indexes_num; i++) {
1646 drm_encoder_cleanup(&adev->dm.mst_encoders[i].base);
1647 }
1648
1649 amdgpu_dm_destroy_drm_device(&adev->dm);
1650
1651#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
1652 if (adev->dm.crc_rd_wrk) {
1653 flush_work(&adev->dm.crc_rd_wrk->notify_ta_work);
1654 kfree(adev->dm.crc_rd_wrk);
1655 adev->dm.crc_rd_wrk = NULL;
1656 }
1657#endif
1658#ifdef CONFIG_DRM_AMD_DC_HDCP
1659 if (adev->dm.hdcp_workqueue) {
1660 hdcp_destroy(&adev->dev->kobj, adev->dm.hdcp_workqueue);
1661 adev->dm.hdcp_workqueue = NULL;
1662 }
1663
1664 if (adev->dm.dc)
1665 dc_deinit_callbacks(adev->dm.dc);
1666#endif
1667
1668 dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);
1669
1670 if (dc_enable_dmub_notifications(adev->dm.dc)) {
1671 kfree(adev->dm.dmub_notify);
1672 adev->dm.dmub_notify = NULL;
1673 destroy_workqueue(adev->dm.delayed_hpd_wq);
1674 adev->dm.delayed_hpd_wq = NULL;
1675 }
1676
1677 if (adev->dm.dmub_bo)
1678 amdgpu_bo_free_kernel(&adev->dm.dmub_bo,
1679 &adev->dm.dmub_bo_gpu_addr,
1680 &adev->dm.dmub_bo_cpu_addr);
1681
1682 if (adev->dm.hpd_rx_offload_wq) {
1683 for (i = 0; i < adev->dm.dc->caps.max_links; i++) {
1684 if (adev->dm.hpd_rx_offload_wq[i].wq) {
1685 destroy_workqueue(adev->dm.hpd_rx_offload_wq[i].wq);
1686 adev->dm.hpd_rx_offload_wq[i].wq = NULL;
1687 }
1688 }
1689
1690 kfree(adev->dm.hpd_rx_offload_wq);
1691 adev->dm.hpd_rx_offload_wq = NULL;
1692 }
1693
1694 /* DC Destroy TODO: Replace destroy DAL */
1695 if (adev->dm.dc)
1696 dc_destroy(&adev->dm.dc);
1697 /*
1698 * TODO: pageflip, vlank interrupt
1699 *
1700 * amdgpu_dm_irq_fini(adev);
1701 */
1702
1703 if (adev->dm.cgs_device) {
1704 amdgpu_cgs_destroy_device(adev->dm.cgs_device);
1705 adev->dm.cgs_device = NULL;
1706 }
1707 if (adev->dm.freesync_module) {
1708 mod_freesync_destroy(adev->dm.freesync_module);
1709 adev->dm.freesync_module = NULL;
1710 }
1711
1712 mutex_destroy(&adev->dm.audio_lock);
1713 mutex_destroy(&adev->dm.dc_lock);
1714
1715 return;
1716}
1717
1718static int load_dmcu_fw(struct amdgpu_device *adev)
1719{
1720 const char *fw_name_dmcu = NULL;
1721 int r;
1722 const struct dmcu_firmware_header_v1_0 *hdr;
1723
1724 switch (adev->asic_type) {
1725#if defined(CONFIG_DRM_AMD_DC_SI)
1726 case CHIP_TAHITI:
1727 case CHIP_PITCAIRN:
1728 case CHIP_VERDE:
1729 case CHIP_OLAND:
1730#endif
1731 case CHIP_BONAIRE:
1732 case CHIP_HAWAII:
1733 case CHIP_KAVERI:
1734 case CHIP_KABINI:
1735 case CHIP_MULLINS:
1736 case CHIP_TONGA:
1737 case CHIP_FIJI:
1738 case CHIP_CARRIZO:
1739 case CHIP_STONEY:
1740 case CHIP_POLARIS11:
1741 case CHIP_POLARIS10:
1742 case CHIP_POLARIS12:
1743 case CHIP_VEGAM:
1744 case CHIP_VEGA10:
1745 case CHIP_VEGA12:
1746 case CHIP_VEGA20:
1747 return 0;
1748 case CHIP_NAVI12:
1749 fw_name_dmcu = FIRMWARE_NAVI12_DMCU;
1750 break;
1751 case CHIP_RAVEN:
1752 if (ASICREV_IS_PICASSO(adev->external_rev_id))
1753 fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
1754 else if (ASICREV_IS_RAVEN2(adev->external_rev_id))
1755 fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
1756 else
1757 return 0;
1758 break;
1759 default:
1760 switch (adev->ip_versions[DCE_HWIP][0]) {
1761 case IP_VERSION(2, 0, 2):
1762 case IP_VERSION(2, 0, 3):
1763 case IP_VERSION(2, 0, 0):
1764 case IP_VERSION(2, 1, 0):
1765 case IP_VERSION(3, 0, 0):
1766 case IP_VERSION(3, 0, 2):
1767 case IP_VERSION(3, 0, 3):
1768 case IP_VERSION(3, 0, 1):
1769 case IP_VERSION(3, 1, 2):
1770 case IP_VERSION(3, 1, 3):
1771 return 0;
1772 default:
1773 break;
1774 }
1775 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
1776 return -EINVAL;
1777 }
1778
1779 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
1780 DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n");
1781 return 0;
1782 }
1783
1784 r = request_firmware_direct(&adev->dm.fw_dmcu, fw_name_dmcu, adev->dev);
1785 if (r == -ENOENT) {
1786 /* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
1787 DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
1788 adev->dm.fw_dmcu = NULL;
1789 return 0;
1790 }
1791 if (r) {
1792 dev_err(adev->dev, "amdgpu_dm: Can't load firmware \"%s\"\n",
1793 fw_name_dmcu);
1794 return r;
1795 }
1796
1797 r = amdgpu_ucode_validate(adev->dm.fw_dmcu);
1798 if (r) {
1799 dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
1800 fw_name_dmcu);
1801 release_firmware(adev->dm.fw_dmcu);
1802 adev->dm.fw_dmcu = NULL;
1803 return r;
1804 }
1805
1806 hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data;
1807 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM;
1808 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu;
1809 adev->firmware.fw_size +=
1810 ALIGN(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1811
1812 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV;
1813 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu;
1814 adev->firmware.fw_size +=
1815 ALIGN(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1816
1817 adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version);
1818
1819 DRM_DEBUG_KMS("PSP loading DMCU firmware\n");
1820
1821 return 0;
1822}
1823
1824static uint32_t amdgpu_dm_dmub_reg_read(void *ctx, uint32_t address)
1825{
1826 struct amdgpu_device *adev = ctx;
1827
1828 return dm_read_reg(adev->dm.dc->ctx, address);
1829}
1830
1831static void amdgpu_dm_dmub_reg_write(void *ctx, uint32_t address,
1832 uint32_t value)
1833{
1834 struct amdgpu_device *adev = ctx;
1835
1836 return dm_write_reg(adev->dm.dc->ctx, address, value);
1837}
1838
1839static int dm_dmub_sw_init(struct amdgpu_device *adev)
1840{
1841 struct dmub_srv_create_params create_params;
1842 struct dmub_srv_region_params region_params;
1843 struct dmub_srv_region_info region_info;
1844 struct dmub_srv_fb_params fb_params;
1845 struct dmub_srv_fb_info *fb_info;
1846 struct dmub_srv *dmub_srv;
1847 const struct dmcub_firmware_header_v1_0 *hdr;
1848 const char *fw_name_dmub;
1849 enum dmub_asic dmub_asic;
1850 enum dmub_status status;
1851 int r;
1852
1853 switch (adev->ip_versions[DCE_HWIP][0]) {
1854 case IP_VERSION(2, 1, 0):
1855 dmub_asic = DMUB_ASIC_DCN21;
1856 fw_name_dmub = FIRMWARE_RENOIR_DMUB;
1857 if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
1858 fw_name_dmub = FIRMWARE_GREEN_SARDINE_DMUB;
1859 break;
1860 case IP_VERSION(3, 0, 0):
1861 if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 3, 0)) {
1862 dmub_asic = DMUB_ASIC_DCN30;
1863 fw_name_dmub = FIRMWARE_SIENNA_CICHLID_DMUB;
1864 } else {
1865 dmub_asic = DMUB_ASIC_DCN30;
1866 fw_name_dmub = FIRMWARE_NAVY_FLOUNDER_DMUB;
1867 }
1868 break;
1869 case IP_VERSION(3, 0, 1):
1870 dmub_asic = DMUB_ASIC_DCN301;
1871 fw_name_dmub = FIRMWARE_VANGOGH_DMUB;
1872 break;
1873 case IP_VERSION(3, 0, 2):
1874 dmub_asic = DMUB_ASIC_DCN302;
1875 fw_name_dmub = FIRMWARE_DIMGREY_CAVEFISH_DMUB;
1876 break;
1877 case IP_VERSION(3, 0, 3):
1878 dmub_asic = DMUB_ASIC_DCN303;
1879 fw_name_dmub = FIRMWARE_BEIGE_GOBY_DMUB;
1880 break;
1881 case IP_VERSION(3, 1, 2):
1882 case IP_VERSION(3, 1, 3):
1883 dmub_asic = (adev->external_rev_id == YELLOW_CARP_B0) ? DMUB_ASIC_DCN31B : DMUB_ASIC_DCN31;
1884 fw_name_dmub = FIRMWARE_YELLOW_CARP_DMUB;
1885 break;
1886
1887 default:
1888 /* ASIC doesn't support DMUB. */
1889 return 0;
1890 }
1891
1892 r = request_firmware_direct(&adev->dm.dmub_fw, fw_name_dmub, adev->dev);
1893 if (r) {
1894 DRM_ERROR("DMUB firmware loading failed: %d\n", r);
1895 return 0;
1896 }
1897
1898 r = amdgpu_ucode_validate(adev->dm.dmub_fw);
1899 if (r) {
1900 DRM_ERROR("Couldn't validate DMUB firmware: %d\n", r);
1901 return 0;
1902 }
1903
1904 hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;
1905 adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);
1906
1907 if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
1908 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
1909 AMDGPU_UCODE_ID_DMCUB;
1910 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw =
1911 adev->dm.dmub_fw;
1912 adev->firmware.fw_size +=
1913 ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);
1914
1915 DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
1916 adev->dm.dmcub_fw_version);
1917 }
1918
1920 adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL);
1921 dmub_srv = adev->dm.dmub_srv;
1922
1923 if (!dmub_srv) {
1924 DRM_ERROR("Failed to allocate DMUB service!\n");
1925 return -ENOMEM;
1926 }
1927
1928 memset(&create_params, 0, sizeof(create_params));
1929 create_params.user_ctx = adev;
1930 create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read;
1931 create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write;
1932 create_params.asic = dmub_asic;
1933
1934 /* Create the DMUB service. */
1935 status = dmub_srv_create(dmub_srv, &create_params);
1936 if (status != DMUB_STATUS_OK) {
1937 DRM_ERROR("Error creating DMUB service: %d\n", status);
1938 return -EINVAL;
1939 }
1940
1941 /* Calculate the size of all the regions for the DMUB service. */
1942 memset(&region_params, 0, sizeof(region_params));
1943
1944 region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
1945 PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
1946 region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
1947 region_params.vbios_size = adev->bios_size;
1948 region_params.fw_bss_data = region_params.bss_data_size ?
1949 adev->dm.dmub_fw->data +
1950 le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1951 le32_to_cpu(hdr->inst_const_bytes) : NULL;
1952 region_params.fw_inst_const =
1953 adev->dm.dmub_fw->data +
1954 le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1955 PSP_HEADER_BYTES;
1956
1957 status = dmub_srv_calc_region_info(dmub_srv, &region_params,
1958 &region_info);
1959
1960 if (status != DMUB_STATUS_OK) {
1961 DRM_ERROR("Error calculating DMUB region info: %d\n", status);
1962 return -EINVAL;
1963 }
1964
1965 /*
1966 * Allocate a framebuffer based on the total size of all the regions.
1967 * TODO: Move this into GART.
1968 */
1969 r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE,
1970 AMDGPU_GEM_DOMAIN_VRAM, &adev->dm.dmub_bo,
1971 &adev->dm.dmub_bo_gpu_addr,
1972 &adev->dm.dmub_bo_cpu_addr);
1973 if (r)
1974 return r;
1975
1976 /* Rebase the regions on the framebuffer address. */
1977 memset(&fb_params, 0, sizeof(fb_params));
1978 fb_params.cpu_addr = adev->dm.dmub_bo_cpu_addr;
1979 fb_params.gpu_addr = adev->dm.dmub_bo_gpu_addr;
1980 fb_params.region_info = &region_info;
1981
1982 adev->dm.dmub_fb_info =
1983 kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL);
1984 fb_info = adev->dm.dmub_fb_info;
1985
1986 if (!fb_info) {
1987 DRM_ERROR(
1988 "Failed to allocate framebuffer info for DMUB service!\n");
1989 return -ENOMEM;
1990 }
1991
1992 status = dmub_srv_calc_fb_info(dmub_srv, &fb_params, fb_info);
1993 if (status != DMUB_STATUS_OK) {
1994 DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
1995 return -EINVAL;
1996 }
1997
1998 return 0;
1999}
2000
2001static int dm_sw_init(void *handle)
2002{
2003 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2004 int r;
2005
2006 r = dm_dmub_sw_init(adev);
2007 if (r)
2008 return r;
2009
2010 return load_dmcu_fw(adev);
2011}
2012
2013static int dm_sw_fini(void *handle)
2014{
2015 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2016
2017 kfree(adev->dm.dmub_fb_info);
2018 adev->dm.dmub_fb_info = NULL;
2019
2020 if (adev->dm.dmub_srv) {
2021 dmub_srv_destroy(adev->dm.dmub_srv);
2022 adev->dm.dmub_srv = NULL;
2023 }
2024
2025 release_firmware(adev->dm.dmub_fw);
2026 adev->dm.dmub_fw = NULL;
2027
2028 release_firmware(adev->dm.fw_dmcu);
2029 adev->dm.fw_dmcu = NULL;
2030
2031 return 0;
2032}
2033
2034static int detect_mst_link_for_all_connectors(struct drm_device *dev)
2035{
2036 struct amdgpu_dm_connector *aconnector;
2037 struct drm_connector *connector;
2038 struct drm_connector_list_iter iter;
2039 int ret = 0;
2040
2041 drm_connector_list_iter_begin(dev, &iter);
2042 drm_for_each_connector_iter(connector, &iter) {
2043 aconnector = to_amdgpu_dm_connector(connector);
2044 if (aconnector->dc_link->type == dc_connection_mst_branch &&
2045 aconnector->mst_mgr.aux) {
2046 DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
2047 aconnector,
2048 aconnector->base.base.id);
2049
2050 ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
2051 if (ret < 0) {
2052 DRM_ERROR("DM_MST: Failed to start MST\n");
2053 aconnector->dc_link->type =
2054 dc_connection_single;
2055 break;
2056 }
2057 }
2058 }
2059 drm_connector_list_iter_end(&iter);
2060
2061 return ret;
2062}
2063
2064static int dm_late_init(void *handle)
2065{
2066 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2067
2068 struct dmcu_iram_parameters params;
2069 unsigned int linear_lut[16];
2070 int i;
2071 struct dmcu *dmcu = NULL;
2072
2073 dmcu = adev->dm.dc->res_pool->dmcu;
2074
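	/* Build a linear (identity) backlight transfer LUT: 16 evenly spaced
	 * points from 0 to 0xFFFF.
	 */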
2075 for (i = 0; i < 16; i++)
2076 linear_lut[i] = 0xFFFF * i / 15;
2077
2078 params.set = 0;
2079 params.backlight_ramping_override = false;
2080 params.backlight_ramping_start = 0xCCCC;
2081 params.backlight_ramping_reduction = 0xCCCCCCCC;
2082 params.backlight_lut_array_size = 16;
2083 params.backlight_lut_array = linear_lut;
2084
2085	/* Min backlight level after ABM reduction; don't allow below 1%.
2086	 * 0xFFFF * 0.01 = 0x28F
2087	 */
2088 params.min_abm_backlight = 0x28F;
2089	/* When ABM is implemented on DMCUB (ABM 2.4 and up), the
2090	 * dmcu object will be NULL.
2091	 */
2093 if (dmcu) {
2094 if (!dmcu_load_iram(dmcu, params))
2095 return -EINVAL;
2096 } else if (adev->dm.dc->ctx->dmub_srv) {
2097 struct dc_link *edp_links[MAX_NUM_EDP];
2098 int edp_num;
2099
2100 get_edp_links(adev->dm.dc, edp_links, &edp_num);
2101 for (i = 0; i < edp_num; i++) {
2102 if (!dmub_init_abm_config(adev->dm.dc->res_pool, params, i))
2103 return -EINVAL;
2104 }
2105 }
2106
2107 return detect_mst_link_for_all_connectors(adev_to_drm(adev));
2108}
2109
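/* Suspend or resume the MST topology managers of all root MST connectors.
 * If a manager fails to resume, its MST state is torn down and a hotplug
 * event is sent so userspace re-probes the topology.
 */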
2110static void s3_handle_mst(struct drm_device *dev, bool suspend)
2111{
2112 struct amdgpu_dm_connector *aconnector;
2113 struct drm_connector *connector;
2114 struct drm_connector_list_iter iter;
2115 struct drm_dp_mst_topology_mgr *mgr;
2116 int ret;
2117 bool need_hotplug = false;
2118
2119 drm_connector_list_iter_begin(dev, &iter);
2120 drm_for_each_connector_iter(connector, &iter) {
2121 aconnector = to_amdgpu_dm_connector(connector);
2122 if (aconnector->dc_link->type != dc_connection_mst_branch ||
2123 aconnector->mst_port)
2124 continue;
2125
2126 mgr = &aconnector->mst_mgr;
2127
2128 if (suspend) {
2129 drm_dp_mst_topology_mgr_suspend(mgr);
2130 } else {
2131 ret = drm_dp_mst_topology_mgr_resume(mgr, true);
2132 if (ret < 0) {
2133 drm_dp_mst_topology_mgr_set_mst(mgr, false);
2134 need_hotplug = true;
2135 }
2136 }
2137 }
2138 drm_connector_list_iter_end(&iter);
2139
2140 if (need_hotplug)
2141 drm_kms_helper_hotplug_event(dev);
2142}
2143
2144static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
2145{
2146 struct smu_context *smu = &adev->smu;
2147 int ret = 0;
2148
2149 if (!is_support_sw_smu(adev))
2150 return 0;
2151
2152	/* This interface is for dGPU Navi1x. The Linux dc-pplib interface depends
2153	 * on the Windows driver DC implementation.
2154	 * For Navi1x, the clock settings of the DCN watermarks are fixed; the
2155	 * settings should be passed to SMU during boot up and on resume from S3.
2156	 * Boot up: DC calculates the DCN watermark clock settings within dc_create,
2157	 * dcn20_resource_construct
2158	 * then calls the pplib functions below to pass the settings to SMU:
2159	 * smu_set_watermarks_for_clock_ranges
2160	 * smu_set_watermarks_table
2161	 * navi10_set_watermarks_table
2162	 * smu_write_watermarks_table
2163	 *
2164	 * For Renoir, the clock settings of the DCN watermarks are also fixed values.
2165	 * DC has implemented a different flow for the Windows driver:
2166	 * dc_hardware_init / dc_set_power_state
2167	 * dcn10_init_hw
2168	 * notify_wm_ranges
2169	 * set_wm_ranges
2170	 * -- Linux
2171	 * smu_set_watermarks_for_clock_ranges
2172	 * renoir_set_watermarks_table
2173	 * smu_write_watermarks_table
2174	 *
2175	 * For Linux,
2176	 * dc_hardware_init -> amdgpu_dm_init
2177	 * dc_set_power_state --> dm_resume
2178	 *
2179	 * Therefore, this function applies to Navi10/12/14 but not to Renoir.
2180	 *
2181	 */
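	/* DCE_HWIP 2.0.0 and 2.0.2 are the DCN variants of the Navi1x dGPUs
	 * referred to above.
	 */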
2182 switch (adev->ip_versions[DCE_HWIP][0]) {
2183 case IP_VERSION(2, 0, 2):
2184 case IP_VERSION(2, 0, 0):
2185 break;
2186 default:
2187 return 0;
2188 }
2189
2190 ret = smu_write_watermarks_table(smu);
2191 if (ret) {
2192 DRM_ERROR("Failed to update WMTABLE!\n");
2193 return ret;
2194 }
2195
2196 return 0;
2197}
2198
2199/**
2200 * dm_hw_init() - Initialize DC device
2201 * @handle: The base driver device containing the amdgpu_dm device.
2202 *
2203 * Initialize the &struct amdgpu_display_manager device. This involves calling
2204 * the initializers of each DM component, then populating the struct with them.
2205 *
2206 * Although the function implies hardware initialization, both hardware and
2207 * software are initialized here. Splitting them out to their relevant init
2208 * hooks is a future TODO item.
2209 *
2210 * Some notable things that are initialized here:
2211 *
2212 * - Display Core, both software and hardware
2213 * - DC modules that we need (freesync and color management)
2214 * - DRM software states
2215 * - Interrupt sources and handlers
2216 * - Vblank support
2217 * - Debug FS entries, if enabled
2218 */
2219static int dm_hw_init(void *handle)
2220{
2221 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2222 /* Create DAL display manager */
2223 amdgpu_dm_init(adev);
2224 amdgpu_dm_hpd_init(adev);
2225
2226 return 0;
2227}
2228
2229/**
2230 * dm_hw_fini() - Teardown DC device
2231 * @handle: The base driver device containing the amdgpu_dm device.
2232 *
2233 * Teardown components within &struct amdgpu_display_manager that require
2234 * cleanup. This involves cleaning up the DRM device, DC, and any modules that
2235 * were loaded. Also flush IRQ workqueues and disable them.
2236 */
2237static int dm_hw_fini(void *handle)
2238{
2239 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2240
2241 amdgpu_dm_hpd_fini(adev);
2242
2243 amdgpu_dm_irq_fini(adev);
2244 amdgpu_dm_fini(adev);
2245 return 0;
2246}
2247
2248
2249static int dm_enable_vblank(struct drm_crtc *crtc);
2250static void dm_disable_vblank(struct drm_crtc *crtc);
2251
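/* Enable or disable the pflip and vblank interrupts for every active stream
 * in @state. Used around GPU reset while the cached DC state is torn down
 * and re-committed.
 */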
2252static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev,
2253 struct dc_state *state, bool enable)
2254{
2255 enum dc_irq_source irq_source;
2256 struct amdgpu_crtc *acrtc;
2257 int rc = -EBUSY;
2258 int i = 0;
2259
2260 for (i = 0; i < state->stream_count; i++) {
2261 acrtc = get_crtc_by_otg_inst(
2262 adev, state->stream_status[i].primary_otg_inst);
2263
2264 if (acrtc && state->stream_status[i].plane_count != 0) {
2265 irq_source = IRQ_TYPE_PFLIP + acrtc->otg_inst;
2266 rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
2267			DRM_DEBUG_VBL("crtc %d - pflip irq %sabling: r=%d\n",
2268 acrtc->crtc_id, enable ? "en" : "dis", rc);
2269 if (rc)
2270 DRM_WARN("Failed to %s pflip interrupts\n",
2271 enable ? "enable" : "disable");
2272
2273 if (enable) {
2274 rc = dm_enable_vblank(&acrtc->base);
2275 if (rc)
2276 DRM_WARN("Failed to enable vblank interrupts\n");
2277 } else {
2278 dm_disable_vblank(&acrtc->base);
2279 }
2280
2281 }
2282 }
2283
2284}
2285
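/* Commit a copy of the current DC state with every plane and stream removed.
 * Called on suspend during GPU reset, before the hardware is reinitialized.
 */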
2286static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc)
2287{
2288 struct dc_state *context = NULL;
2289 enum dc_status res = DC_ERROR_UNEXPECTED;
2290 int i;
2291 struct dc_stream_state *del_streams[MAX_PIPES];
2292 int del_streams_count = 0;
2293
2294 memset(del_streams, 0, sizeof(del_streams));
2295
2296 context = dc_create_state(dc);
2297 if (context == NULL)
2298 goto context_alloc_fail;
2299
2300 dc_resource_state_copy_construct_current(dc, context);
2301
2302 /* First remove from context all streams */
2303 for (i = 0; i < context->stream_count; i++) {
2304 struct dc_stream_state *stream = context->streams[i];
2305
2306 del_streams[del_streams_count++] = stream;
2307 }
2308
2309 /* Remove all planes for removed streams and then remove the streams */
2310 for (i = 0; i < del_streams_count; i++) {
2311 if (!dc_rem_all_planes_for_stream(dc, del_streams[i], context)) {
2312 res = DC_FAIL_DETACH_SURFACES;
2313 goto fail;
2314 }
2315
2316 res = dc_remove_stream_from_ctx(dc, context, del_streams[i]);
2317 if (res != DC_OK)
2318 goto fail;
2319 }
2320
2321 res = dc_commit_state(dc, context);
2322
2323fail:
2324 dc_release_state(context);
2325
2326context_alloc_fail:
2327 return res;
2328}
2329
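/* Flush the per-link HPD RX offload workqueues so no offload work is still
 * in flight when the device is suspended.
 */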
2330static void hpd_rx_irq_work_suspend(struct amdgpu_display_manager *dm)
2331{
2332 int i;
2333
2334 if (dm->hpd_rx_offload_wq) {
2335 for (i = 0; i < dm->dc->caps.max_links; i++)
2336 flush_workqueue(dm->hpd_rx_offload_wq[i].wq);
2337 }
2338}
2339
2340static int dm_suspend(void *handle)
2341{
2342 struct amdgpu_device *adev = handle;
2343 struct amdgpu_display_manager *dm = &adev->dm;
2344 int ret = 0;
2345
2346 if (amdgpu_in_reset(adev)) {
2347 mutex_lock(&dm->dc_lock);
2348
2349#if defined(CONFIG_DRM_AMD_DC_DCN)
2350 dc_allow_idle_optimizations(adev->dm.dc, false);
2351#endif
2352
2353 dm->cached_dc_state = dc_copy_state(dm->dc->current_state);
2354
2355 dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false);
2356
2357 amdgpu_dm_commit_zero_streams(dm->dc);
2358
2359 amdgpu_dm_irq_suspend(adev);
2360
2361 hpd_rx_irq_work_suspend(dm);
2362
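		/* Note: dm->dc_lock is intentionally left held here; the
		 * GPU-reset path of dm_resume() releases it after the cached
		 * state has been re-committed.
		 */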
2363 return ret;
2364 }
2365
2366 WARN_ON(adev->dm.cached_state);
2367 adev->dm.cached_state = drm_atomic_helper_suspend(adev_to_drm(adev));
2368
2369 s3_handle_mst(adev_to_drm(adev), true);
2370
2371 amdgpu_dm_irq_suspend(adev);
2372
2373 hpd_rx_irq_work_suspend(dm);
2374
2375 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
2376
2377 return 0;
2378}
2379
2380static struct amdgpu_dm_connector *
2381amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
2382 struct drm_crtc *crtc)
2383{
2384 uint32_t i;
2385 struct drm_connector_state *new_con_state;
2386 struct drm_connector *connector;
2387 struct drm_crtc *crtc_from_state;
2388
2389 for_each_new_connector_in_state(state, connector, new_con_state, i) {
2390 crtc_from_state = new_con_state->crtc;
2391
2392 if (crtc_from_state == crtc)
2393 return to_amdgpu_dm_connector(connector);
2394 }
2395
2396 return NULL;
2397}
2398
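/* Emulate link detection for forced connectors: create a fake sink matching
 * the connector's signal type and read the local EDID into it. Used when a
 * connector is forced on but no physical connection was detected.
 */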
2399static void emulated_link_detect(struct dc_link *link)
2400{
2401 struct dc_sink_init_data sink_init_data = { 0 };
2402 struct display_sink_capability sink_caps = { 0 };
2403 enum dc_edid_status edid_status;
2404 struct dc_context *dc_ctx = link->ctx;
2405 struct dc_sink *sink = NULL;
2406 struct dc_sink *prev_sink = NULL;
2407
2408 link->type = dc_connection_none;
2409 prev_sink = link->local_sink;
2410
2411 if (prev_sink)
2412 dc_sink_release(prev_sink);
2413
2414 switch (link->connector_signal) {
2415 case SIGNAL_TYPE_HDMI_TYPE_A: {
2416 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2417 sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
2418 break;
2419 }
2420
2421 case SIGNAL_TYPE_DVI_SINGLE_LINK: {
2422 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2423 sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
2424 break;
2425 }
2426
2427 case SIGNAL_TYPE_DVI_DUAL_LINK: {
2428 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2429 sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
2430 break;
2431 }
2432
2433 case SIGNAL_TYPE_LVDS: {
2434 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2435 sink_caps.signal = SIGNAL_TYPE_LVDS;
2436 break;
2437 }
2438
2439 case SIGNAL_TYPE_EDP: {
2440 sink_caps.transaction_type =
2441 DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
2442 sink_caps.signal = SIGNAL_TYPE_EDP;
2443 break;
2444 }
2445
2446 case SIGNAL_TYPE_DISPLAY_PORT: {
2447 sink_caps.transaction_type =
2448 DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
2449 sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
2450 break;
2451 }
2452
2453 default:
2454 DC_ERROR("Invalid connector type! signal:%d\n",
2455 link->connector_signal);
2456 return;
2457 }
2458
2459 sink_init_data.link = link;
2460 sink_init_data.sink_signal = sink_caps.signal;
2461
2462 sink = dc_sink_create(&sink_init_data);
2463 if (!sink) {
2464 DC_ERROR("Failed to create sink!\n");
2465 return;
2466 }
2467
2468 /* dc_sink_create returns a new reference */
2469 link->local_sink = sink;
2470
2471 edid_status = dm_helpers_read_local_edid(
2472 link->ctx,
2473 link,
2474 sink);
2475
2476 if (edid_status != EDID_OK)
2477 DC_ERROR("Failed to read EDID");
2478
2479}
2480
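/* After a GPU reset, re-commit every stream in the cached DC state, forcing
 * full plane updates so the hardware state is reprogrammed.
 */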
2481static void dm_gpureset_commit_state(struct dc_state *dc_state,
2482 struct amdgpu_display_manager *dm)
2483{
2484 struct {
2485 struct dc_surface_update surface_updates[MAX_SURFACES];
2486 struct dc_plane_info plane_infos[MAX_SURFACES];
2487 struct dc_scaling_info scaling_infos[MAX_SURFACES];
2488 struct dc_flip_addrs flip_addrs[MAX_SURFACES];
2489 struct dc_stream_update stream_update;
2490	} *bundle;
2491 int k, m;
2492
2493 bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
2494
2495 if (!bundle) {
2496 dm_error("Failed to allocate update bundle\n");
2497 goto cleanup;
2498 }
2499
2500 for (k = 0; k < dc_state->stream_count; k++) {
2501 bundle->stream_update.stream = dc_state->streams[k];
2502
2503 for (m = 0; m < dc_state->stream_status->plane_count; m++) {
2504 bundle->surface_updates[m].surface =
2505 dc_state->stream_status->plane_states[m];
2506 bundle->surface_updates[m].surface->force_full_update =
2507 true;
2508 }
2509 dc_commit_updates_for_stream(
2510 dm->dc, bundle->surface_updates,
2511 dc_state->stream_status->plane_count,
2512 dc_state->streams[k], &bundle->stream_update, dc_state);
2513 }
2514
2515cleanup:
2516 kfree(bundle);
2517
2518 return;
2519}
2520
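/* Turn off the stream currently driven by @link by committing a minimal
 * stream update with dpms_off set.
 */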
2521static void dm_set_dpms_off(struct dc_link *link, struct dm_crtc_state *acrtc_state)
2522{
2523 struct dc_stream_state *stream_state;
2524 struct amdgpu_dm_connector *aconnector = link->priv;
2525 struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
2526 struct dc_stream_update stream_update;
2527 bool dpms_off = true;
2528
2529 memset(&stream_update, 0, sizeof(stream_update));
2530 stream_update.dpms_off = &dpms_off;
2531
2532 mutex_lock(&adev->dm.dc_lock);
2533 stream_state = dc_stream_find_from_link(link);
2534
2535 if (stream_state == NULL) {
2536 DRM_DEBUG_DRIVER("Error finding stream state associated with link!\n");
2537 mutex_unlock(&adev->dm.dc_lock);
2538 return;
2539 }
2540
2541 stream_update.stream = stream_state;
2542 acrtc_state->force_dpms_off = true;
2543 dc_commit_updates_for_stream(stream_state->ctx->dc, NULL, 0,
2544 stream_state, &stream_update,
2545 stream_state->ctx->dc->current_state);
2546 mutex_unlock(&adev->dm.dc_lock);
2547}
2548
2549static int dm_resume(void *handle)
2550{
2551 struct amdgpu_device *adev = handle;
2552 struct drm_device *ddev = adev_to_drm(adev);
2553 struct amdgpu_display_manager *dm = &adev->dm;
2554 struct amdgpu_dm_connector *aconnector;
2555 struct drm_connector *connector;
2556 struct drm_connector_list_iter iter;
2557 struct drm_crtc *crtc;
2558 struct drm_crtc_state *new_crtc_state;
2559 struct dm_crtc_state *dm_new_crtc_state;
2560 struct drm_plane *plane;
2561 struct drm_plane_state *new_plane_state;
2562 struct dm_plane_state *dm_new_plane_state;
2563 struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
2564 enum dc_connection_type new_connection_type = dc_connection_none;
2565 struct dc_state *dc_state;
2566 int i, r, j;
2567
2568 if (amdgpu_in_reset(adev)) {
2569 dc_state = dm->cached_dc_state;
2570
2571 /*
2572 * The dc->current_state is backed up into dm->cached_dc_state
2573 * before we commit 0 streams.
2574 *
2575 * DC will clear link encoder assignments on the real state
2576 * but the changes won't propagate over to the copy we made
2577 * before the 0 streams commit.
2578 *
2579 * DC expects that link encoder assignments are *not* valid
2580 * when committing a state, so as a workaround it needs to be
2581 * cleared here.
2582 */
2583 link_enc_cfg_init(dm->dc, dc_state);
2584
2585 if (dc_enable_dmub_notifications(adev->dm.dc))
2586 amdgpu_dm_outbox_init(adev);
2587
2588 r = dm_dmub_hw_init(adev);
2589 if (r)
2590 DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
2591
2592 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2593 dc_resume(dm->dc);
2594
2595 amdgpu_dm_irq_resume_early(adev);
2596
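		/* Mark every cached stream and plane as fully changed so the
		 * commit below reprograms the complete hardware state.
		 */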
2597 for (i = 0; i < dc_state->stream_count; i++) {
2598 dc_state->streams[i]->mode_changed = true;
2599 for (j = 0; j < dc_state->stream_status[i].plane_count; j++) {
2600 dc_state->stream_status[i].plane_states[j]->update_flags.raw
2601 = 0xffffffff;
2602 }
2603 }
2604
2605 WARN_ON(!dc_commit_state(dm->dc, dc_state));
2606
2607 dm_gpureset_commit_state(dm->cached_dc_state, dm);
2608
2609 dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, true);
2610
2611 dc_release_state(dm->cached_dc_state);
2612 dm->cached_dc_state = NULL;
2613
2614 amdgpu_dm_irq_resume_late(adev);
2615
2616 mutex_unlock(&dm->dc_lock);
2617
2618 return 0;
2619 }
2620 /* Recreate dc_state - DC invalidates it when setting power state to S3. */
2621 dc_release_state(dm_state->context);
2622 dm_state->context = dc_create_state(dm->dc);
2623 /* TODO: Remove dc_state->dccg, use dc->dccg directly. */
2624 dc_resource_state_construct(dm->dc, dm_state->context);
2625
2626 /* Re-enable outbox interrupts for DPIA. */
2627 if (dc_enable_dmub_notifications(adev->dm.dc))
2628 amdgpu_dm_outbox_init(adev);
2629
2630 /* Before powering on DC we need to re-initialize DMUB. */
2631 r = dm_dmub_hw_init(adev);
2632 if (r)
2633 DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
2634
2635 /* power on hardware */
2636 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2637
2638 /* program HPD filter */
2639 dc_resume(dm->dc);
2640
2641 /*
2642 * early enable HPD Rx IRQ, should be done before set mode as short
2643 * pulse interrupts are used for MST
2644 */
2645 amdgpu_dm_irq_resume_early(adev);
2646
2647	/* On resume we need to rewrite the MSTM control bits to enable MST */
2648 s3_handle_mst(ddev, false);
2649
2650	/* Do detection */
2651 drm_connector_list_iter_begin(ddev, &iter);
2652 drm_for_each_connector_iter(connector, &iter) {
2653 aconnector = to_amdgpu_dm_connector(connector);
2654
2655 /*
2656 * this is the case when traversing through already created
2657 * MST connectors, should be skipped
2658 */
2659 if (aconnector->mst_port)
2660 continue;
2661
2662 mutex_lock(&aconnector->hpd_lock);
2663 if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2664 DRM_ERROR("KMS: Failed to detect connector\n");
2665
2666 if (aconnector->base.force && new_connection_type == dc_connection_none)
2667 emulated_link_detect(aconnector->dc_link);
2668 else
2669 dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
2670
2671 if (aconnector->fake_enable && aconnector->dc_link->local_sink)
2672 aconnector->fake_enable = false;
2673
2674 if (aconnector->dc_sink)
2675 dc_sink_release(aconnector->dc_sink);
2676 aconnector->dc_sink = NULL;
2677 amdgpu_dm_update_connector_after_detect(aconnector);
2678 mutex_unlock(&aconnector->hpd_lock);
2679 }
2680 drm_connector_list_iter_end(&iter);
2681
2682 /* Force mode set in atomic commit */
2683 for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
2684 new_crtc_state->active_changed = true;
2685
2686 /*
2687 * atomic_check is expected to create the dc states. We need to release
2688 * them here, since they were duplicated as part of the suspend
2689 * procedure.
2690 */
2691 for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
2692 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
2693 if (dm_new_crtc_state->stream) {
2694 WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
2695 dc_stream_release(dm_new_crtc_state->stream);
2696 dm_new_crtc_state->stream = NULL;
2697 }
2698 }
2699
2700 for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
2701 dm_new_plane_state = to_dm_plane_state(new_plane_state);
2702 if (dm_new_plane_state->dc_state) {
2703 WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
2704 dc_plane_state_release(dm_new_plane_state->dc_state);
2705 dm_new_plane_state->dc_state = NULL;
2706 }
2707 }
2708
2709 drm_atomic_helper_resume(ddev, dm->cached_state);
2710
2711 dm->cached_state = NULL;
2712
2713 amdgpu_dm_irq_resume_late(adev);
2714
2715 amdgpu_dm_smu_write_watermarks_table(adev);
2716
2717 return 0;
2718}
2719
2720/**
2721 * DOC: DM Lifecycle
2722 *
2723 * DM (and consequently DC) is registered in the amdgpu base driver as a IP
2724 * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to
2725 * the base driver's device list to be initialized and torn down accordingly.
2726 *
2727 * The functions to do so are provided as hooks in &struct amd_ip_funcs.
2728 */
2729
2730static const struct amd_ip_funcs amdgpu_dm_funcs = {
2731 .name = "dm",
2732 .early_init = dm_early_init,
2733 .late_init = dm_late_init,
2734 .sw_init = dm_sw_init,
2735 .sw_fini = dm_sw_fini,
2736 .early_fini = amdgpu_dm_early_fini,
2737 .hw_init = dm_hw_init,
2738 .hw_fini = dm_hw_fini,
2739 .suspend = dm_suspend,
2740 .resume = dm_resume,
2741 .is_idle = dm_is_idle,
2742 .wait_for_idle = dm_wait_for_idle,
2743 .check_soft_reset = dm_check_soft_reset,
2744 .soft_reset = dm_soft_reset,
2745 .set_clockgating_state = dm_set_clockgating_state,
2746 .set_powergating_state = dm_set_powergating_state,
2747};
2748
2749const struct amdgpu_ip_block_version dm_ip_block =
2750{
2751 .type = AMD_IP_BLOCK_TYPE_DCE,
2752 .major = 1,
2753 .minor = 0,
2754 .rev = 0,
2755 .funcs = &amdgpu_dm_funcs,
2756};
2757
2758
2759/**
2760 * DOC: atomic
2761 *
2762 * *WIP*
2763 */
2764
2765static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
2766 .fb_create = amdgpu_display_user_framebuffer_create,
2767 .get_format_info = amd_get_format_info,
2768 .output_poll_changed = drm_fb_helper_output_poll_changed,
2769 .atomic_check = amdgpu_dm_atomic_check,
2770 .atomic_commit = drm_atomic_helper_commit,
2771};
2772
2773static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
2774 .atomic_commit_tail = amdgpu_dm_atomic_commit_tail
2775};
2776
2777static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
2778{
2779 u32 max_cll, min_cll, max, min, q, r;
2780 struct amdgpu_dm_backlight_caps *caps;
2781 struct amdgpu_display_manager *dm;
2782 struct drm_connector *conn_base;
2783 struct amdgpu_device *adev;
2784 struct dc_link *link = NULL;
2785 static const u8 pre_computed_values[] = {
2786 50, 51, 52, 53, 55, 56, 57, 58, 59, 61, 62, 63, 65, 66, 68, 69,
2787 71, 72, 74, 75, 77, 79, 81, 82, 84, 86, 88, 90, 92, 94, 96, 98};
2788 int i;
2789
2790 if (!aconnector || !aconnector->dc_link)
2791 return;
2792
2793 link = aconnector->dc_link;
2794 if (link->connector_signal != SIGNAL_TYPE_EDP)
2795 return;
2796
2797 conn_base = &aconnector->base;
2798 adev = drm_to_adev(conn_base->dev);
2799 dm = &adev->dm;
2800 for (i = 0; i < dm->num_of_edps; i++) {
2801 if (link == dm->backlight_link[i])
2802 break;
2803 }
2804 if (i >= dm->num_of_edps)
2805 return;
2806 caps = &dm->backlight_caps[i];
2807 caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
2808 caps->aux_support = false;
2809 max_cll = conn_base->hdr_sink_metadata.hdmi_type1.max_cll;
2810 min_cll = conn_base->hdr_sink_metadata.hdmi_type1.min_cll;
2811
2812 if (caps->ext_caps->bits.oled == 1 /*||
2813 caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
2814 caps->ext_caps->bits.hdr_aux_backlight_control == 1*/)
2815 caps->aux_support = true;
2816
2817 if (amdgpu_backlight == 0)
2818 caps->aux_support = false;
2819 else if (amdgpu_backlight == 1)
2820 caps->aux_support = true;
2821
2822	/* From the specification (CTA-861-G), the maximum luminance is
2823	 * calculated as:
2824	 *   Luminance = 50 * 2 ** (CV / 32)
2825	 * where CV is a one-byte value.
2826	 * Evaluating this expression directly would require floating-point
2827	 * precision; to avoid that complexity, we take advantage of the fact
2828	 * that CV is divided by a constant. From Euclid's division algorithm,
2829	 * CV can be written as CV = 32 * q + r. Substituting this into the
2830	 * luminance expression gives 50 * (2 ** q) * (2 ** (r / 32)), so we
2831	 * only need to pre-compute the values of 50 * 2 ** (r / 32). They were
2832	 * generated with the following Ruby line:
2833	 *   (0...32).each {|cv| puts (50*2**(cv/32.0)).round}
2834	 * and the results are stored in pre_computed_values.
2835	 */
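	/* For example, max_cll = 65 gives q = 2 and r = 1, so
	 * max = (1 << 2) * pre_computed_values[1] = 4 * 51 = 204,
	 * matching 50 * 2 ** (65 / 32) ~= 204.4.
	 */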
2837 q = max_cll >> 5;
2838 r = max_cll % 32;
2839 max = (1 << q) * pre_computed_values[r];
2840
2841 // min luminance: maxLum * (CV/255)^2 / 100
2842 q = DIV_ROUND_CLOSEST(min_cll, 255);
2843 min = max * DIV_ROUND_CLOSEST((q * q), 100);
2844
2845 caps->aux_max_input_signal = max;
2846 caps->aux_min_input_signal = min;
2847}
2848
2849void amdgpu_dm_update_connector_after_detect(
2850 struct amdgpu_dm_connector *aconnector)
2851{
2852 struct drm_connector *connector = &aconnector->base;
2853 struct drm_device *dev = connector->dev;
2854 struct dc_sink *sink;
2855
2856 /* MST handled by drm_mst framework */
2857 if (aconnector->mst_mgr.mst_state == true)
2858 return;
2859
2860 sink = aconnector->dc_link->local_sink;
2861 if (sink)
2862 dc_sink_retain(sink);
2863
2864 /*
2865	 * An EDID-managed connector gets its first update only in the mode_valid
2866	 * hook; the connector sink is then set to either the fake or the physical
2867	 * sink depending on link status. Skip if this was already done during boot.
2868 */
2869 if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
2870 && aconnector->dc_em_sink) {
2871
2872 /*
2873		 * For headless S3 resume, use the emulated sink (dc_em_sink) to fake
2874		 * a stream, because connector->sink is set to NULL on resume.
2875 */
2876 mutex_lock(&dev->mode_config.mutex);
2877
2878 if (sink) {
2879 if (aconnector->dc_sink) {
2880 amdgpu_dm_update_freesync_caps(connector, NULL);
2881 /*
2882				 * The retain and release below bump up the refcount for the
2883				 * sink, because the link no longer points to it after
2884				 * disconnect; otherwise the next crtc-to-connector reshuffle
2885				 * by the UMD would trigger an unwanted dc_sink release.
2886 */
2887 dc_sink_release(aconnector->dc_sink);
2888 }
2889 aconnector->dc_sink = sink;
2890 dc_sink_retain(aconnector->dc_sink);
2891 amdgpu_dm_update_freesync_caps(connector,
2892 aconnector->edid);
2893 } else {
2894 amdgpu_dm_update_freesync_caps(connector, NULL);
2895 if (!aconnector->dc_sink) {
2896 aconnector->dc_sink = aconnector->dc_em_sink;
2897 dc_sink_retain(aconnector->dc_sink);
2898 }
2899 }
2900
2901 mutex_unlock(&dev->mode_config.mutex);
2902
2903 if (sink)
2904 dc_sink_release(sink);
2905 return;
2906 }
2907
2908 /*
2909	 * TODO: temporary guard until a proper fix is found.
2910	 * If this sink is an MST sink, we should not do anything.
2911 */
2912 if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
2913 dc_sink_release(sink);
2914 return;
2915 }
2916
2917 if (aconnector->dc_sink == sink) {
2918 /*
2919 * We got a DP short pulse (Link Loss, DP CTS, etc...).
2920 * Do nothing!!
2921 */
2922 DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
2923 aconnector->connector_id);
2924 if (sink)
2925 dc_sink_release(sink);
2926 return;
2927 }
2928
2929 DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
2930 aconnector->connector_id, aconnector->dc_sink, sink);
2931
2932 mutex_lock(&dev->mode_config.mutex);
2933
2934 /*
2935 * 1. Update status of the drm connector
2936 * 2. Send an event and let userspace tell us what to do
2937 */
2938 if (sink) {
2939 /*
2940 * TODO: check if we still need the S3 mode update workaround.
2941 * If yes, put it here.
2942 */
2943 if (aconnector->dc_sink) {
2944 amdgpu_dm_update_freesync_caps(connector, NULL);
2945 dc_sink_release(aconnector->dc_sink);
2946 }
2947
2948 aconnector->dc_sink = sink;
2949 dc_sink_retain(aconnector->dc_sink);
2950 if (sink->dc_edid.length == 0) {
2951 aconnector->edid = NULL;
2952 if (aconnector->dc_link->aux_mode) {
2953 drm_dp_cec_unset_edid(
2954 &aconnector->dm_dp_aux.aux);
2955 }
2956 } else {
2957 aconnector->edid =
2958 (struct edid *)sink->dc_edid.raw_edid;
2959
2960 if (aconnector->dc_link->aux_mode)
2961 drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
2962 aconnector->edid);
2963 }
2964
2965 drm_connector_update_edid_property(connector, aconnector->edid);
2966 amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
2967 update_connector_ext_caps(aconnector);
2968 } else {
2969 drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
2970 amdgpu_dm_update_freesync_caps(connector, NULL);
2971 drm_connector_update_edid_property(connector, NULL);
2972 aconnector->num_modes = 0;
2973 dc_sink_release(aconnector->dc_sink);
2974 aconnector->dc_sink = NULL;
2975 aconnector->edid = NULL;
2976#ifdef CONFIG_DRM_AMD_DC_HDCP
2977 /* Set CP to DESIRED if it was ENABLED, so we can re-enable it again on hotplug */
2978 if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
2979 connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
2980#endif
2981 }
2982
2983 mutex_unlock(&dev->mode_config.mutex);
2984
2985 update_subconnector_property(aconnector);
2986
2987 if (sink)
2988 dc_sink_release(sink);
2989}
2990
2991static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector)
2992{
2993 struct drm_connector *connector = &aconnector->base;
2994 struct drm_device *dev = connector->dev;
2995 enum dc_connection_type new_connection_type = dc_connection_none;
2996 struct amdgpu_device *adev = drm_to_adev(dev);
2997 struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
2998 struct dm_crtc_state *dm_crtc_state = NULL;
2999
3000 if (adev->dm.disable_hpd_irq)
3001 return;
3002
3003 if (dm_con_state->base.state && dm_con_state->base.crtc)
3004 dm_crtc_state = to_dm_crtc_state(drm_atomic_get_crtc_state(
3005 dm_con_state->base.state,
3006 dm_con_state->base.crtc));
3007 /*
3008	 * On failure, or for MST, there is no need to update the connector status
3009	 * or notify the OS, since (in the MST case) MST handles this in its own context.
3010 */
3011 mutex_lock(&aconnector->hpd_lock);
3012
3013#ifdef CONFIG_DRM_AMD_DC_HDCP
3014 if (adev->dm.hdcp_workqueue) {
3015 hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
3016 dm_con_state->update_hdcp = true;
3017 }
3018#endif
3019 if (aconnector->fake_enable)
3020 aconnector->fake_enable = false;
3021
3022 if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
3023 DRM_ERROR("KMS: Failed to detect connector\n");
3024
3025 if (aconnector->base.force && new_connection_type == dc_connection_none) {
3026 emulated_link_detect(aconnector->dc_link);
3027
3028 drm_modeset_lock_all(dev);
3029 dm_restore_drm_connector_state(dev, connector);
3030 drm_modeset_unlock_all(dev);
3031
3032 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
3033 drm_kms_helper_hotplug_event(dev);
3034
3035 } else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
3036 if (new_connection_type == dc_connection_none &&
3037 aconnector->dc_link->type == dc_connection_none &&
3038 dm_crtc_state)
3039 dm_set_dpms_off(aconnector->dc_link, dm_crtc_state);
3040
3041 amdgpu_dm_update_connector_after_detect(aconnector);
3042
3043 drm_modeset_lock_all(dev);
3044 dm_restore_drm_connector_state(dev, connector);
3045 drm_modeset_unlock_all(dev);
3046
3047 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
3048 drm_kms_helper_hotplug_event(dev);
3049 }
3050 mutex_unlock(&aconnector->hpd_lock);
3051
3052}
3053
3054static void handle_hpd_irq(void *param)
3055{
3056 struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
3057
3058 handle_hpd_irq_helper(aconnector);
3059
3060}
3061
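/* Drain MST sideband messages signalled through the DP short-pulse (ESI)
 * registers: read the ESI bytes, hand them to the MST manager, ACK them back
 * over DPCD, and repeat until no new IRQ is pending (bounded by
 * max_process_count).
 */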
3062static void dm_handle_mst_sideband_msg(struct amdgpu_dm_connector *aconnector)
3063{
3064 uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
3065 uint8_t dret;
3066 bool new_irq_handled = false;
3067 int dpcd_addr;
3068 int dpcd_bytes_to_read;
3069
3070 const int max_process_count = 30;
3071 int process_count = 0;
3072
3073 const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);
3074
3075 if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
3076 dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
3077 /* DPCD 0x200 - 0x201 for downstream IRQ */
3078 dpcd_addr = DP_SINK_COUNT;
3079 } else {
3080 dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
3081 /* DPCD 0x2002 - 0x2005 for downstream IRQ */
3082 dpcd_addr = DP_SINK_COUNT_ESI;
3083 }
3084
3085 dret = drm_dp_dpcd_read(
3086 &aconnector->dm_dp_aux.aux,
3087 dpcd_addr,
3088 esi,
3089 dpcd_bytes_to_read);
3090
3091 while (dret == dpcd_bytes_to_read &&
3092 process_count < max_process_count) {
3093 uint8_t retry;
3094 dret = 0;
3095
3096 process_count++;
3097
3098 DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
3099 /* handle HPD short pulse irq */
3100 if (aconnector->mst_mgr.mst_state)
3101 drm_dp_mst_hpd_irq(
3102 &aconnector->mst_mgr,
3103 esi,
3104 &new_irq_handled);
3105
3106 if (new_irq_handled) {
3107			/* ACK at DPCD to notify downstream */
3108 const int ack_dpcd_bytes_to_write =
3109 dpcd_bytes_to_read - 1;
3110
3111 for (retry = 0; retry < 3; retry++) {
3112 uint8_t wret;
3113
3114 wret = drm_dp_dpcd_write(
3115 &aconnector->dm_dp_aux.aux,
3116 dpcd_addr + 1,
3117 &esi[1],
3118 ack_dpcd_bytes_to_write);
3119 if (wret == ack_dpcd_bytes_to_write)
3120 break;
3121 }
3122
3123 /* check if there is new irq to be handled */
3124 dret = drm_dp_dpcd_read(
3125 &aconnector->dm_dp_aux.aux,
3126 dpcd_addr,
3127 esi,
3128 dpcd_bytes_to_read);
3129
3130 new_irq_handled = false;
3131 } else {
3132 break;
3133 }
3134 }
3135
3136 if (process_count == max_process_count)
3137 DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
3138}
3139
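/* Wrap the HPD IRQ data in a work item and queue it on the per-link offload
 * workqueue; the work handler is expected to consume and free the item.
 */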
3140static void schedule_hpd_rx_offload_work(struct hpd_rx_irq_offload_work_queue *offload_wq,
3141 union hpd_irq_data hpd_irq_data)
3142{
3143 struct hpd_rx_irq_offload_work *offload_work =
3144 kzalloc(sizeof(*offload_work), GFP_KERNEL);
3145
3146 if (!offload_work) {
3147 DRM_ERROR("Failed to allocate hpd_rx_irq_offload_work.\n");
3148 return;
3149 }
3150
3151 INIT_WORK(&offload_work->work, dm_handle_hpd_rx_offload_work);
3152 offload_work->data = hpd_irq_data;
3153 offload_work->offload_wq = offload_wq;
3154
3155 queue_work(offload_wq->wq, &offload_work->work);
3156	DRM_DEBUG_KMS("queue work to handle hpd_rx offload work\n");
3157}
3158
3159static void handle_hpd_rx_irq(void *param)
3160{
3161 struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
3162 struct drm_connector *connector = &aconnector->base;
3163 struct drm_device *dev = connector->dev;
3164 struct dc_link *dc_link = aconnector->dc_link;
3165 bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
3166 bool result = false;
3167 enum dc_connection_type new_connection_type = dc_connection_none;
3168 struct amdgpu_device *adev = drm_to_adev(dev);
3169 union hpd_irq_data hpd_irq_data;
3170 bool link_loss = false;
3171 bool has_left_work = false;
3172 int idx = aconnector->base.index;
3173 struct hpd_rx_irq_offload_work_queue *offload_wq = &adev->dm.hpd_rx_offload_wq[idx];
3174
3175 memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));
3176
3177 if (adev->dm.disable_hpd_irq)
3178 return;
3179
3180 /*
3181	 * TODO: Temporarily take a mutex so the HPD interrupt does not run into a
3182	 * GPIO conflict; once the i2c helper is implemented, this mutex should be
3183	 * retired.
3184 */
3185 mutex_lock(&aconnector->hpd_lock);
3186
3187 result = dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data,
3188 &link_loss, true, &has_left_work);
3189
3190 if (!has_left_work)
3191 goto out;
3192
3193 if (hpd_irq_data.bytes.device_service_irq.bits.AUTOMATED_TEST) {
3194 schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data);
3195 goto out;
3196 }
3197
3198 if (dc_link_dp_allow_hpd_rx_irq(dc_link)) {
3199 if (hpd_irq_data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY ||
3200 hpd_irq_data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) {
3201 dm_handle_mst_sideband_msg(aconnector);
3202 goto out;
3203 }
3204
3205 if (link_loss) {
3206 bool skip = false;
3207
3208 spin_lock(&offload_wq->offload_lock);
3209 skip = offload_wq->is_handling_link_loss;
3210
3211 if (!skip)
3212 offload_wq->is_handling_link_loss = true;
3213
3214 spin_unlock(&offload_wq->offload_lock);
3215
3216 if (!skip)
3217 schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data);
3218
3219 goto out;
3220 }
3221 }
3222
3223out:
3224 if (result && !is_mst_root_connector) {
3225 /* Downstream Port status changed. */
3226 if (!dc_link_detect_sink(dc_link, &new_connection_type))
3227 DRM_ERROR("KMS: Failed to detect connector\n");
3228
3229 if (aconnector->base.force && new_connection_type == dc_connection_none) {
3230 emulated_link_detect(dc_link);
3231
3232 if (aconnector->fake_enable)
3233 aconnector->fake_enable = false;
3234
3235 amdgpu_dm_update_connector_after_detect(aconnector);
3236
3237
3238 drm_modeset_lock_all(dev);
3239 dm_restore_drm_connector_state(dev, connector);
3240 drm_modeset_unlock_all(dev);
3241
3242 drm_kms_helper_hotplug_event(dev);
3243 } else if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {
3244
3245 if (aconnector->fake_enable)
3246 aconnector->fake_enable = false;
3247
3248 amdgpu_dm_update_connector_after_detect(aconnector);
3249
3250
3251 drm_modeset_lock_all(dev);
3252 dm_restore_drm_connector_state(dev, connector);
3253 drm_modeset_unlock_all(dev);
3254
3255 drm_kms_helper_hotplug_event(dev);
3256 }
3257 }
3258#ifdef CONFIG_DRM_AMD_DC_HDCP
3259 if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) {
3260 if (adev->dm.hdcp_workqueue)
3261 hdcp_handle_cpirq(adev->dm.hdcp_workqueue, aconnector->base.index);
3262 }
3263#endif
3264
3265 if (dc_link->type != dc_connection_mst_branch)
3266 drm_dp_cec_irq(&aconnector->dm_dp_aux.aux);
3267
3268 mutex_unlock(&aconnector->hpd_lock);
3269}
3270
3271static void register_hpd_handlers(struct amdgpu_device *adev)
3272{
3273 struct drm_device *dev = adev_to_drm(adev);
3274 struct drm_connector *connector;
3275 struct amdgpu_dm_connector *aconnector;
3276 const struct dc_link *dc_link;
3277 struct dc_interrupt_params int_params = {0};
3278
3279 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3280 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3281
3282 list_for_each_entry(connector,
3283 &dev->mode_config.connector_list, head) {
3284
3285 aconnector = to_amdgpu_dm_connector(connector);
3286 dc_link = aconnector->dc_link;
3287
3288 if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
3289 int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
3290 int_params.irq_source = dc_link->irq_source_hpd;
3291
3292 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3293 handle_hpd_irq,
3294 (void *) aconnector);
3295 }
3296
3297 if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {
3298
3299 /* Also register for DP short pulse (hpd_rx). */
3300 int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
3301 int_params.irq_source = dc_link->irq_source_hpd_rx;
3302
3303 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3304 handle_hpd_rx_irq,
3305 (void *) aconnector);
3306
3307 if (adev->dm.hpd_rx_offload_wq)
3308 adev->dm.hpd_rx_offload_wq[connector->index].aconnector =
3309 aconnector;
3310 }
3311 }
3312}
3313
3314#if defined(CONFIG_DRM_AMD_DC_SI)
3315/* Register IRQ sources and initialize IRQ callbacks */
3316static int dce60_register_irq_handlers(struct amdgpu_device *adev)
3317{
3318 struct dc *dc = adev->dm.dc;
3319 struct common_irq_params *c_irq_params;
3320 struct dc_interrupt_params int_params = {0};
3321 int r;
3322 int i;
3323	unsigned int client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
3324
3325 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3326 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3327
3328 /*
3329 * Actions of amdgpu_irq_add_id():
3330 * 1. Register a set() function with base driver.
3331 * Base driver will call set() function to enable/disable an
3332 * interrupt in DC hardware.
3333 * 2. Register amdgpu_dm_irq_handler().
3334 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
3335 * coming from DC hardware.
3336 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
3337 * for acknowledging and handling. */
3338
3339 /* Use VBLANK interrupt */
3340 for (i = 0; i < adev->mode_info.num_crtc; i++) {
3341		r = amdgpu_irq_add_id(adev, client_id, i + 1, &adev->crtc_irq);
3342 if (r) {
3343 DRM_ERROR("Failed to add crtc irq id!\n");
3344 return r;
3345 }
3346
3347 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3348 int_params.irq_source =
3349			dc_interrupt_to_irq_source(dc, i + 1, 0);
3350
3351 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3352
3353 c_irq_params->adev = adev;
3354 c_irq_params->irq_src = int_params.irq_source;
3355
3356 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3357 dm_crtc_high_irq, c_irq_params);
3358 }
3359
3360 /* Use GRPH_PFLIP interrupt */
3361 for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
3362 i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
3363 r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
3364 if (r) {
3365 DRM_ERROR("Failed to add page flip irq id!\n");
3366 return r;
3367 }
3368
3369 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3370 int_params.irq_source =
3371 dc_interrupt_to_irq_source(dc, i, 0);
3372
3373 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3374
3375 c_irq_params->adev = adev;
3376 c_irq_params->irq_src = int_params.irq_source;
3377
3378 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3379 dm_pflip_high_irq, c_irq_params);
3380
3381 }
3382
3383 /* HPD */
3384 r = amdgpu_irq_add_id(adev, client_id,
3385 VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
3386 if (r) {
3387 DRM_ERROR("Failed to add hpd irq id!\n");
3388 return r;
3389 }
3390
3391 register_hpd_handlers(adev);
3392
3393 return 0;
3394}
3395#endif
3396
3397/* Register IRQ sources and initialize IRQ callbacks */
3398static int dce110_register_irq_handlers(struct amdgpu_device *adev)
3399{
3400 struct dc *dc = adev->dm.dc;
3401 struct common_irq_params *c_irq_params;
3402 struct dc_interrupt_params int_params = {0};
3403 int r;
3404 int i;
3405	unsigned int client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
3406
3407 if (adev->family >= AMDGPU_FAMILY_AI)
3408 client_id = SOC15_IH_CLIENTID_DCE;
3409
3410 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3411 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3412
3413 /*
3414 * Actions of amdgpu_irq_add_id():
3415 * 1. Register a set() function with base driver.
3416 * Base driver will call set() function to enable/disable an
3417 * interrupt in DC hardware.
3418 * 2. Register amdgpu_dm_irq_handler().
3419 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
3420 * coming from DC hardware.
3421 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
3422 * for acknowledging and handling. */
3423
3424 /* Use VBLANK interrupt */
3425 for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
3426 r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
3427 if (r) {
3428 DRM_ERROR("Failed to add crtc irq id!\n");
3429 return r;
3430 }
3431
3432 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3433 int_params.irq_source =
3434 dc_interrupt_to_irq_source(dc, i, 0);
3435
3436 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3437
3438 c_irq_params->adev = adev;
3439 c_irq_params->irq_src = int_params.irq_source;
3440
3441 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3442 dm_crtc_high_irq, c_irq_params);
3443 }
3444
3445 /* Use VUPDATE interrupt */
3446 for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) {
3447 r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq);
3448 if (r) {
3449 DRM_ERROR("Failed to add vupdate irq id!\n");
3450 return r;
3451 }
3452
3453 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3454 int_params.irq_source =
3455 dc_interrupt_to_irq_source(dc, i, 0);
3456
3457 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
3458
3459 c_irq_params->adev = adev;
3460 c_irq_params->irq_src = int_params.irq_source;
3461
3462 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3463 dm_vupdate_high_irq, c_irq_params);
3464 }
3465
3466 /* Use GRPH_PFLIP interrupt */
3467 for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
3468 i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
3469 r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
3470 if (r) {
3471 DRM_ERROR("Failed to add page flip irq id!\n");
3472 return r;
3473 }
3474
3475 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3476 int_params.irq_source =
3477 dc_interrupt_to_irq_source(dc, i, 0);
3478
3479 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3480
3481 c_irq_params->adev = adev;
3482 c_irq_params->irq_src = int_params.irq_source;
3483
3484 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3485 dm_pflip_high_irq, c_irq_params);
3486
3487 }
3488
3489 /* HPD */
3490 r = amdgpu_irq_add_id(adev, client_id,
3491 VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
3492 if (r) {
3493 DRM_ERROR("Failed to add hpd irq id!\n");
3494 return r;
3495 }
3496
3497 register_hpd_handlers(adev);
3498
3499 return 0;
3500}
3501
3502#if defined(CONFIG_DRM_AMD_DC_DCN)
3503/* Register IRQ sources and initialize IRQ callbacks */
3504static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
3505{
3506 struct dc *dc = adev->dm.dc;
3507 struct common_irq_params *c_irq_params;
3508 struct dc_interrupt_params int_params = {0};
3509 int r;
3510 int i;
3511#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
3512 static const unsigned int vrtl_int_srcid[] = {
3513 DCN_1_0__SRCID__OTG1_VERTICAL_INTERRUPT0_CONTROL,
3514 DCN_1_0__SRCID__OTG2_VERTICAL_INTERRUPT0_CONTROL,
3515 DCN_1_0__SRCID__OTG3_VERTICAL_INTERRUPT0_CONTROL,
3516 DCN_1_0__SRCID__OTG4_VERTICAL_INTERRUPT0_CONTROL,
3517 DCN_1_0__SRCID__OTG5_VERTICAL_INTERRUPT0_CONTROL,
3518 DCN_1_0__SRCID__OTG6_VERTICAL_INTERRUPT0_CONTROL
3519 };
3520#endif
3521
3522 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3523 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3524
3525 /*
3526 * Actions of amdgpu_irq_add_id():
3527 * 1. Register a set() function with base driver.
3528 * Base driver will call set() function to enable/disable an
3529 * interrupt in DC hardware.
3530 * 2. Register amdgpu_dm_irq_handler().
3531 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
3532 * coming from DC hardware.
3533 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
3534 * for acknowledging and handling.
3535 */
3536
3537 /* Use VSTARTUP interrupt */
3538 for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
3539 i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
3540 i++) {
3541 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);
3542
3543 if (r) {
3544 DRM_ERROR("Failed to add crtc irq id!\n");
3545 return r;
3546 }
3547
3548 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3549 int_params.irq_source =
3550 dc_interrupt_to_irq_source(dc, i, 0);
3551
3552 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3553
3554 c_irq_params->adev = adev;
3555 c_irq_params->irq_src = int_params.irq_source;
3556
3557 amdgpu_dm_irq_register_interrupt(
3558 adev, &int_params, dm_crtc_high_irq, c_irq_params);
3559 }
3560
3561 /* Use otg vertical line interrupt */
3562#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
3563 for (i = 0; i <= adev->mode_info.num_crtc - 1; i++) {
3564 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE,
3565 vrtl_int_srcid[i], &adev->vline0_irq);
3566
3567 if (r) {
3568 DRM_ERROR("Failed to add vline0 irq id!\n");
3569 return r;
3570 }
3571
3572 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3573 int_params.irq_source =
3574 dc_interrupt_to_irq_source(dc, vrtl_int_srcid[i], 0);
3575
3576 if (int_params.irq_source == DC_IRQ_SOURCE_INVALID) {
3577 DRM_ERROR("Failed to register vline0 irq %d!\n", vrtl_int_srcid[i]);
3578 break;
3579 }
3580
3581 c_irq_params = &adev->dm.vline0_params[int_params.irq_source
3582 - DC_IRQ_SOURCE_DC1_VLINE0];
3583
3584 c_irq_params->adev = adev;
3585 c_irq_params->irq_src = int_params.irq_source;
3586
3587 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3588 dm_dcn_vertical_interrupt0_high_irq, c_irq_params);
3589 }
3590#endif
3591
3592 /* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to
3593 * the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx
3594	 * to trigger at the end of each vblank, regardless of the state of the lock,
3595 * matching DCE behaviour.
3596 */
3597 for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT;
3598 i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT + adev->mode_info.num_crtc - 1;
3599 i++) {
3600 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq);
3601
3602 if (r) {
3603 DRM_ERROR("Failed to add vupdate irq id!\n");
3604 return r;
3605 }
3606
3607 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3608 int_params.irq_source =
3609 dc_interrupt_to_irq_source(dc, i, 0);
3610
3611 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
3612
3613 c_irq_params->adev = adev;
3614 c_irq_params->irq_src = int_params.irq_source;
3615
3616 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3617 dm_vupdate_high_irq, c_irq_params);
3618 }
3619
3620 /* Use GRPH_PFLIP interrupt */
3621 for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
3622 i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + adev->mode_info.num_crtc - 1;
3623 i++) {
3624 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
3625 if (r) {
3626 DRM_ERROR("Failed to add page flip irq id!\n");
3627 return r;
3628 }
3629
3630 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3631 int_params.irq_source =
3632 dc_interrupt_to_irq_source(dc, i, 0);
3633
3634 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3635
3636 c_irq_params->adev = adev;
3637 c_irq_params->irq_src = int_params.irq_source;
3638
3639 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3640 dm_pflip_high_irq, c_irq_params);
3641
3642 }
3643
3644 /* HPD */
3645 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
3646 &adev->hpd_irq);
3647 if (r) {
3648 DRM_ERROR("Failed to add hpd irq id!\n");
3649 return r;
3650 }
3651
3652 register_hpd_handlers(adev);
3653
3654 return 0;
3655}
3656/* Register Outbox IRQ sources and initialize IRQ callbacks */
3657static int register_outbox_irq_handlers(struct amdgpu_device *adev)
3658{
3659 struct dc *dc = adev->dm.dc;
3660 struct common_irq_params *c_irq_params;
3661 struct dc_interrupt_params int_params = {0};
3662 int r, i;
3663
3664 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3665 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3666
3667 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT,
3668 &adev->dmub_outbox_irq);
3669 if (r) {
3670 DRM_ERROR("Failed to add outbox irq id!\n");
3671 return r;
3672 }
3673
3674 if (dc->ctx->dmub_srv) {
3675 i = DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT;
3676 int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
3677 int_params.irq_source =
3678 dc_interrupt_to_irq_source(dc, i, 0);
3679
3680 c_irq_params = &adev->dm.dmub_outbox_params[0];
3681
3682 c_irq_params->adev = adev;
3683 c_irq_params->irq_src = int_params.irq_source;
3684
3685 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3686 dm_dmub_outbox1_low_irq, c_irq_params);
3687 }
3688
3689 return 0;
3690}
3691#endif
3692
3693/*
3694 * Acquires the lock for the atomic state object and returns
3695 * the new atomic state.
3696 *
3697 * This should only be called during atomic check.
3698 */
3699static int dm_atomic_get_state(struct drm_atomic_state *state,
3700 struct dm_atomic_state **dm_state)
3701{
3702 struct drm_device *dev = state->dev;
3703 struct amdgpu_device *adev = drm_to_adev(dev);
3704 struct amdgpu_display_manager *dm = &adev->dm;
3705 struct drm_private_state *priv_state;
3706
3707 if (*dm_state)
3708 return 0;
3709
3710 priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj);
3711 if (IS_ERR(priv_state))
3712 return PTR_ERR(priv_state);
3713
3714 *dm_state = to_dm_atomic_state(priv_state);
3715
3716 return 0;
3717}
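
/*
 * drm_atomic_get_private_obj_state() acquires the modeset lock of the DM
 * private object and duplicates its current state into the transaction, so
 * the dm_atomic_state obtained by dm_atomic_get_state() is a mutable
 * per-commit copy rather than the live committed state.
 */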
3718
3719static struct dm_atomic_state *
3720dm_atomic_get_new_state(struct drm_atomic_state *state)
3721{
3722 struct drm_device *dev = state->dev;
3723 struct amdgpu_device *adev = drm_to_adev(dev);
3724 struct amdgpu_display_manager *dm = &adev->dm;
3725 struct drm_private_obj *obj;
3726 struct drm_private_state *new_obj_state;
3727 int i;
3728
3729 for_each_new_private_obj_in_state(state, obj, new_obj_state, i) {
3730 if (obj->funcs == dm->atomic_obj.funcs)
3731 return to_dm_atomic_state(new_obj_state);
3732 }
3733
3734 return NULL;
3735}
3736
3737static struct drm_private_state *
3738dm_atomic_duplicate_state(struct drm_private_obj *obj)
3739{
3740 struct dm_atomic_state *old_state, *new_state;
3741
3742 new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
3743 if (!new_state)
3744 return NULL;
3745
3746 __drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base);
3747
3748 old_state = to_dm_atomic_state(obj->state);
3749
3750 if (old_state && old_state->context)
3751 new_state->context = dc_copy_state(old_state->context);
3752
3753 if (!new_state->context) {
3754 kfree(new_state);
3755 return NULL;
3756 }
3757
3758 return &new_state->base;
3759}
3760
3761static void dm_atomic_destroy_state(struct drm_private_obj *obj,
3762 struct drm_private_state *state)
3763{
3764 struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
3765
3766 if (dm_state && dm_state->context)
3767 dc_release_state(dm_state->context);
3768
3769 kfree(dm_state);
3770}
3771
3772static struct drm_private_state_funcs dm_atomic_state_funcs = {
3773 .atomic_duplicate_state = dm_atomic_duplicate_state,
3774 .atomic_destroy_state = dm_atomic_destroy_state,
3775};
3776
3777static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
3778{
3779 struct dm_atomic_state *state;
3780 int r;
3781
3782 adev->mode_info.mode_config_initialized = true;
3783
3784 adev_to_drm(adev)->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
3785 adev_to_drm(adev)->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;
3786
3787 adev_to_drm(adev)->mode_config.max_width = 16384;
3788 adev_to_drm(adev)->mode_config.max_height = 16384;
3789
3790 adev_to_drm(adev)->mode_config.preferred_depth = 24;
3791 adev_to_drm(adev)->mode_config.prefer_shadow = 1;
3792 /* indicates support for immediate flip */
3793 adev_to_drm(adev)->mode_config.async_page_flip = true;
3794
3795 adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base;
3796
3797 state = kzalloc(sizeof(*state), GFP_KERNEL);
3798 if (!state)
3799 return -ENOMEM;
3800
3801 state->context = dc_create_state(adev->dm.dc);
3802 if (!state->context) {
3803 kfree(state);
3804 return -ENOMEM;
3805 }
3806
3807 dc_resource_state_copy_construct_current(adev->dm.dc, state->context);
3808
3809 drm_atomic_private_obj_init(adev_to_drm(adev),
3810 &adev->dm.atomic_obj,
3811 &state->base,
3812 &dm_atomic_state_funcs);
3813
3814 r = amdgpu_display_modeset_create_props(adev);
3815 if (r) {
3816 dc_release_state(state->context);
3817 kfree(state);
3818 return r;
3819 }
3820
3821 r = amdgpu_dm_audio_init(adev);
3822 if (r) {
3823 dc_release_state(state->context);
3824 kfree(state);
3825 return r;
3826 }
3827
3828 return 0;
3829}
3830
3831#define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
3832#define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
3833#define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50
3834
3835#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3836 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3837
3838static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm,
3839 int bl_idx)
3840{
3841#if defined(CONFIG_ACPI)
3842 struct amdgpu_dm_backlight_caps caps;
3843
3844 memset(&caps, 0, sizeof(caps));
3845
3846 if (dm->backlight_caps[bl_idx].caps_valid)
3847 return;
3848
3849 amdgpu_acpi_get_backlight_caps(&caps);
3850 if (caps.caps_valid) {
3851 dm->backlight_caps[bl_idx].caps_valid = true;
3852 if (caps.aux_support)
3853 return;
3854 dm->backlight_caps[bl_idx].min_input_signal = caps.min_input_signal;
3855 dm->backlight_caps[bl_idx].max_input_signal = caps.max_input_signal;
3856 } else {
3857 dm->backlight_caps[bl_idx].min_input_signal =
3858 AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3859 dm->backlight_caps[bl_idx].max_input_signal =
3860 AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3861 }
3862#else
3863 if (dm->backlight_caps[bl_idx].aux_support)
3864 return;
3865
3866 dm->backlight_caps[bl_idx].min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3867 dm->backlight_caps[bl_idx].max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3868#endif
3869}
3870
3871static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps,
3872 unsigned *min, unsigned *max)
3873{
3874 if (!caps)
3875 return 0;
3876
3877 if (caps->aux_support) {
3878 // Firmware limits are in nits, DC API wants millinits.
3879 *max = 1000 * caps->aux_max_input_signal;
3880 *min = 1000 * caps->aux_min_input_signal;
3881 } else {
3882 // Firmware limits are 8-bit, PWM control is 16-bit.
3883 *max = 0x101 * caps->max_input_signal;
3884 *min = 0x101 * caps->min_input_signal;
3885 }
3886 return 1;
3887}
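
/*
 * Worked example (illustrative): with the driver defaults
 * AMDGPU_DM_DEFAULT_MIN_BACKLIGHT = 12 and AMDGPU_DM_DEFAULT_MAX_BACKLIGHT = 255,
 * the PWM range becomes min = 0x101 * 12 = 3084 and max = 0x101 * 255 = 0xffff,
 * i.e. the 8-bit firmware limits expanded onto the full 16-bit PWM scale.
 */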
3888
3889static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *caps,
3890 uint32_t brightness)
3891{
3892 unsigned min, max;
3893
3894 if (!get_brightness_range(caps, &min, &max))
3895 return brightness;
3896
3897 // Rescale 0..255 to min..max
3898 return min + DIV_ROUND_CLOSEST((max - min) * brightness,
3899 AMDGPU_MAX_BL_LEVEL);
3900}
3901
3902static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps *caps,
3903 uint32_t brightness)
3904{
3905 unsigned min, max;
3906
3907 if (!get_brightness_range(caps, &min, &max))
3908 return brightness;
3909
3910 if (brightness < min)
3911 return 0;
3912 // Rescale min..max to 0..255
3913 return DIV_ROUND_CLOSEST(AMDGPU_MAX_BL_LEVEL * (brightness - min),
3914 max - min);
3915}
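
/*
 * The two conversions above are linear rescales between the
 * 0..AMDGPU_MAX_BL_LEVEL user scale and the hardware min..max scale; they are
 * inverses of each other up to DIV_ROUND_CLOSEST rounding, and hardware
 * readings below min are clamped to 0 on the way back to userspace.
 */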
3916
3917static int amdgpu_dm_backlight_set_level(struct amdgpu_display_manager *dm,
3918 int bl_idx,
3919 u32 user_brightness)
3920{
3921 struct amdgpu_dm_backlight_caps caps;
3922 struct dc_link *link;
3923 u32 brightness;
3924 bool rc;
3925
3926 amdgpu_dm_update_backlight_caps(dm, bl_idx);
3927 caps = dm->backlight_caps[bl_idx];
3928
3929 dm->brightness[bl_idx] = user_brightness;
3930 /* update scratch register */
3931 if (bl_idx == 0)
3932 amdgpu_atombios_scratch_regs_set_backlight_level(dm->adev, dm->brightness[bl_idx]);
3933 brightness = convert_brightness_from_user(&caps, dm->brightness[bl_idx]);
3934 link = (struct dc_link *)dm->backlight_link[bl_idx];
3935
3936 /* Change brightness based on AUX property */
3937 if (caps.aux_support) {
3938 rc = dc_link_set_backlight_level_nits(link, true, brightness,
3939 AUX_BL_DEFAULT_TRANSITION_TIME_MS);
3940 if (!rc)
3941 DRM_DEBUG("DM: Failed to update backlight via AUX on eDP[%d]\n", bl_idx);
3942 } else {
3943 rc = dc_link_set_backlight_level(link, brightness, 0);
3944 if (!rc)
3945 DRM_DEBUG("DM: Failed to update backlight on eDP[%d]\n", bl_idx);
3946 }
3947
3948 return rc ? 0 : 1;
3949}
3950
3951static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
3952{
3953 struct amdgpu_display_manager *dm = bl_get_data(bd);
3954 int i;
3955
3956 for (i = 0; i < dm->num_of_edps; i++) {
3957 if (bd == dm->backlight_dev[i])
3958 break;
3959 }
3960 if (i >= AMDGPU_DM_MAX_NUM_EDP)
3961 i = 0;
3962 amdgpu_dm_backlight_set_level(dm, i, bd->props.brightness);
3963
3964 return 0;
3965}
3966
3967static u32 amdgpu_dm_backlight_get_level(struct amdgpu_display_manager *dm,
3968 int bl_idx)
3969{
3970 struct amdgpu_dm_backlight_caps caps;
3971 struct dc_link *link = (struct dc_link *)dm->backlight_link[bl_idx];
3972
3973 amdgpu_dm_update_backlight_caps(dm, bl_idx);
3974 caps = dm->backlight_caps[bl_idx];
3975
3976 if (caps.aux_support) {
3977 u32 avg, peak;
3978 bool rc;
3979
3980 rc = dc_link_get_backlight_level_nits(link, &avg, &peak);
3981 if (!rc)
3982 return dm->brightness[bl_idx];
3983 return convert_brightness_to_user(&caps, avg);
3984 } else {
3985 int ret = dc_link_get_backlight_level(link);
3986
3987 if (ret == DC_ERROR_UNEXPECTED)
3988 return dm->brightness[bl_idx];
3989 return convert_brightness_to_user(&caps, ret);
3990 }
3991}
3992
3993static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
3994{
3995 struct amdgpu_display_manager *dm = bl_get_data(bd);
3996 int i;
3997
3998 for (i = 0; i < dm->num_of_edps; i++) {
3999 if (bd == dm->backlight_dev[i])
4000 break;
4001 }
4002 if (i >= AMDGPU_DM_MAX_NUM_EDP)
4003 i = 0;
4004 return amdgpu_dm_backlight_get_level(dm, i);
4005}
4006
4007static const struct backlight_ops amdgpu_dm_backlight_ops = {
4008 .options = BL_CORE_SUSPENDRESUME,
4009 .get_brightness = amdgpu_dm_backlight_get_brightness,
4010 .update_status = amdgpu_dm_backlight_update_status,
4011};
4012
4013static void
4014amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
4015{
4016 char bl_name[16];
4017 struct backlight_properties props = { 0 };
4018
4019 amdgpu_dm_update_backlight_caps(dm, dm->num_of_edps);
4020 dm->brightness[dm->num_of_edps] = AMDGPU_MAX_BL_LEVEL;
4021
4022 props.max_brightness = AMDGPU_MAX_BL_LEVEL;
4023 props.brightness = AMDGPU_MAX_BL_LEVEL;
4024 props.type = BACKLIGHT_RAW;
4025
4026 snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
4027 adev_to_drm(dm->adev)->primary->index + dm->num_of_edps);
4028
4029 dm->backlight_dev[dm->num_of_edps] = backlight_device_register(bl_name,
4030 adev_to_drm(dm->adev)->dev,
4031 dm,
4032 &amdgpu_dm_backlight_ops,
4033 &props);
4034
4035 if (IS_ERR(dm->backlight_dev[dm->num_of_edps]))
4036 DRM_ERROR("DM: Backlight registration failed!\n");
4037 else
4038 DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
4039}
4040#endif
4041
4042static int initialize_plane(struct amdgpu_display_manager *dm,
4043 struct amdgpu_mode_info *mode_info, int plane_id,
4044 enum drm_plane_type plane_type,
4045 const struct dc_plane_cap *plane_cap)
4046{
4047 struct drm_plane *plane;
4048 unsigned long possible_crtcs;
4049 int ret = 0;
4050
4051 plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL);
4052 if (!plane) {
4053 DRM_ERROR("KMS: Failed to allocate plane\n");
4054 return -ENOMEM;
4055 }
4056 plane->type = plane_type;
4057
4058 /*
4059 * HACK: IGT tests expect that the primary plane for a CRTC
4060	 * can only have one possible CRTC. Only expose support for
4061	 * any CRTC to planes that are not going to be used as a primary
4062	 * plane for a CRTC - i.e. overlay or underlay planes.
4063 */
4064 possible_crtcs = 1 << plane_id;
4065 if (plane_id >= dm->dc->caps.max_streams)
4066 possible_crtcs = 0xff;
4067
4068 ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap);
4069
4070 if (ret) {
4071 DRM_ERROR("KMS: Failed to initialize plane\n");
4072 kfree(plane);
4073 return ret;
4074 }
4075
4076 if (mode_info)
4077 mode_info->planes[plane_id] = plane;
4078
4079 return ret;
4080}
4081
4082
4083static void register_backlight_device(struct amdgpu_display_manager *dm,
4084 struct dc_link *link)
4085{
4086#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
4087 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
4088
4089 if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
4090 link->type != dc_connection_none) {
4091 /*
4092		 * Even if registration failed, we should continue with
4093		 * DM initialization because not having a backlight control
4094		 * is better than a black screen.
4095 */
4096 if (!dm->backlight_dev[dm->num_of_edps])
4097 amdgpu_dm_register_backlight_device(dm);
4098
4099 if (dm->backlight_dev[dm->num_of_edps]) {
4100 dm->backlight_link[dm->num_of_edps] = link;
4101 dm->num_of_edps++;
4102 }
4103 }
4104#endif
4105}
4106
4107
4108/*
4109 * In this architecture, the association
4110 * connector -> encoder -> crtc
4111 * is not really required. The crtc and connector will hold the
4112 * display_index as an abstraction to use with the DAL component.
4113 *
4114 * Returns 0 on success
4115 */
4116static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
4117{
4118 struct amdgpu_display_manager *dm = &adev->dm;
4119 int32_t i;
4120 struct amdgpu_dm_connector *aconnector = NULL;
4121 struct amdgpu_encoder *aencoder = NULL;
4122 struct amdgpu_mode_info *mode_info = &adev->mode_info;
4123 uint32_t link_cnt;
4124 int32_t primary_planes;
4125 enum dc_connection_type new_connection_type = dc_connection_none;
4126 const struct dc_plane_cap *plane;
4127 bool psr_feature_enabled = false;
4128
4129 dm->display_indexes_num = dm->dc->caps.max_streams;
4130	/* Update the actual number of CRTCs used */
4131 adev->mode_info.num_crtc = adev->dm.display_indexes_num;
4132
4133 link_cnt = dm->dc->caps.max_links;
4134 if (amdgpu_dm_mode_config_init(dm->adev)) {
4135 DRM_ERROR("DM: Failed to initialize mode config\n");
4136 return -EINVAL;
4137 }
4138
4139 /* There is one primary plane per CRTC */
4140 primary_planes = dm->dc->caps.max_streams;
4141 ASSERT(primary_planes <= AMDGPU_MAX_PLANES);
4142
4143 /*
4144	 * Initialize primary planes, implicit planes for legacy IOCTLs.
4145 * Order is reversed to match iteration order in atomic check.
4146 */
4147 for (i = (primary_planes - 1); i >= 0; i--) {
4148 plane = &dm->dc->caps.planes[i];
4149
4150 if (initialize_plane(dm, mode_info, i,
4151 DRM_PLANE_TYPE_PRIMARY, plane)) {
4152 DRM_ERROR("KMS: Failed to initialize primary plane\n");
4153 goto fail;
4154 }
4155 }
4156
4157 /*
4158 * Initialize overlay planes, index starting after primary planes.
4159 * These planes have a higher DRM index than the primary planes since
4160 * they should be considered as having a higher z-order.
4161 * Order is reversed to match iteration order in atomic check.
4162 *
4163 * Only support DCN for now, and only expose one so we don't encourage
4164 * userspace to use up all the pipes.
4165 */
4166 for (i = 0; i < dm->dc->caps.max_planes; ++i) {
4167 struct dc_plane_cap *plane = &dm->dc->caps.planes[i];
4168
4169 if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL)
4170 continue;
4171
4172 if (!plane->blends_with_above || !plane->blends_with_below)
4173 continue;
4174
4175 if (!plane->pixel_format_support.argb8888)
4176 continue;
4177
4178 if (initialize_plane(dm, NULL, primary_planes + i,
4179 DRM_PLANE_TYPE_OVERLAY, plane)) {
4180 DRM_ERROR("KMS: Failed to initialize overlay plane\n");
4181 goto fail;
4182 }
4183
4184 /* Only create one overlay plane. */
4185 break;
4186 }
4187
4188 for (i = 0; i < dm->dc->caps.max_streams; i++)
4189 if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) {
4190 DRM_ERROR("KMS: Failed to initialize crtc\n");
4191 goto fail;
4192 }
4193
4194#if defined(CONFIG_DRM_AMD_DC_DCN)
4195 /* Use Outbox interrupt */
4196 switch (adev->ip_versions[DCE_HWIP][0]) {
4197 case IP_VERSION(3, 0, 0):
4198 case IP_VERSION(3, 1, 2):
4199 case IP_VERSION(3, 1, 3):
4200 case IP_VERSION(2, 1, 0):
4201 if (register_outbox_irq_handlers(dm->adev)) {
4202 DRM_ERROR("DM: Failed to initialize IRQ\n");
4203 goto fail;
4204 }
4205 break;
4206 default:
4207 DRM_DEBUG_KMS("Unsupported DCN IP version for outbox: 0x%X\n",
4208 adev->ip_versions[DCE_HWIP][0]);
4209 }
4210
4211 /* Determine whether to enable PSR support by default. */
4212 if (!(amdgpu_dc_debug_mask & DC_DISABLE_PSR)) {
4213 switch (adev->ip_versions[DCE_HWIP][0]) {
4214 case IP_VERSION(3, 1, 2):
4215 case IP_VERSION(3, 1, 3):
4216 psr_feature_enabled = true;
4217 break;
4218 default:
4219 psr_feature_enabled = amdgpu_dc_feature_mask & DC_PSR_MASK;
4220 break;
4221 }
4222 }
4223#endif
4224
4225 /* loops over all connectors on the board */
4226 for (i = 0; i < link_cnt; i++) {
4227 struct dc_link *link = NULL;
4228
4229 if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
4230 DRM_ERROR(
4231 "KMS: Cannot support more than %d display indexes\n",
4232 AMDGPU_DM_MAX_DISPLAY_INDEX);
4233 continue;
4234 }
4235
4236 aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
4237 if (!aconnector)
4238 goto fail;
4239
4240 aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
4241 if (!aencoder)
4242 goto fail;
4243
4244 if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
4245 DRM_ERROR("KMS: Failed to initialize encoder\n");
4246 goto fail;
4247 }
4248
4249 if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
4250 DRM_ERROR("KMS: Failed to initialize connector\n");
4251 goto fail;
4252 }
4253
4254 link = dc_get_link_at_index(dm->dc, i);
4255
4256 if (!dc_link_detect_sink(link, &new_connection_type))
4257 DRM_ERROR("KMS: Failed to detect connector\n");
4258
4259 if (aconnector->base.force && new_connection_type == dc_connection_none) {
4260 emulated_link_detect(link);
4261 amdgpu_dm_update_connector_after_detect(aconnector);
4262
4263 } else if (dc_link_detect(link, DETECT_REASON_BOOT)) {
4264 amdgpu_dm_update_connector_after_detect(aconnector);
4265 register_backlight_device(dm, link);
4266 if (dm->num_of_edps)
4267 update_connector_ext_caps(aconnector);
4268 if (psr_feature_enabled)
4269 amdgpu_dm_set_psr_caps(link);
4270 }
4271
4272
4273 }
4274
4275 /*
4276 * Disable vblank IRQs aggressively for power-saving.
4277 *
4278 * TODO: Fix vblank control helpers to delay PSR entry to allow this when PSR
4279 * is also supported.
4280 */
4281 adev_to_drm(adev)->vblank_disable_immediate = !psr_feature_enabled;
4282
4283 /* Software is initialized. Now we can register interrupt handlers. */
4284 switch (adev->asic_type) {
4285#if defined(CONFIG_DRM_AMD_DC_SI)
4286 case CHIP_TAHITI:
4287 case CHIP_PITCAIRN:
4288 case CHIP_VERDE:
4289 case CHIP_OLAND:
4290 if (dce60_register_irq_handlers(dm->adev)) {
4291 DRM_ERROR("DM: Failed to initialize IRQ\n");
4292 goto fail;
4293 }
4294 break;
4295#endif
4296 case CHIP_BONAIRE:
4297 case CHIP_HAWAII:
4298 case CHIP_KAVERI:
4299 case CHIP_KABINI:
4300 case CHIP_MULLINS:
4301 case CHIP_TONGA:
4302 case CHIP_FIJI:
4303 case CHIP_CARRIZO:
4304 case CHIP_STONEY:
4305 case CHIP_POLARIS11:
4306 case CHIP_POLARIS10:
4307 case CHIP_POLARIS12:
4308 case CHIP_VEGAM:
4309 case CHIP_VEGA10:
4310 case CHIP_VEGA12:
4311 case CHIP_VEGA20:
4312 if (dce110_register_irq_handlers(dm->adev)) {
4313 DRM_ERROR("DM: Failed to initialize IRQ\n");
4314 goto fail;
4315 }
4316 break;
4317 default:
4318#if defined(CONFIG_DRM_AMD_DC_DCN)
4319 switch (adev->ip_versions[DCE_HWIP][0]) {
4320 case IP_VERSION(1, 0, 0):
4321 case IP_VERSION(1, 0, 1):
4322 case IP_VERSION(2, 0, 2):
4323 case IP_VERSION(2, 0, 3):
4324 case IP_VERSION(2, 0, 0):
4325 case IP_VERSION(2, 1, 0):
4326 case IP_VERSION(3, 0, 0):
4327 case IP_VERSION(3, 0, 2):
4328 case IP_VERSION(3, 0, 3):
4329 case IP_VERSION(3, 0, 1):
4330 case IP_VERSION(3, 1, 2):
4331 case IP_VERSION(3, 1, 3):
4332 if (dcn10_register_irq_handlers(dm->adev)) {
4333 DRM_ERROR("DM: Failed to initialize IRQ\n");
4334 goto fail;
4335 }
4336 break;
4337 default:
4338			DRM_ERROR("Unsupported DCE IP version: 0x%X\n",
4339 adev->ip_versions[DCE_HWIP][0]);
4340 goto fail;
4341 }
4342#endif
4343 break;
4344 }
4345
4346 return 0;
4347fail:
4348 kfree(aencoder);
4349 kfree(aconnector);
4350
4351 return -EINVAL;
4352}
4353
4354static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
4355{
4356 drm_atomic_private_obj_fini(&dm->atomic_obj);
4357 return;
4358}
4359
4360/******************************************************************************
4361 * amdgpu_display_funcs functions
4362 *****************************************************************************/
4363
4364/*
4365 * dm_bandwidth_update - program display watermarks
4366 *
4367 * @adev: amdgpu_device pointer
4368 *
4369 * Calculate and program the display watermarks and line buffer allocation.
4370 */
4371static void dm_bandwidth_update(struct amdgpu_device *adev)
4372{
4373 /* TODO: implement later */
4374}
4375
4376static const struct amdgpu_display_funcs dm_display_funcs = {
4377 .bandwidth_update = dm_bandwidth_update, /* called unconditionally */
4378 .vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */
4379 .backlight_set_level = NULL, /* never called for DC */
4380 .backlight_get_level = NULL, /* never called for DC */
4381 .hpd_sense = NULL,/* called unconditionally */
4382 .hpd_set_polarity = NULL, /* called unconditionally */
4383 .hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
4384 .page_flip_get_scanoutpos =
4385 dm_crtc_get_scanoutpos,/* called unconditionally */
4386 .add_encoder = NULL, /* VBIOS parsing. DAL does it. */
4387 .add_connector = NULL, /* VBIOS parsing. DAL does it. */
4388};
4389
4390#if defined(CONFIG_DEBUG_KERNEL_DC)
4391
4392static ssize_t s3_debug_store(struct device *device,
4393 struct device_attribute *attr,
4394 const char *buf,
4395 size_t count)
4396{
4397 int ret;
4398 int s3_state;
4399 struct drm_device *drm_dev = dev_get_drvdata(device);
4400 struct amdgpu_device *adev = drm_to_adev(drm_dev);
4401
4402 ret = kstrtoint(buf, 0, &s3_state);
4403
4404 if (ret == 0) {
4405 if (s3_state) {
4406 dm_resume(adev);
4407 drm_kms_helper_hotplug_event(adev_to_drm(adev));
4408 } else
4409 dm_suspend(adev);
4410 }
4411
4412 return ret == 0 ? count : 0;
4413}
4414
4415DEVICE_ATTR_WO(s3_debug);
4416
4417#endif
4418
4419static int dm_early_init(void *handle)
4420{
4421 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4422
4423 switch (adev->asic_type) {
4424#if defined(CONFIG_DRM_AMD_DC_SI)
4425 case CHIP_TAHITI:
4426 case CHIP_PITCAIRN:
4427 case CHIP_VERDE:
4428 adev->mode_info.num_crtc = 6;
4429 adev->mode_info.num_hpd = 6;
4430 adev->mode_info.num_dig = 6;
4431 break;
4432 case CHIP_OLAND:
4433 adev->mode_info.num_crtc = 2;
4434 adev->mode_info.num_hpd = 2;
4435 adev->mode_info.num_dig = 2;
4436 break;
4437#endif
4438 case CHIP_BONAIRE:
4439 case CHIP_HAWAII:
4440 adev->mode_info.num_crtc = 6;
4441 adev->mode_info.num_hpd = 6;
4442 adev->mode_info.num_dig = 6;
4443 break;
4444 case CHIP_KAVERI:
4445 adev->mode_info.num_crtc = 4;
4446 adev->mode_info.num_hpd = 6;
4447 adev->mode_info.num_dig = 7;
4448 break;
4449 case CHIP_KABINI:
4450 case CHIP_MULLINS:
4451 adev->mode_info.num_crtc = 2;
4452 adev->mode_info.num_hpd = 6;
4453 adev->mode_info.num_dig = 6;
4454 break;
4455 case CHIP_FIJI:
4456 case CHIP_TONGA:
4457 adev->mode_info.num_crtc = 6;
4458 adev->mode_info.num_hpd = 6;
4459 adev->mode_info.num_dig = 7;
4460 break;
4461 case CHIP_CARRIZO:
4462 adev->mode_info.num_crtc = 3;
4463 adev->mode_info.num_hpd = 6;
4464 adev->mode_info.num_dig = 9;
4465 break;
4466 case CHIP_STONEY:
4467 adev->mode_info.num_crtc = 2;
4468 adev->mode_info.num_hpd = 6;
4469 adev->mode_info.num_dig = 9;
4470 break;
4471 case CHIP_POLARIS11:
4472 case CHIP_POLARIS12:
4473 adev->mode_info.num_crtc = 5;
4474 adev->mode_info.num_hpd = 5;
4475 adev->mode_info.num_dig = 5;
4476 break;
4477 case CHIP_POLARIS10:
4478 case CHIP_VEGAM:
4479 adev->mode_info.num_crtc = 6;
4480 adev->mode_info.num_hpd = 6;
4481 adev->mode_info.num_dig = 6;
4482 break;
4483 case CHIP_VEGA10:
4484 case CHIP_VEGA12:
4485 case CHIP_VEGA20:
4486 adev->mode_info.num_crtc = 6;
4487 adev->mode_info.num_hpd = 6;
4488 adev->mode_info.num_dig = 6;
4489 break;
4490 default:
4491#if defined(CONFIG_DRM_AMD_DC_DCN)
4492 switch (adev->ip_versions[DCE_HWIP][0]) {
4493 case IP_VERSION(2, 0, 2):
4494 case IP_VERSION(3, 0, 0):
4495 adev->mode_info.num_crtc = 6;
4496 adev->mode_info.num_hpd = 6;
4497 adev->mode_info.num_dig = 6;
4498 break;
4499 case IP_VERSION(2, 0, 0):
4500 case IP_VERSION(3, 0, 2):
4501 adev->mode_info.num_crtc = 5;
4502 adev->mode_info.num_hpd = 5;
4503 adev->mode_info.num_dig = 5;
4504 break;
4505 case IP_VERSION(2, 0, 3):
4506 case IP_VERSION(3, 0, 3):
4507 adev->mode_info.num_crtc = 2;
4508 adev->mode_info.num_hpd = 2;
4509 adev->mode_info.num_dig = 2;
4510 break;
4511 case IP_VERSION(1, 0, 0):
4512 case IP_VERSION(1, 0, 1):
4513 case IP_VERSION(3, 0, 1):
4514 case IP_VERSION(2, 1, 0):
4515 case IP_VERSION(3, 1, 2):
4516 case IP_VERSION(3, 1, 3):
4517 adev->mode_info.num_crtc = 4;
4518 adev->mode_info.num_hpd = 4;
4519 adev->mode_info.num_dig = 4;
4520 break;
4521 default:
4522			DRM_ERROR("Unsupported DCE IP version: 0x%x\n",
4523 adev->ip_versions[DCE_HWIP][0]);
4524 return -EINVAL;
4525 }
4526#endif
4527 break;
4528 }
4529
4530 amdgpu_dm_set_irq_funcs(adev);
4531
4532 if (adev->mode_info.funcs == NULL)
4533 adev->mode_info.funcs = &dm_display_funcs;
4534
4535 /*
4536 * Note: Do NOT change adev->audio_endpt_rreg and
4537 * adev->audio_endpt_wreg because they are initialised in
4538 * amdgpu_device_init()
4539 */
4540#if defined(CONFIG_DEBUG_KERNEL_DC)
4541 device_create_file(
4542 adev_to_drm(adev)->dev,
4543 &dev_attr_s3_debug);
4544#endif
4545
4546 return 0;
4547}
4548
4549static bool modeset_required(struct drm_crtc_state *crtc_state,
4550 struct dc_stream_state *new_stream,
4551 struct dc_stream_state *old_stream)
4552{
4553 return crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
4554}
4555
4556static bool modereset_required(struct drm_crtc_state *crtc_state)
4557{
4558 return !crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
4559}
4560
4561static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
4562{
4563 drm_encoder_cleanup(encoder);
4564 kfree(encoder);
4565}
4566
4567static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
4568 .destroy = amdgpu_dm_encoder_destroy,
4569};
4570
4571
4572static void get_min_max_dc_plane_scaling(struct drm_device *dev,
4573 struct drm_framebuffer *fb,
4574 int *min_downscale, int *max_upscale)
4575{
4576 struct amdgpu_device *adev = drm_to_adev(dev);
4577 struct dc *dc = adev->dm.dc;
4578 /* Caps for all supported planes are the same on DCE and DCN 1 - 3 */
4579 struct dc_plane_cap *plane_cap = &dc->caps.planes[0];
4580
4581 switch (fb->format->format) {
4582 case DRM_FORMAT_P010:
4583 case DRM_FORMAT_NV12:
4584 case DRM_FORMAT_NV21:
4585 *max_upscale = plane_cap->max_upscale_factor.nv12;
4586 *min_downscale = plane_cap->max_downscale_factor.nv12;
4587 break;
4588
4589 case DRM_FORMAT_XRGB16161616F:
4590 case DRM_FORMAT_ARGB16161616F:
4591 case DRM_FORMAT_XBGR16161616F:
4592 case DRM_FORMAT_ABGR16161616F:
4593 *max_upscale = plane_cap->max_upscale_factor.fp16;
4594 *min_downscale = plane_cap->max_downscale_factor.fp16;
4595 break;
4596
4597 default:
4598 *max_upscale = plane_cap->max_upscale_factor.argb8888;
4599 *min_downscale = plane_cap->max_downscale_factor.argb8888;
4600 break;
4601 }
4602
4603 /*
4604	 * A factor of 1 in the plane_cap means to not allow scaling, i.e. use a
4605 * scaling factor of 1.0 == 1000 units.
4606 */
4607 if (*max_upscale == 1)
4608 *max_upscale = 1000;
4609
4610 if (*min_downscale == 1)
4611 *min_downscale = 1000;
4612}
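
/*
 * Illustrative example: the factors above are in 1/1000 units, so a
 * max_upscale_factor of 16000 allows up to a 16x upscale and a
 * max_downscale_factor of 250 allows the destination to shrink to 1/4 of the
 * source; these are also the fallback defaults used in fill_dc_scaling_info()
 * when the plane state has no framebuffer attached.
 */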
4613
4614
4615static int fill_dc_scaling_info(struct amdgpu_device *adev,
4616 const struct drm_plane_state *state,
4617 struct dc_scaling_info *scaling_info)
4618{
4619 int scale_w, scale_h, min_downscale, max_upscale;
4620
4621 memset(scaling_info, 0, sizeof(*scaling_info));
4622
4623 /* Source is fixed 16.16 but we ignore mantissa for now... */
4624 scaling_info->src_rect.x = state->src_x >> 16;
4625 scaling_info->src_rect.y = state->src_y >> 16;
4626
4627 /*
4628	 * For reasons we don't (yet) fully understand, a non-zero
4629 * src_y coordinate into an NV12 buffer can cause a
4630 * system hang on DCN1x.
4631 * To avoid hangs (and maybe be overly cautious)
4632 * let's reject both non-zero src_x and src_y.
4633 *
4634 * We currently know of only one use-case to reproduce a
4635 * scenario with non-zero src_x and src_y for NV12, which
4636 * is to gesture the YouTube Android app into full screen
4637 * on ChromeOS.
4638 */
4639 if (((adev->ip_versions[DCE_HWIP][0] == IP_VERSION(1, 0, 0)) ||
4640 (adev->ip_versions[DCE_HWIP][0] == IP_VERSION(1, 0, 1))) &&
4641 (state->fb && state->fb->format->format == DRM_FORMAT_NV12 &&
4642 (scaling_info->src_rect.x != 0 || scaling_info->src_rect.y != 0)))
4643 return -EINVAL;
4644
4645 scaling_info->src_rect.width = state->src_w >> 16;
4646 if (scaling_info->src_rect.width == 0)
4647 return -EINVAL;
4648
4649 scaling_info->src_rect.height = state->src_h >> 16;
4650 if (scaling_info->src_rect.height == 0)
4651 return -EINVAL;
4652
4653 scaling_info->dst_rect.x = state->crtc_x;
4654 scaling_info->dst_rect.y = state->crtc_y;
4655
4656 if (state->crtc_w == 0)
4657 return -EINVAL;
4658
4659 scaling_info->dst_rect.width = state->crtc_w;
4660
4661 if (state->crtc_h == 0)
4662 return -EINVAL;
4663
4664 scaling_info->dst_rect.height = state->crtc_h;
4665
4666 /* DRM doesn't specify clipping on destination output. */
4667 scaling_info->clip_rect = scaling_info->dst_rect;
4668
4669 /* Validate scaling per-format with DC plane caps */
4670 if (state->plane && state->plane->dev && state->fb) {
4671 get_min_max_dc_plane_scaling(state->plane->dev, state->fb,
4672 &min_downscale, &max_upscale);
4673 } else {
4674 min_downscale = 250;
4675 max_upscale = 16000;
4676 }
4677
4678 scale_w = scaling_info->dst_rect.width * 1000 /
4679 scaling_info->src_rect.width;
4680
4681 if (scale_w < min_downscale || scale_w > max_upscale)
4682 return -EINVAL;
4683
4684 scale_h = scaling_info->dst_rect.height * 1000 /
4685 scaling_info->src_rect.height;
4686
4687 if (scale_h < min_downscale || scale_h > max_upscale)
4688 return -EINVAL;
4689
4690 /*
4691 * The "scaling_quality" can be ignored for now, quality = 0 has DC
4692 * assume reasonable defaults based on the format.
4693 */
4694
4695 return 0;
4696}
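
/*
 * Worked example (illustrative): a 1920-wide source rectangle (src_w =
 * 1920 << 16 in the DRM state) scanned out to a 960-wide destination gives
 * scale_w = 960 * 1000 / 1920 = 500, a 2:1 downscale that is accepted as long
 * as the plane's min_downscale is 500 or lower.
 */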
4697
4698static void
4699fill_gfx8_tiling_info_from_flags(union dc_tiling_info *tiling_info,
4700 uint64_t tiling_flags)
4701{
4702 /* Fill GFX8 params */
4703 if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) {
4704 unsigned int bankw, bankh, mtaspect, tile_split, num_banks;
4705
4706 bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
4707 bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
4708 mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
4709 tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
4710 num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
4711
4712 /* XXX fix me for VI */
4713 tiling_info->gfx8.num_banks = num_banks;
4714 tiling_info->gfx8.array_mode =
4715 DC_ARRAY_2D_TILED_THIN1;
4716 tiling_info->gfx8.tile_split = tile_split;
4717 tiling_info->gfx8.bank_width = bankw;
4718 tiling_info->gfx8.bank_height = bankh;
4719 tiling_info->gfx8.tile_aspect = mtaspect;
4720 tiling_info->gfx8.tile_mode =
4721 DC_ADDR_SURF_MICRO_TILING_DISPLAY;
4722 } else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)
4723 == DC_ARRAY_1D_TILED_THIN1) {
4724 tiling_info->gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
4725 }
4726
4727 tiling_info->gfx8.pipe_config =
4728 AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
4729}
4730
4731static void
4732fill_gfx9_tiling_info_from_device(const struct amdgpu_device *adev,
4733 union dc_tiling_info *tiling_info)
4734{
4735 tiling_info->gfx9.num_pipes =
4736 adev->gfx.config.gb_addr_config_fields.num_pipes;
4737 tiling_info->gfx9.num_banks =
4738 adev->gfx.config.gb_addr_config_fields.num_banks;
4739 tiling_info->gfx9.pipe_interleave =
4740 adev->gfx.config.gb_addr_config_fields.pipe_interleave_size;
4741 tiling_info->gfx9.num_shader_engines =
4742 adev->gfx.config.gb_addr_config_fields.num_se;
4743 tiling_info->gfx9.max_compressed_frags =
4744 adev->gfx.config.gb_addr_config_fields.max_compress_frags;
4745 tiling_info->gfx9.num_rb_per_se =
4746 adev->gfx.config.gb_addr_config_fields.num_rb_per_se;
4747 tiling_info->gfx9.shaderEnable = 1;
4748 if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 3, 0))
4749 tiling_info->gfx9.num_pkrs = adev->gfx.config.gb_addr_config_fields.num_pkrs;
4750}
4751
4752static int
4753validate_dcc(struct amdgpu_device *adev,
4754 const enum surface_pixel_format format,
4755 const enum dc_rotation_angle rotation,
4756 const union dc_tiling_info *tiling_info,
4757 const struct dc_plane_dcc_param *dcc,
4758 const struct dc_plane_address *address,
4759 const struct plane_size *plane_size)
4760{
4761 struct dc *dc = adev->dm.dc;
4762 struct dc_dcc_surface_param input;
4763 struct dc_surface_dcc_cap output;
4764
4765 memset(&input, 0, sizeof(input));
4766 memset(&output, 0, sizeof(output));
4767
4768 if (!dcc->enable)
4769 return 0;
4770
4771 if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN ||
4772 !dc->cap_funcs.get_dcc_compression_cap)
4773 return -EINVAL;
4774
4775 input.format = format;
4776 input.surface_size.width = plane_size->surface_size.width;
4777 input.surface_size.height = plane_size->surface_size.height;
4778 input.swizzle_mode = tiling_info->gfx9.swizzle;
4779
4780 if (rotation == ROTATION_ANGLE_0 || rotation == ROTATION_ANGLE_180)
4781 input.scan = SCAN_DIRECTION_HORIZONTAL;
4782 else if (rotation == ROTATION_ANGLE_90 || rotation == ROTATION_ANGLE_270)
4783 input.scan = SCAN_DIRECTION_VERTICAL;
4784
4785 if (!dc->cap_funcs.get_dcc_compression_cap(dc, &input, &output))
4786 return -EINVAL;
4787
4788 if (!output.capable)
4789 return -EINVAL;
4790
4791 if (dcc->independent_64b_blks == 0 &&
4792 output.grph.rgb.independent_64b_blks != 0)
4793 return -EINVAL;
4794
4795 return 0;
4796}
4797
4798static bool
4799modifier_has_dcc(uint64_t modifier)
4800{
4801 return IS_AMD_FMT_MOD(modifier) && AMD_FMT_MOD_GET(DCC, modifier);
4802}
4803
4804static unsigned
4805modifier_gfx9_swizzle_mode(uint64_t modifier)
4806{
4807 if (modifier == DRM_FORMAT_MOD_LINEAR)
4808 return 0;
4809
4810 return AMD_FMT_MOD_GET(TILE, modifier);
4811}
4812
4813static const struct drm_format_info *
4814amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
4815{
4816 return amdgpu_lookup_format_info(cmd->pixel_format, cmd->modifier[0]);
4817}
4818
4819static void
4820fill_gfx9_tiling_info_from_modifier(const struct amdgpu_device *adev,
4821 union dc_tiling_info *tiling_info,
4822 uint64_t modifier)
4823{
4824 unsigned int mod_bank_xor_bits = AMD_FMT_MOD_GET(BANK_XOR_BITS, modifier);
4825 unsigned int mod_pipe_xor_bits = AMD_FMT_MOD_GET(PIPE_XOR_BITS, modifier);
4826 unsigned int pkrs_log2 = AMD_FMT_MOD_GET(PACKERS, modifier);
4827 unsigned int pipes_log2 = min(4u, mod_pipe_xor_bits);
4828
4829 fill_gfx9_tiling_info_from_device(adev, tiling_info);
4830
4831 if (!IS_AMD_FMT_MOD(modifier))
4832 return;
4833
4834 tiling_info->gfx9.num_pipes = 1u << pipes_log2;
4835 tiling_info->gfx9.num_shader_engines = 1u << (mod_pipe_xor_bits - pipes_log2);
4836
4837 if (adev->family >= AMDGPU_FAMILY_NV) {
4838 tiling_info->gfx9.num_pkrs = 1u << pkrs_log2;
4839 } else {
4840 tiling_info->gfx9.num_banks = 1u << mod_bank_xor_bits;
4841
4842 /* for DCC we know it isn't rb aligned, so rb_per_se doesn't matter. */
4843 }
4844}
4845
4846enum dm_micro_swizzle {
4847 MICRO_SWIZZLE_Z = 0,
4848 MICRO_SWIZZLE_S = 1,
4849 MICRO_SWIZZLE_D = 2,
4850 MICRO_SWIZZLE_R = 3
4851};
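
/*
 * The micro (element) swizzle is encoded in the low two bits of the GFX9+
 * swizzle mode, which is why dm_plane_format_mod_supported() masks
 * modifier_gfx9_swizzle_mode() with 3 to recover one of the values above.
 */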
4852
4853static bool dm_plane_format_mod_supported(struct drm_plane *plane,
4854 uint32_t format,
4855 uint64_t modifier)
4856{
4857 struct amdgpu_device *adev = drm_to_adev(plane->dev);
4858 const struct drm_format_info *info = drm_format_info(format);
4859 int i;
4860
4861 enum dm_micro_swizzle microtile = modifier_gfx9_swizzle_mode(modifier) & 3;
4862
4863 if (!info)
4864 return false;
4865
4866 /*
4867 * We always have to allow these modifiers:
4868 * 1. Core DRM checks for LINEAR support if userspace does not provide modifiers.
4869 * 2. Not passing any modifiers is the same as explicitly passing INVALID.
4870 */
4871 if (modifier == DRM_FORMAT_MOD_LINEAR ||
4872 modifier == DRM_FORMAT_MOD_INVALID) {
4873 return true;
4874 }
4875
4876 /* Check that the modifier is on the list of the plane's supported modifiers. */
4877 for (i = 0; i < plane->modifier_count; i++) {
4878 if (modifier == plane->modifiers[i])
4879 break;
4880 }
4881 if (i == plane->modifier_count)
4882 return false;
4883
4884 /*
4885 * For D swizzle the canonical modifier depends on the bpp, so check
4886 * it here.
4887 */
4888 if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) == AMD_FMT_MOD_TILE_VER_GFX9 &&
4889 adev->family >= AMDGPU_FAMILY_NV) {
4890 if (microtile == MICRO_SWIZZLE_D && info->cpp[0] == 4)
4891 return false;
4892 }
4893
4894 if (adev->family >= AMDGPU_FAMILY_RV && microtile == MICRO_SWIZZLE_D &&
4895 info->cpp[0] < 8)
4896 return false;
4897
4898 if (modifier_has_dcc(modifier)) {
4899 /* Per radeonsi comments 16/64 bpp are more complicated. */
4900 if (info->cpp[0] != 4)
4901 return false;
4902 /* We support multi-planar formats, but not when combined with
4903 * additional DCC metadata planes. */
4904 if (info->num_planes > 1)
4905 return false;
4906 }
4907
4908 return true;
4909}
4910
4911static void
4912add_modifier(uint64_t **mods, uint64_t *size, uint64_t *cap, uint64_t mod)
4913{
4914 if (!*mods)
4915 return;
4916
4917 if (*cap - *size < 1) {
4918 uint64_t new_cap = *cap * 2;
4919 uint64_t *new_mods = kmalloc(new_cap * sizeof(uint64_t), GFP_KERNEL);
4920
4921 if (!new_mods) {
4922 kfree(*mods);
4923 *mods = NULL;
4924 return;
4925 }
4926
4927 memcpy(new_mods, *mods, sizeof(uint64_t) * *size);
4928 kfree(*mods);
4929 *mods = new_mods;
4930 *cap = new_cap;
4931 }
4932
4933 (*mods)[*size] = mod;
4934 *size += 1;
4935}
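
/*
 * On allocation failure add_modifier() frees the list and leaves *mods NULL,
 * turning every later call into a no-op; get_plane_modifiers() then reports
 * the whole operation as -ENOMEM.
 */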
4936
4937static void
4938add_gfx9_modifiers(const struct amdgpu_device *adev,
4939 uint64_t **mods, uint64_t *size, uint64_t *capacity)
4940{
4941 int pipes = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
4942 int pipe_xor_bits = min(8, pipes +
4943 ilog2(adev->gfx.config.gb_addr_config_fields.num_se));
4944 int bank_xor_bits = min(8 - pipe_xor_bits,
4945 ilog2(adev->gfx.config.gb_addr_config_fields.num_banks));
4946 int rb = ilog2(adev->gfx.config.gb_addr_config_fields.num_se) +
4947 ilog2(adev->gfx.config.gb_addr_config_fields.num_rb_per_se);
4948
4949
4950 if (adev->family == AMDGPU_FAMILY_RV) {
4951 /* Raven2 and later */
4952 bool has_constant_encode = adev->asic_type > CHIP_RAVEN || adev->external_rev_id >= 0x81;
4953
4954 /*
4955 * No _D DCC swizzles yet because we only allow 32bpp, which
4956 * doesn't support _D on DCN
4957 */
4958
4959 if (has_constant_encode) {
4960 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4961 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4962 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4963 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4964 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4965 AMD_FMT_MOD_SET(DCC, 1) |
4966 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4967 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4968 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1));
4969 }
4970
4971 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4972 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4973 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4974 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4975 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4976 AMD_FMT_MOD_SET(DCC, 1) |
4977 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4978 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4979 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0));
4980
4981 if (has_constant_encode) {
4982 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4983 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4984 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4985 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4986 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4987 AMD_FMT_MOD_SET(DCC, 1) |
4988 AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4989 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4990 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4991
4992 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4993 AMD_FMT_MOD_SET(RB, rb) |
4994 AMD_FMT_MOD_SET(PIPE, pipes));
4995 }
4996
4997 add_modifier(mods, size, capacity, AMD_FMT_MOD |
4998 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4999 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
5000 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5001 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
5002 AMD_FMT_MOD_SET(DCC, 1) |
5003 AMD_FMT_MOD_SET(DCC_RETILE, 1) |
5004 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5005 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
5006 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0) |
5007 AMD_FMT_MOD_SET(RB, rb) |
5008 AMD_FMT_MOD_SET(PIPE, pipes));
5009 }
5010
5011 /*
5012 * Only supported for 64bpp on Raven, will be filtered on format in
5013 * dm_plane_format_mod_supported.
5014 */
5015 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5016 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D_X) |
5017 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
5018 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5019 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
5020
5021 if (adev->family == AMDGPU_FAMILY_RV) {
5022 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5023 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5024 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
5025 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5026 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
5027 }
5028
5029 /*
5030 * Only supported for 64bpp on Raven, will be filtered on format in
5031 * dm_plane_format_mod_supported.
5032 */
5033 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5034 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
5035 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5036
5037 if (adev->family == AMDGPU_FAMILY_RV) {
5038 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5039 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
5040 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5041 }
5042}
5043
5044static void
5045add_gfx10_1_modifiers(const struct amdgpu_device *adev,
5046 uint64_t **mods, uint64_t *size, uint64_t *capacity)
5047{
5048 int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
5049
5050 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5051 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5052 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
5053 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5054 AMD_FMT_MOD_SET(DCC, 1) |
5055 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5056 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5057 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
5058
5059 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5060 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5061 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
5062 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5063 AMD_FMT_MOD_SET(DCC, 1) |
5064 AMD_FMT_MOD_SET(DCC_RETILE, 1) |
5065 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5066 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5067 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
5068
5069 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5070 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5071 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
5072 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
5073
5074 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5075 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5076 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
5077 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
5078
5079
5080 /* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
5081 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5082 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
5083 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5084
5085 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5086 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
5087 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5088}
5089
5090static void
5091add_gfx10_3_modifiers(const struct amdgpu_device *adev,
5092 uint64_t **mods, uint64_t *size, uint64_t *capacity)
5093{
5094 int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
5095 int pkrs = ilog2(adev->gfx.config.gb_addr_config_fields.num_pkrs);
5096
5097 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5098 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5099 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5100 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5101 AMD_FMT_MOD_SET(PACKERS, pkrs) |
5102 AMD_FMT_MOD_SET(DCC, 1) |
5103 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5104 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5105 AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
5106 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
5107
5108 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5109 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5110 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5111 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5112 AMD_FMT_MOD_SET(PACKERS, pkrs) |
5113 AMD_FMT_MOD_SET(DCC, 1) |
5114 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5115 AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
5116 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B));
5117
5118 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5119 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5120 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5121 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5122 AMD_FMT_MOD_SET(PACKERS, pkrs) |
5123 AMD_FMT_MOD_SET(DCC, 1) |
5124 AMD_FMT_MOD_SET(DCC_RETILE, 1) |
5125 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5126 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5127 AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
5128 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
5129
5130 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5131 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5132 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5133 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5134 AMD_FMT_MOD_SET(PACKERS, pkrs) |
5135 AMD_FMT_MOD_SET(DCC, 1) |
5136 AMD_FMT_MOD_SET(DCC_RETILE, 1) |
5137 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5138 AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
5139 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B));
5140
5141 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5142 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5143 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5144 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5145 AMD_FMT_MOD_SET(PACKERS, pkrs));
5146
5147 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5148 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5149 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5150 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5151 AMD_FMT_MOD_SET(PACKERS, pkrs));
5152
5153 /* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
5154 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5155 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
5156 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5157
5158 add_modifier(mods, size, capacity, AMD_FMT_MOD |
5159 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
5160 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5161}
5162
5163static int
5164get_plane_modifiers(const struct amdgpu_device *adev, unsigned int plane_type, uint64_t **mods)
5165{
5166 uint64_t size = 0, capacity = 128;
5167 *mods = NULL;
5168
5169 /* We have not hooked up any pre-GFX9 modifiers. */
5170 if (adev->family < AMDGPU_FAMILY_AI)
5171 return 0;
5172
5173 *mods = kmalloc(capacity * sizeof(uint64_t), GFP_KERNEL);
5174
5175 if (plane_type == DRM_PLANE_TYPE_CURSOR) {
5176 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
5177 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
5178 return *mods ? 0 : -ENOMEM;
5179 }
5180
5181 switch (adev->family) {
5182 case AMDGPU_FAMILY_AI:
5183 case AMDGPU_FAMILY_RV:
5184 add_gfx9_modifiers(adev, mods, &size, &capacity);
5185 break;
5186 case AMDGPU_FAMILY_NV:
5187 case AMDGPU_FAMILY_VGH:
5188 case AMDGPU_FAMILY_YC:
5189 if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 3, 0))
5190 add_gfx10_3_modifiers(adev, mods, &size, &capacity);
5191 else
5192 add_gfx10_1_modifiers(adev, mods, &size, &capacity);
5193 break;
5194 }
5195
5196 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
5197
5198 /* INVALID marks the end of the list. */
5199 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
5200
5201 if (!*mods)
5202 return -ENOMEM;
5203
5204 return 0;
5205}
5206
5207static int
5208fill_gfx9_plane_attributes_from_modifiers(struct amdgpu_device *adev,
5209 const struct amdgpu_framebuffer *afb,
5210 const enum surface_pixel_format format,
5211 const enum dc_rotation_angle rotation,
5212 const struct plane_size *plane_size,
5213 union dc_tiling_info *tiling_info,
5214 struct dc_plane_dcc_param *dcc,
5215 struct dc_plane_address *address,
5216 const bool force_disable_dcc)
5217{
5218 const uint64_t modifier = afb->base.modifier;
5219 int ret = 0;
5220
5221 fill_gfx9_tiling_info_from_modifier(adev, tiling_info, modifier);
5222 tiling_info->gfx9.swizzle = modifier_gfx9_swizzle_mode(modifier);
5223
5224 if (modifier_has_dcc(modifier) && !force_disable_dcc) {
5225 uint64_t dcc_address = afb->address + afb->base.offsets[1];
5226 bool independent_64b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_64B, modifier);
5227 bool independent_128b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_128B, modifier);
5228
5229 dcc->enable = 1;
5230 dcc->meta_pitch = afb->base.pitches[1];
5231 dcc->independent_64b_blks = independent_64b_blks;
5232 if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) == AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) {
5233 if (independent_64b_blks && independent_128b_blks)
5234 dcc->dcc_ind_blk = hubp_ind_block_64b_no_128bcl;
5235 else if (independent_128b_blks)
5236 dcc->dcc_ind_blk = hubp_ind_block_128b;
5237 else if (independent_64b_blks && !independent_128b_blks)
5238 dcc->dcc_ind_blk = hubp_ind_block_64b;
5239 else
5240 dcc->dcc_ind_blk = hubp_ind_block_unconstrained;
5241 } else {
5242 if (independent_64b_blks)
5243 dcc->dcc_ind_blk = hubp_ind_block_64b;
5244 else
5245 dcc->dcc_ind_blk = hubp_ind_block_unconstrained;
5246 }
5247
5248 address->grph.meta_addr.low_part = lower_32_bits(dcc_address);
5249 address->grph.meta_addr.high_part = upper_32_bits(dcc_address);
5250 }
5251
5252 ret = validate_dcc(adev, format, rotation, tiling_info, dcc, address, plane_size);
5253 if (ret)
5254 drm_dbg_kms(adev_to_drm(adev), "validate_dcc: returned error: %d\n", ret);
5255
5256 return ret;
5257}
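
/*
 * With a DCC-capable modifier the framebuffer carries the DCC metadata as an
 * extra plane, so offsets[1]/pitches[1] above feed the meta address and meta
 * pitch that DC programs alongside the main surface.
 */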
5258
5259static int
5260fill_plane_buffer_attributes(struct amdgpu_device *adev,
5261 const struct amdgpu_framebuffer *afb,
5262 const enum surface_pixel_format format,
5263 const enum dc_rotation_angle rotation,
5264 const uint64_t tiling_flags,
5265 union dc_tiling_info *tiling_info,
5266 struct plane_size *plane_size,
5267 struct dc_plane_dcc_param *dcc,
5268 struct dc_plane_address *address,
5269 bool tmz_surface,
5270 bool force_disable_dcc)
5271{
5272 const struct drm_framebuffer *fb = &afb->base;
5273 int ret;
5274
5275 memset(tiling_info, 0, sizeof(*tiling_info));
5276 memset(plane_size, 0, sizeof(*plane_size));
5277 memset(dcc, 0, sizeof(*dcc));
5278 memset(address, 0, sizeof(*address));
5279
5280 address->tmz_surface = tmz_surface;
5281
5282 if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
5283 uint64_t addr = afb->address + fb->offsets[0];
5284
5285 plane_size->surface_size.x = 0;
5286 plane_size->surface_size.y = 0;
5287 plane_size->surface_size.width = fb->width;
5288 plane_size->surface_size.height = fb->height;
5289 plane_size->surface_pitch =
5290 fb->pitches[0] / fb->format->cpp[0];
5291
5292 address->type = PLN_ADDR_TYPE_GRAPHICS;
5293 address->grph.addr.low_part = lower_32_bits(addr);
5294 address->grph.addr.high_part = upper_32_bits(addr);
5295 } else if (format < SURFACE_PIXEL_FORMAT_INVALID) {
5296 uint64_t luma_addr = afb->address + fb->offsets[0];
5297 uint64_t chroma_addr = afb->address + fb->offsets[1];
5298
5299 plane_size->surface_size.x = 0;
5300 plane_size->surface_size.y = 0;
5301 plane_size->surface_size.width = fb->width;
5302 plane_size->surface_size.height = fb->height;
5303 plane_size->surface_pitch =
5304 fb->pitches[0] / fb->format->cpp[0];
5305
5306 plane_size->chroma_size.x = 0;
5307 plane_size->chroma_size.y = 0;
5308 /* TODO: set these based on surface format */
5309 plane_size->chroma_size.width = fb->width / 2;
5310 plane_size->chroma_size.height = fb->height / 2;
5311
5312 plane_size->chroma_pitch =
5313 fb->pitches[1] / fb->format->cpp[1];
5314
5315 address->type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
5316 address->video_progressive.luma_addr.low_part =
5317 lower_32_bits(luma_addr);
5318 address->video_progressive.luma_addr.high_part =
5319 upper_32_bits(luma_addr);
5320 address->video_progressive.chroma_addr.low_part =
5321 lower_32_bits(chroma_addr);
5322 address->video_progressive.chroma_addr.high_part =
5323 upper_32_bits(chroma_addr);
5324 }
5325
5326 if (adev->family >= AMDGPU_FAMILY_AI) {
5327 ret = fill_gfx9_plane_attributes_from_modifiers(adev, afb, format,
5328 rotation, plane_size,
5329 tiling_info, dcc,
5330 address,
5331 force_disable_dcc);
5332 if (ret)
5333 return ret;
5334 } else {
5335 fill_gfx8_tiling_info_from_flags(tiling_info, tiling_flags);
5336 }
5337
5338 return 0;
5339}
5340
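/*
 * Derive alpha blending settings from the DRM plane state: per-pixel alpha
 * for premultiplied ARGB-style formats, and a global alpha value when the
 * plane's alpha property is below its maximum. Only overlay planes blend.
 */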
5341static void
5342fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
5343 bool *per_pixel_alpha, bool *global_alpha,
5344 int *global_alpha_value)
5345{
5346 *per_pixel_alpha = false;
5347 *global_alpha = false;
5348 *global_alpha_value = 0xff;
5349
5350 if (plane_state->plane->type != DRM_PLANE_TYPE_OVERLAY)
5351 return;
5352
5353 if (plane_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI) {
5354 static const uint32_t alpha_formats[] = {
5355 DRM_FORMAT_ARGB8888,
5356 DRM_FORMAT_RGBA8888,
5357 DRM_FORMAT_ABGR8888,
5358 };
5359 uint32_t format = plane_state->fb->format->format;
5360 unsigned int i;
5361
5362 for (i = 0; i < ARRAY_SIZE(alpha_formats); ++i) {
5363 if (format == alpha_formats[i]) {
5364 *per_pixel_alpha = true;
5365 break;
5366 }
5367 }
5368 }
5369
5370 if (plane_state->alpha < 0xffff) {
5371 *global_alpha = true;
5372 *global_alpha_value = plane_state->alpha >> 8;
5373 }
5374}
5375
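/*
 * Map the DRM color encoding/range properties of a YCbCr surface to the
 * corresponding DC color space; RGB surfaces always use sRGB.
 */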
5376static int
5377fill_plane_color_attributes(const struct drm_plane_state *plane_state,
5378 const enum surface_pixel_format format,
5379 enum dc_color_space *color_space)
5380{
5381 bool full_range;
5382
5383 *color_space = COLOR_SPACE_SRGB;
5384
5385 /* DRM color properties only affect non-RGB formats. */
5386 if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
5387 return 0;
5388
5389 full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE);
5390
5391 switch (plane_state->color_encoding) {
5392 case DRM_COLOR_YCBCR_BT601:
5393 if (full_range)
5394 *color_space = COLOR_SPACE_YCBCR601;
5395 else
5396 *color_space = COLOR_SPACE_YCBCR601_LIMITED;
5397 break;
5398
5399 case DRM_COLOR_YCBCR_BT709:
5400 if (full_range)
5401 *color_space = COLOR_SPACE_YCBCR709;
5402 else
5403 *color_space = COLOR_SPACE_YCBCR709_LIMITED;
5404 break;
5405
5406 case DRM_COLOR_YCBCR_BT2020:
5407 if (full_range)
5408 *color_space = COLOR_SPACE_2020_YCBCR;
5409 else
5410 return -EINVAL;
5411 break;
5412
5413 default:
5414 return -EINVAL;
5415 }
5416
5417 return 0;
5418}
5419
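/*
 * Fill a dc_plane_info from the DRM plane state: pixel format, rotation,
 * color space, buffer/tiling attributes and blending, plus the DC plane
 * address for the backing framebuffer.
 */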
5420static int
5421fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
5422 const struct drm_plane_state *plane_state,
5423 const uint64_t tiling_flags,
5424 struct dc_plane_info *plane_info,
5425 struct dc_plane_address *address,
5426 bool tmz_surface,
5427 bool force_disable_dcc)
5428{
5429 const struct drm_framebuffer *fb = plane_state->fb;
5430 const struct amdgpu_framebuffer *afb =
5431 to_amdgpu_framebuffer(plane_state->fb);
5432 int ret;
5433
5434 memset(plane_info, 0, sizeof(*plane_info));
5435
5436 switch (fb->format->format) {
5437 case DRM_FORMAT_C8:
5438 plane_info->format =
5439 SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
5440 break;
5441 case DRM_FORMAT_RGB565:
5442 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
5443 break;
5444 case DRM_FORMAT_XRGB8888:
5445 case DRM_FORMAT_ARGB8888:
5446 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
5447 break;
5448 case DRM_FORMAT_XRGB2101010:
5449 case DRM_FORMAT_ARGB2101010:
5450 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
5451 break;
5452 case DRM_FORMAT_XBGR2101010:
5453 case DRM_FORMAT_ABGR2101010:
5454 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
5455 break;
5456 case DRM_FORMAT_XBGR8888:
5457 case DRM_FORMAT_ABGR8888:
5458 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888;
5459 break;
5460 case DRM_FORMAT_NV21:
5461 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
5462 break;
5463 case DRM_FORMAT_NV12:
5464 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
5465 break;
5466 case DRM_FORMAT_P010:
5467 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb;
5468 break;
5469 case DRM_FORMAT_XRGB16161616F:
5470 case DRM_FORMAT_ARGB16161616F:
5471 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F;
5472 break;
5473 case DRM_FORMAT_XBGR16161616F:
5474 case DRM_FORMAT_ABGR16161616F:
5475 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F;
5476 break;
5477 case DRM_FORMAT_XRGB16161616:
5478 case DRM_FORMAT_ARGB16161616:
5479 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616;
5480 break;
5481 case DRM_FORMAT_XBGR16161616:
5482 case DRM_FORMAT_ABGR16161616:
5483 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616;
5484 break;
5485 default:
5486 DRM_ERROR(
5487 "Unsupported screen format %p4cc\n",
5488 &fb->format->format);
5489 return -EINVAL;
5490 }
5491
5492 switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
5493 case DRM_MODE_ROTATE_0:
5494 plane_info->rotation = ROTATION_ANGLE_0;
5495 break;
5496 case DRM_MODE_ROTATE_90:
5497 plane_info->rotation = ROTATION_ANGLE_90;
5498 break;
5499 case DRM_MODE_ROTATE_180:
5500 plane_info->rotation = ROTATION_ANGLE_180;
5501 break;
5502 case DRM_MODE_ROTATE_270:
5503 plane_info->rotation = ROTATION_ANGLE_270;
5504 break;
5505 default:
5506 plane_info->rotation = ROTATION_ANGLE_0;
5507 break;
5508 }
5509
5510 plane_info->visible = true;
5511 plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE;
5512
5513 plane_info->layer_index = 0;
5514
5515 ret = fill_plane_color_attributes(plane_state, plane_info->format,
5516 &plane_info->color_space);
5517 if (ret)
5518 return ret;
5519
5520 ret = fill_plane_buffer_attributes(adev, afb, plane_info->format,
5521 plane_info->rotation, tiling_flags,
5522 &plane_info->tiling_info,
5523 &plane_info->plane_size,
5524 &plane_info->dcc, address, tmz_surface,
5525 force_disable_dcc);
5526 if (ret)
5527 return ret;
5528
5529 fill_blending_from_plane_state(
5530 plane_state, &plane_info->per_pixel_alpha,
5531 &plane_info->global_alpha, &plane_info->global_alpha_value);
5532
5533 return 0;
5534}
5535
5536static int fill_dc_plane_attributes(struct amdgpu_device *adev,
5537 struct dc_plane_state *dc_plane_state,
5538 struct drm_plane_state *plane_state,
5539 struct drm_crtc_state *crtc_state)
5540{
5541 struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
5542 struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)plane_state->fb;
5543 struct dc_scaling_info scaling_info;
5544 struct dc_plane_info plane_info;
5545 int ret;
5546 bool force_disable_dcc = false;
5547
5548 ret = fill_dc_scaling_info(adev, plane_state, &scaling_info);
5549 if (ret)
5550 return ret;
5551
5552 dc_plane_state->src_rect = scaling_info.src_rect;
5553 dc_plane_state->dst_rect = scaling_info.dst_rect;
5554 dc_plane_state->clip_rect = scaling_info.clip_rect;
5555 dc_plane_state->scaling_quality = scaling_info.scaling_quality;
5556
5557 force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
5558 ret = fill_dc_plane_info_and_addr(adev, plane_state,
5559 afb->tiling_flags,
5560 &plane_info,
5561 &dc_plane_state->address,
5562 afb->tmz_surface,
5563 force_disable_dcc);
5564 if (ret)
5565 return ret;
5566
5567 dc_plane_state->format = plane_info.format;
5568 dc_plane_state->color_space = plane_info.color_space;
5570 dc_plane_state->plane_size = plane_info.plane_size;
5571 dc_plane_state->rotation = plane_info.rotation;
5572 dc_plane_state->horizontal_mirror = plane_info.horizontal_mirror;
5573 dc_plane_state->stereo_format = plane_info.stereo_format;
5574 dc_plane_state->tiling_info = plane_info.tiling_info;
5575 dc_plane_state->visible = plane_info.visible;
5576 dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha;
5577 dc_plane_state->global_alpha = plane_info.global_alpha;
5578 dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
5579 dc_plane_state->dcc = plane_info.dcc;
5580 dc_plane_state->layer_index = plane_info.layer_index; // Always returns 0
5581 dc_plane_state->flip_int_enabled = true;
5582
5583 /*
5584 * Always set input transfer function, since plane state is refreshed
5585 * every time.
5586 */
5587 ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state, dc_plane_state);
5588 if (ret)
5589 return ret;
5590
5591 return 0;
5592}
5593
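/*
 * Compute the stream source (viewport) and destination rectangles from the
 * requested mode and the connector's scaling (RMX) and underscan settings.
 */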
5594static void update_stream_scaling_settings(const struct drm_display_mode *mode,
5595 const struct dm_connector_state *dm_state,
5596 struct dc_stream_state *stream)
5597{
5598 enum amdgpu_rmx_type rmx_type;
5599
5600	struct rect src = { 0 }; /* viewport in composition space */
5601 struct rect dst = { 0 }; /* stream addressable area */
5602
5603 /* no mode. nothing to be done */
5604 if (!mode)
5605 return;
5606
5607 /* Full screen scaling by default */
5608 src.width = mode->hdisplay;
5609 src.height = mode->vdisplay;
5610 dst.width = stream->timing.h_addressable;
5611 dst.height = stream->timing.v_addressable;
5612
5613 if (dm_state) {
5614 rmx_type = dm_state->scaling;
5615 if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
5616 if (src.width * dst.height <
5617 src.height * dst.width) {
5618 /* height needs less upscaling/more downscaling */
5619 dst.width = src.width *
5620 dst.height / src.height;
5621 } else {
5622 /* width needs less upscaling/more downscaling */
5623 dst.height = src.height *
5624 dst.width / src.width;
5625 }
5626 } else if (rmx_type == RMX_CENTER) {
5627 dst = src;
5628 }
5629
5630 dst.x = (stream->timing.h_addressable - dst.width) / 2;
5631 dst.y = (stream->timing.v_addressable - dst.height) / 2;
5632
5633 if (dm_state->underscan_enable) {
5634 dst.x += dm_state->underscan_hborder / 2;
5635 dst.y += dm_state->underscan_vborder / 2;
5636 dst.width -= dm_state->underscan_hborder;
5637 dst.height -= dm_state->underscan_vborder;
5638 }
5639 }
5640
5641 stream->src = src;
5642 stream->dst = dst;
5643
5644 DRM_DEBUG_KMS("Destination Rectangle x:%d y:%d width:%d height:%d\n",
5645 dst.x, dst.y, dst.width, dst.height);
5646
5647}
5648
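/*
 * Pick a DC color depth from the sink's reported bpc (or, for YCbCr 4:2:0,
 * from the HDMI deep color caps), clamped to the user-requested max bpc and
 * rounded down to an even value.
 */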
5649static enum dc_color_depth
5650convert_color_depth_from_display_info(const struct drm_connector *connector,
5651 bool is_y420, int requested_bpc)
5652{
5653 uint8_t bpc;
5654
5655 if (is_y420) {
5656 bpc = 8;
5657
5658 /* Cap display bpc based on HDMI 2.0 HF-VSDB */
5659 if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48)
5660 bpc = 16;
5661 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36)
5662 bpc = 12;
5663 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30)
5664 bpc = 10;
5665 } else {
5666 bpc = (uint8_t)connector->display_info.bpc;
5667 /* Assume 8 bpc by default if no bpc is specified. */
5668 bpc = bpc ? bpc : 8;
5669 }
5670
5671 if (requested_bpc > 0) {
5672 /*
5673 * Cap display bpc based on the user requested value.
5674 *
5675		 * The value for state->max_bpc may not be correctly updated
5676 * depending on when the connector gets added to the state
5677 * or if this was called outside of atomic check, so it
5678 * can't be used directly.
5679 */
5680 bpc = min_t(u8, bpc, requested_bpc);
5681
5682 /* Round down to the nearest even number. */
5683 bpc = bpc - (bpc & 1);
5684 }
5685
5686 switch (bpc) {
5687 case 0:
5688 /*
5689		 * Temporary workaround: DRM doesn't parse color depth for
5690		 * EDID revisions before 1.4.
5691		 * TODO: Fix EDID parsing
5692 */
5693 return COLOR_DEPTH_888;
5694 case 6:
5695 return COLOR_DEPTH_666;
5696 case 8:
5697 return COLOR_DEPTH_888;
5698 case 10:
5699 return COLOR_DEPTH_101010;
5700 case 12:
5701 return COLOR_DEPTH_121212;
5702 case 14:
5703 return COLOR_DEPTH_141414;
5704 case 16:
5705 return COLOR_DEPTH_161616;
5706 default:
5707 return COLOR_DEPTH_UNDEFINED;
5708 }
5709}
5710
5711static enum dc_aspect_ratio
5712get_aspect_ratio(const struct drm_display_mode *mode_in)
5713{
5714 /* 1-1 mapping, since both enums follow the HDMI spec. */
5715 return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio;
5716}
5717
5718static enum dc_color_space
5719get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
5720{
5721 enum dc_color_space color_space = COLOR_SPACE_SRGB;
5722
5723 switch (dc_crtc_timing->pixel_encoding) {
5724 case PIXEL_ENCODING_YCBCR422:
5725 case PIXEL_ENCODING_YCBCR444:
5726 case PIXEL_ENCODING_YCBCR420:
5727 {
5728 /*
5729		 * 27.03 MHz is the separation point between HDTV and SDTV
5730		 * according to the HDMI spec; we use YCbCr709 and YCbCr601
5731		 * respectively
5732 */
5733 if (dc_crtc_timing->pix_clk_100hz > 270300) {
5734 if (dc_crtc_timing->flags.Y_ONLY)
5735 color_space =
5736 COLOR_SPACE_YCBCR709_LIMITED;
5737 else
5738 color_space = COLOR_SPACE_YCBCR709;
5739 } else {
5740 if (dc_crtc_timing->flags.Y_ONLY)
5741 color_space =
5742 COLOR_SPACE_YCBCR601_LIMITED;
5743 else
5744 color_space = COLOR_SPACE_YCBCR601;
5745 }
5746
5747 }
5748 break;
5749 case PIXEL_ENCODING_RGB:
5750 color_space = COLOR_SPACE_SRGB;
5751 break;
5752
5753 default:
5754 WARN_ON(1);
5755 break;
5756 }
5757
5758 return color_space;
5759}
5760
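/*
 * Walk down from the current colour depth until the HDMI-normalized pixel
 * clock fits within the sink's maximum TMDS clock; returns false if no
 * supported depth fits.
 */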
5761static bool adjust_colour_depth_from_display_info(
5762 struct dc_crtc_timing *timing_out,
5763 const struct drm_display_info *info)
5764{
5765 enum dc_color_depth depth = timing_out->display_color_depth;
5766 int normalized_clk;
5767 do {
5768 normalized_clk = timing_out->pix_clk_100hz / 10;
5769 /* YCbCr 4:2:0 requires additional adjustment of 1/2 */
5770 if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
5771 normalized_clk /= 2;
5772		/* Adjust the pixel clock per the HDMI spec, based on the colour depth */
5773 switch (depth) {
5774 case COLOR_DEPTH_888:
5775 break;
5776 case COLOR_DEPTH_101010:
5777 normalized_clk = (normalized_clk * 30) / 24;
5778 break;
5779 case COLOR_DEPTH_121212:
5780 normalized_clk = (normalized_clk * 36) / 24;
5781 break;
5782 case COLOR_DEPTH_161616:
5783 normalized_clk = (normalized_clk * 48) / 24;
5784 break;
5785 default:
5786 /* The above depths are the only ones valid for HDMI. */
5787 return false;
5788 }
5789 if (normalized_clk <= info->max_tmds_clock) {
5790 timing_out->display_color_depth = depth;
5791 return true;
5792 }
5793 } while (--depth > COLOR_DEPTH_666);
5794 return false;
5795}
5796
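/*
 * Fill the DC stream timing from a DRM display mode: pixel encoding, color
 * depth, VIC/HDMI VIC, sync polarities, addressable and total dimensions,
 * and the output color space and transfer function.
 */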
5797static void fill_stream_properties_from_drm_display_mode(
5798 struct dc_stream_state *stream,
5799 const struct drm_display_mode *mode_in,
5800 const struct drm_connector *connector,
5801 const struct drm_connector_state *connector_state,
5802 const struct dc_stream_state *old_stream,
5803 int requested_bpc)
5804{
5805 struct dc_crtc_timing *timing_out = &stream->timing;
5806 const struct drm_display_info *info = &connector->display_info;
5807 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5808 struct hdmi_vendor_infoframe hv_frame;
5809 struct hdmi_avi_infoframe avi_frame;
5810
5811 memset(&hv_frame, 0, sizeof(hv_frame));
5812 memset(&avi_frame, 0, sizeof(avi_frame));
5813
5814 timing_out->h_border_left = 0;
5815 timing_out->h_border_right = 0;
5816 timing_out->v_border_top = 0;
5817 timing_out->v_border_bottom = 0;
5818 /* TODO: un-hardcode */
5819 if (drm_mode_is_420_only(info, mode_in)
5820 && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
5821 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5822 else if (drm_mode_is_420_also(info, mode_in)
5823 && aconnector->force_yuv420_output)
5824 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5825 else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444)
5826 && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
5827 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
5828 else
5829 timing_out->pixel_encoding = PIXEL_ENCODING_RGB;
5830
5831 timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
5832 timing_out->display_color_depth = convert_color_depth_from_display_info(
5833 connector,
5834 (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420),
5835 requested_bpc);
5836 timing_out->scan_type = SCANNING_TYPE_NODATA;
5837 timing_out->hdmi_vic = 0;
5838
5839	if (old_stream) {
5840 timing_out->vic = old_stream->timing.vic;
5841 timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY;
5842 timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY;
5843 } else {
5844 timing_out->vic = drm_match_cea_mode(mode_in);
5845 if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
5846 timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
5847 if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
5848 timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
5849 }
5850
5851 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
5852 drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in);
5853 timing_out->vic = avi_frame.video_code;
5854 drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in);
5855 timing_out->hdmi_vic = hv_frame.vic;
5856 }
5857
5858 if (is_freesync_video_mode(mode_in, aconnector)) {
5859 timing_out->h_addressable = mode_in->hdisplay;
5860 timing_out->h_total = mode_in->htotal;
5861 timing_out->h_sync_width = mode_in->hsync_end - mode_in->hsync_start;
5862 timing_out->h_front_porch = mode_in->hsync_start - mode_in->hdisplay;
5863 timing_out->v_total = mode_in->vtotal;
5864 timing_out->v_addressable = mode_in->vdisplay;
5865 timing_out->v_front_porch = mode_in->vsync_start - mode_in->vdisplay;
5866 timing_out->v_sync_width = mode_in->vsync_end - mode_in->vsync_start;
5867 timing_out->pix_clk_100hz = mode_in->clock * 10;
5868 } else {
5869 timing_out->h_addressable = mode_in->crtc_hdisplay;
5870 timing_out->h_total = mode_in->crtc_htotal;
5871 timing_out->h_sync_width = mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
5872 timing_out->h_front_porch = mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
5873 timing_out->v_total = mode_in->crtc_vtotal;
5874 timing_out->v_addressable = mode_in->crtc_vdisplay;
5875 timing_out->v_front_porch = mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
5876 timing_out->v_sync_width = mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
5877 timing_out->pix_clk_100hz = mode_in->crtc_clock * 10;
5878 }
5879
5880 timing_out->aspect_ratio = get_aspect_ratio(mode_in);
5881
5882 stream->output_color_space = get_output_color_space(timing_out);
5883
5884 stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
5885 stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
5886 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
5887 if (!adjust_colour_depth_from_display_info(timing_out, info) &&
5888 drm_mode_is_420_also(info, mode_in) &&
5889 timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) {
5890 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5891 adjust_colour_depth_from_display_info(timing_out, info);
5892 }
5893 }
5894}
5895
5896static void fill_audio_info(struct audio_info *audio_info,
5897 const struct drm_connector *drm_connector,
5898 const struct dc_sink *dc_sink)
5899{
5900 int i = 0;
5901 int cea_revision = 0;
5902 const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;
5903
5904 audio_info->manufacture_id = edid_caps->manufacturer_id;
5905 audio_info->product_id = edid_caps->product_id;
5906
5907 cea_revision = drm_connector->display_info.cea_rev;
5908
5909 strscpy(audio_info->display_name,
5910 edid_caps->display_name,
5911 AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);
5912
5913 if (cea_revision >= 3) {
5914 audio_info->mode_count = edid_caps->audio_mode_count;
5915
5916 for (i = 0; i < audio_info->mode_count; ++i) {
5917 audio_info->modes[i].format_code =
5918 (enum audio_format_code)
5919 (edid_caps->audio_modes[i].format_code);
5920 audio_info->modes[i].channel_count =
5921 edid_caps->audio_modes[i].channel_count;
5922 audio_info->modes[i].sample_rates.all =
5923 edid_caps->audio_modes[i].sample_rate;
5924 audio_info->modes[i].sample_size =
5925 edid_caps->audio_modes[i].sample_size;
5926 }
5927 }
5928
5929 audio_info->flags.all = edid_caps->speaker_flags;
5930
5931 /* TODO: We only check for the progressive mode, check for interlace mode too */
5932 if (drm_connector->latency_present[0]) {
5933 audio_info->video_latency = drm_connector->video_latency[0];
5934 audio_info->audio_latency = drm_connector->audio_latency[0];
5935 }
5936
5937 /* TODO: For DP, video and audio latency should be calculated from DPCD caps */
5938
5939}
5940
5941static void
5942copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
5943 struct drm_display_mode *dst_mode)
5944{
5945 dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
5946 dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
5947 dst_mode->crtc_clock = src_mode->crtc_clock;
5948 dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
5949 dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
5950 dst_mode->crtc_hsync_start = src_mode->crtc_hsync_start;
5951 dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
5952 dst_mode->crtc_htotal = src_mode->crtc_htotal;
5953 dst_mode->crtc_hskew = src_mode->crtc_hskew;
5954 dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
5955 dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
5956 dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
5957 dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
5958 dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
5959}
5960
5961static void
5962decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
5963 const struct drm_display_mode *native_mode,
5964 bool scale_enabled)
5965{
5966 if (scale_enabled) {
5967 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
5968 } else if (native_mode->clock == drm_mode->clock &&
5969 native_mode->htotal == drm_mode->htotal &&
5970 native_mode->vtotal == drm_mode->vtotal) {
5971 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
5972 } else {
5973		/* no scaling and no amdgpu-inserted mode, nothing to patch */
5974 }
5975}
5976
5977static struct dc_sink *
5978create_fake_sink(struct amdgpu_dm_connector *aconnector)
5979{
5980 struct dc_sink_init_data sink_init_data = { 0 };
5981 struct dc_sink *sink = NULL;
5982 sink_init_data.link = aconnector->dc_link;
5983 sink_init_data.sink_signal = aconnector->dc_link->connector_signal;
5984
5985 sink = dc_sink_create(&sink_init_data);
5986 if (!sink) {
5987 DRM_ERROR("Failed to create sink!\n");
5988 return NULL;
5989 }
5990 sink->sink_signal = SIGNAL_TYPE_VIRTUAL;
5991
5992 return sink;
5993}
5994
5995static void set_multisync_trigger_params(
5996 struct dc_stream_state *stream)
5997{
5998 struct dc_stream_state *master = NULL;
5999
6000 if (stream->triggered_crtc_reset.enabled) {
6001 master = stream->triggered_crtc_reset.event_source;
6002 stream->triggered_crtc_reset.event =
6003 master->timing.flags.VSYNC_POSITIVE_POLARITY ?
6004 CRTC_EVENT_VSYNC_RISING : CRTC_EVENT_VSYNC_FALLING;
6005 stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_PIXEL;
6006 }
6007}
6008
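/*
 * Pick the synchronized stream with the highest refresh rate as the
 * multisync master and point every stream's CRTC-reset trigger at it.
 */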
6009static void set_master_stream(struct dc_stream_state *stream_set[],
6010 int stream_count)
6011{
6012 int j, highest_rfr = 0, master_stream = 0;
6013
6014 for (j = 0; j < stream_count; j++) {
6015 if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
6016 int refresh_rate = 0;
6017
6018 refresh_rate = (stream_set[j]->timing.pix_clk_100hz*100)/
6019 (stream_set[j]->timing.h_total*stream_set[j]->timing.v_total);
6020 if (refresh_rate > highest_rfr) {
6021 highest_rfr = refresh_rate;
6022 master_stream = j;
6023 }
6024 }
6025 }
6026 for (j = 0; j < stream_count; j++) {
6027 if (stream_set[j])
6028 stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
6029 }
6030}
6031
6032static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
6033{
6034 int i = 0;
6035 struct dc_stream_state *stream;
6036
6037 if (context->stream_count < 2)
6038 return;
6039 for (i = 0; i < context->stream_count ; i++) {
6040 if (!context->streams[i])
6041 continue;
6042 /*
6043 * TODO: add a function to read AMD VSDB bits and set
6044 * crtc_sync_master.multi_sync_enabled flag
6045 * For now it's set to false
6046 */
6047 }
6048
6049 set_master_stream(context->streams, context->stream_count);
6050
6051 for (i = 0; i < context->stream_count ; i++) {
6052 stream = context->streams[i];
6053
6054 if (!stream)
6055 continue;
6056
6057 set_multisync_trigger_params(stream);
6058 }
6059}
6060
6061#if defined(CONFIG_DRM_AMD_DC_DCN)
6062static void update_dsc_caps(struct amdgpu_dm_connector *aconnector,
6063 struct dc_sink *sink, struct dc_stream_state *stream,
6064 struct dsc_dec_dpcd_caps *dsc_caps)
6065{
6066 stream->timing.flags.DSC = 0;
6067
6068 if (aconnector->dc_link && (sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT ||
6069 sink->sink_signal == SIGNAL_TYPE_EDP)) {
6070 if (sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_NONE ||
6071 sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER)
6072 dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
6073 aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
6074 aconnector->dc_link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw,
6075 dsc_caps);
6076 }
6077}
6078
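/*
 * eDP DSC policy: if the link can carry the stream even at the panel's
 * maximum DSC bits per pixel, use that bpp; otherwise let DSC pick a
 * configuration that fits the available link bandwidth.
 */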
6079static void apply_dsc_policy_for_edp(struct amdgpu_dm_connector *aconnector,
6080 struct dc_sink *sink, struct dc_stream_state *stream,
6081 struct dsc_dec_dpcd_caps *dsc_caps,
6082 uint32_t max_dsc_target_bpp_limit_override)
6083{
6084 const struct dc_link_settings *verified_link_cap = NULL;
6085 uint32_t link_bw_in_kbps;
6086 uint32_t edp_min_bpp_x16, edp_max_bpp_x16;
6087 struct dc *dc = sink->ctx->dc;
6088 struct dc_dsc_bw_range bw_range = {0};
6089 struct dc_dsc_config dsc_cfg = {0};
6090
6091 verified_link_cap = dc_link_get_link_cap(stream->link);
6092 link_bw_in_kbps = dc_link_bandwidth_kbps(stream->link, verified_link_cap);
6093 edp_min_bpp_x16 = 8 * 16;
6094 edp_max_bpp_x16 = 8 * 16;
6095
6096 if (edp_max_bpp_x16 > dsc_caps->edp_max_bits_per_pixel)
6097 edp_max_bpp_x16 = dsc_caps->edp_max_bits_per_pixel;
6098
6099 if (edp_max_bpp_x16 < edp_min_bpp_x16)
6100 edp_min_bpp_x16 = edp_max_bpp_x16;
6101
6102 if (dc_dsc_compute_bandwidth_range(dc->res_pool->dscs[0],
6103 dc->debug.dsc_min_slice_height_override,
6104 edp_min_bpp_x16, edp_max_bpp_x16,
6105 dsc_caps,
6106 &stream->timing,
6107 &bw_range)) {
6108
6109 if (bw_range.max_kbps < link_bw_in_kbps) {
6110 if (dc_dsc_compute_config(dc->res_pool->dscs[0],
6111 dsc_caps,
6112 dc->debug.dsc_min_slice_height_override,
6113 max_dsc_target_bpp_limit_override,
6114 0,
6115 &stream->timing,
6116 &dsc_cfg)) {
6117 stream->timing.dsc_cfg = dsc_cfg;
6118 stream->timing.flags.DSC = 1;
6119 stream->timing.dsc_cfg.bits_per_pixel = edp_max_bpp_x16;
6120 }
6121 return;
6122 }
6123 }
6124
6125 if (dc_dsc_compute_config(dc->res_pool->dscs[0],
6126 dsc_caps,
6127 dc->debug.dsc_min_slice_height_override,
6128 max_dsc_target_bpp_limit_override,
6129 link_bw_in_kbps,
6130 &stream->timing,
6131 &dsc_cfg)) {
6132 stream->timing.dsc_cfg = dsc_cfg;
6133 stream->timing.flags.DSC = 1;
6134 }
6135}
6136
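/*
 * SST DSC policy: enable DSC for eDP panels that support it, for DP SST
 * sinks when a config fits the link bandwidth, and for DP-to-HDMI PCONs when
 * the timing exceeds the link bandwidth; debugfs overrides are applied last.
 */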
6137static void apply_dsc_policy_for_stream(struct amdgpu_dm_connector *aconnector,
6138 struct dc_sink *sink, struct dc_stream_state *stream,
6139 struct dsc_dec_dpcd_caps *dsc_caps)
6140{
6141 struct drm_connector *drm_connector = &aconnector->base;
6142 uint32_t link_bandwidth_kbps;
6143 uint32_t max_dsc_target_bpp_limit_override = 0;
6144 struct dc *dc = sink->ctx->dc;
6145 uint32_t max_supported_bw_in_kbps, timing_bw_in_kbps;
6146 uint32_t dsc_max_supported_bw_in_kbps;
6147
6148 link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
6149 dc_link_get_link_cap(aconnector->dc_link));
6150
6151 if (stream->link && stream->link->local_sink)
6152 max_dsc_target_bpp_limit_override =
6153 stream->link->local_sink->edid_caps.panel_patch.max_dsc_target_bpp_limit;
6154
6155 /* Set DSC policy according to dsc_clock_en */
6156 dc_dsc_policy_set_enable_dsc_when_not_needed(
6157 aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE);
6158
6159 if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_EDP && !dc->debug.disable_dsc_edp &&
6160 dc->caps.edp_dsc_support && aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE) {
6161
6162 apply_dsc_policy_for_edp(aconnector, sink, stream, dsc_caps, max_dsc_target_bpp_limit_override);
6163
6164 } else if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
6165 if (sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_NONE) {
6166 if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
6167 dsc_caps,
6168 aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
6169 max_dsc_target_bpp_limit_override,
6170 link_bandwidth_kbps,
6171 &stream->timing,
6172 &stream->timing.dsc_cfg)) {
6173 stream->timing.flags.DSC = 1;
6174 DRM_DEBUG_DRIVER("%s: [%s] DSC is selected from SST RX\n",
6175 __func__, drm_connector->name);
6176 }
6177 } else if (sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER) {
6178 timing_bw_in_kbps = dc_bandwidth_in_kbps_from_timing(&stream->timing);
6179 max_supported_bw_in_kbps = link_bandwidth_kbps;
6180 dsc_max_supported_bw_in_kbps = link_bandwidth_kbps;
6181
6182 if (timing_bw_in_kbps > max_supported_bw_in_kbps &&
6183 max_supported_bw_in_kbps > 0 &&
6184 dsc_max_supported_bw_in_kbps > 0)
6185 if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
6186 dsc_caps,
6187 aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
6188 max_dsc_target_bpp_limit_override,
6189 dsc_max_supported_bw_in_kbps,
6190 &stream->timing,
6191 &stream->timing.dsc_cfg)) {
6192 stream->timing.flags.DSC = 1;
6193 DRM_DEBUG_DRIVER("%s: [%s] DSC is selected from DP-HDMI PCON\n",
6194 __func__, drm_connector->name);
6195 }
6196 }
6197 }
6198
6199 /* Overwrite the stream flag if DSC is enabled through debugfs */
6200 if (aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE)
6201 stream->timing.flags.DSC = 1;
6202
6203 if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_h)
6204 stream->timing.dsc_cfg.num_slices_h = aconnector->dsc_settings.dsc_num_slices_h;
6205
6206 if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_v)
6207 stream->timing.dsc_cfg.num_slices_v = aconnector->dsc_settings.dsc_num_slices_v;
6208
6209 if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_bits_per_pixel)
6210 stream->timing.dsc_cfg.bits_per_pixel = aconnector->dsc_settings.dsc_bits_per_pixel;
6211}
6212#endif /* CONFIG_DRM_AMD_DC_DCN */
6213
6214/**
6215 * DOC: FreeSync Video
6216 *
6217 * When a userspace application wants to play a video, the content follows a
6218 * standard format definition that usually specifies the FPS for that format.
6219 * The list below illustrates some video formats and the expected FPS,
6220 * respectively:
6221 *
6222 * - TV/NTSC (23.976 FPS)
6223 * - Cinema (24 FPS)
6224 * - TV/PAL (25 FPS)
6225 * - TV/NTSC (29.97 FPS)
6226 * - TV/NTSC (30 FPS)
6227 * - Cinema HFR (48 FPS)
6228 * - TV/PAL (50 FPS)
6229 * - Commonly used (60 FPS)
6230 * - Multiples of 24 (48,72,96,120 FPS)
6231 *
6232 * The list of standard video formats is not huge and can be added to the
6233 * connector modeset list beforehand. With that, userspace can leverage
6234 * FreeSync to extend the front porch in order to attain the target refresh
6235 * rate. Such a switch will happen seamlessly, without screen blanking or
6236 * reprogramming of the output in any other way. If the userspace requests a
6237 * modesetting change compatible with FreeSync modes that only differ in the
6238 * refresh rate, DC will skip the full update and avoid blink during the
6239 * transition. For example, the video player can change the modesetting from
6240 * 60Hz to 30Hz for playing TV/NTSC content when it goes full screen without
6241 * causing any display blink. This same concept can be applied to a mode
6242 * setting change.
6243 */
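/*
 * Return the mode with the highest refresh rate among modes sharing the
 * preferred mode's resolution, caching it in freesync_vid_base; this is the
 * base timing that FreeSync video modes are derived from.
 */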
6244static struct drm_display_mode *
6245get_highest_refresh_rate_mode(struct amdgpu_dm_connector *aconnector,
6246 bool use_probed_modes)
6247{
6248 struct drm_display_mode *m, *m_pref = NULL;
6249 u16 current_refresh, highest_refresh;
6250 struct list_head *list_head = use_probed_modes ?
6251 &aconnector->base.probed_modes :
6252 &aconnector->base.modes;
6253
6254 if (aconnector->freesync_vid_base.clock != 0)
6255 return &aconnector->freesync_vid_base;
6256
6257 /* Find the preferred mode */
6258 list_for_each_entry (m, list_head, head) {
6259 if (m->type & DRM_MODE_TYPE_PREFERRED) {
6260 m_pref = m;
6261 break;
6262 }
6263 }
6264
6265 if (!m_pref) {
6266		/* Probably an EDID with no preferred mode. Fall back to the first entry */
6267 m_pref = list_first_entry_or_null(
6268 &aconnector->base.modes, struct drm_display_mode, head);
6269 if (!m_pref) {
6270 DRM_DEBUG_DRIVER("No preferred mode found in EDID\n");
6271 return NULL;
6272 }
6273 }
6274
6275 highest_refresh = drm_mode_vrefresh(m_pref);
6276
6277 /*
6278 * Find the mode with highest refresh rate with same resolution.
6279 * For some monitors, preferred mode is not the mode with highest
6280 * supported refresh rate.
6281 */
6282 list_for_each_entry (m, list_head, head) {
6283 current_refresh = drm_mode_vrefresh(m);
6284
6285 if (m->hdisplay == m_pref->hdisplay &&
6286 m->vdisplay == m_pref->vdisplay &&
6287 highest_refresh < current_refresh) {
6288 highest_refresh = current_refresh;
6289 m_pref = m;
6290 }
6291 }
6292
6293 aconnector->freesync_vid_base = *m_pref;
6294 return m_pref;
6295}
6296
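/*
 * A mode is a FreeSync video mode if it matches the connector's cached base
 * (highest refresh) mode in everything except the vertical blanking length,
 * i.e. only the vertical front porch was stretched to lower the refresh rate.
 */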
6297static bool is_freesync_video_mode(const struct drm_display_mode *mode,
6298 struct amdgpu_dm_connector *aconnector)
6299{
6300 struct drm_display_mode *high_mode;
6301 int timing_diff;
6302
6303 high_mode = get_highest_refresh_rate_mode(aconnector, false);
6304 if (!high_mode || !mode)
6305 return false;
6306
6307 timing_diff = high_mode->vtotal - mode->vtotal;
6308
6309 if (high_mode->clock == 0 || high_mode->clock != mode->clock ||
6310 high_mode->hdisplay != mode->hdisplay ||
6311 high_mode->vdisplay != mode->vdisplay ||
6312 high_mode->hsync_start != mode->hsync_start ||
6313 high_mode->hsync_end != mode->hsync_end ||
6314 high_mode->htotal != mode->htotal ||
6315 high_mode->hskew != mode->hskew ||
6316 high_mode->vscan != mode->vscan ||
6317 high_mode->vsync_start - mode->vsync_start != timing_diff ||
6318 high_mode->vsync_end - mode->vsync_end != timing_diff)
6319 return false;
6320 else
6321 return true;
6322}
6323
6324static struct dc_stream_state *
6325create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
6326 const struct drm_display_mode *drm_mode,
6327 const struct dm_connector_state *dm_state,
6328 const struct dc_stream_state *old_stream,
6329 int requested_bpc)
6330{
6331 struct drm_display_mode *preferred_mode = NULL;
6332 struct drm_connector *drm_connector;
6333 const struct drm_connector_state *con_state =
6334 dm_state ? &dm_state->base : NULL;
6335 struct dc_stream_state *stream = NULL;
6336 struct drm_display_mode mode = *drm_mode;
6337 struct drm_display_mode saved_mode;
6338 struct drm_display_mode *freesync_mode = NULL;
6339 bool native_mode_found = false;
6340 bool recalculate_timing = false;
6341 bool scale = dm_state ? (dm_state->scaling != RMX_OFF) : false;
6342 int mode_refresh;
6343 int preferred_refresh = 0;
6344#if defined(CONFIG_DRM_AMD_DC_DCN)
6345 struct dsc_dec_dpcd_caps dsc_caps;
6346#endif
6347 struct dc_sink *sink = NULL;
6348
6349 memset(&saved_mode, 0, sizeof(saved_mode));
6350
6351 if (aconnector == NULL) {
6352 DRM_ERROR("aconnector is NULL!\n");
6353 return stream;
6354 }
6355
6356 drm_connector = &aconnector->base;
6357
6358 if (!aconnector->dc_sink) {
6359 sink = create_fake_sink(aconnector);
6360 if (!sink)
6361 return stream;
6362 } else {
6363 sink = aconnector->dc_sink;
6364 dc_sink_retain(sink);
6365 }
6366
6367 stream = dc_create_stream_for_sink(sink);
6368
6369 if (stream == NULL) {
6370 DRM_ERROR("Failed to create stream for sink!\n");
6371 goto finish;
6372 }
6373
6374 stream->dm_stream_context = aconnector;
6375
6376 stream->timing.flags.LTE_340MCSC_SCRAMBLE =
6377 drm_connector->display_info.hdmi.scdc.scrambling.low_rates;
6378
6379 list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
6380 /* Search for preferred mode */
6381 if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
6382 native_mode_found = true;
6383 break;
6384 }
6385 }
6386 if (!native_mode_found)
6387 preferred_mode = list_first_entry_or_null(
6388 &aconnector->base.modes,
6389 struct drm_display_mode,
6390 head);
6391
6392 mode_refresh = drm_mode_vrefresh(&mode);
6393
6394 if (preferred_mode == NULL) {
6395 /*
6396		 * This may not be an error: the use case is when we have no
6397		 * usermode calls to reset and set mode upon hotplug. In this
6398		 * case, we call set mode ourselves to restore the previous mode,
6399		 * and the mode list may not be filled in yet.
6400 */
6401 DRM_DEBUG_DRIVER("No preferred mode found\n");
6402 } else {
6403 recalculate_timing = amdgpu_freesync_vid_mode &&
6404 is_freesync_video_mode(&mode, aconnector);
6405 if (recalculate_timing) {
6406 freesync_mode = get_highest_refresh_rate_mode(aconnector, false);
6407 saved_mode = mode;
6408 mode = *freesync_mode;
6409 } else {
6410 decide_crtc_timing_for_drm_display_mode(
6411 &mode, preferred_mode, scale);
6412
6413 preferred_refresh = drm_mode_vrefresh(preferred_mode);
6414 }
6415 }
6416
6417 if (recalculate_timing)
6418 drm_mode_set_crtcinfo(&saved_mode, 0);
6419 else if (!dm_state)
6420 drm_mode_set_crtcinfo(&mode, 0);
6421
6422 /*
6423 * If scaling is enabled and refresh rate didn't change
6424 * we copy the vic and polarities of the old timings
6425 */
6426 if (!scale || mode_refresh != preferred_refresh)
6427 fill_stream_properties_from_drm_display_mode(
6428 stream, &mode, &aconnector->base, con_state, NULL,
6429 requested_bpc);
6430 else
6431 fill_stream_properties_from_drm_display_mode(
6432 stream, &mode, &aconnector->base, con_state, old_stream,
6433 requested_bpc);
6434
6435#if defined(CONFIG_DRM_AMD_DC_DCN)
6436 /* SST DSC determination policy */
6437 update_dsc_caps(aconnector, sink, stream, &dsc_caps);
6438 if (aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE && dsc_caps.is_dsc_supported)
6439 apply_dsc_policy_for_stream(aconnector, sink, stream, &dsc_caps);
6440#endif
6441
6442 update_stream_scaling_settings(&mode, dm_state, stream);
6443
6444 fill_audio_info(
6445 &stream->audio_info,
6446 drm_connector,
6447 sink);
6448
6449 update_stream_signal(stream, sink);
6450
6451 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
6452 mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket);
6453
6454 if (stream->link->psr_settings.psr_feature_enabled) {
6455		/*
6456		 * Decide whether the stream supports VSC SDP colorimetry
6457		 * before building the VSC info packet.
6458		 */
6459 stream->use_vsc_sdp_for_colorimetry = false;
6460 if (aconnector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
6461 stream->use_vsc_sdp_for_colorimetry =
6462 aconnector->dc_sink->is_vsc_sdp_colorimetry_supported;
6463 } else {
6464 if (stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED)
6465 stream->use_vsc_sdp_for_colorimetry = true;
6466 }
6467 mod_build_vsc_infopacket(stream, &stream->vsc_infopacket);
6468 aconnector->psr_skip_count = AMDGPU_DM_PSR_ENTRY_DELAY;
6469
6470 }
6471finish:
6472 dc_sink_release(sink);
6473
6474 return stream;
6475}
6476
6477static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
6478{
6479 drm_crtc_cleanup(crtc);
6480 kfree(crtc);
6481}
6482
6483static void dm_crtc_destroy_state(struct drm_crtc *crtc,
6484 struct drm_crtc_state *state)
6485{
6486 struct dm_crtc_state *cur = to_dm_crtc_state(state);
6487
6488	/* TODO: Destroy dc_stream objects once the stream object is flattened */
6489 if (cur->stream)
6490 dc_stream_release(cur->stream);
6491
6492
6493 __drm_atomic_helper_crtc_destroy_state(state);
6494
6495
6496 kfree(state);
6497}
6498
6499static void dm_crtc_reset_state(struct drm_crtc *crtc)
6500{
6501 struct dm_crtc_state *state;
6502
6503 if (crtc->state)
6504 dm_crtc_destroy_state(crtc, crtc->state);
6505
6506 state = kzalloc(sizeof(*state), GFP_KERNEL);
6507 if (WARN_ON(!state))
6508 return;
6509
6510 __drm_atomic_helper_crtc_reset(crtc, &state->base);
6511}
6512
6513static struct drm_crtc_state *
6514dm_crtc_duplicate_state(struct drm_crtc *crtc)
6515{
6516 struct dm_crtc_state *state, *cur;
6517
6518 cur = to_dm_crtc_state(crtc->state);
6519
6520 if (WARN_ON(!crtc->state))
6521 return NULL;
6522
6523 state = kzalloc(sizeof(*state), GFP_KERNEL);
6524 if (!state)
6525 return NULL;
6526
6527 __drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
6528
6529 if (cur->stream) {
6530 state->stream = cur->stream;
6531 dc_stream_retain(state->stream);
6532 }
6533
6534 state->active_planes = cur->active_planes;
6535 state->vrr_infopacket = cur->vrr_infopacket;
6536 state->abm_level = cur->abm_level;
6537 state->vrr_supported = cur->vrr_supported;
6538 state->freesync_config = cur->freesync_config;
6539 state->cm_has_degamma = cur->cm_has_degamma;
6540 state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb;
6541 state->force_dpms_off = cur->force_dpms_off;
6542	/* TODO: Duplicate dc_stream once the stream object is flattened */
6543
6544 return &state->base;
6545}
6546
6547#ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
6548static int amdgpu_dm_crtc_late_register(struct drm_crtc *crtc)
6549{
6550 crtc_debugfs_init(crtc);
6551
6552 return 0;
6553}
6554#endif
6555
6556static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
6557{
6558 enum dc_irq_source irq_source;
6559 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
6560 struct amdgpu_device *adev = drm_to_adev(crtc->dev);
6561 int rc;
6562
6563 irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst;
6564
6565 rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
6566
6567 DRM_DEBUG_VBL("crtc %d - vupdate irq %sabling: r=%d\n",
6568 acrtc->crtc_id, enable ? "en" : "dis", rc);
6569 return rc;
6570}
6571
6572static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
6573{
6574 enum dc_irq_source irq_source;
6575 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
6576 struct amdgpu_device *adev = drm_to_adev(crtc->dev);
6577 struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
6578#if defined(CONFIG_DRM_AMD_DC_DCN)
6579 struct amdgpu_display_manager *dm = &adev->dm;
6580 struct vblank_control_work *work;
6581#endif
6582 int rc = 0;
6583
6584 if (enable) {
6585 /* vblank irq on -> Only need vupdate irq in vrr mode */
6586 if (amdgpu_dm_vrr_active(acrtc_state))
6587 rc = dm_set_vupdate_irq(crtc, true);
6588 } else {
6589 /* vblank irq off -> vupdate irq off */
6590 rc = dm_set_vupdate_irq(crtc, false);
6591 }
6592
6593 if (rc)
6594 return rc;
6595
6596 irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
6597
6598 if (!dc_interrupt_set(adev->dm.dc, irq_source, enable))
6599 return -EBUSY;
6600
6601 if (amdgpu_in_reset(adev))
6602 return 0;
6603
6604#if defined(CONFIG_DRM_AMD_DC_DCN)
6605 if (dm->vblank_control_workqueue) {
6606 work = kzalloc(sizeof(*work), GFP_ATOMIC);
6607 if (!work)
6608 return -ENOMEM;
6609
6610 INIT_WORK(&work->work, vblank_control_worker);
6611 work->dm = dm;
6612 work->acrtc = acrtc;
6613 work->enable = enable;
6614
6615 if (acrtc_state->stream) {
6616 dc_stream_retain(acrtc_state->stream);
6617 work->stream = acrtc_state->stream;
6618 }
6619
6620 queue_work(dm->vblank_control_workqueue, &work->work);
6621 }
6622#endif
6623
6624 return 0;
6625}
6626
6627static int dm_enable_vblank(struct drm_crtc *crtc)
6628{
6629 return dm_set_vblank(crtc, true);
6630}
6631
6632static void dm_disable_vblank(struct drm_crtc *crtc)
6633{
6634 dm_set_vblank(crtc, false);
6635}
6636
6637/* Only the options currently available to the driver are implemented */
6638static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
6639 .reset = dm_crtc_reset_state,
6640 .destroy = amdgpu_dm_crtc_destroy,
6641 .set_config = drm_atomic_helper_set_config,
6642 .page_flip = drm_atomic_helper_page_flip,
6643 .atomic_duplicate_state = dm_crtc_duplicate_state,
6644 .atomic_destroy_state = dm_crtc_destroy_state,
6645 .set_crc_source = amdgpu_dm_crtc_set_crc_source,
6646 .verify_crc_source = amdgpu_dm_crtc_verify_crc_source,
6647 .get_crc_sources = amdgpu_dm_crtc_get_crc_sources,
6648 .get_vblank_counter = amdgpu_get_vblank_counter_kms,
6649 .enable_vblank = dm_enable_vblank,
6650 .disable_vblank = dm_disable_vblank,
6651 .get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
6652#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
6653 .late_register = amdgpu_dm_crtc_late_register,
6654#endif
6655};
6656
6657static enum drm_connector_status
6658amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
6659{
6660 bool connected;
6661 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6662
6663 /*
6664 * Notes:
6665	 * 1. This interface is NOT called in the context of the HPD irq.
6666	 * 2. This interface *is called* in the context of a user-mode ioctl,
6667	 *    which makes it a bad place for *any* MST-related activity.
6668 */
6669
6670 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
6671 !aconnector->fake_enable)
6672 connected = (aconnector->dc_sink != NULL);
6673 else
6674 connected = (aconnector->base.force == DRM_FORCE_ON);
6675
6676 update_subconnector_property(aconnector);
6677
6678 return (connected ? connector_status_connected :
6679 connector_status_disconnected);
6680}
6681
6682int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
6683 struct drm_connector_state *connector_state,
6684 struct drm_property *property,
6685 uint64_t val)
6686{
6687 struct drm_device *dev = connector->dev;
6688 struct amdgpu_device *adev = drm_to_adev(dev);
6689 struct dm_connector_state *dm_old_state =
6690 to_dm_connector_state(connector->state);
6691 struct dm_connector_state *dm_new_state =
6692 to_dm_connector_state(connector_state);
6693
6694 int ret = -EINVAL;
6695
6696 if (property == dev->mode_config.scaling_mode_property) {
6697 enum amdgpu_rmx_type rmx_type;
6698
6699 switch (val) {
6700 case DRM_MODE_SCALE_CENTER:
6701 rmx_type = RMX_CENTER;
6702 break;
6703 case DRM_MODE_SCALE_ASPECT:
6704 rmx_type = RMX_ASPECT;
6705 break;
6706 case DRM_MODE_SCALE_FULLSCREEN:
6707 rmx_type = RMX_FULL;
6708 break;
6709 case DRM_MODE_SCALE_NONE:
6710 default:
6711 rmx_type = RMX_OFF;
6712 break;
6713 }
6714
6715 if (dm_old_state->scaling == rmx_type)
6716 return 0;
6717
6718 dm_new_state->scaling = rmx_type;
6719 ret = 0;
6720 } else if (property == adev->mode_info.underscan_hborder_property) {
6721 dm_new_state->underscan_hborder = val;
6722 ret = 0;
6723 } else if (property == adev->mode_info.underscan_vborder_property) {
6724 dm_new_state->underscan_vborder = val;
6725 ret = 0;
6726 } else if (property == adev->mode_info.underscan_property) {
6727 dm_new_state->underscan_enable = val;
6728 ret = 0;
6729 } else if (property == adev->mode_info.abm_level_property) {
6730 dm_new_state->abm_level = val;
6731 ret = 0;
6732 }
6733
6734 return ret;
6735}
6736
6737int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
6738 const struct drm_connector_state *state,
6739 struct drm_property *property,
6740 uint64_t *val)
6741{
6742 struct drm_device *dev = connector->dev;
6743 struct amdgpu_device *adev = drm_to_adev(dev);
6744 struct dm_connector_state *dm_state =
6745 to_dm_connector_state(state);
6746 int ret = -EINVAL;
6747
6748 if (property == dev->mode_config.scaling_mode_property) {
6749 switch (dm_state->scaling) {
6750 case RMX_CENTER:
6751 *val = DRM_MODE_SCALE_CENTER;
6752 break;
6753 case RMX_ASPECT:
6754 *val = DRM_MODE_SCALE_ASPECT;
6755 break;
6756 case RMX_FULL:
6757 *val = DRM_MODE_SCALE_FULLSCREEN;
6758 break;
6759 case RMX_OFF:
6760 default:
6761 *val = DRM_MODE_SCALE_NONE;
6762 break;
6763 }
6764 ret = 0;
6765 } else if (property == adev->mode_info.underscan_hborder_property) {
6766 *val = dm_state->underscan_hborder;
6767 ret = 0;
6768 } else if (property == adev->mode_info.underscan_vborder_property) {
6769 *val = dm_state->underscan_vborder;
6770 ret = 0;
6771 } else if (property == adev->mode_info.underscan_property) {
6772 *val = dm_state->underscan_enable;
6773 ret = 0;
6774 } else if (property == adev->mode_info.abm_level_property) {
6775 *val = dm_state->abm_level;
6776 ret = 0;
6777 }
6778
6779 return ret;
6780}
6781
6782static void amdgpu_dm_connector_unregister(struct drm_connector *connector)
6783{
6784 struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
6785
6786 drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux);
6787}
6788
6789static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
6790{
6791 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6792 const struct dc_link *link = aconnector->dc_link;
6793 struct amdgpu_device *adev = drm_to_adev(connector->dev);
6794 struct amdgpu_display_manager *dm = &adev->dm;
6795 int i;
6796
6797 /*
6798	 * Call only if mst_mgr was initialized before, since it's not done
6799 * for all connector types.
6800 */
6801 if (aconnector->mst_mgr.dev)
6802 drm_dp_mst_topology_mgr_destroy(&aconnector->mst_mgr);
6803
6804#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
6805 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
6806 for (i = 0; i < dm->num_of_edps; i++) {
6807 if ((link == dm->backlight_link[i]) && dm->backlight_dev[i]) {
6808 backlight_device_unregister(dm->backlight_dev[i]);
6809 dm->backlight_dev[i] = NULL;
6810 }
6811 }
6812#endif
6813
6814 if (aconnector->dc_em_sink)
6815 dc_sink_release(aconnector->dc_em_sink);
6816 aconnector->dc_em_sink = NULL;
6817 if (aconnector->dc_sink)
6818 dc_sink_release(aconnector->dc_sink);
6819 aconnector->dc_sink = NULL;
6820
6821 drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
6822 drm_connector_unregister(connector);
6823 drm_connector_cleanup(connector);
6824 if (aconnector->i2c) {
6825 i2c_del_adapter(&aconnector->i2c->base);
6826 kfree(aconnector->i2c);
6827 }
6828 kfree(aconnector->dm_dp_aux.aux.name);
6829
6830 kfree(connector);
6831}
6832
6833void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
6834{
6835 struct dm_connector_state *state =
6836 to_dm_connector_state(connector->state);
6837
6838 if (connector->state)
6839 __drm_atomic_helper_connector_destroy_state(connector->state);
6840
6841 kfree(state);
6842
6843 state = kzalloc(sizeof(*state), GFP_KERNEL);
6844
6845 if (state) {
6846 state->scaling = RMX_OFF;
6847 state->underscan_enable = false;
6848 state->underscan_hborder = 0;
6849 state->underscan_vborder = 0;
6850 state->base.max_requested_bpc = 8;
6851 state->vcpi_slots = 0;
6852 state->pbn = 0;
6853 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
6854 state->abm_level = amdgpu_dm_abm_level;
6855
6856 __drm_atomic_helper_connector_reset(connector, &state->base);
6857 }
6858}
6859
6860struct drm_connector_state *
6861amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
6862{
6863 struct dm_connector_state *state =
6864 to_dm_connector_state(connector->state);
6865
6866 struct dm_connector_state *new_state =
6867 kmemdup(state, sizeof(*state), GFP_KERNEL);
6868
6869 if (!new_state)
6870 return NULL;
6871
6872 __drm_atomic_helper_connector_duplicate_state(connector, &new_state->base);
6873
6874 new_state->freesync_capable = state->freesync_capable;
6875 new_state->abm_level = state->abm_level;
6876 new_state->scaling = state->scaling;
6877 new_state->underscan_enable = state->underscan_enable;
6878 new_state->underscan_hborder = state->underscan_hborder;
6879 new_state->underscan_vborder = state->underscan_vborder;
6880 new_state->vcpi_slots = state->vcpi_slots;
6881 new_state->pbn = state->pbn;
6882 return &new_state->base;
6883}
6884
6885static int
6886amdgpu_dm_connector_late_register(struct drm_connector *connector)
6887{
6888 struct amdgpu_dm_connector *amdgpu_dm_connector =
6889 to_amdgpu_dm_connector(connector);
6890 int r;
6891
6892 if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
6893 (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
6894 amdgpu_dm_connector->dm_dp_aux.aux.dev = connector->kdev;
6895 r = drm_dp_aux_register(&amdgpu_dm_connector->dm_dp_aux.aux);
6896 if (r)
6897 return r;
6898 }
6899
6900#if defined(CONFIG_DEBUG_FS)
6901 connector_debugfs_init(amdgpu_dm_connector);
6902#endif
6903
6904 return 0;
6905}
6906
6907static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
6908 .reset = amdgpu_dm_connector_funcs_reset,
6909 .detect = amdgpu_dm_connector_detect,
6910 .fill_modes = drm_helper_probe_single_connector_modes,
6911 .destroy = amdgpu_dm_connector_destroy,
6912 .atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
6913 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
6914 .atomic_set_property = amdgpu_dm_connector_atomic_set_property,
6915 .atomic_get_property = amdgpu_dm_connector_atomic_get_property,
6916 .late_register = amdgpu_dm_connector_late_register,
6917 .early_unregister = amdgpu_dm_connector_unregister
6918};
6919
6920static int get_modes(struct drm_connector *connector)
6921{
6922 return amdgpu_dm_connector_get_modes(connector);
6923}
6924
6925static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
6926{
6927 struct dc_sink_init_data init_params = {
6928 .link = aconnector->dc_link,
6929 .sink_signal = SIGNAL_TYPE_VIRTUAL
6930 };
6931 struct edid *edid;
6932
6933 if (!aconnector->base.edid_blob_ptr) {
6934		DRM_ERROR("No EDID firmware found on connector: %s, forcing to OFF!\n",
6935 aconnector->base.name);
6936
6937 aconnector->base.force = DRM_FORCE_OFF;
6938 aconnector->base.override_edid = false;
6939 return;
6940 }
6941
6942 edid = (struct edid *) aconnector->base.edid_blob_ptr->data;
6943
6944 aconnector->edid = edid;
6945
6946 aconnector->dc_em_sink = dc_link_add_remote_sink(
6947 aconnector->dc_link,
6948 (uint8_t *)edid,
6949 (edid->extensions + 1) * EDID_LENGTH,
6950 &init_params);
6951
6952 if (aconnector->base.force == DRM_FORCE_ON) {
6953 aconnector->dc_sink = aconnector->dc_link->local_sink ?
6954 aconnector->dc_link->local_sink :
6955 aconnector->dc_em_sink;
6956 dc_sink_retain(aconnector->dc_sink);
6957 }
6958}
6959
6960static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
6961{
6962 struct dc_link *link = (struct dc_link *)aconnector->dc_link;
6963
6964 /*
6965	 * In case of a headless boot with force on for a DP managed connector,
6966	 * those settings have to be != 0 to get an initial modeset
6967 */
6968 if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
6969 link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
6970 link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
6971 }
6972
6973
6974 aconnector->base.override_edid = true;
6975 create_eml_sink(aconnector);
6976}
6977
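/*
 * Create a stream for the sink and validate it with DC, lowering the
 * requested bpc (down to 6) on validation failure, and finally retrying once
 * with forced YCbCr 4:2:0 output if encoder validation still fails.
 */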
6978static struct dc_stream_state *
6979create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
6980 const struct drm_display_mode *drm_mode,
6981 const struct dm_connector_state *dm_state,
6982 const struct dc_stream_state *old_stream)
6983{
6984 struct drm_connector *connector = &aconnector->base;
6985 struct amdgpu_device *adev = drm_to_adev(connector->dev);
6986 struct dc_stream_state *stream;
6987 const struct drm_connector_state *drm_state = dm_state ? &dm_state->base : NULL;
6988 int requested_bpc = drm_state ? drm_state->max_requested_bpc : 8;
6989 enum dc_status dc_result = DC_OK;
6990
6991 do {
6992 stream = create_stream_for_sink(aconnector, drm_mode,
6993 dm_state, old_stream,
6994 requested_bpc);
6995 if (stream == NULL) {
6996 DRM_ERROR("Failed to create stream for sink!\n");
6997 break;
6998 }
6999
7000 dc_result = dc_validate_stream(adev->dm.dc, stream);
7001
7002 if (dc_result != DC_OK) {
7003 DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d (%s)\n",
7004 drm_mode->hdisplay,
7005 drm_mode->vdisplay,
7006 drm_mode->clock,
7007 dc_result,
7008 dc_status_to_str(dc_result));
7009
7010 dc_stream_release(stream);
7011 stream = NULL;
7012 requested_bpc -= 2; /* lower bpc to retry validation */
7013 }
7014
7015 } while (stream == NULL && requested_bpc >= 6);
7016
7017 if (dc_result == DC_FAIL_ENC_VALIDATE && !aconnector->force_yuv420_output) {
7018 DRM_DEBUG_KMS("Retry forcing YCbCr420 encoding\n");
7019
7020 aconnector->force_yuv420_output = true;
7021 stream = create_validate_stream_for_sink(aconnector, drm_mode,
7022 dm_state, old_stream);
7023 aconnector->force_yuv420_output = false;
7024 }
7025
7026 return stream;
7027}
7028
7029enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
7030 struct drm_display_mode *mode)
7031{
7032 int result = MODE_ERROR;
7033 struct dc_sink *dc_sink;
7034 /* TODO: Unhardcode stream count */
7035 struct dc_stream_state *stream;
7036 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7037
7038 if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
7039 (mode->flags & DRM_MODE_FLAG_DBLSCAN))
7040 return result;
7041
7042 /*
7043	 * Only run this the first time mode_valid is called to initialize
7044 * EDID mgmt
7045 */
7046 if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
7047 !aconnector->dc_em_sink)
7048 handle_edid_mgmt(aconnector);
7049
7050 dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;
7051
7052 if (dc_sink == NULL && aconnector->base.force != DRM_FORCE_ON_DIGITAL &&
7053 aconnector->base.force != DRM_FORCE_ON) {
7054 DRM_ERROR("dc_sink is NULL!\n");
7055 goto fail;
7056 }
7057
7058 stream = create_validate_stream_for_sink(aconnector, mode, NULL, NULL);
7059 if (stream) {
7060 dc_stream_release(stream);
7061 result = MODE_OK;
7062 }
7063
7064fail:
7065	/* TODO: error handling */
7066 return result;
7067}
7068
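/*
 * Pack the connector state's HDR static metadata into a DC info packet:
 * an HDMI Dynamic Range and Mastering infoframe for HDMI sinks, or the
 * equivalent SDP for DP/eDP. Returns 0 when no metadata is set or on
 * success, a negative error code otherwise.
 */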
7069static int fill_hdr_info_packet(const struct drm_connector_state *state,
7070 struct dc_info_packet *out)
7071{
7072 struct hdmi_drm_infoframe frame;
7073 unsigned char buf[30]; /* 26 + 4 */
7074 ssize_t len;
7075 int ret, i;
7076
7077 memset(out, 0, sizeof(*out));
7078
7079 if (!state->hdr_output_metadata)
7080 return 0;
7081
7082 ret = drm_hdmi_infoframe_set_hdr_metadata(&frame, state);
7083 if (ret)
7084 return ret;
7085
7086 len = hdmi_drm_infoframe_pack_only(&frame, buf, sizeof(buf));
7087 if (len < 0)
7088 return (int)len;
7089
7090 /* Static metadata is a fixed 26 bytes + 4 byte header. */
7091 if (len != 30)
7092 return -EINVAL;
7093
7094 /* Prepare the infopacket for DC. */
7095 switch (state->connector->connector_type) {
7096 case DRM_MODE_CONNECTOR_HDMIA:
7097 out->hb0 = 0x87; /* type */
7098 out->hb1 = 0x01; /* version */
7099 out->hb2 = 0x1A; /* length */
7100 out->sb[0] = buf[3]; /* checksum */
7101 i = 1;
7102 break;
7103
7104 case DRM_MODE_CONNECTOR_DisplayPort:
7105 case DRM_MODE_CONNECTOR_eDP:
7106 out->hb0 = 0x00; /* sdp id, zero */
7107 out->hb1 = 0x87; /* type */
7108 out->hb2 = 0x1D; /* payload len - 1 */
7109 out->hb3 = (0x13 << 2); /* sdp version */
7110 out->sb[0] = 0x01; /* version */
7111 out->sb[1] = 0x1A; /* length */
7112 i = 2;
7113 break;
7114
7115 default:
7116 return -EINVAL;
7117 }
7118
7119 memcpy(&out->sb[i], &buf[4], 26);
7120 out->valid = true;
7121
7122 print_hex_dump(KERN_DEBUG, "HDR SB:", DUMP_PREFIX_NONE, 16, 1, out->sb,
7123 sizeof(out->sb), false);
7124
7125 return 0;
7126}
7127
7128static int
7129amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
7130 struct drm_atomic_state *state)
7131{
7132 struct drm_connector_state *new_con_state =
7133 drm_atomic_get_new_connector_state(state, conn);
7134 struct drm_connector_state *old_con_state =
7135 drm_atomic_get_old_connector_state(state, conn);
7136 struct drm_crtc *crtc = new_con_state->crtc;
7137 struct drm_crtc_state *new_crtc_state;
7138 int ret;
7139
7140 trace_amdgpu_dm_connector_atomic_check(new_con_state);
7141
7142 if (!crtc)
7143 return 0;
7144
7145 if (!drm_connector_atomic_hdr_metadata_equal(old_con_state, new_con_state)) {
7146 struct dc_info_packet hdr_infopacket;
7147
7148 ret = fill_hdr_info_packet(new_con_state, &hdr_infopacket);
7149 if (ret)
7150 return ret;
7151
7152 new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
7153 if (IS_ERR(new_crtc_state))
7154 return PTR_ERR(new_crtc_state);
7155
7156 /*
7157 * DC considers the stream backends changed if the
7158 * static metadata changes. Forcing the modeset also
7159 * gives a simple way for userspace to switch from
7160 * 8bpc to 10bpc when setting the metadata to enter
7161 * or exit HDR.
7162 *
7163 * Changing the static metadata after it's been
7164 * set is permissible, however. So only force a
7165 * modeset if we're entering or exiting HDR.
7166 */
7167 new_crtc_state->mode_changed =
7168 !old_con_state->hdr_output_metadata ||
7169 !new_con_state->hdr_output_metadata;
7170 }
7171
7172 return 0;
7173}
7174
7175static const struct drm_connector_helper_funcs
7176amdgpu_dm_connector_helper_funcs = {
7177 /*
7178	 * If hotplugging a second, bigger display in FB console mode, the bigger
7179	 * resolution modes will be filtered by drm_mode_validate_size() and will be
7180	 * missing after the user starts lightdm. So we need to renew the modes list
7181	 * in the get_modes callback, not just return the modes count.
7182 */
7183 .get_modes = get_modes,
7184 .mode_valid = amdgpu_dm_connector_mode_valid,
7185 .atomic_check = amdgpu_dm_connector_atomic_check,
7186};
7187
7188static void dm_crtc_helper_disable(struct drm_crtc *crtc)
7189{
7190}
7191
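/*
 * Count the non-cursor planes that will be active on this CRTC after the
 * commit; planes without a new state are assumed to stay enabled.
 */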
7192static int count_crtc_active_planes(struct drm_crtc_state *new_crtc_state)
7193{
7194 struct drm_atomic_state *state = new_crtc_state->state;
7195 struct drm_plane *plane;
7196 int num_active = 0;
7197
7198 drm_for_each_plane_mask(plane, state->dev, new_crtc_state->plane_mask) {
7199 struct drm_plane_state *new_plane_state;
7200
7201 /* Cursor planes are "fake". */
7202 if (plane->type == DRM_PLANE_TYPE_CURSOR)
7203 continue;
7204
7205 new_plane_state = drm_atomic_get_new_plane_state(state, plane);
7206
7207 if (!new_plane_state) {
7208 /*
7209			 * The plane is enabled on the CRTC and hasn't changed
7210 * state. This means that it previously passed
7211 * validation and is therefore enabled.
7212 */
7213 num_active += 1;
7214 continue;
7215 }
7216
7217 /* We need a framebuffer to be considered enabled. */
7218 num_active += (new_plane_state->fb != NULL);
7219 }
7220
7221 return num_active;
7222}
7223
7224static void dm_update_crtc_active_planes(struct drm_crtc *crtc,
7225 struct drm_crtc_state *new_crtc_state)
7226{
7227 struct dm_crtc_state *dm_new_crtc_state =
7228 to_dm_crtc_state(new_crtc_state);
7229
7230 dm_new_crtc_state->active_planes = 0;
7231
7232 if (!dm_new_crtc_state->stream)
7233 return;
7234
7235 dm_new_crtc_state->active_planes =
7236 count_crtc_active_planes(new_crtc_state);
7237}
7238
7239static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
7240 struct drm_atomic_state *state)
7241{
7242 struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
7243 crtc);
7244 struct amdgpu_device *adev = drm_to_adev(crtc->dev);
7245 struct dc *dc = adev->dm.dc;
7246 struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
7247 int ret = -EINVAL;
7248
7249 trace_amdgpu_dm_crtc_atomic_check(crtc_state);
7250
7251 dm_update_crtc_active_planes(crtc, crtc_state);
7252
7253 if (WARN_ON(unlikely(!dm_crtc_state->stream &&
7254 modeset_required(crtc_state, NULL, dm_crtc_state->stream)))) {
7255 return ret;
7256 }
7257
7258 /*
7259 * We require the primary plane to be enabled whenever the CRTC is, otherwise
7260 * drm_mode_cursor_universal may end up trying to enable the cursor plane while all other
7261 * planes are disabled, which is not supported by the hardware. And there is legacy
7262 * userspace which stops using the HW cursor altogether in response to the resulting EINVAL.
7263 */
7264 if (crtc_state->enable &&
7265 !(crtc_state->plane_mask & drm_plane_mask(crtc->primary))) {
7266 DRM_DEBUG_ATOMIC("Can't enable a CRTC without enabling the primary plane\n");
7267 return -EINVAL;
7268 }
7269
7270 /* In some use cases, like reset, no stream is attached */
7271 if (!dm_crtc_state->stream)
7272 return 0;
7273
7274 if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
7275 return 0;
7276
7277 DRM_DEBUG_ATOMIC("Failed DC stream validation\n");
7278 return ret;
7279}
7280
7281static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc,
7282 const struct drm_display_mode *mode,
7283 struct drm_display_mode *adjusted_mode)
7284{
7285 return true;
7286}
7287
7288static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
7289 .disable = dm_crtc_helper_disable,
7290 .atomic_check = dm_crtc_helper_atomic_check,
7291 .mode_fixup = dm_crtc_helper_mode_fixup,
7292 .get_scanout_position = amdgpu_crtc_get_scanout_position,
7293};
7294
7295static void dm_encoder_helper_disable(struct drm_encoder *encoder)
7296{
7297
7298}
7299
7300static int convert_dc_color_depth_into_bpc(enum dc_color_depth display_color_depth)
7301{
7302 switch (display_color_depth) {
7303 case COLOR_DEPTH_666:
7304 return 6;
7305 case COLOR_DEPTH_888:
7306 return 8;
7307 case COLOR_DEPTH_101010:
7308 return 10;
7309 case COLOR_DEPTH_121212:
7310 return 12;
7311 case COLOR_DEPTH_141414:
7312 return 14;
7313 case COLOR_DEPTH_161616:
7314 return 16;
7315 default:
7316 break;
7317 }
7318 return 0;
7319}
7320
7321static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
7322 struct drm_crtc_state *crtc_state,
7323 struct drm_connector_state *conn_state)
7324{
7325 struct drm_atomic_state *state = crtc_state->state;
7326 struct drm_connector *connector = conn_state->connector;
7327 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7328 struct dm_connector_state *dm_new_connector_state = to_dm_connector_state(conn_state);
7329 const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
7330 struct drm_dp_mst_topology_mgr *mst_mgr;
7331 struct drm_dp_mst_port *mst_port;
7332 enum dc_color_depth color_depth;
7333 int clock, bpp = 0;
7334 bool is_y420 = false;
7335
7336 if (!aconnector->port || !aconnector->dc_sink)
7337 return 0;
7338
7339 mst_port = aconnector->port;
7340 mst_mgr = &aconnector->mst_port->mst_mgr;
7341
7342 if (!crtc_state->connectors_changed && !crtc_state->mode_changed)
7343 return 0;
7344
7345 if (!state->duplicated) {
7346 int max_bpc = conn_state->max_requested_bpc;
7347 is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
7348 aconnector->force_yuv420_output;
7349 color_depth = convert_color_depth_from_display_info(connector,
7350 is_y420,
7351 max_bpc);
7352 bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
7353 clock = adjusted_mode->clock;
7354 dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false);
7355 }
7356 dm_new_connector_state->vcpi_slots = drm_dp_atomic_find_vcpi_slots(state,
7357 mst_mgr,
7358 mst_port,
7359 dm_new_connector_state->pbn,
7360 dm_mst_get_pbn_divider(aconnector->dc_link));
7361 if (dm_new_connector_state->vcpi_slots < 0) {
7362 DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots);
7363 return dm_new_connector_state->vcpi_slots;
7364 }
7365 return 0;
7366}
7367
7368const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
7369 .disable = dm_encoder_helper_disable,
7370 .atomic_check = dm_encoder_helper_atomic_check
7371};
7372
7373#if defined(CONFIG_DRM_AMD_DC_DCN)
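/*
 * Update each MST connector's PBN and VCPI slot count from the DSC
 * fairness results and enable or disable DSC on the port to match the
 * stream's timing.
 */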
7374static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
7375 struct dc_state *dc_state,
7376 struct dsc_mst_fairness_vars *vars)
7377{
7378 struct dc_stream_state *stream = NULL;
7379 struct drm_connector *connector;
7380 struct drm_connector_state *new_con_state;
7381 struct amdgpu_dm_connector *aconnector;
7382 struct dm_connector_state *dm_conn_state;
7383 int i, j;
7384 int vcpi, pbn_div, pbn, slot_num = 0;
7385
7386 for_each_new_connector_in_state(state, connector, new_con_state, i) {
7387
7388 aconnector = to_amdgpu_dm_connector(connector);
7389
7390 if (!aconnector->port)
7391 continue;
7392
7393 if (!new_con_state || !new_con_state->crtc)
7394 continue;
7395
7396 dm_conn_state = to_dm_connector_state(new_con_state);
7397
7398 for (j = 0; j < dc_state->stream_count; j++) {
7399 stream = dc_state->streams[j];
7400 if (!stream)
7401 continue;
7402
7403			if ((struct amdgpu_dm_connector *)stream->dm_stream_context == aconnector)
7404 break;
7405
7406 stream = NULL;
7407 }
7408
7409 if (!stream)
7410 continue;
7411
7412 pbn_div = dm_mst_get_pbn_divider(stream->link);
7413		/* pbn is calculated by compute_mst_dsc_configs_for_state */
7414 for (j = 0; j < dc_state->stream_count; j++) {
7415 if (vars[j].aconnector == aconnector) {
7416 pbn = vars[j].pbn;
7417 break;
7418 }
7419 }
7420
7421 if (j == dc_state->stream_count)
7422 continue;
7423
7424 slot_num = DIV_ROUND_UP(pbn, pbn_div);
7425
7426 if (stream->timing.flags.DSC != 1) {
7427 dm_conn_state->pbn = pbn;
7428 dm_conn_state->vcpi_slots = slot_num;
7429
7430 drm_dp_mst_atomic_enable_dsc(state,
7431 aconnector->port,
7432 dm_conn_state->pbn,
7433 0,
7434 false);
7435 continue;
7436 }
7437
7438 vcpi = drm_dp_mst_atomic_enable_dsc(state,
7439 aconnector->port,
7440 pbn, pbn_div,
7441 true);
7442 if (vcpi < 0)
7443 return vcpi;
7444
7445 dm_conn_state->pbn = pbn;
7446 dm_conn_state->vcpi_slots = vcpi;
7447 }
7448 return 0;
7449}
7450#endif
7451
7452static void dm_drm_plane_reset(struct drm_plane *plane)
7453{
7454 struct dm_plane_state *amdgpu_state = NULL;
7455
7456 if (plane->state)
7457 plane->funcs->atomic_destroy_state(plane, plane->state);
7458
7459 amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);
7460 WARN_ON(amdgpu_state == NULL);
7461
7462 if (amdgpu_state)
7463 __drm_atomic_helper_plane_reset(plane, &amdgpu_state->base);
7464}
7465
7466static struct drm_plane_state *
7467dm_drm_plane_duplicate_state(struct drm_plane *plane)
7468{
7469 struct dm_plane_state *dm_plane_state, *old_dm_plane_state;
7470
7471 old_dm_plane_state = to_dm_plane_state(plane->state);
7472 dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL);
7473 if (!dm_plane_state)
7474 return NULL;
7475
7476 __drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base);
7477
7478 if (old_dm_plane_state->dc_state) {
7479 dm_plane_state->dc_state = old_dm_plane_state->dc_state;
7480 dc_plane_state_retain(dm_plane_state->dc_state);
7481 }
7482
7483 return &dm_plane_state->base;
7484}
7485
7486static void dm_drm_plane_destroy_state(struct drm_plane *plane,
7487 struct drm_plane_state *state)
7488{
7489 struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
7490
7491 if (dm_plane_state->dc_state)
7492 dc_plane_state_release(dm_plane_state->dc_state);
7493
7494 drm_atomic_helper_plane_destroy_state(plane, state);
7495}
7496
7497static const struct drm_plane_funcs dm_plane_funcs = {
7498 .update_plane = drm_atomic_helper_update_plane,
7499 .disable_plane = drm_atomic_helper_disable_plane,
7500 .destroy = drm_primary_helper_destroy,
7501 .reset = dm_drm_plane_reset,
7502 .atomic_duplicate_state = dm_drm_plane_duplicate_state,
7503 .atomic_destroy_state = dm_drm_plane_destroy_state,
7504 .format_mod_supported = dm_plane_format_mod_supported,
7505};
7506
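/*
 * Pin the framebuffer BO into a displayable domain, map it into GART and
 * record its GPU address, then fill the DC plane buffer attributes for
 * newly created plane states.
 */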
7507static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
7508 struct drm_plane_state *new_state)
7509{
7510 struct amdgpu_framebuffer *afb;
7511 struct drm_gem_object *obj;
7512 struct amdgpu_device *adev;
7513 struct amdgpu_bo *rbo;
7514 struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
7515 struct list_head list;
7516 struct ttm_validate_buffer tv;
7517 struct ww_acquire_ctx ticket;
7518 uint32_t domain;
7519 int r;
7520
7521 if (!new_state->fb) {
7522 DRM_DEBUG_KMS("No FB bound\n");
7523 return 0;
7524 }
7525
7526 afb = to_amdgpu_framebuffer(new_state->fb);
7527 obj = new_state->fb->obj[0];
7528 rbo = gem_to_amdgpu_bo(obj);
7529 adev = amdgpu_ttm_adev(rbo->tbo.bdev);
7530 INIT_LIST_HEAD(&list);
7531
7532 tv.bo = &rbo->tbo;
7533 tv.num_shared = 1;
7534 list_add(&tv.head, &list);
7535
7536 r = ttm_eu_reserve_buffers(&ticket, &list, false, NULL);
7537 if (r) {
7538 dev_err(adev->dev, "fail to reserve bo (%d)\n", r);
7539 return r;
7540 }
7541
7542 if (plane->type != DRM_PLANE_TYPE_CURSOR)
7543 domain = amdgpu_display_supported_domains(adev, rbo->flags);
7544 else
7545 domain = AMDGPU_GEM_DOMAIN_VRAM;
7546
7547 r = amdgpu_bo_pin(rbo, domain);
7548 if (unlikely(r != 0)) {
7549 if (r != -ERESTARTSYS)
7550 DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
7551 ttm_eu_backoff_reservation(&ticket, &list);
7552 return r;
7553 }
7554
7555 r = amdgpu_ttm_alloc_gart(&rbo->tbo);
7556 if (unlikely(r != 0)) {
7557 amdgpu_bo_unpin(rbo);
7558 ttm_eu_backoff_reservation(&ticket, &list);
7559 DRM_ERROR("%p bind failed\n", rbo);
7560 return r;
7561 }
7562
7563 ttm_eu_backoff_reservation(&ticket, &list);
7564
7565 afb->address = amdgpu_bo_gpu_offset(rbo);
7566
7567 amdgpu_bo_ref(rbo);
7568
7569	/*
7570 * We don't do surface updates on planes that have been newly created,
7571 * but we also don't have the afb->address during atomic check.
7572 *
7573 * Fill in buffer attributes depending on the address here, but only on
7574 * newly created planes since they're not being used by DC yet and this
7575 * won't modify global state.
7576 */
7577 dm_plane_state_old = to_dm_plane_state(plane->state);
7578 dm_plane_state_new = to_dm_plane_state(new_state);
7579
7580 if (dm_plane_state_new->dc_state &&
7581 dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
7582 struct dc_plane_state *plane_state =
7583 dm_plane_state_new->dc_state;
7584 bool force_disable_dcc = !plane_state->dcc.enable;
7585
7586 fill_plane_buffer_attributes(
7587 adev, afb, plane_state->format, plane_state->rotation,
7588 afb->tiling_flags,
7589 &plane_state->tiling_info, &plane_state->plane_size,
7590 &plane_state->dcc, &plane_state->address,
7591 afb->tmz_surface, force_disable_dcc);
7592 }
7593
7594 return 0;
7595}
7596
7597static void dm_plane_helper_cleanup_fb(struct drm_plane *plane,
7598 struct drm_plane_state *old_state)
7599{
7600 struct amdgpu_bo *rbo;
7601 int r;
7602
7603 if (!old_state->fb)
7604 return;
7605
7606 rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]);
7607 r = amdgpu_bo_reserve(rbo, false);
7608 if (unlikely(r)) {
7609 DRM_ERROR("failed to reserve rbo before unpin\n");
7610 return;
7611 }
7612
7613 amdgpu_bo_unpin(rbo);
7614 amdgpu_bo_unreserve(rbo);
7615 amdgpu_bo_unref(&rbo);
7616}
7617
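/*
 * Validate the plane's viewport against the CRTC mode and clamp the
 * allowed scaling to the limits from the DC plane caps before handing off
 * to the generic DRM plane state check.
 */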
7618static int dm_plane_helper_check_state(struct drm_plane_state *state,
7619 struct drm_crtc_state *new_crtc_state)
7620{
7621 struct drm_framebuffer *fb = state->fb;
7622 int min_downscale, max_upscale;
7623 int min_scale = 0;
7624 int max_scale = INT_MAX;
7625
7626 /* Plane enabled? Validate viewport and get scaling factors from plane caps. */
7627 if (fb && state->crtc) {
7628 /* Validate viewport to cover the case when only the position changes */
7629 if (state->plane->type != DRM_PLANE_TYPE_CURSOR) {
7630 int viewport_width = state->crtc_w;
7631 int viewport_height = state->crtc_h;
7632
7633 if (state->crtc_x < 0)
7634 viewport_width += state->crtc_x;
7635 else if (state->crtc_x + state->crtc_w > new_crtc_state->mode.crtc_hdisplay)
7636 viewport_width = new_crtc_state->mode.crtc_hdisplay - state->crtc_x;
7637
7638 if (state->crtc_y < 0)
7639 viewport_height += state->crtc_y;
7640 else if (state->crtc_y + state->crtc_h > new_crtc_state->mode.crtc_vdisplay)
7641 viewport_height = new_crtc_state->mode.crtc_vdisplay - state->crtc_y;
7642
7643 if (viewport_width < 0 || viewport_height < 0) {
7644 DRM_DEBUG_ATOMIC("Plane completely outside of screen\n");
7645 return -EINVAL;
7646 } else if (viewport_width < MIN_VIEWPORT_SIZE*2) { /* x2 for width is because of pipe-split. */
7647 DRM_DEBUG_ATOMIC("Viewport width %d smaller than %d\n", viewport_width, MIN_VIEWPORT_SIZE*2);
7648 return -EINVAL;
7649 } else if (viewport_height < MIN_VIEWPORT_SIZE) {
7650 DRM_DEBUG_ATOMIC("Viewport height %d smaller than %d\n", viewport_height, MIN_VIEWPORT_SIZE);
7651 return -EINVAL;
7652 }
7653
7654 }
7655
7656 /* Get min/max allowed scaling factors from plane caps. */
7657 get_min_max_dc_plane_scaling(state->crtc->dev, fb,
7658 &min_downscale, &max_upscale);
7659 /*
7660 * Convert to drm convention: 16.16 fixed point, instead of dc's
7661 * 1.0 == 1000. Also drm scaling is src/dst instead of dc's
7662 * dst/src, so min_scale = 1.0 / max_upscale, etc.
7663 */
7664 min_scale = (1000 << 16) / max_upscale;
7665 max_scale = (1000 << 16) / min_downscale;
7666 }
7667
7668 return drm_atomic_helper_check_plane_state(
7669 state, new_crtc_state, min_scale, max_scale, true, true);
7670}
7671
7672static int dm_plane_atomic_check(struct drm_plane *plane,
7673 struct drm_atomic_state *state)
7674{
7675 struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
7676 plane);
7677 struct amdgpu_device *adev = drm_to_adev(plane->dev);
7678 struct dc *dc = adev->dm.dc;
7679 struct dm_plane_state *dm_plane_state;
7680 struct dc_scaling_info scaling_info;
7681 struct drm_crtc_state *new_crtc_state;
7682 int ret;
7683
7684 trace_amdgpu_dm_plane_atomic_check(new_plane_state);
7685
7686 dm_plane_state = to_dm_plane_state(new_plane_state);
7687
7688 if (!dm_plane_state->dc_state)
7689 return 0;
7690
7691 new_crtc_state =
7692 drm_atomic_get_new_crtc_state(state,
7693 new_plane_state->crtc);
7694 if (!new_crtc_state)
7695 return -EINVAL;
7696
7697 ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
7698 if (ret)
7699 return ret;
7700
7701 ret = fill_dc_scaling_info(adev, new_plane_state, &scaling_info);
7702 if (ret)
7703 return ret;
7704
7705 if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
7706 return 0;
7707
7708 return -EINVAL;
7709}
7710
7711static int dm_plane_atomic_async_check(struct drm_plane *plane,
7712 struct drm_atomic_state *state)
7713{
7714 /* Only support async updates on cursor planes. */
7715 if (plane->type != DRM_PLANE_TYPE_CURSOR)
7716 return -EINVAL;
7717
7718 return 0;
7719}
7720
7721static void dm_plane_atomic_async_update(struct drm_plane *plane,
7722 struct drm_atomic_state *state)
7723{
7724 struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
7725 plane);
7726 struct drm_plane_state *old_state =
7727 drm_atomic_get_old_plane_state(state, plane);
7728
7729 trace_amdgpu_dm_atomic_update_cursor(new_state);
7730
7731 swap(plane->state->fb, new_state->fb);
7732
7733 plane->state->src_x = new_state->src_x;
7734 plane->state->src_y = new_state->src_y;
7735 plane->state->src_w = new_state->src_w;
7736 plane->state->src_h = new_state->src_h;
7737 plane->state->crtc_x = new_state->crtc_x;
7738 plane->state->crtc_y = new_state->crtc_y;
7739 plane->state->crtc_w = new_state->crtc_w;
7740 plane->state->crtc_h = new_state->crtc_h;
7741
7742 handle_cursor_update(plane, old_state);
7743}
7744
7745static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
7746 .prepare_fb = dm_plane_helper_prepare_fb,
7747 .cleanup_fb = dm_plane_helper_cleanup_fb,
7748 .atomic_check = dm_plane_atomic_check,
7749 .atomic_async_check = dm_plane_atomic_async_check,
7750 .atomic_async_update = dm_plane_atomic_async_update
7751};
7752
7753/*
7754 * TODO: these are currently initialized to RGB formats only.
7755 * For future use cases we should either initialize them dynamically based on
7756 * plane capabilities, or initialize this array to all formats, so the internal
7757 * drm check will succeed, and let DC implement the proper check.
7758 */
7759static const uint32_t rgb_formats[] = {
7760 DRM_FORMAT_XRGB8888,
7761 DRM_FORMAT_ARGB8888,
7762 DRM_FORMAT_RGBA8888,
7763 DRM_FORMAT_XRGB2101010,
7764 DRM_FORMAT_XBGR2101010,
7765 DRM_FORMAT_ARGB2101010,
7766 DRM_FORMAT_ABGR2101010,
7767 DRM_FORMAT_XRGB16161616,
7768 DRM_FORMAT_XBGR16161616,
7769 DRM_FORMAT_ARGB16161616,
7770 DRM_FORMAT_ABGR16161616,
7771 DRM_FORMAT_XBGR8888,
7772 DRM_FORMAT_ABGR8888,
7773 DRM_FORMAT_RGB565,
7774};
7775
7776static const uint32_t overlay_formats[] = {
7777 DRM_FORMAT_XRGB8888,
7778 DRM_FORMAT_ARGB8888,
7779 DRM_FORMAT_RGBA8888,
7780 DRM_FORMAT_XBGR8888,
7781 DRM_FORMAT_ABGR8888,
7782 DRM_FORMAT_RGB565
7783};
7784
7785static const u32 cursor_formats[] = {
7786 DRM_FORMAT_ARGB8888
7787};
7788
7789static int get_plane_formats(const struct drm_plane *plane,
7790 const struct dc_plane_cap *plane_cap,
7791 uint32_t *formats, int max_formats)
7792{
7793 int i, num_formats = 0;
7794
7795 /*
7796 * TODO: Query support for each group of formats directly from
7797 * DC plane caps. This will require adding more formats to the
7798 * caps list.
7799 */
7800
7801 switch (plane->type) {
7802 case DRM_PLANE_TYPE_PRIMARY:
7803 for (i = 0; i < ARRAY_SIZE(rgb_formats); ++i) {
7804 if (num_formats >= max_formats)
7805 break;
7806
7807 formats[num_formats++] = rgb_formats[i];
7808 }
7809
7810 if (plane_cap && plane_cap->pixel_format_support.nv12)
7811 formats[num_formats++] = DRM_FORMAT_NV12;
7812 if (plane_cap && plane_cap->pixel_format_support.p010)
7813 formats[num_formats++] = DRM_FORMAT_P010;
7814 if (plane_cap && plane_cap->pixel_format_support.fp16) {
7815 formats[num_formats++] = DRM_FORMAT_XRGB16161616F;
7816 formats[num_formats++] = DRM_FORMAT_ARGB16161616F;
7817 formats[num_formats++] = DRM_FORMAT_XBGR16161616F;
7818 formats[num_formats++] = DRM_FORMAT_ABGR16161616F;
7819 }
7820 break;
7821
7822 case DRM_PLANE_TYPE_OVERLAY:
7823 for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) {
7824 if (num_formats >= max_formats)
7825 break;
7826
7827 formats[num_formats++] = overlay_formats[i];
7828 }
7829 break;
7830
7831 case DRM_PLANE_TYPE_CURSOR:
7832 for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) {
7833 if (num_formats >= max_formats)
7834 break;
7835
7836 formats[num_formats++] = cursor_formats[i];
7837 }
7838 break;
7839 }
7840
7841 return num_formats;
7842}
7843
7844static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
7845 struct drm_plane *plane,
7846 unsigned long possible_crtcs,
7847 const struct dc_plane_cap *plane_cap)
7848{
7849 uint32_t formats[32];
7850 int num_formats;
7851 int res = -EPERM;
7852 unsigned int supported_rotations;
7853 uint64_t *modifiers = NULL;
7854
7855 num_formats = get_plane_formats(plane, plane_cap, formats,
7856 ARRAY_SIZE(formats));
7857
7858 res = get_plane_modifiers(dm->adev, plane->type, &modifiers);
7859 if (res)
7860 return res;
7861
7862 res = drm_universal_plane_init(adev_to_drm(dm->adev), plane, possible_crtcs,
7863 &dm_plane_funcs, formats, num_formats,
7864 modifiers, plane->type, NULL);
7865 kfree(modifiers);
7866 if (res)
7867 return res;
7868
7869 if (plane->type == DRM_PLANE_TYPE_OVERLAY &&
7870 plane_cap && plane_cap->per_pixel_alpha) {
7871 unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
7872 BIT(DRM_MODE_BLEND_PREMULTI);
7873
7874 drm_plane_create_alpha_property(plane);
7875 drm_plane_create_blend_mode_property(plane, blend_caps);
7876 }
7877
7878 if (plane->type == DRM_PLANE_TYPE_PRIMARY &&
7879 plane_cap &&
7880 (plane_cap->pixel_format_support.nv12 ||
7881 plane_cap->pixel_format_support.p010)) {
7882 /* This only affects YUV formats. */
7883 drm_plane_create_color_properties(
7884 plane,
7885 BIT(DRM_COLOR_YCBCR_BT601) |
7886 BIT(DRM_COLOR_YCBCR_BT709) |
7887 BIT(DRM_COLOR_YCBCR_BT2020),
7888 BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
7889 BIT(DRM_COLOR_YCBCR_FULL_RANGE),
7890 DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE);
7891 }
7892
7893 supported_rotations =
7894 DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
7895 DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270;
7896
7897 if (dm->adev->asic_type >= CHIP_BONAIRE &&
7898 plane->type != DRM_PLANE_TYPE_CURSOR)
7899 drm_plane_create_rotation_property(plane, DRM_MODE_ROTATE_0,
7900 supported_rotations);
7901
7902 drm_plane_helper_add(plane, &dm_plane_helper_funcs);
7903
7904 /* Create (reset) the plane state */
7905 if (plane->funcs->reset)
7906 plane->funcs->reset(plane);
7907
7908 return 0;
7909}
7910
7911static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
7912 struct drm_plane *plane,
7913 uint32_t crtc_index)
7914{
7915 struct amdgpu_crtc *acrtc = NULL;
7916 struct drm_plane *cursor_plane;
7917
7918 int res = -ENOMEM;
7919
7920 cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL);
7921 if (!cursor_plane)
7922 goto fail;
7923
7924 cursor_plane->type = DRM_PLANE_TYPE_CURSOR;
7925 res = amdgpu_dm_plane_init(dm, cursor_plane, 0, NULL);
7926
7927 acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
7928 if (!acrtc)
7929 goto fail;
7930
7931 res = drm_crtc_init_with_planes(
7932 dm->ddev,
7933 &acrtc->base,
7934 plane,
7935 cursor_plane,
7936 &amdgpu_dm_crtc_funcs, NULL);
7937
7938 if (res)
7939 goto fail;
7940
7941 drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs);
7942
7943 /* Create (reset) the plane state */
7944 if (acrtc->base.funcs->reset)
7945 acrtc->base.funcs->reset(&acrtc->base);
7946
7947 acrtc->max_cursor_width = dm->adev->dm.dc->caps.max_cursor_size;
7948 acrtc->max_cursor_height = dm->adev->dm.dc->caps.max_cursor_size;
7949
7950 acrtc->crtc_id = crtc_index;
7951 acrtc->base.enabled = false;
7952 acrtc->otg_inst = -1;
7953
7954 dm->adev->mode_info.crtcs[crtc_index] = acrtc;
7955 drm_crtc_enable_color_mgmt(&acrtc->base, MAX_COLOR_LUT_ENTRIES,
7956 true, MAX_COLOR_LUT_ENTRIES);
7957 drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);
7958
7959 return 0;
7960
7961fail:
7962 kfree(acrtc);
7963 kfree(cursor_plane);
7964 return res;
7965}
7966
7967
7968static int to_drm_connector_type(enum signal_type st)
7969{
7970 switch (st) {
7971 case SIGNAL_TYPE_HDMI_TYPE_A:
7972 return DRM_MODE_CONNECTOR_HDMIA;
7973 case SIGNAL_TYPE_EDP:
7974 return DRM_MODE_CONNECTOR_eDP;
7975 case SIGNAL_TYPE_LVDS:
7976 return DRM_MODE_CONNECTOR_LVDS;
7977 case SIGNAL_TYPE_RGB:
7978 return DRM_MODE_CONNECTOR_VGA;
7979 case SIGNAL_TYPE_DISPLAY_PORT:
7980 case SIGNAL_TYPE_DISPLAY_PORT_MST:
7981 return DRM_MODE_CONNECTOR_DisplayPort;
7982 case SIGNAL_TYPE_DVI_DUAL_LINK:
7983 case SIGNAL_TYPE_DVI_SINGLE_LINK:
7984 return DRM_MODE_CONNECTOR_DVID;
7985 case SIGNAL_TYPE_VIRTUAL:
7986 return DRM_MODE_CONNECTOR_VIRTUAL;
7987
7988 default:
7989 return DRM_MODE_CONNECTOR_Unknown;
7990 }
7991}
7992
7993static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector)
7994{
7995 struct drm_encoder *encoder;
7996
7997 /* There is only one encoder per connector */
7998 drm_connector_for_each_possible_encoder(connector, encoder)
7999 return encoder;
8000
8001 return NULL;
8002}
8003
8004static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
8005{
8006 struct drm_encoder *encoder;
8007 struct amdgpu_encoder *amdgpu_encoder;
8008
8009 encoder = amdgpu_dm_connector_to_encoder(connector);
8010
8011 if (encoder == NULL)
8012 return;
8013
8014 amdgpu_encoder = to_amdgpu_encoder(encoder);
8015
8016 amdgpu_encoder->native_mode.clock = 0;
8017
8018 if (!list_empty(&connector->probed_modes)) {
8019 struct drm_display_mode *preferred_mode = NULL;
8020
8021 list_for_each_entry(preferred_mode,
8022 &connector->probed_modes,
8023 head) {
8024 if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
8025 amdgpu_encoder->native_mode = *preferred_mode;
8026
8027 break;
8028 }
8029
8030 }
8031}
8032
8033static struct drm_display_mode *
8034amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
8035 char *name,
8036 int hdisplay, int vdisplay)
8037{
8038 struct drm_device *dev = encoder->dev;
8039 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
8040 struct drm_display_mode *mode = NULL;
8041 struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
8042
8043 mode = drm_mode_duplicate(dev, native_mode);
8044
8045 if (mode == NULL)
8046 return NULL;
8047
8048 mode->hdisplay = hdisplay;
8049 mode->vdisplay = vdisplay;
8050 mode->type &= ~DRM_MODE_TYPE_PREFERRED;
8051 strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
8052
8053 return mode;
8054
8055}
8056
8057static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
8058 struct drm_connector *connector)
8059{
8060 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
8061 struct drm_display_mode *mode = NULL;
8062 struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
8063 struct amdgpu_dm_connector *amdgpu_dm_connector =
8064 to_amdgpu_dm_connector(connector);
8065 int i;
8066 int n;
8067 struct mode_size {
8068 char name[DRM_DISPLAY_MODE_LEN];
8069 int w;
8070 int h;
8071 } common_modes[] = {
8072 { "640x480", 640, 480},
8073 { "800x600", 800, 600},
8074 { "1024x768", 1024, 768},
8075 { "1280x720", 1280, 720},
8076 { "1280x800", 1280, 800},
8077 {"1280x1024", 1280, 1024},
8078 { "1440x900", 1440, 900},
8079 {"1680x1050", 1680, 1050},
8080 {"1600x1200", 1600, 1200},
8081 {"1920x1080", 1920, 1080},
8082 {"1920x1200", 1920, 1200}
8083 };
8084
8085 n = ARRAY_SIZE(common_modes);
8086
8087 for (i = 0; i < n; i++) {
8088 struct drm_display_mode *curmode = NULL;
8089 bool mode_existed = false;
8090
8091 if (common_modes[i].w > native_mode->hdisplay ||
8092 common_modes[i].h > native_mode->vdisplay ||
8093 (common_modes[i].w == native_mode->hdisplay &&
8094 common_modes[i].h == native_mode->vdisplay))
8095 continue;
8096
8097 list_for_each_entry(curmode, &connector->probed_modes, head) {
8098 if (common_modes[i].w == curmode->hdisplay &&
8099 common_modes[i].h == curmode->vdisplay) {
8100 mode_existed = true;
8101 break;
8102 }
8103 }
8104
8105 if (mode_existed)
8106 continue;
8107
8108 mode = amdgpu_dm_create_common_mode(encoder,
8109 common_modes[i].name, common_modes[i].w,
8110 common_modes[i].h);
8111 drm_mode_probed_add(connector, mode);
8112 amdgpu_dm_connector->num_modes++;
8113 }
8114}
8115
8116static void amdgpu_set_panel_orientation(struct drm_connector *connector)
8117{
8118 struct drm_encoder *encoder;
8119 struct amdgpu_encoder *amdgpu_encoder;
8120 const struct drm_display_mode *native_mode;
8121
8122 if (connector->connector_type != DRM_MODE_CONNECTOR_eDP &&
8123 connector->connector_type != DRM_MODE_CONNECTOR_LVDS)
8124 return;
8125
8126 encoder = amdgpu_dm_connector_to_encoder(connector);
8127 if (!encoder)
8128 return;
8129
8130 amdgpu_encoder = to_amdgpu_encoder(encoder);
8131
8132 native_mode = &amdgpu_encoder->native_mode;
8133 if (native_mode->hdisplay == 0 || native_mode->vdisplay == 0)
8134 return;
8135
8136 drm_connector_set_panel_orientation_with_quirk(connector,
8137 DRM_MODE_PANEL_ORIENTATION_UNKNOWN,
8138 native_mode->hdisplay,
8139 native_mode->vdisplay);
8140}
8141
8142static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
8143 struct edid *edid)
8144{
8145 struct amdgpu_dm_connector *amdgpu_dm_connector =
8146 to_amdgpu_dm_connector(connector);
8147
8148 if (edid) {
8149 /* empty probed_modes */
8150 INIT_LIST_HEAD(&connector->probed_modes);
8151 amdgpu_dm_connector->num_modes =
8152 drm_add_edid_modes(connector, edid);
8153
8154		/* Sort the probed modes before calling
8155		 * amdgpu_dm_get_native_mode(), since an EDID can have
8156		 * more than one preferred mode. Modes later in the
8157		 * probed mode list could be of a higher, preferred
8158		 * resolution. For example, a 3840x2160 resolution in the
8159		 * base EDID preferred timing and a 4096x2160 preferred
8160		 * resolution in a DID extension block later.
8161 */
8162 drm_mode_sort(&connector->probed_modes);
8163 amdgpu_dm_get_native_mode(connector);
8164
8165 /* Freesync capabilities are reset by calling
8166 * drm_add_edid_modes() and need to be
8167 * restored here.
8168 */
8169 amdgpu_dm_update_freesync_caps(connector, edid);
8170
8171 amdgpu_set_panel_orientation(connector);
8172 } else {
8173 amdgpu_dm_connector->num_modes = 0;
8174 }
8175}
8176
8177static bool is_duplicate_mode(struct amdgpu_dm_connector *aconnector,
8178 struct drm_display_mode *mode)
8179{
8180 struct drm_display_mode *m;
8181
8182 list_for_each_entry (m, &aconnector->base.probed_modes, head) {
8183 if (drm_mode_equal(m, mode))
8184 return true;
8185 }
8186
8187 return false;
8188}
8189
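/*
 * Synthesize fixed-refresh modes at common video rates within the
 * connector's FreeSync range, derived from the highest-refresh probed
 * mode, and add them to the probed mode list. Returns the number of modes
 * added.
 */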
8190static uint add_fs_modes(struct amdgpu_dm_connector *aconnector)
8191{
8192 const struct drm_display_mode *m;
8193 struct drm_display_mode *new_mode;
8194 uint i;
8195 uint32_t new_modes_count = 0;
8196
8197 /* Standard FPS values
8198 *
8199 * 23.976 - TV/NTSC
8200 * 24 - Cinema
8201 * 25 - TV/PAL
8202 * 29.97 - TV/NTSC
8203 * 30 - TV/NTSC
8204 * 48 - Cinema HFR
8205 * 50 - TV/PAL
8206 * 60 - Commonly used
8207 * 48,72,96,120 - Multiples of 24
8208 */
8209 static const uint32_t common_rates[] = {
8210 23976, 24000, 25000, 29970, 30000,
8211 48000, 50000, 60000, 72000, 96000, 120000
8212 };
8213
8214 /*
8215 * Find mode with highest refresh rate with the same resolution
8216 * as the preferred mode. Some monitors report a preferred mode
8217	 * with a lower refresh rate than the highest one supported.
8218 */
8219
8220 m = get_highest_refresh_rate_mode(aconnector, true);
8221 if (!m)
8222 return 0;
8223
8224 for (i = 0; i < ARRAY_SIZE(common_rates); i++) {
8225 uint64_t target_vtotal, target_vtotal_diff;
8226 uint64_t num, den;
8227
8228 if (drm_mode_vrefresh(m) * 1000 < common_rates[i])
8229 continue;
8230
8231 if (common_rates[i] < aconnector->min_vfreq * 1000 ||
8232 common_rates[i] > aconnector->max_vfreq * 1000)
8233 continue;
8234
8235 num = (unsigned long long)m->clock * 1000 * 1000;
8236 den = common_rates[i] * (unsigned long long)m->htotal;
8237 target_vtotal = div_u64(num, den);
8238 target_vtotal_diff = target_vtotal - m->vtotal;
8239
8240 /* Check for illegal modes */
8241 if (m->vsync_start + target_vtotal_diff < m->vdisplay ||
8242 m->vsync_end + target_vtotal_diff < m->vsync_start ||
8243 m->vtotal + target_vtotal_diff < m->vsync_end)
8244 continue;
8245
8246 new_mode = drm_mode_duplicate(aconnector->base.dev, m);
8247 if (!new_mode)
8248 goto out;
8249
8250 new_mode->vtotal += (u16)target_vtotal_diff;
8251 new_mode->vsync_start += (u16)target_vtotal_diff;
8252 new_mode->vsync_end += (u16)target_vtotal_diff;
8253 new_mode->type &= ~DRM_MODE_TYPE_PREFERRED;
8254 new_mode->type |= DRM_MODE_TYPE_DRIVER;
8255
8256 if (!is_duplicate_mode(aconnector, new_mode)) {
8257 drm_mode_probed_add(&aconnector->base, new_mode);
8258 new_modes_count += 1;
8259 } else
8260 drm_mode_destroy(aconnector->base.dev, new_mode);
8261 }
8262 out:
8263 return new_modes_count;
8264}
8265
8266static void amdgpu_dm_connector_add_freesync_modes(struct drm_connector *connector,
8267 struct edid *edid)
8268{
8269 struct amdgpu_dm_connector *amdgpu_dm_connector =
8270 to_amdgpu_dm_connector(connector);
8271
8272 if (!(amdgpu_freesync_vid_mode && edid))
8273 return;
8274
8275 if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
8276 amdgpu_dm_connector->num_modes +=
8277 add_fs_modes(amdgpu_dm_connector);
8278}
8279
8280static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
8281{
8282 struct amdgpu_dm_connector *amdgpu_dm_connector =
8283 to_amdgpu_dm_connector(connector);
8284 struct drm_encoder *encoder;
8285 struct edid *edid = amdgpu_dm_connector->edid;
8286
8287 encoder = amdgpu_dm_connector_to_encoder(connector);
8288
8289 if (!drm_edid_is_valid(edid)) {
8290 amdgpu_dm_connector->num_modes =
8291 drm_add_modes_noedid(connector, 640, 480);
8292 } else {
8293 amdgpu_dm_connector_ddc_get_modes(connector, edid);
8294 amdgpu_dm_connector_add_common_modes(encoder, connector);
8295 amdgpu_dm_connector_add_freesync_modes(connector, edid);
8296 }
8297 amdgpu_dm_fbc_init(connector);
8298
8299 return amdgpu_dm_connector->num_modes;
8300}
8301
8302void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
8303 struct amdgpu_dm_connector *aconnector,
8304 int connector_type,
8305 struct dc_link *link,
8306 int link_index)
8307{
8308 struct amdgpu_device *adev = drm_to_adev(dm->ddev);
8309
8310 /*
8311 * Some of the properties below require access to state, like bpc.
8312 * Allocate some default initial connector state with our reset helper.
8313 */
8314 if (aconnector->base.funcs->reset)
8315 aconnector->base.funcs->reset(&aconnector->base);
8316
8317 aconnector->connector_id = link_index;
8318 aconnector->dc_link = link;
8319 aconnector->base.interlace_allowed = false;
8320 aconnector->base.doublescan_allowed = false;
8321 aconnector->base.stereo_allowed = false;
8322 aconnector->base.dpms = DRM_MODE_DPMS_OFF;
8323 aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
8324 aconnector->audio_inst = -1;
8325 mutex_init(&aconnector->hpd_lock);
8326
8327 /*
8328	 * Configure HPD hot plug support. The connector->polled default value is 0,
8329	 * which means HPD hot plug is not supported.
8330 */
8331 switch (connector_type) {
8332 case DRM_MODE_CONNECTOR_HDMIA:
8333 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
8334 aconnector->base.ycbcr_420_allowed =
8335 link->link_enc->features.hdmi_ycbcr420_supported ? true : false;
8336 break;
8337 case DRM_MODE_CONNECTOR_DisplayPort:
8338 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
8339 link->link_enc = dp_get_link_enc(link);
8340 ASSERT(link->link_enc);
8341 if (link->link_enc)
8342 aconnector->base.ycbcr_420_allowed =
8343 link->link_enc->features.dp_ycbcr420_supported ? true : false;
8344 break;
8345 case DRM_MODE_CONNECTOR_DVID:
8346 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
8347 break;
8348 default:
8349 break;
8350 }
8351
8352 drm_object_attach_property(&aconnector->base.base,
8353 dm->ddev->mode_config.scaling_mode_property,
8354 DRM_MODE_SCALE_NONE);
8355
8356 drm_object_attach_property(&aconnector->base.base,
8357 adev->mode_info.underscan_property,
8358 UNDERSCAN_OFF);
8359 drm_object_attach_property(&aconnector->base.base,
8360 adev->mode_info.underscan_hborder_property,
8361 0);
8362 drm_object_attach_property(&aconnector->base.base,
8363 adev->mode_info.underscan_vborder_property,
8364 0);
8365
8366 if (!aconnector->mst_port)
8367 drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);
8368
8369 /* This defaults to the max in the range, but we want 8bpc for non-edp. */
8370 aconnector->base.state->max_bpc = (connector_type == DRM_MODE_CONNECTOR_eDP) ? 16 : 8;
8371 aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;
8372
8373 if (connector_type == DRM_MODE_CONNECTOR_eDP &&
8374 (dc_is_dmcu_initialized(adev->dm.dc) || adev->dm.dc->ctx->dmub_srv)) {
8375 drm_object_attach_property(&aconnector->base.base,
8376 adev->mode_info.abm_level_property, 0);
8377 }
8378
8379 if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
8380 connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
8381 connector_type == DRM_MODE_CONNECTOR_eDP) {
8382 drm_connector_attach_hdr_output_metadata_property(&aconnector->base);
8383
8384 if (!aconnector->mst_port)
8385 drm_connector_attach_vrr_capable_property(&aconnector->base);
8386
8387#ifdef CONFIG_DRM_AMD_DC_HDCP
8388 if (adev->dm.hdcp_workqueue)
8389 drm_connector_attach_content_protection_property(&aconnector->base, true);
8390#endif
8391 }
8392}
8393
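/*
 * i2c_algorithm master_xfer hook: translate the i2c_msg array into a DC
 * i2c_command and submit it on the connector's DDC channel.
 */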
8394static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
8395 struct i2c_msg *msgs, int num)
8396{
8397 struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
8398 struct ddc_service *ddc_service = i2c->ddc_service;
8399 struct i2c_command cmd;
8400 int i;
8401 int result = -EIO;
8402
8403 cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);
8404
8405 if (!cmd.payloads)
8406 return result;
8407
8408 cmd.number_of_payloads = num;
8409 cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
8410 cmd.speed = 100;
8411
8412 for (i = 0; i < num; i++) {
8413 cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
8414 cmd.payloads[i].address = msgs[i].addr;
8415 cmd.payloads[i].length = msgs[i].len;
8416 cmd.payloads[i].data = msgs[i].buf;
8417 }
8418
8419 if (dc_submit_i2c(
8420 ddc_service->ctx->dc,
8421 ddc_service->ddc_pin->hw_info.ddc_channel,
8422 &cmd))
8423 result = num;
8424
8425 kfree(cmd.payloads);
8426 return result;
8427}
8428
8429static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
8430{
8431 return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
8432}
8433
8434static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
8435 .master_xfer = amdgpu_dm_i2c_xfer,
8436 .functionality = amdgpu_dm_i2c_func,
8437};
8438
8439static struct amdgpu_i2c_adapter *
8440create_i2c(struct ddc_service *ddc_service,
8441 int link_index,
8442 int *res)
8443{
8444 struct amdgpu_device *adev = ddc_service->ctx->driver_context;
8445 struct amdgpu_i2c_adapter *i2c;
8446
8447 i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
8448 if (!i2c)
8449 return NULL;
8450 i2c->base.owner = THIS_MODULE;
8451 i2c->base.class = I2C_CLASS_DDC;
8452 i2c->base.dev.parent = &adev->pdev->dev;
8453 i2c->base.algo = &amdgpu_dm_i2c_algo;
8454 snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
8455 i2c_set_adapdata(&i2c->base, i2c);
8456 i2c->ddc_service = ddc_service;
8457 if (i2c->ddc_service->ddc_pin)
8458 i2c->ddc_service->ddc_pin->hw_info.ddc_channel = link_index;
8459
8460 return i2c;
8461}
8462
8463
8464/*
8465 * Note: this function assumes that dc_link_detect() was called for the
8466 * dc_link which will be represented by this aconnector.
8467 */
8468static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
8469 struct amdgpu_dm_connector *aconnector,
8470 uint32_t link_index,
8471 struct amdgpu_encoder *aencoder)
8472{
8473 int res = 0;
8474 int connector_type;
8475 struct dc *dc = dm->dc;
8476 struct dc_link *link = dc_get_link_at_index(dc, link_index);
8477 struct amdgpu_i2c_adapter *i2c;
8478
8479 link->priv = aconnector;
8480
8481 DRM_DEBUG_DRIVER("%s()\n", __func__);
8482
8483 i2c = create_i2c(link->ddc, link->link_index, &res);
8484 if (!i2c) {
8485 DRM_ERROR("Failed to create i2c adapter data\n");
8486 return -ENOMEM;
8487 }
8488
8489 aconnector->i2c = i2c;
8490 res = i2c_add_adapter(&i2c->base);
8491
8492 if (res) {
8493 DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
8494 goto out_free;
8495 }
8496
8497 connector_type = to_drm_connector_type(link->connector_signal);
8498
8499 res = drm_connector_init_with_ddc(
8500 dm->ddev,
8501 &aconnector->base,
8502 &amdgpu_dm_connector_funcs,
8503 connector_type,
8504 &i2c->base);
8505
8506 if (res) {
8507 DRM_ERROR("connector_init failed\n");
8508 aconnector->connector_id = -1;
8509 goto out_free;
8510 }
8511
8512 drm_connector_helper_add(
8513 &aconnector->base,
8514 &amdgpu_dm_connector_helper_funcs);
8515
8516 amdgpu_dm_connector_init_helper(
8517 dm,
8518 aconnector,
8519 connector_type,
8520 link,
8521 link_index);
8522
8523 drm_connector_attach_encoder(
8524 &aconnector->base, &aencoder->base);
8525
8526 if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
8527 || connector_type == DRM_MODE_CONNECTOR_eDP)
8528 amdgpu_dm_initialize_dp_connector(dm, aconnector, link->link_index);
8529
8530out_free:
8531 if (res) {
8532 kfree(i2c);
8533 aconnector->i2c = NULL;
8534 }
8535 return res;
8536}
8537
8538int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
8539{
8540 switch (adev->mode_info.num_crtc) {
8541 case 1:
8542 return 0x1;
8543 case 2:
8544 return 0x3;
8545 case 3:
8546 return 0x7;
8547 case 4:
8548 return 0xf;
8549 case 5:
8550 return 0x1f;
8551 case 6:
8552 default:
8553 return 0x3f;
8554 }
8555}
8556
8557static int amdgpu_dm_encoder_init(struct drm_device *dev,
8558 struct amdgpu_encoder *aencoder,
8559 uint32_t link_index)
8560{
8561 struct amdgpu_device *adev = drm_to_adev(dev);
8562
8563 int res = drm_encoder_init(dev,
8564 &aencoder->base,
8565 &amdgpu_dm_encoder_funcs,
8566 DRM_MODE_ENCODER_TMDS,
8567 NULL);
8568
8569 aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);
8570
8571 if (!res)
8572 aencoder->encoder_id = link_index;
8573 else
8574 aencoder->encoder_id = -1;
8575
8576 drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);
8577
8578 return res;
8579}
8580
8581static void manage_dm_interrupts(struct amdgpu_device *adev,
8582 struct amdgpu_crtc *acrtc,
8583 bool enable)
8584{
8585 /*
8586 * We have no guarantee that the frontend index maps to the same
8587 * backend index - some even map to more than one.
8588 *
8589 * TODO: Use a different interrupt or check DC itself for the mapping.
8590 */
8591 int irq_type =
8592 amdgpu_display_crtc_idx_to_irq_type(
8593 adev,
8594 acrtc->crtc_id);
8595
8596 if (enable) {
8597 drm_crtc_vblank_on(&acrtc->base);
8598 amdgpu_irq_get(
8599 adev,
8600 &adev->pageflip_irq,
8601 irq_type);
8602#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
8603 amdgpu_irq_get(
8604 adev,
8605 &adev->vline0_irq,
8606 irq_type);
8607#endif
8608 } else {
8609#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
8610 amdgpu_irq_put(
8611 adev,
8612 &adev->vline0_irq,
8613 irq_type);
8614#endif
8615 amdgpu_irq_put(
8616 adev,
8617 &adev->pageflip_irq,
8618 irq_type);
8619 drm_crtc_vblank_off(&acrtc->base);
8620 }
8621}
8622
8623static void dm_update_pflip_irq_state(struct amdgpu_device *adev,
8624 struct amdgpu_crtc *acrtc)
8625{
8626 int irq_type =
8627 amdgpu_display_crtc_idx_to_irq_type(adev, acrtc->crtc_id);
8628
8629	/*
8630	 * This reads the current state for the IRQ and force-reapplies
8631	 * the setting to hardware.
8632 */
8633 amdgpu_irq_update(adev, &adev->pageflip_irq, irq_type);
8634}
8635
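/*
 * Report whether the scaling mode or underscan configuration differs
 * between the old and new connector states.
 */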
8636static bool
8637is_scaling_state_different(const struct dm_connector_state *dm_state,
8638 const struct dm_connector_state *old_dm_state)
8639{
8640 if (dm_state->scaling != old_dm_state->scaling)
8641 return true;
8642 if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
8643 if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
8644 return true;
8645 } else if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
8646 if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
8647 return true;
8648 } else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
8649 dm_state->underscan_vborder != old_dm_state->underscan_vborder)
8650 return true;
8651 return false;
8652}
8653
8654#ifdef CONFIG_DRM_AMD_DC_HDCP
8655static bool is_content_protection_different(struct drm_connector_state *state,
8656 const struct drm_connector_state *old_state,
8657 const struct drm_connector *connector, struct hdcp_workqueue *hdcp_w)
8658{
8659 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
8660 struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
8661
8662 /* Handle: Type0/1 change */
8663 if (old_state->hdcp_content_type != state->hdcp_content_type &&
8664 state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
8665 state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
8666 return true;
8667 }
8668
8669	/* CP is being re-enabled, ignore this
8670 *
8671 * Handles: ENABLED -> DESIRED
8672 */
8673 if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
8674 state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
8675 state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
8676 return false;
8677 }
8678
8679 /* S3 resume case, since old state will always be 0 (UNDESIRED) and the restored state will be ENABLED
8680 *
8681 * Handles: UNDESIRED -> ENABLED
8682 */
8683 if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
8684 state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
8685 state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
8686
8687 /* Stream removed and re-enabled
8688 *
8689 * Can sometimes overlap with the HPD case,
8690 * thus set update_hdcp to false to avoid
8691 * setting HDCP multiple times.
8692 *
8693 * Handles: DESIRED -> DESIRED (Special case)
8694 */
8695 if (!(old_state->crtc && old_state->crtc->enabled) &&
8696 state->crtc && state->crtc->enabled &&
8697 connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
8698 dm_con_state->update_hdcp = false;
8699 return true;
8700 }
8701
8702 /* Hot-plug, headless s3, dpms
8703 *
8704 * Only start HDCP if the display is connected/enabled.
8705 * update_hdcp flag will be set to false until the next
8706 * HPD comes in.
8707 *
8708 * Handles: DESIRED -> DESIRED (Special case)
8709 */
8710 if (dm_con_state->update_hdcp && state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED &&
8711 connector->dpms == DRM_MODE_DPMS_ON && aconnector->dc_sink != NULL) {
8712 dm_con_state->update_hdcp = false;
8713 return true;
8714 }
8715
8716 /*
8717 * Handles: UNDESIRED -> UNDESIRED
8718 * DESIRED -> DESIRED
8719 * ENABLED -> ENABLED
8720 */
8721 if (old_state->content_protection == state->content_protection)
8722 return false;
8723
8724 /*
8725 * Handles: UNDESIRED -> DESIRED
8726 * DESIRED -> UNDESIRED
8727 * ENABLED -> UNDESIRED
8728 */
8729 if (state->content_protection != DRM_MODE_CONTENT_PROTECTION_ENABLED)
8730 return true;
8731
8732 /*
8733 * Handles: DESIRED -> ENABLED
8734 */
8735 return false;
8736}
8737
8738#endif
8739static void remove_stream(struct amdgpu_device *adev,
8740 struct amdgpu_crtc *acrtc,
8741 struct dc_stream_state *stream)
8742{
8743 /* this is the update mode case */
8744
8745 acrtc->otg_inst = -1;
8746 acrtc->enabled = false;
8747}
8748
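/*
 * Translate the cursor plane's CRTC coordinates into a DC cursor position,
 * folding negative offsets into the hotspot so the cursor can extend past
 * the top/left edge of the screen.
 */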
8749static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
8750 struct dc_cursor_position *position)
8751{
8752 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
8753 int x, y;
8754 int xorigin = 0, yorigin = 0;
8755
8756 if (!crtc || !plane->state->fb)
8757 return 0;
8758
8759 if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) ||
8760 (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) {
8761 DRM_ERROR("%s: bad cursor width or height %d x %d\n",
8762 __func__,
8763 plane->state->crtc_w,
8764 plane->state->crtc_h);
8765 return -EINVAL;
8766 }
8767
8768 x = plane->state->crtc_x;
8769 y = plane->state->crtc_y;
8770
8771 if (x <= -amdgpu_crtc->max_cursor_width ||
8772 y <= -amdgpu_crtc->max_cursor_height)
8773 return 0;
8774
8775 if (x < 0) {
8776 xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
8777 x = 0;
8778 }
8779 if (y < 0) {
8780 yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
8781 y = 0;
8782 }
8783 position->enable = true;
8784 position->translate_by_source = true;
8785 position->x = x;
8786 position->y = y;
8787 position->x_hotspot = xorigin;
8788 position->y_hotspot = yorigin;
8789
8790 return 0;
8791}
8792
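/*
 * Program the DC cursor attributes and position from the cursor plane
 * state, or disable the cursor when the plane has no framebuffer or lies
 * entirely off screen.
 */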
8793static void handle_cursor_update(struct drm_plane *plane,
8794 struct drm_plane_state *old_plane_state)
8795{
8796 struct amdgpu_device *adev = drm_to_adev(plane->dev);
8797 struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb);
8798 struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
8799 struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
8800 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
8801 uint64_t address = afb ? afb->address : 0;
8802 struct dc_cursor_position position = {0};
8803 struct dc_cursor_attributes attributes;
8804 int ret;
8805
8806 if (!plane->state->fb && !old_plane_state->fb)
8807 return;
8808
8809 DC_LOG_CURSOR("%s: crtc_id=%d with size %d to %d\n",
8810 __func__,
8811 amdgpu_crtc->crtc_id,
8812 plane->state->crtc_w,
8813 plane->state->crtc_h);
8814
8815 ret = get_cursor_position(plane, crtc, &position);
8816 if (ret)
8817 return;
8818
8819 if (!position.enable) {
8820 /* turn off cursor */
8821 if (crtc_state && crtc_state->stream) {
8822 mutex_lock(&adev->dm.dc_lock);
8823 dc_stream_set_cursor_position(crtc_state->stream,
8824 &position);
8825 mutex_unlock(&adev->dm.dc_lock);
8826 }
8827 return;
8828 }
8829
8830 amdgpu_crtc->cursor_width = plane->state->crtc_w;
8831 amdgpu_crtc->cursor_height = plane->state->crtc_h;
8832
8833 memset(&attributes, 0, sizeof(attributes));
8834 attributes.address.high_part = upper_32_bits(address);
8835 attributes.address.low_part = lower_32_bits(address);
8836 attributes.width = plane->state->crtc_w;
8837 attributes.height = plane->state->crtc_h;
8838 attributes.color_format = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
8839 attributes.rotation_angle = 0;
8840 attributes.attribute_flags.value = 0;
8841
8842 attributes.pitch = afb->base.pitches[0] / afb->base.format->cpp[0];
8843
8844 if (crtc_state->stream) {
8845 mutex_lock(&adev->dm.dc_lock);
8846 if (!dc_stream_set_cursor_attributes(crtc_state->stream,
8847 &attributes))
8848 DRM_ERROR("DC failed to set cursor attributes\n");
8849
8850 if (!dc_stream_set_cursor_position(crtc_state->stream,
8851 &position))
8852 DRM_ERROR("DC failed to set cursor position\n");
8853 mutex_unlock(&adev->dm.dc_lock);
8854 }
8855}
8856
8857static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
8858{
8859
8860 assert_spin_locked(&acrtc->base.dev->event_lock);
8861 WARN_ON(acrtc->event);
8862
8863 acrtc->event = acrtc->base.state->event;
8864
8865 /* Set the flip status */
8866 acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
8867
8868 /* Mark this event as consumed */
8869 acrtc->base.state->event = NULL;
8870
8871 DC_LOG_PFLIP("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
8872 acrtc->crtc_id);
8873}
8874
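/*
 * Refresh the stream's VRR parameters and FreeSync infopacket around a
 * page flip; on ASIC families older than AMDGPU_FAMILY_AI this also
 * adjusts vmin/vmax while VRR is active so the change takes effect before
 * the frame ends.
 */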
8875static void update_freesync_state_on_stream(
8876 struct amdgpu_display_manager *dm,
8877 struct dm_crtc_state *new_crtc_state,
8878 struct dc_stream_state *new_stream,
8879 struct dc_plane_state *surface,
8880 u32 flip_timestamp_in_us)
8881{
8882 struct mod_vrr_params vrr_params;
8883 struct dc_info_packet vrr_infopacket = {0};
8884 struct amdgpu_device *adev = dm->adev;
8885 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
8886 unsigned long flags;
8887 bool pack_sdp_v1_3 = false;
8888
8889 if (!new_stream)
8890 return;
8891
8892 /*
8893 * TODO: Determine why min/max totals and vrefresh can be 0 here.
8894 * For now it's sufficient to just guard against these conditions.
8895 */
8896
8897 if (!new_stream->timing.h_total || !new_stream->timing.v_total)
8898 return;
8899
8900 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
8901 vrr_params = acrtc->dm_irq_params.vrr_params;
8902
8903 if (surface) {
8904 mod_freesync_handle_preflip(
8905 dm->freesync_module,
8906 surface,
8907 new_stream,
8908 flip_timestamp_in_us,
8909 &vrr_params);
8910
8911 if (adev->family < AMDGPU_FAMILY_AI &&
8912 amdgpu_dm_vrr_active(new_crtc_state)) {
8913 mod_freesync_handle_v_update(dm->freesync_module,
8914 new_stream, &vrr_params);
8915
8916 /* Need to call this before the frame ends. */
8917 dc_stream_adjust_vmin_vmax(dm->dc,
8918 new_crtc_state->stream,
8919 &vrr_params.adjust);
8920 }
8921 }
8922
8923 mod_freesync_build_vrr_infopacket(
8924 dm->freesync_module,
8925 new_stream,
8926 &vrr_params,
8927 PACKET_TYPE_VRR,
8928 TRANSFER_FUNC_UNKNOWN,
8929 &vrr_infopacket,
8930 pack_sdp_v1_3);
8931
8932 new_crtc_state->freesync_timing_changed |=
8933 (memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
8934 &vrr_params.adjust,
8935 sizeof(vrr_params.adjust)) != 0);
8936
8937 new_crtc_state->freesync_vrr_info_changed |=
8938 (memcmp(&new_crtc_state->vrr_infopacket,
8939 &vrr_infopacket,
8940 sizeof(vrr_infopacket)) != 0);
8941
8942 acrtc->dm_irq_params.vrr_params = vrr_params;
8943 new_crtc_state->vrr_infopacket = vrr_infopacket;
8944
8945 new_stream->adjust = acrtc->dm_irq_params.vrr_params.adjust;
8946 new_stream->vrr_infopacket = vrr_infopacket;
8947
8948 if (new_crtc_state->freesync_vrr_info_changed)
8949 DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d",
8950 new_crtc_state->base.crtc->base.id,
8951 (int)new_crtc_state->base.vrr_enabled,
8952 (int)vrr_params.state);
8953
8954 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
8955}
8956
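/*
 * Recompute the VRR parameters for the stream from the CRTC's freesync
 * config and copy them into dm_irq_params so the IRQ handlers see a
 * consistent snapshot (taken under the drm event_lock).
 */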
8957static void update_stream_irq_parameters(
8958 struct amdgpu_display_manager *dm,
8959 struct dm_crtc_state *new_crtc_state)
8960{
8961 struct dc_stream_state *new_stream = new_crtc_state->stream;
8962 struct mod_vrr_params vrr_params;
8963 struct mod_freesync_config config = new_crtc_state->freesync_config;
8964 struct amdgpu_device *adev = dm->adev;
8965 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
8966 unsigned long flags;
8967
8968 if (!new_stream)
8969 return;
8970
8971 /*
8972 * TODO: Determine why min/max totals and vrefresh can be 0 here.
8973 * For now it's sufficient to just guard against these conditions.
8974 */
8975 if (!new_stream->timing.h_total || !new_stream->timing.v_total)
8976 return;
8977
8978 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
8979 vrr_params = acrtc->dm_irq_params.vrr_params;
8980
8981 if (new_crtc_state->vrr_supported &&
8982 config.min_refresh_in_uhz &&
8983 config.max_refresh_in_uhz) {
8984 /*
8985 * if freesync compatible mode was set, config.state will be set
8986 * in atomic check
8987 */
8988 if (config.state == VRR_STATE_ACTIVE_FIXED && config.fixed_refresh_in_uhz &&
8989 (!drm_atomic_crtc_needs_modeset(&new_crtc_state->base) ||
8990 new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED)) {
8991 vrr_params.max_refresh_in_uhz = config.max_refresh_in_uhz;
8992 vrr_params.min_refresh_in_uhz = config.min_refresh_in_uhz;
8993 vrr_params.fixed_refresh_in_uhz = config.fixed_refresh_in_uhz;
8994 vrr_params.state = VRR_STATE_ACTIVE_FIXED;
8995 } else {
8996 config.state = new_crtc_state->base.vrr_enabled ?
8997 VRR_STATE_ACTIVE_VARIABLE :
8998 VRR_STATE_INACTIVE;
8999 }
9000 } else {
9001 config.state = VRR_STATE_UNSUPPORTED;
9002 }
9003
9004 mod_freesync_build_vrr_params(dm->freesync_module,
9005 new_stream,
9006 &config, &vrr_params);
9007
9008 new_crtc_state->freesync_timing_changed |=
9009 (memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
9010 &vrr_params.adjust, sizeof(vrr_params.adjust)) != 0);
9011
9012 new_crtc_state->freesync_config = config;
9013 /* Copy state for access from DM IRQ handler */
9014 acrtc->dm_irq_params.freesync_config = config;
9015 acrtc->dm_irq_params.active_planes = new_crtc_state->active_planes;
9016 acrtc->dm_irq_params.vrr_params = vrr_params;
9017 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9018}
9019
9020static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
9021 struct dm_crtc_state *new_state)
9022{
9023 bool old_vrr_active = amdgpu_dm_vrr_active(old_state);
9024 bool new_vrr_active = amdgpu_dm_vrr_active(new_state);
9025
9026 if (!old_vrr_active && new_vrr_active) {
9027 /* Transition VRR inactive -> active:
9028 * While VRR is active, we must not disable vblank irq, as a
 9029		 * reenable after a disable would compute bogus vblank/pflip
 9030		 * timestamps if it happened inside the display front-porch.
9031 *
9032 * We also need vupdate irq for the actual core vblank handling
9033 * at end of vblank.
9034 */
9035 dm_set_vupdate_irq(new_state->base.crtc, true);
9036 drm_crtc_vblank_get(new_state->base.crtc);
9037 DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
9038 __func__, new_state->base.crtc->base.id);
9039 } else if (old_vrr_active && !new_vrr_active) {
9040 /* Transition VRR active -> inactive:
9041 * Allow vblank irq disable again for fixed refresh rate.
9042 */
9043 dm_set_vupdate_irq(new_state->base.crtc, false);
9044 drm_crtc_vblank_put(new_state->base.crtc);
9045 DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
9046 __func__, new_state->base.crtc->base.id);
9047 }
9048}
9049
9050static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state)
9051{
9052 struct drm_plane *plane;
9053 struct drm_plane_state *old_plane_state;
9054 int i;
9055
9056 /*
9057 * TODO: Make this per-stream so we don't issue redundant updates for
9058 * commits with multiple streams.
9059 */
9060 for_each_old_plane_in_state(state, plane, old_plane_state, i)
9061 if (plane->type == DRM_PLANE_TYPE_CURSOR)
9062 handle_cursor_update(plane, old_plane_state);
9063}
9064
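/*
 * Build a dc_surface_update bundle for all planes on the given CRTC, wait
 * for fences and for the target vblank when flipping, then commit the
 * surface and stream updates to DC under dc_lock.
 */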
9065static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
9066 struct dc_state *dc_state,
9067 struct drm_device *dev,
9068 struct amdgpu_display_manager *dm,
9069 struct drm_crtc *pcrtc,
9070 bool wait_for_vblank)
9071{
9072 uint32_t i;
9073 uint64_t timestamp_ns;
9074 struct drm_plane *plane;
9075 struct drm_plane_state *old_plane_state, *new_plane_state;
9076 struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
9077 struct drm_crtc_state *new_pcrtc_state =
9078 drm_atomic_get_new_crtc_state(state, pcrtc);
9079 struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
9080 struct dm_crtc_state *dm_old_crtc_state =
9081 to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
9082 int planes_count = 0, vpos, hpos;
9083 long r;
9084 unsigned long flags;
9085 struct amdgpu_bo *abo;
9086 uint32_t target_vblank, last_flip_vblank;
9087 bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
9088 bool pflip_present = false;
9089 struct {
9090 struct dc_surface_update surface_updates[MAX_SURFACES];
9091 struct dc_plane_info plane_infos[MAX_SURFACES];
9092 struct dc_scaling_info scaling_infos[MAX_SURFACES];
9093 struct dc_flip_addrs flip_addrs[MAX_SURFACES];
9094 struct dc_stream_update stream_update;
9095 } *bundle;
9096
9097 bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
9098
9099 if (!bundle) {
9100 dm_error("Failed to allocate update bundle\n");
9101 goto cleanup;
9102 }
9103
9104 /*
9105 * Disable the cursor first if we're disabling all the planes.
9106 * It'll remain on the screen after the planes are re-enabled
9107 * if we don't.
9108 */
9109 if (acrtc_state->active_planes == 0)
9110 amdgpu_dm_commit_cursors(state);
9111
9112 /* update planes when needed */
9113 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
9114 struct drm_crtc *crtc = new_plane_state->crtc;
9115 struct drm_crtc_state *new_crtc_state;
9116 struct drm_framebuffer *fb = new_plane_state->fb;
9117 struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)fb;
9118 bool plane_needs_flip;
9119 struct dc_plane_state *dc_plane;
9120 struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);
9121
9122 /* Cursor plane is handled after stream updates */
9123 if (plane->type == DRM_PLANE_TYPE_CURSOR)
9124 continue;
9125
9126 if (!fb || !crtc || pcrtc != crtc)
9127 continue;
9128
9129 new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
9130 if (!new_crtc_state->active)
9131 continue;
9132
9133 dc_plane = dm_new_plane_state->dc_state;
9134
9135 bundle->surface_updates[planes_count].surface = dc_plane;
9136 if (new_pcrtc_state->color_mgmt_changed) {
9137 bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
9138 bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func;
9139 bundle->surface_updates[planes_count].gamut_remap_matrix = &dc_plane->gamut_remap_matrix;
9140 }
9141
9142 fill_dc_scaling_info(dm->adev, new_plane_state,
9143 &bundle->scaling_infos[planes_count]);
9144
9145 bundle->surface_updates[planes_count].scaling_info =
9146 &bundle->scaling_infos[planes_count];
9147
9148 plane_needs_flip = old_plane_state->fb && new_plane_state->fb;
9149
9150 pflip_present = pflip_present || plane_needs_flip;
9151
9152 if (!plane_needs_flip) {
9153 planes_count += 1;
9154 continue;
9155 }
9156
9157 abo = gem_to_amdgpu_bo(fb->obj[0]);
9158
9159 /*
 9160		 * Wait for all fences on this FB. Use a limited wait to avoid
 9161		 * deadlock during GPU reset, when the fence may never signal
 9162		 * but we still hold the reservation lock for the BO.
9163 */
9164 r = dma_resv_wait_timeout(abo->tbo.base.resv, true, false,
9165 msecs_to_jiffies(5000));
9166 if (unlikely(r <= 0))
9167 DRM_ERROR("Waiting for fences timed out!");
9168
9169 fill_dc_plane_info_and_addr(
9170 dm->adev, new_plane_state,
9171 afb->tiling_flags,
9172 &bundle->plane_infos[planes_count],
9173 &bundle->flip_addrs[planes_count].address,
9174 afb->tmz_surface, false);
9175
9176 DRM_DEBUG_ATOMIC("plane: id=%d dcc_en=%d\n",
9177 new_plane_state->plane->index,
9178 bundle->plane_infos[planes_count].dcc.enable);
9179
9180 bundle->surface_updates[planes_count].plane_info =
9181 &bundle->plane_infos[planes_count];
9182
9183 /*
9184 * Only allow immediate flips for fast updates that don't
 9185		 * change FB pitch, DCC state, rotation or mirroring.
9186 */
9187 bundle->flip_addrs[planes_count].flip_immediate =
9188 crtc->state->async_flip &&
9189 acrtc_state->update_type == UPDATE_TYPE_FAST;
9190
9191 timestamp_ns = ktime_get_ns();
9192 bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
9193 bundle->surface_updates[planes_count].flip_addr = &bundle->flip_addrs[planes_count];
9194 bundle->surface_updates[planes_count].surface = dc_plane;
9195
9196 if (!bundle->surface_updates[planes_count].surface) {
9197 DRM_ERROR("No surface for CRTC: id=%d\n",
9198 acrtc_attach->crtc_id);
9199 continue;
9200 }
9201
9202 if (plane == pcrtc->primary)
9203 update_freesync_state_on_stream(
9204 dm,
9205 acrtc_state,
9206 acrtc_state->stream,
9207 dc_plane,
9208 bundle->flip_addrs[planes_count].flip_timestamp_in_us);
9209
9210 DRM_DEBUG_ATOMIC("%s Flipping to hi: 0x%x, low: 0x%x\n",
9211 __func__,
9212 bundle->flip_addrs[planes_count].address.grph.addr.high_part,
9213 bundle->flip_addrs[planes_count].address.grph.addr.low_part);
9214
9215 planes_count += 1;
9216
9217 }
9218
9219 if (pflip_present) {
9220 if (!vrr_active) {
9221 /* Use old throttling in non-vrr fixed refresh rate mode
9222 * to keep flip scheduling based on target vblank counts
9223 * working in a backwards compatible way, e.g., for
9224 * clients using the GLX_OML_sync_control extension or
9225 * DRI3/Present extension with defined target_msc.
9226 */
9227 last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc);
9228 }
9229 else {
9230 /* For variable refresh rate mode only:
9231 * Get vblank of last completed flip to avoid > 1 vrr
9232 * flips per video frame by use of throttling, but allow
9233 * flip programming anywhere in the possibly large
9234 * variable vrr vblank interval for fine-grained flip
9235 * timing control and more opportunity to avoid stutter
9236 * on late submission of flips.
9237 */
9238 spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
9239 last_flip_vblank = acrtc_attach->dm_irq_params.last_flip_vblank;
9240 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
9241 }
9242
9243 target_vblank = last_flip_vblank + wait_for_vblank;
9244
9245 /*
9246 * Wait until we're out of the vertical blank period before the one
9247 * targeted by the flip
9248 */
9249 while ((acrtc_attach->enabled &&
9250 (amdgpu_display_get_crtc_scanoutpos(dm->ddev, acrtc_attach->crtc_id,
9251 0, &vpos, &hpos, NULL,
9252 NULL, &pcrtc->hwmode)
9253 & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
9254 (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
9255 (int)(target_vblank -
9256 amdgpu_get_vblank_counter_kms(pcrtc)) > 0)) {
9257 usleep_range(1000, 1100);
9258 }
9259
9260 /**
9261 * Prepare the flip event for the pageflip interrupt to handle.
9262 *
9263 * This only works in the case where we've already turned on the
9264 * appropriate hardware blocks (eg. HUBP) so in the transition case
9265 * from 0 -> n planes we have to skip a hardware generated event
9266 * and rely on sending it from software.
9267 */
9268 if (acrtc_attach->base.state->event &&
9269 acrtc_state->active_planes > 0 &&
9270 !acrtc_state->force_dpms_off) {
9271 drm_crtc_vblank_get(pcrtc);
9272
9273 spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
9274
9275 WARN_ON(acrtc_attach->pflip_status != AMDGPU_FLIP_NONE);
9276 prepare_flip_isr(acrtc_attach);
9277
9278 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
9279 }
9280
9281 if (acrtc_state->stream) {
9282 if (acrtc_state->freesync_vrr_info_changed)
9283 bundle->stream_update.vrr_infopacket =
9284 &acrtc_state->stream->vrr_infopacket;
9285 }
9286 }
9287
9288 /* Update the planes if changed or disable if we don't have any. */
9289 if ((planes_count || acrtc_state->active_planes == 0) &&
9290 acrtc_state->stream) {
9291#if defined(CONFIG_DRM_AMD_DC_DCN)
9292 /*
9293 * If PSR or idle optimizations are enabled then flush out
9294 * any pending work before hardware programming.
9295 */
9296 if (dm->vblank_control_workqueue)
9297 flush_workqueue(dm->vblank_control_workqueue);
9298#endif
9299
9300 bundle->stream_update.stream = acrtc_state->stream;
9301 if (new_pcrtc_state->mode_changed) {
9302 bundle->stream_update.src = acrtc_state->stream->src;
9303 bundle->stream_update.dst = acrtc_state->stream->dst;
9304 }
9305
9306 if (new_pcrtc_state->color_mgmt_changed) {
9307 /*
9308 * TODO: This isn't fully correct since we've actually
9309 * already modified the stream in place.
9310 */
9311 bundle->stream_update.gamut_remap =
9312 &acrtc_state->stream->gamut_remap_matrix;
9313 bundle->stream_update.output_csc_transform =
9314 &acrtc_state->stream->csc_color_matrix;
9315 bundle->stream_update.out_transfer_func =
9316 acrtc_state->stream->out_transfer_func;
9317 }
9318
9319 acrtc_state->stream->abm_level = acrtc_state->abm_level;
9320 if (acrtc_state->abm_level != dm_old_crtc_state->abm_level)
9321 bundle->stream_update.abm_level = &acrtc_state->abm_level;
9322
9323 /*
9324 * If FreeSync state on the stream has changed then we need to
9325 * re-adjust the min/max bounds now that DC doesn't handle this
9326 * as part of commit.
9327 */
9328 if (is_dc_timing_adjust_needed(dm_old_crtc_state, acrtc_state)) {
9329 spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
9330 dc_stream_adjust_vmin_vmax(
9331 dm->dc, acrtc_state->stream,
9332 &acrtc_attach->dm_irq_params.vrr_params.adjust);
9333 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
9334 }
9335 mutex_lock(&dm->dc_lock);
9336 if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
9337 acrtc_state->stream->link->psr_settings.psr_allow_active)
9338 amdgpu_dm_psr_disable(acrtc_state->stream);
9339
9340 dc_commit_updates_for_stream(dm->dc,
9341 bundle->surface_updates,
9342 planes_count,
9343 acrtc_state->stream,
9344 &bundle->stream_update,
9345 dc_state);
9346
9347 /**
9348 * Enable or disable the interrupts on the backend.
9349 *
9350 * Most pipes are put into power gating when unused.
9351 *
 9352		 * When a pipe is power gated we lose its interrupt enablement
 9353		 * state, and it is not restored when power gating is disabled.
9354 *
9355 * So we need to update the IRQ control state in hardware
9356 * whenever the pipe turns on (since it could be previously
9357 * power gated) or off (since some pipes can't be power gated
9358 * on some ASICs).
9359 */
9360 if (dm_old_crtc_state->active_planes != acrtc_state->active_planes)
9361 dm_update_pflip_irq_state(drm_to_adev(dev),
9362 acrtc_attach);
9363
9364 if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
9365 acrtc_state->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED &&
9366 !acrtc_state->stream->link->psr_settings.psr_feature_enabled)
9367 amdgpu_dm_link_setup_psr(acrtc_state->stream);
9368
9369 /* Decrement skip count when PSR is enabled and we're doing fast updates. */
9370 if (acrtc_state->update_type == UPDATE_TYPE_FAST &&
9371 acrtc_state->stream->link->psr_settings.psr_feature_enabled) {
9372 struct amdgpu_dm_connector *aconn =
9373 (struct amdgpu_dm_connector *)acrtc_state->stream->dm_stream_context;
9374
9375 if (aconn->psr_skip_count > 0)
9376 aconn->psr_skip_count--;
9377
9378 /* Allow PSR when skip count is 0. */
9379 acrtc_attach->dm_irq_params.allow_psr_entry = !aconn->psr_skip_count;
9380 } else {
9381 acrtc_attach->dm_irq_params.allow_psr_entry = false;
9382 }
9383
9384 mutex_unlock(&dm->dc_lock);
9385 }
9386
9387 /*
9388 * Update cursor state *after* programming all the planes.
 9389	 * This avoids redundant programming in the case where we're
 9390	 * disabling a single plane - those pipes are being disabled anyway.
9391 */
9392 if (acrtc_state->active_planes)
9393 amdgpu_dm_commit_cursors(state);
9394
9395cleanup:
9396 kfree(bundle);
9397}
9398
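/*
 * Notify the audio component about ELD changes: first signal removals for
 * connectors whose CRTC changed or needs a modeset, then signal additions
 * for connectors whose CRTC goes through a modeset and has an active stream.
 */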
9399static void amdgpu_dm_commit_audio(struct drm_device *dev,
9400 struct drm_atomic_state *state)
9401{
9402 struct amdgpu_device *adev = drm_to_adev(dev);
9403 struct amdgpu_dm_connector *aconnector;
9404 struct drm_connector *connector;
9405 struct drm_connector_state *old_con_state, *new_con_state;
9406 struct drm_crtc_state *new_crtc_state;
9407 struct dm_crtc_state *new_dm_crtc_state;
9408 const struct dc_stream_status *status;
9409 int i, inst;
9410
9411 /* Notify device removals. */
9412 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9413 if (old_con_state->crtc != new_con_state->crtc) {
9414 /* CRTC changes require notification. */
9415 goto notify;
9416 }
9417
9418 if (!new_con_state->crtc)
9419 continue;
9420
9421 new_crtc_state = drm_atomic_get_new_crtc_state(
9422 state, new_con_state->crtc);
9423
9424 if (!new_crtc_state)
9425 continue;
9426
9427 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
9428 continue;
9429
9430 notify:
9431 aconnector = to_amdgpu_dm_connector(connector);
9432
9433 mutex_lock(&adev->dm.audio_lock);
9434 inst = aconnector->audio_inst;
9435 aconnector->audio_inst = -1;
9436 mutex_unlock(&adev->dm.audio_lock);
9437
9438 amdgpu_dm_audio_eld_notify(adev, inst);
9439 }
9440
9441 /* Notify audio device additions. */
9442 for_each_new_connector_in_state(state, connector, new_con_state, i) {
9443 if (!new_con_state->crtc)
9444 continue;
9445
9446 new_crtc_state = drm_atomic_get_new_crtc_state(
9447 state, new_con_state->crtc);
9448
9449 if (!new_crtc_state)
9450 continue;
9451
9452 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
9453 continue;
9454
9455 new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
9456 if (!new_dm_crtc_state->stream)
9457 continue;
9458
9459 status = dc_stream_get_status(new_dm_crtc_state->stream);
9460 if (!status)
9461 continue;
9462
9463 aconnector = to_amdgpu_dm_connector(connector);
9464
9465 mutex_lock(&adev->dm.audio_lock);
9466 inst = status->audio_inst;
9467 aconnector->audio_inst = inst;
9468 mutex_unlock(&adev->dm.audio_lock);
9469
9470 amdgpu_dm_audio_eld_notify(adev, inst);
9471 }
9472}
9473
9474/*
9475 * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
9476 * @crtc_state: the DRM CRTC state
9477 * @stream_state: the DC stream state.
9478 *
 9479 * Copy the mirrored transient state flags from DRM to DC. It is used to bring
9480 * a dc_stream_state's flags in sync with a drm_crtc_state's flags.
9481 */
9482static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
9483 struct dc_stream_state *stream_state)
9484{
9485 stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
9486}
9487
9488/**
9489 * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
9490 * @state: The atomic state to commit
9491 *
9492 * This will tell DC to commit the constructed DC state from atomic_check,
 9493 * programming the hardware. Any failure here implies a hardware failure, since
9494 * atomic check should have filtered anything non-kosher.
9495 */
9496static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
9497{
9498 struct drm_device *dev = state->dev;
9499 struct amdgpu_device *adev = drm_to_adev(dev);
9500 struct amdgpu_display_manager *dm = &adev->dm;
9501 struct dm_atomic_state *dm_state;
9502 struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
9503 uint32_t i, j;
9504 struct drm_crtc *crtc;
9505 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
9506 unsigned long flags;
9507 bool wait_for_vblank = true;
9508 struct drm_connector *connector;
9509 struct drm_connector_state *old_con_state, *new_con_state;
9510 struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
9511 int crtc_disable_count = 0;
9512 bool mode_set_reset_required = false;
9513
9514 trace_amdgpu_dm_atomic_commit_tail_begin(state);
9515
9516 drm_atomic_helper_update_legacy_modeset_state(dev, state);
9517
9518 dm_state = dm_atomic_get_new_state(state);
9519 if (dm_state && dm_state->context) {
9520 dc_state = dm_state->context;
9521 } else {
9522 /* No state changes, retain current state. */
9523 dc_state_temp = dc_create_state(dm->dc);
9524 ASSERT(dc_state_temp);
9525 dc_state = dc_state_temp;
9526 dc_resource_state_copy_construct_current(dm->dc, dc_state);
9527 }
9528
9529 for_each_oldnew_crtc_in_state (state, crtc, old_crtc_state,
9530 new_crtc_state, i) {
9531 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
9532
9533 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9534
9535 if (old_crtc_state->active &&
9536 (!new_crtc_state->active ||
9537 drm_atomic_crtc_needs_modeset(new_crtc_state))) {
9538 manage_dm_interrupts(adev, acrtc, false);
9539 dc_stream_release(dm_old_crtc_state->stream);
9540 }
9541 }
9542
9543 drm_atomic_helper_calc_timestamping_constants(state);
9544
9545 /* update changed items */
9546 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9547 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
9548
9549 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9550 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9551
9552 DRM_DEBUG_ATOMIC(
9553 "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
9554 "planes_changed:%d, mode_changed:%d,active_changed:%d,"
9555 "connectors_changed:%d\n",
9556 acrtc->crtc_id,
9557 new_crtc_state->enable,
9558 new_crtc_state->active,
9559 new_crtc_state->planes_changed,
9560 new_crtc_state->mode_changed,
9561 new_crtc_state->active_changed,
9562 new_crtc_state->connectors_changed);
9563
9564 /* Disable cursor if disabling crtc */
9565 if (old_crtc_state->active && !new_crtc_state->active) {
9566 struct dc_cursor_position position;
9567
9568 memset(&position, 0, sizeof(position));
9569 mutex_lock(&dm->dc_lock);
9570 dc_stream_set_cursor_position(dm_old_crtc_state->stream, &position);
9571 mutex_unlock(&dm->dc_lock);
9572 }
9573
9574 /* Copy all transient state flags into dc state */
9575 if (dm_new_crtc_state->stream) {
9576 amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base,
9577 dm_new_crtc_state->stream);
9578 }
9579
9580 /* handles headless hotplug case, updating new_state and
9581 * aconnector as needed
9582 */
9583
9584 if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {
9585
9586 DRM_DEBUG_ATOMIC("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);
9587
9588 if (!dm_new_crtc_state->stream) {
9589 /*
 9590				 * This could happen because of issues with
 9591				 * userspace notification delivery.
 9592				 * In that case userspace tries to set a mode on a
 9593				 * display which is in fact disconnected.
 9594				 * dc_sink is NULL on the aconnector here.
 9595				 * We expect a reset mode to come soon.
 9596				 *
 9597				 * This can also happen when an unplug occurs
 9598				 * during the resume sequence.
 9599				 *
 9600				 * In this case, we want to pretend we still
 9601				 * have a sink to keep the pipe running so that
 9602				 * hw state is consistent with the sw state.
9603 */
9604 DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
9605 __func__, acrtc->base.base.id);
9606 continue;
9607 }
9608
9609 if (dm_old_crtc_state->stream)
9610 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
9611
9612 pm_runtime_get_noresume(dev->dev);
9613
9614 acrtc->enabled = true;
9615 acrtc->hw_mode = new_crtc_state->mode;
9616 crtc->hwmode = new_crtc_state->mode;
9617 mode_set_reset_required = true;
9618 } else if (modereset_required(new_crtc_state)) {
9619 DRM_DEBUG_ATOMIC("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
9620 /* i.e. reset mode */
9621 if (dm_old_crtc_state->stream)
9622 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
9623
9624 mode_set_reset_required = true;
9625 }
9626 } /* for_each_crtc_in_state() */
9627
9628 if (dc_state) {
 9629		/* if there is a mode set or reset, disable eDP PSR */
9630 if (mode_set_reset_required) {
9631#if defined(CONFIG_DRM_AMD_DC_DCN)
9632 if (dm->vblank_control_workqueue)
9633 flush_workqueue(dm->vblank_control_workqueue);
9634#endif
9635 amdgpu_dm_psr_disable_all(dm);
9636 }
9637
9638 dm_enable_per_frame_crtc_master_sync(dc_state);
9639 mutex_lock(&dm->dc_lock);
9640 WARN_ON(!dc_commit_state(dm->dc, dc_state));
9641#if defined(CONFIG_DRM_AMD_DC_DCN)
9642 /* Allow idle optimization when vblank count is 0 for display off */
9643 if (dm->active_vblank_irq_count == 0)
 9644			dc_allow_idle_optimizations(dm->dc, true);
9645#endif
9646 mutex_unlock(&dm->dc_lock);
9647 }
9648
9649 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
9650 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
9651
9652 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9653
9654 if (dm_new_crtc_state->stream != NULL) {
9655 const struct dc_stream_status *status =
9656 dc_stream_get_status(dm_new_crtc_state->stream);
9657
9658 if (!status)
9659 status = dc_stream_get_status_from_state(dc_state,
9660 dm_new_crtc_state->stream);
9661 if (!status)
9662 DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc);
9663 else
9664 acrtc->otg_inst = status->primary_otg_inst;
9665 }
9666 }
9667#ifdef CONFIG_DRM_AMD_DC_HDCP
9668 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9669 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
9670 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
9671 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
9672
9673 new_crtc_state = NULL;
9674
9675 if (acrtc)
9676 new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
9677
9678 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9679
9680 if (dm_new_crtc_state && dm_new_crtc_state->stream == NULL &&
9681 connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
9682 hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
9683 new_con_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
9684 dm_new_con_state->update_hdcp = true;
9685 continue;
9686 }
9687
9688 if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue))
9689 hdcp_update_display(
9690 adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector,
9691 new_con_state->hdcp_content_type,
9692 new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED);
9693 }
9694#endif
9695
9696 /* Handle connector state changes */
9697 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9698 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
9699 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
9700 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
9701 struct dc_surface_update dummy_updates[MAX_SURFACES];
9702 struct dc_stream_update stream_update;
9703 struct dc_info_packet hdr_packet;
9704 struct dc_stream_status *status = NULL;
9705 bool abm_changed, hdr_changed, scaling_changed;
9706
9707 memset(&dummy_updates, 0, sizeof(dummy_updates));
9708 memset(&stream_update, 0, sizeof(stream_update));
9709
9710 if (acrtc) {
9711 new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
9712 old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
9713 }
9714
9715 /* Skip any modesets/resets */
9716 if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
9717 continue;
9718
9719 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9720 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9721
9722 scaling_changed = is_scaling_state_different(dm_new_con_state,
9723 dm_old_con_state);
9724
9725 abm_changed = dm_new_crtc_state->abm_level !=
9726 dm_old_crtc_state->abm_level;
9727
9728 hdr_changed =
9729 !drm_connector_atomic_hdr_metadata_equal(old_con_state, new_con_state);
9730
9731 if (!scaling_changed && !abm_changed && !hdr_changed)
9732 continue;
9733
9734 stream_update.stream = dm_new_crtc_state->stream;
9735 if (scaling_changed) {
9736 update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
9737 dm_new_con_state, dm_new_crtc_state->stream);
9738
9739 stream_update.src = dm_new_crtc_state->stream->src;
9740 stream_update.dst = dm_new_crtc_state->stream->dst;
9741 }
9742
9743 if (abm_changed) {
9744 dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level;
9745
9746 stream_update.abm_level = &dm_new_crtc_state->abm_level;
9747 }
9748
9749 if (hdr_changed) {
9750 fill_hdr_info_packet(new_con_state, &hdr_packet);
9751 stream_update.hdr_static_metadata = &hdr_packet;
9752 }
9753
9754 status = dc_stream_get_status(dm_new_crtc_state->stream);
9755
9756 if (WARN_ON(!status))
9757 continue;
9758
9759 WARN_ON(!status->plane_count);
9760
9761 /*
9762 * TODO: DC refuses to perform stream updates without a dc_surface_update.
9763 * Here we create an empty update on each plane.
9764 * To fix this, DC should permit updating only stream properties.
9765 */
9766 for (j = 0; j < status->plane_count; j++)
9767 dummy_updates[j].surface = status->plane_states[0];
9768
9769
9770 mutex_lock(&dm->dc_lock);
9771 dc_commit_updates_for_stream(dm->dc,
9772 dummy_updates,
9773 status->plane_count,
9774 dm_new_crtc_state->stream,
9775 &stream_update,
9776 dc_state);
9777 mutex_unlock(&dm->dc_lock);
9778 }
9779
9780 /* Count number of newly disabled CRTCs for dropping PM refs later. */
9781 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
9782 new_crtc_state, i) {
9783 if (old_crtc_state->active && !new_crtc_state->active)
9784 crtc_disable_count++;
9785
9786 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9787 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9788
9789 /* For freesync config update on crtc state and params for irq */
9790 update_stream_irq_parameters(dm, dm_new_crtc_state);
9791
9792 /* Handle vrr on->off / off->on transitions */
9793 amdgpu_dm_handle_vrr_transition(dm_old_crtc_state,
9794 dm_new_crtc_state);
9795 }
9796
9797 /**
9798 * Enable interrupts for CRTCs that are newly enabled or went through
 9799	 * a modeset. This is intentionally deferred until after the front end
 9800	 * state has been modified, so that the OTG is on by the time the IRQ
 9801	 * handlers run and they don't access stale or invalid state.
9802 */
9803 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9804 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
9805#ifdef CONFIG_DEBUG_FS
9806 bool configure_crc = false;
9807 enum amdgpu_dm_pipe_crc_source cur_crc_src;
9808#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
9809 struct crc_rd_work *crc_rd_wrk = dm->crc_rd_wrk;
9810#endif
9811 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9812 cur_crc_src = acrtc->dm_irq_params.crc_src;
9813 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9814#endif
9815 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9816
9817 if (new_crtc_state->active &&
9818 (!old_crtc_state->active ||
9819 drm_atomic_crtc_needs_modeset(new_crtc_state))) {
9820 dc_stream_retain(dm_new_crtc_state->stream);
9821 acrtc->dm_irq_params.stream = dm_new_crtc_state->stream;
9822 manage_dm_interrupts(adev, acrtc, true);
9823
9824#ifdef CONFIG_DEBUG_FS
9825 /**
9826 * Frontend may have changed so reapply the CRC capture
9827 * settings for the stream.
9828 */
9829 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9830
9831 if (amdgpu_dm_is_valid_crc_source(cur_crc_src)) {
9832 configure_crc = true;
9833#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
9834 if (amdgpu_dm_crc_window_is_activated(crtc)) {
9835 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9836 acrtc->dm_irq_params.crc_window.update_win = true;
9837 acrtc->dm_irq_params.crc_window.skip_frame_cnt = 2;
9838 spin_lock_irq(&crc_rd_wrk->crc_rd_work_lock);
9839 crc_rd_wrk->crtc = crtc;
9840 spin_unlock_irq(&crc_rd_wrk->crc_rd_work_lock);
9841 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9842 }
9843#endif
9844 }
9845
9846 if (configure_crc)
9847 if (amdgpu_dm_crtc_configure_crc_source(
9848 crtc, dm_new_crtc_state, cur_crc_src))
9849 DRM_DEBUG_DRIVER("Failed to configure crc source");
9850#endif
9851 }
9852 }
9853
9854 for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
9855 if (new_crtc_state->async_flip)
9856 wait_for_vblank = false;
9857
9858 /* update planes when needed per crtc*/
9859 for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
9860 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9861
9862 if (dm_new_crtc_state->stream)
9863 amdgpu_dm_commit_planes(state, dc_state, dev,
9864 dm, crtc, wait_for_vblank);
9865 }
9866
9867 /* Update audio instances for each connector. */
9868 amdgpu_dm_commit_audio(dev, state);
9869
9870#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) || \
9871 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
9872 /* restore the backlight level */
9873 for (i = 0; i < dm->num_of_edps; i++) {
9874 if (dm->backlight_dev[i] &&
9875 (amdgpu_dm_backlight_get_level(dm, i) != dm->brightness[i]))
9876 amdgpu_dm_backlight_set_level(dm, i, dm->brightness[i]);
9877 }
9878#endif
9879 /*
 9880	 * Send a vblank event for every event not handled in the flip path and
 9881	 * mark the event consumed for drm_atomic_helper_commit_hw_done.
9882 */
9883 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9884 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
9885
9886 if (new_crtc_state->event)
9887 drm_send_event_locked(dev, &new_crtc_state->event->base);
9888
9889 new_crtc_state->event = NULL;
9890 }
9891 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9892
9893 /* Signal HW programming completion */
9894 drm_atomic_helper_commit_hw_done(state);
9895
9896 if (wait_for_vblank)
9897 drm_atomic_helper_wait_for_flip_done(dev, state);
9898
9899 drm_atomic_helper_cleanup_planes(dev, state);
9900
9901 /* return the stolen vga memory back to VRAM */
9902 if (!adev->mman.keep_stolen_vga_memory)
9903 amdgpu_bo_free_kernel(&adev->mman.stolen_vga_memory, NULL, NULL);
9904 amdgpu_bo_free_kernel(&adev->mman.stolen_extended_memory, NULL, NULL);
9905
9906 /*
9907 * Finally, drop a runtime PM reference for each newly disabled CRTC,
9908 * so we can put the GPU into runtime suspend if we're not driving any
9909 * displays anymore
9910 */
9911 for (i = 0; i < crtc_disable_count; i++)
9912 pm_runtime_put_autosuspend(dev->dev);
9913 pm_runtime_mark_last_busy(dev->dev);
9914
9915 if (dc_state_temp)
9916 dc_release_state(dc_state_temp);
9917}
9918
9919
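/*
 * Build a minimal atomic state containing the connector, its CRTC and the
 * CRTC's primary plane, force mode_changed on the CRTC and commit it to
 * restore the previous display configuration.
 */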
9920static int dm_force_atomic_commit(struct drm_connector *connector)
9921{
9922 int ret = 0;
9923 struct drm_device *ddev = connector->dev;
9924 struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
9925 struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
9926 struct drm_plane *plane = disconnected_acrtc->base.primary;
9927 struct drm_connector_state *conn_state;
9928 struct drm_crtc_state *crtc_state;
9929 struct drm_plane_state *plane_state;
9930
9931 if (!state)
9932 return -ENOMEM;
9933
9934 state->acquire_ctx = ddev->mode_config.acquire_ctx;
9935
9936 /* Construct an atomic state to restore previous display setting */
9937
9938 /*
9939 * Attach connectors to drm_atomic_state
9940 */
9941 conn_state = drm_atomic_get_connector_state(state, connector);
9942
9943 ret = PTR_ERR_OR_ZERO(conn_state);
9944 if (ret)
9945 goto out;
9946
9947 /* Attach crtc to drm_atomic_state*/
9948 crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
9949
9950 ret = PTR_ERR_OR_ZERO(crtc_state);
9951 if (ret)
9952 goto out;
9953
9954 /* force a restore */
9955 crtc_state->mode_changed = true;
9956
9957 /* Attach plane to drm_atomic_state */
9958 plane_state = drm_atomic_get_plane_state(state, plane);
9959
9960 ret = PTR_ERR_OR_ZERO(plane_state);
9961 if (ret)
9962 goto out;
9963
9964 /* Call commit internally with the state we just constructed */
9965 ret = drm_atomic_commit(state);
9966
9967out:
9968 drm_atomic_state_put(state);
9969 if (ret)
9970 DRM_ERROR("Restoring old state failed with %i\n", ret);
9971
9972 return ret;
9973}
9974
9975/*
9976 * This function handles all cases when set mode does not come upon hotplug.
9977 * This includes when a display is unplugged then plugged back into the
 9978 * same port, and when running without usermode desktop manager support.
9979 */
9980void dm_restore_drm_connector_state(struct drm_device *dev,
9981 struct drm_connector *connector)
9982{
9983 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
9984 struct amdgpu_crtc *disconnected_acrtc;
9985 struct dm_crtc_state *acrtc_state;
9986
9987 if (!aconnector->dc_sink || !connector->state || !connector->encoder)
9988 return;
9989
9990 disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
9991 if (!disconnected_acrtc)
9992 return;
9993
9994 acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
9995 if (!acrtc_state->stream)
9996 return;
9997
9998 /*
 9999	 * If the previous sink is not released and differs from the current one,
 10000	 * we deduce we are in a state where we cannot rely on a usermode call
 10001	 * to turn on the display, so we do it here.
10002 */
10003 if (acrtc_state->stream->sink != aconnector->dc_sink)
10004 dm_force_atomic_commit(&aconnector->base);
10005}
10006
10007/*
 10008 * Grabs all modesetting locks to serialize against any blocking commits
 10009 * and waits for completion of all non-blocking commits.
10010 */
10011static int do_aquire_global_lock(struct drm_device *dev,
10012 struct drm_atomic_state *state)
10013{
10014 struct drm_crtc *crtc;
10015 struct drm_crtc_commit *commit;
10016 long ret;
10017
10018 /*
 10019	 * Adding all modeset locks to acquire_ctx will
 10020	 * ensure that when the framework releases it, the
 10021	 * extra locks we are taking here will get released too.
10022 */
10023 ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
10024 if (ret)
10025 return ret;
10026
10027 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
10028 spin_lock(&crtc->commit_lock);
10029 commit = list_first_entry_or_null(&crtc->commit_list,
10030 struct drm_crtc_commit, commit_entry);
10031 if (commit)
10032 drm_crtc_commit_get(commit);
10033 spin_unlock(&crtc->commit_lock);
10034
10035 if (!commit)
10036 continue;
10037
10038 /*
 10039		 * Make sure all pending HW programming has completed and
 10040		 * all page flips are done.
10041 */
10042 ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);
10043
10044 if (ret > 0)
10045 ret = wait_for_completion_interruptible_timeout(
10046 &commit->flip_done, 10*HZ);
10047
10048 if (ret == 0)
10049 DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done "
10050 "timed out\n", crtc->base.id, crtc->name);
10051
10052 drm_crtc_commit_put(commit);
10053 }
10054
10055 return ret < 0 ? ret : 0;
10056}
10057
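/*
 * Derive the mod_freesync_config for this CRTC from the connector's
 * min/max refresh range and the mode's vrefresh: VRR is supported only
 * when the mode's refresh rate falls inside that range, and the VRR state
 * follows the fixed-refresh and vrr_enabled flags.
 */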
10058static void get_freesync_config_for_crtc(
10059 struct dm_crtc_state *new_crtc_state,
10060 struct dm_connector_state *new_con_state)
10061{
10062 struct mod_freesync_config config = {0};
10063 struct amdgpu_dm_connector *aconnector =
10064 to_amdgpu_dm_connector(new_con_state->base.connector);
10065 struct drm_display_mode *mode = &new_crtc_state->base.mode;
10066 int vrefresh = drm_mode_vrefresh(mode);
10067 bool fs_vid_mode = false;
10068
10069 new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
10070 vrefresh >= aconnector->min_vfreq &&
10071 vrefresh <= aconnector->max_vfreq;
10072
10073 if (new_crtc_state->vrr_supported) {
10074 new_crtc_state->stream->ignore_msa_timing_param = true;
10075 fs_vid_mode = new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
10076
10077 config.min_refresh_in_uhz = aconnector->min_vfreq * 1000000;
10078 config.max_refresh_in_uhz = aconnector->max_vfreq * 1000000;
10079 config.vsif_supported = true;
10080 config.btr = true;
10081
10082 if (fs_vid_mode) {
10083 config.state = VRR_STATE_ACTIVE_FIXED;
10084 config.fixed_refresh_in_uhz = new_crtc_state->freesync_config.fixed_refresh_in_uhz;
10085 goto out;
10086 } else if (new_crtc_state->base.vrr_enabled) {
10087 config.state = VRR_STATE_ACTIVE_VARIABLE;
10088 } else {
10089 config.state = VRR_STATE_INACTIVE;
10090 }
10091 }
10092out:
10093 new_crtc_state->freesync_config = config;
10094}
10095
10096static void reset_freesync_config_for_crtc(
10097 struct dm_crtc_state *new_crtc_state)
10098{
10099 new_crtc_state->vrr_supported = false;
10100
10101 memset(&new_crtc_state->vrr_infopacket, 0,
10102 sizeof(new_crtc_state->vrr_infopacket));
10103}
10104
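/*
 * Two modes are considered "timing unchanged" for freesync purposes when
 * they differ only in vtotal and vsync placement (with the same vsync
 * width), i.e. a pure front porch change that needs no full modeset.
 */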
10105static bool
10106is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
10107 struct drm_crtc_state *new_crtc_state)
10108{
10109 struct drm_display_mode old_mode, new_mode;
10110
10111 if (!old_crtc_state || !new_crtc_state)
10112 return false;
10113
10114 old_mode = old_crtc_state->mode;
10115 new_mode = new_crtc_state->mode;
10116
10117 if (old_mode.clock == new_mode.clock &&
10118 old_mode.hdisplay == new_mode.hdisplay &&
10119 old_mode.vdisplay == new_mode.vdisplay &&
10120 old_mode.htotal == new_mode.htotal &&
10121 old_mode.vtotal != new_mode.vtotal &&
10122 old_mode.hsync_start == new_mode.hsync_start &&
10123 old_mode.vsync_start != new_mode.vsync_start &&
10124 old_mode.hsync_end == new_mode.hsync_end &&
10125 old_mode.vsync_end != new_mode.vsync_end &&
10126 old_mode.hskew == new_mode.hskew &&
10127 old_mode.vscan == new_mode.vscan &&
10128 (old_mode.vsync_end - old_mode.vsync_start) ==
10129 (new_mode.vsync_end - new_mode.vsync_start))
10130 return true;
10131
10132 return false;
10133}
10134
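/*
 * Compute the fixed refresh rate in uHz for a freesync video mode:
 * pixel clock in Hz (mode.clock is in kHz) scaled to uHz and divided by
 * the number of pixels per frame (htotal * vtotal).
 */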
10135static void set_freesync_fixed_config(struct dm_crtc_state *dm_new_crtc_state) {
10136 uint64_t num, den, res;
10137 struct drm_crtc_state *new_crtc_state = &dm_new_crtc_state->base;
10138
10139 dm_new_crtc_state->freesync_config.state = VRR_STATE_ACTIVE_FIXED;
10140
10141 num = (unsigned long long)new_crtc_state->mode.clock * 1000 * 1000000;
10142 den = (unsigned long long)new_crtc_state->mode.htotal *
10143 (unsigned long long)new_crtc_state->mode.vtotal;
10144
10145 res = div_u64(num, den);
10146 dm_new_crtc_state->freesync_config.fixed_refresh_in_uhz = res;
10147}
10148
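/*
 * Atomic-check helper for a single CRTC: create and validate a new
 * dc_stream for an enabled CRTC (or remove the old stream on disable),
 * then apply scaling, ABM, color management and freesync settings.
 */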
10149static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
10150 struct drm_atomic_state *state,
10151 struct drm_crtc *crtc,
10152 struct drm_crtc_state *old_crtc_state,
10153 struct drm_crtc_state *new_crtc_state,
10154 bool enable,
10155 bool *lock_and_validation_needed)
10156{
10157 struct dm_atomic_state *dm_state = NULL;
10158 struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
10159 struct dc_stream_state *new_stream;
10160 int ret = 0;
10161
10162 /*
10163 * TODO Move this code into dm_crtc_atomic_check once we get rid of dc_validation_set
10164 * update changed items
10165 */
10166 struct amdgpu_crtc *acrtc = NULL;
10167 struct amdgpu_dm_connector *aconnector = NULL;
10168 struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
10169 struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;
10170
10171 new_stream = NULL;
10172
10173 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
10174 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
10175 acrtc = to_amdgpu_crtc(crtc);
10176 aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);
10177
10178 /* TODO This hack should go away */
10179 if (aconnector && enable) {
10180 /* Make sure fake sink is created in plug-in scenario */
10181 drm_new_conn_state = drm_atomic_get_new_connector_state(state,
10182 &aconnector->base);
10183 drm_old_conn_state = drm_atomic_get_old_connector_state(state,
10184 &aconnector->base);
10185
10186 if (IS_ERR(drm_new_conn_state)) {
10187 ret = PTR_ERR_OR_ZERO(drm_new_conn_state);
10188 goto fail;
10189 }
10190
10191 dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
10192 dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);
10193
10194 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
10195 goto skip_modeset;
10196
10197 new_stream = create_validate_stream_for_sink(aconnector,
10198 &new_crtc_state->mode,
10199 dm_new_conn_state,
10200 dm_old_crtc_state->stream);
10201
10202 /*
 10203		 * We can have no stream on ACTION_SET if a display
 10204		 * was disconnected during S3; in this case it is not an
 10205		 * error, the OS will be updated after detection and
 10206		 * will do the right thing on the next atomic commit.
10207 */
10208
10209 if (!new_stream) {
10210 DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
10211 __func__, acrtc->base.base.id);
10212 ret = -ENOMEM;
10213 goto fail;
10214 }
10215
10216 /*
10217 * TODO: Check VSDB bits to decide whether this should
10218 * be enabled or not.
10219 */
10220 new_stream->triggered_crtc_reset.enabled =
10221 dm->force_timing_sync;
10222
10223 dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
10224
10225 ret = fill_hdr_info_packet(drm_new_conn_state,
10226 &new_stream->hdr_static_metadata);
10227 if (ret)
10228 goto fail;
10229
10230 /*
10231 * If we already removed the old stream from the context
10232 * (and set the new stream to NULL) then we can't reuse
10233 * the old stream even if the stream and scaling are unchanged.
 10234		 * We'll hit the BUG_ON and get a black screen.
10235 *
10236 * TODO: Refactor this function to allow this check to work
10237 * in all conditions.
10238 */
10239 if (amdgpu_freesync_vid_mode &&
10240 dm_new_crtc_state->stream &&
10241 is_timing_unchanged_for_freesync(new_crtc_state, old_crtc_state))
10242 goto skip_modeset;
10243
10244 if (dm_new_crtc_state->stream &&
10245 dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
10246 dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
10247 new_crtc_state->mode_changed = false;
10248 DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d",
10249 new_crtc_state->mode_changed);
10250 }
10251 }
10252
10253 /* mode_changed flag may get updated above, need to check again */
10254 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
10255 goto skip_modeset;
10256
10257 DRM_DEBUG_ATOMIC(
10258 "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
10259 "planes_changed:%d, mode_changed:%d,active_changed:%d,"
10260 "connectors_changed:%d\n",
10261 acrtc->crtc_id,
10262 new_crtc_state->enable,
10263 new_crtc_state->active,
10264 new_crtc_state->planes_changed,
10265 new_crtc_state->mode_changed,
10266 new_crtc_state->active_changed,
10267 new_crtc_state->connectors_changed);
10268
10269 /* Remove stream for any changed/disabled CRTC */
10270 if (!enable) {
10271
10272 if (!dm_old_crtc_state->stream)
10273 goto skip_modeset;
10274
10275 if (amdgpu_freesync_vid_mode && dm_new_crtc_state->stream &&
10276 is_timing_unchanged_for_freesync(new_crtc_state,
10277 old_crtc_state)) {
10278 new_crtc_state->mode_changed = false;
10279 DRM_DEBUG_DRIVER(
10280 "Mode change not required for front porch change, "
10281 "setting mode_changed to %d",
10282 new_crtc_state->mode_changed);
10283
10284 set_freesync_fixed_config(dm_new_crtc_state);
10285
10286 goto skip_modeset;
10287 } else if (amdgpu_freesync_vid_mode && aconnector &&
10288 is_freesync_video_mode(&new_crtc_state->mode,
10289 aconnector)) {
10290 struct drm_display_mode *high_mode;
10291
10292 high_mode = get_highest_refresh_rate_mode(aconnector, false);
10293 if (!drm_mode_equal(&new_crtc_state->mode, high_mode)) {
10294 set_freesync_fixed_config(dm_new_crtc_state);
10295 }
10296 }
10297
10298 ret = dm_atomic_get_state(state, &dm_state);
10299 if (ret)
10300 goto fail;
10301
10302 DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
10303 crtc->base.id);
10304
10305 /* i.e. reset mode */
10306 if (dc_remove_stream_from_ctx(
10307 dm->dc,
10308 dm_state->context,
10309 dm_old_crtc_state->stream) != DC_OK) {
10310 ret = -EINVAL;
10311 goto fail;
10312 }
10313
10314 dc_stream_release(dm_old_crtc_state->stream);
10315 dm_new_crtc_state->stream = NULL;
10316
10317 reset_freesync_config_for_crtc(dm_new_crtc_state);
10318
10319 *lock_and_validation_needed = true;
10320
10321 } else {/* Add stream for any updated/enabled CRTC */
10322 /*
 10323		 * Quick fix to prevent a NULL pointer dereference on new_stream when
 10324		 * added MST connectors are not found in the existing crtc_state in chained mode.
 10325		 * TODO: need to dig out the root cause of that.
10326 */
10327 if (!aconnector || (!aconnector->dc_sink && aconnector->mst_port))
10328 goto skip_modeset;
10329
10330 if (modereset_required(new_crtc_state))
10331 goto skip_modeset;
10332
10333 if (modeset_required(new_crtc_state, new_stream,
10334 dm_old_crtc_state->stream)) {
10335
10336 WARN_ON(dm_new_crtc_state->stream);
10337
10338 ret = dm_atomic_get_state(state, &dm_state);
10339 if (ret)
10340 goto fail;
10341
10342 dm_new_crtc_state->stream = new_stream;
10343
10344 dc_stream_retain(new_stream);
10345
10346 DRM_DEBUG_ATOMIC("Enabling DRM crtc: %d\n",
10347 crtc->base.id);
10348
10349 if (dc_add_stream_to_ctx(
10350 dm->dc,
10351 dm_state->context,
10352 dm_new_crtc_state->stream) != DC_OK) {
10353 ret = -EINVAL;
10354 goto fail;
10355 }
10356
10357 *lock_and_validation_needed = true;
10358 }
10359 }
10360
10361skip_modeset:
10362 /* Release extra reference */
10363 if (new_stream)
10364 dc_stream_release(new_stream);
10365
10366 /*
10367 * We want to do dc stream updates that do not require a
10368 * full modeset below.
10369 */
10370 if (!(enable && aconnector && new_crtc_state->active))
10371 return 0;
10372 /*
10373 * Given above conditions, the dc state cannot be NULL because:
 10374	 * 1. We're in the process of enabling CRTCs (the stream has just been
 10375	 *    added to the dc context, or is already on the context),
 10376	 * 2. It has a valid connector attached, and
 10377	 * 3. It is currently active and enabled.
10378 * => The dc stream state currently exists.
10379 */
10380 BUG_ON(dm_new_crtc_state->stream == NULL);
10381
10382 /* Scaling or underscan settings */
10383 if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state) ||
10384 drm_atomic_crtc_needs_modeset(new_crtc_state))
10385 update_stream_scaling_settings(
10386 &new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);
10387
10388 /* ABM settings */
10389 dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
10390
10391 /*
10392 * Color management settings. We also update color properties
10393 * when a modeset is needed, to ensure it gets reprogrammed.
10394 */
10395 if (dm_new_crtc_state->base.color_mgmt_changed ||
10396 drm_atomic_crtc_needs_modeset(new_crtc_state)) {
10397 ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state);
10398 if (ret)
10399 goto fail;
10400 }
10401
10402 /* Update Freesync settings. */
10403 get_freesync_config_for_crtc(dm_new_crtc_state,
10404 dm_new_conn_state);
10405
10406 return ret;
10407
10408fail:
10409 if (new_stream)
10410 dc_stream_release(new_stream);
10411 return ret;
10412}
10413
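/*
 * Decide whether this plane update requires DC to remove and re-create all
 * planes on the stream: modesets, CRTC changes, color management updates,
 * and scaling/rotation/blending/format/tiling changes on any plane sharing
 * the CRTC all force a reset.
 */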
10414static bool should_reset_plane(struct drm_atomic_state *state,
10415 struct drm_plane *plane,
10416 struct drm_plane_state *old_plane_state,
10417 struct drm_plane_state *new_plane_state)
10418{
10419 struct drm_plane *other;
10420 struct drm_plane_state *old_other_state, *new_other_state;
10421 struct drm_crtc_state *new_crtc_state;
10422 int i;
10423
10424 /*
 10425	 * TODO: Remove this hack once the checks below are sufficient
 10426	 * to determine when we need to reset all the planes on
10427 * the stream.
10428 */
10429 if (state->allow_modeset)
10430 return true;
10431
10432 /* Exit early if we know that we're adding or removing the plane. */
10433 if (old_plane_state->crtc != new_plane_state->crtc)
10434 return true;
10435
10436 /* old crtc == new_crtc == NULL, plane not in context. */
10437 if (!new_plane_state->crtc)
10438 return false;
10439
10440 new_crtc_state =
10441 drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);
10442
10443 if (!new_crtc_state)
10444 return true;
10445
10446 /* CRTC Degamma changes currently require us to recreate planes. */
10447 if (new_crtc_state->color_mgmt_changed)
10448 return true;
10449
10450 if (drm_atomic_crtc_needs_modeset(new_crtc_state))
10451 return true;
10452
10453 /*
10454 * If there are any new primary or overlay planes being added or
10455 * removed then the z-order can potentially change. To ensure
10456 * correct z-order and pipe acquisition the current DC architecture
10457 * requires us to remove and recreate all existing planes.
10458 *
10459 * TODO: Come up with a more elegant solution for this.
10460 */
10461 for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
10462 struct amdgpu_framebuffer *old_afb, *new_afb;
10463 if (other->type == DRM_PLANE_TYPE_CURSOR)
10464 continue;
10465
10466 if (old_other_state->crtc != new_plane_state->crtc &&
10467 new_other_state->crtc != new_plane_state->crtc)
10468 continue;
10469
10470 if (old_other_state->crtc != new_other_state->crtc)
10471 return true;
10472
10473 /* Src/dst size and scaling updates. */
10474 if (old_other_state->src_w != new_other_state->src_w ||
10475 old_other_state->src_h != new_other_state->src_h ||
10476 old_other_state->crtc_w != new_other_state->crtc_w ||
10477 old_other_state->crtc_h != new_other_state->crtc_h)
10478 return true;
10479
10480 /* Rotation / mirroring updates. */
10481 if (old_other_state->rotation != new_other_state->rotation)
10482 return true;
10483
10484 /* Blending updates. */
10485 if (old_other_state->pixel_blend_mode !=
10486 new_other_state->pixel_blend_mode)
10487 return true;
10488
10489 /* Alpha updates. */
10490 if (old_other_state->alpha != new_other_state->alpha)
10491 return true;
10492
10493 /* Colorspace changes. */
10494 if (old_other_state->color_range != new_other_state->color_range ||
10495 old_other_state->color_encoding != new_other_state->color_encoding)
10496 return true;
10497
10498 /* Framebuffer checks fall at the end. */
10499 if (!old_other_state->fb || !new_other_state->fb)
10500 continue;
10501
10502 /* Pixel format changes can require bandwidth updates. */
10503 if (old_other_state->fb->format != new_other_state->fb->format)
10504 return true;
10505
10506 old_afb = (struct amdgpu_framebuffer *)old_other_state->fb;
10507 new_afb = (struct amdgpu_framebuffer *)new_other_state->fb;
10508
10509 /* Tiling and DCC changes also require bandwidth updates. */
10510 if (old_afb->tiling_flags != new_afb->tiling_flags ||
10511 old_afb->base.modifier != new_afb->base.modifier)
10512 return true;
10513 }
10514
10515 return false;
10516}
10517
10518static int dm_check_cursor_fb(struct amdgpu_crtc *new_acrtc,
10519 struct drm_plane_state *new_plane_state,
10520 struct drm_framebuffer *fb)
10521{
10522 struct amdgpu_device *adev = drm_to_adev(new_acrtc->base.dev);
10523 struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(fb);
10524 unsigned int pitch;
10525 bool linear;
10526
10527 if (fb->width > new_acrtc->max_cursor_width ||
10528 fb->height > new_acrtc->max_cursor_height) {
10529 DRM_DEBUG_ATOMIC("Bad cursor FB size %dx%d\n",
10530 fb->width,
10531 fb->height);
10532 return -EINVAL;
10533 }
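/*
 * src_w/src_h are 16.16 fixed point; the cursor FB must be used
 * unscaled and uncropped, so they must match the FB size exactly.
 */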
10534 if (new_plane_state->src_w != fb->width << 16 ||
10535 new_plane_state->src_h != fb->height << 16) {
10536 DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
10537 return -EINVAL;
10538 }
10539
10540 /* Pitch in pixels */
10541 pitch = fb->pitches[0] / fb->format->cpp[0];
10542
10543 if (fb->width != pitch) {
10544 DRM_DEBUG_ATOMIC("Cursor FB width %d doesn't match pitch %d\n",
10545 fb->width, pitch);
10546 return -EINVAL;
10547 }
10548
10549 switch (pitch) {
10550 case 64:
10551 case 128:
10552 case 256:
10553 /* FB pitch is supported by cursor plane */
10554 break;
10555 default:
10556 DRM_DEBUG_ATOMIC("Bad cursor FB pitch %d px\n", pitch);
10557 return -EINVAL;
10558 }
10559
10560 /* Core DRM takes care of checking FB modifiers, so we only need to
10561 * check tiling flags when the FB doesn't have a modifier. */
10562 if (!(fb->flags & DRM_MODE_FB_MODIFIERS)) {
10563 if (adev->family < AMDGPU_FAMILY_AI) {
10564 linear = AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_2D_TILED_THIN1 &&
10565 AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_1D_TILED_THIN1 &&
10566 AMDGPU_TILING_GET(afb->tiling_flags, MICRO_TILE_MODE) == 0;
10567 } else {
10568 linear = AMDGPU_TILING_GET(afb->tiling_flags, SWIZZLE_MODE) == 0;
10569 }
10570 if (!linear) {
10571 DRM_DEBUG_ATOMIC("Cursor FB not linear\n");
10572 return -EINVAL;
10573 }
10574 }
10575
10576 return 0;
10577}
10578
10579static int dm_update_plane_state(struct dc *dc,
10580 struct drm_atomic_state *state,
10581 struct drm_plane *plane,
10582 struct drm_plane_state *old_plane_state,
10583 struct drm_plane_state *new_plane_state,
10584 bool enable,
10585 bool *lock_and_validation_needed)
10586{
10587
10588 struct dm_atomic_state *dm_state = NULL;
10589 struct drm_crtc *new_plane_crtc, *old_plane_crtc;
10590 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
10591 struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
10592 struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
10593 struct amdgpu_crtc *new_acrtc;
10594 bool needs_reset;
10595 int ret = 0;
10596
10597
10598 new_plane_crtc = new_plane_state->crtc;
10599 old_plane_crtc = old_plane_state->crtc;
10600 dm_new_plane_state = to_dm_plane_state(new_plane_state);
10601 dm_old_plane_state = to_dm_plane_state(old_plane_state);
10602
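/*
 * Cursor planes never get a dc_plane_state of their own, so only
 * validate their position and framebuffer here.
 */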
10603 if (plane->type == DRM_PLANE_TYPE_CURSOR) {
10604 if (!enable || !new_plane_crtc ||
10605 drm_atomic_plane_disabling(plane->state, new_plane_state))
10606 return 0;
10607
10608 new_acrtc = to_amdgpu_crtc(new_plane_crtc);
10609
10610 if (new_plane_state->src_x != 0 || new_plane_state->src_y != 0) {
10611 DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
10612 return -EINVAL;
10613 }
10614
10615 if (new_plane_state->fb) {
10616 ret = dm_check_cursor_fb(new_acrtc, new_plane_state,
10617 new_plane_state->fb);
10618 if (ret)
10619 return ret;
10620 }
10621
10622 return 0;
10623 }
10624
10625 needs_reset = should_reset_plane(state, plane, old_plane_state,
10626 new_plane_state);
10627
10628 /* Remove any changed/removed planes */
10629 if (!enable) {
10630 if (!needs_reset)
10631 return 0;
10632
10633 if (!old_plane_crtc)
10634 return 0;
10635
10636 old_crtc_state = drm_atomic_get_old_crtc_state(
10637 state, old_plane_crtc);
10638 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
10639
10640 if (!dm_old_crtc_state->stream)
10641 return 0;
10642
10643 DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
10644 plane->base.id, old_plane_crtc->base.id);
10645
10646 ret = dm_atomic_get_state(state, &dm_state);
10647 if (ret)
10648 return ret;
10649
10650 if (!dc_remove_plane_from_context(
10651 dc,
10652 dm_old_crtc_state->stream,
10653 dm_old_plane_state->dc_state,
10654 dm_state->context)) {
10655
10656 return -EINVAL;
10657 }
10658
10659
10660 dc_plane_state_release(dm_old_plane_state->dc_state);
10661 dm_new_plane_state->dc_state = NULL;
10662
10663 *lock_and_validation_needed = true;
10664
10665 } else { /* Add new planes */
10666 struct dc_plane_state *dc_new_plane_state;
10667
10668 if (drm_atomic_plane_disabling(plane->state, new_plane_state))
10669 return 0;
10670
10671 if (!new_plane_crtc)
10672 return 0;
10673
10674 new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
10675 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
10676
10677 if (!dm_new_crtc_state->stream)
10678 return 0;
10679
10680 if (!needs_reset)
10681 return 0;
10682
10683 ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
10684 if (ret)
10685 return ret;
10686
10687 WARN_ON(dm_new_plane_state->dc_state);
10688
10689 dc_new_plane_state = dc_create_plane_state(dc);
10690 if (!dc_new_plane_state)
10691 return -ENOMEM;
10692
10693 DRM_DEBUG_ATOMIC("Enabling DRM plane: %d on DRM crtc %d\n",
10694 plane->base.id, new_plane_crtc->base.id);
10695
10696 ret = fill_dc_plane_attributes(
10697 drm_to_adev(new_plane_crtc->dev),
10698 dc_new_plane_state,
10699 new_plane_state,
10700 new_crtc_state);
10701 if (ret) {
10702 dc_plane_state_release(dc_new_plane_state);
10703 return ret;
10704 }
10705
10706 ret = dm_atomic_get_state(state, &dm_state);
10707 if (ret) {
10708 dc_plane_state_release(dc_new_plane_state);
10709 return ret;
10710 }
10711
10712 /*
10713 * Any atomic check errors that occur after this will
10714 * not need a release. The plane state will be attached
10715 * to the stream, and therefore part of the atomic
10716 * state. It'll be released when the atomic state is
10717 * cleaned.
10718 */
10719 if (!dc_add_plane_to_context(
10720 dc,
10721 dm_new_crtc_state->stream,
10722 dc_new_plane_state,
10723 dm_state->context)) {
10724
10725 dc_plane_state_release(dc_new_plane_state);
10726 return -EINVAL;
10727 }
10728
10729 dm_new_plane_state->dc_state = dc_new_plane_state;
10730
10731 /* Tell DC to do a full surface update every time there
10732 * is a plane change. Inefficient, but works for now.
10733 */
10734 dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;
10735
10736 *lock_and_validation_needed = true;
10737 }
10738
10739
10740 return ret;
10741}
10742
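/*
 * Return the plane's source size in whole pixels, with width/height swapped
 * for 90/270 degree rotations so it can be compared against CRTC-space
 * coordinates. The src_* values are 16.16 fixed point, hence the >> 16.
 */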
10743static void dm_get_oriented_plane_size(struct drm_plane_state *plane_state,
10744 int *src_w, int *src_h)
10745{
10746 switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
10747 case DRM_MODE_ROTATE_90:
10748 case DRM_MODE_ROTATE_270:
10749 *src_w = plane_state->src_h >> 16;
10750 *src_h = plane_state->src_w >> 16;
10751 break;
10752 case DRM_MODE_ROTATE_0:
10753 case DRM_MODE_ROTATE_180:
10754 default:
10755 *src_w = plane_state->src_w >> 16;
10756 *src_h = plane_state->src_h >> 16;
10757 break;
10758 }
10759}
10760
10761static int dm_check_crtc_cursor(struct drm_atomic_state *state,
10762 struct drm_crtc *crtc,
10763 struct drm_crtc_state *new_crtc_state)
10764{
10765 struct drm_plane *cursor = crtc->cursor, *underlying;
10766 struct drm_plane_state *new_cursor_state, *new_underlying_state;
10767 int i;
10768 int cursor_scale_w, cursor_scale_h, underlying_scale_w, underlying_scale_h;
10769 int cursor_src_w, cursor_src_h;
10770 int underlying_src_w, underlying_src_h;
10771
10772 /* On DCE and DCN there is no dedicated hardware cursor plane. We get a
10773 * cursor per pipe, but it inherits the scaling and positioning from the
10774 * underlying pipe. Check that the cursor plane's scaling matches the
10775 * underlying planes'. */
10776
10777 new_cursor_state = drm_atomic_get_new_plane_state(state, cursor);
10778 if (!new_cursor_state || !new_cursor_state->fb) {
10779 return 0;
10780 }
10781
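/* Scale factors are computed in 1/1000ths so they can be compared as integers. */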
10782 dm_get_oriented_plane_size(new_cursor_state, &cursor_src_w, &cursor_src_h);
10783 cursor_scale_w = new_cursor_state->crtc_w * 1000 / cursor_src_w;
10784 cursor_scale_h = new_cursor_state->crtc_h * 1000 / cursor_src_h;
10785
10786 for_each_new_plane_in_state_reverse(state, underlying, new_underlying_state, i) {
10787 /* Narrow down to non-cursor planes on the same CRTC as the cursor */
10788 if (new_underlying_state->crtc != crtc || underlying == crtc->cursor)
10789 continue;
10790
10791 /* Ignore disabled planes */
10792 if (!new_underlying_state->fb)
10793 continue;
10794
10795 dm_get_oriented_plane_size(new_underlying_state,
10796 &underlying_src_w, &underlying_src_h);
10797 underlying_scale_w = new_underlying_state->crtc_w * 1000 / underlying_src_w;
10798 underlying_scale_h = new_underlying_state->crtc_h * 1000 / underlying_src_h;
10799
10800 if (cursor_scale_w != underlying_scale_w ||
10801 cursor_scale_h != underlying_scale_h) {
10802 drm_dbg_atomic(crtc->dev,
10803 "Cursor [PLANE:%d:%s] scaling doesn't match underlying [PLANE:%d:%s]\n",
10804 cursor->base.id, cursor->name, underlying->base.id, underlying->name);
10805 return -EINVAL;
10806 }
10807
10808 /* If this plane covers the whole CRTC, no need to check planes underneath */
10809 if (new_underlying_state->crtc_x <= 0 &&
10810 new_underlying_state->crtc_y <= 0 &&
10811 new_underlying_state->crtc_x + new_underlying_state->crtc_w >= new_crtc_state->mode.hdisplay &&
10812 new_underlying_state->crtc_y + new_underlying_state->crtc_h >= new_crtc_state->mode.vdisplay)
10813 break;
10814 }
10815
10816 return 0;
10817}
10818
10819#if defined(CONFIG_DRM_AMD_DC_DCN)
10820static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc)
10821{
10822 struct drm_connector *connector;
10823 struct drm_connector_state *conn_state;
10824 struct amdgpu_dm_connector *aconnector = NULL;
10825 int i;
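
	/* Find an MST connector that is driven by this CRTC, if any. */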
10826 for_each_new_connector_in_state(state, connector, conn_state, i) {
10827 if (conn_state->crtc != crtc)
10828 continue;
10829
10830 aconnector = to_amdgpu_dm_connector(connector);
10831 if (!aconnector->port || !aconnector->mst_port)
10832 aconnector = NULL;
10833 else
10834 break;
10835 }
10836
10837 if (!aconnector)
10838 return 0;
10839
10840 return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_port->mst_mgr);
10841}
10842#endif
10843
10844/**
10845 * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
10846 * @dev: The DRM device
10847 * @state: The atomic state to check
10848 *
10849 * Validate that the given atomic state is programmable by DC into hardware.
10850 * This involves constructing a &struct dc_state reflecting the new hardware
10851 * state we wish to commit, then querying DC to see if it is programmable. It's
10852 * important not to modify the existing DC state. Otherwise, atomic_check
10853 * may unexpectedly commit hardware changes.
10854 *
10855 * When validating the DC state, it's important that the right locks are
10856 * acquired. For a full update, which removes/adds/updates streams on one
10857 * CRTC while flipping on another CRTC, acquiring the global lock guarantees
10858 * that any such full update commit will wait for completion of any outstanding
10859 * flip using DRM's synchronization events.
10860 *
10861 * Note that DM adds the affected connectors for all CRTCs in state, when that
10862 * might not seem necessary. This is because DC stream creation requires the
10863 * DC sink, which is tied to the DRM connector state. Cleaning this up should
10864 * be possible but non-trivial - a possible TODO item.
10865 *
10866 * Return: 0 on success, negative error code if validation failed.
10867 */
10868static int amdgpu_dm_atomic_check(struct drm_device *dev,
10869 struct drm_atomic_state *state)
10870{
10871 struct amdgpu_device *adev = drm_to_adev(dev);
10872 struct dm_atomic_state *dm_state = NULL;
10873 struct dc *dc = adev->dm.dc;
10874 struct drm_connector *connector;
10875 struct drm_connector_state *old_con_state, *new_con_state;
10876 struct drm_crtc *crtc;
10877 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
10878 struct drm_plane *plane;
10879 struct drm_plane_state *old_plane_state, *new_plane_state;
10880 enum dc_status status;
10881 int ret, i;
10882 bool lock_and_validation_needed = false;
10883 struct dm_crtc_state *dm_old_crtc_state;
10884#if defined(CONFIG_DRM_AMD_DC_DCN)
10885 struct dsc_mst_fairness_vars vars[MAX_PIPES];
10886 struct drm_dp_mst_topology_state *mst_state;
10887 struct drm_dp_mst_topology_mgr *mgr;
10888#endif
10889
10890 trace_amdgpu_dm_atomic_check_begin(state);
10891
10892 ret = drm_atomic_helper_check_modeset(dev, state);
10893 if (ret) {
10894 DRM_DEBUG_DRIVER("drm_atomic_helper_check_modeset() failed\n");
10895 goto fail;
10896 }
10897
10898 /* Check connector changes */
10899 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
10900 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
10901 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
10902
10903 /* Skip connectors that are disabled or part of modeset already. */
10904 if (!old_con_state->crtc && !new_con_state->crtc)
10905 continue;
10906
10907 if (!new_con_state->crtc)
10908 continue;
10909
10910 new_crtc_state = drm_atomic_get_crtc_state(state, new_con_state->crtc);
10911 if (IS_ERR(new_crtc_state)) {
10912 DRM_DEBUG_DRIVER("drm_atomic_get_crtc_state() failed\n");
10913 ret = PTR_ERR(new_crtc_state);
10914 goto fail;
10915 }
10916
10917 if (dm_old_con_state->abm_level !=
10918 dm_new_con_state->abm_level)
10919 new_crtc_state->connectors_changed = true;
10920 }
10921
10922#if defined(CONFIG_DRM_AMD_DC_DCN)
10923 if (dc_resource_is_dsc_encoding_supported(dc)) {
10924 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10925 if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
10926 ret = add_affected_mst_dsc_crtcs(state, crtc);
10927 if (ret) {
10928 DRM_DEBUG_DRIVER("add_affected_mst_dsc_crtcs() failed\n");
10929 goto fail;
10930 }
10931 }
10932 }
10933 }
10934#endif
10935 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10936 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
10937
10938 if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
10939 !new_crtc_state->color_mgmt_changed &&
10940 old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled &&
10941 dm_old_crtc_state->dsc_force_changed == false)
10942 continue;
10943
10944 ret = amdgpu_dm_verify_lut_sizes(new_crtc_state);
10945 if (ret) {
10946 DRM_DEBUG_DRIVER("amdgpu_dm_verify_lut_sizes() failed\n");
10947 goto fail;
10948 }
10949
10950 if (!new_crtc_state->enable)
10951 continue;
10952
10953 ret = drm_atomic_add_affected_connectors(state, crtc);
10954 if (ret) {
10955 DRM_DEBUG_DRIVER("drm_atomic_add_affected_connectors() failed\n");
10956 goto fail;
10957 }
10958
10959 ret = drm_atomic_add_affected_planes(state, crtc);
10960 if (ret) {
10961 DRM_DEBUG_DRIVER("drm_atomic_add_affected_planes() failed\n");
10962 goto fail;
10963 }
10964
10965 if (dm_old_crtc_state->dsc_force_changed)
10966 new_crtc_state->mode_changed = true;
10967 }
10968
10969 /*
10970 * Add all primary and overlay planes on the CRTC to the state
10971 * whenever a plane is enabled to maintain correct z-ordering
10972 * and to enable fast surface updates.
10973 */
10974 drm_for_each_crtc(crtc, dev) {
10975 bool modified = false;
10976
10977 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
10978 if (plane->type == DRM_PLANE_TYPE_CURSOR)
10979 continue;
10980
10981 if (new_plane_state->crtc == crtc ||
10982 old_plane_state->crtc == crtc) {
10983 modified = true;
10984 break;
10985 }
10986 }
10987
10988 if (!modified)
10989 continue;
10990
10991 drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
10992 if (plane->type == DRM_PLANE_TYPE_CURSOR)
10993 continue;
10994
10995 new_plane_state =
10996 drm_atomic_get_plane_state(state, plane);
10997
10998 if (IS_ERR(new_plane_state)) {
10999 ret = PTR_ERR(new_plane_state);
11000 DRM_DEBUG_DRIVER("new_plane_state is BAD\n");
11001 goto fail;
11002 }
11003 }
11004 }
11005
11006 /* Remove existing planes if they are modified */
11007 for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
11008 ret = dm_update_plane_state(dc, state, plane,
11009 old_plane_state,
11010 new_plane_state,
11011 false,
11012 &lock_and_validation_needed);
11013 if (ret) {
11014 DRM_DEBUG_DRIVER("dm_update_plane_state() failed\n");
11015 goto fail;
11016 }
11017 }
11018
11019 /* Disable all crtcs which require disable */
11020 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
11021 ret = dm_update_crtc_state(&adev->dm, state, crtc,
11022 old_crtc_state,
11023 new_crtc_state,
11024 false,
11025 &lock_and_validation_needed);
11026 if (ret) {
11027 DRM_DEBUG_DRIVER("DISABLE: dm_update_crtc_state() failed\n");
11028 goto fail;
11029 }
11030 }
11031
11032 /* Enable all crtcs which require enable */
11033 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
11034 ret = dm_update_crtc_state(&adev->dm, state, crtc,
11035 old_crtc_state,
11036 new_crtc_state,
11037 true,
11038 &lock_and_validation_needed);
11039 if (ret) {
11040 DRM_DEBUG_DRIVER("ENABLE: dm_update_crtc_state() failed\n");
11041 goto fail;
11042 }
11043 }
11044
11045 /* Add new/modified planes */
11046 for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
11047 ret = dm_update_plane_state(dc, state, plane,
11048 old_plane_state,
11049 new_plane_state,
11050 true,
11051 &lock_and_validation_needed);
11052 if (ret) {
11053 DRM_DEBUG_DRIVER("dm_update_plane_state() failed\n");
11054 goto fail;
11055 }
11056 }
11057
11058 /* Run this here since we want to validate the streams we created */
11059 ret = drm_atomic_helper_check_planes(dev, state);
11060 if (ret) {
11061 DRM_DEBUG_DRIVER("drm_atomic_helper_check_planes() failed\n");
11062 goto fail;
11063 }
11064
11065 /* Check cursor planes scaling */
11066 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
11067 ret = dm_check_crtc_cursor(state, crtc, new_crtc_state);
11068 if (ret) {
11069 DRM_DEBUG_DRIVER("dm_check_crtc_cursor() failed\n");
11070 goto fail;
11071 }
11072 }
11073
11074 if (state->legacy_cursor_update) {
11075 /*
11076 * This is a fast cursor update coming from the plane update
11077 * helper, check if it can be done asynchronously for better
11078 * performance.
11079 */
11080 state->async_update =
11081 !drm_atomic_helper_async_check(dev, state);
11082
11083 /*
11084 * Skip the remaining global validation if this is an async
11085 * update. Cursor updates can be done without affecting
11086 * state or bandwidth calcs and this avoids the performance
11087 * penalty of locking the private state object and
11088 * allocating a new dc_state.
11089 */
11090 if (state->async_update)
11091 return 0;
11092 }
11093
11094 /* Check scaling and underscan changes */
11095 /* TODO: Scaling changes validation was removed because a new stream
11096 * cannot currently be committed into the context without causing a
11097 * full reset. Need to decide how to handle this.
11098 */
11099 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
11100 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
11101 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
11102 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
11103
11104 /* Skip any modesets/resets */
11105 if (!acrtc || drm_atomic_crtc_needs_modeset(
11106 drm_atomic_get_new_crtc_state(state, &acrtc->base)))
11107 continue;
11108
11109 /* Skip anything that is not a scaling or underscan change */
11110 if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
11111 continue;
11112
11113 lock_and_validation_needed = true;
11114 }
11115
11116#if defined(CONFIG_DRM_AMD_DC_DCN)
11117 /* set the slot info for each mst_state based on the link encoding format */
11118 for_each_new_mst_mgr_in_state(state, mgr, mst_state, i) {
11119 struct amdgpu_dm_connector *aconnector;
11120 struct drm_connector *connector;
11121 struct drm_connector_list_iter iter;
11122 u8 link_coding_cap;
11123
11124 if (!mgr->mst_state)
11125 continue;
11126
11127 drm_connector_list_iter_begin(dev, &iter);
11128 drm_for_each_connector_iter(connector, &iter) {
11129 int id = connector->index;
11130
11131 if (id == mst_state->mgr->conn_base_id) {
11132 aconnector = to_amdgpu_dm_connector(connector);
11133 link_coding_cap = dc_link_dp_mst_decide_link_encoding_format(aconnector->dc_link);
11134 drm_dp_mst_update_slots(mst_state, link_coding_cap);
11135
11136 break;
11137 }
11138 }
11139 drm_connector_list_iter_end(&iter);
11140
11141 }
11142#endif
11143 /*
11144 * Streams and planes are reset when there are changes that affect
11145 * bandwidth. Anything that affects bandwidth needs to go through
11146 * DC global validation to ensure that the configuration can be applied
11147 * to hardware.
11148 *
11149 * We currently have to stall out here in atomic_check for outstanding
11150 * commits to finish in this case because our IRQ handlers reference
11151 * DRM state directly - we can end up disabling interrupts too early
11152 * if we don't.
11153 *
11154 * TODO: Remove this stall and drop DM state private objects.
11155 */
11156 if (lock_and_validation_needed) {
11157 ret = dm_atomic_get_state(state, &dm_state);
11158 if (ret) {
11159 DRM_DEBUG_DRIVER("dm_atomic_get_state() failed\n");
11160 goto fail;
11161 }
11162
11163 ret = do_aquire_global_lock(dev, state);
11164 if (ret) {
11165 DRM_DEBUG_DRIVER("do_aquire_global_lock() failed\n");
11166 goto fail;
11167 }
11168
11169#if defined(CONFIG_DRM_AMD_DC_DCN)
11170 if (!compute_mst_dsc_configs_for_state(state, dm_state->context, vars)) {
11171 DRM_DEBUG_DRIVER("compute_mst_dsc_configs_for_state() failed\n");
ret = -EINVAL;
11172 goto fail;
11173 }
11174
11175 ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context, vars);
11176 if (ret) {
11177 DRM_DEBUG_DRIVER("dm_update_mst_vcpi_slots_for_dsc() failed\n");
11178 goto fail;
11179 }
11180#endif
11181
11182 /*
11183 * Perform validation of MST topology in the state:
11184 * We need to perform MST atomic check before calling
11185 * dc_validate_global_state(), or we risk getting stuck in an
11186 * infinite loop and eventually hanging.
11187 */
11188 ret = drm_dp_mst_atomic_check(state);
11189 if (ret) {
11190 DRM_DEBUG_DRIVER("drm_dp_mst_atomic_check() failed\n");
11191 goto fail;
11192 }
11193 status = dc_validate_global_state(dc, dm_state->context, true);
11194 if (status != DC_OK) {
11195 DRM_DEBUG_DRIVER("DC global validation failure: %s (%d)",
11196 dc_status_to_str(status), status);
11197 ret = -EINVAL;
11198 goto fail;
11199 }
11200 } else {
11201 /*
11202 * The commit is a fast update. Fast updates shouldn't change
11203 * the DC context or affect global validation, and they can have
11204 * their commit work done in parallel with other commits not touching
11205 * the same resource. If we have a new DC context as part of
11206 * the DM atomic state from validation we need to free it and
11207 * retain the existing one instead.
11208 *
11209 * Furthermore, since the DM atomic state only contains the DC
11210 * context and can safely be annulled, we can free the state
11211 * and clear the associated private object now to free
11212 * some memory and avoid a possible use-after-free later.
11213 */
11214
11215 for (i = 0; i < state->num_private_objs; i++) {
11216 struct drm_private_obj *obj = state->private_objs[i].ptr;
11217
11218 if (obj->funcs == adev->dm.atomic_obj.funcs) {
11219 int j = state->num_private_objs-1;
11220
11221 dm_atomic_destroy_state(obj,
11222 state->private_objs[i].state);
11223
11224 /* If i is not at the end of the array then the
11225 * last element needs to be moved to where i was
11226 * before the array can safely be truncated.
11227 */
11228 if (i != j)
11229 state->private_objs[i] =
11230 state->private_objs[j];
11231
11232 state->private_objs[j].ptr = NULL;
11233 state->private_objs[j].state = NULL;
11234 state->private_objs[j].old_state = NULL;
11235 state->private_objs[j].new_state = NULL;
11236
11237 state->num_private_objs = j;
11238 break;
11239 }
11240 }
11241 }
11242
11243 /* Store the overall update type for use later in atomic check. */
11244 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
11245 struct dm_crtc_state *dm_new_crtc_state =
11246 to_dm_crtc_state(new_crtc_state);
11247
11248 dm_new_crtc_state->update_type = lock_and_validation_needed ?
11249 UPDATE_TYPE_FULL :
11250 UPDATE_TYPE_FAST;
11251 }
11252
11253 /* Must be success */
11254 WARN_ON(ret);
11255
11256 trace_amdgpu_dm_atomic_check_finish(state, ret);
11257
11258 return ret;
11259
11260fail:
11261 if (ret == -EDEADLK)
11262 DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
11263 else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
11264 DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
11265 else
11266 DRM_DEBUG_DRIVER("Atomic check failed with err: %d\n", ret);
11267
11268 trace_amdgpu_dm_atomic_check_finish(state, ret);
11269
11270 return ret;
11271}
11272
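/*
 * Check whether the DP sink can ignore MSA timing parameters: read the
 * DP_DOWN_STREAM_PORT_COUNT DPCD register and test DP_MSA_TIMING_PAR_IGNORED.
 * amdgpu_dm_update_freesync_caps() uses this to decide whether the EDID range
 * descriptor needs to be parsed for FreeSync support.
 */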
11273static bool is_dp_capable_without_timing_msa(struct dc *dc,
11274 struct amdgpu_dm_connector *amdgpu_dm_connector)
11275{
11276 uint8_t dpcd_data;
11277 bool capable = false;
11278
11279 if (amdgpu_dm_connector->dc_link &&
11280 dm_helpers_dp_read_dpcd(
11281 NULL,
11282 amdgpu_dm_connector->dc_link,
11283 DP_DOWN_STREAM_PORT_COUNT,
11284 &dpcd_data,
11285 sizeof(dpcd_data))) {
11286 capable = (dpcd_data & DP_MSA_TIMING_PAR_IGNORED) ? true : false;
11287 }
11288
11289 return capable;
11290}
11291
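/*
 * Send one chunk (at most DMUB_EDID_CEA_DATA_CHUNK_BYTES) of the CEA
 * extension block to the DMUB firmware for parsing and decode the reply:
 * either an ACK for the chunk or the AMD VSDB contents once the whole
 * block has been parsed.
 */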
11292static bool dm_edid_parser_send_cea(struct amdgpu_display_manager *dm,
11293 unsigned int offset,
11294 unsigned int total_length,
11295 uint8_t *data,
11296 unsigned int length,
11297 struct amdgpu_hdmi_vsdb_info *vsdb)
11298{
11299 bool res;
11300 union dmub_rb_cmd cmd;
11301 struct dmub_cmd_send_edid_cea *input;
11302 struct dmub_cmd_edid_cea_output *output;
11303
11304 if (length > DMUB_EDID_CEA_DATA_CHUNK_BYTES)
11305 return false;
11306
11307 memset(&cmd, 0, sizeof(cmd));
11308
11309 input = &cmd.edid_cea.data.input;
11310
11311 cmd.edid_cea.header.type = DMUB_CMD__EDID_CEA;
11312 cmd.edid_cea.header.sub_type = 0;
11313 cmd.edid_cea.header.payload_bytes =
11314 sizeof(cmd.edid_cea) - sizeof(cmd.edid_cea.header);
11315 input->offset = offset;
11316 input->length = length;
11317 input->cea_total_length = total_length;
11318 memcpy(input->payload, data, length);
11319
11320 res = dc_dmub_srv_cmd_with_reply_data(dm->dc->ctx->dmub_srv, &cmd);
11321 if (!res) {
11322 DRM_ERROR("EDID CEA parser failed\n");
11323 return false;
11324 }
11325
11326 output = &cmd.edid_cea.data.output;
11327
11328 if (output->type == DMUB_CMD__EDID_CEA_ACK) {
11329 if (!output->ack.success) {
11330 DRM_ERROR("EDID CEA ack failed at offset %d\n",
11331 output->ack.offset);
11332 }
11333 } else if (output->type == DMUB_CMD__EDID_CEA_AMD_VSDB) {
11334 if (!output->amd_vsdb.vsdb_found)
11335 return false;
11336
11337 vsdb->freesync_supported = output->amd_vsdb.freesync_supported;
11338 vsdb->amd_vsdb_version = output->amd_vsdb.amd_vsdb_version;
11339 vsdb->min_refresh_rate_hz = output->amd_vsdb.min_frame_rate;
11340 vsdb->max_refresh_rate_hz = output->amd_vsdb.max_frame_rate;
11341 } else {
11342 DRM_WARN("Unknown EDID CEA parser results\n");
11343 return false;
11344 }
11345
11346 return true;
11347}
11348
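/*
 * Parse the CEA extension block with the DMCU firmware: feed it 8 bytes at a
 * time, wait for an ack after each chunk, and read back the AMD VSDB info
 * once the last chunk has been sent.
 */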
11349static bool parse_edid_cea_dmcu(struct amdgpu_display_manager *dm,
11350 uint8_t *edid_ext, int len,
11351 struct amdgpu_hdmi_vsdb_info *vsdb_info)
11352{
11353 int i;
11354
11355 /* send extension block to DMCU for parsing */
11356 for (i = 0; i < len; i += 8) {
11357 bool res;
11358 int offset;
11359
11360 /* send 8 bytes at a time */
11361 if (!dc_edid_parser_send_cea(dm->dc, i, len, &edid_ext[i], 8))
11362 return false;
11363
11364 if (i+8 == len) {
11365 /* EDID block sent completed, expect result */
11366 int version, min_rate, max_rate;
11367
11368 res = dc_edid_parser_recv_amd_vsdb(dm->dc, &version, &min_rate, &max_rate);
11369 if (res) {
11370 /* amd vsdb found */
11371 vsdb_info->freesync_supported = 1;
11372 vsdb_info->amd_vsdb_version = version;
11373 vsdb_info->min_refresh_rate_hz = min_rate;
11374 vsdb_info->max_refresh_rate_hz = max_rate;
11375 return true;
11376 }
11377 /* not amd vsdb */
11378 return false;
11379 }
11380
11381 /* check for ack */
11382 res = dc_edid_parser_recv_cea_ack(dm->dc, &offset);
11383 if (!res)
11384 return false;
11385 }
11386
11387 return false;
11388}
11389
11390static bool parse_edid_cea_dmub(struct amdgpu_display_manager *dm,
11391 uint8_t *edid_ext, int len,
11392 struct amdgpu_hdmi_vsdb_info *vsdb_info)
11393{
11394 int i;
11395
11396 /* send extension block to DMUB for parsing */
11397 for (i = 0; i < len; i += 8) {
11398 /* send 8 bytes at a time */
11399 if (!dm_edid_parser_send_cea(dm, i, len, &edid_ext[i], 8, vsdb_info))
11400 return false;
11401 }
11402
11403 return vsdb_info->freesync_supported;
11404}
11405
11406static bool parse_edid_cea(struct amdgpu_dm_connector *aconnector,
11407 uint8_t *edid_ext, int len,
11408 struct amdgpu_hdmi_vsdb_info *vsdb_info)
11409{
11410 struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
11411
11412 if (adev->dm.dmub_srv)
11413 return parse_edid_cea_dmub(&adev->dm, edid_ext, len, vsdb_info);
11414 else
11415 return parse_edid_cea_dmcu(&adev->dm, edid_ext, len, vsdb_info);
11416}
11417
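/*
 * Look for the AMD VSDB in the EDID's CEA extension block. Returns the index
 * of the extension block that was parsed on success, or -ENODEV if there is
 * no CEA extension or no valid VSDB was found.
 */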
11418static int parse_hdmi_amd_vsdb(struct amdgpu_dm_connector *aconnector,
11419 struct edid *edid, struct amdgpu_hdmi_vsdb_info *vsdb_info)
11420{
11421 uint8_t *edid_ext = NULL;
11422 int i;
11423 bool valid_vsdb_found = false;
11424
11425 /*----- drm_find_cea_extension() -----*/
11426 /* No EDID or EDID extensions */
11427 if (edid == NULL || edid->extensions == 0)
11428 return -ENODEV;
11429
11430 /* Find CEA extension */
11431 for (i = 0; i < edid->extensions; i++) {
11432 edid_ext = (uint8_t *)edid + EDID_LENGTH * (i + 1);
11433 if (edid_ext[0] == CEA_EXT)
11434 break;
11435 }
11436
11437 if (i == edid->extensions)
11438 return -ENODEV;
11439
11440 /*----- cea_db_offsets() -----*/
11441 if (edid_ext[0] != CEA_EXT)
11442 return -ENODEV;
11443
11444 valid_vsdb_found = parse_edid_cea(aconnector, edid_ext, EDID_LENGTH, vsdb_info);
11445
11446 return valid_vsdb_found ? i : -ENODEV;
11447}
11448
11449void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
11450 struct edid *edid)
11451{
11452 int i = 0;
11453 struct detailed_timing *timing;
11454 struct detailed_non_pixel *data;
11455 struct detailed_data_monitor_range *range;
11456 struct amdgpu_dm_connector *amdgpu_dm_connector =
11457 to_amdgpu_dm_connector(connector);
11458 struct dm_connector_state *dm_con_state = NULL;
11459 struct dc_sink *sink;
11460
11461 struct drm_device *dev = connector->dev;
11462 struct amdgpu_device *adev = drm_to_adev(dev);
11463 bool freesync_capable = false;
11464 struct amdgpu_hdmi_vsdb_info vsdb_info = {0};
11465
11466 if (!connector->state) {
11467 DRM_ERROR("%s - Connector has no state\n", __func__);
11468 goto update;
11469 }
11470
11471 sink = amdgpu_dm_connector->dc_sink ?
11472 amdgpu_dm_connector->dc_sink :
11473 amdgpu_dm_connector->dc_em_sink;
11474
11475 if (!edid || !sink) {
11476 dm_con_state = to_dm_connector_state(connector->state);
11477
11478 amdgpu_dm_connector->min_vfreq = 0;
11479 amdgpu_dm_connector->max_vfreq = 0;
11480 amdgpu_dm_connector->pixel_clock_mhz = 0;
11481 connector->display_info.monitor_range.min_vfreq = 0;
11482 connector->display_info.monitor_range.max_vfreq = 0;
11483 freesync_capable = false;
11484
11485 goto update;
11486 }
11487
11488 dm_con_state = to_dm_connector_state(connector->state);
11489
11490 if (!adev->dm.freesync_module)
11491 goto update;
11492
11493
11494 if (sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
11495 || sink->sink_signal == SIGNAL_TYPE_EDP) {
11496 bool edid_check_required = false;
11497
11498 if (edid) {
11499 edid_check_required = is_dp_capable_without_timing_msa(
11500 adev->dm.dc,
11501 amdgpu_dm_connector);
11502 }
11503
11504 if (edid_check_required && (edid->version > 1 ||
11505 (edid->version == 1 && edid->revision > 1))) {
11506 for (i = 0; i < 4; i++) {
11507
11508 timing = &edid->detailed_timings[i];
11509 data = &timing->data.other_data;
11510 range = &data->data.range;
11511 /*
11512 * Check if monitor has continuous frequency mode
11513 */
11514 if (data->type != EDID_DETAIL_MONITOR_RANGE)
11515 continue;
11516 /*
11517 * Check for flag range limits only. If flag == 1 then
11518 * no additional timing information provided.
11519 * Default GTF, GTF Secondary curve and CVT are not
11520 * supported
11521 */
11522 if (range->flags != 1)
11523 continue;
11524
11525 amdgpu_dm_connector->min_vfreq = range->min_vfreq;
11526 amdgpu_dm_connector->max_vfreq = range->max_vfreq;
11527 amdgpu_dm_connector->pixel_clock_mhz =
11528 range->pixel_clock_mhz * 10;
11529
11530 connector->display_info.monitor_range.min_vfreq = range->min_vfreq;
11531 connector->display_info.monitor_range.max_vfreq = range->max_vfreq;
11532
11533 break;
11534 }
11535
11536 if (amdgpu_dm_connector->max_vfreq -
11537 amdgpu_dm_connector->min_vfreq > 10) {
11538
11539 freesync_capable = true;
11540 }
11541 }
11542 } else if (edid && sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A) {
11543 i = parse_hdmi_amd_vsdb(amdgpu_dm_connector, edid, &vsdb_info);
11544 if (i >= 0 && vsdb_info.freesync_supported) {
11545 timing = &edid->detailed_timings[i];
11546 data = &timing->data.other_data;
11547
11548 amdgpu_dm_connector->min_vfreq = vsdb_info.min_refresh_rate_hz;
11549 amdgpu_dm_connector->max_vfreq = vsdb_info.max_refresh_rate_hz;
11550 if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
11551 freesync_capable = true;
11552
11553 connector->display_info.monitor_range.min_vfreq = vsdb_info.min_refresh_rate_hz;
11554 connector->display_info.monitor_range.max_vfreq = vsdb_info.max_refresh_rate_hz;
11555 }
11556 }
11557
11558update:
11559 if (dm_con_state)
11560 dm_con_state->freesync_capable = freesync_capable;
11561
11562 if (connector->vrr_capable_property)
11563 drm_connector_set_vrr_capable_property(connector,
11564 freesync_capable);
11565}
11566
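/*
 * Propagate adev->dm.force_timing_sync to every stream in the current DC
 * state and re-trigger CRTC synchronization.
 */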
11567void amdgpu_dm_trigger_timing_sync(struct drm_device *dev)
11568{
11569 struct amdgpu_device *adev = drm_to_adev(dev);
11570 struct dc *dc = adev->dm.dc;
11571 int i;
11572
11573 mutex_lock(&adev->dm.dc_lock);
11574 if (dc->current_state) {
11575 for (i = 0; i < dc->current_state->stream_count; ++i)
11576 dc->current_state->streams[i]
11577 ->triggered_crtc_reset.enabled =
11578 adev->dm.force_timing_sync;
11579
11580 dm_enable_per_frame_crtc_master_sync(dc->current_state);
11581 dc_trigger_sync(dc, dc->current_state);
11582 }
11583 mutex_unlock(&adev->dm.dc_lock);
11584}
11585
11586void dm_write_reg_func(const struct dc_context *ctx, uint32_t address,
11587 uint32_t value, const char *func_name)
11588{
11589#ifdef DM_CHECK_ADDR_0
11590 if (address == 0) {
11591 DC_ERR("invalid register write. address = 0\n");
11592 return;
11593 }
11594#endif
11595 cgs_write_register(ctx->cgs_device, address, value);
11596 trace_amdgpu_dc_wreg(&ctx->perf_trace->write_count, address, value);
11597}
11598
11599uint32_t dm_read_reg_func(const struct dc_context *ctx, uint32_t address,
11600 const char *func_name)
11601{
11602 uint32_t value;
11603#ifdef DM_CHECK_ADDR_0
11604 if (address == 0) {
11605 DC_ERR("invalid register read; address = 0\n");
11606 return 0;
11607 }
11608#endif
11609
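/*
 * Reads are not expected while DMUB register offload is gathering a
 * (non burst-write) command sequence; flag it and return 0.
 */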
11610 if (ctx->dmub_srv &&
11611 ctx->dmub_srv->reg_helper_offload.gather_in_progress &&
11612 !ctx->dmub_srv->reg_helper_offload.should_burst_write) {
11613 ASSERT(false);
11614 return 0;
11615 }
11616
11617 value = cgs_read_register(ctx->cgs_device, address);
11618
11619 trace_amdgpu_dc_rreg(&ctx->perf_trace->read_count, address, value);
11620
11621 return value;
11622}
11623
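/*
 * Translate the status of an asynchronous DMUB AUX / SET_CONFIG request into
 * a synchronous return value and operation result for the caller. For AUX, a
 * successful access returns the reply length; for SET_CONFIG it returns 0.
 * Any failure returns -1 with the error code in *operation_result.
 */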
11624int amdgpu_dm_set_dmub_async_sync_status(bool is_cmd_aux, struct dc_context *ctx,
11625 uint8_t status_type, uint32_t *operation_result)
11626{
11627 struct amdgpu_device *adev = ctx->driver_context;
11628 int return_status = -1;
11629 struct dmub_notification *p_notify = adev->dm.dmub_notify;
11630
11631 if (is_cmd_aux) {
11632 if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS) {
11633 return_status = p_notify->aux_reply.length;
11634 *operation_result = p_notify->result;
11635 } else if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_TIMEOUT) {
11636 *operation_result = AUX_RET_ERROR_TIMEOUT;
11637 } else if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_FAIL) {
11638 *operation_result = AUX_RET_ERROR_ENGINE_ACQUIRE;
11639 } else {
11640 *operation_result = AUX_RET_ERROR_UNKNOWN;
11641 }
11642 } else {
11643 if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS) {
11644 return_status = 0;
11645 *operation_result = p_notify->sc_status;
11646 } else {
11647 *operation_result = SET_CONFIG_UNKNOWN_ERROR;
11648 }
11649 }
11650
11651 return return_status;
11652}
11653
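/*
 * Issue an AUX or SET_CONFIG request to the DMUB firmware, wait up to 10
 * seconds for it to complete where needed, and translate the outcome for the
 * caller via amdgpu_dm_set_dmub_async_sync_status().
 */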
11654int amdgpu_dm_process_dmub_aux_transfer_sync(bool is_cmd_aux, struct dc_context *ctx,
11655 unsigned int link_index, void *cmd_payload, void *operation_result)
11656{
11657 struct amdgpu_device *adev = ctx->driver_context;
11658 int ret = 0;
11659
11660 if (is_cmd_aux) {
11661 dc_process_dmub_aux_transfer_async(ctx->dc,
11662 link_index, (struct aux_payload *)cmd_payload);
11663 } else if (dc_process_dmub_set_config_async(ctx->dc, link_index,
11664 (struct set_config_cmd_payload *)cmd_payload,
11665 adev->dm.dmub_notify)) {
11666 return amdgpu_dm_set_dmub_async_sync_status(is_cmd_aux,
11667 ctx, DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS,
11668 (uint32_t *)operation_result);
11669 }
11670
11671 ret = wait_for_completion_timeout(&adev->dm.dmub_aux_transfer_done, 10 * HZ);
11672 if (ret == 0) {
11673 DRM_ERROR("wait_for_completion_timeout timed out!\n");
11674 return amdgpu_dm_set_dmub_async_sync_status(is_cmd_aux,
11675 ctx, DMUB_ASYNC_TO_SYNC_ACCESS_TIMEOUT,
11676 (uint32_t *)operation_result);
11677 }
11678
11679 if (is_cmd_aux) {
11680 if (adev->dm.dmub_notify->result == AUX_RET_SUCCESS) {
11681 struct aux_payload *payload = (struct aux_payload *)cmd_payload;
11682
11683 payload->reply[0] = adev->dm.dmub_notify->aux_reply.command;
11684 if (!payload->write && adev->dm.dmub_notify->aux_reply.length &&
11685 payload->reply[0] == AUX_TRANSACTION_REPLY_AUX_ACK) {
11686 memcpy(payload->data, adev->dm.dmub_notify->aux_reply.data,
11687 adev->dm.dmub_notify->aux_reply.length);
11688 }
11689 }
11690 }
11691
11692 return amdgpu_dm_set_dmub_async_sync_status(is_cmd_aux,
11693 ctx, DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS,
11694 (uint32_t *)operation_result);
11695}
11696
11697/*
11698 * Check whether seamless boot is supported.
11699 *
11700 * So far we only support seamless boot on CHIP_VANGOGH.
11701 * If everything goes well, we may consider expanding
11702 * seamless boot to other ASICs.
11703 */
11704bool check_seamless_boot_capability(struct amdgpu_device *adev)
11705{
11706 switch (adev->asic_type) {
11707 case CHIP_VANGOGH:
11708 if (!adev->mman.keep_stolen_vga_memory)
11709 return true;
11710 break;
11711 default:
11712 break;
11713 }
11714
11715 return false;
11716}