Merge tag 'imx-drm-next-2021-01-08' of git://git.pengutronix.de/git/pza/linux into...
[linux-2.6-block.git] / drivers / gpu / drm / amd / display / amdgpu_dm / amdgpu_dm.c
1 /*
2  * Copyright 2015 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  * Authors: AMD
23  *
24  */
25
26 /* The caprices of the preprocessor require that this be declared right here */
27 #define CREATE_TRACE_POINTS
28
29 #include "dm_services_types.h"
30 #include "dc.h"
31 #include "dc/inc/core_types.h"
32 #include "dal_asic_id.h"
33 #include "dmub/dmub_srv.h"
34 #include "dc/inc/hw/dmcu.h"
35 #include "dc/inc/hw/abm.h"
36 #include "dc/dc_dmub_srv.h"
37 #include "amdgpu_dm_trace.h"
38
39 #include "vid.h"
40 #include "amdgpu.h"
41 #include "amdgpu_display.h"
42 #include "amdgpu_ucode.h"
43 #include "atom.h"
44 #include "amdgpu_dm.h"
45 #ifdef CONFIG_DRM_AMD_DC_HDCP
46 #include "amdgpu_dm_hdcp.h"
47 #include <drm/drm_hdcp.h>
48 #endif
49 #include "amdgpu_pm.h"
50
51 #include "amd_shared.h"
52 #include "amdgpu_dm_irq.h"
53 #include "dm_helpers.h"
54 #include "amdgpu_dm_mst_types.h"
55 #if defined(CONFIG_DEBUG_FS)
56 #include "amdgpu_dm_debugfs.h"
57 #endif
58
59 #include "ivsrcid/ivsrcid_vislands30.h"
60
61 #include <linux/module.h>
62 #include <linux/moduleparam.h>
63 #include <linux/types.h>
64 #include <linux/pm_runtime.h>
65 #include <linux/pci.h>
66 #include <linux/firmware.h>
67 #include <linux/component.h>
68
69 #include <drm/drm_atomic.h>
70 #include <drm/drm_atomic_uapi.h>
71 #include <drm/drm_atomic_helper.h>
72 #include <drm/drm_dp_mst_helper.h>
73 #include <drm/drm_fb_helper.h>
74 #include <drm/drm_fourcc.h>
75 #include <drm/drm_edid.h>
76 #include <drm/drm_vblank.h>
77 #include <drm/drm_audio_component.h>
78 #include <drm/drm_hdcp.h>
79
80 #if defined(CONFIG_DRM_AMD_DC_DCN)
81 #include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"
82
83 #include "dcn/dcn_1_0_offset.h"
84 #include "dcn/dcn_1_0_sh_mask.h"
85 #include "soc15_hw_ip.h"
86 #include "vega10_ip_offset.h"
87
88 #include "soc15_common.h"
89 #endif
90
91 #include "modules/inc/mod_freesync.h"
92 #include "modules/power/power_helpers.h"
93 #include "modules/inc/mod_info_packet.h"
94
95 #define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
96 MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);
97 #define FIRMWARE_SIENNA_CICHLID_DMUB "amdgpu/sienna_cichlid_dmcub.bin"
98 MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID_DMUB);
99 #define FIRMWARE_NAVY_FLOUNDER_DMUB "amdgpu/navy_flounder_dmcub.bin"
100 MODULE_FIRMWARE(FIRMWARE_NAVY_FLOUNDER_DMUB);
101 #define FIRMWARE_GREEN_SARDINE_DMUB "amdgpu/green_sardine_dmcub.bin"
102 MODULE_FIRMWARE(FIRMWARE_GREEN_SARDINE_DMUB);
103 #define FIRMWARE_VANGOGH_DMUB "amdgpu/vangogh_dmcub.bin"
104 MODULE_FIRMWARE(FIRMWARE_VANGOGH_DMUB);
105 #define FIRMWARE_DIMGREY_CAVEFISH_DMUB "amdgpu/dimgrey_cavefish_dmcub.bin"
106 MODULE_FIRMWARE(FIRMWARE_DIMGREY_CAVEFISH_DMUB);
107
108 #define FIRMWARE_RAVEN_DMCU             "amdgpu/raven_dmcu.bin"
109 MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);
110
111 #define FIRMWARE_NAVI12_DMCU            "amdgpu/navi12_dmcu.bin"
112 MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);
113
114 /* Number of bytes in PSP header for firmware. */
115 #define PSP_HEADER_BYTES 0x100
116
117 /* Number of bytes in PSP footer for firmware. */
118 #define PSP_FOOTER_BYTES 0x100
119
120 /**
121  * DOC: overview
122  *
123  * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
 * **dm**) sits between DRM and DC. It acts as a liaison, converting DRM
125  * requests into DC requests, and DC responses into DRM responses.
126  *
127  * The root control structure is &struct amdgpu_display_manager.
128  */
129
130 /* basic init/fini API */
131 static int amdgpu_dm_init(struct amdgpu_device *adev);
132 static void amdgpu_dm_fini(struct amdgpu_device *adev);
133
134 static enum drm_mode_subconnector get_subconnector_type(struct dc_link *link)
135 {
136         switch (link->dpcd_caps.dongle_type) {
137         case DISPLAY_DONGLE_NONE:
138                 return DRM_MODE_SUBCONNECTOR_Native;
139         case DISPLAY_DONGLE_DP_VGA_CONVERTER:
140                 return DRM_MODE_SUBCONNECTOR_VGA;
141         case DISPLAY_DONGLE_DP_DVI_CONVERTER:
142         case DISPLAY_DONGLE_DP_DVI_DONGLE:
143                 return DRM_MODE_SUBCONNECTOR_DVID;
144         case DISPLAY_DONGLE_DP_HDMI_CONVERTER:
145         case DISPLAY_DONGLE_DP_HDMI_DONGLE:
146                 return DRM_MODE_SUBCONNECTOR_HDMIA;
147         case DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE:
148         default:
149                 return DRM_MODE_SUBCONNECTOR_Unknown;
150         }
151 }
152
153 static void update_subconnector_property(struct amdgpu_dm_connector *aconnector)
154 {
155         struct dc_link *link = aconnector->dc_link;
156         struct drm_connector *connector = &aconnector->base;
157         enum drm_mode_subconnector subconnector = DRM_MODE_SUBCONNECTOR_Unknown;
158
159         if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
160                 return;
161
162         if (aconnector->dc_sink)
163                 subconnector = get_subconnector_type(link);
164
165         drm_object_property_set_value(&connector->base,
166                         connector->dev->mode_config.dp_subconnector_property,
167                         subconnector);
168 }
169
/*
 * initializes drm_device display related structures, based on the information
 * provided by DAL. The drm structures are: drm_crtc, drm_connector,
 * drm_encoder, drm_mode_config
 *
 * Returns 0 on success
 */
177 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
178 /* removes and deallocates the drm structures, created by the above function */
179 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);
180
181 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
182                                 struct drm_plane *plane,
183                                 unsigned long possible_crtcs,
184                                 const struct dc_plane_cap *plane_cap);
185 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
186                                struct drm_plane *plane,
187                                uint32_t link_index);
188 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
189                                     struct amdgpu_dm_connector *amdgpu_dm_connector,
190                                     uint32_t link_index,
191                                     struct amdgpu_encoder *amdgpu_encoder);
192 static int amdgpu_dm_encoder_init(struct drm_device *dev,
193                                   struct amdgpu_encoder *aencoder,
194                                   uint32_t link_index);
195
196 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);
197
198 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);
199
200 static int amdgpu_dm_atomic_check(struct drm_device *dev,
201                                   struct drm_atomic_state *state);
202
203 static void handle_cursor_update(struct drm_plane *plane,
204                                  struct drm_plane_state *old_plane_state);
205
206 static void amdgpu_dm_set_psr_caps(struct dc_link *link);
207 static bool amdgpu_dm_psr_enable(struct dc_stream_state *stream);
208 static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream);
209 static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream);
210 static bool amdgpu_dm_psr_disable_all(struct amdgpu_display_manager *dm);
211
212 static const struct drm_format_info *
213 amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd);
214
215 /*
216  * dm_vblank_get_counter
217  *
218  * @brief
219  * Get counter for number of vertical blanks
220  *
221  * @param
222  * struct amdgpu_device *adev - [in] desired amdgpu device
223  * int disp_idx - [in] which CRTC to get the counter from
224  *
225  * @return
226  * Counter for vertical blanks
227  */
228 static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
229 {
230         if (crtc >= adev->mode_info.num_crtc)
231                 return 0;
232         else {
233                 struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
234
235                 if (acrtc->dm_irq_params.stream == NULL) {
236                         DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
237                                   crtc);
238                         return 0;
239                 }
240
241                 return dc_stream_get_vblank_counter(acrtc->dm_irq_params.stream);
242         }
243 }
244
245 static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
246                                   u32 *vbl, u32 *position)
247 {
248         uint32_t v_blank_start, v_blank_end, h_position, v_position;
249
250         if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
251                 return -EINVAL;
252         else {
253                 struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
254
255                 if (acrtc->dm_irq_params.stream ==  NULL) {
256                         DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
257                                   crtc);
258                         return 0;
259                 }
260
261                 /*
262                  * TODO rework base driver to use values directly.
263                  * for now parse it back into reg-format
264                  */
265                 dc_stream_get_scanoutpos(acrtc->dm_irq_params.stream,
266                                          &v_blank_start,
267                                          &v_blank_end,
268                                          &h_position,
269                                          &v_position);
270
271                 *position = v_position | (h_position << 16);
272                 *vbl = v_blank_start | (v_blank_end << 16);
273         }
274
275         return 0;
276 }
277
/* IP block .is_idle hook: DM has no busy state to report, always idle. */
static bool dm_is_idle(void *handle)
{
	/* XXX todo */
	return true;
}
283
/* IP block .wait_for_idle hook: nothing to wait on, succeed immediately. */
static int dm_wait_for_idle(void *handle)
{
	/* XXX todo */
	return 0;
}
289
/* IP block .check_soft_reset hook: DM never requests a soft reset. */
static bool dm_check_soft_reset(void *handle)
{
	return false;
}
294
/* IP block .soft_reset hook: no reset actions implemented yet. */
static int dm_soft_reset(void *handle)
{
	/* XXX todo */
	return 0;
}
300
301 static struct amdgpu_crtc *
302 get_crtc_by_otg_inst(struct amdgpu_device *adev,
303                      int otg_inst)
304 {
305         struct drm_device *dev = adev_to_drm(adev);
306         struct drm_crtc *crtc;
307         struct amdgpu_crtc *amdgpu_crtc;
308
309         if (otg_inst == -1) {
310                 WARN_ON(1);
311                 return adev->mode_info.crtcs[0];
312         }
313
314         list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
315                 amdgpu_crtc = to_amdgpu_crtc(crtc);
316
317                 if (amdgpu_crtc->otg_inst == otg_inst)
318                         return amdgpu_crtc;
319         }
320
321         return NULL;
322 }
323
324 static inline bool amdgpu_dm_vrr_active_irq(struct amdgpu_crtc *acrtc)
325 {
326         return acrtc->dm_irq_params.freesync_config.state ==
327                        VRR_STATE_ACTIVE_VARIABLE ||
328                acrtc->dm_irq_params.freesync_config.state ==
329                        VRR_STATE_ACTIVE_FIXED;
330 }
331
332 static inline bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state)
333 {
334         return dm_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE ||
335                dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
336 }
337
/**
 * dm_pflip_high_irq() - Handle pageflip interrupt
 * @interrupt_params: pointer to the &struct common_irq_params registered for
 *                    this source; carries the amdgpu device and the irq
 *                    source used to resolve the CRTC.
 *
 * Handles the pageflip interrupt by notifying all interested parties
 * that the pageflip has been completed: the vblank event is either sent
 * immediately, or (in VRR front-porch) queued for the late vblank handler.
 */
static void dm_pflip_high_irq(void *interrupt_params)
{
	struct amdgpu_crtc *amdgpu_crtc;
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	unsigned long flags;
	struct drm_pending_vblank_event *e;
	uint32_t vpos, hpos, v_blank_start, v_blank_end;
	bool vrr_active;

	/* irq_src is offset by the pflip base to recover the OTG instance. */
	amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);

	/* IRQ could occur when in initial stage */
	/* TODO work and BO cleanup */
	if (amdgpu_crtc == NULL) {
		DRM_DEBUG_DRIVER("CRTC is null, returning.\n");
		return;
	}

	/* event_lock protects pflip_status and the pending event pointer. */
	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);

	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED){
		DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d !=AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p] \n",
						 amdgpu_crtc->pflip_status,
						 AMDGPU_FLIP_SUBMITTED,
						 amdgpu_crtc->crtc_id,
						 amdgpu_crtc);
		spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
		return;
	}

	/* page flip completed. */
	e = amdgpu_crtc->event;
	amdgpu_crtc->event = NULL;

	/* A submitted flip without a pending event is unexpected; warn but
	 * still clear the flip state below.
	 */
	if (!e)
		WARN_ON(1);

	vrr_active = amdgpu_dm_vrr_active_irq(amdgpu_crtc);

	/* Fixed refresh rate, or VRR scanout position outside front-porch? */
	if (!vrr_active ||
	    !dc_stream_get_scanoutpos(amdgpu_crtc->dm_irq_params.stream, &v_blank_start,
				      &v_blank_end, &hpos, &vpos) ||
	    (vpos < v_blank_start)) {
		/* Update to correct count and vblank timestamp if racing with
		 * vblank irq. This also updates to the correct vblank timestamp
		 * even in VRR mode, as scanout is past the front-porch atm.
		 */
		drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);

		/* Wake up userspace by sending the pageflip event with proper
		 * count and timestamp of vblank of flip completion.
		 */
		if (e) {
			drm_crtc_send_vblank_event(&amdgpu_crtc->base, e);

			/* Event sent, so done with vblank for this flip */
			drm_crtc_vblank_put(&amdgpu_crtc->base);
		}
	} else if (e) {
		/* VRR active and inside front-porch: vblank count and
		 * timestamp for pageflip event will only be up to date after
		 * drm_crtc_handle_vblank() has been executed from late vblank
		 * irq handler after start of back-porch (vline 0). We queue the
		 * pageflip event for send-out by drm_crtc_handle_vblank() with
		 * updated timestamp and count, once it runs after us.
		 *
		 * We need to open-code this instead of using the helper
		 * drm_crtc_arm_vblank_event(), as that helper would
		 * call drm_crtc_accurate_vblank_count(), which we must
		 * not call in VRR mode while we are in front-porch!
		 */

		/* sequence will be replaced by real count during send-out. */
		e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
		e->pipe = amdgpu_crtc->crtc_id;

		list_add_tail(&e->base.link, &adev_to_drm(adev)->vblank_event_list);
		e = NULL;
	}

	/* Keep track of vblank of this flip for flip throttling. We use the
	 * cooked hw counter, as that one incremented at start of this vblank
	 * of pageflip completion, so last_flip_vblank is the forbidden count
	 * for queueing new pageflips if vsync + VRR is enabled.
	 */
	amdgpu_crtc->dm_irq_params.last_flip_vblank =
		amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base);

	amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);

	DRM_DEBUG_DRIVER("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
			 amdgpu_crtc->crtc_id, amdgpu_crtc,
			 vrr_active, (int) !e);
}
442
/**
 * dm_vupdate_high_irq() - Handle VUPDATE interrupt
 * @interrupt_params: pointer to the &struct common_irq_params registered for
 *                    this source; carries the amdgpu device and the irq
 *                    source used to resolve the CRTC.
 *
 * Fires after the end of front-porch. In VRR mode this is where core vblank
 * handling is performed (see the comment below), and where below-the-range
 * BTR processing runs for pre-DCE12 ASICs.
 */
static void dm_vupdate_high_irq(void *interrupt_params)
{
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_crtc *acrtc;
	unsigned long flags;
	int vrr_active;

	/* irq_src is offset by the vupdate base to recover the OTG instance. */
	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);

	if (acrtc) {
		vrr_active = amdgpu_dm_vrr_active_irq(acrtc);

		DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d\n",
			      acrtc->crtc_id,
			      vrr_active);

		/* Core vblank handling is done here after end of front-porch in
		 * vrr mode, as vblank timestamping will give valid results
		 * while now done after front-porch. This will also deliver
		 * page-flip completion events that have been queued to us
		 * if a pageflip happened inside front-porch.
		 */
		if (vrr_active) {
			drm_crtc_handle_vblank(&acrtc->base);

			/* BTR processing for pre-DCE12 ASICs */
			if (acrtc->dm_irq_params.stream &&
			    adev->family < AMDGPU_FAMILY_AI) {
				/* event_lock protects the irq-side vrr_params. */
				spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
				mod_freesync_handle_v_update(
				    adev->dm.freesync_module,
				    acrtc->dm_irq_params.stream,
				    &acrtc->dm_irq_params.vrr_params);

				dc_stream_adjust_vmin_vmax(
				    adev->dm.dc,
				    acrtc->dm_irq_params.stream,
				    &acrtc->dm_irq_params.vrr_params.adjust);
				spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
			}
		}
	}
}
487
/**
 * dm_crtc_high_irq() - Handles CRTC interrupt
 * @interrupt_params: used for determining the CRTC instance
 *
 * Handles the CRTC/VSYNC interrupt by notifying DRM's VBLANK
 * event handler. Also performs CRC capture, BTR handling for Vega and
 * newer ASICs, and sends any pending pageflip event when all planes are
 * disabled (see the comment near the bottom for why).
 */
static void dm_crtc_high_irq(void *interrupt_params)
{
	struct common_irq_params *irq_params = interrupt_params;
	struct amdgpu_device *adev = irq_params->adev;
	struct amdgpu_crtc *acrtc;
	unsigned long flags;
	int vrr_active;

	/* irq_src is offset by the vblank base to recover the OTG instance. */
	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
	if (!acrtc)
		return;

	vrr_active = amdgpu_dm_vrr_active_irq(acrtc);

	DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
		      vrr_active, acrtc->dm_irq_params.active_planes);

	/**
	 * Core vblank handling at start of front-porch is only possible
	 * in non-vrr mode, as only there vblank timestamping will give
	 * valid results while done in front-porch. Otherwise defer it
	 * to dm_vupdate_high_irq after end of front-porch.
	 */
	if (!vrr_active)
		drm_crtc_handle_vblank(&acrtc->base);

	/**
	 * Following stuff must happen at start of vblank, for crc
	 * computation and below-the-range btr support in vrr mode.
	 */
	amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);

	/* BTR updates need to happen before VUPDATE on Vega and above. */
	/* Pre-Vega ASICs do their BTR processing in dm_vupdate_high_irq(). */
	if (adev->family < AMDGPU_FAMILY_AI)
		return;

	/* event_lock protects the irq-side vrr_params and pflip state. */
	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);

	if (acrtc->dm_irq_params.stream &&
	    acrtc->dm_irq_params.vrr_params.supported &&
	    acrtc->dm_irq_params.freesync_config.state ==
		    VRR_STATE_ACTIVE_VARIABLE) {
		mod_freesync_handle_v_update(adev->dm.freesync_module,
					     acrtc->dm_irq_params.stream,
					     &acrtc->dm_irq_params.vrr_params);

		dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc->dm_irq_params.stream,
					   &acrtc->dm_irq_params.vrr_params.adjust);
	}

	/*
	 * If there aren't any active_planes then DCH HUBP may be clock-gated.
	 * In that case, pageflip completion interrupts won't fire and pageflip
	 * completion events won't get delivered. Prevent this by sending
	 * pending pageflip events from here if a flip is still pending.
	 *
	 * If any planes are enabled, use dm_pflip_high_irq() instead, to
	 * avoid race conditions between flip programming and completion,
	 * which could cause too early flip completion events.
	 */
	if (adev->family >= AMDGPU_FAMILY_RV &&
	    acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
	    acrtc->dm_irq_params.active_planes == 0) {
		if (acrtc->event) {
			drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
			acrtc->event = NULL;
			drm_crtc_vblank_put(&acrtc->base);
		}
		acrtc->pflip_status = AMDGPU_FLIP_NONE;
	}

	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
}
568
569 static int dm_set_clockgating_state(void *handle,
570                   enum amd_clockgating_state state)
571 {
572         return 0;
573 }
574
575 static int dm_set_powergating_state(void *handle,
576                   enum amd_powergating_state state)
577 {
578         return 0;
579 }
580
581 /* Prototypes of private functions */
582 static int dm_early_init(void* handle);
583
584 /* Allocate memory for FBC compressed data  */
585 static void amdgpu_dm_fbc_init(struct drm_connector *connector)
586 {
587         struct drm_device *dev = connector->dev;
588         struct amdgpu_device *adev = drm_to_adev(dev);
589         struct dm_compressor_info *compressor = &adev->dm.compressor;
590         struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
591         struct drm_display_mode *mode;
592         unsigned long max_size = 0;
593
594         if (adev->dm.dc->fbc_compressor == NULL)
595                 return;
596
597         if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)
598                 return;
599
600         if (compressor->bo_ptr)
601                 return;
602
603
604         list_for_each_entry(mode, &connector->modes, head) {
605                 if (max_size < mode->htotal * mode->vtotal)
606                         max_size = mode->htotal * mode->vtotal;
607         }
608
609         if (max_size) {
610                 int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
611                             AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
612                             &compressor->gpu_addr, &compressor->cpu_addr);
613
614                 if (r)
615                         DRM_ERROR("DM: Failed to initialize FBC\n");
616                 else {
617                         adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
618                         DRM_INFO("DM: FBC alloc %lu\n", max_size*4);
619                 }
620
621         }
622
623 }
624
625 static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
626                                           int pipe, bool *enabled,
627                                           unsigned char *buf, int max_bytes)
628 {
629         struct drm_device *dev = dev_get_drvdata(kdev);
630         struct amdgpu_device *adev = drm_to_adev(dev);
631         struct drm_connector *connector;
632         struct drm_connector_list_iter conn_iter;
633         struct amdgpu_dm_connector *aconnector;
634         int ret = 0;
635
636         *enabled = false;
637
638         mutex_lock(&adev->dm.audio_lock);
639
640         drm_connector_list_iter_begin(dev, &conn_iter);
641         drm_for_each_connector_iter(connector, &conn_iter) {
642                 aconnector = to_amdgpu_dm_connector(connector);
643                 if (aconnector->audio_inst != port)
644                         continue;
645
646                 *enabled = true;
647                 ret = drm_eld_size(connector->eld);
648                 memcpy(buf, connector->eld, min(max_bytes, ret));
649
650                 break;
651         }
652         drm_connector_list_iter_end(&conn_iter);
653
654         mutex_unlock(&adev->dm.audio_lock);
655
656         DRM_DEBUG_KMS("Get ELD : idx=%d ret=%d en=%d\n", port, ret, *enabled);
657
658         return ret;
659 }
660
/* Ops table handed to the HDA driver through the audio component framework. */
static const struct drm_audio_component_ops amdgpu_dm_audio_component_ops = {
	.get_eld = amdgpu_dm_audio_component_get_eld,
};
664
665 static int amdgpu_dm_audio_component_bind(struct device *kdev,
666                                        struct device *hda_kdev, void *data)
667 {
668         struct drm_device *dev = dev_get_drvdata(kdev);
669         struct amdgpu_device *adev = drm_to_adev(dev);
670         struct drm_audio_component *acomp = data;
671
672         acomp->ops = &amdgpu_dm_audio_component_ops;
673         acomp->dev = kdev;
674         adev->dm.audio_component = acomp;
675
676         return 0;
677 }
678
679 static void amdgpu_dm_audio_component_unbind(struct device *kdev,
680                                           struct device *hda_kdev, void *data)
681 {
682         struct drm_device *dev = dev_get_drvdata(kdev);
683         struct amdgpu_device *adev = drm_to_adev(dev);
684         struct drm_audio_component *acomp = data;
685
686         acomp->ops = NULL;
687         acomp->dev = NULL;
688         adev->dm.audio_component = NULL;
689 }
690
/* bind/unbind pair registered with the component framework in audio_init. */
static const struct component_ops amdgpu_dm_audio_component_bind_ops = {
	.bind	= amdgpu_dm_audio_component_bind,
	.unbind = amdgpu_dm_audio_component_unbind,
};
695
696 static int amdgpu_dm_audio_init(struct amdgpu_device *adev)
697 {
698         int i, ret;
699
700         if (!amdgpu_audio)
701                 return 0;
702
703         adev->mode_info.audio.enabled = true;
704
705         adev->mode_info.audio.num_pins = adev->dm.dc->res_pool->audio_count;
706
707         for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
708                 adev->mode_info.audio.pin[i].channels = -1;
709                 adev->mode_info.audio.pin[i].rate = -1;
710                 adev->mode_info.audio.pin[i].bits_per_sample = -1;
711                 adev->mode_info.audio.pin[i].status_bits = 0;
712                 adev->mode_info.audio.pin[i].category_code = 0;
713                 adev->mode_info.audio.pin[i].connected = false;
714                 adev->mode_info.audio.pin[i].id =
715                         adev->dm.dc->res_pool->audios[i]->inst;
716                 adev->mode_info.audio.pin[i].offset = 0;
717         }
718
719         ret = component_add(adev->dev, &amdgpu_dm_audio_component_bind_ops);
720         if (ret < 0)
721                 return ret;
722
723         adev->dm.audio_registered = true;
724
725         return 0;
726 }
727
728 static void amdgpu_dm_audio_fini(struct amdgpu_device *adev)
729 {
730         if (!amdgpu_audio)
731                 return;
732
733         if (!adev->mode_info.audio.enabled)
734                 return;
735
736         if (adev->dm.audio_registered) {
737                 component_del(adev->dev, &amdgpu_dm_audio_component_bind_ops);
738                 adev->dm.audio_registered = false;
739         }
740
741         /* TODO: Disable audio? */
742
743         adev->mode_info.audio.enabled = false;
744 }
745
746 static  void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
747 {
748         struct drm_audio_component *acomp = adev->dm.audio_component;
749
750         if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
751                 DRM_DEBUG_KMS("Notify ELD: %d\n", pin);
752
753                 acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
754                                                  pin, -1);
755         }
756 }
757
/*
 * dm_dmub_hw_init() - Copy DMUB firmware into its framebuffer windows and
 * bring the DMUB (Display Micro-Controller Unit B) hardware up.
 * @adev: amdgpu device whose DMUB service (set up by dm_dmub_sw_init())
 *        should be hardware-initialized.
 *
 * Fills the instruction/const, bss-data and VBIOS windows of the
 * framebuffer laid out at sw-init time, clears the mailbox, tracebuffer
 * and fw-state windows, starts the DMUB, waits for its auto-load, and
 * finally creates the DC-side DMUB server handle.
 *
 * Return: 0 on success (including ASICs without DMUB support), -EINVAL on
 * firmware/hardware setup failure, -ENOMEM if the DC DMUB server cannot
 * be allocated.
 */
static int dm_dmub_hw_init(struct amdgpu_device *adev)
{
	const struct dmcub_firmware_header_v1_0 *hdr;
	struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
	struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
	const struct firmware *dmub_fw = adev->dm.dmub_fw;
	struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
	struct abm *abm = adev->dm.dc->res_pool->abm;
	struct dmub_srv_hw_params hw_params;
	enum dmub_status status;
	const unsigned char *fw_inst_const, *fw_bss_data;
	uint32_t i, fw_inst_const_size, fw_bss_data_size;
	bool has_hw_support;

	if (!dmub_srv)
		/* DMUB isn't supported on the ASIC. */
		return 0;

	if (!fb_info) {
		DRM_ERROR("No framebuffer info for DMUB service.\n");
		return -EINVAL;
	}

	if (!dmub_fw) {
		/* Firmware required for DMUB support. */
		DRM_ERROR("No firmware provided for DMUB.\n");
		return -EINVAL;
	}

	status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error checking HW support for DMUB: %d\n", status);
		return -EINVAL;
	}

	if (!has_hw_support) {
		DRM_INFO("DMUB unsupported on ASIC\n");
		return 0;
	}

	hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;

	/* Instruction/const data follows the ucode header plus the PSP
	 * signing header; bss/data follows the entire inst_const region. */
	fw_inst_const = dmub_fw->data +
			le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
			PSP_HEADER_BYTES;

	fw_bss_data = dmub_fw->data +
		      le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
		      le32_to_cpu(hdr->inst_const_bytes);

	/* Copy firmware and bios info into FB memory. */
	fw_inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
			     PSP_HEADER_BYTES - PSP_FOOTER_BYTES;

	fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes);

	/* if adev->firmware.load_type == AMDGPU_FW_LOAD_PSP,
	 * amdgpu_ucode_init_single_fw will load dmub firmware
	 * fw_inst_const part to cw0; otherwise, the firmware back door load
	 * will be done by dm_dmub_hw_init
	 */
	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
		memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
				fw_inst_const_size);
	}

	/* bss_data may legitimately be empty in some firmware images. */
	if (fw_bss_data_size)
		memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr,
		       fw_bss_data, fw_bss_data_size);

	/* Copy firmware bios info into FB memory. */
	memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,
	       adev->bios_size);

	/* Reset regions that need to be reset. */
	memset(fb_info->fb[DMUB_WINDOW_4_MAILBOX].cpu_addr, 0,
	fb_info->fb[DMUB_WINDOW_4_MAILBOX].size);

	memset(fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size);

	memset(fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr, 0,
	       fb_info->fb[DMUB_WINDOW_6_FW_STATE].size);

	/* Initialize hardware. */
	memset(&hw_params, 0, sizeof(hw_params));
	hw_params.fb_base = adev->gmc.fb_start;
	hw_params.fb_offset = adev->gmc.aper_base;

	/* backdoor load firmware and trigger dmub running */
	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
		hw_params.load_inst_const = true;

	if (dmcu)
		hw_params.psp_version = dmcu->psp_version;

	/* Hand the DMUB service every framebuffer window computed above. */
	for (i = 0; i < fb_info->num_fb; ++i)
		hw_params.fb[i] = &fb_info->fb[i];

	status = dmub_srv_hw_init(dmub_srv, &hw_params);
	if (status != DMUB_STATUS_OK) {
		DRM_ERROR("Error initializing DMUB HW: %d\n", status);
		return -EINVAL;
	}

	/* Wait for firmware load to finish. */
	status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
	if (status != DMUB_STATUS_OK)
		/* Non-fatal: continue bring-up even if auto-load timed out. */
		DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);

	/* Init DMCU and ABM if available. */
	if (dmcu && abm) {
		dmcu->funcs->dmcu_init(dmcu);
		abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
	}

	adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
	if (!adev->dm.dc->ctx->dmub_srv) {
		DRM_ERROR("Couldn't allocate DC DMUB server!\n");
		return -ENOMEM;
	}

	DRM_INFO("DMUB hardware initialized: version=0x%08X\n",
		 adev->dm.dmcub_fw_version);

	return 0;
}
885
886 #if defined(CONFIG_DRM_AMD_DC_DCN)
/*
 * mmhub_read_system_context() - Fill @pa_config with the physical address
 * space layout (system aperture, AGP aperture, GART page table) that DC
 * needs for display-side address translation.
 * @adev: amdgpu device to read GMC/GART state from.
 * @pa_config: output physical address space configuration for DC.
 *
 * Pure software: reads adev->gmc / adev->gart and writes only *pa_config.
 */
static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_addr_space_config *pa_config)
{
	uint64_t pt_base;
	uint32_t logical_addr_low;
	uint32_t logical_addr_high;
	uint32_t agp_base, agp_bot, agp_top;
	PHYSICAL_ADDRESS_LOC page_table_start, page_table_end, page_table_base;

	/* Aperture bounds are kept in 256KB units here (>> 18), matching the
	 * << 18 expansion below — presumably the MC_VM_SYSTEM_APERTURE
	 * register granularity; TODO confirm against register spec. */
	logical_addr_low  = min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18;
	pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);

	if (adev->apu_flags & AMD_APU_IS_RAVEN2)
		/*
		 * Raven2 has a HW issue that it is unable to use the vram which
		 * is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR. So here is the
		 * workaround that increase system aperture high address (add 1)
		 * to get rid of the VM fault and hardware hang.
		 */
		logical_addr_high = max((adev->gmc.fb_end >> 18) + 0x1, adev->gmc.agp_end >> 18);
	else
		logical_addr_high = max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18;

	/* AGP aperture bounds in 16MB units (>> 24 / << 24 pair below). */
	agp_base = 0;
	agp_bot = adev->gmc.agp_start >> 24;
	agp_top = adev->gmc.agp_end >> 24;


	/* GART page-table addresses split into 4KB-page-aligned low/high
	 * halves; the high part keeps only bits 44..47 of the address. */
	page_table_start.high_part = (u32)(adev->gmc.gart_start >> 44) & 0xF;
	page_table_start.low_part = (u32)(adev->gmc.gart_start >> 12);
	page_table_end.high_part = (u32)(adev->gmc.gart_end >> 44) & 0xF;
	page_table_end.low_part = (u32)(adev->gmc.gart_end >> 12);
	page_table_base.high_part = upper_32_bits(pt_base) & 0xF;
	page_table_base.low_part = lower_32_bits(pt_base);

	/* Expand the unit-scaled values back into byte addresses for DC. */
	pa_config->system_aperture.start_addr = (uint64_t)logical_addr_low << 18;
	pa_config->system_aperture.end_addr = (uint64_t)logical_addr_high << 18;

	pa_config->system_aperture.agp_base = (uint64_t)agp_base << 24 ;
	pa_config->system_aperture.agp_bot = (uint64_t)agp_bot << 24;
	pa_config->system_aperture.agp_top = (uint64_t)agp_top << 24;

	pa_config->system_aperture.fb_base = adev->gmc.fb_start;
	pa_config->system_aperture.fb_offset = adev->gmc.aper_base;
	pa_config->system_aperture.fb_top = adev->gmc.fb_end;

	pa_config->gart_config.page_table_start_addr = page_table_start.quad_part << 12;
	pa_config->gart_config.page_table_end_addr = page_table_end.quad_part << 12;
	pa_config->gart_config.page_table_base_addr = page_table_base.quad_part;

	/* Host-VM translation is not enabled on this path. */
	pa_config->is_hvm_enabled = 0;

}
939 #endif
940
941 #ifdef CONFIG_DEBUG_FS
942 static int create_crtc_crc_properties(struct amdgpu_display_manager *dm)
943 {
944         dm->crc_win_x_start_property =
945                 drm_property_create_range(adev_to_drm(dm->adev),
946                                           DRM_MODE_PROP_ATOMIC,
947                                           "AMD_CRC_WIN_X_START", 0, U16_MAX);
948         if (!dm->crc_win_x_start_property)
949                 return -ENOMEM;
950
951         dm->crc_win_y_start_property =
952                 drm_property_create_range(adev_to_drm(dm->adev),
953                                           DRM_MODE_PROP_ATOMIC,
954                                           "AMD_CRC_WIN_Y_START", 0, U16_MAX);
955         if (!dm->crc_win_y_start_property)
956                 return -ENOMEM;
957
958         dm->crc_win_x_end_property =
959                 drm_property_create_range(adev_to_drm(dm->adev),
960                                           DRM_MODE_PROP_ATOMIC,
961                                           "AMD_CRC_WIN_X_END", 0, U16_MAX);
962         if (!dm->crc_win_x_end_property)
963                 return -ENOMEM;
964
965         dm->crc_win_y_end_property =
966                 drm_property_create_range(adev_to_drm(dm->adev),
967                                           DRM_MODE_PROP_ATOMIC,
968                                           "AMD_CRC_WIN_Y_END", 0, U16_MAX);
969         if (!dm->crc_win_y_end_property)
970                 return -ENOMEM;
971
972         return 0;
973 }
974 #endif
975
/*
 * amdgpu_dm_init() - Bring up the display manager: IRQ handling, the
 * Display Core (DC) instance, DMUB hardware, freesync, HDCP, CRC debugfs
 * properties and the DRM device state.
 * @adev: amdgpu device to initialize display support for.
 *
 * On any failure the partially built state is torn down through
 * amdgpu_dm_fini() via the error label.
 *
 * Return: 0 on success, -EINVAL on failure.
 */
static int amdgpu_dm_init(struct amdgpu_device *adev)
{
	struct dc_init_data init_data;
#ifdef CONFIG_DRM_AMD_DC_HDCP
	struct dc_callback_init init_params;
#endif
	int r;

	adev->dm.ddev = adev_to_drm(adev);
	adev->dm.adev = adev;

	/* Zero all the fields */
	memset(&init_data, 0, sizeof(init_data));
#ifdef CONFIG_DRM_AMD_DC_HDCP
	memset(&init_params, 0, sizeof(init_params));
#endif

	mutex_init(&adev->dm.dc_lock);
	mutex_init(&adev->dm.audio_lock);

	if(amdgpu_dm_irq_init(adev)) {
		DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
		goto error;
	}

	/* Describe the ASIC to DC. */
	init_data.asic_id.chip_family = adev->family;

	init_data.asic_id.pci_revision_id = adev->pdev->revision;
	init_data.asic_id.hw_internal_rev = adev->external_rev_id;

	init_data.asic_id.vram_width = adev->gmc.vram_width;
	/* TODO: initialize init_data.asic_id.vram_type here!!!! */
	init_data.asic_id.atombios_base_address =
		adev->mode_info.atom_context->bios;

	init_data.driver = adev;

	adev->dm.cgs_device = amdgpu_cgs_create_device(adev);

	if (!adev->dm.cgs_device) {
		DRM_ERROR("amdgpu: failed to create cgs device.\n");
		goto error;
	}

	init_data.cgs_device = adev->dm.cgs_device;

	init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;

	/* Per-ASIC feature flags; gpu_vm_support is set for the APU parts
	 * listed here. */
	switch (adev->asic_type) {
	case CHIP_CARRIZO:
	case CHIP_STONEY:
	case CHIP_RAVEN:
	case CHIP_RENOIR:
		init_data.flags.gpu_vm_support = true;
		/* Green Sardine (Renoir refresh) runs without DMCU —
		 * presumably ABM moved to DMUB there; TODO confirm. */
		if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
			init_data.flags.disable_dmcu = true;
		break;
#if defined(CONFIG_DRM_AMD_DC_DCN)
	case CHIP_VANGOGH:
		init_data.flags.gpu_vm_support = true;
		break;
#endif
	default:
		break;
	}

	/* Translate module-parameter feature masks into DC init flags. */
	if (amdgpu_dc_feature_mask & DC_FBC_MASK)
		init_data.flags.fbc_support = true;

	if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK)
		init_data.flags.multi_mon_pp_mclk_switch = true;

	if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK)
		init_data.flags.disable_fractional_pwm = true;

	init_data.flags.power_down_display_on_boot = true;

	init_data.soc_bounding_box = adev->dm.soc_bounding_box;

	/* Display Core create. */
	adev->dm.dc = dc_create(&init_data);

	if (adev->dm.dc) {
		DRM_INFO("Display Core initialized with v%s!\n", DC_VER);
	} else {
		DRM_INFO("Display Core failed to initialize with v%s!\n", DC_VER);
		goto error;
	}

	/* Apply debug-mask overrides from the amdgpu_dc_debug_mask module
	 * parameter onto the freshly created DC instance. */
	if (amdgpu_dc_debug_mask & DC_DISABLE_PIPE_SPLIT) {
		adev->dm.dc->debug.force_single_disp_pipe_split = false;
		adev->dm.dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
	}

	if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
		adev->dm.dc->debug.disable_stutter = amdgpu_pp_feature_mask & PP_STUTTER_MODE ? false : true;

	if (amdgpu_dc_debug_mask & DC_DISABLE_STUTTER)
		adev->dm.dc->debug.disable_stutter = true;

	if (amdgpu_dc_debug_mask & DC_DISABLE_DSC)
		adev->dm.dc->debug.disable_dsc = true;

	if (amdgpu_dc_debug_mask & DC_DISABLE_CLOCK_GATING)
		adev->dm.dc->debug.disable_clock_gate = true;

	r = dm_dmub_hw_init(adev);
	if (r) {
		DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
		goto error;
	}

	dc_hardware_init(adev->dm.dc);

#if defined(CONFIG_DRM_AMD_DC_DCN)
	/* APUs need the physical address space config passed to DC. */
	if (adev->apu_flags) {
		struct dc_phy_addr_space_config pa_config;

		mmhub_read_system_context(adev, &pa_config);

		// Call the DC init_memory func
		dc_setup_system_context(adev->dm.dc, &pa_config);
	}
#endif

	/* Freesync failure is non-fatal: only logged, init continues. */
	adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
	if (!adev->dm.freesync_module) {
		DRM_ERROR(
		"amdgpu: failed to initialize freesync_module.\n");
	} else
		DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
				adev->dm.freesync_module);

	amdgpu_dm_init_color_mod();

#ifdef CONFIG_DRM_AMD_DC_HDCP
	/* HDCP needs PSP support, available from Raven onward. */
	if (adev->dm.dc->caps.max_links > 0 && adev->asic_type >= CHIP_RAVEN) {
		adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);

		if (!adev->dm.hdcp_workqueue)
			DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n");
		else
			DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);

		dc_init_callbacks(adev->dm.dc, &init_params);
	}
#endif
#ifdef CONFIG_DEBUG_FS
	if (create_crtc_crc_properties(&adev->dm))
		DRM_ERROR("amdgpu: failed to create crc property.\n");
#endif
	if (amdgpu_dm_initialize_drm_device(adev)) {
		DRM_ERROR(
		"amdgpu: failed to initialize sw for display support.\n");
		goto error;
	}

	/* create fake encoders for MST */
	dm_dp_create_fake_mst_encoders(adev);

	/* TODO: Add_display_info? */

	/* TODO use dynamic cursor width */
	adev_to_drm(adev)->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
	adev_to_drm(adev)->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;

	if (drm_vblank_init(adev_to_drm(adev), adev->dm.display_indexes_num)) {
		DRM_ERROR(
		"amdgpu: failed to initialize sw for display support.\n");
		goto error;
	}


	DRM_DEBUG_DRIVER("KMS initialized.\n");

	return 0;
error:
	amdgpu_dm_fini(adev);

	return -EINVAL;
}
1157
1158 static void amdgpu_dm_fini(struct amdgpu_device *adev)
1159 {
1160         int i;
1161
1162         for (i = 0; i < adev->dm.display_indexes_num; i++) {
1163                 drm_encoder_cleanup(&adev->dm.mst_encoders[i].base);
1164         }
1165
1166         amdgpu_dm_audio_fini(adev);
1167
1168         amdgpu_dm_destroy_drm_device(&adev->dm);
1169
1170 #ifdef CONFIG_DRM_AMD_DC_HDCP
1171         if (adev->dm.hdcp_workqueue) {
1172                 hdcp_destroy(adev->dm.hdcp_workqueue);
1173                 adev->dm.hdcp_workqueue = NULL;
1174         }
1175
1176         if (adev->dm.dc)
1177                 dc_deinit_callbacks(adev->dm.dc);
1178 #endif
1179         if (adev->dm.dc->ctx->dmub_srv) {
1180                 dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);
1181                 adev->dm.dc->ctx->dmub_srv = NULL;
1182         }
1183
1184         if (adev->dm.dmub_bo)
1185                 amdgpu_bo_free_kernel(&adev->dm.dmub_bo,
1186                                       &adev->dm.dmub_bo_gpu_addr,
1187                                       &adev->dm.dmub_bo_cpu_addr);
1188
1189         /* DC Destroy TODO: Replace destroy DAL */
1190         if (adev->dm.dc)
1191                 dc_destroy(&adev->dm.dc);
1192         /*
1193          * TODO: pageflip, vlank interrupt
1194          *
1195          * amdgpu_dm_irq_fini(adev);
1196          */
1197
1198         if (adev->dm.cgs_device) {
1199                 amdgpu_cgs_destroy_device(adev->dm.cgs_device);
1200                 adev->dm.cgs_device = NULL;
1201         }
1202         if (adev->dm.freesync_module) {
1203                 mod_freesync_destroy(adev->dm.freesync_module);
1204                 adev->dm.freesync_module = NULL;
1205         }
1206
1207         mutex_destroy(&adev->dm.audio_lock);
1208         mutex_destroy(&adev->dm.dc_lock);
1209
1210         return;
1211 }
1212
1213 static int load_dmcu_fw(struct amdgpu_device *adev)
1214 {
1215         const char *fw_name_dmcu = NULL;
1216         int r;
1217         const struct dmcu_firmware_header_v1_0 *hdr;
1218
1219         switch(adev->asic_type) {
1220 #if defined(CONFIG_DRM_AMD_DC_SI)
1221         case CHIP_TAHITI:
1222         case CHIP_PITCAIRN:
1223         case CHIP_VERDE:
1224         case CHIP_OLAND:
1225 #endif
1226         case CHIP_BONAIRE:
1227         case CHIP_HAWAII:
1228         case CHIP_KAVERI:
1229         case CHIP_KABINI:
1230         case CHIP_MULLINS:
1231         case CHIP_TONGA:
1232         case CHIP_FIJI:
1233         case CHIP_CARRIZO:
1234         case CHIP_STONEY:
1235         case CHIP_POLARIS11:
1236         case CHIP_POLARIS10:
1237         case CHIP_POLARIS12:
1238         case CHIP_VEGAM:
1239         case CHIP_VEGA10:
1240         case CHIP_VEGA12:
1241         case CHIP_VEGA20:
1242         case CHIP_NAVI10:
1243         case CHIP_NAVI14:
1244         case CHIP_RENOIR:
1245         case CHIP_SIENNA_CICHLID:
1246         case CHIP_NAVY_FLOUNDER:
1247         case CHIP_DIMGREY_CAVEFISH:
1248         case CHIP_VANGOGH:
1249                 return 0;
1250         case CHIP_NAVI12:
1251                 fw_name_dmcu = FIRMWARE_NAVI12_DMCU;
1252                 break;
1253         case CHIP_RAVEN:
1254                 if (ASICREV_IS_PICASSO(adev->external_rev_id))
1255                         fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
1256                 else if (ASICREV_IS_RAVEN2(adev->external_rev_id))
1257                         fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
1258                 else
1259                         return 0;
1260                 break;
1261         default:
1262                 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
1263                 return -EINVAL;
1264         }
1265
1266         if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
1267                 DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n");
1268                 return 0;
1269         }
1270
1271         r = request_firmware_direct(&adev->dm.fw_dmcu, fw_name_dmcu, adev->dev);
1272         if (r == -ENOENT) {
1273                 /* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
1274                 DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
1275                 adev->dm.fw_dmcu = NULL;
1276                 return 0;
1277         }
1278         if (r) {
1279                 dev_err(adev->dev, "amdgpu_dm: Can't load firmware \"%s\"\n",
1280                         fw_name_dmcu);
1281                 return r;
1282         }
1283
1284         r = amdgpu_ucode_validate(adev->dm.fw_dmcu);
1285         if (r) {
1286                 dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
1287                         fw_name_dmcu);
1288                 release_firmware(adev->dm.fw_dmcu);
1289                 adev->dm.fw_dmcu = NULL;
1290                 return r;
1291         }
1292
1293         hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data;
1294         adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM;
1295         adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu;
1296         adev->firmware.fw_size +=
1297                 ALIGN(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1298
1299         adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV;
1300         adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu;
1301         adev->firmware.fw_size +=
1302                 ALIGN(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1303
1304         adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version);
1305
1306         DRM_DEBUG_KMS("PSP loading DMCU firmware\n");
1307
1308         return 0;
1309 }
1310
1311 static uint32_t amdgpu_dm_dmub_reg_read(void *ctx, uint32_t address)
1312 {
1313         struct amdgpu_device *adev = ctx;
1314
1315         return dm_read_reg(adev->dm.dc->ctx, address);
1316 }
1317
1318 static void amdgpu_dm_dmub_reg_write(void *ctx, uint32_t address,
1319                                      uint32_t value)
1320 {
1321         struct amdgpu_device *adev = ctx;
1322
1323         return dm_write_reg(adev->dm.dc->ctx, address, value);
1324 }
1325
1326 static int dm_dmub_sw_init(struct amdgpu_device *adev)
1327 {
1328         struct dmub_srv_create_params create_params;
1329         struct dmub_srv_region_params region_params;
1330         struct dmub_srv_region_info region_info;
1331         struct dmub_srv_fb_params fb_params;
1332         struct dmub_srv_fb_info *fb_info;
1333         struct dmub_srv *dmub_srv;
1334         const struct dmcub_firmware_header_v1_0 *hdr;
1335         const char *fw_name_dmub;
1336         enum dmub_asic dmub_asic;
1337         enum dmub_status status;
1338         int r;
1339
1340         switch (adev->asic_type) {
1341         case CHIP_RENOIR:
1342                 dmub_asic = DMUB_ASIC_DCN21;
1343                 fw_name_dmub = FIRMWARE_RENOIR_DMUB;
1344                 if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
1345                         fw_name_dmub = FIRMWARE_GREEN_SARDINE_DMUB;
1346                 break;
1347         case CHIP_SIENNA_CICHLID:
1348                 dmub_asic = DMUB_ASIC_DCN30;
1349                 fw_name_dmub = FIRMWARE_SIENNA_CICHLID_DMUB;
1350                 break;
1351         case CHIP_NAVY_FLOUNDER:
1352                 dmub_asic = DMUB_ASIC_DCN30;
1353                 fw_name_dmub = FIRMWARE_NAVY_FLOUNDER_DMUB;
1354                 break;
1355         case CHIP_VANGOGH:
1356                 dmub_asic = DMUB_ASIC_DCN301;
1357                 fw_name_dmub = FIRMWARE_VANGOGH_DMUB;
1358                 break;
1359         case CHIP_DIMGREY_CAVEFISH:
1360                 dmub_asic = DMUB_ASIC_DCN302;
1361                 fw_name_dmub = FIRMWARE_DIMGREY_CAVEFISH_DMUB;
1362                 break;
1363
1364         default:
1365                 /* ASIC doesn't support DMUB. */
1366                 return 0;
1367         }
1368
1369         r = request_firmware_direct(&adev->dm.dmub_fw, fw_name_dmub, adev->dev);
1370         if (r) {
1371                 DRM_ERROR("DMUB firmware loading failed: %d\n", r);
1372                 return 0;
1373         }
1374
1375         r = amdgpu_ucode_validate(adev->dm.dmub_fw);
1376         if (r) {
1377                 DRM_ERROR("Couldn't validate DMUB firmware: %d\n", r);
1378                 return 0;
1379         }
1380
1381         hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;
1382
1383         if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
1384                 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
1385                         AMDGPU_UCODE_ID_DMCUB;
1386                 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw =
1387                         adev->dm.dmub_fw;
1388                 adev->firmware.fw_size +=
1389                         ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);
1390
1391                 DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
1392                          adev->dm.dmcub_fw_version);
1393         }
1394
1395         adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);
1396
1397         adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL);
1398         dmub_srv = adev->dm.dmub_srv;
1399
1400         if (!dmub_srv) {
1401                 DRM_ERROR("Failed to allocate DMUB service!\n");
1402                 return -ENOMEM;
1403         }
1404
1405         memset(&create_params, 0, sizeof(create_params));
1406         create_params.user_ctx = adev;
1407         create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read;
1408         create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write;
1409         create_params.asic = dmub_asic;
1410
1411         /* Create the DMUB service. */
1412         status = dmub_srv_create(dmub_srv, &create_params);
1413         if (status != DMUB_STATUS_OK) {
1414                 DRM_ERROR("Error creating DMUB service: %d\n", status);
1415                 return -EINVAL;
1416         }
1417
1418         /* Calculate the size of all the regions for the DMUB service. */
1419         memset(&region_params, 0, sizeof(region_params));
1420
1421         region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
1422                                         PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
1423         region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
1424         region_params.vbios_size = adev->bios_size;
1425         region_params.fw_bss_data = region_params.bss_data_size ?
1426                 adev->dm.dmub_fw->data +
1427                 le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1428                 le32_to_cpu(hdr->inst_const_bytes) : NULL;
1429         region_params.fw_inst_const =
1430                 adev->dm.dmub_fw->data +
1431                 le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1432                 PSP_HEADER_BYTES;
1433
1434         status = dmub_srv_calc_region_info(dmub_srv, &region_params,
1435                                            &region_info);
1436
1437         if (status != DMUB_STATUS_OK) {
1438                 DRM_ERROR("Error calculating DMUB region info: %d\n", status);
1439                 return -EINVAL;
1440         }
1441
1442         /*
1443          * Allocate a framebuffer based on the total size of all the regions.
1444          * TODO: Move this into GART.
1445          */
1446         r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE,
1447                                     AMDGPU_GEM_DOMAIN_VRAM, &adev->dm.dmub_bo,
1448                                     &adev->dm.dmub_bo_gpu_addr,
1449                                     &adev->dm.dmub_bo_cpu_addr);
1450         if (r)
1451                 return r;
1452
1453         /* Rebase the regions on the framebuffer address. */
1454         memset(&fb_params, 0, sizeof(fb_params));
1455         fb_params.cpu_addr = adev->dm.dmub_bo_cpu_addr;
1456         fb_params.gpu_addr = adev->dm.dmub_bo_gpu_addr;
1457         fb_params.region_info = &region_info;
1458
1459         adev->dm.dmub_fb_info =
1460                 kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL);
1461         fb_info = adev->dm.dmub_fb_info;
1462
1463         if (!fb_info) {
1464                 DRM_ERROR(
1465                         "Failed to allocate framebuffer info for DMUB service!\n");
1466                 return -ENOMEM;
1467         }
1468
1469         status = dmub_srv_calc_fb_info(dmub_srv, &fb_params, fb_info);
1470         if (status != DMUB_STATUS_OK) {
1471                 DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
1472                 return -EINVAL;
1473         }
1474
1475         return 0;
1476 }
1477
/* IP-block .sw_init hook: set up DMUB, then fetch the DMCU firmware. */
static int dm_sw_init(void *handle)
{
	struct amdgpu_device *adev = handle;
	int ret;

	ret = dm_dmub_sw_init(adev);

	return ret ? ret : load_dmcu_fw(adev);
}
1489
1490 static int dm_sw_fini(void *handle)
1491 {
1492         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1493
1494         kfree(adev->dm.dmub_fb_info);
1495         adev->dm.dmub_fb_info = NULL;
1496
1497         if (adev->dm.dmub_srv) {
1498                 dmub_srv_destroy(adev->dm.dmub_srv);
1499                 adev->dm.dmub_srv = NULL;
1500         }
1501
1502         release_firmware(adev->dm.dmub_fw);
1503         adev->dm.dmub_fw = NULL;
1504
1505         release_firmware(adev->dm.fw_dmcu);
1506         adev->dm.fw_dmcu = NULL;
1507
1508         return 0;
1509 }
1510
1511 static int detect_mst_link_for_all_connectors(struct drm_device *dev)
1512 {
1513         struct amdgpu_dm_connector *aconnector;
1514         struct drm_connector *connector;
1515         struct drm_connector_list_iter iter;
1516         int ret = 0;
1517
1518         drm_connector_list_iter_begin(dev, &iter);
1519         drm_for_each_connector_iter(connector, &iter) {
1520                 aconnector = to_amdgpu_dm_connector(connector);
1521                 if (aconnector->dc_link->type == dc_connection_mst_branch &&
1522                     aconnector->mst_mgr.aux) {
1523                         DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
1524                                          aconnector,
1525                                          aconnector->base.base.id);
1526
1527                         ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
1528                         if (ret < 0) {
1529                                 DRM_ERROR("DM_MST: Failed to start MST\n");
1530                                 aconnector->dc_link->type =
1531                                         dc_connection_single;
1532                                 break;
1533                         }
1534                 }
1535         }
1536         drm_connector_list_iter_end(&iter);
1537
1538         return ret;
1539 }
1540
1541 static int dm_late_init(void *handle)
1542 {
1543         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1544
1545         struct dmcu_iram_parameters params;
1546         unsigned int linear_lut[16];
1547         int i;
1548         struct dmcu *dmcu = NULL;
1549         bool ret = true;
1550
1551         dmcu = adev->dm.dc->res_pool->dmcu;
1552
1553         for (i = 0; i < 16; i++)
1554                 linear_lut[i] = 0xFFFF * i / 15;
1555
1556         params.set = 0;
1557         params.backlight_ramping_start = 0xCCCC;
1558         params.backlight_ramping_reduction = 0xCCCCCCCC;
1559         params.backlight_lut_array_size = 16;
1560         params.backlight_lut_array = linear_lut;
1561
1562         /* Min backlight level after ABM reduction,  Don't allow below 1%
1563          * 0xFFFF x 0.01 = 0x28F
1564          */
1565         params.min_abm_backlight = 0x28F;
1566
1567         /* In the case where abm is implemented on dmcub,
1568          * dmcu object will be null.
1569          * ABM 2.4 and up are implemented on dmcub.
1570          */
1571         if (dmcu)
1572                 ret = dmcu_load_iram(dmcu, params);
1573         else if (adev->dm.dc->ctx->dmub_srv)
1574                 ret = dmub_init_abm_config(adev->dm.dc->res_pool, params);
1575
1576         if (!ret)
1577                 return -EINVAL;
1578
1579         return detect_mst_link_for_all_connectors(adev_to_drm(adev));
1580 }
1581
1582 static void s3_handle_mst(struct drm_device *dev, bool suspend)
1583 {
1584         struct amdgpu_dm_connector *aconnector;
1585         struct drm_connector *connector;
1586         struct drm_connector_list_iter iter;
1587         struct drm_dp_mst_topology_mgr *mgr;
1588         int ret;
1589         bool need_hotplug = false;
1590
1591         drm_connector_list_iter_begin(dev, &iter);
1592         drm_for_each_connector_iter(connector, &iter) {
1593                 aconnector = to_amdgpu_dm_connector(connector);
1594                 if (aconnector->dc_link->type != dc_connection_mst_branch ||
1595                     aconnector->mst_port)
1596                         continue;
1597
1598                 mgr = &aconnector->mst_mgr;
1599
1600                 if (suspend) {
1601                         drm_dp_mst_topology_mgr_suspend(mgr);
1602                 } else {
1603                         ret = drm_dp_mst_topology_mgr_resume(mgr, true);
1604                         if (ret < 0) {
1605                                 drm_dp_mst_topology_mgr_set_mst(mgr, false);
1606                                 need_hotplug = true;
1607                         }
1608                 }
1609         }
1610         drm_connector_list_iter_end(&iter);
1611
1612         if (need_hotplug)
1613                 drm_kms_helper_hotplug_event(dev);
1614 }
1615
1616 static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
1617 {
1618         struct smu_context *smu = &adev->smu;
1619         int ret = 0;
1620
1621         if (!is_support_sw_smu(adev))
1622                 return 0;
1623
1624         /* This interface is for dGPU Navi1x.Linux dc-pplib interface depends
1625          * on window driver dc implementation.
1626          * For Navi1x, clock settings of dcn watermarks are fixed. the settings
1627          * should be passed to smu during boot up and resume from s3.
1628          * boot up: dc calculate dcn watermark clock settings within dc_create,
1629          * dcn20_resource_construct
1630          * then call pplib functions below to pass the settings to smu:
1631          * smu_set_watermarks_for_clock_ranges
1632          * smu_set_watermarks_table
1633          * navi10_set_watermarks_table
1634          * smu_write_watermarks_table
1635          *
1636          * For Renoir, clock settings of dcn watermark are also fixed values.
1637          * dc has implemented different flow for window driver:
1638          * dc_hardware_init / dc_set_power_state
1639          * dcn10_init_hw
1640          * notify_wm_ranges
1641          * set_wm_ranges
1642          * -- Linux
1643          * smu_set_watermarks_for_clock_ranges
1644          * renoir_set_watermarks_table
1645          * smu_write_watermarks_table
1646          *
1647          * For Linux,
1648          * dc_hardware_init -> amdgpu_dm_init
1649          * dc_set_power_state --> dm_resume
1650          *
1651          * therefore, this function apply to navi10/12/14 but not Renoir
1652          * *
1653          */
1654         switch(adev->asic_type) {
1655         case CHIP_NAVI10:
1656         case CHIP_NAVI14:
1657         case CHIP_NAVI12:
1658                 break;
1659         default:
1660                 return 0;
1661         }
1662
1663         ret = smu_write_watermarks_table(smu);
1664         if (ret) {
1665                 DRM_ERROR("Failed to update WMTABLE!\n");
1666                 return ret;
1667         }
1668
1669         return 0;
1670 }
1671
1672 /**
1673  * dm_hw_init() - Initialize DC device
1674  * @handle: The base driver device containing the amdgpu_dm device.
1675  *
1676  * Initialize the &struct amdgpu_display_manager device. This involves calling
1677  * the initializers of each DM component, then populating the struct with them.
1678  *
1679  * Although the function implies hardware initialization, both hardware and
1680  * software are initialized here. Splitting them out to their relevant init
1681  * hooks is a future TODO item.
1682  *
1683  * Some notable things that are initialized here:
1684  *
1685  * - Display Core, both software and hardware
1686  * - DC modules that we need (freesync and color management)
1687  * - DRM software states
1688  * - Interrupt sources and handlers
1689  * - Vblank support
1690  * - Debug FS entries, if enabled
1691  */
static int dm_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r;

	/*
	 * Create the DAL display manager. amdgpu_dm_init() can fail (e.g.
	 * allocation failure or DC create failure); propagate the error
	 * instead of silently continuing with a half-initialized DM.
	 */
	r = amdgpu_dm_init(adev);
	if (r)
		return r;

	/* Register HPD interrupt sources for hotplug detection. */
	amdgpu_dm_hpd_init(adev);

	return 0;
}
1701
1702 /**
1703  * dm_hw_fini() - Teardown DC device
1704  * @handle: The base driver device containing the amdgpu_dm device.
1705  *
1706  * Teardown components within &struct amdgpu_display_manager that require
1707  * cleanup. This involves cleaning up the DRM device, DC, and any modules that
1708  * were loaded. Also flush IRQ workqueues and disable them.
1709  */
static int dm_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* Disable HPD interrupt handling before tearing down the IRQ core. */
	amdgpu_dm_hpd_fini(adev);

	/* Flush/disable IRQ workqueues, then tear down DM and DC proper. */
	amdgpu_dm_irq_fini(adev);
	amdgpu_dm_fini(adev);
	return 0;
}
1720
1721
1722 static int dm_enable_vblank(struct drm_crtc *crtc);
1723 static void dm_disable_vblank(struct drm_crtc *crtc);
1724
1725 static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev,
1726                                  struct dc_state *state, bool enable)
1727 {
1728         enum dc_irq_source irq_source;
1729         struct amdgpu_crtc *acrtc;
1730         int rc = -EBUSY;
1731         int i = 0;
1732
1733         for (i = 0; i < state->stream_count; i++) {
1734                 acrtc = get_crtc_by_otg_inst(
1735                                 adev, state->stream_status[i].primary_otg_inst);
1736
1737                 if (acrtc && state->stream_status[i].plane_count != 0) {
1738                         irq_source = IRQ_TYPE_PFLIP + acrtc->otg_inst;
1739                         rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
1740                         DRM_DEBUG("crtc %d - vupdate irq %sabling: r=%d\n",
1741                                   acrtc->crtc_id, enable ? "en" : "dis", rc);
1742                         if (rc)
1743                                 DRM_WARN("Failed to %s pflip interrupts\n",
1744                                          enable ? "enable" : "disable");
1745
1746                         if (enable) {
1747                                 rc = dm_enable_vblank(&acrtc->base);
1748                                 if (rc)
1749                                         DRM_WARN("Failed to enable vblank interrupts\n");
1750                         } else {
1751                                 dm_disable_vblank(&acrtc->base);
1752                         }
1753
1754                 }
1755         }
1756
1757 }
1758
1759 static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc)
1760 {
1761         struct dc_state *context = NULL;
1762         enum dc_status res = DC_ERROR_UNEXPECTED;
1763         int i;
1764         struct dc_stream_state *del_streams[MAX_PIPES];
1765         int del_streams_count = 0;
1766
1767         memset(del_streams, 0, sizeof(del_streams));
1768
1769         context = dc_create_state(dc);
1770         if (context == NULL)
1771                 goto context_alloc_fail;
1772
1773         dc_resource_state_copy_construct_current(dc, context);
1774
1775         /* First remove from context all streams */
1776         for (i = 0; i < context->stream_count; i++) {
1777                 struct dc_stream_state *stream = context->streams[i];
1778
1779                 del_streams[del_streams_count++] = stream;
1780         }
1781
1782         /* Remove all planes for removed streams and then remove the streams */
1783         for (i = 0; i < del_streams_count; i++) {
1784                 if (!dc_rem_all_planes_for_stream(dc, del_streams[i], context)) {
1785                         res = DC_FAIL_DETACH_SURFACES;
1786                         goto fail;
1787                 }
1788
1789                 res = dc_remove_stream_from_ctx(dc, context, del_streams[i]);
1790                 if (res != DC_OK)
1791                         goto fail;
1792         }
1793
1794
1795         res = dc_validate_global_state(dc, context, false);
1796
1797         if (res != DC_OK) {
1798                 DRM_ERROR("%s:resource validation failed, dc_status:%d\n", __func__, res);
1799                 goto fail;
1800         }
1801
1802         res = dc_commit_state(dc, context);
1803
1804 fail:
1805         dc_release_state(context);
1806
1807 context_alloc_fail:
1808         return res;
1809 }
1810
static int dm_suspend(void *handle)
{
	struct amdgpu_device *adev = handle;
	struct amdgpu_display_manager *dm = &adev->dm;
	int ret = 0;

	if (amdgpu_in_reset(adev)) {
		/*
		 * GPU-reset path: cache the live DC state so dm_resume() can
		 * replay it after the reset.
		 *
		 * NOTE: dc_lock is deliberately left held on return here;
		 * the matching mutex_unlock() is at the end of the
		 * amdgpu_in_reset() branch of dm_resume(), keeping DC state
		 * protected for the whole reset sequence.
		 */
		mutex_lock(&dm->dc_lock);
		dm->cached_dc_state = dc_copy_state(dm->dc->current_state);

		/* Disable pflip/vblank irqs for all active streams. */
		dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false);

		/* Detach all planes/streams and commit an empty state. */
		amdgpu_dm_commit_zero_streams(dm->dc);

		amdgpu_dm_irq_suspend(adev);

		return ret;
	}

	/* Regular S3 suspend path below. */
	WARN_ON(adev->dm.cached_state);
	adev->dm.cached_state = drm_atomic_helper_suspend(adev_to_drm(adev));

	/* Suspend all MST topology managers before irqs go away. */
	s3_handle_mst(adev_to_drm(adev), true);

	amdgpu_dm_irq_suspend(adev);


	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);

	return 0;
}
1842
1843 static struct amdgpu_dm_connector *
1844 amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
1845                                              struct drm_crtc *crtc)
1846 {
1847         uint32_t i;
1848         struct drm_connector_state *new_con_state;
1849         struct drm_connector *connector;
1850         struct drm_crtc *crtc_from_state;
1851
1852         for_each_new_connector_in_state(state, connector, new_con_state, i) {
1853                 crtc_from_state = new_con_state->crtc;
1854
1855                 if (crtc_from_state == crtc)
1856                         return to_amdgpu_dm_connector(connector);
1857         }
1858
1859         return NULL;
1860 }
1861
1862 static void emulated_link_detect(struct dc_link *link)
1863 {
1864         struct dc_sink_init_data sink_init_data = { 0 };
1865         struct display_sink_capability sink_caps = { 0 };
1866         enum dc_edid_status edid_status;
1867         struct dc_context *dc_ctx = link->ctx;
1868         struct dc_sink *sink = NULL;
1869         struct dc_sink *prev_sink = NULL;
1870
1871         link->type = dc_connection_none;
1872         prev_sink = link->local_sink;
1873
1874         if (prev_sink != NULL)
1875                 dc_sink_retain(prev_sink);
1876
1877         switch (link->connector_signal) {
1878         case SIGNAL_TYPE_HDMI_TYPE_A: {
1879                 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1880                 sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
1881                 break;
1882         }
1883
1884         case SIGNAL_TYPE_DVI_SINGLE_LINK: {
1885                 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1886                 sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
1887                 break;
1888         }
1889
1890         case SIGNAL_TYPE_DVI_DUAL_LINK: {
1891                 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1892                 sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
1893                 break;
1894         }
1895
1896         case SIGNAL_TYPE_LVDS: {
1897                 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
1898                 sink_caps.signal = SIGNAL_TYPE_LVDS;
1899                 break;
1900         }
1901
1902         case SIGNAL_TYPE_EDP: {
1903                 sink_caps.transaction_type =
1904                         DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
1905                 sink_caps.signal = SIGNAL_TYPE_EDP;
1906                 break;
1907         }
1908
1909         case SIGNAL_TYPE_DISPLAY_PORT: {
1910                 sink_caps.transaction_type =
1911                         DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
1912                 sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
1913                 break;
1914         }
1915
1916         default:
1917                 DC_ERROR("Invalid connector type! signal:%d\n",
1918                         link->connector_signal);
1919                 return;
1920         }
1921
1922         sink_init_data.link = link;
1923         sink_init_data.sink_signal = sink_caps.signal;
1924
1925         sink = dc_sink_create(&sink_init_data);
1926         if (!sink) {
1927                 DC_ERROR("Failed to create sink!\n");
1928                 return;
1929         }
1930
1931         /* dc_sink_create returns a new reference */
1932         link->local_sink = sink;
1933
1934         edid_status = dm_helpers_read_local_edid(
1935                         link->ctx,
1936                         link,
1937                         sink);
1938
1939         if (edid_status != EDID_OK)
1940                 DC_ERROR("Failed to read EDID");
1941
1942 }
1943
1944 static void dm_gpureset_commit_state(struct dc_state *dc_state,
1945                                      struct amdgpu_display_manager *dm)
1946 {
1947         struct {
1948                 struct dc_surface_update surface_updates[MAX_SURFACES];
1949                 struct dc_plane_info plane_infos[MAX_SURFACES];
1950                 struct dc_scaling_info scaling_infos[MAX_SURFACES];
1951                 struct dc_flip_addrs flip_addrs[MAX_SURFACES];
1952                 struct dc_stream_update stream_update;
1953         } * bundle;
1954         int k, m;
1955
1956         bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
1957
1958         if (!bundle) {
1959                 dm_error("Failed to allocate update bundle\n");
1960                 goto cleanup;
1961         }
1962
1963         for (k = 0; k < dc_state->stream_count; k++) {
1964                 bundle->stream_update.stream = dc_state->streams[k];
1965
1966                 for (m = 0; m < dc_state->stream_status->plane_count; m++) {
1967                         bundle->surface_updates[m].surface =
1968                                 dc_state->stream_status->plane_states[m];
1969                         bundle->surface_updates[m].surface->force_full_update =
1970                                 true;
1971                 }
1972                 dc_commit_updates_for_stream(
1973                         dm->dc, bundle->surface_updates,
1974                         dc_state->stream_status->plane_count,
1975                         dc_state->streams[k], &bundle->stream_update, dc_state);
1976         }
1977
1978 cleanup:
1979         kfree(bundle);
1980
1981         return;
1982 }
1983
1984 static void dm_set_dpms_off(struct dc_link *link)
1985 {
1986         struct dc_stream_state *stream_state;
1987         struct amdgpu_dm_connector *aconnector = link->priv;
1988         struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
1989         struct dc_stream_update stream_update;
1990         bool dpms_off = true;
1991
1992         memset(&stream_update, 0, sizeof(stream_update));
1993         stream_update.dpms_off = &dpms_off;
1994
1995         mutex_lock(&adev->dm.dc_lock);
1996         stream_state = dc_stream_find_from_link(link);
1997
1998         if (stream_state == NULL) {
1999                 DRM_DEBUG_DRIVER("Error finding stream state associated with link!\n");
2000                 mutex_unlock(&adev->dm.dc_lock);
2001                 return;
2002         }
2003
2004         stream_update.stream = stream_state;
2005         dc_commit_updates_for_stream(stream_state->ctx->dc, NULL, 0,
2006                                      stream_state, &stream_update,
2007                                      stream_state->ctx->dc->current_state);
2008         mutex_unlock(&adev->dm.dc_lock);
2009 }
2010
2011 static int dm_resume(void *handle)
2012 {
2013         struct amdgpu_device *adev = handle;
2014         struct drm_device *ddev = adev_to_drm(adev);
2015         struct amdgpu_display_manager *dm = &adev->dm;
2016         struct amdgpu_dm_connector *aconnector;
2017         struct drm_connector *connector;
2018         struct drm_connector_list_iter iter;
2019         struct drm_crtc *crtc;
2020         struct drm_crtc_state *new_crtc_state;
2021         struct dm_crtc_state *dm_new_crtc_state;
2022         struct drm_plane *plane;
2023         struct drm_plane_state *new_plane_state;
2024         struct dm_plane_state *dm_new_plane_state;
2025         struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
2026         enum dc_connection_type new_connection_type = dc_connection_none;
2027         struct dc_state *dc_state;
2028         int i, r, j;
2029
2030         if (amdgpu_in_reset(adev)) {
2031                 dc_state = dm->cached_dc_state;
2032
2033                 r = dm_dmub_hw_init(adev);
2034                 if (r)
2035                         DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
2036
2037                 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2038                 dc_resume(dm->dc);
2039
2040                 amdgpu_dm_irq_resume_early(adev);
2041
2042                 for (i = 0; i < dc_state->stream_count; i++) {
2043                         dc_state->streams[i]->mode_changed = true;
2044                         for (j = 0; j < dc_state->stream_status->plane_count; j++) {
2045                                 dc_state->stream_status->plane_states[j]->update_flags.raw
2046                                         = 0xffffffff;
2047                         }
2048                 }
2049
2050                 WARN_ON(!dc_commit_state(dm->dc, dc_state));
2051
2052                 dm_gpureset_commit_state(dm->cached_dc_state, dm);
2053
2054                 dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, true);
2055
2056                 dc_release_state(dm->cached_dc_state);
2057                 dm->cached_dc_state = NULL;
2058
2059                 amdgpu_dm_irq_resume_late(adev);
2060
2061                 mutex_unlock(&dm->dc_lock);
2062
2063                 return 0;
2064         }
2065         /* Recreate dc_state - DC invalidates it when setting power state to S3. */
2066         dc_release_state(dm_state->context);
2067         dm_state->context = dc_create_state(dm->dc);
2068         /* TODO: Remove dc_state->dccg, use dc->dccg directly. */
2069         dc_resource_state_construct(dm->dc, dm_state->context);
2070
2071         /* Before powering on DC we need to re-initialize DMUB. */
2072         r = dm_dmub_hw_init(adev);
2073         if (r)
2074                 DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
2075
2076         /* power on hardware */
2077         dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2078
2079         /* program HPD filter */
2080         dc_resume(dm->dc);
2081
2082         /*
2083          * early enable HPD Rx IRQ, should be done before set mode as short
2084          * pulse interrupts are used for MST
2085          */
2086         amdgpu_dm_irq_resume_early(adev);
2087
2088         /* On resume we need to rewrite the MSTM control bits to enable MST*/
2089         s3_handle_mst(ddev, false);
2090
2091         /* Do detection*/
2092         drm_connector_list_iter_begin(ddev, &iter);
2093         drm_for_each_connector_iter(connector, &iter) {
2094                 aconnector = to_amdgpu_dm_connector(connector);
2095
2096                 /*
2097                  * this is the case when traversing through already created
2098                  * MST connectors, should be skipped
2099                  */
2100                 if (aconnector->mst_port)
2101                         continue;
2102
2103                 mutex_lock(&aconnector->hpd_lock);
2104                 if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2105                         DRM_ERROR("KMS: Failed to detect connector\n");
2106
2107                 if (aconnector->base.force && new_connection_type == dc_connection_none)
2108                         emulated_link_detect(aconnector->dc_link);
2109                 else
2110                         dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
2111
2112                 if (aconnector->fake_enable && aconnector->dc_link->local_sink)
2113                         aconnector->fake_enable = false;
2114
2115                 if (aconnector->dc_sink)
2116                         dc_sink_release(aconnector->dc_sink);
2117                 aconnector->dc_sink = NULL;
2118                 amdgpu_dm_update_connector_after_detect(aconnector);
2119                 mutex_unlock(&aconnector->hpd_lock);
2120         }
2121         drm_connector_list_iter_end(&iter);
2122
2123         /* Force mode set in atomic commit */
2124         for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
2125                 new_crtc_state->active_changed = true;
2126
2127         /*
2128          * atomic_check is expected to create the dc states. We need to release
2129          * them here, since they were duplicated as part of the suspend
2130          * procedure.
2131          */
2132         for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
2133                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
2134                 if (dm_new_crtc_state->stream) {
2135                         WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
2136                         dc_stream_release(dm_new_crtc_state->stream);
2137                         dm_new_crtc_state->stream = NULL;
2138                 }
2139         }
2140
2141         for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
2142                 dm_new_plane_state = to_dm_plane_state(new_plane_state);
2143                 if (dm_new_plane_state->dc_state) {
2144                         WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
2145                         dc_plane_state_release(dm_new_plane_state->dc_state);
2146                         dm_new_plane_state->dc_state = NULL;
2147                 }
2148         }
2149
2150         drm_atomic_helper_resume(ddev, dm->cached_state);
2151
2152         dm->cached_state = NULL;
2153
2154         amdgpu_dm_irq_resume_late(adev);
2155
2156         amdgpu_dm_smu_write_watermarks_table(adev);
2157
2158         return 0;
2159 }
2160
2161 /**
2162  * DOC: DM Lifecycle
2163  *
2164  * DM (and consequently DC) is registered in the amdgpu base driver as a IP
2165  * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to
2166  * the base driver's device list to be initialized and torn down accordingly.
2167  *
2168  * The functions to do so are provided as hooks in &struct amd_ip_funcs.
2169  */
2170
/* IP-block hooks wiring DM into amdgpu's device init/teardown/PM flow. */
static const struct amd_ip_funcs amdgpu_dm_funcs = {
	.name = "dm",
	.early_init = dm_early_init,
	.late_init = dm_late_init,
	.sw_init = dm_sw_init,
	.sw_fini = dm_sw_fini,
	.hw_init = dm_hw_init,
	.hw_fini = dm_hw_fini,
	.suspend = dm_suspend,
	.resume = dm_resume,
	.is_idle = dm_is_idle,
	.wait_for_idle = dm_wait_for_idle,
	.check_soft_reset = dm_check_soft_reset,
	.soft_reset = dm_soft_reset,
	.set_clockgating_state = dm_set_clockgating_state,
	.set_powergating_state = dm_set_powergating_state,
};
2188
/* DM's entry in the amdgpu IP-block list, registered as the DCE block. */
const struct amdgpu_ip_block_version dm_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_DCE,
	.major = 1,
	.minor = 0,
	.rev = 0,
	.funcs = &amdgpu_dm_funcs,
};
2197
2198
2199 /**
2200  * DOC: atomic
2201  *
2202  * *WIP*
2203  */
2204
/* DRM mode-config callbacks: framebuffer creation and atomic entry points. */
static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
	.fb_create = amdgpu_display_user_framebuffer_create,
	.get_format_info = amd_get_format_info,
	.output_poll_changed = drm_fb_helper_output_poll_changed,
	.atomic_check = amdgpu_dm_atomic_check,
	.atomic_commit = drm_atomic_helper_commit,
};
2212
/* Routes the atomic commit tail through DM's own implementation. */
static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
	.atomic_commit_tail = amdgpu_dm_atomic_commit_tail
};
2216
/*
 * Derive eDP backlight capabilities (AUX-control support and min/max input
 * signal) from the sink's extended DPCD caps and the connector's HDR
 * static metadata.
 */
static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
{
	u32 max_cll, min_cll, max, min, q, r;
	struct amdgpu_dm_backlight_caps *caps;
	struct amdgpu_display_manager *dm;
	struct drm_connector *conn_base;
	struct amdgpu_device *adev;
	struct dc_link *link = NULL;
	static const u8 pre_computed_values[] = {
		50, 51, 52, 53, 55, 56, 57, 58, 59, 61, 62, 63, 65, 66, 68, 69,
		71, 72, 74, 75, 77, 79, 81, 82, 84, 86, 88, 90, 92, 94, 96, 98};

	if (!aconnector || !aconnector->dc_link)
		return;

	/* Only eDP panels carry the AUX backlight-control DPCD caps. */
	link = aconnector->dc_link;
	if (link->connector_signal != SIGNAL_TYPE_EDP)
		return;

	conn_base = &aconnector->base;
	adev = drm_to_adev(conn_base->dev);
	dm = &adev->dm;
	caps = &dm->backlight_caps;
	caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
	caps->aux_support = false;
	/* Content light levels from the sink's HDR static metadata. */
	max_cll = conn_base->hdr_sink_metadata.hdmi_type1.max_cll;
	min_cll = conn_base->hdr_sink_metadata.hdmi_type1.min_cll;

	/* Any of these DPCD bits means the panel supports AUX backlight. */
	if (caps->ext_caps->bits.oled == 1 ||
	    caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
	    caps->ext_caps->bits.hdr_aux_backlight_control == 1)
		caps->aux_support = true;

	/* From the specification (CTA-861-G), for calculating the maximum
	 * luminance we need to use:
	 *	Luminance = 50*2**(CV/32)
	 * Where CV is a one-byte value.
	 * For calculating this expression we may need float point precision;
	 * to avoid this complexity level, we take advantage that CV is divided
	 * by a constant. From the Euclids division algorithm, we know that CV
	 * can be written as: CV = 32*q + r. Next, we replace CV in the
	 * Luminance expression and get 50*(2**q)*(2**(r/32)), hence we just
	 * need to pre-compute the value of r/32. For pre-computing the values
	 * We just used the following Ruby line:
	 *	(0...32).each {|cv| puts (50*2**(cv/32.0)).round}
	 * The results of the above expressions can be verified at
	 * pre_computed_values.
	 */
	q = max_cll >> 5;
	r = max_cll % 32;
	max = (1 << q) * pre_computed_values[r];

	// min luminance: maxLum * (CV/255)^2 / 100
	/*
	 * NOTE(review): DIV_ROUND_CLOSEST(min_cll, 255) is 0 for any
	 * min_cll < 128, which makes `min` collapse to 0 for most panels.
	 * This looks like integer-truncation precision loss versus the
	 * formula above — confirm whether that is intentional.
	 */
	q = DIV_ROUND_CLOSEST(min_cll, 255);
	min = max * DIV_ROUND_CLOSEST((q * q), 100);

	caps->aux_max_input_signal = max;
	caps->aux_min_input_signal = min;
}
2276
/*
 * Synchronize the DRM connector with the result of a dc_link detection:
 * adopt (or drop) the link's local sink, refresh the EDID property, modes,
 * freesync caps and CEC state. Careful sink refcounting throughout — each
 * retain/release pairs with an ownership transition documented inline.
 */
void amdgpu_dm_update_connector_after_detect(
		struct amdgpu_dm_connector *aconnector)
{
	struct drm_connector *connector = &aconnector->base;
	struct drm_device *dev = connector->dev;
	struct dc_sink *sink;

	/* MST handled by drm_mst framework */
	if (aconnector->mst_mgr.mst_state == true)
		return;

	/* Hold a local reference on the detected sink for the duration. */
	sink = aconnector->dc_link->local_sink;
	if (sink)
		dc_sink_retain(sink);

	/*
	 * Edid mgmt connector gets first update only in mode_valid hook and then
	 * the connector sink is set to either fake or physical sink depends on link status.
	 * Skip if already done during boot.
	 */
	if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
			&& aconnector->dc_em_sink) {

		/*
		 * For S3 resume with headless use eml_sink to fake stream
		 * because on resume connector->sink is set to NULL
		 */
		mutex_lock(&dev->mode_config.mutex);

		if (sink) {
			if (aconnector->dc_sink) {
				amdgpu_dm_update_freesync_caps(connector, NULL);
				/*
				 * retain and release below are used to
				 * bump up refcount for sink because the link doesn't point
				 * to it anymore after disconnect, so on next crtc to connector
				 * reshuffle by UMD we will get into unwanted dc_sink release
				 */
				dc_sink_release(aconnector->dc_sink);
			}
			aconnector->dc_sink = sink;
			dc_sink_retain(aconnector->dc_sink);
			amdgpu_dm_update_freesync_caps(connector,
					aconnector->edid);
		} else {
			amdgpu_dm_update_freesync_caps(connector, NULL);
			/* Fall back to the emulated sink when nothing real. */
			if (!aconnector->dc_sink) {
				aconnector->dc_sink = aconnector->dc_em_sink;
				dc_sink_retain(aconnector->dc_sink);
			}
		}

		mutex_unlock(&dev->mode_config.mutex);

		if (sink)
			dc_sink_release(sink);
		return;
	}

	/*
	 * TODO: temporary guard to look for proper fix
	 * if this sink is MST sink, we should not do anything
	 */
	if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
		dc_sink_release(sink);
		return;
	}

	if (aconnector->dc_sink == sink) {
		/*
		 * We got a DP short pulse (Link Loss, DP CTS, etc...).
		 * Do nothing!!
		 */
		DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
				aconnector->connector_id);
		if (sink)
			dc_sink_release(sink);
		return;
	}

	DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
		aconnector->connector_id, aconnector->dc_sink, sink);

	mutex_lock(&dev->mode_config.mutex);

	/*
	 * 1. Update status of the drm connector
	 * 2. Send an event and let userspace tell us what to do
	 */
	if (sink) {
		/*
		 * TODO: check if we still need the S3 mode update workaround.
		 * If yes, put it here.
		 */
		if (aconnector->dc_sink)
			amdgpu_dm_update_freesync_caps(connector, NULL);

		/* Adopt the new sink; the connector now owns a reference. */
		aconnector->dc_sink = sink;
		dc_sink_retain(aconnector->dc_sink);
		if (sink->dc_edid.length == 0) {
			/* No EDID available: clear any CEC state keyed on it. */
			aconnector->edid = NULL;
			if (aconnector->dc_link->aux_mode) {
				drm_dp_cec_unset_edid(
					&aconnector->dm_dp_aux.aux);
			}
		} else {
			aconnector->edid =
				(struct edid *)sink->dc_edid.raw_edid;

			drm_connector_update_edid_property(connector,
							   aconnector->edid);
			drm_add_edid_modes(connector, aconnector->edid);

			if (aconnector->dc_link->aux_mode)
				drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
						    aconnector->edid);
		}

		amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
		update_connector_ext_caps(aconnector);
	} else {
		/* Disconnected: drop EDID, modes, freesync and the old sink. */
		drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
		amdgpu_dm_update_freesync_caps(connector, NULL);
		drm_connector_update_edid_property(connector, NULL);
		aconnector->num_modes = 0;
		dc_sink_release(aconnector->dc_sink);
		aconnector->dc_sink = NULL;
		aconnector->edid = NULL;
#ifdef CONFIG_DRM_AMD_DC_HDCP
		/* Set CP to DESIRED if it was ENABLED, so we can re-enable it again on hotplug */
		if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
			connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
#endif
	}

	mutex_unlock(&dev->mode_config.mutex);

	update_subconnector_property(aconnector);

	/* Drop the local reference taken at the top of the function. */
	if (sink)
		dc_sink_release(sink);
}
2419
/*
 * handle_hpd_irq() - low-context handler for a long HPD pulse.
 * @param: opaque pointer to the struct amdgpu_dm_connector registered for
 *         this HPD source (see register_hpd_handlers()).
 *
 * Re-runs sink detection on the connector's dc_link and, when detection
 * succeeds (or is emulated for a forced connector), restores the DRM
 * connector state and notifies userspace with a hotplug event.
 */
static void handle_hpd_irq(void *param)
{
        struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
        struct drm_connector *connector = &aconnector->base;
        struct drm_device *dev = connector->dev;
        enum dc_connection_type new_connection_type = dc_connection_none;
#ifdef CONFIG_DRM_AMD_DC_HDCP
        struct amdgpu_device *adev = drm_to_adev(dev);
        struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
#endif

        /*
         * In case of failure or MST no need to update connector status or notify the OS
         * since (for MST case) MST does this in its own context.
         */
        mutex_lock(&aconnector->hpd_lock);

#ifdef CONFIG_DRM_AMD_DC_HDCP
        /*
         * A hotplug invalidates the HDCP session: reset it and flag the
         * connector state so HDCP is re-evaluated later.
         */
        if (adev->dm.hdcp_workqueue) {
                hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
                dm_con_state->update_hdcp = true;
        }
#endif
        if (aconnector->fake_enable)
                aconnector->fake_enable = false;

        if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
                DRM_ERROR("KMS: Failed to detect connector\n");

        /* Forced connector with nothing physically attached: emulate the link. */
        if (aconnector->base.force && new_connection_type == dc_connection_none) {
                emulated_link_detect(aconnector->dc_link);


                drm_modeset_lock_all(dev);
                dm_restore_drm_connector_state(dev, connector);
                drm_modeset_unlock_all(dev);

                if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
                        drm_kms_helper_hotplug_event(dev);

        } else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
                /*
                 * Both the fresh detection and the link agree nothing is
                 * connected: power the link down before updating state.
                 */
                if (new_connection_type == dc_connection_none &&
                    aconnector->dc_link->type == dc_connection_none)
                        dm_set_dpms_off(aconnector->dc_link);

                amdgpu_dm_update_connector_after_detect(aconnector);

                drm_modeset_lock_all(dev);
                dm_restore_drm_connector_state(dev, connector);
                drm_modeset_unlock_all(dev);

                if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
                        drm_kms_helper_hotplug_event(dev);
        }
        mutex_unlock(&aconnector->hpd_lock);

}
2477
/*
 * dm_handle_hpd_rx_irq() - drain MST sideband IRQs reported in DPCD.
 * @aconnector: connector whose AUX channel is read.
 *
 * Reads the sink's IRQ vector (DPCD 0x200.. for pre-DP1.2 sinks, the ESI
 * range 0x2002.. otherwise), passes it to the MST manager, ACKs the handled
 * bits back to the sink, and re-reads until no new IRQ is raised or
 * max_process_count iterations have been spent.
 */
static void dm_handle_hpd_rx_irq(struct amdgpu_dm_connector *aconnector)
{
        uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
        uint8_t dret;
        bool new_irq_handled = false;
        int dpcd_addr;
        int dpcd_bytes_to_read;

        /* Safety cap for the ACK/re-read loop below. */
        const int max_process_count = 30;
        int process_count = 0;

        const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);

        if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
                dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
                /* DPCD 0x200 - 0x201 for downstream IRQ */
                dpcd_addr = DP_SINK_COUNT;
        } else {
                dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
                /* DPCD 0x2002 - 0x2005 for downstream IRQ */
                dpcd_addr = DP_SINK_COUNT_ESI;
        }

        dret = drm_dp_dpcd_read(
                &aconnector->dm_dp_aux.aux,
                dpcd_addr,
                esi,
                dpcd_bytes_to_read);

        /* Loop while full-size reads succeed and new IRQs keep appearing. */
        while (dret == dpcd_bytes_to_read &&
                process_count < max_process_count) {
                uint8_t retry;
                dret = 0;

                process_count++;

                DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
                /* handle HPD short pulse irq */
                if (aconnector->mst_mgr.mst_state)
                        drm_dp_mst_hpd_irq(
                                &aconnector->mst_mgr,
                                esi,
                                &new_irq_handled);

                if (new_irq_handled) {
                        /* ACK at DPCD to notify down stream */
                        /*
                         * Byte 0 (sink count) is skipped: only the IRQ
                         * vector bytes starting at dpcd_addr + 1 are
                         * written back.
                         */
                        const int ack_dpcd_bytes_to_write =
                                dpcd_bytes_to_read - 1;

                        /* AUX writes can fail transiently; retry up to 3x. */
                        for (retry = 0; retry < 3; retry++) {
                                uint8_t wret;

                                wret = drm_dp_dpcd_write(
                                        &aconnector->dm_dp_aux.aux,
                                        dpcd_addr + 1,
                                        &esi[1],
                                        ack_dpcd_bytes_to_write);
                                if (wret == ack_dpcd_bytes_to_write)
                                        break;
                        }

                        /* check if there is new irq to be handled */
                        dret = drm_dp_dpcd_read(
                                &aconnector->dm_dp_aux.aux,
                                dpcd_addr,
                                esi,
                                dpcd_bytes_to_read);

                        new_irq_handled = false;
                } else {
                        break;
                }
        }

        if (process_count == max_process_count)
                DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
}
2555
/*
 * handle_hpd_rx_irq() - low-context handler for a DP short HPD pulse.
 * @param: opaque pointer to the struct amdgpu_dm_connector registered for
 *         this hpd_rx source (see register_hpd_handlers()).
 *
 * Reads the sink's HPD RX IRQ data, dispatches MST up-request/down-reply
 * messages, otherwise lets DC handle the IRQ (link loss, automated test,
 * etc.). If the downstream port status changed, detection is re-run and a
 * hotplug event is sent. HDCP CP_IRQ is forwarded to the HDCP workqueue.
 */
static void handle_hpd_rx_irq(void *param)
{
        struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
        struct drm_connector *connector = &aconnector->base;
        struct drm_device *dev = connector->dev;
        struct dc_link *dc_link = aconnector->dc_link;
        bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
        bool result = false;
        enum dc_connection_type new_connection_type = dc_connection_none;
        struct amdgpu_device *adev = drm_to_adev(dev);
        union hpd_irq_data hpd_irq_data;

        memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));

        /*
         * TODO:Temporary add mutex to protect hpd interrupt not have a gpio
         * conflict, after implement i2c helper, this mutex should be
         * retired.
         */
        /* MST branch devices manage their own locking; skip hpd_lock then. */
        if (dc_link->type != dc_connection_mst_branch)
                mutex_lock(&aconnector->hpd_lock);

        read_hpd_rx_irq_data(dc_link, &hpd_irq_data);

        /* MST sideband messages are handled before generic DC processing. */
        if ((dc_link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN) ||
                (dc_link->type == dc_connection_mst_branch)) {
                if (hpd_irq_data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY) {
                        result = true;
                        dm_handle_hpd_rx_irq(aconnector);
                        goto out;
                } else if (hpd_irq_data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) {
                        result = false;
                        dm_handle_hpd_rx_irq(aconnector);
                        goto out;
                }
        }

        mutex_lock(&adev->dm.dc_lock);
#ifdef CONFIG_DRM_AMD_DC_HDCP
        result = dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data, NULL);
#else
        result = dc_link_handle_hpd_rx_irq(dc_link, NULL, NULL);
#endif
        mutex_unlock(&adev->dm.dc_lock);

out:
        if (result && !is_mst_root_connector) {
                /* Downstream Port status changed. */
                if (!dc_link_detect_sink(dc_link, &new_connection_type))
                        DRM_ERROR("KMS: Failed to detect connector\n");

                /* Forced connector with no physical sink: emulate the link. */
                if (aconnector->base.force && new_connection_type == dc_connection_none) {
                        emulated_link_detect(dc_link);

                        if (aconnector->fake_enable)
                                aconnector->fake_enable = false;

                        amdgpu_dm_update_connector_after_detect(aconnector);


                        drm_modeset_lock_all(dev);
                        dm_restore_drm_connector_state(dev, connector);
                        drm_modeset_unlock_all(dev);

                        drm_kms_helper_hotplug_event(dev);
                } else if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {

                        if (aconnector->fake_enable)
                                aconnector->fake_enable = false;

                        amdgpu_dm_update_connector_after_detect(aconnector);


                        drm_modeset_lock_all(dev);
                        dm_restore_drm_connector_state(dev, connector);
                        drm_modeset_unlock_all(dev);

                        drm_kms_helper_hotplug_event(dev);
                }
        }
#ifdef CONFIG_DRM_AMD_DC_HDCP
        /* Content-protection IRQ: let the HDCP workqueue process it. */
        if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) {
                if (adev->dm.hdcp_workqueue)
                        hdcp_handle_cpirq(adev->dm.hdcp_workqueue,  aconnector->base.index);
        }
#endif

        if (dc_link->type != dc_connection_mst_branch) {
                drm_dp_cec_irq(&aconnector->dm_dp_aux.aux);
                mutex_unlock(&aconnector->hpd_lock);
        }
}
2648
2649 static void register_hpd_handlers(struct amdgpu_device *adev)
2650 {
2651         struct drm_device *dev = adev_to_drm(adev);
2652         struct drm_connector *connector;
2653         struct amdgpu_dm_connector *aconnector;
2654         const struct dc_link *dc_link;
2655         struct dc_interrupt_params int_params = {0};
2656
2657         int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2658         int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2659
2660         list_for_each_entry(connector,
2661                         &dev->mode_config.connector_list, head) {
2662
2663                 aconnector = to_amdgpu_dm_connector(connector);
2664                 dc_link = aconnector->dc_link;
2665
2666                 if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
2667                         int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2668                         int_params.irq_source = dc_link->irq_source_hpd;
2669
2670                         amdgpu_dm_irq_register_interrupt(adev, &int_params,
2671                                         handle_hpd_irq,
2672                                         (void *) aconnector);
2673                 }
2674
2675                 if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {
2676
2677                         /* Also register for DP short pulse (hpd_rx). */
2678                         int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
2679                         int_params.irq_source = dc_link->irq_source_hpd_rx;
2680
2681                         amdgpu_dm_irq_register_interrupt(adev, &int_params,
2682                                         handle_hpd_rx_irq,
2683                                         (void *) aconnector);
2684                 }
2685         }
2686 }
2687
#if defined(CONFIG_DRM_AMD_DC_SI)
/* Register IRQ sources and initialize IRQ callbacks */
/*
 * dce60_register_irq_handlers() - DCE6 (SI) IRQ registration.
 * @adev: amdgpu device to register against.
 *
 * Registers VBLANK, page-flip, and HPD interrupts with the base driver and
 * with the DM IRQ layer. Returns 0 on success or the error from
 * amdgpu_irq_add_id().
 */
static int dce60_register_irq_handlers(struct amdgpu_device *adev)
{
        struct dc *dc = adev->dm.dc;
        struct common_irq_params *c_irq_params;
        struct dc_interrupt_params int_params = {0};
        int r;
        int i;
        unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;

        int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
        int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;

        /*
         * Actions of amdgpu_irq_add_id():
         * 1. Register a set() function with base driver.
         *    Base driver will call set() function to enable/disable an
         *    interrupt in DC hardware.
         * 2. Register amdgpu_dm_irq_handler().
         *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
         *    coming from DC hardware.
         *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
         *    for acknowledging and handling. */

        /* Use VBLANK interrupt */
        /* CRTC index i is registered with interrupt src id i + 1. */
        for (i = 0; i < adev->mode_info.num_crtc; i++) {
                r = amdgpu_irq_add_id(adev, client_id, i+1 , &adev->crtc_irq);
                if (r) {
                        DRM_ERROR("Failed to add crtc irq id!\n");
                        return r;
                }

                int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
                int_params.irq_source =
                        dc_interrupt_to_irq_source(dc, i+1 , 0);

                c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];

                c_irq_params->adev = adev;
                c_irq_params->irq_src = int_params.irq_source;

                amdgpu_dm_irq_register_interrupt(adev, &int_params,
                                dm_crtc_high_irq, c_irq_params);
        }

        /* Use GRPH_PFLIP interrupt */
        /* Pageflip src ids are spaced two apart per display, hence i += 2. */
        for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
                        i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
                r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
                if (r) {
                        DRM_ERROR("Failed to add page flip irq id!\n");
                        return r;
                }

                int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
                int_params.irq_source =
                        dc_interrupt_to_irq_source(dc, i, 0);

                c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];

                c_irq_params->adev = adev;
                c_irq_params->irq_src = int_params.irq_source;

                amdgpu_dm_irq_register_interrupt(adev, &int_params,
                                dm_pflip_high_irq, c_irq_params);

        }

        /* HPD */
        r = amdgpu_irq_add_id(adev, client_id,
                        VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
        if (r) {
                DRM_ERROR("Failed to add hpd irq id!\n");
                return r;
        }

        register_hpd_handlers(adev);

        return 0;
}
#endif
2770
2771 /* Register IRQ sources and initialize IRQ callbacks */
2772 static int dce110_register_irq_handlers(struct amdgpu_device *adev)
2773 {
2774         struct dc *dc = adev->dm.dc;
2775         struct common_irq_params *c_irq_params;
2776         struct dc_interrupt_params int_params = {0};
2777         int r;
2778         int i;
2779         unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
2780
2781         if (adev->asic_type >= CHIP_VEGA10)
2782                 client_id = SOC15_IH_CLIENTID_DCE;
2783
2784         int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
2785         int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
2786
2787         /*
2788          * Actions of amdgpu_irq_add_id():
2789          * 1. Register a set() function with base driver.
2790          *    Base driver will call set() function to enable/disable an
2791          *    interrupt in DC hardware.
2792          * 2. Register amdgpu_dm_irq_handler().
2793          *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
2794          *    coming from DC hardware.
2795          *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
2796          *    for acknowledging and handling. */
2797
2798         /* Use VBLANK interrupt */
2799         for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
2800                 r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
2801                 if (r) {
2802                         DRM_ERROR("Failed to add crtc irq id!\n");
2803                         return r;
2804                 }
2805
2806                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2807                 int_params.irq_source =
2808                         dc_interrupt_to_irq_source(dc, i, 0);
2809
2810                 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
2811
2812                 c_irq_params->adev = adev;
2813                 c_irq_params->irq_src = int_params.irq_source;
2814
2815                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2816                                 dm_crtc_high_irq, c_irq_params);
2817         }
2818
2819         /* Use VUPDATE interrupt */
2820         for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) {
2821                 r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq);
2822                 if (r) {
2823                         DRM_ERROR("Failed to add vupdate irq id!\n");
2824                         return r;
2825                 }
2826
2827                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2828                 int_params.irq_source =
2829                         dc_interrupt_to_irq_source(dc, i, 0);
2830
2831                 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
2832
2833                 c_irq_params->adev = adev;
2834                 c_irq_params->irq_src = int_params.irq_source;
2835
2836                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2837                                 dm_vupdate_high_irq, c_irq_params);
2838         }
2839
2840         /* Use GRPH_PFLIP interrupt */
2841         for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
2842                         i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
2843                 r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
2844                 if (r) {
2845                         DRM_ERROR("Failed to add page flip irq id!\n");
2846                         return r;
2847                 }
2848
2849                 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
2850                 int_params.irq_source =
2851                         dc_interrupt_to_irq_source(dc, i, 0);
2852
2853                 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
2854
2855                 c_irq_params->adev = adev;
2856                 c_irq_params->irq_src = int_params.irq_source;
2857
2858                 amdgpu_dm_irq_register_interrupt(adev, &int_params,
2859                                 dm_pflip_high_irq, c_irq_params);
2860
2861         }
2862
2863         /* HPD */
2864         r = amdgpu_irq_add_id(adev, client_id,
2865                         VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
2866         if (r) {
2867                 DRM_ERROR("Failed to add hpd irq id!\n");
2868                 return r;
2869         }
2870
2871         register_hpd_handlers(adev);
2872
2873         return 0;
2874 }
2875
#if defined(CONFIG_DRM_AMD_DC_DCN)
/* Register IRQ sources and initialize IRQ callbacks */
/*
 * dcn10_register_irq_handlers() - DCN IRQ registration.
 * @adev: amdgpu device to register against.
 *
 * Registers VSTARTUP (driving the CRTC/vblank handler), VUPDATE_NO_LOCK,
 * page-flip and HPD interrupts. Returns 0 on success or the error from
 * amdgpu_irq_add_id().
 */
static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
{
        struct dc *dc = adev->dm.dc;
        struct common_irq_params *c_irq_params;
        struct dc_interrupt_params int_params = {0};
        int r;
        int i;

        int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
        int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;

        /*
         * Actions of amdgpu_irq_add_id():
         * 1. Register a set() function with base driver.
         *    Base driver will call set() function to enable/disable an
         *    interrupt in DC hardware.
         * 2. Register amdgpu_dm_irq_handler().
         *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
         *    coming from DC hardware.
         *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
         *    for acknowledging and handling.
         */

        /* Use VSTARTUP interrupt */
        /* One VSTARTUP source per CRTC; it feeds dm_crtc_high_irq(). */
        for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
                        i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
                        i++) {
                r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);

                if (r) {
                        DRM_ERROR("Failed to add crtc irq id!\n");
                        return r;
                }

                int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
                int_params.irq_source =
                        dc_interrupt_to_irq_source(dc, i, 0);

                c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];

                c_irq_params->adev = adev;
                c_irq_params->irq_src = int_params.irq_source;

                amdgpu_dm_irq_register_interrupt(
                        adev, &int_params, dm_crtc_high_irq, c_irq_params);
        }

        /* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to
         * the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx
         * to trigger at end of each vblank, regardless of state of the lock,
         * matching DCE behaviour.
         */
        for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT;
             i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT + adev->mode_info.num_crtc - 1;
             i++) {
                r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq);

                if (r) {
                        DRM_ERROR("Failed to add vupdate irq id!\n");
                        return r;
                }

                int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
                int_params.irq_source =
                        dc_interrupt_to_irq_source(dc, i, 0);

                c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];

                c_irq_params->adev = adev;
                c_irq_params->irq_src = int_params.irq_source;

                amdgpu_dm_irq_register_interrupt(adev, &int_params,
                                dm_vupdate_high_irq, c_irq_params);
        }

        /* Use GRPH_PFLIP interrupt */
        /* One flip source per HUBP/CRTC. */
        for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
                        i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + adev->mode_info.num_crtc - 1;
                        i++) {
                r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
                if (r) {
                        DRM_ERROR("Failed to add page flip irq id!\n");
                        return r;
                }

                int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
                int_params.irq_source =
                        dc_interrupt_to_irq_source(dc, i, 0);

                c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];

                c_irq_params->adev = adev;
                c_irq_params->irq_src = int_params.irq_source;

                amdgpu_dm_irq_register_interrupt(adev, &int_params,
                                dm_pflip_high_irq, c_irq_params);

        }

        /* HPD */
        r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
                        &adev->hpd_irq);
        if (r) {
                DRM_ERROR("Failed to add hpd irq id!\n");
                return r;
        }

        register_hpd_handlers(adev);

        return 0;
}
#endif
2990
2991 /*
2992  * Acquires the lock for the atomic state object and returns
2993  * the new atomic state.
2994  *
2995  * This should only be called during atomic check.
2996  */
2997 static int dm_atomic_get_state(struct drm_atomic_state *state,
2998                                struct dm_atomic_state **dm_state)
2999 {
3000         struct drm_device *dev = state->dev;
3001         struct amdgpu_device *adev = drm_to_adev(dev);
3002         struct amdgpu_display_manager *dm = &adev->dm;
3003         struct drm_private_state *priv_state;
3004
3005         if (*dm_state)
3006                 return 0;
3007
3008         priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj);
3009         if (IS_ERR(priv_state))
3010                 return PTR_ERR(priv_state);
3011
3012         *dm_state = to_dm_atomic_state(priv_state);
3013
3014         return 0;
3015 }
3016
3017 static struct dm_atomic_state *
3018 dm_atomic_get_new_state(struct drm_atomic_state *state)
3019 {
3020         struct drm_device *dev = state->dev;
3021         struct amdgpu_device *adev = drm_to_adev(dev);
3022         struct amdgpu_display_manager *dm = &adev->dm;
3023         struct drm_private_obj *obj;
3024         struct drm_private_state *new_obj_state;
3025         int i;
3026
3027         for_each_new_private_obj_in_state(state, obj, new_obj_state, i) {
3028                 if (obj->funcs == dm->atomic_obj.funcs)
3029                         return to_dm_atomic_state(new_obj_state);
3030         }
3031
3032         return NULL;
3033 }
3034
3035 static struct drm_private_state *
3036 dm_atomic_duplicate_state(struct drm_private_obj *obj)
3037 {
3038         struct dm_atomic_state *old_state, *new_state;
3039
3040         new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
3041         if (!new_state)
3042                 return NULL;
3043
3044         __drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base);
3045
3046         old_state = to_dm_atomic_state(obj->state);
3047
3048         if (old_state && old_state->context)
3049                 new_state->context = dc_copy_state(old_state->context);
3050
3051         if (!new_state->context) {
3052                 kfree(new_state);
3053                 return NULL;
3054         }
3055
3056         return &new_state->base;
3057 }
3058
3059 static void dm_atomic_destroy_state(struct drm_private_obj *obj,
3060                                     struct drm_private_state *state)
3061 {
3062         struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
3063
3064         if (dm_state && dm_state->context)
3065                 dc_release_state(dm_state->context);
3066
3067         kfree(dm_state);
3068 }
3069
3070 static struct drm_private_state_funcs dm_atomic_state_funcs = {
3071         .atomic_duplicate_state = dm_atomic_duplicate_state,
3072         .atomic_destroy_state = dm_atomic_destroy_state,
3073 };
3074
3075 static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
3076 {
3077         struct dm_atomic_state *state;
3078         int r;
3079
3080         adev->mode_info.mode_config_initialized = true;
3081
3082         adev_to_drm(adev)->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
3083         adev_to_drm(adev)->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;
3084
3085         adev_to_drm(adev)->mode_config.max_width = 16384;
3086         adev_to_drm(adev)->mode_config.max_height = 16384;
3087
3088         adev_to_drm(adev)->mode_config.preferred_depth = 24;
3089         adev_to_drm(adev)->mode_config.prefer_shadow = 1;
3090         /* indicates support for immediate flip */
3091         adev_to_drm(adev)->mode_config.async_page_flip = true;
3092
3093         adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base;
3094
3095         state = kzalloc(sizeof(*state), GFP_KERNEL);
3096         if (!state)
3097                 return -ENOMEM;
3098
3099         state->context = dc_create_state(adev->dm.dc);
3100         if (!state->context) {
3101                 kfree(state);
3102                 return -ENOMEM;
3103         }
3104
3105         dc_resource_state_copy_construct_current(adev->dm.dc, state->context);
3106
3107         drm_atomic_private_obj_init(adev_to_drm(adev),
3108                                     &adev->dm.atomic_obj,
3109                                     &state->base,
3110                                     &dm_atomic_state_funcs);
3111
3112         r = amdgpu_display_modeset_create_props(adev);
3113         if (r) {
3114                 dc_release_state(state->context);
3115                 kfree(state);
3116                 return r;
3117         }
3118
3119         r = amdgpu_dm_audio_init(adev);
3120         if (r) {
3121                 dc_release_state(state->context);
3122                 kfree(state);
3123                 return r;
3124         }
3125
3126         return 0;
3127 }
3128
3129 #define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
3130 #define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
3131 #define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50
3132
3133 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3134         defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3135
/*
 * amdgpu_dm_update_backlight_caps() - fill dm->backlight_caps.
 * @dm: display manager whose backlight caps are (lazily) populated.
 *
 * With ACPI: query firmware caps once (skipped if already valid) and record
 * the min/max input signal; AUX-controlled backlights return early.
 * Without ACPI: just apply the driver defaults.
 */
static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm)
{
#if defined(CONFIG_ACPI)
        struct amdgpu_dm_backlight_caps caps;

        memset(&caps, 0, sizeof(caps));

        /* Already populated on a previous call — nothing to do. */
        if (dm->backlight_caps.caps_valid)
                return;

        amdgpu_acpi_get_backlight_caps(dm->adev, &caps);
        if (caps.caps_valid) {
                dm->backlight_caps.caps_valid = true;
                /*
                 * NOTE(review): for aux_support only caps_valid is recorded
                 * here; the aux min/max appear to be handled elsewhere —
                 * confirm against the aux backlight path.
                 */
                if (caps.aux_support)
                        return;
                dm->backlight_caps.min_input_signal = caps.min_input_signal;
                dm->backlight_caps.max_input_signal = caps.max_input_signal;
        } else {
                /* Firmware reported nothing usable — fall back to defaults. */
                dm->backlight_caps.min_input_signal =
                                AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
                dm->backlight_caps.max_input_signal =
                                AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
        }
#else
        /* No ACPI: AUX backlights keep their own range; others get defaults. */
        if (dm->backlight_caps.aux_support)
                return;

        dm->backlight_caps.min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
        dm->backlight_caps.max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
#endif
}
3167
3168 static int set_backlight_via_aux(struct dc_link *link, uint32_t brightness)
3169 {
3170         bool rc;
3171
3172         if (!link)
3173                 return 1;
3174
3175         rc = dc_link_set_backlight_level_nits(link, true, brightness,
3176                                               AUX_BL_DEFAULT_TRANSITION_TIME_MS);
3177
3178         return rc ? 0 : 1;
3179 }
3180
3181 static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps,
3182                                 unsigned *min, unsigned *max)
3183 {
3184         if (!caps)
3185                 return 0;
3186
3187         if (caps->aux_support) {
3188                 // Firmware limits are in nits, DC API wants millinits.
3189                 *max = 1000 * caps->aux_max_input_signal;
3190                 *min = 1000 * caps->aux_min_input_signal;
3191         } else {
3192                 // Firmware limits are 8-bit, PWM control is 16-bit.
3193                 *max = 0x101 * caps->max_input_signal;
3194                 *min = 0x101 * caps->min_input_signal;
3195         }
3196         return 1;
3197 }
3198
3199 static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *caps,
3200                                         uint32_t brightness)
3201 {
3202         unsigned min, max;
3203
3204         if (!get_brightness_range(caps, &min, &max))
3205                 return brightness;
3206
3207         // Rescale 0..255 to min..max
3208         return min + DIV_ROUND_CLOSEST((max - min) * brightness,
3209                                        AMDGPU_MAX_BL_LEVEL);
3210 }
3211
3212 static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps *caps,
3213                                       uint32_t brightness)
3214 {
3215         unsigned min, max;
3216
3217         if (!get_brightness_range(caps, &min, &max))
3218                 return brightness;
3219
3220         if (brightness < min)
3221                 return 0;
3222         // Rescale min..max to 0..255
3223         return DIV_ROUND_CLOSEST(AMDGPU_MAX_BL_LEVEL * (brightness - min),
3224                                  max - min);
3225 }
3226
3227 static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
3228 {
3229         struct amdgpu_display_manager *dm = bl_get_data(bd);
3230         struct amdgpu_dm_backlight_caps caps;
3231         struct dc_link *link = NULL;
3232         u32 brightness;
3233         bool rc;
3234
3235         amdgpu_dm_update_backlight_caps(dm);
3236         caps = dm->backlight_caps;
3237
3238         link = (struct dc_link *)dm->backlight_link;
3239
3240         brightness = convert_brightness_from_user(&caps, bd->props.brightness);
3241         // Change brightness based on AUX property
3242         if (caps.aux_support)
3243                 return set_backlight_via_aux(link, brightness);
3244
3245         rc = dc_link_set_backlight_level(dm->backlight_link, brightness, 0);
3246
3247         return rc ? 0 : 1;
3248 }
3249
3250 static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
3251 {
3252         struct amdgpu_display_manager *dm = bl_get_data(bd);
3253         int ret = dc_link_get_backlight_level(dm->backlight_link);
3254
3255         if (ret == DC_ERROR_UNEXPECTED)
3256                 return bd->props.brightness;
3257         return convert_brightness_to_user(&dm->backlight_caps, ret);
3258 }
3259
/* Backlight class hooks; the core handles suspend/resume bookkeeping. */
static const struct backlight_ops amdgpu_dm_backlight_ops = {
	.options = BL_CORE_SUSPENDRESUME,
	.get_brightness = amdgpu_dm_backlight_get_brightness,
	.update_status	= amdgpu_dm_backlight_update_status,
};
3265
3266 static void
3267 amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
3268 {
3269         char bl_name[16];
3270         struct backlight_properties props = { 0 };
3271
3272         amdgpu_dm_update_backlight_caps(dm);
3273
3274         props.max_brightness = AMDGPU_MAX_BL_LEVEL;
3275         props.brightness = AMDGPU_MAX_BL_LEVEL;
3276         props.type = BACKLIGHT_RAW;
3277
3278         snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
3279                  adev_to_drm(dm->adev)->primary->index);
3280
3281         dm->backlight_dev = backlight_device_register(bl_name,
3282                                                       adev_to_drm(dm->adev)->dev,
3283                                                       dm,
3284                                                       &amdgpu_dm_backlight_ops,
3285                                                       &props);
3286
3287         if (IS_ERR(dm->backlight_dev))
3288                 DRM_ERROR("DM: Backlight registration failed!\n");
3289         else
3290                 DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
3291 }
3292
3293 #endif
3294
3295 static int initialize_plane(struct amdgpu_display_manager *dm,
3296                             struct amdgpu_mode_info *mode_info, int plane_id,
3297                             enum drm_plane_type plane_type,
3298                             const struct dc_plane_cap *plane_cap)
3299 {
3300         struct drm_plane *plane;
3301         unsigned long possible_crtcs;
3302         int ret = 0;
3303
3304         plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL);
3305         if (!plane) {
3306                 DRM_ERROR("KMS: Failed to allocate plane\n");
3307                 return -ENOMEM;
3308         }
3309         plane->type = plane_type;
3310
3311         /*
3312          * HACK: IGT tests expect that the primary plane for a CRTC
3313          * can only have one possible CRTC. Only expose support for
3314          * any CRTC if they're not going to be used as a primary plane
3315          * for a CRTC - like overlay or underlay planes.
3316          */
3317         possible_crtcs = 1 << plane_id;
3318         if (plane_id >= dm->dc->caps.max_streams)
3319                 possible_crtcs = 0xff;
3320
3321         ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap);
3322
3323         if (ret) {
3324                 DRM_ERROR("KMS: Failed to initialize plane\n");
3325                 kfree(plane);
3326                 return ret;
3327         }
3328
3329         if (mode_info)
3330                 mode_info->planes[plane_id] = plane;
3331
3332         return ret;
3333 }
3334
3335
/* Register a backlight device for eDP/LVDS links that have a sink. */
static void register_backlight_device(struct amdgpu_display_manager *dm,
				      struct dc_link *link)
{
#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)

	if (!(link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) ||
	    link->type == dc_connection_none)
		return;

	/*
	 * Even if registration fails, continue with DM initialization:
	 * missing backlight control is better than a black screen.
	 */
	amdgpu_dm_register_backlight_device(dm);

	if (dm->backlight_dev)
		dm->backlight_link = link;
#endif
}
3356
3357
3358 /*
3359  * In this architecture, the association
3360  * connector -> encoder -> crtc
3361  * id not really requried. The crtc and connector will hold the
3362  * display_index as an abstraction to use with DAL component
3363  *
3364  * Returns 0 on success
3365  */
static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
{
	struct amdgpu_display_manager *dm = &adev->dm;
	int32_t i;
	struct amdgpu_dm_connector *aconnector = NULL;
	struct amdgpu_encoder *aencoder = NULL;
	struct amdgpu_mode_info *mode_info = &adev->mode_info;
	uint32_t link_cnt;
	int32_t primary_planes;
	enum dc_connection_type new_connection_type = dc_connection_none;
	const struct dc_plane_cap *plane;

	dm->display_indexes_num = dm->dc->caps.max_streams;
	/* Update the actual used number of crtc */
	adev->mode_info.num_crtc = adev->dm.display_indexes_num;

	link_cnt = dm->dc->caps.max_links;
	if (amdgpu_dm_mode_config_init(dm->adev)) {
		DRM_ERROR("DM: Failed to initialize mode config\n");
		return -EINVAL;
	}

	/* There is one primary plane per CRTC */
	primary_planes = dm->dc->caps.max_streams;
	ASSERT(primary_planes <= AMDGPU_MAX_PLANES);

	/*
	 * Initialize primary planes, implicit planes for legacy IOCTLS.
	 * Order is reversed to match iteration order in atomic check.
	 */
	for (i = (primary_planes - 1); i >= 0; i--) {
		plane = &dm->dc->caps.planes[i];

		if (initialize_plane(dm, mode_info, i,
				     DRM_PLANE_TYPE_PRIMARY, plane)) {
			DRM_ERROR("KMS: Failed to initialize primary plane\n");
			goto fail;
		}
	}

	/*
	 * Initialize overlay planes, index starting after primary planes.
	 * These planes have a higher DRM index than the primary planes since
	 * they should be considered as having a higher z-order.
	 * Order is reversed to match iteration order in atomic check.
	 *
	 * Only support DCN for now, and only expose one so we don't encourage
	 * userspace to use up all the pipes.
	 */
	for (i = 0; i < dm->dc->caps.max_planes; ++i) {
		struct dc_plane_cap *plane = &dm->dc->caps.planes[i];

		/* Require a universal DCN plane that can blend both ways
		 * and supports ARGB8888 before exposing it as an overlay. */
		if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL)
			continue;

		if (!plane->blends_with_above || !plane->blends_with_below)
			continue;

		if (!plane->pixel_format_support.argb8888)
			continue;

		if (initialize_plane(dm, NULL, primary_planes + i,
				     DRM_PLANE_TYPE_OVERLAY, plane)) {
			DRM_ERROR("KMS: Failed to initialize overlay plane\n");
			goto fail;
		}

		/* Only create one overlay plane. */
		break;
	}

	/* One CRTC per stream, each bound to its primary plane. */
	for (i = 0; i < dm->dc->caps.max_streams; i++)
		if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) {
			DRM_ERROR("KMS: Failed to initialize crtc\n");
			goto fail;
		}

	/* loops over all connectors on the board */
	for (i = 0; i < link_cnt; i++) {
		struct dc_link *link = NULL;

		if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
			DRM_ERROR(
				"KMS: Cannot support more than %d display indexes\n",
					AMDGPU_DM_MAX_DISPLAY_INDEX);
			continue;
		}

		aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
		if (!aconnector)
			goto fail;

		aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
		if (!aencoder)
			goto fail;

		if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
			DRM_ERROR("KMS: Failed to initialize encoder\n");
			goto fail;
		}

		if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
			DRM_ERROR("KMS: Failed to initialize connector\n");
			goto fail;
		}

		link = dc_get_link_at_index(dm->dc, i);

		if (!dc_link_detect_sink(link, &new_connection_type))
			DRM_ERROR("KMS: Failed to detect connector\n");

		/* Forced connectors with no real sink get an emulated link;
		 * otherwise do a full boot-time detection and, on success,
		 * wire up backlight and (optionally) PSR for this link. */
		if (aconnector->base.force && new_connection_type == dc_connection_none) {
			emulated_link_detect(link);
			amdgpu_dm_update_connector_after_detect(aconnector);

		} else if (dc_link_detect(link, DETECT_REASON_BOOT)) {
			amdgpu_dm_update_connector_after_detect(aconnector);
			register_backlight_device(dm, link);
			if (amdgpu_dc_feature_mask & DC_PSR_MASK)
				amdgpu_dm_set_psr_caps(link);
		}


	}

	/* Software is initialized. Now we can register interrupt handlers. */
	switch (adev->asic_type) {
#if defined(CONFIG_DRM_AMD_DC_SI)
	case CHIP_TAHITI:
	case CHIP_PITCAIRN:
	case CHIP_VERDE:
	case CHIP_OLAND:
		if (dce60_register_irq_handlers(dm->adev)) {
			DRM_ERROR("DM: Failed to initialize IRQ\n");
			goto fail;
		}
		break;
#endif
	case CHIP_BONAIRE:
	case CHIP_HAWAII:
	case CHIP_KAVERI:
	case CHIP_KABINI:
	case CHIP_MULLINS:
	case CHIP_TONGA:
	case CHIP_FIJI:
	case CHIP_CARRIZO:
	case CHIP_STONEY:
	case CHIP_POLARIS11:
	case CHIP_POLARIS10:
	case CHIP_POLARIS12:
	case CHIP_VEGAM:
	case CHIP_VEGA10:
	case CHIP_VEGA12:
	case CHIP_VEGA20:
		if (dce110_register_irq_handlers(dm->adev)) {
			DRM_ERROR("DM: Failed to initialize IRQ\n");
			goto fail;
		}
		break;
#if defined(CONFIG_DRM_AMD_DC_DCN)
	case CHIP_RAVEN:
	case CHIP_NAVI12:
	case CHIP_NAVI10:
	case CHIP_NAVI14:
	case CHIP_RENOIR:
	case CHIP_SIENNA_CICHLID:
	case CHIP_NAVY_FLOUNDER:
	case CHIP_DIMGREY_CAVEFISH:
	case CHIP_VANGOGH:
		if (dcn10_register_irq_handlers(dm->adev)) {
			DRM_ERROR("DM: Failed to initialize IRQ\n");
			goto fail;
		}
		break;
#endif
	default:
		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
		goto fail;
	}

	return 0;
fail:
	/* Frees the most recent unattached allocations; kfree(NULL) is a
	 * no-op when failure happened before/after the connector loop. */
	kfree(aencoder);
	kfree(aconnector);

	return -EINVAL;
}
3553
3554 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
3555 {
3556         drm_mode_config_cleanup(dm->ddev);
3557         drm_atomic_private_obj_fini(&dm->atomic_obj);
3558         return;
3559 }
3560
3561 /******************************************************************************
3562  * amdgpu_display_funcs functions
3563  *****************************************************************************/
3564
3565 /*
3566  * dm_bandwidth_update - program display watermarks
3567  *
3568  * @adev: amdgpu_device pointer
3569  *
3570  * Calculate and program the display watermarks and line buffer allocation.
3571  */
3572 static void dm_bandwidth_update(struct amdgpu_device *adev)
3573 {
3574         /* TODO: implement later */
3575 }
3576
/* amdgpu display callbacks; NULL entries are handled by DAL/VBIOS parsing. */
static const struct amdgpu_display_funcs dm_display_funcs = {
	.bandwidth_update = dm_bandwidth_update, /* called unconditionally */
	.vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */
	.backlight_set_level = NULL, /* never called for DC */
	.backlight_get_level = NULL, /* never called for DC */
	.hpd_sense = NULL,/* called unconditionally */
	.hpd_set_polarity = NULL, /* called unconditionally */
	.hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
	.page_flip_get_scanoutpos =
		dm_crtc_get_scanoutpos,/* called unconditionally */
	.add_encoder = NULL, /* VBIOS parsing. DAL does it. */
	.add_connector = NULL, /* VBIOS parsing. DAL does it. */
};
3590
3591 #if defined(CONFIG_DEBUG_KERNEL_DC)
3592
3593 static ssize_t s3_debug_store(struct device *device,
3594                               struct device_attribute *attr,
3595                               const char *buf,
3596                               size_t count)
3597 {
3598         int ret;
3599         int s3_state;
3600         struct drm_device *drm_dev = dev_get_drvdata(device);
3601         struct amdgpu_device *adev = drm_to_adev(drm_dev);
3602
3603         ret = kstrtoint(buf, 0, &s3_state);
3604
3605         if (ret == 0) {
3606                 if (s3_state) {
3607                         dm_resume(adev);
3608                         drm_kms_helper_hotplug_event(adev_to_drm(adev));
3609                 } else
3610                         dm_suspend(adev);
3611         }
3612
3613         return ret == 0 ? count : 0;
3614 }
3615
3616 DEVICE_ATTR_WO(s3_debug);
3617
3618 #endif
3619
/*
 * dm_early_init - record per-ASIC display topology and hook up DM functions
 *
 * Sets adev->mode_info num_crtc/num_hpd/num_dig for the detected ASIC,
 * installs the DM IRQ functions and, if unset, the display function table.
 *
 * Returns 0 on success or -EINVAL for an unsupported ASIC.
 */
static int dm_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* CRTC / HPD pin / DIG encoder counts vary per ASIC family. */
	switch (adev->asic_type) {
#if defined(CONFIG_DRM_AMD_DC_SI)
	case CHIP_TAHITI:
	case CHIP_PITCAIRN:
	case CHIP_VERDE:
		adev->mode_info.num_crtc = 6;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 6;
		break;
	case CHIP_OLAND:
		adev->mode_info.num_crtc = 2;
		adev->mode_info.num_hpd = 2;
		adev->mode_info.num_dig = 2;
		break;
#endif
	case CHIP_BONAIRE:
	case CHIP_HAWAII:
		adev->mode_info.num_crtc = 6;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 6;
		break;
	case CHIP_KAVERI:
		adev->mode_info.num_crtc = 4;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 7;
		break;
	case CHIP_KABINI:
	case CHIP_MULLINS:
		adev->mode_info.num_crtc = 2;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 6;
		break;
	case CHIP_FIJI:
	case CHIP_TONGA:
		adev->mode_info.num_crtc = 6;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 7;
		break;
	case CHIP_CARRIZO:
		adev->mode_info.num_crtc = 3;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 9;
		break;
	case CHIP_STONEY:
		adev->mode_info.num_crtc = 2;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 9;
		break;
	case CHIP_POLARIS11:
	case CHIP_POLARIS12:
		adev->mode_info.num_crtc = 5;
		adev->mode_info.num_hpd = 5;
		adev->mode_info.num_dig = 5;
		break;
	case CHIP_POLARIS10:
	case CHIP_VEGAM:
		adev->mode_info.num_crtc = 6;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 6;
		break;
	case CHIP_VEGA10:
	case CHIP_VEGA12:
	case CHIP_VEGA20:
		adev->mode_info.num_crtc = 6;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 6;
		break;
#if defined(CONFIG_DRM_AMD_DC_DCN)
	case CHIP_RAVEN:
	case CHIP_RENOIR:
	case CHIP_VANGOGH:
		adev->mode_info.num_crtc = 4;
		adev->mode_info.num_hpd = 4;
		adev->mode_info.num_dig = 4;
		break;
	case CHIP_NAVI10:
	case CHIP_NAVI12:
	case CHIP_SIENNA_CICHLID:
	case CHIP_NAVY_FLOUNDER:
		adev->mode_info.num_crtc = 6;
		adev->mode_info.num_hpd = 6;
		adev->mode_info.num_dig = 6;
		break;
	case CHIP_NAVI14:
	case CHIP_DIMGREY_CAVEFISH:
		adev->mode_info.num_crtc = 5;
		adev->mode_info.num_hpd = 5;
		adev->mode_info.num_dig = 5;
		break;
#endif
	default:
		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
		return -EINVAL;
	}

	amdgpu_dm_set_irq_funcs(adev);

	/* Keep any display funcs another component may have installed. */
	if (adev->mode_info.funcs == NULL)
		adev->mode_info.funcs = &dm_display_funcs;

	/*
	 * Note: Do NOT change adev->audio_endpt_rreg and
	 * adev->audio_endpt_wreg because they are initialised in
	 * amdgpu_device_init()
	 */
#if defined(CONFIG_DEBUG_KERNEL_DC)
	device_create_file(
		adev_to_drm(adev)->dev,
		&dev_attr_s3_debug);
#endif

	return 0;
}
3737
3738 static bool modeset_required(struct drm_crtc_state *crtc_state,
3739                              struct dc_stream_state *new_stream,
3740                              struct dc_stream_state *old_stream)
3741 {
3742         return crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
3743 }
3744
3745 static bool modereset_required(struct drm_crtc_state *crtc_state)
3746 {
3747         return !crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
3748 }
3749
/* drm_encoder_funcs.destroy: release DRM encoder state, then the memory. */
static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
{
	drm_encoder_cleanup(encoder);
	kfree(encoder);
}
3755
/* Encoder vtable: only destruction needs a DM-specific hook. */
static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
	.destroy = amdgpu_dm_encoder_destroy,
};
3759
3760
3761 static void get_min_max_dc_plane_scaling(struct drm_device *dev,
3762                                          struct drm_framebuffer *fb,
3763                                          int *min_downscale, int *max_upscale)
3764 {
3765         struct amdgpu_device *adev = drm_to_adev(dev);
3766         struct dc *dc = adev->dm.dc;
3767         /* Caps for all supported planes are the same on DCE and DCN 1 - 3 */
3768         struct dc_plane_cap *plane_cap = &dc->caps.planes[0];
3769
3770         switch (fb->format->format) {
3771         case DRM_FORMAT_P010:
3772         case DRM_FORMAT_NV12:
3773         case DRM_FORMAT_NV21:
3774                 *max_upscale = plane_cap->max_upscale_factor.nv12;
3775                 *min_downscale = plane_cap->max_downscale_factor.nv12;
3776                 break;
3777
3778         case DRM_FORMAT_XRGB16161616F:
3779         case DRM_FORMAT_ARGB16161616F:
3780         case DRM_FORMAT_XBGR16161616F:
3781         case DRM_FORMAT_ABGR16161616F:
3782                 *max_upscale = plane_cap->max_upscale_factor.fp16;
3783                 *min_downscale = plane_cap->max_downscale_factor.fp16;
3784                 break;
3785
3786         default:
3787                 *max_upscale = plane_cap->max_upscale_factor.argb8888;
3788                 *min_downscale = plane_cap->max_downscale_factor.argb8888;
3789                 break;
3790         }
3791
3792         /*
3793          * A factor of 1 in the plane_cap means to not allow scaling, ie. use a
3794          * scaling factor of 1.0 == 1000 units.
3795          */
3796         if (*max_upscale == 1)
3797                 *max_upscale = 1000;
3798
3799         if (*min_downscale == 1)
3800                 *min_downscale = 1000;
3801 }
3802
3803
3804 static int fill_dc_scaling_info(const struct drm_plane_state *state,
3805                                 struct dc_scaling_info *scaling_info)
3806 {
3807         int scale_w, scale_h, min_downscale, max_upscale;
3808
3809         memset(scaling_info, 0, sizeof(*scaling_info));
3810
3811         /* Source is fixed 16.16 but we ignore mantissa for now... */
3812         scaling_info->src_rect.x = state->src_x >> 16;
3813         scaling_info->src_rect.y = state->src_y >> 16;
3814
3815         scaling_info->src_rect.width = state->src_w >> 16;
3816         if (scaling_info->src_rect.width == 0)
3817                 return -EINVAL;
3818
3819         scaling_info->src_rect.height = state->src_h >> 16;
3820         if (scaling_info->src_rect.height == 0)
3821                 return -EINVAL;
3822
3823         scaling_info->dst_rect.x = state->crtc_x;
3824         scaling_info->dst_rect.y = state->crtc_y;
3825
3826         if (state->crtc_w == 0)
3827                 return -EINVAL;
3828
3829         scaling_info->dst_rect.width = state->crtc_w;
3830
3831         if (state->crtc_h == 0)
3832                 return -EINVAL;
3833
3834         scaling_info->dst_rect.height = state->crtc_h;
3835
3836         /* DRM doesn't specify clipping on destination output. */
3837         scaling_info->clip_rect = scaling_info->dst_rect;
3838
3839         /* Validate scaling per-format with DC plane caps */
3840         if (state->plane && state->plane->dev && state->fb) {
3841                 get_min_max_dc_plane_scaling(state->plane->dev, state->fb,
3842                                              &min_downscale, &max_upscale);
3843         } else {
3844                 min_downscale = 250;
3845                 max_upscale = 16000;
3846         }
3847
3848         scale_w = scaling_info->dst_rect.width * 1000 /
3849                   scaling_info->src_rect.width;
3850
3851         if (scale_w < min_downscale || scale_w > max_upscale)
3852                 return -EINVAL;
3853
3854         scale_h = scaling_info->dst_rect.height * 1000 /
3855                   scaling_info->src_rect.height;
3856
3857         if (scale_h < min_downscale || scale_h > max_upscale)
3858                 return -EINVAL;
3859
3860         /*
3861          * The "scaling_quality" can be ignored for now, quality = 0 has DC
3862          * assume reasonable defaults based on the format.
3863          */
3864
3865         return 0;
3866 }
3867
3868 static void
3869 fill_gfx8_tiling_info_from_flags(union dc_tiling_info *tiling_info,
3870                                  uint64_t tiling_flags)
3871 {
3872         /* Fill GFX8 params */
3873         if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) {
3874                 unsigned int bankw, bankh, mtaspect, tile_split, num_banks;
3875
3876                 bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
3877                 bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
3878                 mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
3879                 tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
3880                 num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
3881
3882                 /* XXX fix me for VI */
3883                 tiling_info->gfx8.num_banks = num_banks;
3884                 tiling_info->gfx8.array_mode =
3885                                 DC_ARRAY_2D_TILED_THIN1;
3886                 tiling_info->gfx8.tile_split = tile_split;
3887                 tiling_info->gfx8.bank_width = bankw;
3888                 tiling_info->gfx8.bank_height = bankh;
3889                 tiling_info->gfx8.tile_aspect = mtaspect;
3890                 tiling_info->gfx8.tile_mode =
3891                                 DC_ADDR_SURF_MICRO_TILING_DISPLAY;
3892         } else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)
3893                         == DC_ARRAY_1D_TILED_THIN1) {
3894                 tiling_info->gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
3895         }
3896
3897         tiling_info->gfx8.pipe_config =
3898                         AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
3899 }
3900
3901 static void
3902 fill_gfx9_tiling_info_from_device(const struct amdgpu_device *adev,
3903                                   union dc_tiling_info *tiling_info)
3904 {
3905         tiling_info->gfx9.num_pipes =
3906                 adev->gfx.config.gb_addr_config_fields.num_pipes;
3907         tiling_info->gfx9.num_banks =
3908                 adev->gfx.config.gb_addr_config_fields.num_banks;
3909         tiling_info->gfx9.pipe_interleave =
3910                 adev->gfx.config.gb_addr_config_fields.pipe_interleave_size;
3911         tiling_info->gfx9.num_shader_engines =
3912                 adev->gfx.config.gb_addr_config_fields.num_se;
3913         tiling_info->gfx9.max_compressed_frags =
3914                 adev->gfx.config.gb_addr_config_fields.max_compress_frags;
3915         tiling_info->gfx9.num_rb_per_se =
3916                 adev->gfx.config.gb_addr_config_fields.num_rb_per_se;
3917         tiling_info->gfx9.shaderEnable = 1;
3918         if (adev->asic_type == CHIP_SIENNA_CICHLID ||
3919             adev->asic_type == CHIP_NAVY_FLOUNDER ||
3920             adev->asic_type == CHIP_DIMGREY_CAVEFISH ||
3921             adev->asic_type == CHIP_VANGOGH)
3922                 tiling_info->gfx9.num_pkrs = adev->gfx.config.gb_addr_config_fields.num_pkrs;
3923 }
3924
3925 static int
3926 validate_dcc(struct amdgpu_device *adev,
3927              const enum surface_pixel_format format,
3928              const enum dc_rotation_angle rotation,
3929              const union dc_tiling_info *tiling_info,
3930              const struct dc_plane_dcc_param *dcc,
3931              const struct dc_plane_address *address,
3932              const struct plane_size *plane_size)
3933 {
3934         struct dc *dc = adev->dm.dc;
3935         struct dc_dcc_surface_param input;
3936         struct dc_surface_dcc_cap output;
3937
3938         memset(&input, 0, sizeof(input));
3939         memset(&output, 0, sizeof(output));
3940
3941         if (!dcc->enable)
3942                 return 0;
3943
3944         if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN ||
3945             !dc->cap_funcs.get_dcc_compression_cap)
3946                 return -EINVAL;
3947
3948         input.format = format;
3949         input.surface_size.width = plane_size->surface_size.width;
3950         input.surface_size.height = plane_size->surface_size.height;
3951         input.swizzle_mode = tiling_info->gfx9.swizzle;
3952
3953         if (rotation == ROTATION_ANGLE_0 || rotation == ROTATION_ANGLE_180)
3954                 input.scan = SCAN_DIRECTION_HORIZONTAL;
3955         else if (rotation == ROTATION_ANGLE_90 || rotation == ROTATION_ANGLE_270)
3956                 input.scan = SCAN_DIRECTION_VERTICAL;
3957
3958         if (!dc->cap_funcs.get_dcc_compression_cap(dc, &input, &output))
3959                 return -EINVAL;
3960
3961         if (!output.capable)
3962                 return -EINVAL;
3963
3964         if (dcc->independent_64b_blks == 0 &&
3965             output.grph.rgb.independent_64b_blks != 0)
3966                 return -EINVAL;
3967
3968         return 0;
3969 }
3970
3971 static bool
3972 modifier_has_dcc(uint64_t modifier)
3973 {
3974         return IS_AMD_FMT_MOD(modifier) && AMD_FMT_MOD_GET(DCC, modifier);
3975 }
3976
3977 static unsigned
3978 modifier_gfx9_swizzle_mode(uint64_t modifier)
3979 {
3980         if (modifier == DRM_FORMAT_MOD_LINEAR)
3981                 return 0;
3982
3983         return AMD_FMT_MOD_GET(TILE, modifier);
3984 }
3985
3986 static const struct drm_format_info *
3987 amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
3988 {
3989         return amdgpu_lookup_format_info(cmd->pixel_format, cmd->modifier[0]);
3990 }
3991
3992 static void
3993 fill_gfx9_tiling_info_from_modifier(const struct amdgpu_device *adev,
3994                                     union dc_tiling_info *tiling_info,
3995                                     uint64_t modifier)
3996 {
3997         unsigned int mod_bank_xor_bits = AMD_FMT_MOD_GET(BANK_XOR_BITS, modifier);
3998         unsigned int mod_pipe_xor_bits = AMD_FMT_MOD_GET(PIPE_XOR_BITS, modifier);
3999         unsigned int pkrs_log2 = AMD_FMT_MOD_GET(PACKERS, modifier);
4000         unsigned int pipes_log2 = min(4u, mod_pipe_xor_bits);
4001
4002         fill_gfx9_tiling_info_from_device(adev, tiling_info);
4003
4004         if (!IS_AMD_FMT_MOD(modifier))
4005                 return;
4006
4007         tiling_info->gfx9.num_pipes = 1u << pipes_log2;
4008         tiling_info->gfx9.num_shader_engines = 1u << (mod_pipe_xor_bits - pipes_log2);
4009
4010         if (adev->family >= AMDGPU_FAMILY_NV) {
4011                 tiling_info->gfx9.num_pkrs = 1u << pkrs_log2;
4012         } else {
4013                 tiling_info->gfx9.num_banks = 1u << mod_bank_xor_bits;
4014
4015                 /* for DCC we know it isn't rb aligned, so rb_per_se doesn't matter. */
4016         }
4017 }
4018
/*
 * Micro-tile swizzle variant, taken from the low two bits of the GFX9+
 * swizzle mode (see dm_plane_format_mod_supported, which masks with 3):
 * Z, S (standard), D (display) or R variants.
 */
enum dm_micro_swizzle {
	MICRO_SWIZZLE_Z = 0,
	MICRO_SWIZZLE_S = 1,
	MICRO_SWIZZLE_D = 2,
	MICRO_SWIZZLE_R = 3
};
4025
4026 static bool dm_plane_format_mod_supported(struct drm_plane *plane,
4027                                           uint32_t format,
4028                                           uint64_t modifier)
4029 {
4030         struct amdgpu_device *adev = drm_to_adev(plane->dev);
4031         const struct drm_format_info *info = drm_format_info(format);
4032
4033         enum dm_micro_swizzle microtile = modifier_gfx9_swizzle_mode(modifier) & 3;
4034
4035         if (!info)
4036                 return false;
4037
4038         /*
4039          * We always have to allow this modifier, because core DRM still
4040          * checks LINEAR support if userspace does not provide modifers.
4041          */
4042         if (modifier == DRM_FORMAT_MOD_LINEAR)
4043                 return true;
4044
4045         /*
4046          * The arbitrary tiling support for multiplane formats has not been hooked
4047          * up.
4048          */
4049         if (info->num_planes > 1)
4050                 return false;
4051
4052         /*
4053          * For D swizzle the canonical modifier depends on the bpp, so check
4054          * it here.
4055          */
4056         if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) == AMD_FMT_MOD_TILE_VER_GFX9 &&
4057             adev->family >= AMDGPU_FAMILY_NV) {
4058                 if (microtile == MICRO_SWIZZLE_D && info->cpp[0] == 4)
4059                         return false;
4060         }
4061
4062         if (adev->family >= AMDGPU_FAMILY_RV && microtile == MICRO_SWIZZLE_D &&
4063             info->cpp[0] < 8)
4064                 return false;
4065
4066         if (modifier_has_dcc(modifier)) {
4067                 /* Per radeonsi comments 16/64 bpp are more complicated. */
4068                 if (info->cpp[0] != 4)
4069                         return false;
4070         }
4071
4072         return true;
4073 }
4074
4075 static void
4076 add_modifier(uint64_t **mods, uint64_t *size, uint64_t *cap, uint64_t mod)
4077 {
4078         if (!*mods)
4079                 return;
4080
4081         if (*cap - *size < 1) {
4082                 uint64_t new_cap = *cap * 2;
4083                 uint64_t *new_mods = kmalloc(new_cap * sizeof(uint64_t), GFP_KERNEL);
4084
4085                 if (!new_mods) {
4086                         kfree(*mods);
4087                         *mods = NULL;
4088                         return;
4089                 }
4090
4091                 memcpy(new_mods, *mods, sizeof(uint64_t) * *size);
4092                 kfree(*mods);
4093                 *mods = new_mods;
4094                 *cap = new_cap;
4095         }
4096
4097         (*mods)[*size] = mod;
4098         *size += 1;
4099 }
4100
/*
 * Advertise the GFX9 (Vega/Raven family) tiling modifiers for a plane.
 * DCC-capable _S_X variants are only emitted for the Raven family; the
 * plain 64K _D/_S fallbacks are always appended last.
 */
static void
add_gfx9_modifiers(const struct amdgpu_device *adev,
		   uint64_t **mods, uint64_t *size, uint64_t *capacity)
{
	/*
	 * XOR-bit budgets come from the harvested GPU config; pipe and bank
	 * XOR bits together are capped at 8.
	 */
	int pipes = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
	int pipe_xor_bits = min(8, pipes +
				ilog2(adev->gfx.config.gb_addr_config_fields.num_se));
	int bank_xor_bits = min(8 - pipe_xor_bits,
				ilog2(adev->gfx.config.gb_addr_config_fields.num_banks));
	int rb = ilog2(adev->gfx.config.gb_addr_config_fields.num_se) +
		 ilog2(adev->gfx.config.gb_addr_config_fields.num_rb_per_se);


	if (adev->family == AMDGPU_FAMILY_RV) {
		/* Raven2 and later */
		bool has_constant_encode = adev->asic_type > CHIP_RAVEN || adev->external_rev_id >= 0x81;

		/*
		 * No _D DCC swizzles yet because we only allow 32bpp, which
		 * doesn't support _D on DCN
		 */

		/* DCC without retile, constant-encode variant first. */
		if (has_constant_encode) {
			add_modifier(mods, size, capacity, AMD_FMT_MOD |
				    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
				    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
				    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
				    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
				    AMD_FMT_MOD_SET(DCC, 1) |
				    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
				    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
				    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1));
		}

		add_modifier(mods, size, capacity, AMD_FMT_MOD |
			    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
			    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
			    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
			    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
			    AMD_FMT_MOD_SET(DCC, 1) |
			    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
			    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
			    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0));

		/*
		 * DCC_RETILE variants additionally encode the RB/PIPE layout
		 * needed for the displayable DCC plane.
		 */
		if (has_constant_encode) {
			add_modifier(mods, size, capacity, AMD_FMT_MOD |
				    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
				    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
				    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
				    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
				    AMD_FMT_MOD_SET(DCC, 1) |
				    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
				    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
				    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |

				    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
				    AMD_FMT_MOD_SET(RB, rb) |
				    AMD_FMT_MOD_SET(PIPE, pipes));
		}

		add_modifier(mods, size, capacity, AMD_FMT_MOD |
			    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
			    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
			    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
			    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
			    AMD_FMT_MOD_SET(DCC, 1) |
			    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
			    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
			    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
			    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0) |
			    AMD_FMT_MOD_SET(RB, rb) |
			    AMD_FMT_MOD_SET(PIPE, pipes));
	}

	/*
	 * Only supported for 64bpp on Raven, will be filtered on format in
	 * dm_plane_format_mod_supported.
	 */
	add_modifier(mods, size, capacity, AMD_FMT_MOD |
		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D_X) |
		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
		    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));

	if (adev->family == AMDGPU_FAMILY_RV) {
		add_modifier(mods, size, capacity, AMD_FMT_MOD |
			    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
			    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
			    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
			    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
	}

	/*
	 * Only supported for 64bpp on Raven, will be filtered on format in
	 * dm_plane_format_mod_supported.
	 */
	add_modifier(mods, size, capacity, AMD_FMT_MOD |
		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));

	if (adev->family == AMDGPU_FAMILY_RV) {
		add_modifier(mods, size, capacity, AMD_FMT_MOD |
			    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
			    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
	}
}
4207
/*
 * Advertise the GFX10.1 (Navi 1x) tiling modifiers: DCC-capable _R_X
 * variants first, then plain X-or tiled, then basic 64K _D/_S fallbacks.
 * GFX10.1 has no bank XOR bits; only the pipe XOR bits are encoded.
 */
static void
add_gfx10_1_modifiers(const struct amdgpu_device *adev,
		      uint64_t **mods, uint64_t *size, uint64_t *capacity)
{
	int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);

	add_modifier(mods, size, capacity, AMD_FMT_MOD |
		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
		    AMD_FMT_MOD_SET(DCC, 1) |
		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));

	/* Same, plus a retiled displayable DCC plane. */
	add_modifier(mods, size, capacity, AMD_FMT_MOD |
		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
		    AMD_FMT_MOD_SET(DCC, 1) |
		    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));

	add_modifier(mods, size, capacity, AMD_FMT_MOD |
		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));

	add_modifier(mods, size, capacity, AMD_FMT_MOD |
		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));


	/* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
	add_modifier(mods, size, capacity, AMD_FMT_MOD |
		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));

	add_modifier(mods, size, capacity, AMD_FMT_MOD |
		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
}
4253
/*
 * Advertise the GFX10.3 (Sienna Cichlid and newer, RB+) tiling modifiers.
 * Compared to GFX10.1 these also encode the packer count and use
 * independent 64B+128B DCC blocks with a 128B max compressed block.
 */
static void
add_gfx10_3_modifiers(const struct amdgpu_device *adev,
		      uint64_t **mods, uint64_t *size, uint64_t *capacity)
{
	int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
	int pkrs = ilog2(adev->gfx.config.gb_addr_config_fields.num_pkrs);

	add_modifier(mods, size, capacity, AMD_FMT_MOD |
		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
		    AMD_FMT_MOD_SET(PACKERS, pkrs) |
		    AMD_FMT_MOD_SET(DCC, 1) |
		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B));

	/* Same, plus a retiled displayable DCC plane. */
	add_modifier(mods, size, capacity, AMD_FMT_MOD |
		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
		    AMD_FMT_MOD_SET(PACKERS, pkrs) |
		    AMD_FMT_MOD_SET(DCC, 1) |
		    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B));

	add_modifier(mods, size, capacity, AMD_FMT_MOD |
		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
		    AMD_FMT_MOD_SET(PACKERS, pkrs));

	add_modifier(mods, size, capacity, AMD_FMT_MOD |
		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
		    AMD_FMT_MOD_SET(PACKERS, pkrs));

	/* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
	add_modifier(mods, size, capacity, AMD_FMT_MOD |
		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));

	add_modifier(mods, size, capacity, AMD_FMT_MOD |
		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
}
4305
4306 static int
4307 get_plane_modifiers(const struct amdgpu_device *adev, unsigned int plane_type, uint64_t **mods)
4308 {
4309         uint64_t size = 0, capacity = 128;
4310         *mods = NULL;
4311
4312         /* We have not hooked up any pre-GFX9 modifiers. */
4313         if (adev->family < AMDGPU_FAMILY_AI)
4314                 return 0;
4315
4316         *mods = kmalloc(capacity * sizeof(uint64_t), GFP_KERNEL);
4317
4318         if (plane_type == DRM_PLANE_TYPE_CURSOR) {
4319                 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
4320                 add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
4321                 return *mods ? 0 : -ENOMEM;
4322         }
4323
4324         switch (adev->family) {
4325         case AMDGPU_FAMILY_AI:
4326         case AMDGPU_FAMILY_RV:
4327                 add_gfx9_modifiers(adev, mods, &size, &capacity);
4328                 break;
4329         case AMDGPU_FAMILY_NV:
4330         case AMDGPU_FAMILY_VGH:
4331                 if (adev->asic_type >= CHIP_SIENNA_CICHLID)
4332                         add_gfx10_3_modifiers(adev, mods, &size, &capacity);
4333                 else
4334                         add_gfx10_1_modifiers(adev, mods, &size, &capacity);
4335                 break;
4336         }
4337
4338         add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
4339
4340         /* INVALID marks the end of the list. */
4341         add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
4342
4343         if (!*mods)
4344                 return -ENOMEM;
4345
4346         return 0;
4347 }
4348
4349 static int
4350 fill_gfx9_plane_attributes_from_modifiers(struct amdgpu_device *adev,
4351                                           const struct amdgpu_framebuffer *afb,
4352                                           const enum surface_pixel_format format,
4353                                           const enum dc_rotation_angle rotation,
4354                                           const struct plane_size *plane_size,
4355                                           union dc_tiling_info *tiling_info,
4356                                           struct dc_plane_dcc_param *dcc,
4357                                           struct dc_plane_address *address,
4358                                           const bool force_disable_dcc)
4359 {
4360         const uint64_t modifier = afb->base.modifier;
4361         int ret;
4362
4363         fill_gfx9_tiling_info_from_modifier(adev, tiling_info, modifier);
4364         tiling_info->gfx9.swizzle = modifier_gfx9_swizzle_mode(modifier);
4365
4366         if (modifier_has_dcc(modifier) && !force_disable_dcc) {
4367                 uint64_t dcc_address = afb->address + afb->base.offsets[1];
4368
4369                 dcc->enable = 1;
4370                 dcc->meta_pitch = afb->base.pitches[1];
4371                 dcc->independent_64b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_64B, modifier);
4372
4373                 address->grph.meta_addr.low_part = lower_32_bits(dcc_address);
4374                 address->grph.meta_addr.high_part = upper_32_bits(dcc_address);
4375         }
4376
4377         ret = validate_dcc(adev, format, rotation, tiling_info, dcc, address, plane_size);
4378         if (ret)
4379                 return ret;
4380
4381         return 0;
4382 }
4383
/*
 * Translate an amdgpu framebuffer into DC's buffer description: surface
 * size/pitch, scanout address(es), tiling info and DCC parameters. All
 * output structs are zeroed first, so untouched fields read as 0/false.
 */
static int
fill_plane_buffer_attributes(struct amdgpu_device *adev,
			     const struct amdgpu_framebuffer *afb,
			     const enum surface_pixel_format format,
			     const enum dc_rotation_angle rotation,
			     const uint64_t tiling_flags,
			     union dc_tiling_info *tiling_info,
			     struct plane_size *plane_size,
			     struct dc_plane_dcc_param *dcc,
			     struct dc_plane_address *address,
			     bool tmz_surface,
			     bool force_disable_dcc)
{
	const struct drm_framebuffer *fb = &afb->base;
	int ret;

	memset(tiling_info, 0, sizeof(*tiling_info));
	memset(plane_size, 0, sizeof(*plane_size));
	memset(dcc, 0, sizeof(*dcc));
	memset(address, 0, sizeof(*address));

	address->tmz_surface = tmz_surface;

	if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
		/* Single-plane graphics (RGB) surface. */
		uint64_t addr = afb->address + fb->offsets[0];

		plane_size->surface_size.x = 0;
		plane_size->surface_size.y = 0;
		plane_size->surface_size.width = fb->width;
		plane_size->surface_size.height = fb->height;
		/* DC pitches are in pixels, DRM pitches in bytes. */
		plane_size->surface_pitch =
			fb->pitches[0] / fb->format->cpp[0];

		address->type = PLN_ADDR_TYPE_GRAPHICS;
		address->grph.addr.low_part = lower_32_bits(addr);
		address->grph.addr.high_part = upper_32_bits(addr);
	} else if (format < SURFACE_PIXEL_FORMAT_INVALID) {
		/* Two-plane video surface: luma in fb plane 0, chroma in plane 1. */
		uint64_t luma_addr = afb->address + fb->offsets[0];
		uint64_t chroma_addr = afb->address + fb->offsets[1];

		plane_size->surface_size.x = 0;
		plane_size->surface_size.y = 0;
		plane_size->surface_size.width = fb->width;
		plane_size->surface_size.height = fb->height;
		plane_size->surface_pitch =
			fb->pitches[0] / fb->format->cpp[0];

		plane_size->chroma_size.x = 0;
		plane_size->chroma_size.y = 0;
		/* TODO: set these based on surface format */
		plane_size->chroma_size.width = fb->width / 2;
		plane_size->chroma_size.height = fb->height / 2;

		plane_size->chroma_pitch =
			fb->pitches[1] / fb->format->cpp[1];

		address->type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
		address->video_progressive.luma_addr.low_part =
			lower_32_bits(luma_addr);
		address->video_progressive.luma_addr.high_part =
			upper_32_bits(luma_addr);
		address->video_progressive.chroma_addr.low_part =
			lower_32_bits(chroma_addr);
		address->video_progressive.chroma_addr.high_part =
			upper_32_bits(chroma_addr);
	}

	if (adev->family >= AMDGPU_FAMILY_AI) {
		/* GFX9+: tiling/DCC are derived from the fb's format modifier. */
		ret = fill_gfx9_plane_attributes_from_modifiers(adev, afb, format,
								rotation, plane_size,
								tiling_info, dcc,
								address,
								force_disable_dcc);
		if (ret)
			return ret;
	} else {
		/* Pre-GFX9: tiling comes from the legacy tiling flags. */
		fill_gfx8_tiling_info_from_flags(tiling_info, tiling_flags);
	}

	return 0;
}
4465
4466 static void
4467 fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
4468                                bool *per_pixel_alpha, bool *global_alpha,
4469                                int *global_alpha_value)
4470 {
4471         *per_pixel_alpha = false;
4472         *global_alpha = false;
4473         *global_alpha_value = 0xff;
4474
4475         if (plane_state->plane->type != DRM_PLANE_TYPE_OVERLAY)
4476                 return;
4477
4478         if (plane_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI) {
4479                 static const uint32_t alpha_formats[] = {
4480                         DRM_FORMAT_ARGB8888,
4481                         DRM_FORMAT_RGBA8888,
4482                         DRM_FORMAT_ABGR8888,
4483                 };
4484                 uint32_t format = plane_state->fb->format->format;
4485                 unsigned int i;
4486
4487                 for (i = 0; i < ARRAY_SIZE(alpha_formats); ++i) {
4488                         if (format == alpha_formats[i]) {
4489                                 *per_pixel_alpha = true;
4490                                 break;
4491                         }
4492                 }
4493         }
4494
4495         if (plane_state->alpha < 0xffff) {
4496                 *global_alpha = true;
4497                 *global_alpha_value = plane_state->alpha >> 8;
4498         }
4499 }
4500
4501 static int
4502 fill_plane_color_attributes(const struct drm_plane_state *plane_state,
4503                             const enum surface_pixel_format format,
4504                             enum dc_color_space *color_space)
4505 {
4506         bool full_range;
4507
4508         *color_space = COLOR_SPACE_SRGB;
4509
4510         /* DRM color properties only affect non-RGB formats. */
4511         if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
4512                 return 0;
4513
4514         full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE);
4515
4516         switch (plane_state->color_encoding) {
4517         case DRM_COLOR_YCBCR_BT601:
4518                 if (full_range)
4519                         *color_space = COLOR_SPACE_YCBCR601;
4520                 else
4521                         *color_space = COLOR_SPACE_YCBCR601_LIMITED;
4522                 break;
4523
4524         case DRM_COLOR_YCBCR_BT709:
4525                 if (full_range)
4526                         *color_space = COLOR_SPACE_YCBCR709;
4527                 else
4528                         *color_space = COLOR_SPACE_YCBCR709_LIMITED;
4529                 break;
4530
4531         case DRM_COLOR_YCBCR_BT2020:
4532                 if (full_range)
4533                         *color_space = COLOR_SPACE_2020_YCBCR;
4534                 else
4535                         return -EINVAL;
4536                 break;
4537
4538         default:
4539                 return -EINVAL;
4540         }
4541
4542         return 0;
4543 }
4544
/*
 * Build a complete dc_plane_info (format, rotation, size, tiling, DCC,
 * color space, blending) plus the scanout address for a DRM plane state.
 * Returns 0 on success, -EINVAL for unsupported formats/color configs, or
 * a negative errno from the buffer-attribute helpers.
 */
static int
fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
			    const struct drm_plane_state *plane_state,
			    const uint64_t tiling_flags,
			    struct dc_plane_info *plane_info,
			    struct dc_plane_address *address,
			    bool tmz_surface,
			    bool force_disable_dcc)
{
	const struct drm_framebuffer *fb = plane_state->fb;
	const struct amdgpu_framebuffer *afb =
		to_amdgpu_framebuffer(plane_state->fb);
	struct drm_format_name_buf format_name;
	int ret;

	memset(plane_info, 0, sizeof(*plane_info));

	/* Map the DRM fourcc to DC's surface pixel format. */
	switch (fb->format->format) {
	case DRM_FORMAT_C8:
		plane_info->format =
			SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
		break;
	case DRM_FORMAT_RGB565:
		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
		break;
	case DRM_FORMAT_XRGB8888:
	case DRM_FORMAT_ARGB8888:
		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
		break;
	case DRM_FORMAT_XRGB2101010:
	case DRM_FORMAT_ARGB2101010:
		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
		break;
	case DRM_FORMAT_XBGR2101010:
	case DRM_FORMAT_ABGR2101010:
		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
		break;
	case DRM_FORMAT_XBGR8888:
	case DRM_FORMAT_ABGR8888:
		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888;
		break;
	case DRM_FORMAT_NV21:
		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
		break;
	case DRM_FORMAT_NV12:
		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
		break;
	case DRM_FORMAT_P010:
		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb;
		break;
	case DRM_FORMAT_XRGB16161616F:
	case DRM_FORMAT_ARGB16161616F:
		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F;
		break;
	case DRM_FORMAT_XBGR16161616F:
	case DRM_FORMAT_ABGR16161616F:
		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F;
		break;
	default:
		DRM_ERROR(
			"Unsupported screen format %s\n",
			drm_get_format_name(fb->format->format, &format_name));
		return -EINVAL;
	}

	/* Only the rotation bits are honoured; reflection is ignored here. */
	switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
	case DRM_MODE_ROTATE_0:
		plane_info->rotation = ROTATION_ANGLE_0;
		break;
	case DRM_MODE_ROTATE_90:
		plane_info->rotation = ROTATION_ANGLE_90;
		break;
	case DRM_MODE_ROTATE_180:
		plane_info->rotation = ROTATION_ANGLE_180;
		break;
	case DRM_MODE_ROTATE_270:
		plane_info->rotation = ROTATION_ANGLE_270;
		break;
	default:
		plane_info->rotation = ROTATION_ANGLE_0;
		break;
	}

	plane_info->visible = true;
	plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE;

	plane_info->layer_index = 0;

	ret = fill_plane_color_attributes(plane_state, plane_info->format,
					  &plane_info->color_space);
	if (ret)
		return ret;

	/* Size, pitch, address, tiling and DCC come from the fb itself. */
	ret = fill_plane_buffer_attributes(adev, afb, plane_info->format,
					   plane_info->rotation, tiling_flags,
					   &plane_info->tiling_info,
					   &plane_info->plane_size,
					   &plane_info->dcc, address, tmz_surface,
					   force_disable_dcc);
	if (ret)
		return ret;

	fill_blending_from_plane_state(
		plane_state, &plane_info->per_pixel_alpha,
		&plane_info->global_alpha, &plane_info->global_alpha_value);

	return 0;
}
4653
4654 static int fill_dc_plane_attributes(struct amdgpu_device *adev,
4655                                     struct dc_plane_state *dc_plane_state,
4656                                     struct drm_plane_state *plane_state,
4657                                     struct drm_crtc_state *crtc_state)
4658 {
4659         struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
4660         struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)plane_state->fb;
4661         struct dc_scaling_info scaling_info;
4662         struct dc_plane_info plane_info;
4663         int ret;
4664         bool force_disable_dcc = false;
4665
4666         ret = fill_dc_scaling_info(plane_state, &scaling_info);
4667         if (ret)
4668                 return ret;
4669
4670         dc_plane_state->src_rect = scaling_info.src_rect;
4671         dc_plane_state->dst_rect = scaling_info.dst_rect;
4672         dc_plane_state->clip_rect = scaling_info.clip_rect;
4673         dc_plane_state->scaling_quality = scaling_info.scaling_quality;
4674
4675         force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
4676         ret = fill_dc_plane_info_and_addr(adev, plane_state,
4677                                           afb->tiling_flags,
4678                                           &plane_info,
4679                                           &dc_plane_state->address,
4680                                           afb->tmz_surface,
4681                                           force_disable_dcc);
4682         if (ret)
4683                 return ret;
4684
4685         dc_plane_state->format = plane_info.format;
4686         dc_plane_state->color_space = plane_info.color_space;
4687         dc_plane_state->format = plane_info.format;
4688         dc_plane_state->plane_size = plane_info.plane_size;
4689         dc_plane_state->rotation = plane_info.rotation;
4690         dc_plane_state->horizontal_mirror = plane_info.horizontal_mirror;
4691         dc_plane_state->stereo_format = plane_info.stereo_format;
4692         dc_plane_state->tiling_info = plane_info.tiling_info;
4693         dc_plane_state->visible = plane_info.visible;
4694         dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha;
4695         dc_plane_state->global_alpha = plane_info.global_alpha;
4696         dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
4697         dc_plane_state->dcc = plane_info.dcc;
4698         dc_plane_state->layer_index = plane_info.layer_index; // Always returns 0
4699
4700         /*
4701          * Always set input transfer function, since plane state is refreshed
4702          * every time.
4703          */
4704         ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state, dc_plane_state);
4705         if (ret)
4706                 return ret;
4707
4708         return 0;
4709 }
4710
/*
 * Compute stream->src (the viewport in composition space) and stream->dst
 * (the stream addressable area) from the requested mode and the connector
 * state's RMX scaling type and underscan borders.
 */
static void update_stream_scaling_settings(const struct drm_display_mode *mode,
                                           const struct dm_connector_state *dm_state,
                                           struct dc_stream_state *stream)
{
        enum amdgpu_rmx_type rmx_type;

        struct rect src = { 0 }; /* viewport in composition space */
        struct rect dst = { 0 }; /* stream addressable area */

        /* no mode. nothing to be done */
        if (!mode)
                return;

        /* Full screen scaling by default */
        src.width = mode->hdisplay;
        src.height = mode->vdisplay;
        dst.width = stream->timing.h_addressable;
        dst.height = stream->timing.v_addressable;

        if (dm_state) {
                rmx_type = dm_state->scaling;
                if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
                        /* Preserve aspect ratio by shrinking dst on one axis. */
                        if (src.width * dst.height <
                                        src.height * dst.width) {
                                /* height needs less upscaling/more downscaling */
                                dst.width = src.width *
                                                dst.height / src.height;
                        } else {
                                /* width needs less upscaling/more downscaling */
                                dst.height = src.height *
                                                dst.width / src.width;
                        }
                } else if (rmx_type == RMX_CENTER) {
                        /* Centered 1:1 — destination equals the source size. */
                        dst = src;
                }

                /* Center the (possibly shrunk) destination rectangle. */
                dst.x = (stream->timing.h_addressable - dst.width) / 2;
                dst.y = (stream->timing.v_addressable - dst.height) / 2;

                /* Pull the edges in by the user-requested underscan borders. */
                if (dm_state->underscan_enable) {
                        dst.x += dm_state->underscan_hborder / 2;
                        dst.y += dm_state->underscan_vborder / 2;
                        dst.width -= dm_state->underscan_hborder;
                        dst.height -= dm_state->underscan_vborder;
                }
        }

        stream->src = src;
        stream->dst = dst;

        DRM_DEBUG_DRIVER("Destination Rectangle x:%d  y:%d  width:%d  height:%d\n",
                        dst.x, dst.y, dst.width, dst.height);

}
4765
4766 static enum dc_color_depth
4767 convert_color_depth_from_display_info(const struct drm_connector *connector,
4768                                       bool is_y420, int requested_bpc)
4769 {
4770         uint8_t bpc;
4771
4772         if (is_y420) {
4773                 bpc = 8;
4774
4775                 /* Cap display bpc based on HDMI 2.0 HF-VSDB */
4776                 if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48)
4777                         bpc = 16;
4778                 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36)
4779                         bpc = 12;
4780                 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30)
4781                         bpc = 10;
4782         } else {
4783                 bpc = (uint8_t)connector->display_info.bpc;
4784                 /* Assume 8 bpc by default if no bpc is specified. */
4785                 bpc = bpc ? bpc : 8;
4786         }
4787
4788         if (requested_bpc > 0) {
4789                 /*
4790                  * Cap display bpc based on the user requested value.
4791                  *
4792                  * The value for state->max_bpc may not correctly updated
4793                  * depending on when the connector gets added to the state
4794                  * or if this was called outside of atomic check, so it
4795                  * can't be used directly.
4796                  */
4797                 bpc = min_t(u8, bpc, requested_bpc);
4798
4799                 /* Round down to the nearest even number. */
4800                 bpc = bpc - (bpc & 1);
4801         }
4802
4803         switch (bpc) {
4804         case 0:
4805                 /*
4806                  * Temporary Work around, DRM doesn't parse color depth for
4807                  * EDID revision before 1.4
4808                  * TODO: Fix edid parsing
4809                  */
4810                 return COLOR_DEPTH_888;
4811         case 6:
4812                 return COLOR_DEPTH_666;
4813         case 8:
4814                 return COLOR_DEPTH_888;
4815         case 10:
4816                 return COLOR_DEPTH_101010;
4817         case 12:
4818                 return COLOR_DEPTH_121212;
4819         case 14:
4820                 return COLOR_DEPTH_141414;
4821         case 16:
4822                 return COLOR_DEPTH_161616;
4823         default:
4824                 return COLOR_DEPTH_UNDEFINED;
4825         }
4826 }
4827
/* Map the DRM picture aspect ratio onto DC's aspect-ratio enum. */
static enum dc_aspect_ratio
get_aspect_ratio(const struct drm_display_mode *mode_in)
{
        /* 1-1 mapping, since both enums follow the HDMI spec. */
        return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio;
}
4834
4835 static enum dc_color_space
4836 get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
4837 {
4838         enum dc_color_space color_space = COLOR_SPACE_SRGB;
4839
4840         switch (dc_crtc_timing->pixel_encoding) {
4841         case PIXEL_ENCODING_YCBCR422:
4842         case PIXEL_ENCODING_YCBCR444:
4843         case PIXEL_ENCODING_YCBCR420:
4844         {
4845                 /*
4846                  * 27030khz is the separation point between HDTV and SDTV
4847                  * according to HDMI spec, we use YCbCr709 and YCbCr601
4848                  * respectively
4849                  */
4850                 if (dc_crtc_timing->pix_clk_100hz > 270300) {
4851                         if (dc_crtc_timing->flags.Y_ONLY)
4852                                 color_space =
4853                                         COLOR_SPACE_YCBCR709_LIMITED;
4854                         else
4855                                 color_space = COLOR_SPACE_YCBCR709;
4856                 } else {
4857                         if (dc_crtc_timing->flags.Y_ONLY)
4858                                 color_space =
4859                                         COLOR_SPACE_YCBCR601_LIMITED;
4860                         else
4861                                 color_space = COLOR_SPACE_YCBCR601;
4862                 }
4863
4864         }
4865         break;
4866         case PIXEL_ENCODING_RGB:
4867                 color_space = COLOR_SPACE_SRGB;
4868                 break;
4869
4870         default:
4871                 WARN_ON(1);
4872                 break;
4873         }
4874
4875         return color_space;
4876 }
4877
/*
 * Walk display_color_depth downward until the HDMI pixel clock implied by
 * @timing_out fits within the sink's max TMDS clock from @info.
 *
 * Returns true when a fitting depth was found (written back into
 * @timing_out), false when no HDMI-valid depth fits.
 *
 * NOTE(review): the --depth step assumes the dc_color_depth enumerators
 * are declared consecutively in ascending depth order — confirm against
 * the enum definition.
 */
static bool adjust_colour_depth_from_display_info(
        struct dc_crtc_timing *timing_out,
        const struct drm_display_info *info)
{
        enum dc_color_depth depth = timing_out->display_color_depth;
        int normalized_clk;
        do {
                normalized_clk = timing_out->pix_clk_100hz / 10;
                /* YCbCr 4:2:0 requires additional adjustment of 1/2 */
                if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
                        normalized_clk /= 2;
                /* Adjusting pix clock following on HDMI spec based on colour depth */
                switch (depth) {
                case COLOR_DEPTH_888:
                        break;
                case COLOR_DEPTH_101010:
                        normalized_clk = (normalized_clk * 30) / 24;
                        break;
                case COLOR_DEPTH_121212:
                        normalized_clk = (normalized_clk * 36) / 24;
                        break;
                case COLOR_DEPTH_161616:
                        normalized_clk = (normalized_clk * 48) / 24;
                        break;
                default:
                        /* The above depths are the only ones valid for HDMI. */
                        return false;
                }
                if (normalized_clk <= info->max_tmds_clock) {
                        timing_out->display_color_depth = depth;
                        return true;
                }
        } while (--depth > COLOR_DEPTH_666);
        return false;
}
4913
4914 static void fill_stream_properties_from_drm_display_mode(
4915         struct dc_stream_state *stream,
4916         const struct drm_display_mode *mode_in,
4917         const struct drm_connector *connector,
4918         const struct drm_connector_state *connector_state,
4919         const struct dc_stream_state *old_stream,
4920         int requested_bpc)
4921 {
4922         struct dc_crtc_timing *timing_out = &stream->timing;
4923         const struct drm_display_info *info = &connector->display_info;
4924         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
4925         struct hdmi_vendor_infoframe hv_frame;
4926         struct hdmi_avi_infoframe avi_frame;
4927
4928         memset(&hv_frame, 0, sizeof(hv_frame));
4929         memset(&avi_frame, 0, sizeof(avi_frame));
4930
4931         timing_out->h_border_left = 0;
4932         timing_out->h_border_right = 0;
4933         timing_out->v_border_top = 0;
4934         timing_out->v_border_bottom = 0;
4935         /* TODO: un-hardcode */
4936         if (drm_mode_is_420_only(info, mode_in)
4937                         && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
4938                 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
4939         else if (drm_mode_is_420_also(info, mode_in)
4940                         && aconnector->force_yuv420_output)
4941                 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
4942         else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444)
4943                         && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
4944                 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
4945         else
4946                 timing_out->pixel_encoding = PIXEL_ENCODING_RGB;
4947
4948         timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
4949         timing_out->display_color_depth = convert_color_depth_from_display_info(
4950                 connector,
4951                 (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420),
4952                 requested_bpc);
4953         timing_out->scan_type = SCANNING_TYPE_NODATA;
4954         timing_out->hdmi_vic = 0;
4955
4956         if(old_stream) {
4957                 timing_out->vic = old_stream->timing.vic;
4958                 timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY;
4959                 timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY;
4960         } else {
4961                 timing_out->vic = drm_match_cea_mode(mode_in);
4962                 if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
4963                         timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
4964                 if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
4965                         timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
4966         }
4967
4968         if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
4969                 drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in);
4970                 timing_out->vic = avi_frame.video_code;
4971                 drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in);
4972                 timing_out->hdmi_vic = hv_frame.vic;
4973         }
4974
4975         timing_out->h_addressable = mode_in->crtc_hdisplay;
4976         timing_out->h_total = mode_in->crtc_htotal;
4977         timing_out->h_sync_width =
4978                 mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
4979         timing_out->h_front_porch =
4980                 mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
4981         timing_out->v_total = mode_in->crtc_vtotal;
4982         timing_out->v_addressable = mode_in->crtc_vdisplay;
4983         timing_out->v_front_porch =
4984                 mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
4985         timing_out->v_sync_width =
4986                 mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
4987         timing_out->pix_clk_100hz = mode_in->crtc_clock * 10;
4988         timing_out->aspect_ratio = get_aspect_ratio(mode_in);
4989
4990         stream->output_color_space = get_output_color_space(timing_out);
4991
4992         stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
4993         stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
4994         if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
4995                 if (!adjust_colour_depth_from_display_info(timing_out, info) &&
4996                     drm_mode_is_420_also(info, mode_in) &&
4997                     timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) {
4998                         timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
4999                         adjust_colour_depth_from_display_info(timing_out, info);
5000                 }
5001         }
5002 }
5003
/*
 * Populate @audio_info from the sink's parsed EDID caps and the DRM
 * connector's cached latency data.
 */
static void fill_audio_info(struct audio_info *audio_info,
                            const struct drm_connector *drm_connector,
                            const struct dc_sink *dc_sink)
{
        int i = 0;
        int cea_revision = 0;
        const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;

        audio_info->manufacture_id = edid_caps->manufacturer_id;
        audio_info->product_id = edid_caps->product_id;

        cea_revision = drm_connector->display_info.cea_rev;

        strscpy(audio_info->display_name,
                edid_caps->display_name,
                AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);

        /* Audio short descriptors exist from CEA-861 revision 3 onward. */
        if (cea_revision >= 3) {
                audio_info->mode_count = edid_caps->audio_mode_count;

                for (i = 0; i < audio_info->mode_count; ++i) {
                        audio_info->modes[i].format_code =
                                        (enum audio_format_code)
                                        (edid_caps->audio_modes[i].format_code);
                        audio_info->modes[i].channel_count =
                                        edid_caps->audio_modes[i].channel_count;
                        audio_info->modes[i].sample_rates.all =
                                        edid_caps->audio_modes[i].sample_rate;
                        audio_info->modes[i].sample_size =
                                        edid_caps->audio_modes[i].sample_size;
                }
        }

        audio_info->flags.all = edid_caps->speaker_flags;

        /* TODO: We only check for the progressive mode, check for interlace mode too */
        if (drm_connector->latency_present[0]) {
                audio_info->video_latency = drm_connector->video_latency[0];
                audio_info->audio_latency = drm_connector->audio_latency[0];
        }

        /* TODO: For DP, video and audio latency should be calculated from DPCD caps */

}
5048
5049 static void
5050 copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
5051                                       struct drm_display_mode *dst_mode)
5052 {
5053         dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
5054         dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
5055         dst_mode->crtc_clock = src_mode->crtc_clock;
5056         dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
5057         dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
5058         dst_mode->crtc_hsync_start =  src_mode->crtc_hsync_start;
5059         dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
5060         dst_mode->crtc_htotal = src_mode->crtc_htotal;
5061         dst_mode->crtc_hskew = src_mode->crtc_hskew;
5062         dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
5063         dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
5064         dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
5065         dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
5066         dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
5067 }
5068
5069 static void
5070 decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
5071                                         const struct drm_display_mode *native_mode,
5072                                         bool scale_enabled)
5073 {
5074         if (scale_enabled) {
5075                 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
5076         } else if (native_mode->clock == drm_mode->clock &&
5077                         native_mode->htotal == drm_mode->htotal &&
5078                         native_mode->vtotal == drm_mode->vtotal) {
5079                 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
5080         } else {
5081                 /* no scaling nor amdgpu inserted, no need to patch */
5082         }
5083 }
5084
5085 static struct dc_sink *
5086 create_fake_sink(struct amdgpu_dm_connector *aconnector)
5087 {
5088         struct dc_sink_init_data sink_init_data = { 0 };
5089         struct dc_sink *sink = NULL;
5090         sink_init_data.link = aconnector->dc_link;
5091         sink_init_data.sink_signal = aconnector->dc_link->connector_signal;
5092
5093         sink = dc_sink_create(&sink_init_data);
5094         if (!sink) {
5095                 DRM_ERROR("Failed to create sink!\n");
5096                 return NULL;
5097         }
5098         sink->sink_signal = SIGNAL_TYPE_VIRTUAL;
5099
5100         return sink;
5101 }
5102
5103 static void set_multisync_trigger_params(
5104                 struct dc_stream_state *stream)
5105 {
5106         if (stream->triggered_crtc_reset.enabled) {
5107                 stream->triggered_crtc_reset.event = CRTC_EVENT_VSYNC_RISING;
5108                 stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_LINE;
5109         }
5110 }
5111
5112 static void set_master_stream(struct dc_stream_state *stream_set[],
5113                               int stream_count)
5114 {
5115         int j, highest_rfr = 0, master_stream = 0;
5116
5117         for (j = 0;  j < stream_count; j++) {
5118                 if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
5119                         int refresh_rate = 0;
5120
5121                         refresh_rate = (stream_set[j]->timing.pix_clk_100hz*100)/
5122                                 (stream_set[j]->timing.h_total*stream_set[j]->timing.v_total);
5123                         if (refresh_rate > highest_rfr) {
5124                                 highest_rfr = refresh_rate;
5125                                 master_stream = j;
5126                         }
5127                 }
5128         }
5129         for (j = 0;  j < stream_count; j++) {
5130                 if (stream_set[j])
5131                         stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
5132         }
5133 }
5134
5135 static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
5136 {
5137         int i = 0;
5138
5139         if (context->stream_count < 2)
5140                 return;
5141         for (i = 0; i < context->stream_count ; i++) {
5142                 if (!context->streams[i])
5143                         continue;
5144                 /*
5145                  * TODO: add a function to read AMD VSDB bits and set
5146                  * crtc_sync_master.multi_sync_enabled flag
5147                  * For now it's set to false
5148                  */
5149                 set_multisync_trigger_params(context->streams[i]);
5150         }
5151         set_master_stream(context->streams, context->stream_count);
5152 }
5153
/*
 * Build and return a new dc_stream_state for @aconnector driving @drm_mode.
 *
 * If the connector has no dc_sink yet, a fake virtual sink is created so a
 * stream can still be constructed. @old_stream, when non-NULL, supplies
 * previous timings so a scaling-only change keeps VIC and sync polarity.
 * @requested_bpc caps the colour depth (0 = no cap).
 *
 * Returns NULL on failure; the caller owns the returned stream reference.
 */
static struct dc_stream_state *
create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
                       const struct drm_display_mode *drm_mode,
                       const struct dm_connector_state *dm_state,
                       const struct dc_stream_state *old_stream,
                       int requested_bpc)
{
        struct drm_display_mode *preferred_mode = NULL;
        struct drm_connector *drm_connector;
        const struct drm_connector_state *con_state =
                dm_state ? &dm_state->base : NULL;
        struct dc_stream_state *stream = NULL;
        struct drm_display_mode mode = *drm_mode;
        bool native_mode_found = false;
        bool scale = dm_state ? (dm_state->scaling != RMX_OFF) : false;
        int mode_refresh;
        int preferred_refresh = 0;
#if defined(CONFIG_DRM_AMD_DC_DCN)
        struct dsc_dec_dpcd_caps dsc_caps;
        uint32_t link_bandwidth_kbps;
#endif
        struct dc_sink *sink = NULL;
        if (aconnector == NULL) {
                DRM_ERROR("aconnector is NULL!\n");
                return stream;
        }

        drm_connector = &aconnector->base;

        /* No real sink attached: fall back to a fake virtual sink. */
        if (!aconnector->dc_sink) {
                sink = create_fake_sink(aconnector);
                if (!sink)
                        return stream;
        } else {
                sink = aconnector->dc_sink;
                dc_sink_retain(sink);
        }

        stream = dc_create_stream_for_sink(sink);

        if (stream == NULL) {
                DRM_ERROR("Failed to create stream for sink!\n");
                goto finish;
        }

        stream->dm_stream_context = aconnector;

        stream->timing.flags.LTE_340MCSC_SCRAMBLE =
                drm_connector->display_info.hdmi.scdc.scrambling.low_rates;

        list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
                /* Search for preferred mode */
                if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
                        native_mode_found = true;
                        break;
                }
        }
        /* No preferred mode flagged: fall back to the first listed mode. */
        if (!native_mode_found)
                preferred_mode = list_first_entry_or_null(
                                &aconnector->base.modes,
                                struct drm_display_mode,
                                head);

        mode_refresh = drm_mode_vrefresh(&mode);

        if (preferred_mode == NULL) {
                /*
                 * This may not be an error, the use case is when we have no
                 * usermode calls to reset and set mode upon hotplug. In this
                 * case, we call set mode ourselves to restore the previous mode
                 * and the modelist may not be filled in in time.
                 */
                DRM_DEBUG_DRIVER("No preferred mode found\n");
        } else {
                decide_crtc_timing_for_drm_display_mode(
                                &mode, preferred_mode,
                                dm_state ? (dm_state->scaling != RMX_OFF) : false);
                preferred_refresh = drm_mode_vrefresh(preferred_mode);
        }

        if (!dm_state)
                drm_mode_set_crtcinfo(&mode, 0);

        /*
         * If scaling is enabled and the refresh rate didn't change,
         * copy the VIC and polarities of the old timings.
         */
        if (!scale || mode_refresh != preferred_refresh)
                fill_stream_properties_from_drm_display_mode(stream,
                        &mode, &aconnector->base, con_state, NULL, requested_bpc);
        else
                fill_stream_properties_from_drm_display_mode(stream,
                        &mode, &aconnector->base, con_state, old_stream, requested_bpc);

        stream->timing.flags.DSC = 0;

        /* DSC setup is only attempted for DP sinks on DCN hardware. */
        if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
#if defined(CONFIG_DRM_AMD_DC_DCN)
                dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
                                      aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
                                      aconnector->dc_link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw,
                                      &dsc_caps);
                link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
                                                             dc_link_get_link_cap(aconnector->dc_link));

                if (aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE && dsc_caps.is_dsc_supported) {
                        /* Set DSC policy according to dsc_clock_en */
                        dc_dsc_policy_set_enable_dsc_when_not_needed(
                                aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE);

                        if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
                                                  &dsc_caps,
                                                  aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
                                                  0,
                                                  link_bandwidth_kbps,
                                                  &stream->timing,
                                                  &stream->timing.dsc_cfg))
                                stream->timing.flags.DSC = 1;
                        /* Overwrite the stream flag if DSC is enabled through debugfs */
                        if (aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE)
                                stream->timing.flags.DSC = 1;

                        if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_h)
                                stream->timing.dsc_cfg.num_slices_h = aconnector->dsc_settings.dsc_num_slices_h;

                        if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_v)
                                stream->timing.dsc_cfg.num_slices_v = aconnector->dsc_settings.dsc_num_slices_v;

                        if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_bits_per_pixel)
                                stream->timing.dsc_cfg.bits_per_pixel = aconnector->dsc_settings.dsc_bits_per_pixel;
                }
#endif
        }

        update_stream_scaling_settings(&mode, dm_state, stream);

        fill_audio_info(
                &stream->audio_info,
                drm_connector,
                sink);

        update_stream_signal(stream, sink);

        if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
                mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket);

        if (stream->link->psr_settings.psr_feature_enabled) {
                /*
                 * Decide whether the stream supports VSC SDP colorimetry
                 * before building the VSC info packet.
                 */
                stream->use_vsc_sdp_for_colorimetry = false;
                if (aconnector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
                        stream->use_vsc_sdp_for_colorimetry =
                                aconnector->dc_sink->is_vsc_sdp_colorimetry_supported;
                } else {
                        if (stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED)
                                stream->use_vsc_sdp_for_colorimetry = true;
                }
                mod_build_vsc_infopacket(stream, &stream->vsc_infopacket);
        }
finish:
        dc_sink_release(sink);

        return stream;
}
5320
/*
 * drm_crtc_funcs.destroy: release DRM core CRTC bookkeeping, then free
 * the allocation the drm_crtc is embedded in (freed via the base pointer;
 * assumes drm_crtc is the first member of the driver CRTC struct — the
 * usual embedding convention).
 */
static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
{
	drm_crtc_cleanup(crtc);
	kfree(crtc);
}
5326
5327 static void dm_crtc_destroy_state(struct drm_crtc *crtc,
5328                                   struct drm_crtc_state *state)
5329 {
5330         struct dm_crtc_state *cur = to_dm_crtc_state(state);
5331
5332         /* TODO Destroy dc_stream objects are stream object is flattened */
5333         if (cur->stream)
5334                 dc_stream_release(cur->stream);
5335
5336
5337         __drm_atomic_helper_crtc_destroy_state(state);
5338
5339
5340         kfree(state);
5341 }
5342
5343 static void dm_crtc_reset_state(struct drm_crtc *crtc)
5344 {
5345         struct dm_crtc_state *state;
5346
5347         if (crtc->state)
5348                 dm_crtc_destroy_state(crtc, crtc->state);
5349
5350         state = kzalloc(sizeof(*state), GFP_KERNEL);
5351         if (WARN_ON(!state))
5352                 return;
5353
5354         __drm_atomic_helper_crtc_reset(crtc, &state->base);
5355 }
5356
5357 static struct drm_crtc_state *
5358 dm_crtc_duplicate_state(struct drm_crtc *crtc)
5359 {
5360         struct dm_crtc_state *state, *cur;
5361
5362         cur = to_dm_crtc_state(crtc->state);
5363
5364         if (WARN_ON(!crtc->state))
5365                 return NULL;
5366
5367         state = kzalloc(sizeof(*state), GFP_KERNEL);
5368         if (!state)
5369                 return NULL;
5370
5371         __drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
5372
5373         if (cur->stream) {
5374                 state->stream = cur->stream;
5375                 dc_stream_retain(state->stream);
5376         }
5377
5378         state->active_planes = cur->active_planes;
5379         state->vrr_infopacket = cur->vrr_infopacket;
5380         state->abm_level = cur->abm_level;
5381         state->vrr_supported = cur->vrr_supported;
5382         state->freesync_config = cur->freesync_config;
5383         state->crc_src = cur->crc_src;
5384         state->cm_has_degamma = cur->cm_has_degamma;
5385         state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb;
5386 #ifdef CONFIG_DEBUG_FS
5387         state->crc_window = cur->crc_window;
5388 #endif
5389         /* TODO Duplicate dc_stream after objects are stream object is flattened */
5390
5391         return &state->base;
5392 }
5393
5394 #ifdef CONFIG_DEBUG_FS
5395 static int amdgpu_dm_crtc_atomic_set_property(struct drm_crtc *crtc,
5396                                             struct drm_crtc_state *crtc_state,
5397                                             struct drm_property *property,
5398                                             uint64_t val)
5399 {
5400         struct drm_device *dev = crtc->dev;
5401         struct amdgpu_device *adev = drm_to_adev(dev);
5402         struct dm_crtc_state *dm_new_state =
5403                 to_dm_crtc_state(crtc_state);
5404
5405         if (property == adev->dm.crc_win_x_start_property)
5406                 dm_new_state->crc_window.x_start = val;
5407         else if (property == adev->dm.crc_win_y_start_property)
5408                 dm_new_state->crc_window.y_start = val;
5409         else if (property == adev->dm.crc_win_x_end_property)
5410                 dm_new_state->crc_window.x_end = val;
5411         else if (property == adev->dm.crc_win_y_end_property)
5412                 dm_new_state->crc_window.y_end = val;
5413         else
5414                 return -EINVAL;
5415
5416         return 0;
5417 }
5418
5419 static int amdgpu_dm_crtc_atomic_get_property(struct drm_crtc *crtc,
5420                                             const struct drm_crtc_state *state,
5421                                             struct drm_property *property,
5422                                             uint64_t *val)
5423 {
5424         struct drm_device *dev = crtc->dev;
5425         struct amdgpu_device *adev = drm_to_adev(dev);
5426         struct dm_crtc_state *dm_state =
5427                 to_dm_crtc_state(state);
5428
5429         if (property == adev->dm.crc_win_x_start_property)
5430                 *val = dm_state->crc_window.x_start;
5431         else if (property == adev->dm.crc_win_y_start_property)
5432                 *val = dm_state->crc_window.y_start;
5433         else if (property == adev->dm.crc_win_x_end_property)
5434                 *val = dm_state->crc_window.x_end;
5435         else if (property == adev->dm.crc_win_y_end_property)
5436                 *val = dm_state->crc_window.y_end;
5437         else
5438                 return -EINVAL;
5439
5440         return 0;
5441 }
5442 #endif
5443
5444 static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
5445 {
5446         enum dc_irq_source irq_source;
5447         struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
5448         struct amdgpu_device *adev = drm_to_adev(crtc->dev);
5449         int rc;
5450
5451         irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst;
5452
5453         rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
5454
5455         DRM_DEBUG_DRIVER("crtc %d - vupdate irq %sabling: r=%d\n",
5456                          acrtc->crtc_id, enable ? "en" : "dis", rc);
5457         return rc;
5458 }
5459
5460 static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
5461 {
5462         enum dc_irq_source irq_source;
5463         struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
5464         struct amdgpu_device *adev = drm_to_adev(crtc->dev);
5465         struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
5466         struct amdgpu_display_manager *dm = &adev->dm;
5467         int rc = 0;
5468
5469         if (enable) {
5470                 /* vblank irq on -> Only need vupdate irq in vrr mode */
5471                 if (amdgpu_dm_vrr_active(acrtc_state))
5472                         rc = dm_set_vupdate_irq(crtc, true);
5473         } else {
5474                 /* vblank irq off -> vupdate irq off */
5475                 rc = dm_set_vupdate_irq(crtc, false);
5476         }
5477
5478         if (rc)
5479                 return rc;
5480
5481         irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
5482
5483         if (!dc_interrupt_set(adev->dm.dc, irq_source, enable))
5484                 return -EBUSY;
5485
5486         mutex_lock(&dm->dc_lock);
5487
5488         if (enable)
5489                 dm->active_vblank_irq_count++;
5490         else
5491                 dm->active_vblank_irq_count--;
5492
5493 #if defined(CONFIG_DRM_AMD_DC_DCN)
5494         dc_allow_idle_optimizations(
5495                 adev->dm.dc, dm->active_vblank_irq_count == 0 ? true : false);
5496
5497         DRM_DEBUG_DRIVER("Allow idle optimizations (MALL): %d\n", dm->active_vblank_irq_count == 0);
5498 #endif
5499
5500         mutex_unlock(&dm->dc_lock);
5501
5502         return 0;
5503 }
5504
/* drm_crtc_funcs.enable_vblank hook. */
static int dm_enable_vblank(struct drm_crtc *crtc)
{
	return dm_set_vblank(crtc, true);
}
5509
/* drm_crtc_funcs.disable_vblank hook. */
static void dm_disable_vblank(struct drm_crtc *crtc)
{
	dm_set_vblank(crtc, false);
}
5514
/* Implemented only the options currently available for the driver */
static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
	.reset = dm_crtc_reset_state,
	.destroy = amdgpu_dm_crtc_destroy,
	.set_config = drm_atomic_helper_set_config,
	.page_flip = drm_atomic_helper_page_flip,
	.atomic_duplicate_state = dm_crtc_duplicate_state,
	.atomic_destroy_state = dm_crtc_destroy_state,
	.set_crc_source = amdgpu_dm_crtc_set_crc_source,
	.verify_crc_source = amdgpu_dm_crtc_verify_crc_source,
	.get_crc_sources = amdgpu_dm_crtc_get_crc_sources,
	.get_vblank_counter = amdgpu_get_vblank_counter_kms,
	.enable_vblank = dm_enable_vblank,
	.disable_vblank = dm_disable_vblank,
	.get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
#ifdef CONFIG_DEBUG_FS
	/* CRC window properties are only exposed on debugfs builds. */
	.atomic_set_property = amdgpu_dm_crtc_atomic_set_property,
	.atomic_get_property = amdgpu_dm_crtc_atomic_get_property,
#endif
};
5535
5536 static enum drm_connector_status
5537 amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
5538 {
5539         bool connected;
5540         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5541
5542         /*
5543          * Notes:
5544          * 1. This interface is NOT called in context of HPD irq.
5545          * 2. This interface *is called* in context of user-mode ioctl. Which
5546          * makes it a bad place for *any* MST-related activity.
5547          */
5548
5549         if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
5550             !aconnector->fake_enable)
5551                 connected = (aconnector->dc_sink != NULL);
5552         else
5553                 connected = (aconnector->base.force == DRM_FORCE_ON);
5554
5555         update_subconnector_property(aconnector);
5556
5557         return (connected ? connector_status_connected :
5558                         connector_status_disconnected);
5559 }
5560
5561 int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
5562                                             struct drm_connector_state *connector_state,
5563                                             struct drm_property *property,
5564                                             uint64_t val)
5565 {
5566         struct drm_device *dev = connector->dev;
5567         struct amdgpu_device *adev = drm_to_adev(dev);
5568         struct dm_connector_state *dm_old_state =
5569                 to_dm_connector_state(connector->state);
5570         struct dm_connector_state *dm_new_state =
5571                 to_dm_connector_state(connector_state);
5572
5573         int ret = -EINVAL;
5574
5575         if (property == dev->mode_config.scaling_mode_property) {
5576                 enum amdgpu_rmx_type rmx_type;
5577
5578                 switch (val) {
5579                 case DRM_MODE_SCALE_CENTER:
5580                         rmx_type = RMX_CENTER;
5581                         break;
5582                 case DRM_MODE_SCALE_ASPECT:
5583                         rmx_type = RMX_ASPECT;
5584                         break;
5585                 case DRM_MODE_SCALE_FULLSCREEN:
5586                         rmx_type = RMX_FULL;
5587                         break;
5588                 case DRM_MODE_SCALE_NONE:
5589                 default:
5590                         rmx_type = RMX_OFF;
5591                         break;
5592                 }
5593
5594                 if (dm_old_state->scaling == rmx_type)
5595                         return 0;
5596
5597                 dm_new_state->scaling = rmx_type;
5598                 ret = 0;
5599         } else if (property == adev->mode_info.underscan_hborder_property) {
5600                 dm_new_state->underscan_hborder = val;
5601                 ret = 0;
5602         } else if (property == adev->mode_info.underscan_vborder_property) {
5603                 dm_new_state->underscan_vborder = val;
5604                 ret = 0;
5605         } else if (property == adev->mode_info.underscan_property) {
5606                 dm_new_state->underscan_enable = val;
5607                 ret = 0;
5608         } else if (property == adev->mode_info.abm_level_property) {
5609                 dm_new_state->abm_level = val;
5610                 ret = 0;
5611         }
5612
5613         return ret;
5614 }
5615
5616 int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
5617                                             const struct drm_connector_state *state,
5618                                             struct drm_property *property,
5619                                             uint64_t *val)
5620 {
5621         struct drm_device *dev = connector->dev;
5622         struct amdgpu_device *adev = drm_to_adev(dev);
5623         struct dm_connector_state *dm_state =
5624                 to_dm_connector_state(state);
5625         int ret = -EINVAL;
5626
5627         if (property == dev->mode_config.scaling_mode_property) {
5628                 switch (dm_state->scaling) {
5629                 case RMX_CENTER:
5630                         *val = DRM_MODE_SCALE_CENTER;
5631                         break;
5632                 case RMX_ASPECT:
5633                         *val = DRM_MODE_SCALE_ASPECT;
5634                         break;
5635                 case RMX_FULL:
5636                         *val = DRM_MODE_SCALE_FULLSCREEN;
5637                         break;
5638                 case RMX_OFF:
5639                 default:
5640                         *val = DRM_MODE_SCALE_NONE;
5641                         break;
5642                 }
5643                 ret = 0;
5644         } else if (property == adev->mode_info.underscan_hborder_property) {
5645                 *val = dm_state->underscan_hborder;
5646                 ret = 0;
5647         } else if (property == adev->mode_info.underscan_vborder_property) {
5648                 *val = dm_state->underscan_vborder;
5649                 ret = 0;
5650         } else if (property == adev->mode_info.underscan_property) {
5651                 *val = dm_state->underscan_enable;
5652                 ret = 0;
5653         } else if (property == adev->mode_info.abm_level_property) {
5654                 *val = dm_state->abm_level;
5655                 ret = 0;
5656         }
5657
5658         return ret;
5659 }
5660
/*
 * drm_connector_funcs.early_unregister: tear down the DP AUX channel
 * registered in amdgpu_dm_connector_late_register().
 */
static void amdgpu_dm_connector_unregister(struct drm_connector *connector)
{
	struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);

	drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux);
}
5667
/*
 * drm_connector_funcs.destroy: full teardown of a dm connector — MST
 * manager, backlight device (internal panels), DC sink references, CEC,
 * DRM core state, i2c adapter, AUX name, and finally the allocation.
 */
static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
{
	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
	const struct dc_link *link = aconnector->dc_link;
	struct amdgpu_device *adev = drm_to_adev(connector->dev);
	struct amdgpu_display_manager *dm = &adev->dm;

	/*
	 * Call only if mst_mgr was initialized before since it's not done
	 * for all connector types.
	 */
	if (aconnector->mst_mgr.dev)
		drm_dp_mst_topology_mgr_destroy(&aconnector->mst_mgr);

#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)

	/* Unregister the backlight device for internal panels (eDP/LVDS). */
	if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
	    link->type != dc_connection_none &&
	    dm->backlight_dev) {
		backlight_device_unregister(dm->backlight_dev);
		dm->backlight_dev = NULL;
	}
#endif

	/* Drop sink references and NULL the fields to avoid stale pointers. */
	if (aconnector->dc_em_sink)
		dc_sink_release(aconnector->dc_em_sink);
	aconnector->dc_em_sink = NULL;
	if (aconnector->dc_sink)
		dc_sink_release(aconnector->dc_sink);
	aconnector->dc_sink = NULL;

	drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
	drm_connector_unregister(connector);
	drm_connector_cleanup(connector);
	/* The i2c adapter is only created for some connector types. */
	if (aconnector->i2c) {
		i2c_del_adapter(&aconnector->i2c->base);
		kfree(aconnector->i2c);
	}
	kfree(aconnector->dm_dp_aux.aux.name);

	kfree(connector);
}
5711
5712 void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
5713 {
5714         struct dm_connector_state *state =
5715                 to_dm_connector_state(connector->state);
5716
5717         if (connector->state)
5718                 __drm_atomic_helper_connector_destroy_state(connector->state);
5719
5720         kfree(state);
5721
5722         state = kzalloc(sizeof(*state), GFP_KERNEL);
5723
5724         if (state) {
5725                 state->scaling = RMX_OFF;
5726                 state->underscan_enable = false;
5727                 state->underscan_hborder = 0;
5728                 state->underscan_vborder = 0;
5729                 state->base.max_requested_bpc = 8;
5730                 state->vcpi_slots = 0;
5731                 state->pbn = 0;
5732                 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
5733                         state->abm_level = amdgpu_dm_abm_level;
5734
5735                 __drm_atomic_helper_connector_reset(connector, &state->base);
5736         }
5737 }
5738
/*
 * drm_connector_funcs.atomic_duplicate_state: clone the connector
 * software state for a new atomic transaction.
 *
 * Returns the duplicated base state, or NULL on allocation failure.
 */
struct drm_connector_state *
amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
{
	struct dm_connector_state *state =
		to_dm_connector_state(connector->state);

	struct dm_connector_state *new_state =
			kmemdup(state, sizeof(*state), GFP_KERNEL);

	if (!new_state)
		return NULL;

	/* Re-initialize the base state (takes blob/CRTC references). */
	__drm_atomic_helper_connector_duplicate_state(connector, &new_state->base);

	/*
	 * NOTE(review): kmemdup() above already copied the whole struct,
	 * so these explicit field copies appear redundant — confirm
	 * before removing them.
	 */
	new_state->freesync_capable = state->freesync_capable;
	new_state->abm_level = state->abm_level;
	new_state->scaling = state->scaling;
	new_state->underscan_enable = state->underscan_enable;
	new_state->underscan_hborder = state->underscan_hborder;
	new_state->underscan_vborder = state->underscan_vborder;
	new_state->vcpi_slots = state->vcpi_slots;
	new_state->pbn = state->pbn;
	return &new_state->base;
}
5763
5764 static int
5765 amdgpu_dm_connector_late_register(struct drm_connector *connector)
5766 {
5767         struct amdgpu_dm_connector *amdgpu_dm_connector =
5768                 to_amdgpu_dm_connector(connector);
5769         int r;
5770
5771         if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
5772             (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
5773                 amdgpu_dm_connector->dm_dp_aux.aux.dev = connector->kdev;
5774                 r = drm_dp_aux_register(&amdgpu_dm_connector->dm_dp_aux.aux);
5775                 if (r)
5776                         return r;
5777         }
5778
5779 #if defined(CONFIG_DEBUG_FS)
5780         connector_debugfs_init(amdgpu_dm_connector);
5781 #endif
5782
5783         return 0;
5784 }
5785
/* DRM connector function table shared by all amdgpu_dm connectors. */
static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
	.reset = amdgpu_dm_connector_funcs_reset,
	.detect = amdgpu_dm_connector_detect,
	.fill_modes = drm_helper_probe_single_connector_modes,
	.destroy = amdgpu_dm_connector_destroy,
	.atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
	.atomic_set_property = amdgpu_dm_connector_atomic_set_property,
	.atomic_get_property = amdgpu_dm_connector_atomic_get_property,
	.late_register = amdgpu_dm_connector_late_register,
	.early_unregister = amdgpu_dm_connector_unregister
};
5798
/* drm_connector_helper_funcs.get_modes hook; see the comment on
 * amdgpu_dm_connector_helper_funcs for why the mode list is rebuilt. */
static int get_modes(struct drm_connector *connector)
{
	return amdgpu_dm_connector_get_modes(connector);
}
5803
5804 static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
5805 {
5806         struct dc_sink_init_data init_params = {
5807                         .link = aconnector->dc_link,
5808                         .sink_signal = SIGNAL_TYPE_VIRTUAL
5809         };
5810         struct edid *edid;
5811
5812         if (!aconnector->base.edid_blob_ptr) {
5813                 DRM_ERROR("No EDID firmware found on connector: %s ,forcing to OFF!\n",
5814                                 aconnector->base.name);
5815
5816                 aconnector->base.force = DRM_FORCE_OFF;
5817                 aconnector->base.override_edid = false;
5818                 return;
5819         }
5820
5821         edid = (struct edid *) aconnector->base.edid_blob_ptr->data;
5822
5823         aconnector->edid = edid;
5824
5825         aconnector->dc_em_sink = dc_link_add_remote_sink(
5826                 aconnector->dc_link,
5827                 (uint8_t *)edid,
5828                 (edid->extensions + 1) * EDID_LENGTH,
5829                 &init_params);
5830
5831         if (aconnector->base.force == DRM_FORCE_ON) {
5832                 aconnector->dc_sink = aconnector->dc_link->local_sink ?
5833                 aconnector->dc_link->local_sink :
5834                 aconnector->dc_em_sink;
5835                 dc_sink_retain(aconnector->dc_sink);
5836         }
5837 }
5838
5839 static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
5840 {
5841         struct dc_link *link = (struct dc_link *)aconnector->dc_link;
5842
5843         /*
5844          * In case of headless boot with force on for DP managed connector
5845          * Those settings have to be != 0 to get initial modeset
5846          */
5847         if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
5848                 link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
5849                 link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
5850         }
5851
5852
5853         aconnector->base.override_edid = true;
5854         create_eml_sink(aconnector);
5855 }
5856
5857 static struct dc_stream_state *
5858 create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
5859                                 const struct drm_display_mode *drm_mode,
5860                                 const struct dm_connector_state *dm_state,
5861                                 const struct dc_stream_state *old_stream)
5862 {
5863         struct drm_connector *connector = &aconnector->base;
5864         struct amdgpu_device *adev = drm_to_adev(connector->dev);
5865         struct dc_stream_state *stream;
5866         const struct drm_connector_state *drm_state = dm_state ? &dm_state->base : NULL;
5867         int requested_bpc = drm_state ? drm_state->max_requested_bpc : 8;
5868         enum dc_status dc_result = DC_OK;
5869
5870         do {
5871                 stream = create_stream_for_sink(aconnector, drm_mode,
5872                                                 dm_state, old_stream,
5873                                                 requested_bpc);
5874                 if (stream == NULL) {
5875                         DRM_ERROR("Failed to create stream for sink!\n");
5876                         break;
5877                 }
5878
5879                 dc_result = dc_validate_stream(adev->dm.dc, stream);
5880
5881                 if (dc_result != DC_OK) {
5882                         DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d (%s)\n",
5883                                       drm_mode->hdisplay,
5884                                       drm_mode->vdisplay,
5885                                       drm_mode->clock,
5886                                       dc_result,
5887                                       dc_status_to_str(dc_result));
5888
5889                         dc_stream_release(stream);
5890                         stream = NULL;
5891                         requested_bpc -= 2; /* lower bpc to retry validation */
5892                 }
5893
5894         } while (stream == NULL && requested_bpc >= 6);
5895
5896         return stream;
5897 }
5898
5899 enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
5900                                    struct drm_display_mode *mode)
5901 {
5902         int result = MODE_ERROR;
5903         struct dc_sink *dc_sink;
5904         /* TODO: Unhardcode stream count */
5905         struct dc_stream_state *stream;
5906         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5907
5908         if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
5909                         (mode->flags & DRM_MODE_FLAG_DBLSCAN))
5910                 return result;
5911
5912         /*
5913          * Only run this the first time mode_valid is called to initilialize
5914          * EDID mgmt
5915          */
5916         if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
5917                 !aconnector->dc_em_sink)
5918                 handle_edid_mgmt(aconnector);
5919
5920         dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;
5921
5922         if (dc_sink == NULL && aconnector->base.force != DRM_FORCE_ON_DIGITAL &&
5923                                 aconnector->base.force != DRM_FORCE_ON) {
5924                 DRM_ERROR("dc_sink is NULL!\n");
5925                 goto fail;
5926         }
5927
5928         stream = create_validate_stream_for_sink(aconnector, mode, NULL, NULL);
5929         if (stream) {
5930                 dc_stream_release(stream);
5931                 result = MODE_OK;
5932         }
5933
5934 fail:
5935         /* TODO: error handling*/
5936         return result;
5937 }
5938
/*
 * Pack the connector's HDR static metadata (the hdr_output_metadata
 * blob) into a DC info packet in the layout the signal type expects
 * (HDMI DRM infoframe or DP SDP).
 *
 * Returns 0 on success; if no metadata is attached, *out stays zeroed
 * (out->valid == false) and 0 is returned. Returns a negative error on
 * packing failure or an unsupported connector type.
 */
static int fill_hdr_info_packet(const struct drm_connector_state *state,
				struct dc_info_packet *out)
{
	struct hdmi_drm_infoframe frame;
	unsigned char buf[30]; /* 26 + 4 */
	ssize_t len;
	int ret, i;

	memset(out, 0, sizeof(*out));

	if (!state->hdr_output_metadata)
		return 0;

	ret = drm_hdmi_infoframe_set_hdr_metadata(&frame, state);
	if (ret)
		return ret;

	len = hdmi_drm_infoframe_pack_only(&frame, buf, sizeof(buf));
	if (len < 0)
		return (int)len;

	/* Static metadata is a fixed 26 bytes + 4 byte header. */
	if (len != 30)
		return -EINVAL;

	/* Prepare the infopacket for DC. */
	switch (state->connector->connector_type) {
	case DRM_MODE_CONNECTOR_HDMIA:
		out->hb0 = 0x87; /* type */
		out->hb1 = 0x01; /* version */
		out->hb2 = 0x1A; /* length */
		out->sb[0] = buf[3]; /* checksum */
		i = 1;
		break;

	case DRM_MODE_CONNECTOR_DisplayPort:
	case DRM_MODE_CONNECTOR_eDP:
		out->hb0 = 0x00; /* sdp id, zero */
		out->hb1 = 0x87; /* type */
		out->hb2 = 0x1D; /* payload len - 1 */
		out->hb3 = (0x13 << 2); /* sdp version */
		out->sb[0] = 0x01; /* version */
		out->sb[1] = 0x1A; /* length */
		i = 2;
		break;

	default:
		return -EINVAL;
	}

	/* Copy the 26-byte metadata payload after the per-type prefix. */
	memcpy(&out->sb[i], &buf[4], 26);
	out->valid = true;

	print_hex_dump(KERN_DEBUG, "HDR SB:", DUMP_PREFIX_NONE, 16, 1, out->sb,
		       sizeof(out->sb), false);

	return 0;
}
5997
5998 static bool
5999 is_hdr_metadata_different(const struct drm_connector_state *old_state,
6000                           const struct drm_connector_state *new_state)
6001 {
6002         struct drm_property_blob *old_blob = old_state->hdr_output_metadata;
6003         struct drm_property_blob *new_blob = new_state->hdr_output_metadata;
6004
6005         if (old_blob != new_blob) {
6006                 if (old_blob && new_blob &&
6007                     old_blob->length == new_blob->length)
6008                         return memcmp(old_blob->data, new_blob->data,
6009                                       old_blob->length);
6010
6011                 return true;
6012         }
6013
6014         return false;
6015 }
6016
/*
 * drm_connector_helper_funcs.atomic_check: validate new HDR metadata by
 * attempting to pack it, and force a modeset when HDR is being entered
 * or exited so DC rebuilds the stream backend.
 *
 * Returns 0 on success or a negative error (bad metadata, failed CRTC
 * state lookup).
 */
static int
amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
				 struct drm_atomic_state *state)
{
	struct drm_connector_state *new_con_state =
		drm_atomic_get_new_connector_state(state, conn);
	struct drm_connector_state *old_con_state =
		drm_atomic_get_old_connector_state(state, conn);
	struct drm_crtc *crtc = new_con_state->crtc;
	struct drm_crtc_state *new_crtc_state;
	int ret;

	trace_amdgpu_dm_connector_atomic_check(new_con_state);

	/* Nothing to check for a connector without a CRTC. */
	if (!crtc)
		return 0;

	if (is_hdr_metadata_different(old_con_state, new_con_state)) {
		struct dc_info_packet hdr_infopacket;

		/* Packet contents are discarded; this only validates them. */
		ret = fill_hdr_info_packet(new_con_state, &hdr_infopacket);
		if (ret)
			return ret;

		new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
		if (IS_ERR(new_crtc_state))
			return PTR_ERR(new_crtc_state);

		/*
		 * DC considers the stream backends changed if the
		 * static metadata changes. Forcing the modeset also
		 * gives a simple way for userspace to switch from
		 * 8bpc to 10bpc when setting the metadata to enter
		 * or exit HDR.
		 *
		 * Changing the static metadata after it's been
		 * set is permissible, however. So only force a
		 * modeset if we're entering or exiting HDR.
		 */
		new_crtc_state->mode_changed =
			!old_con_state->hdr_output_metadata ||
			!new_con_state->hdr_output_metadata;
	}

	return 0;
}
6063
static const struct drm_connector_helper_funcs
amdgpu_dm_connector_helper_funcs = {
	/*
	 * When a larger display is hotplugged in fbcon mode, its higher
	 * resolution modes are filtered out by drm_mode_validate_size()
	 * and would still be missing after the user starts a display
	 * manager (e.g. lightdm). The get_modes callback must therefore
	 * rebuild the mode list, not just return a mode count.
	 */
	.get_modes = get_modes,
	.mode_valid = amdgpu_dm_connector_mode_valid,
	.atomic_check = amdgpu_dm_connector_atomic_check,
};
6076
/*
 * Intentionally a no-op; presumably CRTC disable is handled through the
 * atomic commit path rather than this helper — NOTE(review): confirm.
 */
static void dm_crtc_helper_disable(struct drm_crtc *crtc)
{
}
6080
6081 static int count_crtc_active_planes(struct drm_crtc_state *new_crtc_state)
6082 {
6083         struct drm_atomic_state *state = new_crtc_state->state;
6084         struct drm_plane *plane;
6085         int num_active = 0;
6086
6087         drm_for_each_plane_mask(plane, state->dev, new_crtc_state->plane_mask) {
6088                 struct drm_plane_state *new_plane_state;
6089
6090                 /* Cursor planes are "fake". */
6091                 if (plane->type == DRM_PLANE_TYPE_CURSOR)
6092                         continue;
6093
6094                 new_plane_state = drm_atomic_get_new_plane_state(state, plane);
6095
6096                 if (!new_plane_state) {
6097                         /*
6098                          * The plane is enable on the CRTC and hasn't changed
6099                          * state. This means that it previously passed
6100                          * validation and is therefore enabled.
6101                          */
6102                         num_active += 1;
6103                         continue;
6104                 }
6105
6106                 /* We need a framebuffer to be considered enabled. */
6107                 num_active += (new_plane_state->fb != NULL);
6108         }
6109
6110         return num_active;
6111 }
6112
6113 static void dm_update_crtc_active_planes(struct drm_crtc *crtc,
6114                                          struct drm_crtc_state *new_crtc_state)
6115 {
6116         struct dm_crtc_state *dm_new_crtc_state =
6117                 to_dm_crtc_state(new_crtc_state);
6118
6119         dm_new_crtc_state->active_planes = 0;
6120
6121         if (!dm_new_crtc_state->stream)
6122                 return;
6123
6124         dm_new_crtc_state->active_planes =
6125                 count_crtc_active_planes(new_crtc_state);
6126 }
6127
/*
 * Atomic check for a CRTC: refresh the cached active-plane count, enforce
 * the primary-plane requirement, and validate the attached DC stream (if
 * any) against the hardware.
 *
 * Returns 0 when the CRTC state is acceptable, -EINVAL otherwise.
 */
static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
                                       struct drm_atomic_state *state)
{
        struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
                                                                          crtc);
        struct amdgpu_device *adev = drm_to_adev(crtc->dev);
        struct dc *dc = adev->dm.dc;
        struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
        int ret = -EINVAL;

        trace_amdgpu_dm_crtc_atomic_check(crtc_state);

        /* Recount enabled non-cursor planes before the checks below. */
        dm_update_crtc_active_planes(crtc, crtc_state);

        /* A required modeset with no stream to program indicates a driver bug. */
        if (unlikely(!dm_crtc_state->stream &&
                     modeset_required(crtc_state, NULL, dm_crtc_state->stream))) {
                WARN_ON(1);
                return ret;
        }

        /*
         * We require the primary plane to be enabled whenever the CRTC is, otherwise
         * drm_mode_cursor_universal may end up trying to enable the cursor plane while all other
         * planes are disabled, which is not supported by the hardware. And there is legacy
         * userspace which stops using the HW cursor altogether in response to the resulting EINVAL.
         */
        if (crtc_state->enable &&
            !(crtc_state->plane_mask & drm_plane_mask(crtc->primary))) {
                DRM_DEBUG_ATOMIC("Can't enable a CRTC without enabling the primary plane\n");
                return -EINVAL;
        }

        /* In some use cases, like reset, no stream is attached */
        if (!dm_crtc_state->stream)
                return 0;

        if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
                return 0;

        DRM_DEBUG_ATOMIC("Failed DC stream validation\n");
        return ret;
}
6170
/* Accept every mode unchanged; DM performs no mode fixup at this stage. */
static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc,
                                      const struct drm_display_mode *mode,
                                      struct drm_display_mode *adjusted_mode)
{
        return true;
}
6177
/* CRTC helper vtable wiring the DM atomic-check path into DRM. */
static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
        .disable = dm_crtc_helper_disable,
        .atomic_check = dm_crtc_helper_atomic_check,
        .mode_fixup = dm_crtc_helper_mode_fixup,
        .get_scanout_position = amdgpu_crtc_get_scanout_position,
};
6184
/*
 * Intentionally a no-op; it only fills the .disable slot of
 * amdgpu_dm_encoder_helper_funcs.
 */
static void dm_encoder_helper_disable(struct drm_encoder *encoder)
{

}
6189
6190 static int convert_dc_color_depth_into_bpc (enum dc_color_depth display_color_depth)
6191 {
6192         switch (display_color_depth) {
6193                 case COLOR_DEPTH_666:
6194                         return 6;
6195                 case COLOR_DEPTH_888:
6196                         return 8;
6197                 case COLOR_DEPTH_101010:
6198                         return 10;
6199                 case COLOR_DEPTH_121212:
6200                         return 12;
6201                 case COLOR_DEPTH_141414:
6202                         return 14;
6203                 case COLOR_DEPTH_161616:
6204                         return 16;
6205                 default:
6206                         break;
6207                 }
6208         return 0;
6209 }
6210
/*
 * Encoder atomic check for DP MST connectors: compute the stream's PBN
 * from the adjusted mode and reserve VCPI slots on the MST topology.
 *
 * Returns 0 on success (or when no MST port/sink is involved, or nothing
 * relevant changed); propagates the negative error from
 * drm_dp_atomic_find_vcpi_slots() on failure.
 */
static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
                                          struct drm_crtc_state *crtc_state,
                                          struct drm_connector_state *conn_state)
{
        struct drm_atomic_state *state = crtc_state->state;
        struct drm_connector *connector = conn_state->connector;
        struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
        struct dm_connector_state *dm_new_connector_state = to_dm_connector_state(conn_state);
        const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
        struct drm_dp_mst_topology_mgr *mst_mgr;
        struct drm_dp_mst_port *mst_port;
        enum dc_color_depth color_depth;
        int clock, bpp = 0;
        bool is_y420 = false;

        /* Only MST connectors with a detected sink need VCPI bookkeeping. */
        if (!aconnector->port || !aconnector->dc_sink)
                return 0;

        mst_port = aconnector->port;
        mst_mgr = &aconnector->mst_port->mst_mgr;

        if (!crtc_state->connectors_changed && !crtc_state->mode_changed)
                return 0;

        /*
         * Recompute the PBN only for genuinely new state; duplicated atomic
         * states keep the previously computed value.
         */
        if (!state->duplicated) {
                int max_bpc = conn_state->max_requested_bpc;
                is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
                                aconnector->force_yuv420_output;
                color_depth = convert_color_depth_from_display_info(connector,
                                                                    is_y420,
                                                                    max_bpc);
                /* 3 components per pixel. */
                bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
                clock = adjusted_mode->clock;
                dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false);
        }
        dm_new_connector_state->vcpi_slots = drm_dp_atomic_find_vcpi_slots(state,
                                                                           mst_mgr,
                                                                           mst_port,
                                                                           dm_new_connector_state->pbn,
                                                                           dm_mst_get_pbn_divider(aconnector->dc_link));
        if (dm_new_connector_state->vcpi_slots < 0) {
                DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots);
                return dm_new_connector_state->vcpi_slots;
        }
        return 0;
}
6257
/* Encoder helper vtable; atomic_check handles MST VCPI slot allocation. */
const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
        .disable = dm_encoder_helper_disable,
        .atomic_check = dm_encoder_helper_atomic_check
};
6262
#if defined(CONFIG_DRM_AMD_DC_DCN)
/*
 * For each MST connector in @state, enable or disable DSC on its port to
 * match the corresponding DC stream, and record the resulting PBN and
 * VCPI slot count in the connector's DM state.
 *
 * Returns 0 on success or the negative error from
 * drm_dp_mst_atomic_enable_dsc().
 */
static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
                                            struct dc_state *dc_state)
{
        struct dc_stream_state *stream = NULL;
        struct drm_connector *connector;
        struct drm_connector_state *new_con_state, *old_con_state;
        struct amdgpu_dm_connector *aconnector;
        struct dm_connector_state *dm_conn_state;
        int i, j, clock, bpp;
        int vcpi, pbn_div, pbn = 0;

        for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {

                aconnector = to_amdgpu_dm_connector(connector);

                /* Only MST connectors carry a port. */
                if (!aconnector->port)
                        continue;

                if (!new_con_state || !new_con_state->crtc)
                        continue;

                dm_conn_state = to_dm_connector_state(new_con_state);

                /* Find the DC stream that belongs to this connector, if any. */
                for (j = 0; j < dc_state->stream_count; j++) {
                        stream = dc_state->streams[j];
                        if (!stream)
                                continue;

                        if ((struct amdgpu_dm_connector*)stream->dm_stream_context == aconnector)
                                break;

                        stream = NULL;
                }

                if (!stream)
                        continue;

                /* Stream not using DSC: ensure DSC is disabled on the port. */
                if (stream->timing.flags.DSC != 1) {
                        drm_dp_mst_atomic_enable_dsc(state,
                                                     aconnector->port,
                                                     dm_conn_state->pbn,
                                                     0,
                                                     false);
                        continue;
                }

                /* DSC active: recompute the PBN from the compressed bitrate. */
                pbn_div = dm_mst_get_pbn_divider(stream->link);
                bpp = stream->timing.dsc_cfg.bits_per_pixel;
                clock = stream->timing.pix_clk_100hz / 10;
                pbn = drm_dp_calc_pbn_mode(clock, bpp, true);
                vcpi = drm_dp_mst_atomic_enable_dsc(state,
                                                    aconnector->port,
                                                    pbn, pbn_div,
                                                    true);
                if (vcpi < 0)
                        return vcpi;

                dm_conn_state->pbn = pbn;
                dm_conn_state->vcpi_slots = vcpi;
        }
        return 0;
}
#endif
6327
6328 static void dm_drm_plane_reset(struct drm_plane *plane)
6329 {
6330         struct dm_plane_state *amdgpu_state = NULL;
6331
6332         if (plane->state)
6333                 plane->funcs->atomic_destroy_state(plane, plane->state);
6334
6335         amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);
6336         WARN_ON(amdgpu_state == NULL);
6337
6338         if (amdgpu_state)
6339                 __drm_atomic_helper_plane_reset(plane, &amdgpu_state->base);
6340 }
6341
6342 static struct drm_plane_state *
6343 dm_drm_plane_duplicate_state(struct drm_plane *plane)
6344 {
6345         struct dm_plane_state *dm_plane_state, *old_dm_plane_state;
6346
6347         old_dm_plane_state = to_dm_plane_state(plane->state);
6348         dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL);
6349         if (!dm_plane_state)
6350                 return NULL;
6351
6352         __drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base);
6353
6354         if (old_dm_plane_state->dc_state) {
6355                 dm_plane_state->dc_state = old_dm_plane_state->dc_state;
6356                 dc_plane_state_retain(dm_plane_state->dc_state);
6357         }
6358
6359         return &dm_plane_state->base;
6360 }
6361
6362 static void dm_drm_plane_destroy_state(struct drm_plane *plane,
6363                                 struct drm_plane_state *state)
6364 {
6365         struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
6366
6367         if (dm_plane_state->dc_state)
6368                 dc_plane_state_release(dm_plane_state->dc_state);
6369
6370         drm_atomic_helper_plane_destroy_state(plane, state);
6371 }
6372
/* Plane funcs: atomic helpers plus DM-specific dc_state lifetime handling. */
static const struct drm_plane_funcs dm_plane_funcs = {
        .update_plane   = drm_atomic_helper_update_plane,
        .disable_plane  = drm_atomic_helper_disable_plane,
        .destroy        = drm_primary_helper_destroy,
        .reset = dm_drm_plane_reset,
        .atomic_duplicate_state = dm_drm_plane_duplicate_state,
        .atomic_destroy_state = dm_drm_plane_destroy_state,
        .format_mod_supported = dm_plane_format_mod_supported,
};
6382
/*
 * prepare_fb hook: reserve and pin the framebuffer BO so it is resident
 * and GPU-addressable before the plane is programmed, and (for newly
 * created planes only) fill in the DC buffer attributes from the pinned
 * address.
 *
 * Returns 0 on success or a negative errno if reservation, pinning or
 * GART binding fails.
 */
static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
                                      struct drm_plane_state *new_state)
{
        struct amdgpu_framebuffer *afb;
        struct drm_gem_object *obj;
        struct amdgpu_device *adev;
        struct amdgpu_bo *rbo;
        struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
        struct list_head list;
        struct ttm_validate_buffer tv;
        struct ww_acquire_ctx ticket;
        uint32_t domain;
        int r;

        /* Disabling the plane: nothing to prepare. */
        if (!new_state->fb) {
                DRM_DEBUG_DRIVER("No FB bound\n");
                return 0;
        }

        afb = to_amdgpu_framebuffer(new_state->fb);
        obj = new_state->fb->obj[0];
        rbo = gem_to_amdgpu_bo(obj);
        adev = amdgpu_ttm_adev(rbo->tbo.bdev);
        INIT_LIST_HEAD(&list);

        tv.bo = &rbo->tbo;
        tv.num_shared = 1;
        list_add(&tv.head, &list);

        /* Lock the BO via TTM's execbuf utility before pinning it. */
        r = ttm_eu_reserve_buffers(&ticket, &list, false, NULL);
        if (r) {
                dev_err(adev->dev, "fail to reserve bo (%d)\n", r);
                return r;
        }

        /* Cursor BOs are pinned to VRAM; other planes use the display domains. */
        if (plane->type != DRM_PLANE_TYPE_CURSOR)
                domain = amdgpu_display_supported_domains(adev, rbo->flags);
        else
                domain = AMDGPU_GEM_DOMAIN_VRAM;

        r = amdgpu_bo_pin(rbo, domain);
        if (unlikely(r != 0)) {
                /* -ERESTARTSYS is an expected interruption, not worth a log. */
                if (r != -ERESTARTSYS)
                        DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
                ttm_eu_backoff_reservation(&ticket, &list);
                return r;
        }

        r = amdgpu_ttm_alloc_gart(&rbo->tbo);
        if (unlikely(r != 0)) {
                amdgpu_bo_unpin(rbo);
                ttm_eu_backoff_reservation(&ticket, &list);
                DRM_ERROR("%p bind failed\n", rbo);
                return r;
        }

        ttm_eu_backoff_reservation(&ticket, &list);

        afb->address = amdgpu_bo_gpu_offset(rbo);

        /* Extra reference for scanout; dropped in dm_plane_helper_cleanup_fb(). */
        amdgpu_bo_ref(rbo);

        /**
         * We don't do surface updates on planes that have been newly created,
         * but we also don't have the afb->address during atomic check.
         *
         * Fill in buffer attributes depending on the address here, but only on
         * newly created planes since they're not being used by DC yet and this
         * won't modify global state.
         */
        dm_plane_state_old = to_dm_plane_state(plane->state);
        dm_plane_state_new = to_dm_plane_state(new_state);

        if (dm_plane_state_new->dc_state &&
            dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
                struct dc_plane_state *plane_state =
                        dm_plane_state_new->dc_state;
                bool force_disable_dcc = !plane_state->dcc.enable;

                fill_plane_buffer_attributes(
                        adev, afb, plane_state->format, plane_state->rotation,
                        afb->tiling_flags,
                        &plane_state->tiling_info, &plane_state->plane_size,
                        &plane_state->dcc, &plane_state->address,
                        afb->tmz_surface, force_disable_dcc);
        }

        return 0;
}
6472
6473 static void dm_plane_helper_cleanup_fb(struct drm_plane *plane,
6474                                        struct drm_plane_state *old_state)
6475 {
6476         struct amdgpu_bo *rbo;
6477         int r;
6478
6479         if (!old_state->fb)
6480                 return;
6481
6482         rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]);
6483         r = amdgpu_bo_reserve(rbo, false);
6484         if (unlikely(r)) {
6485                 DRM_ERROR("failed to reserve rbo before unpin\n");
6486                 return;
6487         }
6488
6489         amdgpu_bo_unpin(rbo);
6490         amdgpu_bo_unreserve(rbo);
6491         amdgpu_bo_unref(&rbo);
6492 }
6493
6494 static int dm_plane_helper_check_state(struct drm_plane_state *state,
6495                                        struct drm_crtc_state *new_crtc_state)
6496 {
6497         struct drm_framebuffer *fb = state->fb;
6498         int min_downscale, max_upscale;
6499         int min_scale = 0;
6500         int max_scale = INT_MAX;
6501
6502         /* Plane enabled? Get min/max allowed scaling factors from plane caps. */
6503         if (fb && state->crtc) {
6504                 get_min_max_dc_plane_scaling(state->crtc->dev, fb,
6505                                              &min_downscale, &max_upscale);
6506                 /*
6507                  * Convert to drm convention: 16.16 fixed point, instead of dc's
6508                  * 1.0 == 1000. Also drm scaling is src/dst instead of dc's
6509                  * dst/src, so min_scale = 1.0 / max_upscale, etc.
6510                  */
6511                 min_scale = (1000 << 16) / max_upscale;
6512                 max_scale = (1000 << 16) / min_downscale;
6513         }
6514
6515         return drm_atomic_helper_check_plane_state(
6516                 state, new_crtc_state, min_scale, max_scale, true, true);
6517 }
6518
6519 static int dm_plane_atomic_check(struct drm_plane *plane,
6520                                  struct drm_plane_state *state)
6521 {
6522         struct amdgpu_device *adev = drm_to_adev(plane->dev);
6523         struct dc *dc = adev->dm.dc;
6524         struct dm_plane_state *dm_plane_state;
6525         struct dc_scaling_info scaling_info;
6526         struct drm_crtc_state *new_crtc_state;
6527         int ret;
6528
6529         trace_amdgpu_dm_plane_atomic_check(state);
6530
6531         dm_plane_state = to_dm_plane_state(state);
6532
6533         if (!dm_plane_state->dc_state)
6534                 return 0;
6535
6536         new_crtc_state =
6537                 drm_atomic_get_new_crtc_state(state->state, state->crtc);
6538         if (!new_crtc_state)
6539                 return -EINVAL;
6540
6541         ret = dm_plane_helper_check_state(state, new_crtc_state);
6542         if (ret)
6543                 return ret;
6544
6545         ret = fill_dc_scaling_info(state, &scaling_info);
6546         if (ret)
6547                 return ret;
6548
6549         if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
6550                 return 0;
6551
6552         return -EINVAL;
6553 }
6554
6555 static int dm_plane_atomic_async_check(struct drm_plane *plane,
6556                                        struct drm_plane_state *new_plane_state)
6557 {
6558         /* Only support async updates on cursor planes. */
6559         if (plane->type != DRM_PLANE_TYPE_CURSOR)
6560                 return -EINVAL;
6561
6562         return 0;
6563 }
6564
6565 static void dm_plane_atomic_async_update(struct drm_plane *plane,
6566                                          struct drm_plane_state *new_state)
6567 {
6568         struct drm_plane_state *old_state =
6569                 drm_atomic_get_old_plane_state(new_state->state, plane);
6570
6571         trace_amdgpu_dm_atomic_update_cursor(new_state);
6572
6573         swap(plane->state->fb, new_state->fb);
6574
6575         plane->state->src_x = new_state->src_x;
6576         plane->state->src_y = new_state->src_y;
6577         plane->state->src_w = new_state->src_w;
6578         plane->state->src_h = new_state->src_h;
6579         plane->state->crtc_x = new_state->crtc_x;
6580         plane->state->crtc_y = new_state->crtc_y;
6581         plane->state->crtc_w = new_state->crtc_w;
6582         plane->state->crtc_h = new_state->crtc_h;
6583
6584         handle_cursor_update(plane, old_state);
6585 }
6586
/* Plane helper vtable; the async hooks implement the fast cursor path. */
static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
        .prepare_fb = dm_plane_helper_prepare_fb,
        .cleanup_fb = dm_plane_helper_cleanup_fb,
        .atomic_check = dm_plane_atomic_check,
        .atomic_async_check = dm_plane_atomic_async_check,
        .atomic_async_update = dm_plane_atomic_async_update
};
6594
6595 /*
6596  * TODO: these are currently initialized to rgb formats only.
6597  * For future use cases we should either initialize them dynamically based on
6598  * plane capabilities, or initialize this array to all formats, so internal drm
6599  * check will succeed, and let DC implement proper check
6600  */
6601 static const uint32_t rgb_formats[] = {
6602         DRM_FORMAT_XRGB8888,
6603         DRM_FORMAT_ARGB8888,
6604         DRM_FORMAT_RGBA8888,
6605         DRM_FORMAT_XRGB2101010,
6606         DRM_FORMAT_XBGR2101010,
6607         DRM_FORMAT_ARGB2101010,
6608         DRM_FORMAT_ABGR2101010,
6609         DRM_FORMAT_XBGR8888,
6610         DRM_FORMAT_ABGR8888,
6611         DRM_FORMAT_RGB565,
6612 };
6613
6614 static const uint32_t overlay_formats[] = {
6615         DRM_FORMAT_XRGB8888,
6616         DRM_FORMAT_ARGB8888,
6617         DRM_FORMAT_RGBA8888,
6618         DRM_FORMAT_XBGR8888,
6619         DRM_FORMAT_ABGR8888,
6620         DRM_FORMAT_RGB565
6621 };
6622
6623 static const u32 cursor_formats[] = {
6624         DRM_FORMAT_ARGB8888
6625 };
6626
6627 static int get_plane_formats(const struct drm_plane *plane,
6628                              const struct dc_plane_cap *plane_cap,
6629                              uint32_t *formats, int max_formats)
6630 {
6631         int i, num_formats = 0;
6632
6633         /*
6634          * TODO: Query support for each group of formats directly from
6635          * DC plane caps. This will require adding more formats to the
6636          * caps list.
6637          */
6638
6639         switch (plane->type) {
6640         case DRM_PLANE_TYPE_PRIMARY:
6641                 for (i = 0; i < ARRAY_SIZE(rgb_formats); ++i) {
6642                         if (num_formats >= max_formats)
6643                                 break;
6644
6645                         formats[num_formats++] = rgb_formats[i];
6646                 }
6647
6648                 if (plane_cap && plane_cap->pixel_format_support.nv12)
6649                         formats[num_formats++] = DRM_FORMAT_NV12;
6650                 if (plane_cap && plane_cap->pixel_format_support.p010)
6651                         formats[num_formats++] = DRM_FORMAT_P010;
6652                 if (plane_cap && plane_cap->pixel_format_support.fp16) {
6653                         formats[num_formats++] = DRM_FORMAT_XRGB16161616F;
6654                         formats[num_formats++] = DRM_FORMAT_ARGB16161616F;
6655                         formats[num_formats++] = DRM_FORMAT_XBGR16161616F;
6656                         formats[num_formats++] = DRM_FORMAT_ABGR16161616F;
6657                 }
6658                 break;
6659
6660         case DRM_PLANE_TYPE_OVERLAY:
6661                 for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) {
6662                         if (num_formats >= max_formats)
6663                                 break;
6664
6665                         formats[num_formats++] = overlay_formats[i];
6666                 }
6667                 break;
6668
6669         case DRM_PLANE_TYPE_CURSOR:
6670                 for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) {
6671                         if (num_formats >= max_formats)
6672                                 break;
6673
6674                         formats[num_formats++] = cursor_formats[i];
6675                 }
6676                 break;
6677         }
6678
6679         return num_formats;
6680 }
6681
6682 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
6683                                 struct drm_plane *plane,
6684                                 unsigned long possible_crtcs,
6685                                 const struct dc_plane_cap *plane_cap)
6686 {
6687         uint32_t formats[32];
6688         int num_formats;
6689         int res = -EPERM;
6690         unsigned int supported_rotations;
6691         uint64_t *modifiers = NULL;
6692
6693         num_formats = get_plane_formats(plane, plane_cap, formats,
6694                                         ARRAY_SIZE(formats));
6695
6696         res = get_plane_modifiers(dm->adev, plane->type, &modifiers);
6697         if (res)
6698                 return res;
6699
6700         res = drm_universal_plane_init(adev_to_drm(dm->adev), plane, possible_crtcs,
6701                                        &dm_plane_funcs, formats, num_formats,
6702                                        modifiers, plane->type, NULL);
6703         kfree(modifiers);
6704         if (res)
6705                 return res;
6706
6707         if (plane->type == DRM_PLANE_TYPE_OVERLAY &&
6708             plane_cap && plane_cap->per_pixel_alpha) {
6709                 unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
6710                                           BIT(DRM_MODE_BLEND_PREMULTI);
6711
6712                 drm_plane_create_alpha_property(plane);
6713                 drm_plane_create_blend_mode_property(plane, blend_caps);
6714         }
6715
6716         if (plane->type == DRM_PLANE_TYPE_PRIMARY &&
6717             plane_cap &&
6718             (plane_cap->pixel_format_support.nv12 ||
6719              plane_cap->pixel_format_support.p010)) {
6720                 /* This only affects YUV formats. */
6721                 drm_plane_create_color_properties(
6722                         plane,
6723                         BIT(DRM_COLOR_YCBCR_BT601) |
6724                         BIT(DRM_COLOR_YCBCR_BT709) |
6725                         BIT(DRM_COLOR_YCBCR_BT2020),
6726                         BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
6727                         BIT(DRM_COLOR_YCBCR_FULL_RANGE),
6728                         DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE);
6729         }
6730
6731         supported_rotations =
6732                 DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
6733                 DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270;
6734
6735         if (dm->adev->asic_type >= CHIP_BONAIRE &&
6736             plane->type != DRM_PLANE_TYPE_CURSOR)
6737                 drm_plane_create_rotation_property(plane, DRM_MODE_ROTATE_0,
6738                                                    supported_rotations);
6739
6740         drm_plane_helper_add(plane, &dm_plane_helper_funcs);
6741
6742         /* Create (reset) the plane state */
6743         if (plane->funcs->reset)
6744                 plane->funcs->reset(plane);
6745
6746         return 0;
6747 }
6748
#ifdef CONFIG_DEBUG_FS
/* Attach the four CRC window coordinate properties, each defaulting to 0. */
static void attach_crtc_crc_properties(struct amdgpu_display_manager *dm,
                                struct amdgpu_crtc *acrtc)
{
        struct drm_property *props[] = {
                dm->crc_win_x_start_property,
                dm->crc_win_y_start_property,
                dm->crc_win_x_end_property,
                dm->crc_win_y_end_property,
        };
        int i;

        for (i = 0; i < ARRAY_SIZE(props); i++)
                drm_object_attach_property(&acrtc->base.base, props[i], 0);
}
#endif
6767
6768 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
6769                                struct drm_plane *plane,
6770                                uint32_t crtc_index)
6771 {
6772         struct amdgpu_crtc *acrtc = NULL;
6773         struct drm_plane *cursor_plane;
6774
6775         int res = -ENOMEM;
6776
6777         cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL);
6778         if (!cursor_plane)
6779                 goto fail;
6780
6781         cursor_plane->type = DRM_PLANE_TYPE_CURSOR;
6782         res = amdgpu_dm_plane_init(dm, cursor_plane, 0, NULL);
6783
6784         acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
6785         if (!acrtc)
6786                 goto fail;
6787
6788         res = drm_crtc_init_with_planes(
6789                         dm->ddev,
6790                         &acrtc->base,
6791                         plane,
6792                         cursor_plane,
6793                         &amdgpu_dm_crtc_funcs, NULL);
6794
6795         if (res)
6796                 goto fail;
6797
6798         drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs);
6799
6800         /* Create (reset) the plane state */
6801         if (acrtc->base.funcs->reset)
6802                 acrtc->base.funcs->reset(&acrtc->base);
6803
6804         acrtc->max_cursor_width = dm->adev->dm.dc->caps.max_cursor_size;
6805         acrtc->max_cursor_height = dm->adev->dm.dc->caps.max_cursor_size;
6806
6807         acrtc->crtc_id = crtc_index;
6808         acrtc->base.enabled = false;
6809         acrtc->otg_inst = -1;
6810
6811         dm->adev->mode_info.crtcs[crtc_index] = acrtc;
6812         drm_crtc_enable_color_mgmt(&acrtc->base, MAX_COLOR_LUT_ENTRIES,
6813                                    true, MAX_COLOR_LUT_ENTRIES);
6814         drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);
6815 #ifdef CONFIG_DEBUG_FS
6816         attach_crtc_crc_properties(dm, acrtc);
6817 #endif
6818         return 0;
6819
6820 fail:
6821         kfree(acrtc);
6822         kfree(cursor_plane);
6823         return res;
6824 }
6825
6826
6827 static int to_drm_connector_type(enum signal_type st)
6828 {
6829         switch (st) {
6830         case SIGNAL_TYPE_HDMI_TYPE_A:
6831                 return DRM_MODE_CONNECTOR_HDMIA;
6832         case SIGNAL_TYPE_EDP:
6833                 return DRM_MODE_CONNECTOR_eDP;
6834         case SIGNAL_TYPE_LVDS:
6835                 return DRM_MODE_CONNECTOR_LVDS;
6836         case SIGNAL_TYPE_RGB:
6837                 return DRM_MODE_CONNECTOR_VGA;
6838         case SIGNAL_TYPE_DISPLAY_PORT:
6839         case SIGNAL_TYPE_DISPLAY_PORT_MST:
6840                 return DRM_MODE_CONNECTOR_DisplayPort;
6841         case SIGNAL_TYPE_DVI_DUAL_LINK:
6842         case SIGNAL_TYPE_DVI_SINGLE_LINK:
6843                 return DRM_MODE_CONNECTOR_DVID;
6844         case SIGNAL_TYPE_VIRTUAL:
6845                 return DRM_MODE_CONNECTOR_VIRTUAL;
6846
6847         default:
6848                 return DRM_MODE_CONNECTOR_Unknown;
6849         }
6850 }
6851
/*
 * Return the encoder wired to @connector, or NULL if none is possible.
 * Returning from inside the iterator picks the first (and, in DM, only)
 * encoder.
 */
static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector)
{
        struct drm_encoder *encoder;

        /* There is only one encoder per connector */
        drm_connector_for_each_possible_encoder(connector, encoder)
                return encoder;

        return NULL;
}
6862
/*
 * Cache the connector's preferred ("native") display mode on its encoder.
 *
 * Examines the head of connector->probed_modes (the list is sorted by
 * amdgpu_dm_connector_ddc_get_modes() before this is called) and, if that
 * first entry carries DRM_MODE_TYPE_PREFERRED, copies it into
 * amdgpu_encoder->native_mode. Otherwise native_mode.clock stays 0, which
 * marks the native mode as unknown.
 */
static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
{
	struct drm_encoder *encoder;
	struct amdgpu_encoder *amdgpu_encoder;

	encoder = amdgpu_dm_connector_to_encoder(connector);

	if (encoder == NULL)
		return;

	amdgpu_encoder = to_amdgpu_encoder(encoder);

	/* clock == 0 flags the cached native mode as invalid/unset. */
	amdgpu_encoder->native_mode.clock = 0;

	if (!list_empty(&connector->probed_modes)) {
		struct drm_display_mode *preferred_mode = NULL;

		list_for_each_entry(preferred_mode,
				    &connector->probed_modes,
				    head) {
			if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
				amdgpu_encoder->native_mode = *preferred_mode;

			/*
			 * NOTE(review): this break is unconditional, so only
			 * the first probed mode is ever examined — presumably
			 * relying on the sorted list putting the best mode
			 * first. Confirm whether a preferred mode appearing
			 * later in the list should win instead.
			 */
			break;
		}

	}
}
6891
6892 static struct drm_display_mode *
6893 amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
6894                              char *name,
6895                              int hdisplay, int vdisplay)
6896 {
6897         struct drm_device *dev = encoder->dev;
6898         struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
6899         struct drm_display_mode *mode = NULL;
6900         struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
6901
6902         mode = drm_mode_duplicate(dev, native_mode);
6903
6904         if (mode == NULL)
6905                 return NULL;
6906
6907         mode->hdisplay = hdisplay;
6908         mode->vdisplay = vdisplay;
6909         mode->type &= ~DRM_MODE_TYPE_PREFERRED;
6910         strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
6911
6912         return mode;
6913
6914 }
6915
6916 static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
6917                                                  struct drm_connector *connector)
6918 {
6919         struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
6920         struct drm_display_mode *mode = NULL;
6921         struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
6922         struct amdgpu_dm_connector *amdgpu_dm_connector =
6923                                 to_amdgpu_dm_connector(connector);
6924         int i;
6925         int n;
6926         struct mode_size {
6927                 char name[DRM_DISPLAY_MODE_LEN];
6928                 int w;
6929                 int h;
6930         } common_modes[] = {
6931                 {  "640x480",  640,  480},
6932                 {  "800x600",  800,  600},
6933                 { "1024x768", 1024,  768},
6934                 { "1280x720", 1280,  720},
6935                 { "1280x800", 1280,  800},
6936                 {"1280x1024", 1280, 1024},
6937                 { "1440x900", 1440,  900},
6938                 {"1680x1050", 1680, 1050},
6939                 {"1600x1200", 1600, 1200},
6940                 {"1920x1080", 1920, 1080},
6941                 {"1920x1200", 1920, 1200}
6942         };
6943
6944         n = ARRAY_SIZE(common_modes);
6945
6946         for (i = 0; i < n; i++) {
6947                 struct drm_display_mode *curmode = NULL;
6948                 bool mode_existed = false;
6949
6950                 if (common_modes[i].w > native_mode->hdisplay ||
6951                     common_modes[i].h > native_mode->vdisplay ||
6952                    (common_modes[i].w == native_mode->hdisplay &&
6953                     common_modes[i].h == native_mode->vdisplay))
6954                         continue;
6955
6956                 list_for_each_entry(curmode, &connector->probed_modes, head) {
6957                         if (common_modes[i].w == curmode->hdisplay &&
6958                             common_modes[i].h == curmode->vdisplay) {
6959                                 mode_existed = true;
6960                                 break;
6961                         }
6962                 }
6963
6964                 if (mode_existed)
6965                         continue;
6966
6967                 mode = amdgpu_dm_create_common_mode(encoder,
6968                                 common_modes[i].name, common_modes[i].w,
6969                                 common_modes[i].h);
6970                 drm_mode_probed_add(connector, mode);
6971                 amdgpu_dm_connector->num_modes++;
6972         }
6973 }
6974
6975 static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
6976                                               struct edid *edid)
6977 {
6978         struct amdgpu_dm_connector *amdgpu_dm_connector =
6979                         to_amdgpu_dm_connector(connector);
6980
6981         if (edid) {
6982                 /* empty probed_modes */
6983                 INIT_LIST_HEAD(&connector->probed_modes);
6984                 amdgpu_dm_connector->num_modes =
6985                                 drm_add_edid_modes(connector, edid);
6986
6987                 /* sorting the probed modes before calling function
6988                  * amdgpu_dm_get_native_mode() since EDID can have
6989                  * more than one preferred mode. The modes that are
6990                  * later in the probed mode list could be of higher
6991                  * and preferred resolution. For example, 3840x2160
6992                  * resolution in base EDID preferred timing and 4096x2160
6993                  * preferred resolution in DID extension block later.
6994                  */
6995                 drm_mode_sort(&connector->probed_modes);
6996                 amdgpu_dm_get_native_mode(connector);
6997         } else {
6998                 amdgpu_dm_connector->num_modes = 0;
6999         }
7000 }
7001
7002 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
7003 {
7004         struct amdgpu_dm_connector *amdgpu_dm_connector =
7005                         to_amdgpu_dm_connector(connector);
7006         struct drm_encoder *encoder;
7007         struct edid *edid = amdgpu_dm_connector->edid;
7008
7009         encoder = amdgpu_dm_connector_to_encoder(connector);
7010
7011         if (!drm_edid_is_valid(edid)) {
7012                 amdgpu_dm_connector->num_modes =
7013                                 drm_add_modes_noedid(connector, 640, 480);
7014         } else {
7015                 amdgpu_dm_connector_ddc_get_modes(connector, edid);
7016                 amdgpu_dm_connector_add_common_modes(encoder, connector);
7017         }
7018         amdgpu_dm_fbc_init(connector);
7019
7020         return amdgpu_dm_connector->num_modes;
7021 }
7022
/*
 * Common connector initialization shared by the SST and MST connector
 * creation paths: binds @aconnector to its dc_link and attaches every DRM
 * property the connector type supports (scaling, underscan, max bpc, ABM,
 * HDR output metadata, VRR capability, and content protection when the
 * HDCP workqueue exists).
 */
void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
				     struct amdgpu_dm_connector *aconnector,
				     int connector_type,
				     struct dc_link *link,
				     int link_index)
{
	struct amdgpu_device *adev = drm_to_adev(dm->ddev);

	/*
	 * Some of the properties below require access to state, like bpc.
	 * Allocate some default initial connector state with our reset helper.
	 */
	if (aconnector->base.funcs->reset)
		aconnector->base.funcs->reset(&aconnector->base);

	aconnector->connector_id = link_index;
	aconnector->dc_link = link;
	aconnector->base.interlace_allowed = false;
	aconnector->base.doublescan_allowed = false;
	aconnector->base.stereo_allowed = false;
	aconnector->base.dpms = DRM_MODE_DPMS_OFF;
	aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
	aconnector->audio_inst = -1;
	mutex_init(&aconnector->hpd_lock);

	/*
	 * Configure HPD hot-plug support. connector->polled defaults to 0,
	 * which means hot plug via HPD is not supported; enable it only for
	 * the connector types below.
	 */
	switch (connector_type) {
	case DRM_MODE_CONNECTOR_HDMIA:
		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
		aconnector->base.ycbcr_420_allowed =
			link->link_enc->features.hdmi_ycbcr420_supported ? true : false;
		break;
	case DRM_MODE_CONNECTOR_DisplayPort:
		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
		aconnector->base.ycbcr_420_allowed =
			link->link_enc->features.dp_ycbcr420_supported ? true : false;
		break;
	case DRM_MODE_CONNECTOR_DVID:
		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
		break;
	default:
		break;
	}

	drm_object_attach_property(&aconnector->base.base,
				dm->ddev->mode_config.scaling_mode_property,
				DRM_MODE_SCALE_NONE);

	drm_object_attach_property(&aconnector->base.base,
				adev->mode_info.underscan_property,
				UNDERSCAN_OFF);
	drm_object_attach_property(&aconnector->base.base,
				adev->mode_info.underscan_hborder_property,
				0);
	drm_object_attach_property(&aconnector->base.base,
				adev->mode_info.underscan_vborder_property,
				0);

	/* MST connectors get their max_bpc property from the MST topology. */
	if (!aconnector->mst_port)
		drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);

	/* This defaults to the max in the range, but we want 8bpc for non-edp. */
	aconnector->base.state->max_bpc = (connector_type == DRM_MODE_CONNECTOR_eDP) ? 16 : 8;
	aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;

	/* ABM (backlight power saving) needs either DMCU firmware or DMUB. */
	if (connector_type == DRM_MODE_CONNECTOR_eDP &&
	    (dc_is_dmcu_initialized(adev->dm.dc) || adev->dm.dc->ctx->dmub_srv)) {
		drm_object_attach_property(&aconnector->base.base,
				adev->mode_info.abm_level_property, 0);
	}

	if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
	    connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
	    connector_type == DRM_MODE_CONNECTOR_eDP) {
		drm_object_attach_property(
			&aconnector->base.base,
			dm->ddev->mode_config.hdr_output_metadata_property, 0);

		if (!aconnector->mst_port)
			drm_connector_attach_vrr_capable_property(&aconnector->base);

#ifdef CONFIG_DRM_AMD_DC_HDCP
		if (adev->dm.hdcp_workqueue)
			drm_connector_attach_content_protection_property(&aconnector->base, true);
#endif
	}
}
7113
7114 static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
7115                               struct i2c_msg *msgs, int num)
7116 {
7117         struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
7118         struct ddc_service *ddc_service = i2c->ddc_service;
7119         struct i2c_command cmd;
7120         int i;
7121         int result = -EIO;
7122
7123         cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);
7124
7125         if (!cmd.payloads)
7126                 return result;
7127
7128         cmd.number_of_payloads = num;
7129         cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
7130         cmd.speed = 100;
7131
7132         for (i = 0; i < num; i++) {
7133                 cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
7134                 cmd.payloads[i].address = msgs[i].addr;
7135                 cmd.payloads[i].length = msgs[i].len;
7136                 cmd.payloads[i].data = msgs[i].buf;
7137         }
7138
7139         if (dc_submit_i2c(
7140                         ddc_service->ctx->dc,
7141                         ddc_service->ddc_pin->hw_info.ddc_channel,
7142                         &cmd))
7143                 result = num;
7144
7145         kfree(cmd.payloads);
7146         return result;
7147 }
7148
7149 static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
7150 {
7151         return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
7152 }
7153
/* I2C algorithm backed by DC's DDC engine (see amdgpu_dm_i2c_xfer()). */
static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
	.master_xfer = amdgpu_dm_i2c_xfer,
	.functionality = amdgpu_dm_i2c_func,
};
7158
/*
 * Allocate and initialize an amdgpu_i2c_adapter that routes I2C traffic
 * for @link_index through DC's DDC service via amdgpu_dm_i2c_algo.
 *
 * Returns the new adapter, or NULL on allocation failure. The caller owns
 * the returned memory and is expected to register it with i2c_add_adapter().
 *
 * NOTE(review): the @res out-parameter is never written here — callers
 * appear to rely on the NULL return alone; confirm before reading *res.
 */
static struct amdgpu_i2c_adapter *
create_i2c(struct ddc_service *ddc_service,
	   int link_index,
	   int *res)
{
	struct amdgpu_device *adev = ddc_service->ctx->driver_context;
	struct amdgpu_i2c_adapter *i2c;

	i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
	if (!i2c)
		return NULL;
	i2c->base.owner = THIS_MODULE;
	i2c->base.class = I2C_CLASS_DDC;
	i2c->base.dev.parent = &adev->pdev->dev;
	i2c->base.algo = &amdgpu_dm_i2c_algo;
	snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
	i2c_set_adapdata(&i2c->base, i2c);
	i2c->ddc_service = ddc_service;
	/* Tell DC which DDC channel this adapter's traffic belongs to. */
	i2c->ddc_service->ddc_pin->hw_info.ddc_channel = link_index;

	return i2c;
}
7181
7182
7183 /*
7184  * Note: this function assumes that dc_link_detect() was called for the
7185  * dc_link which will be represented by this aconnector.
7186  */
static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
				    struct amdgpu_dm_connector *aconnector,
				    uint32_t link_index,
				    struct amdgpu_encoder *aencoder)
{
	int res = 0;
	int connector_type;
	struct dc *dc = dm->dc;
	struct dc_link *link = dc_get_link_at_index(dc, link_index);
	struct amdgpu_i2c_adapter *i2c;

	/* Let the dc_link find its connector again (e.g. in HPD handling). */
	link->priv = aconnector;

	DRM_DEBUG_DRIVER("%s()\n", __func__);

	/* Each connector gets its own DDC-backed I2C adapter. */
	i2c = create_i2c(link->ddc, link->link_index, &res);
	if (!i2c) {
		DRM_ERROR("Failed to create i2c adapter data\n");
		return -ENOMEM;
	}

	aconnector->i2c = i2c;
	res = i2c_add_adapter(&i2c->base);

	if (res) {
		DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
		goto out_free;
	}

	/* DRM connector type is derived from the DC link's signal type. */
	connector_type = to_drm_connector_type(link->connector_signal);

	res = drm_connector_init_with_ddc(
			dm->ddev,
			&aconnector->base,
			&amdgpu_dm_connector_funcs,
			connector_type,
			&i2c->base);

	if (res) {
		DRM_ERROR("connector_init failed\n");
		aconnector->connector_id = -1;
		goto out_free;
	}

	drm_connector_helper_add(
			&aconnector->base,
			&amdgpu_dm_connector_helper_funcs);

	amdgpu_dm_connector_init_helper(
		dm,
		aconnector,
		connector_type,
		link,
		link_index);

	drm_connector_attach_encoder(
		&aconnector->base, &aencoder->base);

	/* DP/eDP additionally need MST and DP-specific setup. */
	if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
		|| connector_type == DRM_MODE_CONNECTOR_eDP)
		amdgpu_dm_initialize_dp_connector(dm, aconnector, link->link_index);

out_free:
	/*
	 * NOTE(review): when drm_connector_init_with_ddc() fails AFTER
	 * i2c_add_adapter() succeeded, the adapter is kfree'd without an
	 * i2c_del_adapter() — confirm whether teardown elsewhere covers
	 * this or the registration leaks on that path.
	 */
	if (res) {
		kfree(i2c);
		aconnector->i2c = NULL;
	}
	return res;
}
7256
7257 int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
7258 {
7259         switch (adev->mode_info.num_crtc) {
7260         case 1:
7261                 return 0x1;
7262         case 2:
7263                 return 0x3;
7264         case 3:
7265                 return 0x7;
7266         case 4:
7267                 return 0xf;
7268         case 5:
7269                 return 0x1f;
7270         case 6:
7271         default:
7272                 return 0x3f;
7273         }
7274 }
7275
7276 static int amdgpu_dm_encoder_init(struct drm_device *dev,
7277                                   struct amdgpu_encoder *aencoder,
7278                                   uint32_t link_index)
7279 {
7280         struct amdgpu_device *adev = drm_to_adev(dev);
7281
7282         int res = drm_encoder_init(dev,
7283                                    &aencoder->base,
7284                                    &amdgpu_dm_encoder_funcs,
7285                                    DRM_MODE_ENCODER_TMDS,
7286                                    NULL);
7287
7288         aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);
7289
7290         if (!res)
7291                 aencoder->encoder_id = link_index;
7292         else
7293                 aencoder->encoder_id = -1;
7294
7295         drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);
7296
7297         return res;
7298 }
7299
/*
 * Enable or disable the vblank/pageflip interrupt plumbing for @acrtc.
 * On enable, vblank is turned on before the pageflip IRQ reference is
 * taken; on disable the same steps run in reverse order.
 */
static void manage_dm_interrupts(struct amdgpu_device *adev,
				 struct amdgpu_crtc *acrtc,
				 bool enable)
{
	/*
	 * We have no guarantee that the frontend index maps to the same
	 * backend index - some even map to more than one.
	 *
	 * TODO: Use a different interrupt or check DC itself for the mapping.
	 */
	int irq_type =
		amdgpu_display_crtc_idx_to_irq_type(
			adev,
			acrtc->crtc_id);

	if (enable) {
		drm_crtc_vblank_on(&acrtc->base);
		amdgpu_irq_get(
			adev,
			&adev->pageflip_irq,
			irq_type);
	} else {

		amdgpu_irq_put(
			adev,
			&adev->pageflip_irq,
			irq_type);
		drm_crtc_vblank_off(&acrtc->base);
	}
}
7330
/*
 * Re-sync the hardware pageflip interrupt enable for @acrtc's CRTC with
 * the state the amdgpu IRQ framework currently holds for it.
 */
static void dm_update_pflip_irq_state(struct amdgpu_device *adev,
				      struct amdgpu_crtc *acrtc)
{
	int irq_type =
		amdgpu_display_crtc_idx_to_irq_type(adev, acrtc->crtc_id);

	/**
	 * This reads the current state for the IRQ and force reapplies
	 * the setting to hardware.
	 */
	amdgpu_irq_update(adev, &adev->pageflip_irq, irq_type);
}
7343
/*
 * Return true when scaling or underscan settings differ between the new
 * and old connector state, i.e. the stream's scaling must be reprogrammed.
 */
static bool
is_scaling_state_different(const struct dm_connector_state *dm_state,
			   const struct dm_connector_state *old_dm_state)
{
	if (dm_state->scaling != old_dm_state->scaling)
		return true;
	/* Underscan toggled off: relevant only if borders were in effect. */
	if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
		/*
		 * NOTE(review): both branches below require BOTH borders to
		 * be non-zero (&&); presumably either border alone should
		 * count (||) — confirm the intended semantics.
		 */
		if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
			return true;
	} else  if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
		/* Underscan toggled on with non-trivial borders. */
		if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
			return true;
	} else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
		   dm_state->underscan_vborder != old_dm_state->underscan_vborder)
		return true;
	return false;
}
7361
7362 #ifdef CONFIG_DRM_AMD_DC_HDCP
/*
 * Decide whether an HDCP state transition requires (re)enabling content
 * protection on @connector. Returns true when HDCP work must be scheduled.
 *
 * Note: this intentionally MUTATES state->content_protection in several
 * branches to normalize the requested state before comparing (e.g. forcing
 * ENABLED back to DESIRED on a type change or S3 resume).
 */
static bool is_content_protection_different(struct drm_connector_state *state,
					    const struct drm_connector_state *old_state,
					    const struct drm_connector *connector, struct hdcp_workqueue *hdcp_w)
{
	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
	struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);

	/* Handle: Type0/1 change */
	if (old_state->hdcp_content_type != state->hdcp_content_type &&
	    state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
		state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
		return true;
	}

	/* CP is being re enabled, ignore this
	 *
	 * Handles:	ENABLED -> DESIRED
	 */
	if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
	    state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
		state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
		return false;
	}

	/* S3 resume case, since old state will always be 0 (UNDESIRED) and the restored state will be ENABLED
	 *
	 * Handles:	UNDESIRED -> ENABLED
	 */
	if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
	    state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
		state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;

	/* Check if something is connected/enabled, otherwise we start hdcp but nothing is connected/enabled
	 * hot-plug, headless s3, dpms
	 *
	 * Handles:	DESIRED -> DESIRED (Special case)
	 */
	if (dm_con_state->update_hdcp && state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED &&
	    connector->dpms == DRM_MODE_DPMS_ON && aconnector->dc_sink != NULL) {
		dm_con_state->update_hdcp = false;
		return true;
	}

	/*
	 * Handles:	UNDESIRED -> UNDESIRED
	 *		DESIRED -> DESIRED
	 *		ENABLED -> ENABLED
	 */
	if (old_state->content_protection == state->content_protection)
		return false;

	/*
	 * Handles:	UNDESIRED -> DESIRED
	 *		DESIRED -> UNDESIRED
	 *		ENABLED -> UNDESIRED
	 */
	if (state->content_protection != DRM_MODE_CONTENT_PROTECTION_ENABLED)
		return true;

	/*
	 * Handles:	DESIRED -> ENABLED
	 */
	return false;
}
7427
7428 #endif
7429 static void remove_stream(struct amdgpu_device *adev,
7430                           struct amdgpu_crtc *acrtc,
7431                           struct dc_stream_state *stream)
7432 {
7433         /* this is the update mode case */
7434
7435         acrtc->otg_inst = -1;
7436         acrtc->enabled = false;
7437 }
7438
7439 static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
7440                                struct dc_cursor_position *position)
7441 {
7442         struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
7443         int x, y;
7444         int xorigin = 0, yorigin = 0;
7445
7446         position->enable = false;
7447         position->x = 0;
7448         position->y = 0;
7449
7450         if (!crtc || !plane->state->fb)
7451                 return 0;
7452
7453         if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) ||
7454             (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) {
7455                 DRM_ERROR("%s: bad cursor width or height %d x %d\n",
7456                           __func__,
7457                           plane->state->crtc_w,
7458                           plane->state->crtc_h);
7459                 return -EINVAL;
7460         }
7461
7462         x = plane->state->crtc_x;
7463         y = plane->state->crtc_y;
7464
7465         if (x <= -amdgpu_crtc->max_cursor_width ||
7466             y <= -amdgpu_crtc->max_cursor_height)
7467                 return 0;
7468
7469         if (x < 0) {
7470                 xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
7471                 x = 0;
7472         }
7473         if (y < 0) {
7474                 yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
7475                 y = 0;
7476         }
7477         position->enable = true;
7478         position->translate_by_source = true;
7479         position->x = x;
7480         position->y = y;
7481         position->x_hotspot = xorigin;
7482         position->y_hotspot = yorigin;
7483
7484         return 0;
7485 }
7486
/*
 * Program the hardware cursor (attributes + position) for the cursor
 * plane's new state, or turn the cursor off when it has no visible
 * position. All DC calls are made under dm.dc_lock.
 */
static void handle_cursor_update(struct drm_plane *plane,
				 struct drm_plane_state *old_plane_state)
{
	struct amdgpu_device *adev = drm_to_adev(plane->dev);
	struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb);
	/* When the new state has no fb the cursor is being disabled; use the old crtc. */
	struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
	struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	uint64_t address = afb ? afb->address : 0;
	struct dc_cursor_position position;
	struct dc_cursor_attributes attributes;
	int ret;

	/* Nothing to do if the cursor was and remains unset. */
	if (!plane->state->fb && !old_plane_state->fb)
		return;

	DRM_DEBUG_DRIVER("%s: crtc_id=%d with size %d to %d\n",
			 __func__,
			 amdgpu_crtc->crtc_id,
			 plane->state->crtc_w,
			 plane->state->crtc_h);

	ret = get_cursor_position(plane, crtc, &position);
	if (ret)
		return;

	if (!position.enable) {
		/* turn off cursor */
		if (crtc_state && crtc_state->stream) {
			mutex_lock(&adev->dm.dc_lock);
			dc_stream_set_cursor_position(crtc_state->stream,
						      &position);
			mutex_unlock(&adev->dm.dc_lock);
		}
		return;
	}

	amdgpu_crtc->cursor_width = plane->state->crtc_w;
	amdgpu_crtc->cursor_height = plane->state->crtc_h;

	memset(&attributes, 0, sizeof(attributes));
	attributes.address.high_part = upper_32_bits(address);
	attributes.address.low_part  = lower_32_bits(address);
	attributes.width             = plane->state->crtc_w;
	attributes.height            = plane->state->crtc_h;
	attributes.color_format      = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
	attributes.rotation_angle    = 0;
	attributes.attribute_flags.value = 0;

	/* Pitch in pixels: byte pitch divided by bytes per pixel. */
	attributes.pitch = afb->base.pitches[0] / afb->base.format->cpp[0];

	if (crtc_state->stream) {
		mutex_lock(&adev->dm.dc_lock);
		if (!dc_stream_set_cursor_attributes(crtc_state->stream,
							 &attributes))
			DRM_ERROR("DC failed to set cursor attributes\n");

		if (!dc_stream_set_cursor_position(crtc_state->stream,
						   &position))
			DRM_ERROR("DC failed to set cursor position\n");
		mutex_unlock(&adev->dm.dc_lock);
	}
}
7550
/*
 * Arm @acrtc for a page flip completion from interrupt context: move the
 * pending DRM event off the atomic state onto the crtc and mark the flip
 * as submitted. Caller must hold dev->event_lock (asserted below).
 */
static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
{

	assert_spin_locked(&acrtc->base.dev->event_lock);
	WARN_ON(acrtc->event);

	acrtc->event = acrtc->base.state->event;

	/* Set the flip status */
	acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;

	/* Mark this event as consumed */
	acrtc->base.state->event = NULL;

	DRM_DEBUG_DRIVER("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
						 acrtc->crtc_id);
}
7568
/*
 * Refresh FreeSync/VRR state for @new_stream around a flip: run the
 * pre-flip freesync handling, rebuild the VRR infopacket, and record
 * whether the timing or infopacket changed so the commit path can
 * reprogram them. All VRR state is accessed under the DRM event_lock.
 */
static void update_freesync_state_on_stream(
	struct amdgpu_display_manager *dm,
	struct dm_crtc_state *new_crtc_state,
	struct dc_stream_state *new_stream,
	struct dc_plane_state *surface,
	u32 flip_timestamp_in_us)
{
	struct mod_vrr_params vrr_params;
	struct dc_info_packet vrr_infopacket = {0};
	struct amdgpu_device *adev = dm->adev;
	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
	unsigned long flags;

	if (!new_stream)
		return;

	/*
	 * TODO: Determine why min/max totals and vrefresh can be 0 here.
	 * For now it's sufficient to just guard against these conditions.
	 */

	if (!new_stream->timing.h_total || !new_stream->timing.v_total)
		return;

	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
	vrr_params = acrtc->dm_irq_params.vrr_params;

	if (surface) {
		mod_freesync_handle_preflip(
			dm->freesync_module,
			surface,
			new_stream,
			flip_timestamp_in_us,
			&vrr_params);

		/* Pre-AI (pre-Vega) hardware handles v_update in software. */
		if (adev->family < AMDGPU_FAMILY_AI &&
		    amdgpu_dm_vrr_active(new_crtc_state)) {
			mod_freesync_handle_v_update(dm->freesync_module,
						     new_stream, &vrr_params);

			/* Need to call this before the frame ends. */
			dc_stream_adjust_vmin_vmax(dm->dc,
						   new_crtc_state->stream,
						   &vrr_params.adjust);
		}
	}

	mod_freesync_build_vrr_infopacket(
		dm->freesync_module,
		new_stream,
		&vrr_params,
		PACKET_TYPE_VRR,
		TRANSFER_FUNC_UNKNOWN,
		&vrr_infopacket);

	/* Flag timing/infopacket deltas so the commit path reprograms them. */
	new_crtc_state->freesync_timing_changed |=
		(memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
			&vrr_params.adjust,
			sizeof(vrr_params.adjust)) != 0);

	new_crtc_state->freesync_vrr_info_changed |=
		(memcmp(&new_crtc_state->vrr_infopacket,
			&vrr_infopacket,
			sizeof(vrr_infopacket)) != 0);

	acrtc->dm_irq_params.vrr_params = vrr_params;
	new_crtc_state->vrr_infopacket = vrr_infopacket;

	new_stream->adjust = acrtc->dm_irq_params.vrr_params.adjust;
	new_stream->vrr_infopacket = vrr_infopacket;

	if (new_crtc_state->freesync_vrr_info_changed)
		DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d",
			      new_crtc_state->base.crtc->base.id,
			      (int)new_crtc_state->base.vrr_enabled,
			      (int)vrr_params.state);

	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
}
7648
7649 static void update_stream_irq_parameters(
7650         struct amdgpu_display_manager *dm,
7651         struct dm_crtc_state *new_crtc_state)
7652 {
7653         struct dc_stream_state *new_stream = new_crtc_state->stream;
7654         struct mod_vrr_params vrr_params;
7655         struct mod_freesync_config config = new_crtc_state->freesync_config;
7656         struct amdgpu_device *adev = dm->adev;
7657         struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
7658         unsigned long flags;
7659
7660         if (!new_stream)
7661                 return;
7662
7663         /*
7664          * TODO: Determine why min/max totals and vrefresh can be 0 here.
7665          * For now it's sufficient to just guard against these conditions.
7666          */
7667         if (!new_stream->timing.h_total || !new_stream->timing.v_total)
7668                 return;
7669
7670         spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
7671         vrr_params = acrtc->dm_irq_params.vrr_params;
7672
7673         if (new_crtc_state->vrr_supported &&
7674             config.min_refresh_in_uhz &&
7675             config.max_refresh_in_uhz) {
7676                 config.state = new_crtc_state->base.vrr_enabled ?
7677                         VRR_STATE_ACTIVE_VARIABLE :
7678                         VRR_STATE_INACTIVE;
7679         } else {
7680                 config.state = VRR_STATE_UNSUPPORTED;
7681         }
7682
7683         mod_freesync_build_vrr_params(dm->freesync_module,
7684                                       new_stream,
7685                                       &config, &vrr_params);
7686
7687         new_crtc_state->freesync_timing_changed |=
7688                 (memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
7689                         &vrr_params.adjust, sizeof(vrr_params.adjust)) != 0);
7690
7691         new_crtc_state->freesync_config = config;
7692         /* Copy state for access from DM IRQ handler */
7693         acrtc->dm_irq_params.freesync_config = config;
7694         acrtc->dm_irq_params.active_planes = new_crtc_state->active_planes;
7695         acrtc->dm_irq_params.vrr_params = vrr_params;
7696         spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
7697 }
7698
7699 static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
7700                                             struct dm_crtc_state *new_state)
7701 {
7702         bool old_vrr_active = amdgpu_dm_vrr_active(old_state);
7703         bool new_vrr_active = amdgpu_dm_vrr_active(new_state);
7704
7705         if (!old_vrr_active && new_vrr_active) {
7706                 /* Transition VRR inactive -> active:
7707                  * While VRR is active, we must not disable vblank irq, as a
7708                  * reenable after disable would compute bogus vblank/pflip
7709                  * timestamps if it likely happened inside display front-porch.
7710                  *
7711                  * We also need vupdate irq for the actual core vblank handling
7712                  * at end of vblank.
7713                  */
7714                 dm_set_vupdate_irq(new_state->base.crtc, true);
7715                 drm_crtc_vblank_get(new_state->base.crtc);
7716                 DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
7717                                  __func__, new_state->base.crtc->base.id);
7718         } else if (old_vrr_active && !new_vrr_active) {
7719                 /* Transition VRR active -> inactive:
7720                  * Allow vblank irq disable again for fixed refresh rate.
7721                  */
7722                 dm_set_vupdate_irq(new_state->base.crtc, false);
7723                 drm_crtc_vblank_put(new_state->base.crtc);
7724                 DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
7725                                  __func__, new_state->base.crtc->base.id);
7726         }
7727 }
7728
7729 static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state)
7730 {
7731         struct drm_plane *plane;
7732         struct drm_plane_state *old_plane_state, *new_plane_state;
7733         int i;
7734
7735         /*
7736          * TODO: Make this per-stream so we don't issue redundant updates for
7737          * commits with multiple streams.
7738          */
7739         for_each_oldnew_plane_in_state(state, plane, old_plane_state,
7740                                        new_plane_state, i)
7741                 if (plane->type == DRM_PLANE_TYPE_CURSOR)
7742                         handle_cursor_update(plane, old_plane_state);
7743 }
7744
/*
 * amdgpu_dm_commit_planes - gather and commit all plane updates for one CRTC.
 * @state: overall atomic state being committed
 * @dc_state: DC state the stream update is committed against
 * @dev: DRM device
 * @dm: display manager of this adapter
 * @pcrtc: CRTC whose planes are being programmed
 * @wait_for_vblank: when true, throttle the flip by one extra vblank
 *
 * Collects surface/scaling/flip updates for every non-cursor plane attached
 * to @pcrtc into one heap-allocated bundle and submits it to DC with a single
 * dc_commit_updates_for_stream() call. Around that, it performs fence waits
 * on flipped framebuffers, flip throttling, freesync/VRR bookkeeping,
 * pageflip-event preparation, IRQ state updates and PSR enable/disable.
 */
static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
				    struct dc_state *dc_state,
				    struct drm_device *dev,
				    struct amdgpu_display_manager *dm,
				    struct drm_crtc *pcrtc,
				    bool wait_for_vblank)
{
	uint32_t i;
	uint64_t timestamp_ns;
	struct drm_plane *plane;
	struct drm_plane_state *old_plane_state, *new_plane_state;
	struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
	struct drm_crtc_state *new_pcrtc_state =
			drm_atomic_get_new_crtc_state(state, pcrtc);
	struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
	struct dm_crtc_state *dm_old_crtc_state =
			to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
	int planes_count = 0, vpos, hpos;
	long r;
	unsigned long flags;
	struct amdgpu_bo *abo;
	uint32_t target_vblank, last_flip_vblank;
	bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
	bool pflip_present = false;
	/* Per-plane update arrays plus one stream update, filled below. */
	struct {
		struct dc_surface_update surface_updates[MAX_SURFACES];
		struct dc_plane_info plane_infos[MAX_SURFACES];
		struct dc_scaling_info scaling_infos[MAX_SURFACES];
		struct dc_flip_addrs flip_addrs[MAX_SURFACES];
		struct dc_stream_update stream_update;
	} *bundle;

	/* Too large for the stack; zeroed so untouched update fields stay off. */
	bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);

	if (!bundle) {
		dm_error("Failed to allocate update bundle\n");
		goto cleanup;
	}

	/*
	 * Disable the cursor first if we're disabling all the planes.
	 * It'll remain on the screen after the planes are re-enabled
	 * if we don't.
	 */
	if (acrtc_state->active_planes == 0)
		amdgpu_dm_commit_cursors(state);

	/* update planes when needed */
	for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
		struct drm_crtc *crtc = new_plane_state->crtc;
		struct drm_crtc_state *new_crtc_state;
		struct drm_framebuffer *fb = new_plane_state->fb;
		struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)fb;
		bool plane_needs_flip;
		struct dc_plane_state *dc_plane;
		struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);

		/* Cursor plane is handled after stream updates */
		if (plane->type == DRM_PLANE_TYPE_CURSOR)
			continue;

		/* Only planes with an FB that are attached to this CRTC. */
		if (!fb || !crtc || pcrtc != crtc)
			continue;

		new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
		if (!new_crtc_state->active)
			continue;

		dc_plane = dm_new_plane_state->dc_state;

		bundle->surface_updates[planes_count].surface = dc_plane;
		if (new_pcrtc_state->color_mgmt_changed) {
			bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
			bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func;
			bundle->surface_updates[planes_count].gamut_remap_matrix = &dc_plane->gamut_remap_matrix;
		}

		fill_dc_scaling_info(new_plane_state,
				     &bundle->scaling_infos[planes_count]);

		bundle->surface_updates[planes_count].scaling_info =
			&bundle->scaling_infos[planes_count];

		/* A pageflip is needed only when both old and new FBs exist. */
		plane_needs_flip = old_plane_state->fb && new_plane_state->fb;

		pflip_present = pflip_present || plane_needs_flip;

		if (!plane_needs_flip) {
			planes_count += 1;
			continue;
		}

		abo = gem_to_amdgpu_bo(fb->obj[0]);

		/*
		 * Wait for all fences on this FB. Do limited wait to avoid
		 * deadlock during GPU reset when this fence will not signal
		 * but we hold reservation lock for the BO.
		 */
		r = dma_resv_wait_timeout_rcu(abo->tbo.base.resv, true,
							false,
							msecs_to_jiffies(5000));
		if (unlikely(r <= 0))
			DRM_ERROR("Waiting for fences timed out!");

		fill_dc_plane_info_and_addr(
			dm->adev, new_plane_state,
			afb->tiling_flags,
			&bundle->plane_infos[planes_count],
			&bundle->flip_addrs[planes_count].address,
			afb->tmz_surface, false);

		DRM_DEBUG_DRIVER("plane: id=%d dcc_en=%d\n",
				 new_plane_state->plane->index,
				 bundle->plane_infos[planes_count].dcc.enable);

		bundle->surface_updates[planes_count].plane_info =
			&bundle->plane_infos[planes_count];

		/*
		 * Only allow immediate flips for fast updates that don't
		 * change FB pitch, DCC state, rotation or mirroing.
		 */
		bundle->flip_addrs[planes_count].flip_immediate =
			crtc->state->async_flip &&
			acrtc_state->update_type == UPDATE_TYPE_FAST;

		timestamp_ns = ktime_get_ns();
		bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
		bundle->surface_updates[planes_count].flip_addr = &bundle->flip_addrs[planes_count];
		/* NOTE(review): .surface was already set to dc_plane above, so
		 * this re-assignment and the NULL check below look redundant;
		 * dc_plane was also dereferenced earlier. Left as-is. */
		bundle->surface_updates[planes_count].surface = dc_plane;

		if (!bundle->surface_updates[planes_count].surface) {
			DRM_ERROR("No surface for CRTC: id=%d\n",
					acrtc_attach->crtc_id);
			continue;
		}

		/* VRR/freesync state is tracked against the primary plane only. */
		if (plane == pcrtc->primary)
			update_freesync_state_on_stream(
				dm,
				acrtc_state,
				acrtc_state->stream,
				dc_plane,
				bundle->flip_addrs[planes_count].flip_timestamp_in_us);

		DRM_DEBUG_DRIVER("%s Flipping to hi: 0x%x, low: 0x%x\n",
				 __func__,
				 bundle->flip_addrs[planes_count].address.grph.addr.high_part,
				 bundle->flip_addrs[planes_count].address.grph.addr.low_part);

		planes_count += 1;

	}

	if (pflip_present) {
		if (!vrr_active) {
			/* Use old throttling in non-vrr fixed refresh rate mode
			 * to keep flip scheduling based on target vblank counts
			 * working in a backwards compatible way, e.g., for
			 * clients using the GLX_OML_sync_control extension or
			 * DRI3/Present extension with defined target_msc.
			 */
			last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc);
		}
		else {
			/* For variable refresh rate mode only:
			 * Get vblank of last completed flip to avoid > 1 vrr
			 * flips per video frame by use of throttling, but allow
			 * flip programming anywhere in the possibly large
			 * variable vrr vblank interval for fine-grained flip
			 * timing control and more opportunity to avoid stutter
			 * on late submission of flips.
			 */
			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
			last_flip_vblank = acrtc_attach->dm_irq_params.last_flip_vblank;
			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
		}

		target_vblank = last_flip_vblank + wait_for_vblank;

		/*
		 * Wait until we're out of the vertical blank period before the one
		 * targeted by the flip. Polls scanout position in ~1ms steps while
		 * the CRTC is in vblank and the target vblank count is not reached.
		 */
		while ((acrtc_attach->enabled &&
			(amdgpu_display_get_crtc_scanoutpos(dm->ddev, acrtc_attach->crtc_id,
							    0, &vpos, &hpos, NULL,
							    NULL, &pcrtc->hwmode)
			 & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
			(DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
			(int)(target_vblank -
			  amdgpu_get_vblank_counter_kms(pcrtc)) > 0)) {
			usleep_range(1000, 1100);
		}

		/**
		 * Prepare the flip event for the pageflip interrupt to handle.
		 *
		 * This only works in the case where we've already turned on the
		 * appropriate hardware blocks (eg. HUBP) so in the transition case
		 * from 0 -> n planes we have to skip a hardware generated event
		 * and rely on sending it from software.
		 */
		if (acrtc_attach->base.state->event &&
		    acrtc_state->active_planes > 0) {
			drm_crtc_vblank_get(pcrtc);

			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);

			WARN_ON(acrtc_attach->pflip_status != AMDGPU_FLIP_NONE);
			prepare_flip_isr(acrtc_attach);

			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
		}

		if (acrtc_state->stream) {
			if (acrtc_state->freesync_vrr_info_changed)
				bundle->stream_update.vrr_infopacket =
					&acrtc_state->stream->vrr_infopacket;
		}
	}

	/* Update the planes if changed or disable if we don't have any. */
	if ((planes_count || acrtc_state->active_planes == 0) &&
		acrtc_state->stream) {
		bundle->stream_update.stream = acrtc_state->stream;
		if (new_pcrtc_state->mode_changed) {
			bundle->stream_update.src = acrtc_state->stream->src;
			bundle->stream_update.dst = acrtc_state->stream->dst;
		}

		if (new_pcrtc_state->color_mgmt_changed) {
			/*
			 * TODO: This isn't fully correct since we've actually
			 * already modified the stream in place.
			 */
			bundle->stream_update.gamut_remap =
				&acrtc_state->stream->gamut_remap_matrix;
			bundle->stream_update.output_csc_transform =
				&acrtc_state->stream->csc_color_matrix;
			bundle->stream_update.out_transfer_func =
				acrtc_state->stream->out_transfer_func;
		}

		acrtc_state->stream->abm_level = acrtc_state->abm_level;
		if (acrtc_state->abm_level != dm_old_crtc_state->abm_level)
			bundle->stream_update.abm_level = &acrtc_state->abm_level;

		/*
		 * If FreeSync state on the stream has changed then we need to
		 * re-adjust the min/max bounds now that DC doesn't handle this
		 * as part of commit.
		 */
		if (amdgpu_dm_vrr_active(dm_old_crtc_state) !=
		    amdgpu_dm_vrr_active(acrtc_state)) {
			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
			dc_stream_adjust_vmin_vmax(
				dm->dc, acrtc_state->stream,
				&acrtc_attach->dm_irq_params.vrr_params.adjust);
			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
		}
		mutex_lock(&dm->dc_lock);
		/* PSR must be off before a non-fast (full) update is committed. */
		if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
				acrtc_state->stream->link->psr_settings.psr_allow_active)
			amdgpu_dm_psr_disable(acrtc_state->stream);

		dc_commit_updates_for_stream(dm->dc,
						     bundle->surface_updates,
						     planes_count,
						     acrtc_state->stream,
						     &bundle->stream_update,
						     dc_state);

		/**
		 * Enable or disable the interrupts on the backend.
		 *
		 * Most pipes are put into power gating when unused.
		 *
		 * When power gating is enabled on a pipe we lose the
		 * interrupt enablement state when power gating is disabled.
		 *
		 * So we need to update the IRQ control state in hardware
		 * whenever the pipe turns on (since it could be previously
		 * power gated) or off (since some pipes can't be power gated
		 * on some ASICs).
		 */
		if (dm_old_crtc_state->active_planes != acrtc_state->active_planes)
			dm_update_pflip_irq_state(drm_to_adev(dev),
						  acrtc_attach);

		/* Set up PSR after a full update; re-enable it after a fast one. */
		if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
				acrtc_state->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED &&
				!acrtc_state->stream->link->psr_settings.psr_feature_enabled)
			amdgpu_dm_link_setup_psr(acrtc_state->stream);
		else if ((acrtc_state->update_type == UPDATE_TYPE_FAST) &&
				acrtc_state->stream->link->psr_settings.psr_feature_enabled &&
				!acrtc_state->stream->link->psr_settings.psr_allow_active) {
			amdgpu_dm_psr_enable(acrtc_state->stream);
		}

		mutex_unlock(&dm->dc_lock);
	}

	/*
	 * Update cursor state *after* programming all the planes.
	 * This avoids redundant programming in the case where we're going
	 * to be disabling a single plane - those pipes are being disabled.
	 */
	if (acrtc_state->active_planes)
		amdgpu_dm_commit_cursors(state);

cleanup:
	kfree(bundle);
}
8060
8061 static void amdgpu_dm_commit_audio(struct drm_device *dev,
8062                                    struct drm_atomic_state *state)
8063 {
8064         struct amdgpu_device *adev = drm_to_adev(dev);
8065         struct amdgpu_dm_connector *aconnector;
8066         struct drm_connector *connector;
8067         struct drm_connector_state *old_con_state, *new_con_state;
8068         struct drm_crtc_state *new_crtc_state;
8069         struct dm_crtc_state *new_dm_crtc_state;
8070         const struct dc_stream_status *status;
8071         int i, inst;
8072
8073         /* Notify device removals. */
8074         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8075                 if (old_con_state->crtc != new_con_state->crtc) {
8076                         /* CRTC changes require notification. */
8077                         goto notify;
8078                 }
8079
8080                 if (!new_con_state->crtc)
8081                         continue;
8082
8083                 new_crtc_state = drm_atomic_get_new_crtc_state(
8084                         state, new_con_state->crtc);
8085
8086                 if (!new_crtc_state)
8087                         continue;
8088
8089                 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
8090                         continue;
8091
8092         notify:
8093                 aconnector = to_amdgpu_dm_connector(connector);
8094
8095                 mutex_lock(&adev->dm.audio_lock);
8096                 inst = aconnector->audio_inst;
8097                 aconnector->audio_inst = -1;
8098                 mutex_unlock(&adev->dm.audio_lock);
8099
8100                 amdgpu_dm_audio_eld_notify(adev, inst);
8101         }
8102
8103         /* Notify audio device additions. */
8104         for_each_new_connector_in_state(state, connector, new_con_state, i) {
8105                 if (!new_con_state->crtc)
8106                         continue;
8107
8108                 new_crtc_state = drm_atomic_get_new_crtc_state(
8109                         state, new_con_state->crtc);
8110
8111                 if (!new_crtc_state)
8112                         continue;
8113
8114                 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
8115                         continue;
8116
8117                 new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
8118                 if (!new_dm_crtc_state->stream)
8119                         continue;
8120
8121                 status = dc_stream_get_status(new_dm_crtc_state->stream);
8122                 if (!status)
8123                         continue;
8124
8125                 aconnector = to_amdgpu_dm_connector(connector);
8126
8127                 mutex_lock(&adev->dm.audio_lock);
8128                 inst = status->audio_inst;
8129                 aconnector->audio_inst = inst;
8130                 mutex_unlock(&adev->dm.audio_lock);
8131
8132                 amdgpu_dm_audio_eld_notify(adev, inst);
8133         }
8134 }
8135
8136 /*
8137  * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
8138  * @crtc_state: the DRM CRTC state
8139  * @stream_state: the DC stream state.
8140  *
8141  * Copy the mirrored transient state flags from DRM, to DC. It is used to bring
8142  * a dc_stream_state's flags in sync with a drm_crtc_state's flags.
8143  */
8144 static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
8145                                                 struct dc_stream_state *stream_state)
8146 {
8147         stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
8148 }
8149
8150 /**
8151  * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
8152  * @state: The atomic state to commit
8153  *
8154  * This will tell DC to commit the constructed DC state from atomic_check,
8155  * programming the hardware. Any failures here implies a hardware failure, since
8156  * atomic check should have filtered anything non-kosher.
8157  */
8158 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
8159 {
8160         struct drm_device *dev = state->dev;
8161         struct amdgpu_device *adev = drm_to_adev(dev);
8162         struct amdgpu_display_manager *dm = &adev->dm;
8163         struct dm_atomic_state *dm_state;
8164         struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
8165         uint32_t i, j;
8166         struct drm_crtc *crtc;
8167         struct drm_crtc_state *old_crtc_state, *new_crtc_state;
8168         unsigned long flags;
8169         bool wait_for_vblank = true;
8170         struct drm_connector *connector;
8171         struct drm_connector_state *old_con_state, *new_con_state;
8172         struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
8173         int crtc_disable_count = 0;
8174         bool mode_set_reset_required = false;
8175
8176         trace_amdgpu_dm_atomic_commit_tail_begin(state);
8177
8178         drm_atomic_helper_update_legacy_modeset_state(dev, state);
8179
8180         dm_state = dm_atomic_get_new_state(state);
8181         if (dm_state && dm_state->context) {
8182                 dc_state = dm_state->context;
8183         } else {
8184                 /* No state changes, retain current state. */
8185                 dc_state_temp = dc_create_state(dm->dc);
8186                 ASSERT(dc_state_temp);
8187                 dc_state = dc_state_temp;
8188                 dc_resource_state_copy_construct_current(dm->dc, dc_state);
8189         }
8190
8191         for_each_oldnew_crtc_in_state (state, crtc, old_crtc_state,
8192                                        new_crtc_state, i) {
8193                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8194
8195                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8196
8197                 if (old_crtc_state->active &&
8198                     (!new_crtc_state->active ||
8199                      drm_atomic_crtc_needs_modeset(new_crtc_state))) {
8200                         manage_dm_interrupts(adev, acrtc, false);
8201                         dc_stream_release(dm_old_crtc_state->stream);
8202                 }
8203         }
8204
8205         drm_atomic_helper_calc_timestamping_constants(state);
8206
8207         /* update changed items */
8208         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8209                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8210
8211                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8212                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8213
8214                 DRM_DEBUG_DRIVER(
8215                         "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
8216                         "planes_changed:%d, mode_changed:%d,active_changed:%d,"
8217                         "connectors_changed:%d\n",
8218                         acrtc->crtc_id,
8219                         new_crtc_state->enable,
8220                         new_crtc_state->active,
8221                         new_crtc_state->planes_changed,
8222                         new_crtc_state->mode_changed,
8223                         new_crtc_state->active_changed,
8224                         new_crtc_state->connectors_changed);
8225
8226                 /* Disable cursor if disabling crtc */
8227                 if (old_crtc_state->active && !new_crtc_state->active) {
8228                         struct dc_cursor_position position;
8229
8230                         memset(&position, 0, sizeof(position));
8231                         mutex_lock(&dm->dc_lock);
8232                         dc_stream_set_cursor_position(dm_old_crtc_state->stream, &position);
8233                         mutex_unlock(&dm->dc_lock);
8234                 }
8235
8236                 /* Copy all transient state flags into dc state */
8237                 if (dm_new_crtc_state->stream) {
8238                         amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base,
8239                                                             dm_new_crtc_state->stream);
8240                 }
8241
8242                 /* handles headless hotplug case, updating new_state and
8243                  * aconnector as needed
8244                  */
8245
8246                 if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {
8247
8248                         DRM_DEBUG_DRIVER("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);
8249
8250                         if (!dm_new_crtc_state->stream) {
8251                                 /*
8252                                  * this could happen because of issues with
8253                                  * userspace notifications delivery.
8254                                  * In this case userspace tries to set mode on
8255                                  * display which is disconnected in fact.
8256                                  * dc_sink is NULL in this case on aconnector.
8257                                  * We expect reset mode will come soon.
8258                                  *
8259                                  * This can also happen when unplug is done
8260                                  * during resume sequence ended
8261                                  *
8262                                  * In this case, we want to pretend we still
8263                                  * have a sink to keep the pipe running so that
8264                                  * hw state is consistent with the sw state
8265                                  */
8266                                 DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
8267                                                 __func__, acrtc->base.base.id);
8268                                 continue;
8269                         }
8270
8271                         if (dm_old_crtc_state->stream)
8272                                 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
8273
8274                         pm_runtime_get_noresume(dev->dev);
8275
8276                         acrtc->enabled = true;
8277                         acrtc->hw_mode = new_crtc_state->mode;
8278                         crtc->hwmode = new_crtc_state->mode;
8279                         mode_set_reset_required = true;
8280                 } else if (modereset_required(new_crtc_state)) {
8281                         DRM_DEBUG_DRIVER("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
8282                         /* i.e. reset mode */
8283                         if (dm_old_crtc_state->stream)
8284                                 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
8285                         mode_set_reset_required = true;
8286                 }
8287         } /* for_each_crtc_in_state() */
8288
8289         if (dc_state) {
8290                 /* if there mode set or reset, disable eDP PSR */
8291                 if (mode_set_reset_required)
8292                         amdgpu_dm_psr_disable_all(dm);
8293
8294                 dm_enable_per_frame_crtc_master_sync(dc_state);
8295                 mutex_lock(&dm->dc_lock);
8296                 WARN_ON(!dc_commit_state(dm->dc, dc_state));
8297                 mutex_unlock(&dm->dc_lock);
8298         }
8299
8300         for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
8301                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8302
8303                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8304
8305                 if (dm_new_crtc_state->stream != NULL) {
8306                         const struct dc_stream_status *status =
8307                                         dc_stream_get_status(dm_new_crtc_state->stream);
8308
8309                         if (!status)
8310                                 status = dc_stream_get_status_from_state(dc_state,
8311                                                                          dm_new_crtc_state->stream);
8312                         if (!status)
8313                                 DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc);
8314                         else
8315                                 acrtc->otg_inst = status->primary_otg_inst;
8316                 }
8317         }
8318 #ifdef CONFIG_DRM_AMD_DC_HDCP
8319         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8320                 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
8321                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
8322                 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
8323
8324                 new_crtc_state = NULL;
8325
8326                 if (acrtc)
8327                         new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
8328
8329                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8330
8331                 if (dm_new_crtc_state && dm_new_crtc_state->stream == NULL &&
8332                     connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
8333                         hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
8334                         new_con_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
8335                         dm_new_con_state->update_hdcp = true;
8336                         continue;
8337                 }
8338
8339                 if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue))
8340                         hdcp_update_display(
8341                                 adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector,
8342                                 new_con_state->hdcp_content_type,
8343                                 new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED ? true
8344                                                                                                          : false);
8345         }
8346 #endif
8347
8348         /* Handle connector state changes */
8349         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8350                 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
8351                 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
8352                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
8353                 struct dc_surface_update dummy_updates[MAX_SURFACES];
8354                 struct dc_stream_update stream_update;
8355                 struct dc_info_packet hdr_packet;
8356                 struct dc_stream_status *status = NULL;
8357                 bool abm_changed, hdr_changed, scaling_changed;
8358
8359                 memset(&dummy_updates, 0, sizeof(dummy_updates));
8360                 memset(&stream_update, 0, sizeof(stream_update));
8361
8362                 if (acrtc) {
8363                         new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
8364                         old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
8365                 }
8366
8367                 /* Skip any modesets/resets */
8368                 if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
8369                         continue;
8370
8371                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8372                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8373
8374                 scaling_changed = is_scaling_state_different(dm_new_con_state,
8375                                                              dm_old_con_state);
8376
8377                 abm_changed = dm_new_crtc_state->abm_level !=
8378                               dm_old_crtc_state->abm_level;
8379
8380                 hdr_changed =
8381                         is_hdr_metadata_different(old_con_state, new_con_state);
8382
8383                 if (!scaling_changed && !abm_changed && !hdr_changed)
8384                         continue;
8385
8386                 stream_update.stream = dm_new_crtc_state->stream;
8387                 if (scaling_changed) {
8388                         update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
8389                                         dm_new_con_state, dm_new_crtc_state->stream);
8390
8391                         stream_update.src = dm_new_crtc_state->stream->src;
8392                         stream_update.dst = dm_new_crtc_state->stream->dst;
8393                 }
8394
8395                 if (abm_changed) {
8396                         dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level;
8397
8398                         stream_update.abm_level = &dm_new_crtc_state->abm_level;
8399                 }
8400
8401                 if (hdr_changed) {
8402                         fill_hdr_info_packet(new_con_state, &hdr_packet);
8403                         stream_update.hdr_static_metadata = &hdr_packet;
8404                 }
8405
8406                 status = dc_stream_get_status(dm_new_crtc_state->stream);
8407                 WARN_ON(!status);
8408                 WARN_ON(!status->plane_count);
8409
8410                 /*
8411                  * TODO: DC refuses to perform stream updates without a dc_surface_update.
8412                  * Here we create an empty update on each plane.
8413                  * To fix this, DC should permit updating only stream properties.
8414                  */
8415                 for (j = 0; j < status->plane_count; j++)
8416                         dummy_updates[j].surface = status->plane_states[0];
8417
8418
8419                 mutex_lock(&dm->dc_lock);
8420                 dc_commit_updates_for_stream(dm->dc,
8421                                                      dummy_updates,
8422                                                      status->plane_count,
8423                                                      dm_new_crtc_state->stream,
8424                                                      &stream_update,
8425                                                      dc_state);
8426                 mutex_unlock(&dm->dc_lock);
8427         }
8428
8429         /* Count number of newly disabled CRTCs for dropping PM refs later. */
8430         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
8431                                       new_crtc_state, i) {
8432                 if (old_crtc_state->active && !new_crtc_state->active)
8433                         crtc_disable_count++;
8434
8435                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8436                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8437
8438                 /* For freesync config update on crtc state and params for irq */
8439                 update_stream_irq_parameters(dm, dm_new_crtc_state);
8440
8441                 /* Handle vrr on->off / off->on transitions */
8442                 amdgpu_dm_handle_vrr_transition(dm_old_crtc_state,
8443                                                 dm_new_crtc_state);
8444         }
8445
8446         /**
8447          * Enable interrupts for CRTCs that are newly enabled or went through
8448          * a modeset. It was intentionally deferred until after the front end
8449          * state was modified to wait until the OTG was on and so the IRQ
8450          * handlers didn't access stale or invalid state.
8451          */
8452         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8453                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8454                 bool configure_crc = false;
8455
8456                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8457
8458                 if (new_crtc_state->active &&
8459                     (!old_crtc_state->active ||
8460                      drm_atomic_crtc_needs_modeset(new_crtc_state))) {
8461                         dc_stream_retain(dm_new_crtc_state->stream);
8462                         acrtc->dm_irq_params.stream = dm_new_crtc_state->stream;
8463                         manage_dm_interrupts(adev, acrtc, true);
8464                 }
8465                 if (IS_ENABLED(CONFIG_DEBUG_FS) && new_crtc_state->active &&
8466                         amdgpu_dm_is_valid_crc_source(dm_new_crtc_state->crc_src)) {
8467                         /**
8468                          * Frontend may have changed so reapply the CRC capture
8469                          * settings for the stream.
8470                          */
8471                         dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8472                         dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8473
8474                         if (amdgpu_dm_crc_window_is_default(dm_new_crtc_state)) {
8475                                 if (!old_crtc_state->active || drm_atomic_crtc_needs_modeset(new_crtc_state))
8476                                         configure_crc = true;
8477                         } else {
8478                                 if (amdgpu_dm_crc_window_changed(dm_new_crtc_state, dm_old_crtc_state))
8479                                         configure_crc = true;
8480                         }
8481
8482                         if (configure_crc)
8483                                 amdgpu_dm_crtc_configure_crc_source(
8484                                         crtc, dm_new_crtc_state, dm_new_crtc_state->crc_src);
8485                 }
8486         }
8487
8488         for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
8489                 if (new_crtc_state->async_flip)
8490                         wait_for_vblank = false;
8491
8492         /* update planes when needed per crtc*/
8493         for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
8494                 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8495
8496                 if (dm_new_crtc_state->stream)
8497                         amdgpu_dm_commit_planes(state, dc_state, dev,
8498                                                 dm, crtc, wait_for_vblank);
8499         }
8500
8501         /* Update audio instances for each connector. */
8502         amdgpu_dm_commit_audio(dev, state);
8503
8504         /*
8505          * send vblank event on all events not handled in flip and
8506          * mark consumed event for drm_atomic_helper_commit_hw_done
8507          */
8508         spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
8509         for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
8510
8511                 if (new_crtc_state->event)
8512                         drm_send_event_locked(dev, &new_crtc_state->event->base);
8513
8514                 new_crtc_state->event = NULL;
8515         }
8516         spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
8517
8518         /* Signal HW programming completion */
8519         drm_atomic_helper_commit_hw_done(state);
8520
8521         if (wait_for_vblank)
8522                 drm_atomic_helper_wait_for_flip_done(dev, state);
8523
8524         drm_atomic_helper_cleanup_planes(dev, state);
8525
8526         /* return the stolen vga memory back to VRAM */
8527         if (!adev->mman.keep_stolen_vga_memory)
8528                 amdgpu_bo_free_kernel(&adev->mman.stolen_vga_memory, NULL, NULL);
8529         amdgpu_bo_free_kernel(&adev->mman.stolen_extended_memory, NULL, NULL);
8530
8531         /*
8532          * Finally, drop a runtime PM reference for each newly disabled CRTC,
8533          * so we can put the GPU into runtime suspend if we're not driving any
8534          * displays anymore
8535          */
8536         for (i = 0; i < crtc_disable_count; i++)
8537                 pm_runtime_put_autosuspend(dev->dev);
8538         pm_runtime_mark_last_busy(dev->dev);
8539
8540         if (dc_state_temp)
8541                 dc_release_state(dc_state_temp);
8542 }
8543
8544
8545 static int dm_force_atomic_commit(struct drm_connector *connector)
8546 {
8547         int ret = 0;
8548         struct drm_device *ddev = connector->dev;
8549         struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
8550         struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
8551         struct drm_plane *plane = disconnected_acrtc->base.primary;
8552         struct drm_connector_state *conn_state;
8553         struct drm_crtc_state *crtc_state;
8554         struct drm_plane_state *plane_state;
8555
8556         if (!state)
8557                 return -ENOMEM;
8558
8559         state->acquire_ctx = ddev->mode_config.acquire_ctx;
8560
8561         /* Construct an atomic state to restore previous display setting */
8562
8563         /*
8564          * Attach connectors to drm_atomic_state
8565          */
8566         conn_state = drm_atomic_get_connector_state(state, connector);
8567
8568         ret = PTR_ERR_OR_ZERO(conn_state);
8569         if (ret)
8570                 goto err;
8571
8572         /* Attach crtc to drm_atomic_state*/
8573         crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
8574
8575         ret = PTR_ERR_OR_ZERO(crtc_state);
8576         if (ret)
8577                 goto err;
8578
8579         /* force a restore */
8580         crtc_state->mode_changed = true;
8581
8582         /* Attach plane to drm_atomic_state */
8583         plane_state = drm_atomic_get_plane_state(state, plane);
8584
8585         ret = PTR_ERR_OR_ZERO(plane_state);
8586         if (ret)
8587                 goto err;
8588
8589
8590         /* Call commit internally with the state we just constructed */
8591         ret = drm_atomic_commit(state);
8592         if (!ret)
8593                 return 0;
8594
8595 err:
8596         DRM_ERROR("Restoring old state failed with %i\n", ret);
8597         drm_atomic_state_put(state);
8598
8599         return ret;
8600 }
8601
8602 /*
8603  * This function handles all cases when set mode does not come upon hotplug.
8604  * This includes when a display is unplugged then plugged back into the
8605  * same port and when running without usermode desktop manager supprot
8606  */
8607 void dm_restore_drm_connector_state(struct drm_device *dev,
8608                                     struct drm_connector *connector)
8609 {
8610         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
8611         struct amdgpu_crtc *disconnected_acrtc;
8612         struct dm_crtc_state *acrtc_state;
8613
8614         if (!aconnector->dc_sink || !connector->state || !connector->encoder)
8615                 return;
8616
8617         disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
8618         if (!disconnected_acrtc)
8619                 return;
8620
8621         acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
8622         if (!acrtc_state->stream)
8623                 return;
8624
8625         /*
8626          * If the previous sink is not released and different from the current,
8627          * we deduce we are in a state where we can not rely on usermode call
8628          * to turn on the display, so we do it here
8629          */
8630         if (acrtc_state->stream->sink != aconnector->dc_sink)
8631                 dm_force_atomic_commit(&aconnector->base);
8632 }
8633
/*
 * do_aquire_global_lock() - Grab all modesetting locks and drain in-flight
 * commits.
 *
 * Serializes against any blocking commits by taking every modeset lock,
 * then waits for completion of all non-blocking commits currently queued
 * on each CRTC.
 *
 * Return: 0 on success (including wait timeouts, which are only logged),
 * or a negative errno — e.g. -ERESTARTSYS if a wait was interrupted, or a
 * locking error from drm_modeset_lock_all_ctx().
 */
static int do_aquire_global_lock(struct drm_device *dev,
				 struct drm_atomic_state *state)
{
	struct drm_crtc *crtc;
	struct drm_crtc_commit *commit;
	long ret;

	/*
	 * Add every modeset lock to state->acquire_ctx so that when the
	 * framework later drops the context, the extra locks taken here
	 * are released along with it.
	 */
	ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
	if (ret)
		return ret;

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		/*
		 * Take a reference on the newest pending commit while still
		 * under commit_lock, so it cannot be freed once we drop the
		 * spinlock and start waiting on it.
		 */
		spin_lock(&crtc->commit_lock);
		commit = list_first_entry_or_null(&crtc->commit_list,
				struct drm_crtc_commit, commit_entry);
		if (commit)
			drm_crtc_commit_get(commit);
		spin_unlock(&crtc->commit_lock);

		if (!commit)
			continue;

		/*
		 * Make sure all pending HW programming completed and
		 * page flips done
		 */
		ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);

		/* Only wait for the flip if hw_done neither timed out nor
		 * was interrupted. */
		if (ret > 0)
			ret = wait_for_completion_interruptible_timeout(
					&commit->flip_done, 10*HZ);

		/* ret == 0 is a timeout: logged, but not treated as fatal
		 * (see the final return below). */
		if (ret == 0)
			DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done "
				  "timed out\n", crtc->base.id, crtc->name);

		drm_crtc_commit_put(commit);
	}

	/*
	 * NOTE: only the last iterated CRTC's wait result survives here;
	 * negative (interrupted) propagates, timeout (0) and success
	 * collapse to 0.
	 */
	return ret < 0 ? ret : 0;
}
8684
8685 static void get_freesync_config_for_crtc(
8686         struct dm_crtc_state *new_crtc_state,
8687         struct dm_connector_state *new_con_state)
8688 {
8689         struct mod_freesync_config config = {0};
8690         struct amdgpu_dm_connector *aconnector =
8691                         to_amdgpu_dm_connector(new_con_state->base.connector);
8692         struct drm_display_mode *mode = &new_crtc_state->base.mode;
8693         int vrefresh = drm_mode_vrefresh(mode);
8694
8695         new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
8696                                         vrefresh >= aconnector->min_vfreq &&
8697                                         vrefresh <= aconnector->max_vfreq;
8698
8699         if (new_crtc_state->vrr_supported) {
8700                 new_crtc_state->stream->ignore_msa_timing_param = true;
8701                 config.state = new_crtc_state->base.vrr_enabled ?
8702                                 VRR_STATE_ACTIVE_VARIABLE :
8703                                 VRR_STATE_INACTIVE;
8704                 config.min_refresh_in_uhz =
8705                                 aconnector->min_vfreq * 1000000;
8706                 config.max_refresh_in_uhz =
8707                                 aconnector->max_vfreq * 1000000;
8708                 config.vsif_supported = true;
8709                 config.btr = true;
8710         }
8711
8712         new_crtc_state->freesync_config = config;
8713 }
8714
/*
 * Clear any previously derived VRR state on the CRTC: mark VRR as
 * unsupported and wipe the stale VRR infoframe so it is not re-sent.
 */
static void reset_freesync_config_for_crtc(
	struct dm_crtc_state *new_crtc_state)
{
	new_crtc_state->vrr_supported = false;

	memset(&new_crtc_state->vrr_infopacket, 0,
	       sizeof(new_crtc_state->vrr_infopacket));
}
8723
/*
 * dm_update_crtc_state() - Validate and stage one CRTC's stream changes
 * during atomic check.
 * @dm: display manager owning the dc instance
 * @state: overall atomic state being checked
 * @crtc: the CRTC being processed
 * @old_crtc_state: currently committed CRTC state
 * @new_crtc_state: requested CRTC state
 * @enable: false = remove this CRTC's stream from the dc context;
 *	true = (re)create the stream and add it to the context
 * @lock_and_validation_needed: set to true whenever the dc context was
 *	modified here (stream added or removed); never cleared by this
 *	function
 *
 * Also applies non-modeset stream updates (scaling, ABM, color mgmt,
 * FreeSync) at the end when the CRTC stays active.
 *
 * Return: 0 on success, negative errno on failure. On failure the extra
 * reference on any newly created stream is dropped before returning.
 */
static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
				struct drm_atomic_state *state,
				struct drm_crtc *crtc,
				struct drm_crtc_state *old_crtc_state,
				struct drm_crtc_state *new_crtc_state,
				bool enable,
				bool *lock_and_validation_needed)
{
	struct dm_atomic_state *dm_state = NULL;
	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
	struct dc_stream_state *new_stream;
	int ret = 0;

	/*
	 * TODO Move this code into dm_crtc_atomic_check once we get rid of dc_validation_set
	 * update changed items
	 */
	struct amdgpu_crtc *acrtc = NULL;
	struct amdgpu_dm_connector *aconnector = NULL;
	struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
	struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;

	new_stream = NULL;

	dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
	dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
	acrtc = to_amdgpu_crtc(crtc);
	aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);

	/* TODO This hack should go away */
	if (aconnector && enable) {
		/* Make sure fake sink is created in plug-in scenario */
		drm_new_conn_state = drm_atomic_get_new_connector_state(state,
							    &aconnector->base);
		drm_old_conn_state = drm_atomic_get_old_connector_state(state,
							    &aconnector->base);

		if (IS_ERR(drm_new_conn_state)) {
			ret = PTR_ERR_OR_ZERO(drm_new_conn_state);
			goto fail;
		}

		dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
		dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);

		/* No modeset requested: skip straight to the non-modeset
		 * stream updates at the bottom. */
		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
			goto skip_modeset;

		/* Creates a stream we own one reference on; released at
		 * skip_modeset/fail below. */
		new_stream = create_validate_stream_for_sink(aconnector,
							     &new_crtc_state->mode,
							     dm_new_conn_state,
							     dm_old_crtc_state->stream);

		/*
		 * we can have no stream on ACTION_SET if a display
		 * was disconnected during S3, in this case it is not an
		 * error, the OS will be updated after detection, and
		 * will do the right thing on next atomic commit
		 */

		if (!new_stream) {
			/*
			 * NOTE(review): the comment above says a missing
			 * stream is "not an error", yet we return -ENOMEM
			 * here — confirm which behavior is intended.
			 */
			DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
					__func__, acrtc->base.base.id);
			ret = -ENOMEM;
			goto fail;
		}

		/*
		 * TODO: Check VSDB bits to decide whether this should
		 * be enabled or not.
		 */
		new_stream->triggered_crtc_reset.enabled =
			dm->force_timing_sync;

		dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;

		ret = fill_hdr_info_packet(drm_new_conn_state,
					   &new_stream->hdr_static_metadata);
		if (ret)
			goto fail;

		/*
		 * If we already removed the old stream from the context
		 * (and set the new stream to NULL) then we can't reuse
		 * the old stream even if the stream and scaling are unchanged.
		 * We'll hit the BUG_ON and black screen.
		 *
		 * TODO: Refactor this function to allow this check to work
		 * in all conditions.
		 */
		if (dm_new_crtc_state->stream &&
		    dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
		    dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
			new_crtc_state->mode_changed = false;
			DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d",
					 new_crtc_state->mode_changed);
		}
	}

	/* mode_changed flag may get updated above, need to check again */
	if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
		goto skip_modeset;

	DRM_DEBUG_DRIVER(
		"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
		"planes_changed:%d, mode_changed:%d,active_changed:%d,"
		"connectors_changed:%d\n",
		acrtc->crtc_id,
		new_crtc_state->enable,
		new_crtc_state->active,
		new_crtc_state->planes_changed,
		new_crtc_state->mode_changed,
		new_crtc_state->active_changed,
		new_crtc_state->connectors_changed);

	/* Remove stream for any changed/disabled CRTC */
	if (!enable) {

		/* Nothing to remove if there was no stream. */
		if (!dm_old_crtc_state->stream)
			goto skip_modeset;

		ret = dm_atomic_get_state(state, &dm_state);
		if (ret)
			goto fail;

		DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
				crtc->base.id);

		/* i.e. reset mode */
		if (dc_remove_stream_from_ctx(
				dm->dc,
				dm_state->context,
				dm_old_crtc_state->stream) != DC_OK) {
			ret = -EINVAL;
			goto fail;
		}

		/* Drop the CRTC-state reference on the old stream. */
		dc_stream_release(dm_old_crtc_state->stream);
		dm_new_crtc_state->stream = NULL;

		reset_freesync_config_for_crtc(dm_new_crtc_state);

		/* dc context changed: caller must run full dc validation. */
		*lock_and_validation_needed = true;

	} else {/* Add stream for any updated/enabled CRTC */
		/*
		 * Quick fix to prevent NULL pointer on new_stream when
		 * added MST connectors not found in existing crtc_state in the chained mode
		 * TODO: need to dig out the root cause of that
		 */
		if (!aconnector || (!aconnector->dc_sink && aconnector->mst_port))
			goto skip_modeset;

		if (modereset_required(new_crtc_state))
			goto skip_modeset;

		if (modeset_required(new_crtc_state, new_stream,
				     dm_old_crtc_state->stream)) {

			WARN_ON(dm_new_crtc_state->stream);

			ret = dm_atomic_get_state(state, &dm_state);
			if (ret)
				goto fail;

			dm_new_crtc_state->stream = new_stream;

			/* CRTC state takes its own reference on the stream. */
			dc_stream_retain(new_stream);

			DRM_DEBUG_DRIVER("Enabling DRM crtc: %d\n",
						crtc->base.id);

			if (dc_add_stream_to_ctx(
					dm->dc,
					dm_state->context,
					dm_new_crtc_state->stream) != DC_OK) {
				ret = -EINVAL;
				goto fail;
			}

			/* dc context changed: caller must run full dc
			 * validation. */
			*lock_and_validation_needed = true;
		}
	}

skip_modeset:
	/* Release extra reference: this drops the local reference from
	 * create_validate_stream_for_sink(); if the stream was attached to
	 * the CRTC state it holds its own via dc_stream_retain() above. */
	if (new_stream)
		 dc_stream_release(new_stream);

	/*
	 * We want to do dc stream updates that do not require a
	 * full modeset below.
	 */
	if (!(enable && aconnector && new_crtc_state->active))
		return 0;
	/*
	 * Given above conditions, the dc state cannot be NULL because:
	 * 1. We're in the process of enabling CRTCs (just been added
	 *    to the dc context, or already is on the context)
	 * 2. Has a valid connector attached, and
	 * 3. Is currently active and enabled.
	 * => The dc stream state currently exists.
	 */
	BUG_ON(dm_new_crtc_state->stream == NULL);

	/* Scaling or underscan settings */
	if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state))
		update_stream_scaling_settings(
			&new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);

	/* ABM settings */
	dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;

	/*
	 * Color management settings. We also update color properties
	 * when a modeset is needed, to ensure it gets reprogrammed.
	 */
	if (dm_new_crtc_state->base.color_mgmt_changed ||
	    drm_atomic_crtc_needs_modeset(new_crtc_state)) {
		ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state);
		if (ret)
			goto fail;
	}

	/* Update Freesync settings. */
	get_freesync_config_for_crtc(dm_new_crtc_state,
				     dm_new_conn_state);

	return ret;

fail:
	/* Drop our local reference on the freshly created stream. */
	if (new_stream)
		dc_stream_release(new_stream);
	return ret;
}
8959
8960 static bool should_reset_plane(struct drm_atomic_state *state,
8961                                struct drm_plane *plane,
8962                                struct drm_plane_state *old_plane_state,
8963                                struct drm_plane_state *new_plane_state)
8964 {
8965         struct drm_plane *other;
8966         struct drm_plane_state *old_other_state, *new_other_state;
8967         struct drm_crtc_state *new_crtc_state;
8968         int i;
8969
8970         /*
8971          * TODO: Remove this hack once the checks below are sufficient
8972          * enough to determine when we need to reset all the planes on
8973          * the stream.
8974          */
8975         if (state->allow_modeset)
8976                 return true;
8977
8978         /* Exit early if we know that we're adding or removing the plane. */
8979         if (old_plane_state->crtc != new_plane_state->crtc)
8980                 return true;
8981
8982         /* old crtc == new_crtc == NULL, plane not in context. */
8983         if (!new_plane_state->crtc)
8984                 return false;
8985
8986         new_crtc_state =
8987                 drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);
8988
8989         if (!new_crtc_state)
8990                 return true;
8991
8992         /* CRTC Degamma changes currently require us to recreate planes. */
8993         if (new_crtc_state->color_mgmt_changed)
8994                 return true;
8995
8996         if (drm_atomic_crtc_needs_modeset(new_crtc_state))
8997                 return true;
8998
8999         /*
9000          * If there are any new primary or overlay planes being added or
9001          * removed then the z-order can potentially change. To ensure
9002          * correct z-order and pipe acquisition the current DC architecture
9003          * requires us to remove and recreate all existing planes.
9004          *
9005          * TODO: Come up with a more elegant solution for this.
9006          */
9007         for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
9008                 struct amdgpu_framebuffer *old_afb, *new_afb;
9009                 if (other->type == DRM_PLANE_TYPE_CURSOR)
9010                         continue;
9011
9012                 if (old_other_state->crtc != new_plane_state->crtc &&
9013                     new_other_state->crtc != new_plane_state->crtc)
9014                         continue;
9015
9016                 if (old_other_state->crtc != new_other_state->crtc)
9017                         return true;
9018
9019                 /* Src/dst size and scaling updates. */
9020                 if (old_other_state->src_w != new_other_state->src_w ||
9021                     old_other_state->src_h != new_other_state->src_h ||
9022                     old_other_state->crtc_w != new_other_state->crtc_w ||
9023                     old_other_state->crtc_h != new_other_state->crtc_h)
9024                         return true;
9025
9026                 /* Rotation / mirroring updates. */
9027                 if (old_other_state->rotation != new_other_state->rotation)
9028                         return true;
9029
9030                 /* Blending updates. */
9031                 if (old_other_state->pixel_blend_mode !=
9032                     new_other_state->pixel_blend_mode)
9033                         return true;
9034
9035                 /* Alpha updates. */
9036                 if (old_other_state->alpha != new_other_state->alpha)
9037                         return true;
9038
9039                 /* Colorspace changes. */
9040                 if (old_other_state->color_range != new_other_state->color_range ||
9041                     old_other_state->color_encoding != new_other_state->color_encoding)
9042                         return true;
9043
9044                 /* Framebuffer checks fall at the end. */
9045                 if (!old_other_state->fb || !new_other_state->fb)
9046                         continue;
9047
9048                 /* Pixel format changes can require bandwidth updates. */
9049                 if (old_other_state->fb->format != new_other_state->fb->format)
9050                         return true;
9051
9052                 old_afb = (struct amdgpu_framebuffer *)old_other_state->fb;
9053                 new_afb = (struct amdgpu_framebuffer *)new_other_state->fb;
9054
9055                 /* Tiling and DCC changes also require bandwidth updates. */
9056                 if (old_afb->tiling_flags != new_afb->tiling_flags ||
9057                     old_afb->base.modifier != new_afb->base.modifier)
9058                         return true;
9059         }
9060
9061         return false;
9062 }
9063
9064 static int dm_check_cursor_fb(struct amdgpu_crtc *new_acrtc,
9065                               struct drm_plane_state *new_plane_state,
9066                               struct drm_framebuffer *fb)
9067 {
9068         struct amdgpu_device *adev = drm_to_adev(new_acrtc->base.dev);
9069         struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(fb);
9070         unsigned int pitch;
9071         bool linear;
9072
9073         if (fb->width > new_acrtc->max_cursor_width ||
9074             fb->height > new_acrtc->max_cursor_height) {
9075                 DRM_DEBUG_ATOMIC("Bad cursor FB size %dx%d\n",
9076                                  new_plane_state->fb->width,
9077                                  new_plane_state->fb->height);
9078                 return -EINVAL;
9079         }
9080         if (new_plane_state->src_w != fb->width << 16 ||
9081             new_plane_state->src_h != fb->height << 16) {
9082                 DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
9083                 return -EINVAL;
9084         }
9085
9086         /* Pitch in pixels */
9087         pitch = fb->pitches[0] / fb->format->cpp[0];
9088
9089         if (fb->width != pitch) {
9090                 DRM_DEBUG_ATOMIC("Cursor FB width %d doesn't match pitch %d",
9091                                  fb->width, pitch);
9092                 return -EINVAL;
9093         }
9094
9095         switch (pitch) {
9096         case 64:
9097         case 128:
9098         case 256:
9099                 /* FB pitch is supported by cursor plane */
9100                 break;
9101         default:
9102                 DRM_DEBUG_ATOMIC("Bad cursor FB pitch %d px\n", pitch);
9103                 return -EINVAL;
9104         }
9105
9106         /* Core DRM takes care of checking FB modifiers, so we only need to
9107          * check tiling flags when the FB doesn't have a modifier. */
9108         if (!(fb->flags & DRM_MODE_FB_MODIFIERS)) {
9109                 if (adev->family < AMDGPU_FAMILY_AI) {
9110                         linear = AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_2D_TILED_THIN1 &&
9111                                  AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_1D_TILED_THIN1 &&
9112                                  AMDGPU_TILING_GET(afb->tiling_flags, MICRO_TILE_MODE) == 0;
9113                 } else {
9114                         linear = AMDGPU_TILING_GET(afb->tiling_flags, SWIZZLE_MODE) == 0;
9115                 }
9116                 if (!linear) {
9117                         DRM_DEBUG_ATOMIC("Cursor FB not linear");
9118                         return -EINVAL;
9119                 }
9120         }
9121
9122         return 0;
9123 }
9124
/*
 * Update the DC plane state to match a DRM plane transition in the atomic
 * state.
 *
 * Called twice per plane from atomic check: once with @enable == false to
 * remove changed/disabled planes from the DC context, and once with
 * @enable == true to (re)add them. Cursor planes are handled separately
 * since DC has no dedicated cursor plane; only FB/position validation is
 * done for them here.
 *
 * @dc: DC service handle.
 * @state: The global DRM atomic state being checked.
 * @plane: The DRM plane being updated.
 * @old_plane_state / @new_plane_state: Old and new DRM states for @plane.
 * @enable: false = removal pass, true = addition pass.
 * @lock_and_validation_needed: Set to true when the change requires global
 *	DC validation and the global lock later in atomic check.
 *
 * Returns 0 on success or a negative error code.
 */
static int dm_update_plane_state(struct dc *dc,
				 struct drm_atomic_state *state,
				 struct drm_plane *plane,
				 struct drm_plane_state *old_plane_state,
				 struct drm_plane_state *new_plane_state,
				 bool enable,
				 bool *lock_and_validation_needed)
{

	struct dm_atomic_state *dm_state = NULL;
	struct drm_crtc *new_plane_crtc, *old_plane_crtc;
	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
	struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
	struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
	struct amdgpu_crtc *new_acrtc;
	bool needs_reset;
	int ret = 0;


	new_plane_crtc = new_plane_state->crtc;
	old_plane_crtc = old_plane_state->crtc;
	dm_new_plane_state = to_dm_plane_state(new_plane_state);
	dm_old_plane_state = to_dm_plane_state(old_plane_state);

	if (plane->type == DRM_PLANE_TYPE_CURSOR) {
		/* Nothing to validate for a disabled or detached cursor. */
		if (!enable || !new_plane_crtc ||
			drm_atomic_plane_disabling(plane->state, new_plane_state))
			return 0;

		new_acrtc = to_amdgpu_crtc(new_plane_crtc);

		/* src offsets are 16.16 fixed point; cursor can't be cropped. */
		if (new_plane_state->src_x != 0 || new_plane_state->src_y != 0) {
			DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
			return -EINVAL;
		}

		if (new_plane_state->fb) {
			ret = dm_check_cursor_fb(new_acrtc, new_plane_state,
						 new_plane_state->fb);
			if (ret)
				return ret;
		}

		return 0;
	}

	needs_reset = should_reset_plane(state, plane, old_plane_state,
					 new_plane_state);

	/* Remove any changed/removed planes */
	if (!enable) {
		if (!needs_reset)
			return 0;

		/* Plane wasn't attached to a CRTC; nothing in DC to remove. */
		if (!old_plane_crtc)
			return 0;

		old_crtc_state = drm_atomic_get_old_crtc_state(
				state, old_plane_crtc);
		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);

		/* No DC stream means the plane was never in the DC context. */
		if (!dm_old_crtc_state->stream)
			return 0;

		DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
				plane->base.id, old_plane_crtc->base.id);

		ret = dm_atomic_get_state(state, &dm_state);
		if (ret)
			return ret;

		if (!dc_remove_plane_from_context(
				dc,
				dm_old_crtc_state->stream,
				dm_old_plane_state->dc_state,
				dm_state->context)) {

			return -EINVAL;
		}


		/* Drop our reference; the new state no longer owns a DC plane. */
		dc_plane_state_release(dm_old_plane_state->dc_state);
		dm_new_plane_state->dc_state = NULL;

		*lock_and_validation_needed = true;

	} else { /* Add new planes */
		struct dc_plane_state *dc_new_plane_state;

		if (drm_atomic_plane_disabling(plane->state, new_plane_state))
			return 0;

		if (!new_plane_crtc)
			return 0;

		new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);

		/* Target CRTC has no DC stream; nothing to attach to. */
		if (!dm_new_crtc_state->stream)
			return 0;

		if (!needs_reset)
			return 0;

		ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
		if (ret)
			return ret;

		/* Removal pass should have cleared this; a leftover would leak. */
		WARN_ON(dm_new_plane_state->dc_state);

		dc_new_plane_state = dc_create_plane_state(dc);
		if (!dc_new_plane_state)
			return -ENOMEM;

		DRM_DEBUG_DRIVER("Enabling DRM plane: %d on DRM crtc %d\n",
				plane->base.id, new_plane_crtc->base.id);

		ret = fill_dc_plane_attributes(
			drm_to_adev(new_plane_crtc->dev),
			dc_new_plane_state,
			new_plane_state,
			new_crtc_state);
		if (ret) {
			dc_plane_state_release(dc_new_plane_state);
			return ret;
		}

		ret = dm_atomic_get_state(state, &dm_state);
		if (ret) {
			dc_plane_state_release(dc_new_plane_state);
			return ret;
		}

		/*
		 * Any atomic check errors that occur after this will
		 * not need a release. The plane state will be attached
		 * to the stream, and therefore part of the atomic
		 * state. It'll be released when the atomic state is
		 * cleaned.
		 */
		if (!dc_add_plane_to_context(
				dc,
				dm_new_crtc_state->stream,
				dc_new_plane_state,
				dm_state->context)) {

			dc_plane_state_release(dc_new_plane_state);
			return -EINVAL;
		}

		dm_new_plane_state->dc_state = dc_new_plane_state;

		/* Tell DC to do a full surface update every time there
		 * is a plane change. Inefficient, but works for now.
		 */
		dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;

		*lock_and_validation_needed = true;
	}


	return ret;
}
9288
9289 static int dm_check_crtc_cursor(struct drm_atomic_state *state,
9290                                 struct drm_crtc *crtc,
9291                                 struct drm_crtc_state *new_crtc_state)
9292 {
9293         struct drm_plane_state *new_cursor_state, *new_primary_state;
9294         int cursor_scale_w, cursor_scale_h, primary_scale_w, primary_scale_h;
9295
9296         /* On DCE and DCN there is no dedicated hardware cursor plane. We get a
9297          * cursor per pipe but it's going to inherit the scaling and
9298          * positioning from the underlying pipe. Check the cursor plane's
9299          * blending properties match the primary plane's. */
9300
9301         new_cursor_state = drm_atomic_get_new_plane_state(state, crtc->cursor);
9302         new_primary_state = drm_atomic_get_new_plane_state(state, crtc->primary);
9303         if (!new_cursor_state || !new_primary_state || !new_cursor_state->fb) {
9304                 return 0;
9305         }
9306
9307         cursor_scale_w = new_cursor_state->crtc_w * 1000 /
9308                          (new_cursor_state->src_w >> 16);
9309         cursor_scale_h = new_cursor_state->crtc_h * 1000 /
9310                          (new_cursor_state->src_h >> 16);
9311
9312         primary_scale_w = new_primary_state->crtc_w * 1000 /
9313                          (new_primary_state->src_w >> 16);
9314         primary_scale_h = new_primary_state->crtc_h * 1000 /
9315                          (new_primary_state->src_h >> 16);
9316
9317         if (cursor_scale_w != primary_scale_w ||
9318             cursor_scale_h != primary_scale_h) {
9319                 DRM_DEBUG_ATOMIC("Cursor plane scaling doesn't match primary plane\n");
9320                 return -EINVAL;
9321         }
9322
9323         return 0;
9324 }
9325
9326 #if defined(CONFIG_DRM_AMD_DC_DCN)
9327 static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc)
9328 {
9329         struct drm_connector *connector;
9330         struct drm_connector_state *conn_state;
9331         struct amdgpu_dm_connector *aconnector = NULL;
9332         int i;
9333         for_each_new_connector_in_state(state, connector, conn_state, i) {
9334                 if (conn_state->crtc != crtc)
9335                         continue;
9336
9337                 aconnector = to_amdgpu_dm_connector(connector);
9338                 if (!aconnector->port || !aconnector->mst_port)
9339                         aconnector = NULL;
9340                 else
9341                         break;
9342         }
9343
9344         if (!aconnector)
9345                 return 0;
9346
9347         return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_port->mst_mgr);
9348 }
9349 #endif
9350
9351 /**
9352  * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
9353  * @dev: The DRM device
9354  * @state: The atomic state to commit
9355  *
9356  * Validate that the given atomic state is programmable by DC into hardware.
9357  * This involves constructing a &struct dc_state reflecting the new hardware
9358  * state we wish to commit, then querying DC to see if it is programmable. It's
9359  * important not to modify the existing DC state. Otherwise, atomic_check
9360  * may unexpectedly commit hardware changes.
9361  *
9362  * When validating the DC state, it's important that the right locks are
9363  * acquired. For full updates case which removes/adds/updates streams on one
9364  * CRTC while flipping on another CRTC, acquiring global lock will guarantee
9365  * that any such full update commit will wait for completion of any outstanding
9366  * flip using DRMs synchronization events.
9367  *
9368  * Note that DM adds the affected connectors for all CRTCs in state, when that
9369  * might not seem necessary. This is because DC stream creation requires the
9370  * DC sink, which is tied to the DRM connector state. Cleaning this up should
9371  * be possible but non-trivial - a possible TODO item.
9372  *
 * Return: 0 on success, or a negative error code if validation failed.
9374  */
9375 static int amdgpu_dm_atomic_check(struct drm_device *dev,
9376                                   struct drm_atomic_state *state)
9377 {
9378         struct amdgpu_device *adev = drm_to_adev(dev);
9379         struct dm_atomic_state *dm_state = NULL;
9380         struct dc *dc = adev->dm.dc;
9381         struct drm_connector *connector;
9382         struct drm_connector_state *old_con_state, *new_con_state;
9383         struct drm_crtc *crtc;
9384         struct drm_crtc_state *old_crtc_state, *new_crtc_state;
9385         struct drm_plane *plane;
9386         struct drm_plane_state *old_plane_state, *new_plane_state;
9387         enum dc_status status;
9388         int ret, i;
9389         bool lock_and_validation_needed = false;
9390         struct dm_crtc_state *dm_old_crtc_state;
9391
9392         trace_amdgpu_dm_atomic_check_begin(state);
9393
9394         ret = drm_atomic_helper_check_modeset(dev, state);
9395         if (ret)
9396                 goto fail;
9397
9398         /* Check connector changes */
9399         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9400                 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
9401                 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
9402
9403                 /* Skip connectors that are disabled or part of modeset already. */
9404                 if (!old_con_state->crtc && !new_con_state->crtc)
9405                         continue;
9406
9407                 if (!new_con_state->crtc)
9408                         continue;
9409
9410                 new_crtc_state = drm_atomic_get_crtc_state(state, new_con_state->crtc);
9411                 if (IS_ERR(new_crtc_state)) {
9412                         ret = PTR_ERR(new_crtc_state);
9413                         goto fail;
9414                 }
9415
9416                 if (dm_old_con_state->abm_level !=
9417                     dm_new_con_state->abm_level)
9418                         new_crtc_state->connectors_changed = true;
9419         }
9420
9421 #if defined(CONFIG_DRM_AMD_DC_DCN)
9422         if (adev->asic_type >= CHIP_NAVI10) {
9423                 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9424                         if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
9425                                 ret = add_affected_mst_dsc_crtcs(state, crtc);
9426                                 if (ret)
9427                                         goto fail;
9428                         }
9429                 }
9430         }
9431 #endif
9432         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9433                 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9434
9435                 if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
9436                     !new_crtc_state->color_mgmt_changed &&
9437                     old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled &&
9438                         dm_old_crtc_state->dsc_force_changed == false)
9439                         continue;
9440
9441                 if (!new_crtc_state->enable)
9442                         continue;
9443
9444                 ret = drm_atomic_add_affected_connectors(state, crtc);
9445                 if (ret)
9446                         return ret;
9447
9448                 ret = drm_atomic_add_affected_planes(state, crtc);
9449                 if (ret)
9450                         goto fail;
9451
9452                 if (dm_old_crtc_state->dsc_force_changed)
9453                         new_crtc_state->mode_changed = true;
9454         }
9455
9456         /*
9457          * Add all primary and overlay planes on the CRTC to the state
9458          * whenever a plane is enabled to maintain correct z-ordering
9459          * and to enable fast surface updates.
9460          */
9461         drm_for_each_crtc(crtc, dev) {
9462                 bool modified = false;
9463
9464                 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
9465                         if (plane->type == DRM_PLANE_TYPE_CURSOR)
9466                                 continue;
9467
9468                         if (new_plane_state->crtc == crtc ||
9469                             old_plane_state->crtc == crtc) {
9470                                 modified = true;
9471                                 break;
9472                         }
9473                 }
9474
9475                 if (!modified)
9476                         continue;
9477
9478                 drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
9479                         if (plane->type == DRM_PLANE_TYPE_CURSOR)
9480                                 continue;
9481
9482                         new_plane_state =
9483                                 drm_atomic_get_plane_state(state, plane);
9484
9485                         if (IS_ERR(new_plane_state)) {
9486                                 ret = PTR_ERR(new_plane_state);
9487                                 goto fail;
9488                         }
9489                 }
9490         }
9491
9492         /* Remove exiting planes if they are modified */
9493         for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
9494                 ret = dm_update_plane_state(dc, state, plane,
9495                                             old_plane_state,
9496                                             new_plane_state,
9497                                             false,
9498                                             &lock_and_validation_needed);
9499                 if (ret)
9500                         goto fail;
9501         }
9502
9503         /* Disable all crtcs which require disable */
9504         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9505                 ret = dm_update_crtc_state(&adev->dm, state, crtc,
9506                                            old_crtc_state,
9507                                            new_crtc_state,
9508                                            false,
9509                                            &lock_and_validation_needed);
9510                 if (ret)
9511                         goto fail;
9512         }
9513
9514         /* Enable all crtcs which require enable */
9515         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9516                 ret = dm_update_crtc_state(&adev->dm, state, crtc,
9517                                            old_crtc_state,
9518                                            new_crtc_state,
9519                                            true,
9520                                            &lock_and_validation_needed);
9521                 if (ret)
9522                         goto fail;
9523         }
9524
9525         /* Add new/modified planes */
9526         for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
9527                 ret = dm_update_plane_state(dc, state, plane,
9528                                             old_plane_state,
9529                                             new_plane_state,
9530                                             true,
9531                                             &lock_and_validation_needed);
9532                 if (ret)
9533                         goto fail;
9534         }
9535
9536         /* Run this here since we want to validate the streams we created */
9537         ret = drm_atomic_helper_check_planes(dev, state);
9538         if (ret)
9539                 goto fail;
9540
9541         /* Check cursor planes scaling */
9542         for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
9543                 ret = dm_check_crtc_cursor(state, crtc, new_crtc_state);
9544                 if (ret)
9545                         goto fail;
9546         }
9547
9548         if (state->legacy_cursor_update) {
9549                 /*
9550                  * This is a fast cursor update coming from the plane update
9551                  * helper, check if it can be done asynchronously for better
9552                  * performance.
9553                  */
9554                 state->async_update =
9555                         !drm_atomic_helper_async_check(dev, state);
9556
9557                 /*
9558                  * Skip the remaining global validation if this is an async
9559                  * update. Cursor updates can be done without affecting
9560                  * state or bandwidth calcs and this avoids the performance
9561                  * penalty of locking the private state object and
9562                  * allocating a new dc_state.
9563                  */
9564                 if (state->async_update)
9565                         return 0;
9566         }
9567
9568         /* Check scaling and underscan changes*/
9569         /* TODO Removed scaling changes validation due to inability to commit
9570          * new stream into context w\o causing full reset. Need to
9571          * decide how to handle.
9572          */
9573         for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9574                 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
9575                 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
9576                 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
9577
9578                 /* Skip any modesets/resets */
9579                 if (!acrtc || drm_atomic_crtc_needs_modeset(
9580                                 drm_atomic_get_new_crtc_state(state, &acrtc->base)))
9581                         continue;
9582
9583                 /* Skip any thing not scale or underscan changes */
9584                 if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
9585                         continue;
9586
9587                 lock_and_validation_needed = true;
9588         }
9589
9590         /**
9591          * Streams and planes are reset when there are changes that affect
9592          * bandwidth. Anything that affects bandwidth needs to go through
9593          * DC global validation to ensure that the configuration can be applied
9594          * to hardware.
9595          *
9596          * We have to currently stall out here in atomic_check for outstanding
9597          * commits to finish in this case because our IRQ handlers reference
9598          * DRM state directly - we can end up disabling interrupts too early
9599          * if we don't.
9600          *
9601          * TODO: Remove this stall and drop DM state private objects.
9602          */
9603         if (lock_and_validation_needed) {
9604                 ret = dm_atomic_get_state(state, &dm_state);
9605                 if (ret)
9606                         goto fail;
9607
9608                 ret = do_aquire_global_lock(dev, state);
9609                 if (ret)
9610                         goto fail;
9611
9612 #if defined(CONFIG_DRM_AMD_DC_DCN)
9613                 if (!compute_mst_dsc_configs_for_state(state, dm_state->context))
9614                         goto fail;
9615
9616                 ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context);
9617                 if (ret)
9618                         goto fail;
9619 #endif
9620
9621                 /*
9622                  * Perform validation of MST topology in the state:
9623                  * We need to perform MST atomic check before calling
9624                  * dc_validate_global_state(), or there is a chance
9625                  * to get stuck in an infinite loop and hang eventually.
9626                  */
9627                 ret = drm_dp_mst_atomic_check(state);
9628                 if (ret)
9629                         goto fail;
9630                 status = dc_validate_global_state(dc, dm_state->context, false);
9631                 if (status != DC_OK) {
9632                         DC_LOG_WARNING("DC global validation failure: %s (%d)",
9633                                        dc_status_to_str(status), status);
9634                         ret = -EINVAL;
9635                         goto fail;
9636                 }
9637         } else {
9638                 /*
9639                  * The commit is a fast update. Fast updates shouldn't change
9640                  * the DC context, affect global validation, and can have their
9641                  * commit work done in parallel with other commits not touching
9642                  * the same resource. If we have a new DC context as part of
9643                  * the DM atomic state from validation we need to free it and
9644                  * retain the existing one instead.
9645                  *
9646                  * Furthermore, since the DM atomic state only contains the DC
9647                  * context and can safely be annulled, we can free the state
9648                  * and clear the associated private object now to free
9649                  * some memory and avoid a possible use-after-free later.
9650                  */
9651
9652                 for (i = 0; i < state->num_private_objs; i++) {
9653                         struct drm_private_obj *obj = state->private_objs[i].ptr;
9654
9655                         if (obj->funcs == adev->dm.atomic_obj.funcs) {
9656                                 int j = state->num_private_objs-1;
9657
9658                                 dm_atomic_destroy_state(obj,
9659                                                 state->private_objs[i].state);
9660
9661                                 /* If i is not at the end of the array then the
9662                                  * last element needs to be moved to where i was
9663                                  * before the array can safely be truncated.
9664                                  */
9665                                 if (i != j)
9666                                         state->private_objs[i] =
9667                                                 state->private_objs[j];
9668
9669                                 state->private_objs[j].ptr = NULL;
9670                                 state->private_objs[j].state = NULL;
9671                                 state->private_objs[j].old_state = NULL;
9672                                 state->private_objs[j].new_state = NULL;
9673
9674                                 state->num_private_objs = j;
9675                                 break;
9676                         }
9677                 }
9678         }
9679
9680         /* Store the overall update type for use later in atomic check. */
9681         for_each_new_crtc_in_state (state, crtc, new_crtc_state, i) {
9682                 struct dm_crtc_state *dm_new_crtc_state =
9683                         to_dm_crtc_state(new_crtc_state);
9684
9685                 dm_new_crtc_state->update_type = lock_and_validation_needed ?
9686                                                          UPDATE_TYPE_FULL :
9687                                                          UPDATE_TYPE_FAST;
9688         }
9689
9690         /* Must be success */
9691         WARN_ON(ret);
9692
9693         trace_amdgpu_dm_atomic_check_finish(state, ret);
9694
9695         return ret;
9696
9697 fail:
9698         if (ret == -EDEADLK)
9699                 DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
9700         else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
9701                 DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
9702         else
9703                 DRM_DEBUG_DRIVER("Atomic check failed with err: %d \n", ret);
9704
9705         trace_amdgpu_dm_atomic_check_finish(state, ret);
9706
9707         return ret;
9708 }
9709
9710 static bool is_dp_capable_without_timing_msa(struct dc *dc,
9711                                              struct amdgpu_dm_connector *amdgpu_dm_connector)
9712 {
9713         uint8_t dpcd_data;
9714         bool capable = false;
9715
9716         if (amdgpu_dm_connector->dc_link &&
9717                 dm_helpers_dp_read_dpcd(
9718                                 NULL,
9719                                 amdgpu_dm_connector->dc_link,
9720                                 DP_DOWN_STREAM_PORT_COUNT,
9721                                 &dpcd_data,
9722                                 sizeof(dpcd_data))) {
9723                 capable = (dpcd_data & DP_MSA_TIMING_PAR_IGNORED) ? true:false;
9724         }
9725
9726         return capable;
9727 }
9728 void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
9729                                         struct edid *edid)
9730 {
9731         int i;
9732         bool edid_check_required;
9733         struct detailed_timing *timing;
9734         struct detailed_non_pixel *data;
9735         struct detailed_data_monitor_range *range;
9736         struct amdgpu_dm_connector *amdgpu_dm_connector =
9737                         to_amdgpu_dm_connector(connector);
9738         struct dm_connector_state *dm_con_state = NULL;
9739
9740         struct drm_device *dev = connector->dev;
9741         struct amdgpu_device *adev = drm_to_adev(dev);
9742         bool freesync_capable = false;
9743
9744         if (!connector->state) {
9745                 DRM_ERROR("%s - Connector has no state", __func__);
9746                 goto update;
9747         }
9748
9749         if (!edid) {
9750                 dm_con_state = to_dm_connector_state(connector->state);
9751
9752                 amdgpu_dm_connector->min_vfreq = 0;
9753                 amdgpu_dm_connector->max_vfreq = 0;
9754                 amdgpu_dm_connector->pixel_clock_mhz = 0;
9755
9756                 goto update;
9757         }
9758
9759         dm_con_state = to_dm_connector_state(connector->state);
9760
9761         edid_check_required = false;
9762         if (!amdgpu_dm_connector->dc_sink) {
9763                 DRM_ERROR("dc_sink NULL, could not add free_sync module.\n");
9764                 goto update;
9765         }
9766         if (!adev->dm.freesync_module)
9767                 goto update;
9768         /*
9769          * if edid non zero restrict freesync only for dp and edp
9770          */
9771         if (edid) {
9772                 if (amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
9773                         || amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_EDP) {
9774                         edid_check_required = is_dp_capable_without_timing_msa(
9775                                                 adev->dm.dc,
9776                                                 amdgpu_dm_connector);
9777                 }
9778         }
9779         if (edid_check_required == true && (edid->version > 1 ||
9780            (edid->version == 1 && edid->revision > 1))) {
9781                 for (i = 0; i < 4; i++) {
9782
9783                         timing  = &edid->detailed_timings[i];
9784                         data    = &timing->data.other_data;
9785                         range   = &data->data.range;
9786                         /*
9787                          * Check if monitor has continuous frequency mode
9788                          */
9789                         if (data->type != EDID_DETAIL_MONITOR_RANGE)
9790                                 continue;
9791                         /*
9792                          * Check for flag range limits only. If flag == 1 then
9793                          * no additional timing information provided.
9794                          * Default GTF, GTF Secondary curve and CVT are not
9795                          * supported
9796                          */
9797                         if (range->flags != 1)
9798                                 continue;
9799
9800                         amdgpu_dm_connector->min_vfreq = range->min_vfreq;
9801                         amdgpu_dm_connector->max_vfreq = range->max_vfreq;
9802                         amdgpu_dm_connector->pixel_clock_mhz =
9803                                 range->pixel_clock_mhz * 10;
9804                         break;
9805                 }
9806
9807                 if (amdgpu_dm_connector->max_vfreq -
9808                     amdgpu_dm_connector->min_vfreq > 10) {
9809
9810                         freesync_capable = true;
9811                 }
9812         }
9813
9814 update:
9815         if (dm_con_state)
9816                 dm_con_state->freesync_capable = freesync_capable;
9817
9818         if (connector->vrr_capable_property)
9819                 drm_connector_set_vrr_capable_property(connector,
9820                                                        freesync_capable);
9821 }
9822
9823 static void amdgpu_dm_set_psr_caps(struct dc_link *link)
9824 {
9825         uint8_t dpcd_data[EDP_PSR_RECEIVER_CAP_SIZE];
9826
9827         if (!(link->connector_signal & SIGNAL_TYPE_EDP))
9828                 return;
9829         if (link->type == dc_connection_none)
9830                 return;
9831         if (dm_helpers_dp_read_dpcd(NULL, link, DP_PSR_SUPPORT,
9832                                         dpcd_data, sizeof(dpcd_data))) {
9833                 link->dpcd_caps.psr_caps.psr_version = dpcd_data[0];
9834
9835                 if (dpcd_data[0] == 0) {
9836                         link->psr_settings.psr_version = DC_PSR_VERSION_UNSUPPORTED;
9837                         link->psr_settings.psr_feature_enabled = false;
9838                 } else {
9839                         link->psr_settings.psr_version = DC_PSR_VERSION_1;
9840                         link->psr_settings.psr_feature_enabled = true;
9841                 }
9842
9843                 DRM_INFO("PSR support:%d\n", link->psr_settings.psr_feature_enabled);
9844         }
9845 }
9846
9847 /*
9848  * amdgpu_dm_link_setup_psr() - configure psr link
9849  * @stream: stream state
9850  *
9851  * Return: true if success
9852  */
9853 static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream)
9854 {
9855         struct dc_link *link = NULL;
9856         struct psr_config psr_config = {0};
9857         struct psr_context psr_context = {0};
9858         bool ret = false;
9859
9860         if (stream == NULL)
9861                 return false;
9862
9863         link = stream->link;
9864
9865         psr_config.psr_version = link->dpcd_caps.psr_caps.psr_version;
9866
9867         if (psr_config.psr_version > 0) {
9868                 psr_config.psr_exit_link_training_required = 0x1;
9869                 psr_config.psr_frame_capture_indication_req = 0;
9870                 psr_config.psr_rfb_setup_time = 0x37;
9871                 psr_config.psr_sdp_transmit_line_num_deadline = 0x20;
9872                 psr_config.allow_smu_optimizations = 0x0;
9873
9874                 ret = dc_link_setup_psr(link, stream, &psr_config, &psr_context);
9875
9876         }
9877         DRM_DEBUG_DRIVER("PSR link: %d\n",      link->psr_settings.psr_feature_enabled);
9878
9879         return ret;
9880 }
9881
9882 /*
9883  * amdgpu_dm_psr_enable() - enable psr f/w
9884  * @stream: stream state
9885  *
9886  * Return: true if success
9887  */
9888 bool amdgpu_dm_psr_enable(struct dc_stream_state *stream)
9889 {
9890         struct dc_link *link = stream->link;
9891         unsigned int vsync_rate_hz = 0;
9892         struct dc_static_screen_params params = {0};
9893         /* Calculate number of static frames before generating interrupt to
9894          * enter PSR.
9895          */
9896         // Init fail safe of 2 frames static
9897         unsigned int num_frames_static = 2;
9898
9899         DRM_DEBUG_DRIVER("Enabling psr...\n");
9900
9901         vsync_rate_hz = div64_u64(div64_u64((
9902                         stream->timing.pix_clk_100hz * 100),
9903                         stream->timing.v_total),
9904                         stream->timing.h_total);
9905
9906         /* Round up
9907          * Calculate number of frames such that at least 30 ms of time has
9908          * passed.
9909          */
9910         if (vsync_rate_hz != 0) {
9911                 unsigned int frame_time_microsec = 1000000 / vsync_rate_hz;
9912                 num_frames_static = (30000 / frame_time_microsec) + 1;
9913         }
9914
9915         params.triggers.cursor_update = true;
9916         params.triggers.overlay_update = true;
9917         params.triggers.surface_update = true;
9918         params.num_frames = num_frames_static;
9919
9920         dc_stream_set_static_screen_params(link->ctx->dc,
9921                                            &stream, 1,
9922                                            &params);
9923
9924         return dc_link_set_psr_allow_active(link, true, false, false);
9925 }
9926
9927 /*
9928  * amdgpu_dm_psr_disable() - disable psr f/w
9929  * @stream:  stream state
9930  *
9931  * Return: true if success
9932  */
9933 static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream)
9934 {
9935
9936         DRM_DEBUG_DRIVER("Disabling psr...\n");
9937
9938         return dc_link_set_psr_allow_active(stream->link, false, true, false);
9939 }
9940
9941 /*
9942  * amdgpu_dm_psr_disable() - disable psr f/w
9943  * if psr is enabled on any stream
9944  *
9945  * Return: true if success
9946  */
9947 static bool amdgpu_dm_psr_disable_all(struct amdgpu_display_manager *dm)
9948 {
9949         DRM_DEBUG_DRIVER("Disabling psr if psr is enabled on any stream\n");
9950         return dc_set_psr_allow_active(dm->dc, false);
9951 }
9952
9953 void amdgpu_dm_trigger_timing_sync(struct drm_device *dev)
9954 {
9955         struct amdgpu_device *adev = drm_to_adev(dev);
9956         struct dc *dc = adev->dm.dc;
9957         int i;
9958
9959         mutex_lock(&adev->dm.dc_lock);
9960         if (dc->current_state) {
9961                 for (i = 0; i < dc->current_state->stream_count; ++i)
9962                         dc->current_state->streams[i]
9963                                 ->triggered_crtc_reset.enabled =
9964                                 adev->dm.force_timing_sync;
9965
9966                 dm_enable_per_frame_crtc_master_sync(dc->current_state);
9967                 dc_trigger_sync(dc, dc->current_state);
9968         }
9969         mutex_unlock(&adev->dm.dc_lock);
9970 }
9971
/*
 * dm_write_reg_func() - MMIO register write helper used by DC.
 * @ctx: DC context providing the cgs device handle and perf trace
 * @address: register offset to write
 * @value: value to write
 * @func_name: name of the caller (not referenced in this body; part of
 *             the shared read/write helper signature)
 *
 * Optionally rejects writes to offset 0 when DM_CHECK_ADDR_0 is
 * defined, then performs the write and records it in the wreg trace.
 */
void dm_write_reg_func(const struct dc_context *ctx, uint32_t address,
		       uint32_t value, const char *func_name)
{
#ifdef DM_CHECK_ADDR_0
	if (address == 0) {
		DC_ERR("invalid register write. address = 0");
		return;
	}
#endif
	cgs_write_register(ctx->cgs_device, address, value);
	trace_amdgpu_dc_wreg(&ctx->perf_trace->write_count, address, value);
}
9984
/*
 * dm_read_reg_func() - MMIO register read helper used by DC.
 * @ctx: DC context providing the cgs device handle and perf trace
 * @address: register offset to read
 * @func_name: name of the caller (not referenced in this body; part of
 *             the shared read/write helper signature)
 *
 * Return: the register value read via cgs, or 0 when the read is
 * rejected (address 0 under DM_CHECK_ADDR_0, or during a DMUB register
 * gather).
 */
uint32_t dm_read_reg_func(const struct dc_context *ctx, uint32_t address,
			  const char *func_name)
{
	uint32_t value;
#ifdef DM_CHECK_ADDR_0
	if (address == 0) {
		DC_ERR("invalid register read; address = 0\n");
		return 0;
	}
#endif

	/*
	 * Reject direct reads while the DMUB helper is gathering register
	 * accesses and is not burst-writing; the ASSERT flags this as a
	 * programming error. NOTE(review): intent inferred from the field
	 * names — confirm against the dc_dmub_srv reg_helper_offload code.
	 */
	if (ctx->dmub_srv &&
	    ctx->dmub_srv->reg_helper_offload.gather_in_progress &&
	    !ctx->dmub_srv->reg_helper_offload.should_burst_write) {
		ASSERT(false);
		return 0;
	}

	value = cgs_read_register(ctx->cgs_device, address);

	trace_amdgpu_dc_rreg(&ctx->perf_trace->read_count, address, value);

	return value;
}